text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# Copyright 2009 New England Biolabs <davisp@neb.com>
#
# This file is part of the BioNEB package released
# under the MIT license.
#
import re
import stream
class FastaRecord(object):
    """One FASTA record: a list of parsed headers plus its sequence.

    ``headers`` is a list of ``(ident, description)`` pairs as produced by
    ``parse_description``.  ``seq`` is either a fully-joined string or, in
    streaming mode, a lazy iterator of sequence lines.
    """

    def __init__(self, headers, seq):
        self.headers = headers
        self.seq = seq

    def __iter__(self):
        # Only a live line iterator can be handed back; a string means
        # the sequence was already consumed and joined.
        if not isinstance(self.seq, basestring):
            return self.seq
        raise ValueError("Sequence has already been parsed.")

    @property
    def ident(self):
        """Return the first header's identity."""
        return self.headers[0][0]

    @property
    def desc(self):
        """Return the first header's description."""
        return self.headers[0][1]
def parse(filename=None, handle=None, stream_seq=False):
    """Yield FastaRecord objects from a FASTA file or open handle.

    When ``stream_seq`` is False (the default) each record's sequence is
    joined into a single string; otherwise the sequence is left as a lazy
    iterator over the following lines.
    """
    handle = stream.Stream(filename, handle)
    for line in handle:
        # Skip anything that is not a '>' header line.
        if not line.lstrip().startswith(">"):
            continue
        headers = parse_description(line.strip())
        seq = parse_sequence(handle)
        if not stream_seq:
            seq = "".join(seq)
        yield FastaRecord(headers, seq)
def parse_description(header):
    """Split a '>' FASTA header line into ``(ident, description)`` pairs.

    NCBI multi-header lines joined with Ctrl-A ('\\x01') yield one pair
    per sub-header.  Each identifier is further decomposed by
    ``parse_idents``.  Raises ValueError if the line lacks a leading '>'.
    """
    if not header.startswith(">"):
        raise ValueError("Invalid header has no '>': %s" % header)
    parsed = []
    # str.split with no separator present returns the whole string as a
    # single element, so this covers both the single and multi case.
    for sub in header[1:].split('\x01'):
        pieces = sub.split(None, 1)
        if len(pieces) == 1:
            name, description = pieces[0], None
        else:
            name, description = pieces
        parsed.append((parse_idents(name), description))
    return parsed
# As specified in the Blast book
IDENT_TYPES = set("dbj emb gb gi ref pir prf sp pdb pat bbs lcl gnl".split())
def parse_idents(ident):
    """Parse a pipe-delimited NCBI identifier into a dict.

    Returns the identifier unchanged when it contains no '|'.  Otherwise
    groups the pipe-separated fields under the database tags listed in
    ``IDENT_TYPES``; repeated tags collect their values into a list.
    """
    bits = ident.split("|")
    if len(bits) == 1:
        return ident
    ret = {}
    while len(bits) > 0:
        itype = bits[0]
        parts = []
        for b in bits[1:]:
            if b in IDENT_TYPES:
                break
            parts.append(b)
        bits = bits[len(parts)+1:]
        # filter() returns a lazy iterator on Python 3, which would make
        # the len() call below raise TypeError; the list comprehension is
        # equivalent on Python 2 and portable.
        parts = [p for p in parts if p]
        if len(parts) == 1:
            parts = parts[0]
        if isinstance(ret.get(itype, None), list):
            ret[itype].append(parts)
        elif itype in ret:
            ret[itype] = [ret[itype], parts]
        else:
            ret[itype] = parts
    return ret
def parse_sequence(handle):
    """Yield stripped sequence lines until the next FASTA header.

    When a '>' header is encountered it is pushed back onto ``handle``
    via ``handle.undo`` so the caller's loop can see it again.
    """
    for line in handle:
        if line.lstrip()[:1] == ">":
            handle.undo(line)
            # PEP 479: raising StopIteration inside a generator becomes a
            # RuntimeError on Python 3.7+; a plain return is equivalent.
            return
        yield line.strip()
"BLAST"
] | c494318ab45e95c5cda9ebe03ac49f5bf72cbd248939713f1809ea6e46d6356c |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflowcx_v3.services.pages import PagesAsyncClient
from google.cloud.dialogflowcx_v3.services.pages import PagesClient
from google.cloud.dialogflowcx_v3.services.pages import pagers
from google.cloud.dialogflowcx_v3.services.pages import transports
from google.cloud.dialogflowcx_v3.types import fulfillment
from google.cloud.dialogflowcx_v3.types import page
from google.cloud.dialogflowcx_v3.types import page as gcdc_page
from google.cloud.dialogflowcx_v3.types import response_message
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Dummy mTLS client-cert callback returning a static (cert, key) pair."""
    return (b"cert bytes", b"key bytes")
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This helper rewrites the default endpoint so the client can produce a
# distinct mtls endpoint for endpoint-selection tests.
def modify_default_endpoint(client):
    """Return a sentinel endpoint for localhost defaults, else pass through."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their
    mTLS variants and leaves None, already-mTLS, and non-Google hosts
    untouched."""
    cases = [
        (None, None),
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("api.example.com", "api.example.com"),
    ]
    for given, expected in cases:
        assert PagesClient._get_default_mtls_endpoint(given) == expected
@pytest.mark.parametrize("client_class", [PagesClient, PagesAsyncClient,])
def test_pages_client_from_service_account_info(client_class):
    """from_service_account_info builds a client whose transport carries the
    credentials produced by the (mocked) service_account factory."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.PagesGrpcTransport, "grpc"),
        (transports.PagesGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_pages_client_service_account_always_use_jwt(transport_class, transport_name):
    """Transports call with_always_use_jwt_access(True) only when the
    always_use_jwt_access flag is set at construction time."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    # With the flag off, the credentials must be used as-is.
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [PagesClient, PagesAsyncClient,])
def test_pages_client_from_service_account_file(client_class):
    """from_service_account_file (and its _json alias) build clients backed
    by the credentials loaded from the given file path."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        # from_service_account_json is an alias for the file-based factory.
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "dialogflow.googleapis.com:443"
def test_pages_client_get_transport_class():
    """The default transport class is gRPC, both implicitly and by name."""
    assert PagesClient.get_transport_class() in (transports.PagesGrpcTransport,)
    assert PagesClient.get_transport_class("grpc") is transports.PagesGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (PagesClient, transports.PagesGrpcTransport, "grpc"),
        (PagesAsyncClient, transports.PagesGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
@mock.patch.object(
    PagesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PagesClient)
)
@mock.patch.object(
    PagesAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PagesAsyncClient)
)
def test_pages_client_client_options(client_class, transport_class, transport_name):
    """Verify how client_options and the GOOGLE_API_USE_MTLS_ENDPOINT /
    GOOGLE_API_USE_CLIENT_CERTIFICATE environment variables drive transport
    construction: endpoint selection, quota project, and error cases."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(PagesClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(PagesClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (PagesClient, transports.PagesGrpcTransport, "grpc", "true"),
        (
            PagesAsyncClient,
            transports.PagesGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (PagesClient, transports.PagesGrpcTransport, "grpc", "false"),
        (
            PagesAsyncClient,
            transports.PagesGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    PagesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PagesClient)
)
@mock.patch.object(
    PagesAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PagesAsyncClient)
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_pages_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client switches to the
    mTLS endpoint iff a client certificate is available and
    GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class", [PagesClient, PagesAsyncClient])
@mock.patch.object(
    PagesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PagesClient)
)
@mock.patch.object(
    PagesAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PagesAsyncClient)
)
def test_pages_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source returns the endpoint / cert-source
    pair implied by client options and the two mTLS environment variables."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (PagesClient, transports.PagesGrpcTransport, "grpc"),
        (PagesAsyncClient, transports.PagesGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_pages_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied via client_options are forwarded to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (PagesClient, transports.PagesGrpcTransport, "grpc", grpc_helpers),
        (
            PagesAsyncClient,
            transports.PagesGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_pages_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file in client_options is forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_pages_client_client_options_from_dict():
    """client_options may be given as a plain dict instead of a
    ClientOptions instance; the api_endpoint still reaches the transport."""
    with mock.patch(
        "google.cloud.dialogflowcx_v3.services.pages.transports.PagesGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = PagesClient(client_options={"api_endpoint": "squid.clam.whelk"})
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (PagesClient, transports.PagesGrpcTransport, "grpc", grpc_helpers),
        (
            PagesAsyncClient,
            transports.PagesGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_pages_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from credentials_file (not ADC) must be the ones
    used when the gRPC channel is created, with the expected host, scopes,
    and channel options."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "dialogflow.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            scopes=None,
            default_host="dialogflow.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("request_type", [page.ListPagesRequest, dict,])
def test_list_pages(request_type, transport: str = "grpc"):
    """list_pages sends a ListPagesRequest over the stub and wraps the
    response in a ListPagesPager exposing next_page_token."""
    client = PagesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = page.ListPagesResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_pages(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.ListPagesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListPagesPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_pages_empty_call():
    """Calling list_pages with no arguments still sends a default
    ListPagesRequest (coverage failsafe for the request==None path)."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PagesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        client.list_pages()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.ListPagesRequest()
@pytest.mark.asyncio
async def test_list_pages_async(
    transport: str = "grpc_asyncio", request_type=page.ListPagesRequest
):
    """Async variant of test_list_pages: the awaited call produces a
    ListPagesAsyncPager with the expected next_page_token."""
    client = PagesAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            page.ListPagesResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_pages(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.ListPagesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListPagesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_pages_async_from_dict():
    """Re-run the async list_pages test with a dict-typed request body."""
    await test_list_pages_async(request_type=dict)
def test_list_pages_field_headers():
    """Routing fields that are part of the URI (request.parent) must be
    echoed in the x-goog-request-params metadata header."""
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = page.ListPagesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        call.return_value = page.ListPagesResponse()
        client.list_pages(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_pages_field_headers_async():
    """Async variant: request.parent is propagated into the
    x-goog-request-params metadata header."""
    client = PagesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = page.ListPagesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            page.ListPagesResponse()
        )
        await client.list_pages(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_pages_flattened():
    """Flattened keyword arguments (parent=...) are copied into the
    request message passed to the stub."""
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = page.ListPagesResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_pages(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_list_pages_flattened_error():
    """Passing both a request object and flattened fields is rejected."""
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_pages(
            page.ListPagesRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_pages_flattened_async():
    """Flattened keyword arguments populate the request object on the
    async client just as they do on the sync client."""
    client = PagesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        # Designate an appropriate return value for the call.  Only the
        # awaitable FakeUnaryUnaryCall wrapper is used by the async client;
        # the earlier bare ListPagesResponse assignment was dead code and
        # has been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            page.ListPagesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_pages(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_pages_flattened_error_async():
    """Async variant: request object plus flattened fields is rejected."""
    client = PagesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_pages(
            page.ListPagesRequest(), parent="parent_value",
        )
def test_list_pages_pager(transport_name: str = "grpc"):
    """The sync pager transparently walks every page of results and
    carries the routing metadata computed from the request."""
    client = PagesClient(
        # Instantiate the credentials: passing the bare AnonymousCredentials
        # class (as before) hands the client a type instead of a credentials
        # object, which only worked by accident of the mocked transport.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            page.ListPagesResponse(
                pages=[page.Page(), page.Page(), page.Page(),], next_page_token="abc",
            ),
            page.ListPagesResponse(pages=[], next_page_token="def",),
            page.ListPagesResponse(pages=[page.Page(),], next_page_token="ghi",),
            page.ListPagesResponse(pages=[page.Page(), page.Page(),],),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_pages(request={})
        assert pager._metadata == metadata
        # Six Page messages spread over four responses stream through.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, page.Page) for i in results)
def test_list_pages_pages(transport_name: str = "grpc"):
    """Iterating pager.pages yields one raw ListPagesResponse per backend
    page, in order, with the matching next_page_token."""
    client = PagesClient(
        # Instantiate the credentials instead of passing the bare class.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            page.ListPagesResponse(
                pages=[page.Page(), page.Page(), page.Page(),], next_page_token="abc",
            ),
            page.ListPagesResponse(pages=[], next_page_token="def",),
            page.ListPagesResponse(pages=[page.Page(),], next_page_token="ghi",),
            page.ListPagesResponse(pages=[page.Page(), page.Page(),],),
            RuntimeError,
        )
        pages = list(client.list_pages(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_pages_async_pager():
    """The async pager supports `async for` across every Page message."""
    client = PagesAsyncClient(
        # Instantiate the credentials instead of passing the bare class.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_pages), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            page.ListPagesResponse(
                pages=[page.Page(), page.Page(), page.Page(),], next_page_token="abc",
            ),
            page.ListPagesResponse(pages=[], next_page_token="def",),
            page.ListPagesResponse(pages=[page.Page(),], next_page_token="ghi",),
            page.ListPagesResponse(pages=[page.Page(), page.Page(),],),
            RuntimeError,
        )
        async_pager = await client.list_pages(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, page.Page) for i in responses)
@pytest.mark.asyncio
async def test_list_pages_async_pages():
    """`async for` over pager.pages yields one raw response per backend
    page with the matching next_page_token."""
    client = PagesAsyncClient(
        # Instantiate the credentials instead of passing the bare class.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_pages), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            page.ListPagesResponse(
                pages=[page.Page(), page.Page(), page.Page(),], next_page_token="abc",
            ),
            page.ListPagesResponse(pages=[], next_page_token="def",),
            page.ListPagesResponse(pages=[page.Page(),], next_page_token="ghi",),
            page.ListPagesResponse(pages=[page.Page(), page.Page(),],),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_pages(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [page.GetPageRequest, dict,])
def test_get_page(request_type, transport: str = "grpc"):
    """get_page forwards the request to the gRPC stub and returns the decoded Page."""
    client = PagesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = page.Page(
            name="name_value",
            display_name="display_name_value",
            transition_route_groups=["transition_route_groups_value"],
        )
        response = client.get_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.GetPageRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, page.Page)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.transition_route_groups == ["transition_route_groups_value"]
def test_get_page_empty_call():
    """Calling get_page() with no arguments sends a default GetPageRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PagesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_page), "__call__") as call:
        client.get_page()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.GetPageRequest()
@pytest.mark.asyncio
async def test_get_page_async(
    transport: str = "grpc_asyncio", request_type=page.GetPageRequest
):
    """Async get_page forwards the request and decodes the awaited Page."""
    client = PagesAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            page.Page(
                name="name_value",
                display_name="display_name_value",
                transition_route_groups=["transition_route_groups_value"],
            )
        )
        response = await client.get_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.GetPageRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, page.Page)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.transition_route_groups == ["transition_route_groups_value"]
@pytest.mark.asyncio
async def test_get_page_async_from_dict():
    """Exercise the async get_page path with a plain dict request."""
    await test_get_page_async(request_type=dict)
def test_get_page_field_headers():
    """The request's name is propagated as an x-goog-request-params header."""
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = page.GetPageRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_page), "__call__") as call:
        call.return_value = page.Page()
        client.get_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_page_field_headers_async():
    """Async variant: request name becomes an x-goog-request-params header."""
    client = PagesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = page.GetPageRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_page), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(page.Page())
        await client.get_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_page_flattened():
    """Flattened keyword args are copied into the outgoing request object."""
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = page.Page()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_page(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_page_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_page(
            page.GetPageRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_page_flattened_async():
    """Async variant: flattened keyword args are copied into the request.

    Fix: removed a dead `call.return_value = page.Page()` that was
    immediately overwritten by the FakeUnaryUnaryCall assignment.
    """
    client = PagesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(page.Page())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_page(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_page_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = PagesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_page(
            page.GetPageRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [gcdc_page.CreatePageRequest, dict,])
def test_create_page(request_type, transport: str = "grpc"):
    """create_page forwards the request to the stub and returns the decoded Page."""
    client = PagesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_page.Page(
            name="name_value",
            display_name="display_name_value",
            transition_route_groups=["transition_route_groups_value"],
        )
        response = client.create_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_page.CreatePageRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_page.Page)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.transition_route_groups == ["transition_route_groups_value"]
def test_create_page_empty_call():
    """Calling create_page() with no arguments sends a default CreatePageRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PagesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_page), "__call__") as call:
        client.create_page()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_page.CreatePageRequest()
@pytest.mark.asyncio
async def test_create_page_async(
    transport: str = "grpc_asyncio", request_type=gcdc_page.CreatePageRequest
):
    """Async create_page forwards the request and decodes the awaited Page."""
    client = PagesAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcdc_page.Page(
                name="name_value",
                display_name="display_name_value",
                transition_route_groups=["transition_route_groups_value"],
            )
        )
        response = await client.create_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_page.CreatePageRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_page.Page)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.transition_route_groups == ["transition_route_groups_value"]
@pytest.mark.asyncio
async def test_create_page_async_from_dict():
    """Exercise the async create_page path with a plain dict request."""
    await test_create_page_async(request_type=dict)
def test_create_page_field_headers():
    """The request's parent is propagated as an x-goog-request-params header."""
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_page.CreatePageRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_page), "__call__") as call:
        call.return_value = gcdc_page.Page()
        client.create_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_page_field_headers_async():
    """Async variant: request parent becomes an x-goog-request-params header."""
    client = PagesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_page.CreatePageRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_page), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_page.Page())
        await client.create_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_page_flattened():
    """Flattened parent/page keyword args are copied into the request."""
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_page.Page()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_page(
            parent="parent_value", page=gcdc_page.Page(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].page
        mock_val = gcdc_page.Page(name="name_value")
        assert arg == mock_val
def test_create_page_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_page(
            gcdc_page.CreatePageRequest(),
            parent="parent_value",
            page=gcdc_page.Page(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_page_flattened_async():
    """Async variant: flattened parent/page args are copied into the request.

    Fix: removed a dead `call.return_value = gcdc_page.Page()` that was
    immediately overwritten by the FakeUnaryUnaryCall assignment.
    """
    client = PagesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_page.Page())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_page(
            parent="parent_value", page=gcdc_page.Page(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].page
        mock_val = gcdc_page.Page(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_page_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = PagesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_page(
            gcdc_page.CreatePageRequest(),
            parent="parent_value",
            page=gcdc_page.Page(name="name_value"),
        )
@pytest.mark.parametrize("request_type", [gcdc_page.UpdatePageRequest, dict,])
def test_update_page(request_type, transport: str = "grpc"):
    """update_page forwards the request to the stub and returns the decoded Page."""
    client = PagesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_page.Page(
            name="name_value",
            display_name="display_name_value",
            transition_route_groups=["transition_route_groups_value"],
        )
        response = client.update_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_page.UpdatePageRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_page.Page)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.transition_route_groups == ["transition_route_groups_value"]
def test_update_page_empty_call():
    """Calling update_page() with no arguments sends a default UpdatePageRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PagesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_page), "__call__") as call:
        client.update_page()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_page.UpdatePageRequest()
@pytest.mark.asyncio
async def test_update_page_async(
    transport: str = "grpc_asyncio", request_type=gcdc_page.UpdatePageRequest
):
    """Async update_page forwards the request and decodes the awaited Page."""
    client = PagesAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcdc_page.Page(
                name="name_value",
                display_name="display_name_value",
                transition_route_groups=["transition_route_groups_value"],
            )
        )
        response = await client.update_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_page.UpdatePageRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_page.Page)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.transition_route_groups == ["transition_route_groups_value"]
@pytest.mark.asyncio
async def test_update_page_async_from_dict():
    """Exercise the async update_page path with a plain dict request."""
    await test_update_page_async(request_type=dict)
def test_update_page_field_headers():
    """The nested page.name is propagated as an x-goog-request-params header."""
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_page.UpdatePageRequest()
    request.page.name = "page.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_page), "__call__") as call:
        call.return_value = gcdc_page.Page()
        client.update_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "page.name=page.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_page_field_headers_async():
    """Async variant: nested page.name becomes an x-goog-request-params header."""
    client = PagesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_page.UpdatePageRequest()
    request.page.name = "page.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_page), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_page.Page())
        await client.update_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "page.name=page.name/value",) in kw["metadata"]
def test_update_page_flattened():
    """Flattened page/update_mask keyword args are copied into the request."""
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_page.Page()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_page(
            page=gcdc_page.Page(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].page
        mock_val = gcdc_page.Page(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
def test_update_page_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_page(
            gcdc_page.UpdatePageRequest(),
            page=gcdc_page.Page(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_page_flattened_async():
    """Async variant: flattened page/update_mask args are copied into the request.

    Fix: removed a dead `call.return_value = gcdc_page.Page()` that was
    immediately overwritten by the FakeUnaryUnaryCall assignment.
    """
    client = PagesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_page.Page())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_page(
            page=gcdc_page.Page(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].page
        mock_val = gcdc_page.Page(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_page_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = PagesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_page(
            gcdc_page.UpdatePageRequest(),
            page=gcdc_page.Page(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize("request_type", [page.DeletePageRequest, dict,])
def test_delete_page(request_type, transport: str = "grpc"):
    """delete_page forwards the request to the stub and returns None."""
    client = PagesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.DeletePageRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_page_empty_call():
    """Calling delete_page() with no arguments sends a default DeletePageRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PagesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_page), "__call__") as call:
        client.delete_page()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.DeletePageRequest()
@pytest.mark.asyncio
async def test_delete_page_async(
    transport: str = "grpc_asyncio", request_type=page.DeletePageRequest
):
    """Async delete_page forwards the request and resolves to None."""
    client = PagesAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.DeletePageRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_page_async_from_dict():
    """Exercise the async delete_page path with a plain dict request."""
    await test_delete_page_async(request_type=dict)
def test_delete_page_field_headers():
    """The request's name is propagated as an x-goog-request-params header."""
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = page.DeletePageRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_page), "__call__") as call:
        call.return_value = None
        client.delete_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_page_field_headers_async():
    """Async variant: request name becomes an x-goog-request-params header."""
    client = PagesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = page.DeletePageRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_page), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_page_flattened():
    """The flattened name keyword arg is copied into the outgoing request."""
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_page(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_delete_page_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_page(
            page.DeletePageRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_page_flattened_async():
    """Async variant: the flattened name keyword arg is copied into the request.

    Fix: removed a dead `call.return_value = None` that was immediately
    overwritten by the FakeUnaryUnaryCall assignment.
    """
    client = PagesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_page(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_page_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = PagesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_page(
            page.DeletePageRequest(), name="name_value",
        )
def test_credentials_transport_error():
    """Conflicting credential/transport/api_key combinations raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.PagesGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = PagesClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.PagesGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = PagesClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.PagesGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = PagesClient(client_options=options, transport=transport,)
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = PagesClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.PagesGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = PagesClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client accepts a pre-built transport and exposes it unchanged."""
    custom_transport = transports.PagesGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = PagesClient(transport=custom_transport)
    # The client must hold the exact instance it was given, not a copy.
    assert client.transport is custom_transport
def test_transport_get_channel():
    """Both the sync and asyncio gRPC transports expose a usable channel."""
    for transport_cls in (
        transports.PagesGrpcTransport,
        transports.PagesGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # Channel creation is lazy; accessing the property must yield one.
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [transports.PagesGrpcTransport, transports.PagesGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
    """Transports fall back to application default credentials when none given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """With no transport specified, the client uses the sync gRPC transport."""
    # A client should use the gRPC transport by default.
    client = PagesClient(credentials=ga_credentials.AnonymousCredentials(),)
    assert isinstance(client.transport, transports.PagesGrpcTransport,)
def test_pages_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.PagesTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_pages_base_transport():
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.dialogflowcx_v3.services.pages.transports.PagesTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.PagesTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "list_pages",
        "get_page",
        "create_page",
        "update_page",
        "delete_page",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    # close() is likewise unimplemented on the abstract base transport.
    with pytest.raises(NotImplementedError):
        transport.close()
def test_pages_base_transport_with_credentials_file():
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.dialogflowcx_v3.services.pages.transports.PagesTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.PagesTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        # The file must be loaded with the service's default OAuth scopes.
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id="octopus",
        )
def test_pages_base_transport_with_adc():
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.dialogflowcx_v3.services.pages.transports.PagesTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.PagesTransport()
        adc.assert_called_once()
def test_pages_auth_adc():
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        PagesClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.PagesGrpcTransport, transports.PagesGrpcAsyncIOTransport,],
)
def test_pages_transport_auth_adc(transport_class):
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # Explicit scopes are forwarded alongside the default scopes.
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.PagesGrpcTransport, grpc_helpers),
        (transports.PagesGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_pages_transport_create_channel(transport_class, grpc_helpers):
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # The channel must be created against the default endpoint with the
        # unbounded-message-size gRPC options.
        create_channel.assert_called_with(
            "dialogflow.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            scopes=["1", "2"],
            default_host="dialogflow.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.PagesGrpcTransport, transports.PagesGrpcAsyncIOTransport],
)
def test_pages_grpc_transport_client_cert_source_for_mtls(transport_class):
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            # The cert/key pair produced by the callback must be what the SSL
            # credentials are built from.
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_pages_host_no_port():
    """An endpoint without a port gets the default :443 appended."""
    opts = client_options.ClientOptions(api_endpoint="dialogflow.googleapis.com")
    client = PagesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=opts,
    )
    assert client.transport._host == "dialogflow.googleapis.com:443"
def test_pages_host_with_port():
    """An endpoint with an explicit port is used verbatim."""
    opts = client_options.ClientOptions(api_endpoint="dialogflow.googleapis.com:8000")
    client = PagesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=opts,
    )
    assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_pages_grpc_transport_channel():
    """A caller-supplied channel must be used as-is, with no SSL creds kept."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.PagesGrpcTransport(host="squid.clam.whelk", channel=channel,)
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: compare to the None singleton with `is`, not `==` (fixes E711).
    assert transport._ssl_channel_credentials is None
def test_pages_grpc_asyncio_transport_channel():
    """Async variant: a caller-supplied channel must be used as-is."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.PagesGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: compare to the None singleton with `is`, not `==` (fixes E711).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.PagesGrpcTransport, transports.PagesGrpcAsyncIOTransport],
)
def test_pages_transport_channel_mtls_with_client_cert_source(transport_class):
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # Using the deprecated kwargs must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            # The mTLS endpoint (not `host`) must be used for the channel.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.PagesGrpcTransport, transports.PagesGrpcAsyncIOTransport],
)
def test_pages_transport_channel_mtls_with_adc(transport_class):
    mock_ssl_cred = mock.Mock()
    # Patch SslCredentials so no real ADC mTLS material is needed.
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # client_cert_source=None forces the ADC-provided SSL credentials.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_entity_type_path():
    # The path helper should render the canonical resource-name template.
    project = "squid"
    location = "clam"
    agent = "whelk"
    entity_type = "octopus"
    expected = "projects/{project}/locations/{location}/agents/{agent}/entityTypes/{entity_type}".format(
        project=project, location=location, agent=agent, entity_type=entity_type,
    )
    actual = PagesClient.entity_type_path(project, location, agent, entity_type)
    assert expected == actual
def test_parse_entity_type_path():
    expected = {
        "project": "oyster",
        "location": "nudibranch",
        "agent": "cuttlefish",
        "entity_type": "mussel",
    }
    path = PagesClient.entity_type_path(**expected)
    # Check that the path construction is reversible.
    actual = PagesClient.parse_entity_type_path(path)
    assert expected == actual
def test_flow_path():
    # The path helper should render the canonical resource-name template.
    project = "winkle"
    location = "nautilus"
    agent = "scallop"
    flow = "abalone"
    expected = "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}".format(
        project=project, location=location, agent=agent, flow=flow,
    )
    actual = PagesClient.flow_path(project, location, agent, flow)
    assert expected == actual
def test_parse_flow_path():
    expected = {
        "project": "squid",
        "location": "clam",
        "agent": "whelk",
        "flow": "octopus",
    }
    path = PagesClient.flow_path(**expected)
    # Check that the path construction is reversible.
    actual = PagesClient.parse_flow_path(path)
    assert expected == actual
def test_intent_path():
    # The path helper should render the canonical resource-name template.
    project = "oyster"
    location = "nudibranch"
    agent = "cuttlefish"
    intent = "mussel"
    expected = "projects/{project}/locations/{location}/agents/{agent}/intents/{intent}".format(
        project=project, location=location, agent=agent, intent=intent,
    )
    actual = PagesClient.intent_path(project, location, agent, intent)
    assert expected == actual
def test_parse_intent_path():
    expected = {
        "project": "winkle",
        "location": "nautilus",
        "agent": "scallop",
        "intent": "abalone",
    }
    path = PagesClient.intent_path(**expected)
    # Check that the path construction is reversible.
    actual = PagesClient.parse_intent_path(path)
    assert expected == actual
def test_page_path():
    # The path helper should render the canonical resource-name template.
    project = "squid"
    location = "clam"
    agent = "whelk"
    flow = "octopus"
    page = "oyster"
    expected = "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/pages/{page}".format(
        project=project, location=location, agent=agent, flow=flow, page=page,
    )
    actual = PagesClient.page_path(project, location, agent, flow, page)
    assert expected == actual
def test_parse_page_path():
    expected = {
        "project": "nudibranch",
        "location": "cuttlefish",
        "agent": "mussel",
        "flow": "winkle",
        "page": "nautilus",
    }
    path = PagesClient.page_path(**expected)
    # Check that the path construction is reversible.
    actual = PagesClient.parse_page_path(path)
    assert expected == actual
def test_transition_route_group_path():
    # The path helper should render the canonical resource-name template.
    project = "scallop"
    location = "abalone"
    agent = "squid"
    flow = "clam"
    transition_route_group = "whelk"
    expected = "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/transitionRouteGroups/{transition_route_group}".format(
        project=project,
        location=location,
        agent=agent,
        flow=flow,
        transition_route_group=transition_route_group,
    )
    actual = PagesClient.transition_route_group_path(
        project, location, agent, flow, transition_route_group
    )
    assert expected == actual
def test_parse_transition_route_group_path():
    expected = {
        "project": "octopus",
        "location": "oyster",
        "agent": "nudibranch",
        "flow": "cuttlefish",
        "transition_route_group": "mussel",
    }
    path = PagesClient.transition_route_group_path(**expected)
    # Check that the path construction is reversible.
    actual = PagesClient.parse_transition_route_group_path(path)
    assert expected == actual
def test_webhook_path():
    # The path helper should render the canonical resource-name template.
    project = "winkle"
    location = "nautilus"
    agent = "scallop"
    webhook = "abalone"
    expected = "projects/{project}/locations/{location}/agents/{agent}/webhooks/{webhook}".format(
        project=project, location=location, agent=agent, webhook=webhook,
    )
    actual = PagesClient.webhook_path(project, location, agent, webhook)
    assert expected == actual
def test_parse_webhook_path():
    expected = {
        "project": "squid",
        "location": "clam",
        "agent": "whelk",
        "webhook": "octopus",
    }
    path = PagesClient.webhook_path(**expected)
    # Check that the path construction is reversible.
    actual = PagesClient.parse_webhook_path(path)
    assert expected == actual
def test_common_billing_account_path():
    billing_account = "oyster"
    expected = "billingAccounts/{billing_account}".format(
        billing_account=billing_account,
    )
    actual = PagesClient.common_billing_account_path(billing_account)
    assert expected == actual
def test_parse_common_billing_account_path():
    expected = {
        "billing_account": "nudibranch",
    }
    path = PagesClient.common_billing_account_path(**expected)
    # Check that the path construction is reversible.
    actual = PagesClient.parse_common_billing_account_path(path)
    assert expected == actual
def test_common_folder_path():
    folder = "cuttlefish"
    expected = "folders/{folder}".format(folder=folder,)
    actual = PagesClient.common_folder_path(folder)
    assert expected == actual
def test_parse_common_folder_path():
    expected = {
        "folder": "mussel",
    }
    path = PagesClient.common_folder_path(**expected)
    # Check that the path construction is reversible.
    actual = PagesClient.parse_common_folder_path(path)
    assert expected == actual
def test_common_organization_path():
    organization = "winkle"
    expected = "organizations/{organization}".format(organization=organization,)
    actual = PagesClient.common_organization_path(organization)
    assert expected == actual
def test_parse_common_organization_path():
    expected = {
        "organization": "nautilus",
    }
    path = PagesClient.common_organization_path(**expected)
    # Check that the path construction is reversible.
    actual = PagesClient.parse_common_organization_path(path)
    assert expected == actual
def test_common_project_path():
    project = "scallop"
    expected = "projects/{project}".format(project=project,)
    actual = PagesClient.common_project_path(project)
    assert expected == actual
def test_parse_common_project_path():
    expected = {
        "project": "abalone",
    }
    path = PagesClient.common_project_path(**expected)
    # Check that the path construction is reversible.
    actual = PagesClient.parse_common_project_path(path)
    assert expected == actual
def test_common_location_path():
    project = "squid"
    location = "clam"
    expected = "projects/{project}/locations/{location}".format(
        project=project, location=location,
    )
    actual = PagesClient.common_location_path(project, location)
    assert expected == actual
def test_parse_common_location_path():
    expected = {
        "project": "whelk",
        "location": "octopus",
    }
    path = PagesClient.common_location_path(**expected)
    # Check that the path construction is reversible.
    actual = PagesClient.parse_common_location_path(path)
    assert expected == actual
def test_client_with_default_client_info():
    # A custom client_info must reach _prep_wrapped_messages both when the
    # client builds its own transport and when the transport class is used
    # directly.
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(transports.PagesTransport, "_prep_wrapped_messages") as prep:
        client = PagesClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(transports.PagesTransport, "_prep_wrapped_messages") as prep:
        transport_class = PagesClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    # Leaving the async context manager must close the underlying channel
    # exactly once, and not before.
    client = PagesAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Leaving the client context manager must close the channel exactly once.

    The mapping goes from transport name to the attribute that holds the
    channel on that transport.
    """
    # Renamed from `transports` to avoid shadowing the module-level
    # `transports` import used throughout this file.
    transport_channel_attrs = {
        "grpc": "_grpc_channel",
    }
    for transport_name, close_name in transport_channel_attrs.items():
        client = PagesClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager must delegate close() to its transport."""
    # Renamed from `transports` to avoid shadowing the module-level
    # `transports` import used throughout this file.
    transport_names = [
        "grpc",
    ]
    for transport_name in transport_names:
        client = PagesClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (PagesClient, transports.PagesGrpcTransport),
        (PagesAsyncClient, transports.PagesGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    # An api_key in client_options must be exchanged for credentials via
    # google.auth._default.get_api_key_credentials and passed to the transport.
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| googleapis/python-dialogflow-cx | tests/unit/gapic/dialogflowcx_v3/test_pages.py | Python | apache-2.0 | 94,547 | [
"Octopus"
] | b4020b9abaa738293ba34589e9d618a88c520ee0dc92fc9df1f31e2bf6b1bb43 |
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
from __future__ import print_function
import mx
import mx_gate
import mx_jardistribution
import mx_sdk_vm, mx_sdk_vm_impl
import mx_vm_benchmark
import mx_vm_gate
import os
from os.path import basename, isdir, join, relpath
_suite = mx.suite('vm')
""":type: mx.SourceSuite | mx.Suite"""
# Register the GraalVM component installer ('gu') as a JDK-level component.
# Its launcher wraps the INSTALLER jar distribution; on Windows a custom
# launcher script is used instead of the generated one.
mx_sdk_vm.register_graalvm_component(mx_sdk_vm.GraalVmJdkComponent(
    suite=_suite,
    name='Component installer',
    short_name='gu',
    dir_name='installer',
    license_files=[],
    third_party_license_files=[],
    dependencies=['sdk'],
    jar_distributions=[
        'vm:INSTALLER',
        'truffle:TruffleJSON'
    ],
    support_distributions=['vm:INSTALLER_GRAALVM_SUPPORT'],
    launcher_configs=[
        mx_sdk_vm.LauncherConfig(
            destination="bin/<exe:gu>",
            jar_distributions=[
                'vm:INSTALLER',
                'truffle:TruffleJSON'
            ],
            dir_jars=True,
            main_class="org.graalvm.component.installer.ComponentInstaller",
            build_args=[],
            # Please see META-INF/native-image in the project for custom build options for native-image
            is_sdk_launcher=True,
            custom_launcher_script="mx.vm/gu.cmd" if mx.is_windows() else None,
        ),
    ],
    stability="supported",
))
# 'gvm': carrier component for the top-level GraalVM license files.
mx_sdk_vm.register_graalvm_component(mx_sdk_vm.GraalVmComponent(
    suite=_suite,
    name='GraalVM license files',
    short_name='gvm',
    dir_name='.',
    license_files=['LICENSE.txt'],
    third_party_license_files=['THIRD_PARTY_LICENSE.txt'],
    dependencies=[],
    support_distributions=['vm:VM_GRAALVM_SUPPORT'],
    stability="supported",
))
# 'pbm': the polybench benchmark launcher, built as a native image with all
# tools included.
mx_sdk_vm.register_graalvm_component(mx_sdk_vm.GraalVmJreComponent(
    suite=_suite,
    name='Polybench Launcher',
    short_name='pbm',
    license_files=[],
    third_party_license_files=[],
    dir_name='polybench',
    launcher_configs=[mx_sdk_vm.LauncherConfig(
        destination='bin/<exe:polybench>',
        jar_distributions=['vm:POLYBENCH'],
        main_class='org.graalvm.polybench.PolyBenchLauncher',
        build_args=[
            '-H:-ParseRuntimeOptions',
            '-H:Features=org.graalvm.launcher.PolyglotLauncherFeature',
            '--tool:all',
        ],
        is_main_launcher=True,
        default_symlinks=True,
        is_sdk_launcher=True,
        is_polyglot=True,
    )],
))
# 'pbi': Truffle instruments used by the polybench launcher.
mx_sdk_vm.register_graalvm_component(mx_sdk_vm.GraalVmTool(
    suite=_suite,
    name='Polybench Instruments',
    short_name='pbi',
    dir_name='pbi',
    license_files=[],
    third_party_license_files=[],
    dependencies=['Truffle', 'Polybench Launcher'],
    truffle_jars=['vm:POLYBENCH_INSTRUMENTS'],
    support_distributions=['vm:POLYBENCH_INSTRUMENTS_SUPPORT'],
))
# 'pmh': microbenchmark harness language, not installable on its own.
mx_sdk_vm.register_graalvm_component(mx_sdk_vm.GraalVmLanguage(
    suite=_suite,
    name='Polyglot Microbenchmark Harness',
    short_name='pmh',
    dir_name='pmh',
    license_files=[],
    third_party_license_files=[],
    dependencies=['Truffle', 'Polybench Launcher'],
    truffle_jars=['vm:PMH'],
    support_distributions=['vm:PMH_SUPPORT'],
    installable=False,
))
polybench_benchmark_methods = ["_run"]
# pylint: disable=line-too-long
# Component short-name lists used to assemble the GraalVM CE distributions
# registered below. Entries are mx_sdk_vm component short names (e.g. 'gu',
# 'gvm', 'pbm', 'pbi', 'pmh' are registered above; the rest come from other
# suites — presumably one short name per component, verify against those
# suites' registrations).
ce_components = ['bpolyglot', 'cmp', 'cov', 'dap', 'gu', 'gvm', 'icu4j', 'ins', 'insight', 'insightheap', 'js', 'lg', 'libpoly', 'llrc', 'llrl', 'llrn', 'lsp', 'nfi-libffi', 'nfi', 'poly', 'polynative', 'pro', 'rgx', 'sdk', 'spolyglot', 'svm', 'svmnfi', 'svml', 'tfl', 'tflm', 'vvm']
ce_win_complete_components = ['bnative-image-configure', 'bpolyglot', 'cmp', 'cov', 'dap', 'ejvm', 'gu', 'gvm', 'gwa', 'icu4j', 'ins', 'insight', 'insightheap', 'java', 'js', 'lg', 'libpoly', 'lsp', 'nfi-libffi', 'nfi', 'ni', 'nic', 'nil', 'njs', 'poly', 'polynative', 'pro', 'rgx', 'sdk', 'spolyglot', 'svm', 'svmnfi', 'tfl', 'tflm', 'vvm']
ce_aarch64_complete_components = ce_win_complete_components + ['llp', 'llrc', 'llrl', 'llrn', 'rby', 'rbyl', 'svml']
ce_darwin_complete_components = ce_aarch64_complete_components + ['pyn', 'R', 'bRMain', 'pynl']
ce_complete_components = ce_darwin_complete_components + ['ellvm']
ce_ruby_components = ['cmp', 'cov', 'dap', 'gvm', 'ins', 'insight', 'insightheap', 'lg', 'llp', 'llrc', 'llrn', 'lsp', 'nfi-libffi', 'nfi', 'pro', 'rby', 'rbyl', 'rgx', 'sdk', 'svm', 'svmnfi', 'tfl', 'tflm', 'vvm']
ce_python_components = ['bgraalvm-native-binutil', 'bgraalvm-native-clang', 'bgraalvm-native-clang++', 'bgraalvm-native-ld', 'bgu', 'sjsvm', 'blli', 'bnative-image', 'bnative-image-configure', 'bpolybench', 'bpolyglot', 'cmp', 'cov', 'dap', 'dis', 'gu', 'gvm', 'icu4j', 'ins', 'insight', 'insightheap', 'js', 'lg', 'libpoly', 'llp', 'llrc', 'llrl', 'llrn', 'lsp', 'nfi-libffi', 'nfi', 'ni', 'nic', 'nil', 'nju', 'njucp', 'pbm', 'pmh', 'poly', 'polynative', 'pro', 'pyn', 'pynl', 'rgx', 'sdk', 'snative-image-agent', 'snative-image-diagnostics-agent', 'spolyglot', 'svm', 'svml', 'svmnfi', 'tfl', 'tflm', 'vvm']
ce_fastr_components = ['R', 'bRMain', 'bgraalvm-native-binutil', 'bgraalvm-native-clang', 'bgraalvm-native-clang++', 'bgraalvm-native-ld', 'bgu', 'sjsvm', 'blli', 'bpolyglot', 'cmp', 'cov', 'dap', 'gu', 'gvm', 'icu4j', 'ins', 'insight', 'insightheap', 'js', 'lg', 'libpoly', 'llp', 'llrc', 'llrl', 'llrn', 'lsp', 'nfi-libffi', 'nfi', 'poly', 'polynative', 'pro', 'rgx', 'sdk', 'spolyglot', 'svm', 'svml', 'svmnfi', 'tfl', 'tflm', 'vvm']
ce_no_native_components = ['bgu', 'sjsvm', 'blli', 'bgraalvm-native-clang', 'bgraalvm-native-clang++', 'bgraalvm-native-ld', 'bgraalvm-native-binutil', 'bnative-image', 'bnative-image-configure', 'bpolyglot', 'cmp', 'cov', 'dap', 'gu', 'gvm', 'icu4j', 'ins', 'insight', 'insightheap', 'js', 'lsp', 'nfi-libffi', 'nfi', 'ni', 'nic', 'nil', 'polynative', 'pro', 'rgx', 'sdk', 'llrc', 'llrn', 'llrl', 'snative-image-agent', 'snative-image-diagnostics-agent', 'spolyglot', 'svm', 'svmnfi', 'svml', 'tfl', 'tflm', 'libpoly', 'poly', 'vvm']
# Register the named VM configurations (distribution name + component set +
# optional env file) with mx_sdk_vm.
mx_sdk_vm.register_vm_config('ce', ['insight', 'insightheap', 'cmp', 'cov', 'dap', 'gu', 'gvm', 'icu4j', 'ins', 'js', 'lg', 'libpoly', 'lsp', 'nfi-libffi', 'nfi', 'poly', 'bpolyglot', 'polynative', 'pro', 'rgx', 'sdk', 'spolyglot', 'svm', 'svmnfi', 'tfl', 'tflm', 'vvm'], _suite, env_file='ce-win')
mx_sdk_vm.register_vm_config('ce', ce_components, _suite, env_file='ce-aarch64')
mx_sdk_vm.register_vm_config('ce', ce_components, _suite, env_file='ce-darwin')
mx_sdk_vm.register_vm_config('ce', ce_components, _suite)
mx_sdk_vm.register_vm_config('ce', ce_components + ['njs'], _suite, dist_name='ce', env_file='ce-nodejs')
mx_sdk_vm.register_vm_config('ce', ce_ruby_components, _suite, dist_name='ce-ruby', env_file='ce-ruby')
mx_sdk_vm.register_vm_config('ce', ce_win_complete_components, _suite, dist_name='ce-win-complete')
mx_sdk_vm.register_vm_config('ce', ce_aarch64_complete_components, _suite, dist_name='ce-aarch64-complete')
mx_sdk_vm.register_vm_config('ce', ce_darwin_complete_components, _suite, dist_name='ce-darwin-complete')
mx_sdk_vm.register_vm_config('ce', ce_complete_components, _suite, dist_name='ce-complete')
mx_sdk_vm.register_vm_config('ce-python', ce_python_components, _suite)
mx_sdk_vm.register_vm_config('ce-fastr', ce_fastr_components, _suite)
mx_sdk_vm.register_vm_config('ce-no_native', ce_no_native_components, _suite)
mx_sdk_vm.register_vm_config('libgraal', ['bgu', 'cmp', 'dis', 'gu', 'gvm', 'lg', 'nfi-libffi', 'nfi', 'poly', 'polynative', 'sdk', 'svm', 'svmnfi', 'svml', 'tfl', 'tflm', 'bpolyglot'], _suite)
mx_sdk_vm.register_vm_config('toolchain-only', ['sdk', 'tfl', 'tflm', 'nfi-libffi', 'nfi', 'cmp', 'svm', 'svmnfi', 'llp', 'llrc', 'llrn'], _suite)
mx_sdk_vm.register_vm_config('libgraal-bash', ['bgraalvm-native-clang', 'bgraalvm-native-clang++', 'bgraalvm-native-ld', 'bgraalvm-native-binutil', 'bgu', 'cmp', 'gu', 'gvm', 'lg', 'nfi-libffi', 'nfi', 'poly', 'polynative', 'sdk', 'svm', 'svmnfi', 'svml', 'tfl', 'tflm', 'bpolyglot'], _suite, env_file=False)
mx_sdk_vm.register_vm_config('toolchain-only-bash', ['bgraalvm-native-clang', 'bgraalvm-native-clang++', 'bgraalvm-native-ld', 'bgraalvm-native-binutil', 'tfl', 'tflm', 'gu', 'svm', 'svmnfi', 'gvm', 'polynative', 'llp', 'nfi-libffi', 'nfi', 'svml', 'bgu', 'sdk', 'llrc', 'llrn', 'cmp'], _suite, env_file=False)
mx_sdk_vm.register_vm_config('ce', ['bgraalvm-native-binutil', 'bgraalvm-native-clang', 'bgraalvm-native-clang++', 'bgraalvm-native-ld', 'java', 'libpoly', 'sespresso', 'spolyglot', 'ejvm', 'sjsvm', 'blli', 'bnative-image', 'srubyvm', 'pynl', 'bgraalpython', 'pyn', 'bwasm', 'cmp', 'gwa', 'icu4j', 'js', 'lg', 'llp', 'nfi-libffi', 'nfi', 'ni', 'nil', 'pbm', 'pmh', 'pbi', 'rby', 'rbyl', 'rgx', 'sdk', 'llrc', 'llrn', 'llrl', 'snative-image-agent', 'snative-image-diagnostics-agent', 'svm', 'svmnfi', 'tfl', 'tflm'], _suite, env_file='polybench-ce')
mx_sdk_vm.register_vm_config('ce', ['pbm', 'pmh', 'pbi', 'ni', 'icu4j', 'js', 'lg', 'nfi-libffi', 'nfi', 'tfl', 'svm', 'nil', 'rgx', 'sdk', 'cmp', 'tflm', 'svmnfi', 'bnative-image', 'sjsvm', 'snative-image-agent', 'snative-image-diagnostics-agent'], _suite, env_file='polybench-nfi-ce')
mx_sdk_vm.register_vm_config('ce', ['bgraalvm-native-binutil', 'bgraalvm-native-clang', 'bgraalvm-native-clang++', 'bgraalvm-native-ld', 'blli', 'bnative-image', 'cmp', 'lg', 'llrc', 'llrl', 'llrn', 'nfi-libffi', 'nfi', 'ni', 'nil', 'pbm', 'pbi', 'sdk', 'snative-image-agent', 'snative-image-diagnostics-agent', 'svm', 'svmnfi', 'tfl', 'tflm'], _suite, env_file='polybench-sulong-ce')
# The 'svm' config differs per OS: the non-Windows variant additionally
# includes 'gu' and 'svml'.
if mx.get_os() == 'windows':
    mx_sdk_vm.register_vm_config('svm', ['bnative-image', 'bnative-image-configure', 'bpolyglot', 'cmp', 'gvm', 'nfi-libffi', 'nfi', 'ni', 'nil', 'nju', 'njucp', 'nic', 'poly', 'polynative', 'rgx', 'sdk', 'snative-image-agent', 'snative-image-diagnostics-agent', 'svm', 'svmnfi', 'tfl', 'tflm'], _suite, env_file=False)
else:
    mx_sdk_vm.register_vm_config('svm', ['bnative-image', 'bnative-image-configure', 'bpolyglot', 'cmp', 'gu', 'gvm', 'nfi-libffi', 'nfi', 'ni', 'nil', 'nju', 'njucp', 'nic', 'poly', 'polynative', 'rgx', 'sdk', 'snative-image-agent', 'snative-image-diagnostics-agent', 'svm', 'svmnfi', 'svml', 'tfl', 'tflm'], _suite, env_file=False)
# pylint: enable=line-too-long
mx_gate.add_gate_runner(_suite, mx_vm_gate.gate_body)
def mx_post_parse_cmd_line(args):
    # mx hook invoked after command-line parsing; `args` is unused here.
    # Registers the GraalVM VMs with the benchmarking infrastructure.
    mx_vm_benchmark.register_graalvm_vms()
def mx_register_dynamic_suite_constituents(register_project, register_distribution):
    """mx hook: dynamically register benchmark projects and distributions for this suite.

    Registration depends on which GraalVM components are present
    (FastR, GraalWasm, LLVM Runtime Native, Java on Truffle).

    :type register_project: (mx.Project) -> None
    :type register_distribution: (mx.Distribution) -> None
    """
    if mx_sdk_vm_impl.has_component('FastR'):
        # FastR builds are only supported in "release" mode; fail fast otherwise.
        fastr_release_env = mx.get_env('FASTR_RELEASE', None)
        if fastr_release_env != 'true':
            mx.abort(('When including FastR, please set FASTR_RELEASE to \'true\' (env FASTR_RELEASE=true mx ...). Got FASTR_RELEASE={}. '
                      'For local development, you may also want to disable recommended packages build (FASTR_NO_RECOMMENDED=true) and '
                      'capturing of system libraries (export FASTR_CAPTURE_DEPENDENCIES set to an empty value). '
                      'See building.md in FastR documentation for more details.').format(fastr_release_env))
    if register_project:
        register_project(GraalVmSymlinks())

        benchmark_dist = _suite.dependency("POLYBENCH_BENCHMARKS")

        def _add_project_to_dist(destination, name, source='dependency:{name}/*'):
            # Append `name`'s build artifacts to the POLYBENCH_BENCHMARKS layout
            # at `destination` and make the distribution build-depend on it.
            if destination not in benchmark_dist.layout:
                benchmark_dist.layout[destination] = []
            benchmark_dist.layout[destination].append(source.format(name=name))
            benchmark_dist.buildDependencies.append(name)

        if mx_sdk_vm_impl.has_component('GraalWasm'):
            import mx_wasm

            class GraalVmWatProject(mx_wasm.WatProject):
                # WatProject whose sources live directly in its subDir.
                def getSourceDir(self):
                    return self.subDir

                def isBenchmarkProject(self):
                    return self.name.startswith("benchmarks.")

            register_project(GraalVmWatProject(
                suite=_suite,
                name='benchmarks.interpreter.wasm',
                deps=[],
                workingSets=None,
                subDir=join(_suite.dir, 'benchmarks', 'interpreter'),
                theLicense=None,
                testProject=True,
                defaultBuild=False,
            ))
            # add wasm to the layout of the benchmark distribution
            _add_project_to_dist('./interpreter/', 'benchmarks.interpreter.wasm')

        if mx_sdk_vm_impl.has_component('LLVM Runtime Native'):
            register_project(mx.NativeProject(
                suite=_suite,
                name='benchmarks.interpreter.llvm.native',
                results=['interpreter/'],
                buildEnv={
                    # Compile with the Sulong bootstrap toolchain's C compiler.
                    'NATIVE_LLVM_CC': '<toolchainGetToolPath:native,CC>',
                },
                buildDependencies=[
                    'sulong:SULONG_BOOTSTRAP_TOOLCHAIN',
                ],
                vpath=True,
                deps=[],
                workingSets=None,
                d=join(_suite.dir, 'benchmarks', 'interpreter'),
                subDir=None,
                srcDirs=[''],
                output=None,
                theLicense=None,
                testProject=True,
                defaultBuild=False,
            ))
            # add bitcode to the layout of the benchmark distribution
            _add_project_to_dist('./', 'benchmarks.interpreter.llvm.native')

        if mx_sdk_vm_impl.has_component('Java on Truffle'):
            # One Java project + one JAR distribution per benchmark directory.
            java_benchmarks = join(_suite.dir, 'benchmarks', 'interpreter', 'java')
            for f in os.listdir(java_benchmarks):
                if isdir(join(java_benchmarks, f)) and not f.startswith("."):
                    main_class = basename(f)
                    simple_name = main_class.split(".")[-1]

                    project_name = 'benchmarks.interpreter.espresso.' + simple_name.lower()
                    register_project(mx.JavaProject(
                        suite=_suite,
                        subDir=None,
                        srcDirs=[join(_suite.dir, 'benchmarks', 'interpreter', 'java', main_class)],
                        deps=[],
                        name=project_name,
                        d=join(_suite.dir, 'benchmarks', 'interpreter', 'java', main_class),
                        javaCompliance='11+',
                        checkstyleProj=project_name,
                        workingSets=None,
                        theLicense=None,
                        testProject=True,
                        defaultBuild=False,
                    ))

                    dist_name = 'POLYBENCH_ESPRESSO_' + simple_name.upper()
                    register_distribution(mx_jardistribution.JARDistribution(
                        suite=_suite,
                        subDir=None,
                        srcDirs=[''],
                        sourcesPath=[],
                        deps=[project_name],
                        mainClass=main_class,
                        name=dist_name,
                        path=simple_name + '.jar',
                        platformDependent=False,
                        distDependencies=[],
                        javaCompliance='11+',
                        excludedLibs=[],
                        workingSets=None,
                        theLicense=None,
                        testProject=True,
                        defaultBuild=False,
                    ))
                    # add jars to the layout of the benchmark distribution
                    _add_project_to_dist('./interpreter/{}.jar'.format(simple_name), dist_name,
                                         source='dependency:{name}/polybench-espresso-' + simple_name.lower() + '.jar')
class GraalVmSymlinks(mx.Project):
    """Pseudo-project that owns the `latest_graalvm`/`latest_graalvm_home` symlinks
    in the `vm` suite (built by GraalVmSymLinksBuildTask, never archived).
    """

    def __init__(self, **kw_args):
        super(GraalVmSymlinks, self).__init__(_suite, 'vm-symlinks', subDir=None, srcDirs=[], deps=['sdk:' + mx_sdk_vm_impl.graalvm_dist_name()], workingSets=None, d=_suite.dir, theLicense=None, testProject=False, **kw_args)
        # List of (symlink target relative to the vm suite, absolute link path).
        self.links = []
        sdk_suite = mx.suite('sdk')
        for link_name in 'latest_graalvm', 'latest_graalvm_home':
            self.links += [(relpath(join(sdk_suite.dir, link_name), _suite.dir), join(_suite.dir, link_name))]

    def getArchivableResults(self, use_relpath=True, single=False):
        # Symlinks are local conveniences; archiving them makes no sense.
        raise mx.abort("Project '{}' cannot be archived".format(self.name))

    def getBuildTask(self, args):
        return GraalVmSymLinksBuildTask(args, 1, self)
class GraalVmSymLinksBuildTask(mx.ProjectBuildTask):
    """
    For backward compatibility, maintain `latest_graalvm` and `latest_graalvm_home` symlinks in the `vm` suite
    """

    def needsBuild(self, newestInput):
        # Generic check first (e.g. forced rebuilds).
        sup = super(GraalVmSymLinksBuildTask, self).needsBuild(newestInput)
        if sup[0]:
            return sup
        if mx.get_os() != 'windows':
            # Rebuild when a link is missing, older than its input, or points
            # at the wrong target.  lexists() is checked before readlink() so
            # readlink never runs on a non-existent path.
            for src, dest in self.subject.links:
                if not os.path.lexists(dest):
                    return True, '{} does not exist'.format(dest)
                link_file = mx.TimeStampFile(dest, False)
                if newestInput and link_file.isOlderThan(newestInput):
                    return True, '{} is older than {}'.format(dest, newestInput)
                if src != os.readlink(dest):
                    return True, '{} points to the wrong file'.format(dest)
        return False, None

    def build(self):
        if mx.get_os() == 'windows':
            mx.warn('Skip adding symlinks to the latest GraalVM (Platform Windows)')
            return
        # Recreate from scratch so links always point to the current GraalVM.
        self.rm_links()
        self.add_links()

    def clean(self, forBuild=False):
        self.rm_links()

    def add_links(self):
        for src, dest in self.subject.links:
            os.symlink(src, dest)

    def rm_links(self):
        if mx.get_os() == 'windows':
            return
        for _, dest in self.subject.links:
            if os.path.lexists(dest):
                os.unlink(dest)

    def __str__(self):
        return "Generating GraalVM symlinks in the vm suite"
| smarr/Truffle | vm/mx.vm/mx_vm.py | Python | gpl-2.0 | 19,478 | [
"ESPResSo",
"VisIt"
] | de62b4eb7801c274617c8f6be4c70476619330f9c733e26d82828c42bae7cff3 |
# Analyzing neural spiking data using Hopfield networks
# - Markov probability of window labels approach
# Felix Effenberger, Jan 2015
# This assumes that a Hopfield network has already been fitted to windowed
# spike train data and the denoised patterns have been computed and saved
# via the Patterns class (see file my_first_script.py).
# In this script basic analysis of likely occurring pattern sequences is
# carried out, based on a Markov approach.
# The so called pattern sequence is an ordered list of memory patterns
# (fixed points of Hopfield dynamics) obtained from converging the Hopfield
# dynamics on windows of the raw spiking data. The occurring patterns are
# labeled by integer numbers (starting from 0), where each pattern is
# assigned a new label of increasing value if it has not been encountered
# before. A label thus establishes pattern identity and the pattern sequence
# consists of N labels with value 0 <= k <= N (with usually k << N).
# After loading the denoised patterns and computing label probabilities
# and entropies of the one-step transition probabilities for each label
# (this can be thought of as a measure of how 'stably' a given sequence
# occurs in the data), a graph is constructed with the labels as nodes.
# Edges are inserted between nodes, the labels of which have a
# non-vanishing conditional probability of occurrence in the pattern sequence.
# The edge weight is set to this (Markov) probability.
# In most cases, one or more 'central' node can be identified in the Markov
# graph that has/have a high in-degree (i.e. number of incoming edges),
# possibly also accompanied by a high out-degree.
# This is characteristic for a situation in which such nodes (i.e. label,
# i.e. pattern) are the termination point (resp. starting point) of prominently
# occurring sub-sequences of patterns occurring in the pattern sequence.
# Such a node often corresponds to some resting state of the network, that
# it repeatedly returns to. Its fixed point memory and memory triggered average
# will likely show a silent (or low activity) state of the network.
# cycles (closed paths) starting and ending at such a central node can give
# insight on how the network is driven out of its resting state (often by some
# stimulus) and enters a transient excited state before falling back to the
# resting state.
# The code below enumerates such cycles (if existent) and sorts them by their
# (scored) entropy, a proxy measure for how reliably the network dynamics visit
# those cycles (i.e. excited states) in the data considered.
import numpy as np
import matplotlib as mpl
# to set mpl backend:
# mpl.use('Agg')
import matplotlib.pyplot as plt

from hdnet.patterns import PatternsHopfield
from hdnet.stats import SequenceAnalyzer
from hdnet.visualization import combine_windows, plot_graph

# load converged patterns here
n = 10   # NUMBER_OF_NEURONS
ws = 1   # WINDOW_SIZE
# pattern_file = 'SAVED_PATTERNS_FILE'

# load pattern sequence (fixed-point labels, one per data window)
patterns = PatternsHopfield.load('my_spikes_model/patterns_hopfield.npz')
sequence = patterns.sequence
labels = set(sequence)
n_labels = len(labels)

# create sequence analyzer instance
sa = SequenceAnalyzer(patterns)

# optionally filter labels to remove occurrences of repeated labels
FILTER_LABEL_SEQUENCE = False
if FILTER_LABEL_SEQUENCE:
    # NB. this alters label probabilities and Markov transition probabilities
    sa.filter_sequence_repeating_labels(repetitions=2)

# compute probabilities of labels, Markov transition probabilities and
# per-label entropies of the one-step transition distribution
label_probabilities = sa.compute_label_probabilities()
markov_probabilities = sa.compute_label_markov_probabilities()
label_entropy = sa.compute_label_markov_entropies()

# plot label probabilities, markov transition probabilities and node entropy
fig, ax = plt.subplots()
ax.hist(label_probabilities, weights=[1. / n_labels] * n_labels,
        range=(label_probabilities.min(), label_probabilities.max()),
        bins=100, color='k')
ax.set_xlabel('probability')
ax.set_ylabel('fraction')
ax.set_yscale('log', nonposy='clip')
ax.set_xscale('log', nonposx='clip')
plt.tight_layout()
plt.savefig('label_probabilities.png')
plt.close()

fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(1, 1, 1)
cmap = mpl.cm.autumn
cmap.set_bad('k')  # masked (near-zero probability) entries are drawn black
mp_masked = np.ma.masked_where(markov_probabilities < 0.001, markov_probabilities)
im = ax.matshow(mp_masked, cmap=cmap,
                norm=mpl.colors.LogNorm(vmin=0.001, vmax=1))
ax.set_xlabel('to pattern')
ax.set_ylabel('from pattern')
ax.xaxis.set_ticks([0, 500])
ax.yaxis.set_ticks([0, 500])
plt.colorbar(im)
plt.savefig('label_probabilities_markov.png')
plt.tight_layout()
plt.close()

fig, ax = plt.subplots()
plt.hist(label_entropy,
         weights=[1. / n_labels] * n_labels, bins=50, color='k')
plt.xlabel('entropy')
plt.ylabel('fraction')
plt.yscale('log', nonposy='clip')
plt.tight_layout()
plt.savefig('label_entropy.png')
plt.close()
# construct markov graph (nodes: pattern labels; weighted edges: one-step
# transition probabilities)
markov_graph = sa.compute_markov_graph()
print("Markov graph has %d nodes, %d edges" % (len(markov_graph.nodes()),
                                               len(markov_graph.edges())))

# reduce markov graph to most likely occurring labels
# adjust threshold if needed
threshold = 20
sa.reduce_graph_brute(np.argsort(label_probabilities)[::-1][:threshold])

# plot markov graph
plot_graph(markov_graph)
plt.savefig('markov_graph_filtered.png')

# plot memory triggered averages for all nodes of markov graph
n_rows = threshold // 10  # integer grid dimensions: plt.subplot rejects floats
fig, ax = plt.subplots(n_rows, 10)
for i, node in enumerate(markov_graph.nodes()):
    ax = plt.subplot(n_rows, 10, i + 1)
    ax.matshow(patterns.pattern_to_mta_matrix(node).reshape(n, ws),
               vmin=0, vmax=1, cmap='gray')
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.savefig('mtas.png')
plt.close()

print("filtering markov graph")
sa.reduce_graph_self_cycles()
sa.reduce_graph_triangles()
sa.reduce_graph_stub()
print("Filtered Markov graph has %d nodes, %d edges" %
      (len(markov_graph.nodes()), len(markov_graph.edges())))

# try to guess base node (resting state memory) as node with highest degree
# (converging and diverging connections)
# -- adjust post hoc if necessary!
markov_degrees = markov_graph.degree(list(markov_graph.nodes))
# degree() yields (node, degree) pairs: select the pair with the largest
# *degree*.  A bare max() would compare on the node label instead and pick
# the wrong node.
base_node = max(markov_degrees, key=lambda node_deg: node_deg[1])[0]
print("base node is %d" % base_node)
# calculate cycles of entropies around base node
# adjust weighting and weighting per element if needed
print("calculating cycles around base node..")
weighting = lambda x: 1. / len(x)
weighting_element = lambda x, p: x / ((p + 1) * 2.)  # prefer longer sequences
cycles, scores = sa.calculate_cycles_entropy_scores(
    base_node,
    min_len=3,
    max_len=20,
    weighting=weighting,
    weighting_element=weighting_element)
print("%d cycles" % (len(cycles)))

# plot cycle statistics
n_cycles = len(cycles)
# one vectorized construction; the previous np.append-in-a-loop re-allocated
# the array on every iteration (accidentally quadratic)
cycle_len = np.array([len(c) for c in cycles], dtype=float)

fig, ax = plt.subplots()
ax.hist(cycle_len, weights=[1. / n_cycles] * n_cycles, bins=50, color='k')
ax.set_xlabel('cycle length')
ax.set_ylabel('fraction')
plt.locator_params(nbins=3)
plt.tight_layout()
plt.savefig('cycle_lengths.png')
plt.close()

fig, ax = plt.subplots()
plt.hist(scores, weights=[1. / n_cycles] * n_cycles, bins=50, color='k')
plt.xlabel('cycle score')
plt.ylabel('fraction')
plt.locator_params(nbins=3)
plt.tight_layout()
plt.savefig('cycle_scores.png')
plt.close()

fig, ax = plt.subplots()
plt.scatter(cycle_len, scores, color='k')
plt.xlabel('cycle length')
plt.ylabel('cycle score')
plt.locator_params(nbins=3)
plt.tight_layout()
plt.savefig('cycle_lengths_vs_scores_scatter.png')
plt.close()

fig, ax = plt.subplots()
plt.hist2d(cycle_len, scores, bins=100)
plt.xlabel('cycle length')
plt.ylabel('cycle score')
plt.colorbar()
plt.locator_params(nbins=3)
plt.tight_layout()
plt.savefig('cycle_lengths_vs_scores_hist.png')
plt.close()

# plot max_plot extracted cycles
# adjust if needed
max_plot = 100
interesting = np.arange(min(n_cycles, max_plot))
print("plotting averaged sequences of %d cycles.." % (len(interesting)))
for i, idx in enumerate(interesting):
    cycle = cycles[idx]
    mta_sequence = [patterns.pattern_to_mta_matrix(l).reshape(n, ws)
                    for l in cycle]
    combined = combine_windows(np.array(mta_sequence))

    fig, ax = plt.subplots()
    plt.matshow(combined, cmap='gray')
    plt.axis('off')
    plt.title('cycle %d\nlength %d\nscore %f' %
              (idx, len(cycle), scores[idx]), loc='left')
    plt.savefig('likely-%04d.png' % i)
    plt.close()

# end of script
| team-hdnet/hdnet | examples/examine_pattern_sequence.py | Python | gpl-3.0 | 8,834 | [
"VisIt"
] | 40f81ed63d0de2f2400932b6b913ae393f96b713ba2ff721f36e8543c70ff866 |
""" A computing element class using singularity containers.
This computing element will start the job in the container set by
the "ContainerRoot" config option.
DIRAC will the re-installed within the container, extra flags can
be given to the dirac-install command with the "ContainerExtraOpts"
option.
See the Configuration/Resources/Computing documentation for details on
where to set the option parameters.
"""
import os
import sys
import shutil
import tempfile
import DIRAC
from DIRAC import S_OK, S_ERROR, gConfig, gLogger
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.Subprocess import systemCall
from DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals
from DIRAC.ConfigurationSystem.Client.Helpers import Operations
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.WorkloadManagementSystem.Utilities.Utils import createRelocatedJobWrapper
__RCSID__ = "$Id$"
DIRAC_INSTALL = os.path.join(DIRAC.rootPath, 'DIRAC', 'Core', 'scripts', 'dirac-install.py')
# Default container to use if it isn't specified in the CE options
CONTAINER_DEFROOT = "/cvmfs/cernvm-prod.cern.ch/cvm3"
CONTAINER_WORKDIR = "containers"
CONTAINER_INNERDIR = "/tmp"
CONTAINER_WRAPPER = """#!/bin/bash
echo "Starting inner container wrapper scripts at `date`."
set -x
cd /tmp
# Install DIRAC
./dirac-install.py %(install_args)s
source bashrc
dirac-configure -F %(config_args)s -I
# Run next wrapper (to start actual job)
bash %(next_wrapper)s
# Write the payload errorcode to a file for the outer scripts
echo $? > retcode
chmod 644 retcode
echo "Finishing inner continer wrapper scripts at `date`."
"""
class SingularityComputingElement(ComputingElement):
  """ A Computing Element for running a job within a Singularity container.
  """

  def __init__(self, ceUniqueID):
    """ Standard constructor.
    """
    super(SingularityComputingElement, self).__init__(ceUniqueID)
    # Very simple job accounting counters (see getCEStatus)
    self.__submittedJobs = 0
    self.__runningJobs = 0
    # Root image for the container; overridable with the 'ContainerRoot' CE option
    self.__root = CONTAINER_DEFROOT
    if 'ContainerRoot' in self.ceParameters:
      self.__root = self.ceParameters['ContainerRoot']
    self.__workdir = CONTAINER_WORKDIR
    self.__innerdir = CONTAINER_INNERDIR
    self.__singularityBin = 'singularity'
    self.log = gLogger.getSubLogger('Singularity')

  def __hasSingularity(self):
    """ Search for a usable singularity binary.

        The 'ContainerBin' CE option takes precedence; otherwise the current
        PATH is searched for an executable named singularity.
        Returns True if one is found, False otherwise.
    """
    if self.ceParameters.get('ContainerBin'):
      binPath = self.ceParameters['ContainerBin']
      if os.path.isfile(binPath) and os.access(binPath, os.X_OK):
        self.__singularityBin = binPath
        self.log.debug('Use singularity from "%s"' % self.__singularityBin)
        return True
    if "PATH" not in os.environ:
      return False  # Hmm, PATH not set? How unusual...
    for searchPath in os.environ["PATH"].split(os.pathsep):
      binPath = os.path.join(searchPath, 'singularity')
      if os.path.isfile(binPath):
        # File found, check it's executable to be certain:
        if os.access(binPath, os.X_OK):
          self.log.debug('Find singularity from PATH "%s"' % binPath)
          return True
    # No suitable binaries found
    return False

  def __getInstallFlags(self):
    """ Get the flags to pass to dirac-install.py inside the container.
        Returns a string containing the command line flags.
    """
    instOpts = []
    setup = gConfig.getValue("/DIRAC/Setup", "unknown")
    opsHelper = Operations.Operations(setup=setup)
    installationName = opsHelper.getValue("Pilot/Installation", "")
    if installationName:
      instOpts.append('-V %s' % installationName)
    diracVersions = opsHelper.getValue("Pilot/Version", [])
    if diracVersions:
      # Install the first (preferred) pilot version.  The guard avoids an
      # IndexError when Pilot/Version is not configured.
      instOpts.append("-r '%s'" % diracVersions[0])
    pyVer = "%u%u" % (sys.version_info.major, sys.version_info.minor)
    instOpts.append("-i %s" % pyVer)
    pilotExtensionsList = opsHelper.getValue("Pilot/Extensions", [])
    extensionsList = []
    if pilotExtensionsList:
      if pilotExtensionsList[0] != 'None':
        extensionsList = pilotExtensionsList
    else:
      extensionsList = CSGlobals.getCSExtensions()
    if extensionsList:
      # Web extensions are not installable inside the container
      instOpts.append("-e '%s'" % ','.join([ext for ext in extensionsList if 'Web' not in ext]))
    if 'ContainerExtraOpts' in self.ceParameters:
      instOpts.append(self.ceParameters['ContainerExtraOpts'])
    return ' '.join(instOpts)

  @staticmethod
  def __getConfigFlags():
    """ Get the flags for dirac-configure inside the container.
        Returns a string containing the command line flags.
    """
    cfgOpts = []
    setup = gConfig.getValue("/DIRAC/Setup", "unknown")
    if setup:
      cfgOpts.append("-S '%s'" % setup)
    csServers = gConfig.getValue("/DIRAC/Configuration/Servers", [])
    cfgOpts.append("-C '%s'" % ','.join(csServers))
    cfgOpts.append("-n '%s'" % DIRAC.siteName())
    return ' '.join(cfgOpts)

  def __createWorkArea(self, proxy, jobDesc, log, logLevel):
    """ Creates a directory for the container and populates it with the
        template directories, scripts & proxy.

        Returns S_OK with 'baseDir', 'tmpDir' and 'proxyLocation' keys, or
        S_ERROR with 'ReschedulePayload' set on failure.
    """
    # Create the directory for our container area
    try:
      os.mkdir(self.__workdir)
    except OSError:
      if not os.path.isdir(self.__workdir):
        result = S_ERROR("Failed to create container base directory '%s'" % self.__workdir)
        result['ReschedulePayload'] = True
        return result
      # Otherwise, directory probably just already exists...
    baseDir = None
    try:
      baseDir = tempfile.mkdtemp(prefix="job%s_" % jobDesc["jobID"], dir=self.__workdir)
    except OSError:
      result = S_ERROR("Failed to create container work directory in '%s'" % self.__workdir)
      result['ReschedulePayload'] = True
      return result

    self.log.debug('Use singularity workarea: %s' % baseDir)
    for subdir in ["home", "tmp", "var_tmp"]:
      os.mkdir(os.path.join(baseDir, subdir))
    tmpDir = os.path.join(baseDir, "tmp")

    # Now we have a directory, we can stage in the proxy and scripts
    # Proxy (created with 0600 so no other user can read it)
    proxyLoc = os.path.join(tmpDir, "proxy")
    rawfd = os.open(proxyLoc, os.O_WRONLY | os.O_CREAT, 0o600)
    with os.fdopen(rawfd, "w") as fd:
      fd.write(proxy)
    # dirac-install.py
    install_loc = os.path.join(tmpDir, "dirac-install.py")
    shutil.copyfile(DIRAC_INSTALL, install_loc)
    os.chmod(install_loc, 0o755)
    # Job Wrapper (Standard DIRAC wrapper)
    result = createRelocatedJobWrapper(tmpDir, self.__innerdir,
                                       log=log, logLevel=logLevel, **jobDesc)
    if not result['OK']:
      result['ReschedulePayload'] = True
      return result
    wrapperPath = result['Value']
    # Extra Wrapper (Container DIRAC installer)
    wrapSubs = {'next_wrapper': wrapperPath,
                'install_args': self.__getInstallFlags(),
                'config_args': self.__getConfigFlags(),
                }
    wrapLoc = os.path.join(tmpDir, "dirac_container.sh")
    rawfd = os.open(wrapLoc, os.O_WRONLY | os.O_CREAT, 0o700)
    with os.fdopen(rawfd, "w") as fd:
      fd.write(CONTAINER_WRAPPER % wrapSubs)

    ret = S_OK()
    ret['baseDir'] = baseDir
    ret['tmpDir'] = tmpDir
    ret['proxyLocation'] = proxyLoc
    return ret

  def __getEnv(self):
    """ Gets the environment for use within the container.
        We blank almost everything to prevent contamination from the host system.
    """
    payloadEnv = {}
    if 'TERM' in os.environ:
      payloadEnv['TERM'] = os.environ['TERM']
    payloadEnv['TMP'] = '/tmp'
    payloadEnv['TMPDIR'] = '/tmp'
    payloadEnv['X509_USER_PROXY'] = os.path.join(self.__innerdir, "proxy")
    return payloadEnv

  @staticmethod
  def __checkResult(tmpDir):
    """ Gets the result of the payload command and returns it. """
    # The wrapper writes the inner job return code to "retcode"
    # in the working directory.
    try:
      with open(os.path.join(tmpDir, "retcode"), "r") as fd:
        retCode = int(fd.read())
    except (IOError, ValueError):
      # Something failed while trying to get the return code
      result = S_ERROR("Failed to get return code from inner wrapper")
      result['ReschedulePayload'] = True
      return result

    result = S_OK()
    if retCode:
      # This is the one case where we don't reschedule:
      # An actual failure of the inner payload for some reason
      result = S_ERROR("Command failed with exit code %d" % retCode)
    return result

  # pylint: disable=unused-argument,arguments-differ
  def submitJob(self, executableFile, proxy, jobDesc, log, logLevel, **kwargs):
    """ Start a container for a job.
        executableFile is ignored. A new wrapper suitable for running in a
        container is created from jobDesc.
    """
    rootImage = self.__root

    # Check that singularity is available
    if not self.__hasSingularity():
      self.log.error('Singularity is not installed on PATH.')
      result = S_ERROR("Failed to find singularity ")
      result['ReschedulePayload'] = True
      return result

    self.log.info('Creating singularity container')

    # Start by making the directory for the container
    ret = self.__createWorkArea(proxy, jobDesc, log, logLevel)
    if not ret['OK']:
      return ret
    baseDir = ret['baseDir']
    tmpDir = ret['tmpDir']
    proxyLoc = ret['proxyLocation']

    # Now we have to set-up proxy renewal for the container
    # This is fairly easy as it remains visible on the host filesystem
    ret = getProxyInfo()
    if not ret['OK']:
      pilotProxy = None
    else:
      pilotProxy = ret['Value']['path']
    result = gThreadScheduler.addPeriodicTask(self.proxyCheckPeriod, self._monitorProxy,
                                              taskArgs=(pilotProxy, proxyLoc),
                                              executions=0, elapsedTime=0)
    renewTask = None
    if result['OK']:
      renewTask = result['Value']
    else:
      self.log.warn('Failed to start proxy renewal task')

    # Very simple accounting
    self.__submittedJobs += 1
    self.__runningJobs += 1

    # Now prepare start singularity
    # Mount /cvmfs in if it exists on the host
    withCVMFS = os.path.isdir("/cvmfs")
    innerCmd = os.path.join(self.__innerdir, "dirac_container.sh")
    cmd = [self.__singularityBin, "exec"]
    cmd.extend(["-c", "-i", "-p"])
    cmd.extend(["-W", baseDir])
    if withCVMFS:
      cmd.extend(["-B", "/cvmfs"])
    if 'ContainerBind' in self.ceParameters:
      bindPaths = self.ceParameters['ContainerBind'].split(',')
      for bindPath in bindPaths:
        cmd.extend(["-B", bindPath.strip()])
    if 'ContainerOptions' in self.ceParameters:
      containerOpts = self.ceParameters['ContainerOptions'].split(',')
      for opt in containerOpts:
        cmd.extend([opt.strip()])
    cmd.extend([rootImage, innerCmd])

    self.log.debug('Execute singularity command: %s' % cmd)
    self.log.debug('Execute singularity env: %s' % self.__getEnv())
    result = systemCall(0, cmd, callbackFunction=self.sendOutput, env=self.__getEnv())

    self.__runningJobs -= 1

    # The payload has finished (or failed to start): always remove the proxy
    # renewal task.  It was scheduled with executions=0 (i.e. forever), so
    # leaving it registered after a successful job would leak a periodic task.
    if renewTask:
      gThreadScheduler.removeTask(renewTask)

    if not result["OK"]:
      result = S_ERROR("Error running singularity command")
      result['ReschedulePayload'] = True
      return result

    return self.__checkResult(tmpDir)

  def getCEStatus(self, jobIDList=None):
    """ Method to return information on running and pending jobs.
    """
    result = S_OK()
    result['SubmittedJobs'] = self.__submittedJobs
    result['RunningJobs'] = self.__runningJobs
    result['WaitingJobs'] = 0
    return result
| chaen/DIRAC | Resources/Computing/SingularityComputingElement.py | Python | gpl-3.0 | 11,852 | [
"DIRAC"
] | 17ae3d365b8a9127d76a82ec3364ceb2cf2df183e59d652cef04a50d0b996a85 |
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import vericred_client
from vericred_client.rest import ApiException
from vericred_client.apis.zip_counties_api import ZipCountiesApi
class TestZipCountiesApi(unittest.TestCase):
""" ZipCountiesApi unit test stubs """
def setUp(self):
self.api = vericred_client.apis.zip_counties_api.ZipCountiesApi()
def tearDown(self):
pass
def test_get_zip_counties(self):
"""
Test case for get_zip_counties
Search for Zip Counties
"""
pass
def test_show_zip_county(self):
"""
Test case for show_zip_county
Show an individual ZipCounty
"""
pass
# Run the test stubs with the standard unittest runner when executed directly.
if __name__ == '__main__':
    unittest.main()
| vericred/vericred-python | test/test_zip_counties_api.py | Python | apache-2.0 | 10,219 | [
"VisIt"
] | 71351bd485f01996d0c0e8801338d205d00e855edaf4c5b329f9efee4fb8e6c2 |
""" This is the RFIO StorageClass
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import re
import os
import time
import six
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Resources.Storage.Utilities import checkArgumentFormat
from DIRAC.Resources.Storage.StorageBase import StorageBase
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.File import getSize
class RFIOStorage(StorageBase):
_INPUT_PROTOCOLS = ["file", "rfio"]
_OUTPUT_PROTOCOLS = ["rfio"]
    def __init__(self, storageName, parameters):
        """Standard DIRAC storage-plugin constructor.

        :param storageName: name of this storage instance
        :param parameters: protocol parameter dict; must contain 'SpaceToken'
        """
        StorageBase.__init__(self, storageName, parameters)
        self.spaceToken = self.protocolParameters["SpaceToken"]
        self.isok = True
        self.pluginName = "RFIO"
        # Timeouts (seconds) for short and long shell commands.
        self.timeout = 100
        self.long_timeout = 600
#############################################################
#
# These are the methods for manipulating the client
#
def getName(self):
"""The name with which the storage was instantiated"""
return S_OK(self.name)
#############################################################
#
# These are the methods for file manipulation
#
    def exists(self, path):
        """Check if the given path exists. The 'path' variable can be a string or a list of strings."""
        res = self.__checkArgumentFormat(path)
        if not res["OK"]:
            return res
        urls = res["Value"]
        gLogger.debug("RFIOStorage.exists: Determining the existance of %s files." % len(urls))
        # One 'nsls -d' call covers all paths: existing paths are echoed on
        # stdout, missing ones reported on stderr.
        comm = "nsls -d"
        for url in urls:
            comm = " %s %s" % (comm, url)
        res = shellCall(self.timeout, comm)
        successful = {}
        failed = {}
        if res["OK"]:
            returncode, stdout, stderr = res["Value"]
            # nsls exits 1 when some paths are missing; 0 and 1 both mean the
            # command itself ran, so the output can be parsed.
            if returncode in [0, 1]:
                for line in stdout.splitlines():
                    url = line.strip()
                    successful[url] = True
                for line in stderr.splitlines():
                    # stderr lines look like '<pfn>: <error>'; a missing path is
                    # still a *successful* determination (exists == False).
                    pfn, _ = line.split(": ")
                    url = pfn.strip()
                    successful[url] = False
            else:
                errStr = "RFIOStorage.exists: Completely failed to determine the existance files."
                gLogger.error(errStr, "%s %s" % (self.name, stderr))
                return S_ERROR(errStr)
        else:
            errStr = "RFIOStorage.exists: Completely failed to determine the existance files."
            gLogger.error(errStr, "%s %s" % (self.name, res["Message"]))
            return S_ERROR(errStr)
        resDict = {"Failed": failed, "Successful": successful}
        return S_OK(resDict)
    def isFile(self, path):
        """Check if the given path exists and it is a file"""
        res = self.__checkArgumentFormat(path)
        if not res["OK"]:
            return res
        urls = res["Value"]
        gLogger.debug("RFIOStorage.isFile: Determining whether %s paths are files." % len(urls))
        successful = {}
        failed = {}
        # 'nsls -ld' prints one long-format line per path; the first character
        # of the permission string distinguishes directories ('d') from files.
        comm = "nsls -ld"
        for url in urls:
            comm = " %s %s" % (comm, url)
        res = shellCall(self.timeout, comm)
        if not res["OK"]:
            return res
        returncode, stdout, stderr = res["Value"]
        # Exit code 1 means some paths failed; those appear on stderr.
        if returncode in [0, 1]:
            for line in stdout.splitlines():
                permissions, _subdirs, _owner, _group, _size, _month, _date, _timeYear, pfn = line.split()
                if permissions[0] != "d":
                    successful[pfn] = True
                else:
                    successful[pfn] = False
            for line in stderr.splitlines():
                # stderr lines look like '<pfn>: <error message>'.
                pfn, error = line.split(": ")
                url = pfn.strip()
                failed[url] = error
        else:
            errStr = "RFIOStorage.isFile: Completely failed to determine whether path is file."
            gLogger.error(errStr, "%s %s" % (self.name, stderr))
            return S_ERROR(errStr)
        resDict = {"Failed": failed, "Successful": successful}
        return S_OK(resDict)
    def __getPathMetadata(self, urls):
        """Run one 'nsls -ld' over all urls and parse type/mode/ownership/size
        into a per-path metadata dict."""
        gLogger.debug("RFIOStorage.__getPathMetadata: Attempting to get metadata for %s paths." % (len(urls)))
        comm = "nsls -ld"
        for url in urls:
            comm = " %s %s" % (comm, url)
        res = shellCall(self.timeout, comm)
        successful = {}
        failed = {}
        if not res["OK"]:
            errStr = "RFIOStorage.__getPathMetadata: Completely failed to get path metadata."
            gLogger.error(errStr, res["Message"])
            return S_ERROR(errStr)
        else:
            returncode, stdout, stderr = res["Value"]
            # Exit code 1 means some paths failed (reported on stderr).
            if returncode not in [0, 1]:
                errStr = "RFIOStorage.__getPathMetadata: failed to perform nsls."
                gLogger.error(errStr, stderr)
            else:
                for line in stdout.splitlines():
                    # Long-listing format: perms, nb subdirs, owner, group,
                    # size, month, date, time-or-year, path.
                    permissions, subdirs, owner, group, size, month, date, timeYear, pfn = line.split()
                    successful[pfn] = {}
                    if permissions[0] == "d":
                        successful[pfn]["Type"] = "Directory"
                    else:
                        successful[pfn]["Type"] = "File"
                    successful[pfn]["Mode"] = self.__permissionsToInt(permissions)
                    successful[pfn]["NbSubDirs"] = subdirs
                    successful[pfn]["Owner"] = owner
                    successful[pfn]["Group"] = group
                    successful[pfn]["Size"] = int(size)
                    successful[pfn]["Month"] = month
                    successful[pfn]["Date"] = date
                    successful[pfn]["Year"] = timeYear
                for line in stderr.splitlines():
                    # stderr lines look like '<pfn>: <error message>'.
                    pfn, error = line.split(": ")
                    url = pfn.strip()
                    failed[url] = error
        resDict = {"Failed": failed, "Successful": successful}
        return S_OK(resDict)
def __permissionsToInt(self, permissions):
mode = permissions[1:]
return sum(pow(2, 8 - i) * int(mode[i] != "-") for i in range(0, 9))
    def __getFileMetadata(self, urls):
        """Augment file metadata with stager ('Cached') and tape
        ('Migrated', 'Checksum') status for the given pfns."""
        gLogger.debug(
            "RFIOStorage.__getPathMetadata: Attempting to get additional metadata for %s files." % (len(urls))
        )
        # Check whether the files that exist are staged
        comm = "stager_qry -S %s" % self.spaceToken
        successful = {}
        for pfn in urls:
            successful[pfn] = {}
            comm = "%s -M %s" % (comm, pfn)
        res = shellCall(self.timeout, comm)
        if not res["OK"]:
            errStr = "RFIOStorage.__getFileMetadata: Completely failed to get cached status."
            gLogger.error(errStr, res["Message"])
            return S_ERROR(errStr)
        else:
            _returncode, stdout, _stderr = res["Value"]
            for line in stdout.splitlines():
                pfn = line.split()[0]
                status = line.split()[-1]
                # STAGED and CANBEMIGR both mean the replica is on disk cache.
                if status in ["STAGED", "CANBEMIGR"]:
                    successful[pfn]["Cached"] = True
        # Anything not reported as staged is marked not cached.
        for pfn in urls:
            if "Cached" not in successful[pfn]:
                successful[pfn]["Cached"] = False
        # Now for the files that exist get the tape segment (i.e. whether they have been migrated) and related checksum
        comm = "nsls -lT --checksum"
        for pfn in urls:
            comm = "%s %s" % (comm, pfn)
        res = shellCall(self.timeout, comm)
        if not res["OK"]:
            errStr = "RFIOStorage.__getFileMetadata: Completely failed to get migration status."
            gLogger.error(errStr, res["Message"])
            return S_ERROR(errStr)
        else:
            _returncode, stdout, _stderr = res["Value"]
            for line in stdout.splitlines():
                # Only files with a tape segment appear; last two columns are
                # checksum and path.
                pfn = line.split()[-1]
                checksum = line.split()[-2]
                successful[pfn]["Migrated"] = True
                successful[pfn]["Checksum"] = checksum
        for pfn in urls:
            if "Migrated" not in successful[pfn]:
                successful[pfn]["Migrated"] = False
        # Update all the metadata with the common one
        for lfn in successful:
            successful[lfn] = self._addCommonMetadata(successful[lfn])
        resDict = {"Failed": {}, "Successful": successful}
        return S_OK(resDict)
def getFile(self, path, localPath=False):
"""Get a local copy in the current directory of a physical file specified by its path"""
res = checkArgumentFormat(path)
if not res["OK"]:
return res
urls = res["Value"]
failed = {}
successful = {}
for src_url in urls.keys():
fileName = os.path.basename(src_url)
if localPath:
dest_file = "%s/%s" % (localPath, fileName)
else:
dest_file = "%s/%s" % (os.getcwd(), fileName)
res = self.__getFile(src_url, dest_file)
if res["OK"]:
successful[src_url] = res["Value"]
else:
failed[src_url] = res["Message"]
resDict = {"Failed": failed, "Successful": successful}
return S_OK(resDict)
    def __getFile(self, src_url, dest_file):
        """Get a local copy in the current directory of a physical file specified by its path"""
        # Make sure the destination directory exists and that no stale local
        # copy is left in place before the transfer.
        if not os.path.exists(os.path.dirname(dest_file)):
            os.makedirs(os.path.dirname(dest_file))
        if os.path.exists(dest_file):
            gLogger.debug("RFIOStorage.getFile: Local file already exists %s. Removing..." % dest_file)
            os.remove(dest_file)
        res = self.__executeOperation(src_url, "getFileSize")
        if not res["OK"]:
            return S_ERROR(res["Message"])
        remoteSize = res["Value"]
        MIN_BANDWIDTH = 1024 * 100  # 100 KB/s
        # Timeout scales with file size assuming worst-case bandwidth,
        # plus a 300 s fixed margin.
        timeout = int(remoteSize / MIN_BANDWIDTH + 300)
        gLogger.debug("RFIOStorage.getFile: Executing transfer of %s to %s" % (src_url, dest_file))
        comm = "rfcp %s %s" % (src_url, dest_file)
        res = shellCall(timeout, comm)
        if res["OK"]:
            returncode, _stdout, stderr = res["Value"]
            if returncode == 0:
                gLogger.debug("RFIOStorage.__getFile: Got file from storage, performing post transfer check.")
                # Post-transfer check: local size must match the remote size.
                localSize = getSize(dest_file)
                if localSize == remoteSize:
                    gLogger.debug("RFIOStorage.getFile: Post transfer check successful.")
                    return S_OK(localSize)
                errorMessage = "RFIOStorage.__getFile: Source and destination file sizes do not match."
                gLogger.error(errorMessage, src_url)
            else:
                errStr = "RFIOStorage.__getFile: Failed to get local copy of file."
                gLogger.error(errStr, stderr)
                errorMessage = "%s %s" % (errStr, stderr)
        else:
            errStr = "RFIOStorage.__getFile: Failed to get local copy of file."
            gLogger.error(errStr, res["Message"])
            errorMessage = "%s %s" % (errStr, res["Message"])
        # Remove any partially-transferred local file before reporting failure.
        if os.path.exists(dest_file):
            gLogger.debug("RFIOStorage.getFile: Removing local file %s." % dest_file)
            os.remove(dest_file)
        return S_ERROR(errorMessage)
def putFile(self, path, sourceSize=0):
res = checkArgumentFormat(path)
if not res["OK"]:
return res
urls = res["Value"]
failed = {}
successful = {}
for dest_url, src_file in urls.items():
res = self.__executeOperation(os.path.dirname(dest_url), "createDirectory")
if not res["OK"]:
failed[dest_url] = res["Message"]
else:
res = self.__putFile(src_file, dest_url, sourceSize)
if res["OK"]:
successful[dest_url] = res["Value"]
else:
failed[dest_url] = res["Message"]
resDict = {"Failed": failed, "Successful": successful}
return S_OK(resDict)
def __putFile(self, src_file, dest_url, sourceSize):
"""Put a copy of the local file to the current directory on the physical storage"""
# Pre-transfer check
res = self.__executeOperation(dest_url, "exists")
if not res["OK"]:
gLogger.debug("RFIOStorage.__putFile: Failed to find pre-existance of destination file.")
return res
if res["Value"]:
res = self.__executeOperation(dest_url, "removeFile")
if not res["OK"]:
gLogger.debug("RFIOStorage.__putFile: Failed to remove remote file %s." % dest_url)
else:
gLogger.debug("RFIOStorage.__putFile: Removed remote file %s." % dest_url)
if not os.path.exists(src_file):
errStr = "RFIOStorage.__putFile: The source local file does not exist."
gLogger.error(errStr, src_file)
return S_ERROR(errStr)
sourceSize = getSize(src_file)
if sourceSize == -1:
errStr = "RFIOStorage.__putFile: Failed to get file size."
gLogger.error(errStr, src_file)
return S_ERROR(errStr)
res = self.__getTransportURL(dest_url)
if not res["OK"]:
gLogger.debug("RFIOStorage.__putFile: Failed to get transport URL for file.")
return res
turl = res["Value"]
MIN_BANDWIDTH = 1024 * 100 # 100 KB/s
timeout = sourceSize / MIN_BANDWIDTH + 300
gLogger.debug("RFIOStorage.putFile: Executing transfer of %s to %s" % (src_file, turl))
comm = "rfcp %s '%s'" % (src_file, turl)
res = shellCall(timeout, comm)
if res["OK"]:
returncode, _stdout, stderr = res["Value"]
if returncode == 0:
gLogger.debug("RFIOStorage.putFile: Put file to storage, performing post transfer check.")
res = self.__executeOperation(dest_url, "getFileSize")
if res["OK"]:
destinationSize = res["Value"]
if sourceSize == destinationSize:
gLogger.debug("RFIOStorage.__putFile: Post transfer check successful.")
return S_OK(destinationSize)
errorMessage = "RFIOStorage.__putFile: Source and destination file sizes do not match."
gLogger.error(errorMessage, dest_url)
else:
errStr = "RFIOStorage.__putFile: Failed to put file to remote storage."
gLogger.error(errStr, stderr)
errorMessage = "%s %s" % (errStr, stderr)
else:
errStr = "RFIOStorage.__putFile: Failed to put file to remote storage."
gLogger.error(errStr, res["Message"])
errorMessage = "%s %s" % (errStr, res["Message"])
res = self.__executeOperation(dest_url, "removeFile")
if res["OK"]:
gLogger.debug("RFIOStorage.__putFile: Removed remote file remnant %s." % dest_url)
else:
gLogger.debug("RFIOStorage.__putFile: Unable to remove remote file remnant %s." % dest_url)
return S_ERROR(errorMessage)
    def removeFile(self, path):
        """Remove physically the file specified by its path"""
        res = self.__checkArgumentFormat(path)
        if not res["OK"]:
            return res
        urls = res["Value"]
        successful = {}
        failed = {}
        # Chunks of 100 keep the generated command lines a manageable length.
        listOfLists = breakListIntoChunks(urls, 100)
        for urls in listOfLists:
            gLogger.debug("RFIOStorage.removeFile: Attempting to remove %s files." % len(urls))
            # Step 1: evict the replicas from the stager (disk cache).
            comm = "stager_rm -S %s" % self.spaceToken
            for url in urls:
                comm = "%s -M %s" % (comm, url)
            res = shellCall(100, comm)
            if res["OK"]:
                returncode, _stdout, stderr = res["Value"]
                if returncode in [0, 1]:
                    # Step 2: remove the entries from the name server.
                    comm = "nsrm -f"
                    for url in urls:
                        comm = "%s %s" % (comm, url)
                    res = shellCall(100, comm)
                    if res["OK"]:
                        returncode, _stdout, stderr = res["Value"]
                        if returncode in [0, 1]:
                            for pfn in urls:
                                successful[pfn] = True
                        else:
                            errStr = "RFIOStorage.removeFile. Completely failed to remove files from the nameserver."
                            gLogger.error(errStr, stderr)
                            for pfn in urls:
                                failed[pfn] = errStr
                    else:
                        errStr = "RFIOStorage.removeFile. Completely failed to remove files from the nameserver."
                        gLogger.error(errStr, res["Message"])
                        for pfn in urls:
                            failed[pfn] = errStr
                else:
                    errStr = "RFIOStorage.removeFile. Completely failed to remove files from the stager."
                    gLogger.error(errStr, stderr)
                    for pfn in urls:
                        failed[pfn] = errStr
            else:
                errStr = "RFIOStorage.removeFile. Completely failed to remove files from the stager."
                gLogger.error(errStr, res["Message"])
                for pfn in urls:
                    failed[pfn] = errStr
        resDict = {"Failed": failed, "Successful": successful}
        return S_OK(resDict)
    def getFileMetadata(self, path):
        """Get metadata associated to the file"""
        res = self.__checkArgumentFormat(path)
        if not res["OK"]:
            return res
        urls = res["Value"]
        gLogger.debug("RFIOStorage.getFileMetadata: Obtaining metadata for %s files." % len(urls))
        # Base metadata (type, mode, size, ...) comes from the name server.
        res = self.__getPathMetadata(urls)
        if not res["OK"]:
            return res
        failed = {}
        successful = {}
        for pfn, error in res["Value"]["Failed"].items():
            # Normalise the nameserver error message for callers.
            if error == "No such file or directory":
                failed[pfn] = "File does not exist"
            else:
                failed[pfn] = error
        files = []
        for pfn, pfnDict in res["Value"]["Successful"].items():
            if pfnDict["Type"] == "Directory":
                failed[pfn] = "Supplied path is not a file"
            else:
                successful[pfn] = res["Value"]["Successful"][pfn]
                files.append(pfn)
        if files:
            # Extend real files with stager/tape info (Cached, Migrated, Checksum).
            res = self.__getFileMetadata(files)
            if not res["OK"]:
                return res
            for pfn, pfnDict in res["Value"]["Successful"].items():
                successful[pfn].update(pfnDict)
        resDict = {"Failed": failed, "Successful": successful}
        return S_OK(resDict)
    def getFileSize(self, path):
        """Get the physical size of the given file"""
        res = self.__checkArgumentFormat(path)
        if not res["OK"]:
            return res
        urls = res["Value"]
        gLogger.debug("RFIOStorage.getFileSize: Determining the sizes for %s files." % len(urls))
        res = self.__getPathMetadata(urls)
        if not res["OK"]:
            return res
        failed = {}
        successful = {}
        for pfn, error in res["Value"]["Failed"].items():
            # Normalise the nameserver error message for callers.
            if error == "No such file or directory":
                failed[pfn] = "File does not exist"
            else:
                failed[pfn] = error
        for pfn, pfnDict in res["Value"]["Successful"].items():
            if pfnDict["Type"] == "Directory":
                failed[pfn] = "Supplied path is not a file"
            else:
                # Size in bytes as reported by the name server listing.
                successful[pfn] = res["Value"]["Successful"][pfn]["Size"]
        resDict = {"Failed": failed, "Successful": successful}
        return S_OK(resDict)
    def prestageFile(self, path):
        """Issue prestage request for file"""
        res = self.__checkArgumentFormat(path)
        if not res["OK"]:
            return res
        urls = res["Value"]
        # A unique user tag lets prestageFileStatus query this request later.
        userTag = "%s-%s" % (self.spaceToken, time.time())
        comm = "stager_get -S %s -U %s " % (self.spaceToken, userTag)
        for url in urls:
            comm = "%s -M %s" % (comm, url)
        res = shellCall(100, comm)
        successful = {}
        failed = {}
        if res["OK"]:
            returncode, stdout, stderr = res["Value"]
            if returncode in [0, 1]:
                for line in stdout.splitlines():
                    # stager_get reports a per-file subrequest status line.
                    if re.search("SUBREQUEST_READY", line):
                        pfn, _status = line.split()
                        successful[pfn] = userTag
                    elif re.search("SUBREQUEST_FAILED", line):
                        pfn, _status, err = line.split(" ", 2)
                        failed[pfn] = err
            else:
                errStr = "RFIOStorage.prestageFile: Got unexpected return code from stager_get."
                gLogger.error(errStr, stderr)
                return S_ERROR(errStr)
        else:
            errStr = "RFIOStorage.prestageFile: Completely failed to issue stage requests."
            gLogger.error(errStr, res["Message"])
            return S_ERROR(errStr)
        resDict = {"Failed": failed, "Successful": successful}
        return S_OK(resDict)
    def prestageFileStatus(self, path):
        """Monitor the status of a prestage request"""
        res = checkArgumentFormat(path)
        if not res["OK"]:
            return res
        urls = res["Value"]
        successful = {}
        failed = {}
        # Group the files by the request ID returned from prestageFile so a
        # single stager_qry per request covers all of its files.
        requestFiles = {}
        for url, requestID in urls.items():
            if requestID not in requestFiles:
                requestFiles[requestID] = []
            requestFiles[requestID].append(url)
        for requestID, urls in requestFiles.items():
            comm = "stager_qry -S %s -U %s " % (self.spaceToken, requestID)
            res = shellCall(100, comm)
            if res["OK"]:
                returncode, stdout, stderr = res["Value"]
                if returncode in [0, 1]:
                    for line in stdout.splitlines():
                        pfn = line.split()[0]
                        status = line.split()[-1]
                        # STAGED/CANBEMIGR both mean the file is on disk.
                        if status in ["STAGED", "CANBEMIGR"]:
                            successful[pfn] = True
                        else:
                            successful[pfn] = False
                else:
                    errStr = "RFIOStorage.prestageFileStatus: Got unexpected return code from stager_get."
                    gLogger.error(errStr, stderr)
                    return S_ERROR(errStr)
            else:
                errStr = "RFIOStorage.prestageFileStatus: Completely failed to obtain prestage status."
                gLogger.error(errStr, res["Message"])
                return S_ERROR(errStr)
        resDict = {"Failed": failed, "Successful": successful}
        return S_OK(resDict)
    def getTransportURL(self, path, protocols=False):
        """Obtain the TURLs for the supplied path and protocols

        NOTE: 'protocols' is accepted for interface compatibility but is not
        used here; the TURL is always built for the rfio/castor protocol.
        """
        res = self.__checkArgumentFormat(path)
        if not res["OK"]:
            return res
        urls = res["Value"]
        successful = {}
        failed = {}
        # Only existing files get a TURL.
        res = self.exists(urls)
        if not res["OK"]:
            return res
        for path, exists in res["Value"]["Successful"].items():
            if not exists:
                failed[path] = "File does not exist"
            else:
                res = self.__getTransportURL(path)
                if not res["OK"]:
                    failed[path] = res["Message"]
                else:
                    successful[path] = res["Value"]
        resDict = {"Failed": failed, "Successful": successful}
        return S_OK(resDict)
    def __getTransportURL(self, path):
        """Build a castor/rfio TURL for 'path', embedding the service class
        (space token) when one is configured."""
        try:
            if self.spaceToken:
                tURL = "%s://%s:%s/?svcClass=%s&castorVersion=2&path=%s" % (
                    self.protocolParameters["Protocol"],
                    self.protocolParameters["Host"],
                    self.protocolParameters["Port"],
                    self.spaceToken,
                    path,
                )
            else:
                # No space token configured: fall back to a plain castor path.
                tURL = "castor:%s" % (path)
            return S_OK(tURL)
        except Exception as x:
            errStr = "RFIOStorage.__getTransportURL: Exception while creating turl."
            gLogger.exception(errStr, self.name, x)
            return S_ERROR(errStr)
#############################################################
#
# These are the methods for directory manipulation
#
def isDirectory(self, path):
"""Check if the given path exists and it is a directory"""
res = self.__checkArgumentFormat(path)
if not res["OK"]:
return res
urls = res["Value"]
gLogger.debug("RFIOStorage.isDirectory: Determining whether %s paths are directories." % len(urls))
res = self.__getPathMetadata(urls)
if not res["OK"]:
return res
failed = {}
successful = {}
for pfn, error in res["Value"]["Failed"].items():
if error == "No such file or directory":
failed[pfn] = "Directory does not exist"
else:
failed[pfn] = error
for pfn, pfnDict in res["Value"]["Successful"].items():
if pfnDict["Type"] == "Directory":
successful[pfn] = True
else:
successful[pfn] = False
resDict = {"Failed": failed, "Successful": successful}
return S_OK(resDict)
    def getDirectory(self, path, localPath=False):
        """Get locally a directory from the physical storage together with all its files and subdirectories."""
        res = self.__checkArgumentFormat(path)
        if not res["OK"]:
            return res
        urls = res["Value"]
        successful = {}
        failed = {}
        gLogger.debug("RFIOStorage.getDirectory: Attempting to get local copies of %s directories." % len(urls))
        for src_directory in urls:
            # Each remote directory is replicated under localPath (or the cwd).
            dirName = os.path.basename(src_directory)
            if localPath:
                dest_dir = "%s/%s" % (localPath, dirName)
            else:
                dest_dir = "%s/%s" % (os.getcwd(), dirName)
            res = self.__getDir(src_directory, dest_dir)
            if res["OK"]:
                if res["Value"]["AllGot"]:
                    gLogger.debug("RFIOStorage.getDirectory: Successfully got local copy of %s" % src_directory)
                    successful[src_directory] = {"Files": res["Value"]["Files"], "Size": res["Value"]["Size"]}
                else:
                    # Partial success: report the counts under Failed.
                    gLogger.error("RFIOStorage.getDirectory: Failed to get entire directory.", src_directory)
                    failed[src_directory] = {"Files": res["Value"]["Files"], "Size": res["Value"]["Size"]}
            else:
                gLogger.error(
                    "RFIOStorage.getDirectory: Completely failed to get local copy of directory.", src_directory
                )
                failed[src_directory] = {"Files": 0, "Size": 0}
        resDict = {"Failed": failed, "Successful": successful}
        return S_OK(resDict)
def __getDir(self, srcDirectory, destDirectory):
"""Black magic contained within..."""
filesGot = 0
sizeGot = 0
# Check the remote directory exists
res = self.isDirectory(srcDirectory)
if not res["OK"]:
errStr = "RFIOStorage.__getDir: Failed to find the supplied source directory."
gLogger.error(errStr, srcDirectory)
return S_ERROR(errStr)
if srcDirectory not in res["Value"]["Successful"]:
errStr = "RFIOStorage.__getDir: Failed to find the supplied source directory."
gLogger.error(errStr, srcDirectory)
return S_ERROR(errStr)
if not res["Value"]["Successful"][srcDirectory]:
errStr = "RFIOStorage.__getDir: The supplied source directory does not exist."
gLogger.error(errStr, srcDirectory)
return S_ERROR(errStr)
# Check the local directory exists and create it if not
if not os.path.exists(destDirectory):
os.makedirs(destDirectory)
# Get the remote directory contents
res = self.listDirectory(srcDirectory)
if not res["OK"]:
errStr = "RFIOStorage.__getDir: Failed to list the source directory."
gLogger.error(errStr, srcDirectory)
if srcDirectory not in res["Value"]["Successful"]:
errStr = "RFIOStorage.__getDir: Failed to list the source directory."
gLogger.error(errStr, srcDirectory)
surlsDict = res["Value"]["Successful"][srcDirectory]["Files"]
subDirsDict = res["Value"]["Successful"][srcDirectory]["SubDirs"]
# First get all the files in the directory
gotFiles = True
for surl in surlsDict.keys():
surlGot = False
fileSize = surlsDict[surl]["Size"]
fileName = os.path.basename(surl)
localPath = "%s/%s" % (destDirectory, fileName)
fileDict = {surl: localPath}
res = self.getFile(fileDict)
if res["OK"]:
if surl in res["Value"]["Successful"]:
filesGot += 1
sizeGot += fileSize
surlGot = True
if not surlGot:
gotFiles = False
# Then recursively get the sub directories
subDirsGot = True
for subDir in subDirsDict.keys():
subDirName = os.path.basename(subDir)
localPath = "%s/%s" % (destDirectory, subDirName)
dirSuccessful = False
res = self.__getDir(subDir, localPath)
if res["OK"]:
if res["Value"]["AllGot"]:
dirSuccessful = True
filesGot += res["Value"]["Files"]
sizeGot += res["Value"]["Size"]
if not dirSuccessful:
subDirsGot = False
# Check whether all the operations were successful
if subDirsGot and gotFiles:
allGot = True
else:
allGot = False
resDict = {"AllGot": allGot, "Files": filesGot, "Size": sizeGot}
return S_OK(resDict)
    def putDirectory(self, path):
        """Put a local directory to the physical storage together with all its files and subdirectories."""
        res = checkArgumentFormat(path)
        if not res["OK"]:
            return res
        urls = res["Value"]
        successful = {}
        failed = {}
        gLogger.debug("RFIOStorage.putDirectory: Attemping to put %s directories to remote storage." % len(urls))
        # 'urls' maps destination directory -> local source directory.
        for destDir, sourceDir in urls.items():
            res = self.__putDir(sourceDir, destDir)
            if res["OK"]:
                if res["Value"]["AllPut"]:
                    gLogger.debug(
                        "RFIOStorage.putDirectory: Successfully put directory to remote storage: %s" % destDir
                    )
                    successful[destDir] = {"Files": res["Value"]["Files"], "Size": res["Value"]["Size"]}
                else:
                    # Partial success: report the counts under Failed.
                    gLogger.error(
                        "RFIOStorage.putDirectory: Failed to put entire directory to remote storage.", destDir
                    )
                    failed[destDir] = {"Files": res["Value"]["Files"], "Size": res["Value"]["Size"]}
            else:
                gLogger.error(
                    "RFIOStorage.putDirectory: Completely failed to put directory to remote storage.", destDir
                )
                failed[destDir] = {"Files": 0, "Size": 0}
        resDict = {"Failed": failed, "Successful": successful}
        return S_OK(resDict)
    def __putDir(self, src_directory, dest_directory):
        """Recursively upload src_directory to dest_directory; returns
        S_OK({'AllPut': bool, 'Files': int, 'Size': int}) or S_ERROR."""
        filesPut = 0
        sizePut = 0
        # Check the local directory exists
        if not os.path.isdir(src_directory):
            errStr = "RFIOStorage.__putDir: The supplied source directory does not exist."
            gLogger.error(errStr, src_directory)
            return S_ERROR(errStr)
        # Create the remote directory
        res = self.createDirectory(dest_directory)
        if not res["OK"]:
            errStr = "RFIOStorage.__putDir: Failed to create destination directory."
            gLogger.error(errStr, dest_directory)
            return S_ERROR(errStr)
        # Get the local directory contents
        contents = os.listdir(src_directory)
        allSuccessful = True
        for cFile in contents:
            pathSuccessful = False
            localPath = "%s/%s" % (src_directory, cFile)
            remotePath = "%s/%s" % (dest_directory, cFile)
            if os.path.isdir(localPath):
                res = self.__putDir(localPath, remotePath)
                if res["OK"]:
                    if res["Value"]["AllPut"]:
                        pathSuccessful = True
                        filesPut += res["Value"]["Files"]
                        sizePut += res["Value"]["Size"]
                    else:
                        # NOTE(review): a partially-failed sub-directory aborts
                        # the whole put with S_ERROR, unlike the file branch
                        # below which only marks allSuccessful False — confirm
                        # this asymmetry is intended.
                        return S_ERROR("Failed to put directory")
            else:
                fileDict = {remotePath: localPath}
                res = self.putFile(fileDict)
                if res["OK"]:
                    if remotePath in res["Value"]["Successful"]:
                        filesPut += 1
                        sizePut += res["Value"]["Successful"][remotePath]
                        pathSuccessful = True
            if not pathSuccessful:
                allSuccessful = False
        resDict = {"AllPut": allSuccessful, "Files": filesPut, "Size": sizePut}
        return S_OK(resDict)
    def createDirectory(self, path):
        """Create the supplied directory path(s), including missing parents,
        in the CASTOR name server."""
        res = self.__checkArgumentFormat(path)
        if not res["OK"]:
            return res
        urls = res["Value"]
        successful = {}
        failed = {}
        gLogger.debug("RFIOStorage.createDirectory: Attempting to create %s directories." % len(urls))
        for url in urls:
            # Strip trailing slashes so the name server gets a canonical path.
            strippedUrl = url.rstrip("/")
            res = self.__makeDirs(strippedUrl)
            if res["OK"]:
                gLogger.debug("RFIOStorage.createDirectory: Successfully created directory on storage: %s" % url)
                successful[url] = True
            else:
                gLogger.error("RFIOStorage.createDirectory: Failed to create directory on storage.", url)
                failed[url] = res["Message"]
        resDict = {"Failed": failed, "Successful": successful}
        return S_OK(resDict)
def __makeDir(self, path):
# First create a local file that will be used as a directory place holder in storage name space
comm = "nsmkdir -m 775 %s" % path
res = shellCall(100, comm)
if not res["OK"]:
return res
returncode, _stdout, stderr = res["Value"]
if returncode not in [0]:
return S_ERROR(stderr)
return S_OK()
def __makeDirs(self, path):
"""Black magic contained within...."""
pDir = os.path.dirname(path)
res = self.exists(path)
if not res["OK"]:
return res
if res["OK"]:
if path in res["Value"]["Successful"]:
if res["Value"]["Successful"][path]:
return S_OK()
else:
res = self.exists(pDir)
if res["OK"]:
if pDir in res["Value"]["Successful"]:
if res["Value"]["Successful"][pDir]:
res = self.__makeDir(path)
else:
res = self.__makeDirs(pDir)
res = self.__makeDir(path)
return res
def removeDirectory(self, path, recursive=False):
"""Remove a directory on the physical storage together with all its files and
subdirectories.
"""
res = self.__checkArgumentFormat(path)
if not res["OK"]:
return res
urls = res["Value"]
gLogger.debug("RFIOStorage.removeDirectory: Attempting to remove %s directories." % len(urls))
successful = {}
failed = {}
for url in urls:
comm = "nsrm -r %s" % url
res = shellCall(100, comm)
if res["OK"]:
returncode, _stdout, stderr = res["Value"]
if returncode == 0:
successful[url] = {"FilesRemoved": 0, "SizeRemoved": 0}
elif returncode == 1:
successful[url] = {"FilesRemoved": 0, "SizeRemoved": 0}
else:
failed[url] = stderr
else:
errStr = "RFIOStorage.removeDirectory: Completely failed to remove directory."
gLogger.error(errStr, "%s %s" % (url, res["Message"]))
failed[url] = res["Message"]
resDict = {"Failed": failed, "Successful": successful}
return S_OK(resDict)
    def listDirectory(self, path):
        """List the supplied path. First checks whether the path is a directory then gets the contents."""
        res = self.__checkArgumentFormat(path)
        if not res["OK"]:
            return res
        urls = res["Value"]
        gLogger.debug("RFIOStorage.listDirectory: Attempting to list %s directories." % len(urls))
        res = self.isDirectory(urls)
        if not res["OK"]:
            return res
        successful = {}
        failed = res["Value"]["Failed"]
        directories = []
        for url, isDirectory in res["Value"]["Successful"].items():
            if isDirectory:
                directories.append(url)
            else:
                errStr = "RFIOStorage.listDirectory: Directory does not exist."
                gLogger.error(errStr, url)
                failed[url] = errStr
        for directory in directories:
            comm = "nsls -l %s" % directory
            res = shellCall(self.timeout, comm)
            if res["OK"]:
                returncode, stdout, stderr = res["Value"]
                if not returncode == 0:
                    errStr = "RFIOStorage.listDirectory: Failed to list directory."
                    gLogger.error(errStr, "%s %s" % (directory, stderr))
                    failed[directory] = errStr
                else:
                    subDirs = {}
                    files = {}
                    successful[directory] = {}
                    for line in stdout.splitlines():
                        permissions, _subdirs, _owner, _group, size, _month, _date, _timeYear, pfn = line.split()
                        # 'dirac_directory' placeholder entries are hidden.
                        if not pfn == "dirac_directory":
                            path = "%s/%s" % (directory, pfn)
                            if permissions[0] == "d":
                                # If the subpath is a directory
                                subDirs[path] = True
                            elif permissions[0] == "m":
                                # In the case that the path is a migrated file
                                files[path] = {"Size": int(size), "Migrated": 1}
                            else:
                                # In the case that the path is not migrated file
                                files[path] = {"Size": int(size), "Migrated": 0}
                    successful[directory]["SubDirs"] = subDirs
                    successful[directory]["Files"] = files
            else:
                errStr = "RFIOStorage.listDirectory: Completely failed to list directory."
                gLogger.error(errStr, "%s %s" % (directory, res["Message"]))
                return S_ERROR(errStr)
        resDict = {"Failed": failed, "Successful": successful}
        return S_OK(resDict)
    def getDirectoryMetadata(self, path):
        """Get the metadata for the directory"""
        res = self.__checkArgumentFormat(path)
        if not res["OK"]:
            return res
        urls = res["Value"]
        gLogger.debug("RFIOStorage.getDirectoryMetadata: Attempting to get metadata for %s directories." % len(urls))
        res = self.isDirectory(urls)
        if not res["OK"]:
            return res
        successful = {}
        failed = res["Value"]["Failed"]
        directories = []
        for url, isDirectory in res["Value"]["Successful"].items():
            if isDirectory:
                directories.append(url)
            else:
                errStr = "RFIOStorage.getDirectoryMetadata: Directory does not exist."
                gLogger.error(errStr, url)
                failed[url] = errStr
        res = self.__getPathMetadata(directories)
        if not res["OK"]:
            return res
        else:
            failed.update(res["Value"]["Failed"])
            # NOTE: this rebinding replaces the empty dict initialised above.
            successful = res["Value"]["Successful"]
        resDict = {"Failed": failed, "Successful": successful}
        return S_OK(resDict)
def getDirectorySize(self, path):
"""Get the size of the directory on the storage"""
res = self.__checkArgumentFormat(path)
if not res["OK"]:
return res
urls = res["Value"]
gLogger.debug("RFIOStorage.getDirectorySize: Attempting to get size of %s directories." % len(urls))
res = self.listDirectory(urls)
if not res["OK"]:
return res
failed = res["Value"]["Failed"]
successful = {}
for directory, dirDict in res["Value"]["Successful"].items():
directorySize = 0
directoryFiles = 0
filesDict = dirDict["Files"]
for _fileURL, fileDict in filesDict.items():
directorySize += fileDict["Size"]
directoryFiles += 1
gLogger.debug("RFIOStorage.getDirectorySize: Successfully obtained size of %s." % directory)
successful[directory] = {"Files": directoryFiles, "Size": directorySize}
resDict = {"Failed": failed, "Successful": successful}
return S_OK(resDict)
def __checkArgumentFormat(self, path):
"""FIXME: Can be replaced by a generic checkArgumentFormat Utility"""
if isinstance(path, six.string_types):
urls = [path]
elif isinstance(path, list):
urls = path
elif isinstance(path, dict):
urls = list(path)
else:
return S_ERROR("RFIOStorage.__checkArgumentFormat: Supplied path is not of the correct format.")
return S_OK(urls)
def __executeOperation(self, url, method):
"""Executes the requested functionality with the supplied url"""
fcn = None
if hasattr(self, method) and callable(getattr(self, method)):
fcn = getattr(self, method)
if not fcn:
return S_ERROR("Unable to invoke %s, it isn't a member funtion of RFIOStorage" % method)
res = fcn(url)
if not res["OK"]:
return res
elif url not in res["Value"]["Successful"]:
return S_ERROR(res["Value"]["Failed"][url])
return S_OK(res["Value"]["Successful"][url])
| ic-hep/DIRAC | src/DIRAC/Resources/Storage/RFIOStorage.py | Python | gpl-3.0 | 43,317 | [
"DIRAC"
] | e6a37fde11e6b467d05d3fbb86f2bd1cca2df2e665134b817bb8aa5cef15cb55 |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
try:
from simtk.unit import *
from simtk.openmm import *
from simtk.openmm.app import *
HAVE_OPENMM = True
except ImportError:
HAVE_OPENMM = False
import mdtraj as md
from mdtraj.testing import get_fn, eq, skipif
from mdtraj.reporters import hdf5reporter, netcdfreporter
from mdtraj.reporters import HDF5Reporter, NetCDFReporter, DCDReporter
from mdtraj.formats import HDF5TrajectoryFile, NetCDFTrajectoryFile
# Shared scratch directory for every test in this module; deleted by
# teardown_module().  NOTE(review): the name shadows the builtin `dir`.
dir = tempfile.mkdtemp()
def teardown_module(module):
    """remove the temporary directory created by tests in this file
    this gets automatically called by nose"""
    # `dir` is the module-level tempfile.mkdtemp() scratch directory.
    shutil.rmtree(dir)
@skipif(not HAVE_OPENMM, 'No OpenMM')
def test_reporter():
    """Run a short OpenMM simulation with HDF5, NetCDF and DCD reporters
    attached, then check the three trajectory files agree with each other."""
    tempdir = os.path.join(dir, 'test1')
    os.makedirs(tempdir)

    pdb = PDBFile(get_fn('native.pdb'))
    forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
    # NO PERIODIC BOUNDARY CONDITIONS
    system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffNonPeriodic,
                                     nonbondedCutoff=1.0*nanometers, constraints=HBonds, rigidWater=True)
    integrator = LangevinIntegrator(300*kelvin, 1.0/picoseconds, 2.0*femtoseconds)
    integrator.setConstraintTolerance(0.00001)

    platform = Platform.getPlatformByName('Reference')
    simulation = Simulation(pdb.topology, system, integrator, platform)
    simulation.context.setPositions(pdb.positions)
    simulation.context.setVelocitiesToTemperature(300*kelvin)

    hdf5file = os.path.join(tempdir, 'traj.h5')
    ncfile = os.path.join(tempdir, 'traj.nc')
    dcdfile = os.path.join(tempdir, 'traj.dcd')

    # report every 2 steps -> 100 steps below yields 50 frames per file
    reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
        cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
        velocities=True)
    reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True, cell=True)
    reporter3 = DCDReporter(dcdfile, 2)

    simulation.reporters.append(reporter)
    simulation.reporters.append(reporter2)
    simulation.reporters.append(reporter3)
    simulation.step(100)

    reporter.close()
    reporter2.close()
    reporter3.close()

    with HDF5TrajectoryFile(hdf5file) as f:
        got = f.read()
        eq(got.temperature.shape, (50,))
        eq(got.potentialEnergy.shape, (50,))
        eq(got.kineticEnergy.shape, (50,))
        eq(got.coordinates.shape, (50, 22, 3))
        eq(got.velocities.shape, (50, 22, 3))
        # non-periodic system -> no unit-cell information should be recorded
        eq(got.cell_lengths, None)
        eq(got.cell_angles, None)
        # 2 fs timestep = 0.002 ps, one frame every 2 steps
        eq(got.time, 0.002*2*(1+np.arange(50)))
        assert f.topology == md.load(get_fn('native.pdb')).top

    with NetCDFTrajectoryFile(ncfile) as f:
        xyz, time, cell_lengths, cell_angles = f.read()
        eq(cell_lengths, None)
        eq(cell_angles, None)
        eq(time, 0.002*2*(1+np.arange(50)))

    hdf5_traj = md.load(hdf5file)
    dcd_traj = md.load(dcdfile, top=get_fn('native.pdb'))
    netcdf_traj = md.load(ncfile, top=get_fn('native.pdb'))

    # we don't have to convert units here, because md.load already
    # handles that
    assert hdf5_traj.unitcell_vectors is None
    eq(hdf5_traj.xyz, netcdf_traj.xyz)
    eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
    eq(hdf5_traj.time, netcdf_traj.time)

    eq(dcd_traj.xyz, hdf5_traj.xyz)
    # yield lambda: eq(dcd_traj.unitcell_vectors, hdf5_traj.unitcell_vectors)
@skipif(not HAVE_OPENMM, 'No OpenMM')
def test_reporter_subset():
    """Like test_reporter, but with a periodic box and reporters that record
    only a subset of the atoms (atomSubset)."""
    tempdir = os.path.join(dir, 'test2')
    os.makedirs(tempdir)

    pdb = PDBFile(get_fn('native2.pdb'))
    pdb.topology.setUnitCellDimensions([2, 2, 2])
    forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
    system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffPeriodic,
                                     nonbondedCutoff=1*nanometers, constraints=HBonds, rigidWater=True)
    integrator = LangevinIntegrator(300*kelvin, 1.0/picoseconds, 2.0*femtoseconds)
    integrator.setConstraintTolerance(0.00001)

    platform = Platform.getPlatformByName('Reference')
    simulation = Simulation(pdb.topology, system, integrator, platform)
    simulation.context.setPositions(pdb.positions)
    simulation.context.setVelocitiesToTemperature(300*kelvin)

    hdf5file = os.path.join(tempdir, 'traj.h5')
    ncfile = os.path.join(tempdir, 'traj.nc')
    dcdfile = os.path.join(tempdir, 'traj.dcd')

    atomSubset = [0, 1, 2, 4, 5]

    # report every 2 steps -> 100 steps below yields 50 frames per file
    reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
        cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
        velocities=True, atomSubset=atomSubset)
    reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True,
                               cell=True, atomSubset=atomSubset)
    reporter3 = DCDReporter(dcdfile, 2, atomSubset=atomSubset)

    simulation.reporters.append(reporter)
    simulation.reporters.append(reporter2)
    simulation.reporters.append(reporter3)
    simulation.step(100)

    reporter.close()
    reporter2.close()
    reporter3.close()

    # NOTE(review): `t` is unused after this point — looks like leftover setup.
    t = md.load(get_fn('native.pdb'))
    t.restrict_atoms(atomSubset)

    with HDF5TrajectoryFile(hdf5file) as f:
        got = f.read()
        eq(got.temperature.shape, (50,))
        eq(got.potentialEnergy.shape, (50,))
        eq(got.kineticEnergy.shape, (50,))
        eq(got.coordinates.shape, (50, len(atomSubset), 3))
        eq(got.velocities.shape, (50, len(atomSubset), 3))
        # box was set to [2, 2, 2] above
        eq(got.cell_lengths, 2 * np.ones((50, 3)))
        eq(got.cell_angles, 90*np.ones((50, 3)))
        eq(got.time, 0.002*2*(1+np.arange(50)))
        assert f.topology == md.load(get_fn('native.pdb'), atom_indices=atomSubset).topology

    with NetCDFTrajectoryFile(ncfile) as f:
        xyz, time, cell_lengths, cell_angles = f.read()
        # 20 vs 2 above: presumably the NetCDF file stores lengths in a
        # different unit (angstroms) than HDF5 (nm) — confirm against spec
        eq(cell_lengths, 20 * np.ones((50, 3)))
        eq(cell_angles, 90*np.ones((50, 3)))
        eq(time, 0.002*2*(1+np.arange(50)))
        eq(xyz.shape, (50, len(atomSubset), 3))

    hdf5_traj = md.load(hdf5file)
    dcd_traj = md.load(dcdfile, top=hdf5_traj)
    netcdf_traj = md.load(ncfile, top=hdf5_traj)

    # we don't have to convert units here, because md.load already handles
    # that
    eq(hdf5_traj.xyz, netcdf_traj.xyz)
    eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
    eq(hdf5_traj.time, netcdf_traj.time)

    eq(dcd_traj.xyz, hdf5_traj.xyz)
    eq(dcd_traj.unitcell_vectors, hdf5_traj.unitcell_vectors)
| ctk3b/mdtraj | mdtraj/tests/test_reporter.py | Python | lgpl-2.1 | 7,441 | [
"MDTraj",
"OpenMM"
] | 97acec2556098ddb4e0f1ce2af1b47a0b755f156857afe921cd1b5818269550a |
# Generated from asparagram.g4 by ANTLR 4.5.1
from antlr4 import *
if __name__ is not None and "." in __name__:
from .asparagramParser import asparagramParser
else:
from asparagramParser import asparagramParser
# This class defines a complete generic visitor for a parse tree produced by asparagramParser.
class asparagramVisitor(ParseTreeVisitor):
    """ANTLR-generated generic visitor for asparagramParser parse trees.

    Every visit method simply delegates to visitChildren(); subclass and
    override individual methods to add behaviour.  NOTE: this file is
    auto-generated — regenerate with ANTLR instead of editing by hand.
    """

    # Visit a parse tree produced by asparagramParser#rlRoot.
    def visitRlRoot(self, ctx:asparagramParser.RlRootContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlStmt.
    def visitRlStmt(self, ctx:asparagramParser.RlStmtContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlGlobStmt.
    def visitRlGlobStmt(self, ctx:asparagramParser.RlGlobStmtContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlObjStmt.
    def visitRlObjStmt(self, ctx:asparagramParser.RlObjStmtContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlDynStmt.
    def visitRlDynStmt(self, ctx:asparagramParser.RlDynStmtContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlGlobStmts.
    def visitRlGlobStmts(self, ctx:asparagramParser.RlGlobStmtsContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlObjStmts.
    def visitRlObjStmts(self, ctx:asparagramParser.RlObjStmtsContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlDynStmts.
    def visitRlDynStmts(self, ctx:asparagramParser.RlDynStmtsContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlCallStmt.
    def visitRlCallStmt(self, ctx:asparagramParser.RlCallStmtContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlSetStmt.
    def visitRlSetStmt(self, ctx:asparagramParser.RlSetStmtContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlPlaceStmt.
    def visitRlPlaceStmt(self, ctx:asparagramParser.RlPlaceStmtContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlPos.
    def visitRlPos(self, ctx:asparagramParser.RlPosContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlNPropList.
    def visitRlNPropList(self, ctx:asparagramParser.RlNPropListContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlProperty.
    def visitRlProperty(self, ctx:asparagramParser.RlPropertyContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlValue.
    def visitRlValue(self, ctx:asparagramParser.RlValueContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlConnection.
    def visitRlConnection(self, ctx:asparagramParser.RlConnectionContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlRef.
    def visitRlRef(self, ctx:asparagramParser.RlRefContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by asparagramParser#rlOff.
    def visitRlOff(self, ctx:asparagramParser.RlOffContext):
        return self.visitChildren(ctx)


# remove the parser class from the module namespace (generated boilerplate)
del asparagramParser
"VisIt"
] | 7c1913ef8fad7dc166efd26436e18c2b10999e0a6f2eecec6d54bf4e634dc554 |
import warnings
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import normalize
class RandomWalk2D:
    '''TD(lambda) random walk (SARSA-style) on an n x n grid.

    State values are learned with eligibility traces; `h` counts how often
    each (row, col, action) was taken so plot() can show visit frequencies
    alongside the greedy policy.
    '''

    def __init__(self, grid_size=3, end_states=[(0,0)], rewards=[1], \
            exploration=.1, move_cost=0, alpha=.3, gamma=.9, lmbda=.7):
        """Set up the grid, learning parameters and terminal-state values.

        grid_size   -- side length n of the square grid
        end_states  -- terminal cells as (row, col) tuples (read-only here)
        rewards     -- reward for entering each corresponding end state
        exploration -- epsilon for epsilon-greedy action selection
        move_cost   -- cost subtracted from the reward on every move
        alpha/gamma/lmbda -- learning rate / discount / trace decay
        """
        self.n = grid_size
        self.alpha = alpha
        self.gamma = gamma
        self.lmbda = lmbda
        self.end_states = end_states
        self.move_cost = move_cost
        self.rewards = rewards
        self.e = exploration
        # up, down, left, right as (drow, dcol)
        self.actions = [(-1,0), (1,0), (0,-1), (0,1)]
        # history of taken actions, for the plot
        self.h = np.zeros((self.n, self.n, len(self.actions)), dtype=int)
        # seed terminal states with the geometric-series value of their reward
        self.v = np.zeros((self.n, self.n))
        for idx, state in enumerate(end_states):
            self.v[state] = rewards[idx] / (1-lmbda)

    def valid_moves(self, state):
        """Return the action indices that keep `state` inside the grid."""
        moves = []
        if state[0] != 0:
            moves.append(0)
        if state[0] != self.n - 1:
            moves.append(1)
        if state[1] != 0:
            moves.append(2)
        if state[1] != self.n - 1:
            moves.append(3)
        return moves

    def choose_action(self, state):
        """Epsilon-greedy action choice among the valid moves from `state`."""
        moves = self.valid_moves(state)
        if np.random.uniform() < self.e:
            return np.random.choice(moves)
        # greedy: pick the move leading to the highest-valued neighbour
        values = np.zeros(len(moves))
        for idx, move in enumerate(moves):
            values[idx] = self.v[self.move(state, move)]
        return moves[np.argmax(values)]

    def move(self, state, action):
        """Return the (row, col) reached by taking `action` from `state`."""
        return tuple([x+y for x,y in zip(state, self.actions[action])])

    def episode(self):
        """Run one episode from a random non-terminal start state.

        Updates self.v with TD(lambda) (replacing traces) and returns the
        largest absolute value change over the episode.
        """
        old_v = np.copy(self.v)
        z = np.zeros((self.n, self.n))
        # draw random start states until we get a non-terminal one
        # (np.random.randint's high bound is exclusive, so this covers 0..n-1;
        # fixed: np.random.random_integers is deprecated/removed in NumPy)
        state = self.end_states[0]
        while state in self.end_states:
            state = tuple(np.random.randint(0, self.n, size=2))
        visited = set()
        reward, end = 0, False
        while not end:
            action = self.choose_action(state)
            reward -= self.move_cost
            state1 = self.move(state, action)
            if state1 in self.end_states:
                reward += self.rewards[self.end_states.index(state1)]
                end = True
            delta = reward + self.gamma*self.v[state1] - self.v[state]
            z[state] = 1 # replacing trace
            #z[state] += 1 # accumulative trace
            visited.add(state)
            for s in visited:
                self.v[s] += self.alpha*delta*z[s]
                z[s] *= self.gamma*self.lmbda
            # drop states whose trace has decayed to (near) zero
            # (fixed: the original kept traces < 1e-5 and dropped the active
            # ones, which effectively disabled the eligibility traces)
            visited = set(x for x in visited if z[x] >= 1e-5)
            self.h[(state[0], state[1], action)] += 1
            state = state1
        return np.amax(np.abs(self.v - old_v))

    def policy(self):
        """Return the current greedy policy (action index per cell)."""
        e, self.e = self.e, 0  # temporarily disable exploration
        policy = np.zeros((self.n,self.n), dtype=int)
        for y in range(self.n):
            for x in range(self.n):
                policy[y,x] = self.choose_action((y,x))
        self.e = e
        return policy

    def arrow(self, m):
        """Return the (dx, dy) arrow offsets for action index `m`."""
        dx, dy = 0, 0
        if m == 0:
            dx, dy = 0, -.3
        elif m == 1:
            dx, dy = 0, .3
        elif m == 2:
            dx, dy = -.3, 0
        elif m == 3:
            dx, dy = .3, 0
        return dx, dy

    def plot(self, axis):
        """Draw end states, visit frequencies (blue) and greedy policy (black)."""
        policy = self.policy()
        maxh = np.amax(self.h)*.2
        for y in range(self.n):
            for x in range(self.n):
                cx, cy = x + .5, y + .5
                if (y,x) in self.end_states:
                    v = self.rewards[self.end_states.index((y,x))]
                    c = 'coral' if v < 0 else 'lime'
                    axis.add_artist(plt.Circle((cx, cy), .3, color=c))
                    axis.text(cx, cy, str(v), fontsize=15, horizontalalignment='center', verticalalignment='center')
                else:
                    # blue arrows: lighter = rarely taken, darker = often taken
                    moves = np.copy(self.h[y, x, :])
                    for m, v in np.ndenumerate(moves):
                        v = 1 - min(1, v / maxh)
                        dx, dy = self.arrow(m[0])
                        c = '#{0:02x}{0:02x}ff'.format(int(v*255))
                        plt.arrow(cx, cy, dx, dy, head_width=.2, head_length=.2, fc=c, ec=c)
                    # black arrow: current greedy action
                    dx, dy = self.arrow(policy[(y,x)])
                    plt.arrow(cx, cy, dx, dy, head_width=.2, head_length=.2, fc='k', ec='k')
        # decay the history so the plot emphasises recent visits
        self.h //= 10
def main():
    """Train a RandomWalk2D agent and animate the learned policy with pyplot."""
    exp = RandomWalk2D(grid_size=5, exploration=0.01, move_cost=0.01, \
        end_states=[(0,0), (4,4), (1,2), (2,1)], \
        rewards=[2,1,-2,-1], \
        alpha=.3, gamma=.9, lmbda=.8)
    # number of episodes between plot refreshes
    display_interval = 100
    figure, axis = plt.subplots()
    # NOTE(review): FigureCanvas.set_window_title is deprecated in recent
    # matplotlib; figure.canvas.manager.set_window_title is the replacement.
    figure.canvas.set_window_title('SARSA')
    axis.set_xlim([0,exp.n])
    axis.xaxis.tick_top()
    axis.set_ylim([exp.n, 0])  # inverted y so row 0 is drawn at the top
    # NOTE(review): `iter` shadows the builtin of the same name.
    for iter in range(1000):
        delta = 0
        for sub_iter in range(display_interval):
            delta = exp.episode()
        print(exp.v)
        axis.cla()
        exp.plot(axis)
        plt.title('Policy Iteration: {0}, delta = {1:.7f}' \
            .format((iter+1)*display_interval, delta), y=1.08)
        plt.xlabel('Blue: visit-frequency, Black: optimal-policy')
        axis.set_aspect('equal')
        plt.draw()
        # plt.pause emits a deprecation warning on some backends; suppress it
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            plt.pause(.001)
    plt.show()


if __name__ == '__main__':
    main()
| rahulsrma26/code-gems | RL/randomWalk/TDLambda.py | Python | mit | 5,584 | [
"VisIt"
] | fe520187adf586f293331f816d898f9979aba11d3d80fc0b104726ae2fc0c956 |
##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing NWChem, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
import re
import shutil
import stat
import tempfile
import easybuild.tools.config as config
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import adjust_permissions, mkdir, write_file
from easybuild.tools.modules import get_software_libdir, get_software_root, get_software_version
from easybuild.tools.run import run_cmd
class EB_NWChem(ConfigureMake):
"""Support for building/installing NWChem."""
    def __init__(self, *args, **kwargs):
        """Initialisation of custom class variables for NWChem."""
        super(EB_NWChem, self).__init__(*args, **kwargs)

        # location of the example/test inputs; populated later
        self.test_cases_dir = None

        # path for symlink to local copy of default .nwchemrc, required by NWChem at runtime
        # this path is hardcoded by NWChem, and there's no way to make it use a config file at another path...
        self.home_nwchemrc = os.path.join(os.getenv('HOME'), '.nwchemrc')

        # local NWChem .nwchemrc config file, to which symlink will point
        # using this approach, multiple parallel builds (on different nodes) can use the same symlink
        common_tmp_dir = os.path.dirname(tempfile.gettempdir())  # common tmp directory, same across nodes
        self.local_nwchemrc = os.path.join(common_tmp_dir, os.getenv('USER'), 'easybuild_nwchem', '.nwchemrc')
@staticmethod
def extra_options():
"""Custom easyconfig parameters for NWChem."""
extra_vars = {
'target': ["LINUX64", "Target platform", CUSTOM],
# possible options for ARMCI_NETWORK on LINUX64 with Infiniband:
# OPENIB, MPI-MT, MPI-SPAWN, MELLANOX
'armci_network': ["OPENIB", "Network protocol to use", CUSTOM],
'msg_comms': ["MPI", "Type of message communication", CUSTOM],
'modules': ["all", "NWChem modules to build", CUSTOM],
'lib_defines': ["", "Additional defines for C preprocessor", CUSTOM],
'tests': [True, "Run example test cases", CUSTOM],
# lots of tests fail, so allow a certain fail ratio
'max_fail_ratio': [0.5, "Maximum test case fail ratio", CUSTOM],
}
return ConfigureMake.extra_options(extra_vars)
    def setvar_env_makeopt(self, name, value):
        """Set a variable both in the environment and as an option to make."""
        # export for child processes *and* pass explicitly on the make command line
        env.setvar(name, value)
        self.cfg.update('buildopts', "%s='%s'" % (name, value))
    def configure_step(self):
        """Custom configuration procedure for NWChem.

        Validates the $HOME/.nwchemrc symlink, relocates the build to a short
        path, exports the many environment variables the NWChem build system
        expects, and runs 'make nwchem_config'.
        """
        # check whether a (valid) symlink to a .nwchemrc config file exists (via a dummy file if necessary)
        # fail early if the link is not what's we expect, since running the test cases will likely fail in this case
        try:
            if os.path.exists(self.home_nwchemrc) or os.path.islink(self.home_nwchemrc):
                # create a dummy file to check symlink
                if not os.path.exists(self.local_nwchemrc):
                    write_file(self.local_nwchemrc, 'dummy')

                self.log.debug("Contents of %s: %s", os.path.dirname(self.local_nwchemrc),
                               os.listdir(os.path.dirname(self.local_nwchemrc)))

                if os.path.islink(self.home_nwchemrc) and not os.path.samefile(self.home_nwchemrc, self.local_nwchemrc):
                    raise EasyBuildError("Found %s, but it's not a symlink to %s. "
                                         "Please (re)move %s while installing NWChem; it can be restored later",
                                         self.home_nwchemrc, self.local_nwchemrc, self.home_nwchemrc)
                # ok to remove, we'll recreate it anyway
                os.remove(self.local_nwchemrc)
        except (IOError, OSError), err:
            raise EasyBuildError("Failed to validate %s symlink: %s", self.home_nwchemrc, err)

        # building NWChem in a long path name is an issue, so let's try to make sure we have a short one
        try:
            # NWChem insists that version is in name of build dir
            tmpdir = tempfile.mkdtemp(suffix='-%s-%s' % (self.name, self.version))
            # remove created directory, since we're not going to use it as is
            os.rmdir(tmpdir)
            # avoid having '['/']' characters in build dir name, NWChem doesn't like that
            start_dir = tmpdir.replace('[', '_').replace(']', '_')
            mkdir(os.path.dirname(start_dir), parents=True)
            os.symlink(self.cfg['start_dir'], start_dir)
            os.chdir(start_dir)
            self.cfg['start_dir'] = start_dir
        except OSError, err:
            raise EasyBuildError("Failed to symlink build dir to a shorter path name: %s", err)

        # change to actual build dir
        try:
            os.chdir('src')
        except OSError, err:
            raise EasyBuildError("Failed to change to build dir: %s", err)

        nwchem_modules = self.cfg['modules']

        # set required NWChem environment variables
        env.setvar('NWCHEM_TOP', self.cfg['start_dir'])
        if len(self.cfg['start_dir']) > 64:
            # workaround for:
            # "The directory name chosen for NWCHEM_TOP is longer than the maximum allowed value of 64 characters"
            # see also https://svn.pnl.gov/svn/nwchem/trunk/src/util/util_nwchem_srcdir.F
            self.setvar_env_makeopt('NWCHEM_LONG_PATHS', 'Y')

        env.setvar('NWCHEM_TARGET', self.cfg['target'])
        env.setvar('MSG_COMMS', self.cfg['msg_comms'])
        env.setvar('ARMCI_NETWORK', self.cfg['armci_network'])
        if self.cfg['armci_network'] in ["OPENIB"]:
            # Infiniband headers/libs are assumed to be in the default system locations
            env.setvar('IB_INCLUDE', "/usr/include")
            env.setvar('IB_LIB', "/usr/lib64")
            env.setvar('IB_LIB_NAME', "-libumad -libverbs -lpthread")

        if 'python' in self.cfg['modules']:
            python_root = get_software_root('Python')
            if not python_root:
                raise EasyBuildError("Python module not loaded, you should add Python as a dependency.")
            env.setvar('PYTHONHOME', python_root)
            pyver = '.'.join(get_software_version('Python').split('.')[0:2])
            env.setvar('PYTHONVERSION', pyver)
            # if libreadline is loaded, assume it was a dependency for Python
            # pass -lreadline to avoid linking issues (libpython2.7.a doesn't include readline symbols)
            libreadline = get_software_root('libreadline')
            if libreadline:
                libreadline_libdir = os.path.join(libreadline, get_software_libdir('libreadline'))
                ncurses = get_software_root('ncurses')
                if not ncurses:
                    raise EasyBuildError("ncurses is not loaded, but required to link with libreadline")
                ncurses_libdir = os.path.join(ncurses, get_software_libdir('ncurses'))
                readline_libs = ' '.join([
                    os.path.join(libreadline_libdir, 'libreadline.a'),
                    os.path.join(ncurses_libdir, 'libcurses.a'),
                ])
                extra_libs = os.environ.get('EXTRA_LIBS', '')
                env.setvar('EXTRA_LIBS', ' '.join([extra_libs, readline_libs]))

        env.setvar('LARGE_FILES', 'TRUE')
        env.setvar('USE_NOFSCHECK', 'TRUE')
        env.setvar('CCSDTLR', 'y')  # enable CCSDTLR
        env.setvar('CCSDTQ', 'y')  # enable CCSDTQ (compilation is long, executable is big)
        if LooseVersion(self.version) >= LooseVersion("6.2"):
            env.setvar('MRCC_METHODS', 'y')  # enable multireference coupled cluster capability
        if LooseVersion(self.version) >= LooseVersion("6.5"):
            env.setvar('EACCSD', 'y')  # enable EOM electron-attachemnt coupled cluster capability
            env.setvar('IPCCSD', 'y')  # enable EOM ionization-potential coupled cluster capability

        for var in ['USE_MPI', 'USE_MPIF', 'USE_MPIF4']:
            env.setvar(var, 'y')
        for var in ['CC', 'CXX', 'F90']:
            env.setvar('MPI_%s' % var, os.getenv('MPI%s' % var))
        env.setvar('MPI_LOC', os.path.dirname(os.getenv('MPI_INC_DIR')))
        env.setvar('MPI_LIB', os.getenv('MPI_LIB_DIR'))
        env.setvar('MPI_INCLUDE', os.getenv('MPI_INC_DIR'))

        # pick MPI link libraries matching the toolchain's MPI flavour
        libmpi = None
        mpi_family = self.toolchain.mpi_family()
        if mpi_family in toolchain.OPENMPI:
            libmpi = "-lmpi_f90 -lmpi_f77 -lmpi -ldl -Wl,--export-dynamic -lnsl -lutil"
        elif mpi_family in [toolchain.INTELMPI]:
            if self.cfg['armci_network'] in ["MPI-MT"]:
                libmpi = "-lmpigf -lmpigi -lmpi_ilp64 -lmpi_mt"
            else:
                libmpi = "-lmpigf -lmpigi -lmpi_ilp64 -lmpi"
        elif mpi_family in [toolchain.MPICH, toolchain.MPICH2]:
            libmpi = "-lmpichf90 -lmpich -lopa -lmpl -lrt -lpthread"
        else:
            raise EasyBuildError("Don't know how to set LIBMPI for %s", mpi_family)
        env.setvar('LIBMPI', libmpi)

        # compiler optimization flags: set environment variables _and_ add them to list of make options
        self.setvar_env_makeopt('COPTIMIZE', os.getenv('CFLAGS'))
        self.setvar_env_makeopt('FOPTIMIZE', os.getenv('FFLAGS'))

        # BLAS and ScaLAPACK
        self.setvar_env_makeopt('BLASOPT', '%s -L%s %s %s' % (os.getenv('LDFLAGS'), os.getenv('MPI_LIB_DIR'),
                                                              os.getenv('LIBSCALAPACK_MT'), libmpi))
        self.setvar_env_makeopt('SCALAPACK', '%s %s' % (os.getenv('LDFLAGS'), os.getenv('LIBSCALAPACK_MT')))

        # integer size: 8 bytes when the toolchain uses 64-bit integers, else 4
        if self.toolchain.options['i8']:
            size = 8
            self.setvar_env_makeopt('USE_SCALAPACK_I8', 'y')
            self.cfg.update('lib_defines', '-DSCALAPACK_I8')
        else:
            self.setvar_env_makeopt('HAS_BLAS', 'yes')
            self.setvar_env_makeopt('USE_SCALAPACK', 'y')
            size = 4

        # set sizes
        for lib in ['BLAS', 'LAPACK', 'SCALAPACK']:
            self.setvar_env_makeopt('%s_SIZE' % lib, str(size))

        env.setvar('NWCHEM_MODULES', nwchem_modules)

        env.setvar('LIB_DEFINES', self.cfg['lib_defines'])

        # clean first (why not)
        run_cmd("make clean", simple=True, log_all=True, log_ok=True)

        # configure build
        cmd = "make %s nwchem_config" % self.cfg['buildopts']
        run_cmd(cmd, simple=True, log_all=True, log_ok=True, log_output=True)
    def build_step(self):
        """Custom build procedure for NWChem.

        Handles 64-to-32 integer conversion, scrubs problematic environment
        variables, runs the regular build, then builds the version info and
        tunes default memory via getmem.nwchem when applicable.
        """
        # set FC
        self.setvar_env_makeopt('FC', os.getenv('F77'))

        # check whether 64-bit integers should be used, and act on it
        if not self.toolchain.options['i8']:
            if self.cfg['parallel']:
                self.cfg.update('buildopts', '-j %s' % self.cfg['parallel'])
            run_cmd("make %s 64_to_32" % self.cfg['buildopts'], simple=True, log_all=True, log_ok=True, log_output=True)
            self.setvar_env_makeopt('USE_64TO32', "y")

        # unset env vars that cause trouble during NWChem build or cause build to generate incorrect stuff
        for var in ['CFLAGS', 'FFLAGS', 'LIBS']:
            val = os.getenv(var)
            if val:
                self.log.info("%s was defined as '%s', need to unset it to avoid problems..." % (var, val))
                os.unsetenv(var)
                os.environ.pop(var)

        super(EB_NWChem, self).build_step(verbose=True)

        # build version info
        try:
            self.log.info("Building version info...")

            cwd = os.getcwd()
            os.chdir(os.path.join(self.cfg['start_dir'], 'src', 'util'))

            run_cmd("make version", simple=True, log_all=True, log_ok=True, log_output=True)
            run_cmd("make", simple=True, log_all=True, log_ok=True, log_output=True)

            os.chdir(os.path.join(self.cfg['start_dir'], 'src'))
            run_cmd("make link", simple=True, log_all=True, log_ok=True, log_output=True)

            os.chdir(cwd)

        except OSError, err:
            raise EasyBuildError("Failed to build version info: %s", err)

        # run getmem.nwchem script to assess memory availability and make an educated guess
        # this is an alternative to specifying -DDFLT_TOT_MEM via LIB_DEFINES
        # this recompiles the appropriate files and relinks
        if not 'DDFLT_TOT_MEM' in self.cfg['lib_defines']:
            try:
                os.chdir(os.path.join(self.cfg['start_dir'], 'contrib'))
                run_cmd("./getmem.nwchem", simple=True, log_all=True, log_ok=True, log_output=True)
                os.chdir(self.cfg['start_dir'])
            except OSError, err:
                raise EasyBuildError("Failed to run getmem.nwchem script: %s", err)
    def install_step(self):
        """Custom install procedure for NWChem.

        Copies the binary and data directories into the install prefix,
        writes the default .nwchemrc settings file and opens up permissions
        on the data directory.
        """
        try:
            # binary
            bindir = os.path.join(self.installdir, 'bin')
            mkdir(bindir)
            shutil.copy(os.path.join(self.cfg['start_dir'], 'bin', self.cfg['target'], 'nwchem'),
                        bindir)

            # data
            shutil.copytree(os.path.join(self.cfg['start_dir'], 'src', 'data'),
                            os.path.join(self.installdir, 'data'))
            shutil.copytree(os.path.join(self.cfg['start_dir'], 'src', 'basis', 'libraries'),
                            os.path.join(self.installdir, 'data', 'libraries'))
            shutil.copytree(os.path.join(self.cfg['start_dir'], 'src', 'nwpw', 'libraryps'),
                            os.path.join(self.installdir, 'data', 'libraryps'))

        except OSError, err:
            raise EasyBuildError("Failed to install NWChem: %s", err)

        # create NWChem settings file pointing at the installed data dirs
        default_nwchemrc = os.path.join(self.installdir, 'data', 'default.nwchemrc')
        txt = '\n'.join([
            "nwchem_basis_library %(path)s/data/libraries/",
            "nwchem_nwpw_library %(path)s/data/libraryps/",
            "ffield amber",
            "amber_1 %(path)s/data/amber_s/",
            "amber_2 %(path)s/data/amber_q/",
            "amber_3 %(path)s/data/amber_x/",
            "amber_4 %(path)s/data/amber_u/",
            "spce %(path)s/data/solvents/spce.rst",
            "charmm_s %(path)s/data/charmm_s/",
            "charmm_x %(path)s/data/charmm_x/",
        ]) % {'path': self.installdir}
        write_file(default_nwchemrc, txt)

        # fix permissions in data directory (world-readable, dirs world-executable)
        datadir = os.path.join(self.installdir, 'data')
        adjust_permissions(datadir, stat.S_IROTH, add=True, recursive=True)
        adjust_permissions(datadir, stat.S_IXOTH, add=True, recursive=True, onlydirs=True)
def sanity_check_step(self):
"""Custom sanity check for NWChem."""
custom_paths = {
'files': ['bin/nwchem'],
'dirs': [os.path.join('data', x) for x in ['amber_q', 'amber_s', 'amber_t', 'amber_u', 'amber_x',
'charmm_s', 'charmm_x', 'solvents', 'libraries', 'libraryps']],
}
super(EB_NWChem, self).sanity_check_step(custom_paths=custom_paths)
    def make_module_extra(self):
        """Custom extra module file entries for NWChem."""
        txt = super(EB_NWChem, self).make_module_extra()

        # PYTHONHOME must point at the Python dependency used at build time
        txt += self.module_generator.set_environment("PYTHONHOME", get_software_root('Python'))
        # '/' at the end is critical for NWCHEM_BASIS_LIBRARY!
        datadir = os.path.join(self.installdir, 'data')
        txt += self.module_generator.set_environment('NWCHEM_BASIS_LIBRARY', os.path.join(datadir, 'libraries/'))
        if LooseVersion(self.version) >= LooseVersion("6.3"):
            txt += self.module_generator.set_environment('NWCHEM_NWPW_LIBRARY', os.path.join(datadir, 'libraryps/'))

        return txt
    def cleanup_step(self):
        """Copy stuff from build directory we still need, if any."""
        try:
            # preserve the examples before the build dir is cleaned up,
            # so test_cases_step can still run them afterwards
            exs_dir = os.path.join(self.cfg['start_dir'], 'examples')

            self.examples_dir = os.path.join(tempfile.mkdtemp(), 'examples')

            shutil.copytree(exs_dir, self.examples_dir)
            self.log.info("Copied %s to %s." % (exs_dir, self.examples_dir))

        except OSError, err:
            raise EasyBuildError("Failed to copy examples: %s", err)

        super(EB_NWChem, self).cleanup_step()
    def test_cases_step(self):
        """Run provided list of test cases, or run the provided examples if no test cases were specified."""
        # run all examples if no test cases were specified
        # order and grouping is important for some of these tests (e.g., [o]h3tr*)
        # Some of the examples are deleted:
        # missing md parameter files: dna.nw, mache.nw, 18c6NaK.nw, membrane.nw, sdm.nw
        # method not implemented (unknown theory) or keyword not found: triplet.nw, C2H6.nw, pspw_MgO.nw, ccsdt_polar_small.nw, CG.nw
        # no convergence: diamond.nw
        # Too much memory required: ccsd_polar_big.nw
        if type(self.cfg['tests']) is bool:
            # each entry pairs a subdirectory of the examples dir with the
            # input files to run from it
            examples = [('qmd', ['3carbo_dft.nw', '3carbo.nw', 'h2o_scf.nw']),
                        ('pspw', ['C2.nw', 'C6.nw', 'Carbene.nw', 'Na16.nw', 'NaCl.nw']),
                        ('tcepolar', ['ccsd_polar_small.nw']),
                        ('dirdyvtst/h3', ['h3tr1.nw', 'h3tr2.nw']),
                        ('dirdyvtst/h3', ['h3tr3.nw']), ('dirdyvtst/h3', ['h3tr4.nw']), ('dirdyvtst/h3', ['h3tr5.nw']),
                        ('dirdyvtst/oh3', ['oh3tr1.nw', 'oh3tr2.nw']),
                        ('dirdyvtst/oh3', ['oh3tr3.nw']), ('dirdyvtst/oh3', ['oh3tr4.nw']), ('dirdyvtst/oh3', ['oh3tr5.nw']),
                        ('pspw/session1', ['band.nw', 'si4.linear.nw', 'si4.rhombus.nw', 'S2-drift.nw',
                                           'silicon.nw', 'S2.nw', 'si4.rectangle.nw']),
                        ('md/myo', ['myo.nw']), ('md/nak', ['NaK.nw']), ('md/crown', ['crown.nw']), ('md/hrc', ['hrc.nw']),
                        ('md/benzene', ['benzene.nw'])]
            self.cfg['tests'] = [(os.path.join(self.examples_dir, d), l) for (d, l) in examples]
            self.log.info("List of examples to be run as test cases: %s" % self.cfg['tests'])
        try:
            # symlink $HOME/.nwchemrc to local copy of default nwchemrc
            default_nwchemrc = os.path.join(self.installdir, 'data', 'default.nwchemrc')
            # make a local copy of the default .nwchemrc file at a fixed path, so we can symlink to it
            # this makes sure that multiple parallel builds can reuse the same symlink, even for different builds
            # there is apparently no way to point NWChem to a particular config file other that $HOME/.nwchemrc
            try:
                local_nwchemrc_dir = os.path.dirname(self.local_nwchemrc)
                if not os.path.exists(local_nwchemrc_dir):
                    os.makedirs(local_nwchemrc_dir)
                shutil.copy2(default_nwchemrc, self.local_nwchemrc)
                # only try to create symlink if it's not there yet
                # we've verified earlier that the symlink is what we expect it to be if it's there
                if not os.path.exists(self.home_nwchemrc):
                    os.symlink(self.local_nwchemrc, self.home_nwchemrc)
            # NOTE(review): 'except X, err' is Python-2-only syntax (removed in
            # Python 3); left untouched here to keep the code byte-identical
            except OSError, err:
                raise EasyBuildError("Failed to symlink %s to %s: %s", self.home_nwchemrc, self.local_nwchemrc, err)
            # run tests, keep track of fail ratio
            cwd = os.getcwd()
            # floats, so fail/tot below is a true ratio (not py2 int division)
            fail = 0.0
            tot = 0.0
            # NOTE(review): non-raw string with '\s' -- works, but should be a
            # raw string to avoid invalid-escape warnings on modern Python
            success_regexp = re.compile("Total times\s*cpu:.*wall:.*")
            test_cases_logfn = os.path.join(self.installdir, config.log_path(), 'test_cases.log')
            test_cases_log = open(test_cases_logfn, "w")
            for (testdir, tests) in self.cfg['tests']:
                # run test in a temporary dir
                tmpdir = tempfile.mkdtemp(prefix='nwchem_test_')
                os.chdir(tmpdir)
                # copy all files in test case dir
                for item in os.listdir(testdir):
                    test_file = os.path.join(testdir, item)
                    if os.path.isfile(test_file):
                        self.log.debug("Copying %s to %s" % (test_file, tmpdir))
                        shutil.copy2(test_file, tmpdir)
                # run tests
                for testx in tests:
                    cmd = "nwchem %s" % testx
                    msg = "Running test '%s' (from %s) in %s..." % (cmd, testdir, tmpdir)
                    self.log.info(msg)
                    test_cases_log.write("\n%s\n" % msg)
                    (out, ec) = run_cmd(cmd, simple=False, log_all=False, log_ok=False, log_output=True)
                    # check exit code and output
                    if ec:
                        msg = "Test %s failed (exit code: %s)!" % (testx, ec)
                        self.log.warning(msg)
                        test_cases_log.write('FAIL: %s' % msg)
                        fail += 1
                    else:
                        # a zero exit code is not enough: require the final
                        # 'Total times' timing line in the output as well
                        if success_regexp.search(out):
                            msg = "Test %s successful!" % testx
                            self.log.info(msg)
                            test_cases_log.write('SUCCESS: %s' % msg)
                        else:
                            msg = "No 'Total times' found for test %s (but exit code is %s)!" % (testx, ec)
                            self.log.warning(msg)
                            test_cases_log.write('FAIL: %s' % msg)
                            fail += 1
                    test_cases_log.write("\nOUTPUT:\n\n%s\n\n" % out)
                    tot += 1
                # go back
                os.chdir(cwd)
                shutil.rmtree(tmpdir)
            # NOTE(review): this divides without guarding tot == 0 -- an empty
            # 'tests' list would raise ZeroDivisionError; confirm whether that
            # can happen in practice
            fail_ratio = fail / tot
            fail_pcnt = fail_ratio * 100
            msg = "%d of %d tests failed (%s%%)!" % (fail, tot, fail_pcnt)
            self.log.info(msg)
            test_cases_log.write('\n\nSUMMARY: %s' % msg)
            test_cases_log.close()
            self.log.info("Log for test cases saved at %s" % test_cases_logfn)
            if fail_ratio > self.cfg['max_fail_ratio']:
                max_fail_pcnt = self.cfg['max_fail_ratio'] * 100
                raise EasyBuildError("Over %s%% of test cases failed, assuming broken build.", max_fail_pcnt)
            # cleanup
            try:
                shutil.rmtree(self.examples_dir)
                shutil.rmtree(local_nwchemrc_dir)
            except OSError, err:
                raise EasyBuildError("Cleanup failed: %s", err)
            # set post msg w.r.t. cleaning up $HOME/.nwchemrc symlink
            self.postmsg += "\nRemember to clean up %s after all NWChem builds are finished." % self.home_nwchemrc
        except OSError, err:
            raise EasyBuildError("Failed to run test cases: %s", err)
| valtandor/easybuild-easyblocks | easybuild/easyblocks/n/nwchem.py | Python | gpl-2.0 | 24,264 | [
"Amber",
"NWChem"
] | f14145a4ade90d9a734a438fbe91312d887d2da1522efa8747638eccf4392e5c |
'''
Create PV-slices of the spectrally down-sampled HI cube.
'''
from spectral_cube import SpectralCube, Projection
import pvextractor as pv
from astropy.utils.console import ProgressBar
from astropy.io import fits
from astropy import units as u
import numpy as np
import matplotlib.pyplot as plt
from paths import fourteenB_HI_data_wGBT_path, fourteenB_wGBT_HI_file_dict
from galaxy_params import gal_feath as gal
from constants import distance
# Radius cut-off from galaxy frame to observation frame
def obs_radius(radius, PA, gal):
    '''
    Project a galaxy-frame radius into the observed frame.

    The elliptical correction combines the position angle `PA` (relative to
    the disk major axis) with the galaxy's inclination (`gal.inclination`).
    '''
    major_term = np.cos(PA) ** 2
    minor_term = (np.sin(PA) / np.cos(gal.inclination)) ** 2
    return np.sqrt(radius ** 2 / (major_term + minor_term))
def phys_to_ang(phys_size, distance=distance):
    '''
    Convert a physical size to an angular size (small-angle approximation).

    Note: the original docstring said "angular to physical", but the code
    divides a physical length by the distance and attaches ``u.rad``, i.e.
    physical -> angular.

    Parameters
    ----------
    phys_size : astropy `Quantity` (length)
        Physical size to convert.
    distance : astropy `Quantity` (length), optional
        Distance to the target; defaults to the module-level `distance`
        (M33) imported from `constants`.

    Returns
    -------
    astropy `Quantity` in radians.
    '''
    return (phys_size.to(distance.unit) / distance) * u.rad
if __name__ == "__main__":
    # Load the spectrally down-sampled (1 km/s) feathered HI cube and the
    # zeroth-moment map used for sanity-check plotting.
    cube = SpectralCube.read(fourteenB_HI_data_wGBT_path("downsamp_1kms/M33_14B-088_HI.clean.image.GBT_feathered.1kms.fits"))
    mom0 = Projection.from_hdu(fits.open(fourteenB_wGBT_HI_file_dict["Moment0"])[0])
    pa_angles = gal.position_angles(header=mom0.header)
    radius = gal.radius(header=mom0.header)
    # Set the angles to create PV slices from. Angles defined wrt to the disk PA.
    # The cuts at PA - 165 and 175 are to match the slices presented in Kam+17
    thetas = np.array([0, 45, 90, 135, gal.position_angle.value - 165,
                       gal.position_angle.value - 175]) * u.deg
    max_rad = 8.5 * u.kpc
    # When True, show each slice path on the moment-0 map and wait for input.
    check_paths = False
    pvslices = []
    paths = []
    # --- Wide slices: width spans the full perpendicular extent of the disk ---
    for theta in ProgressBar(thetas):
        # Adjust path length based on inclination
        obs_rad = obs_radius(max_rad, theta, gal)
        # Now convert the physical size in the observed frame to an angular size
        ang_length = 2 * phys_to_ang(obs_rad, distance)
        ang_width = 2 * phys_to_ang(obs_radius(max_rad, theta + 90 * u.deg, gal),
                                    distance)
        pv_path = pv.PathFromCenter(gal.center_position, length=ang_length,
                                    angle=theta + gal.position_angle,
                                    sample=20,
                                    width=ang_width)
        paths.append(pv_path)
        if check_paths:
            # Interactive sanity check: overlay the slice end points on the
            # moment-0 map.
            plt.imshow(mom0.value, origin='lower')
            plt.contour(radius <= max_rad, colors='r')
            center = gal.to_center_position_pixel(wcs=cube.wcs)
            plt.plot(center[0], center[1], 'bD')
            for i, posn in enumerate(pv_path.get_xy(cube.wcs)):
                if i == 0:
                    symb = "c^"
                else:
                    symb = "g^"
                plt.plot(posn[0], posn[1], symb)
            plt.draw()
            # NOTE(review): raw_input is Python-2-only; use input() on Python 3
            raw_input("{}".format(theta))
            plt.clf()
        # Set NaNs to zero. We're averaging over very large areas here.
        pvslice = pv.extract_pv_slice(cube, pv_path, respect_nan=False)
        filename = "downsamp_1kms/M33_14B-088_HI.GBT_feathered_PA_{}_pvslice.fits".format(int(theta.value))
        pvslice.writeto(fourteenB_HI_data_wGBT_path(filename, no_check=True), overwrite=True)
        pvslices.append(pvslice)
    # Make a major axis PV slice with the rotation subtracted cube
    rotcube = SpectralCube.read(fourteenB_HI_data_wGBT_path("downsamp_1kms/M33_14B-088_HI.clean.image.GBT_feathered.rotation_corrected.1kms.fits"))
    theta = 0.0 * u.deg
    obs_rad = obs_radius(max_rad, theta, gal)
    # Now convert the physical size in the observed frame to an angular size
    ang_length = 2 * phys_to_ang(obs_rad, distance)
    ang_width = 2 * phys_to_ang(obs_radius(max_rad, theta + 90 * u.deg, gal),
                                distance)
    pv_path = pv.PathFromCenter(gal.center_position, length=ang_length,
                                angle=theta + gal.position_angle,
                                sample=20,
                                width=ang_width)
    pvslice = pv.extract_pv_slice(rotcube, pv_path, respect_nan=False)
    filename = "downsamp_1kms/M33_14B-088_HI.GBT_feathered.rotation_corrected_PA_{}_pvslice.fits".format(int(theta.value))
    pvslice.writeto(fourteenB_HI_data_wGBT_path(filename, no_check=True), overwrite=True)
    # Make thinner PV slices with the normal cube
    pvslices_thin = []
    paths_thin = []
    for theta in ProgressBar(thetas):
        # Adjust path length based on inclination
        obs_rad = obs_radius(max_rad, theta, gal)
        # Now convert the physical size in the observed frame to an angular size
        ang_length = 2 * phys_to_ang(obs_rad, distance)
        # NOTE(review): this width is a bare number with no astropy unit,
        # unlike the widths above, while the output filename claims
        # '200arcsec' -- confirm whether this should be 200 * u.arcsec
        # (a unitless width is presumably interpreted in pixels).
        ang_width = 200
        pv_path = pv.PathFromCenter(gal.center_position, length=ang_length,
                                    angle=theta + gal.position_angle,
                                    sample=20,
                                    width=ang_width)
        paths_thin.append(pv_path)
        if check_paths:
            plt.imshow(mom0.value, origin='lower')
            plt.contour(radius <= max_rad, colors='r')
            center = gal.to_center_position_pixel(wcs=cube.wcs)
            plt.plot(center[0], center[1], 'bD')
            for i, posn in enumerate(pv_path.get_xy(cube.wcs)):
                if i == 0:
                    symb = "c^"
                else:
                    symb = "g^"
                plt.plot(posn[0], posn[1], symb)
            plt.draw()
            raw_input("{}".format(theta))
            plt.clf()
        # Set NaNs to zero. We're averaging over very large areas here.
        pvslice = pv.extract_pv_slice(cube, pv_path, respect_nan=False)
        filename = "downsamp_1kms/M33_14B-088_HI.GBT_feathered_PA_{}_pvslice_200arcsec_width.fits".format(int(theta.value))
        pvslice.writeto(fourteenB_HI_data_wGBT_path(filename, no_check=True), overwrite=True)
        pvslices_thin.append(pvslice)
| e-koch/VLA_Lband | 14B-088/HI/analysis/HI_pvslices.py | Python | mit | 5,875 | [
"Galaxy"
] | 02f3b313dbf5356d402286c56a897622522cc72cc540c31daec632fbdaff8537 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyHtseq(PythonPackage):
    """HTSeq is a Python package that provides infrastructure to process
    data from high-throughput sequencing assays."""
    homepage = "http://htseq.readthedocs.io/en/release_0.9.1/overview.html"
    url = "https://github.com/simon-anders/htseq/archive/release_0.9.1.tar.gz"
    version('0.11.2', sha256='dfc707effa699d5ba9034e1bb9f13c0fb4e9bc60d31ede2444aa49c7e2fc71aa')
    version('0.9.1', sha256='28b41d68aa233fce0d57699e649b69bb11957f8f1b9b7b82dfe3415849719534')
    # build-time only: packaging machinery
    depends_on('py-setuptools', type='build')
    # required both to build the extensions and at run time
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-pysam', type=('build', 'run'))
    depends_on('py-matplotlib', type=('build', 'run'))
    depends_on('py-cython', type=('build', 'run'))
    # swig generates the wrappers for the C components
    depends_on('swig', type=('build', 'run'))
"HTSeq",
"pysam"
] | d6052413ecd1191bba5b062a008e22300f2acfe7a8282aeed2fada7ca7eb4d85 |
# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Classes for representing multi-dimensional data with metadata.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
from xml.dom.minidom import Document
import collections
import copy
import datetime
import operator
import warnings
import zlib
import biggus
import numpy as np
import numpy.ma as ma
from iris._deprecation import warn_deprecated
import iris.analysis
from iris.analysis.cartography import wrap_lons
import iris.analysis.maths
import iris.analysis._interpolate_private
import iris.aux_factory
import iris.coord_systems
import iris.coords
import iris._concatenate
import iris._constraints
import iris._merge
import iris.exceptions
import iris.util
from iris._cube_coord_common import CFVariableMixin
from functools import reduce
__all__ = ['Cube', 'CubeList', 'CubeMetadata']
class CubeMetadata(collections.namedtuple('CubeMetadata',
                                          ['standard_name', 'long_name',
                                           'var_name', 'units',
                                           'attributes', 'cell_methods'])):
    """
    Represents the phenomenon metadata for a single :class:`Cube`.

    """
    # Keep instances as lightweight as the underlying namedtuple.
    __slots__ = ()

    def name(self, default='unknown'):
        """
        Returns a human-readable name.

        The first non-empty value among 'standard_name', 'long_name' and
        'var_name' is used; if all are empty, the value of `default`
        (which itself defaults to 'unknown') is returned.

        """
        for candidate in (self.standard_name, self.long_name, self.var_name):
            if candidate:
                return candidate
        return default
# The XML namespace to use for CubeML documents
XML_NAMESPACE_URI = "urn:x-iris:cubeml-0.2"
class _CubeFilter(object):
"""
A constraint, paired with a list of cubes matching that constraint.
"""
def __init__(self, constraint, cubes=None):
self.constraint = constraint
if cubes is None:
cubes = CubeList()
self.cubes = cubes
def __len__(self):
return len(self.cubes)
def add(self, cube):
"""
Adds the appropriate (sub)cube to the list of cubes where it
matches the constraint.
"""
sub_cube = self.constraint.extract(cube)
if sub_cube is not None:
self.cubes.append(sub_cube)
def merged(self, unique=False):
"""
Returns a new :class:`_CubeFilter` by merging the list of
cubes.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
"""
return _CubeFilter(self.constraint, self.cubes.merge(unique))
class _CubeFilterCollection(object):
"""
A list of _CubeFilter instances.
"""
@staticmethod
def from_cubes(cubes, constraints=None):
"""
Creates a new collection from an iterable of cubes, and some
optional constraints.
"""
constraints = iris._constraints.list_of_constraints(constraints)
pairs = [_CubeFilter(constraint) for constraint in constraints]
collection = _CubeFilterCollection(pairs)
for cube in cubes:
collection.add_cube(cube)
return collection
def __init__(self, pairs):
self.pairs = pairs
def add_cube(self, cube):
"""
Adds the given :class:`~iris.cube.Cube` to all of the relevant
constraint pairs.
"""
for pair in self.pairs:
pair.add(cube)
def cubes(self):
"""
Returns all the cubes in this collection concatenated into a
single :class:`CubeList`.
"""
result = CubeList()
for pair in self.pairs:
result.extend(pair.cubes)
return result
def merged(self, unique=False):
"""
Returns a new :class:`_CubeFilterCollection` by merging all the cube
lists of this collection.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
"""
return _CubeFilterCollection([pair.merged(unique) for pair in
self.pairs])
class CubeList(list):
    """
    All the functionality of a standard :class:`list` with added "Cube"
    context.
    """
    def __new__(cls, list_of_cubes=None):
        """Given a :class:`list` of cubes, return a CubeList instance."""
        cube_list = list.__new__(cls, list_of_cubes)
        # Check that all items in the incoming list are cubes. Note that this
        # checking does not guarantee that a CubeList instance *always* has
        # just cubes in its list as the append & __getitem__ methods have not
        # been overridden.
        # NOTE(review): list.__new__ does not populate the new list (that is
        # done by list.__init__), so at this point cube_list may iterate as
        # empty and the check below may be vacuous for CubeList(...) calls --
        # verify against the intended construction paths.
        if not all([isinstance(cube, Cube) for cube in cube_list]):
            raise ValueError('All items in list_of_cubes must be Cube '
                             'instances.')
        return cube_list
    def __str__(self):
        """Runs short :meth:`Cube.summary` on every cube."""
        result = ['%s: %s' % (i, cube.summary(shorten=True)) for i, cube in
                  enumerate(self)]
        if result:
            result = '\n'.join(result)
        else:
            result = '< No cubes >'
        return result
    def __repr__(self):
        """Runs repr on every cube."""
        return '[%s]' % ',\n'.join([repr(cube) for cube in self])
    # TODO #370 Which operators need overloads?
    def __add__(self, other):
        # Preserve CubeList-ness of the result of list concatenation.
        return CubeList(list.__add__(self, other))
    def __getitem__(self, keys):
        """x.__getitem__(y) <==> x[y]"""
        result = super(CubeList, self).__getitem__(keys)
        # Re-wrap slice results so they remain CubeLists, not plain lists.
        if isinstance(result, list):
            result = CubeList(result)
        return result
    def __getslice__(self, start, stop):
        """
        x.__getslice__(i, j) <==> x[i:j]

        Use of negative indices is not supported.
        """
        # NOTE: __getslice__ only exists on Python 2; Python 3 routes slicing
        # through __getitem__ above.
        result = super(CubeList, self).__getslice__(start, stop)
        result = CubeList(result)
        return result
    def xml(self, checksum=False, order=True, byteorder=True):
        """Return a string of the XML that this list of cubes represents."""
        doc = Document()
        cubes_xml_element = doc.createElement("cubes")
        cubes_xml_element.setAttribute("xmlns", XML_NAMESPACE_URI)
        for cube_obj in self:
            cubes_xml_element.appendChild(
                cube_obj._xml_element(
                    doc, checksum=checksum, order=order, byteorder=byteorder))
        doc.appendChild(cubes_xml_element)
        # return our newly created XML string
        return doc.toprettyxml(indent="  ")
    def extract(self, constraints, strict=False):
        """
        Filter each of the cubes which can be filtered by the given
        constraints.

        This method iterates over each constraint given, and subsets each of
        the cubes in this CubeList where possible. Thus, a CubeList of length
        **n** when filtered with **m** constraints can generate a maximum of
        **m * n** cubes.

        Keywords:

        * strict - boolean
            If strict is True, then there must be exactly one cube which is
            filtered per constraint.
        """
        return self._extract_and_merge(self, constraints, strict,
                                       merge_unique=None)
    @staticmethod
    def _extract_and_merge(cubes, constraints, strict, merge_unique=False):
        # * merge_unique - if None: no merging, if false: non unique merging,
        # else unique merging (see merge)
        constraints = iris._constraints.list_of_constraints(constraints)
        # group the resultant cubes by constraints in a dictionary
        constraint_groups = dict([(constraint, CubeList()) for constraint in
                                  constraints])
        for cube in cubes:
            for constraint, cube_list in six.iteritems(constraint_groups):
                sub_cube = constraint.extract(cube)
                if sub_cube is not None:
                    cube_list.append(sub_cube)
        if merge_unique is not None:
            for constraint, cubelist in six.iteritems(constraint_groups):
                constraint_groups[constraint] = cubelist.merge(merge_unique)
        # Assemble results in the order the constraints were given, so the
        # caller can pair them back up.
        result = CubeList()
        for constraint in constraints:
            constraint_cubes = constraint_groups[constraint]
            if strict and len(constraint_cubes) != 1:
                msg = 'Got %s cubes for constraint %r, ' \
                      'expecting 1.' % (len(constraint_cubes), constraint)
                raise iris.exceptions.ConstraintMismatchError(msg)
            result.extend(constraint_cubes)
        # With a single strict constraint, unwrap to a bare Cube.
        if strict and len(constraints) == 1:
            result = result[0]
        return result
    def extract_strict(self, constraints):
        """
        Calls :meth:`CubeList.extract` with the strict keyword set to True.
        """
        return self.extract(constraints, strict=True)
    def extract_overlapping(self, coord_names):
        """
        Returns a :class:`CubeList` of cubes extracted over regions
        where the coordinates overlap, for the coordinates
        in coord_names.

        Args:

        * coord_names:
           A string or list of strings of the names of the coordinates
           over which to perform the extraction.
        """
        if isinstance(coord_names, six.string_types):
            coord_names = [coord_names]

        def make_overlap_fn(coord_name):
            # Closure binds coord_name per coordinate; a cell is kept only if
            # it appears in that coordinate on *every* cube in the list.
            def overlap_fn(cell):
                return all(cell in cube.coord(coord_name).cells()
                           for cube in self)
            return overlap_fn

        coord_values = {coord_name: make_overlap_fn(coord_name)
                        for coord_name in coord_names}
        return self.extract(iris.Constraint(coord_values=coord_values))
    def merge_cube(self):
        """
        Return the merged contents of the :class:`CubeList` as a single
        :class:`Cube`.

        If it is not possible to merge the `CubeList` into a single
        `Cube`, a :class:`~iris.exceptions.MergeError` will be raised
        describing the reason for the failure.

        For example:

            >>> cube_1 = iris.cube.Cube([1, 2])
            >>> cube_1.add_aux_coord(iris.coords.AuxCoord(0, long_name='x'))
            >>> cube_2 = iris.cube.Cube([3, 4])
            >>> cube_2.add_aux_coord(iris.coords.AuxCoord(1, long_name='x'))
            >>> cube_2.add_dim_coord(
            ...     iris.coords.DimCoord([0, 1], long_name='z'), 0)
            >>> single_cube = iris.cube.CubeList([cube_1, cube_2]).merge_cube()
            Traceback (most recent call last):
            ...
            iris.exceptions.MergeError: failed to merge into a single cube.
              Coordinates in cube.dim_coords differ: z.
              Coordinate-to-dimension mapping differs for cube.dim_coords.
        """
        if not self:
            raise ValueError("can't merge an empty CubeList")
        # Register each of our cubes with a single ProtoCube.
        proto_cube = iris._merge.ProtoCube(self[0])
        for cube in self[1:]:
            # error_on_mismatch=True makes registration failures raise a
            # descriptive MergeError rather than silently skipping the cube.
            proto_cube.register(cube, error_on_mismatch=True)
        # Extract the merged cube from the ProtoCube.
        merged_cube, = proto_cube.merge()
        return merged_cube
    def merge(self, unique=True):
        """
        Returns the :class:`CubeList` resulting from merging this
        :class:`CubeList`.

        Kwargs:

        * unique:
            If True, raises `iris.exceptions.DuplicateDataError` if
            duplicate cubes are detected.

        This combines cubes with different values of an auxiliary scalar
        coordinate, by constructing a new dimension.

        .. testsetup::

            import iris
            c1 = iris.cube.Cube([0,1,2], long_name='some_parameter')
            xco = iris.coords.DimCoord([11, 12, 13], long_name='x_vals')
            c1.add_dim_coord(xco, 0)
            c1.add_aux_coord(iris.coords.AuxCoord([100], long_name='y_vals'))
            c2 = c1.copy()
            c2.coord('y_vals').points = [200]

        For example::

            >>> print(c1)
            some_parameter / (unknown)          (x_vals: 3)
                 Dimension coordinates:
                      x_vals                         x
                 Scalar coordinates:
                      y_vals: 100
            >>> print(c2)
            some_parameter / (unknown)          (x_vals: 3)
                 Dimension coordinates:
                      x_vals                         x
                 Scalar coordinates:
                      y_vals: 200
            >>> cube_list = iris.cube.CubeList([c1, c2])
            >>> new_cube = cube_list.merge()[0]
            >>> print(new_cube)
            some_parameter / (unknown)          (y_vals: 2; x_vals: 3)
                 Dimension coordinates:
                      y_vals                         x          -
                      x_vals                         -          x
            >>> print(new_cube.coord('y_vals').points)
            [100 200]
            >>>

        Contrast this with :meth:`iris.cube.CubeList.concatenate`, which joins
        cubes along an existing dimension.

        .. note::

            If time coordinates in the list of cubes have differing epochs then
            the cubes will not be able to be merged. If this occurs, use
            :func:`iris.util.unify_time_units` to normalise the epochs of the
            time coordinates so that the cubes can be merged.
        """
        # Register each of our cubes with its appropriate ProtoCube.
        # Cubes are grouped by standard_name; within a group, a cube joins the
        # first ProtoCube that accepts it, otherwise it seeds a new one.
        proto_cubes_by_name = {}
        for cube in self:
            name = cube.standard_name
            proto_cubes = proto_cubes_by_name.setdefault(name, [])
            proto_cube = None
            for target_proto_cube in proto_cubes:
                if target_proto_cube.register(cube):
                    proto_cube = target_proto_cube
                    break
            if proto_cube is None:
                proto_cube = iris._merge.ProtoCube(cube)
                proto_cubes.append(proto_cube)
        # Emulate Python 2 behaviour.
        # (Python 3 cannot sort None against strings; this key sorts None
        # first, like Python 2 did.)
        def _none_sort(item):
            return (item is not None, item)
        # Extract all the merged cubes from the ProtoCubes.
        merged_cubes = CubeList()
        for name in sorted(proto_cubes_by_name, key=_none_sort):
            for proto_cube in proto_cubes_by_name[name]:
                merged_cubes.extend(proto_cube.merge(unique=unique))
        return merged_cubes
    def concatenate_cube(self, check_aux_coords=True):
        """
        Return the concatenated contents of the :class:`CubeList` as a single
        :class:`Cube`.

        If it is not possible to concatenate the `CubeList` into a single
        `Cube`, a :class:`~iris.exceptions.ConcatenateError` will be raised
        describing the reason for the failure.

        Kwargs:

        * check_aux_coords
            Checks the auxilliary coordinates of the cubes match. This check
            is not applied to auxilliary coordinates that span the dimension
            the concatenation is occuring along. Defaults to True.

        .. note::

            Concatenation cannot occur along an anonymous dimension.
        """
        if not self:
            raise ValueError("can't concatenate an empty CubeList")
        names = [cube.metadata.name() for cube in self]
        # OrderedDict.fromkeys de-duplicates while preserving first-seen order.
        unique_names = list(collections.OrderedDict.fromkeys(names))
        if len(unique_names) == 1:
            res = iris._concatenate.concatenate(
                self, error_on_mismatch=True,
                check_aux_coords=check_aux_coords)
            n_res_cubes = len(res)
            if n_res_cubes == 1:
                return res[0]
            else:
                msgs = []
                msgs.append('An unexpected problem prevented concatenation.')
                msgs.append('Expected only a single cube, '
                            'found {}.'.format(n_res_cubes))
                raise iris.exceptions.ConcatenateError(msgs)
        else:
            msgs = []
            msgs.append('Cube names differ: {} != {}'.format(names[0],
                                                             names[1]))
            raise iris.exceptions.ConcatenateError(msgs)
    def concatenate(self, check_aux_coords=True):
        """
        Concatenate the cubes over their common dimensions.

        Kwargs:

        * check_aux_coords
            Checks the auxilliary coordinates of the cubes match. This check
            is not applied to auxilliary coordinates that span the dimension
            the concatenation is occuring along. Defaults to True.

        Returns:
            A new :class:`iris.cube.CubeList` of concatenated
            :class:`iris.cube.Cube` instances.

        This combines cubes with a common dimension coordinate, but occupying
        different regions of the coordinate value.  The cubes are joined across
        that dimension.

        .. testsetup::

            import iris
            import numpy as np
            xco = iris.coords.DimCoord([11, 12, 13, 14], long_name='x_vals')
            yco1 = iris.coords.DimCoord([4, 5], long_name='y_vals')
            yco2 = iris.coords.DimCoord([7, 9, 10], long_name='y_vals')
            c1 = iris.cube.Cube(np.zeros((2,4)), long_name='some_parameter')
            c1.add_dim_coord(xco, 1)
            c1.add_dim_coord(yco1, 0)
            c2 = iris.cube.Cube(np.zeros((3,4)), long_name='some_parameter')
            c2.add_dim_coord(xco, 1)
            c2.add_dim_coord(yco2, 0)

        For example::

            >>> print(c1)
            some_parameter / (unknown)          (y_vals: 2; x_vals: 4)
                 Dimension coordinates:
                      y_vals                         x          -
                      x_vals                         -          x
            >>> print(c1.coord('y_vals').points)
            [4 5]
            >>> print(c2)
            some_parameter / (unknown)          (y_vals: 3; x_vals: 4)
                 Dimension coordinates:
                      y_vals                         x          -
                      x_vals                         -          x
            >>> print(c2.coord('y_vals').points)
            [ 7  9 10]
            >>> cube_list = iris.cube.CubeList([c1, c2])
            >>> new_cube = cube_list.concatenate()[0]
            >>> print(new_cube)
            some_parameter / (unknown)          (y_vals: 5; x_vals: 4)
                 Dimension coordinates:
                      y_vals                         x          -
                      x_vals                         -          x
            >>> print(new_cube.coord('y_vals').points)
            [ 4  5  7  9 10]
            >>>

        Contrast this with :meth:`iris.cube.CubeList.merge`, which makes a new
        dimension from values of an auxiliary scalar coordinate.

        .. note::

            If time coordinates in the list of cubes have differing epochs then
            the cubes will not be able to be concatenated. If this occurs, use
            :func:`iris.util.unify_time_units` to normalise the epochs of the
            time coordinates so that the cubes can be concatenated.

        .. note::

            Concatenation cannot occur along an anonymous dimension.
        """
        return iris._concatenate.concatenate(self,
                                             check_aux_coords=check_aux_coords)
def _is_single_item(testee):
    """
    Return whether this is a single item, rather than an iterable.

    We count string types as 'single', also.
    """
    # NOTE(review): `collections.Iterable` is a deprecated alias of
    # `collections.abc.Iterable` that was removed in Python 3.10; to keep this
    # file running on modern Python 3 this should move to
    # `collections.abc.Iterable` (guarded for Python 2 compatibility).
    return (isinstance(testee, six.string_types) or
            not isinstance(testee, collections.Iterable))
class Cube(CFVariableMixin):
"""
A single Iris cube of data and metadata.
Typically obtained from :func:`iris.load`, :func:`iris.load_cube`,
:func:`iris.load_cubes`, or from the manipulation of existing cubes.
For example:
>>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
>>> print(cube)
air_temperature / (K) (latitude: 73; longitude: 96)
Dimension coordinates:
latitude x -
longitude - x
Scalar coordinates:
forecast_period: 6477 hours, bound=(-28083.0, 6477.0) hours
forecast_reference_time: 1998-03-01 03:00:00
pressure: 1000.0 hPa
time: 1998-12-01 00:00:00, \
bound=(1994-12-01 00:00:00, 1998-12-01 00:00:00)
Attributes:
STASH: m01s16i203
source: Data from Met Office Unified Model
Cell methods:
mean within years: time
mean over years: time
See the :doc:`user guide</userguide/index>` for more information.
"""
#: Indicates to client code that the object supports
#: "orthogonal indexing", which means that slices that are 1d arrays
#: or lists slice along each dimension independently. This behavior
#: is similar to Fortran or Matlab, but different than numpy.
__orthogonal_indexing__ = True
    def __init__(self, data, standard_name=None, long_name=None,
                 var_name=None, units=None, attributes=None,
                 cell_methods=None, dim_coords_and_dims=None,
                 aux_coords_and_dims=None, aux_factories=None,
                 cell_measures_and_dims=None):
        """
        Creates a cube with data and optional metadata.

        Not typically used - normally cubes are obtained by loading data
        (e.g. :func:`iris.load`) or from manipulating existing cubes.

        Args:

        * data
            This object defines the shape of the cube and the phenomenon
            value in each cell.

            It can be a biggus array, a numpy array, a numpy array
            subclass (such as :class:`numpy.ma.MaskedArray`), or an
            *array_like* as described in :func:`numpy.asarray`.

            See :attr:`Cube.data<iris.cube.Cube.data>`.

        Kwargs:

        * standard_name
            The standard name for the Cube's data.
        * long_name
            An unconstrained description of the cube.
        * var_name
            The CF variable name for the cube.
        * units
            The unit of the cube, e.g. ``"m s-1"`` or ``"kelvin"``.
        * attributes
            A dictionary of cube attributes
        * cell_methods
            A tuple of CellMethod objects, generally set by Iris, e.g.
            ``(CellMethod("mean", coords='latitude'), )``.
        * dim_coords_and_dims
            A list of coordinates with scalar dimension mappings, e.g
            ``[(lat_coord, 0), (lon_coord, 1)]``.
        * aux_coords_and_dims
            A list of coordinates with dimension mappings,
            e.g ``[(lat_coord, 0), (lon_coord, (0, 1))]``.
            See also :meth:`Cube.add_dim_coord()<iris.cube.Cube.add_dim_coord>`
            and :meth:`Cube.add_aux_coord()<iris.cube.Cube.add_aux_coord>`.
        * aux_factories
            A list of auxiliary coordinate factories. See
            :mod:`iris.aux_factory`.
        * cell_measures_and_dims
            A list of CellMeasures with dimension mappings.

        For example::

            >>> from iris.coords import DimCoord
            >>> from iris.cube import Cube
            >>> latitude = DimCoord(np.linspace(-90, 90, 4),
            ...                     standard_name='latitude',
            ...                     units='degrees')
            >>> longitude = DimCoord(np.linspace(45, 360, 8),
            ...                      standard_name='longitude',
            ...                      units='degrees')
            >>> cube = Cube(np.zeros((4, 8), np.float32),
            ...             dim_coords_and_dims=[(latitude, 0),
            ...                                  (longitude, 1)])
        """
        # Temporary error while we transition the API.
        if isinstance(data, six.string_types):
            raise TypeError('Invalid data type: {!r}.'.format(data))
        # biggus/masked arrays are kept as-is (deferred / masked semantics);
        # anything else is coerced to a plain ndarray.
        if not isinstance(data, (biggus.Array, ma.MaskedArray)):
            data = np.asarray(data)
        self._my_data = data
        #: The "standard name" for the Cube's phenomenon.
        self.standard_name = standard_name
        #: An instance of :class:`cf_units.Unit` describing the Cube's data.
        self.units = units
        #: The "long name" for the Cube's phenomenon.
        self.long_name = long_name
        #: The CF variable name for the Cube.
        self.var_name = var_name
        self.cell_methods = cell_methods
        #: A dictionary, with a few restricted keys, for arbitrary
        #: Cube metadata.
        self.attributes = attributes
        # Coords
        self._dim_coords_and_dims = []
        self._aux_coords_and_dims = []
        self._aux_factories = []
        # Cell Measures
        self._cell_measures_and_dims = []
        # Track (standard_name, long_name) identities already added, so that
        # the first occurrence can take the cheap "unique" fast path and only
        # repeats go through the fully-checked add_* methods.
        identities = set()
        if dim_coords_and_dims:
            dims = set()
            for coord, dim in dim_coords_and_dims:
                identity = coord.standard_name, coord.long_name
                if identity not in identities and dim not in dims:
                    self._add_unique_dim_coord(coord, dim)
                else:
                    self.add_dim_coord(coord, dim)
                identities.add(identity)
                dims.add(dim)
        if aux_coords_and_dims:
            for coord, dims in aux_coords_and_dims:
                identity = coord.standard_name, coord.long_name
                if identity not in identities:
                    self._add_unique_aux_coord(coord, dims)
                else:
                    self.add_aux_coord(coord, dims)
                identities.add(identity)
        if aux_factories:
            for factory in aux_factories:
                self.add_aux_factory(factory)
        if cell_measures_and_dims:
            for cell_measure, dims in cell_measures_and_dims:
                self.add_cell_measure(cell_measure, dims)
    @property
    def metadata(self):
        """
        An instance of :class:`CubeMetadata` describing the phenomenon.
        This property can be updated with any of:
        - another :class:`CubeMetadata` instance,
        - a tuple/dict which can be used to make a :class:`CubeMetadata`,
        - or any object providing the attributes exposed by
        :class:`CubeMetadata`.
        """
        return CubeMetadata(self.standard_name, self.long_name, self.var_name,
                            self.units, self.attributes, self.cell_methods)
    @metadata.setter
    def metadata(self, value):
        # Accept, in order of preference:
        #   1. a mapping usable as CubeMetadata(**value),
        #   2. an iterable usable as CubeMetadata(*value),
        #   3. any object that already exposes every CubeMetadata field
        #      (duck typing - checked via hasattr below).
        try:
            value = CubeMetadata(**value)
        except TypeError:
            try:
                value = CubeMetadata(*value)
            except TypeError:
                # Not convertible: require every field to be present on the
                # object itself before copying the values across.
                missing_attrs = [field for field in CubeMetadata._fields
                                 if not hasattr(value, field)]
                if missing_attrs:
                    raise TypeError('Invalid/incomplete metadata')
        # Copy each metadata field onto the cube's own attributes.
        for name in CubeMetadata._fields:
            setattr(self, name, getattr(value, name))
def is_compatible(self, other, ignore=None):
"""
Return whether the cube is compatible with another.
Compatibility is determined by comparing :meth:`iris.cube.Cube.name()`,
:attr:`iris.cube.Cube.units`, :attr:`iris.cube.Cube.cell_methods` and
:attr:`iris.cube.Cube.attributes` that are present in both objects.
Args:
* other:
An instance of :class:`iris.cube.Cube` or
:class:`iris.cube.CubeMetadata`.
* ignore:
A single attribute key or iterable of attribute keys to ignore when
comparing the cubes. Default is None. To ignore all attributes set
this to other.attributes.
Returns:
Boolean.
.. seealso::
:meth:`iris.util.describe_diff()`
.. note::
This function does not indicate whether the two cubes can be
merged, instead it checks only the four items quoted above for
equality. Determining whether two cubes will merge requires
additional logic that is beyond the scope of this method.
"""
compatible = (self.name() == other.name() and
self.units == other.units and
self.cell_methods == other.cell_methods)
if compatible:
common_keys = set(self.attributes).intersection(other.attributes)
if ignore is not None:
if isinstance(ignore, six.string_types):
ignore = (ignore,)
common_keys = common_keys.difference(ignore)
for key in common_keys:
if np.any(self.attributes[key] != other.attributes[key]):
compatible = False
break
return compatible
def convert_units(self, unit):
"""
Change the cube's units, converting the values in the data array.
For example, if a cube's :attr:`~iris.cube.Cube.units` are
kelvin then::
cube.convert_units('celsius')
will change the cube's :attr:`~iris.cube.Cube.units` attribute to
celsius and subtract 273.15 from each value in
:attr:`~iris.cube.Cube.data`.
.. warning::
Calling this method will trigger any deferred loading, causing
the cube's data array to be loaded into memory.
"""
# If the cube has units convert the data.
if not self.units.is_unknown():
self.data = self.units.convert(self.data, unit)
self.units = unit
def add_cell_method(self, cell_method):
"""Add a CellMethod to the Cube."""
self.cell_methods += (cell_method, )
def add_aux_coord(self, coord, data_dims=None):
"""
Adds a CF auxiliary coordinate to the cube.
Args:
* coord
The :class:`iris.coords.DimCoord` or :class:`iris.coords.AuxCoord`
instance to add to the cube.
Kwargs:
* data_dims
Integer or iterable of integers giving the data dimensions spanned
by the coordinate.
Raises a ValueError if a coordinate with identical metadata already
exists on the cube.
See also :meth:`Cube.remove_coord()<iris.cube.Cube.remove_coord>`.
"""
if self.coords(coord): # TODO: just fail on duplicate object
raise ValueError('Duplicate coordinates are not permitted.')
self._add_unique_aux_coord(coord, data_dims)
def _check_multi_dim_metadata(self, metadata, data_dims):
# Convert to a tuple of integers
if data_dims is None:
data_dims = tuple()
elif isinstance(data_dims, collections.Container):
data_dims = tuple(int(d) for d in data_dims)
else:
data_dims = (int(data_dims),)
if data_dims:
if len(data_dims) != metadata.ndim:
msg = 'Invalid data dimensions: {} given, {} expected for ' \
'{!r}.'.format(len(data_dims), metadata.ndim,
metadata.name())
raise ValueError(msg)
# Check compatibility with the shape of the data
for i, dim in enumerate(data_dims):
if metadata.shape[i] != self.shape[dim]:
msg = 'Unequal lengths. Cube dimension {} => {};' \
' metadata {!r} dimension {} => {}.'
raise ValueError(msg.format(dim, self.shape[dim],
metadata.name(), i,
metadata.shape[i]))
elif metadata.shape != (1,):
msg = 'Missing data dimensions for multi-valued {} {!r}'
msg = msg.format(metadata.__class__.__name__, metadata.name())
raise ValueError(msg)
return data_dims
def _add_unique_aux_coord(self, coord, data_dims):
data_dims = self._check_multi_dim_metadata(coord, data_dims)
self._aux_coords_and_dims.append([coord, data_dims])
def add_aux_factory(self, aux_factory):
"""
Adds an auxiliary coordinate factory to the cube.
Args:
* aux_factory
The :class:`iris.aux_factory.AuxCoordFactory` instance to add.
"""
if not isinstance(aux_factory, iris.aux_factory.AuxCoordFactory):
raise TypeError('Factory must be a subclass of '
'iris.aux_factory.AuxCoordFactory.')
self._aux_factories.append(aux_factory)
def add_cell_measure(self, cell_measure, data_dims=None):
"""
Adds a CF cell measure to the cube.
Args:
* cell_measure
The :class:`iris.coords.CellMeasure`
instance to add to the cube.
Kwargs:
* data_dims
Integer or iterable of integers giving the data dimensions spanned
by the coordinate.
Raises a ValueError if a cell_measure with identical metadata already
exists on the cube.
See also
:meth:`Cube.remove_cell_measure()<iris.cube.Cube.remove_cell_measure>`.
"""
if self.cell_measures(cell_measure):
raise ValueError('Duplicate cell_measures are not permitted.')
data_dims = self._check_multi_dim_metadata(cell_measure, data_dims)
self._cell_measures_and_dims.append([cell_measure, data_dims])
self._cell_measures_and_dims.sort(key=lambda cm_dims:
(cm_dims[0]._as_defn(), cm_dims[1]))
def add_dim_coord(self, dim_coord, data_dim):
"""
Add a CF coordinate to the cube.
Args:
* dim_coord
The :class:`iris.coords.DimCoord` instance to add to the cube.
* data_dim
Integer giving the data dimension spanned by the coordinate.
Raises a ValueError if a coordinate with identical metadata already
exists on the cube or if a coord already exists for the
given dimension.
See also :meth:`Cube.remove_coord()<iris.cube.Cube.remove_coord>`.
"""
if self.coords(dim_coord):
raise ValueError('The coordinate already exists on the cube. '
'Duplicate coordinates are not permitted.')
# Check dimension is available
if self.coords(dimensions=data_dim, dim_coords=True):
raise ValueError('A dim_coord is already associated with '
'dimension %d.' % data_dim)
self._add_unique_dim_coord(dim_coord, data_dim)
def _add_unique_dim_coord(self, dim_coord, data_dim):
if isinstance(dim_coord, iris.coords.AuxCoord):
raise ValueError('The dim_coord may not be an AuxCoord instance.')
# Convert data_dim to a single integer
if isinstance(data_dim, collections.Container):
if len(data_dim) != 1:
raise ValueError('The supplied data dimension must be a'
' single number.')
data_dim = int(list(data_dim)[0])
else:
data_dim = int(data_dim)
# Check data_dim value is valid
if data_dim < 0 or data_dim >= self.ndim:
raise ValueError('The cube does not have the specified dimension '
'(%d)' % data_dim)
# Check compatibility with the shape of the data
if dim_coord.shape[0] != self.shape[data_dim]:
msg = 'Unequal lengths. Cube dimension {} => {}; coord {!r} => {}.'
raise ValueError(msg.format(data_dim, self.shape[data_dim],
dim_coord.name(),
len(dim_coord.points)))
self._dim_coords_and_dims.append([dim_coord, int(data_dim)])
def remove_aux_factory(self, aux_factory):
"""Removes the given auxiliary coordinate factory from the cube."""
self._aux_factories.remove(aux_factory)
def _remove_coord(self, coord):
self._dim_coords_and_dims = [(coord_, dim) for coord_, dim in
self._dim_coords_and_dims if coord_
is not coord]
self._aux_coords_and_dims = [(coord_, dims) for coord_, dims in
self._aux_coords_and_dims if coord_
is not coord]
def remove_coord(self, coord):
"""
Removes a coordinate from the cube.
Args:
* coord (string or coord)
The (name of the) coordinate to remove from the cube.
See also :meth:`Cube.add_dim_coord()<iris.cube.Cube.add_dim_coord>`
and :meth:`Cube.add_aux_coord()<iris.cube.Cube.add_aux_coord>`.
"""
coord = self.coord(coord)
self._remove_coord(coord)
for factory in self.aux_factories:
factory.update(coord)
def remove_cell_measure(self, cell_measure):
"""
Removes a cell measure from the cube.
Args:
* cell_measure (CellMeasure)
The CellMeasure to remove from the cube.
See also
:meth:`Cube.add_cell_measure()<iris.cube.Cube.add_cell_measure>`
"""
self._cell_measures_and_dims = [[cell_measure_, dim] for cell_measure_,
dim in self._cell_measures_and_dims
if cell_measure_ is not cell_measure]
def replace_coord(self, new_coord):
"""
Replace the coordinate whose metadata matches the given coordinate.
"""
old_coord = self.coord(new_coord)
dims = self.coord_dims(old_coord)
was_dimensioned = old_coord in self.dim_coords
self._remove_coord(old_coord)
if was_dimensioned and isinstance(new_coord, iris.coords.DimCoord):
self.add_dim_coord(new_coord, dims[0])
else:
self.add_aux_coord(new_coord, dims)
for factory in self.aux_factories:
factory.update(old_coord, new_coord)
def coord_dims(self, coord):
"""
Returns a tuple of the data dimensions relevant to the given
coordinate.
When searching for the given coordinate in the cube the comparison is
made using coordinate metadata equality. Hence the given coordinate
instance need not exist on the cube, and may contain different
coordinate values.
Args:
* coord (string or coord)
The (name of the) coord to look for.
"""
coord = self.coord(coord)
# Search for existing coordinate (object) on the cube, faster lookup
# than equality - makes no functional difference.
matches = [(dim,) for coord_, dim in self._dim_coords_and_dims if
coord_ is coord]
if not matches:
matches = [dims for coord_, dims in self._aux_coords_and_dims if
coord_ is coord]
# Search derived aux coords
target_defn = coord._as_defn()
if not matches:
def match(factory):
return factory._as_defn() == target_defn
factories = filter(match, self._aux_factories)
matches = [factory.derived_dims(self.coord_dims) for factory in
factories]
if not matches:
raise iris.exceptions.CoordinateNotFoundError(coord.name())
return matches[0]
def cell_measure_dims(self, cell_measure):
"""
Returns a tuple of the data dimensions relevant to the given
CellMeasure.
* cell_measure
The CellMeasure to look for.
"""
# Search for existing cell measure (object) on the cube, faster lookup
# than equality - makes no functional difference.
matches = [dims for cm_, dims in self._cell_measures_and_dims if
cm_ is cell_measure]
if not matches:
raise iris.exceptions.CellMeasureNotFoundError(cell_measure.name())
return matches[0]
def aux_factory(self, name=None, standard_name=None, long_name=None,
var_name=None):
"""
Returns the single coordinate factory that matches the criteria,
or raises an error if not found.
Kwargs:
* name
If not None, matches against factory.name().
* standard_name
The CF standard name of the desired coordinate factory.
If None, does not check for standard name.
* long_name
An unconstrained description of the coordinate factory.
If None, does not check for long_name.
* var_name
The CF variable name of the desired coordinate factory.
If None, does not check for var_name.
.. note::
If the arguments given do not result in precisely 1 coordinate
factory being matched, an
:class:`iris.exceptions.CoordinateNotFoundError` is raised.
"""
factories = self.aux_factories
if name is not None:
factories = [factory for factory in factories if
factory.name() == name]
if standard_name is not None:
factories = [factory for factory in factories if
factory.standard_name == standard_name]
if long_name is not None:
factories = [factory for factory in factories if
factory.long_name == long_name]
if var_name is not None:
factories = [factory for factory in factories if
factory.var_name == var_name]
if len(factories) > 1:
factory_names = (factory.name() for factory in factories)
msg = 'Expected to find exactly one coordinate factory, but ' \
'found {}. They were: {}.'.format(len(factories),
', '.join(factory_names))
raise iris.exceptions.CoordinateNotFoundError(msg)
elif len(factories) == 0:
msg = 'Expected to find exactly one coordinate factory, but ' \
'found none.'
raise iris.exceptions.CoordinateNotFoundError(msg)
return factories[0]
    def coords(self, name_or_coord=None, standard_name=None,
               long_name=None, var_name=None, attributes=None, axis=None,
               contains_dimension=None, dimensions=None, coord=None,
               coord_system=None, dim_coords=None, name=None):
        """
        Return a list of coordinates in this cube fitting the given criteria.
        Kwargs:
        * name_or_coord
            Either
            (a) a :attr:`standard_name`, :attr:`long_name`, or
            :attr:`var_name`. Defaults to value of `default`
            (which itself defaults to `unknown`) as defined in
            :class:`iris._cube_coord_common.CFVariableMixin`.
            (b) a coordinate instance with metadata equal to that of
            the desired coordinates. Accepts either a
            :class:`iris.coords.DimCoord`, :class:`iris.coords.AuxCoord`,
            :class:`iris.aux_factory.AuxCoordFactory`
            or :class:`iris.coords.CoordDefn`.
        * name
            .. deprecated:: 1.6. Please use the name_or_coord kwarg.
        * standard_name
            The CF standard name of the desired coordinate. If None, does not
            check for standard name.
        * long_name
            An unconstrained description of the coordinate. If None, does not
            check for long_name.
        * var_name
            The CF variable name of the desired coordinate. If None, does not
            check for var_name.
        * attributes
            A dictionary of attributes desired on the coordinates. If None,
            does not check for attributes.
        * axis
            The desired coordinate axis, see
            :func:`iris.util.guess_coord_axis`. If None, does not check for
            axis. Accepts the values 'X', 'Y', 'Z' and 'T' (case-insensitive).
        * contains_dimension
            The desired coordinate contains the data dimension. If None, does
            not check for the dimension.
        * dimensions
            The exact data dimensions of the desired coordinate. Coordinates
            with no data dimension can be found with an empty tuple or list
            (i.e. ``()`` or ``[]``). If None, does not check for dimensions.
        * coord
            .. deprecated:: 1.6. Please use the name_or_coord kwarg.
        * coord_system
            Whether the desired coordinates have coordinate systems equal to
            the given coordinate system. If None, no check is done.
        * dim_coords
            Set to True to only return coordinates that are the cube's
            dimension coordinates. Set to False to only return coordinates
            that are the cube's auxiliary and derived coordinates. If None,
            returns all coordinates.
        See also :meth:`Cube.coord()<iris.cube.Cube.coord>`.
        """
        # Handle deprecated kwargs
        if name is not None:
            name_or_coord = name
            warn_deprecated('the name kwarg is deprecated and will be removed '
                            'in a future release. Consider converting '
                            'existing code to use the name_or_coord '
                            'kwarg as a replacement.',
                            stacklevel=2)
        if coord is not None:
            name_or_coord = coord
            warn_deprecated('the coord kwarg is deprecated and will be '
                            'removed in a future release. Consider converting '
                            'existing code to use the name_or_coord '
                            'kwarg as a replacement.',
                            stacklevel=2)
        # Finish handling deprecated kwargs
        # Split name_or_coord into a name string OR a coord-like object;
        # 'name' and 'coord' are reused as locals from here on.
        name = None
        coord = None
        if isinstance(name_or_coord, six.string_types):
            name = name_or_coord
        else:
            coord = name_or_coord
        # Build the initial candidate pool according to the dim_coords flag:
        # dim coords, aux coords + factories, or (for None) everything.
        coords_and_factories = []
        if dim_coords in [True, None]:
            coords_and_factories += list(self.dim_coords)
        if dim_coords in [False, None]:
            coords_and_factories += list(self.aux_coords)
            coords_and_factories += list(self.aux_factories)
        # Each requested criterion successively narrows the candidate pool.
        if name is not None:
            coords_and_factories = [coord_ for coord_ in coords_and_factories
                                    if coord_.name() == name]
        if standard_name is not None:
            coords_and_factories = [coord_ for coord_ in coords_and_factories
                                    if coord_.standard_name == standard_name]
        if long_name is not None:
            coords_and_factories = [coord_ for coord_ in coords_and_factories
                                    if coord_.long_name == long_name]
        if var_name is not None:
            coords_and_factories = [coord_ for coord_ in coords_and_factories
                                    if coord_.var_name == var_name]
        if axis is not None:
            # Axis matching is case-insensitive ('x' == 'X').
            axis = axis.upper()
            guess_axis = iris.util.guess_coord_axis
            coords_and_factories = [coord_ for coord_ in coords_and_factories
                                    if guess_axis(coord_) == axis]
        if attributes is not None:
            # NOTE(review): collections.Mapping (and .Container below) were
            # removed from the collections module in Python 3.10; they live
            # in collections.abc. Fine for the py2/py3 era this targets.
            if not isinstance(attributes, collections.Mapping):
                msg = 'The attributes keyword was expecting a dictionary ' \
                      'type, but got a %s instead.' % type(attributes)
                raise ValueError(msg)
            def attr_filter(coord_):
                # Every requested attribute must be present AND equal.
                return all(k in coord_.attributes and coord_.attributes[k] == v
                           for k, v in six.iteritems(attributes))
            coords_and_factories = [coord_ for coord_ in coords_and_factories
                                    if attr_filter(coord_)]
        if coord_system is not None:
            coords_and_factories = [coord_ for coord_ in coords_and_factories
                                    if coord_.coord_system == coord_system]
        if coord is not None:
            # Compare by metadata definition, accepting a ready-made
            # CoordDefn or deriving one from the given coordinate.
            if isinstance(coord, iris.coords.CoordDefn):
                defn = coord
            else:
                defn = coord._as_defn()
            coords_and_factories = [coord_ for coord_ in coords_and_factories
                                    if coord_._as_defn() == defn]
        if contains_dimension is not None:
            coords_and_factories = [coord_ for coord_ in coords_and_factories
                                    if contains_dimension in
                                    self.coord_dims(coord_)]
        if dimensions is not None:
            if not isinstance(dimensions, collections.Container):
                dimensions = [dimensions]
            dimensions = tuple(dimensions)
            coords_and_factories = [coord_ for coord_ in coords_and_factories
                                    if self.coord_dims(coord_) == dimensions]
        # If any factories remain after the above filters we have to make the
        # coords so they can be returned
        def extract_coord(coord_or_factory):
            # Realise a factory as a concrete coordinate; pass coords through.
            if isinstance(coord_or_factory, iris.aux_factory.AuxCoordFactory):
                coord = coord_or_factory.make_coord(self.coord_dims)
            elif isinstance(coord_or_factory, iris.coords.Coord):
                coord = coord_or_factory
            else:
                msg = 'Expected Coord or AuxCoordFactory, got ' \
                      '{!r}.'.format(type(coord_or_factory))
                raise ValueError(msg)
            return coord
        coords = [extract_coord(coord_or_factory) for coord_or_factory in
                  coords_and_factories]
        return coords
def coord(self, name_or_coord=None, standard_name=None,
long_name=None, var_name=None, attributes=None, axis=None,
contains_dimension=None, dimensions=None, coord=None,
coord_system=None, dim_coords=None, name=None):
"""
Return a single coord given the same arguments as :meth:`Cube.coords`.
.. note::
If the arguments given do not result in precisely 1 coordinate
being matched, an :class:`iris.exceptions.CoordinateNotFoundError`
is raised.
.. seealso::
:meth:`Cube.coords()<iris.cube.Cube.coords>` for full keyword
documentation.
"""
# Handle deprecated kwargs
if name is not None:
name_or_coord = name
warn_deprecated('the name kwarg is deprecated and will be removed '
'in a future release. Consider converting '
'existing code to use the name_or_coord '
'kwarg as a replacement.',
stacklevel=2)
if coord is not None:
name_or_coord = coord
warn_deprecated('the coord kwarg is deprecated and will be '
'removed in a future release. Consider converting '
'existing code to use the name_or_coord '
'kwarg as a replacement.',
stacklevel=2)
# Finish handling deprecated kwargs
coords = self.coords(name_or_coord=name_or_coord,
standard_name=standard_name,
long_name=long_name, var_name=var_name,
attributes=attributes, axis=axis,
contains_dimension=contains_dimension,
dimensions=dimensions,
coord_system=coord_system,
dim_coords=dim_coords)
if len(coords) > 1:
msg = 'Expected to find exactly 1 coordinate, but found %s. ' \
'They were: %s.' % (len(coords), ', '.join(coord.name() for
coord in coords))
raise iris.exceptions.CoordinateNotFoundError(msg)
elif len(coords) == 0:
bad_name = name or standard_name or long_name or \
(coord and coord.name()) or ''
msg = 'Expected to find exactly 1 %s coordinate, but found ' \
'none.' % bad_name
raise iris.exceptions.CoordinateNotFoundError(msg)
return coords[0]
def coord_system(self, spec=None):
"""
Find the coordinate system of the given type.
If no target coordinate system is provided then find
any available coordinate system.
Kwargs:
* spec:
The the name or type of a coordinate system subclass.
E.g. ::
cube.coord_system("GeogCS")
cube.coord_system(iris.coord_systems.GeogCS)
If spec is provided as a type it can be a superclass of
any coordinate system found.
If spec is None, then find any available coordinate
systems within the :class:`iris.cube.Cube`.
Returns:
The :class:`iris.coord_systems.CoordSystem` or None.
"""
if isinstance(spec, six.string_types) or spec is None:
spec_name = spec
else:
msg = "type %s is not a subclass of CoordSystem" % spec
assert issubclass(spec, iris.coord_systems.CoordSystem), msg
spec_name = spec.__name__
# Gather a temporary list of our unique CoordSystems.
coord_systems = ClassDict(iris.coord_systems.CoordSystem)
for coord in self.coords():
if coord.coord_system:
coord_systems.add(coord.coord_system, replace=True)
result = None
if spec_name is None:
for key in sorted(coord_systems.keys(),
key=lambda class_: class_.__name__):
result = coord_systems[key]
break
else:
result = coord_systems.get(spec_name)
return result
def cell_measures(self, name_or_cell_measure=None):
"""
Return a list of cell measures in this cube fitting the given criteria.
Kwargs:
* name_or_cell_measure
Either
(a) a :attr:`standard_name`, :attr:`long_name`, or
:attr:`var_name`. Defaults to value of `default`
(which itself defaults to `unknown`) as defined in
:class:`iris._cube_coord_common.CFVariableMixin`.
(b) a cell_measure instance with metadata equal to that of
the desired cell_measures.
See also :meth:`Cube.cell_measure()<iris.cube.Cube.cell_measure>`.
"""
name = None
if isinstance(name_or_cell_measure, six.string_types):
name = name_or_cell_measure
else:
cell_measure = name_or_cell_measure
cell_measures = []
for cm, _ in self._cell_measures_and_dims:
if name is not None:
if cm.name() == name:
cell_measures.append(cm)
elif cell_measure is not None:
if cm == cell_measure:
cell_measures.append(cm)
else:
cell_measures.append(cm)
return cell_measures
def cell_measure(self, name_or_cell_measure=None):
"""
Return a single cell_measure given the same arguments as
:meth:`Cube.cell_measures`.
.. note::
If the arguments given do not result in precisely 1 cell_measure
being matched, an :class:`iris.exceptions.CellMeasureNotFoundError`
is raised.
.. seealso::
:meth:`Cube.cell_measures()<iris.cube.Cube.cell_measures>`
for full keyword documentation.
"""
cell_measures = self.cell_measures(name_or_cell_measure)
if len(cell_measures) > 1:
msg = ('Expected to find exactly 1 cell_measure, but found {}. '
'They were: {}.')
msg = msg.format(len(cell_measures),
', '.join(cm.name() for cm in cell_measures))
raise iris.exceptions.CellMeasureNotFoundError(msg)
elif len(cell_measures) == 0:
if isinstance(name_or_cell_measure, six.string_types):
bad_name = name_or_cell_measure
else:
bad_name = (name_or_cell_measure and
name_or_cell_measure.name()) or ''
msg = 'Expected to find exactly 1 %s cell_measure, but found ' \
'none.' % bad_name
raise iris.exceptions.CellMeasureNotFoundError(msg)
return cell_measures[0]
@property
def cell_methods(self):
"""
Tuple of :class:`iris.coords.CellMethod` representing the processing
done on the phenomenon.
"""
return self._cell_methods
@cell_methods.setter
def cell_methods(self, cell_methods):
self._cell_methods = tuple(cell_methods) if cell_methods else tuple()
@property
def shape(self):
"""The shape of the data of this cube."""
shape = self.lazy_data().shape
return shape
@property
def dtype(self):
"""The :class:`numpy.dtype` of the data of this cube."""
return self.lazy_data().dtype
@property
def ndim(self):
"""The number of dimensions in the data of this cube."""
return len(self.shape)
def lazy_data(self, array=None):
"""
Return a :class:`biggus.Array` representing the
multi-dimensional data of the Cube, and optionally provide a
new array of values.
Accessing this method will never cause the data to be loaded.
Similarly, calling methods on, or indexing, the returned Array
will not cause the Cube to have loaded data.
If the data have already been loaded for the Cube, the returned
Array will be a :class:`biggus.NumpyArrayAdapter` which wraps
the numpy array from `self.data`.
Kwargs:
* array (:class:`biggus.Array` or None):
When this is not None it sets the multi-dimensional data of
the cube to the given value.
Returns:
A :class:`biggus.Array` representing the multi-dimensional
data of the Cube.
"""
if array is not None:
if not isinstance(array, biggus.Array):
raise TypeError('new values must be a biggus.Array')
if self.shape != array.shape:
# The _ONLY_ data reshape permitted is converting a
# 0-dimensional array into a 1-dimensional array of
# length one.
# i.e. self.shape = () and array.shape == (1,)
if self.shape or array.shape != (1,):
raise ValueError('Require cube data with shape %r, got '
'%r.' % (self.shape, array.shape))
self._my_data = array
else:
array = self._my_data
if not isinstance(array, biggus.Array):
array = biggus.NumpyArrayAdapter(array)
return array
    @property
    def data(self):
        """
        The :class:`numpy.ndarray` representing the multi-dimensional data of
        the cube.

        .. note::
            Cubes obtained from netCDF, PP, and FieldsFile files will only
            populate this attribute on its first use.
            To obtain the shape of the data without causing it to be loaded,
            use the Cube.shape attribute.

        Example::
            >>> fname = iris.sample_data_path('air_temp.pp')
            >>> cube = iris.load_cube(fname, 'air_temperature')
            >>> # cube.data does not yet have a value.
            ...
            >>> print(cube.shape)
            (73, 96)
            >>> # cube.data still does not have a value.
            ...
            >>> cube = cube[:10, :20]
            >>> # cube.data still does not have a value.
            ...
            >>> data = cube.data
            >>> # Only now is the data loaded.
            ...
            >>> print(data.shape)
            (10, 20)
        """
        data = self._my_data
        if not isinstance(data, np.ndarray):
            # Deferred data: realise it now as a masked array.  A failed
            # realisation is re-raised with a message that includes the
            # would-be shape/dtype to help the user recover.
            try:
                data = data.masked_array()
            except MemoryError:
                msg = "Failed to create the cube's data as there was not" \
                      " enough memory available.\n" \
                      "The array shape would have been {0!r} and the data" \
                      " type {1}.\n" \
                      "Consider freeing up variables or indexing the cube" \
                      " before getting its data."
                msg = msg.format(self.shape, data.dtype)
                raise MemoryError(msg)
            # Unmask the array only if it is filled.
            # (A MaskedArray is an ndarray subclass; with no masked points
            # its plain underlying ndarray is kept instead.)
            if isinstance(data, np.ndarray) and ma.count_masked(data) == 0:
                data = data.data
        # data may be a numeric type, so ensure an np.ndarray is returned
        self._my_data = np.asanyarray(data)
        return self._my_data
    @data.setter
    def data(self, value):
        data = np.asanyarray(value)
        if self.shape != data.shape:
            # The _ONLY_ data reshape permitted is converting a 0-dimensional
            # array i.e. self.shape == () into a 1-dimensional array of length
            # one i.e. data.shape == (1,)
            if self.shape or data.shape != (1,):
                raise ValueError('Require cube data with shape %r, got '
                                 '%r.' % (self.shape, data.shape))
        self._my_data = data
def has_lazy_data(self):
return isinstance(self._my_data, biggus.Array)
@property
def dim_coords(self):
"""
Return a tuple of all the dimension coordinates, ordered by dimension.
.. note::
The length of the returned tuple is not necessarily the same as
:attr:`Cube.ndim` as there may be dimensions on the cube without
dimension coordinates. It is therefore unreliable to use the
resulting tuple to identify the dimension coordinates for a given
dimension - instead use the :meth:`Cube.coord` method with the
``dimensions`` and ``dim_coords`` keyword arguments.
"""
return tuple((coord for coord, dim in
sorted(self._dim_coords_and_dims,
key=lambda co_di: (co_di[1], co_di[0].name()))))
@property
def aux_coords(self):
"""
Return a tuple of all the auxiliary coordinates, ordered by
dimension(s).
"""
return tuple((coord for coord, dims in
sorted(self._aux_coords_and_dims,
key=lambda co_di: (co_di[1], co_di[0].name()))))
@property
def derived_coords(self):
"""
Return a tuple of all the coordinates generated by the coordinate
factories.
"""
return tuple(factory.make_coord(self.coord_dims) for factory in
sorted(self.aux_factories,
key=lambda factory: factory.name()))
@property
def aux_factories(self):
"""Return a tuple of all the coordinate factories."""
return tuple(self._aux_factories)
    def _summary_coord_extra(self, coord, indent):
        # Returns the text needed to ensure this coordinate can be
        # distinguished from all others with the same name.
        extra = ''
        similar_coords = self.coords(coord.name())
        if len(similar_coords) > 1:
            # Find all the attribute keys
            keys = set()
            for similar_coord in similar_coords:
                keys.update(six.iterkeys(similar_coord.attributes))
            # Look for any attributes that vary
            vary = set()
            attributes = {}
            for key in keys:
                for similar_coord in similar_coords:
                    if key not in similar_coord.attributes:
                        # Absent from one of the coords => the key varies.
                        vary.add(key)
                        break
                    value = similar_coord.attributes[key]
                    # setdefault records the first value seen; any coord
                    # carrying a different value means the key varies.
                    if attributes.setdefault(key, value) != value:
                        vary.add(key)
                        break
            # Only report keys that vary AND are present on this coord.
            keys = sorted(vary & set(coord.attributes.keys()))
            bits = ['{}={!r}'.format(key, coord.attributes[key]) for key in
                    keys]
            if bits:
                extra = indent + ', '.join(bits)
        return extra
def _summary_extra(self, coords, summary, indent):
# Where necessary, inserts extra lines into the summary to ensure
# coordinates can be distinguished.
new_summary = []
for coord, summary in zip(coords, summary):
new_summary.append(summary)
extra = self._summary_coord_extra(coord, indent)
if extra:
new_summary.append(extra)
return new_summary
def summary(self, shorten=False, name_padding=35):
"""
Unicode string summary of the Cube with name, a list of dim coord names
versus length and optionally relevant coordinate information.
"""
# Create a set to contain the axis names for each data dimension.
dim_names = [set() for dim in range(len(self.shape))]
# Add the dim_coord names that participate in the associated data
# dimensions.
for dim in range(len(self.shape)):
dim_coords = self.coords(contains_dimension=dim, dim_coords=True)
if dim_coords:
dim_names[dim].add(dim_coords[0].name())
else:
dim_names[dim].add('-- ')
# Convert axes sets to lists and sort.
dim_names = [sorted(names, key=sorted_axes) for names in dim_names]
# Generate textual summary of the cube dimensionality.
if self.shape == ():
dimension_header = 'scalar cube'
else:
dimension_header = '; '.join(
[', '.join(dim_names[dim]) +
': %d' % dim_shape for dim, dim_shape in
enumerate(self.shape)])
nameunit = '{name} / ({units})'.format(name=self.name(),
units=self.units)
# If all unknown and a STASH attribute exists, use it.
if nameunit == 'unknown / (unknown)' and 'STASH' in self.attributes:
nameunit = '{}'.format(self.attributes['STASH'])
cube_header = '{nameunit!s:{length}} ({dimension})'.format(
length=name_padding,
nameunit=nameunit,
dimension=dimension_header)
summary = ''
# Generate full cube textual summary.
if not shorten:
indent = 10
extra_indent = ' ' * 13
# Cache the derived coords so we can rely on consistent
# object IDs.
derived_coords = self.derived_coords
# Determine the cube coordinates that are scalar (single-valued)
# AND non-dimensioned.
dim_coords = self.dim_coords
aux_coords = self.aux_coords
all_coords = dim_coords + aux_coords + derived_coords
scalar_coords = [coord for coord in all_coords if not
self.coord_dims(coord) and coord.shape == (1,)]
# Determine the cube coordinates that are not scalar BUT
# dimensioned.
scalar_coord_ids = set(map(id, scalar_coords))
vector_dim_coords = [coord for coord in dim_coords if id(coord) not
in scalar_coord_ids]
vector_aux_coords = [coord for coord in aux_coords if id(coord) not
in scalar_coord_ids]
vector_derived_coords = [coord for coord in derived_coords if
id(coord) not in scalar_coord_ids]
# cell measures
vector_cell_measures = [cm for cm in self.cell_measures()
if cm.shape != (1,)]
# Determine the cube coordinates that don't describe the cube and
# are most likely erroneous.
vector_coords = vector_dim_coords + vector_aux_coords + \
vector_derived_coords
ok_coord_ids = scalar_coord_ids.union(set(map(id, vector_coords)))
invalid_coords = [coord for coord in all_coords if id(coord) not
in ok_coord_ids]
# Sort scalar coordinates by name.
scalar_coords.sort(key=lambda coord: coord.name())
# Sort vector coordinates by data dimension and name.
vector_dim_coords.sort(
key=lambda coord: (self.coord_dims(coord), coord.name()))
vector_aux_coords.sort(
key=lambda coord: (self.coord_dims(coord), coord.name()))
vector_derived_coords.sort(
key=lambda coord: (self.coord_dims(coord), coord.name()))
# Sort other coordinates by name.
invalid_coords.sort(key=lambda coord: coord.name())
#
# Generate textual summary of cube vector coordinates.
#
def vector_summary(vector_coords, cube_header, max_line_offset,
cell_measures=None):
"""
Generates a list of suitably aligned strings containing coord
names and dimensions indicated by one or more 'x' symbols.
.. note::
The function may need to update the cube header so this is
returned with the list of strings.
"""
if cell_measures is None:
cell_measures = []
vector_summary = []
vectors = []
# Identify offsets for each dimension text marker.
alignment = np.array([index for index, value in
enumerate(cube_header) if
value == ':'])
# Generate basic textual summary for each vector coordinate
# - WITHOUT dimension markers.
for coord in vector_coords + cell_measures:
vector_summary.append('%*s%s' % (
indent, ' ', iris.util.clip_string(coord.name())))
min_alignment = min(alignment)
# Determine whether the cube header requires realignment
# due to one or more longer vector coordinate summaries.
if max_line_offset >= min_alignment:
delta = max_line_offset - min_alignment + 5
cube_header = '%-*s (%s)' % (int(name_padding + delta),
self.name() or 'unknown',
dimension_header)
alignment += delta
if vector_coords:
# Generate full textual summary for each vector coordinate
# - WITH dimension markers.
for index, coord in enumerate(vector_coords):
dims = self.coord_dims(coord)
for dim in range(len(self.shape)):
width = alignment[dim] - len(vector_summary[index])
char = 'x' if dim in dims else '-'
line = '{pad:{width}}{char}'.format(pad=' ',
width=width,
char=char)
vector_summary[index] += line
vectors = vectors + vector_coords
if cell_measures:
# Generate full textual summary for each vector coordinate
# - WITH dimension markers.
for index, coord in enumerate(cell_measures):
dims = self.cell_measure_dims(coord)
for dim in range(len(self.shape)):
width = alignment[dim] - len(vector_summary[index])
char = 'x' if dim in dims else '-'
line = '{pad:{width}}{char}'.format(pad=' ',
width=width,
char=char)
vector_summary[index] += line
vectors = vectors + cell_measures
# Interleave any extra lines that are needed to distinguish
# the coordinates.
vector_summary = self._summary_extra(vectors,
vector_summary,
extra_indent)
return vector_summary, cube_header
# Calculate the maximum line offset.
max_line_offset = 0
for coord in all_coords:
max_line_offset = max(max_line_offset, len('%*s%s' % (
indent, ' ', iris.util.clip_string(str(coord.name())))))
if vector_dim_coords:
dim_coord_summary, cube_header = vector_summary(
vector_dim_coords, cube_header, max_line_offset)
summary += '\n Dimension coordinates:\n' + \
'\n'.join(dim_coord_summary)
if vector_aux_coords:
aux_coord_summary, cube_header = vector_summary(
vector_aux_coords, cube_header, max_line_offset)
summary += '\n Auxiliary coordinates:\n' + \
'\n'.join(aux_coord_summary)
if vector_derived_coords:
derived_coord_summary, cube_header = vector_summary(
vector_derived_coords, cube_header, max_line_offset)
summary += '\n Derived coordinates:\n' + \
'\n'.join(derived_coord_summary)
#
# Generate summary of cube cell measures attribute
#
if vector_cell_measures:
cell_measure_summary, cube_header = vector_summary(
[], cube_header, max_line_offset,
cell_measures=vector_cell_measures)
summary += '\n Cell Measures:\n'
summary += '\n'.join(cell_measure_summary)
#
# Generate textual summary of cube scalar coordinates.
#
scalar_summary = []
if scalar_coords:
for coord in scalar_coords:
if (coord.units in ['1', 'no_unit', 'unknown'] or
coord.units.is_time_reference()):
unit = ''
else:
unit = ' {!s}'.format(coord.units)
# Format cell depending on type of point and whether it
# has a bound
with iris.FUTURE.context(cell_datetime_objects=False):
coord_cell = coord.cell(0)
if isinstance(coord_cell.point, six.string_types):
# Indent string type coordinates
coord_cell_split = [iris.util.clip_string(str(item))
for item in
coord_cell.point.split('\n')]
line_sep = '\n{pad:{width}}'.format(
pad=' ', width=indent + len(coord.name()) + 2)
coord_cell_str = line_sep.join(coord_cell_split) + unit
else:
# Human readable times
if coord.units.is_time_reference():
coord_cell_cpoint = coord.units.num2date(
coord_cell.point)
if coord_cell.bound is not None:
coord_cell_cbound = coord.units.num2date(
coord_cell.bound)
else:
coord_cell_cpoint = coord_cell.point
coord_cell_cbound = coord_cell.bound
coord_cell_str = '{!s}{}'.format(coord_cell_cpoint,
unit)
if coord_cell.bound is not None:
bound = '({})'.format(', '.join(str(val) for
val in coord_cell_cbound))
coord_cell_str += ', bound={}{}'.format(bound,
unit)
scalar_summary.append('{pad:{width}}{name}: {cell}'.format(
pad=' ', width=indent, name=coord.name(),
cell=coord_cell_str))
# Interleave any extra lines that are needed to distinguish
# the coordinates.
scalar_summary = self._summary_extra(scalar_coords,
scalar_summary,
extra_indent)
summary += '\n Scalar coordinates:\n' + '\n'.join(
scalar_summary)
#
# Generate summary of cube's invalid coordinates.
#
if invalid_coords:
invalid_summary = []
for coord in invalid_coords:
invalid_summary.append(
'%*s%s' % (indent, ' ', coord.name()))
# Interleave any extra lines that are needed to distinguish the
# coordinates.
invalid_summary = self._summary_extra(
invalid_coords, invalid_summary, extra_indent)
summary += '\n Invalid coordinates:\n' + \
'\n'.join(invalid_summary)
# cell measures
scalar_cell_measures = [cm for cm in self.cell_measures()
if cm.shape == (1,)]
if scalar_cell_measures:
summary += '\n Scalar cell measures:\n'
scalar_cms = [' {}'.format(cm.name())
for cm in scalar_cell_measures]
summary += '\n'.join(scalar_cms)
#
# Generate summary of cube attributes.
#
if self.attributes:
attribute_lines = []
for name, value in sorted(six.iteritems(self.attributes)):
value = iris.util.clip_string(six.text_type(value))
line = u'{pad:{width}}{name}: {value}'.format(pad=' ',
width=indent,
name=name,
value=value)
attribute_lines.append(line)
summary += '\n Attributes:\n' + '\n'.join(attribute_lines)
#
# Generate summary of cube cell methods
#
if self.cell_methods:
summary += '\n Cell methods:\n'
cm_lines = []
for cm in self.cell_methods:
cm_lines.append('%*s%s' % (indent, ' ', str(cm)))
summary += '\n'.join(cm_lines)
# Construct the final cube summary.
summary = cube_header + summary
return summary
def assert_valid(self):
    """
    Deprecated no-op retained for backwards compatibility; returns None.

    .. deprecated:: 0.8

    """
    message = 'Cube.assert_valid() has been deprecated.'
    warn_deprecated(message)
def __str__(self):
    # Equivalent of six's python_2_unicode_compatible decorator, except
    # that we need errors='replace' when encoding under Python 2.
    text = self.summary()
    if not six.PY3:
        text = text.encode(errors='replace')
    return text
def __unicode__(self):
    # Python 2 text conversion: the summary is already a text string.
    return self.summary()
def __repr__(self):
    # A one-line form of the summary, e.g. "<iris 'Cube' of air_temp...>".
    short_summary = self.summary(shorten=True, name_padding=1)
    return "<iris 'Cube' of %s>" % short_summary
def __iter__(self):
    # Cubes deliberately do not support direct iteration; use
    # slices()/slices_over() to iterate over subcubes instead.
    raise TypeError('Cube is not iterable')
def __getitem__(self, keys):
    """
    Cube indexing (through use of square bracket notation) has been
    implemented at the data level. That is, the indices provided to this
    method should be aligned to the data of the cube, and thus the indices
    requested must be applicable directly to the cube.data attribute. All
    metadata will be subsequently indexed appropriately.
    """
    # turn the keys into a full slice spec (all dims)
    full_slice = iris.util._build_full_slice_given_keys(keys,
                                                        len(self.shape))

    # make indexing on the cube column based by using the
    # column_slices_generator (potentially requires slicing the data
    # multiple times)
    dimension_mapping, slice_gen = iris.util.column_slices_generator(
        full_slice, len(self.shape))

    def new_coord_dims(coord_):
        # Map a coordinate's old data dimensions to their new positions,
        # dropping any dimension removed entirely by the slicing
        # (dimension_mapping maps removed dims to None).
        return [dimension_mapping[d]
                for d in self.coord_dims(coord_)
                if dimension_mapping[d] is not None]

    def new_cell_measure_dims(cm_):
        # As new_coord_dims, but for cell measures.
        return [dimension_mapping[d]
                for d in self.cell_measure_dims(cm_)
                if dimension_mapping[d] is not None]

    # Apply the first slice to the data; an absent first slice means the
    # keys select everything, so fall back to a deep copy of the payload.
    try:
        first_slice = next(slice_gen)
    except StopIteration:
        first_slice = None

    if first_slice is not None:
        data = self._my_data[first_slice]
    else:
        data = copy.deepcopy(self._my_data)

    # Apply any remaining column slices one at a time.
    for other_slice in slice_gen:
        data = data[other_slice]

    # We don't want a view of the data, so take a copy of it if it's
    # not already our own.
    if isinstance(data, biggus.Array) or not data.flags['OWNDATA']:
        data = copy.deepcopy(data)

    # We can turn a masked array into a normal array if it's full.
    if isinstance(data, ma.core.MaskedArray):
        if ma.count_masked(data) == 0:
            data = data.filled()

    # Make the new cube slice
    cube = Cube(data)
    cube.metadata = copy.deepcopy(self.metadata)

    # Record a mapping from old coordinate IDs to new coordinates,
    # for subsequent use in creating updated aux_factories.
    coord_mapping = {}

    # Slice the coords
    for coord in self.aux_coords:
        coord_keys = tuple([full_slice[dim] for dim in
                            self.coord_dims(coord)])
        try:
            new_coord = coord[coord_keys]
        except ValueError:
            # TODO make this except more specific to catch monotonic error
            # Attempt to slice it by converting to AuxCoord first
            new_coord = iris.coords.AuxCoord.from_coord(coord)[coord_keys]
        cube.add_aux_coord(new_coord, new_coord_dims(coord))
        coord_mapping[id(coord)] = new_coord

    for coord in self.dim_coords:
        coord_keys = tuple([full_slice[dim] for dim in
                            self.coord_dims(coord)])
        new_dims = new_coord_dims(coord)
        # Try/Catch to handle slicing that makes the points/bounds
        # non-monotonic
        try:
            new_coord = coord[coord_keys]
            if not new_dims:
                # If the associated dimension has been sliced so the coord
                # is a scalar move the coord to the aux_coords container
                cube.add_aux_coord(new_coord, new_dims)
            else:
                cube.add_dim_coord(new_coord, new_dims)
        except ValueError:
            # TODO make this except more specific to catch monotonic error
            # Attempt to slice it by converting to AuxCoord first
            new_coord = iris.coords.AuxCoord.from_coord(coord)[coord_keys]
            cube.add_aux_coord(new_coord, new_dims)
        coord_mapping[id(coord)] = new_coord

    # Rebuild any derived (factory) coordinates from the sliced inputs.
    for factory in self.aux_factories:
        cube.add_aux_factory(factory.updated(coord_mapping))

    # slice the cell measures and add them to the cube
    for cellmeasure in self.cell_measures():
        dims = self.cell_measure_dims(cellmeasure)
        cm_keys = tuple([full_slice[dim] for dim in dims])
        new_cm = cellmeasure[cm_keys]
        cube.add_cell_measure(new_cm,
                              new_cell_measure_dims(cellmeasure))

    return cube
def subset(self, coord):
    """
    Get a subset of the cube by providing the desired resultant
    coordinate. If the coordinate provided applies to the whole cube; the
    whole cube is returned. As such, the operation is not strict.
    """
    if not isinstance(coord, iris.coords.Coord):
        raise ValueError('coord_to_extract must be a valid Coord.')

    # Locate the matching coordinate on this cube.
    coord_to_extract = self.coord(coord)

    if coord_to_extract in self.aux_coords and \
            len(coord_to_extract.points) == 1:
        # Scalar aux coordinate: a single point cannot be subset, so
        # return the whole cube when the scalar coords intersect,
        # otherwise None.
        indices = coord_to_extract.intersect(coord, return_indices=True)
        result = self if len(indices) else None
    else:
        if len(self.coord_dims(coord_to_extract)) > 1:
            msg = "Currently, only 1D coords can be used to subset a cube"
            raise iris.exceptions.CoordinateMultiDimError(msg)
        # The single data dimension described by this coordinate.
        coord_to_extract_dim = self.coord_dims(coord_to_extract)[0]
        # Indices where the requested coord intersects the cube's coord.
        coord_indices = coord_to_extract.intersect(coord,
                                                   return_indices=True)
        # A whole-cube slice, restricted along the relevant dimension
        # to just the intersecting indices.
        full_slice = [slice(None, None)] * len(self.shape)
        full_slice[coord_to_extract_dim] = coord_indices
        result = self[tuple(full_slice)]
    return result
def extract(self, constraint):
    """
    Filter the cube by the given constraint using
    :meth:`iris.Constraint.extract` method.
    """
    # Normalise the argument to a proper Constraint, then delegate.
    normalised = iris._constraints.as_constraint(constraint)
    return normalised.extract(self)
def intersection(self, *args, **kwargs):
    """
    Return the intersection of the cube with specified coordinate
    ranges.

    Coordinate ranges can be specified as:

    (a) instances of :class:`iris.coords.CoordExtent`.

    (b) keyword arguments, where the keyword name specifies the name
        of the coordinate (as defined in :meth:`iris.cube.Cube.coords()`)
        and the value defines the corresponding range of coordinate
        values as a tuple. The tuple must contain two, three, or four
        items corresponding to: (minimum, maximum, min_inclusive,
        max_inclusive). Where the items are defined as:

        * minimum
            The minimum value of the range to select.

        * maximum
            The maximum value of the range to select.

        * min_inclusive
            If True, coordinate values equal to `minimum` will be included
            in the selection. Default is True.

        * max_inclusive
            If True, coordinate values equal to `maximum` will be included
            in the selection. Default is True.

    To perform an intersection that ignores any bounds on the coordinates,
    set the optional keyword argument *ignore_bounds* to True. Defaults to
    False.

    .. note::

        For ranges defined over "circular" coordinates (i.e. those
        where the `units` attribute has a modulus defined) the cube
        will be "rolled" to fit where neccesary.

    .. warning::

        Currently this routine only works with "circular"
        coordinates (as defined in the previous note.)

    For example::

        >>> import iris
        >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
        >>> print(cube.coord('longitude').points[::10])
        [   0.           37.49999237   74.99998474  112.49996948   \
149.99996948
          187.49995422  224.99993896  262.49993896  299.99993896   \
337.49990845]
        >>> subset = cube.intersection(longitude=(30, 50))
        >>> print(subset.coord('longitude').points)
        [ 33.74999237  37.49999237  41.24998856  44.99998856  48.74998856]
        >>> subset = cube.intersection(longitude=(-10, 10))
        >>> print(subset.coord('longitude').points)
        [-7.50012207 -3.75012207  0.          3.75        7.5       ]

    Returns:
        A new :class:`~iris.cube.Cube` giving the subset of the cube
        which intersects with the requested coordinate intervals.

    """
    ignore_bounds = kwargs.pop('ignore_bounds', False)
    # Apply each requested extent in turn, successively narrowing the
    # result cube along the corresponding coordinate.
    result = self
    for extent in args:
        result = result._intersect(*extent, ignore_bounds=ignore_bounds)
    for coord_name, extent in six.iteritems(kwargs):
        result = result._intersect(coord_name, *extent,
                                   ignore_bounds=ignore_bounds)
    return result
def _intersect(self, name_or_coord, minimum, maximum,
               min_inclusive=True, max_inclusive=True,
               ignore_bounds=False):
    # Single-coordinate implementation behind Cube.intersection().
    # Only 1-D coordinates whose units define a modulus are supported
    # (enforced by the checks below).
    coord = self.coord(name_or_coord)
    if coord.ndim != 1:
        raise iris.exceptions.CoordinateMultiDimError(coord)
    if coord.nbounds not in (0, 2):
        raise ValueError('expected 0 or 2 bound values per cell')
    if minimum > maximum:
        raise ValueError('minimum greater than maximum')
    modulus = coord.units.modulus
    if modulus is None:
        raise ValueError('coordinate units with no modulus are not yet'
                         ' supported')
    subsets, points, bounds = self._intersect_modulus(coord,
                                                      minimum, maximum,
                                                      min_inclusive,
                                                      max_inclusive,
                                                      ignore_bounds)

    # By this point we have either one or two subsets along the relevant
    # dimension. If it's just one subset (which might be a slice or an
    # unordered collection of indices) we can simply index the cube
    # and we're done. If it's two subsets we need to stitch the two
    # pieces together.
    # subsets provides a way of slicing the coordinates to ensure that
    # they remain contiguous. In doing so, this can mean
    # transforming the data (this stitching together of two separate
    # pieces).
    def make_chunk(key):
        # Slice out one contiguous piece of the cube and overwrite the
        # intersection coordinate with the wrapped points/bounds.
        chunk = self[key_tuple_prefix + (key,)]
        chunk_coord = chunk.coord(coord)
        chunk_coord.points = points[(key,)]
        if chunk_coord.has_bounds():
            chunk_coord.bounds = bounds[(key,)]
        return chunk

    dim, = self.coord_dims(coord)
    key_tuple_prefix = (slice(None),) * dim
    chunks = [make_chunk(key) for key in subsets]
    if len(chunks) == 1:
        result = chunks[0]
    else:
        # Two chunks: concatenate the data (lazily where possible)
        # along the intersection dimension and rebuild the metadata.
        if self.has_lazy_data():
            data = biggus.LinearMosaic([chunk.lazy_data()
                                        for chunk in chunks],
                                       dim)
        else:
            module = ma if ma.isMaskedArray(self.data) else np
            data = module.concatenate([chunk.data for chunk in chunks],
                                      dim)
        result = iris.cube.Cube(data)
        result.metadata = copy.deepcopy(self.metadata)

        # Record a mapping from old coordinate IDs to new coordinates,
        # for subsequent use in creating updated aux_factories.
        coord_mapping = {}

        def create_coords(src_coords, add_coord):
            # Add copies of the source coordinates, selecting
            # the appropriate subsets out of coordinates which
            # share the intersection dimension.
            preserve_circular = (min_inclusive and max_inclusive and
                                 abs(maximum - minimum) == modulus)
            for src_coord in src_coords:
                dims = self.coord_dims(src_coord)
                if dim in dims:
                    dim_within_coord = dims.index(dim)
                    points = np.concatenate([chunk.coord(src_coord).points
                                             for chunk in chunks],
                                            dim_within_coord)
                    if src_coord.has_bounds():
                        bounds = np.concatenate(
                            [chunk.coord(src_coord).bounds
                             for chunk in chunks],
                            dim_within_coord)
                    else:
                        bounds = None
                    result_coord = src_coord.copy(points=points,
                                                  bounds=bounds)
                    # A full-modulus intersection keeps the coordinate
                    # circular; any narrower range breaks circularity.
                    circular = getattr(result_coord, 'circular', False)
                    if circular and not preserve_circular:
                        result_coord.circular = False
                else:
                    result_coord = src_coord.copy()
                add_coord(result_coord, dims)
                coord_mapping[id(src_coord)] = result_coord

        create_coords(self.dim_coords, result.add_dim_coord)
        create_coords(self.aux_coords, result.add_aux_coord)
        for factory in self.aux_factories:
            result.add_aux_factory(factory.updated(coord_mapping))
    return result
def _intersect_derive_subset(self, coord, points, bounds, inside_indices):
    # Return the subsets, i.e. the means to allow the slicing of
    # coordinates to ensure that they remain contiguous.
    modulus = coord.units.modulus
    # Number of whole-modulus shifts the wrap applied to each selected
    # point; a non-zero step between neighbours marks a wrap boundary.
    delta = coord.points[inside_indices] - points[inside_indices]
    step = np.rint(np.diff(delta) / modulus)
    non_zero_step_indices = np.nonzero(step)[0]

    def dim_coord_subset():
        """
        Derive the subset for dimension coordinates.

        Ensure that we do not wrap if blocks are at the very edge. That
        is, if the very edge is wrapped and corresponds to base + period,
        stop this unnecessary wraparound.

        """
        # A contiguous block at the start and another at the end.
        # (NB. We can't have more than two blocks because we've already
        # restricted the coordinate's range to its modulus).
        end_of_first_chunk = non_zero_step_indices[0]
        index_of_second_chunk = inside_indices[end_of_first_chunk + 1]
        final_index = points.size - 1

        # Condition1: The two blocks don't themselves wrap
        # (inside_indices is contiguous).
        # Condition2: Are we chunked at either extreme edge.
        edge_wrap = ((index_of_second_chunk ==
                      inside_indices[end_of_first_chunk] + 1) and
                     index_of_second_chunk in (final_index, 1))
        subsets = None
        if edge_wrap:
            # Increasing coord
            if coord.points[-1] > coord.points[0]:
                index_end = -1
                index_start = 0
            # Decreasing coord
            else:
                index_end = 0
                index_start = -1

            # Unwrap points and bounds (if present and equal base + period)
            if bounds is not None:
                edge_equal_base_period = (
                    np.isclose(coord.bounds[index_end, index_end],
                               coord.bounds[index_start, index_start] +
                               modulus))
                if edge_equal_base_period:
                    bounds[index_end, :] = coord.bounds[index_end, :]
            else:
                edge_equal_base_period = (
                    np.isclose(coord.points[index_end],
                               coord.points[index_start] +
                               modulus))
            if edge_equal_base_period:
                # Restore the original edge value and treat the whole
                # selection as one contiguous block.
                points[index_end] = coord.points[index_end]
                subsets = [slice(inside_indices[0],
                                 inside_indices[-1] + 1)]

        # Either no edge wrap or edge wrap != base + period
        # i.e. derive subset without alteration
        if subsets is None:
            subsets = [
                slice(index_of_second_chunk, None),
                slice(None, inside_indices[end_of_first_chunk] + 1)
            ]

        return subsets

    if isinstance(coord, iris.coords.DimCoord):
        if non_zero_step_indices.size:
            subsets = dim_coord_subset()
        else:
            # A single, contiguous block.
            subsets = [slice(inside_indices[0], inside_indices[-1] + 1)]
    else:
        # An AuxCoord could have its values in an arbitrary
        # order, and hence a range of values can select an
        # arbitrary subset. Also, we want to preserve the order
        # from the original AuxCoord. So we just use the indices
        # directly.
        subsets = [inside_indices]
    return subsets
def _intersect_modulus(self, coord, minimum, maximum, min_inclusive,
                       max_inclusive, ignore_bounds):
    # Wrap the coordinate's points/bounds into [minimum, minimum+modulus)
    # and return (subsets, points, bounds) where subsets indexes the
    # cells that fall inside [minimum, maximum].
    modulus = coord.units.modulus
    if maximum > minimum + modulus:
        raise ValueError("requested range greater than coordinate's"
                         " unit's modulus")
    if coord.has_bounds():
        values = coord.bounds
    else:
        values = coord.points
    if values.max() > values.min() + modulus:
        raise ValueError("coordinate's range greater than coordinate's"
                         " unit's modulus")
    # Comparison operators honouring the inclusive/exclusive end points.
    min_comp = np.less_equal if min_inclusive else np.less
    max_comp = np.less_equal if max_inclusive else np.less

    if coord.has_bounds():
        bounds = wrap_lons(coord.bounds, minimum, modulus)
        if ignore_bounds:
            # Select on wrapped points only.
            points = wrap_lons(coord.points, minimum, modulus)
            inside_indices, = np.where(
                np.logical_and(min_comp(minimum, points),
                               max_comp(points, maximum)))
        else:
            # A cell is selected if either of its bounds is in range.
            inside = np.logical_and(min_comp(minimum, bounds),
                                    max_comp(bounds, maximum))
            inside_indices, = np.where(np.any(inside, axis=1))

        # To ensure that bounds (and points) of matching cells aren't
        # "scrambled" by the wrap operation we detect split cells that
        # straddle the wrap point and choose a new wrap point which avoids
        # split cells.
        # For example: the cell [349.875, 350.4375] wrapped at -10 would
        # become [349.875, -9.5625] which is no longer valid. The lower
        # cell bound value (and possibly associated point) are
        # recalculated so that they are consistent with the extended
        # wrapping scheme which moves the wrap point to the correct lower
        # bound value (-10.125) thus resulting in the cell no longer
        # being split. For bounds which may extend exactly the length of
        # the modulus, we simply preserve the point to bound difference,
        # and call the new bounds = the new points + the difference.
        pre_wrap_delta = np.diff(coord.bounds[inside_indices])
        post_wrap_delta = np.diff(bounds[inside_indices])
        close_enough = np.allclose(pre_wrap_delta, post_wrap_delta)
        if not close_enough:
            split_cell_indices, _ = np.where(pre_wrap_delta !=
                                             post_wrap_delta)

            # Recalculate the extended minimum.
            indices = inside_indices[split_cell_indices]
            cells = bounds[indices]
            cells_delta = np.diff(coord.bounds[indices])

            # Watch out for ascending/descending bounds
            if cells_delta[0, 0] > 0:
                cells[:, 0] = cells[:, 1] - cells_delta[:, 0]
                minimum = np.min(cells[:, 0])
            else:
                cells[:, 1] = cells[:, 0] + cells_delta[:, 0]
                minimum = np.min(cells[:, 1])

        # Re-wrap the points at the (possibly extended) minimum and
        # rebuild bounds by preserving each point-to-bound difference.
        points = wrap_lons(coord.points, minimum, modulus)

        bound_diffs = coord.points[:, np.newaxis] - coord.bounds
        bounds = points[:, np.newaxis] - bound_diffs
    else:
        points = wrap_lons(coord.points, minimum, modulus)
        bounds = None
        inside_indices, = np.where(
            np.logical_and(min_comp(minimum, points),
                           max_comp(points, maximum)))

    # Determine the subsets
    subsets = self._intersect_derive_subset(coord, points, bounds,
                                            inside_indices)
    return subsets, points, bounds
def _as_list_of_coords(self, names_or_coords):
    """
    Convert a name, coord, or list of names/coords to a list of coords.
    """
    # Promote a single name/coord to a one-element list.
    if _is_single_item(names_or_coords):
        names_or_coords = [names_or_coords]

    coords = []
    for item in names_or_coords:
        if not isinstance(item, (six.string_types, iris.coords.Coord)):
            # Don't know how to handle this type
            msg = ("Don't know how to handle coordinate of type %s. "
                   "Ensure all coordinates are of type six.string_types "
                   "or iris.coords.Coord.") % (type(item), )
            raise TypeError(msg)
        coords.append(self.coord(item))
    return coords
def slices_over(self, ref_to_slice):
    """
    Return an iterator of all subcubes along a given coordinate or
    dimension index, or multiple of these.

    Args:

    * ref_to_slice (string, coord, dimension index or a list of these):
        Determines which dimensions will be iterated along (i.e. the
        dimensions that are not returned in the subcubes).
        A mix of input types can also be provided.

    Returns:
        An iterator of subcubes.

    For example, to get all subcubes along the time dimension::

        for sub_cube in cube.slices_over('time'):
            print(sub_cube)

    .. seealso:: :meth:`iris.cube.Cube.slices`.

    .. note::

        The order of dimension references to slice along does not affect
        the order of returned items in the iterator; instead the ordering
        is based on the fastest-changing dimension.

    """
    # Required to handle a mix between types.
    if _is_single_item(ref_to_slice):
        ref_to_slice = [ref_to_slice]

    slice_dims = set()
    for ref in ref_to_slice:
        try:
            # Attempt to handle the reference as a coordinate (or name).
            coord, = self._as_list_of_coords(ref)
        except TypeError:
            # Otherwise treat it as a dimension index.
            dim = int(ref)
            # Valid dimensions are 0 .. ndim-1.  The previous check
            # (`dim > self.ndim`) wrongly accepted dim == ndim, which the
            # set subtraction below then discarded silently instead of
            # raising the advertised error.
            if dim < 0 or dim >= self.ndim:
                msg = ('Requested an iterator over a dimension ({}) '
                       'which does not exist.'.format(dim))
                raise ValueError(msg)
            # Convert coord index to a single-element list to prevent a
            # TypeError when `slice_dims.update` is called with it.
            dims = [dim]
        else:
            dims = self.coord_dims(coord)
        slice_dims.update(dims)

    # Iterate over every dimension that was *not* requested.
    all_dims = set(range(self.ndim))
    opposite_dims = list(all_dims - slice_dims)
    return self.slices(opposite_dims, ordered=False)
def slices(self, ref_to_slice, ordered=True):
    """
    Return an iterator of all subcubes given the coordinates or dimension
    indices desired to be present in each subcube.

    Args:

    * ref_to_slice (string, coord, dimension index or a list of these):
        Determines which dimensions will be returned in the subcubes (i.e.
        the dimensions that are not iterated over).
        A mix of input types can also be provided. They must all be
        orthogonal (i.e. point to different dimensions).

    Kwargs:

    * ordered: if True, the order which the coords to slice or data_dims
        are given will be the order in which they represent the data in
        the resulting cube slices. If False, the order will follow that of
        the source cube. Default is True.

    Returns:
        An iterator of subcubes.

    For example, to get all 2d longitude/latitude subcubes from a
    multi-dimensional cube::

        for sub_cube in cube.slices(['longitude', 'latitude']):
            print(sub_cube)

    .. seealso:: :meth:`iris.cube.Cube.slices_over`.

    """
    if not isinstance(ordered, bool):
        raise TypeError("'ordered' argument to slices must be boolean.")

    # Required to handle a mix between types
    if _is_single_item(ref_to_slice):
        ref_to_slice = [ref_to_slice]

    dim_to_slice = []
    for ref in ref_to_slice:
        try:
            # attempt to handle as coordinate
            coord = self._as_list_of_coords(ref)[0]
            dims = self.coord_dims(coord)
            if not dims:
                msg = ('Requested an iterator over a coordinate ({}) '
                       'which does not describe a dimension.')
                msg = msg.format(coord.name())
                raise ValueError(msg)
            dim_to_slice.extend(dims)
        except TypeError:
            try:
                # attempt to handle as dimension index
                dim = int(ref)
            except ValueError:
                raise ValueError('{} Incompatible type {} for '
                                 'slicing'.format(ref, type(ref)))
            # Valid dimensions are 0 .. ndim-1.  The previous check
            # (`dim > self.ndim`) wrongly accepted dim == ndim, which
            # then failed below with an unhelpful IndexError when
            # writing `dims_index[d] = 1`.
            if dim < 0 or dim >= self.ndim:
                msg = ('Requested an iterator over a dimension ({}) '
                       'which does not exist.'.format(dim))
                raise ValueError(msg)
            dim_to_slice.append(dim)

    if len(set(dim_to_slice)) != len(dim_to_slice):
        msg = 'The requested coordinates are not orthogonal.'
        raise ValueError(msg)

    # Create a list with of the shape of our data
    dims_index = list(self.shape)

    # Set the dimensions which have been requested to length 1
    for d in dim_to_slice:
        dims_index[d] = 1

    return _SliceIterator(self, dims_index, dim_to_slice, ordered)
def transpose(self, new_order=None):
    """
    Re-order the data dimensions of the cube in-place.

    new_order - list of ints, optional
        By default, reverse the dimensions, otherwise permute the
        axes according to the values given.

    .. note:: If defined, new_order must span all of the data dimensions.

    Example usage::

        # put the second dimension first, followed by the third dimension,
        and finally put the first dimension third cube.transpose([1, 2, 0])

    """
    if new_order is None:
        new_order = np.arange(self.ndim)[::-1]
    elif len(new_order) != self.ndim:
        raise ValueError('Incorrect number of dimensions.')

    # Transpose the data payload (lazy or realised) in-place.
    if self.has_lazy_data():
        self._my_data = self.lazy_data().transpose(new_order)
    else:
        self._my_data = self.data.transpose(new_order)

    # Map each old dimension number to its new position.
    dim_mapping = {src: dest for dest, src in enumerate(new_order)}

    # Remap the data dimension of every dimension coordinate ...
    self._dim_coords_and_dims = [
        (coord, dim_mapping[dim])
        for coord, dim in self._dim_coords_and_dims]
    # ... and of every auxiliary coordinate.
    self._aux_coords_and_dims = [
        (coord, tuple(dim_mapping[dim] for dim in dims))
        for coord, dims in self._aux_coords_and_dims]
def xml(self, checksum=False, order=True, byteorder=True):
    """
    Returns a fully valid CubeML string representation of the Cube.
    """
    # Build a DOM document whose single root is this cube's element.
    doc = Document()

    cube_xml_element = self._xml_element(doc, checksum=checksum,
                                         order=order,
                                         byteorder=byteorder)
    cube_xml_element.setAttribute("xmlns", XML_NAMESPACE_URI)
    doc.appendChild(cube_xml_element)

    # Print our newly created XML
    return doc.toprettyxml(indent="  ")
def _xml_element(self, doc, checksum=False, order=True, byteorder=True):
    # Build and return the <cube> DOM element used by Cube.xml():
    # names/units as attributes, then child elements for attributes,
    # coords, cell methods and the data description.
    cube_xml_element = doc.createElement("cube")

    if self.standard_name:
        cube_xml_element.setAttribute('standard_name', self.standard_name)
    if self.long_name:
        cube_xml_element.setAttribute('long_name', self.long_name)
    if self.var_name:
        cube_xml_element.setAttribute('var_name', self.var_name)
    cube_xml_element.setAttribute('units', str(self.units))

    if self.attributes:
        attributes_element = doc.createElement('attributes')
        for name in sorted(six.iterkeys(self.attributes)):
            attribute_element = doc.createElement('attribute')
            attribute_element.setAttribute('name', name)

            value = self.attributes[name]
            # Strict check because we don't want namedtuples.
            if type(value) in (list, tuple):
                delimiter = '[]' if isinstance(value, list) else '()'
                value = ', '.join(("'%s'"
                                   if isinstance(item, six.string_types)
                                   else '%s') % (item, ) for item in value)
                value = delimiter[0] + value + delimiter[1]
            else:
                value = str(value)

            attribute_element.setAttribute('value', value)
            attributes_element.appendChild(attribute_element)

        cube_xml_element.appendChild(attributes_element)

    # Coordinates are emitted in name order for a stable representation.
    coords_xml_element = doc.createElement("coords")
    for coord in sorted(self.coords(), key=lambda coord: coord.name()):
        # make a "cube coordinate" element which holds the dimensions (if
        # appropriate) which itself will have a sub-element of the
        # coordinate instance itself.
        cube_coord_xml_element = doc.createElement("coord")
        coords_xml_element.appendChild(cube_coord_xml_element)

        dims = list(self.coord_dims(coord))
        if dims:
            cube_coord_xml_element.setAttribute("datadims", repr(dims))

        coord_xml_element = coord.xml_element(doc)
        cube_coord_xml_element.appendChild(coord_xml_element)
    cube_xml_element.appendChild(coords_xml_element)

    # cell methods (no sorting!)
    cell_methods_xml_element = doc.createElement("cellMethods")
    for cm in self.cell_methods:
        cell_method_xml_element = cm.xml_element(doc)
        cell_methods_xml_element.appendChild(cell_method_xml_element)
    cube_xml_element.appendChild(cell_methods_xml_element)

    data_xml_element = doc.createElement("data")
    data_xml_element.setAttribute("shape", str(self.shape))

    # NB. Getting a checksum triggers any deferred loading,
    # in which case it also has the side-effect of forcing the
    # byte order to be native.
    if checksum:
        data = self.data

        # Ensure consistent memory layout for checksums.
        def normalise(data):
            data = np.ascontiguousarray(data)
            # Force little-endian layout so checksums are
            # platform-independent.
            if data.dtype.newbyteorder('<') != data.dtype:
                data = data.byteswap(False)
                data.dtype = data.dtype.newbyteorder('<')
            return data

        if isinstance(data, ma.MaskedArray):
            # Fill in masked values to avoid the checksum being
            # sensitive to unused numbers. Use a fixed value so
            # a change in fill_value doesn't affect the
            # checksum.
            crc = '0x%08x' % (
                zlib.crc32(normalise(data.filled(0))) & 0xffffffff, )
            data_xml_element.setAttribute("checksum", crc)
            if ma.is_masked(data):
                crc = '0x%08x' % (
                    zlib.crc32(normalise(data.mask)) & 0xffffffff, )
            else:
                crc = 'no-masked-elements'
            data_xml_element.setAttribute("mask_checksum", crc)
            data_xml_element.setAttribute('fill_value',
                                          str(data.fill_value))
        else:
            crc = '0x%08x' % (zlib.crc32(normalise(data)) & 0xffffffff, )
            data_xml_element.setAttribute("checksum", crc)
    elif self.has_lazy_data():
        data_xml_element.setAttribute("state", "deferred")
    else:
        data_xml_element.setAttribute("state", "loaded")

    # Add the dtype, and also the array and mask orders if the
    # data is loaded.
    if not self.has_lazy_data():
        data = self.data
        dtype = data.dtype

        def _order(array):
            # Report 'C'/'F' contiguity, or '' when neither applies.
            order = ''
            if array.flags['C_CONTIGUOUS']:
                order = 'C'
            elif array.flags['F_CONTIGUOUS']:
                order = 'F'
            return order
        if order:
            data_xml_element.setAttribute('order', _order(data))

        # NB. dtype.byteorder can return '=', which is bad for
        # cross-platform consistency - so we use dtype.str
        # instead.
        if byteorder:
            array_byteorder = {'>': 'big', '<': 'little'}.get(dtype.str[0])
            if array_byteorder is not None:
                data_xml_element.setAttribute('byteorder', array_byteorder)

        if order and isinstance(data, ma.core.MaskedArray):
            data_xml_element.setAttribute('mask_order',
                                          _order(data.mask))
    else:
        dtype = self.lazy_data().dtype
        data_xml_element.setAttribute('dtype', dtype.name)

    cube_xml_element.appendChild(data_xml_element)

    return cube_xml_element
def copy(self, data=None):
    """
    Return a deep copy of this cube.

    Kwargs:

    * data:
        Replace the data of the cube copy with the provided data payload.

    Returns:
        A copy instance of the :class:`Cube`.

    """
    # Delegate to _deepcopy with a fresh (empty) memo dictionary, so this
    # copy shares no deep-copied state with any previous copy operation.
    fresh_memo = {}
    return self._deepcopy(fresh_memo, data)
def __copy__(self):
    """Shallow copies of cubes are disallowed; deep-copy instead."""
    message = ("Cube shallow-copy not allowed. Use deepcopy() or "
               "Cube.copy()")
    raise copy.Error(message)
def __deepcopy__(self, memo):
    # Support copy.deepcopy() by delegating to the shared implementation
    # (also used by Cube.copy()), forwarding deepcopy's memo dict.
    return self._deepcopy(memo)
def _deepcopy(self, memo, data=None):
    # Common implementation behind Cube.copy() and __deepcopy__().
    #
    # * memo: the deepcopy memo dict (empty for Cube.copy()).
    # * data: optional replacement data payload; used as-is (not copied),
    #   but must match this cube's shape.
    if data is None:
        # Use a copy of the source cube data.
        if self.has_lazy_data():
            # Use copy.copy, as lazy arrays don't have a copy method.
            new_cube_data = copy.copy(self.lazy_data())
        else:
            # Do *not* use copy.copy, as NumPy 0-d arrays do that wrong.
            new_cube_data = self.data.copy()
    else:
        # Use the provided data (without copying it).
        if not isinstance(data, biggus.Array):
            data = np.asanyarray(data)

        if data.shape != self.shape:
            msg = 'Cannot copy cube with new data of a different shape ' \
                  '(slice or subset the cube first).'
            raise ValueError(msg)

        new_cube_data = data

    # Deep-copy the coordinate/dimension pairings for the new cube.
    new_dim_coords_and_dims = copy.deepcopy(self._dim_coords_and_dims,
                                            memo)
    new_aux_coords_and_dims = copy.deepcopy(self._aux_coords_and_dims,
                                            memo)

    # Record a mapping from old coordinate IDs to new coordinates,
    # for subsequent use in creating updated aux_factories.
    coord_mapping = {}
    for old_pair, new_pair in zip(self._dim_coords_and_dims,
                                  new_dim_coords_and_dims):
        coord_mapping[id(old_pair[0])] = new_pair[0]
    for old_pair, new_pair in zip(self._aux_coords_and_dims,
                                  new_aux_coords_and_dims):
        coord_mapping[id(old_pair[0])] = new_pair[0]

    new_cube = Cube(new_cube_data,
                    dim_coords_and_dims=new_dim_coords_and_dims,
                    aux_coords_and_dims=new_aux_coords_and_dims)
    new_cube.metadata = copy.deepcopy(self.metadata, memo)

    # Rebuild the derived-coordinate factories so they reference the
    # copied coordinates rather than the originals.
    for factory in self.aux_factories:
        new_cube.add_aux_factory(factory.updated(coord_mapping))

    return new_cube
# START OPERATOR OVERLOADS
def __eq__(self, other):
    """
    Cube equality: matching metadata, matching coordinates, and
    (approximately) matching data payloads.
    """
    if not isinstance(other, Cube):
        return NotImplemented
    # Compare the cube-level metadata first - it is the cheapest check.
    result = self.metadata == other.metadata
    if result:
        # Having checked the metadata, now check the coordinates.
        coord_comparison = iris.analysis.coord_comparison(self, other)
        # Any unequal coordinates (or mismatched data dimensions) mean
        # the cubes differ.
        result = not (coord_comparison['not_equal'] or
                      coord_comparison['non_equal_data_dimension'])
    if result:
        # Finally, check approximate data equality - NB. this loads the
        # data of both cubes if it has not already been loaded.
        result = np.all(np.abs(self.data - other.data) < 1e-8)
    return result
# Must supply __ne__, Python does not defer to __eq__ for negative equality
def __ne__(self, other):
    """Negation of __eq__, propagating NotImplemented unchanged."""
    equal = self.__eq__(other)
    if equal is NotImplemented:
        return equal
    return not equal
# Must supply __hash__, Python 3 does not enable it if __eq__ is defined
# This is necessary for merging, but probably shouldn't be used otherwise.
# See #962 and #1772.
def __hash__(self):
    # Identity-based hash: distinct cube objects hash differently even
    # when they compare equal via __eq__ (deliberate - see note above).
    return hash(id(self))
def __add__(self, other):
    # Elementwise addition via iris.analysis.maths.  NOTE(review):
    # 'ignore=True' presumably relaxes metadata checking -- confirm
    # against iris.analysis.maths.add.
    return iris.analysis.maths.add(self, other, ignore=True)

# Addition is commutative, so reuse __add__ for the reflected form.
__radd__ = __add__

def __sub__(self, other):
    return iris.analysis.maths.subtract(self, other, ignore=True)

# The remaining arithmetic operators delegate directly to the
# corresponding iris.analysis.maths functions (bound at class-creation
# time).  __div__ covers Python 2 classic division; __truediv__ covers
# Python 3 (and 'from __future__ import division') semantics.
__mul__ = iris.analysis.maths.multiply
__rmul__ = iris.analysis.maths.multiply
__div__ = iris.analysis.maths.divide
__truediv__ = iris.analysis.maths.divide
__pow__ = iris.analysis.maths.exponentiate
# END OPERATOR OVERLOADS
def add_history(self, string):
    """
    Append the given string, timestamped, to the cube's 'history'
    attribute (creating the attribute if it does not exist yet).

    .. deprecated:: 1.6
        Add/modify history metadata within
        attr:`~iris.cube.Cube.attributes` as needed.

    """
    warn_deprecated("Cube.add_history() has been deprecated - "
                    "please modify/create cube.attributes['history'] "
                    "as needed.")

    timestamp = datetime.datetime.now().strftime("%d/%m/%y %H:%M:%S")
    entry = '%s Iris: %s' % (timestamp, string)

    # Append to any existing history, otherwise start a new one.
    if 'history' in self.attributes:
        previous = self.attributes['history']
        self.attributes['history'] = '%s\n%s' % (previous, entry)
    else:
        self.attributes['history'] = entry
# START ANALYSIS ROUTINES
regridded = iris.util._wrap_function_for_method(
iris.analysis._interpolate_private.regrid,
"""
Returns a new cube with values derived from this cube on the
horizontal grid specified by the grid_cube.
.. deprecated:: 1.10
Please replace usage of :meth:`~Cube.regridded` with
:meth:`~Cube.regrid`. See :meth:`iris.analysis.interpolate.regrid`
for details of exact usage equivalents.
""")
# END ANALYSIS ROUTINES
def collapsed(self, coords, aggregator, **kwargs):
    """
    Collapse one or more dimensions over the cube given the coordinate/s
    and an aggregation.

    Examples of aggregations that may be used include
    :data:`~iris.analysis.COUNT` and :data:`~iris.analysis.MAX`.

    Weighted aggregations (:class:`iris.analysis.WeightedAggregator`) may
    also be supplied. These include :data:`~iris.analysis.MEAN` and
    sum :data:`~iris.analysis.SUM`.

    Weighted aggregations support an optional *weights* keyword argument.
    If set, this should be supplied as an array of weights whose shape
    matches the cube. Values for latitude-longitude area weights may be
    calculated using :func:`iris.analysis.cartography.area_weights`.

    Some Iris aggregators support "lazy" evaluation, meaning that
    cubes resulting from this method may represent data arrays which are
    not computed until the data is requested (e.g. via ``cube.data`` or
    ``iris.save``). If lazy evaluation exists for the given aggregator
    it will be used wherever possible when this cube's data is itself
    a deferred array.

    Args:

    * coords (string, coord or a list of strings/coords):
        Coordinate names/coordinates over which the cube should be
        collapsed.

    * aggregator (:class:`iris.analysis.Aggregator`):
        Aggregator to be applied for collapse operation.

    Kwargs:

    * kwargs:
        Aggregation function keyword arguments.

    Returns:
        Collapsed cube.

    For example:

        >>> import iris
        >>> import iris.analysis
        >>> path = iris.sample_data_path('ostia_monthly.nc')
        >>> cube = iris.load_cube(path)
        >>> new_cube = cube.collapsed('longitude', iris.analysis.MEAN)
        >>> print(new_cube)
        surface_temperature / (K)           (time: 54; latitude: 18)
             Dimension coordinates:
                  time                           x             -
                  latitude                       -             x
             Auxiliary coordinates:
                  forecast_reference_time        x             -
             Scalar coordinates:
                  forecast_period: 0 hours
                  longitude: 180.0 degrees, bound=(0.0, 360.0) degrees
             Attributes:
                  Conventions: CF-1.5
                  STASH: m01s00i024
             Cell methods:
                  mean: month, year
                  mean: longitude

    .. note::

        Some aggregations are not commutative and hence the order of
        processing is important i.e.::

            tmp = cube.collapsed('realization', iris.analysis.VARIANCE)
            result = tmp.collapsed('height', iris.analysis.VARIANCE)

        is not necessarily the same result as::

            tmp = cube.collapsed('height', iris.analysis.VARIANCE)
            result2 = tmp.collapsed('realization', iris.analysis.VARIANCE)

        Conversely operations which operate on more than one coordinate
        at the same time are commutative as they are combined internally
        into a single operation. Hence the order of the coordinates
        supplied in the list does not matter::

            cube.collapsed(['longitude', 'latitude'],
                           iris.analysis.VARIANCE)

        is the same (apart from the logically equivalent cell methods that
        may be created etc.) as::

            cube.collapsed(['latitude', 'longitude'],
                           iris.analysis.VARIANCE)

    .. _partially_collapse_multi-dim_coord:

    .. note::
        You cannot partially collapse a multi-dimensional coordinate. Doing
        so would result in a partial collapse of the multi-dimensional
        coordinate. Instead you must either:
             * collapse in a single operation all cube axes that the
               multi-dimensional coordinate spans,
             * remove the multi-dimensional coordinate from the cube before
               performing the collapse operation, or
             * not collapse the coordinate at all.

        Multi-dimensional derived coordinates will not prevent a successful
        collapse operation.

    """
    # Convert any coordinate names to coordinates
    coords = self._as_list_of_coords(coords)

    # Warn when a latitude coordinate is collapsed with a weighted
    # aggregator but no weighting is actually in use.
    if (isinstance(aggregator, iris.analysis.WeightedAggregator) and
            not aggregator.uses_weighting(**kwargs)):
        msg = "Collapsing spatial coordinate {!r} without weighting"
        lat_match = [coord for coord in coords
                     if 'latitude' in coord.name()]
        if lat_match:
            for coord in lat_match:
                warnings.warn(msg.format(coord.name()))

    # Determine the dimensions we need to collapse (and those we don't)
    if aggregator.cell_method == 'peak':
        dims_to_collapse = [list(self.coord_dims(coord))
                            for coord in coords]

        # Remove duplicate dimensions.
        new_dims = collections.OrderedDict.fromkeys(
            d for dim in dims_to_collapse for d in dim)
        # Reverse the dimensions so the order can be maintained when
        # reshaping the data.
        dims_to_collapse = list(new_dims)[::-1]
    else:
        dims_to_collapse = set()
        for coord in coords:
            dims_to_collapse.update(self.coord_dims(coord))

    if not dims_to_collapse:
        msg = 'Cannot collapse a dimension which does not describe any ' \
              'data.'
        raise iris.exceptions.CoordinateCollapseError(msg)

    untouched_dims = set(range(self.ndim)) - set(dims_to_collapse)

    # Remove the collapsed dimension(s) from the metadata
    # (indexing with 0 on each collapsed dimension yields a cube with
    # the right shape and scalar coords; its data is replaced below).
    indices = [slice(None, None)] * self.ndim
    for dim in dims_to_collapse:
        indices[dim] = 0
    collapsed_cube = self[tuple(indices)]

    # Collapse any coords that span the dimension(s) being collapsed
    for coord in self.dim_coords + self.aux_coords:
        coord_dims = self.coord_dims(coord)
        if set(dims_to_collapse).intersection(coord_dims):
            local_dims = [coord_dims.index(dim) for dim in
                          dims_to_collapse if dim in coord_dims]
            collapsed_cube.replace_coord(coord.collapsed(local_dims))

    untouched_dims = sorted(untouched_dims)

    # Record the axis(s) argument passed to 'aggregation', so the same is
    # passed to the 'update_metadata' function.
    collapse_axis = -1

    data_result = None

    # Perform the actual aggregation.
    if aggregator.cell_method == 'peak':
        # The PEAK aggregator must collapse each coordinate separately.
        untouched_shape = [self.shape[d] for d in untouched_dims]
        collapsed_shape = [self.shape[d] for d in dims_to_collapse]
        new_shape = untouched_shape + collapsed_shape

        array_dims = untouched_dims + dims_to_collapse
        unrolled_data = np.transpose(
            self.data, array_dims).reshape(new_shape)

        for dim in dims_to_collapse:
            unrolled_data = aggregator.aggregate(unrolled_data,
                                                 axis=-1,
                                                 **kwargs)
        data_result = unrolled_data

    # Perform the aggregation in lazy form if possible.
    elif (aggregator.lazy_func is not None and
          len(dims_to_collapse) == 1 and self.has_lazy_data()):
        # Use a lazy operation separately defined by the aggregator, based
        # on the cube lazy array.
        # NOTE: do not reform the data in this case, as 'lazy_aggregate'
        # accepts multiple axes (unlike 'aggregate').
        collapse_axis = dims_to_collapse
        try:
            data_result = aggregator.lazy_aggregate(self.lazy_data(),
                                                    collapse_axis,
                                                    **kwargs)
        except TypeError:
            # TypeError - when unexpected keywords passed through (such as
            # weights to mean)
            pass

    # If we weren't able to complete a lazy aggregation, compute it
    # directly now.
    if data_result is None:
        # Perform the (non-lazy) aggregation over the cube data
        # First reshape the data so that the dimensions being aggregated
        # over are grouped 'at the end' (i.e. axis=-1).
        dims_to_collapse = sorted(dims_to_collapse)

        end_size = reduce(operator.mul, (self.shape[dim] for dim in
                                         dims_to_collapse))
        untouched_shape = [self.shape[dim] for dim in untouched_dims]
        new_shape = untouched_shape + [end_size]
        dims = untouched_dims + dims_to_collapse
        unrolled_data = np.transpose(self.data, dims).reshape(new_shape)

        # Perform the same operation on the weights if applicable
        # (a view is taken so the caller's weights array is not reshaped).
        if kwargs.get("weights") is not None:
            weights = kwargs["weights"].view()
            kwargs["weights"] = np.transpose(weights,
                                             dims).reshape(new_shape)

        data_result = aggregator.aggregate(unrolled_data,
                                           axis=-1,
                                           **kwargs)
    aggregator.update_metadata(collapsed_cube, coords, axis=collapse_axis,
                               **kwargs)
    result = aggregator.post_process(collapsed_cube, data_result, coords,
                                     **kwargs)
    return result
def aggregated_by(self, coords, aggregator, **kwargs):
    """
    Perform aggregation over the cube given one or more "group
    coordinates".

    A "group coordinate" is a coordinate where repeating values represent a
    single group, such as a month coordinate on a daily time slice.
    Repeated values will form a group even if they are not consecutive.

    The group coordinates must all be over the same cube dimension. Each
    common value group identified over all the group-by coordinates is
    collapsed using the provided aggregator.

    Args:

    * coords (list of coord names or :class:`iris.coords.Coord` instances):
        One or more coordinates over which group aggregation is to be
        performed.
    * aggregator (:class:`iris.analysis.Aggregator`):
        Aggregator to be applied to each group.

    Kwargs:

    * kwargs:
        Aggregator and aggregation function keyword arguments.

    Returns:
        :class:`iris.cube.Cube`.

    .. note::

        This operation does not yet have support for lazy evaluation.

    For example:

        >>> import iris
        >>> import iris.analysis
        >>> import iris.coord_categorisation as cat
        >>> fname = iris.sample_data_path('ostia_monthly.nc')
        >>> cube = iris.load_cube(fname, 'surface_temperature')
        >>> cat.add_year(cube, 'time', name='year')
        >>> new_cube = cube.aggregated_by('year', iris.analysis.MEAN)
        >>> print(new_cube)
        surface_temperature / (K)           \
(time: 5; latitude: 18; longitude: 432)
             Dimension coordinates:
                  time                      \
     x            -              -
                  latitude                  \
     -            x              -
                  longitude                 \
     -            -              x
             Auxiliary coordinates:
                  forecast_reference_time   \
     x            -              -
                  year                      \
     x            -              -
             Scalar coordinates:
                  forecast_period: 0 hours
             Attributes:
                  Conventions: CF-1.5
                  STASH: m01s00i024
             Cell methods:
                  mean: month, year
                  mean: year

    """
    groupby_coords = []
    dimension_to_groupby = None

    # We can't handle weights
    if isinstance(aggregator, iris.analysis.WeightedAggregator) and \
            aggregator.uses_weighting(**kwargs):
        raise ValueError('Invalid Aggregation, aggregated_by() cannot use'
                         ' weights.')

    # Validate each group-by coordinate: it must be 1d, describe a data
    # dimension, and all must share the same single dimension.
    coords = self._as_list_of_coords(coords)
    for coord in sorted(coords, key=lambda coord: coord._as_defn()):
        if coord.ndim > 1:
            msg = 'Cannot aggregate_by coord %s as it is ' \
                  'multidimensional.' % coord.name()
            raise iris.exceptions.CoordinateMultiDimError(msg)
        dimension = self.coord_dims(coord)
        if not dimension:
            msg = 'Cannot group-by the coordinate "%s", as its ' \
                  'dimension does not describe any data.' % coord.name()
            raise iris.exceptions.CoordinateCollapseError(msg)
        if dimension_to_groupby is None:
            dimension_to_groupby = dimension[0]
        if dimension_to_groupby != dimension[0]:
            msg = 'Cannot group-by coordinates over different dimensions.'
            raise iris.exceptions.CoordinateCollapseError(msg)
        groupby_coords.append(coord)

    # Determine the other coordinates that share the same group-by
    # coordinate dimension.
    shared_coords = list(filter(
        lambda coord_: coord_ not in groupby_coords,
        self.coords(dimensions=dimension_to_groupby)))

    # Create the aggregation group-by instance.
    groupby = iris.analysis._Groupby(groupby_coords, shared_coords)

    # Create the resulting aggregate-by cube and remove the original
    # coordinates that are going to be groupedby.
    key = [slice(None, None)] * self.ndim
    # Generate unique index tuple key to maintain monotonicity.
    key[dimension_to_groupby] = tuple(range(len(groupby)))
    key = tuple(key)
    aggregateby_cube = self[key]
    for coord in groupby_coords + shared_coords:
        aggregateby_cube.remove_coord(coord)

    # Determine the group-by cube data shape.
    data_shape = list(self.shape + aggregator.aggregate_shape(**kwargs))
    data_shape[dimension_to_groupby] = len(groupby)

    # Aggregate the group-by data.
    cube_slice = [slice(None, None)] * len(data_shape)

    for i, groupby_slice in enumerate(groupby.group()):
        # Slice the cube with the group-by slice to create a group-by
        # sub-cube.
        cube_slice[dimension_to_groupby] = groupby_slice
        groupby_sub_cube = self[tuple(cube_slice)]
        # Perform the aggregation over the group-by sub-cube and
        # repatriate the aggregated data into the aggregate-by cube data.
        cube_slice[dimension_to_groupby] = i
        result = aggregator.aggregate(groupby_sub_cube.data,
                                      axis=dimension_to_groupby,
                                      **kwargs)

        # Determine aggregation result data type for the aggregate-by cube
        # data on first pass.
        if i == 0:
            if isinstance(self.data, ma.MaskedArray):
                aggregateby_data = ma.zeros(data_shape, dtype=result.dtype)
            else:
                aggregateby_data = np.zeros(data_shape, dtype=result.dtype)
        aggregateby_data[tuple(cube_slice)] = result

    # Add the aggregation meta data to the aggregate-by cube.
    aggregator.update_metadata(aggregateby_cube,
                               groupby_coords,
                               aggregate=True, **kwargs)
    # Replace the appropriate coordinates within the aggregate-by cube.
    # A matching dimension coordinate is re-added as such; everything else
    # becomes an auxiliary coordinate on the group-by dimension.
    dim_coord, = self.coords(dimensions=dimension_to_groupby,
                             dim_coords=True) or [None]
    for coord in groupby.coords:
        if dim_coord is not None and \
                dim_coord._as_defn() == coord._as_defn() and \
                isinstance(coord, iris.coords.DimCoord):
            aggregateby_cube.add_dim_coord(coord.copy(),
                                           dimension_to_groupby)
        else:
            aggregateby_cube.add_aux_coord(coord.copy(),
                                           dimension_to_groupby)

    # Attach the aggregate-by data into the aggregate-by cube.
    aggregateby_cube = aggregator.post_process(aggregateby_cube,
                                               aggregateby_data,
                                               coords, **kwargs)

    return aggregateby_cube
def rolling_window(self, coord, aggregator, window, **kwargs):
    """
    Perform rolling window aggregation on a cube given a coordinate, an
    aggregation method and a window size.

    Args:

    * coord (string/:class:`iris.coords.Coord`):
        The coordinate over which to perform the rolling window
        aggregation.
    * aggregator (:class:`iris.analysis.Aggregator`):
        Aggregator to be applied to the data.
    * window (int):
        Size of window to use.

    Kwargs:

    * kwargs:
        Aggregator and aggregation function keyword arguments. The weights
        argument to the aggregator, if any, should be a 1d array with the
        same length as the chosen window.

    Returns:
        :class:`iris.cube.Cube`.

    Raises:
        :class:`iris.exceptions.NotYetImplementedError` for circular
        coordinates, :class:`iris.exceptions.CoordinateMultiDimError` for
        multi-dimensional coordinates, ValueError for a window shorter
        than 2.

    .. note::

        This operation does not yet have support for lazy evaluation.

    For example::

        cube.rolling_window('time', iris.analysis.MEAN, 3)

    The rolled dimension of the result has (length - window + 1) entries,
    each being the aggregate of the corresponding window of points, so
    e.g. a cube with 6 time points yields 4 windows of size 3.

    """
    coord = self._as_list_of_coords(coord)[0]

    if getattr(coord, 'circular', False):
        raise iris.exceptions.NotYetImplementedError(
            'Rolling window over a circular coordinate.')

    if window < 2:
        raise ValueError('Cannot perform rolling window '
                         'with a window size less than 2.')

    if coord.ndim > 1:
        raise iris.exceptions.CoordinateMultiDimError(coord)

    dimension = self.coord_dims(coord)
    if len(dimension) != 1:
        raise iris.exceptions.CoordinateCollapseError(
            'Cannot perform rolling window with coordinate "%s", '
            'must map to one data dimension.' % coord.name())
    dimension = dimension[0]

    # Use indexing to get a result-cube of the correct shape.
    # NB. This indexes the data array which is wasted work.
    # As index-to-get-shape-then-fiddle is a common pattern, perhaps
    # some sort of `cube.prepare()` method would be handy to allow
    # re-shaping with given data, and returning a mapping of
    # old-to-new-coords (to avoid having to use metadata identity)?
    key = [slice(None, None)] * self.ndim
    key[dimension] = slice(None, self.shape[dimension] - window + 1)
    new_cube = self[tuple(key)]

    # take a view of the original data using the rolling_window function
    # this will add an extra dimension to the data at dimension + 1 which
    # represents the rolled window (i.e. will have a length of window)
    rolling_window_data = iris.util.rolling_window(self.data,
                                                   window=window,
                                                   axis=dimension)

    # now update all of the coordinates to reflect the aggregation
    for coord_ in self.coords(dimensions=dimension):
        if coord_.has_bounds():
            warnings.warn('The bounds of coordinate %r were ignored in '
                          'the rolling window operation.' % coord_.name())

        if coord_.ndim != 1:
            raise ValueError('Cannot calculate the rolling '
                             'window of %s as it is a multidimensional '
                             'coordinate.' % coord_.name())

        new_bounds = iris.util.rolling_window(coord_.points, window)

        # NB. np.str_ rather than the old np.str alias: np.str was simply
        # the builtin str (and was removed in NumPy 1.24), and issubdtype
        # treats str and np.str_ identically, so behaviour is unchanged.
        if np.issubdtype(new_bounds.dtype, np.str_):
            # Handle case where the AuxCoord contains string. The points
            # are the serialized form of the points contributing to each
            # window and the bounds are the first and last points in the
            # window as with numeric coordinates.
            new_points = np.apply_along_axis(lambda x: '|'.join(x), -1,
                                             new_bounds)
            new_bounds = new_bounds[:, (0, -1)]
        else:
            # Take the first and last element of the rolled window (i.e.
            # the bounds) and the new points are the midpoints of these
            # bounds.
            new_bounds = new_bounds[:, (0, -1)]
            new_points = np.mean(new_bounds, axis=-1)

        # wipe the coords points and set the bounds
        new_coord = new_cube.coord(coord_)
        new_coord.points = new_points
        new_coord.bounds = new_bounds

    # update the metadata of the cube itself
    aggregator.update_metadata(
        new_cube, [coord],
        action='with a rolling window of length %s over' % window,
        **kwargs)

    # and perform the data transformation, generating weights first if
    # needed
    if isinstance(aggregator, iris.analysis.WeightedAggregator) and \
            aggregator.uses_weighting(**kwargs):
        if 'weights' in kwargs:
            weights = kwargs['weights']
            if weights.ndim > 1 or weights.shape[0] != window:
                raise ValueError('Weights for rolling window aggregation '
                                 'must be a 1d array with the same length '
                                 'as the window.')
            # Copy kwargs so the caller's dict is not mutated, then
            # broadcast the 1d weights across the window axis.
            kwargs = dict(kwargs)
            kwargs['weights'] = iris.util.broadcast_to_shape(
                weights, rolling_window_data.shape, (dimension + 1,))
    data_result = aggregator.aggregate(rolling_window_data,
                                       axis=dimension + 1,
                                       **kwargs)
    result = aggregator.post_process(new_cube, data_result, [coord],
                                     **kwargs)
    return result
def interpolate(self, sample_points, scheme, collapse_scalar=True):
    """
    Interpolate this :class:`~iris.cube.Cube` onto the given sample
    points, using the given interpolation scheme.

    Args:

    * sample_points:
        A sequence of (coordinate, points) pairs over which to
        interpolate. The values for coordinates that correspond to
        dates or times may optionally be supplied as datetime.datetime or
        netcdftime.datetime instances.
    * scheme:
        The type of interpolation used to interpolate from this
        :class:`~iris.cube.Cube` to the given sample points. The
        interpolation schemes currently available in Iris are:
            * :class:`iris.analysis.Linear`, and
            * :class:`iris.analysis.Nearest`.

    Kwargs:

    * collapse_scalar:
        Whether to collapse the dimension of scalar sample points
        in the resulting cube. Default is True.

    Returns:
        A cube interpolated at the given sample points.
        If `collapse_scalar` is True then the dimensionality of the cube
        will be the number of original cube dimensions minus
        the number of scalar coordinates.

    For example::

        samples = [('time', 349618.5)]
        result = cube.interpolate(samples, iris.analysis.Linear())

    """
    # Split the (coordinate, points) pairs into two parallel sequences.
    sample_coords, sample_values = zip(*sample_points)
    # The scheme builds an interpolator for this cube over the given
    # coordinates; calling it performs the actual interpolation.
    interpolator = scheme.interpolator(self, sample_coords)
    return interpolator(sample_values, collapse_scalar=collapse_scalar)
def regrid(self, grid, scheme):
    """
    Regrid this :class:`~iris.cube.Cube` onto the given target `grid`
    using the given regridding `scheme`.

    Args:

    * grid:
        A :class:`~iris.cube.Cube` that defines the target grid.
    * scheme:
        The type of regridding used to regrid this cube onto the target
        grid. The regridding schemes currently available in Iris are:
            * :class:`iris.analysis.Linear`,
            * :class:`iris.analysis.Nearest`, and
            * :class:`iris.analysis.AreaWeighted`.

    Returns:
        A cube defined with the horizontal dimensions of the target grid
        and the other dimensions from this cube. The data values of
        this cube will be converted to values on the new grid
        according to the given regridding scheme.

    """
    # The scheme builds a regridder mapping this cube's grid onto the
    # target grid; applying it to the source cube does the regridding.
    regrid_call = scheme.regridder(self, grid)
    return regrid_call(self)
# Resolve the MutableMapping ABC in a location-independent way: the ABCs
# moved to collections.abc in Python 3.3 and were removed from the
# top-level collections namespace in Python 3.10.
try:
    _MutableMapping = collections.abc.MutableMapping
except AttributeError:  # Python 2 fallback.
    _MutableMapping = collections.MutableMapping


class ClassDict(_MutableMapping, object):
    """
    A mapping that stores objects keyed on their superclasses and their names.

    The mapping has a root class, all stored objects must be a subclass of the
    root class. The superclasses used for an object include the class of the
    object, but do not include the root class. Only one object is allowed for
    any key.

    """
    def __init__(self, superclass):
        # The root class: every stored object must be an instance of it.
        if not isinstance(superclass, type):
            raise TypeError("The superclass must be a Python type or new "
                            "style class.")
        self._superclass = superclass
        # Maps the object's exact class to the object.
        self._basic_map = {}
        # Maps each of the object's superclasses, and each superclass
        # name (string), to the object.
        self._retrieval_map = {}

    def add(self, object_, replace=False):
        '''Add an object to the dictionary.'''
        if not isinstance(object_, self._superclass):
            msg = "Only subclasses of {!r} are allowed as values.".format(
                self._superclass.__name__)
            raise TypeError(msg)
        # Find all the superclasses of the given object, starting with the
        # object's class.
        superclasses = type.mro(type(object_))
        if not replace:
            # Ensure nothing else is already registered against those
            # superclasses.
            # NB. This implies the _basic_map will also be empty for this
            # object.
            for key_class in superclasses:
                if key_class in self._retrieval_map:
                    msg = "Cannot add instance of '%s' because instance of " \
                          "'%s' already added." % (type(object_).__name__,
                                                   key_class.__name__)
                    raise ValueError(msg)
        # Register the given object against those superclasses.
        for key_class in superclasses:
            self._retrieval_map[key_class] = object_
            self._retrieval_map[key_class.__name__] = object_
        self._basic_map[type(object_)] = object_

    def __getitem__(self, class_):
        try:
            return self._retrieval_map[class_]
        except KeyError:
            raise KeyError('Coordinate system %r does not exist.' % class_)

    def __setitem__(self, key, value):
        raise NotImplementedError('You must call the add method instead.')

    def __delitem__(self, class_):
        cs = self[class_]
        # NB. plain dict.items() replaces the old six.iteritems() call;
        # it behaves correctly on both Python 2 and Python 3.
        keys = [k for k, v in self._retrieval_map.items() if v == cs]
        for key in keys:
            del self._retrieval_map[key]
        del self._basic_map[type(cs)]
        return cs

    def __len__(self):
        return len(self._basic_map)

    def __iter__(self):
        for item in self._basic_map:
            yield item

    def keys(self):
        '''Return the keys of the dictionary mapping.'''
        return self._basic_map.keys()
def sorted_axes(axes):
    """
    Returns the axis names sorted alphabetically, with the exception that
    't', 'z', 'y', and, 'x' are sorted to the end.

    """
    # Non-special names rank 0 (i.e. first); ties break alphabetically.
    priority = {'t': 1, 'z': 2, 'y': 3, 'x': 4}
    return sorted(axes, key=lambda axis: (priority.get(axis, 0), axis))
# See Cube.slice() for the definition/context.

# Resolve the Iterator ABC compatibly: it moved to collections.abc in
# Python 3.3 and was removed from the collections namespace in 3.10.
try:
    _Iterator = collections.abc.Iterator
except AttributeError:  # Python 2 fallback.
    _Iterator = collections.Iterator


class _SliceIterator(_Iterator):
    # Yields successive sub-cubes of 'cube' spanning 'requested_dims',
    # indexing each remaining dimension point-by-point.
    def __init__(self, cube, dims_index, requested_dims, ordered):
        self._cube = cube

        # Let Numpy do some work in providing all of the permutations of our
        # data shape. This functionality is something like:
        # ndindex(2, 1, 3) -> [(0, 0, 0), (0, 0, 1), (0, 0, 2),
        #                      (1, 0, 0), (1, 0, 1), (1, 0, 2)]
        self._ndindex = np.ndindex(*dims_index)

        self._requested_dims = requested_dims
        # indexing relating to sliced cube
        self._mod_requested_dims = np.argsort(requested_dims)
        self._ordered = ordered

    def __next__(self):
        # NB. When self._ndindex runs out it will raise StopIteration for us.
        index_tuple = next(self._ndindex)

        # Turn the given tuple into a list so that we can do something with it
        index_list = list(index_tuple)

        # For each of the spanning dimensions requested, replace the 0 with a
        # spanning slice
        for d in self._requested_dims:
            index_list[d] = slice(None, None)

        # Request the slice
        cube = self._cube[tuple(index_list)]

        if self._ordered:
            # Transpose the slice so its dimensions follow the order in
            # which they were requested.
            if any(self._mod_requested_dims != list(range(len(cube.shape)))):
                cube.transpose(self._mod_requested_dims)

        return cube

    # Python 2 iterator-protocol alias.
    next = __next__
| zak-k/iris | lib/iris/cube.py | Python | gpl-3.0 | 158,449 | [
"NetCDF"
] | a31b4f2d2091b9de6967feed63655f9d565aea8d974678cdc134d4930baea977 |
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2015 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
'''Gaussian LOG and FCHK file formats'''
import numpy as np
__all__ = ['load_operators_g09', 'FCHKFile', 'load_fchk']
def load_operators_g09(fn, lf):
    """Loads several one- and four-index operators from a Gaussian log file.

       **Arguments:**

       fn
            The filename of the Gaussian log file.

       lf
            A LinalgFactory instance.

       The following two-index operators are loaded if present: overlap,
       kinetic, nuclear attraction. The following four-index operator is
       loaded if present: electrostatic repulsion. In order to make sure all
       these matrices are present in the Gaussian log file, the following
       commands must be used in the Gaussian input file::

            scf(conventional) iop(3/33=5) extralinks=l316 iop(3/27=999)

       **Returns:** A dictionary that may contain the keys: ``olp``, ``kin``,
       ``na`` and/or ``er``.
    """
    with open(fn) as f:
        # First get the line with the number of orbital basis functions
        for line in f:
            if line.startswith(' NBasis ='):
                nbasis = int(line[12:18])
                break
        # Consistency check against the factory's configured basis size.
        if lf.default_nbasis is not None and lf.default_nbasis != nbasis:
            raise TypeError('The value of lf.default_nbasis does not match nbasis reported in the log file.')
        lf.default_nbasis = nbasis

        # Then load the one- and four-index operators. This part is written such
        # that it does not make any assumptions about the order in which these
        # operators are printed.
        result = {'lf': lf}
        for line in f:
            if line.startswith(' *** Overlap ***'):
                result['olp'] = _load_twoindex_g09(f, nbasis, lf)
            elif line.startswith(' *** Kinetic Energy ***'):
                result['kin'] = _load_twoindex_g09(f, nbasis, lf)
            elif line.startswith(' ***** Potential Energy *****'):
                result['na'] = _load_twoindex_g09(f, nbasis, lf)
            elif line.startswith(' *** Dumping Two-Electron integrals ***'):
                result['er'] = _load_fourindex_g09(f, nbasis, lf)
        return result
def _load_twoindex_g09(f, nbasis, lf):
    """Load a two-index operator from a Gaussian log file

    **Arguments:**

    f
        A file object for the Gaussian log file in read mode, positioned
        right after the matrix header line.

    nbasis
        The number of orbital basis functions.

    lf
        A LinalgFactory instance.
    """
    result = lf.create_two_index(nbasis)
    # Gaussian prints the lower triangle of the symmetric matrix in column
    # blocks of (at most) five.
    block_counter = 0
    while block_counter < nbasis:
        # skip the header line (column indexes)
        f.next()
        # determine the number of rows in this part
        nrow = nbasis - block_counter
        for i in xrange(nrow):
            # drop the first word on each line, which is the row index
            words = f.next().split()[1:]
            for j in xrange(len(words)):
                # Fortran-style exponents use 'D' instead of 'E'.
                value = float(words[j].replace('D', 'E'))
                # Only the lower triangle is set here; symmetrization is
                # presumably handled by the two-index object — TODO confirm.
                result.set_element(i+block_counter, j+block_counter, value)
        block_counter += 5
    return result
def _load_fourindex_g09(f, nbasis, lf):
    """Load a four-index operator from a Gaussian log file

    **Arguments:**

    f
        A file object for the Gaussian log file in read mode, positioned
        right after the two-electron integral dump header.

    nbasis
        The number of orbital basis functions.

    lf
        A LinalgFactory instance.
    """
    result = lf.create_four_index(nbasis)
    # Skip first six lines (dump preamble)
    for i in xrange(6):
        f.next()
    # Start reading elements until a line is encountered that does not start
    # with ' I='
    while True:
        line = f.next()
        if not line.startswith(' I='):
            break
        #print line[3:7], line[9:13], line[15:19], line[21:25], line[28:].replace('D', 'E')
        # Indexes are printed 1-based in fixed-width columns; convert to 0-based.
        i = int(line[3:7])-1
        j = int(line[9:13])-1
        k = int(line[15:19])-1
        l = int(line[21:25])-1
        # Fortran-style exponent: 'D' instead of 'E'.
        value = float(line[29:].replace('D', 'E'))
        # Gaussian uses the chemists notation for the 4-center indexes. HORTON
        # uses the physicists notation.
        result.set_element(i, k, j, l, value)
    return result
class FCHKFile(dict):
    """Reader for Gaussian formatted checkpoint (FCHK) files.

    After initialization, the data from the file is available through the
    dictionary interface. Also the following attributes are read from the
    file: ``title``, ``command``, ``lot`` (level of theory) and ``obasis``
    (only set when the second line of the file mentions a basis set).
    """
    def __init__(self, filename, field_labels=None):
        """
        **Arguments:**

        filename
            The formatted checkpoint file.

        **Optional arguments:**

        field_labels
            When provided, only these fields are read from the formatted
            checkpoint file. (This can save a lot of time.)
        """
        dict.__init__(self)
        self.filename = filename
        # Bug fix: set(None) raises TypeError, so field_labels=None (read
        # everything, the documented default) used to crash. Only convert
        # to a set when labels were actually given.
        if field_labels is not None:
            field_labels = set(field_labels)
        self._read(filename, field_labels)

    def _read(self, filename, field_labels=None):
        """Read all the requested fields.

        When ``field_labels`` is None, every field in the file is read.
        Otherwise parsing stops as soon as all requested labels are found.
        """
        def read_field(f):
            """Read a single field; return False when parsing must stop."""
            datatype = None
            while datatype is None:
                # find a sane header line
                line = f.readline()
                if line == "":
                    return False
                # The first 43 columns contain the field label.
                label = line[:43].strip()
                if field_labels is not None:
                    if len(field_labels) == 0:
                        # All requested fields were found; stop early.
                        return False
                    elif label not in field_labels:
                        # Not interested in this field; keep scanning.
                        return True
                    else:
                        field_labels.discard(label)
                line = line[43:]
                words = line.split()
                if len(words) == 0:
                    return True
                if words[0] == 'I':
                    datatype = int
                elif words[0] == 'R':
                    datatype = float
            if len(words) == 2:
                # Scalar field: label, type character, value.
                try:
                    value = datatype(words[1])
                except ValueError:
                    return True
            elif len(words) == 3:
                # Array field: label, type character, "N=", length, then
                # the values spread over the following lines.
                if words[1] != "N=":
                    raise IOError("Unexpected line in formatted checkpoint file %s\n%s" % (filename, line[:-1]))
                length = int(words[2])
                value = np.zeros(length, datatype)
                counter = 0
                try:
                    while counter < length:
                        line = f.readline()
                        if line == "":
                            raise IOError("Unexpected end of formatted checkpoint file %s" % filename)
                        for word in line.split():
                            try:
                                value[counter] = datatype(word)
                            except (ValueError, OverflowError) as e:
                                raise IOError('Could not interpret word while reading %s: %s' % (word, filename))
                            counter += 1
                except ValueError:
                    return True
            else:
                raise IOError("Unexpected line in formatted checkpoint file %s\n%s" % (filename, line[:-1]))
            self[label] = value
            return True

        f = open(filename, 'r')
        # Line 1: title. Line 2: command, level of theory and (optionally)
        # the basis set name.
        self.title = f.readline()[:-1].strip()
        words = f.readline().split()
        if len(words) == 3:
            self.command, self.lot, self.obasis = words
        elif len(words) == 2:
            self.command, self.lot = words
        else:
            raise IOError('The second line of the FCHK file should contain two or three words.')
        while read_field(f):
            pass
        f.close()
def triangle_to_dense(triangle):
    '''Convert a symmetric matrix in triangular storage to a dense square matrix.

    **Arguments:**

    triangle
        A row vector containing all the unique matrix elements of a
        symmetric matrix. (Either the lower-triangular part in row-major
        order or the upper-triangular part in column-major order.)

    **Returns:** a square symmetric matrix.
    '''
    # Solve nside*(nside+1)/2 == len(triangle) for the matrix dimension.
    nside = int(np.round((np.sqrt(1 + 8*len(triangle)) - 1)/2))
    dense = np.zeros((nside, nside))
    start = 0
    for row in range(nside):
        stop = start + row + 1
        chunk = triangle[start:stop]
        # Write the same chunk into the row and the column to symmetrize.
        dense[row, :row + 1] = chunk
        dense[:row + 1, row] = chunk
        start = stop
    return dense
def load_fchk(filename, lf):
    '''Load from a formatted checkpoint file.

    **Arguments:**

    filename
        The filename of the Gaussian formatted checkpoint file.

    lf
        A LinalgFactory instance.

    **Returns** a dictionary with: ``title``, ``coordinates``, ``numbers``,
    ``obasis``, ``exp_alpha``, ``permutation``, ``energy``,
    ``pseudo_numbers``, ``mulliken_charges``. The dictionary may also
    contain: ``npa_charges``, ``esp_charges``, ``exp_beta``, ``dm_full_mp2``,
    ``dm_spin_mp2``, ``dm_full_mp3``, ``dm_spin_mp3``, ``dm_full_cc``,
    ``dm_spin_cc``, ``dm_full_ci``, ``dm_spin_ci``, ``dm_full_scf``,
    ``dm_spin_scf``.
    '''
    from horton.gbasis import GOBasis
    # Only parse the fields that are used below; this can save a lot of time
    # on large fchk files. (The duplicate "Shell to atom map" entry that used
    # to be here was redundant and has been removed.)
    fchk = FCHKFile(filename, [
        "Number of electrons", "Number of independant functions",
        "Number of independent functions",
        "Number of alpha electrons", "Number of beta electrons",
        "Atomic numbers", "Current cartesian coordinates",
        "Shell types", "Shell to atom map",
        "Number of primitives per shell", "Primitive exponents",
        "Contraction coefficients", "P(S=P) Contraction coefficients",
        "Alpha Orbital Energies", "Alpha MO coefficients",
        "Beta Orbital Energies", "Beta MO coefficients",
        "Total Energy", "Nuclear charges",
        'Total SCF Density', 'Spin SCF Density',
        'Total MP2 Density', 'Spin MP2 Density',
        'Total MP3 Density', 'Spin MP3 Density',
        'Total CC Density', 'Spin CC Density',
        'Total CI Density', 'Spin CI Density',
        'Mulliken Charges', 'ESP Charges', 'NPA Charges',
        'Polarizability',
    ])

    # A) Load the geometry
    numbers = fchk["Atomic numbers"]
    coordinates = fchk["Current cartesian coordinates"].reshape(-1,3)
    pseudo_numbers = fchk["Nuclear charges"]
    # Mask out ghost atoms (they have a zero nuclear charge)
    mask = pseudo_numbers != 0.0
    numbers = numbers[mask]
    # Do not overwrite coordinates array, because it is needed to specify basis
    system_coordinates = coordinates[mask]
    pseudo_numbers = pseudo_numbers[mask]

    # B) Load the orbital basis set
    shell_types = fchk["Shell types"]
    shell_map = fchk["Shell to atom map"] - 1
    nprims = fchk["Number of primitives per shell"]
    alphas = fchk["Primitive exponents"]
    ccoeffs_level1 = fchk["Contraction coefficients"]
    ccoeffs_level2 = fchk.get("P(S=P) Contraction coefficients")

    my_shell_types = []
    my_shell_map = []
    my_nprims = []
    my_alphas = []
    con_coeffs = []
    counter = 0
    for i, n in enumerate(nprims):
        if shell_types[i] == -1:
            # Special treatment for SP shell type: split into a separate S
            # shell (level-1 coefficients) and P shell (level-2 coefficients)
            # sharing the same primitives.
            my_shell_types.append(0)
            my_shell_types.append(1)
            my_shell_map.append(shell_map[i])
            my_shell_map.append(shell_map[i])
            my_nprims.append(nprims[i])
            my_nprims.append(nprims[i])
            my_alphas.append(alphas[counter:counter+n])
            my_alphas.append(alphas[counter:counter+n])
            con_coeffs.append(ccoeffs_level1[counter:counter+n])
            con_coeffs.append(ccoeffs_level2[counter:counter+n])
        else:
            my_shell_types.append(shell_types[i])
            my_shell_map.append(shell_map[i])
            my_nprims.append(nprims[i])
            my_alphas.append(alphas[counter:counter+n])
            con_coeffs.append(ccoeffs_level1[counter:counter+n])
        counter += n
    my_shell_types = np.array(my_shell_types)
    my_shell_map = np.array(my_shell_map)
    my_nprims = np.array(my_nprims)
    my_alphas = np.concatenate(my_alphas)
    con_coeffs = np.concatenate(con_coeffs)
    del shell_map
    del shell_types
    del nprims
    del alphas

    # Note: the full coordinates array (including ghost atoms) is used here
    # on purpose, because ghost atoms do carry basis functions.
    obasis = GOBasis(coordinates, my_shell_map, my_nprims, my_shell_types, my_alphas, con_coeffs)
    if lf.default_nbasis is not None and lf.default_nbasis != obasis.nbasis:
        raise TypeError('The value of lf.default_nbasis does not match nbasis reported in the fchk file.')
    lf.default_nbasis = obasis.nbasis

    # permutation of the orbital basis functions (Gaussian ordering -> HORTON
    # ordering, per shell type)
    permutation_rules = {
      -9: np.arange(19),
      -8: np.arange(17),
      -7: np.arange(15),
      -6: np.arange(13),
      -5: np.arange(11),
      -4: np.arange(9),
      -3: np.arange(7),
      -2: np.arange(5),
       0: np.array([0]),
       1: np.arange(3),
       2: np.array([0, 3, 4, 1, 5, 2]),
       3: np.array([0, 4, 5, 3, 9, 6, 1, 8, 7, 2]),
       4: np.arange(15)[::-1],
       5: np.arange(21)[::-1],
       6: np.arange(28)[::-1],
       7: np.arange(36)[::-1],
       8: np.arange(45)[::-1],
       9: np.arange(55)[::-1],
    }
    permutation = []
    for shell_type in my_shell_types:
        permutation.extend(permutation_rules[shell_type]+len(permutation))
    permutation = np.array(permutation, dtype=int)

    result = {
        'title': fchk.title,
        'coordinates': system_coordinates,
        'lf': lf,
        'numbers': numbers,
        'obasis': obasis,
        'permutation': permutation,
        'pseudo_numbers': pseudo_numbers,
    }

    # C) Load density matrices
    def load_dm(label):
        """Return a symmetric two-index object for a triangular density
        field, or None when the field is absent."""
        if label in fchk:
            dm = lf.create_two_index(obasis.nbasis)
            start = 0
            for i in xrange(obasis.nbasis):
                stop = start+i+1
                dm._array[i,:i+1] = fchk[label][start:stop]
                dm._array[:i+1,i] = fchk[label][start:stop]
                start = stop
            return dm
        return None

    # First try to load the post-hf density matrices.
    load_orbitals = True
    for key in 'MP2', 'MP3', 'CC', 'CI', 'SCF':
        dm_full = load_dm('Total %s Density' % key)
        if dm_full is not None:
            result['dm_full_%s' % key.lower()] = dm_full
        dm_spin = load_dm('Spin %s Density' % key)
        if dm_spin is not None:
            result['dm_spin_%s' % key.lower()] = dm_spin

    # D) Load the wavefunction
    # Handle small difference in fchk files from g03 and g09 (typo fixed
    # in newer versions: "independant" vs "independent")
    nbasis_indep = fchk.get("Number of independant functions") or \
                   fchk.get("Number of independent functions")
    if nbasis_indep is None:
        nbasis_indep = obasis.nbasis

    # Load orbitals
    nalpha = fchk['Number of alpha electrons']
    nbeta = fchk['Number of beta electrons']
    if nalpha < 0 or nbeta < 0 or nalpha+nbeta <= 0:
        raise ValueError('The file %s does not contain a positive number of electrons.' % filename)
    exp_alpha = lf.create_expansion(obasis.nbasis, nbasis_indep)
    # MO coefficients are stored orbital-major in the fchk file; transpose
    # to get basis-function-major columns.
    exp_alpha.coeffs[:] = fchk['Alpha MO coefficients'].reshape(nbasis_indep, obasis.nbasis).T
    exp_alpha.energies[:] = fchk['Alpha Orbital Energies']
    exp_alpha.occupations[:nalpha] = 1.0
    result['exp_alpha'] = exp_alpha
    if 'Beta Orbital Energies' in fchk:
        # UHF case
        exp_beta = lf.create_expansion(obasis.nbasis, nbasis_indep)
        exp_beta.coeffs[:] = fchk['Beta MO coefficients'].reshape(nbasis_indep, obasis.nbasis).T
        exp_beta.energies[:] = fchk['Beta Orbital Energies']
        exp_beta.occupations[:nbeta] = 1.0
        result['exp_beta'] = exp_beta
    elif fchk['Number of beta electrons'] != fchk['Number of alpha electrons']:
        # ROHF case: reuse the alpha orbitals with beta occupations.
        exp_beta = lf.create_expansion(obasis.nbasis, nbasis_indep)
        exp_beta.coeffs[:] = fchk['Alpha MO coefficients'].reshape(nbasis_indep, obasis.nbasis).T
        exp_beta.energies[:] = fchk['Alpha Orbital Energies']
        exp_beta.occupations[:nbeta] = 1.0
        result['exp_beta'] = exp_beta
        # Delete dm_full_scf because it is known to be buggy.
        # Bug fix: use a default so this does not raise KeyError when the
        # 'Total SCF Density' field was absent from the file.
        result.pop('dm_full_scf', None)

    # E) Load properties
    result['energy'] = fchk['Total Energy']
    if 'Polarizability' in fchk:
        result['polar'] = triangle_to_dense(fchk['Polarizability'])

    # F) Load optional properties
    # Mask out ghost atoms from charges
    if 'Mulliken Charges' in fchk:
        result['mulliken_charges'] = fchk['Mulliken Charges'][mask]
    if 'ESP Charges' in fchk:
        result['esp_charges'] = fchk['ESP Charges'][mask]
    if 'NPA Charges' in fchk:
        result['npa_charges'] = fchk['NPA Charges'][mask]

    return result
| eustislab/horton | horton/io/gaussian.py | Python | gpl-3.0 | 17,417 | [
"Gaussian"
] | 610605213ff613c7d06822fd7c0fd32b1b321856da99fe0b2d2eeed048185bba |
from brian2 import *
from data_utils import *
set_device('cpp_standalone', build_on_run=False)
# neuronal parameters
N = 12500 # total number of neurons
NE = 10000 # number of excitatory neurons
vth = 20*mV # threshold potential
vr = 10*mV # reset potential
tau = 20*ms # membrane time constant
eqs_neurons='''
inp : volt
dv/dt = (-v + inp)/tau : volt
'''
P=NeuronGroup(N=N,model=eqs_neurons,\
threshold='v>=vth',reset='v=vr',\
refractory=2*ms,method='euler')
P.v = uniform(size=12500)*vth
# synaptic parameters
g = 5 # ratio of inh to exc
J = 0.1*mV # synaptic weight
p = 0.1 # connection probability
delay = 1.5*ms # synaptic delay
# delta-function synapses
con = Synapses(P,P,'w:volt (constant)',on_pre='v_post+=w',method='euler')
#con.connect(condition='i!=j',p=p)
print 'computing connection matrix'
CE = int(p*NE)
CI = int(p*(N-NE))
C = CE+CI
conn_i = np.zeros(C*N,dtype=int)
preneuronsE = arange(0,NE,dtype=int)
preneuronsI = arange(NE,N,dtype=int)
for j in range(N): # loop over post-synaptic neurons
# draw CE number of neuron indices out of NE neurons, no autapses
if j<NE: preneurons = np.delete(preneuronsE,j)
else: preneurons = preneuronsE
conn_i[j*C:j*C+CE] = np.random.permutation(preneurons)[:CE]
# draw CI number of neuron indices out of inhibitory neurons, no autapses
if j>NE: preneurons = np.delete(preneuronsI,j-NE)
else: preneurons = preneuronsI
conn_i[j*C+CE:(j+1)*C] = np.random.permutation(preneurons)[:CI]
conn_j = np.repeat(range(N),C)
print 'connecting network'
con.connect(i=conn_i,j=conn_j)
con.delay = delay
con.w['i<NE'] = J
con.w['i>=NE'] = -g*J
# input parameters
inpfactor = 2
nu_theta = vth/(p*NE*J*tau)
Pinp = PoissonGroup(N=N,rates=inpfactor*nu_theta)
con_ext = Synapses(Pinp, P, on_pre='v += J')
con_ext.connect(True, p=p*NE/float(N))
con_ext.delay = delay
sm = SpikeMonitor(P)
sr = PopulationRateMonitor(P)
sm_vm = StateMonitor(P,'v',record=range(5))
print 'compiling/running'
run(0.25*second, report='text')
device.build(directory='output', compile=True, run=True, debug=False);
print "mean activity (Hz) =",mean(sr.rate/Hz)
figure()
plot(sm.t/ms,sm.i,'.')
#ylim([1350,1400])
figure()
plot(sr.t/ms,sr.rate/Hz,',-')
#figure()
#hist(CV_spiketrains(array(sm.t),array(sm.i),0.,range(N)),bins=100)
show()
| h-mayorquin/camp_india_2016 | tutorials/EI networks/STEP3_ExcInhNet_Brunel2000_brian2.py | Python | mit | 2,316 | [
"NEURON"
] | d1618bda437cef9bfebb20ed465c842940c77e79e8b5a395c00358699a83e25a |
#
# Copyright 2014-2015, 2017, 2020-2021 Lars Pastewka (U. Freiburg)
# 2018-2021 Jan Griesser (U. Freiburg)
# 2020 Jonas Oldenstaedt (U. Freiburg)
# 2015 Adrien Gola (KIT)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from .kumagai import Kumagai
from .tersoff_brenner import TersoffBrenner
from .stillinger_weber import StillingerWeber
from .harmonic import ZeroPair, ZeroTriplet, HarmonicBond, HarmonicAngle
| libAtoms/matscipy | matscipy/calculators/manybody/explicit_forms/__init__.py | Python | lgpl-2.1 | 1,181 | [
"Matscipy"
] | 591e8df1b22e592dcd19ca40cdd3c7443f15cb981e712c07a54f97bf2a7d726b |
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string, optparse, subprocess
import zipfile
from datetime import date
# Run everything relative to the directory that contains this script.
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
# Keys that must be present in the module manifest.
# NOTE(review): 'copyright' appears twice in this list — harmless but redundant.
required_module_keys = ['name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
# Placeholder manifest values; a warning is printed when they are unchanged.
module_defaults = {
    'description':'My module',
    'author': 'Your Name',
    'license' : 'Specify your license',
    'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
# Placeholder LICENSE text; a warning is printed when it is still present.
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
    """Return the Titanium SDK path from *config*, with environment
    variables and a leading '~' expanded."""
    raw_path = config['TITANIUM_SDK']
    return os.path.expandvars(os.path.expanduser(raw_path))
def replace_vars(config, token):
    """Expand $(KEY) placeholders in *token* with values from *config*.

    Expansion stops at the first placeholder whose key is missing from
    *config* or whose closing parenthesis is absent; the remainder of the
    token is returned unexpanded.
    """
    start = token.find('$(')
    while start != -1:
        stop = token.find(')', start + 2)
        if stop == -1:
            break
        key = token[start + 2:stop]
        if key not in config:
            break
        token = token.replace('$(%s)' % key, config[key])
        start = token.find('$(')
    return token
def read_ti_xcconfig():
    """Parse titanium.xcconfig in the module directory into a dict.

    Lines of the form KEY = VALUE are collected; '//' comment lines are
    skipped and $(KEY) references are expanded with values seen so far.
    """
    contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
    config = {}
    for line in contents.splitlines(False):
        line = line.strip()
        # skip '//' comment lines
        if line[0:2]=='//': continue
        idx = line.find('=')
        if idx > 0:
            key = line[0:idx].strip()
            value = line[idx+1:].strip()
            # expand $(KEY) placeholders using earlier entries
            config[key] = replace_vars(config,value)
    return config
def generate_doc(config):
    """Render the markdown files in ./documentation to HTML.

    **Returns:** a list of {filename: html} dicts, or None when the
    documentation folder does not exist.
    """
    docdir = os.path.join(cwd,'documentation')
    if not os.path.exists(docdir):
        warn("Couldn't find documentation file at: %s" % docdir)
        return None
    # Prefer the faster markdown2 module when it is installed.
    try:
        import markdown2 as markdown
    except ImportError:
        import markdown
    documentation = []
    for file in os.listdir(docdir):
        # skip ignored entries and sub-directories
        if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
            continue
        md = open(os.path.join(docdir,file)).read()
        html = markdown.markdown(md)
        documentation.append({file:html});
    return documentation
def compile_js(manifest,config):
    """Compile the module's CommonJS assets and splice the generated
    Objective-C lookup code into TiTouchidModuleAssets.m.

    Does nothing when assets/ti.touchid.js is absent. Also writes
    metadata.json with the exported symbols found by the compiler.
    """
    js_file = os.path.join(cwd,'assets','ti.touchid.js')
    if not os.path.exists(js_file): return
    from compiler import Compiler
    try:
        import json
    except:
        import simplejson as json
    compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
    root_asset, module_assets = compiler.compile_module()
    # Objective-C snippets returning the embedded asset data.
    root_asset_content = """
%s
 return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
    module_asset_content = """
%s
 NSNumber *index = [map objectForKey:path];
 if (index == nil) {
 return nil;
 }
 return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
    from tools import splice_code
    assets_router = os.path.join(cwd,'Classes','TiTouchidModuleAssets.m')
    splice_code(assets_router, 'asset', root_asset_content)
    splice_code(assets_router, 'resolve_asset', module_asset_content)
    # Generate the exports after crawling all of the available JS source
    exports = open('metadata.json','w')
    json.dump({'exports':compiler.exports }, exports)
    exports.close()
def die(msg):
    """Print *msg* and abort the build with a non-zero exit status."""
    print(msg)
    sys.exit(1)

def info(msg):
    """Print an informational build message."""
    print("[INFO] %s" % msg)

def warn(msg):
    """Print a warning build message."""
    print("[WARN] %s" % msg)
def validate_license():
    """Warn when the LICENSE file still contains the placeholder text."""
    c = open(os.path.join(cwd,'LICENSE')).read()
    if c.find(module_license_default)!=-1:
        warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
    """Parse and validate the module manifest file.

    Dies when the manifest is missing or lacks a required key, and warns
    about keys that still carry their placeholder default value.

    **Returns:** a ``(manifest_dict, manifest_path)`` tuple.
    """
    path = os.path.join(cwd,'manifest')
    # Bug fix: check for existence *before* opening the file; the original
    # opened first, so a missing manifest raised IOError instead of
    # reaching the friendly die() message.
    if not os.path.exists(path): die("missing %s" % path)
    f = open(path)
    manifest = {}
    for line in f.readlines():
        line = line.strip()
        # skip comments and lines without a key/value separator
        if line[0:1]=='#': continue
        if line.find(':') < 0: continue
        # Bug fix: split only on the first ':' so values containing a colon
        # (e.g. URLs) no longer raise ValueError.
        key,value = line.split(':', 1)
        manifest[key.strip()]=value.strip()
    for key in required_module_keys:
        if key not in manifest: die("missing required manifest key '%s'" % key)
        if key in module_defaults:
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest,path
# File and directory names that are never packaged into the module zip.
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignoreExt=[]):
    """Recursively add the contents of *dir* to zipfile *zf* under *basepath*.

    Entries listed in the module-level ignoreFiles/ignoreDirs and files whose
    extension is in *ignoreExt* are skipped.

    NOTE(review): the mutable default for ignoreExt is shared across calls;
    it is never mutated here, so this is harmless but fragile.
    """
    if not os.path.exists(dir): return
    for root, dirs, files in os.walk(dir):
        for name in ignoreDirs:
            if name in dirs:
                dirs.remove(name)	# don't visit ignored directories
        for file in files:
            if file in ignoreFiles: continue
            e = os.path.splitext(file)
            if len(e) == 2 and e[1] in ignoreExt: continue
            from_ = os.path.join(root, file)
            # Rewrite the archive path so the dir lands under basepath.
            to_ = from_.replace(dir, '%s/%s'%(basepath,dir), 1)
            zf.write(from_, to_)
def glob_libfiles():
    """Return the static libraries produced by Release-configuration builds."""
    return [libfile for libfile in glob.glob('build/**/*.a')
            if libfile.find('Release-') != -1]
def build_module(manifest,config):
    """Build the module for device and simulator, then merge the results.

    Runs xcodebuild for the iphoneos and iphonesimulator SDKs (with bitcode
    embedded) and combines the Release static libraries into a single
    universal library with lipo.
    """
    from tools import ensure_dev_path
    ensure_dev_path()
    rc = os.system("xcodebuild -sdk iphoneos -configuration Release OTHER_CFLAGS=\"-fembed-bitcode\"")
    if rc != 0:
        die("xcodebuild failed")
    rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release OTHER_CFLAGS=\"-fembed-bitcode\"")
    if rc != 0:
        die("xcodebuild failed")
    # build the merged library using lipo
    moduleid = manifest['moduleid']
    libpaths = ''
    for libfile in glob_libfiles():
        libpaths+='%s ' % libfile
    os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def generate_apidoc(apidoc_build_path):
    """Generate API documentation from the ./apidoc folder, if possible.

    Requires the TI_ROOT environment variable to point at the parent of a
    titanium_mobile checkout (for docgen.py). Honors the --skip-docs flag.

    **Returns:** True when documentation was generated, False otherwise.
    """
    global options
    if options.skip_docs:
        info("Skipping documentation generation.")
        return False
    else:
        info("Module apidoc generation can be skipped using --skip-docs")
    apidoc_path = os.path.join(cwd, "apidoc")
    if not os.path.exists(apidoc_path):
        warn("Skipping apidoc generation. No apidoc folder found at: %s" % apidoc_path)
        return False
    if not os.path.exists(apidoc_build_path):
        os.makedirs(apidoc_build_path)
    # Locate docgen.py inside the titanium_mobile repo pointed to by TI_ROOT.
    ti_root = string.strip(subprocess.check_output(["echo $TI_ROOT"], shell=True))
    if not len(ti_root) > 0:
        warn("Not generating documentation from the apidoc folder. The titanium_mobile repo could not be found.")
        warn("Set the TI_ROOT environment variable to the parent folder where the titanium_mobile repo resides (eg.'export TI_ROOT=/Path').")
        return False
    docgen = os.path.join(ti_root, "titanium_mobile", "apidoc", "docgen.py")
    if not os.path.exists(docgen):
        warn("Not generating documentation from the apidoc folder. Couldn't find docgen.py at: %s" % docgen)
        return False
    info("Generating documentation from the apidoc folder.")
    rc = os.system("\"%s\" --format=jsca,modulehtml --css=styles.css -o \"%s\" -e \"%s\"" % (docgen, apidoc_build_path, apidoc_path))
    if rc != 0:
        die("docgen failed")
    return True
def package_module(manifest,mf,config):
    """Assemble the distributable <moduleid>-iphone-<version>.zip.

    Packs the manifest, merged static library, rendered documentation,
    generated apidoc, assets, example, platform files, LICENSE,
    module.xcconfig and metadata.json under modules/iphone/<id>/<version>.
    """
    name = manifest['name'].lower()
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid,version)
    # Start from a clean zip file.
    if os.path.exists(modulezip): os.remove(modulezip)
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
    zf.write(mf,'%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
    # Markdown documentation rendered to HTML.
    docs = generate_doc(config)
    if docs!=None:
        for doc in docs:
            for file, html in doc.iteritems():
                filename = string.replace(file,'.md','.html')
                zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
    # Generated apidoc output, when available.
    apidoc_build_path = os.path.join(cwd, "build", "apidoc")
    if generate_apidoc(apidoc_build_path):
        for file in os.listdir(apidoc_build_path):
            if file in ignoreFiles or os.path.isdir(os.path.join(apidoc_build_path, file)):
                continue
            zf.write(os.path.join(apidoc_build_path, file), '%s/documentation/apidoc/%s' % (modulepath, file))
    # Supporting folders; compiled .pyc and (for assets/platform) .js files
    # are excluded.
    zip_dir(zf,'assets',modulepath,['.pyc','.js'])
    zip_dir(zf,'example',modulepath,['.pyc'])
    zip_dir(zf,'platform',modulepath,['.pyc','.js'])
    zf.write('LICENSE','%s/LICENSE' % modulepath)
    zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
    exports_file = 'metadata.json'
    if os.path.exists(exports_file):
        zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
    zf.close()
if __name__ == '__main__':
    # Entry point: parse options, validate module metadata, compile the JS
    # assets, build the static libraries and package everything into a zip.
    global options
    parser = optparse.OptionParser()
    parser.add_option("-s", "--skip-docs",
            dest="skip_docs",
            action="store_true",
            help="Will skip building documentation in apidoc folder",
            default=False)
    (options, args) = parser.parse_args()
    manifest,mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()
    # Make the Titanium SDK's python tooling importable.
    sdk = find_sdk(config)
    sys.path.insert(0,os.path.join(sdk,'iphone'))
    sys.path.append(os.path.join(sdk, "common"))
    compile_js(manifest,config)
    build_module(manifest,config)
    package_module(manifest,mf,config)
    sys.exit(0)
| titanium-forks/appcelerator-modules.ti.touchid | ios/build.py | Python | apache-2.0 | 8,826 | [
"VisIt"
] | 93bf4defa9c2a57d32ad31d86cc9754e1c395b14e2f270e87e204963643b17c2 |
import unittest
from keras2pmml import keras2pmml
from sklearn.datasets import load_iris
import numpy as np
import theano
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense
class GenericFieldsTestCase(unittest.TestCase):
    """Verify the PMML document produced by keras2pmml for a small
    Keras network trained on the Iris data set."""

    def setUp(self):
        # Train a tiny 4-5-3 network on Iris and convert it to PMML once;
        # each test then inspects a different part of the XML tree.
        iris = load_iris()
        theano.config.floatX = 'float32'
        X = iris.data.astype(theano.config.floatX)
        y = iris.target.astype(np.int32)
        y_ohe = np_utils.to_categorical(y)
        model = Sequential()
        model.add(Dense(input_dim=X.shape[1], output_dim=5, activation='tanh'))
        model.add(Dense(input_dim=5, output_dim=y_ohe.shape[1], activation='sigmoid'))
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        model.fit(X, y_ohe, nb_epoch=10, batch_size=1, verbose=3, validation_data=None)
        params = {'copyright': 'Václav Čadek', 'model_name': 'Iris Model'}
        self.model = model
        self.pmml = keras2pmml(self.model, **params)
        self.num_inputs = self.model.input_shape[1]
        self.num_outputs = self.model.output_shape[1]
        self.num_connection_layers = len(self.model.layers)
        # Default feature/class names generated by keras2pmml.
        self.features = ['x{}'.format(i) for i in range(self.num_inputs)]
        self.class_values = ['y{}'.format(i) for i in range(self.num_outputs)]

    def test_data_dict(self):
        # The data dictionary must list one continuous field per input
        # feature and a single categorical target field named 'class'.
        continuous_fields = self.pmml.findall("DataDictionary/DataField/[@optype='continuous']")
        categorical_field = self.pmml.findall("DataDictionary/DataField/[@optype='categorical']")
        self.assertEquals(len(continuous_fields), self.num_inputs, 'Correct number of continuous fields.')
        self.assertEquals(len(categorical_field), 1, 'Exactly one categorical field in data dictionary.')
        categorical_name = categorical_field[0].attrib.get('name', None)
        self.assertEquals(categorical_name, 'class', 'Correct target variable name.')
        output_values = categorical_field[0].findall('Value')
        self.assertEqual(len(output_values), self.num_outputs, 'Correct number of output values.')
        self.assertListEqual(
            [ov.attrib['value'] for ov in output_values],
            self.class_values
        )
        self.assertListEqual(
            [ov.attrib['name'] for ov in continuous_fields],
            self.features
        )

    def test_mining_schema(self):
        # The mining schema mirrors the data dictionary: one active field
        # per feature plus exactly one target field.
        target_field = self.pmml.findall("NeuralNetwork/MiningSchema/MiningField/[@usageType='target']")
        active_fields = self.pmml.findall("NeuralNetwork/MiningSchema/MiningField/[@usageType='active']")
        self.assertEquals(len(active_fields), self.num_inputs, 'Correct number of active fields.')
        self.assertEquals(len(target_field), 1, 'Exactly one target field in mining schema.')
        target_name = target_field[0].attrib.get('name', None)
        self.assertEquals(target_name, 'class', 'Correct target field name.')
        self.assertListEqual(
            [ov.attrib['name'] for ov in active_fields],
            self.features
        )

    def test_input(self):
        # Every neural input must reference one of the declared features.
        field_refs = self.pmml.findall("NeuralNetwork/NeuralInputs/NeuralInput/DerivedField/FieldRef")
        for fr in field_refs:
            if fr.attrib.get('field') not in self.features:
                self.fail('Field mapped to non-existing field.')

    def test_output(self):
        # One probability output field per class value.
        output_fields = self.pmml.findall("NeuralNetwork/Output/OutputField/[@feature='probability']")
        self.assertEqual(len(output_fields), self.num_outputs, 'Correct number of output fields.')
        self.assertListEqual(
            [of.attrib['name'] for of in output_fields],
            ['probability_{}'.format(v) for v in self.class_values]
        )

    def test_topology(self):
        # The PMML network must reproduce the Keras topology exactly:
        # neuron counts, layer count, per-connection weights and biases.
        neural_inputs = self.pmml.findall('NeuralNetwork/NeuralInputs/NeuralInput')
        neural_outputs = self.pmml.findall('NeuralNetwork/NeuralOutputs/NeuralOutput')
        neural_layers = self.pmml.findall('NeuralNetwork/NeuralLayer')
        self.assertEqual(len(neural_inputs), self.num_inputs, 'Correct number of input neurons.')
        self.assertEqual(len(neural_outputs), self.num_outputs, 'Correct number of output neurons.')
        self.assertEquals(len(neural_layers), self.num_connection_layers, 'Correct number of layers.')
        for i, l in enumerate(neural_layers):
            weights = self.model.layers[i].get_weights()[0]
            biases = self.model.layers[i].get_weights()[1]
            for j, n in enumerate(l.findall('Neuron')):
                self.assertListEqual(
                    [str(c.attrib['weight']) for c in n.findall('Con')], weights.astype(str)[:, j].tolist(),
                    'Verify correct weights and that is fully-connected from previous layer.'
                )
                self.assertEquals(n.attrib['bias'], biases.astype(str)[j])
| vaclavcadek/keras2pmml | tests/sequential.py | Python | mit | 4,863 | [
"NEURON"
] | 03c8d8ffab446f3e7a1776145e9032e5ae01afea5065ddba9632497520601538 |
#!/usr/bin/env python
#
# Copyright (c) 2009- Facebook
# Distributed under the Thrift Software License
#
# See accompanying file LICENSE or visit the Thrift site at:
# http://developers.facebook.com/thrift/
#
"""
This script can be used to make the output from
facebook::thrift::profile_print_info() more human-readable.
It translates each executable file name and address into the corresponding
source file name, line number, and function name. By default, it also
demangles C++ symbol names.
"""
import optparse
import os
import re
import subprocess
import sys
class AddressInfo(object):
    """
    A class to store information about a particular address in an object file.

    The source* and function attributes start out as None and are filled in
    later by translate_file_addresses().
    """
    def __init__(self, obj_file, address):
        self.objectFile = obj_file
        self.address = address
        self.sourceFile = None
        self.sourceLine = None
        # Bug fix: this attribute was misspelled 'funtion', so the
        # 'function' attribute assigned later by translate_file_addresses()
        # was never pre-initialized.
        self.function = None
g_addrs_by_filename = {}
def get_address(filename, address):
    """
    Retrieve an AddressInfo object for the specified object file and address.

    Keeps a global list of AddressInfo objects. Two calls to get_address()
    with the same filename and address will always return the same
    AddressInfo object.
    """
    global g_addrs_by_filename
    by_address = g_addrs_by_filename.setdefault(filename, {})
    addr_info = by_address.get(address)
    if addr_info is None:
        addr_info = AddressInfo(filename, address)
        by_address[address] = addr_info
    return addr_info
def translate_file_addresses(filename, addresses, options):
    """
    Use addr2line to look up information for the specified addresses.

    All of the addresses must belong to the same object file. Each
    AddressInfo in *addresses* is updated in place with sourceFile,
    sourceLine and (when options.printFunctions is set) function.
    """
    # Do nothing if we can't find the file
    if not os.path.isfile(filename):
        return
    args = ['addr2line']
    if options.printFunctions:
        # -f makes addr2line print the function name before file:line.
        args.append('-f')
    args.extend(['-e', filename])
    # One addr2line process resolves all addresses of this object file.
    proc = subprocess.Popen(args, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    for address in addresses:
        assert address.objectFile == filename
        proc.stdin.write(address.address + '\n')

        if options.printFunctions:
            function = proc.stdout.readline()
            function = function.strip()
            if not function:
                raise Exception('unexpected EOF from addr2line')
            address.function = function

        file_and_line = proc.stdout.readline()
        file_and_line = file_and_line.strip()
        if not file_and_line:
            raise Exception('unexpected EOF from addr2line')
        # addr2line prints "<file>:<line>"; split on the last colon so
        # Windows-style paths with drive letters still parse.
        idx = file_and_line.rfind(':')
        if idx < 0:
            msg = 'expected file and line number from addr2line; got %r' % \
                    (file_and_line,)
            msg += '\nfile=%r, address=%r' % (filename, address.address)
            raise Exception(msg)
        address.sourceFile = file_and_line[:idx]
        address.sourceLine = file_and_line[idx+1:]

    (remaining_out, cmd_err) = proc.communicate()
    retcode = proc.wait()
    if retcode != 0:
        raise subprocess.CalledProcessError(retcode, args)
def lookup_addresses(options):
    """
    Look up source file information for all of the addresses currently stored
    in the global list of AddressInfo objects.
    """
    global g_addrs_by_filename
    # One addr2line invocation per object file resolves all of its addresses.
    for (file, addresses) in g_addrs_by_filename.items():
        translate_file_addresses(file, addresses.values(), options)
class Entry(object):
    """
    An entry in the thrift profile output: a header line plus a backtrace
    of AddressInfo frames.
    """
    def __init__(self, header):
        self.header = header
        self.bt = []

    def addFrame(self, filename, address):
        # If libc was able to determine the symbol names, filename looks
        # like <filename>(<function>+<offset>); keep only the file part.
        paren = filename.rfind('(')
        if paren >= 0:
            filename = filename[:paren]
        self.bt.append(get_address(filename, address))

    def write(self, f, options):
        """Write the header and the resolved backtrace to file object *f*."""
        f.write(self.header)
        f.write('\n')
        for frame_num, address in enumerate(self.bt):
            f.write(' #%-2d %s:%s\n' % (frame_num, address.sourceFile,
                                        address.sourceLine))
            if options.printFunctions:
                if address.function:
                    f.write(' %s\n' % (address.function,))
                else:
                    f.write(' ??\n')
def process_file(in_file, out_file, options):
    """
    Read thrift profile output from the specified input file, and print
    prettier information on the output file.
    """
    #
    # A naive approach would be to read the input line by line,
    # and each time we come to a filename and address, pass it to addr2line
    # and print the resulting information. Unfortunately, addr2line can be
    # quite slow, especially with large executables.
    #
    # This approach is much faster. We read in all of the input, storing
    # the addresses in each file that need to be resolved. We then call
    # addr2line just once for each file. This is much faster than calling
    # addr2line once per address.
    #
    # Two header formats emitted by the thrift profiling code, plus the
    # backtrace-frame format "#<n> <object>(<sym>) [0x<addr>]".
    virt_call_regex = re.compile(r'^\s*T_VIRTUAL_CALL: (\d+) calls on (.*):$')
    gen_prot_regex = re.compile(
        r'^\s*T_GENERIC_PROTOCOL: (\d+) calls to (.*) with a (.*):$')
    bt_regex = re.compile(r'^\s*#(\d+)\s*(.*) \[(0x[0-9A-Za-z]+)\]$')
    # Parse all of the input, and store it as Entry objects
    entries = []
    current_entry = None
    while True:
        line = in_file.readline()
        if not line:
            break
        if line == '\n' or line.startswith('Thrift virtual call info:'):
            continue
        virt_call_match = virt_call_regex.match(line)
        if virt_call_match:
            num_calls = int(virt_call_match.group(1))
            type_name = virt_call_match.group(2)
            if options.cxxfilt:
                # Type names reported by typeid() are internal names.
                # By default, c++filt doesn't demangle internal type names.
                # (Some versions of c++filt have a "-t" option to enable this.
                # Other versions don't have this argument, but demangle type
                # names passed as an argument, but not on stdin.)
                #
                # If the output is being filtered through c++filt, prepend
                # "_Z" to the type name to make it look like an external name.
                type_name = '_Z' + type_name
            header = 'T_VIRTUAL_CALL: %d calls on "%s"' % \
                (num_calls, type_name)
            # A new header closes out the entry currently being accumulated.
            if current_entry is not None:
                entries.append(current_entry)
            current_entry = Entry(header)
            continue
        gen_prot_match = gen_prot_regex.match(line)
        if gen_prot_match:
            num_calls = int(gen_prot_match.group(1))
            type_name1 = gen_prot_match.group(2)
            type_name2 = gen_prot_match.group(3)
            if options.cxxfilt:
                # Same "_Z" trick as above, for both type names.
                type_name1 = '_Z' + type_name1
                type_name2 = '_Z' + type_name2
            header = 'T_GENERIC_PROTOCOL: %d calls to "%s" with a "%s"' % \
                (num_calls, type_name1, type_name2)
            if current_entry is not None:
                entries.append(current_entry)
            current_entry = Entry(header)
            continue
        bt_match = bt_regex.match(line)
        if bt_match:
            if current_entry is None:
                raise Exception('found backtrace frame before entry header')
            frame_num = int(bt_match.group(1))
            filename = bt_match.group(2)
            address = bt_match.group(3)
            current_entry.addFrame(filename, address)
            continue
        raise Exception('unexpected line in input: %r' % (line,))
    # Add the last entry we were processing to the list
    if current_entry is not None:
        entries.append(current_entry)
        current_entry = None
    # Look up all of the addresses (batched: one addr2line run per file)
    lookup_addresses(options)
    # Print out the entries, now that the information has been translated
    for entry in entries:
        entry.write(out_file, options)
        out_file.write('\n')
def start_cppfilt():
    """
    Fork and interpose c++filt between this process and the terminal.

    The child process returns from this function with its stdout redirected
    into a pipe; the parent replaces itself (execvp) with c++filt reading
    from that pipe, so everything the child prints gets demangled.
    """
    (read_pipe, write_pipe) = os.pipe()
    # Fork. Run c++filt in the parent process,
    # and then continue normal processing in the child.
    pid = os.fork()
    if pid == 0:
        # child: the write side of the pipe becomes our stdout
        os.dup2(write_pipe, sys.stdout.fileno())
        os.close(read_pipe)
        os.close(write_pipe)
        return
    else:
        # parent: the read side of the pipe becomes stdin for c++filt
        os.dup2(read_pipe, sys.stdin.fileno())
        os.close(read_pipe)
        os.close(write_pipe)
        cmd = ['c++filt']
        # Replace this process image; never returns on success.
        os.execvp(cmd[0], cmd)
def main(argv):
    """
    Parse command line options and translate the thrift profile dump read
    from the given file (or stdin) into human-readable output on stdout.

    Returns a non-zero exit code on usage errors, None (exit 0) otherwise.
    """
    parser = optparse.OptionParser(usage='%prog [options] [<file>]')
    parser.add_option('--no-functions', help='Don\'t print function names',
                      dest='printFunctions', action='store_false',
                      default=True)
    parser.add_option('--no-demangle',
                      help='Don\'t demangle C++ symbol names',
                      dest='cxxfilt', action='store_false',
                      default=True)
    (options, args) = parser.parse_args(argv[1:])

    num_args = len(args)
    if num_args == 0:
        in_file = sys.stdin
    elif num_args == 1:
        # BUG FIX: the input file is the first *remaining* argument, not
        # argv[1] (argv[1] may be an option such as --no-demangle).
        in_file = open(args[0], 'r')
    else:
        parser.print_usage(sys.stderr)
        # (Also replaces the Python-2-only "print >>" statement.)
        sys.stderr.write('trailing arguments: %s\n' % (' '.join(args[1:]),))
        return 1

    if options.cxxfilt:
        start_cppfilt()

    process_file(in_file, sys.stdout, options)
# Script entry point: exit status comes from main()'s return value.
if __name__ == '__main__':
    rc = main(sys.argv)
    sys.exit(rc)
| uber/fbthrift | thrift/contrib/parse_profiling.py | Python | apache-2.0 | 9,945 | [
"VisIt"
] | 4677203a7786469940a86d86aef0f755922a53df76ae5f23bb6beab948d812b4 |
# Copyright (C) 2015, 2016, 2017, 2018 Francisco Favela
# This file is part of isonav
# isonav is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# isonav is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from math import *
from loadingStuff import *
from isoParser import *
import sqlite3
# Connection to the isotope level database; isoDatadb comes from loadingStuff.
conn = sqlite3.connect(isoDatadb)
cursor = conn.cursor()
# Physical constants used throughout the module.
c=299792458 #speed of light, m/s
cfm=c*10**(15) #speed of light, fm/s
eCoef=931.4941 #amu -> MeV conversion coefficient
hc=1.23984193 #h*c in MeV-pm
hbc=197.33 #hbar*c in MeV-fm
alpha=1/137.036 #fine structure constant
electEMass=0.5109989461 # mass of the electron in MeV
N_a=6.022140857e23 #mol^-1, Avogadro constant
#utility functions for nuclear physics reactions
def checkDictIso(iso):
    """True when iso already has its excitation-level data loaded."""
    massNum, symbol = getIso(iso)
    # The per-isotope entry grows past length 1 once level data is appended.
    return len(iDict[symbol][1][massNum]) > 1
def getKey(pNum):
    """Element symbol for proton number pNum, or False when out of range."""
    if not 0 <= pNum < len(listStuff):
        return False
    return listStuff[pNum]
def getPnum(iso):
    """Proton number of iso; 0 for the 'None' pseudo-element, False if unknown."""
    massNum, symbol = getIso(iso)
    if symbol in ("None", "0None"):
        return 0
    if symbol not in listStuff:
        return False
    return listStuff.index(symbol)
def getNnum(iso):
    """Neutron count of iso: mass number minus proton number."""
    massNum, symbol = getIso(iso)
    return massNum - getPnum(symbol)
def getMass(iso):
    """Atomic mass of iso (amu) looked up from the isotope table."""
    massNum, symbol = getIso(iso)
    return iDict[symbol][1][massNum][0]
def getNameFromSymbol(s):
    """Full element name for symbol s, or False when the symbol is unknown."""
    return nameDict.get(s, False)
def printElemList():
    """Print every element symbol alongside its index (= proton number)."""
    for idx, sym in enumerate(listStuff):
        print(idx, sym)
##This functions should be somewhat equivalent to getCoef but I'll leave
##it for now
#Center of mass velocity stuff
def getVelcm(iso1, iso2, E1):
    """
    Center-of-mass velocities for projectile iso1 at lab energy E1 (MeV)
    hitting a stationary target iso2.  Returns (v1', v2', Vcm) in m/s.
    """
    m1 = getEMass(iso1)
    m2 = getEMass(iso2)
    v1 = sqrt(2.0 * E1 / m1) * c
    v2 = 0  # target assumed at rest
    Vcm = (1.0 * v1 * m1 + 1.0 * v2 * m2) / (m1 + m2)
    return v1 - Vcm, v2 - Vcm, Vcm
def getInEcms(iso1,iso2,E1L):
vels=getVelcm(iso1,iso2,E1L)
mE1=getEMass(iso1)
mE2=getEMass(iso2)
#Alternative way
# mu=mE1*mE2/(mE1+mE2)
# rVel=vels[0]-vels[1]
# print 1.0/2.0*mu*rVel**2
E1cm=0.5*(vels[0]/c)**2*mE1
E2cm=0.5*(vels[1]/c)**2*mE2
inEcmAvail=E1cm+E2cm
inEcmSys=0.5*(vels[2]/c)**2*(mE1+mE2)
return E1cm,E2cm,inEcmAvail,inEcmSys
def getOutEcms(iso1,iso2,isoE,isoR,E1L,exE):
E1cm,E2cm,inEcmAvail,inEcmSys=getInEcms(iso1,iso2,E1L)
mE1=getEMass(iso1)
mE2=getEMass(iso2)
mEE=getEMass(isoE)
mER=getEMass(isoR)
inMass=mE1+mE2
outMass=mEE+mER
outEcmSys=inEcmSys*(inMass/outMass)
Q=getQVal(mE1,mE2,mEE,mER)
outEcmAvail=inEcmSys*(1.0-1.0*(inMass/outMass))+inEcmAvail+Q-exE
if outEcmAvail < 0:
return -1,-1,-1,-1
EEcm,ERcm=getEcmsFromECM2(mEE,mER,outEcmAvail)
return EEcm,ERcm,outEcmAvail,outEcmSys
def getEcmsFromECM(iso1,iso2,ECM):
#For example, in a decay ECM=Q
m1=getEMass(iso1)
m2=getEMass(iso2)
mu=1.0*m1*m2/(m1+m2)
P=sqrt(2.0*mu*ECM)/c
E1=0.5*(P*c)**2/m1
E2=0.5*(P*c)**2/m2
return E1,E2
def getEcmsFromECM2(m1,m2,ECM):
#For example, in a decay ECM=Q
# m1=getEMass(iso1)
# m2=getEMass(iso2)
mu=1.0*m1*m2/(m1+m2)
P=sqrt(2.0*mu*ECM)/c
E1=0.5*(P*c)**2/m1
E2=0.5*(P*c)**2/m2
return E1,E2
def getAvailEnergy(iso1,iso2,isoEject,isoRes,E1L,E2L=0):
E1cm,E2cm,inEcmAvail,EcmSys=getInEcms(iso1,iso2,E1L)
Q=getIsoQVal(iso1,iso2,isoEject,isoRes)
return inEcmAvail+Q
#Just for testing
def getAllVs(iso1, iso2, isoE, isoR, E1L):
    """
    Compute the CM-frame speeds of the ejectile and residual for the
    reaction iso1 + iso2 -> isoE + isoR at lab energy E1L (MeV).

    Returns (vE, vR) in m/s.  Debugging helper: also prints the CM
    energies.  (The original computed vE/vR and silently discarded them.)
    """
    v1cm, v2cm, Vcm = getVelcm(iso1, iso2, E1L)
    EcmAvail = getAvailEnergy(iso1, iso2, isoE, isoR, E1L)
    ejectE, resE = getEcmsFromECM(isoE, isoR, EcmAvail)
    print(ejectE, resE)
    vE = sqrt(2.0 * ejectE / getEMass(isoE)) * c
    vR = sqrt(2.0 * resE / getEMass(isoR)) * c
    return vE, vR
############################################
def checkIsoExistence(iso1,iso2):
a1,key1=getIso(iso1)
a2,key2=getIso(iso2)
if key1 not in iDict or key2 not in iDict:
print("Error: keys have to be in the dictionary")
return False
if a1 not in iDict[key1][1] or a2 not in iDict[key2][1]:
print("Error: isotopes have to exist")
return False
return True
def checkIsoExist1(iso):
a,key=getIso(iso)
if key not in iDict:
# print "Error: keys have to be in the dictionary"
return False
return True
def nRadius(iso):
#In fermis
A,k=getIso(iso)
return 1.2*A**(1.0/3.0)
def mirror(iso):
    """
    Return the label of the mirror nucleus of iso (protons and neutrons
    exchanged), e.g. the mirror of 3He is 3H.
    """
    # NOTE: the apparent swap below is intentional -- the mirror nucleus
    # has Z equal to the original N and N equal to the original Z.
    pNumber=getNnum(iso)
    nNumber=getPnum(iso)
    ma=pNumber+nNumber
    mE=getKey(pNumber)
    # NOTE(review): if pNumber is out of range getKey returns False and the
    # label becomes e.g. "12False" -- presumably callers stay in range.
    isoM=str(ma)+str(mE)
    return isoM
def coulombE(iso1,iso2):
z1=getPnum(iso1)
z2=getPnum(iso2)
rMin=nRadius(iso1)+nRadius(iso2)
return z1*z2*alpha*hbc/rMin
def thresholdE(iso1,iso2,iso3,iso4):
mp=getMass(iso1)
mt=getMass(iso2)
mE=getMass(iso3)
mR=getMass(iso4)
Q=getQVal(mp,mt,mE,mR)*eCoef
if Q<=0:
Ethres=-Q*(mR+mE)/(mR+mE-mp)
else:
Ethres=0
return Ethres
def reaction(iso1,iso2, Ex=0.0):
    """
    Enumerate two-body exit channels for iso1 + iso2 (with optional
    entrance-channel excitation Ex, MeV).

    Walks over candidate (ejectile, residual) splits of the compound
    system's nucleons and returns a list of entries
    [ejectIso, resIso, Ethres, Q]; duplicates (same pair in either order)
    are skipped.  Returns False when the input isotopes are invalid.
    """
    #Think about meoizing
    a1,key1=getIso(iso1)
    a2,key2=getIso(iso2)
    isoExist=checkIsoExistence(iso1,iso2)
    amuEx=Ex/eCoef  # excitation converted from MeV to amu
    if not isoExist or isoExist=="Decay":
        return False
    aTot=a1+a2
    pTot=getPnum(key1)+getPnum(key2)
    nTot=aTot-pTot
    # Start from "everything in the residual, nothing ejected".
    aRes=aTot
    pRes=pTot
    aEject=0
    pEject=0
    aVal=aTot
    pVal=pTot
    initialMass=getMass(iso1)+getMass(iso2)+amuEx
    reactionList=[]
    rKey=getKey(pRes)
    eKey='None'
    maxLoop=1000#More than this and it should return
    iterator=0
    while True:
        if iterator>maxLoop:
            return reactionList
        iterator+=1
        #If dict loaded don't worry about this for now
        if not (rKey and eKey):
            pRes-=1
            pEject+=1
            continue
        #Ending of ignore block
        if aRes in iDict[rKey][1] and aEject in iDict[eKey][1]:
            #Maybe use getMass or getQval here?
            finalMass=iDict[eKey][1][aEject][0]+iDict[rKey][1][aRes][0]
            Q=(initialMass-finalMass)*eCoef
            ejectIso=str(aEject)+eKey
            resIso=str(aRes)+rKey
            if 'None' in [key1,key2,eKey,rKey]:
                Ethres='None'
            else:
                Ethres=thresholdE(iso1,iso2,ejectIso,resIso)
            #Getting rid of the annoying -0.0; there must be a better way
            if Ethres==0:
                Ethres=0.0
            newVal=[ejectIso,resIso,Ethres,Q]
            newValP=[resIso,ejectIso,Ethres,Q]#Avoiding repetition
            # Skip pairs already recorded in either order.
            fList=[v[0] for v in reactionList]
            slist=[v[1] for v in reactionList]
            if ejectIso not in fList and ejectIso not in slist:
                reactionList.append(newVal)
            # Shift one nucleon from residual to ejectile and retry.
            aRes-=1
            aEject+=1
        else:
            # Current (A, Z) split does not exist; move one proton over and
            # resynchronize the mass split.
            pRes-=1
            pEject+=1
            rKey=getKey(pRes)
            eKey=getKey(pEject)
            aRes=aTot-pEject
            aEject=pEject
            if not (rKey and eKey):
                #It appears to happen in big vs big nuclei
                continue
            # Advance until both isotopes exist in the table (or give up
            # once half of the protons have been transferred).
            while aRes not in iDict[rKey][1] or aEject not in iDict[eKey][1]:
                if pRes<=pTot/2-1:
                    return reactionList
                rKey=getKey(pRes)
                eKey=getKey(pEject)
                aRes-=1
                aEject+=1
                if iterator>maxLoop:
                    return reactionList
                iterator+=1
        # Refresh the keys for the next iteration (no-op when the proton
        # split did not change).
        rKey=getKey(pRes)
        eKey=getKey(pEject)
def nReaction(iso1,iso2,Ex=0.0):
ls=reaction(iso1,iso2,Ex=Ex)
if ls==[]:
print("Nuclei might be too big")
if ls==False:
print("An error ocurred")
return False
#Sort the list elements in terms of their
#Q value
ls.sort(key=lambda x: x[3],reverse=True)
return ls
#Not yet perfect
#Not any beta decays
def QDecay(iso1,Ex=0.0):
decayCand=nReaction(iso1,'0None',Ex=Ex)
if decayCand==False:
return False
decays=[val[0:2]+[val[3]] for val in decayCand if val[3]>0]
ndec=[]
for d in decays:
if d[0] == '0None' or d[1] == '0None':
continue
E1cm,E2cm=getEcmsFromECM(d[0],d[1],d[2])
d=[d[0],d[1],E1cm,E2cm,d[2]]
ndec.append(d)
return ndec
#Not very elegant for now (Calls QDecay) But it was a quick and easy
#solution ;) For proton and neutron emission do emit="1H" or emit="1n"
#Note: only for the base state for now.
def emitDecay(iso,emit="4He"):
qDecList=QDecay(iso)
for e in qDecList:
if emit in e[0:2]:
return e
#This is the more careful solution###
def emitDecay2(iso,emit="4He",num=1):
newIso=getNewIso(iso,emit,num)
if not newIso:
return False
QVal=emitDecayQVal(iso,emit,num)
if not QVal or QVal<0:
return False
nEmit=str(num)+"("+emit+")"
return [nEmit,newIso,QVal]
def emitDecayQVal(iso,emit="4He",num=1):
newIso=getNewIso(iso,emit,num)
if not newIso:
return False
isoEMass=getEMass(iso)
if not isoEMass:
return False
emitEMass=getEMass(emit)
if not emitEMass:
return False
newIsoEMass=getEMass(newIso)
if not newIsoEMass:
return False
QVal=getQVal(isoEMass,0,newIsoEMass,emitEMass*num)
return QVal
def getNewIso(iso, emit="4He", num=1):
    """
    Return the isotope label left after iso emits `num` particles of type
    `emit` (default alpha), or False when the result is unphysical or not
    present in the isotope table.
    """
    isoN = getNnum(iso)
    isoP = getPnum(iso)
    emitN = getNnum(emit)
    emitP = getPnum(emit)
    newIsoN = isoN - emitN * num
    newIsoP = isoP - emitP * num
    # BUG FIX: the original tested "newIsoP<=0 or newIsoP<=0" (the proton
    # count twice) and never looked at the neutron count.  Reject a
    # non-positive proton number or a negative neutron number; zero
    # neutrons stays valid (e.g. 1H).
    if newIsoP <= 0 or newIsoN < 0:
        return False
    newA = newIsoP + newIsoN
    newKey = getKey(newIsoP)
    if not newKey:
        return False
    newIso = str(newA) + newKey
    if not checkIsoExist1(newIso):
        return False
    return newIso
#Still working on this
# #Given an isotope, the ejectile nucleus, the Daughter and the available
# #energy (in CM, not Q), it returns all the possible combinations of
# #excitation modes.
# def xDecay(iso,isoE,isoD,ECM=0):
# if iso != getCompound(isoE,isoD):
# return False
# exList=[0,0,0,0]
# Q=getIsoQVal(iso,"0None",isoE,isoD,exList)
# Eavail=ECM+Q
# levsE=getPopLevels(isoE,Eavail)
# levsD=getPopLevels(isoR,Eavail)
#Prints out all the possible neg Q's
def QStable(iso1):
a1,key1=getIso(iso1)
decayCand=nReaction(iso1,'0None')
if decayCand==False:
return False
decays=[val for val in decayCand if val[3]<0]
return decays
def checkReaction(iso1, iso2, isoEject, isoRes):
    """
    Verify that iso1 + iso2 -> isoEject + isoRes is a known reaction.

    Returns the matching [ejectile, residual, Ethres, Q] entry produced by
    nReaction, or False when the reaction is invalid.
    """
    a1, key1 = getIso(iso1)
    a2, key2 = getIso(iso2)
    aEject, eject = getIso(isoEject)
    aRes, res = getIso(isoRes)
    # Making sure that the cases 'n' are '1n', 'p' is '1H', etc.
    if eject is None or res is None:
        print("Reaction is invalid")
        return False
    isoEject = str(aEject) + eject
    isoRes = str(aRes) + res
    if not checkIsoExistence(iso1, iso2):
        print("Entered first cond")
        return False
    if not checkIsoExistence(isoEject, isoRes):
        print("Entered second cond")
        return False
    # BUG FIX (cleanup): the original assigned reactionStuffa/b twice; the
    # first pair of assignments was dead code and has been removed.
    reactionStuffa = [isoEject, isoRes]
    reactionStuffb = [isoRes, isoEject]
    for ret in nReaction(iso1, iso2):
        # Compare only (ejectile, residual); either order matches.
        if reactionStuffa == ret[:2] or reactionStuffb == ret[:2]:
            return ret
    print("Reaction is invalid")
    return False
def sReaction(iso1,iso2,isoEject,isoRes,ELab=2.9,ang=30,exList=[0,0,0,0]):
a1,key1=getIso(iso1)
a2,key2=getIso(iso2)
aEject,eject=getIso(isoEject)
aRes,res=getIso(isoRes)
react=checkReaction(iso1,iso2,isoEject,isoRes)
if not checkArguments(ELab,react,eject,res):
return False
s1=analyticSol(iso1,iso2,isoEject,isoRes,ELab,angle=ang,exList=exList)
s2=analyticSol(iso1,iso2,isoRes,isoEject,ELab,angle=ang,exList=exList)
solution=[s1,s2]
return solution
#This is now deprecated
def checkSecSol(emp,emt,emE,emR,ELab):
Q=getQVal(emp,emt,emE,emR)
if Q<0:
Ethres=-Q*(emR+emE)/(emR+emE-emp)
Emax=-Q*emR/(emR-emp)
print("Ethres,Emax")
print(Ethres,Emax)
if Ethres<ELab<Emax:
print("Possible second solution")
thetaM=acos(sqrt(-(emR+emE)*(emR*Q+(emR-emp)*ELab)/(emp*emE*ELab)))
return thetaM
return False
#This is now deprecated
def solveNum(ang,vE,vR,Vcm,isoE,isoR,exList=[0,0,0,0]):
emE=getEMass(isoE)+exList[2]
emR=getEMass(isoR)+exList[3]
thEject=0
dTh=0.2
ang=radians(ang)
if ang>pi/2:
ang-=pi
tolerance=0.0001
while True:
thEject+=dTh
vEy=vE*sin(thEject)
vEz=vE*cos(thEject)
vRy=vR*sin(pi-thEject)
vRz=vR*cos(pi-thEject)
#They actually have to be zero
### deltaPy=(vEy*emE-vRy*emR)*1.0/c**2
### deltaPz=(vEz*emE+vRz*emR)*1.0/c**2
# print deltaPy,deltaPz
if (vEz+Vcm)==0 or (vRz+Vcm)==0:
print("No solution was found, div by zero")
print("#####################################################")
return False
thEjectLab=atan(1.0*vEy/(vEz+Vcm))
ELabEject=emE*(1.0*vEy**2+(vEz+Vcm)**2)/(2*c**2)
theResLab=atan(1.0*vRy/(vRz+Vcm))
ELabResid=emR*(1.0*vRy**2+(vRz+Vcm)**2)/(2*c**2)
diff=ang-thEjectLab
if abs(diff)<tolerance:
break
if dTh>0 and diff<0 or dTh<0 and diff>0:
dTh *= -1.0/2
if thEject>=pi:
# print "No solution was found"
# print "#####################################################"
return False
return [degrees(thEjectLab),ELabEject,degrees(theResLab),\
ELabResid]
def xTremeTest(iso1,iso2,E=10,ang=30):
reactions=nReaction(iso1,iso2)
l=[]
for e in reactions:
if 'None' in e:
continue
isoEject=e[0]
isoRes=e[1]
react1,react2=sReaction(iso1,iso2,isoEject,isoRes,E,ang)
if react1[0]==[False,False,False,False]:
break
firstSols=[react1[0],react2[0]]
secSols=[react1[1],react2[1]]
l.append([e,firstSols,secSols])
return l
#returns the corresponding fused element, along with the max populated
#level and the corresponding remaining energy
def fussionCase(iso1,iso2,E1L,E2L=0):
isof=getCompound(iso1,iso2)
if isof==False:
return False
Q=getIsoQVal(iso1,iso2,"0None",isof)
E1cm,E2cm,Ecm,EcmSys=getInEcms(iso1,iso2,E1L)
ETotcm=Q+Ecm
maxLev,maxLE=getCorrespLevE(isof,ETotcm)
rKEcm=ETotcm-maxLE #residual KE
vDump,vDump,Vcm=getVcms(iso1,iso2,iso1,iso2,E1L)
EcmSys=0.5*(Vcm/c)**2*(getEMass(iso1)+getEMass(iso2))
rKE=rKEcm+EcmSys
return isof,maxLev,maxLE,rKE
def getCompound(iso1, iso2):
    """Label of the compound nucleus formed by fusing iso1 and iso2, or False."""
    a1, k1 = getIso(iso1)
    a2, k2 = getIso(iso2)
    fusedP = getPnum(iso1) + getPnum(iso2)
    fusedA = a1 + a2
    fusedKey = getKey(fusedP)
    if fusedKey is False:
        return False
    fusedIso = str(fusedA) + fusedKey
    # A compound with no valid proton number is rejected.
    return fusedIso if getPnum(fusedIso) else False
def getCorrespLevE(iso,E):
aVal,eName=getIso(iso)
getMoreData(iso)
if not checkDictIso(iso):
return False
lev,lEMax=0,0
for e in iDict[eName][1][aVal][1]:
lE=iDict[eName][1][aVal][1][e][0]
if lE >= E:
lev,lEMax=e-1,iDict[eName][1][aVal][1][e-1][0]
break
if E>0 and lev==0:
print("#Energy over max level in db")
lev,lEMax=e,iDict[eName][1][aVal][1][e][0]
return lev,lEMax
def getLevelE(iso1,level):
A,k=getIso(iso1)
getMoreData(iso1)
if not checkDictIso(iso1):
return 0
return iDict[k][1][A][1][level][0]
def getAllLevels(iso):
A,k=getIso(iso)
getMoreData(iso)
if not checkDictIso(iso):
return 0
lList=[]
for level in iDict[k][1][A][1]:
lList.append([level,iDict[k][1][A][1][level][0]])
return lList
def getPopLevels(iso1,aE):
levels=[]
iso,eName=getIso(iso1)
getMoreData(iso1)
if not checkDictIso(iso1):
return [1]
for e in iDict[eName][1][iso][1]:
lE=iDict[eName][1][iso][1][e][0]
if lE>aE:
return levels
levels.append([e,lE])
return levels
#If the excitation data is needed then this loads it.
def getMoreData(iso,xFile=None):
    """
    Lazily load excitation-level data for iso into iDict.

    By default levels are read from the sqlite isoLevels table via the
    module-level cursor; when xFile is given, one level energy per line is
    read from that file instead.  Does nothing when the data is already
    loaded (entry length >= 2).
    """
    #Careful with neutrons and Nones
    A,k=getIso(iso)
    levDict={}
    if len(iDict[k][1][A])<2:
        if xFile == None:
            t=(iso,)
            cursor.execute('SELECT levNum,xEnergy,extra FROM isoLevels WHERE iso=?', t)
            #Creating subDictionary: level number -> [energy, extra-list]
            for exData in cursor.fetchall():
                if int(exData[0]) not in levDict:
                    levDict[exData[0]]=[float(exData[1]),myString2List(exData[2])]
            iDict[k][1][A].append(levDict)
        else:
            # File mode: line b (0-based) holds the energy of level b+1.
            with open(xFile) as myFileObj:
                lineLst=myFileObj.readlines()
                for a,b in zip(lineLst,range(len(lineLst))):
                    levDict[b+1]=[float(a),[]]
                iDict[k][1][A].append(levDict)
#This is now deprecated
def getCoef(iso1,iso2,isoE,isoR,ELab,exList=[0,0,0,0]):
emp,emt,emE,emR=getAllEMasses(iso1,iso2,isoE,isoR,exList)
Q=getQVal(emp,emt,emE,emR)
# Pi=sqrt(2*emp*ELab)/c
# Vcm=Pi*c**2/(emp+emt)
# EcmSys=(Pi*c)**2/(2.0*(emp+emt))
v1=sqrt(2.0*ELab/emp)*c
v2=0 #For future improvement
Vcm=(1.0*emp*v1+1.0*emt*v2)/(emp+emt)
EcmSys=0.5*(Vcm/c)**2*(emp+emt)
#Available E in b4 collision
Edisp=ELab-EcmSys
Ef=Edisp+Q
if Ef<0:
print("Inside getCoef Ef = ", Ef)
print("Not enough energy for reaction")
return False,False,Vcm,Ef
#Final momentum, in cm.
muf=1.0*emE*emR/(emE+emR)
Pf=sqrt(2.0*Ef*muf)/c
vE=1.0*Pf*c**2/emE
vR=1.0*Pf*c**2/emR
return vE,vR,Vcm,Ef
def getEMass(iso1):
    """
    Energy-mass of iso1 in MeV (table mass in amu times eCoef), or False
    when the label is not a recognized isotope or shorthand (p, d, t, a).
    """
    if iso1 == "n":
        iso1 = "1n"
    A, k = getIso(iso1)
    known = {entry[0] for entry in getIsotopes(iso1)}
    known.update(("p", "d", "t", "a"))
    if iso1 not in known:
        return False
    return iDict[k][1][A][0] * eCoef
#Still work to be done, assuming the nucleus only gets increased mass
#when the reaction occurs (no fission or gammas for now)
def exLevReact(ang,iso1,iso2,isoEject,isoRes,E1L,E2L,eVal=1):
if eVal==1:
isoEX1=isoRes
else:
isoEX1=isoEject
Edisp=getAvailEnergy(iso1,iso2,isoEject,isoRes,E1L,E2L)
popLevels=getPopLevels(isoEX1,Edisp)
if len(popLevels)<=1:
popLevels=[[1,0.0]]
levList=[]
#For sending the mass excitations into getCoef
exList=[0,0,0,0]
for e in popLevels:
# print e
if e[1] == False and e[0] != 1:
#print("#Entered false for e[1] en exLevReact")
continue
if eVal==1:
exList[3]=e[1]
else:
exList[2]=e[1]
# numSol=getEsAndAngs(ang,iso1,iso2,isoEject,isoRes,ELab,E2L=0,\
# exList=exList)
numSol1,numSol2=analyticSol(iso1,iso2,isoEject,isoRes,\
E1L,E2L,angle=ang,exList=exList)
if numSol1[0]==False:
break
levList.append([e,[numSol1,numSol2]])
return levList
def getQVal(m1, m2, m3, m4):
    """Reaction Q value: entrance-channel masses minus exit-channel masses."""
    return m1 + m2 - m3 - m4
def getIsoQVal(iso1,iso2,iso3,iso4,exList=[0,0,0,0]):
if not checkReaction(iso1,iso2,iso3,iso4):
return False
m1=getEMass(iso1)+exList[0]#Adding mass excitations
m2=getEMass(iso2)+exList[1]
m3=getEMass(iso3)+exList[2]
m4=getEMass(iso4)+exList[3]
Q=(m1+m2-m3-m4)
return Q
def getIsoQValAMU(iso1,iso2,iso3,iso4):
return getIsoQVal(iso1,iso2,iso3,iso4)/eCoef
def iso2String(k, iso, eVal=''):
    """Build an isotope label: optional prefix, mass number, element symbol."""
    return '%s%s%s' % (eVal, iso, k)
def xReaction(iso1, iso2, isoEject, isoRes, ELab=2.9, ang=30, xf1=None, xf2=None):
    """
    Tabulate the kinematics of iso1 + iso2 -> isoEject + isoRes at lab
    energy ELab (MeV) and lab angle ang (degrees), stepping through the
    accessible excited states of each outgoing particle in turn.

    xf1/xf2 optionally name level files for the ejectile/residual.
    Returns a list of [labels, levelData] pairs, one per detection case
    ('*' marks the excited partner), or False on invalid arguments.
    """
    a1, key1 = getIso(iso1)
    a2, key2 = getIso(iso2)
    aEject, eject = getIso(isoEject)
    aRes, res = getIso(isoRes)
    react = checkReaction(iso1, iso2, isoEject, isoRes)
    if not checkArguments(ELab, react, eject, res):
        return False
    lL = []
    E1L = ELab
    E2L = 0  # target at rest for now
    if xf1 is not None:
        getMoreData(isoEject, xf1)
    if xf2 is not None:
        getMoreData(isoRes, xf2)
    # BUG FIX (cleanup): the original used a local named "c" for these label
    # pairs, shadowing the module-level speed-of-light constant, and kept an
    # unused local Q; both were removed.
    labels = [iso2String(eject, aEject, '*'), iso2String(res, aRes, '')]
    lL.append([labels, exLevReact(ang, iso1, iso2, isoEject, isoRes, E1L, E2L, 0)])
    labels = [iso2String(eject, aEject, ''), iso2String(res, aRes, '*')]
    lL.append([labels, exLevReact(ang, iso1, iso2, isoEject, isoRes, E1L, E2L, 1)])
    labels = [iso2String(res, aRes, '*'), iso2String(eject, aEject, '')]
    lL.append([labels, exLevReact(ang, iso1, iso2, isoRes, isoEject, E1L, E2L, 0)])
    labels = [iso2String(res, aRes, ''), iso2String(eject, aEject, '*')]
    lL.append([labels, exLevReact(ang, iso1, iso2, isoRes, isoEject, E1L, E2L, 1)])
    return lL
def xXTremeTest(iso1,iso2,E=10,ang=30):
reactions=nReaction(iso1,iso2)
rStuff=[]
for e in reactions:
if 'None' in e:
continue
isoEject=e[0]
isoRes=e[1]
reactL=xReaction(iso1,iso2,isoEject,isoRes,E,ang)
react=[]
for lr in reactL:
exitReact=lr[0]
for info in lr[1:]:
firstSolEs=[[val[0],val[1][0]] for val in info]
secSol=[[val[0],val[1][1]] for val in info]
react.append([exitReact,firstSolEs,secSol])
#Is this meaningful?
if react==False:
break
rStuff.append([e,react])
return rStuff
def checkArguments(ELab, react, eject, res):
    """Validate the common reaction arguments; True when they are usable."""
    if ELab <= 0:
        print("Lab energy has to be positive")
        return False
    if not react:
        return False
    if 'None' in (eject, res):
        print("Reaction must have at least 2 final elements")
        return False
    return True
def getAllEMasses(iso1, iso2, isoEject, isoRes, exList=[0, 0, 0, 0]):
    """
    Energy-masses (MeV) of the four reaction partners, each shifted by the
    matching excitation energy in exList.
    """
    partners = (iso1, iso2, isoEject, isoRes)
    emp, emt, emE, emR = [getEMass(p) + ex for p, ex in zip(partners, exList)]
    return emp, emt, emE, emR
#Given an energy, beam energy, angle, a list of reactions and a
#tolerance it returns values to hint where it might be from
def fReact(E,bE,angle,rList,tol=140):
for iR in rList:
print("######################")
print(iR)
print("######################")
#Need to be upgraded for second sol from xXtremeTest
XXList=xXTremeTest(iR[0],iR[1],bE,angle)
# pXXTremeTest(XXList)
pFReact(E,tol,XXList)
def pFReact(E,tol,XXList):
for e in XXList:
for ee in e[1]:
for states in ee[1]:
if states[1]==False:
continue
if abs(states[1][1]-E)<=tol:
print(e[0],ee[0],states)
def findOE(Eang,ang,iso1,iso2):
E=Eang
Emax=2.0*Eang
dE=0.01
tolerance=0.00001
while True:
sRList=sReaction(iso1,iso2,iso1,iso2,E,ang)
sR=sRList[0]
diff=Eang-sR[0][1]
if abs(diff)<tolerance:
break
if dE>0 and diff<0 or dE<0 and diff>0:
dE*=-1.0/2
if E>Emax:
return False
E+=dE
return E
#It prints the CS in mb
def rutherford0(iso1,iso2,Ecm,theta):
theta=radians(theta)
z1=getPnum(iso1)
z2=getPnum(iso2)
dSigma=(z1*z2*alpha*hbc/(4*Ecm))**2/sin(theta/2)**4
# converting to mb
dSigma*=10
return dSigma
def rutherfordLab0(iso1,iso2,ELab,thetaL):
""" Returns the rutherford value in the lab frame"""
Ecm=getInEcms(iso1,iso2,ELab)[2] #Taking the 3rd argument
K=getMass(iso1)/getMass(iso2)
#see m. cottereau and f. lefebvres recuel de problemes...
thetaCM=solveAng(thetaL,K)
dSigmaL=rutherford0(iso1,iso2,Ecm,thetaCM)*\
(1+K**2+2*K*cos(thetaCM))**(3.0/2.0)/(1+K*cos(thetaCM))
return dSigmaL
def solveAng(thetaL, ratio, direction="f"):
    """
    Convert a lab angle (degrees) to the CM angle (degrees).

    Solves tan(thetaL) = sin(thetaCM) / (cos(thetaCM) + ratio) with a
    damped stepping search.  direction="f" walks forward from 0,
    "b" walks backward from pi.  Returns False when no solution exists.
    """
    thetaL = radians(thetaL)
    tgThetaL = tan(thetaL)
    # "f" is for the forward solution, "b" for the backward one.
    if direction == "f":
        thetaCM = 0
        dTh = 0.05
        sign = 1
    else:
        thetaCM = pi
        dTh = -0.05
        sign = -1

    def myFunct(thetaCM, ratio):
        # Lab-frame tangent produced by a trial CM angle.
        return sin(thetaCM) / (cos(thetaCM) + ratio)

    tolerance = 0.0001
    while True:
        fVal = myFunct(thetaCM, ratio)
        diff = sign * (tgThetaL - fVal)
        if abs(diff) < tolerance:
            break
        # Overshot: reverse direction and halve the step (bisection-style).
        if dTh > 0 and diff < 0 or dTh < 0 and diff > 0:
            dTh *= -1.0 / 2
        if sign == 1 and thetaCM >= pi or sign == -1 and thetaCM < 0:
            # No solution was found within the search range.
            return False
        thetaCM += dTh
    # BUG FIX (cleanup): the original recomputed thetaL just before
    # returning; that assignment was dead and has been removed.
    return degrees(thetaCM)
def getAngs(iso1,iso2,isoE,isoR,E1L,exList,thetaL):
vE,vR,Vcm,Ef=getCoef(iso1,iso2,isoE,isoR,E1L,exList)
r=1.0*vE/Vcm
ratio=1.0/r
thetaCMf=solveAng(thetaL,ratio,"f")
# For excited states it stays in this function
#Commenting it for now
# thetaCMb=solveAng(thetaL,ratio,"b")
thetaCMb=False
#No need to convert to radians in this case
return thetaCMf,thetaCMb
#This is now deprecated
def getEsAndAngs(iso1,iso2,isoE,isoR,E1L,E2L=0,thetaL=0,\
exList=[0,0,0,0],direction="f"):
angMax=getMaxAng(iso1,iso2,isoE,isoR,E1L,E2L,exList)[0]
#Keeping angles in degrees
if thetaL>angMax:
print("Angle is too big, no solution found")
return [False,False,False,False]
#Getting the coefficients
vE,vR,Vcm,Ef=getCoef(iso1,iso2,isoE,isoR,E1L,exList)
#Getting the CM angles
angs=getAngs(iso1,iso2,isoE,isoR,E1L,exList,thetaL)
if direction=="f":
thEjectCM=angs[0]
else:
thEjectCM=angs[1]
thEjectCM=radians(thEjectCM)
theResCM=pi-thEjectCM
emE=getEMass(isoE)+exList[2]
emR=getEMass(isoR)+exList[3]
vEy=vE*sin(thEjectCM)
vEz=vE*cos(thEjectCM)
vRy=vR*sin(theResCM)
vRz=vR*cos(theResCM)
thEjectLab=atan(vEy/(vEz+Vcm))
ELabEject=emE*(vEy**2+(vEz+Vcm)**2)/(2*c**2)
theResLab=atan(vRy/(vRz+Vcm))
ELabResid=emR*(vRy**2+(vRz+Vcm)**2)/(2*c**2)
return [degrees(thEjectLab),ELabEject,degrees(theResLab),\
ELabResid]
def getMaxAng(iso1,iso2,isoE,isoR,E1L,E2L=0,exList=[0,0,0,0]):
emp,emt,emE,emR=getAllEMasses(iso1,iso2,isoE,isoR,exList)
# v1=sqrt(2.0*E1L/emp)
# v2=0 #Zero for now
vE,vR,Vcm,Ef=getCoef(iso1,iso2,isoE,isoR,E1L,exList)
if vE==False:
print("Not enough energy to get angle")
return False
r1=1.0*vE/Vcm
r2=1.0*vR/Vcm
if r1>=1:
maxAng1=pi
else:
maxAng1=atan2(r1,sqrt(1.0-r1**2))
if r2>=1:
maxAng2=pi
else:
maxAng2=atan2(r2,sqrt(1.0-r2**2))
return [degrees(maxAng1),degrees(maxAng2)]
def nEvents(Ni, aDens, dSigma, dOmega):
    """Expected counts: beam particles x areal density x cross-section x solid angle."""
    return Ni * aDens * dSigma * dOmega
def getdOmega(r, R):
    """Solid angle subtended by a circular aperture of radius r at distance R."""
    return pi * (r / R) ** 2
#Converts current into # of charges
def current2Part(current):
    """Convert a beam current in microamperes to elementary charges per second."""
    chargesPerCoulomb = 6.2415093E18
    return chargesPerCoulomb * current * 10 ** (-6)
#Gets the product of #Projectiles*#Targets
#in part/mb
def getT(ps,ts,E,angle,Nr,dOmega):
return 1.0*Nr/(rutherfordLab0(ps,ts,E,angle)*dOmega)
def getdSigma(Nn, dOmega, T):
    """Differential cross-section from counts Nn, solid angle, and beam-target factor T."""
    return 1.0 * Nn / (dOmega * T)
def getdSigma2(pIso,tIso,Nruth,Nnucl,ELab,angle):
return 1.0*Nnucl/Nruth*rutherfordLab0(pIso,tIso,ELab,angle)
#Returns density in part/cm**2, T increases with time as well as nPart
#so time cancels out, just put the average current, and remember that
#there are 5mm collimators and that not all of the original beam gets to
#the jet.
def getDensityIncmSquare(T,current):
#Current in micro Amperes
nPart=current2Part(current)
mBarn2cm2=1E-27
return T/(mBarn2cm2*nPart)
#Binding Energy
def getBE(iso):
# iso=str(A)+s
A,k=getIso(iso)
z=getPnum(iso)
em=getEMass(iso)
#proton mass
pm=getEMass("1H")
#neutron mass
nm=getEMass("1n")
return em-z*pm-(A-z)*nm
#Binding Energy per nucleon
def getBEperNucleon(iso):
A,k=getIso(iso)
return 1.0*getBE(iso)/A
#Using the liquid drop model for the binding energy
#Values taken from A. Das and T. Ferbel book
def getLDBE(iso,a1=15.6,a2=16.8,a3=0.72,a4=23.3,a5=34):
#All the coefficients are in MeV
A,s=getIso(iso)
Z=getPnum(s)
N=getNnum(iso)
if N%2==0 and Z%2==0:#Even even case
a5*=-1 #Greater stability
elif (A%2)==1:#Odd even case
a5=0
BE=-a1*A+a2*A**(2.0/3.0)+a3*Z**2/A**(1.0/3.0)+a4*(N-Z)**2/A+a5*A**(-3.0/4.0)
return BE
#Binding energy per nucleon using LD
def getLDBEperNucleon(iso):
A,s=getIso(iso)
return 1.0*getLDBE(iso)/A
#Using the LD model to get the eMass
def getLDEMass(iso):
A,s=getIso(iso)
Z=getPnum(iso)
#proton mass
pm=getEMass("1H")
#neutron mass
nm=getEMass("n")
return Z*pm+(A-Z)*nm+getLDBE(iso)
#Using the LD model to get the mass
def getLDMass(iso):
return 1.0*getLDEMass(iso)/eCoef
#de Broglie wavelength in angstrom
def deBroglie(iso,E):
# iso=str(A)+element
em=getEMass(iso)
p=sqrt(2.0*em*E) #a "c" from here goes to the hc
return hc/p/100 # 1/100 to convert to angstrom
#reduced de Broglie wavelength in angstrom
def reducedDeBroglie(iso,E):
return deBroglie(iso,E)/(2.0*pi)
#Compton wavelength
def comptonW(iso):
em=getEMass(iso)
return hc/em*1000 #*1000 to convert to fm
#Reduced Compton wavelength
def rComptonW(iso):
em=getEMass(iso)
return hbc/em
#Hard sphere classical total CS
#All this was taken from Griffiths
def hardSphereCTCS(iso):
a=nRadius(iso)
return pi*a**2/100 #1/100 barn conversion.
#Hard sphere quantum total CS
#Note; this is an approximation from an expansion.
def hardSphereQTCS(iso):
a=nRadius(iso)
return 4*pi*a**2/100 #1/100 barn conversion.
#soft sphere differential CS
def softSphereDCS(isop,isot,V0=50):
a=nRadius(isot)
# iso=str(ap)+sp
em=getEMass(isop)
return (2*em*V0*a**3/(3*hbc**2))**2
#soft sphere total CS
def softSphereTCS(isop,isot,V0=50):
return 4*pi*softSphereDCS(isop,isot,V0)
#soft sphere using the second Born approximation
def softSphereDSBorn(isop,isot,V0=50):
a=nRadius(isot)
# iso=str(ap)+sp
em=getEMass(isop)
firstC=2*em*V0*a**3/(3*hbc**2)
secondC=1-4*em*V0*a**2/(5*hbc**2)
return (firstC*secondC)**2
#soft sphere using the second Born approximation for total CS
def softSphereTSBorn(isop,isot,V0=50):
return 4*pi*softSphereDSBorn(isop,isot,V0)
#Using the Yukawa potential
def yukawaDCS(isop,isot,E,theta,beta,mu):
# iso=str(ap)+sp
eMass=getEMass(isop)
theta=radians(theta)
k=sqrt(2*eMass*E/hbc)
kappa=2*k*sin(theta/2)
return (-2*eMass*beta/(hbc**2*(mu**2+kappa**2)))**2
#Getting the total CS for the Yukawa potential, Griffiths 11.12 Note;
#this is still in testing
def yukawaTCS(isop,isot,E,theta,beta,mu):
# iso=str(ap)+sp
eMass=getEMass(isop)
theta=radians(theta)
k=sqrt(2*eMass*E/hbc)
kappa=2*k*sin(theta/2)
return pi*(4*eMass*beta/(mu*hbc))**2/((mu*kappa)**2+8*eMass*E)
#Using krane pg 248 eq 8.72
def getTAlpha(radIso):
A,k=getIso(radIso)
daughterIso=str(A-4)+getKey(getPnum(k)-2)
# print daughterIso
Q=getIsoQVal('0None',radIso,'4He',daughterIso)
TAlpha=Q*(1.0-4.0/A)
return TAlpha
#Using gamow factor according to krane eq. 8.17
def gamowAlpha(iso1):
    """Gamow factor G for alpha emission from iso1 (Krane eq. 8.17).

    Returns the string 'None' (the module-wide sentinel) when iso1 has
    no alpha decay entry.
    """
    isoEject = "4He"
    decay = findDecay(iso1, isoEject)
    if decay != 'None':
        Q = decay[2]  # decay Q value (MeV)
    else:
        return 'None'
    B = getB(iso1, isoEject)  # Coulomb barrier height
    em = getEMass(isoEject)  # Most probably alpha part mass
    z1 = getPnum(iso1)
    z2 = getPnum(isoEject)
    x = 1.0 * Q / B
    # Both equations should give the same... but they don't!!
    # See Krane pg 253, eq. 8.16 -- the small-x expansion is used here.
    G = sqrt(2 * em / Q) * alpha * z1 * z2 * (pi / 2 - 2 * sqrt(x))
    # Exact form kept for reference:
    # G=sqrt(2*em/Q)*alpha*z1*z2*(acos(x)-sqrt(x*(1-x)))
    return G
#Gets the half life using the Gamow factor. It sometimes matches
#experimental vals and sometimes it is way off! TODO; add the option to
#change the QVal for example. Also include the hbc*l*(l+1)/emR**2 in the
#energy. And the possibility to change V0.
def gamowHL(iso1):
    """Half-life estimate for alpha decay using the Gamow factor.

    It sometimes matches experimental values and sometimes it is way
    off!  TODO: add the option to change the Q value, include the
    hbc*l*(l+1)/emR**2 centrifugal term in the energy, and allow
    changing V0.  Returns 'None' when no alpha decay entry exists.
    """
    isoEject = "4He"
    a1, s1 = getIso(iso1)
    decay = findDecay(iso1, isoEject)
    if decay != 'None':
        Q = decay[2]
    else:
        return 'None'
    ln2 = 0.693
    a = nRadius(iso1)
    V0 = 35  # well depth in MeV (was 50)
    em = getEMass(iso1)
    G = gamowAlpha(iso1)
    # t_1/2 = ln2 / (assault frequency * tunneling probability e^{-2G}).
    tHalf = ln2 * a / cfm * sqrt(em / (V0 + Q)) * e ** (2 * G)
    return tHalf
def findDecay(iso1, ejectIso):
    """Return the decay entry of iso1 that involves ejectIso.

    Returns the string 'None' when no matching decay exists (callers in
    this module compare against that sentinel, so it is kept as-is).
    """
    for entry in QDecay(iso1):
        if ejectIso in entry:
            return entry
    return 'None'
#For alpha decay is the barrier penetration energy for decay (in MeV),
#normally alpha
def getB(iso1, isoEject):
    """Coulomb barrier height (MeV) seen by isoEject leaving iso1 --
    for alpha decay this is the barrier penetration energy."""
    radius = nRadius(iso1)
    chargeProduct = getPnum(iso1) * getPnum(isoEject)
    return alpha * hbc * chargeProduct / radius
#This is still in testing
def stoppingPowerD(iso1, iso2, E, I):
    """Differential stopping power of projectile iso1 in medium iso2.

    I is the mean ionization potential.  This is still in testing.
    Returns dE/dx in MeV/cm (negative: energy loss).
    """
    z1 = getPnum(iso1)
    z2 = getPnum(iso2)
    A = getMass(iso2)
    # In MeV/cm
    return -z1 ** 2 * z2 * log(2195 * E / I) / (A * E)
#This is also still in testing
def stoppingPowerI(iso1, iso2, E, I, L):
    """Integrate the stopping power over a thickness L and return the
    remaining energy.

    L is given in microns (10**-4 cm).  This is also still in testing.
    """
    x = 0
    L = L * 10 ** (-4)  # microns -> cm
    dx = L / 10
    # BUG FIX: the loop used to read 'while x < L or E <= 0', which
    # never terminates once E drops to or below zero (x keeps growing
    # past L while the 'E <= 0' clause stays true).  Integrate until
    # the thickness is crossed or the particle has no energy left.
    while x < L and E > 0:
        E += stoppingPowerD(iso1, iso2, E, I) * dx
        x += dx
    return E
#########################################################################
###### Testing analytic #################################################
#########################################################################
#def getVcms(v1L,v2L,m1,m2):
def getVcms(iso1, iso2, isoEject, isoRes, E1L, E2L=0, exList=[0, 0, 0, 0]):
    """Return (vEcm, vRcm, outVcmSys): the CM-frame speeds of the
    ejectile and residual plus the outgoing system CM speed.

    Returns (False, False, False) when the reaction is energetically
    forbidden.  exList[2] + exList[3] are the excitation energies of
    the outgoing pair.  NOTE: the mutable default exList is shared
    between calls; it is only read here, never mutated.
    """
    mE = getEMass(isoEject)
    mR = getEMass(isoRes)
    exE = exList[2] + exList[3]
    EEcm, ERcm, outEcmAvail, outEcmSys = getOutEcms(iso1, iso2, isoEject,
                                                    isoRes, E1L, exE)
    # FIX: this guard used to appear twice (once more after computing
    # outVcmSys); the second copy was unreachable and has been removed.
    if outEcmAvail <= 0:
        return False, False, False
    outVcmSys = sqrt(2.0 * outEcmSys / (mE + mR)) * c
    vEcm, vRcm = getVcmsFromEcm(isoEject, isoRes, outEcmAvail)
    return vEcm, vRcm, outVcmSys
def getVcmsFromEcm(iso1, iso2, Ecm):
    """Split a total CM energy Ecm between two particles and return
    their CM speeds; (False, False) when Ecm is not positive."""
    mass1 = getEMass(iso1)
    mass2 = getEMass(iso2)
    if Ecm <= 0:
        return False, False
    # Momentum conservation fixes the speed ratio m1*v1 = m2*v2.
    speed1 = sqrt(2.0 * Ecm / (mass1 * (1 + 1.0 * mass1 / mass2))) * c
    speed2 = (1.0 * mass1) / mass2 * speed1
    return speed1, speed2
def getEFromV(iso, v, xMass=0):
    """Non-relativistic kinetic energy of iso moving at speed v;
    xMass is an optional mass offset added to the isotope mass."""
    totalMass = getEMass(iso) + xMass
    beta = v / c
    return 0.5 * totalMass * beta ** 2
#Testing the non numeric solution
def analyticSol(iso1, iso2, isoEject, isoRes, E1L, E2L=0, angle=0,
                exList=[0, 0, 0, 0]):
    """Analytic (non-numeric) two-body kinematics solution.

    Returns [sol1, sol2] where each solution is
    [labAngleA (deg), Ea, labAngleB (deg), Eb]; sol2 is [] when only
    one kinematic solution exists.  Returns "NaN" when the maximum
    angle is undefined and [[False]*4, []] when the reaction is
    forbidden or the requested angle exceeds the maximum.  Testing.
    """
    vEcm, vRcm, Vcm = getVcms(iso1, iso2, isoEject, isoRes,
                              E1L, E2L, exList)  # the in CMs
    if vEcm == False:
        return [[False, False, False, False], []]
    # This part should be updated to out vals etc
    maxAng = getMaxAngles(iso1, iso2, isoEject, isoRes, E1L, E2L, exList)[0]
    if maxAng == "NaN":
        return "NaN"
    if angle >= maxAng:
        return [[False, False, False, False], []]
    sol1, sol2 = analyticDetails(vEcm, vRcm, Vcm, angle, isoEject, isoRes)
    angLA1, Ea1, angLB1, Eb1 = sol1
    retVal2 = []
    if sol2 != []:
        angLA2, Ea2, angLB2, Eb2 = sol2
        retVal2 = [degrees(angLA2), Ea2, degrees(angLB2), Eb2]
    retVal1 = [degrees(angLA1), Ea1, degrees(angLB1), Eb1]
    return [retVal1, retVal2]
def analyticDetails(vEcm, vRcm, Vcm, angle, isoEject, isoRes):
    """Lab-frame two-body kinematics for a given ejectile angle.

    vEcm, vRcm are the CM speeds of ejectile and residual; Vcm is the
    system CM speed; angle is the ejectile lab angle in degrees.
    Returns [sol1, sol2], each [angLA, Ea, angLB, Eb] with angles in
    radians; sol2 is [] when the kinematics admit a single solution.
    """
    angle = radians(angle)
    kAng = tan(angle)
    k1 = 1.0 * vEcm / Vcm
    # Discriminant of the quadratic for the ejectile lab x-velocity.
    discr = 1 - (1 + kAng ** 2) * (1 - k1 ** 2)
    secSol = True
    if discr < 0:
        # Angle maybe too large: no real solution at this lab angle.
        return [[False, False, False, False], []]
    if angle <= pi / 2:
        vxa1 = Vcm * (1 + sqrt(discr)) / (1 + kAng ** 2)
    else:  # angle >= pi/2
        # Using the backward solution for this case
        vxa1 = Vcm * (1 - sqrt(discr)) / (1 + kAng ** 2)
        secSol = False
    if Vcm <= vEcm:
        # There can only be one solution in this case.
        secSol = False
    vya1 = kAng * vxa1
    va1 = sqrt(vxa1 ** 2 + vya1 ** 2)
    angLA1 = atan2(vya1, vxa1)
    # To get the angle and velocity of the corresponding particle:
    # 1.- Get the center-of-mass velocity of particle "a".
    vxa1CM = vxa1 - Vcm
    vya1CM = vya1
    # 2.- The corresponding CM angle; particle "b" goes back-to-back.
    angA1 = atan2(vya1CM, vxa1CM)
    angB1 = angA1 - pi
    # 3.- The corresponding center-of-mass velocity components of "b".
    vxb1CM = vRcm * cos(angB1)
    vyb1CM = vRcm * sin(angB1)
    # 4.- Boost back to the lab frame.
    vxb1 = vxb1CM + Vcm
    vyb1 = vyb1CM
    vb1 = sqrt(vxb1 ** 2 + vyb1 ** 2)
    angLB1 = atan2(vyb1, vxb1)
    Ea1 = getEFromV(isoEject, va1)
    Eb1 = getEFromV(isoRes, vb1)
    firstSolList = [angLA1, Ea1, angLB1, Eb1]
    secSolList = []
    if secSol:
        # Calculating the second solution (the '-' root) the same way.
        vxa2 = Vcm * (1 - sqrt(discr)) / (1 + kAng ** 2)
        vya2 = kAng * vxa2
        va2 = sqrt(vxa2 ** 2 + vya2 ** 2)
        angLA2 = atan2(vya2, vxa2)
        vxa2CM = vxa2 - Vcm
        vya2CM = vya2
        angA2 = atan2(vya2CM, vxa2CM)
        angB2 = angA2 - pi
        vxb2CM = vRcm * cos(angB2)
        vyb2CM = vRcm * sin(angB2)
        vxb2 = vxb2CM + Vcm
        vyb2 = vyb2CM
        vb2 = sqrt(vxb2 ** 2 + vyb2 ** 2)
        angLB2 = atan2(vyb2, vxb2)
        Ea2 = getEFromV(isoEject, va2)
        Eb2 = getEFromV(isoRes, vb2)
        secSolList = [angLA2, Ea2, angLB2, Eb2]
    # Angles in the returned lists are in radians.
    return [firstSolList, secSolList]
# def getMaxAngles(v1L,v2L,m1,m2):
def getMaxAngles(iso1, iso2, isoEject, isoRes, E1L, E2L=0,
                 exList=[0, 0, 0, 0]):
    """Maximum lab angles (degrees) of the ejectile and residual.

    Returns ["NaN", "NaN"] when the reaction is energetically
    forbidden.  180 degrees means all lab angles are reachable (the
    particle's CM speed exceeds the system CM speed).
    """
    vEcm, vRcm, Vcm = getVcms(iso1, iso2, isoEject, isoRes, E1L, E2L, exList)
    if Vcm == False:
        return ["NaN", "NaN"]
    k1 = 1.0 * vEcm / Vcm
    k2 = 1.0 * vRcm / Vcm
    if k1 != 1:
        discr1 = k1 ** 2 / (1.0 - k1 ** 2)
        if discr1 < 0:  # Vcm < vEcm: forward/backward unrestricted
            maxAng1 = pi
        else:
            maxAng1 = atan(sqrt(discr1))
    else:
        maxAng1 = pi  # Maybe it should be pi/2 -- TODO confirm
    if k2 != 1:
        discr2 = k2 ** 2 / (1.0 - k2 ** 2)
        if discr2 < 0:  # Vcm < vRcm
            maxAng2 = pi
        else:
            maxAng2 = atan(sqrt(discr2))
    else:
        maxAng2 = pi  # Maybe it should be pi/2 -- TODO confirm
    return [degrees(maxAng1), degrees(maxAng2)]
def getIsotopes(s):
    """List [isotope, value] pairs for every known mass number of the
    element named in s; False when the element is not in iDict."""
    a, key = getIso(s)
    if key not in iDict:
        return False
    pairs = []
    for massNum in iDict[key][1]:
        pairs.append([str(massNum) + key, iDict[key][1][massNum][0]])
    return pairs
# Module-level isotope dictionary, populated once at import time.
# (The slower populateDict() path is kept for reference.)
# print "#Populating dictionary"
# iDict=populateDict()
iDict = fastPopulateDict()
def gamowE(iso1, iso2):
    """Gamow energy E_G (MeV) for the iso1 + iso2 pair."""
    z1 = getPnum(iso1)
    z2 = getPnum(iso2)
    em1 = getEMass(iso1)
    # BUG FIX: em2 was previously computed from iso1, so the reduced
    # mass degenerated to em1/2 whenever the two isotopes differed.
    em2 = getEMass(iso2)
    eMu = em1 * em2 / (em1 + em2)  # reduced mass
    GE = 2 * (pi * z1 * z2 * alpha) ** 2 * eMu
    return GE
def gamowPeak(iso1, iso2, T):
    """Gamow peak energy (MeV) for iso1 + iso2 at temperature T (K)."""
    gamowEnergy = gamowE(iso1, iso2)
    thermalE = temp2E(T) / 10 ** 6  # temp2E gives eV; convert to MeV
    return (thermalE ** 2 * gamowEnergy / 4) ** (1.0 / 3)
def temp2E(T):
    """Convert a temperature T (Kelvin) to a thermal energy in eV,
    using the reference point Ta = 300 K <-> Ea = 1/40 eV."""
    refTemp = 300
    refEnergy = 0.025
    return T / refTemp * refEnergy
#Bethe-Bloch energy loss stuff
def getBeta(iso, E):
    """Relativistic v/c of iso with kinetic energy E.

    (The non-relativistic form would be beta = sqrt(2*E/m).)
    """
    mass = getEMass(iso)
    # gamma = E/m + 1, so beta = sqrt(1 - 1/gamma^2).
    invGamma = 1 / (E / mass + 1)
    return sqrt(1 - invGamma ** 2)
def getTOF(iso, E, L):
    """Time of flight (seconds) of iso with energy E over L meters."""
    speed = c * getBeta(iso, E)
    return L / speed
def getElectDensity(Z, A_r, rho):
    """Returns the electron density, in #e^-/cm^3"""
    # Properly n = (N_a*Z*rho)/(A_r*M_u), but M_u = 1 g/mol.
    electronDensity = (N_a * Z * rho) / A_r
    return electronDensity
def getBlochMeanExcE(Z):
    """Bloch approximation of the mean ionization potential in eV:
    I ~ 10 * Z."""
    return 10 * Z
#Using this for now, this has to be improved through a database or a
#pickle file!!. Format will probably change. The list format (for now)
#is Z, A_r, rho(at solid state).
# materialDict={"silicon":[14,28.085,2.3290],
# "gold":[79,196.966569,19.30],
# "aluminum":[13,26.9815385,2.70],
# "copper":[29,63.54,8.96]}
def checkMaterial(material, bloch=False, density=None):
    """True when the material's properties are available.

    Calls getMaterialProperties so the cache is filled only once.
    """
    props = getMaterialProperties(material, bloch, density)
    return props[-1] != False
# Global variable to avoid loading the pkl file over and over again.
materialDictCache = {}
def getMaterialProperties(material, bloch=False, density=None):
    """Return [Z, A_r, rho, I] for material, or (False,)*4 when the
    material is unknown or has no ionization potential.

    NOTE(review): the cache is keyed on material only, so after the
    first successful call the bloch/density arguments are ignored for
    that material -- confirm this is intended.
    """
    if material in materialDictCache:
        return materialDictCache[material]
    materialDict = getChemDictFromFile()
    if material not in materialDict:
        return False, False, False, False
    if bloch:
        # Replace the tabulated mean excitation energy with the Bloch
        # approximation I = 10*Z.
        Z = materialDict[material][0]
        materialDict[material][-1] = getBlochMeanExcE(Z)
    if density != None:
        materialDict[material][2] = density
    if materialDict[material][-1] == '-':
        # No ionization potential tabulated for this material.
        return False, False, False, False
    materialDictCache[material] = materialDict[material]
    return materialDict[material]
def getBetheLoss(iso, E, material):
    """Gets the Bethe energy loss differential of an ion through a
    material, it includes soft and hard scattering.

    Returns None when the material is unknown.
    """
    beta2 = getBeta(iso, E) ** 2
    coefs = getCBbetaCoef(iso, material)
    if coefs is None:
        return None
    C_beta, B_beta = coefs
    # Remember dE/dx is negative; this is the relativistic formula.
    logTerm = log((B_beta * beta2) / (1 - beta2))
    return C_beta / beta2 * (logTerm - beta2)
# Memoization cache for the Bethe C/B coefficients, keyed on
# "[iso,material]".
CBDictCache = {}
def getCBbetaCoef(iso, material):
    """Return [C_beta, B_beta] Bethe-formula coefficients for iso in
    material, or None when the material is unknown.  Memoized."""
    myString = "[" + iso + "," + material + "]"
    if myString in CBDictCache:
        return CBDictCache[myString]
    Z, A_r, rho, I = getMaterialProperties(material)
    if rho == False:
        return None
    n = getElectDensity(Z, A_r, rho)
    # n has to be given in #e^-/fm^3
    n *= 10 ** (-39)
    zNum = getPnum(iso)
    # "I" was given in eV so it has to be converted into MeV.
    I *= 10 ** (-6)
    C_beta = 4 * pi / electEMass * n * zNum ** 2 * (hbc * alpha) ** 2
    C_beta *= 10 ** (9)  # Converting the units into MeV/mu^3
    B_beta = 2 * electEMass / I
    CBDictCache[myString] = [C_beta, B_beta]
    return CBDictCache[myString]
def integrateELoss(iso, E, material, thick):
    """Gets the final energy of an ion going through a material with a
    certain thickness.

    Returns -2 when the material is unknown and -1 when the particle
    stops inside the material (or the loss is non-positive, which is
    unphysical here).
    """
    partitionSize = 10000
    dx = 1.0 * thick / partitionSize
    ## Criteria for considering the particle stopped ##
    coefs = getCBbetaCoef(iso, material)
    if coefs == None:
        # No material was found
        return -2
    C_beta, B_beta = coefs
    ionMass = getEMass(iso)
    # e = 2.71... (Euler's number, defined elsewhere in this module)
    EM = e * ionMass / (2 * B_beta)  # energy below which the formula breaks
    dExMax = (C_beta * B_beta) / e  # peak of the Bethe loss curve
    fracCrit = 0.01  # stop once dE/dx < 1% of its maximum
    ##############
    dEx = getBetheLoss(iso, E, material)
    # Next is for avoiding energy increment
    if dEx <= 0:
        return -1
    for i in range(partitionSize - 1):
        dEx = getBetheLoss(iso, E, material)
        if dEx == None:
            # No material was found
            return -2
        E -= dEx * dx
        if E < EM and dEx <= fracCrit * dExMax:
            # Particle has stopped
            return -1
    return E
#High energies might take a while
def getParticleRange(iso, E, material):
    """Gets the range (in microns) of a charged particle in a material.

    Returns -2 when the material is unknown.  High energies might take
    a while.
    """
    dx = 10 ** (-2)  # step in microns; TODO: make smarter selection 4 this
    ## Criteria for considering the particle stopped ##
    coefs = getCBbetaCoef(iso, material)
    if coefs == None:
        # No material was found
        return -2
    C_beta, B_beta = coefs
    ionMass = getEMass(iso)
    # e=2.71...
    EM = e * ionMass / (2 * B_beta)
    dExMax = (C_beta * B_beta) / e
    fracCrit = 0.01
    ##############
    thick = 0
    # The following is for when E is less than EM initially
    dEx = getBetheLoss(iso, E, material)
    # NOTE(review): if dEx ever becomes <= 0 the loop never terminates
    # (E keeps growing) -- confirm the inputs guarantee dEx > 0.
    while not (E < EM and dEx <= fracCrit * dExMax):
        dEx = getBetheLoss(iso, E, material)
        E -= dEx * dx
        thick += dx
    return thick
#Note that the DeltaEs for alphas of 5.15, 5.48, 5.80 are 2.55, 2.41,
#2.29 for an 11 micron silicon detector (and I think with a 1 micron
#gold coating)
#Don't forget this?
#Leave it commented or else errors occur :(
# conn.close()
| ffavela/isonav | isonavBase.py | Python | gpl-3.0 | 46,355 | [
"Avogadro"
] | b4236adb7ceb5b344af3cc04cbefafa22e921cf2562ec06012019ff67c16861c |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parses a Sudoku puzzle image, converts the puzzle to a string of numbers.
Code modified from the following sources:
- http://goo.gl/baijxj
- http://goo.gl/8O3obH
"""
import cv
import cv2
import numpy as np
GREEN = (0, 255, 0)       # BGR color used when drawing solution digits
SUDOKU_RESIZE = 450       # side length (px) the warped puzzle is resized to
NUM_ROWS = 9              # rows/columns of a Sudoku grid
JPEG_EXTENSION = '.jpeg'  # encoding used by convert_to_jpeg
TEXT_WEIGHT = 2           # thickness of drawn digits
TEXT_SIZE = 1             # font scale of drawn digits
XOFFSET = 20              # x offset (px) of a digit inside its cell
YOFFSET = 35              # y offset (px) of a digit inside its cell
class SudokuImageParser(object):
    """Parses a sudoku puzzle.

    Python 2 / legacy OpenCV (cv + cv2) code.

    Attributes:
        model: cv2.KNearest model trained with OCR data.
        image: numpy.ndarray of the original Sudoku image.
        resized_largest_square: numpy.ndarray of the largest square in the
            image.
        stringified_puzzle: The puzzle as a string of numbers.
    """

    def __init__(self):
        """Initialize the SudokuImageParser class and model."""
        self.model = self._get_model()

    def parse(self, image_data):
        """Parses the image file and returns the puzzle as a string of numbers.

        Args:
            image_data: The data of the image as a string.

        Returns:
            String of numbers representing the Sudoku puzzle
            (81 digits, row-major; 0 marks an empty cell).
        """
        self.image = self._create_image_from_data(image_data)
        largest_square = self._find_largest_square()
        self.resized_largest_square = self._resize(
            largest_square, SUDOKU_RESIZE)
        puzzle = self._get_puzzle()
        self.stringified_puzzle = ''.join(str(n) for n in puzzle.flatten())
        return self.stringified_puzzle

    def draw_solution(self, solution):
        """Draw the solution to the puzzle on the image.

        Only cells that were empty in the parsed puzzle ('0') get a
        digit drawn.

        Args:
            solution: An np array containing the solution to the puzzle.

        Returns:
            The numpy.ndarray with the solution.
        """
        for i in xrange(len(self.stringified_puzzle)):
            if self.stringified_puzzle[i] == '0':
                r = i / NUM_ROWS  # integer division (Python 2)
                c = i % NUM_ROWS
                loc = SUDOKU_RESIZE / NUM_ROWS  # cell size in pixels
                posx = c*loc + XOFFSET
                posy = r*loc + YOFFSET
                cv2.putText(
                    self.resized_largest_square,
                    solution[i],
                    (posx, posy),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    TEXT_SIZE,
                    GREEN,
                    TEXT_WEIGHT)
        return self.resized_largest_square

    def convert_to_jpeg(self, nparray):
        """Converts a numpy array to a jpeg cv2.Mat image.

        Args:
            nparray: A numpy.ndarray of an image.

        Returns:
            A cv2.Mat jpeg-encoded image.
        """
        cvmat = cv.fromarray(nparray)
        cvmat = cv.EncodeImage(JPEG_EXTENSION, cvmat)
        return cvmat

    def _create_image_from_data(self, image_data):
        """Convert string image data to cv2.Mat.

        Args:
            image_data: The data of the image as a string.

        Returns:
            A numpy.ndarray representing the image.
        """
        np_array = np.fromstring(image_data, np.uint8)
        image = cv2.imdecode(np_array, cv2.CV_LOAD_IMAGE_COLOR)
        return image

    def _get_model(self):
        """Return the OCR model using training data and samples.

        Loads the feature/label files from the working directory.

        Returns:
            Trained cv2.KNearest model.
        """
        samples = np.float32(np.loadtxt('feature_vector_pixels2.data'))
        responses = np.float32(np.loadtxt('samples_pixels2.data'))
        model = cv2.KNearest()
        model.train(samples, responses)
        return model

    def _find_largest_square(self):
        """Find the largest square in the image, most likely the puzzle.

        NOTE(review): raises IndexError if no candidate square is found
        (possible_puzzles empty) -- confirm callers expect that.

        Returns:
            Contour vector with the largest area or None if not found.
        """
        contours, image = self._get_major_contours(self.image)

        # Store contours that could be the puzzle using the contour's area
        # as the key.
        possible_puzzles = {}
        for contour in contours:
            contour_length = cv2.arcLength(contour, True)
            area = cv2.contourArea(contour)
            # Approximate the contour to a polygon.
            contour = cv2.approxPolyDP(contour, 0.02 * contour_length, True)
            # Find contours with 4 vertices and an area greater than a
            # third of the image area with a convex shape.
            if len(contour) == 4 and (
                    area > image.size / 3.0 and cv2.isContourConvex(contour)):
                # Find the largest cosine of the angles in the contour.
                contour_reshaped = contour.reshape(-1, 2)
                max_cos = np.max([self._angle_cos(
                    contour_reshaped[i],
                    contour_reshaped[(i+1) % 4],
                    contour_reshaped[(i+2) % 4]) for i in xrange(4)])
                # If the max cosine is almost zero (a square),
                # it is most likely the Sudoku puzzle.
                if max_cos < 0.1:
                    possible_puzzles[area] = contour
        # We get the smallest contour because sometimes interference around the
        # edge of the image creates a contour almost the size of the image,
        # and we don't want to use that contour.
        areas = possible_puzzles.keys()
        areas.sort()
        return possible_puzzles[areas[0]]

    def _get_puzzle(self):
        """Get the numbers in the puzzle in a 9x9 array.

        Returns:
            A numpy.ndarray filled with the numbers of the puzzle.
        """
        # a 9x9 matrix to store our sudoku puzzle
        sudoku_matrix = np.zeros((NUM_ROWS, NUM_ROWS), np.uint8)
        contours, image_copy = self._get_major_contours(
            self.resized_largest_square,
            sigma1=3,
            threshold_type=cv2.THRESH_BINARY_INV,
            dilate=False)
        # Erode and dilate the image to further amplify features.
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        erode = cv2.erode(image_copy, kernel)
        dilate = cv2.dilate(erode, kernel)
        for contour in contours:
            area = cv2.contourArea(contour)
            # Size windows below were loosened from the originals
            # (kept as comments) to accept smaller digits.
            # if 100 < area < 800:
            if 50 < area < 800:
                (bx, by, bw, bh) = cv2.boundingRect(contour)
                # if (100 < bw*bh < 1200) and (10 < bw < 40) and (25 < bh < 45):
                # aju
                if (100 < bw*bh < 1200) and (5 < bw < 40) and (10 < bh < 45):
                    # Get the region of interest, which contains the number.
                    roi = dilate[by:by + bh, bx:bx + bw]
                    small_roi = cv2.resize(roi, (10, 10))
                    feature = small_roi.reshape((1, 100)).astype(np.float32)
                    # Use the model to find the most likely number.
                    ret, results, neigh, dist = self.model.find_nearest(
                        feature, k=1)
                    integer = int(results.ravel()[0])
                    # gridx and gridy are indices of row and column in Sudoku
                    gridy = (bx + bw/2) / (SUDOKU_RESIZE / NUM_ROWS)
                    gridx = (by + bh/2) / (SUDOKU_RESIZE / NUM_ROWS)
                    sudoku_matrix.itemset((gridx, gridy), integer)
        return sudoku_matrix

    def _get_major_contours(
            self, image, sigma1=0, dilate=True,
            threshold_type=cv2.THRESH_BINARY):
        """Simplifies the image to find and return the major contours.

        Args:
            image: numpy.ndarray representing the image.
            sigma1: Integer Gaussian kernel standard deviation in X direction.
            dilate: Boolean for dilating the image.
            threshold_type: Integer representing the thresholding type.

        Returns:
            List of contours and the numpy.ndarray modified image.

        Raises:
            ImageError if image could not be processed.
        """
        try:
            gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        except cv2.error as e:
            raise ImageError('Could not process image.')
        # Positional args used because this cv2 build rejects the
        # keyword form (original call kept for reference):
        # mod_image = cv2.GaussianBlur(gray_image, ksize=(3, 3), sigma1=sigma1)
        mod_image = cv2.GaussianBlur(gray_image, (3, 3), sigma1)
        if dilate:
            mod_image = cv2.dilate(
                mod_image,
                kernel=cv2.getStructuringElement(
                    shape=cv2.MORPH_RECT, ksize=(3, 3)))
        mod_image = cv2.adaptiveThreshold(
            mod_image,
            maxValue=255,
            adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C,
            thresholdType=threshold_type,
            blockSize=5,
            C=2)
        # findContours modifies its input, so keep a pristine copy.
        copied_image = mod_image.copy()
        contours, hierarchy = cv2.findContours(
            mod_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        return contours, copied_image

    def _angle_cos(self, p0, p1, p2):
        """Find the cosine of the angle.

        Args:
            p0: List representing the coordinates one corner of the square.
            p1: List representing the coordinates one corner of the square.
            p2: List representing the coordinates one corner of the square.

        Returns:
            The float cosine of the angle between the 3 coordinates.
        """
        d1 = (p0 - p1).astype('float')
        d2 = (p2 - p1).astype('float')
        return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2)))

    def _resize(self, square, size):
        """Resize the sudoku puzzle to specified dimension.

        Args:
            square: The cv2.Mat image to resize.
            size: The integer value to resize the image to.

        Returns:
            The resized numpy.ndarray of the image.
        """
        # Put the corners of square in clockwise order.
        approx = self._rectify(square)
        h = np.array(
            [[0, 0], [size - 1, 0], [size - 1, size - 1], [0, size - 1]],
            np.float32)
        # Get the transformation matrix.
        tranformed_image = cv2.getPerspectiveTransform(approx, h)
        # Use the transformation matrix to resize the square to the
        # specified size.
        resized_image = cv2.warpPerspective(
            self.image, tranformed_image, (size, size))
        return resized_image

    def _rectify(self, square):
        """Put vertices of square in clockwise order.

        Args:
            square: List of vertices representing a square.

        Returns:
            List of vertices of the square in clockwise order.
        """
        square = square.reshape((4, 2))
        square_new = np.zeros((4, 2), dtype=np.float32)
        # Top-left has the smallest x+y sum, bottom-right the largest.
        add = square.sum(1)
        square_new[0] = square[np.argmin(add)]
        square_new[2] = square[np.argmax(add)]
        # Top-right has the smallest y-x difference, bottom-left the largest.
        diff = np.diff(square, axis=1)
        square_new[1] = square[np.argmin(diff)]
        square_new[3] = square[np.argmax(diff)]
        return square_new
class ImageError(Exception):
    """Raised when image could not be processed."""
    pass
| gdgjodhpur/appengine-opencv-sudoku-python | sudoku_image_parser.py | Python | apache-2.0 | 11,664 | [
"Gaussian"
] | 9fee3947ce0c1c4ffa4ed12b5b25fd77a0065d870eeb94c3fff1385c61987a62 |
#!/usr/bin/python
# Copyright 2008 Marcus D. Hanwell <marcus@cryos.org>
# Distributed under the terms of the GNU General Public License v2 or later
#
# Taken from commit 40304a05323c68d0297a8ade123d27c6e0c78d8c at
# http://github.com/cryos/avogadro/blob/master/scripts/gitlog2changelog.py
import string, re, os
# Python 2 script: converts `git log` output into a GNU-style ChangeLog.
# Execute git log with the desired command line options.
fin = os.popen('git log --summary --stat --no-merges --date=short', 'r')
# Create a ChangeLog file in the current directory.
fout = open('ChangeLog', 'w')

# State variables for the line-oriented parser: each flag marks which
# part of the current commit block has already been seen.
authorFound = False
dateFound = False
messageFound = False
filesFound = False
message = ""
messageNL = False  # True once the blank line before the message was seen
files = ""
prevAuthorLine = ""  # last "date author" header written, to merge entries

# The main part of the loop
for line in fin:
    # The commit line marks the start of a new commit object.
    if string.find(line, 'commit') >= 0:
        # Start all over again...
        authorFound = False
        dateFound = False
        messageFound = False
        messageNL = False
        message = ""
        filesFound = False
        files = ""
        continue
    # Match the author line and extract the part we want
    # NOTE(review): re.match returns a match object or None, so the
    # '>= 0' comparisons below rely on Python 2 mixed-type ordering.
    elif re.match('Author:', line) >= 0:
        authorList = re.split(': ', line, 1)
        author = authorList[1]
        author = author[0:len(author)-1]  # strip trailing newline
        authorFound = True
    # Match the date line
    elif re.match('Date:', line) >= 0:
        dateList = re.split(': ', line, 1)
        date = dateList[1]
        date = date[0:len(date)-1]  # strip trailing newline
        dateFound = True
    # The svn-id lines are ignored
    elif re.match('    git-svn-id:', line) >= 0:
        continue
    # The sign off line is ignored too
    elif re.search('Signed-off-by', line) >= 0:
        continue
    # Extract the actual commit message for this commit
    elif authorFound & dateFound & messageFound == False:
        # Find the commit message if we can
        if len(line) == 1:
            if messageNL:
                messageFound = True
            else:
                messageNL = True
        elif len(line) == 4:
            messageFound = True
        else:
            if len(message) == 0:
                message = message + line.strip()
            else:
                message = message + " " + line.strip()
    # If this line is hit all of the files have been stored for this commit
    elif re.search('files changed', line) >= 0:
        filesFound = True
        continue
    # Collect the files for this commit. FIXME: Still need to add +/- to files
    elif authorFound & dateFound & messageFound:
        fileList = re.split(' \| ', line, 2)
        if len(fileList) > 1:
            if len(files) > 0:
                files = files + ", " + fileList[0].strip()
            else:
                files = fileList[0].strip()
    # All of the parts of the commit have been found - write out the entry
    if authorFound & dateFound & messageFound & filesFound:
        # First the author line, only outputted if it is the first for that
        # author on this day
        authorLine = date + "  " + author
        if len(prevAuthorLine) == 0:
            fout.write(authorLine + "\n")
        elif authorLine == prevAuthorLine:
            pass
        else:
            fout.write("\n" + authorLine + "\n")

        # Assemble the actual commit message line(s) and limit the line length
        # to 80 characters.
        commitLine = "* " + files + ": " + message
        i = 0
        commit = ""
        while i < len(commitLine):
            if len(commitLine) < i + 78:
                commit = commit + "\n  " + commitLine[i:len(commitLine)]
                break
            # Break at the last space that keeps the line under 78 chars.
            index = commitLine.rfind(' ', i, i+78)
            if index > i:
                commit = commit + "\n  " + commitLine[i:index]
                i = index+1
            else:
                # No space found: hard-wrap mid-word.
                commit = commit + "\n  " + commitLine[i:78]
                i = i+79

        # Write out the commit line
        fout.write(commit + "\n")

        # Now reset all the variables ready for a new commit block.
        authorFound = False
        dateFound = False
        messageFound = False
        messageNL = False
        message = ""
        filesFound = False
        files = ""
        prevAuthorLine = authorLine

# Close the input and output lines now that we are finished.
fin.close()
fout.close()
| Beirdo/beirdobot | scripts/gitlog2changelog.py | Python | lgpl-2.1 | 4,359 | [
"Avogadro"
] | d1e15a079860e5b4bf6143ed3326ff4a555ef9f2b4e49ae2f315e6e61a6e0861 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyBrian(PythonPackage):
    """A clock-driven simulator for spiking neural networks"""

    homepage = "http://www.briansimulator.org"
    url = "https://pypi.io/packages/source/b/brian/brian-1.4.3.tar.gz"

    version('1.4.3', sha256='c881dcfcd1a21990f9cb3cca76cdd868111cfd9e227ef5c1b13bb372d2efeaa4')

    # Minimum versions presumably taken from brian's own install
    # requirements -- TODO confirm against the 1.4.3 setup.py.
    depends_on('py-matplotlib@0.90.1:', type=('build', 'run'))
    depends_on('py-numpy@1.4.1:', type=('build', 'run'))
    depends_on('py-scipy@0.7.0:', type=('build', 'run'))
"Brian"
] | 702a144240ad96ddd60a10799980eb747b7e39857588b7e4f36adc072fc404ca |
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
"""K-point/spin combination-descriptors
This module contains classes for defining combinations of two indices:
* Index k for irreducible kpoints in the 1st Brillouin zone.
* Index s for spin up/down if spin-polarized (otherwise ignored).
"""
import numpy as np
from ase.units import Bohr
from ase.dft import monkhorst_pack
from gpaw.symmetry import Symmetry
from gpaw.kpoint import KPoint
class KPointDescriptor:
"""Descriptor-class for k-points."""
    def __init__(self, kpts, nspins):
        """Construct descriptor object for kpoint/spin combinations (ks-pair).

        Parameters
        ----------
        kpts: None, list of ints, or ndarray
            Specification of the k-point grid. None=Gamma, list of
            ints=Monkhorst-Pack, ndarray=user specified.
        nspins: int
            Number of spins.

        Attributes
        ============  ======================================================
        ``N_c``       Number of k-points in the different directions.
        ``nspins``    Number of spins.
        ``nibzkpts``  Number of irreducible kpoints in 1st Brillouin zone.
        ``nks``       Number of k-point/spin combinations in total.
        ``mynks``     Number of k-point/spin combinations on this CPU.
        ``gamma``     Boolean indicator for gamma point calculation.
        ``comm``      MPI-communicator for kpoint distribution.
        ============  ======================================================
        """
        if kpts is None:
            # Gamma point only.
            self.bzk_kc = np.zeros((1, 3))
            self.N_c = np.array((1, 1, 1), dtype=int)
        elif isinstance(kpts[0], int):
            # Monkhorst-Pack grid from per-axis divisions.
            self.bzk_kc = monkhorst_pack(kpts)
            self.N_c = np.array(kpts, dtype=int)
        else:
            # Explicit, user-specified k-points; no regular grid.
            self.bzk_kc = np.array(kpts)
            self.N_c = None

        self.nspins = nspins
        self.nbzkpts = len(self.bzk_kc)

        # Gamma-point calculation: a single k-point at the origin.
        self.gamma = self.nbzkpts == 1 and not self.bzk_kc[0].any()

        # Filled in later by set_symmetry()/set_communicator().
        self.symmetry = None
        self.comm = None
        self.ibzk_kc = None
        self.weight_k = None
        self.nibzkpts = None

        self.rank0 = None
        self.mynks = None
        self.ks0 = None
        self.ibzk_qc = None
    def __len__(self):
        """Return number of k-point/spin combinations of local CPU.

        Note: this is the local count (mynks), not the global nks.
        """
        return self.mynks
    def set_symmetry(self, atoms, setups, usesymm, N_c=None):
        """Create symmetry object and construct irreducible Brillouin zone.

        Parameters
        ----------
        atoms: Atoms object
            Defines atom positions and types and also unit cell and
            boundary conditions.
        setups: instance of class Setups
            PAW setups for the atoms.
        usesymm: bool
            Symmetry flag.  None disables even time-reversal symmetry.
        N_c: three int's or None
            If not None: Check also symmetry of grid.
        """
        if (~atoms.pbc & self.bzk_kc.any(0)).any():
            raise ValueError('K-points can only be used with PBCs!')

        # Construct a Symmetry instance containing the identity operation
        # only.  Round magnetic moments off so equal-moment atoms compare
        # equal in the (magmom, setup-id) identification tuples.
        magmom_a = atoms.get_initial_magnetic_moments().round(decimals=3)
        id_a = zip(magmom_a, setups.id_a)
        self.symmetry = Symmetry(id_a, atoms.cell / Bohr, atoms.pbc)

        if self.gamma or usesymm is None:
            # Point group and time-reversal symmetry neglected
            nkpts = len(self.bzk_kc)
            self.weight_k = np.ones(nkpts) / nkpts
            self.ibzk_kc = self.bzk_kc.copy()
            self.sym_k = np.zeros(nkpts)
            self.time_reversal_k = np.zeros(nkpts, bool)
            self.kibz_k = np.arange(nkpts)
        else:
            if usesymm:
                # Find symmetry operations of atoms
                self.symmetry.analyze(atoms.get_scaled_positions())

                if N_c is not None:
                    self.symmetry.prune_symmetries_grid(N_c)

            # Fold the BZ k-points onto the irreducible wedge.
            (self.ibzk_kc, self.weight_k,
             self.sym_k,
             self.time_reversal_k,
             self.kibz_k) = self.symmetry.reduce(self.bzk_kc)

        setups.set_symmetry(self.symmetry)

        # Number of irreducible k-points and k-point/spin combinations.
        self.nibzkpts = len(self.ibzk_kc)
        self.nks = self.nibzkpts * self.nspins
    def set_communicator(self, comm):
        """Set k-point communicator and distribute ks-pairs over ranks."""

        # Ranks < self.rank0 have mynks0 k-point/spin combinations and
        # ranks >= self.rank0 have mynks0+1 k-point/spin combinations.
        mynks0, x = divmod(self.nks, comm.size)
        self.rank0 = comm.size - x
        self.comm = comm

        # My number and offset of k-point/spin combinations
        self.mynks, self.ks0 = self.get_count(), self.get_offset()

        if self.nspins == 2 and comm.size == 1:
            # Avoid duplicating k-points in local list of k-points.
            self.ibzk_qc = self.ibzk_kc.copy()
        else:
            # Stack spin-up and spin-down copies, then take this rank's
            # slice of the combined list.
            self.ibzk_qc = np.vstack((self.ibzk_kc,
                                      self.ibzk_kc))[self.get_slice()]
    def create_k_points(self, gd):
        """Return a list of KPoints for this rank's ks-pairs.

        gd is a grid descriptor; its sdisp_cd scaled displacements are
        used to build the Bloch phase factors.
        """
        sdisp_cd = gd.sdisp_cd

        kpt_u = []

        for ks in range(self.ks0, self.ks0 + self.mynks):
            # Global ks index -> (spin s, IBZ k-index k, local index q).
            s, k = divmod(ks, self.nibzkpts)
            q = (ks - self.ks0) % self.nibzkpts
            weight = self.weight_k[k] * 2 / self.nspins
            if self.gamma:
                phase_cd = np.ones((3, 2), complex)
            else:
                # Bloch phases across the cell boundaries.
                phase_cd = np.exp(2j * np.pi *
                                  sdisp_cd * self.ibzk_kc[k, :, np.newaxis])
            kpt_u.append(KPoint(weight, s, k, q, phase_cd))

        return kpt_u
    def transform_wave_function(self, psit_G, k):
        """Transform wave function from IBZ to BZ.

        k is the index of the desired k-point in the full BZ."""

        s = self.sym_k[k]
        time_reversal = self.time_reversal_k[k]
        op_cc = np.linalg.inv(self.symmetry.op_scc[s]).round().astype(int)

        # Identity operation: at most complex conjugation is needed.
        if (np.abs(op_cc - np.eye(3, dtype=int)) < 1e-10).all():
            if time_reversal:
                return psit_G.conj()
            else:
                return psit_G
        # Inversion symmetry
        elif (np.abs(op_cc + np.eye(3, dtype=int)) < 1e-10).all():
            return psit_G.conj()
        # General point group symmetry: let the C extension rotate the
        # wave function on the grid.
        else:
            ik = self.kibz_k[k]
            kibz_c = self.ibzk_kc[ik]
            kbz_c = self.bzk_kc[k]
            import _gpaw
            b_g = np.zeros_like(psit_G)
            if time_reversal:
                # assert abs(np.dot(op_cc, kibz_c) - -kbz_c) < tol
                _gpaw.symmetrize_wavefunction(psit_G, b_g, op_cc.copy(),
                                              kibz_c, -kbz_c)
                return b_g.conj()
            else:
                # assert abs(np.dot(op_cc, kibz_c) - kbz_c) < tol
                _gpaw.symmetrize_wavefunction(psit_G, b_g, op_cc.copy(),
                                              kibz_c, kbz_c)
                return b_g
    def find_k_plus_q(self, q_c):
        """Find the indices of k+q for all kpoints in the Brillouin zone.

        In case that k+q is outside the BZ, the k-point inside the BZ
        corresponding to k+q is given.

        Parameters
        ----------
        q_c: ndarray
            Coordinates for the q-vector in units of the reciprocal
            lattice vectors.

        """
        # Monkhorst-pack grid: k+q indices can be computed directly.
        if self.N_c is not None:
            N_c = self.N_c
            dk_c = 1. / N_c
            kmax_c = (N_c - 1) * dk_c / 2.
            N = np.zeros(3, dtype=int)

        # k+q vectors
        kplusq_kc = self.bzk_kc + q_c
        # Translate back into the first BZ
        kplusq_kc[np.where(kplusq_kc > 0.5)] -= 1.
        kplusq_kc[np.where(kplusq_kc <= -0.5)] += 1.

        # List of k+q indices
        kplusq_k = []

        # Find index of k+q vector in the bzk_kc attribute
        for kplusq, kplusq_c in enumerate(kplusq_kc):
            # Calculate index for Monkhorst-Pack grids
            if self.N_c is not None:
                N = np.asarray(np.round((kplusq_c + kmax_c) / dk_c),
                               dtype=int)
                # Row-major (z fastest) flattening of the grid index.
                kplusq_k.append(N[2] + N[1] * N_c[2] +
                                N[0] * N_c[2] * N_c[1])
            else:
                # General grid: nearest k-point by L1 distance.
                k = np.argmin(np.sum(np.abs(self.bzk_kc - kplusq_c), axis=1))
                kplusq_k.append(k)

            # Check the k+q vector index
            k_c = self.bzk_kc[kplusq_k[kplusq]]
            assert abs(kplusq_c - k_c).sum() < 1e-8, "Could not find k+q!"

        return kplusq_k
def get_bz_q_points(self):
"""Return the q=k1-k2."""
bzk_kc = self.bzk_kc
# Get all q-points
all_qs = []
for k1 in bzk_kc:
for k2 in bzk_kc:
all_qs.append(k1-k2)
all_qs = np.array(all_qs)
# Fold q-points into Brillouin zone
all_qs[np.where(all_qs > 0.501)] -= 1.
all_qs[np.where(all_qs < -0.499)] += 1.
# Make list of non-identical q-points in full BZ
bz_qs = [all_qs[0]]
for q_a in all_qs:
q_in_list = False
for q_b in bz_qs:
if (abs(q_a[0]-q_b[0]) < 0.01 and
abs(q_a[1]-q_b[1]) < 0.01 and
abs(q_a[2]-q_b[2]) < 0.01):
q_in_list = True
break
if q_in_list == False:
bz_qs.append(q_a)
self.bzq_kc = bz_qs
return
def where_is_q(self, q_c):
"""Find the index of q points."""
q_c[np.where(q_c>0.499)] -= 1
q_c[np.where(q_c<-0.499)] += 1
found = False
for ik in range(self.nbzkpts):
if (np.abs(self.bzq_kc[ik] - q_c) < 1e-8).all():
found = True
return ik
break
if found is False:
print self.bzq_kc, q_c
raise ValueError('q-points can not be found!')
def get_count(self, rank=None):
"""Return the number of ks-pairs which belong to a given rank."""
if rank is None:
rank = self.comm.rank
assert rank in xrange(self.comm.size)
mynks0 = self.nks // self.comm.size
mynks = mynks0
if rank >= self.rank0:
mynks += 1
return mynks
def get_offset(self, rank=None):
"""Return the offset of the first ks-pair on a given rank."""
if rank is None:
rank = self.comm.rank
assert rank in xrange(self.comm.size)
mynks0 = self.nks // self.comm.size
ks0 = rank * mynks0
if rank >= self.rank0:
ks0 += rank - self.rank0
return ks0
def get_rank_and_index(self, s, k):
"""Find rank and local index of k-point/spin combination."""
u = self.where_is(s, k)
rank, myu = self.who_has(u)
return rank, myu
def get_slice(self, rank=None):
"""Return the slice of global ks-pairs which belong to a given rank."""
if rank is None:
rank = self.comm.rank
assert rank in xrange(self.comm.size)
mynks, ks0 = self.get_count(rank), self.get_offset(rank)
uslice = slice(ks0, ks0 + mynks)
return uslice
def get_indices(self, rank=None):
"""Return the global ks-pair indices which belong to a given rank."""
uslice = self.get_slice(rank)
return np.arange(*uslice.indices(self.nks))
def get_ranks(self):
"""Return array of ranks as a function of global ks-pair indices."""
ranks = np.empty(self.nks, dtype=int)
for rank in range(self.comm.size):
uslice = self.get_slice(rank)
ranks[uslice] = rank
assert (ranks >= 0).all() and (ranks < self.comm.size).all()
return ranks
def who_has(self, u):
"""Convert global index to rank information and local index."""
mynks0 = self.nks // self.comm.size
if u < mynks0 * self.rank0:
rank, myu = divmod(u, mynks0)
else:
rank, myu = divmod(u - mynks0 * self.rank0, mynks0 + 1)
rank += self.rank0
return rank, myu
def global_index(self, myu, rank=None):
"""Convert rank information and local index to global index."""
if rank is None:
rank = self.comm.rank
assert rank in xrange(self.comm.size)
ks0 = self.get_offset(rank)
u = ks0 + myu
return u
def what_is(self, u):
"""Convert global index to corresponding kpoint/spin combination."""
s, k = divmod(u, self.nibzkpts)
return s, k
def where_is(self, s, k):
"""Convert kpoint/spin combination to the global index thereof."""
u = k + self.nibzkpts * s
return u
#def get_size_of_global_array(self):
# return (self.nspins*self.nibzkpts,)
#
#def ...
class KPointDescriptorOld:
    """Descriptor-class for ordered lists of kpoint/spin combinations
    TODO
    """ #XXX

    def __init__(self, nspins, nibzkpts, comm=None, gamma=True, dtype=float):
        """Construct descriptor object for kpoint/spin combinations (ks-pair).

        Parameters:

        nspins: int
            Number of spins.
        nibzkpts: int
            Number of irreducible kpoints in 1st Brillouin zone.
        comm: MPI-communicator
            Communicator for kpoint-groups.
        gamma: bool
            More to follow.
        dtype: NumPy dtype
            More to follow.

        Note that if comm.size is greater than the number of spins, then
        the kpoints cannot all be located at the gamma point and therefor
        the gamma boolean loses its significance.

        Attributes:

        ============ ======================================================
        ``nspins``   Number of spins.
        ``nibzkpts`` Number of irreducible kpoints in 1st Brillouin zone.
        ``nks``      Number of k-point/spin combinations in total.
        ``mynks``    Number of k-point/spin combinations on this CPU.
        ``gamma``    Boolean indicator for gamma point calculation.
        ``dtype``    Data type appropriate for wave functions.
        ``beg``      Beginning of ks-pair indices in group (inclusive).
        ``end``      End of ks-pair indices in group (exclusive).
        ``step``     Stride for ks-pair indices between ``beg`` and ``end``.
        ``comm``     MPI-communicator for kpoint distribution.
        ============ ======================================================
        """
        if comm is None:
            # Fall back to a serial communicator when none is supplied.
            comm = mpi.serial_comm
        self.comm = comm
        self.rank = self.comm.rank

        self.nspins = nspins
        self.nibzkpts = nibzkpts
        # Total ks-pairs; laid out spin-major (see what_is/where_is).
        self.nks = self.nibzkpts * self.nspins

        # XXX Check from distribute_cpus in mpi/__init__.py line 239 rev. 4187
        # Unlike the newer descriptor, this one requires a perfectly even
        # distribution of ks-pairs over ranks.
        if self.nks % self.comm.size != 0:
            raise RuntimeError('Cannot distribute %d k-point/spin ' \
                               'combinations to %d processors' % \
                               (self.nks, self.comm.size))

        self.mynks = self.nks // self.comm.size

        # TODO Move code from PAW.initialize in paw.py lines 319-328 rev. 4187
        self.gamma = gamma
        self.dtype = dtype

        # Cache the local index range as explicit (beg, end, step).
        uslice = self.get_slice()
        self.beg, self.end, self.step = uslice.indices(self.nks)

    #XXX u is global kpoint index

    def __len__(self):
        # Number of ks-pairs held by this rank.
        return self.mynks

    def get_rank_and_index(self, s, k):
        """Find rank and local index of k-point/spin combination."""
        u = self.where_is(s, k)
        rank, myu = self.who_has(u)
        return rank, myu

    def get_slice(self, rank=None):
        """Return the slice of global ks-pairs which belong to a given rank."""
        if rank is None:
            rank = self.comm.rank
        # NOTE: Python-2-only xrange membership test.
        assert rank in xrange(self.comm.size)
        # Even distribution: contiguous chunks of mynks pairs per rank.
        ks0 = rank * self.mynks
        uslice = slice(ks0, ks0 + self.mynks)
        return uslice

    def get_indices(self, rank=None):
        """Return the global ks-pair indices which belong to a given rank."""
        uslice = self.get_slice(rank)
        return np.arange(*uslice.indices(self.nks))

    def get_ranks(self):
        """Return array of ranks as a function of global ks-pair indices."""
        ranks = np.empty(self.nks, dtype=int)
        for rank in range(self.comm.size):
            uslice = self.get_slice(rank)
            ranks[uslice] = rank
        # Every index must be claimed by exactly one valid rank.
        assert (ranks >= 0).all() and (ranks < self.comm.size).all()
        return ranks

    def who_has(self, u):
        """Convert global index to rank information and local index."""
        # Even distribution makes this a plain divmod.
        rank, myu = divmod(u, self.mynks)
        return rank, myu

    def global_index(self, myu, rank=None):
        """Convert rank information and local index to global index."""
        if rank is None:
            rank = self.comm.rank
        u = rank * self.mynks + myu
        return u

    def what_is(self, u):
        """Convert global index to corresponding kpoint/spin combination."""
        # Spin-major layout: u = s * nibzkpts + k.
        s, k = divmod(u, self.nibzkpts)
        return s, k

    def where_is(self, s, k):
        """Convert kpoint/spin combination to the global index thereof."""
        u = k + self.nibzkpts * s
        return u

    #def get_size_of_global_array(self):
    #    return (self.nspins*self.nibzkpts,)
    #
    #def ...
| qsnake/gpaw | gpaw/kpt_descriptor.py | Python | gpl-3.0 | 17,804 | [
"ASE",
"GPAW"
] | 1ec134d6a5b25e24d59987e3d0dd12f4f9c343bdf61925df828a64a014c7ab3e |
#!/usr/bin/env python
import os.path, sys
from fityk import Fityk
class GaussianFitter(Fityk):
    """Fityk session pre-loaded with a data file, for fitting one Gaussian."""

    def __init__(self, filename):
        Fityk.__init__(self)
        if not os.path.isfile(filename):
            raise ValueError("File `%s' not found." % filename)
        self.filename = filename
        # Load the data file into fityk dataset @0.
        self.execute("@0 < '%s'" % filename)
        print "Data info:", self.get_info("data", 0)

    def run(self):
        """Guess a Gaussian for the loaded data and run the fit."""
        self.execute("guess Gaussian")
        gauss = self.all_functions()[-1]  # the last function (just created)
        print "initial Gaussian center: %g" % gauss.get_param_value("center")
        print "Fitting %s ..." % self.filename
        self.execute("fit")
        # Weighted sum of squared residuals after fitting.
        print "WSSR=", self.get_wssr()
        print "Gaussian center: %g" % gauss.get_param_value("center")

    def save_session(self, filename):
        """Dump the whole session state so it can be reloaded in fityk."""
        self.execute("info state >'%s'" % filename)
# Quick sanity check of the fityk bindings before fitting.
f = Fityk()
print f.get_info("version")
print "ln(2) =", f.calculate_expr("ln(2)")
del f

# Fit the sample data file that ships next to this script.
g = GaussianFitter(os.path.join(os.path.dirname(sys.argv[0]), "nacl01.dat"))
g.run()
g.save_session("tmp_save.fit")
| wojdyr/fityk | samples/hello.py | Python | gpl-2.0 | 1,092 | [
"Gaussian"
] | e7f7ab53b455315b406a355f2ff3d33d3f14e0bf1506d7978cf054ebb3f7f37b |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-05 00:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds ProgramDirector.receive_emails and updates the choice set of
    # School.program_model.

    dependencies = [
        ('visit', '0116_auto_20160803_1607'),
    ]

    operations = [
        # New flag controlling whether a program director receives
        # notification emails; existing rows default to True.
        migrations.AddField(
            model_name='programdirector',
            name='receive_emails',
            field=models.BooleanField(default=True),
        ),
        # Update the allowed program-model choices/labels (stored values
        # themselves are unchanged by this operation).
        migrations.AlterField(
            model_name='school',
            name='program_model',
            field=models.CharField(choices=[(b'school_wide', b'2+2'), (b'fellowship', b'Fellowship Model'), (b'ptlt', b'PTLT')], max_length=20),
        ),
    ]
| koebbe/homeworks | visit/migrations/0117_auto_20160804_1909.py | Python | mit | 719 | [
"VisIt"
] | 6164b324b4e4bf1485be680a5fe8ce7deaea7ad855ae483cf04fd467b5c46de7 |
import copy
import os
import datetime
import inspect
import hashlib
import re
import os.path
import random
import glob
from GangaCore.GPIDev.Base.Proxy import stripProxy, isType, getName
from GangaCore.GPIDev.Lib.GangaList.GangaList import GangaList
from GangaCore.GPIDev.Schema import Schema, Version, SimpleItem, ComponentItem
from GangaCore.GPIDev.Adapters.IGangaFile import IGangaFile
from GangaCore.GPIDev.Lib.File import FileUtils
from GangaCore.GPIDev.Lib.Job.Job import Job
from GangaCore.Utility.files import expandfilename
from GangaCore.Core.exceptions import GangaFileError
from GangaDirac.Lib.Utilities.DiracUtilities import getDiracEnv, execute, GangaDiracError
import GangaCore.Utility.Config
from GangaCore.Runtime.GPIexport import exportToGPI
from GangaCore.GPIDev.Credentials import require_credential
from GangaDirac.Lib.Credentials.DiracProxy import DiracProxy, DiracProxyInfo
from GangaCore.Utility.Config import getConfig
from GangaCore.Utility.logging import getLogger
from GangaDirac.Lib.Backends.DiracUtils import getAccessURLs
# Module-level handles: DIRAC configuration section and package logger.
configDirac = getConfig('DIRAC')
logger = getLogger()
# Wildcard characters that mark a namePattern as a glob pattern.
regex = re.compile('[*?\[\]]')
# NOTE(review): ``global`` at module scope is a no-op; this simply
# initialises the module-level site cache.
global stored_list_of_sites
stored_list_of_sites = []
class DiracFile(IGangaFile):
"""
File stored on a DIRAC storage element
Usage:
Some common use cases:
1) Uploading a file and sending jobs to run over it
2) Uploading a file to be sent to where your jobs are running
3) Uploading and removing a file
4) Removing an existing file from Dirac storage
5) Change the path of LFN produced by a ganga job.
6) Accessing a (potentially remote) file known to Dirac through an LFN
1)
To upload a file and submit a job to use it as inputdata:
df = DiracFile('/path/to/some/local/file')
df.put()
j=Job( ... )
j.inputdata=[df.lfn]
(The file is now accessible via data.py at the site)
2)
To upload a file and make it available on a workernode:
df = DiracFile('/path/to/some/local/file')
df.put(uploadSE = 'CERN-USER')
j=Job( ... )
j.inputfiles = [df]
j.submit()
3)
To upload and then remove a file:
df = DiracFile('/path/to/some/local/file')
df.put()
df.remove()
4)
To remove an existing file already in Dirac storage
df = DiracFile('LFN:/some/lfn/path')
df.remove()
or:
df = DiracFile(lfn='/some/lfn/path')
df.remove()
5)
To change an LFN path structure which is produced by Ganga:
j=Job( ... )
j.outputfiles=[DiracFile('myAwesomeLFN.ext', remoteDir='myPath_{jid}_{sjid}')]
j.submit()
This will produce LFN similar to:
/lhcb/user/<u>/<user>/myPath_1_2/2017_01/123456/123456789/myAwesomeLFN.ext
Other possibilities may look like:
j.outputfiles=[DiracFile('myData.ext', remoteDir='myProject/job{jid}_sj{sjid}')]
=>
/lhcb/user/<u>/<user>/myProject/job1_sj2/2017_01/123456/123456789/myData.ext
j.outputfiles=[DiracFile('myData.ext', remoteDir='myProject')]
=>
/lhcb/user/<u>/<user>/myProject/2017_01/123456/123456789/myData.ext
Alternatively you may change in your .gangarc:
[DIRAC]
useGangaPath=True
This will give you LFN like:
/lhcb/user/<u>/<user>/GangaJob_13/OutputFiles/2017_01/123456/123456789/myFile.ext
for all future jobs while this is in your .gangarc config.
6)
Accessing a (potentially remote) file locally known to DIRAC:
df = DiracFile(lfn='/some/lfn/path')
ganga_path = df.accessURL()
**exit ganga**
root ganga_path # to stream a file over xrootd://
"""
_schema = Schema(Version(1, 1), {'namePattern': SimpleItem(defvalue="", doc='pattern of the file name'),
'localDir': SimpleItem(defvalue=None, copyable=1, typelist=['str', 'type(None)'],
doc='local dir where the file is stored, used from get and put methods'),
'locations': SimpleItem(defvalue=[], copyable=1, typelist=['str'], sequence=1,
doc="list of SE locations where the outputfiles are uploaded"),
'compressed': SimpleItem(defvalue=False, typelist=['bool'], protected=0,
doc='wheather the output file should be compressed before sending somewhere'),
'lfn': SimpleItem(defvalue='', copyable=1, typelist=['str'],
doc='return the logical file name/set the logical file name to use if not '
'using wildcards in namePattern'),
'remoteDir': SimpleItem(defvalue="", doc='remote directory where the LFN is to be placed within '
'this is the relative path of the LFN which is put between the user LFN base and the filename.'),
'guid': SimpleItem(defvalue='', copyable=1, typelist=['str'],
doc='return the GUID/set the GUID to use if not using wildcards in the namePattern.'),
'subfiles': ComponentItem(category='gangafiles', defvalue=[], sequence=1, copyable=0, # hidden=1,
typelist=['GangaDirac.Lib.Files.DiracFile'], doc="collected files from the wildcard namePattern"),
'defaultSE': SimpleItem(defvalue='', copyable=1, doc="defaultSE where the file is to be accessed from or uploaded to"),
'failureReason': SimpleItem(defvalue="", protected=1, copyable=0, doc='reason for the upload failure'),
'credential_requirements': ComponentItem('CredentialRequirement', defvalue='DiracProxy'),
})
_env = None
_category = 'gangafiles'
_name = "DiracFile"
_exportmethods = ["get", "getMetadata", "getReplicas", 'getSubFiles', 'remove', 'removeReplica',
"replicate", 'put', 'locations', 'location', 'accessURL',
'_updateRemoteURLs', 'hasMatchedFiles', 'getSize']
_additional_slots = ['_have_copied', '_remoteURLs', '_storedReplicas']
    def __init__(self, namePattern='', localDir=None, lfn='', remoteDir=None, **kwds):
        """
        name is the name of the output file that has to be written ...

        Note: extra keyword arguments (**kwds) are accepted but ignored
        here; presumably consumed by the Ganga schema machinery -- confirm.
        """
        super(DiracFile, self).__init__()
        self.locations = []

        # Resolves the interplay between an explicit lfn and a local
        # namePattern (either may carry an 'LFN:' prefix).
        self._setLFNnamePattern(lfn, namePattern)

        if localDir is not None:
            self.localDir = localDir
        if remoteDir is not None:
            self.remoteDir = remoteDir

        # Book-keeping for replica caching (see getReplicas).
        self._have_copied = False
        self._remoteURLs = {}
        self._storedReplicas = {}
    def __setattr__(self, attr, value):
        """
        This is an overloaded setter method to make sure that we're auto-expanding the filenames of files which exist.
        In the case we're assigning any other attributes the value is simply passed through
        Args:
            attr (str): This is the name of the attribute which we're assigning
            value (unknown): This is the value being assigned.
        """
        actual_value = value
        if attr == "namePattern":
            # A path-like namePattern is split: directory part feeds
            # localDir, only the basename is stored as the pattern.
            this_dir, actual_value = os.path.split(value)
            if this_dir:
                self.localDir = this_dir
        elif attr == 'localDir':
            if value:
                # Expand ~ / env vars and absolutise, but only keep the
                # expanded form when the directory actually exists.
                new_value = os.path.abspath(expandfilename(value))
                if os.path.exists(new_value):
                    actual_value = new_value
        super(DiracFile, self).__setattr__(attr, actual_value)
    def _attribute_filter__set__(self, name, value):
        """Ganga schema set-filter: normalise path-like attribute values.

        Returns the value that should actually be stored for ``name``.
        """
        if value != "" and value is not None:
            # Do some checking of the filenames in a subprocess
            if name == 'lfn':
                # Splitting an LFN: basename becomes the namePattern and
                # the directory part becomes remoteDir.
                this_dir, self.namePattern = os.path.split(value)
                if this_dir:
                    self.remoteDir = this_dir
                return value
            elif name == 'namePattern':
                # Directory part of a pattern feeds localDir; only the
                # basename is stored.
                self.localDir, this_name = os.path.split(value)
                return this_name
            elif name == 'localDir':
                if value:
                    # Expand ~ and environment variables.
                    return expandfilename(value)
                else:
                    return value
        return value
    def locations(self):
        # NOTE(review): this exported method shares its name with the
        # 'locations' schema item; ``self.locations`` here presumably
        # resolves to the schema-stored value through Ganga's attribute
        # machinery (see _attribute_filter__get__) rather than to this
        # method -- confirm, since with plain Python attribute lookup this
        # would return the bound method itself.
        return self.locations
def getSize(self):
"""
Return the size of the DiracFile.
"""
md = self.getMetadata()
size = md['Successful'][self.lfn]['Size']
return size
    def _setLFNnamePattern(self, lfn="", namePattern=""):
        """Disentangle an LFN from a local namePattern.

        Either argument may carry an 'LFN:' prefix; whichever looks like
        an LFN is stored (minus the prefix) in ``self.lfn`` and the other
        in ``self.namePattern``.
        """
        if hasattr(self, 'defaultSE') and self.defaultSE != "":
            ## TODO REPLACE THIS WITH IN LIST OF VONAMES KNOWN
            # Check for /lhcb/some/path or /gridpp/some/path
            # NOTE(review): os.pathsep is ':' on POSIX, so this splits on
            # colons rather than path components -- confirm intent.
            if namePattern.split(os.pathsep)[0] == self.defaultSE \
                    or (len(namePattern) > 3 and namePattern[0:4].upper() == "LFN:"\
                    or len(namePattern.split(os.pathsep)) > 1 and namePattern.split(os.pathsep)[1] == self.defaultSE):
                # Check for LFN:/gridpp/some/path or others...
                lfn = namePattern
                namePattern = ""

        # Strip a leading 'LFN:' prefix (case-insensitive) from whichever
        # argument is populated.
        if lfn:
            if len(lfn) > 3 and lfn[0:4].upper() == "LFN:":
                lfn = lfn[4:]
        elif namePattern:
            if len(namePattern) > 3 and namePattern[0:4].upper() == 'LFN:':
                lfn = namePattern[4:]

        # Store whichever of the two values we ended up with.
        if lfn != "" and namePattern != "":
            self.lfn = lfn
            self.namePattern = namePattern
        elif lfn != "" and namePattern == "":
            self.lfn = lfn
        elif namePattern != "" and lfn == "":
            self.namePattern = namePattern
def _attribute_filter__get__(self, name):
# Attempt to spend too long loading un-needed objects into memory in
# order to read job status
if name is 'lfn':
if not self.lfn:
logger.warning("Do NOT have an LFN, for file: %s" % self.namePattern)
logger.warning("If file exists locally try first using the method put()")
return object.__getattribute__(self, 'lfn')
elif name in ['guid', 'locations']:
if configDirac['DiracFileAutoGet']:
if name is 'guid':
if self.guid:
if self.lfn:
self.getMetadata()
return object.__getattribute__(self, 'guid')
elif name is 'locations':
if self.locations == []:
if self.lfn:
self.getMetadata()
return object.__getattribute__(self, 'locations')
return object.__getattribute__(self, name)
def __repr__(self):
"""Get the representation of the file."""
return "DiracFile(namePattern='%s', lfn='%s', localDir='%s')" % (self.namePattern, self.lfn, self.localDir)
    def getSubFiles(self, process_wildcards = False):
        """
        Returns the subfiles for this instance

        If ``process_wildcards`` is True, wildcard patterns are expanded
        first; if an LFN is set, locations are refreshed from the job's
        post-processing output before returning.
        """
        if process_wildcards:
            self.processWildcardMatches()
        if self.lfn:
            self.setLocation()
        return self.subfiles
    def dirac_line_processor(self, line, dirac_file, localPath):
        """
        Function to interpret the post processor lines.
        This returns False when everything went OK and True on an ERROR
        """
        logger.debug("Calling dirac_line_processor")
        # Lines have the form:
        #   DiracFile:::pattern&&name->lfn:::locations:::guid
        tokens = line.strip().split(':::')
        logger.debug("dirac_line_processor: %s" % tokens)
        # NOTE(review): raises IndexError if '&&' or '->' is missing from
        # tokens[1] -- assumes well-formed post-processor output.
        pattern = tokens[1].split('->')[0].split('&&')[0]
        name = tokens[1].split('->')[0].split('&&')[1]
        lfn = tokens[1].split('->')[1]
        guid = tokens[3]
        try:
            # HACK: eval of the locations list written by the WN script;
            # falls back to the raw string if it does not parse.
            locations = eval(tokens[2])
        except Exception as err:
            logger.debug("line_process err: %s" % err)
            locations = tokens[2]

        if pattern == name:
            logger.debug("pattern == name")
            logger.error("Failed to parse outputfile data for file '%s'" % name)
            return True

        # This is the case that multiple files were requested
        if pattern == dirac_file.namePattern:
            logger.debug("pattern == dirac_file.namePattern")
            d = DiracFile(namePattern=name, lfn=lfn)
            d.compressed = dirac_file.compressed
            d.guid = guid
            d.locations = locations
            d.localDir = localPath
            dirac_file.subfiles.append(d)
            #dirac_line_processor(line, d)
            return False
        # This is the case that an individual file was requested
        elif name == dirac_file.namePattern:
            logger.debug("name == dirac_file.namePattern")
            if lfn == '###FAILED###':
                # The WN upload failed; tokens[2] holds the reason.
                dirac_file.failureReason = tokens[2]
                logger.error("Failed to upload file '%s' to Dirac: %s" % (name, dirac_file.failureReason))
                return True
            dirac_file.lfn = lfn
            dirac_file.locations = locations
            dirac_file.guid = guid
            dirac_file.localDir = localPath
            return False
        else:
            # Line is for some other file; not an error.
            logger.debug("False")
            return False
    def setLocation(self):
        """Parse the job's post-processing locations file and populate
        this file's lfn/locations/guid (and subfiles for wildcards).

        Silently returns if no job is associated; any parsing error is
        logged as a warning rather than raised.
        """
        logger.debug("DiracFile: setLocation")

        # Doesn't make sense without an owning job providing outputdir.
        if not stripProxy(self).getJobObject():
            logger.error("No job assocaited with DiracFile: %s" % str(self))
            return

        job = self.getJobObject()
        postprocessLocationsPath = os.path.join(job.outputdir, getConfig('Output')['PostProcessLocationsFileName'])

        postprocesslocations = None
        try:
            postprocesslocations = open(postprocessLocationsPath, 'r')
            self.subfiles = []
            ## NB remember only do this once at it leaves the 'cursor' at the end of the file - rcurrie
            all_lines = postprocesslocations.readlines()
            logger.debug("lines:\n%s" % all_lines)
            for line in all_lines:
                logger.debug("This line: %s" % line)
                if line.startswith('DiracFile'):
                    # dirac_line_processor returns True on error; only
                    # report it when this isn't a wildcard pattern.
                    if self.dirac_line_processor(line, self, os.path.dirname(postprocessLocationsPath)) and regex.search(self.namePattern) is None:
                        logger.error("Error processing line:\n%s\nAND: namePattern: %s is NOT matched" % (str(line), str(self.namePattern)))
                    else:
                        logger.debug("Parsed the Line")
                else:
                    logger.debug("Skipping the Line")
        except Exception as err:
            # Deliberate best-effort: a missing/corrupt locations file is
            # logged, not fatal.
            logger.warning("unexpected Error: %s" % str(err))
        finally:
            if postprocesslocations is not None:
                postprocesslocations.close()
    def _auto_remove(self):
        """
        Remove called when job is removed as long as config option allows
        """
        # Only attempt grid-side removal if the file was ever uploaded.
        if self.lfn != '':
            self.remove(printInfo=False)
    @require_credential
    def remove(self, printInfo=True):
        """
        Remove this lfn and all replicas from DIRAC LFC/SEs

        Raises GangaFileError when no LFN is set.  On success the local
        lfn/locations/guid state is cleared and True is returned.
        """
        if self.lfn == "":
            raise GangaFileError('Can\'t remove a file from DIRAC SE without an LFN.')
        if printInfo:
            logger.info('Removing file %s' % self.lfn)
        else:
            logger.debug('Removing file %s' % self.lfn)
        # Any DIRAC-side failure propagates from execute().
        stdout = execute('removeFile("%s")' % self.lfn, cred_req=self.credential_requirements)
        # Clear the now-stale grid identity of this object.
        self.lfn = ""
        self.locations = []
        self.guid = ''
        return True
@require_credential
def removeReplica(self, SE):
"""
Remove the replica from the given SE
"""
self.getReplicas()
if SE not in self.locations:
raise GangaFileError("No replica at supplied SE: %s" % SE)
try:
logger.info("Removing replica at %s for LFN %s" % (SE, self.lfn))
stdout = execute('removeReplica("%s", "%s")' % (self.lfn, SE), cred_req=self.credential_requirements)
self.locations.remove(SE)
except GangaDiracError as err:
raise err
return True
    @require_credential
    def getMetadata(self):
        """
        Get Metadata associated with this files lfn. This method will also
        try to automatically set the files guid attribute.

        Returns the DIRAC metadata dict, with a 'replicas' entry added
        under ret['Successful'][lfn].
        """
        if self.lfn == "":
            # May interactively upload the local file to obtain an LFN.
            self._optionallyUploadLocalFile()

        # check that it has a replica
        if not self.getReplicas():
            raise GangaFileError("No replica found for this file!")

        # eval again here as datatime not included in dirac_ganga_server
        ret = execute('getMetadata("%s")' % self.lfn, cred_req=self.credential_requirements)

        # Refresh the cached GUID when DIRAC reports a different one.
        if self.guid != ret.get('Successful',{}).get(self.lfn,{}).get('GUID',False):
            self.guid = ret['Successful'][self.lfn]['GUID']

        reps = self.getReplicas()
        ret['Successful'][self.lfn].update({'replicas': self.locations})

        return ret
    def _optionallyUploadLocalFile(self):
        """Interactively offer to upload the local file to the grid.

        No-op when an LFN already exists.  Prompts on stdin; raises
        GangaFileError when neither an lfn nor a namePattern is available,
        or when the upload fails to produce an LFN.
        """
        if self.lfn != "":
            return

        if self.namePattern != "" and self.lfn == "":
            logger.info("I have a local DiracFile, however you're requesting it's location on the grid")
            logger.info("Shall I upload it to the grid before I continue?")
            decision = input('[y] / n:')
            # Keep prompting until the answer is y, n or empty (= yes).
            while not (decision.lower() in ['y', 'n'] or decision.lower() == ''):
                decision = input('[y] / n:')
            if decision.lower() in ['y', '']:
                # upload namePattern to grid
                logger.debug("Uploading the file first")
                self.put()
            elif decision == 'n':
                logger.debug("Not uploading now")
                return
            else:
                # do Nothing
                logger.debug("Continuing without uploading file")

            if self.lfn == "":
                raise GangaFileError('Uploading of namePattern: %s failed' % self.namePattern)

        if self.namePattern == "" and self.lfn == "":
            raise GangaFileError('Cannot do anything if I don\'t have an lfn or a namePattern!')

        return
    @require_credential
    def getReplicas(self, forceRefresh=False):
        """
        Get the list of all SE where this file has a replica
        This relies on an internally stored list of replicas, (SE and unless forceRefresh = True

        For wildcard files, returns a list with one replica result per
        subfile; otherwise a one-element list wrapping the replica dict
        for this LFN (or {} when DIRAC knows nothing about it).
        """
        if self.lfn == '':
            self._optionallyUploadLocalFile()
        if self.lfn == '':
            raise GangaFileError("Can't find replicas for file which has no LFN!")

        these_replicas = None

        if len(self.subfiles) != 0:
            # Wildcard case: delegate to each subfile.
            allReplicas = []
            for i in self.subfiles:
                allReplicas.append(i.getReplicas())
            these_replicas = allReplicas
        else:
            # deep copy just before wer change it incase we're pointing to the
            # data stored in original from a copy
            if self._have_copied:
                self._storedReplicas = copy.deepcopy(self._storedReplicas)
            # Query DIRAC when the cache is empty or a refresh is forced.
            if (self._storedReplicas == {} and len(self.subfiles) == 0) or forceRefresh:
                try:
                    self._storedReplicas = execute('getReplicas("%s")' % self.lfn, cred_req=self.credential_requirements)
                except GangaDiracError as err:
                    logger.error("Couldn't find replicas for: %s" % str(self.lfn))
                    self._storedReplicas = {}
                    raise
                try:
                    # Keep only the 'Successful' section of the response.
                    self._storedReplicas = self._storedReplicas['Successful']
                except Exception as err:
                    logger.error("Unknown Error: %s from %s" % (str(err), self._storedReplicas))
                    raise

                logger.debug("getReplicas: %s" % str(self._storedReplicas))

                if self.lfn in self._storedReplicas:
                    self._updateRemoteURLs(self._storedReplicas)
                    these_replicas = [self._storedReplicas[self.lfn]]
                else:
                    these_replicas = {}
            elif self._storedReplicas != {}:
                # Serve from the cache.
                these_replicas = [self._storedReplicas[self.lfn]]

        return these_replicas
    def _updateRemoteURLs(self, reps):
        """
        Internal function used for storing all replica information about this LFN at different sites
        """
        if len(self.subfiles) != 0:
            # Wildcard case: propagate to every subfile.
            for i in self.subfiles:
                i._updateRemoteURLs(reps)
        else:
            if self.lfn not in reps:
                return
            # Refresh the SE list only when it actually changed.
            if self.locations != list(reps[self.lfn].keys()):
                self.locations = list(reps[self.lfn].keys())
            #logger.debug( "locations: %s" % str( self.locations ) )
            # deep copy just before wer change it incase we're pointing to the
            # data stored in original from a copy
            if self._have_copied:
                self._remoteURLs = copy.deepcopy(self._remoteURLs)
            # Record the per-SE URL for each replica location.
            for site in self.locations:
                #logger.debug( "site: %s" % str( site ) )
                self._remoteURLs[site] = reps[self.lfn][site]
                #logger.debug("Adding _remoteURLs[site]: %s" % str(self._remoteURLs[site]))
def location(self):
"""
Return a list of LFN locations for this DiracFile
"""
if len(self.subfiles) == 0:
if self.lfn == "":
self._optionallyUploadLocalFile()
else:
return [self.lfn]
else:
# 1 LFN per DiracFile
LFNS = []
for this_file in self.subfiles:
these_LFNs = this_file.location()
for this_url in these_LFNs:
LFNs.append(this_url)
return LFNs
    @require_credential
    def accessURL(self, thisSE='', protocol=''):
        """
        Attempt to find an accessURL which corresponds to the specified SE. If no SE is specified then
        return a random one from all the replicas. Also use the specified protocol - if none then use
        the default.
        """
        # Build the LFN list: either this file's own LFN or one per
        # wildcard subfile.
        lfns = []
        if len(self.subfiles) == 0:
            lfns.append(self.lfn)
        else:
            for i in self.subfiles:
                lfns.append(i.lfn)
        # Delegate URL resolution to the DIRAC backend helper.
        return getAccessURLs(lfns, thisSE, protocol, self.credential_requirements)
    @require_credential
    def internalCopyTo(self, targetPath):
        """
        Retrieves locally the file matching this DiracFile object pattern.
        If localPath is specified
        Args:
            targetPath(str): The path the file should be placed at locally
        """
        to_location = targetPath

        if self.lfn == "":
            raise GangaFileError('Can\'t download a file without an LFN.')

        logger.info("Getting file %s" % self.lfn)
        stdout = execute('getFile("%s", destDir="%s")' % (self.lfn, to_location), cred_req=self.credential_requirements)

        if self.namePattern == "":
            # Derive the local name from the LFN; drop the '.gz' suffix
            # for compressed files.
            name = os.path.basename(self.lfn)
            if self.compressed:
                name = name[:-3]
            self.namePattern = name

        # Fill in guid/locations from DIRAC if we don't have them yet.
        if self.guid == "" or not self.locations:
            self.getMetadata()
        return True
    @require_credential
    def replicate(self, destSE, sourceSE=''):
        """
        Replicate an LFN to another SE

        Args:
            destSE (str): the SE to replicate the file to
            sourceSE (str): the SE to use as a source for the file
        """
        if not self.lfn:
            raise GangaFileError('Must supply an lfn to replicate')

        logger.info("Replicating file %s to %s" % (self.lfn, destSE))
        stdout = execute('replicateFile("%s", "%s", "%s")' % (self.lfn, destSE, sourceSE), cred_req=self.credential_requirements)

        # Keep the local replica list in sync.
        if destSE not in self.locations:
            self.locations.append(destSE)
    def processWildcardMatches(self):
        # Wildcard expansion for grid-side inputfiles is not implemented;
        # any glob character in the namePattern is rejected outright.
        if regex.search(self.namePattern) is not None:
            raise GangaFileError("No wildcards in inputfiles for DiracFile just yet. Dirac are exposing this in API soon.")
    @require_credential
    def put(self, lfn='', force=False, uploadSE="", replicate=False):
        """
        Try to upload file sequentially to storage elements defined in configDirac['allDiracSE'].
        File will be uploaded to the first SE that the upload command succeeds for.
        The file is uploaded to the SE described by the DiracFile.defaultSE attribute
        Alternatively, the user can specify an uploadSE which contains an SE
        which the file is to be uploaded to.
        If the user wants to replicate this file(s) across all SE then they should state replicate = True.
        Return value will be either the stdout from the dirac upload command if not
        using the wildcard characters '*?[]' in the namePattern.
        If the wildcard characters are used then the return value will be a list containing
        newly created DiracFile objects which were the result of glob-ing the wildcards.
        The objects in this list will have been uploaded or had their failureReason attribute populated if the
        upload failed.
        """
        # Warn (but proceed) when re-putting a file that already has an LFN.
        if self.lfn != "" and force == False and lfn == '':
            logger.warning("Warning you're about to 'put' this DiracFile: %s on the grid as it already has an lfn: %s" % (self.namePattern, self.lfn))

        if (lfn != '' and self.lfn != '') and force == False:
            logger.warning("Warning you're attempting to put this DiracFile: %s" % self.namePattern)
            logger.warning("It currently has an LFN associated with it: %s" % self.lfn)
            logger.warning("Will continue and attempt to upload to: %s" % lfn)

        if lfn and os.path.basename(lfn) != self.namePattern:
            logger.warning("Changing namePattern from: '%s' to '%s' during put operation" % (self.namePattern, os.path.basename(lfn)))

        if lfn:
            # Assigning self.lfn also updates namePattern/remoteDir via the
            # attribute filters.
            self.lfn = lfn

        # looks like will only need this for the interactive uploading of jobs.
        # Also if any backend need dirac upload on client then when downloaded
        # this will upload then delete the file.
        if self.namePattern == "":
            if self.lfn != '':
                logger.warning("'Put'-ing a file with ONLY an existing LFN makes no sense!")
            raise GangaFileError('Can\'t upload a file without a local file name.')

        # Work out which local directory the file(s) come from.
        sourceDir = self.localDir
        if self.localDir is None:
            sourceDir = os.getcwd()
            # attached to a job, use the joboutputdir
            if self._parent is not None and os.path.isdir(self.getJobObject().outputdir):
                sourceDir = self.getJobObject().outputdir

        if not os.path.isdir(sourceDir):
            raise GangaFileError('localDir attribute is not a valid dir, don\'t know from which dir to take the file')

        # A wildcard pattern cannot map onto a single user-supplied LFN.
        if regex.search(self.namePattern) is not None:
            if self.lfn != "":
                logger.warning("Cannot specify a single lfn for a wildcard namePattern")
                logger.warning("LFN will be generated automatically")
                self.lfn = ""

        # Build the LFN directory: user base + remoteDir, or an
        # auto-generated job/date folder when no remoteDir is set.
        if not self.remoteDir:
            try:
                job = self.getJobObject()
                lfn_folder = os.path.join("GangaJob_%s" % job.getFQID('/'), "OutputFiles")
            except AssertionError:
                t = datetime.datetime.now()
                this_date = t.strftime("%H.%M_%A_%d_%B_%Y")
                lfn_folder = os.path.join('GangaFiles_%s' % this_date)
            lfn_base = os.path.join(DiracFile.diracLFNBase(self.credential_requirements), lfn_folder)
        else:
            lfn_base = os.path.join(DiracFile.diracLFNBase(self.credential_requirements), self.remoteDir)

        # Choose the target SE(s): explicit uploadSE > defaultSE > first
        # writable SE from the configured list (in random order).
        if uploadSE == "":
            if self.defaultSE != "":
                storage_elements = [self.defaultSE]
            else:
                if configDirac['allDiracSE']:
                    trySEs = list(configDirac['allDiracSE'])
                    random.shuffle(trySEs)
                    storage_elements = None
                    for se in trySEs:
                        if execute('checkSEStatus("%s", "%s")' % (se, 'Write')):
                            storage_elements = [se]
                            break
                    if not storage_elements:
                        raise GangaFileError("No SE allowed for Write")
                else:
                    raise GangaFileError("Can't upload a file without a valid defaultSE or storageSE, please provide one")
        elif isinstance(uploadSE, list):
            storage_elements = uploadSE
        else:
            storage_elements = [uploadSE]

        outputFiles = GangaList()
        for this_file in glob.glob(os.path.join(sourceDir, self.namePattern)):
            name = this_file
            # Resolve the actual on-disk name, gzipping first if requested.
            if not os.path.exists(name):
                if not self.compressed:
                    raise GangaFileError('Cannot upload file. File "%s" must exist!' % name)
                name += '.gz'
                if not os.path.exists(name):
                    raise GangaFileError('File "%s" must exist!' % name)
            else:
                if self.compressed:
                    os.system('gzip -c %s > %s.gz' % (name, name))
                    name += '.gz'
                    if not os.path.exists(name):
                        raise GangaFileError('File "%s" must exist!' % name)

            # NOTE(review): ``lfn`` is not reset between loop iterations,
            # so with a wildcard pattern every file after the first reuses
            # the first file's LFN -- looks like a bug, confirm upstream.
            if lfn=='':
                lfn = os.path.join(lfn_base, os.path.basename(this_file))

            if len(os.path.basename(lfn)) > 60:
                logger.warning('Filename is longer than 60 characters. This may cause problems with Dirac storage.')

            # Per-file result object used in the wildcard case.
            d = DiracFile()
            d.namePattern = os.path.basename(name)
            d.compressed = self.compressed
            d.localDir = sourceDir
            stderr = ''
            stdout = ''
            logger.info('Uploading file \'%s\' to \'%s\' as \'%s\'' % (name, storage_elements[0], lfn))
            logger.debug('execute: uploadFile("%s", "%s", %s)' % (lfn, os.path.join(sourceDir, name), str([storage_elements[0]])))
            try:
                stdout = execute('uploadFile("%s", "%s", %s)' % (lfn, os.path.join(sourceDir, name), str([storage_elements[0]])), cred_req=self.credential_requirements)
            except GangaDiracError as err:
                # Record the failure on the per-file object (wildcard) or
                # on self (single file) and carry on with the next file.
                logger.warning("Couldn't upload file '%s': \'%s\'" % (os.path.basename(name), err))
                failureReason = "Error in uploading file '%s' : '%s'" % (os.path.basename(name), err)
                if regex.search(self.namePattern) is not None:
                    d.failureReason = failureReason
                    outputFiles.append(d)
                    continue
                self.failureReason += '\n' + failureReason
                continue

            stdout_temp = stdout.get('Successful')

            if not stdout_temp:
                msg = "Couldn't upload file '%s': \'%s\'" % (os.path.basename(name), stdout)
                logger.warning(msg)
                if regex.search(self.namePattern) is not None:
                    d.failureReason = msg
                    outputFiles.append(d)
                    continue
                self.failureReason = msg
                continue
            else:
                lfn_out = stdout_temp[lfn]

            # when doing the two step upload delete the temp file
            if self.compressed or self._parent is not None:
                os.remove(name)
            # need another eval as datetime needs to be included.
            guid = lfn_out.get('GUID', '')
            if regex.search(self.namePattern) is not None:
                d.lfn = lfn
                d.remoteDir = os.path.dirname(lfn)
                d.locations = lfn_out.get('allDiracSE', '')
                d.guid = guid
                outputFiles.append(d)
                continue
            else:
                self.lfn = lfn
                self.remoteDir = os.path.dirname(lfn)
                self.locations = lfn_out.get('allDiracSE', '')
                self.guid = guid

        # Optionally replicate to the remaining SEs of the chosen list.
        if replicate == True:
            if len(outputFiles) == 1 or len(outputFiles) == 0:
                storage_elements.pop(0)
                for se in storage_elements:
                    self.replicate(se)
            else:
                storage_elements.pop(0)
                for this_file in outputFiles:
                    for se in storage_elements:
                        this_file.replicate(se)

        # Always return a list; for a single file it wraps ``self``.
        if len(outputFiles) > 0:
            return outputFiles
        else:
            outputFiles.append(self)
            return outputFiles
def getWNScriptDownloadCommand(self, indent):
    """Return a script snippet (each line prefixed with *indent*) that, when
    injected into the worker-node job script, downloads this file's LFN by
    running the download-script template inside the DIRAC environment."""
    # The download-script template lives next to this module on disk.
    script_location = os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), 'downloadScript.py.template')
    download_script = FileUtils.loadScript(script_location, '')
    # Wrapper snippet: pipe the download script via stdin into a python
    # started with the DIRAC environment, so the job's own env is untouched.
    script = """\n
download_script=b'''\n###DOWNLOAD_SCRIPT###'''
import subprocess
dirac_env=###DIRAC_ENV###
subprocess.Popen('''python -c "import sys\nexec(sys.stdin.read())"''', shell=True, env=dirac_env, stdin=subprocess.PIPE).communicate(download_script)
"""
    # Apply the caller-supplied indentation to every line of the snippet.
    script = '\n'.join([str(indent + str(line)) for line in script.split('\n')])
    replace_dict = {'###DOWNLOAD_SCRIPT###': download_script,
                    '###DIRAC_ENV###': self._getDiracEnvStr(),
                    '###LFN###': self.lfn}
    # Substitute the placeholders with their concrete values.
    for k, v in replace_dict.items():
        script = script.replace(str(k), str(v))
    return script
def _getDiracEnvStr(self):
    """Return the DIRAC environment for this file's credentials, rendered as a string."""
    return str(getDiracEnv(self.credential_requirements.dirac_env))
def _WN_wildcard_script(self, namePattern, lfnBase, compressed):
    """Return a worker-node script fragment that globs *namePattern* and
    queues an upload of every match to *lfnBase*; *compressed* is substituted
    verbatim as the upload's compression flag."""
    wildcard_str = """
for f in glob.glob('###NAME_PATTERN###'):
    processes.append(uploadFile(os.path.basename(f), '###LFN_BASE###', ###COMPRESSED###, '###NAME_PATTERN###'))
"""
    # Tag each line with the ###INDENT### marker so the caller can apply
    # its own indentation during final script assembly.
    wildcard_str = FileUtils.indentScript(wildcard_str, '###INDENT###')
    replace_dict = {'###NAME_PATTERN###': namePattern,
                    '###LFN_BASE###': lfnBase,
                    '###COMPRESSED###': compressed}
    # Substitute the placeholders with their concrete values.
    for k, v in replace_dict.items():
        wildcard_str = wildcard_str.replace(str(k), str(v))
    return wildcard_str
def getWNInjectedScript(self, outputFiles, indent, patternsToZip, postProcessLocationsFP):
    """
    Returns script that have to be injected in the jobscript for postprocessing on the WN.

    outputFiles: DiracFile-like objects to upload from the worker node.
    indent: indentation string applied to every generated line.
    patternsToZip: name patterns whose files should be compressed before upload.
    postProcessLocationsFP: path of the locations file written on the WN.
    """
    script_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    script_location = os.path.join(script_path, 'uploadScript.py.template')
    upload_script = FileUtils.loadScript(script_location, '')
    WNscript_location = os.path.join(script_path, 'WNInjectTemplate.py.template')
    script = FileUtils.loadScript(WNscript_location, '')
    # Choose the LFN base directory: explicit remoteDir wins, otherwise a
    # per-job (or, with no job, per-timestamp) folder is generated.
    if not self.remoteDir:
        try:
            job = self.getJobObject()
            lfn_folder = os.path.join("GangaJob_%s" % job.getFQID('.'), "OutputFiles")
        except AssertionError:
            # No associated job object: fall back to a timestamped folder.
            t = datetime.datetime.now()
            this_date = t.strftime("%H.%M_%A_%d_%B_%Y")
            lfn_folder = os.path.join('GangaFiles_%s' % this_date)
        lfn_base = os.path.join(DiracFile.diracLFNBase(self.credential_requirements), lfn_folder)
    else:
        lfn_base = os.path.join(DiracFile.diracLFNBase(self.credential_requirements), self.remoteDir)
    # Emit one upload command per output file; wildcard patterns expand to a
    # glob loop on the worker node.
    for this_file in outputFiles:
        isCompressed = this_file.namePattern in patternsToZip
        if not regex.search(this_file.namePattern) is None:
            script += self._WN_wildcard_script(this_file.namePattern, lfn_base, str(isCompressed))
        else:
            script += '###INDENT###print("Uploading: %s as: %s")\n' % (this_file.namePattern, str(os.path.join(lfn_base, this_file.namePattern)))
            script += '###INDENT###processes.append(uploadFile("%s", "%s", %s))\n' % (this_file.namePattern, lfn_base, str(isCompressed))
    # A DIRAC environment is only injected when the job does not already run
    # on the Dirac backend (which provides one itself).
    if stripProxy(self)._parent is not None and stripProxy(self).getJobObject() and getName(stripProxy(self).getJobObject().backend) != 'Dirac':
        script_env = self._getDiracEnvStr()
    else:
        script_env = str(None)
    # Apply the requested indentation, then substitute all placeholders.
    script = '\n'.join([str('###INDENT###' + str(line)) for line in script.split('\n')])
    replace_dict = {'###UPLOAD_SCRIPT###': upload_script,
                    '###STORAGE_ELEMENTS###': str(configDirac['allDiracSE']),
                    '###INDENT###': indent,
                    '###LOCATIONSFILE###': postProcessLocationsFP,
                    '###DIRAC_ENV###': script_env}
    for k, v in replace_dict.items():
        script = script.replace(str(k), str(v))
    return script
def hasMatchedFiles(self):
    """Return True when this file's namePattern is matched by its LFN, or when
    a wildcard pattern has collected at least one subfile; log and return
    False otherwise."""
    if self.lfn != "" and self.namePattern != "":
        # Concrete (non-wildcard) file: the LFN's basename must equal the pattern.
        if self.namePattern == os.path.basename(self.lfn):
            return True
        logger.error("LFN doesn't match namePattern for file: %s" % str(self.namePattern))
        return False
    # Wildcard pattern: matched iff it produced subfiles.
    if len(self.subfiles) > 0 and regex.search(self.namePattern) is not None:
        return True
    logger.error("Failed to Match file:\n%s" % str(self))
    return False
@staticmethod
def diracLFNBase(credential_requirements):
    """
    Compute a sensible default LFN base name.
    If ``DiracLFNBase`` has been defined, use that.
    Otherwise, construct one from the user name and the user VO.
    Args:
        credential_requirements (DiracProxy): This is the credential which governs how we should format the path
    """
    configured_base = configDirac['DiracLFNBase']
    if configured_base:
        return configured_base
    # Standard DIRAC layout: /<VO>/user/<initial>/<username>
    username = DiracProxyInfo(credential_requirements).username
    return '/%s/user/%s/%s' % (configDirac['userVO'], username[0], username)
# Add the DiracFile class to the configuration scope so that DiracFile()
# objects can be instantiated directly from a Ganga configuration file.
GangaCore.Utility.Config.config_scope['DiracFile'] = DiracFile
exportToGPI('GangaDirac', GangaList, 'Classes')
| ganga-devs/ganga | ganga/GangaDirac/Lib/Files/DiracFile.py | Python | gpl-3.0 | 39,137 | [
"DIRAC"
] | b0c26460a6989291cd8c90f6997ea32faecc25c13b2a49f0b51d8c2379cb571e |
import numpy as np
from scipy import sparse
from . import Mapper, _savecache
from . import samplers
class LineMapper(Mapper):
    """Mapper that samples a volume along straight lines between the pial and
    white-matter surfaces, averaging ``cls.sampler`` over interior points of
    each line (subclasses choose the sampler)."""

    @classmethod
    def _cache(cls, filename, subject, xfmname, **kwargs):
        """Build the per-hemisphere mapping matrices for (subject, xfmname),
        save them to *filename*, and return the constructed mapper."""
        from .. import db
        masks = []
        xfm = db.get_xfm(subject, xfmname, xfmtype='coord')
        pia = db.get_surf(subject, "pia", merge=False, nudge=False)
        wm = db.get_surf(subject, "wm", merge=False, nudge=False)
        # Iterate over hemispheres; each surface is a (points, polys) pair.
        # NOTE: the endpoints are passed to _getmask in (wm, pia) order, but
        # since _getmask sums over sample positions symmetric about the line
        # midpoint, the endpoint order does not change the result.
        for (pia_pts, polys), (wm_pts, _) in zip(pia, wm):
            masks.append(cls._getmask(xfm(wm_pts), xfm(pia_pts), polys, xfm.shape, **kwargs))
        _savecache(filename, masks[0], masks[1], xfm.shape)
        return cls(masks[0], masks[1], xfm.shape)

    @classmethod
    def _getmask(cls, pia, wm, polys, shape, npts=64, mp=True, **kwargs):
        """Return a sparse (n_vertices, n_voxels) matrix averaging cls.sampler
        over *npts* points strictly between the wm and pia endpoints.

        npts: number of interior sample points per line.
        mp: accepted for interface compatibility; not used here.
        """
        mapper = sparse.csr_matrix((len(pia), np.prod(shape)))
        # linspace(0, 1, npts + 2)[1:-1] excludes the endpoints themselves.
        for t in np.linspace(0, 1, npts + 2)[1:-1]:
            i, j, data = cls.sampler(pia * t + wm * (1 - t), shape)
            mapper = mapper + sparse.csr_matrix((data / npts, (i, j)), shape=mapper.shape)
        return mapper
class LineNN(LineMapper):
    """Line mapper using nearest-neighbour volume sampling."""
    sampler = staticmethod(samplers.nearest)
class LineTrilin(LineMapper):
    """Line mapper using trilinear volume sampling."""
    sampler = staticmethod(samplers.trilinear)
class LineGauss(LineMapper):
    """Line mapper using Gaussian-weighted volume sampling."""
    sampler = staticmethod(samplers.gaussian)
class LineLanczos(LineMapper):
    """Line mapper using Lanczos-windowed volume sampling."""
    sampler = staticmethod(samplers.lanczos)
| CVML/pycortex | cortex/mapper/line.py | Python | bsd-2-clause | 1,505 | [
"Gaussian"
] | bb67b0f3a6b9807e6e9851f88c5f25ea7fb0561b1873dbed48190a6dcbe8e754 |
# Copyright (C) 2017,2018
# Max Planck Institute for Polymer Research
# Copyright (C) 2016
# Jakub Krajniak (jkrajniak at gmail.com)
# Copyright (C) 2012,2013,2014,2015
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************************
espressopp.interaction.Tabulated
********************************
.. function:: espressopp.interaction.Tabulated(itype, filename, cutoff)
Defines a tabulated potential.
:param itype: interpolation type (1,2, or 3 for linear, Akima, or cubic splines)
:param filename: table filename
:param cutoff: (default: infinity) interaction cutoff
:type itype: int
:type filename: string
:type cutoff: real or "infinity"
.. function:: espressopp.interaction.VerletListAdressTabulated(vl, fixedtupleList)
Defines a verletlist-based AdResS interaction using tabulated potentials for both AT and CG interactions.
:param vl: Verletlist AdResS object
:param fixedtupleList: FixedTupleList object
:type vl: shared_ptr<VerletListAdress>
:type fixedtupleList: shared_ptr<FixedTupleListAdress>
.. function:: espressopp.interaction.VerletListAdressTabulated.setPotentialAT(type1, type2, potential)
Sets the AT potential in VerletListAdressTabulated interaction for interacting particles of type1 and type2.
:param type1: particle type 1
:param type2: particle type 2
:param potential: tabulated interaction potential object
:type type1: int
:type type2: int
:type potential: shared_ptr<Tabulated>
.. function:: espressopp.interaction.VerletListAdressTabulated.setPotentialCG(type1, type2, potential)
Sets the CG potential in VerletListAdressTabulated interaction for interacting particles of type1 and type2.
:param type1: particle type 1
:param type2: particle type 2
:param potential: tabulated interaction potential object
:type type1: int
:type type2: int
:type potential: shared_ptr<Tabulated>
.. function:: espressopp.interaction.VerletListAdressCGTabulated(vl, fixedtupleList)
Defines only the CG part of a verletlist-based AdResS interaction using a tabulated potential for the CG interaction. It's defined as a "NonbondedSlow" interaction (which multiple time stepping integrators can make use of).
:param vl: Verletlist AdResS object
:param fixedtupleList: FixedTupleList object
:type vl: shared_ptr<VerletListAdress>
:type fixedtupleList: shared_ptr<FixedTupleListAdress>
.. function:: espressopp.interaction.VerletListAdressCGTabulated.setPotential(type1, type2, potential)
Sets the CG potential in VerletListAdressCGTabulated interaction for interacting particles of type1 and type2.
:param type1: particle type 1
:param type2: particle type 2
:param potential: tabulated interaction potential object
:type type1: int
:type type2: int
:type potential: shared_ptr<Tabulated>
.. function:: espressopp.interaction.VerletListAdressCGTabulated.getPotential(type1, type2)
Gets the CG potential in VerletListAdressCGTabulated interaction for interacting particles of type1 and type2.
:param type1: particle type 1
:param type2: particle type 2
:type type1: int
:type type2: int
:rtype: shared_ptr<Tabulated>
.. function:: espressopp.interaction.VerletListAdressCGTabulated.getVerletList()
Gets the verletlist used in VerletListAdressCGTabulated interaction.
:rtype: shared_ptr<VerletListAdress>
.. function:: espressopp.interaction.VerletListHadressTabulated(vl, fixedtupleList)
Defines a verletlist-based H-AdResS interaction using tabulated potentials for both AT and CG interactions.
:param vl: Verletlist AdResS object
:param fixedtupleList: FixedTupleList object
:type vl: shared_ptr<VerletListAdress>
:type fixedtupleList: shared_ptr<FixedTupleListAdress>
.. function:: espressopp.interaction.VerletListHadressTabulated.setPotentialAT(type1, type2, potential)
Sets the AT potential in VerletListHadressTabulated interaction for interacting particles of type1 and type2.
:param type1: particle type 1
:param type2: particle type 2
:param potential: tabulated interaction potential object
:type type1: int
:type type2: int
:type potential: shared_ptr<Tabulated>
.. function:: espressopp.interaction.VerletListHadressTabulated.setPotentialCG(type1, type2, potential)
Sets the CG potential in VerletListHadressTabulated interaction for interacting particles of type1 and type2.
:param type1: particle type 1
:param type2: particle type 2
:param potential: tabulated interaction potential object
:type type1: int
:type type2: int
:type potential: shared_ptr<Tabulated>
.. function:: espressopp.interaction.VerletListHadressCGTabulated(vl, fixedtupleList)
Defines only the CG part of a verletlist-based H-AdResS interaction using a tabulated potential for the CG interaction. It's defined as a "NonbondedSlow" interaction (which multiple time stepping integrators can make use of).
:param vl: Verletlist AdResS object
:param fixedtupleList: FixedTupleList object
:type vl: shared_ptr<VerletListAdress>
:type fixedtupleList: shared_ptr<FixedTupleListAdress>
.. function:: espressopp.interaction.VerletListHadressCGTabulated.setPotential(type1, type2, potential)
Sets the CG potential in VerletListHadressCGTabulated interaction for interacting particles of type1 and type2.
:param type1: particle type 1
:param type2: particle type 2
:param potential: tabulated interaction potential object
:type type1: int
:type type2: int
:type potential: shared_ptr<Tabulated>
.. function:: espressopp.interaction.VerletListHadressCGTabulated.getPotential(type1, type2)
Gets the CG potential in VerletListHadressCGTabulated interaction for interacting particles of type1 and type2.
:param type1: particle type 1
:param type2: particle type 2
:type type1: int
:type type2: int
:rtype: shared_ptr<Tabulated>
.. function:: espressopp.interaction.VerletListHadressCGTabulated.getVerletList()
Gets the verletlist used in VerletListHadressCGTabulated interaction.
:rtype: shared_ptr<VerletListAdress>
.. function:: espressopp.interaction.VerletListTabulated(vl)
Defines a verletlist-based interaction using a tabulated potential.
:param vl: Verletlist object
:type vl: shared_ptr<VerletList>
.. function:: espressopp.interaction.VerletListTabulated.getPotential(type1, type2)
Gets the tabulated interaction potential in VerletListTabulated for interacting particles of type1 and type2.
:param type1: particle type 1
:param type2: particle type 2
:type type1: int
:type type2: int
:rtype: shared_ptr<Tabulated>
.. function:: espressopp.interaction.VerletListTabulated.setPotential(type1, type2, potential)
Sets the tabulated interaction potential in VerletListTabulated for interacting particles of type1 and type2.
:param type1: particle type 1
:param type2: particle type 2
:param potential: tabulated interaction potential object
:type type1: int
:type type2: int
:type potential: shared_ptr<Tabulated>
.. function:: espressopp.interaction.CellListTabulated(stor)
Defines a CellList-based interaction using a tabulated potential.
:param stor: storage object
:type stor: shared_ptr <storage::Storage>
.. function:: espressopp.interaction.CellListTabulated.setPotential(type1, type2, potential)
Sets the tabulated interaction potential in CellListTabulated for interacting particles of type1 and type2.
:param type1: particle type 1
:param type2: particle type 2
:param potential: tabulated interaction potential object
:type type1: int
:type type2: int
:type potential: shared_ptr<Tabulated>
.. function:: espressopp.interaction.FixedPairListTabulated(system, vl, potential)
Defines a FixedPairList-based interaction using a tabulated potential.
:param system: system object
:param vl: FixedPairList list object
:param potential: tabulated potential object
:type system: shared_ptr<System>
:type vl: shared_ptr<FixedPairList>
:type potential: shared_ptr<Tabulated>
.. function:: espressopp.interaction.FixedPairListTabulated.setPotential(potential)
Sets the tabulated interaction potential in FixedPairListTabulated for interacting particles.
:param potential: tabulated potential object
:type potential: shared_ptr<Tabulated>
.. function:: espressopp.interaction.FixedPairListTypesTabulated(system, fpl)
:param system: The Espresso++ system object.
:type system: espressopp.System
:param fpl: The FixedPairList.
:type fpl: espressopp.FixedPairList
.. function:: espressopp.interaction.FixedPairListTypesTabulated.setPotential(type1, type2, potential)
Defines the bond potential for interactions between particles of types type1 and type2.
:param type1: Type of particle 1.
:type type1: int
:param type2: Type of particle 2.
:type type2: int
:param potential: The potential to set up.
:type potential: espressopp.interaction.Potential
.. function:: espressopp.interaction.FixedPairListPIadressTabulated(system, fpl, fixedtupleList, potential, ntrotter, speedup)
Defines tabulated bonded pair potential for interactions based on the fixedtuplelist in the context of Path Integral AdResS. When the speedup flag is set,
it will use only the centroids in the classical region, otherwise all Trotter beads. In the quantum region, always all Trotter beads are used.
:param system: The Espresso++ system object.
:param fpl: The FixedPairList.
:param fixedtupleList: The FixedTupleListAdress object.
:param potential: The potential.
:param ntrotter: The Trotter number.
:param speedup: Boolean flag to decide whether to use centroids in classical region or all Trotter beads
:type system: espressopp.System
:type fpl: espressopp.FixedPairList
:type fixedtupleList: espressopp.FixedTupleListAdress
:type potential: espressopp.interaction.Potential
:type ntrotter: int
:type speedup: bool
.. function:: espressopp.interaction.FixedPairListPIadressTabulated.setPotential(potential)
Sets the potential.
:param potential: The potential object.
:type potential: espressopp.interaction.Potential
.. function:: espressopp.interaction.FixedPairListPIadressTabulated.getPotential()
Gets the potential.
:return: the potential
:rtype: shared_ptr < Potential >
.. function:: espressopp.interaction.FixedPairListPIadressTabulated.setFixedPairList(fpl)
Sets the FixedPairList.
:param fpl: The FixedPairList object.
:type fpl: espressopp.FixedPairList
.. function:: espressopp.interaction.FixedPairListPIadressTabulated.getFixedPairList()
Gets the FixedPairList.
:return: the FixedPairList
:rtype: shared_ptr < FixedPairList >
.. function:: espressopp.interaction.FixedPairListPIadressTabulated.setFixedTupleList(fixedtupleList)
Sets the FixedTupleList.
:param fixedtupleList: The FixedTupleListAdress object.
:type fixedtupleList: espressopp.FixedTupleListAdress
.. function:: espressopp.interaction.FixedPairListPIadressTabulated.getFixedTupleList()
Gets the FixedTupleList.
:return: the FixedTupleList
:rtype: shared_ptr < FixedTupleListAdress >
.. function:: espressopp.interaction.FixedPairListPIadressTabulated.setNTrotter(ntrotter)
Sets the Trotter number NTrotter.
:param ntrotter: The Trotter number.
:type ntrotter: int
.. function:: espressopp.interaction.FixedPairListPIadressTabulated.getNTrotter()
Gets the Trotter number NTrotter.
:return: the Trotter number
:rtype: int
.. function:: espressopp.interaction.FixedPairListPIadressTabulated.setSpeedup(speedup)
Sets the speedup flag.
:param speedup: The speedup flag.
:type speedup: bool
.. function:: espressopp.interaction.FixedPairListPIadressTabulated.getSpeedup()
Gets the speedup flag.
:return: the speedup flag
:rtype: bool
.. function:: espressopp.interaction.VerletListPIadressTabulated(vl, fixedtupleList, ntrotter, speedup)
Defines a non-bonded interaction using an adaptive resolution VerletList in the context of Path Integral AdResS. Two different tabulated potentials can be specified: one, which is used in the quantum region, the other one in the classical region. The interpolation proceeds according to the Path Integral AdResS scheme (see J. Chem. Phys 147, 244104 (2017)). When the speedup flag is set, it will use only the centroids in the classical region, otherwise all Trotter beads. In the quantum region, always all Trotter beads are used.
:param vl: The AdResS VerletList.
:param fixedtupleList: The FixedTupleListAdress object.
:param ntrotter: The Trotter number.
:param speedup: Boolean flag to decide whether to use centroids in classical region or all Trotter beads
:type vl: espressopp.VerletListAdress
:type fixedtupleList: espressopp.FixedTupleListAdress
:type ntrotter: int
:type speedup: bool
.. function:: espressopp.interaction.VerletListPIadressTabulated.setPotentialQM(potential)
Sets the potential for the quantum region (has to be a tabulated one).
:param potential: The potential object.
:type potential: espressopp.interaction.Potential
.. function:: espressopp.interaction.VerletListPIadressTabulated.setPotentialCL(potential)
Sets the potential for the classical region (has to be a tabulated one).
:param potential: The potential object.
:type potential: espressopp.interaction.Potential
.. function:: espressopp.interaction.VerletListPIadressTabulated.setVerletList(vl)
Sets the VerletList.
:param vl: The VerletListAdress object.
:type vl: espressopp.VerletListAdress
.. function:: espressopp.interaction.VerletListPIadressTabulated.getVerletList()
Gets the VerletList.
:return: the Adress VerletList
:rtype: shared_ptr<VerletListAdress>
.. function:: espressopp.interaction.VerletListPIadressTabulated.setFixedTupleList(fixedtupleList)
Sets the FixedTupleList.
:param fixedtupleList: The FixedTupleListAdress object.
:type fixedtupleList: espressopp.FixedTupleListAdress
.. function:: espressopp.interaction.VerletListPIadressTabulated.getFixedTupleList()
Gets the FixedTupleList.
:return: the FixedTupleList
:rtype: shared_ptr < FixedTupleListAdress >
.. function:: espressopp.interaction.VerletListPIadressTabulated.setNTrotter(ntrotter)
Sets the Trotter number NTrotter.
:param ntrotter: The Trotter number.
:type ntrotter: int
.. function:: espressopp.interaction.VerletListPIadressTabulated.getNTrotter()
Gets the Trotter number NTrotter.
:return: the Trotter number
:rtype: int
.. function:: espressopp.interaction.VerletListPIadressTabulated.setSpeedup(speedup)
Sets the speedup flag.
:param speedup: The speedup flag.
:type speedup: bool
.. function:: espressopp.interaction.VerletListPIadressTabulated.getSpeedup()
Gets the speedup flag.
:return: the speedup flag
:rtype: bool
.. function:: espressopp.interaction.VerletListPIadressTabulatedLJ(vl, fixedtupleList, ntrotter, speedup)
Defines a non-bonded interaction using an adaptive resolution VerletList in the context of Path Integral AdResS. Two different potentials can be specified: one, which is used in the quantum region (tabulated), the other one in the classical region (Lennard-Jones type). The interpolation proceeds according to the Path Integral AdResS scheme (see J. Chem. Phys 147, 244104 (2017)). When the speedup flag is set, it will use only the centroids in the classical region, otherwise all Trotter beads. In the quantum region, always all Trotter beads are used.
:param vl: The AdResS VerletList.
:param fixedtupleList: The FixedTupleListAdress object.
:param ntrotter: The Trotter number.
:param speedup: Boolean flag to decide whether to use centroids in classical region or all Trotter beads
:type vl: espressopp.VerletListAdress
:type fixedtupleList: espressopp.FixedTupleListAdress
:type ntrotter: int
:type speedup: bool
.. function:: espressopp.interaction.VerletListPIadressTabulatedLJ.setPotentialQM(potential)
Sets the potential for the quantum region (has to be a tabulated one).
:param potential: The potential object.
:type potential: espressopp.interaction.Potential
.. function:: espressopp.interaction.VerletListPIadressTabulatedLJ.setPotentialCL(potential)
Sets the potential for the classical region (has to be a Lennard-Jones type one).
:param potential: The potential object.
:type potential: espressopp.interaction.Potential
.. function:: espressopp.interaction.VerletListPIadressTabulatedLJ.setVerletList(vl)
Sets the VerletList.
:param vl: The VerletListAdress object.
:type vl: espressopp.VerletListAdress
.. function:: espressopp.interaction.VerletListPIadressTabulatedLJ.getVerletList()
Gets the VerletList.
:return: the Adress VerletList
:rtype: shared_ptr<VerletListAdress>
.. function:: espressopp.interaction.VerletListPIadressTabulatedLJ.setFixedTupleList(fixedtupleList)
Sets the FixedTupleList.
:param fixedtupleList: The FixedTupleListAdress object.
:type fixedtupleList: espressopp.FixedTupleListAdress
.. function:: espressopp.interaction.VerletListPIadressTabulatedLJ.getFixedTupleList()
Gets the FixedTupleList.
:return: the FixedTupleList
:rtype: shared_ptr < FixedTupleListAdress >
.. function:: espressopp.interaction.VerletListPIadressTabulatedLJ.setNTrotter(ntrotter)
Sets the Trotter number NTrotter.
:param ntrotter: The Trotter number.
:type ntrotter: int
.. function:: espressopp.interaction.VerletListPIadressTabulatedLJ.getNTrotter()
Gets the Trotter number NTrotter.
:return: the Trotter number
:rtype: int
.. function:: espressopp.interaction.VerletListPIadressTabulatedLJ.setSpeedup(speedup)
Sets the speedup flag.
:param speedup: The speedup flag.
:type speedup: bool
.. function:: espressopp.interaction.VerletListPIadressTabulatedLJ.getSpeedup()
Gets the speedup flag.
:return: the speedup flag
:rtype: bool
.. function:: espressopp.interaction.VerletListPIadressNoDriftTabulated(vl, fixedtupleList, ntrotter, speedup)
Defines a non-bonded interaction using an adaptive resolution VerletList in the context of Path Integral AdResS. One tabulated potential can be specified, which is used throughout the whole system. Hence, only the quantumness of the particles changes, but not the forcefield (see J. Chem. Phys 147, 244104 (2017)). When the speedup flag is set, it will use only the centroids in the classical region, otherwise all Trotter beads. In the quantum region, always all Trotter beads are used.
:param vl: The AdResS VerletList.
:param fixedtupleList: The FixedTupleListAdress object.
:param ntrotter: The Trotter number.
:param speedup: Boolean flag to decide whether to use centroids in classical region or all Trotter beads
:type vl: espressopp.VerletListAdress
:type fixedtupleList: espressopp.FixedTupleListAdress
:type ntrotter: int
:type speedup: bool
.. function:: espressopp.interaction.VerletListPIadressNoDriftTabulated.setPotential(potential)
Sets the potential which is used throughout the whole system (has to be a tabulated one).
:param potential: The potential object.
:type potential: espressopp.interaction.Potential
.. function:: espressopp.interaction.VerletListPIadressNoDriftTabulated.setVerletList(vl)
Sets the VerletList.
:param vl: The VerletListAdress object.
:type vl: espressopp.VerletListAdress
.. function:: espressopp.interaction.VerletListPIadressNoDriftTabulated.getVerletList()
Gets the VerletList.
:return: the Adress VerletList
:rtype: shared_ptr<VerletListAdress>
.. function:: espressopp.interaction.VerletListPIadressNoDriftTabulated.setFixedTupleList(fixedtupleList)
Sets the FixedTupleList.
:param fixedtupleList: The FixedTupleListAdress object.
:type fixedtupleList: espressopp.FixedTupleListAdress
.. function:: espressopp.interaction.VerletListPIadressNoDriftTabulated.getFixedTupleList()
Gets the FixedTupleList.
:return: the FixedTupleList
:rtype: shared_ptr < FixedTupleListAdress >
.. function:: espressopp.interaction.VerletListPIadressNoDriftTabulated.setNTrotter(ntrotter)
Sets the Trotter number NTrotter.
:param ntrotter: The Trotter number.
:type ntrotter: int
.. function:: espressopp.interaction.VerletListPIadressNoDriftTabulated.getNTrotter()
Gets the Trotter number NTrotter.
:return: the Trotter number
:rtype: int
.. function:: espressopp.interaction.VerletListPIadressNoDriftTabulated.setSpeedup(speedup)
Sets the speedup flag.
:param speedup: The speedup flag.
:type speedup: bool
.. function:: espressopp.interaction.VerletListPIadressNoDriftTabulated.getSpeedup()
Gets the speedup flag.
:return: the speedup flag
:rtype: bool
"""
# -*- coding: iso-8859-1 -*-
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_Tabulated, \
interaction_VerletListTabulated, \
interaction_VerletListAdressTabulated, \
interaction_VerletListAdressCGTabulated, \
interaction_VerletListHadressTabulated, \
interaction_VerletListHadressCGTabulated, \
interaction_VerletListPIadressTabulated, \
interaction_VerletListPIadressTabulatedLJ, \
interaction_VerletListPIadressNoDriftTabulated, \
interaction_CellListTabulated, \
interaction_FixedPairListTabulated, \
interaction_FixedPairListTypesTabulated, \
interaction_FixedPairListPIadressTabulated
class TabulatedLocal(PotentialLocal, interaction_Tabulated):
    """Local (worker-side) part of the tabulated potential."""

    def __init__(self, itype, filename, cutoff=infinity):
        # Skip construction on ranks outside the active PMI CPU group.
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        cxxinit(self, interaction_Tabulated, itype, filename, cutoff)
class VerletListAdressCGTabulatedLocal(InteractionLocal, interaction_VerletListAdressCGTabulated):
    """Local part of the CG-only tabulated AdResS verlet-list interaction."""

    def __init__(self, vl, fixedtupleList):
        # Skip on ranks outside the active PMI CPU group.
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        cxxinit(self, interaction_VerletListAdressCGTabulated, vl, fixedtupleList)

    def setPotential(self, type1, type2, potential):
        """Set the CG potential for the (type1, type2) pair."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        self.cxxclass.setPotential(self, type1, type2, potential)

    def getPotential(self, type1, type2):
        """Return the CG potential for the (type1, type2) pair."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        return self.cxxclass.getPotential(self, type1, type2)

    def getVerletListLocal(self):
        """Return the AdResS verlet list backing this interaction."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        return self.cxxclass.getVerletList(self)
class VerletListAdressTabulatedLocal(InteractionLocal, interaction_VerletListAdressTabulated):
    """Local part of the AdResS verlet-list interaction with tabulated AT and CG potentials."""

    def __init__(self, vl, fixedtupleList):
        # Skip on ranks outside the active PMI CPU group.
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        cxxinit(self, interaction_VerletListAdressTabulated, vl, fixedtupleList)

    def setPotentialAT(self, type1, type2, potential):
        """Set the atomistic (AT) potential for the (type1, type2) pair."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        self.cxxclass.setPotentialAT(self, type1, type2, potential)

    def setPotentialCG(self, type1, type2, potential):
        """Set the coarse-grained (CG) potential for the (type1, type2) pair."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        self.cxxclass.setPotentialCG(self, type1, type2, potential)
class VerletListHadressCGTabulatedLocal(InteractionLocal, interaction_VerletListHadressCGTabulated):
    """Local part of the CG-only tabulated H-AdResS verlet-list interaction."""

    def __init__(self, vl, fixedtupleList):
        # Skip on ranks outside the active PMI CPU group.
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        cxxinit(self, interaction_VerletListHadressCGTabulated, vl, fixedtupleList)

    def setPotential(self, type1, type2, potential):
        """Set the CG potential for the (type1, type2) pair."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        self.cxxclass.setPotential(self, type1, type2, potential)

    def getPotential(self, type1, type2):
        """Return the CG potential for the (type1, type2) pair."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        return self.cxxclass.getPotential(self, type1, type2)

    def getVerletListLocal(self):
        """Return the AdResS verlet list backing this interaction."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        return self.cxxclass.getVerletList(self)
class VerletListHadressTabulatedLocal(InteractionLocal, interaction_VerletListHadressTabulated):
    """Local part of the H-AdResS verlet-list interaction with tabulated AT and CG potentials."""

    def __init__(self, vl, fixedtupleList):
        # Skip on ranks outside the active PMI CPU group.
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        cxxinit(self, interaction_VerletListHadressTabulated, vl, fixedtupleList)

    def setPotentialAT(self, type1, type2, potential):
        """Set the atomistic (AT) potential for the (type1, type2) pair."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        self.cxxclass.setPotentialAT(self, type1, type2, potential)

    def setPotentialCG(self, type1, type2, potential):
        """Set the coarse-grained (CG) potential for the (type1, type2) pair."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        self.cxxclass.setPotentialCG(self, type1, type2, potential)
class VerletListPIadressTabulatedLocal(InteractionLocal, interaction_VerletListPIadressTabulated):
    """Local part of the Path-Integral AdResS verlet-list interaction with
    tabulated QM and CL potentials.

    Fix: getFixedTupleList, getNTrotter and getSpeedup previously called the
    C++ accessor but discarded its result (missing ``return``), always yielding
    None; they now return the value, consistent with getVerletList and the
    module docstring's ``:return:`` entries.
    """

    def __init__(self, vl, fixedtupleList, ntrotter, speedup):
        # Skip on ranks outside the active PMI CPU group.
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        cxxinit(self, interaction_VerletListPIadressTabulated, vl, fixedtupleList, ntrotter, speedup)

    def setPotentialQM(self, type1, type2, potential):
        """Set the quantum-region (tabulated) potential for the (type1, type2) pair."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        self.cxxclass.setPotentialQM(self, type1, type2, potential)

    def setPotentialCL(self, type1, type2, potential):
        """Set the classical-region (tabulated) potential for the (type1, type2) pair."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        self.cxxclass.setPotentialCL(self, type1, type2, potential)

    def setVerletList(self, vl):
        """Set the AdResS verlet list."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        self.cxxclass.setVerletList(self, vl)

    def getVerletList(self):
        """Return the AdResS verlet list."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        return self.cxxclass.getVerletList(self)

    def setFixedTupleList(self, fixedtupleList):
        """Set the FixedTupleListAdress object."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        self.cxxclass.setFixedTupleList(self, fixedtupleList)

    def getFixedTupleList(self):
        """Return the FixedTupleListAdress object."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        return self.cxxclass.getFixedTupleList(self)

    def setNTrotter(self, ntrotter):
        """Set the Trotter number."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        self.cxxclass.setNTrotter(self, ntrotter)

    def getNTrotter(self):
        """Return the Trotter number."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        return self.cxxclass.getNTrotter(self)

    def setSpeedup(self, speedup):
        """Set the speedup flag (centroids-only in the classical region)."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        self.cxxclass.setSpeedup(self, speedup)

    def getSpeedup(self):
        """Return the speedup flag."""
        if pmi._PMIComm and pmi._PMIComm.isActive() and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        return self.cxxclass.getSpeedup(self)
class VerletListPIadressTabulatedLJLocal(InteractionLocal, interaction_VerletListPIadressTabulatedLJ):
    """Local (per-worker) tabulated/LJ path-integral AdResS interaction.

    Every method is guarded so it only executes on MPI ranks that belong to
    the active PMI CPU group (or on all ranks when no PMI communicator is
    active).
    """

    def __init__(self, vl, fixedtupleList, ntrotter, speedup):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListPIadressTabulatedLJ, vl, fixedtupleList, ntrotter, speedup)

    def setPotentialQM(self, type1, type2, potential):
        """Set the quantum (path-integral) potential for a particle-type pair."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialQM(self, type1, type2, potential)

    def setPotentialCL(self, type1, type2, potential):
        """Set the classical potential for a particle-type pair."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialCL(self, type1, type2, potential)

    def setVerletList(self, vl):
        """Replace the Verlet list used by this interaction."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setVerletList(self, vl)

    def getVerletList(self):
        """Return the Verlet list used by this interaction."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getVerletList(self)

    def setFixedTupleList(self, fixedtupleList):
        """Replace the fixed tuple list used by this interaction."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setFixedTupleList(self, fixedtupleList)

    def getFixedTupleList(self):
        """Return the fixed tuple list used by this interaction."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # Bug fix: the value was computed but never returned, so callers
            # always received None.
            return self.cxxclass.getFixedTupleList(self)

    def setNTrotter(self, ntrotter):
        """Set the number of Trotter beads."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setNTrotter(self, ntrotter)

    def getNTrotter(self):
        """Return the number of Trotter beads."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # Bug fix: previously missing "return".
            return self.cxxclass.getNTrotter(self)

    def setSpeedup(self, speedup):
        """Enable/disable the speedup mode."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setSpeedup(self, speedup)

    def getSpeedup(self):
        """Return the speedup setting."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # Bug fix: previously missing "return".
            return self.cxxclass.getSpeedup(self)
class VerletListPIadressNoDriftTabulatedLocal(InteractionLocal, interaction_VerletListPIadressNoDriftTabulated):
    """Local (per-worker) no-drift tabulated path-integral AdResS interaction.

    Every method is guarded so it only executes on MPI ranks that belong to
    the active PMI CPU group (or on all ranks when no PMI communicator is
    active).
    """

    def __init__(self, vl, fixedtupleList, ntrotter, speedup):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListPIadressNoDriftTabulated, vl, fixedtupleList, ntrotter, speedup)

    def setPotential(self, type1, type2, potential):
        """Set the potential for a particle-type pair."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, type1, type2, potential)

    def setVerletList(self, vl):
        """Replace the Verlet list used by this interaction."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setVerletList(self, vl)

    def getVerletList(self):
        """Return the Verlet list used by this interaction."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getVerletList(self)

    def setFixedTupleList(self, fixedtupleList):
        """Replace the fixed tuple list used by this interaction."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setFixedTupleList(self, fixedtupleList)

    def getFixedTupleList(self):
        """Return the fixed tuple list used by this interaction."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # Bug fix: the value was computed but never returned, so callers
            # always received None.
            return self.cxxclass.getFixedTupleList(self)

    def setNTrotter(self, ntrotter):
        """Set the number of Trotter beads."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setNTrotter(self, ntrotter)

    def getNTrotter(self):
        """Return the number of Trotter beads."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # Bug fix: previously missing "return".
            return self.cxxclass.getNTrotter(self)

    def setSpeedup(self, speedup):
        """Enable/disable the speedup mode."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setSpeedup(self, speedup)

    def getSpeedup(self):
        """Return the speedup setting."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # Bug fix: previously missing "return".
            return self.cxxclass.getSpeedup(self)
class VerletListTabulatedLocal(InteractionLocal, interaction_VerletListTabulated):
    """Local (per-worker) tabulated pair interaction on a Verlet list."""

    def _participating(self):
        # True when this MPI rank should execute PMI-guarded calls: either no
        # PMI communicator is active, or the rank is in the active CPU group.
        comm = pmi._PMIComm
        return not (comm and comm.isActive()) or pmi._MPIcomm.rank in comm.getMPIcpugroup()

    def __init__(self, vl):
        if self._participating():
            cxxinit(self, interaction_VerletListTabulated, vl)

    def setPotential(self, type1, type2, potential):
        """Register ``potential`` for the (type1, type2) pair."""
        if self._participating():
            self.cxxclass.setPotential(self, type1, type2, potential)

    def getPotential(self, type1, type2):
        """Return the potential registered for the (type1, type2) pair."""
        if self._participating():
            return self.cxxclass.getPotential(self, type1, type2)
class CellListTabulatedLocal(InteractionLocal, interaction_CellListTabulated):
    """Local (per-worker) tabulated pair interaction over a cell list."""

    def _participating(self):
        # True when this MPI rank should execute PMI-guarded calls: either no
        # PMI communicator is active, or the rank is in the active CPU group.
        comm = pmi._PMIComm
        return not (comm and comm.isActive()) or pmi._MPIcomm.rank in comm.getMPIcpugroup()

    def __init__(self, stor):
        if self._participating():
            cxxinit(self, interaction_CellListTabulated, stor)

    def setPotential(self, type1, type2, potential):
        """Register ``potential`` for the (type1, type2) pair."""
        if self._participating():
            self.cxxclass.setPotential(self, type1, type2, potential)
class FixedPairListTabulatedLocal(InteractionLocal, interaction_FixedPairListTabulated):
    """Local (per-worker) tabulated interaction over a fixed pair list.

    NOTE(review): the controller proxy for this class also registers
    ``setFixedPairList``/``getFixedPairList`` in ``pmicall``; they are not
    defined here, so they are presumably inherited -- confirm.
    """

    def __init__(self, system, vl, potential):
        # Run only on ranks in the active PMI CPU group (or everywhere when
        # no PMI communicator is active).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_FixedPairListTabulated, system, vl, potential)

    def setPotential(self, potential):
        """Set the tabulated potential used for all pairs in the list."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, potential)
class FixedPairListTypesTabulatedLocal(InteractionLocal, interaction_FixedPairListTypesTabulated):
    """Local (per-worker) type-resolved tabulated interaction over a fixed pair list."""

    def _participating(self):
        # True when this MPI rank should execute PMI-guarded calls: either no
        # PMI communicator is active, or the rank is in the active CPU group.
        comm = pmi._PMIComm
        return not (comm and comm.isActive()) or pmi._MPIcomm.rank in comm.getMPIcpugroup()

    def __init__(self, system, vl):
        if self._participating():
            cxxinit(self, interaction_FixedPairListTypesTabulated, system, vl)

    def setPotential(self, type1, type2, potential):
        """Register ``potential`` for the (type1, type2) pair."""
        if self._participating():
            self.cxxclass.setPotential(self, type1, type2, potential)

    def getPotential(self, type1, type2):
        """Return the potential registered for the (type1, type2) pair."""
        if self._participating():
            return self.cxxclass.getPotential(self, type1, type2)

    def setFixedPairList(self, fixedpairlist):
        """Replace the fixed pair list used by this interaction."""
        if self._participating():
            self.cxxclass.setFixedPairList(self, fixedpairlist)

    def getFixedPairList(self):
        """Return the fixed pair list used by this interaction."""
        if self._participating():
            return self.cxxclass.getFixedPairList(self)
class FixedPairListPIadressTabulatedLocal(InteractionLocal, interaction_FixedPairListPIadressTabulated):
    """Local (per-worker) tabulated path-integral AdResS bonded interaction.

    Every method is guarded so it only executes on MPI ranks that belong to
    the active PMI CPU group (or on all ranks when no PMI communicator is
    active).
    """

    def __init__(self, system, fpl, fixedtupleList, potential, ntrotter, speedup):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_FixedPairListPIadressTabulated, system, fpl, fixedtupleList, potential, ntrotter, speedup)

    def setPotential(self, potential):
        """Set the tabulated potential used for all pairs in the list."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, potential)

    def getPotential(self):
        """Return the tabulated potential."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getPotential(self)

    def setFixedPairList(self, fpl):
        """Replace the fixed pair list used by this interaction."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setFixedPairList(self, fpl)

    def getFixedPairList(self):
        """Return the fixed pair list used by this interaction."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getFixedPairList(self)

    def setFixedTupleList(self, fixedtupleList):
        """Replace the fixed tuple list used by this interaction."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setFixedTupleList(self, fixedtupleList)

    def getFixedTupleList(self):
        """Return the fixed tuple list used by this interaction."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # Bug fix: the value was computed but never returned, so callers
            # always received None (compare getFixedPairList above).
            return self.cxxclass.getFixedTupleList(self)

    def setNTrotter(self, ntrotter):
        """Set the number of Trotter beads."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setNTrotter(self, ntrotter)

    def getNTrotter(self):
        """Return the number of Trotter beads."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # Bug fix: previously missing "return".
            return self.cxxclass.getNTrotter(self)

    def setSpeedup(self, speedup):
        """Enable/disable the speedup mode."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setSpeedup(self, speedup)

    def getSpeedup(self):
        """Return the speedup setting."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # Bug fix: previously missing "return".
            return self.cxxclass.getSpeedup(self)
if pmi.isController:

    # Controller-side PMI proxy definitions.  Each Interaction proxy lists in
    # ``pmicall`` the methods it forwards to the matching *Local class on the
    # workers; ``__metaclass__ = pmi.Proxy`` is Python-2-style metaclass
    # wiring that performs the forwarding.  The Potential subclass below has
    # no explicit __metaclass__ -- presumably the Potential base class
    # supplies the proxy mechanics; confirm before changing.

    class Tabulated(Potential):
        'The Tabulated potential.'
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.TabulatedLocal',
            pmiproperty = ['itype', 'filename', 'cutoff']
        )

    class VerletListAdressCGTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListAdressCGTabulatedLocal',
            pmicall = ['setPotential', 'getPotential', 'getVerletList']
        )

    class VerletListAdressTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListAdressTabulatedLocal',
            pmicall = ['setPotentialAT', 'setPotentialCG']
        )

    class VerletListHadressCGTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListHadressCGTabulatedLocal',
            pmicall = ['setPotential', 'getPotential', 'getVerletList']
        )

    class VerletListHadressTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListHadressTabulatedLocal',
            pmicall = ['setPotentialAT', 'setPotentialCG']
        )

    class VerletListPIadressTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListPIadressTabulatedLocal',
            pmicall = ['setPotentialQM','setPotentialCL','setVerletList', 'getVerletList', 'setFixedTupleList', 'getFixedTupleList', 'setNTrotter', 'getNTrotter', 'setSpeedup', 'getSpeedup']
        )

    class VerletListPIadressTabulatedLJ(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListPIadressTabulatedLJLocal',
            pmicall = ['setPotentialQM','setPotentialCL','setVerletList', 'getVerletList', 'setFixedTupleList', 'getFixedTupleList', 'setNTrotter', 'getNTrotter', 'setSpeedup', 'getSpeedup']
        )

    class VerletListPIadressNoDriftTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListPIadressNoDriftTabulatedLocal',
            pmicall = ['setPotential','setVerletList', 'getVerletList', 'setFixedTupleList', 'getFixedTupleList', 'setNTrotter', 'getNTrotter', 'setSpeedup', 'getSpeedup']
        )

    class VerletListTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListTabulatedLocal',
            pmicall = ['setPotential','getPotential']
        )

    class CellListTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.CellListTabulatedLocal',
            pmicall = ['setPotential']
        )

    class FixedPairListTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.FixedPairListTabulatedLocal',
            pmicall = ['setPotential', 'setFixedPairList', 'getFixedPairList']
        )

    class FixedPairListTypesTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.FixedPairListTypesTabulatedLocal',
            pmicall = ['setPotential','getPotential','setFixedPairList','getFixedPairList']
        )

    class FixedPairListPIadressTabulated(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.FixedPairListPIadressTabulatedLocal',
            pmicall = ['setPotential', 'getPotential', 'setFixedPairList', 'getFixedPairList', 'setFixedTupleList', 'getFixedTupleList', 'setNTrotter', 'getNTrotter', 'setSpeedup', 'getSpeedup']
        )
| kkreis/espressopp | src/interaction/Tabulated.py | Python | gpl-3.0 | 44,792 | [
"ESPResSo"
] | 74ef8f8b45d1134baf4b2239f22854bda2c312b0322ed1dec9d586b65a8ccce0 |
# Orca
#
# Copyright 2016 Igalia, S.L.
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
# Module metadata; the "$...$" values are CVS/SVN keyword-expansion
# placeholders.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2016 Igalia, S.L."
__license__ = "LGPL"
from orca import debug
from orca import orca
from orca import orca_state
from orca.scripts import default
from .braille_generator import BrailleGenerator
from .speech_generator import SpeechGenerator
from .script_utilities import Utilities
class Script(default.Script):
    """Orca script for terminal applications."""

    def __init__(self, app):
        super().__init__(app)

        # Terminal output can change while the widget lacks focus; do not
        # present such changes.
        self.presentIfInactive = False

    def deactivate(self):
        """Called when this script is deactivated."""

        self.utilities.clearCache()
        super().deactivate()

    def getBrailleGenerator(self):
        """Returns the braille generator for this script."""

        return BrailleGenerator(self)

    def getSpeechGenerator(self):
        """Returns the speech generator for this script."""

        return SpeechGenerator(self)

    def getUtilities(self):
        """Returns the utilities for this script."""

        return Utilities(self)

    def onFocus(self, event):
        """Callback for focus: accessibility events."""

        # https://bugzilla.gnome.org/show_bug.cgi?id=748311
        orca.setLocusOfFocus(event, event.source)

    def onTextDeleted(self, event):
        """Callback for object:text-changed:delete accessibility events."""

        if self.utilities.treatEventAsNoise(event):
            msg = "TERMINAL: Deletion is believed to be noise"
            debug.println(debug.LEVEL_INFO, msg, True)
            return

        super().onTextDeleted(event)

    def onTextInserted(self, event):
        """Callback for object:text-changed:insert accessibility events."""

        if not self.utilities.treatEventAsCommand(event):
            msg = "TERMINAL: Passing along event to default script."
            debug.println(debug.LEVEL_INFO, msg, True)
            super().onTextInserted(event)
            return

        msg = "TERMINAL: Insertion is believed to be due to terminal command"
        debug.println(debug.LEVEL_INFO, msg, True)

        self.updateBraille(event.source)

        # Single characters are echoed as characters; longer insertions are
        # spoken as a message with an appropriate voice.
        newString = self.utilities.insertedText(event)
        if len(newString) == 1:
            self.speakCharacter(newString)
        else:
            voice = self.speechGenerator.voice(obj=event.source, string=newString)
            self.speakMessage(newString, voice=voice)

        if self.flatReviewContext:
            return

        # Bug fix: narrowed a bare "except:", which would also have swallowed
        # KeyboardInterrupt and SystemExit.
        try:
            text = event.source.queryText()
        except Exception:
            pass
        else:
            self._saveLastCursorPosition(event.source, text.caretOffset)
            self.utilities.updateCachedTextSelection(event.source)

    def presentKeyboardEvent(self, event):
        """Presents the keyboard event; returns True if this script handled it."""

        if orca_state.learnModeEnabled or not event.isPrintableKey():
            return super().presentKeyboardEvent(event)

        if event.isPressedKey():
            return False

        self._sayAllIsInterrupted = False
        self.utilities.clearCachedCommandState()
        if event.shouldEcho == False or event.isOrcaModified() or event.isCharacterEchoable():
            return False

        # We have no reliable way of knowing a password is being entered into
        # a terminal -- other than the fact that the text typed isn't there.
        # Bug fix: narrowed a bare "except:" here as well.
        try:
            text = event.getObject().queryText()
            offset = text.caretOffset
            prevChar = text.getText(offset - 1, offset)
            char = text.getText(offset, offset + 1)
        except Exception:
            return False

        string = event.event_string
        if string not in [prevChar, "space", char]:
            return False

        msg = "TERMINAL: Presenting keyboard event %s" % string
        debug.println(debug.LEVEL_INFO, msg, True)
        self.speakKeyEvent(event)
        return True

    def skipObjectEvent(self, event):
        """Returns True when a newer insertion event supersedes this one."""

        if event.type == "object:text-changed:insert":
            return False

        newEvent, newTime = None, 0
        if event.type == "object:text-changed:delete":
            if self.utilities.isBackSpaceCommandTextDeletionEvent(event):
                return False
            newEvent, newTime = self.eventCache.get("object:text-changed:insert", [None, 0])

        if newEvent is None or newEvent.source != event.source:
            return super().skipObjectEvent(event)

        if event.detail1 != newEvent.detail1:
            return False

        data = "\n%s%s" % (" " * 11, str(newEvent).replace("\t", " " * 11))
        msg = "TERMINAL: Skipping due to more recent event at offset%s" % data
        debug.println(debug.LEVEL_INFO, msg, True)
        return True
| GNOME/orca | src/orca/scripts/terminal/script.py | Python | lgpl-2.1 | 5,468 | [
"ORCA"
] | e104f1a4804ee92a06573e2ad5be2b031d3ad8b6490f8f1c2afa84387fd5b7f9 |
from __future__ import absolute_import, division, print_function
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
from skbio.stats.distance import DistanceMatrix
from skbio.tree import TreeNode
def nj(dm, disallow_negative_branch_length=True, result_constructor=None):
    """ Apply neighbor joining for phylogenetic reconstruction.

    Parameters
    ----------
    dm : skbio.DistanceMatrix
        Input distance matrix containing distances between OTUs.
    disallow_negative_branch_length : bool, optional
        Neighbor joining can result in negative branch lengths, which don't
        make sense in an evolutionary context. If `True`, negative branch
        lengths will be returned as zero, a common strategy for handling this
        issue that was proposed by the original developers of the algorithm.
    result_constructor : function, optional
        Function to apply to construct the result object. This must take a
        newick-formatted string as input. The result of applying this function
        to a newick-formatted string will be returned from this function. This
        defaults to ``TreeNode.from_newick``.

    Returns
    -------
    TreeNode
        By default, the result object is a `TreeNode`, though this can be
        overridden by passing `result_constructor`.

    See Also
    --------
    TreeNode.root_at_midpoint

    Notes
    -----
    Neighbor joining was initially described in Saitou and Nei (1987) [1]_.
    The Phylip manual also describes the method [2]_ and Phylip itself
    provides an implementation which is useful for comparison.

    Neighbor joining, by definition, creates unrooted trees. One strategy for
    rooting the resulting trees is midpoint rooting, which is accessible as
    ``TreeNode.root_at_midpoint``.

    References
    ----------
    .. [1] Saitou N, and Nei M. (1987) "The neighbor-joining method: a new
       method for reconstructing phylogenetic trees." Molecular Biology and
       Evolution. PMID: 3447015.
    .. [2] http://evolution.genetics.washington.edu/phylip/doc/neighbor.html

    Examples
    --------
    Define a new distance matrix object describing the distances between five
    OTUs: a, b, c, d, and e.

    >>> from skbio import DistanceMatrix
    >>> from skbio.tree import nj

    >>> data = [[0, 5, 9, 9, 8],
    ...         [5, 0, 10, 10, 9],
    ...         [9, 10, 0, 8, 7],
    ...         [9, 10, 8, 0, 3],
    ...         [8, 9, 7, 3, 0]]
    >>> ids = list('abcde')
    >>> dm = DistanceMatrix(data, ids)

    Construct the neighbor joining tree representing the relationship between
    those OTUs, returning the newick string representing the tree rather than
    the TreeNode object. (Note that in this example the string output is
    truncated when printed to facilitate rendering.)

    >>> newick_str = nj(dm, result_constructor=str)
    >>> print(newick_str[:55], "...")
    (d:2.000000, (c:4.000000, (b:3.000000, a:2.000000):3.00 ...

    """
    if dm.shape[0] < 3:
        raise ValueError(
            "Distance matrix must be at least 3x3 to "
            "generate a neighbor joining tree.")

    if result_constructor is None:
        result_constructor = TreeNode.from_newick

    node_definition = None

    # Iteratively join the closest (by Q criterion) pair of nodes until only
    # three entries remain in the distance matrix.
    while dm.shape[0] > 3:
        q = _compute_q(dm)

        # If multiple pairs have equally low Q values, the first pair found
        # (closest to the top-left of the matrix) is joined.
        idx1, idx2 = _lowest_index(q)
        left = dm.ids[idx1]
        right = dm.ids[idx2]

        # Branch lengths from each member of the pair to the new node.
        left_len, right_len = _pair_members_to_new_node(
            dm, idx1, idx2, disallow_negative_branch_length)

        # The new internal node, written in newick style.
        node_definition = "(%s:%f, %s:%f)" % (left, left_len,
                                              right, right_len)

        # Collapse the joined pair into the new node and recompute distances.
        dm = _compute_collapsed_dm(
            dm, left, right,
            disallow_negative_branch_length=disallow_negative_branch_length,
            new_node_id=node_definition)

    # Three entries remain, so the tree is fully determined: join the last
    # two leaves into a pair...
    left = dm.ids[1]
    right = dm.ids[2]
    left_len, right_len = _pair_members_to_new_node(
        dm, left, right, disallow_negative_branch_length)

    # ...then compute their distance to the remaining node, handling the
    # trivial case where the input dm was only 3 x 3.
    node_definition = node_definition or dm.ids[0]
    internal_len = _otu_to_new_node(
        dm, left, right, node_definition,
        disallow_negative_branch_length=disallow_negative_branch_length)

    # Assemble the newick string describing the whole (unrooted) tree.
    newick = "(%s:%f, %s:%f, %s:%f);" % (left, left_len,
                                         node_definition, internal_len,
                                         right, right_len)

    return result_constructor(newick)
def _compute_q(dm):
    """Compute Q matrix, used to identify the next pair of nodes to join.
    """
    n = dm.shape[0]
    q = np.zeros((n, n))
    # Row sums are reused for every pair, so compute them once up front.
    row_sums = [dm[i].sum() for i in range(n)]
    for i in range(1, n):
        for j in range(i):
            value = (n - 2) * dm[i, j] - row_sums[i] - row_sums[j]
            q[i, j] = value
            q[j, i] = value
    return DistanceMatrix(q, dm.ids)
def _compute_collapsed_dm(dm, i, j, disallow_negative_branch_length,
                          new_node_id):
    """Return the distance matrix resulting from joining ids i and j in a node.

    If the input distance matrix has shape ``(n, n)``, the result will have
    shape ``(n-1, n-1)`` as the ids `i` and `j` are collapsed to the single
    new id `new_node_id`.
    """
    out_n = dm.shape[0] - 1
    # The new node takes the first slot; all surviving ids follow.
    out_ids = [new_node_id]
    out_ids.extend(e for e in dm.ids if e not in (i, j))
    result = np.zeros((out_n, out_n))
    for idx1, out_id1 in enumerate(out_ids[1:]):
        # Distance from every surviving node to the new (collapsed) node.
        d = _otu_to_new_node(dm, i, j, out_id1,
                             disallow_negative_branch_length)
        result[0, idx1 + 1] = d
        result[idx1 + 1, 0] = d
        # Distances between surviving nodes are carried over unchanged.
        for idx2, out_id2 in enumerate(out_ids[1:idx1 + 1]):
            result[idx1 + 1, idx2 + 1] = result[idx2 + 1, idx1 + 1] = \
                dm[out_id1, out_id2]
    return DistanceMatrix(result, out_ids)
def _lowest_index(dm):
"""Return the index of the lowest value in the input distance matrix.
If there are ties for the lowest value, the index of top-left most
occurrence of that value will be returned.
This should be ultimately be replaced with a new DistanceMatrix object
method (#228).
"""
lowest_value = np.inf
for i in range(dm.shape[0]):
for j in range(i):
curr_index = i, j
curr_value = dm[curr_index]
if curr_value < lowest_value:
lowest_value = curr_value
result = curr_index
return result
def _otu_to_new_node(dm, i, j, k, disallow_negative_branch_length):
"""Return the distance between a new node and some other node.
Parameters
----------
dm : skbio.DistanceMatrix
The input distance matrix.
i, j : str
Identifiers of entries in the distance matrix to be collapsed. These
get collapsed to a new node, internally represented as `u`.
k : str
Identifier of the entry in the distance matrix for which distance to
`u` will be computed.
disallow_negative_branch_length : bool
Neighbor joining can result in negative branch lengths, which don't
make sense in an evolutionary context. If `True`, negative branch
lengths will be returned as zero, a common strategy for handling this
issue that was proposed by the original developers of the algorithm.
"""
k_to_u = 0.5 * (dm[i, k] + dm[j, k] - dm[i, j])
if disallow_negative_branch_length and k_to_u < 0:
k_to_u = 0
return k_to_u
def _pair_members_to_new_node(dm, i, j, disallow_negative_branch_length):
"""Return the distance between a new node and decendants of that new node.
Parameters
----------
dm : skbio.DistanceMatrix
The input distance matrix.
i, j : str
Identifiers of entries in the distance matrix to be collapsed (i.e.,
the descendents of the new node, which is internally represented as
`u`).
disallow_negative_branch_length : bool
Neighbor joining can result in negative branch lengths, which don't
make sense in an evolutionary context. If `True`, negative branch
lengths will be returned as zero, a common strategy for handling this
issue that was proposed by the original developers of the algorithm.
"""
n = dm.shape[0]
i_to_j = dm[i, j]
i_to_u = (0.5 * i_to_j) + ((dm[i].sum() - dm[j].sum()) / (2 * (n - 2)))
if disallow_negative_branch_length and i_to_u < 0:
i_to_u = 0
j_to_u = i_to_j - i_to_u
if disallow_negative_branch_length and j_to_u < 0:
j_to_u = 0
return i_to_u, j_to_u
| JWDebelius/scikit-bio | skbio/tree/_nj.py | Python | bsd-3-clause | 10,840 | [
"scikit-bio"
] | 9723b37db240477c8a76c5484ec1e50fc0dba1bfe0ad9fbb646a9ee2240f53ec |
import shutil
import jinja2 as j2
import pytoml
from pathlib import Path
from core import Model, ModelType
from core.ModelValidator import ModelValidator
# Maps a model type string (as it appears in the model definition) to the
# template sub-folder that holds its code templates.
folders = {
    "F-IND": "F-IND",
}

# Maps membership-function names used in model definitions to the identifiers
# expected by the templates; commented-out entries are not supported yet.
mfDict = {
    # 'pairwise-linear': ,
    'triangle': 'TRI_MF',
    'trapezoid': 'TRAP_MF',
    # 'gaussian-bell': ,
    # 'gaussian2': ,
    'gaussian': 'GAUSSIAN_MF',
    'sigmoid': 'SIGMOID_MF',
    'diff_sigmoid': 'DIF_SIGMOID_MF',
    'singleton': 'SPIKE_MF',
}
class templateRenderer(object):
    """Renders Jinja2 templates for a single model.

    Builds a Jinja2 environment rooted at ``path`` and offers helpers to
    render a named template to a string or straight to a file.
    """

    def __init__(self, model, path):
        # Bug fix: Path.resolve() returns a new Path and does not mutate in
        # place; the previous code called it and discarded the result,
        # leaving a possibly relative template directory.
        self.tmplDir = path.resolve()
        loader = j2.FileSystemLoader(str(self.tmplDir))
        self.env = j2.Environment(
            loader=loader, trim_blocks=True, lstrip_blocks=True)
        self.model = model

    def render(self, template):
        """Render ``template`` with the model and the MF-name mapping."""
        tmpl = self.env.get_template(template)
        return tmpl.render(model=self.model, mfDict=mfDict)

    def write(self, fileOut, template):
        """Render ``template`` and write the result to ``fileOut`` (a Path)."""
        with fileOut.open('w') as fo:
            fo.write(self.render(template))
class fuzzyCreator(object):
    """Parses a TOML model description and renders code templates for it."""

    def __init__(self, modelString, outDir):
        # Each [[model]] entry becomes a Model subclass chosen by its "type"
        # field; anything that is neither F-IND nor FEQ falls through to
        # ModelFis.
        conf = pytoml.loads(modelString)
        self.models = []
        for m in conf['model']:
            if m['type'].upper() == 'F-IND':
                self.models.append(Model.ModelFIND(m))
            if m['type'].upper() == 'FEQ':
                self.models.append(Model.ModelFeq(m))
            if m['type'].upper() != 'F-IND' and m['type'].upper() != 'FEQ':
                self.models.append(Model.ModelFis(m))
        self.outDir = outDir

    def __process_template__(self, renderer, template_name, out_dir, add_model_name=None):
        # Renders one template file.  The output name is
        # "<stem>_<model>.<ext>" when a model name is given, otherwise
        # "<stem>.<ext>".  Assumes template names look like
        # "<stem>.<ext>.j2" -- TODO confirm.
        try:
            tmplSplit = template_name.split('.')
            if add_model_name is None:
                outfile = tmplSplit[0] + '.' + tmplSplit[1]
            else:
                outfile = tmplSplit[0] + '_' + \
                    add_model_name + '.' + tmplSplit[1]
            renderer.write(out_dir / outfile, template_name)
        except j2.TemplateSyntaxError as e:
            print("Exception in ", template_name, ":")
            raise e

    def render(self, subfolder=True, include_strings=False):
        """Render all models into ``self.outDir``.

        When ``subfolder`` is True each model gets its own sub-directory.
        After the per-model templates, shared files for every model type
        used ("common" folders) and the global common templates are
        rendered/copied into the output root.
        """
        if not self.outDir.exists():
            self.outDir.mkdir(parents=True)

        # Validate the models
        validator = ModelValidator(self.models)
        validator.validate()

        # Tracks which model types were rendered so their shared files can
        # be emitted once afterwards.
        model_types_added = ModelType.ModelTypeSet()
        outDir = self.outDir
        for model in self.models:
            model.include_strings = include_strings
            if subfolder:
                outDir = self.outDir / model.name
                if not outDir.exists():
                    outDir.mkdir()
            # Per-model templates live under templates/<folder-for-type>/.
            templ_dir = Path(__file__).parent / '..' / \
                'templates' / folders[model.type.upper()]
            renderer = templateRenderer(model, templ_dir)
            template_list = templ_dir.glob('*.j2')
            template_list = [x.name for x in template_list]
            for tmpl in template_list:
                self.__process_template__(renderer, tmpl, outDir, model.name)
            model_types_added.update(model)

        # Check if the model type has any common that should be copied as well
        for mt in model_types_added:
            mt.include_strings = include_strings
            templ_dir = Path(__file__).parent / '..' / \
                'templates' / folders[mt.type]
            common = (templ_dir / 'common').exists()
            if common:
                renderer = templateRenderer(mt, templ_dir / "common")
                fileDir = templ_dir / 'common'
                fileList = fileDir.glob('*')
                for f in fileList:
                    if f.is_file():
                        # Templates are rendered; everything else is copied
                        # verbatim.
                        if f.suffix == ".j2":
                            self.__process_template__(
                                renderer, f.name, self.outDir)
                        else:
                            shutil.copy(str(f), str(self.outDir))

        # Copies common model types files
        fileDir = Path(__file__).parent / '..' / 'templates' / "common"
        fileList = fileDir.glob('*')
        renderer = templateRenderer(model_types_added, fileDir)
        for f in fileList:
            if f.is_file():
                if f.suffix == ".j2":
                    self.__process_template__(renderer, f.name, self.outDir)
                else:
                    shutil.copy(str(f), str(self.outDir))
| lab-robotics-unipv/pyFUZZYgenerator | core/fuzzyCreator.py | Python | lgpl-3.0 | 4,604 | [
"Gaussian"
] | fa1fb85df9562641448a888e2b522c76499aa31c13b365860ed37de506f6c029 |
####################################################################################################
# Copyright 2013 John Crawford
#
# This file is part of PatchCorral.
#
# PatchCorral is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PatchCorral is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PatchCorral. If not, see <http://www.gnu.org/licenses/>.
####################################################################################################
## @file
# Module Information.
# @date 3/10/2013 Created file. -jc
# @author John Crawford
NAME = 'GM'
# General MIDI instrument patch table.
# Columns: Name, Bank-Select MSB, Bank-Select LSB, Program Change, Group Name,
# Voice Num Name. All GM voices live in bank MSB=121, LSB=0, PC 0-127.
# FIX: 'HARMONICA' (GM 023) and 'FLUTE' (GM 074) were the only two names not
# in title case; normalized to match the other 126 entries.
# NOTE(review): 'Orche str' (GM 049) looks truncated, but matches no obvious
# convention here — left as-is pending confirmation against the source list.
PATCHES = [
    # Name, MSB, LSB, PC, Group Name, Voice Num Name
    ('Piano 1', 121, 0, 0, 'AC.PIANO', 'GM 001'),
    ('Piano 2', 121, 0, 1, 'AC.PIANO', 'GM 002'),
    ('Piano 3', 121, 0, 2, 'AC.PIANO', 'GM 003'),
    ('Honky-tonk', 121, 0, 3, 'AC.PIANO', 'GM 004'),
    ('E.Piano 1', 121, 0, 4, 'EL.PIANO', 'GM 005'),
    ('E.Piano 2', 121, 0, 5, 'EL.PIANO', 'GM 006'),
    ('Harpsichord', 121, 0, 6, 'KEYBOARDS', 'GM 007'),
    ('Clav.', 121, 0, 7, 'KEYBOARDS', 'GM 008'),
    ('Celesta', 121, 0, 8, 'KEYBOARDS', 'GM 009'),
    ('Glockenspiel', 121, 0, 9, 'BELL', 'GM 010'),
    ('Music Box', 121, 0, 10, 'BELL', 'GM 011'),
    ('Vibraphone', 121, 0, 11, 'MALLET', 'GM 012'),
    ('Marimba', 121, 0, 12, 'MALLET', 'GM 013'),
    ('Xylophone', 121, 0, 13, 'MALLET', 'GM 014'),
    ('Tubular-bell', 121, 0, 14, 'BELL', 'GM 015'),
    ('Santur', 121, 0, 15, 'PLUCKED', 'GM 016'),
    ('Organ 1', 121, 0, 16, 'ORGAN', 'GM 017'),
    ('Organ 2', 121, 0, 17, 'ORGAN', 'GM 018'),
    ('Organ 3', 121, 0, 18, 'ORGAN', 'GM 019'),
    ('Church Org.1', 121, 0, 19, 'ORGAN', 'GM 020'),
    ('Reed Organ', 121, 0, 20, 'ORGAN', 'GM 021'),
    ('Accordion Fr', 121, 0, 21, 'ACCORDION', 'GM 022'),
    ('Harmonica', 121, 0, 22, 'HARMONICA', 'GM 023'),
    ('Bandoneon', 121, 0, 23, 'ACCORDION', 'GM 024'),
    ('Nylon-str.Gt', 121, 0, 24, 'AC.GUITAR', 'GM 025'),
    ('Steel-str.Gt', 121, 0, 25, 'AC.GUITAR', 'GM 026'),
    ('Jazz Gt.', 121, 0, 26, 'EL.GUITAR', 'GM 027'),
    ('Clean Gt.', 121, 0, 27, 'EL.GUITAR', 'GM 028'),
    ('Muted Gt.', 121, 0, 28, 'EL.GUITAR', 'GM 029'),
    ('Overdrive Gt', 121, 0, 29, 'DIST.GUITAR', 'GM 030'),
    ('DistortionGt', 121, 0, 30, 'DIST.GUITAR', 'GM 031'),
    ('Gt.Harmonics', 121, 0, 31, 'EL.GUITAR', 'GM 032'),
    ('Acoustic Bs.', 121, 0, 32, 'BASS', 'GM 033'),
    ('Fingered Bs.', 121, 0, 33, 'BASS', 'GM 034'),
    ('Picked Bass', 121, 0, 34, 'BASS', 'GM 035'),
    ('Fretless Bs.', 121, 0, 35, 'BASS', 'GM 036'),
    ('Slap Bass 1', 121, 0, 36, 'BASS', 'GM 037'),
    ('Slap Bass 2', 121, 0, 37, 'BASS', 'GM 038'),
    ('Synth Bass 1', 121, 0, 38, 'SYNTH BASS', 'GM 039'),
    ('Synth Bass 2', 121, 0, 39, 'SYNTH BASS', 'GM 040'),
    ('Violin', 121, 0, 40, 'STRINGS', 'GM 041'),
    ('Viola', 121, 0, 41, 'STRINGS', 'GM 042'),
    ('Cello', 121, 0, 42, 'STRINGS', 'GM 043'),
    ('Contrabass', 121, 0, 43, 'STRINGS', 'GM 044'),
    ('Tremolo Str', 121, 0, 44, 'STRINGS', 'GM 045'),
    ('PizzicatoStr', 121, 0, 45, 'STRINGS', 'GM 046'),
    ('Harp', 121, 0, 46, 'PLUCKED', 'GM 047'),
    ('Timpani', 121, 0, 47, 'PERCUSSION', 'GM 048'),
    ('Orche str', 121, 0, 48, 'STRINGS', 'GM 049'),
    ('Slow Strings', 121, 0, 49, 'STRINGS', 'GM 050'),
    ('Syn.Strings1', 121, 0, 50, 'STRINGS', 'GM 051'),
    ('Syn.Strings2', 121, 0, 51, 'SOFT PAD', 'GM 052'),
    ('Choir Aahs', 121, 0, 52, 'VOX', 'GM 053'),
    ('Voice Oohs', 121, 0, 53, 'VOX', 'GM 054'),
    ('SynVox', 121, 0, 54, 'VOX', 'GM 055'),
    ('OrchestraHit', 121, 0, 55, 'HIT&STAB', 'GM 056'),
    ('Trumpet', 121, 0, 56, 'AC.BRASS', 'GM 057'),
    ('Trombone', 121, 0, 57, 'AC.BRASS', 'GM 058'),
    ('Tuba', 121, 0, 58, 'AC.BRASS', 'GM 059'),
    ('MutedTrumpet', 121, 0, 59, 'AC.BRASS', 'GM 060'),
    ('French Horns', 121, 0, 60, 'AC.BRASS', 'GM 061'),
    ('Brass 1', 121, 0, 61, 'AC.BRASS', 'GM 062'),
    ('Synth Brass1', 121, 0, 62, 'SYNTH BRASS', 'GM 063'),
    ('Synth Brass2', 121, 0, 63, 'SYNTH BRASS', 'GM 064'),
    ('Soprano Sax', 121, 0, 64, 'SAX', 'GM 065'),
    ('Alto Sax', 121, 0, 65, 'SAX', 'GM 066'),
    ('Tenor Sax', 121, 0, 66, 'SAX', 'GM 067'),
    ('Baritone Sax', 121, 0, 67, 'SAX', 'GM 068'),
    ('Oboe', 121, 0, 68, 'WIND', 'GM 069'),
    ('English Horn', 121, 0, 69, 'WIND', 'GM 070'),
    ('Bassoon', 121, 0, 70, 'WIND', 'GM 071'),
    ('Clarinet', 121, 0, 71, 'WIND', 'GM 072'),
    ('Piccolo', 121, 0, 72, 'FLUTE', 'GM 073'),
    ('Flute', 121, 0, 73, 'FLUTE', 'GM 074'),
    ('Recorder', 121, 0, 74, 'FLUTE', 'GM 075'),
    ('Pan Flute', 121, 0, 75, 'FLUTE', 'GM 076'),
    ('Bottle Blow', 121, 0, 76, 'FLUTE', 'GM 077'),
    ('Shakuhachi', 121, 0, 77, 'ETHNIC', 'GM 078'),
    ('Whistle', 121, 0, 78, 'FLUTE', 'GM 079'),
    ('Ocarina', 121, 0, 79, 'FLUTE', 'GM 080'),
    ('Square Wave', 121, 0, 80, 'HARD LEAD', 'GM 081'),
    ('Saw Wave', 121, 0, 81, 'HARD LEAD', 'GM 082'),
    ('Syn.Calliope', 121, 0, 82, 'SOFT LEAD', 'GM 083'),
    ('Chiffer Lead', 121, 0, 83, 'SOFT LEAD', 'GM 084'),
    ('Charang', 121, 0, 84, 'HARD LEAD', 'GM 085'),
    ('Solo Vox', 121, 0, 85, 'SOFT LEAD', 'GM 086'),
    ('5th Saw Wave', 121, 0, 86, 'HARD LEAD', 'GM 087'),
    ('Bass & Lead', 121, 0, 87, 'HARD LEAD', 'GM 088'),
    ('Fantasia', 121, 0, 88, 'OTHER SYNTH', 'GM 089'),
    ('Warm Pad', 121, 0, 89, 'SOFT PAD', 'GM 090'),
    ('Polysynth', 121, 0, 90, 'OTHER SYNTH', 'GM 091'),
    ('Space Voice', 121, 0, 91, 'VOX', 'GM 092'),
    ('Bowed Glass', 121, 0, 92, 'SOFT PAD', 'GM 093'),
    ('Metal Pad', 121, 0, 93, 'BRIGHT PAD', 'GM 094'),
    ('Halo Pad', 121, 0, 94, 'BRIGHT PAD', 'GM 095'),
    ('Sweep Pad', 121, 0, 95, 'SOFT PAD', 'GM 096'),
    ('Ice Rain', 121, 0, 96, 'OTHER SYNTH', 'GM 097'),
    ('Soundtrack', 121, 0, 97, 'SOFT PAD', 'GM 098'),
    ('Crystal', 121, 0, 98, 'BELL', 'GM 099'),
    ('Atmosphere', 121, 0, 99, 'AC.GUITAR', 'GM 100'),
    ('Brightness', 121, 0, 100, 'OTHER SYNTH', 'GM 101'),
    ('Goblin', 121, 0, 101, 'PULSATING', 'GM 102'),
    ('Echo Drops', 121, 0, 102, 'BRIGHT PAD', 'GM 103'),
    ('Star Theme', 121, 0, 103, 'BRIGHT PAD', 'GM 104'),
    ('Sitar', 121, 0, 104, 'PLUCKED', 'GM 105'),
    ('Banjo', 121, 0, 105, 'PLUCKED', 'GM 106'),
    ('Shamisen', 121, 0, 106, 'PLUCKED', 'GM 107'),
    ('Koto', 121, 0, 107, 'PLUCKED', 'GM 108'),
    ('Kalimba', 121, 0, 108, 'PLUCKED', 'GM 109'),
    ('Bagpipe', 121, 0, 109, 'ETHNIC', 'GM 110'),
    ('Fiddle', 121, 0, 110, 'STRINGS', 'GM 111'),
    ('Shanai', 121, 0, 111, 'ETHNIC', 'GM 112'),
    ('Tinkle Bell', 121, 0, 112, 'BELL', 'GM 113'),
    ('Agogo', 121, 0, 113, 'PERCUSSION', 'GM 114'),
    ('Steel Drums', 121, 0, 114, 'MALLET', 'GM 115'),
    ('Woodblock', 121, 0, 115, 'PERCUSSION', 'GM 116'),
    ('Taiko', 121, 0, 116, 'PERCUSSION', 'GM 117'),
    ('Melo. Tom 1', 121, 0, 117, 'PERCUSSION', 'GM 118'),
    ('Synth Drum', 121, 0, 118, 'PERCUSSION', 'GM 119'),
    ('Reverse Cym.', 121, 0, 119, 'PERCUSSION', 'GM 120'),
    ('Gt.FretNoise', 121, 0, 120, 'AC.GUITAR', 'GM 121'),
    ('Breath Noise', 121, 0, 121, 'SYNTH FX', 'GM 122'),
    ('Seashore', 121, 0, 122, 'SOUND FX', 'GM 123'),
    ('Bird', 121, 0, 123, 'SOUND FX', 'GM 124'),
    ('Telephone 1', 121, 0, 124, 'SOUND FX', 'GM 125'),
    ('Helicopter', 121, 0, 125, 'SOUND FX', 'GM 126'),
    ('Applause', 121, 0, 126, 'SOUND FX', 'GM 127'),
    ('Gun Shot', 121, 0, 127, 'SOUND FX', 'GM 128'),
    ]
| psychobaka/PatchCorral | src/data/synthesizers/generalmidi/GM.py | Python | gpl-3.0 | 7,553 | [
"CRYSTAL"
] | 4406add65fe239f7481e9b289612b722b20d6043c7f96a5bd9a1adc27e3f2681 |
"""Command line interface module"""
import sys
import logging
from pathlib import Path
from argparse import ArgumentParser
import helper
import helper.main
import helper.device
LOGGER = logging.getLogger(__name__)
# Top-level argument parser; each helper command is a subparser below.
PARSER = ArgumentParser(
    prog="helper", description="CLI utility for interfacing with Android devices",
    epilog="""You can pass the '-h' argument to all of the above commands for detailed description.
Feel free to check out the source code and report any issues at github.com/rmmbear/Android-QA-Helper"""
)
PARSER.add_argument(
    "-v", "--version", action="version", version="%(prog)s {}".format(helper.VERSION))
COMMANDS = PARSER.add_subparsers(title="Commands", dest="command", metavar="")
### General-use optional arguments (shared between commands via `parents=`)
OPT_DEVICE = ArgumentParser("device", add_help=False)
OPT_DEVICE.add_argument(
    "-d", "--device", default="", metavar="device",
    help="""Specify command target by passing a device's serial number.
    This value must be given if there are multiple devices connected.""")
OPT_OUTPUT = ArgumentParser("output", add_help=False)
OPT_OUTPUT.add_argument(
    "-o", "--output", default=".", metavar="directory",
    help="""Specify the output directory. If no directory is chosen, the files
    will be saved in the same directory helper was launched from.""")
### Helper Commands definitions
CMD = COMMANDS.add_parser(
    "install", parents=[OPT_DEVICE, OPT_OUTPUT], aliases=["i"],
    help="Install an app on connected device.",
    epilog="""If another version of the app is already on the device, helper
    will attempt to remove it and all its data first before replacing it.
    Note that system apps can only be replaced with newer versions and the
    '--replace-system-apps' argument must be used.""")
CMD.add_argument(
    "install", metavar="APK",
    help=".apk file.")
CMD.add_argument(
    "--obb", nargs="+", metavar="OBB",
    # FIX: help text was a copy-paste of --keep-data's and described the
    # wrong option.
    help="Expansion file(s) (.obb) to push to the device along with the apk.")
CMD.add_argument(
    "--keep-data", action="store_true",
    help="Keep data and cache directories when replacing apps.")
CMD.add_argument(
    "--location", choices=["internal", "external"], default="automatic",
    help="""Set the install location to either internal or external SD card. By
    default it is set to 'automatic', which lets the device decide the location
    based on available storage space and install location set in app's
    AndroidManifest.xml.""")
CMD.add_argument(
    "--installer-name", default="android.helper",
    help="""Use this option to set the installer name used during installation.
    By default it is 'android.helper'. Under normal circumstances this would be
    the name of the appstore app used, so for example:
    com.sec.android.app.samsungapps (Samsung Galaxy Apps),
    com.android.vending (Google Play Store),
    com.amazon.mShop.android (Amazon Underground - Android app),
    com.amazon.venezia (Amazon appstore - native Kindle Fire).
    Changing installer name may be useful for testing store-specific
    functionality.""")
CMD = COMMANDS.add_parser(
    "clean", parents=[OPT_DEVICE, OPT_OUTPUT], aliases="c",
    help="Clean the device storage as per the instructions in cleaner config.",
    epilog=f"""By default, this command removes only helper-created
    files but its behavior can be customized with cleaner config file.
    Currently available options are: removing files and directories, clearing
    app data, uninstalling apps and replacing files on device with local
    versions. For configuration example, see the default config file:
    {helper.CLEANER_CONFIG}.""")
CMD.add_argument(
    "clean", nargs="?", default=helper.CLEANER_CONFIG, metavar="config",
    help="""Path to a valid cleaner config file. For example of a
    valid config, see the default file in this program's root directory.""")
CMD = COMMANDS.add_parser(
    "record", parents=[OPT_DEVICE, OPT_OUTPUT], aliases="r",
    help="Record the screen of your device.",
    epilog="""To stop and save the recorded video, press 'ctrl+c'.
    Videos have a hard time-limit of three minutes -- this is imposed by
    the internal command and cannot be extended -- recording will be stopped
    automatically after reaching that limit. NOTE: Sound is not recorded.""")
CMD = COMMANDS.add_parser(
    "traces", parents=[OPT_DEVICE, OPT_OUTPUT], aliases="t",
    help="Save the dalvik vm stack traces (aka ANR log) to a file.",
    epilog="Save the dalvik vm stack traces (aka ANR log) to a file.")
# TODO: Update detailed description after implementing obb extraction
CMD = COMMANDS.add_parser(
    "extract", parents=[OPT_DEVICE, OPT_OUTPUT], aliases="x",
    help="""Extract .apk file of an installed application.""",
    epilog="""Extract the .apk file from device's storage. On some devices
    the archives cannot be extracted. In general, if it is possible to
    extract third part apps, it should be also possible to the same with
    system apps. Note: app's expansion files (OBBs) cannot yet be extracted.""")
CMD.add_argument(
    "extract_apk", nargs="+", metavar="app name",
    help="Package ID of an installed app. For example: android.com.browser.")
COMMANDS.add_parser(
    "scan", aliases="s",
    help="Show status of all connected devices.",
    epilog="""Scan shows serial number, manufacturer, model and connection
    status of all devices connected. If a connection with device could not
    be established, only its serial and connection status is shown.""")
COMMANDS.add_parser(
    "dump", aliases=["d"], parents=[OPT_DEVICE, OPT_OUTPUT],
    help="Dump all available device information to file.",
    epilog="Dump all available device information to file.")
CMD = COMMANDS.add_parser(
    "shell", aliases=["sh"], parents=[OPT_DEVICE],
    help="Issue a shell command for a device.",
    epilog="Issue a shell command for a device.")
CMD.add_argument(
    "command_", nargs="*", metavar="command",
    help="""Note: put "--" as the first argument to suppress argument parsing
    (necessary if your shell command contains dashes).""")
CMD = COMMANDS.add_parser(
    "adb", parents=[OPT_DEVICE],
    help="Issue an adb command for a device.",
    epilog="Issue an adb command for a device.")
CMD.add_argument(
    "command_", nargs="*", metavar="command",
    help="""Note: put "--" as the first argument to suppress argument parsing
    (necessary if your shell command contains dashes).""")
### Hidden commands (intentionally undocumented in --help)
CMD = COMMANDS.add_parser("debug-dump", parents=[OPT_DEVICE, OPT_OUTPUT])
CMD.add_argument("--full", action="store_true")
COMMANDS.add_parser("run-tests")
del CMD
# Baseline namespace used by main() to detect "helper ran with no arguments".
PARSER_NO_ARGS = PARSER.parse_args([])
def find_adb_and_aapt(require_adb=True, require_aapt=True):
    """Find and set paths of the necessary tools.

    Invokes helper.tools.tool_grabber if tools are not found.
    This function modifies global state - it sets paths of ADB and AAPT,
    as well as their _VERSION variables, in package's __init__'s scope.
    Exits the program if a required tool cannot be obtained.
    """
    if not require_adb and not require_aapt:
        return
    tools = ("ADB", "AAPT")
    missing = []
    required = {"ADB": require_adb, "AAPT": require_aapt}
    for tool_name in tools:
        tool_path = getattr(helper, tool_name)
        # only care about the executable if it's required
        if required[tool_name] and not tool_path:
            missing.append(tool_name)
    if missing:
        print(f"Tools missing: {missing}")
        print("Helper cannot run without above tools - would you like to download them now?")
        try:
            userinput = input("Y/N ")
        except EOFError:
            print("Aborting...")
            sys.exit()
        # BUG FIX: pressing enter without typing anything used to raise
        # IndexError on userinput[0]; treat empty input as a refusal.
        if not userinput or userinput[0] not in "yY":
            LOGGER.info("early exit - user did not allow download of missing tools")
            sys.exit()
        if len(missing) == 2:
            tool = "all"
        else:
            tool = missing[0].lower()
        # download the missing tool(s) using helper.tools.tool_grabber
        from .tools import tool_grabber
        tool_grabber.main(arguments=f"--tool {tool}".split())
        for tool in missing:
            out = helper.find_executable(tool.lower())
            setattr(helper, tool, out[0])
            setattr(helper, f"{tool}_VERSION", out[1])
        # it is possible that tool_grabber fails to download all tools
        # (e.g. because of a faulty internet connection), so make sure
        # the required tools have really been found before continuing
        found_all = True
        for tool in missing:
            if not getattr(helper, tool):
                found_all = False
        if not found_all:
            LOGGER.error("Could not find downloaded tools, aborting")
            sys.exit()
def pick_device():
    """Return a device chosen by the user.

    A single connected device is returned without asking; None is
    returned when no devices are available.
    """
    available = helper.device.get_devices(limit_init=["identity"])
    if not available:
        return None
    if len(available) == 1:
        return available[0]
    print("Multiple devices detected!\n")
    print("Please choose which of devices below you want to work with.\n")
    for index, device in enumerate(available):
        print(f"{index} : {device.name}")
    while True:
        print("Pick a device: ")
        answer = input().strip()
        if not answer.isnumeric():
            print("The answer must be a number!")
            continue
        choice = int(answer)
        if 0 <= choice < len(available):
            return available[choice]
        print("Answer must be one of the above numbers!")
def record(device, args):
    """Record the device's screen; return the saved path, or False when
    nothing was recorded."""
    saved_to = helper.main.record(device, args.output)
    if not saved_to:
        return False
    print("Recorded video was saved to:")
    print(saved_to)
    return saved_to
def install(device, args):
    """Validate the apk path from the CLI and delegate the actual
    installation to helper.main.install."""
    apk_path = Path(args.install)
    if not apk_path.is_file():
        print("ERROR: Provided path does not point to an existing file:")
        print(args.install)
        return
    helper.main.install(
        device, args.install, args.obb, install_location=args.location,
        keep_data=args.keep_data, installer_name=args.installer_name)
def pull_traces(device, args):
    """Save the device's ANR traces and report where the file was stored.

    Returns True on success, False when nothing could be pulled.
    """
    saved_to = helper.main.pull_traces(device, args.output)
    if not saved_to:
        return False
    print("Traces file was saved to:")
    print(saved_to)
    return True
def extract_apk(device, args):
    """Extract the .apk of every package named on the command line,
    printing the destination of each successful extraction."""
    for package_id in args.extract_apk:
        saved_to = device.extract_apk(package_id, args.output)
        if saved_to:
            print("Package saved to:")
            print(saved_to)
def clean(device, args):
    """Run the cleaner using the config file given on the command line."""
    if not Path(args.clean).is_file():
        print("Provided path does not point to an existing config file:")
        print(args.clean)
        return
    helper.main.clean(device, args.clean)
def scan(args):
    """Print a table of all connected devices: index, serial number,
    manufacturer, model and connection status."""
    device_list = helper.device.get_devices(True, ["identity"], True)
    if not device_list:
        print()
        print("No devices detected")
        return
    # Load everything into lines, including the headers
    lengths = [2, 10, 12, 5, 6]
    lines = [("#", "Serial #", "Manufacturer", "Model", "Status")]
    for count, device in enumerate(device_list):
        count = str(count + 1)
        serial = device.serial
        manufacturer = device.info_dict["device_manufacturer"]
        manufacturer = manufacturer if manufacturer else "Unknown"
        # BUG FIX: this read "device_manufacturer" a second time, so the
        # Model column always duplicated the Manufacturer column.
        # NOTE(review): key name mirrors "device_manufacturer"; confirm that
        # extract_data populates info_dict with "device_model".
        model = device.info_dict["device_model"]
        model = model if model else "Unknown"
        status = device._status
        for x, item in enumerate((count, serial, manufacturer, model, status)):
            lengths[x] = max(len(item), lengths[x])
        lines.append((count, serial, manufacturer, model, status))
    # create format string dynamically
    # each column has a width of its widest element + 2
    format_str = "{:" + "}{:".join([str(x+2) for x in lengths]) + "}"
    for line in lines:
        print(format_str.format(*line))
def info_dump(device, args):
    """Write a plain-text report with all of *device*'s information into
    the chosen output directory."""
    device.extract_data(limit_to=["identity"])
    print(f"Collecting info from {device.name} ...")
    report_path = (Path(args.output) / f"{device.filename}_REPORT").resolve()
    with report_path.open(mode="w") as report_file:
        device.info_dump(report_file)
    print(f"Report saved to {str(report_path)}")
def debug_dump(device, args):
    """Dump device data to files.

    What is dumped is controlled by extract_data's INFO_SOURCES.
    This data is meant to be loaded into DummyDevice for debugging and
    compatibility tests.
    """
    print("Please remember that dumped files may contain sensitive data. Use caution.")
    directory = Path(args.output)
    directory.mkdir(exist_ok=True)
    # this is very silly
    # make _init_cache getter retrieve the value from immortal_cache
    # while its setter simply does nothing, which prevents device.get_data from
    # "deleting" the cache (it does so by assigning an empty dict to _init_cache)
    # because doing it this way modifies the class definition at runtime and prevents
    # updating device information (except for methods which skip cache checks
    # explicitly), helper should exit immediately after data is dumped
    # The only reason I'm going with this is because I don't want to mess with
    # device module right now
    device.immortal_cache = {}
    helper.device.Device._init_cache = property(
        lambda self: self.immortal_cache,
        lambda self, value: None
    )
    print("-----")
    print("\nDumping", device.name)
    device_dir = Path(directory, (device.filename + "_DUMP"))
    device_dir.mkdir(exist_ok=True)
    from helper.extract_data import INFO_SOURCES
    for source_name, command in INFO_SOURCES.items():
        # "debug" sources are only dumped when --full was requested
        if source_name.startswith("debug") and not args.full:
            continue
        if source_name in device.immortal_cache:
            output = device.immortal_cache[source_name]
        else:
            output = device.shell_command(*command, return_output=True, as_list=False)
        with Path(device_dir, source_name).open(mode="w", encoding="utf-8") as dump_file:
            dump_file.write(output)
        print(".", end="", flush=True)
    print("\nDumping device's info_dict")
    print("\nLoading data")
    try:
        device.extract_data()
    # BUG FIX: the bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch only genuine errors.
    except Exception:
        print("ERROR: Encountered an exception during load, dumping as-is")
    with Path(device_dir, "device_info_dict").open(mode="w", encoding="utf-8") as info_dict_file:
        for key, value in device.info_dict.items():
            info_dict_file.write(f"{key}: {str(value)}\n")
    print("Device dumped to", str(device_dir))
def run_tests(args):
    """Run helper's pytest suite; no-op with a message if pytest is missing."""
    try:
        import pytest
    except ImportError:
        print("Could not import pytest, cannot run tests")
        return
    # Strip helper's own CLI arguments so pytest does not try to parse them.
    sys.argv = [sys.argv[0]]
    pytest.main()
def shell_command(device, args):
    """Run an adb shell command on *device*, streaming its output."""
    print()
    device.shell_command(*args.command_, return_output=False, check_server=False)
def adb_command(args):
    """Run a raw adb command, streaming its output."""
    helper.device.adb_command(*args.command_, return_output=False, check_server=False)
# Maps each CLI command (and its alias) to its handler and how many devices
# it needs: 0 = none, 1 = exactly one, 2 = runs on every connected device.
COMMAND_DICT = { #command : (function, required_devices),
    #No device commands
    "adb":(adb_command, 0),
    "run-tests":(run_tests, 0),
    "scan":(scan, 0), "s": (scan, 0),
    #Single device commands
    "extract":(extract_apk, 1), "x":(extract_apk, 1),
    "install":(install, 1), "i":(install, 1),
    "record":(record, 1), "r":(record, 1),
    "shell":(shell_command, 1), "sh":(shell_command, 1),
    "traces":(pull_traces, 1), "t":(pull_traces, 1),
    #Multi device commands
    #these commands will run even when only one device is available
    "clean":(clean, 2), "c":(clean, 2),
    "debug-dump":(debug_dump, 2),
    "dump":(info_dump, 2), "d":(info_dump, 2),
    }
def main(args=None):
    """Parse and execute input commands.

    Ensures adb/aapt are available, parses the CLI, then dispatches to
    the handler registered in COMMAND_DICT, waiting for and selecting
    target devices as the command requires.
    """
    find_adb_and_aapt()
    LOGGER.info("Starting parsing arguments")
    args = PARSER.parse_args(args)
    LOGGER.info("Starting helper with option %s", args.command)
    # No arguments at all -> show help instead of doing nothing.
    if args == PARSER_NO_ARGS:
        PARSER.parse_args(["-h"])
        return
    if hasattr(args, "output"):
        # BUG FIX: args.output is a string; the old code validated
        # Path(args.output[0]) -- i.e. only the path's first character.
        if not Path(args.output).is_dir():
            print("ERROR: The provided path does not point to an existing directory!")
            return
    command, required_devices = COMMAND_DICT[args.command]
    #No devices required, call function directly
    if required_devices == 0:
        command(args)
        return
    chosen_device = None
    #FIXME: do not initialize other devices when performing actions on specific device
    #TODO: Implement a timeout
    print("Waiting for any device to come online...")
    helper.device.adb_command('wait-for-device', return_output=True)
    connected_devices = helper.device.get_devices(initialize=False)
    connected_serials = {device.serial: device for device in connected_devices}
    if hasattr(args, "device"):
        if args.device:
            LOGGER.debug("Chosen device set to %s", args.device)
            try:
                chosen_device = connected_serials[args.device]
            except KeyError:
                print(f"Device with serial number {args.device} was not found by Helper!")
                return
    #TODO: implement concurrent commands
    if required_devices == 1:
        if not chosen_device:
            chosen_device = pick_device()
        try:
            command(chosen_device, args)
        except helper.device.DeviceOfflineError:
            print("Device has been suddenly disconnected!")
    if required_devices == 2:
        if chosen_device:
            connected_devices = [chosen_device]
        for device in connected_devices:
            try:
                command(device, args)
            except helper.device.DeviceOfflineError:
                print(f"Device {device.name} has been suddenly disconnected!")
#TODO: Implement screenshot command
#TODO: Implement keyboard and keyboard-interactive
# Enter text into textfields on Android device using PC's keyboard
| rmmbear/Android-QA-Helper | helper/cli.py | Python | gpl-3.0 | 18,327 | [
"Galaxy"
] | bda2dc66376dd41ede28047aa709a805689b11bc14ccd7e3e7522337949fc62b |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from abc import abstractmethod
from dataclasses import dataclass
from typing import Callable, Dict, List, Union
import numpy as np
from psi4.driver import constants
@dataclass
class Lineshape:
    """Lineshape ABC

    Attributes
    ----------
    domain : Union[numpy.ndarray, List[float]]
        Domain of the spectral band.
    gamma : Callable[[float], float]
        A function returning the broadening factor.

    Notes
    -----
    Why do we use a callable broadening factor?
    For plots in the *wavelength domain*, the broadening factor depends on the location of the band's maximum.
    """
    domain: Union[np.ndarray, List[float]]
    gamma: Callable[[float], float]

    @abstractmethod
    def lineshape(self, x_0: float) -> np.ndarray:
        """Return the profile over `domain` for a band centered at `x_0`."""
        pass

    @abstractmethod
    def maximum(self, x_0: float) -> float:
        """Return the peak height of the profile for a band centered at `x_0`."""
        pass
class Gaussian(Lineshape):
    r"""Gaussian function on `domain`, centered at `x_0` with broadening `gamma`.

    Parameters
    ----------
    domain : Union[List[float], numpy.ndarray]
        The domain of the Gaussian profile.
    gamma : Callable[[float], float]
        Function returning the broadening parameter for a given band center.
        (FIX: previously documented as a plain float.) The returned value is
        related to the full width at half maximum as
        :math:`\mathrm{FWHM} = \gamma \sqrt{2\ln 2}`

    Notes
    -----
    Use this profile to model inhomogeneous broadening.
    """
    def lineshape(self, x_0: float) -> np.ndarray:
        """Gaussian function on `self.domain`, centered at `x_0` with broadening `self.gamma`.

        Parameters
        ----------
        x_0 : float
            Center of the Gaussian, i.e. its maximum.

        Returns
        -------
        gauss : numpy.ndarray
            The Gaussian profile, normalized to unit area.
        """
        # Evaluate the (possibly x_0-dependent) broadening callable only once.
        width = self.gamma(x_0)
        prefactor = 2.0 / (width * np.sqrt(2.0 * np.pi))
        exponent = -2.0 * ((self.domain - x_0) / width)**2
        return prefactor * np.exp(exponent)

    def maximum(self, x_0: float) -> float:
        """Peak height of the normalized profile for a band centered at `x_0`."""
        return 2.0 / (self.gamma(x_0) * np.sqrt(2.0 * np.pi))
class Lorentzian(Lineshape):
    """Lorentzian function on `domain`, centered at `x_0` with broadening `gamma`.

    Parameters
    ----------
    domain : Union[List[float], numpy.ndarray]
        The domain of the Lorentzian profile.
    gamma : Callable[[float], float]
        Function returning the broadening parameter for a given band center.
        The returned value is the full width at half maximum (FWHM).

    Notes
    -----
    Use this profile to model homogeneous broadening.
    """
    def lineshape(self, x_0: float) -> np.ndarray:
        """Lorentzian function on :py:attr:`Lineshape.domain`, centered at `x_0` with broadening :py:attr:`Lineshape.gamma`.

        Parameters
        ----------
        x_0
            Center of the Lorentzian, i.e. its maximum.

        Returns
        -------
        lorentz : numpy.ndarray
            The Lorentzian profile, normalized to unit area.
        """
        prefactor = 1.0 / np.pi
        numerator = self.gamma(x_0) / 2.0
        denominator = (self.domain - x_0)**2 + numerator**2
        return prefactor * (numerator / denominator)

    def maximum(self, x_0: float) -> float:
        """Peak height of the normalized profile for a band centered at `x_0`."""
        return 2.0 / (np.pi * self.gamma(x_0))
def prefactor_opa() -> float:
    r"""Prefactor for converting microscopic observable to decadic molar
    extinction coefficient in one-photon absorption.

    Returns
    -------
    prefactor : float

    Notes
    -----
    Evaluates, in SI units,

    .. math::
       k = \frac{4\pi^{2}N_{\mathrm{A}}}{3\times 1000\times \ln(10) (4 \pi \epsilon_{0}) n \hbar c}

    and then rescales it, since the microscopic quantities (excitation
    energies and transition dipole moments) are expressed in atomic units.
    The refractive index :math:`n` is, in general, frequency-dependent;
    here it is assumed constant and equal to 1.
    """
    avogadro = constants.get("Avogadro constant")
    light_speed = constants.get("speed of light in vacuum")
    hbar = constants.get("Planck constant over 2 pi")
    vacuum_permittivity = constants.get("electric constant")
    # Conversion of a transition dipole moment from atomic units to C * cm.
    dipole_au_to_C_cm = constants.get("elementary charge") * constants.get(
        "Bohr radius") * constants.conversion_factor("m", "cm")
    si_numerator = 4.0 * np.pi**2 * avogadro
    si_denominator = 3 * 1000 * np.log(10) * (4 * np.pi * vacuum_permittivity) * hbar * light_speed
    return (si_numerator / si_denominator) * dipole_au_to_C_cm**2
def prefactor_ecd() -> float:
    r"""Prefactor for converting microscopic observable to decadic molar
    extinction coefficient in electronic circular dichroism.

    Returns
    -------
    prefactor : float

    Notes
    -----
    Evaluates, in SI units,

    .. math::
       k = \frac{16\pi^{2}N_{\mathrm{A}}}{3\times 1000\times \ln(10) (4 \pi \epsilon_{0}) n \hbar c^{2}}

    and then rescales it, since the microscopic quantities (excitation
    energies and transition dipole moments) are expressed in atomic units.
    The refractive index :math:`n` is, in general, frequency-dependent;
    here it is assumed constant and equal to 1.
    """
    avogadro = constants.get("Avogadro constant")
    light_speed = constants.get("speed of light in vacuum")
    hbar = constants.get("Planck constant over 2 pi")
    vacuum_permittivity = constants.get("electric constant")
    # Electric transition dipole: atomic units -> C * cm.
    electric_au_to_C_cm = constants.get("elementary charge") * constants.get(
        "Bohr radius") * constants.conversion_factor("m", "cm")
    # Magnetic transition dipole: atomic units -> J / T (length scaled to cm).
    magnetic_au_to_J_per_T = 2.0 * constants.get("Bohr magneton") * constants.conversion_factor("m", "cm")
    rotatory_conversion = electric_au_to_C_cm * magnetic_au_to_J_per_T
    si_numerator = 16.0 * np.pi**2 * avogadro
    si_denominator = 3 * 1000 * np.log(10) * (4 * np.pi * vacuum_permittivity) * hbar * light_speed**2
    return (si_numerator / si_denominator) * rotatory_conversion
def spectrum(*,
             poles: Union[List[float], np.ndarray],
             residues: Union[List[float], np.ndarray],
             kind: str = "opa",
             lineshape: str = "gaussian",
             gamma: float = 0.2,
             npoints: int = 5000,
             out_units: str = "nm") -> Dict[str, np.ndarray]:
    r"""One-photon absorption (OPA) or electronic circular dichroism (ECD)
    spectra with phenomenological line broadening.

    This function gives arrays of values ready to be plotted as OPA spectrum:

    .. math::
       \varepsilon(\omega) =
       \frac{4\pi^{2}N_{\mathrm{A}}\omega}{3\times 1000\times \ln(10) (4 \pi \epsilon_{0}) n \hbar c}
       \sum_{i \rightarrow j}g_{ij}(\omega)|\mathbf{\mu}_{ij}|^{2}

    or ECD spectrum:

    .. math::
       \Delta\varepsilon(\omega) =
       \frac{16\pi^{2}N_{\mathrm{A}}\omega}{3\times 1000\times \ln(10) (4 \pi \epsilon_{0}) n \hbar c^{2}}
       \sum_{i \rightarrow j}g_{ij}(\omega)\Im(\mathbf{\mu}_{ij}\cdot\mathbf{m}_{ij})

    in macroscopic units of :math:`\mathrm{L}\cdot\mathrm{mol}^{-1}\cdot\mathrm{cm}^{-1}`.
    The lineshape function :math:`g_{ij}(\omega)` with phenomenological
    broadening :math:`\gamma` is used for the convolution of the infinitely
    narrow results from a linear response calculation.

    Parameters
    ----------
    poles
        Poles of the response function, i.e. the excitation energies.
        These are **expected** in atomic units of angular frequency.
    residues
        Residues of the linear response functions, i.e. transition dipole
        moments (OPA) and rotatory strengths (ECD).
        These are **expected** in atomic units.
    kind
        {"opa", "ecd"}
        Which kind of spectrum to generate, one-photon absorption ("opa") or
        electronic circular dichroism ("ecd").
        Default is `opa`.
    lineshape
        {"gaussian", "lorentzian"}
        The lineshape function to use in the fitting. Default is `gaussian`.
    gamma
        Full width at half maximum of the lineshape function.
        Default is 0.2 au of angular frequency.
        This value is **expected** in atomic units of angular frequency.
    npoints
        How many points to generate for the x axis. Default is 5000.
    out_units
        Units for the output array `x`, the x axis of the spectrum plot.
        Default is wavelengths in nanometers.
        Valid (and case-insensitive) values for the units are:

        - `au` atomic units of angular frequency
        - `Eh` atomic units of energy
        - `eV`
        - `nm`
        - `THz`

    Returns
    -------
    spectrum : Dict
        The fitted electronic absorption spectrum, with units for the x axis
        specified by the `out_units` parameter.
        This is a dictionary containing the convoluted (key: `convolution`)
        and the infinitely narrow spectra (key: `sticks`).

        .. code-block:: python

           {"convolution": {"x": np.ndarray, "y": np.ndarray},
            "sticks": {"poles": np.ndarray, "residues": np.ndarray}}

    Raises
    ------
    ValueError
        If poles and residues have mismatched shapes, or if `kind`,
        `lineshape`, or `out_units` is not among the recognized values.

    Notes
    -----
    * Conversion of the broadening parameter :math:`\gamma`.
      The lineshape functions are formulated as functions of the angular
      frequency :math:`\omega`.
      When converting to other physical quantities, the broadening parameter
      has to be modified accordingly.
      If :math:`\gamma_{\omega}` is the chosen broadening parameter then:

      - Wavelength: :math:`\gamma_{\lambda} = \frac{\lambda_{ij}^{2}}{2\pi c}\gamma_{\omega}`
      - Frequency: :math:`\gamma_{\nu} = \frac{\gamma_{\omega}}{2\pi}`
      - Energy: :math:`\gamma_{E} = \gamma_{\omega}\hbar`

    References
    ----------
    A. Rizzo, S. Coriani, K. Ruud, "Response Function Theory Computational
    Approaches to Linear and Nonlinear Optical Spectroscopy". In Computational
    Strategies for Spectroscopy.
    """
    # Transmute inputs to np.ndarray
    if isinstance(poles, list):
        poles = np.array(poles)
    if isinstance(residues, list):
        residues = np.array(residues)
    # Validate input arrays
    if poles.shape != residues.shape:
        raise ValueError(f"Shapes of poles ({poles.shape}) and residues ({residues.shape}) vectors do not match!")
    # Validate kind of spectrum
    kind = kind.lower()
    valid_kinds = ["opa", "ecd"]
    if kind not in valid_kinds:
        raise ValueError(f"Spectrum kind {kind} not among recognized ({valid_kinds})")
    # Validate output units
    out_units = out_units.lower()
    valid_out_units = ["au", "eh", "ev", "nm", "thz"]
    if out_units not in valid_out_units:
        raise ValueError(f"Output units {out_units} not among recognized ({valid_out_units})")
    c = constants.get("speed of light in vacuum")
    c_nm = c * constants.conversion_factor("m", "nm")
    hbar = constants.get("Planck constant over 2 pi")
    h = constants.get("Planck constant")
    Eh = constants.get("Hartree energy")
    # Conversion factors from au of angular frequency to each output unit.
    # Note that in atomic units (hbar = 1) energy in Eh and angular frequency
    # in au are numerically equal, hence the identity converters below.
    au_to_nm = 2.0 * np.pi * c_nm * hbar / Eh
    au_to_THz = (Eh / h) * constants.conversion_factor("Hz", "THz")
    au_to_eV = constants.get("Hartree energy in eV")
    converters = {
        "au": lambda x: x,  # Angular frequency in atomic units
        "eh": lambda x: x,  # Energy in atomic units
        "ev": lambda x: x * au_to_eV,  # Energy in electronvolts
        "nm": lambda x: au_to_nm / x,  # Wavelength in nanometers
        "thz": lambda x: x * au_to_THz,  # Frequency in terahertz
    }
    # Perform conversion of poles from au of angular frequency to output units
    poles = converters[out_units](poles)
    # Broadening functions: gamma is given in au of angular frequency and has
    # to be re-expressed in the same units as the x axis (see Notes above).
    # For wavelengths the conversion depends on the band position x_0 itself.
    gammas = {
        "au": lambda x_0: gamma,  # Angular frequency in atomic units
        "eh": lambda x_0: gamma,  # Energy in atomic units
        "ev": lambda x_0: gamma * au_to_eV,  # Energy in electronvolts
        "nm": lambda x_0: ((x_0**2 * gamma * (Eh / hbar)) / (2 * np.pi * c_nm)),  # Wavelength in nanometers
        "thz": lambda x_0: gamma * au_to_THz,  # Frequency in terahertz
    }
    # Generate x axis
    # Add a fifth of the range on each side
    expand_side = (np.max(poles) - np.min(poles)) / 5
    x = np.linspace(np.min(poles) - expand_side, np.max(poles) + expand_side, npoints)
    # Validate lineshape
    lineshape = lineshape.lower()
    valid_lineshapes = ["gaussian", "lorentzian"]
    if lineshape not in valid_lineshapes:
        raise ValueError(f"Lineshape {lineshape} not among recognized ({valid_lineshapes})")
    # Obtain lineshape function
    shape = Gaussian(x, gammas[out_units]) if lineshape == "gaussian" else Lorentzian(x, gammas[out_units])
    # Generate y axis, i.e. molar decadic absorption coefficient
    prefactor = prefactor_opa() if kind == "opa" else prefactor_ecd()
    # OPA residues (transition dipoles) enter squared; ECD residues
    # (rotatory strengths) enter linearly.
    transform_residue = (lambda x: x**2) if kind == "opa" else (lambda x: x)
    y = prefactor * x * np.sum([transform_residue(r) * shape.lineshape(p) for p, r in zip(poles, residues)], axis=0)
    # Generate sticks
    sticks = prefactor * np.array([p * transform_residue(r) * shape.maximum(p) for p, r in zip(poles, residues)])
    return {"convolution": {"x": x, "y": y}, "sticks": {"poles": poles, "residues": sticks}}
| jturney/psi4 | psi4/driver/p4util/spectrum.py | Python | lgpl-3.0 | 13,818 | [
"Avogadro",
"Gaussian",
"Psi4"
] | 07fb4c1b9714feef5de615531a982d1d76855e367786ad10738888093717b14d |
#!/usr/bin/env python
# Pyctools-pal - PAL coding and decoding with Pyctools.
# http://github.com/jim-easterbrook/pyctools-pal
# Copyright (C) 2014-20 Jim Easterbrook jim@jim-easterbrook.me.uk
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
__all__ = ['Coder', 'CoderCore', 'PostFilterPAL', 'PreFilterUV', 'UVtoC']
import numpy
from pyctools.core.compound import Compound
from pyctools.core.frame import Frame
from pyctools.components.arithmetic import Arithmetic2
from pyctools.components.colourspace.rgbtoyuv import RGBtoYUV
from pyctools.components.colourspace.matrix import Matrix
from pyctools.components.interp.filterdesign import FilterDesign
from pyctools.components.interp.gaussianfilter import GaussianFilter
from pyctools.components.interp.resize import Resize
from .common import ModulateUV, To4Fsc
class PreFilterUV(Resize):
    """Gaussian low-pass filter for chrominance prior to modulation.

    The "White Book" requires the chrominance to be attenuated by <3 dB
    at 1.3 MHz and >20 dB at 4 MHz; the Gaussian below satisfies this.
    """
    def __init__(self, config={}, **kwds):
        super(PreFilterUV, self).__init__(config=config, **kwds)
        # Horizontal-only Gaussian with the sigma that meets the spec.
        gaussian = GaussianFilter.core(x_sigma=1.659)
        self.filter(gaussian)
class PostFilterPAL(Compound):
    """5.5 MHz low pass filter.

    According to the "White Book" the luminance shall be substantially
    uniform from 0 to 5.5 MHz. It needs to be attenuated at 6 MHz to
    leave space for the sound carrier.
    """
    def __init__(self, config={}, **kwds):
        # Merge the positional config dict with keyword overrides.
        cfg = {}
        cfg.update(config)
        cfg.update(kwds)
        super(PostFilterPAL, self).__init__(
            resize = Resize(),
            # FilterDesign frequencies are fractions of the sampling rate
            # (presumably 4 fsc here — confirm against the caller): flat pass
            # band up to 0.307, transition region, stop band from 0.356.
            fildes = FilterDesign(
                frequency='0.0, 0.307, 0.317, 0.346, 0.356, 0.5',
                gain=' 1.0, 1.0, 1.0, 0.0, 0.0, 0.0',
                weight=' 1.0, 1.0, 0.0, 0.0, 1.0, 1.0',
                aperture=61,
            ),
            config = cfg,
            # Expose the filter design parameters on the compound component.
            config_map = {
                'frequency' : ('fildes.frequency',),
                'gain' : ('fildes.gain',),
                'weight' : ('fildes.weight',),
                'aperture' : ('fildes.aperture',),
                'outframe_pool_len': ('resize.outframe_pool_len',),
            },
            # Wire input -> resize -> output; the designed filter feeds the
            # resizer and the frequency response is exposed for inspection.
            linkages = {
                ('self', 'input') : [('resize', 'input')],
                ('fildes', 'filter') : [('resize', 'filter')],
                ('resize', 'output') : [('self', 'output')],
                ('fildes', 'response') : [('self', 'response')],
            }
        )
class UVtoC(Matrix):
    """Matrix the modulated Cb,Cr pair down to a single chroma component.

    The coefficients include the level conversions specified in the
    "White Book".
    """
    def __init__(self, config={}, **kwds):
        super(UVtoC, self).__init__(config=config, **kwds)
        # Build the 1x2 weighting matrix as a pyctools Frame.
        coeffs = Frame()
        coeffs.data = numpy.array(
            [[2.0 * 0.886 / 2.02, 2.0 * 0.701 / 1.14]], dtype=numpy.float32)
        coeffs.type = 'mat'
        # Append a record of this matrix to the frame's audit trail.
        trail = 'data = Modulated CbCr -> PAL chroma matrix\n'
        trail += ' values: %s\n' % (str(coeffs.data))
        coeffs.metadata.set('audit', coeffs.metadata.get('audit') + trail)
        self.matrix(coeffs)
class Coder(Compound):
    """Conventional PAL coder.

    The input is RGB, sampled at 4 fsc. Other input sampling rates will
    work, but the output will not be a valid PAL signal.
    """
    def __init__(self, config={}, **kwds):
        # Merge the positional config dict with keyword overrides.
        cfg = {}
        cfg.update(config)
        cfg.update(kwds)
        super(Coder, self).__init__(
            # Pipeline: RGB -> Y + UV, UV is low-pass filtered, modulated
            # onto the subcarrier and matrixed to chroma, then Y + C are
            # assembled into a composite and post-filtered to 5.5 MHz.
            rgbyuv = RGBtoYUV(outframe_pool_len=5, matrix='601', audit='Y'),
            prefilter = PreFilterUV(),
            modulator = ModulateUV(),
            matrix = UVtoC(),
            # Rescale the composite to digital PAL levels (140/255 gain,
            # 64 offset).
            assemble = Arithmetic2(
                func='((data1 + data2) * pt_float(140.0 / 255.0)) + pt_float(64.0)'),
            postfilter = PostFilterPAL(),
            config = cfg,
            config_map = {
                'sc_phase' : ('modulator.sc_phase',),
                'VAS_phase' : ('modulator.VAS_phase',),
                'frequency' : ('postfilter.frequency',),
                'gain' : ('postfilter.gain',),
                'weight' : ('postfilter.weight',),
                'aperture' : ('postfilter.aperture',),
                'outframe_pool_len': ('postfilter.outframe_pool_len',),
            },
            linkages = {
                ('self', 'input') : [('rgbyuv', 'input')],
                ('rgbyuv', 'output_Y') : [('assemble', 'input1')],
                ('rgbyuv', 'output_UV') : [('prefilter', 'input')],
                ('prefilter', 'output') : [('modulator', 'input')],
                ('modulator', 'output') : [('matrix', 'input')],
                ('matrix', 'output') : [('assemble', 'input2')],
                ('assemble', 'output') : [('postfilter', 'input')],
                ('postfilter', 'output') : [('self', 'output')],
                # Diagnostic outputs: UV pre-filter and post-filter response.
                ('prefilter', 'filter') : [('self', 'pre_filt')],
                ('postfilter', 'response') : [('self', 'post_resp')],
            }
        )
class CoderCore(Compound):
    """Conventional PAL coder core.

    The input is YUV422, assumed to be Rec 601 13.5 MHz sampled. The
    output is sampled at 4 fsc. Other input sampling rates will work,
    but the output will not be a valid PAL signal.
    """
    def __init__(self, config={}, **kwds):
        # Merge the positional config dict with keyword overrides.
        cfg = {}
        cfg.update(config)
        cfg.update(kwds)
        super(CoderCore, self).__init__(
            # Resample Y and UV to 4 fsc before modulation. UV is half-width
            # in 422, hence the different up-conversion ratio.
            scale_Y = To4Fsc(outframe_pool_len=5),
            scale_UV = To4Fsc(up=922),
            prefilter = PreFilterUV(),
            modulator = ModulateUV(),
            matrix = UVtoC(),
            # Rescale the composite to digital PAL levels (140/255 gain,
            # 64 offset).
            assemble = Arithmetic2(
                func='((data1 + data2) * pt_float(140.0 / 255.0)) + pt_float(64.0)'),
            config = cfg,
            config_map = {
                'sc_phase' : ('modulator.sc_phase',),
                'VAS_phase' : ('modulator.VAS_phase',),
                'outframe_pool_len': ('assemble.outframe_pool_len',),
            },
            linkages = {
                ('self', 'input_Y') : [('scale_Y', 'input')],
                ('scale_Y', 'output') : [('assemble', 'input1')],
                ('self', 'input_UV') : [('scale_UV', 'input')],
                ('scale_UV', 'output') : [('prefilter', 'input')],
                ('prefilter', 'output') : [('modulator', 'input')],
                ('modulator', 'output') : [('matrix', 'input')],
                ('matrix', 'output') : [('assemble', 'input2')],
                ('assemble', 'output') : [('self', 'output')],
                # Diagnostic output: the UV pre-filter coefficients.
                ('prefilter', 'filter') : [('self', 'pre_filt')],
            }
        )
| jim-easterbrook/pyctools-pal | src/pyctools/components/pal/coder.py | Python | gpl-3.0 | 7,608 | [
"Gaussian"
] | 36a1a8792e18a7ddc2e50222ef899160fc749e39f512b56f2d199e15f3e9bdbd |
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
from itertools import chain
from itertools import cycle
import pytest
import requests
# Host and port of the Varnish instance (VCL under test), expected to be
# brought up via docker-compose before running these tests.
TEST_HOSTNAME = 'localhost'
TEST_PORT = 4301

# Representative user-agent strings, grouped by device class. Each group is
# paired with the backend (mweb or www) it should be routed to.
ANDROID_PHONE_UA = [
    # Samsung Galaxy S6
    'Mozilla/5.0 (Linux; Android 6.0.1; SM-G920V Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Mobile Safari/537.36',
    # Samsung Galaxy S6 Edge Plus
    'Mozilla/5.0 (Linux; Android 5.1.1; SM-G928X Build/LMY47X) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.83 Mobile Safari/537.36',
    # Nexus 6P
    'Mozilla/5.0 (Linux; Android 6.0.1; Nexus 6P Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.83 Mobile Safari/537.36',
    # Sony Xperia Z5
    'Mozilla/5.0 (Linux; Android 6.0.1; E6653 Build/32.2.A.0.253) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Mobile Safari/537.36',
    # HTC One M9
    'Mozilla/5.0 (Linux; Android 6.0; HTC One M9 Build/MRA58K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Mobile Safari/537.36',
]
ANDROID_TABLET_UA = [
    # Google Pixel C
    'Mozilla/5.0 (Linux; Android 7.0; Pixel C Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.98 Safari/537.36',
    # Sony Xperia Z4
    'Mozilla/5.0 (Linux; Android 6.0.1; SGP771 Build/32.2.A.0.253; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.98 Safari/537.36',
    # Nvidia Shield Tablet
    'Mozilla/5.0 (Linux; Android 5.1.1; SHIELD Tablet Build/LMY48C) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Safari/537.36',
    # Samsung Galaxy Tab A
    'Mozilla/5.0 (Linux; Android 5.0.2; SAMSUNG SM-T550 Build/LRX22G) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/3.3 Chrome/38.0.2125.102 Safari/537.36',
]
ANDROID_WEBVIEW_UA = [
    # KitKat
    'Mozilla/5.0 (Linux; U; Android 4.1.1; en-gb; Build/KLP) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30',
    # Lillipop
    'Mozilla/5.0 (Linux; Android 4.4; Nexus 5 Build/_BuildID_) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/30.0.0.0 Mobile Safari/537.36',
    # Lollipop+
    'Mozilla/5.0 (Linux; Android 5.1.1; Nexus 5 Build/LMY48B; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/43.0.2357.65 Mobile Safari/537.36'
]
IPHONE_UA = [
    # iOS 7
    'Mozilla/5.0 (iPhone; CPU iPhone OS 6_1_4 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10B350 Safari/8536.25',
    # iOS 6
    'Mozilla/5.0 (iPhone; CPU iPhone OS 6_1_3 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10B329 Safari/8536.25',
    # iOS 5
    'Mozilla/5.0 (iPhone; CPU iPhone OS 5_1_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B206 Safari/7534.48.3',
]
IPAD_UA = [
    # iOS 7
    'Mozilla/5.0 (iPad; CPU OS 7_0 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) CriOS/30.0.1599.12 Mobile/11A465 Safari/8536.25 (3B92C18B-D9DE-4CB7-A02A-22FD2AF17C8F)',
    # iOS 5
    'Mozilla/5.0 (iPad; CPU OS 5_1_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B206 Safari/7534.48.3'
]
WINDOW_PHONE_UA = [
    # MS Lumia 950
    'Mozilla/5.0 (Windows Phone 10.0; Android 4.2.1; Microsoft; Lumia 950) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Mobile Safari/537.36 Edge/13.10586',
]
DESKTOP_BROWSER_UA = [
    # Firefox on Windows
    'Mozilla/5.0 (Windows NT x.y; rv:10.0) Gecko/20100101 Firefox/10.0',
    # Firefox on OSX
    'Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:10.0) Gecko/20100101 Firefox/10.0',
    # Firefox on Linux i686
    'Mozilla/5.0 (X11; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0',
    # Firefox on Linux x86_64
    'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0',
    # Google Chrome on Win 10
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    # Google Chrome on Win 7
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    # Google Chrome on OSX
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    # Google Chrome on Linux
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    # Firefox on Win 10
    'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:48.0) Gecko/20100101 Firefox/48.0',
    # Firefox on OSX
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0',
    # Firefox on Linux
    'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0',
    # Safari on OSX
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/601.7.8 (KHTML, like Gecko) Version/9.1.3 Safari/601.7.8',
    # IE 11 on Win 7
    'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
]
MOBILE_BROWSER_UA = [
    # Firefox on Android
    'Mozilla/5.0 (Android; Mobile; rv:40.0) Gecko/40.0 Firefox/40.0',
    # Firefox OS phone
    'Mozilla/5.0 (Mobile; rv:26.0) Gecko/26.0 Firefox/26.0',
]
# Paths that must be served by the mweb backend for mobile user agents.
WHITELISTED_ROUTES = [
    '/',
    '/comments/',
    '/login',
    '/message/',
    '/r/nba',
    '/register/',
    '/report',
    '/search',
    '/submit',
    '/submit_to_community',
    '/u/wting',
    '/vote/',
    '/help',
    '/w',
    '/wiki',
    '/apple-app-site-association',
    '/csp-report',
    '/error',
    '/favicon/64x64.png',
    '/favicon.ico',
    '/fonts/',
    '/health',
    '/img/',
    '/loginproxy',
    '/refreshproxy',
    '/registerproxy',
    '/logout',
    '/ProductionClient.feijofejoiefjio12343.css',
    '/ProductionClient.feijofejoiefjio12343.js',
    '/robots.txt',
    '/routes',
    '/timings',
    '/XXX?key=1239034&jijio132jio23',
    '/u/XXX?key=1239034&jijio132jio23',
]
# Paths that must fall through to the www backend even for mobile agents.
BLACKLISTED_ROUTES = [
    '/gilded',
    '/thebutton',
]
def is_mweb(resp):
    # The test VCL forces the www backend to answer with its own status
    # code, so a plain 200 means the request reached the mweb backend.
    status = resp.status_code
    return status == 200
def is_soft_mweb(resp):
    # Some endpoints under test may or may not exist; all that matters is
    # that the request was routed away from the www backend, which in this
    # setup always answers 400.
    status = resp.status_code
    return status != 400
def is_www(resp):
    # The www backend is configured to always answer 400 in this test setup.
    return 400 == resp.status_code
@pytest.mark.parametrize(
    'user_agent_string, validation_fn',
    # Pair every user-agent string with the routing check it must satisfy:
    # desktop agents go to www, everything mobile goes to mweb.
    chain(
        zip(DESKTOP_BROWSER_UA, cycle([is_www])),
        zip(ANDROID_PHONE_UA, cycle([is_mweb])),
        zip(ANDROID_TABLET_UA, cycle([is_mweb])),
        zip(ANDROID_WEBVIEW_UA, cycle([is_mweb])),
        zip(IPAD_UA, cycle([is_mweb])),
        zip(IPHONE_UA, cycle([is_mweb])),
        zip(WINDOW_PHONE_UA, cycle([is_mweb])),
        zip(MOBILE_BROWSER_UA, cycle([is_mweb])),
    )
)
def test_user_agent(user_agent_string, validation_fn):
    """Requests to / are routed by User-Agent to the expected backend."""
    resp = requests.get(
        'http://%s:%s/' % (TEST_HOSTNAME, TEST_PORT),
        headers={'User-Agent': user_agent_string},
    )
    assert validation_fn(resp) is True
@pytest.mark.parametrize(
    'user_agent_string, cookies, validation_fn',
    # The mweb-no-redirect=1 cookie must force mobile agents back to www;
    # it must have no effect on desktop agents or when set to 0.
    [
        (DESKTOP_BROWSER_UA[0], None, is_www),
        (DESKTOP_BROWSER_UA[0], {'mweb-no-redirect': '0'}, is_www),
        (DESKTOP_BROWSER_UA[0], {'mweb-no-redirect': '1'}, is_www),
        (ANDROID_PHONE_UA[0], None, is_mweb),
        (ANDROID_PHONE_UA[0], {'mweb-no-redirect': '0'}, is_mweb),
        (ANDROID_PHONE_UA[0], {'mweb-no-redirect': '1'}, is_www),
        (MOBILE_BROWSER_UA[0], None, is_mweb),
        (MOBILE_BROWSER_UA[0], {'mweb-no-redirect': '0'}, is_mweb),
        (MOBILE_BROWSER_UA[0], {'mweb-no-redirect': '1'}, is_www),
        # Test multiple cookies where the override cookie can be in the middle
        # since we're using regex string detection.
        (
            MOBILE_BROWSER_UA[0],
            {
                'foo': 'bar',
                'mweb-no-redirect': '1',
                'zab': 'zoo',
            },
            is_www
        ),
    ]
)
def test_cookie_override(user_agent_string, cookies, validation_fn):
    """The mweb-no-redirect cookie overrides User-Agent based routing."""
    resp = requests.get(
        'http://%s:%s/' % (TEST_HOSTNAME, TEST_PORT),
        headers={'User-Agent': user_agent_string},
        cookies=cookies,
    )
    assert validation_fn(resp) is True
@pytest.mark.parametrize(
    'path, validation_fn',
    # Whitelisted paths must reach mweb for a mobile agent; blacklisted
    # paths must fall through to www regardless of the agent.
    chain(
        zip(WHITELISTED_ROUTES, cycle([is_soft_mweb])),
        zip(BLACKLISTED_ROUTES, cycle([is_www])),
    )
)
def test_whitelisted_routes(path, validation_fn):
    """Route whitelist/blacklist is honored for mobile user agents."""
    resp = requests.get(
        'http://%s:%s%s' % (TEST_HOSTNAME, TEST_PORT, path),
        headers={'User-Agent': MOBILE_BROWSER_UA[0]},
        cookies=None,
    )
    assert validation_fn(resp) is True
if __name__ == '__main__':
    # These are pytest tests against a live docker-compose stack; running
    # the module directly is a mistake, so fail fast with a hint.
    print('Make sure the docker-compose is up and run Python tests via tox.')
    sys.exit(1)
| ajacksified/reddit-mobile | test/test_vcl.py | Python | mit | 8,795 | [
"Galaxy"
] | 34517ae805d25b544fea7d67abb81859c521ffbaf184724510637bc082321d4f |
import os
from os.path import join
import numpy as n
def writeScript(rootName, plate):
    """Write a PBS batch script that runs the firefly SPM fit for one plate.

    Parameters
    ----------
    rootName : str
        Path prefix of the script; the file is written to rootName + ".sh".
    plate : str
        eBOSS plate number, passed to run_stellarpop_ebossdr14_chabrier.
    """
    # Use a context manager so the file handle is closed (and the content
    # flushed) even if one of the writes raises; the original manual
    # open()/close() pair leaked the handle on error.
    with open(rootName + ".sh", 'w') as f:
        # PBS scheduler directives.
        f.write("#!/bin/bash \n")
        f.write("#PBS -l walltime=260:00:00 \n")
        f.write("#PBS -o " + plate + ".o.$PBS_JOBID \n")
        f.write("#PBS -e " + plate + ".e$PBS_JOBID \n")
        f.write("#PBS -M comparat@mpe.mpg.de \n")
        # Environment setup on the cluster node.
        f.write("module load apps/anaconda/2.4.1 \n")
        f.write("module load apps/python/2.7.8/gcc-4.4.7 \n")
        f.write("export PYTHONPATH=$PYTHONPATH:/users/comparat/pySU/galaxy/python/ \n")
        f.write("export PYTHONPATH=$PYTHONPATH:/users/comparat/pySU/spm/python/ \n")
        f.write(" \n")
        # The actual job: run the stellar population fit for this plate.
        f.write("cd /users/comparat/pySU/spm/bin \n")
        f.write("python run_stellarpop_ebossdr14_chabrier " + plate + " \n")
        f.write(" \n")
# Read the list of eBOSS DR14 plate numbers from the catalog directory
# pointed to by the EBOSSDR14_DIR environment variable.
plates = n.loadtxt( join(os.environ['EBOSSDR14_DIR'], "catalogs", "plateNumberList"), unpack=True, dtype='str')
# Emit one PBS submission script per plate into ~/batch_dr14_firefly_chabrier.
for plate in plates:
    rootName = join(os.environ['HOME'], "batch_dr14_firefly_chabrier", plate)
    writeScript(rootName, plate)
"Galaxy"
] | 3faf701da6819b246152b8a2a8f8ab5ba249c43eddfbc891ea2df800df7e3f2d |
"""
Utilities for handling the Graphic Unit Interface.
.. todo::
Switch to Ttk instead of Tk for a better look of the GUI
"""
import Tkinter
from tkFileDialog import askopenfilename
import pygem as pg
import sys
import os
import webbrowser
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
class Gui(object):
    """
    The class for the Graphic Unit Interface.

    :cvar string filename_geometry: input geometry to be morphed.
    :cvar string filename_parameters: input parameters file for FFD.
    :cvar int check_var_1: dump or not the original FFD lattice.
    :cvar int check_var_2: dump or not the morphed FFD lattice.
    :cvar string outfilename: name of the output file geometry.
        The extension of the file is set automatically equal to the one of the input file 'filename_geometry'.
    :cvar string outfilename_lattice_orig: name of the dumped file for the original lattice.
        The extension of the file is set automatically equal to '.vtk'.
    :cvar string outfilename_lattice_mod: name of the dumped file for the morphed lattice.
        The extension of the file is set automatically equal to '.vtk'.
    :cvar Tkinter.Tk root: main window object of the GUI.
    :cvar string print_geometry_path: geometry path to be printed close to the 'pick geometry' button.
    :cvar string print_parameter_path: parameters file path to be printed close to the 'pick parameters' button.
    :cvar Tkinter.Label label_geo: label related to 'print_geometry_path'.
    :cvar Tkinter.Label label_params: label related to 'print_parameters_path'.
    :cvar string url: url of the github page of PyGeM.
    :cvar Tkinter.Canvas logo_panel: canvas for PyGeM logo.
    :cvar Tkinter.PhotoImage img: PyGeM logo.
    :cvar Tkinter.Frame orig_geo_frame: frame for plotting of the original geometry.
    :cvar Tkinter.Frame mod_geo_frame: frame for plotting of the final geometry.
    """
    def __init__(self):
        self.root = Tkinter.Tk()
        # Fixed-size window: the layout below is designed for 1400x400.
        self.root.resizable(width=False, height=False)
        self.root.minsize(width=1400, height=400)
        self.root.maxsize(width=1400, height=400)
        self.root.title('PyGeM')
        self.filename_geometry = Tkinter.StringVar()
        self.filename_parameters = Tkinter.StringVar()
        self.check_var_1 = Tkinter.IntVar()
        self.check_var_2 = Tkinter.IntVar()
        self.outfilename = Tkinter.StringVar()
        self.outfilename_lattice_orig = Tkinter.StringVar()
        self.outfilename_lattice_mod = Tkinter.StringVar()
        self.print_geometry_path = Tkinter.StringVar()
        self.print_parameter_path = Tkinter.StringVar()
        # Widgets below are created lazily in main().
        self.label_geo = None
        self.label_params = None
        self.url = 'https://github.com/mathLab/PyGeM'
        self.logo_panel = None
        self.img = None
        self.orig_geo_frame = None
        self.mod_geo_frame = None
    def _chose_geometry(self):
        """
        The private method explores the file system and allows to select the wanted geometry.
        Up to now, you can select only IGES, OpenFOAM, STL, UNV or VTK geometry file.
        """
        self.filename_geometry = askopenfilename(filetypes=[("IGES File",('*.iges', '*.igs')), \
            ("OpenFOAM File",'*'),('STL File','*.stl'),('UNV File','*.unv'),('VTK File','*.vtk'),('All','*')])
        self.print_geometry_path.set(self.filename_geometry)
        # Turn the label green to signal a valid selection.
        self.label_geo.configure(fg='green')
    def _chose_parameters(self):
        """
        The private method explores the file system and allows to select the wanted parameters file.
        It visualizes only .prm files.
        """
        self.filename_parameters = askopenfilename(filetypes=[("Params File","*.prm")])
        self.print_parameter_path.set(self.filename_parameters)
        # Turn the label green to signal a valid selection.
        self.label_params.configure(fg='green')
    def _run_simulation(self):
        """
        The private method runs the geometrical morphing.
        """
        params = pg.params.FFDParameters()
        params.read_parameters(filename=self.filename_parameters)
        # Pick the geometry handler according to the input file extension.
        (__,file_extension_in) = os.path.splitext(self.filename_geometry)
        if file_extension_in == '.stl':
            geo_handler = pg.stlhandler.StlHandler()
        elif file_extension_in in ['.iges', '.igs']:
            geo_handler = pg.igeshandler.IgesHandler()
        elif file_extension_in == '.unv':
            geo_handler = pg.unvhandler.UnvHandler()
        elif file_extension_in == '':
            # No extension: treated as an OpenFOAM case.
            geo_handler = pg.openfhandler.OpenFoamHandler()
        elif file_extension_in == '.vtk':
            geo_handler = pg.vtkhandler.VtkHandler()
        else:
            raise NotImplementedError("Format not implemented yet")
        mesh_points = geo_handler.parse(self.filename_geometry)
        # Apply the free form deformation and write the morphed geometry
        # with the same extension as the input file.
        free_form = pg.freeform.FFD(params, mesh_points)
        free_form.perform()
        new_mesh_points = free_form.modified_mesh_points
        geo_handler.write(new_mesh_points, self.outfilename.get() + file_extension_in)
        # Optionally dump the original and/or morphed FFD lattice as VTK.
        if self.check_var_1.get() == 1:
            pg.utils.write_bounding_box(params, self.outfilename_lattice_orig.get() + '.vtk', False)
        if self.check_var_2.get() == 1:
            pg.utils.write_bounding_box(params, self.outfilename_lattice_mod.get() + '.vtk', True)
        # Plot input/output geometries for the formats whose handler
        # implements plotting.
        if file_extension_in in ['.vtk', '.stl', '.iges', '.igs']:
            figure_in = geo_handler.plot()
            figure_in.set_size_inches(4, 3)
            FigureCanvasTkAgg(figure_in, master=self.orig_geo_frame).get_tk_widget().grid(row=1, column=0, padx=5, pady=5)
            figure_out = geo_handler.plot(self.outfilename.get() + file_extension_in)
            figure_out.set_size_inches(4, 3)
            FigureCanvasTkAgg(figure_out, master=self.mod_geo_frame).get_tk_widget().grid(row=1, column=0, padx=5, pady=5)
    def _goto_website(self):
        """
        The private method opens the PyGeM main page on github.
        It is used for info about PyGeM in the menu.
        """
        webbrowser.open(self.url)
    def main(self):
        """
        The method initializes and visualizes the window.
        """
        # Bottom-right PyGeM logo.
        self.logo_panel = Tkinter.Canvas(self.root, height=60 , width=60)
        self.logo_panel.pack(side="bottom", padx=5, pady=5,anchor=Tkinter.SE)
        self.img = Tkinter.PhotoImage(master=self.logo_panel, file='readme/logo_PyGeM_gui.gif')
        self.logo_panel.create_image(35,35, image=self.img)
        # Left frame: plot of the original geometry.
        self.orig_geo_frame = Tkinter.Frame(self.root, height=450, width=360, bg='#c1d0f0')
        self.orig_geo_frame.pack(side="left", padx=5, pady=5)
        self.orig_geo_frame.pack_propagate(0)
        Tkinter.Label(self.orig_geo_frame, text="INPUT GEOMETRY", bg='#c1d0f0', font=("Arial", 20)).grid(row=0, column=0, padx=3, pady=3)
        # Right frame: plot of the morphed geometry.
        self.mod_geo_frame = Tkinter.Frame(self.root, height=450, width=360, bg='#80ff80', padx=5, pady=5)
        self.mod_geo_frame.pack(side="right", padx=5, pady=5)
        self.mod_geo_frame.pack_propagate(0)
        Tkinter.Label(self.mod_geo_frame, text="OUTPUT GEOMETRY", bg='#80ff80', font=("Arial", 20)).grid(row=0, column=0, padx=3, pady=3)
        # Central frame: controls for picking files and running the morphing.
        code_frame = Tkinter.Frame(self.root, height=490, width=360, relief=Tkinter.GROOVE, borderwidth=1)
        code_frame.pack(padx=5, pady=5)
        code_frame.pack_propagate(0)
        # Buttons 1
        Tkinter.Button(code_frame, text ="Pick the geometry", command = self._chose_geometry).grid(row=0, column=0, padx=3, pady=3)
        self.label_geo=Tkinter.Label(code_frame, textvariable=self.print_geometry_path, fg='red')
        self.print_geometry_path.set("No geometry chosen!")
        self.label_geo.grid(row=0, column=1, padx=3, pady=3)
        # Button 2
        Tkinter.Button(code_frame, text ="Pick the parameters", command = self._chose_parameters).grid(row=1, column=0, padx=3, pady=3)
        self.label_params = Tkinter.Label(code_frame, textvariable=self.print_parameter_path, fg='red')
        self.print_parameter_path.set("No parameters file chosen!")
        self.label_params.grid(row=1, column=1, padx=3, pady=3)
        # Entry
        Tkinter.Label(code_frame, text="Output geometry file").grid(row=2, column=0, padx=3, pady=3)
        Tkinter.Entry(code_frame, bd =5, textvariable=self.outfilename).grid(row=2, column=1, padx=3, pady=3)
        # Checkboxes
        Tkinter.Checkbutton(code_frame, text = "Dump Original FFD lattice", variable = self.check_var_1, \
            onvalue = 1, offvalue = 0, height=3, \
            width = 20).grid(row=3, column=0)
        Tkinter.Entry(code_frame, bd =5, textvariable=self.outfilename_lattice_orig).grid(row=3, column=1)
        Tkinter.Checkbutton(code_frame, text = "Dump Morphed FFD lattice", variable = self.check_var_2, \
            onvalue = 1, offvalue = 0, height=3, \
            width = 20).grid(row=4, column=0)
        Tkinter.Entry(code_frame, bd =5, textvariable=self.outfilename_lattice_mod).grid(row=4, column=1)
        # Run button
        Tkinter.Button(code_frame, text ="Run PyGeM", command = self._run_simulation, bg='#065893', fg='#f19625', \
            font='bold').grid(row=5, column=0, columnspan=2, padx=3, pady=3)
        # Menu
        menubar = Tkinter.Menu(self.root)
        helpmenu = Tkinter.Menu(menubar, tearoff=0)
        helpmenu.add_command(label="About...", command=self._goto_website)
        menubar.add_cascade(label="Help", menu=helpmenu)
        self.root.config(menu=menubar)
    def start(self):
        """Enter the Tkinter main event loop (blocks until the window closes)."""
        self.root.mainloop()
| fsalmoir/PyGeM | pygem/gui.py | Python | mit | 8,725 | [
"VTK"
] | a084582cc605a066b771b5d75ecdac97b908f95538d33ab9287703ea826ddcac |
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD
from collections import Sequence
import numpy as np
import scipy.sparse as sp
from .utils import check_arrays, array2d
from .utils import warn_if_not_float
from .utils.fixes import unique
from .base import BaseEstimator, TransformerMixin
from .utils.sparsefuncs import inplace_csr_row_normalize_l1
from .utils.sparsefuncs import inplace_csr_row_normalize_l2
from .utils.sparsefuncs import inplace_csr_column_scale
from .utils.sparsefuncs import mean_variance_axis0
# Public API of the preprocessing module.
__all__ = ['Binarizer',
           'KernelCenterer',
           'LabelBinarizer',
           'LabelEncoder',
           'Normalizer',
           'Scaler',
           'binarize',
           'normalize',
           'scale']
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std dev for centering, scaling
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
if isinstance(std_, np.ndarray):
std_[std_ == 0.0] = 1.0
elif std_ == 0.:
std_ = 1.
else:
std_ = None
return mean_, std_
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
    """Standardize a dataset along any axis
    Center to the mean and component wise scale to unit variance.
    Parameters
    ----------
    X : array-like or CSR matrix.
        The data to center and scale.
    axis : int (0 by default)
        axis used to compute the means and standard deviations along. If 0,
        independently standardize each feature, otherwise (if 1) standardize
        each sample.
    with_mean : boolean, True by default
        If True, center the data before scaling.
    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default is True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).
    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.
    Instead the caller is expected to either set explicitly
    `with_mean=False` (in that case, only variance scaling will be
    performed on the features of the CSR matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.
    To avoid memory copy the caller should pass a CSR matrix.
    See also
    --------
    :class:`sklearn.preprocessing.Scaler` to perform centering and
    scaling using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    if sp.issparse(X):
        # Centering a sparse matrix would densify it -- refuse (see Notes).
        if with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` instead"
                " See docstring for motivation and alternatives.")
        if axis != 0:
            raise ValueError("Can only scale sparse matrix on axis=0, "
                             " got axis=%d" % axis)
        warn_if_not_float(X, estimator='The scale function')
        if not sp.isspmatrix_csr(X):
            # tocsr() already allocates new storage, so a second copy is
            # unnecessary.
            X = X.tocsr()
            copy = False
        if copy:
            X = X.copy()
        _, var = mean_variance_axis0(X)
        # Zero-variance columns are left untouched (scale factor 1).
        var[var == 0.0] = 1.0
        inplace_csr_column_scale(X, 1 / np.sqrt(var))
    else:
        X = np.asarray(X)
        warn_if_not_float(X, estimator='The scale function')
        mean_, std_ = _mean_and_std(
            X, axis, with_mean=with_mean, with_std=with_std)
        if copy:
            X = X.copy()
        # Xr is a view on the original array that enables easy use of
        # broadcasting on the axis in which we are interested in
        Xr = np.rollaxis(X, axis)
        if with_mean:
            Xr -= mean_
        if with_std:
            Xr /= std_
    return X
class Scaler(BaseEstimator, TransformerMixin):
    """Standardize features by removing the mean and scaling to unit variance
    Centering and scaling happen indepently on each feature by computing
    the relevant statistics on the samples in the training set. Mean and
    standard deviation are then stored to be used on later data using the
    `transform` method.
    Standardization of a dataset is a common requirement for many
    machine learning estimators: they might behave badly if the
    individual feature do not more or less look like standard normally
    distributed data (e.g. Gaussian with 0 mean and unit variance).
    For instance many elements used in the objective function of
    a learning algorithm (such as the RBF kernel of Support Vector
    Machines or the L1 and L2 regularizers of linear models) assume that
    all features are centered around 0 and have variance in the same
    order. If a feature has a variance that is orders of magnitude larger
    that others, it might dominate the objective function and make the
    estimator unable to learn from other features correctly as expected.
    Parameters
    ----------
    with_mean : boolean, True by default
        If True, center the data before scaling.
    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default is True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).
    Attributes
    ----------
    `mean_` : array of floats with shape [n_features]
        The mean value for each feature in the training set.
    `std_` : array of floats with shape [n_features]
        The standard deviation for each feature in the training set.
    See also
    --------
    :func:`sklearn.preprocessing.scale` to perform centering and
    scaling without using the ``Transformer`` object oriented API
    :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
    to further remove the linear correlation across features.
    """
    def __init__(self, copy=True, with_mean=True, with_std=True):
        self.with_mean = with_mean
        self.with_std = with_std
        self.copy = copy
    def fit(self, X, y=None):
        """Compute the mean and std to be used for later scaling
        Parameters
        ----------
        X : array-like or CSR matrix with shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        """
        if sp.issparse(X):
            # Centering a sparse matrix would densify it, so only variance
            # scaling is supported on the sparse path.
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead See docstring for motivation and alternatives.")
            warn_if_not_float(X, estimator=self)
            copy = self.copy
            if not sp.isspmatrix_csr(X):
                # tocsr() already returns new storage; skip the extra copy.
                X = X.tocsr()
                copy = False
            if copy:
                X = X.copy()
            # No centering on the sparse path, hence no stored mean.
            self.mean_ = None
            _, var = mean_variance_axis0(X)
            self.std_ = np.sqrt(var)
            # Zero-variance features get std 1.0 so transform never
            # divides by zero.
            self.std_[var == 0.0] = 1.0
            return self
        else:
            X = np.asarray(X)
            warn_if_not_float(X, estimator=self)
            self.mean_, self.std_ = _mean_and_std(
                X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
            return self
    def transform(self, X, y=None, copy=None):
        """Perform standardization by centering and scaling
        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        copy = copy if copy is not None else self.copy
        if sp.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead See docstring for motivation and alternatives.")
            warn_if_not_float(X, estimator=self)
            if not sp.isspmatrix_csr(X):
                X = X.tocsr()
                copy = False
            if copy:
                X = X.copy()
            inplace_csr_column_scale(X, 1 / self.std_)
        else:
            X = np.asarray(X)
            warn_if_not_float(X, estimator=self)
            if copy:
                X = X.copy()
            if self.with_mean:
                X -= self.mean_
            if self.with_std:
                X /= self.std_
        return X
    def inverse_transform(self, X, copy=None):
        """Scale back the data to the original representation
        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        copy = copy if copy is not None else self.copy
        if sp.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead See docstring for motivation and alternatives.")
            if not sp.isspmatrix_csr(X):
                X = X.tocsr()
                copy = False
            if copy:
                X = X.copy()
            inplace_csr_column_scale(X, self.std_)
        else:
            X = np.asarray(X)
            if copy:
                X = X.copy()
            # Invert transform(): multiply back by std first, then re-add
            # the mean (reverse order of the forward pass).
            if self.with_std:
                X *= self.std_
            if self.with_mean:
                X += self.mean_
        return X
def normalize(X, norm='l2', axis=1, copy=True):
    """Scale each sample (axis=1) or feature (axis=0) of X to unit norm.
    Parameters
    ----------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        The data to normalize, element by element. scipy.sparse matrices
        should be in CSR format to avoid an un-necessary copy.
    norm : 'l1' or 'l2', optional ('l2' by default)
        The norm used for each non-zero sample (or non-zero feature when
        axis is 0).
    axis : 0 or 1, optional (1 by default)
        Axis along which to normalize: 1 normalizes each sample
        independently, 0 normalizes each feature.
    copy : boolean, optional, default is True
        Set to False to normalize in place without a copy (only possible
        when the input is already a numpy array or scipy.sparse CSR
        matrix and axis is 1).
    See also
    --------
    :class:`sklearn.preprocessing.Normalizer` to perform normalization
    using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    if norm not in ('l1', 'l2'):
        raise ValueError("'%s' is not a supported norm" % norm)
    if axis == 1:
        sparse_format = 'csr'
    elif axis == 0:
        sparse_format = 'csc'
    else:
        raise ValueError("'%d' is not a supported axis" % axis)
    X = check_arrays(X, sparse_format=sparse_format, copy=copy)[0]
    warn_if_not_float(X, 'The normalize function')
    if axis == 0:
        # Normalizing columns == normalizing the rows of the transpose.
        X = X.T
    if sp.issparse(X):
        row_normalizer = (inplace_csr_row_normalize_l1 if norm == 'l1'
                          else inplace_csr_row_normalize_l2)
        row_normalizer(X)
    else:
        if norm == 'l1':
            row_norms = np.abs(X).sum(axis=1)[:, np.newaxis]
        else:
            row_norms = np.sqrt(np.sum(X ** 2, axis=1))[:, np.newaxis]
        # All-zero rows are left untouched instead of dividing by zero.
        row_norms[row_norms == 0.0] = 1.0
        X /= row_norms
    if axis == 0:
        X = X.T
    return X
class Normalizer(BaseEstimator, TransformerMixin):
    """Scale each sample individually to unit l1 or l2 norm.
    Every row of the data matrix with at least one non-zero component
    is rescaled, independently of all other rows, so that its norm
    equals one.
    Works on dense numpy arrays as well as scipy.sparse matrices (use
    the CSR format to avoid a copy / conversion).
    Unit-norm scaling is a staple of text classification and clustering:
    the dot product of two l2-normalized TF-IDF vectors is their cosine
    similarity, the base similarity metric of the Vector Space Model
    used throughout Information Retrieval.
    Parameters
    ----------
    norm : 'l1' or 'l2', optional ('l2' by default)
        The norm to use for each non-zero sample.
    copy : boolean, optional, default is True
        Set to False to normalize in place without a copy (only possible
        when the input is already a numpy array or a scipy.sparse CSR
        matrix).
    Notes
    -----
    This estimator is stateless (besides constructor parameters); the
    fit method does nothing but is useful when used in a pipeline.
    See also
    --------
    :func:`sklearn.preprocessing.normalize` equivalent function
    without the object oriented API
    """
    def __init__(self, norm='l2', copy=True):
        self.norm = norm
        self.copy = copy
    def fit(self, X, y=None):
        """No-op; present only so the estimator fits the usual API
        and can therefore be used in pipelines.
        """
        return self
    def transform(self, X, y=None, copy=None):
        """Scale each non-zero row of X to unit norm.
        Parameters
        ----------
        X : array or scipy.sparse matrix with shape [n_samples, n_features]
            The data to normalize, row by row. scipy.sparse matrices
            should be in CSR format to avoid an un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
    """Set values of X above `threshold` to 1 and the rest to 0.
    Parameters
    ----------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        The data to binarize, element by element.
        scipy.sparse matrices should be in CSR format to avoid an
        un-necessary copy.
    threshold : float, optional (0.0 by default)
        Values strictly greater than this become 1.0; everything else
        becomes 0.0.
    copy : boolean, optional, default is True
        Set to False to binarize in place without a copy (only possible
        when the input is already a numpy array or a scipy.sparse CSR
        matrix and if axis is 1).
    See also
    --------
    :class:`sklearn.preprocessing.Binarizer` to perform binarization
    using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    X = check_arrays(X, sparse_format='csr', copy=copy)[0]
    # For a sparse input only the stored values are thresholded; for a
    # dense input the whole array is.
    values = X.data if sp.issparse(X) else X
    above = values > threshold
    values[above] = 1
    # FIXME: for sparse input, if enough values became 0 it may be worth
    # changing the sparsity structure
    values[np.logical_not(above)] = 0
    return X
class Binarizer(BaseEstimator, TransformerMixin):
    """Threshold data so every feature value becomes 0 or 1.
    With the default threshold of 0.0, any strictly positive value maps
    to 1.0 and zeros are left untouched.
    Binarization is a common operation on text count data where the
    analyst can decide to only consider the presence or absence of a
    feature rather than a quantified number of occurences for instance.
    It can also serve as a pre-processing step for estimators built on
    boolean random variables (e.g. modeled using the Bernoulli
    distribution in a Bayesian setting).
    Parameters
    ----------
    threshold : float, optional (0.0 by default)
        Values strictly greater than this are replaced by 1.0.
    copy : boolean, optional, default is True
        Set to False to binarize in place without a copy (only possible
        when the input is already a numpy array or a scipy.sparse CSR
        matrix).
    Notes
    -----
    For a sparse input, only the explicitly stored (non-zero) values are
    subject to update.
    This estimator is stateless (besides constructor parameters); the
    fit method does nothing but is useful when used in a pipeline.
    """
    def __init__(self, threshold=0.0, copy=True):
        self.threshold = threshold
        self.copy = copy
    def fit(self, X, y=None):
        """No-op; present only so the estimator fits the usual API
        and can therefore be used in pipelines.
        """
        return self
    def transform(self, X, y=None, copy=None):
        """Binarize each element of X.
        Parameters
        ----------
        X : array or scipy.sparse matrix with shape [n_samples, n_features]
            The data to binarize, element by element.
            scipy.sparse matrices should be in CSR format to avoid an
            un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        return binarize(X, threshold=self.threshold, copy=copy)
def _is_label_indicator_matrix(y):
return hasattr(y, "shape") and len(y.shape) == 2
def _is_multilabel(y):
# the explicit check for ndarray is for forward compatibility; future
# versions of Numpy might want to register ndarray as a Sequence
return not isinstance(y[0], np.ndarray) and isinstance(y[0], Sequence) \
and not isinstance(y[0], basestring) \
or _is_label_indicator_matrix(y)
class LabelEncoder(BaseEstimator, TransformerMixin):
    """Encode labels with value between 0 and n_classes-1.
    Attributes
    ----------
    `classes_`: array of shape [n_class]
        Holds the label for each class.
    Examples
    --------
    `LabelEncoder` can be used to normalize labels.
    >>> from sklearn import preprocessing
    >>> le = preprocessing.LabelEncoder()
    >>> le.fit([1, 2, 2, 6])
    LabelEncoder()
    >>> le.classes_
    array([1, 2, 6])
    >>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
    array([0, 0, 1, 2]...)
    >>> le.inverse_transform([0, 0, 1, 2])
    array([1, 1, 2, 6])
    It can also be used to transform non-numerical labels (as long as they are
    hashable and comparable) to numerical labels.
    >>> le = preprocessing.LabelEncoder()
    >>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
    LabelEncoder()
    >>> list(le.classes_)
    ['amsterdam', 'paris', 'tokyo']
    >>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
    array([2, 2, 1]...)
    >>> list(le.inverse_transform([2, 2, 1]))
    ['tokyo', 'tokyo', 'paris']
    """
    def _check_fitted(self):
        # Raise if fit()/fit_transform() has not populated classes_ yet.
        # BUGFIX: the message used to say "LabelNormalizer", which is not
        # this class's name and pointed users at a non-existent estimator.
        if not hasattr(self, "classes_"):
            raise ValueError("LabelEncoder was not fitted yet.")
    def fit(self, y):
        """Fit label encoder
        Parameters
        ----------
        y : array-like of shape [n_samples]
            Target values.
        Returns
        -------
        self : returns an instance of self.
        """
        # np.unique returns the sorted distinct labels; their positions
        # in classes_ define the integer codes.
        self.classes_ = np.unique(y)
        return self
    def fit_transform(self, y):
        """Fit label encoder and return encoded labels
        Parameters
        ----------
        y : array-like of shape [n_samples]
            Target values.
        Returns
        -------
        y : array-like of shape [n_samples]
        """
        # `unique` (from .utils.fixes) gives both the classes and the
        # inverse mapping in one pass.
        self.classes_, y = unique(y, return_inverse=True)
        return y
    def transform(self, y):
        """Transform labels to normalized encoding.
        Parameters
        ----------
        y : array-like of shape [n_samples]
            Target values.
        Returns
        -------
        y : array-like of shape [n_samples]
        Raises
        ------
        ValueError if `y` contains labels not seen during fit.
        """
        self._check_fitted()
        classes = np.unique(y)
        if len(np.intersect1d(classes, self.classes_)) < len(classes):
            diff = np.setdiff1d(classes, self.classes_)
            raise ValueError("y contains new labels: %s" % str(diff))
        # classes_ is sorted, so searchsorted recovers each label's code.
        return np.searchsorted(self.classes_, y)
    def inverse_transform(self, y):
        """Transform labels back to original encoding.
        Parameters
        ----------
        y : numpy array of shape [n_samples]
            Target values (integer codes).
        Returns
        -------
        y : numpy array of shape [n_samples]
        """
        self._check_fitted()
        y = np.asarray(y)
        return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
    """Binarize labels in a one-vs-all fashion
    Several regression and binary classification algorithms are
    available in the scikit. A simple way to extend these algorithms
    to the multi-class classification case is to use the so-called
    one-vs-all scheme.
    At learning time, this simply consists in learning one regressor
    or binary classifier per class. In doing so, one needs to convert
    multi-class labels to binary labels (belong or does not belong
    to the class). LabelBinarizer makes this process easy with the
    transform method.
    At prediction time, one assigns the class for which the corresponding
    model gave the greatest confidence. LabelBinarizer makes this easy
    with the inverse_transform method.
    Parameters
    ----------
    neg_label: int (default: 0)
        Value with which negative labels must be encoded.
    pos_label: int (default: 1)
        Value with which positive labels must be encoded.
    Attributes
    ----------
    `classes_`: array of shape [n_class]
        Holds the label for each class.
    Examples
    --------
    >>> from sklearn import preprocessing
    >>> lb = preprocessing.LabelBinarizer()
    >>> lb.fit([1, 2, 6, 4, 2])
    LabelBinarizer(neg_label=0, pos_label=1)
    >>> lb.classes_
    array([1, 2, 4, 6])
    >>> lb.transform([1, 6])
    array([[1, 0, 0, 0],
           [0, 0, 0, 1]])
    >>> lb.fit_transform([(1, 2), (3,)])
    array([[1, 1, 0],
           [0, 0, 1]])
    >>> lb.classes_
    array([1, 2, 3])
    """
    def __init__(self, neg_label=0, pos_label=1):
        # The two codes must be distinct and ordered so that thresholding
        # between them in inverse_transform is meaningful.
        if neg_label >= pos_label:
            raise ValueError("neg_label must be strictly less than pos_label.")
        self.neg_label = neg_label
        self.pos_label = pos_label
    def _check_fitted(self):
        # classes_ only exists after a successful fit().
        if not hasattr(self, "classes_"):
            raise ValueError("LabelBinarizer was not fitted yet.")
    def fit(self, y):
        """Fit label binarizer
        Parameters
        ----------
        y : numpy array of shape [n_samples] or sequence of sequences
            Target values. In the multilabel case the nested sequences can
            have variable lengths.
        Returns
        -------
        self : returns an instance of self.
        """
        # Remember the input format so transform/inverse_transform can
        # validate inputs and reproduce it.
        self.multilabel = _is_multilabel(y)
        if self.multilabel:
            self.indicator_matrix_ = _is_label_indicator_matrix(y)
            if self.indicator_matrix_:
                # Columns of the indicator matrix are the classes.
                self.classes_ = np.arange(y.shape[1])
            else:
                # Union of all labels seen across the label tuples.
                self.classes_ = np.array(sorted(set.union(*map(set, y))))
        else:
            self.classes_ = np.unique(y)
        return self
    def transform(self, y):
        """Transform multi-class labels to binary labels
        The output of transform is sometimes referred to by some authors as the
        1-of-K coding scheme.
        Parameters
        ----------
        y : numpy array of shape [n_samples] or sequence of sequences
            Target values. In the multilabel case the nested sequences can
            have variable lengths.
        Returns
        -------
        Y : numpy array of shape [n_samples, n_classes]
        """
        self._check_fitted()
        # NOTE(review): np.int is removed in modern NumPy; fine for the
        # NumPy versions this py2-era code targets.
        if self.multilabel or len(self.classes_) > 2:
            if _is_label_indicator_matrix(y):
                # nothing to do as y is already a label indicator matrix
                return y
            Y = np.zeros((len(y), len(self.classes_)), dtype=np.int)
        else:
            # Binary problem: a single output column suffices.
            Y = np.zeros((len(y), 1), dtype=np.int)
        Y += self.neg_label
        y_is_multilabel = _is_multilabel(y)
        if y_is_multilabel and not self.multilabel:
            raise ValueError("The object was not " +
                             "fitted with multilabel input!")
        elif self.multilabel:
            if not _is_multilabel(y):
                raise ValueError("y should be a list of label lists/tuples,"
                                 "got %r" % (y,))
            # inverse map: label => column index
            imap = dict((v, k) for k, v in enumerate(self.classes_))
            for i, label_tuple in enumerate(y):
                for label in label_tuple:
                    Y[i, imap[label]] = self.pos_label
            return Y
        else:
            y = np.asarray(y)
            if len(self.classes_) == 2:
                # Binary case: mark samples of the "positive" (second) class.
                Y[y == self.classes_[1], 0] = self.pos_label
                return Y
            elif len(self.classes_) >= 2:
                # One column per class (one-vs-all).
                for i, k in enumerate(self.classes_):
                    Y[y == k, i] = self.pos_label
                return Y
            else:
                # Only one class, returns a matrix with all negative labels.
                return Y
    def inverse_transform(self, Y, threshold=None):
        """Transform binary labels back to multi-class labels
        Parameters
        ----------
        Y : numpy array of shape [n_samples, n_classes]
            Target values.
        threshold : float or None
            Threshold used in the binary and multi-label cases.
            Use 0 when:
                - Y contains the output of decision_function (classifier)
            Use 0.5 when:
                - Y contains the output of predict_proba
            If None, the threshold is assumed to be half way between
            neg_label and pos_label.
        Returns
        -------
        y : numpy array of shape [n_samples] or sequence of sequences
            Target values. In the multilabel case the nested sequences can
            have variable lengths.
        Notes
        -----
        In the case when the binary labels are fractional
        (probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows to use the output of a
        linear model's decision_function method directly as the input
        of inverse_transform.
        """
        self._check_fitted()
        if threshold is None:
            # Midpoint between the two codes, e.g. 0.5 for (0, 1).
            half = (self.pos_label - self.neg_label) / 2.0
            threshold = self.neg_label + half
        if self.multilabel:
            Y = np.array(Y > threshold, dtype=int)
            # Return the predictions in the same format as in fit
            if self.indicator_matrix_:
                # Label indicator matrix format
                return Y
            else:
                # Lists of tuples format
                return [tuple(self.classes_[np.flatnonzero(Y[i])])
                        for i in range(Y.shape[0])]
        if len(Y.shape) == 1 or Y.shape[1] == 1:
            # Single-column (binary) case: threshold to 0/1 codes.
            y = np.array(Y.ravel() > threshold, dtype=int)
        else:
            # Multi-class: pick the highest-scoring column per sample.
            y = Y.argmax(axis=1)
        return self.classes_[y]
class KernelCenterer(BaseEstimator, TransformerMixin):
    """Center a kernel matrix.
    Equivalent to centering phi(X) with
    sklearn.preprocessing.Scaler(with_std=False), but performed entirely
    on the kernel matrix, without materializing phi(X).
    """
    def fit(self, K, y=None):
        """Store the column means and the grand mean of the training kernel.
        Parameters
        ----------
        K : numpy array of shape [n_samples, n_samples]
            Kernel matrix.
        Returns
        -------
        self : returns an instance of self.
        """
        K = array2d(K)
        n_samples = K.shape[0]
        self.K_fit_rows_ = K.sum(axis=0) / n_samples
        self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
        return self
    def transform(self, K, y=None, copy=True):
        """Center a kernel matrix against the fitted statistics.
        Parameters
        ----------
        K : numpy array of shape [n_samples1, n_samples2]
            Kernel matrix.
        Returns
        -------
        K_new : numpy array of shape [n_samples1, n_samples2]
        """
        K = array2d(K)
        if copy:
            K = K.copy()
        n_fit = self.K_fit_rows_.shape[0]
        col_means = (K.sum(axis=1) / n_fit)[:, np.newaxis]
        # Double centering: subtract row and column means, add back the
        # grand mean.
        K -= self.K_fit_rows_
        K -= col_means
        K += self.K_fit_all_
        return K
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/preprocessing.py | Python | agpl-3.0 | 29,089 | [
"Gaussian"
] | 20228e7a9e46f5f4990f6ad0b560cefe7702cd506357796c9ea22b4f0eaf0ccb |
import pysam
from readtagger.cluster import evidence_for
TEST_BAM = 'improve_counting_support.bam'
GOOD_BREAKPOINT = [13813889, 13813894]
WRONG_BREAKPOINT = 13813890
WRONG_BREAKPOINT_SEQS = {'ATATA', 'ATATA'}
GOOD_BREAKPOINT_SEQS = {'TAAGT', 'GTAAC'}
def test_evidence_for(datadir_copy):  # noqa: D103
    # (qname, is_read1, breakpoint index, expected evidence call)
    cases = [
        ('HISEQ:788:H2G5VBCX2:1:2109:14202:61591', False, 0, 'three_p'),
        ('HISEQ:788:H2G5VBCX2:1:2109:11125:90225', True, 1, 'five_p'),
        ('HISEQ:785:H2G75BCX2:2:1202:6302:39767', True, 0, False),
    ]
    for qname, is_read1, breakpoint, expected in cases:
        read = _get_read(datadir_copy, qname=qname, is_read1=is_read1)
        _breakpoint_and_sequence_combinations(read, breakpoint=breakpoint,
                                              should_be=expected)
def _get_read(datadir_copy, qname, is_read1):
    # Pull exactly one read (identified by name and mate) from the test BAM.
    bam_path = str(datadir_copy[TEST_BAM])
    with pysam.AlignmentFile(bam_path) as alignments:
        matches = [read for read in alignments
                   if read.query_name == qname and read.is_read1 == is_read1]
    assert len(matches) == 1, "Should have 1 read, but got %d reads" % len(matches)
    return matches[0]
def _breakpoint_and_sequence_combinations(read, breakpoint=0, should_be=False):
    # Wrong sequences at a good position and good sequences at a wrong
    # position must never count as evidence; only the good position with
    # the good sequences should yield `should_be`.
    good_position = GOOD_BREAKPOINT[breakpoint]
    assert evidence_for(read, {GOOD_BREAKPOINT[0]: WRONG_BREAKPOINT_SEQS}) is False, str(read)
    assert evidence_for(read, {WRONG_BREAKPOINT: GOOD_BREAKPOINT_SEQS}) is False, str(read)
    assert evidence_for(read, {good_position: GOOD_BREAKPOINT_SEQS}) == should_be, str(read)
| bardin-lab/readtagger | tests/test_evidence.py | Python | mit | 1,650 | [
"pysam"
] | 548043c2fa77e00cd5621b25ca313b122fc62a463a3bddd4b870fe7d17264f3a |
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unit tests for the schedule module.
from __future__ import absolute_import
from tests import util
import transitfeed
class MinimalWriteTestCase(util.TempFileTestCaseBase):
  """
  Construct a deliberately sparse feed (very few fields populated) and
  check that writing it to disk raises no exceptions.  Closely mirrors
  TransitFeedSampleCodeTestCase, but that test tracks the sample code
  and will no doubt change as the sample code is altered.
  """
  def runTest(self):
    schedule = transitfeed.Schedule()
    schedule.AddAgency("Sample Agency", "http://example.com",
                       "America/Los_Angeles")
    sample_route = transitfeed.Route()
    sample_route.route_id = "SAMPLE_ID"
    sample_route.route_type = 3
    sample_route.route_short_name = "66"
    sample_route.route_long_name = "Sample Route acute letter e\202"
    schedule.AddRouteObject(sample_route)
    weekday_service = transitfeed.ServicePeriod("WEEK")
    weekday_service.SetStartDate("20070101")
    weekday_service.SetEndDate("20071231")
    weekday_service.SetWeekdayService(True)
    schedule.AddServicePeriodObject(weekday_service)
    sample_trip = transitfeed.Trip()
    sample_trip.route_id = "SAMPLE_ID"
    sample_trip.service_period = weekday_service
    sample_trip.trip_id = "SAMPLE_TRIP"
    schedule.AddTripObject(sample_trip)
    # Two stops visited five minutes apart.
    first_stop = transitfeed.Stop()
    first_stop.stop_id = "STOP1"
    first_stop.stop_name = u'Stop 1 acute letter e\202'
    first_stop.stop_lat = 78.243587
    first_stop.stop_lon = 32.258937
    schedule.AddStopObject(first_stop)
    sample_trip.AddStopTime(first_stop, arrival_time="12:00:00",
                            departure_time="12:00:00")
    second_stop = transitfeed.Stop()
    second_stop.stop_id = "STOP2"
    second_stop.stop_name = "Stop 2"
    second_stop.stop_lat = 78.253587
    second_stop.stop_lon = 32.258937
    schedule.AddStopObject(second_stop)
    sample_trip.AddStopTime(second_stop, arrival_time="12:05:00",
                            departure_time="12:05:00")
    schedule.Validate()
    schedule.WriteGoogleTransitFeed(self.tempfilepath)
class ScheduleBuilderTestCase(util.TempFileTestCaseBase):
  """Tests for using a Schedule object to build a GTFS file."""
  def testBuildFeedWithUtf8Names(self):
    """Non-ASCII agency/stop/route/trip names survive a write/read cycle."""
    problems = util.GetTestFailureProblemReporter(self)
    schedule = transitfeed.Schedule(problem_reporter=problems)
    schedule.AddAgency("\xc8\x8b Fly Agency", "http://iflyagency.com",
                       "America/Los_Angeles")
    service_period = schedule.GetDefaultServicePeriod()
    service_period.SetDateHasService('20070101')
    # "u020b i with inverted accent breve" encoded in utf-8
    stop1 = schedule.AddStop(lng=140, lat=48.2, name="\xc8\x8b hub")
    # "u020b i with inverted accent breve" as unicode string
    stop2 = schedule.AddStop(lng=140.001, lat=48.201,
                             name=u"remote \u020b station")
    route = schedule.AddRoute(u"\u03b2", "Beta", "Bus")
    trip = route.AddTrip(schedule, u"to remote \u020b station")
    # repr() must not raise on objects holding non-ASCII text.
    repr(stop1)
    repr(stop2)
    repr(route)
    repr(trip)
    trip.AddStopTime(stop1, schedule=schedule, stop_time='10:00:00')
    trip.AddStopTime(stop2, stop_time='10:10:00')
    schedule.Validate(problems)
    schedule.WriteGoogleTransitFeed(self.tempfilepath)
    read_schedule = \
        transitfeed.Loader(self.tempfilepath, problems=problems,
                           extra_validation=True).Load()
    self.assertEquals(u'\u020b Fly Agency',
                      read_schedule.GetDefaultAgency().agency_name)
    self.assertEquals(u'\u03b2',
                      read_schedule.GetRoute(route.route_id).route_short_name)
    self.assertEquals(u'to remote \u020b station',
                      read_schedule.GetTrip(trip.trip_id).trip_headsign)
  def testBuildSimpleFeed(self):
    """Make a very simple feed using the Schedule class."""
    problems = util.GetTestFailureProblemReporter(self, ("ExpirationDate",
                                                         "NoServiceExceptions"))
    schedule = transitfeed.Schedule(problem_reporter=problems)
    schedule.AddAgency("Test Agency", "http://example.com",
                       "America/Los_Angeles")
    service_period = schedule.GetDefaultServicePeriod()
    self.assertTrue(service_period.service_id)
    service_period.SetWeekdayService(has_service=True)
    service_period.SetStartDate("20070320")
    service_period.SetEndDate("20071231")
    stop1 = schedule.AddStop(lng=-140.12, lat=48.921,
                             name="one forty at forty eight")
    stop2 = schedule.AddStop(lng=-140.22, lat=48.421, name="west and south")
    stop3 = schedule.AddStop(lng=-140.32, lat=48.121, name="more away")
    stop4 = schedule.AddStop(lng=-140.42, lat=48.021, name="more more away")
    route = schedule.AddRoute(short_name="R", long_name="My Route",
                              route_type="Bus")
    self.assertTrue(route.route_id)
    self.assertEqual(route.route_short_name, "R")
    self.assertEqual(route.route_type, 3)
    trip = route.AddTrip(schedule, headsign="To The End",
                         service_period=service_period)
    trip_id = trip.trip_id
    self.assertTrue(trip_id)
    trip = schedule.GetTrip(trip_id)
    self.assertEqual("To The End", trip.trip_headsign)
    self.assertEqual(service_period, trip.service_period)
    # Mix of arrival_secs/departure_secs, bare stops and HH:MM:SS strings.
    trip.AddStopTime(stop=stop1, arrival_secs=3600*8, departure_secs=3600*8)
    trip.AddStopTime(stop=stop2)
    trip.AddStopTime(stop=stop3, arrival_secs=3600*8 + 60*60,
                     departure_secs=3600*8 + 60*60)
    trip.AddStopTime(stop=stop4, arrival_time="9:13:00",
                     departure_secs=3600*8 + 60*103, stop_headsign="Last stop",
                     pickup_type=1, drop_off_type=3)
    schedule.Validate()
    schedule.WriteGoogleTransitFeed(self.tempfilepath)
    read_schedule = \
        transitfeed.Loader(self.tempfilepath, problems=problems,
                           extra_validation=True).Load()
    self.assertEqual(4, len(read_schedule.GetTrip(trip_id).GetTimeStops()))
    self.assertEqual(1, len(read_schedule.GetRouteList()))
    self.assertEqual(4, len(read_schedule.GetStopList()))
  def testStopIdConflict(self):
    """Explicit and auto-assigned stop_ids must not collide."""
    problems = util.GetTestFailureProblemReporter(self)
    schedule = transitfeed.Schedule(problem_reporter=problems)
    schedule.AddStop(lat=3, lng=4.1, name="stop1", stop_id="1")
    schedule.AddStop(lat=3, lng=4.0, name="stop0", stop_id="0")
    schedule.AddStop(lat=3, lng=4.2, name="stop2")
    schedule.AddStop(lat=3, lng=4.2, name="stop4", stop_id="4")
    # AddStop will try to use stop_id=4 first but it is taken
    schedule.AddStop(lat=3, lng=4.2, name="stop5")
    stop_list = sorted(schedule.GetStopList(), key=lambda s: s.stop_name)
    self.assertEqual("stop0 stop1 stop2 stop4 stop5",
                     " ".join([s.stop_name for s in stop_list]))
    self.assertMatchesRegex(r"0 1 2 4 \d{7,9}",
                            " ".join(s.stop_id for s in stop_list))
  def testRouteIdConflict(self):
    """Explicit and auto-assigned route_ids must not collide."""
    problems = util.GetTestFailureProblemReporter(self)
    schedule = transitfeed.Schedule(problem_reporter=problems)
    route0 = schedule.AddRoute("0", "Long Name", "Bus")
    route1 = schedule.AddRoute("1", "", "Bus", route_id="1")
    route3 = schedule.AddRoute("3", "", "Bus", route_id="3")
    route_rand = schedule.AddRoute("R", "LNR", "Bus")
    route4 = schedule.AddRoute("4", "GooCar", "Bus")
    route_list = schedule.GetRouteList()
    route_list.sort(key=lambda r: r.route_short_name)
    self.assertEqual("0 1 3 4 R",
                     " ".join(r.route_short_name for r in route_list))
    self.assertMatchesRegex("0 1 3 4 \d{7,9}",
                            " ".join(r.route_id for r in route_list))
    self.assertEqual("Long Name,,,GooCar,LNR",
                     ",".join(r.route_long_name for r in route_list))
  def testTripIdConflict(self):
    """Explicit and auto-assigned trip_ids must not collide."""
    problems = util.GetTestFailureProblemReporter(self)
    schedule = transitfeed.Schedule(problem_reporter=problems)
    service_period = schedule.GetDefaultServicePeriod()
    service_period.SetDateHasService("20070101")
    route = schedule.AddRoute("0", "Long Name", "Bus")
    route.AddTrip()
    route.AddTrip(schedule=schedule, headsign="hs1",
                  service_period=service_period, trip_id="1")
    route.AddTrip(schedule, "hs2", service_period, "2")
    route.AddTrip(trip_id="4")
    route.AddTrip()  # This will be given a random trip_id
    trip_list = sorted(schedule.GetTripList(), key=lambda t: int(t.trip_id))
    self.assertMatchesRegex("0 1 2 4 \d{7,9}",
                            " ".join(t.trip_id for t in trip_list))
    self.assertEqual(",hs1,hs2,,",
                     ",".join(t["trip_headsign"] for t in trip_list))
    for t in trip_list:
      self.assertEqual(service_period.service_id, t.service_id)
      self.assertEqual(route.route_id, t.route_id)
class WriteSampleFeedTestCase(util.TempFileTestCaseBase):
    """Round-trip test: build the sample feed in memory, write it with
    WriteGoogleTransitFeed, reload it with Loader, and verify that every
    entity survives unchanged -- including unknown columns such as
    agency_mission and stop_sound, which must be preserved."""

    def assertEqualTimeString(self, a, b):
        """Assert that a and b are equal, even if they don't have the same zero
        padding on the hour. IE 08:45:00 vs 8:45:00."""
        if a[1] == ':':
            a = '0' + a
        if b[1] == ':':
            b = '0' + b
        self.assertEqual(a, b)

    def assertEqualWithDefault(self, a, b, default):
        """Assert that a and b are equal. Treat None and default as equal."""
        if a == b:
            return
        if a in (None, default) and b in (None, default):
            return
        self.assertTrue(False, "a=%s b=%s" % (a, b))

    def runTest(self):
        """Build schedule -> write feed -> reload -> compare every entity."""
        accumulator = util.RecordingProblemAccumulator(
            self, ignore_types=("ExpirationDate",))
        problems = transitfeed.ProblemReporter(accumulator)
        schedule = transitfeed.Schedule(problem_reporter=problems)
        agency = transitfeed.Agency()
        agency.agency_id = "DTA"
        agency.agency_name = "Demo Transit Authority"
        agency.agency_url = "http://google.com"
        agency.agency_timezone = "America/Los_Angeles"
        agency.agency_lang = 'en'
        # Test that unknown columns, such as agency_mission, are preserved
        agency.agency_mission = "Get You There"
        schedule.AddAgencyObject(agency)
        # Routes: (route_id, agency_id, short_name, long_name, route_type)
        routes = []
        route_data = [
            ("AB", "DTA", "10", "Airport - Bullfrog", 3),
            ("BFC", "DTA", "20", "Bullfrog - Furnace Creek Resort", 3),
            ("STBA", "DTA", "30", "Stagecoach - Airport Shuttle", 3),
            ("CITY", "DTA", "40", "City", 3),
            ("AAMV", "DTA", "50", "Airport - Amargosa Valley", 3)
        ]
        for route_entry in route_data:
            route = transitfeed.Route()
            (route.route_id, route.agency_id, route.route_short_name,
             route.route_long_name, route.route_type) = route_entry
            routes.append(route)
            schedule.AddRouteObject(route)
        # One shape (lat, lon points) used by trip BFC1
        shape_data = [
            (36.915760, -116.751709),
            (36.905018, -116.763206),
            (36.902134, -116.777969),
            (36.904091, -116.788185),
            (36.883602, -116.814537),
            (36.874523, -116.795593),
            (36.873302, -116.786491),
            (36.869202, -116.784241),
            (36.868515, -116.784729),
        ]
        shape = transitfeed.Shape("BFC1S")
        for (lat, lon) in shape_data:
            shape.AddPoint(lat, lon)
        schedule.AddShapeObject(shape)
        # Service periods: full week (with one exception day) and weekend-only
        week_period = transitfeed.ServicePeriod()
        week_period.service_id = "FULLW"
        week_period.start_date = "20070101"
        week_period.end_date = "20071231"
        week_period.SetWeekdayService()
        week_period.SetWeekendService()
        week_period.SetDateHasService("20070604", False)
        schedule.AddServicePeriodObject(week_period)
        weekend_period = transitfeed.ServicePeriod()
        weekend_period.service_id = "WE"
        weekend_period.start_date = "20070101"
        weekend_period.end_date = "20071231"
        weekend_period.SetWeekendService()
        schedule.AddServicePeriodObject(weekend_period)
        # Stops: (stop_id, name, lat, lon, zone_id, stop_code)
        stops = []
        stop_data = [
            ("FUR_CREEK_RES", "Furnace Creek Resort (Demo)",
             36.425288, -117.133162, "zone-a", "1234"),
            ("BEATTY_AIRPORT", "Nye County Airport (Demo)",
             36.868446, -116.784682, "zone-a", "1235"),
            ("BULLFROG", "Bullfrog (Demo)", 36.88108, -116.81797, "zone-b", "1236"),
            ("STAGECOACH", "Stagecoach Hotel & Casino (Demo)",
             36.915682, -116.751677, "zone-c", "1237"),
            ("NADAV", "North Ave / D Ave N (Demo)", 36.914893, -116.76821, "", ""),
            ("NANAA", "North Ave / N A Ave (Demo)", 36.914944, -116.761472, "", ""),
            ("DADAN", "Doing AVe / D Ave N (Demo)", 36.909489, -116.768242, "", ""),
            ("EMSI", "E Main St / S Irving St (Demo)",
             36.905697, -116.76218, "", ""),
            ("AMV", "Amargosa Valley (Demo)", 36.641496, -116.40094, "", ""),
        ]
        for stop_entry in stop_data:
            stop = transitfeed.Stop()
            (stop.stop_id, stop.stop_name, stop.stop_lat, stop.stop_lon,
             stop.zone_id, stop.stop_code) = stop_entry
            schedule.AddStopObject(stop)
            stops.append(stop)
        # Add a value to an unknown column and make sure it is preserved
        schedule.GetStop("BULLFROG").stop_sound = "croak!"
        # Trips: (route_id, service_id, trip_id, headsign, direction, block, shape)
        trip_data = [
            ("AB", "FULLW", "AB1", "to Bullfrog", "0", "1", None),
            ("AB", "FULLW", "AB2", "to Airport", "1", "2", None),
            ("STBA", "FULLW", "STBA", "Shuttle", None, None, None),
            ("CITY", "FULLW", "CITY1", None, "0", None, None),
            ("CITY", "FULLW", "CITY2", None, "1", None, None),
            ("BFC", "FULLW", "BFC1", "to Furnace Creek Resort", "0", "1", "BFC1S"),
            ("BFC", "FULLW", "BFC2", "to Bullfrog", "1", "2", None),
            ("AAMV", "WE", "AAMV1", "to Amargosa Valley", "0", None, None),
            ("AAMV", "WE", "AAMV2", "to Airport", "1", None, None),
            ("AAMV", "WE", "AAMV3", "to Amargosa Valley", "0", None, None),
            ("AAMV", "WE", "AAMV4", "to Airport", "1", None, None),
        ]
        trips = []
        for trip_entry in trip_data:
            trip = transitfeed.Trip()
            (trip.route_id, trip.service_id, trip.trip_id, trip.trip_headsign,
             trip.direction_id, trip.block_id, trip.shape_id) = trip_entry
            trips.append(trip)
            schedule.AddTripObject(trip)
        # Stop times per trip: (arrival, departure, stop_id, shape_dist,
        # pickup_type, drop_off_type, stop_headsign)
        stop_time_data = {
            "STBA": [("6:00:00", "6:00:00", "STAGECOACH", None, None, None, None),
                     ("6:20:00", "6:20:00", "BEATTY_AIRPORT", None, None, None, None)],
            "CITY1": [("6:00:00", "6:00:00", "STAGECOACH", 1.34, 0, 0, "stop 1"),
                      ("6:05:00", "6:07:00", "NANAA", 2.40, 1, 2, "stop 2"),
                      ("6:12:00", "6:14:00", "NADAV", 3.0, 2, 2, "stop 3"),
                      ("6:19:00", "6:21:00", "DADAN", 4, 2, 2, "stop 4"),
                      ("6:26:00", "6:28:00", "EMSI", 5.78, 2, 3, "stop 5")],
            "CITY2": [("6:28:00", "6:28:00", "EMSI", None, None, None, None),
                      ("6:35:00", "6:37:00", "DADAN", None, None, None, None),
                      ("6:42:00", "6:44:00", "NADAV", None, None, None, None),
                      ("6:49:00", "6:51:00", "NANAA", None, None, None, None),
                      ("6:56:00", "6:58:00", "STAGECOACH", None, None, None, None)],
            "AB1": [("8:00:00", "8:00:00", "BEATTY_AIRPORT", None, None, None, None),
                    ("8:10:00", "8:15:00", "BULLFROG", None, None, None, None)],
            "AB2": [("12:05:00", "12:05:00", "BULLFROG", None, None, None, None),
                    ("12:15:00", "12:15:00", "BEATTY_AIRPORT", None, None, None, None)],
            "BFC1": [("8:20:00", "8:20:00", "BULLFROG", None, None, None, None),
                     ("9:20:00", "9:20:00", "FUR_CREEK_RES", None, None, None, None)],
            "BFC2": [("11:00:00", "11:00:00", "FUR_CREEK_RES", None, None, None, None),
                     ("12:00:00", "12:00:00", "BULLFROG", None, None, None, None)],
            "AAMV1": [("8:00:00", "8:00:00", "BEATTY_AIRPORT", None, None, None, None),
                      ("9:00:00", "9:00:00", "AMV", None, None, None, None)],
            "AAMV2": [("10:00:00", "10:00:00", "AMV", None, None, None, None),
                      ("11:00:00", "11:00:00", "BEATTY_AIRPORT", None, None, None, None)],
            "AAMV3": [("13:00:00", "13:00:00", "BEATTY_AIRPORT", None, None, None, None),
                      ("14:00:00", "14:00:00", "AMV", None, None, None, None)],
            "AAMV4": [("15:00:00", "15:00:00", "AMV", None, None, None, None),
                      ("16:00:00", "16:00:00", "BEATTY_AIRPORT", None, None, None, None)],
        }
        for trip_id, stop_time_list in stop_time_data.items():
            for stop_time_entry in stop_time_list:
                (arrival_time, departure_time, stop_id, shape_dist_traveled,
                 pickup_type, drop_off_type, stop_headsign) = stop_time_entry
                trip = schedule.GetTrip(trip_id)
                stop = schedule.GetStop(stop_id)
                trip.AddStopTime(stop, arrival_time=arrival_time,
                                 departure_time=departure_time,
                                 shape_dist_traveled=shape_dist_traveled,
                                 pickup_type=pickup_type, drop_off_type=drop_off_type,
                                 stop_headsign=stop_headsign)
        self.assertEqual(0, schedule.GetTrip("CITY1").GetStopTimes()[0].pickup_type)
        self.assertEqual(1, schedule.GetTrip("CITY1").GetStopTimes()[1].pickup_type)
        # Frequency-based (headway) service: (trip_id, start, end, headway_secs)
        headway_data = [
            ("STBA", "6:00:00", "22:00:00", 1800),
            ("CITY1", "6:00:00", "7:59:59", 1800),
            ("CITY2", "6:00:00", "7:59:59", 1800),
            ("CITY1", "8:00:00", "9:59:59", 600),
            ("CITY2", "8:00:00", "9:59:59", 600),
            ("CITY1", "10:00:00", "15:59:59", 1800),
            ("CITY2", "10:00:00", "15:59:59", 1800),
            ("CITY1", "16:00:00", "18:59:59", 600),
            ("CITY2", "16:00:00", "18:59:59", 600),
            ("CITY1", "19:00:00", "22:00:00", 1800),
            ("CITY2", "19:00:00", "22:00:00", 1800),
        ]
        headway_trips = {}
        for headway_entry in headway_data:
            (trip_id, start_time, end_time, headway) = headway_entry
            headway_trips[trip_id] = []  # adding to set to check later
            trip = schedule.GetTrip(trip_id)
            trip.AddFrequency(start_time, end_time, headway, 0, problems)
        for trip_id in headway_trips:
            headway_trips[trip_id] = \
                schedule.GetTrip(trip_id).GetFrequencyTuples()
        # Fares and fare rules
        fare_data = [
            ("p", 1.25, "USD", 0, 0),
            ("a", 5.25, "USD", 0, 0),
        ]
        fares = []
        for fare_entry in fare_data:
            fare = transitfeed.FareAttribute(fare_entry[0], fare_entry[1],
                                             fare_entry[2], fare_entry[3],
                                             fare_entry[4])
            fares.append(fare)
            schedule.AddFareAttributeObject(fare)
        fare_rule_data = [
            ("p", "AB", "zone-a", "zone-b", None),
            ("p", "STBA", "zone-a", None, "zone-c"),
            ("p", "BFC", None, "zone-b", "zone-a"),
            ("a", "AAMV", None, None, None),
        ]
        for fare_id, route_id, orig_id, dest_id, contains_id in fare_rule_data:
            rule = transitfeed.FareRule(
                fare_id=fare_id, route_id=route_id, origin_id=orig_id,
                destination_id=dest_id, contains_id=contains_id)
            schedule.AddFareRuleObject(rule, problems)
        schedule.Validate(problems)
        accumulator.AssertNoMoreExceptions()
        # Write the feed, then load it back with extra validation enabled.
        schedule.WriteGoogleTransitFeed(self.tempfilepath)
        read_schedule = \
            transitfeed.Loader(self.tempfilepath, problems=problems,
                               extra_validation=True).Load()
        # The unknown columns must be reported exactly once each ...
        e = accumulator.PopException("UnrecognizedColumn")
        self.assertEqual(e.file_name, "agency.txt")
        self.assertEqual(e.column_name, "agency_mission")
        e = accumulator.PopException("UnrecognizedColumn")
        self.assertEqual(e.file_name, "stops.txt")
        self.assertEqual(e.column_name, "stop_sound")
        accumulator.AssertNoMoreExceptions()
        # ... and every entity must round-trip unchanged.
        self.assertEqual(1, len(read_schedule.GetAgencyList()))
        self.assertEqual(agency, read_schedule.GetAgency(agency.agency_id))
        self.assertEqual(len(routes), len(read_schedule.GetRouteList()))
        for route in routes:
            self.assertEqual(route, read_schedule.GetRoute(route.route_id))
        self.assertEqual(2, len(read_schedule.GetServicePeriodList()))
        self.assertEqual(week_period,
                         read_schedule.GetServicePeriod(week_period.service_id))
        self.assertEqual(weekend_period,
                         read_schedule.GetServicePeriod(weekend_period.service_id))
        self.assertEqual(len(stops), len(read_schedule.GetStopList()))
        for stop in stops:
            self.assertEqual(stop, read_schedule.GetStop(stop.stop_id))
        self.assertEqual("croak!", read_schedule.GetStop("BULLFROG").stop_sound)
        self.assertEqual(len(trips), len(read_schedule.GetTripList()))
        for trip in trips:
            self.assertEqual(trip, read_schedule.GetTrip(trip.trip_id))
        for trip_id in headway_trips:
            self.assertEqual(headway_trips[trip_id],
                             read_schedule.GetTrip(trip_id).GetFrequencyTuples())
        for trip_id, stop_time_list in stop_time_data.items():
            trip = read_schedule.GetTrip(trip_id)
            read_stoptimes = trip.GetStopTimes()
            self.assertEqual(len(read_stoptimes), len(stop_time_list))
            for stop_time_entry, read_stoptime in zip(stop_time_list, read_stoptimes):
                (arrival_time, departure_time, stop_id, shape_dist_traveled,
                 pickup_type, drop_off_type, stop_headsign) = stop_time_entry
                self.assertEqual(stop_id, read_stoptime.stop_id)
                self.assertEqual(read_schedule.GetStop(stop_id), read_stoptime.stop)
                self.assertEqualTimeString(arrival_time, read_stoptime.arrival_time)
                self.assertEqualTimeString(departure_time, read_stoptime.departure_time)
                self.assertEqual(shape_dist_traveled, read_stoptime.shape_dist_traveled)
                self.assertEqualWithDefault(pickup_type, read_stoptime.pickup_type, 0)
                self.assertEqualWithDefault(drop_off_type, read_stoptime.drop_off_type, 0)
                self.assertEqualWithDefault(stop_headsign, read_stoptime.stop_headsign, '')
        self.assertEqual(len(fares), len(read_schedule.GetFareAttributeList()))
        for fare in fares:
            self.assertEqual(fare, read_schedule.GetFareAttribute(fare.fare_id))
        read_fare_rules_data = []
        for fare in read_schedule.GetFareAttributeList():
            for rule in fare.GetFareRuleList():
                self.assertEqual(fare.fare_id, rule.fare_id)
                read_fare_rules_data.append((fare.fare_id, rule.route_id,
                                             rule.origin_id, rule.destination_id,
                                             rule.contains_id))
        fare_rule_data.sort()
        read_fare_rules_data.sort()
        self.assertEqual(len(read_fare_rules_data), len(fare_rule_data))
        for rf, f in zip(read_fare_rules_data, fare_rule_data):
            self.assertEqual(rf, f)
        self.assertEqual(1, len(read_schedule.GetShapeList()))
        self.assertEqual(shape, read_schedule.GetShape(shape.shape_id))
| zarnold/transitfeed | tests/transitfeed/testschedule_write.py | Python | apache-2.0 | 23,071 | [
"CASINO"
] | 89db0e4ae6d34c9aa072d5eb7f7d9d4d5a26361e9b06f6b3b5b52f0305096893 |
#!/usr/bin/python
"""
gene_server v0.01
- a xmlrpc server providing a storage/query service for the GA trade system
Copyright 2011 Brian Monkaba
This file is part of ga-bitbot.
ga-bitbot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ga-bitbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>.
"""
#
# gene server
# - a xmlrpc server providing a storage/query/configuration/monitoring service for the GA trade system
#
import gene_server_config
__server__ = gene_server_config.__server__
__port__ = gene_server_config.__port__
__path__ = "/gene"
import sys
import time
import json
import hashlib
import SocketServer
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
from operator import itemgetter, attrgetter
from copy import deepcopy
from threading import Lock
import paths
import call_metrics
quit = 0  # set to 1 by shutdown() to break the serve loop in __main__
MAX_PID_MESSAGE_BUFFER_SIZE = 255  # cap on each pid's message buffer (see pid_msg)
AUTO_BACKUP_AFTER_N_SAVES = 60  # save_db() refreshes the .bak copy every N saves
max_len = 600  # prune limit for each quartile's high-score pool (put_gene)
max_bobs = 1000  # prune limit for each quartile's best-of-breed pool (put_bob)
# The default group is set by the first client connection.
g_default_group_set = False
g_undefined_gene_def_hash = '0db45d2a4141101bdfe48e3314cfbca3' #precalculated md5 hash of the UNDEFINED gene_def config.
g_default_group_gene_def_hash = g_undefined_gene_def_hash
# Template configuration cloned (deepcopy) for every new gene_def group.
g_gene_conf = {'gene_def_hash':g_undefined_gene_def_hash,'gene_def':'UNDEFINED','gene_high_scores':[[],[],[],[]],'gene_best':[[],[],[],[]],'g_trgt':json.dumps({'buy':0}),'g_active_quartile':0,'trade_enabled':0,'trade_priority':0}
g_gene_library = {'0db45d2a4141101bdfe48e3314cfbca3':deepcopy(g_gene_conf)} #default library starts with the default UNDEFINED group.
g_signed_package_library = {}  # package_name -> signed package dict (see put_signed_package)
g_save_counter = 0  # counts save_db() calls to trigger the periodic backup
g_trgt = json.dumps({'buy':0})  # module-wide trade target (legacy; groups carry their own)
g_active_quartile = 0  # module-wide active quartile (see get/put_active_quartile)
g_d = [[],[],[],[]] #default group high scores - quartiles 1-4
g_bobs = [[],[],[],[]] #default group best of the best - quartiles 1-4
g_pids = {}  # pid -> {'watchdog_reset', 'msg_buffer', 'gene_def_hash'}
def echo(msg):
    """Round-trip *msg* unchanged; used as a connectivity/debug probe."""
    result = msg
    return result
@call_metrics.call_metrics
def put_signed_package(package):
    """Decode a JSON-encoded signed package and store it under its
    'package_name' key.  Returns "OK", or "NOK" when decoding fails."""
    global g_signed_package_library
    try:
        decoded = json.loads(package)
    except:
        return "NOK"
    g_signed_package_library[decoded['package_name']] = decoded
    return "OK"
@call_metrics.call_metrics
def get_signed_package(package_name):
    """Return the named signed package as a JSON string, or "NOK" if unknown."""
    global g_signed_package_library
    if package_name not in g_signed_package_library:
        return "NOK"
    return json.dumps(g_signed_package_library[package_name])
@call_metrics.call_metrics
def check_signed_package(package_name,MD5):
    """
    used by clients to check that local packages
    are in synch with the signed package library

    Returns "OK" on a digest match, "NOK" on a mismatch and "NA" when
    the package is not in the library.
    """
    global g_signed_package_library
    entry = g_signed_package_library.get(package_name)
    if entry is None:
        return "NA"
    return "OK" if entry['MD5'] == MD5 else "NOK"
@call_metrics.call_metrics
def trade_enable(state,gdh):
    """
    sets the trade_enable state
    state = 0, disable trading
    state = 1, enable trading

    gdh selects the gene group (gene_def hash) to modify.
    Returns "OK" on success.  NOTE(review): the failure paths return
    tuples such as ("NOK : state", state) rather than plain strings --
    presumably intentional for debugging over XML-RPC; confirm callers
    only ever compare against "OK".
    """
    global g_gene_library
    try:
        state = int(state)
    except:
        return "NOK : state",state
    if not (state==1 or state==0):
        return "NOK : val"
    if not g_gene_library.has_key(gdh):
        return "NOK : gdh",gdh
    g_gene_library[gdh]['trade_enabled'] = state
    return "OK"
@call_metrics.call_metrics
def trade_priority(priority,gdh):
    """
    sets the trade priority
    priority = 0, highest priority
    .
    .
    priority = 9, lowest priority

    gdh selects the gene group (gene_def hash) to modify.
    Returns "OK" on success.  NOTE(review): as in trade_enable, the
    failure paths return tuples rather than plain strings.
    """
    global g_gene_library
    try:
        priority = int(priority)
    except:
        return "NOK:priority",priority
    if (priority < 0 or priority > 9):
        return "NOK:val",priority
    if not g_gene_library.has_key(gdh):
        return "NOK:gdh",gdh
    g_gene_library[gdh]['trade_priority'] = priority
    return "OK"
@call_metrics.call_metrics
def put_target(target,pid=None):
    """Store the trade-target JSON string for the caller's gene group."""
    global g_trgt
    global g_gene_library
    group = g_gene_library[get_pid_gene_def_hash(pid)]
    group['g_trgt'] = target
    return "OK"
@call_metrics.call_metrics
def get_target(pid=None):
    """Return the trade-target JSON string for the caller's gene group."""
    global g_trgt
    global g_gene_library
    return g_gene_library[get_pid_gene_def_hash(pid)]['g_trgt']
@call_metrics.call_metrics
def put_active_quartile(quartile,pid=None):
    """Record the active quartile for the caller's gene group.

    NOTE(review): g_active_quartile is assigned unconditionally below,
    which makes the later default-group check redundant -- every group's
    update leaks into the module-wide value.  Looks unintentional;
    confirm before relying on per-group quartiles.
    """
    global g_active_quartile
    global g_gene_library
    g_active_quartile = quartile
    gdh = get_pid_gene_def_hash(pid)
    g_gene_library[gdh]['g_active_quartile'] = quartile
    if gdh == g_default_group_gene_def_hash:
        g_active_quartile = quartile
    return "OK"
@call_metrics.call_metrics
def get_active_quartile(pid=None):
    """Return the module-wide active quartile.

    NOTE(review): the per-group lookup is commented out below, so the
    pid/group resolution currently has no effect on the returned value.
    """
    global g_active_quartile
    global g_gene_library
    gdh = get_pid_gene_def_hash(pid)
    #return g_gene_library[gdh]['g_active_quartile']
    return g_active_quartile
@call_metrics.call_metrics
def get_gene(n_sec,quartile,pid = None):
    """Return (as JSON) the best-scoring gene seen within the last n_sec
    seconds for the caller's gene group.

    Searches both the high-score and best-of-breed pools; when nothing
    is recent enough, falls back to the most recently submitted
    high-score record.  Returns {} when the high-score pool is empty.
    NOTE(review): recent bob-only hits are discarded when the
    high-score pool is empty (the elif guard only checks high scores) --
    confirm whether that is intended.
    """
    global g_d
    global g_bobs
    global g_gene_library
    gdh = get_pid_gene_def_hash(pid)
    t = time.time() - n_sec  # cutoff timestamp
    #get the highest score calculated within the last n seconds
    #or return the latest if none are found.
    r = []
    #collect the high scoring records
    for a_d in g_gene_library[gdh]['gene_high_scores'][quartile - 1]:
        if a_d['time'] > t:
            r.append(a_d)
    #collect the bob records
    for a_d in g_gene_library[gdh]['gene_best'][quartile - 1]:
        if a_d['time'] > t:
            r.append(a_d)
    #if no records found, grab the most recent
    if len(r) == 0 and len(g_gene_library[gdh]['gene_high_scores'][quartile - 1]) > 0:
        r = sorted(g_gene_library[gdh]['gene_high_scores'][quartile - 1], key=itemgetter('time'),reverse = True)[0]
        print "get",r['time'],r['score']
    elif len(g_gene_library[gdh]['gene_high_scores'][quartile - 1]) > 0:
        #if more than one record found find the highest scoring one
        r = sorted(r, key=itemgetter('score'),reverse = True)[0]
        print "get",r['time'],r['score']
    else:
        r = {}
    return json.dumps(r)
@call_metrics.call_metrics
def get_all_genes(quartile,pid = None):
    """Return every high-score record for the quartile, sorted by
    ascending score, as a JSON list."""
    global g_d
    global g_gene_library
    pool = g_gene_library[get_pid_gene_def_hash(pid)]['gene_high_scores'][quartile - 1]
    ordered = sorted(pool, key=itemgetter('score'))
    return json.dumps(ordered)
@call_metrics.call_metrics
def get_bobs(quartile,pid = None):
    """Return every best-of-breed record for the quartile, sorted by
    ascending score, as a JSON list."""
    global g_bobs
    global g_gene_library
    pool = g_gene_library[get_pid_gene_def_hash(pid)]['gene_best'][quartile - 1]
    ordered = sorted(pool, key=itemgetter('score'))
    return json.dumps(ordered)
@call_metrics.call_metrics
def put_gene(d,quartile,pid = None):
    """Submit a JSON gene record (needs 'gene' and 'score' keys) to the
    high-score pool of the caller's gene group.

    A duplicate gene replaces its previous record; valid records are
    timestamped, kept sorted by descending score and pruned to max_len
    entries.  If the gene is also in the bob pool, that copy is
    refreshed via put_bob().  The magic score -987654321.12346 marks an
    invalid evaluation and is dropped.
    """
    global g_d
    global g_bobs
    global g_gene_library
    gdh = get_pid_gene_def_hash(pid)
    #dictionary must have two key values, time & score
    #add the record and sort the dictionary list
    d = json.loads(d)
    if any(adict['gene'] == d['gene'] for adict in g_gene_library[gdh]['gene_high_scores'][quartile - 1]):
        print "put_gene: duplicate gene detected"
        for i in xrange(len(g_gene_library[gdh]['gene_high_scores'][quartile - 1])):
            if g_gene_library[gdh]['gene_high_scores'][quartile - 1][i]['gene'] == d['gene']:
                print "put_gene: removing previous record"
                g_gene_library[gdh]['gene_high_scores'][quartile - 1].pop(i)
                break
    if d['score'] != -987654321.12346:
        #timestamp the gene submission
        d['time'] = time.time()
        g_gene_library[gdh]['gene_high_scores'][quartile - 1].append(d)
        g_gene_library[gdh]['gene_high_scores'][quartile - 1] = sorted(g_gene_library[gdh]['gene_high_scores'][quartile - 1], key=itemgetter('score'),reverse = True)
        print "put",d['time'],d['score']
        #prune the dictionary list
        if len(g_gene_library[gdh]['gene_high_scores'][quartile - 1]) > max_len:
            g_gene_library[gdh]['gene_high_scores'][quartile - 1] = g_gene_library[gdh]['gene_high_scores'][quartile - 1][:max_len]
        #update the bob dict if needed.
        if any(adict['gene'] == d['gene'] for adict in g_gene_library[gdh]['gene_best'][quartile - 1]):
            print "put_gene: BOB gene detected"
            #update the gene
            put_bob(json.dumps(d),quartile,pid)
    return "OK"
@call_metrics.call_metrics
def put_gene_buffered(d_buffer,quartile,pid = None):
    """Submit a batch of gene records; each entry is forwarded to put_gene()."""
    for record in d_buffer:
        put_gene(record, quartile, pid)
    return "OK"
@call_metrics.call_metrics
def put_bob(d,quartile,pid = None):
    """Submit a JSON gene record to the best-of-breed (bob) pool of the
    caller's gene group.

    Same contract as put_gene(): a duplicate gene replaces its previous
    record; valid records are timestamped, kept sorted by descending
    score and pruned to max_bobs entries; the magic score
    -987654321.12346 marks an invalid evaluation and is dropped.
    """
    global g_bobs
    global g_gene_library
    gdh = get_pid_gene_def_hash(pid)
    #dictionary must have two key values, time & score
    #add the record and sort the dictionary list
    d = json.loads(d)
    if any(adict['gene'] == d['gene'] for adict in g_gene_library[gdh]['gene_best'][quartile - 1]):
        print "put_bob: duplicate gene detected"
        for i in xrange(len(g_gene_library[gdh]['gene_best'][quartile - 1])):
            if g_gene_library[gdh]['gene_best'][quartile - 1][i]['gene'] == d['gene']:
                print "put_bob: removing previous record"
                g_gene_library[gdh]['gene_best'][quartile - 1].pop(i)
                break
    if d['score'] != -987654321.12346:
        #timestamp the gene submission
        d['time'] = time.time()
        g_gene_library[gdh]['gene_best'][quartile - 1].append(d)
        g_gene_library[gdh]['gene_best'][quartile - 1] = sorted(g_gene_library[gdh]['gene_best'][quartile - 1], key=itemgetter('score'),reverse = True)
        print "put bob",d['time'],d['score']
        #prune the dictionary list
        if len(g_gene_library[gdh]['gene_best'][quartile - 1]) > max_bobs:
            g_gene_library[gdh]['gene_best'][quartile - 1] = g_gene_library[gdh]['gene_best'][quartile - 1][:max_bobs]
    return "OK"
#remote process services
@call_metrics.call_metrics
def pid_register_gene_def(pid,gene_def):
    """Register a gene_def config for *pid* and return its md5 hash.

    A new library entry (cloned from the g_gene_conf template) is
    created the first time a given gene_def text is seen; the pid is
    then bound to that entry via pid_register_client().
    """
    global g_pids
    global g_gene_library
    global g_gene_conf
    #calc the hash of gene_def
    conf_hash = hashlib.md5(gene_def).hexdigest()
    if conf_hash in g_gene_library.keys():
        #gene_def already exists
        pass
    else:
        gc = deepcopy(g_gene_conf)
        gc['gene_def_hash'] = conf_hash
        gc['gene_def'] = gene_def
        g_gene_library.update({conf_hash:gc})
    pid_register_client(pid,conf_hash)
    return conf_hash
@call_metrics.call_metrics
def pid_register_client(pid,gene_def_hash):
    """Bind *pid* to an already-registered gene_def hash.

    The first registration server-wide also fixes the default group.
    Returns "OK", or "NOK:HASH NOT FOUND:<hash>" for an unknown hash.
    """
    global g_pids
    global g_gene_library
    global g_default_group_gene_def_hash
    global g_default_group_set
    print pid,gene_def_hash
    if gene_def_hash in g_gene_library.keys():
        #the first registered client sets the default group
        if g_default_group_set == False:
            g_default_group_set = True
            g_default_group_gene_def_hash = gene_def_hash
        pid_alive(pid)  # ensure the pid record exists / reset its watchdog
        g_pids[pid].update({'gene_def_hash':gene_def_hash})
        return "OK"
    return "NOK:HASH NOT FOUND:"+gene_def_hash
@call_metrics.call_metrics
def pid_alive(pid):
    """Watchdog ping: refresh an existing pid or register a new one.

    New pids start with an empty message buffer and are bound to the
    UNDEFINED gene group; the very first pid seen also fixes the
    server-wide default group.
    """
    global g_pids
    global g_undefined_gene_def_hash
    global g_default_group_gene_def_hash
    global g_default_group_set
    #pid ping (watchdog reset)
    if pid in g_pids.keys(): #existing pid
        g_pids[pid]['watchdog_reset'] = time.time()
    else: #new pid
        g_pids.update({pid:{'watchdog_reset':time.time(),'msg_buffer':[],'gene_def_hash':None}})
        pid_register_gene_def(pid,"UNDEFINED") #g_undefined_gene_def_hash
        if g_default_group_set == False:
            g_default_group_set = True
            g_default_group_gene_def_hash = g_undefined_gene_def_hash
    return "OK"
@call_metrics.call_metrics
def pid_check(pid,time_out):
    """Return "OK" when *pid* is known and has pinged within *time_out*
    seconds; "NOK" otherwise (unknown pid or watchdog expired)."""
    global g_pids
    if pid not in g_pids:
        return "NOK"
    elapsed = time.time() - g_pids[pid]['watchdog_reset']
    return "NOK" if elapsed > time_out else "OK"
@call_metrics.call_metrics
def pid_remove(pid):
    """Forget a registered process id.

    Removing an unknown pid is a no-op; always returns "OK".
    """
    global g_pids
    # dict.pop with a default replaces the former bare try/except, which
    # silently swallowed *every* exception (including KeyboardInterrupt).
    g_pids.pop(pid, None)
    return "OK"
@call_metrics.call_metrics
def pid_msg(pid,msg):
    """Prepend *msg* to the pid's message buffer (newest first).

    The buffer is capped at MAX_PID_MESSAGE_BUFFER_SIZE entries; the
    oldest message is dropped when the cap is exceeded.  Returns "NOK"
    for an unknown pid, "OK" otherwise.
    """
    global g_pids
    if pid not in g_pids:
        return "NOK"
    buf = g_pids[pid]['msg_buffer']
    buf.insert(0, msg)
    if len(buf) > MAX_PID_MESSAGE_BUFFER_SIZE:
        g_pids[pid]['msg_buffer'] = buf[:-1]
    return "OK"
@call_metrics.call_metrics
def pid_list(ping_seconds=9999999):
    """Return a JSON list of pids whose watchdog fired within *ping_seconds*."""
    global g_pids
    alive = [pid for pid in g_pids.keys()
             if pid_check(pid, ping_seconds) == "OK"]
    return json.dumps(alive)
@call_metrics.call_metrics
def get_pids():
    """Return the full pid table (watchdog times, message buffers, group
    hashes) as a JSON object."""
    global g_pids
    js_pids = json.dumps(g_pids)
    #clear the message buffers
    #for pid in g_pids.keys():
    #    g_pids[pid]['msg_buffer'] = ''
    return js_pids
def get_pid_gene_def_hash(pid):
    """Map a pid to its registered gene_def hash.

    A pid of None selects the UNDEFINED default group; an unknown pid
    yields the "NOK:PID_NOT_FOUND" sentinel string.
    """
    global g_pids
    global g_undefined_gene_def_hash
    if pid is None:
        return g_undefined_gene_def_hash
    if pid in g_pids:
        return g_pids[pid]['gene_def_hash']
    return "NOK:PID_NOT_FOUND"
@call_metrics.call_metrics
def get_default_gene_def_hash():
    """Return the default group's gene_def hash, JSON encoded."""
    global g_default_group_gene_def_hash
    encoded = json.dumps(g_default_group_gene_def_hash)
    return encoded
@call_metrics.call_metrics
def get_gene_def_hash_list():
    """Return all registered gene_def hashes as a JSON list."""
    global g_gene_library
    return json.dumps(list(g_gene_library.keys()))
@call_metrics.call_metrics
def get_gene_def(gene_def_hash):
    """Return the raw gene_def text for *gene_def_hash*, or the JSON
    string 'NOK:NOT_FOUND' when the hash is unknown."""
    global g_gene_library
    if gene_def_hash not in g_gene_library:
        return json.dumps('NOK:NOT_FOUND')
    return g_gene_library[gene_def_hash]['gene_def']
@call_metrics.call_metrics
def set_default_gene_def_hash(gd_hash):
    """Make *gd_hash* the server-wide default gene group.

    Only takes effect when the hash is already registered (get_gene_def
    does not answer with an 'NOK:' sentinel).  Returns the JSON-encoded
    hash either way.
    """
    global g_default_group_gene_def_hash
    # bug fix: without this declaration the assignment below created a
    # function-local variable and the module flag was never updated
    global g_default_group_set
    if get_gene_def(gd_hash).find('NOK:') < 0:
        g_default_group_set = True
        g_default_group_gene_def_hash = gd_hash
    return json.dumps(gd_hash)
#system services
def shutdown():
    """Persist the gene db and raise the quit flag so the serve loop in
    __main__ exits after the current request.  Returns 1."""
    global quit
    quit = 1
    save_db()
    return 1
@call_metrics.call_metrics
def get_db():
    """Serialize the entire in-memory gene library to a JSON string."""
    global g_gene_library
    snapshot = json.dumps(g_gene_library)
    return snapshot
@call_metrics.call_metrics
def get_db_stripped():
    """Return the gene library as JSON with the bulky fields (gene_def
    text, high-score and bob pools) removed -- a lightweight
    status/monitoring view."""
    global g_gene_library
    #sdb = deepcopy(g_gene_library)
    #for key in sdb:
    #    sdb[key].pop('gene_def')
    #    sdb[key].pop('gene_high_scores')
    #    sdb[key].pop('gene_best')
    #return json.dumps(sdb)
    strip_list = ['gene_def','gene_high_scores','gene_best']
    sdbl = {}
    for db_key in g_gene_library:
        sdb = {}
        for item_key in g_gene_library[db_key]:
            if not item_key in strip_list:
                sdb.update({item_key:g_gene_library[db_key][item_key]})
        sdbl.update({db_key:sdb})
    return json.dumps(sdbl)
@call_metrics.call_metrics
def save_db():
    """Serialize the gene library (with the signed package library
    temporarily embedded) to ./config/gene_server_db_library.json.

    Every AUTO_BACKUP_AFTER_N_SAVES-th call also refreshes the .bak
    copy.  Returns 'OK'.
    """
    global AUTO_BACKUP_AFTER_N_SAVES
    global g_save_counter
    global g_gene_library
    global g_signed_package_library
    g_save_counter += 1
    if g_save_counter == AUTO_BACKUP_AFTER_N_SAVES:
        g_save_counter = 0
        backup = True
    else:
        backup = False
    #embed the signed package library into the gene library
    g_gene_library.update({'signed_package_library':g_signed_package_library})
    db_json = json.dumps(g_gene_library)  # serialize once for both files
    if backup:
        f = open('./config/gene_server_db_library.json.bak','w')
        f.write(db_json)
        f.close()
    f = open('./config/gene_server_db_library.json','w')
    f.write(db_json)
    f.close()  # bug fix: the main db handle was never closed/flushed
    #pop the signed package library back out of the gene library
    g_gene_library.pop('signed_package_library')
    return 'OK'
@call_metrics.call_metrics
def reload_db():
    """Load persisted gene data back into the server.

    Handles three generations of on-disk formats:
      1. legacy per-quartile files, which are migrated into the new
         single-library format and then deleted,
      2. the current gene_server_db_library.json,
      3. its .bak fallback.
    Re-extracts the embedded signed package library and backfills the
    trade_enabled/trade_priority fields missing from older dbs.
    Returns "OK" on success, "NOK" when no db could be loaded.
    """
    global g_gene_library
    global g_signed_package_library
    import os
    reload_error = False
    #save the gene db before shut down
    print "reloading stored gene data into server..."
    #
    # migrate any old style db archives from old db format into the new format...delete the old files once migrated
    #
    for quartile in [1,2,3,4]:
        try:
            f = open('./config/gene_server_db_backup_quartile' + str(quartile) + '.json','r')
            d = json.loads(f.read())
            f.close()
            for g in d['bobs']:
                put_bob(json.dumps(g),quartile)
            for g in d['high_scores']:
                put_gene(json.dumps(g),quartile)
            reload_error = True #force load the backup too
            save_db() #save using the new format
            #delete the old format files once loaded.
            os.remove('./config/gene_server_db_backup_quartile' + str(quartile) + '.json')
        except:
            reload_error = True
    #migrate the backups too...
    if reload_error == True:
        for quartile in [1,2,3,4]:
            try:
                f = open('./config/gene_server_db_backup_quartile' + str(quartile) + '.json.bak','r')
                d = json.loads(f.read())
                f.close()
                for g in d['bobs']:
                    put_bob(json.dumps(g),quartile)
                for g in d['high_scores']:
                    put_gene(json.dumps(g),quartile)
                save_db() #save using the new format
                #delete the old format files once loaded.
                os.remove('./config/gene_server_db_backup_quartile' + str(quartile) + '.json.bak')
            except:
                reload_error = True
                pass
    #try to load new db archive format
    try:
        f = open('./config/gene_server_db_library.json','r')
        g_gene_library = json.loads(f.read())
        f.close()
        reload_error = False
    except:
        reload_error = True
    if reload_error == True:
        try:
            f = open('./config/gene_server_db_library.json.bak','r')
            g_gene_library = json.loads(f.read())
            f.close()
            reload_error = False
        except:
            reload_error = True
    if reload_error == True:
        return "NOK"
    #extract the signed code library if one's available
    if g_gene_library.has_key('signed_package_library'):
        g_signed_package_library = g_gene_library.pop('signed_package_library')
    #upgrade old db format to include new records
    for key in g_gene_library.keys():
        if g_gene_library[key].has_key('trade_enabled') == False:
            g_gene_library[key].update({'trade_enabled':0})
        if g_gene_library[key].has_key('trade_priority') == False:
            g_gene_library[key].update({'trade_priority':0})
    return "OK"
def get_gene_server_metrics():
    """Return the per-call metrics collected by the call_metrics
    decorator, JSON encoded."""
    metrics = call_metrics.get_metrics()
    return json.dumps(metrics)
#set the service url
class RequestHandler(SimpleXMLRPCRequestHandler):
    """Restrict XML-RPC requests to the /gene and /RPC2 URL paths."""
    rpc_paths = ('/gene','/RPC2')
# Threaded mix-in
class AsyncXMLRPCServer(SocketServer.ThreadingMixIn,SimpleXMLRPCServer):
    """XML-RPC server that accepts connections on threads but serializes
    request handling behind a single lock: the registered handlers
    mutate shared module-level state and are not thread safe."""
    def __init__(self, *args, **kwargs):
        SimpleXMLRPCServer.__init__(self, *args, **kwargs)
        self.lock = Lock()  # one request at a time
    def process_request_thread(self, request, client_address):
        # Blatant copy of SocketServer.ThreadingMixIn, but we need a single threaded handling of the request
        self.lock.acquire()
        try:
            self.finish_request(request, client_address)
            self.shutdown_request(request)
        except:
            self.handle_error(request, client_address)
            self.shutdown_request(request)
        finally:
            self.lock.release()
#create the server
server = AsyncXMLRPCServer((__server__, __port__),requestHandler = RequestHandler,logRequests = False, allow_none = True)
#register the functions
#client services
server.register_function(put_signed_package,'put_signed_package')
server.register_function(get_signed_package,'get_signed_package')
server.register_function(check_signed_package,'check_signed_package')
server.register_function(trade_enable,'trade_enable')
server.register_function(trade_priority,'trade_priority')
server.register_function(get_target,'get_target')
server.register_function(put_target,'put_target')
server.register_function(get_active_quartile,'get_active_quartile')
server.register_function(put_active_quartile,'put_active_quartile')
server.register_function(get_gene,'get')
server.register_function(get_all_genes,'get_all')
server.register_function(put_gene,'put')
server.register_function(put_bob,'put_bob')
server.register_function(get_bobs,'get_bobs')
server.register_function(get_gene_def_hash_list,'get_gene_def_hash_list')
server.register_function(get_default_gene_def_hash,'get_default_gene_def_hash')
server.register_function(get_gene_def,'get_gene_def')
server.register_function(get_pid_gene_def_hash,'get_pid_gene_def_hash')
server.register_function(set_default_gene_def_hash,'set_default_gene_def_hash')
#process & monitoring services
server.register_function(pid_register_gene_def,'pid_register_gene_def')
server.register_function(pid_register_client,'pid_register_client')
server.register_function(pid_alive,'pid_alive')
server.register_function(pid_check,'pid_check')
server.register_function(pid_remove,'pid_remove')
server.register_function(pid_remove,'pid_exit')  # 'pid_exit' is an alias for pid_remove
server.register_function(pid_msg,'pid_msg')
server.register_function(get_pids,'get_pids')
server.register_function(pid_list,'pid_list')
#debug services
server.register_function(echo,'echo')
#system services
server.register_function(shutdown,'shutdown')
server.register_function(reload_db,'reload')
server.register_function(save_db,'save')
server.register_function(get_db,'get_db')
server.register_function(get_db_stripped,'get_db_stripped')
server.register_function(get_gene_server_metrics,'get_gene_server_metrics')
server.register_multicall_functions()
server.register_function(put_gene,'mc_put')  # multicall-friendly alias for put_gene
server.register_introspection_functions()
if __name__ == "__main__":
    print "gene_server: running on port %s"%__port__
    # serve one request at a time until shutdown() raises the quit flag
    while not quit:
        server.handle_request()
| stahn/ga-bitbot | gene_server.py | Python | gpl-3.0 | 23,193 | [
"Brian"
] | 73121898ae09e867f0d6f554de90486ea6a56456d0ea150f76380291ce73942b |
# $Id$
#
# Copyright (C) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import copy
# Base URL for the NCBI Entrez E-utilities web services
urlBase="http://eutils.ncbi.nlm.nih.gov/entrez/eutils"
# ESearch endpoint: run a text query, retrieve matching record IDs
searchBase=urlBase+"/esearch.fcgi"
# ESummary endpoint: fetch document summaries for a list of IDs
summaryBase=urlBase+"/esummary.fcgi"
# EFetch endpoint: fetch full records for a list of IDs
fetchBase=urlBase+"/efetch.fcgi"
# ELink endpoint: find related/linked records
linkBase=urlBase+"/elink.fcgi"
# for links to pubmed web pages:
queryBase="http://www.ncbi.nlm.nih.gov/entrez/query.fcgi"
# Default CGI parameters sent along with every E-utilities request.
_details = {
  'db':'pubmed',
  'retmode':'xml',
  'tool':'RDPMTools',
  'email':'Info@RationalDiscovery.com',
  }
def details():
  """Return a fresh shallow copy of the default query parameters.

  Callers may freely mutate the returned dict without affecting the
  module-wide defaults.
  """
  return dict(_details)
# FIX: Allow PMID searches
# Map of human-readable field name -> (PubMed search-field tag, description).
# The tags (AU, MH, TI, ...) are the bracketed field qualifiers accepted by
# PubMed/E-utilities queries.
searchableFields={
  "Author":("AU","Authors' Name "),
  "Keyword":("MH","MeSH keyword"),
  "Substance":("NM","Substance Name"),
  "Title":("TI","Text from the article title"),
  "Title/Abstract":("TIAB","Text from the article title and/or abstract"),
  "Registry Number":("RN","CAS Registry Number"),
  "Subject":("SB","Pubmed Subject Subset (tox,aids,cancer,bioethics,cam,history,space,systematic)"),
  "Journal":("TA","Journal Name"),
  "Year":("DP","Publication Date"),
  "Affiliation":("AD","Authors' affiliation"),
  }
# Presentation/iteration order for the entries of searchableFields above.
searchableFieldsOrder=[
  "Author",
  "Keyword",
  "Title",
  "Title/Abstract",
  "Substance",
  "Registry Number",
  "Subject",
  "Journal",
  "Year",
  "Affiliation",
  ]
| rdkit/rdkit-orig | rdkit/Dbase/Pubmed/QueryParams.py | Python | bsd-3-clause | 1,462 | [
"RDKit"
] | 2934936b45b422247a7a2685c352b9e72f043d0bb35bf6b4643edee42eb8601a |
#!/usr/bin/env python
import os
import numpy as np
import matplotlib.pyplot as plt
# --- User-tunable switches: each flag enables one of the figures below ---
compare = True # flag to plot the comparison plot between Lammps dump file and values computed by sim using the Lammps traj
doPairWrite = True # flag to plot comparison between sim and Lammps pair_write
doParseLog = True # flag to plot data parsed from input LD file supplied by user. Used to verify if the parsing is correct
plot_force = True # flag to plot the force (derivative of the LD potential)
def plot1():
    """
    Plot the soft-sphere and local-density (LD) energies computed by sim from
    the Lammps trajectory, and compare the resulting total energy against the
    total reported in the Lammps dump file.

    Reads 'compare.txt' (skipping its one header line); does nothing when the
    comparison is disabled or the file is absent.
    """
    if not (compare and os.path.isfile('compare.txt')):
        return
    table = np.loadtxt('compare.txt', skiprows = 1)
    # Sort rows by separation distance so curves are drawn left to right.
    table = table[np.argsort(table[:, 0])]
    dist = table[:, 0]
    ene_soft = table[:, 2]
    ene_local = table[:, 3]
    ene_total = table[:, 4]
    ene_dump = table[:, 5]
    plt.figure(figsize = (10, 10))
    axis = plt.subplot(221)
    axis.set_xlabel('r (LJ units)', fontsize = 20)
    axis.set_ylabel('Energy (kT)', fontsize = 20)
    axis.plot(dist, ene_soft, linewidth = 3)
    axis.set_title('Ene_SS with sim using Lammps traj')
    axis = plt.subplot(222)
    axis.set_xlabel('r (LJ units)', fontsize = 20)
    axis.set_ylabel('Energy (kT)', fontsize = 20)
    axis.plot(dist, ene_local, linewidth = 3)
    axis.set_title('Ene_LD with sim using Lammps traj')
    # Overlay the two total-energy curves for a direct visual comparison.
    axis = plt.subplot(223)
    axis.set_xlabel('r (LJ units)', fontsize = 20)
    axis.set_ylabel('Energy (kT)', fontsize = 20)
    axis.plot(dist, ene_total, linewidth = 1, linestyle = 'solid', color = 'red', label = 'Total PE sim')
    axis.plot(dist, ene_dump, linewidth = 1, marker = 'o', markersize = 4, markerfacecolor = 'None', linestyle = 'None', color = 'blue', label = 'Total PE Lammps')
    axis.legend(loc = 4)
def plot2():
    """
    Compare the LD potential (and optionally its force) tabulated by sim
    against the table produced by the Lammps pair_write command.

    Reads 'LD_potential_sim.txt' and 'LD_potential_lammps.txt'; does nothing
    when the comparison is disabled or either file is missing.
    """
    if not doPairWrite:
        return
    if not (os.path.isfile('LD_potential_sim.txt') and os.path.isfile('LD_potential_lammps.txt')):
        return
    sim_tab = np.loadtxt('LD_potential_sim.txt')
    # The pair_write output carries a 5-line header.
    lmp_tab = np.loadtxt('LD_potential_lammps.txt', skiprows = 5)
    plt.figure(figsize = (10, 10))
    ax = plt.subplot(211)
    ax.set_xlabel('r(LJ units)', fontsize = 20)
    ax.set_ylabel(r'$U_{LD} (r) \mathrm{kT}$', fontsize = 20)
    ax.plot(sim_tab[:, 0], sim_tab[:, 1], linestyle = 'solid', linewidth = 3, color = 'green', label = 'Energy - Sim')
    if plot_force:
        ax.plot(sim_tab[:, 0], sim_tab[:, 2], linestyle = 'solid', linewidth = 3, color = 'red', label = 'Force - Sim')
    ax.legend(loc = 4, prop = {'size' : 12})
    ax = plt.subplot(212)
    ax.set_xlabel('r(LJ units)', fontsize = 20)
    ax.set_ylabel(r'$U_{LD} (r) \mathrm{kT}$', fontsize = 20)
    ax.plot(lmp_tab[:, 1], lmp_tab[:, 2], linewidth = 3, color = 'green', label = 'Energy - Lammps (pair_write)')
    if plot_force:
        ax.plot(lmp_tab[:, 1], lmp_tab[:, 3], linewidth = 3, color = 'red', label = 'Force-Lammps(pair_write)')
    ax.legend(loc = 4, prop = {'size': 12})
def plot3():
    """
    -----------------Test of the Lammps interpolation routine------------------------

    Requires 'parselog.txt', a capture of Lammps debug output containing both
    the tabulated F(rho) values and the spline coefficients computed inside
    the Lammps interpolate() routine.  The same cubic-spline fit is then
    re-implemented here and the two are plotted against each other, together
    with a re-implementation of the Lammps pair_write evaluation.
    """
    if doParseLog and os.path.isfile('parselog.txt'):
        lines = open('parselog.txt', 'r').readlines()
        ### EXTRACTING SPLINE COEFFICIENTS CALCULATED INSIDE THE LAMMPS ROUTINE - interpolate()
        # The 500 (rho, F(rho)) samples follow the '>>> LD POTENTIAL 1' marker.
        start = [lines.index(line) for line in lines if line.startswith('>>> LD POTENTIAL 1')][0] + 1
        stop = start + 500
        rho = np.zeros([500], np.float64)
        frho = np.zeros([500], np.float64)
        for i, line in enumerate(lines[start:stop]):
            rho[i] = float(line.split()[0])
            frho[i] = float(line.split()[1])
        # The 500x7 spline-coefficient table follows its own marker two lines on.
        start = [lines.index(line) for line in lines if line.startswith('FRHO SPLINE COEFFICIENTS')][0] + 2
        stop = start + 500
        frho_spline = np.zeros([500,7], np.float64)
        for i, line in enumerate(lines[start:stop]):
            for j in range(7):
                frho_spline[i][j] = float(line.split()[j])
        ### REPLICATING THE LAMMPS ROUTINE interpolate()
        nrho = 500
        delta = rho[1]-rho[0]
        rho_min = 0.01
        rho_max = 2.0
        # Columns 3-6 hold the cubic coefficients, columns 0-2 the derivative
        # coefficients, mirroring the Lammps frho_spline layout.
        spline = np.zeros([nrho, 7], np.float64)
        for m in range(nrho):
            spline[m,6] = frho[m]
        # One-sided / centered finite-difference slopes at the boundaries...
        spline[0, 5] = spline[1, 6] - spline[0, 6]
        spline[1, 5] = 0.5 * (spline[2,6]-spline[0,6])
        spline[nrho-2,5] = 0.5 * (spline[nrho-1, 6]-spline[nrho-3,6])
        spline[nrho-1,5] = spline[nrho-1,6] - spline[nrho-2,6]
        # ...and a fourth-order centered difference in the interior.
        for m in range(2,nrho-3):
            spline[m,5] = ((spline[m-2,6]-spline[m+2,6]) + 8.0*(spline[m+1,6]-spline[m-1,6])) / 12.0
        for m in range(nrho-2):
            spline[m,4] = 3.0*(spline[m+1,6]-spline[m,6]) -2.0*spline[m,5] - spline[m+1,5];
            spline[m,3] = spline[m,5] + spline[m+1,5] -2.0*(spline[m+1,6]-spline[m,6]);
        spline[nrho-1,4] = 0.0
        spline[nrho-1,3] = 0.0
        # Scale to obtain derivative coefficients on the rho grid spacing.
        for m in range(nrho):
            spline[m,2] = spline[m,5]/delta;
            spline[m,1] = 2.0*spline[m,4]/delta;
            spline[m,0] = 3.0*spline[m,3]/delta;
        ### LD POTENTIAL CALCULATION USING A LAMMPS-LIKE CUBIC SPLINE INTERPOLATION
        uLD1 = np.zeros([nrho], np.float64); uLD2 = np.zeros([nrho], np.float64)
        dFdrho1 = np.zeros([nrho], np.float64); dFdrho2 = np.zeros([nrho], np.float64)
        p = 0.; m = 0;
        for i in range(nrho):
            p = (rho[i] - rho_min)/delta + 1
            # NOTE(review): m is never derived from p here (cf. m = int(p) in
            # the pair_write replica below), so after the clamp m stays 1 for
            # every iteration -- looks like a bug; confirm against Lammps.
            m = max(1,min(m,nrho-1))
            p -= m
            p = min(p, 1.0)
            # NOTE(review): both coefficient rows are indexed by the sample
            # index i rather than the interval index m -- verify intended.
            coeff1 = frho_spline[i]
            coeff2 = spline[i]
            uLD1[i] = ((coeff1[3]*p + coeff1[4])*p + coeff1[5])*p + coeff1[6]
            uLD2[i] = ((coeff2[3]*p + coeff2[4])*p + coeff2[5])*p + coeff2[6]
            dFdrho1[i] = (coeff1[0]*p + coeff1[1])*p + coeff1[2];
            dFdrho2[i] = (coeff2[0]*p + coeff2[1])*p + coeff2[2];
        #### REPLICATING THE LAMMPS PAIR WRITE FUNCTION
        r = np.linspace(2.2, 4.0, 400)
        LD = np.zeros([400], np.float64)
        ene_LD = np.zeros([400], np.float64)
        force_LD = np.zeros([400], np.float64)
        def get_phi(r, rsq, R1, R2, option):
            # Smooth indicator function phi(r): 1 inside R1, 0 beyond R2 and a
            # polynomial switch in between.  option 0 -> phi, option 1 -> dphi/dr.
            # NOTE(review): the body tests this_r (captured from the enclosing
            # loop), not the r parameter; this only works because every call
            # site passes the loop variable this_r as r -- confirm before reuse.
            lowercutsq = R1*R1
            uppercutsq = R2*R2
            uppercutfourth = uppercutsq*uppercutsq
            uppercutsixth = uppercutfourth*uppercutsq
            cut_ratio = lowercutsq/uppercutsq
            denom = (1-cut_ratio)**3
            c0 = (1 - 3*cut_ratio) / denom
            c2 = (1/uppercutsq) * (6*cut_ratio) / denom
            c4 = -(1/uppercutfourth) * (3 + 3*cut_ratio) / denom
            c6 = (1/uppercutsixth) * 2.00 / denom
            if (this_r < R1):
                phi = 1.0
                dphidr = 0.
            if (this_r > R2):
                phi = 0.0
                dphidr = 0
            if (this_r >= R1 and this_r <= R2):
                phi = c0 + rsq * (c2 + rsq * (c4 + c6*rsq));
                dphidr = (1./this_r) * (rsq * (2*c2 + rsq * (4*c4 + 6*c6*rsq)));
            if not option: return phi
            else: return dphidr
        R2 = 3.0; R1 = 0.8 * R2
        phi = 0.; dphidr = 0.
        dFdrho = 0.0
        for i, this_r in enumerate(r):
            rsq = this_r*this_r
            phi = get_phi(this_r, rsq, R1, R2, 0)
            dphidr = get_phi(this_r, rsq, R1, R2, 1)
            LD[i] += phi
            # Clamp the local density to [rho_min, rho_max] and extrapolate
            # linearly outside, as the Lammps tabulation does.
            if (LD[i] <= rho_min):
                p = 0.0; m = 1
                # NOTE(review): frho_spline is indexed by the r-sample index i
                # here (and below), not by the interval m -- verify intended.
                coeff = frho_spline[i]
                dFdrho = coeff[2]
                ene_LD[i] = coeff[6] + dFdrho*(LD[i] - rho_min)
            elif (LD[i] >= rho_max):
                p = 1.0; m = nrho - 1
                coeff = frho_spline[i];
                dFdrho = coeff[0] + coeff[1] + coeff[2]
                ene_LD[i] = (coeff[3] + coeff[4] + coeff[5] + coeff[6]) + dFdrho*(LD[i] - rho_max) ;
            else:
                p = (LD[i] - rho_min) / delta + 1.0
                m = int(p)
                m = max(1,min(m, nrho-1))
                p -= m;
                p = min(p, 1.0);
                coeff = frho_spline[i];
                dFdrho = (coeff[0]*p + coeff[1])*p + coeff[2]
                ene_LD[i] = ((coeff[3]*p + coeff[4])*p + coeff[5])*p + coeff[6]
            # Chain rule: F depends on r only through the local density.
            force_LD[i] = -dFdrho * dphidr / this_r;
        plt.figure(figsize = (10,10))
        sp = plt.subplot(221)
        sp.set_title('Verify Spline Interpolation')
        sp.plot(rho, frho, linestyle = 'solid', linewidth = 3, color = 'red', label = 'F(rho_LD) supplied')
        sp.plot(rho, uLD1, linestyle = 'solid', linewidth = 3, color = 'blue', label = 'F(rho_LD) from Lammps')
        sp.plot(rho, uLD2, linestyle = 'solid', linewidth = 3, color = 'green', label = 'F(rho_LD) from this code')
        sp.set_xlabel('rho_LD'); sp.set_ylabel('F(rho_LD)')
        sp.legend()
        sp = plt.subplot(222)
        sp.set_title('Verify dF/drho')
        sp.plot(rho, dFdrho1, linestyle = 'solid', linewidth = 3, color = 'blue', label = 'dF/drho from Lammps')
        sp.plot(rho, dFdrho2, linestyle = 'solid', linewidth = 3, color = 'green', label = 'dF/drho from this code')
        sp.set_xlabel('rho_LD'); sp.set_ylabel('dF/drho')
        sp.legend()
        sp = plt.subplot(223)
        sp.set_title('Verify energy and force')
        sp.plot(r, ene_LD, linestyle = 'solid', linewidth = 3, color = 'green', label = 'ene_LD from this code')
        if plot_force: sp.plot(r, force_LD, linestyle = 'solid', linewidth = 3, color = 'red', label = 'force_LD from this code')
        sp.set_xlabel('r (LJ unit)'); sp.set_ylabel('ene_LD (lJ unit)')
        sp.legend()
        sp = plt.subplot(224)
        sp.set_title('Verify Local Density')
        sp.plot(r, LD, linestyle = 'solid', linewidth = 3, color = 'green', label = 'Local Density from this code')
        sp.set_xlabel('r (LJ unit)'); sp.set_ylabel('Local-Density')
        sp.legend()
        #plt.tight_layout
## MAIN ##
# Generate each comparison figure (every plotN() is a no-op when its input
# file is missing or its flag is disabled), then display them all at once.
plot1()
plot2()
plot3()
plt.show()
| tanmoy7989/Lammps_Local_Density | plot_potentials.py | Python | gpl-2.0 | 9,007 | [
"LAMMPS"
] | 6e87699a20aada13a91b43a0fce58fde298ffde629b1ac4aceb1ceb45f6abc43 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
#!/usr/bin/env python
from __future__ import division, unicode_literals
"""
#TODO: Write module doc.
"""
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '8/1/15'
import warnings
# Backwards-compatibility shim: anyone importing the old pymatgen.io.zeoio
# path gets a deprecation warning and the re-exported contents of the new
# pymatgen.io.zeo module.
# Fix: the warning text was missing the word "to" ("has been moved
# pymatgen.io.zeo").
warnings.warn("pymatgen.io.zeoio has been moved to pymatgen.io.zeo. "
              "This stub will be removed in pymatgen 4.0.")
from .zeo import *
| migueldiascosta/pymatgen | pymatgen/io/zeoio.py | Python | mit | 574 | [
"pymatgen"
] | 692f938cf720ee8854a87ea1e5fe76a9c1f8596991ee003d052baad775e4a98b |
#!/usr/bin/python2
from distutils.core import setup
import os
import sys
# Configuration files and systemd units installed outside the package tree.
data_files = [("/etc/lorax", ["etc/lorax.conf"]),
              ("/etc/lorax", ["etc/composer.conf"]),
              ("/usr/lib/systemd/system", ["systemd/lorax-composer.service",
                                           "systemd/lorax-composer.socket"]),
              ("/usr/lib/tmpfiles.d/", ["systemd/lorax-composer.conf",
                                        "systemd/lorax.conf"])]
# Everything under share/ is installed verbatim under /usr/share/lorax,
# one (target-dir, [file]) entry per file.
for dirpath, _subdirs, filenames in os.walk("share"):
    target = dirpath.replace("share", "/usr/share/lorax", 1)
    for filename in filenames:
        data_files.append((target, [os.path.join(dirpath, filename)]))
# Host executables.
data_files.append(("/usr/sbin", ["src/sbin/lorax", "src/sbin/mkefiboot",
                                 "src/sbin/livemedia-creator", "src/sbin/lorax-composer",
                                 "src/sbin/mkksiso"]))
data_files.append(("/usr/bin", ["src/bin/image-minimizer",
                                "src/bin/mk-s390-cdboot",
                                "src/bin/composer-cli"]))
# Determine the version from the source tree, falling back to "devel" when
# pylorax cannot be imported (e.g. an incomplete checkout).
sys.path.insert(0, "src")
try:
    import pylorax.version
except ImportError:
    vernum = "devel"
else:
    vernum = pylorax.version.num
finally:
    # Drop the temporary "src" path entry added above.
    sys.path = sys.path[1:]
setup(name="lorax",
      version=vernum,
      description="Lorax",
      long_description="Tools for creating bootable images, including the Anaconda boot.iso",
      author="Martin Gracik, Will Woods <wwoods@redhat.com>, Brian C. Lane <bcl@redhat.com>",
      author_email="bcl@redhat.com",
      url="http://www.github.com/weldr/lorax/",
      download_url="http://www.github.com/weldr/lorax/releases/",
      license="GPLv2+",
      packages=["pylorax", "pylorax.api", "composer", "composer.cli", "lifted"],
      package_dir={"" : "src"},
      data_files=data_files
      )
| wgwoods/lorax | setup.py | Python | gpl-2.0 | 1,887 | [
"Brian"
] | e958979124d5041506b9bafe23e4af5713f4beccbbd2ed4e5a419455b8158672 |
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import List
from kivy.logger import Logger
# noinspection PyUnresolvedReferences
from jnius import detach
from ORCA.utils.Platform.android.android_helper import GetAndroidModule
__all__ = ['GetMACAddress']
def GetMACAddress() -> List[str]:
    """
    Query the Android WifiManager for the MAC address of the device.

    Falls back to the all-zero address when the lookup fails for any reason
    (e.g. running outside Android or missing permissions); the failure is
    logged, never raised.

    :return: two strings for the same address: the first colon-separated,
             the second dash-separated
    """
    uMacColon: str = u'00:00:00:00:00:00'
    uMacDash: str = u'00-00-00-00-00-00'
    try:
        cContextClass = GetAndroidModule("Context", "android.content")
        oActivity = GetAndroidModule("PythonActivity").mActivity
        oWifi = oActivity.getSystemService(cContextClass.WIFI_SERVICE)
        uMacColon = oWifi.getConnectionInfo().getMacAddress()
        uMacDash = uMacColon.replace(":", "-")
    except Exception as e:
        Logger.error("Error on GetMACAddress:" + str(e))
    return [uMacColon, uMacDash]
| thica/ORCA-Remote | src/ORCA/utils/Platform/android/android_GetMACAddress.py | Python | gpl-3.0 | 1,979 | [
"ORCA"
] | 4d7d61309a01d64aaa17be6ffdc1e5fb477f1c2a9b7906031884764655753ec5 |
"""Helper classes for doing jellium calculations."""
from math import pi
import numpy as np
from ase import Atoms
from ase.units import Bohr
from gpaw.poisson import PoissonSolver
class JelliumPoissonSolver(PoissonSolver):
    """Jellium Poisson solver.

    Subtracts a uniform, neutralizing positive background charge
    ("jellium") from the density before solving the Poisson equation.
    The region occupied by the background is selected by get_mask().
    """
    mask_g = None # where to put the jellium
    rs = None # Wigner Seitz radius
    def get_mask(self, r_gv):
        """Choose which grid points are inside the jellium.
        r_gv: 4-dimensional ndarray
            positions of the grid points in Bohr units.
        Return ndarray of ones and zeros indicating where the jellium
        is. This implementation will put the positive background in the
        whole cell. Overwrite this method in subclasses."""
        return self.gd.zeros() + 1.0
    def initialize(self):
        """Initialize the base solver and precompute the jellium mask
        and its total volume (grid-point count times grid-point volume,
        summed over all domains)."""
        PoissonSolver.initialize(self)
        r_gv = self.gd.get_grid_point_coordinates().transpose((1, 2, 3, 0))
        self.mask_g = self.get_mask(r_gv).astype(float)
        self.volume = self.gd.comm.sum(self.mask_g.sum()) * self.gd.dv
    def solve(self, phi, rho, eps=None, charge=0, maxcharge=1e-6,
              zero_initial_phi=False):
        """Solve the Poisson equation for rho plus the background charge.

        Note: rho is modified in place (the uniform background density
        is subtracted) before solve_neutral() is called.
        NOTE(review): charge == 0 (the default) makes the rs expression
        below divide by zero -- presumably callers always pass a nonzero
        charge; confirm.
        """
        if eps is None:
            eps = self.eps
        # Wigner-Seitz radius corresponding to the background density.
        self.rs = (3 / pi / 4 * self.volume / charge)**(1 / 3.0)
        rho -= self.mask_g * (charge / self.volume)
        niter = self.solve_neutral(phi, rho, eps=eps)
        return niter
class JelliumSurfacePoissonSolver(JelliumPoissonSolver):
    """Jellium Poisson solver for a slab geometry: the uniform positive
    background is confined between two planes perpendicular to z."""
    def __init__(self, z1, z2, **kwargs):
        """Put the positive background charge where z1 < z < z2.
        z1: float
            Position of lower surface in Angstrom units.
        z2: float
            Position of upper surface in Angstrom units."""
        PoissonSolver.__init__(self, **kwargs)
        # Convert to Bohr; the tiny shift keeps grid points sitting exactly
        # on a boundary plane on a well-defined side.
        self.z1 = (z1 - 0.0001) / Bohr
        self.z2 = (z2 - 0.0001) / Bohr
    def get_mask(self, r_gv):
        """Select grid points whose z-coordinate lies strictly between
        the two slab boundaries."""
        z_g = r_gv[:, :, :, 2]
        return np.logical_and(z_g > self.z1, z_g < self.z2)
| robwarm/gpaw-symm | gpaw/jellium.py | Python | gpl-3.0 | 2,044 | [
"ASE",
"GPAW"
] | c2f754eae5fdd8936bf361baadc7767671160679523c2b956dd14f74e151659a |
""" simple hello world job
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from DIRAC.Interfaces.API.Job import Job
from DIRAC.Interfaces.API.Dirac import Dirac
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
j = Job()
j.setName("helloWorld-test")
j.setExecutable("exe-script.py", "", "Executable.log")
# <-- user settings
j.setCPUTime(172800)
tier1s = DMSHelpers().getTiers(tier=(0, 1))
j.setBannedSites(tier1s)
# user settings -->
# print j.workflow
# submit the job to dirac
result = Dirac().submitJob(j)
print(result)
| yujikato/DIRAC | src/DIRAC/tests/Workflow/Regression/helloWorld.py | Python | gpl-3.0 | 618 | [
"DIRAC"
] | 54ad18ad1772d677a8ddb63d8613a2c90e5740745bf173344d14b0ebab2dadad |
########################################################################
# $HeadURL$
########################################################################
""" DIRAC JobDB class is a front-end to the main WMS database containing
job definitions and status information. It is used in most of the WMS
components
The following methods are provided for public usage:
getJobID()
getJobAttribute()
getJobAttributes()
getAllJobAttributes()
getDistinctJobAttributes()
getAttributesForJobList()
getJobParameter()
getJobParameters()
getAllJobParameters()
getInputData()
getSubjobs()
getJobJDL()
selectJobs()
selectJobsWithStatus()
setJobAttribute()
setJobAttributes()
setJobParameter()
setJobParameters()
setJobJDL()
setJobStatus()
setInputData()
insertNewJobIntoDB()
removeJobFromDB()
rescheduleJob()
rescheduleJobs()
getMask()
setMask()
allowSiteInMask()
banSiteInMask()
getCounters()
"""
__RCSID__ = "$Id$"
import sys, types
import operator
from DIRAC import S_OK, S_ERROR, Time
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Core.Base.DB import DB
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getSites
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
from DIRAC.WorkloadManagementSystem.Client.JobState.JobManifest import JobManifest
from DIRAC.Core.Utilities import Time
# When True, the constructor dumps the DB connection parameters to stdout.
DEBUG = False
# All job states known to the WMS, in rough lifecycle order.
JOB_STATES = ['Received', 'Checking', 'Staging', 'Waiting', 'Matched',
              'Running', 'Stalled', 'Done', 'Completed', 'Failed']
# States from which a job does not progress any further.
JOB_FINAL_STATES = ['Done', 'Completed', 'Failed']
# Attribute columns kept only for backward compatibility.
JOB_DEPRECATED_ATTRIBUTES = [ 'UserPriority', 'SystemPriority' ]
# Attribute columns grouped by how often they change: fixed at submission...
JOB_STATIC_ATTRIBUTES = [ 'JobID', 'JobType', 'DIRACSetup', 'JobGroup', 'HerdState', 'MasterJobID',
                          'JobName', 'Owner', 'OwnerDN', 'OwnerGroup', 'SubmissionTime', 'VerifiedFlag' ]
# ...changed a limited number of times during the job life...
JOB_VARIABLE_ATTRIBUTES = [ 'Site', 'RescheduleTime', 'StartExecTime', 'EndExecTime', 'RescheduleCounter',
                            'DeletedFlag', 'KilledFlag', 'FailedFlag',
                            'ISandboxReadyFlag', 'OSandboxReadyFlag', 'RetrievedFlag', 'AccountedFlag' ]
# ...or updated continuously while the job runs.
JOB_DYNAMIC_ATTRIBUTES = [ 'LastUpdateTime', 'HeartBeatTime',
                           'Status', 'MinorStatus', 'ApplicationStatus', 'ApplicationNumStatus', 'CPUTime'
                         ]
#############################################################################
class JobDB( DB ):
_tablesDict = {}
# Jobs table
_tablesDict[ 'Jobs' ] = {
'Fields' :
{
'JobID' : 'INTEGER NOT NULL AUTO_INCREMENT',
'JobType' : 'VARCHAR(32) NOT NULL DEFAULT "normal"',
'DIRACSetup' : 'VARCHAR(32) NOT NULL',
'JobGroup' : 'VARCHAR(32) NOT NULL DEFAULT "NoGroup"',
'JobSplitType' : 'ENUM ("Single","Master","Subjob","DAGNode") NOT NULL DEFAULT "Single"',
'MasterJobID' : 'INTEGER NOT NULL DEFAULT 0',
'Site' : 'VARCHAR(100) NOT NULL DEFAULT "ANY"',
'JobName' : 'VARCHAR(128) NOT NULL DEFAULT "Unknown"',
'Owner' : 'VARCHAR(32) NOT NULL DEFAULT "Unknown"',
'OwnerDN' : 'VARCHAR(255) NOT NULL DEFAULT "Unknown"',
'OwnerGroup' : 'VARCHAR(128) NOT NULL DEFAULT "lhcb_user"',
'SubmissionTime' : 'DATETIME',
'RescheduleTime' : 'DATETIME',
'LastUpdateTime' : 'DATETIME',
'StartExecTime' : 'DATETIME',
'HeartBeatTime' : 'DATETIME',
'EndExecTime' : 'DATETIME',
'Status' : 'VARCHAR(32) NOT NULL DEFAULT "Received"',
'MinorStatus' : 'VARCHAR(128) NOT NULL DEFAULT "Initial insertion"',
'ApplicationStatus' : 'VARCHAR(256) NOT NULL DEFAULT "Unknown"',
'ApplicationNumStatus' : 'INTEGER NOT NULL DEFAULT 0',
'CPUTime' : 'FLOAT NOT NULL DEFAULT 0.0',
'UserPriority' : 'INTEGER NOT NULL DEFAULT 0',
'SystemPriority' : 'INTEGER NOT NULL DEFAULT 0',
'RescheduleCounter' : 'INTEGER NOT NULL DEFAULT 0',
'VerifiedFlag' : 'ENUM ("True","False") NOT NULL DEFAULT "False"',
'DeletedFlag' : 'ENUM ("True","False") NOT NULL DEFAULT "False"',
'KilledFlag' : 'ENUM ("True","False") NOT NULL DEFAULT "False"',
'FailedFlag' : 'ENUM ("True","False") NOT NULL DEFAULT "False"',
'ISandboxReadyFlag' : 'ENUM ("True","False") NOT NULL DEFAULT "False"',
'OSandboxReadyFlag' : 'ENUM ("True","False") NOT NULL DEFAULT "False"',
'RetrievedFlag' : 'ENUM ("True","False") NOT NULL DEFAULT "False"',
'AccountedFlag' : 'ENUM ("True","False","Failed") NOT NULL DEFAULT "False"'
},
'Indexes' :
{
'JobType' : [ 'JobType' ],
'DIRACSetup' : [ 'DIRACSetup' ],
'JobGroup' : [ 'JobGroup' ],
'JobSplitType' : [ 'JobSplitType' ],
'Site' : [ 'Site' ],
'Owner' : [ 'Owner' ],
'OwnerDN' : [ 'OwnerDN' ],
'OwnerGroup' : [ 'OwnerGroup' ],
'Status' : [ 'Status' ],
'StatusSite' : [ 'Status', 'Site' ],
'MinorStatus' : [ 'MinorStatus' ],
'ApplicationStatus' : [ 'ApplicationStatus' ]
},
'PrimaryKey' : [ 'JobID' ]
}
# JobJDLs table
_tablesDict[ 'JobJDLs' ] = {
'Fields' :
{
'JobID' : 'INTEGER NOT NULL AUTO_INCREMENT',
'JDL' : 'BLOB NOT NULL',
'JobRequirements' : 'BLOB NOT NULL',
'OriginalJDL' : 'BLOB NOT NULL',
},
'PrimaryKey' : [ 'JobID' ]
}
# SubJobs table
_tablesDict[ 'SubJobs' ] = {
'Fields' :
{
'JobID' : 'INTEGER NOT NULL',
'SubJobID' : 'INTEGER NOT NULL',
}
}
# PrecursorJobs table
_tablesDict[ 'PrecursorJobs' ] = {
'Fields' :
{
'JobID' : 'INTEGER NOT NULL',
'PreJobID' : 'INTEGER NOT NULL',
}
}
# InputData table
_tablesDict[ 'InputData' ] = {
'Fields' :
{
'JobID' : 'INTEGER NOT NULL',
'Status' : 'VARCHAR(32) NOT NULL DEFAULT "AprioriGood"',
'LFN' : 'VARCHAR(255)'
},
'PrimaryKey' : [ 'JobID', 'LFN' ]
}
# JobParameters table
_tablesDict[ 'JobParameters' ] = {
'Fields' :
{
'JobID' : 'INTEGER NOT NULL',
'Name' : 'VARCHAR(100) NOT NULL',
'Value' : 'BLOB NOT NULL'
},
'PrimaryKey' : [ 'JobID', 'Name' ]
}
# OptimizerParameters table
_tablesDict[ 'OptimizerParameters' ] = {
'Fields' :
{
'JobID' : 'INTEGER NOT NULL',
'Name' : 'VARCHAR(100) NOT NULL',
'Value' : 'MEDIUMBLOB NOT NULL'
},
'PrimaryKey' : [ 'JobID', 'Name' ]
}
# AtticJobParameters table
_tablesDict[ 'AtticJobParameters' ] = {
'Fields' :
{
'JobID' : 'INTEGER NOT NULL',
'RescheduleCycle' : 'INTEGER NOT NULL',
'Name' : 'VARCHAR(100) NOT NULL',
'Value' : 'BLOB NOT NULL'
},
'PrimaryKey' : [ 'JobID', 'Name', 'RescheduleCycle' ]
}
# TaskQueues table
_tablesDict[ 'TaskQueues' ] = {
'Fields' :
{
'TaskQueueID' : 'INTEGER NOT NULL AUTO_INCREMENT',
'Priority' : 'INTEGER NOT NULL DEFAULT 0',
'Requirements' : 'BLOB NOT NULL',
'NumberOfJobs' : 'INTEGER NOT NULL DEFAULT 0'
},
'PrimaryKey' : [ 'TaskQueueID' ]
}
# TaskQueue table
_tablesDict[ 'TaskQueue' ] = {
'Fields' :
{
'TaskQueueID' : 'INTEGER NOT NULL',
'JobID' : 'INTEGER NOT NULL',
'Rank' : 'INTEGER NOT NULL DEFAULT 0'
},
'PrimaryKey' : [ 'JobID', 'TaskQueueID' ]
}
# SiteMask table
_tablesDict[ 'SiteMask' ] = {
'Fields' :
{
'Site' : 'VARCHAR(64) NOT NULL',
'Status' : 'VARCHAR(64) NOT NULL',
'LastUpdateTime' : 'DATETIME NOT NULL',
'Author' : 'VARCHAR(255) NOT NULL',
'Comment' : 'BLOB NOT NULL'
},
'PrimaryKey' : [ 'Site' ]
}
# SiteMaskLogging table
_tablesDict[ 'SiteMaskLogging' ] = {
'Fields' :
{
'Site' : 'VARCHAR(64) NOT NULL',
'Status' : 'VARCHAR(64) NOT NULL',
'UpdateTime' : 'DATETIME NOT NULL',
'Author' : 'VARCHAR(255) NOT NULL',
'Comment' : 'BLOB NOT NULL'
}
}
# HeartBeatLoggingInfo table
_tablesDict[ 'HeartBeatLoggingInfo' ] = {
'Fields' :
{
'JobID' : 'INTEGER NOT NULL',
'Name' : 'VARCHAR(100) NOT NULL',
'Value' : 'BLOB NOT NULL',
'HeartBeatTime' : 'DATETIME NOT NULL'
},
'Indexes' : { 'JobID' : [ 'JobID' ] }
}
# JobCommands table
_tablesDict[ 'JobCommands' ] = {
'Fields' :
{
'JobID' : 'INTEGER NOT NULL',
'Command' : 'VARCHAR(100) NOT NULL',
'Arguments' : 'VARCHAR(100) NOT NULL',
'Status' : 'VARCHAR(64) NOT NULL DEFAULT "Received"',
'ReceptionTime' : 'DATETIME NOT NULL',
'ExecutionTime' : 'DATETIME',
},
'Indexes' : { 'JobID' : [ 'JobID' ] }
}
  def __init__( self, maxQueueSize = 10 ):
    """ Standard Constructor

        Connects to the JobDB database, reads the MaxRescheduling limit from
        the configuration, caches the Jobs table column names and runs any
        pending schema migrations.  Exits the process if the column names
        cannot be retrieved.
    """
    DB.__init__( self, 'JobDB', 'WorkloadManagement/JobDB', maxQueueSize, debug = DEBUG )
    self.maxRescheduling = gConfig.getValue( self.cs_path + '/MaxRescheduling', 3 )
    # Filled by __getAttributeNames() below.
    self.jobAttributeNames = []
    self.nJobAttributeNames = 0
    result = self.__getAttributeNames()
    if not result['OK']:
      error = 'Can not retrieve job Attributes'
      self.log.fatal( 'JobDB: %s' % error )
      # sys.exit raises SystemExit, so the return below is never reached.
      sys.exit( error )
      return
    self.log.info( "MaxReschedule: %s" % self.maxRescheduling )
    self.log.info( "==================================================" )
    if DEBUG:
      result = self.dumpParameters()
    self.__updateDBSchema()
  def _checkTable( self ):
    """ _checkTable.

    Create any missing tables of the JobDB schema.  Called from the
    MatcherHandler instead of the JobDB constructor to avoid an awful
    number of unnecessary "show tables" queries.
    """
    return self.__createTables()
  def __createTables( self ):
    """ __createTables

    Writes the schema in the database. If a table is already in the schema, it is
    skipped to avoid problems trying to create a table that already exists.

    :return: S_OK with a human-readable message listing the created tables,
             or the S_ERROR from the underlying query/creation call.
    """
    # Horrible SQL here !!
    existingTables = self._query( "show tables" )
    if not existingTables[ 'OK' ]:
      return existingTables
    # Each result row is a one-element tuple holding the table name.
    existingTables = [ existingTable[0] for existingTable in existingTables[ 'Value' ] ]
    # Makes a copy of the dictionary _tablesDict
    tables = {}
    tables.update( self._tablesDict )
    # Drop from the work list every table that already exists.
    for existingTable in existingTables:
      if existingTable in tables:
        del tables[ existingTable ]
    res = self._createTables( tables )
    if not res[ 'OK' ]:
      return res
    # Human readable S_OK message
    if res[ 'Value' ] == 0:
      res[ 'Value' ] = 'No tables created'
    else:
      res[ 'Value' ] = 'Tables created: %s' % ( ','.join( tables.keys() ) )
    return res
  def __updateDBSchema( self ):
    """ Apply pending schema migrations.

        Starting from the version recorded in SchemaVersion, looks up
        __schemaMigration_<N> methods one by one and runs each inside a
        transaction, recording the new version on success.  Stops at the
        first missing migration method and returns S_OK( version ).
    """
    result = self.__checkDBVersion()
    if not result[ 'OK' ]:
      self.log.error( "Can't retrieve schema version: %s" % result[ 'Message' ] )
      return result
    version = result[ 'Value' ]
    while True:
      self.log.info( "Current DB schema version %d" % version )
      version += 1
      try:
        # Name-mangled lookup of the private migration method for `version`.
        migration = getattr( self, '_JobDB__schemaMigration_%d' % version )
        self.log.info( "Found schema migration" )
      except AttributeError:
        # No further migrations defined: the schema is up to date.
        return S_OK( version )
      with self.transaction as commit:
        result = migration()
        if not result[ 'OK' ]:
          self.log.error( "Can't apply migration from schema version %d: %s" % ( version, result[ 'Message' ] ) )
          return result
        result = self._update( "INSERT INTO SchemaVersion VALUES ( %d )" % version )
        if not result[ 'OK' ]:
          self.log.error( "Error saving schema version %d: %s" % ( version, result[ 'Message' ] ) )
          return result
        commit()
    # Unreachable: the while-True loop above always exits via a return.
    return S_OK( version )
  def __schemaMigration_0( self ):
    """ First schema migration: normalize MasterJobIDs, rename the
        JobSplitType column to HerdState, create the new LFN/Replicas/
        MasterJDLs/SchemaVersion tables, drop obsolete tables and convert
        all remaining MyISAM tables to InnoDB.
    """
    self.log.info( "Updating MasterJobIDs..." )
    #Set MasterJobID to the JobID if not set
    result = self._update( "UPDATE `Jobs` SET MasterJobID = JobID WHERE MasterJobID = 0" )
    if not result[ 'OK' ]:
      return result
    #Alter JobSplitType to HerdState
    result = self._query( "DESCRIBE Jobs" )
    if not result[ 'OK' ]:
      return result
    cols = result[ 'Value' ]
    found = False
    # Only rename when the old column is still present (idempotent rerun).
    for col in cols:
      if col[0] == 'JobSplitType':
        result = self._update( "ALTER TABLE Jobs CHANGE JobSplitType HerdState ENUM ('Single','WillSplit','Splitted') NOT NULL DEFAULT 'Single'" )
        if not result[ 'OK' ]:
          return result
    #Get current tables
    result = self._query( "SHOW TABLES" )
    if not result[ 'OK' ]:
      return result
    tablesInDB = [ t[0] for t in result[ 'Value' ] ]
    #Create required tables
    tables = { 'SchemaVersion' : { 'Fields' : { 'Version' : 'INTEGER UNSIGNED' } } }
    tables[ 'LFN' ] = { 'Fields' : { 'LFNID' : 'INTEGER NOT NULL AUTO_INCREMENT',
                                     'JobID' : 'INTEGER UNSIGNED NOT NULL',
                                     'LFN' : 'VARCHAR(512) NOT NULL' ,
                                     'Checksum' : 'VARCHAR(64) DEFAULT ""',
                                     'CreationDate' : 'DATETIME',
                                     'ModificationDate' : 'DATETIME',
                                     'Size' : 'INTEGER UNSIGNED DEFAULT 0' },
                        'PrimaryKey' : 'LFNID',
                        'UniqueIndexes' : { 'joblfn' : [ 'JobID', 'LFN' ] } }
    tables[ 'Replicas' ] = { 'Fields' : { 'LFNID' : 'INTEGER NOT NULL',
                                          'SEName' : 'VARCHAR(64) NOT NULL',
                                          'SURL' : 'VARCHAR(256) NOT NULL',
                                          'Disk' : 'TINYINT(1) NOT NULL' },
                             'PrimaryKey' : [ 'LFNID', 'SEName', 'SURL' ],
                             'Indexes' : { 'LFNID' : [ 'LFNID' ] },
                             'ForeignKeys' : { 'LFNID' : 'LFN' } }
    tables[ 'MasterJDLs' ] = { 'Fields' : { 'JobID' : 'int(11) NOT NULL',
                                            'JDL' : 'BLOB NOT NULL' },
                               'PrimaryKey' : [ 'JobID' ] }
    # Skip tables that already exist.
    for t in tablesInDB:
      try:
        tables.pop( t )
      except KeyError:
        pass
    result = self._createTables( tables )
    if not result[ 'OK' ]:
      return result
    #Create foreign keys
    #Drop old tables
    dropTables = []
    for t in [ 'SubJobs', 'PrecursorJobs', 'TaskQueues', 'TaskQueue', 'InputData' ]:
      if t in tablesInDB:
        dropTables.append( t )
    if dropTables:
      self.log.info( "Info dropping tables %s" % ", ".join( dropTables ) )
      result = self._update( "DROP TABLE %s" % ", ".join( dropTables ) )
      if not result[ 'OK' ]:
        return result
    #Migrate to innodb
    result = self._query( "SHOW TABLE STATUS" )
    if not result[ 'OK' ]:
      return result
    # Map table name -> storage engine.
    tableStatus = dict( [ ( r[0], r[1] ) for r in result[ 'Value' ] ] )
    for tableName in tableStatus:
      if tableStatus[ tableName ] == "MyISAM":
        self.log.info( "Migrating table %s to innodb" % tableName )
        result = self._update( "ALTER TABLE `%s` ENGINE = INNODB" % tableName )
        if not result[ 'OK' ]:
          # NOTE(review): a failed engine conversion is logged but does not
          # abort the migration -- presumably best-effort; confirm.
          self.log.error( "Error migrating %s to innodb: %s" % ( tableName, result[ 'Message' ] ) )
    return S_OK()
def __checkDBVersion( self ):
result = self._query( "show tables" )
if not result[ 'OK' ]:
return result
tables = [ r[0] for r in result[ 'Value' ] ]
if 'SchemaVersion' not in tables:
return S_OK( -1 )
result = self._query( "SELECT MAX(Version) FROM SchemaVersion" )
if not result[ 'OK' ]:
return result
version = result[ 'Value' ][0][0]
if version == None:
version = -1
return S_OK( version )
  def dumpParameters( self ):
    """ Dump the JobDB connection parameters to the stdout

        Debugging aid only (enabled via the module-level DEBUG flag):
        note that it prints the database password in clear text.
    """
    print "=================================================="
    print "User: ", self.dbUser
    print "Host: ", self.dbHost
    print "Password ", self.dbPass
    print "DBName ", self.dbName
    print "MaxQueue ", self.maxQueueSize
    print "=================================================="
    return S_OK()
def __getAttributeNames( self ):
""" get Name of Job Attributes defined in DB
set self.jobAttributeNames to the list of Names
return S_OK()
return S_ERROR upon error
"""
res = self._query( 'DESCRIBE Jobs' )
if not res['OK']:
return res
self.jobAttributeNames = []
for row in res['Value']:
field = row[0]
self.jobAttributeNames.append( field )
self.nJobAttributeNames = len( self.jobAttributeNames )
return S_OK()
#############################################################################
def getJobID( self ):
"""Get the next unique JobID and prepare the new job insertion
"""
cmd = 'INSERT INTO Jobs (SubmissionTime) VALUES (UTC_TIMESTAMP())'
err = 'JobDB.getJobID: Failed to retrieve a new Id.'
res = self._update( cmd )
if not res['OK']:
return S_ERROR( '1 %s\n%s' % ( err, res['Message'] ) )
if not 'lastRowId' in res['Value']:
return S_ERROR( '2 %s' % err )
jobID = int( res['Value']['lastRowId'] )
self.log.info( 'JobDB: New JobID served "%s"' % jobID )
#############################################################################
  def getAttributesForJobList( self, jobIDList, attrList = None ):
    """ Get attributes for the jobs in the the jobIDList.

        Returns an S_OK structure with a dictionary of dictionaries as its Value:
        ValueDict[jobID][attribute_name] = attribute_value

        If attrList is not given, all cached Jobs-table columns are returned.
        An empty jobIDList short-circuits to S_OK( {} ).
    """
    if not jobIDList:
      return S_OK( {} )
    if attrList:
      attrNames = ','.join( [ str( x ) for x in attrList ] )
      attr_tmp_list = attrList
    else:
      attrNames = ','.join( [ str( x ) for x in self.jobAttributeNames ] )
      attr_tmp_list = self.jobAttributeNames
    jobList = ','.join( [str( x ) for x in jobIDList] )
    # FIXME: need to check if the attributes are in the list of job Attributes
    cmd = 'SELECT JobID,%s FROM Jobs WHERE JobID in ( %s )' % ( attrNames, jobList )
    res = self._query( cmd )
    if not res['OK']:
      return res
    try:
      retDict = {}
      for retValues in res['Value']:
        # First selected column is always the JobID; the rest follow
        # attr_tmp_list in order.
        jobID = retValues[0]
        jobDict = {}
        jobDict[ 'JobID' ] = jobID
        attrValues = retValues[1:]
        for i in range( len( attr_tmp_list ) ):
          try:
            # BLOB columns may come back as array-like objects exposing
            # tostring(); fall back to plain str() otherwise.
            jobDict[attr_tmp_list[i]] = attrValues[i].tostring()
          except Exception:
            jobDict[attr_tmp_list[i]] = str( attrValues[i] )
        retDict[int( jobID )] = jobDict
      return S_OK( retDict )
    # Python 2 only: the comma-form except clause binds the exception to x.
    except Exception, x:
      return S_ERROR( 'JobDB.getAttributesForJobList: Failed\n%s' % str( x ) )
#############################################################################
def getDistinctJobAttributes( self, attribute, condDict = None, older = None,
newer = None, timeStamp = 'LastUpdateTime' ):
""" Get distinct values of the job attribute under specified conditions
"""
return self.getDistinctAttributeValues( 'Jobs', attribute, condDict = condDict,
older = older, newer = newer, timeStamp = timeStamp )
#############################################################################
def getJobParameters( self, jobID, paramList = None ):
""" Get Job Parameters defined for jobID.
Returns a dictionary with the Job Parameters.
If parameterList is empty - all the parameters are returned.
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
e_jobID = ret['Value']
self.log.debug( 'JobDB.getParameters: Getting Parameters for job %s' % jobID )
resultDict = {}
if paramList:
paramNameList = []
for x in paramList:
ret = self._escapeString( x )
if not ret['OK']:
return ret
paramNameList.append( ret['Value'] )
paramNames = ','.join( paramNameList )
cmd = "SELECT Name, Value from JobParameters WHERE JobID=%s and Name in (%s)" % ( e_jobID, paramNames )
result = self._query( cmd )
if result['OK']:
if result['Value']:
for name, value in result['Value']:
try:
resultDict[name] = value.tostring()
except Exception:
resultDict[name] = value
return S_OK( resultDict )
else:
return S_ERROR( 'JobDB.getJobParameters: failed to retrieve parameters' )
else:
result = self.getFields( 'JobParameters', ['Name', 'Value'], {'JobID': jobID} )
if not result['OK']:
return result
else:
for name, value in result['Value']:
try:
resultDict[name] = value.tostring()
except Exception:
resultDict[name] = value
return S_OK( resultDict )
#############################################################################
def getAtticJobParameters( self, jobID, paramList = None, rescheduleCounter = -1 ):
""" Get Attic Job Parameters defined for a job with jobID.
Returns a dictionary with the Attic Job Parameters per each rescheduling cycle.
If parameterList is empty - all the parameters are returned.
If recheduleCounter = -1, all cycles are returned.
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
self.log.debug( 'JobDB.getAtticJobParameters: Getting Attic Parameters for job %s' % jobID )
resultDict = {}
paramCondition = ''
if paramList:
paramNameList = []
for x in paramList:
ret = self._escapeString( x )
if not ret['OK']:
return ret
paramNameList.append( x )
paramNames = ','.join( paramNameList )
paramCondition = " AND Name in (%s)" % paramNames
rCounter = ''
if rescheduleCounter != -1:
rCounter = ' AND RescheduleCycle=%d' % int( rescheduleCounter )
cmd = "SELECT Name, Value, RescheduleCycle from AtticJobParameters"
cmd += " WHERE JobID=%s %s %s" % ( jobID, paramCondition, rCounter )
result = self._query( cmd )
if result['OK']:
if result['Value']:
for name, value, counter in result['Value']:
if not resultDict.has_key( counter ):
resultDict[counter] = {}
try:
resultDict[counter][name] = value.tostring()
except Exception:
resultDict[counter][name] = value
return S_OK( resultDict )
else:
return S_ERROR( 'JobDB.getAtticJobParameters: failed to retrieve parameters' )
#############################################################################
def getJobAttributes( self, jobID, attrList = None ):
""" Get all Job Attributes for a given jobID.
Return a dictionary with all Job Attributes,
return an empty dictionary if matching job found
"""
if not attrList:
attrList = self.jobAttributeNames
result = self.getFields( "Jobs", outFields = attrList, condDict = { 'JobID' : jobID } )
if not result[ 'OK' ]:
return result
dbData = result[ 'Value' ]
if not dbData:
return S_OK( {} )
jobData = dbData[0]
ret = {}
for iP in range( len( attrList ) ):
ret[ attrList[iP] ] = jobData[ iP ]
return S_OK( ret )
#############################################################################
def getJobInfo( self, jobID, parameters = None ):
""" Get parameters for job specified by jobID. Parameters can be
either job attributes ( fields in the Jobs table ) or those
stored in the JobParameters table.
The return value is a dictionary of the structure:
Dict[Name] = Value
"""
resultDict = {}
# Parameters are not specified, get them all - parameters + attributes
if not parameters:
result = self.getJobAttributes( jobID )
if result['OK']:
resultDict = result['value']
else:
return S_ERROR( 'JobDB.getJobAttributes: can not retrieve job attributes' )
result = self.getJobParameters( jobID )
if result['OK']:
resultDict.update( result['value'] )
else:
return S_ERROR( 'JobDB.getJobParameters: can not retrieve job parameters' )
return S_OK( resultDict )
paramList = []
attrList = []
for par in parameters:
if par in self.jobAttributeNames:
attrList.append( par )
else:
paramList.append( par )
# Get Job Attributes first
if attrList:
result = self.getJobAttributes( jobID, attrList )
if not result['OK']:
return result
if len( result['Value'] ) > 0:
resultDict = result['Value']
else:
return S_ERROR( 'Job ' + str( jobID ) + ' not found' )
# Get Job Parameters
if paramList:
result = self.getJobParameters( jobID, paramList )
if not result['OK']:
return result
if len( result['Value'] ) > 0:
resultDict.update( result['Value'] )
return S_OK( resultDict )
#############################################################################
def getJobAttribute( self, jobID, attribute ):
""" Get the given attribute of a job specified by its jobID
"""
result = self.getJobAttributes( jobID, [attribute] )
if result['OK']:
value = result['Value'][attribute]
return S_OK( value )
else:
return result
#############################################################################
def getJobParameter( self, jobID, parameter ):
""" Get the given parameter of a job specified by its jobID
"""
result = self.getJobParameters( jobID, [parameter] )
if result['OK']:
if result['Value']:
value = result['Value'][parameter]
else:
value = None
return S_OK( value )
else:
return result
#############################################################################
def getJobOptParameter( self, jobID, parameter ):
""" Get optimizer parameters for the given job.
"""
result = self.getFields( 'OptimizerParameters', ['Value'], {'JobID': jobID, 'Name': parameter} )
if result['OK']:
if result['Value']:
return S_OK( result['Value'][0][0] )
else:
return S_ERROR( 'Parameter not found' )
else:
return S_ERROR( 'Failed to access database' )
#############################################################################
def getJobOptParameters( self, jobID, paramList = None ):
""" Get optimizer parameters for the given job. If the list of parameter names is
empty, get all the parameters then
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
resultDict = {}
if paramList:
paramNameList = []
for x in paramList:
ret = self._escapeString( x )
if not ret['OK']:
return ret
paramNameList.append( ret['Value'] )
paramNames = ','.join( paramNameList )
cmd = "SELECT Name, Value from OptimizerParameters WHERE JobID=%s and Name in (%s)" % ( jobID, paramNames )
else:
cmd = "SELECT Name, Value from OptimizerParameters WHERE JobID=%s" % jobID
result = self._query( cmd )
if result['OK']:
if result['Value']:
for name, value in result['Value']:
try:
resultDict[name] = value.tostring()
except Exception:
resultDict[name] = value
return S_OK( resultDict )
else:
return S_ERROR( 'JobDB.getJobOptParameters: failed to retrieve parameters' )
#############################################################################
def getTimings( self, site, period = 3600 ):
""" Get CPU and wall clock times for the jobs finished in the last hour
"""
ret = self._escapeString( site )
if not ret['OK']:
return ret
site = ret['Value']
date = str( Time.dateTime() - Time.second * period )
req = "SELECT JobID from Jobs WHERE Site=%s and EndExecTime > '%s' " % ( site, date )
result = self._query( req )
jobList = [ str( x[0] ) for x in result['Value'] ]
jobString = ','.join( jobList )
req = "SELECT SUM(Value) from JobParameters WHERE Name='TotalCPUTime(s)' and JobID in (%s)" % jobString
result = self._query( req )
if not result['OK']:
return result
cpu = result['Value'][0][0]
if not cpu:
cpu = 0.0
req = "SELECT SUM(Value) from JobParameters WHERE Name='WallClockTime(s)' and JobID in (%s)" % jobString
result = self._query( req )
if not result['OK']:
return result
wctime = result['Value'][0][0]
if not wctime:
wctime = 0.0
return S_OK( {"CPUTime":int( cpu ), "WallClockTime":int( wctime )} )
#############################################################################
def __checkInputDataStructure( self, pDict ):
if type( pDict ) != types.DictType:
return S_ERROR( "Input data has to be a dictionary" )
for lfn in pDict:
if 'Metadata' not in pDict[ lfn ]:
return S_ERROR( "Missing metadata for lfn %s" % lfn )
if 'Replicas' not in pDict[ lfn ]:
return S_ERROR( "Missing replicas for lfn %s" % lfn )
replicas = pDict[ lfn ][ 'Replicas' ]
for seName in replicas:
if 'SURL' not in replicas or 'Disk' not in replicas:
return S_ERROR( "Missing SURL or Disk for %s:%s replica" % ( seName, lfn ) )
return S_OK()
#############################################################################
def getInputData ( self, jid ):
"""Get input data for the given job
"""
try:
jid = int( jid )
except ValueError:
return S_ERROR( "Job id needs to be an integer" )
result = self._query( "SELECT l.LFN, l.Checksum, l.CreationDate, l.ModificationDate, l.Size, r.SURL, r.SEName, r.Disk FROM LFN l, Replicas r WHERE l.JobID = %d AND l.LFNID = r.LFNID" % jid )
if not result[ 'OK' ]:
return result
data = {}
for lfn, checksum, creationDate, modifDate, size, surl, SEName, onDisk in result[ 'Value' ]:
if lfn not in data:
data[ lfn ] = { 'Metadata' : { 'Checksum' : checksum, 'CreationDate' : creationDate,
'ModificationDate' : modifDate, 'Size' : size },
'Replicas' : {} }
rD = data[ lfn ][ 'Replicas' ]
rD[ SEName ] = { 'SURL' : surl, 'Disk' : onDisk }
return S_OK( data )
#############################################################################
def __cleanInputData( self, jid ):
cmd = "DELETE FROM Replicas WHERE LFNID IN ( SELECT LFNID FROM LFN WHERE JobID = %d )" % jid
result = self._update( cmd )
if not result['OK']:
return S_ERROR( "Could not clean input data for job %d: %s" % ( jid, result[ 'Message' ] ) )
cmd = "DELETE FROM LFN WHERE JobID = %d" % jid
result = self._update( cmd )
if not result['OK']:
return S_ERROR( "Could not clean input data for job %d: %s" % ( jid, result[ 'Message' ] ) )
return S_OK()
#############################################################################
  def setInputData ( self, jid, lfnData ):
    """Inserts input data for the given job

       :param jid: job ID ( coerced to int )
       :param lfnData: { lfn : { 'Metadata' : {...}, 'Replicas' : {...} } },
                       validated by __checkInputDataStructure
    """
    try:
      jid = int( jid )
    except ValueError:
      return S_ERROR( "Job id needs to be an integer" )
    result = self.__checkInputDataStructure( lfnData )
    if not result['OK']:
      return result
    # Drop any previously registered input data before inserting the new set
    result = self.__cleanInputData( jid )
    if not result['OK']:
      return S_ERROR( "Could not clean input data for job %d: %s" % ( jid, result[ 'Message' ] ) )
    for lfn in lfnData:
      vD = { 'JobID' : jid, 'LFN' : lfn }
      # Only store the metadata keys that are actually present
      metaDict = lfnData[ lfn ][ 'Metadata' ]
      for k in ( 'Checksum', 'CreationDate', 'ModificationDate', 'Size' ):
        if k in metaDict:
          vD[ k ] = metaDict[ k ]
      result = self.insertFields( "LFN", inDict = vD )
      if not result[ 'OK' ]:
        return S_ERROR( "Can not insert input data: %s" % result[ 'Message' ] )
      # Auto-generated LFNID links the replica rows to the LFN just inserted
      lfnid = result[ 'lastRowId' ]
      vL = []
      replicas = lfnData[ lfn ][ 'Replicas' ]
      for seName in replicas:
        result = self._escapeString( replicas[ seName ][ 'SURL' ] )
        if not result[ 'OK' ]:
          return result
        surl = result[ 'Value' ]
        disk = int( replicas[ seName ][ 'Disk' ] )
        result = self._escapeString( seName )
        if not result[ 'OK' ]:
          return result
        seName = result[ 'Value' ]
        vL.append( "%d, %s, %s, %d" % ( lfnid, seName, surl, disk ) )
      # Multi-row insert: value groups joined with '),(' between the brackets
      values = "),(".join( vL )
      cmd = "INSERT INTO Replicas ( LFNID, SEName, SURL, Disk ) VALUES ( %s )" % values
      result = self._update( cmd )
      if not result[ 'OK' ]:
        return S_ERROR( "Can not insert input data: %s" % result[ 'Message' ] )
    return S_OK()
#############################################################################
  def setOptimizerChain( self, jobID, optimizerList ):
    """ Store the optimizer chain for the given job as the 'OptimizerChain'
        optimizer parameter ( a comma-separated list of optimizer names ).

        NOTE(review): the historical docstring claimed the 'TaskQueue'
        optimizer is appended when missing, but this code does not do that —
        callers must include it in optimizerList themselves.
    """
    optString = ','.join( optimizerList )
    result = self.setJobOptParameter( jobID, 'OptimizerChain', optString )
    return result
#############################################################################
def setNextOptimizer( self, jobID, currentOptimizer ):
""" Set the job status to be processed by the next optimizer in the
chain
"""
result = self.getJobOptParameter( jobID, 'OptimizerChain' )
if not result['OK']:
return result
optListString = result['Value']
optList = optListString.split( ',' )
try:
sindex = None
for i in xrange( len( optList ) ):
if optList[i] == currentOptimizer:
sindex = i
if sindex >= 0:
if sindex < len( optList ) - 1:
nextOptimizer = optList[sindex + 1]
else:
return S_ERROR( 'Unexpected end of the Optimizer Chain' )
else:
return S_ERROR( 'Could not find ' + currentOptimizer + ' in chain' )
except ValueError:
return S_ERROR( 'The ' + currentOptimizer + ' not found in the chain' )
result = self.setJobStatus( jobID, status = "Checking", minor = nextOptimizer )
if not result[ 'OK' ]:
return result
return S_OK( nextOptimizer )
############################################################################
def countJobs( self, condDict, older = None, newer = None, timeStamp = 'LastUpdateTime' ):
""" Get the number of jobs matching conditions specified by condDict and time limits
"""
self.log.debug ( 'JobDB.countJobs: counting Jobs' )
return self.countEntries( 'Jobs', condDict, older = older, newer = newer, timeStamp = timeStamp )
#############################################################################
def selectJobs( self, condDict, older = None, newer = None, timeStamp = 'LastUpdateTime',
orderAttribute = None, limit = None ):
""" Select jobs matching the following conditions:
- condDict dictionary of required Key = Value pairs;
- with the last update date older and/or newer than given dates;
The result is ordered by JobID if requested, the result is limited to a given
number of jobs if requested.
"""
self.log.debug( 'JobDB.selectJobs: retrieving jobs.' )
res = self.getFields( 'Jobs', ['JobID'], condDict = condDict, limit = limit,
older = older, newer = newer, timeStamp = timeStamp, orderAttribute = orderAttribute )
if not res['OK']:
return res
if not len( res['Value'] ):
return S_OK( [] )
return S_OK( [ self._to_value( i ) for i in res['Value'] ] )
#############################################################################
def selectJobWithStatus( self, status ):
""" Get the list of jobs with a given Major Status
"""
return self.selectJobs( {'Status':status} )
#############################################################################
  def setJobAttribute( self, jobID, attrName, attrValue, update = False, myDate = None ):
    """ Set an attribute value for job specified by jobID.
        The LastUpdate time stamp is refreshed if explicitly requested

        :param attrName: attribute ( column ) name; NOT escaped here,
                         see the FIXME below
        :param attrValue: attribute value ( SQL-escaped before use )
        :param update: when True, also refresh the LastUpdateTime column
        :param myDate: when given, only apply the update if the current
                       LastUpdateTime is older than this date
    """
    ret = self._escapeString( jobID )
    if not ret['OK']:
      return ret
    jobID = ret['Value']
    ret = self._escapeString( attrValue )
    if not ret['OK']:
      return ret
    value = ret['Value']
    #FIXME: need to check the validity of attrName
    if update:
      cmd = "UPDATE Jobs SET %s=%s,LastUpdateTime=UTC_TIMESTAMP() WHERE JobID=%s" % ( attrName, value, jobID )
    else:
      cmd = "UPDATE Jobs SET %s=%s WHERE JobID=%s" % ( attrName, value, jobID )
    if myDate:
      cmd += ' AND LastUpdateTime < %s' % myDate
    res = self._update( cmd )
    if res['OK']:
      return res
    else:
      return S_ERROR( 'JobDB.setAttribute: failed to set attribute' )
#############################################################################
  def setJobAttributes( self, jobID, attrNames, attrValues, update = False, myDate = None ):
    """ Set one or more attribute values for the job specified by jobID.
        The LastUpdate time stamp is refreshed if explicitly requested.

        :param attrNames: list of attribute names ( NOT escaped, see FIXME )
        :param attrValues: list of values, parallel to attrNames
        :param update: when True, also refresh the LastUpdateTime column
        :param myDate: when given, only apply the update if the current
                       LastUpdateTime is older than this date
    """
    ret = self._escapeString( jobID )
    if not ret['OK']:
      return ret
    jobID = ret['Value']
    if len( attrNames ) != len( attrValues ):
      return S_ERROR( 'JobDB.setAttributes: incompatible Argument length' )
    # FIXME: Need to check the validity of attrNames
    attr = []
    for i in range( len( attrNames ) ):
      ret = self._escapeString( attrValues[i] )
      if not ret['OK']:
        return ret
      value = ret['Value']
      attr.append( "%s=%s" % ( attrNames[i], value ) )
    if update:
      attr.append( "LastUpdateTime=UTC_TIMESTAMP()" )
    if len( attr ) == 0:
      return S_ERROR( 'JobDB.setAttributes: Nothing to do' )
    cmd = 'UPDATE Jobs SET %s WHERE JobID=%s' % ( ', '.join( attr ), jobID )
    if myDate:
      cmd += ' AND LastUpdateTime < %s' % myDate
    res = self._update( cmd )
    if res['OK']:
      return res
    else:
      return S_ERROR( 'JobDB.setAttributes: failed to set attribute' )
#############################################################################
def setJobStatus( self, jobID, status = '', minor = '', application = '', appCounter = None ):
""" Set status of the job specified by its jobID
"""
# Do not update the LastUpdate time stamp if setting the Stalled status
update_flag = True
if status == "Stalled":
update_flag = False
attrNames = []
attrValues = []
if status:
attrNames.append( 'Status' )
attrValues.append( status )
if minor:
attrNames.append( 'MinorStatus' )
attrValues.append( minor )
if application:
attrNames.append( 'ApplicationStatus' )
attrValues.append( application )
if appCounter:
attrNames.append( 'ApplicationNumStatus' )
attrValues.append( appCounter )
result = self.setJobAttributes( jobID, attrNames, attrValues, update = update_flag )
if not result['OK']:
return result
return S_OK()
#############################################################################
def setEndExecTime( self, jobID, endDate = None ):
""" Set EndExecTime time stamp
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
if endDate:
ret = self._escapeString( endDate )
if not ret['OK']:
return ret
endDate = ret['Value']
req = "UPDATE Jobs SET EndExecTime=%s WHERE JobID=%s AND EndExecTime IS NULL" % ( endDate, jobID )
else:
req = "UPDATE Jobs SET EndExecTime=UTC_TIMESTAMP() WHERE JobID=%s AND EndExecTime IS NULL" % jobID
result = self._update( req )
return result
#############################################################################
def setStartExecTime( self, jobID, startDate = None ):
""" Set StartExecTime time stamp
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
if startDate:
ret = self._escapeString( startDate )
if not ret['OK']:
return ret
startDate = ret['Value']
req = "UPDATE Jobs SET StartExecTime=%s WHERE JobID=%s AND StartExecTime IS NULL" % ( startDate, jobID )
else:
req = "UPDATE Jobs SET StartExecTime=UTC_TIMESTAMP() WHERE JobID=%s AND StartExecTime IS NULL" % jobID
result = self._update( req )
return result
#############################################################################
def setJobParameter( self, jobID, key, value ):
""" Set a parameter specified by name,value pair for the job JobID
"""
ret = self._escapeString( key )
if not ret['OK']:
return ret
e_key = ret['Value']
ret = self._escapeString( value )
if not ret['OK']:
return ret
e_value = ret['Value']
cmd = 'REPLACE JobParameters (JobID,Name,Value) VALUES (%d,%s,%s)' % ( int( jobID ), e_key, e_value )
result = self._update( cmd )
if not result['OK']:
result = S_ERROR( 'JobDB.setJobParameter: operation failed.' )
return result
#############################################################################
def setJobParameters( self, jobID, parameters ):
""" Set parameters specified by a list of name/value pairs for the job JobID
"""
if not parameters:
return S_OK()
insertValueList = []
for name, value in parameters:
ret = self._escapeString( name )
if not ret['OK']:
return ret
e_name = ret['Value']
ret = self._escapeString( value )
if not ret['OK']:
return ret
e_value = ret['Value']
insertValueList.append( '(%s,%s,%s)' % ( jobID, e_name, e_value ) )
cmd = 'REPLACE JobParameters (JobID,Name,Value) VALUES %s' % ', '.join( insertValueList )
result = self._update( cmd )
if not result['OK']:
return S_ERROR( 'JobDB.setJobParameters: operation failed.' )
return result
#############################################################################
def setJobOptParameter( self, jobID, name, value ):
""" Set an optimzer parameter specified by name,value pair for the job JobID
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
e_jobID = ret['Value']
ret = self._escapeString( name )
if not ret['OK']:
return ret
e_name = ret['Value']
cmd = 'DELETE FROM OptimizerParameters WHERE JobID=%s AND Name=%s' % ( e_jobID, e_name )
if not self._update( cmd )['OK']:
result = S_ERROR( 'JobDB.setJobOptParameter: operation failed.' )
result = self.insertFields( 'OptimizerParameters', ['JobID', 'Name', 'Value'], [jobID, name, value] )
if not result['OK']:
return S_ERROR( 'JobDB.setJobOptParameter: operation failed.' )
return S_OK()
#############################################################################
def removeJobOptParameter( self, jobID, name ):
""" Remove the specified optimizer parameter for jobID
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
ret = self._escapeString( name )
if not ret['OK']:
return ret
name = ret['Value']
cmd = 'DELETE FROM OptimizerParameters WHERE JobID=%s AND Name=%s' % ( jobID, name )
if not self._update( cmd )['OK']:
return S_ERROR( 'JobDB.removeJobOptParameter: operation failed.' )
else:
return S_OK()
#############################################################################
def clearAtticParameters( self, jid ):
try:
jid = int( jid )
except ValueError:
return S_ERROR( "jid has to be a number" )
return self._update( "DELETE FROM AtticJobParameters WHERE JobID=%s" % jid )
#############################################################################
def setAtticParameters( self, jid, rescheduleCounter, paramsDict ):
if not paramsDict:
return S_OK()
try:
jid = int( jid )
rescheduleCounter = int( rescheduleCounter )
except ValueError:
return S_ERROR( "Invalud jid/reschedulecounter. Not a number" )
entries = []
for key in paramsDict:
ret = self._escapeString( paramsDict[ key ] )
if not ret['OK']:
return ret
value = ret['Value']
ret = self._escapeString( key )
if not ret['OK']:
return ret
key = ret['Value']
entries.append( "(%d,%d,%s,%s)" % ( jid, rescheduleCounter, key, value, ) )
return self._update( "INSERT INTO AtticJobParameters VALUES %s" % ",".join( entries ) )
#############################################################################
def setAtticJobParameter( self, jobID, key, value, rescheduleCounter ):
""" Set attic parameter for job specified by its jobID when job rescheduling
for later debugging
"""
ret = self._escapeString( jobID )
if not ret['OK']:
return ret
jobID = ret['Value']
ret = self._escapeString( key )
if not ret['OK']:
return ret
key = ret['Value']
ret = self._escapeString( value )
if not ret['OK']:
return ret
value = ret['Value']
ret = self._escapeString( rescheduleCounter )
if not ret['OK']:
return ret
rescheduleCounter = ret['Value']
cmd = 'INSERT INTO AtticJobParameters (JobID,RescheduleCycle,Name,Value) VALUES(%s,%s,%s,%s)' % \
( jobID, rescheduleCounter, key, value )
result = self._update( cmd )
if not result['OK']:
result = S_ERROR( 'JobDB.setAtticJobParameter: operation failed.' )
return result
#############################################################################
  def setJobJDL( self, jobID, jdl = None, originalJDL = None ):
    """ Insert JDL's for job specified by jobID

        :param jdl: current ( processed ) JDL, written to the JDL column
        :param originalJDL: JDL as submitted, written to the OriginalJDL column
        Either argument may be None, in which case that column is not touched.
    """
    ret = self._escapeString( jobID )
    if not ret['OK']:
      return ret
    jobID = ret['Value']
    ret = self._escapeString( jdl )
    if not ret['OK']:
      return ret
    e_JDL = ret['Value']
    ret = self._escapeString( originalJDL )
    if not ret['OK']:
      return ret
    e_originalJDL = ret['Value']
    # Probe for an existing row to decide between UPDATE and INSERT
    req = "SELECT OriginalJDL FROM JobJDLs WHERE JobID=%s" % jobID
    result = self._query( req )
    updateFlag = False
    if result['OK']:
      if len( result['Value'] ) > 0:
        updateFlag = True
    if jdl:
      if updateFlag:
        cmd = "UPDATE JobJDLs Set JDL=%s WHERE JobID=%s" % ( e_JDL, jobID )
      else:
        cmd = "INSERT INTO JobJDLs (JobID,JDL) VALUES (%s,%s)" % ( jobID, e_JDL )
      result = self._update( cmd )
      if not result['OK']:
        return result
    if originalJDL:
      if updateFlag:
        cmd = "UPDATE JobJDLs Set OriginalJDL=%s WHERE JobID=%s" % ( e_originalJDL, jobID )
      else:
        cmd = "INSERT INTO JobJDLs (JobID,OriginalJDL) VALUES (%s,%s)" % ( jobID, e_originalJDL )
      result = self._update( cmd )
    return result
#############################################################################
def __insertNewJDL( self, jdl ):
"""Insert a new JDL in the system, this produces a new JobID
"""
err = 'JobDB.__insertNewJDL: Failed to retrieve a new Id.'
result = self.insertFields( 'JobJDLs' , ['OriginalJDL'], [jdl] )
if not result['OK']:
self.log.error( 'Can not insert New JDL', result['Message'] )
return result
jid = result[ 'lastRowId' ]
return S_OK( jid )
#############################################################################
  def getJobJDL( self, jobID, original = False, status = '' ):
    """ Get JDL for job specified by its jobID. By default the current job JDL
        is returned. If 'original' argument is True, original JDL is returned

        :param status: when given, only match JobJDLs rows with this Status
    """
    ret = self._escapeString( jobID )
    if not ret['OK']:
      return ret
    jobID = ret['Value']
    ret = self._escapeString( status )
    if not ret['OK']:
      return ret
    e_status = ret['Value']
    if original:
      cmd = "SELECT OriginalJDL FROM JobJDLs WHERE JobID=%s" % jobID
    else:
      cmd = "SELECT JDL FROM JobJDLs WHERE JobID=%s" % jobID
    if status:
      cmd = cmd + " AND Status=%s" % e_status
    result = self._query( cmd )
    if result['OK']:
      jdl = result['Value']
      # Empty selection: hand the empty result back unchanged
      if not jdl:
        return S_OK( jdl )
      else:
        # First column of the first ( only ) selected row is the JDL text
        return S_OK( result['Value'][0][0] )
    else:
      return result
def getJobsInHerd( self, jid ):
try:
jid = int( jid )
except ValueError:
return S_ERROR( "jid has to be a number" % jid )
result = self._query( "SELECT JobID FROM Jobs WHERE MasterJobID=%d" % jid )
if not result[ 'OK' ]:
return result
return S_OK( [ row[0] for row in result[ 'Value' ] ] )
  def insertSplittedManifests( self, jid, manifests ):
    """ Replace job 'jid' with the first of the splitted manifests and create
        new sibling jobs for the rest, all sharing 'jid' as the master job.
        Runs inside a DB transaction; returns S_OK( list of job IDs ).

        NOTE(review): 'self.transaction' is assumed to be a context manager
        yielding a commit callable — confirm against the base DB class.
    """
    jidList = [ jid ]
    with self.transaction as commit:
      # Preserve the original ( pre-split ) manifest in MasterJDLs
      result = self.getJobJDL( jid, original = True )
      if not result[ 'OK' ]:
        return result
      sourceManifest = result[ 'Value' ]
      result = self.insertFields( 'MasterJDLs', inDict = { 'JobID' : jid,
                                                           'JDL' : sourceManifest } )
      if not result[ 'OK' ]:
        return result
      # The first manifest is re-assigned to the original job id
      jobManifest = manifests[0]
      manifestJDL = jobManifest.dumpAsJDL()
      result = self.updateFields( 'JobJDLs',
                                  condDict = { 'JobID' : jid },
                                  updateDict = { 'JDL' : manifestJDL,
                                                 'OriginalJDL' : manifestJDL,
                                                 'JobRequirements' : '' } )
      if not result[ 'OK' ]:
        return result
      #Get source job input data
      result = self.getInputData( jid )
      if not result[ 'OK' ]:
        return result
      sourceInputData = result[ 'Value' ]
      #Reset source Job Values
      upDict = { 'Status' : 'Received',
                 'MinorStatus' : 'Job accepted',
                 'ApplicationStatus' : 'Unknown',
                 'HerdState' : 'Splitted' }
      # The manifest 'Priority' option maps to the 'UserPriority' attribute
      for name in ( 'JobName', 'JobType', 'JobGroup', 'Priority' ):
        value = jobManifest.getOption( name )
        if name == 'Priority':
          name = 'UserPriority'
        if value:
          upDict[ name ] = value
      result = self.updateFields( 'Jobs',
                                  condDict = { 'JobID' : jid },
                                  updateDict = upDict )
      if not result[ 'OK' ]:
        return result
      #Reduce source job input data
      if sourceInputData:
        inputData = {}
        for lfn in jobManifest.getOption( "InputData", [] ):
          if lfn not in sourceInputData:
            return S_ERROR( "LFN in splitted manifest does not exist in the original: %s" % lfn )
          inputData[ lfn ] = dict( sourceInputData[ lfn ] )
        result = self.setInputData( jid, inputData )
        if not result[ 'OK' ]:
          return result
      #Get job attributes to copy them to children jobs
      result = self.getJobAttributes( jid, [ 'Owner', 'OwnerDN', 'OwnerGroup', 'DIRACSetup' ] )
      if not result[ 'OK' ]:
        return result
      attrs = result[ 'Value' ]
      #Do the magic!
      # Each remaining manifest becomes a new job with 'jid' as master
      for manifest in manifests[1:]:
        result = self.insertNewJobIntoDB( manifest.dumpAsJDL(), attrs[ 'Owner' ], attrs[ 'OwnerDN' ],
                                          attrs[ 'OwnerGroup' ], attrs[ 'DIRACSetup' ], jid )
        if not result[ 'OK' ]:
          return result
        jidList.append( result[ 'Value' ] )
        if sourceInputData:
          inputData = {}
          for lfn in manifest.getOption( "InputData", [] ):
            if lfn not in sourceInputData:
              return S_ERROR( "LFN in splitted manifest does not exist in the original: %s" % lfn )
            inputData[ lfn ] = dict( sourceInputData[ lfn ] )
          result = self.setInputData( jidList[-1], inputData )
          if not result[ 'OK' ]:
            return result
      commit()
    return S_OK( jidList )
#############################################################################
  def insertNewJobIntoDB( self, jdl, owner, ownerDN, ownerGroup, diracSetup, parentJob = None ):
    """ Insert the initial JDL into the Job database,
        Do initial JDL crosscheck,
        Set Initial job Attributes and Status

        :param jdl: job description ( JDL string )
        :param owner: owner name stored with the job
        :param ownerDN: owner certificate DN
        :param ownerGroup: owner group
        :param diracSetup: DIRAC setup name
        :param parentJob: master job ID for splitted jobs; the new job is its
                          own master when not given
        :return: S_OK( jid ) extended with 'JobID', 'Status', 'MinorStatus'
    """
    jobManifest = JobManifest()
    result = jobManifest.load( jdl )
    if not result['OK']:
      return result
    jobManifest.setOptionsFromDict( { 'OwnerName' : owner,
                                      'OwnerDN' : ownerDN,
                                      'OwnerGroup' : ownerGroup,
                                      'DIRACSetup' : diracSetup } )
    result = jobManifest.check()
    if not result['OK']:
      return result
    # 1.- insert original JDL on DB and get new JobID
    # Fix the possible lack of the brackets in the JDL
    if jdl.strip()[0].find( '[' ) != 0 :
      jdl = '[' + jdl + ']'
    result = self.__insertNewJDL( jdl )
    if not result[ 'OK' ]:
      return S_ERROR( 'Can not insert manifest into DB: %s' % result[ 'Message' ] )
    jid = result[ 'Value' ]
    result = self.__checkAndPrepareManifest( jobManifest, jid,
                                             owner, ownerDN,
                                             ownerGroup, diracSetup )
    if not result['OK']:
      return result
    # JobRequirements are not kept in the stored, user-visible JDL
    jobManifest.remove( 'JobRequirements' )
    result = self.setJobJDL( jid, jobManifest.dumpAsJDL() )
    if not result['OK']:
      return result
    attrs = {}
    attrs[ 'JobID' ] = jid
    attrs[ 'LastUpdateTime' ] = Time.toString()
    attrs[ 'SubmissionTime' ] = Time.toString()
    attrs[ 'Owner' ] = owner
    attrs[ 'OwnerDN' ] = ownerDN
    attrs[ 'OwnerGroup' ] = ownerGroup
    attrs[ 'DIRACSetup' ] = diracSetup
    attrs[ 'VerifiedFlag' ] = True
    attrs[ 'Status' ] = 'Received'
    attrs[ 'MinorStatus' ] = 'Job accepted'
    # Site attribute: 'ANY' when unspecified, 'Multiple' when several sites
    site = jobManifest.getOption( 'Site', [] )
    if not site:
      attrs[ 'Site' ] = 'ANY'
    elif len( site ) > 1:
      attrs[ 'Site' ] = 'Multiple'
    else:
      attrs[ 'Site' ] = site[0]
    if jobManifest.getOption( "Splitter", "" ):
      attrs[ 'HerdState' ] = "WillSplit"
    # A job without an explicit parent is its own master
    if parentJob == None:
      parentJob = jid
    attrs[ 'MasterJobID' ] = parentJob
    # The manifest 'Priority' option maps to the 'UserPriority' attribute
    for name in ( 'JobName', 'JobType', 'JobGroup', 'Priority' ):
      value = jobManifest.getOption( name )
      if name == 'Priority':
        name = 'UserPriority'
      if value:
        attrs[ name ] = value
    result = self.insertFields( 'Jobs', inDict = attrs )
    if not result['OK']:
      return result
    result = S_OK( jid )
    result[ 'JobID' ] = jid
    result[ 'Status' ] = 'Received'
    result[ 'MinorStatus' ] = 'Job accepted'
    return result
#############################################################################
  def __checkAndPrepareManifest( self, jobManifest, jid, owner, ownerDN,
                                 ownerGroup, DIRACSetup ):
    """
    Check Consistency of Submitted JDL and set some defaults
    Prepare subJDL with Job Requirements

    :param jobManifest: JobManifest instance to validate and decorate
    :param jid: job ID stamped into the manifest
    """
    VO = Registry.getVOForGroup( ownerGroup )
    manifestData = { 'OwnerName' : owner, 'OwnerDN': ownerDN, 'OwnerGroup' : ownerGroup,
                     'DIRACSetup' : DIRACSetup, 'JobID' : jid, 'VirtualOrganization' : VO }
    jobManifest.setOptionsFromDict( manifestData )
    # Default SubmitPools from the VO registry when not given by the user
    if not jobManifest.isOption( 'SubmitPools' ):
      submitPools = Registry.getVOOption( VO, 'SubmitPools' )
      if submitPools:
        jobManifest.setOption( 'SubmitPools', submitPools )
    # VO policy options are applied only where the manifest has no value yet
    voPolicyDict = gConfig.getOptionsDict( '/DIRAC/VOPolicy/%s/%s' % ( VO, DIRACSetup ) )
    if voPolicyDict['OK']:
      voPolicy = voPolicyDict['Value']
      for k in voPolicy:
        if not jobManifest.isOption( k ):
          jobManifest.setOption( k, voPolicy[ k ] )
    jobManifest.remove( "JobRequirements" )
    # Legacy check to suite the LHCb logic
    #if not systemConfig:
    #  systemConfig = classAdJob.getAttributeString( 'SystemConfig' )
    result = jobManifest.check()
    if not result['OK']:
      return result
    jobManifest.expand()
    return S_OK()
#############################################################################
def removeJobFromDB( self, jobIDs ):
    """Remove job from DB
    Remove job from the Job DB and clean up all the job related data
    in various tables

    :param jobIDs: a single job ID or a list of job IDs
    :return: S_OK(), or S_ERROR carrying a 'FailedTables' entry listing
             the tables whose cleanup failed
    """
    #ret = self._escapeString(jobID)
    #if not ret['OK']:
    #  return ret
    #e_jobID = ret['Value']
    # Accept both a single ID and a list of IDs
    if type( jobIDs ) != type( [] ):
        jobIDList = [jobIDs]
    else:
        jobIDList = jobIDs
    failedTablesList = []
    # int() both validates the IDs and guards against SQL injection
    jobIDString = ','.join( [str( int( j ) ) for j in jobIDList] )
    # Remove LFN/Replicas rows linked to these jobs in one multi-table delete
    cmd = "DELETE LFN, Replicas FROM LFN, Replicas WHERE Replicas.LFNID = LFN.LFNID AND LFN.JobID in (%s)" % jobIDString
    result = self._update( cmd )
    if not result[ 'OK' ]:
        return result
    # Clean every per-job table; remember failures but keep going
    for table in ( 'JobJDLs',
                   'InputData',
                   'JobParameters',
                   'AtticJobParameters',
                   'HeartBeatLoggingInfo',
                   'OptimizerParameters',
                   'Jobs',
                   'MasterJDLs'
                 ):
        cmd = 'DELETE FROM %s WHERE JobID in (%s)' % ( table, jobIDString )
        result = self._update( cmd )
        if not result['OK']:
            failedTablesList.append( table )
    result = S_OK()
    #if failedSubjobList:
    #  result = S_ERROR( 'Errors while job removal' )
    #  result['FailedSubjobs'] = failedSubjobList
    if failedTablesList:
        result = S_ERROR( 'Errors while job removal' )
        result['FailedTables'] = failedTablesList
    return result
#################################################################
def rescheduleJobs( self, jobIDs ):
    """ Reschedule all the jobs in the given list

    :param jobIDs: list of job IDs to reschedule
    :return: S_OK() if every job was rescheduled, otherwise S_ERROR with
             a 'FailedJobs' entry listing the job IDs that failed
    """
    failedJobs = []
    for jobID in jobIDs:
        result = self.rescheduleJob( jobID )
        if not result['OK']:
            failedJobs.append( jobID )
    # Bug fix: the error result used to be built and then discarded by an
    # unconditional `return S_OK()`; propagate the failure to the caller.
    if failedJobs:
        result = S_ERROR( 'JobDB.rescheduleJobs: Not all the jobs were rescheduled' )
        result['FailedJobs'] = failedJobs
        return result
    return S_OK()
#############################################################################
def __failJob( self, jid, minor, errMsg ):
    """ Mark job *jid* as Failed with minor status *minor* and build the
        S_ERROR( errMsg ) structure to return to the caller.  When the
        status update itself succeeded, the error structure also carries
        the final 'Status'/'MinorStatus' values.
    """
    statusRes = self.setJobStatus( jid, status = 'Failed', minor = minor )
    errorRes = S_ERROR( errMsg )
    if statusRes[ 'OK' ]:
        errorRes[ 'Status' ] = 'Failed'
        errorRes[ 'MinorStatus' ] = minor
    return errorRes
#############################################################################
def resetJob( self, jid ):
    """ Reset the reschedule counter and clear the attic parameters of a
        job, then reschedule it from scratch.
    """
    counterRes = self.setJobAttributes( jid, [ 'RescheduleCounter' ], [ -1 ] )
    if not counterRes[ 'OK' ]:
        return self.__failJob( jid, "Error resetting job",
                               "Error setting reschedule counter: %s" % counterRes[ 'Message' ] )
    atticRes = self.clearAtticParameters( jid )
    if not atticRes[ 'OK' ]:
        return self.__failJob( jid, "Error cleaning attic",
                               "Error cleaning attic: %s" % atticRes[ 'Message' ] )
    return self.rescheduleJob( jid )
#############################################################################
def rescheduleJob ( self, jid ):
    """ Reschedule the given job to run again from scratch. Retain the already
        defined parameters in the parameter Attic

    :param jid: job ID (anything int() accepts)
    :return: S_OK( jid ) with JobID/InputData/RescheduleCounter/Status/
             MinorStatus entries, or an S_ERROR (via __failJob for most
             failure paths)
    """
    try:
        jid = int( jid )
    except ValueError:
        return S_ERROR( "jid is not an number! Get out!" )
    # Check Verified Flag
    result = self.getJobAttributes( jid, ['Status', 'MinorStatus', 'VerifiedFlag', 'RescheduleCounter',
                                          'Owner', 'OwnerDN', 'OwnerGroup', 'DIRACSetup'] )
    if not result['OK']:
        return S_ERROR( 'JobDB.getJobAttributes: Job %d does not exist' % jid )
    attrs = result['Value']
    # Only jobs that passed verification may be rescheduled
    if not attrs['VerifiedFlag']:
        return S_ERROR( 'Job %s not Verified: Status = %s, MinorStatus = %s' % (
                        jid,
                        attrs['Status'],
                        attrs['MinorStatus'] ) )
    # Check the Reschedule counter first
    rescheduleCounter = int( attrs['RescheduleCounter'] ) + 1
    self.maxRescheduling = gConfig.getValue( self.cs_path + '/MaxRescheduling', self.maxRescheduling )
    # Exit if the limit of the reschedulings is reached
    if rescheduleCounter > self.maxRescheduling:
        self.log.warn( 'Maximum number of reschedulings is reached', 'Job %s' % jid )
        return self.__failJob( jid, 'Maximum of reschedulings reached',
                               'Maximum number of reschedulings is reached: %s' % self.maxRescheduling )
    # Save the job parameters for later debugging
    result = self.getJobParameters( jid )
    if result['OK']:
        parDict = result['Value']
        result = self.setAtticParameters( jid, rescheduleCounter - 1, parDict )
        if not result['OK']:
            return self.__failJob( jid, "Error setting parameter", "Can't set attic parameter: %s" % result[ 'Message' ] )
    for tableName in ( 'JobParameters', 'OptimizerParameters' ):
        # Bug fix: the table name was hard-coded to JobParameters, so the
        # OptimizerParameters table was never actually cleaned.
        cmd = 'DELETE FROM %s WHERE JobID=%d' % ( tableName, jid )
        res = self._update( cmd )
        if not res['OK']:
            return self.__failJob( jid, "Error cleaning %s" % tableName,
                                   "Can't clean %s: %s" % ( tableName, res[ 'Message' ] ) )
    result = self.__cleanInputData( jid )
    if not result[ 'OK' ]:
        return self.__failJob( jid, "Error rescheduling", "Can't clean input data: %s" % result[ 'Message' ] )
    # the Jobreceiver needs to know if there is InputData ??? to decide which optimizer to call
    # proposal: - use the getInputData method
    res = self.getJobJDL( jid, original = True )
    if not res['OK']:
        # Bug fix: error results carry 'Message', not 'Value'
        return self.__failJob( jid, "Error getting JDL", "Can't retrieve JDL: %s" % res[ 'Message' ] )
    jdl = res['Value']
    # Fix the possible lack of the brackets in the JDL
    # (also guard against an empty JDL, which used to raise IndexError)
    if not jdl.strip() or jdl.strip()[0].find( '[' ) != 0 :
        jdl = '[%s]' % jdl
    jobManifest = JobManifest()
    jobManifest.loadJDL( jdl )
    result = self.__checkAndPrepareManifest( jobManifest, jid, attrs['Owner'],
                                             attrs['OwnerDN'], attrs['OwnerGroup'],
                                             attrs['DIRACSetup'] )
    if not result['OK']:
        # Bug fix: report the message of the failing call, not stale 'res'
        return self.__failJob( jid, "Error prepare JDL", "Can't prepare JDL: %s" % result[ 'Message' ] )
    result = self.setJobJDL( jid, jobManifest.dumpAsJDL() )
    if not result['OK']:
        return self.__failJob( jid, "Error setting JDL", "Can't set JDL: %s" % result[ 'Message' ] )
    # Reset the job attributes for the fresh run
    attrs = {}
    attrs[ 'RescheduleCounter' ] = rescheduleCounter
    attrs[ 'Status' ] = 'Received'
    attrs[ 'MinorStatus' ] = 'Job accepted'
    attrs[ 'ApplicationStatus' ] = 'Unknown'
    attrs[ 'ApplicationNumStatus' ] = 0
    attrs[ 'RescheduleTime' ] = Time.toString()
    attrs[ 'LastUpdateTime' ] = Time.toString()
    attrs[ 'SubmissionTime' ] = Time.toString()
    site = jobManifest.getOption( 'Site', [] )
    if not site:
        attrs[ 'Site' ] = 'ANY'
    elif len( site ) > 1:
        attrs[ 'Site' ] = 'Multiple'
    else:
        attrs[ 'Site' ] = site[0]
    result = self.updateFields( 'Jobs', condDict = { 'JobID' : jid },
                                updateDict = attrs )
    if not result['OK']:
        # Bug fix: report the message of the failing update, not stale 'res'
        return self.__failJob( jid, "Error setting attrs", "Can't set attributes: %s" % result[ 'Message' ] )
    retVal = S_OK( jid )
    retVal['JobID'] = jid
    retVal['InputData'] = jobManifest.getOption( "InputData" )
    retVal['RescheduleCounter'] = rescheduleCounter
    retVal['Status'] = 'Received'
    retVal['MinorStatus'] = 'Job Rescheduled'
    return retVal
#############################################################################
def setSandboxReady( self, jobID, stype = 'InputSandbox' ):
    """ Set the sandbox status ready for the job with jobID

    :param jobID: job identifier (escaped before use in SQL)
    :param stype: 'InputSandbox' or 'OutputSandbox'
    :return: result of the SQL update, or S_ERROR for an unknown type
    """
    escaped = self._escapeString( jobID )
    if not escaped['OK']:
        return escaped
    e_jobID = escaped['Value']
    # Map the sandbox type onto the corresponding Jobs table flag column
    fieldMap = { "InputSandbox" : "ISandboxReadyFlag",
                 "OutputSandbox" : "OSandboxReadyFlag" }
    if stype not in fieldMap:
        return S_ERROR( 'Illegal Sandbox type: ' + stype )
    cmd = "UPDATE Jobs SET %s='True' WHERE JobID=%s" % ( fieldMap[stype], e_jobID )
    return self._update( cmd )
#################################################################################
def getSiteSummary( self ):
    """ Get the summary of jobs in a given status on all the sites

    :return: S_OK( dict ) mapping each site to { state : count } for the
             Waiting/Running/Stalled/Done/Failed states, plus a 'Total'
             entry aggregated over all sites.  The pseudo-site "ANY" is
             skipped.
    """
    # Several "pre-running" states are lumped together as 'Waiting'
    waitingList = ['"Submitted"', '"Assigned"', '"Waiting"', '"Matched"']
    waitingString = ','.join( waitingList )
    result = self.getDistinctJobAttributes( 'Site' )
    if not result['OK']:
        return result
    siteList = result['Value']
    siteDict = {}
    totalDict = {'Waiting':0, 'Running':0, 'Stalled':0, 'Done':0, 'Failed':0}
    for site in siteList:
        if site == "ANY":
            continue
        # Waiting
        siteDict[site] = {}
        ret = self._escapeString( site )
        if not ret['OK']:
            return ret
        e_site = ret['Value']
        req = "SELECT COUNT(JobID) FROM Jobs WHERE Status IN (%s) AND Site=%s" % ( waitingString, e_site )
        result = self._query( req )
        if result['OK']:
            count = result['Value'][0][0]
        else:
            return S_ERROR( 'Failed to get Site data from the JobDB' )
        siteDict[site]['Waiting'] = count
        totalDict['Waiting'] += count
        # Running,Stalled,Done,Failed
        for status in ['"Running"', '"Stalled"', '"Done"', '"Failed"']:
            req = "SELECT COUNT(JobID) FROM Jobs WHERE Status=%s AND Site=%s" % ( status, e_site )
            result = self._query( req )
            if result['OK']:
                count = result['Value'][0][0]
            else:
                return S_ERROR( 'Failed to get Site data from the JobDB' )
            # Strip the SQL quoting to use the plain state name as key
            siteDict[site][status.replace( '"', '' )] = count
            totalDict[status.replace( '"', '' )] += count
    siteDict['Total'] = totalDict
    return S_OK( siteDict )
#################################################################################
def getSiteSummaryWeb( self, selectDict, sortList, startItem, maxItems ):
    """ Get the summary of jobs in a given status on all the sites in the standard Web form

    :param selectDict: web-portal selection dict; 'LastUpdateTime' is
                       consumed as a time cut, remaining keys filter records
    :param sortList: list of ( parameterName, 'ASC'|'DESC' ); only the
                     first entry is used
    :param startItem: index of the first record to return
    :param maxItems: maximum number of records (0 means all)
    :return: S_OK( dict ) with ParameterNames/Records/TotalRecords/Extras
    """
    paramNames = ['Site', 'GridType', 'Country', 'Tier', 'MaskStatus']
    paramNames += JOB_STATES
    paramNames += ['Efficiency', 'Status']
    # Sites reported as Tier-1 in the 'Tier' column
    siteT1List = ['CERN', 'IN2P3', 'NIKHEF', 'PIC', 'CNAF', 'RAL', 'GRIDKA']
    # Sort out records as requested
    sortItem = -1
    sortOrder = "ASC"
    if sortList:
        item = sortList[0][0] # only one item for the moment
        sortItem = paramNames.index( item )
        sortOrder = sortList[0][1]
    last_update = None
    if selectDict.has_key( 'LastUpdateTime' ):
        last_update = selectDict['LastUpdateTime']
        del selectDict['LastUpdateTime']
    # Non-final states: counted since last_update (if given)
    result = self.getCounters( 'Jobs', ['Site', 'Status'],
                               {}, newer = last_update,
                               timeStamp = 'LastUpdateTime' )
    # Final states: counted over the last day only
    last_day = Time.dateTime() - Time.day
    resultDay = self.getCounters( 'Jobs', ['Site', 'Status'],
                                  {}, newer = last_day,
                                  timeStamp = 'EndExecTime' )
    # Get the site mask status: NoMask, overridden by Active, then Banned
    siteStatus = SiteStatus()
    siteMask = {}
    resultMask = getSites( fullName = True )
    if resultMask['OK']:
        for site in resultMask['Value']:
            siteMask[site] = 'NoMask'
    resultMask = siteStatus.getUsableSites( 'ComputingAccess' )
    if resultMask['OK']:
        for site in resultMask['Value']:
            siteMask[site] = 'Active'
    resultMask = siteStatus.getUnusableSites( 'ComputingAccess' )
    if resultMask['OK']:
        for site in resultMask['Value']:
            siteMask[site] = 'Banned'
    # Sort out different counters
    resultDict = {}
    if result['OK']:
        for attDict, count in result['Value']:
            siteFullName = attDict['Site']
            status = attDict['Status']
            if not resultDict.has_key( siteFullName ):
                resultDict[siteFullName] = {}
                for state in JOB_STATES:
                    resultDict[siteFullName][state] = 0
            if status not in JOB_FINAL_STATES:
                resultDict[siteFullName][status] = count
    if resultDay['OK']:
        for attDict, count in resultDay['Value']:
            siteFullName = attDict['Site']
            if not resultDict.has_key( siteFullName ):
                resultDict[siteFullName] = {}
                for state in JOB_STATES:
                    resultDict[siteFullName][state] = 0
            status = attDict['Status']
            if status in JOB_FINAL_STATES:
                resultDict[siteFullName][status] = count
    # Collect records now
    records = []
    countryCounts = {}
    for siteFullName in resultDict:
        siteDict = resultDict[siteFullName]
        # Site names are expected as <grid>.<site>.<country>
        # NOTE(review): a dotted name with other than three parts would
        # raise ValueError here — confirm upstream guarantees the format.
        if siteFullName.find( '.' ) != -1:
            grid, site, country = siteFullName.split( '.' )
        else:
            grid, site, country = 'Unknown', 'Unknown', 'Unknown'
        tier = 'Tier-2'
        if site in siteT1List:
            tier = 'Tier-1'
        if not countryCounts.has_key( country ):
            countryCounts[country] = {}
            for state in JOB_STATES:
                countryCounts[country][state] = 0
        rList = [siteFullName, grid, country, tier]
        if siteMask.has_key( siteFullName ):
            rList.append( siteMask[siteFullName] )
        else:
            rList.append( 'NoMask' )
        for status in JOB_STATES:
            rList.append( siteDict[status] )
            countryCounts[country][status] += siteDict[status]
        efficiency = 0
        total_finished = 0
        for state in JOB_FINAL_STATES:
            total_finished += resultDict[siteFullName][state]
        if total_finished > 0:
            # NOTE(review): assumes 'Done' and 'Completed' are members of
            # JOB_STATES so both keys exist — confirm against JOB_STATES.
            efficiency = float( siteDict['Done'] + siteDict['Completed'] ) / float( total_finished )
        rList.append( '%.1f' % ( efficiency * 100. ) )
        # Estimate the site verbose status
        if efficiency > 0.95:
            rList.append( 'Good' )
        elif efficiency > 0.80:
            rList.append( 'Fair' )
        elif efficiency > 0.60:
            rList.append( 'Poor' )
        elif total_finished == 0:
            rList.append( 'Idle' )
        else:
            rList.append( 'Bad' )
        records.append( rList )
    # Select records as requested
    if selectDict:
        for item in selectDict:
            selectItem = paramNames.index( item )
            values = selectDict[item]
            if type( values ) != type( [] ):
                values = [values]
            # Walk indices backwards so deletions don't shift later ones
            indices = range( len( records ) )
            indices.reverse()
            for ind in indices:
                if records[ind][selectItem] not in values:
                    del records[ind]
    # Sort records as requested
    if sortItem != -1 :
        if sortOrder.lower() == "asc":
            records.sort( key = operator.itemgetter( sortItem ) )
        else:
            records.sort( key = operator.itemgetter( sortItem ), reverse = True )
    # Collect the final result
    finalDict = {}
    finalDict['ParameterNames'] = paramNames
    # Return all the records if maxItems == 0 or the specified number otherwise
    if maxItems:
        if startItem + maxItems > len( records ):
            finalDict['Records'] = records[startItem:]
        else:
            finalDict['Records'] = records[startItem:startItem + maxItems]
    else:
        finalDict['Records'] = records
    finalDict['TotalRecords'] = len( records )
    finalDict['Extras'] = countryCounts
    return S_OK( finalDict )
#################################################################################
def getUserSummaryWeb( self, selectDict, sortList, startItem, maxItems ):
    """ Get the summary of user jobs in a standard form for the Web portal.
        Pagination and global sorting is supported.

    :param selectDict: selection dict; 'LastUpdateTime' is consumed as a
                       time cut and 'Owner' is translated to an OwnerDN filter
    :param sortList: list of ( parameterName, 'ASC'|'DESC' ); only the
                     first entry is used
    :param startItem: index of the first record to return
    :param maxItems: maximum number of records (0 means all)
    :return: S_OK( dict ) with ParameterNames/Records/TotalRecords
    """
    paramNames = ['Owner', 'OwnerDN', 'OwnerGroup']
    paramNames += JOB_STATES
    paramNames += ['TotalJobs']
    # Sort out records as requested
    sortItem = -1
    sortOrder = "ASC"
    if sortList:
        item = sortList[0][0] # only one item for the moment
        sortItem = paramNames.index( item )
        sortOrder = sortList[0][1]
    last_update = None
    if selectDict.has_key( 'LastUpdateTime' ):
        last_update = selectDict['LastUpdateTime']
        del selectDict['LastUpdateTime']
    if selectDict.has_key( 'Owner' ):
        username = selectDict['Owner']
        del selectDict['Owner']
        result = Registry.getDNForUsername( username )
        if result['OK']:
            selectDict['OwnerDN'] = result['Value']
        else:
            return S_ERROR( 'Unknown user %s' % username )
    result = self.getCounters( 'Jobs', ['OwnerDN', 'OwnerGroup', 'Status'],
                               selectDict, newer = last_update,
                               timeStamp = 'LastUpdateTime' )
    # Bug fix: the query result was used without checking for success,
    # which raised on DB errors (getSiteSummaryWeb checks 'OK' — be
    # consistent with it).
    if not result['OK']:
        return result
    last_day = Time.dateTime() - Time.day
    resultDay = self.getCounters( 'Jobs', ['OwnerDN', 'OwnerGroup', 'Status'],
                                  selectDict, newer = last_day,
                                  timeStamp = 'EndExecTime' )
    # Sort out different counters
    resultDict = {}
    for attDict, count in result['Value']:
        owner = attDict['OwnerDN']
        group = attDict['OwnerGroup']
        status = attDict['Status']
        if not resultDict.has_key( owner ):
            resultDict[owner] = {}
        if not resultDict[owner].has_key( group ):
            resultDict[owner][group] = {}
            for state in JOB_STATES:
                resultDict[owner][group][state] = 0
        resultDict[owner][group][status] = count
    # Final states are taken from the one-day counters only
    if resultDay['OK']:
        for attDict, count in resultDay['Value']:
            owner = attDict['OwnerDN']
            group = attDict['OwnerGroup']
            status = attDict['Status']
            # Bug fix: initialize owner/group combinations that appear
            # only in the one-day counters (previously a KeyError).
            if not resultDict.has_key( owner ):
                resultDict[owner] = {}
            if not resultDict[owner].has_key( group ):
                resultDict[owner][group] = {}
                for state in JOB_STATES:
                    resultDict[owner][group][state] = 0
            if status in JOB_FINAL_STATES:
                resultDict[owner][group][status] = count
    # Collect records now
    records = []
    totalUser = {}
    for owner in resultDict:
        totalUser[owner] = 0
        for group in resultDict[owner]:
            result = Registry.getUsernameForDN( owner )
            if result['OK']:
                username = result['Value']
            else:
                username = 'Unknown'
            rList = [username, owner, group]
            count = 0
            for state in JOB_STATES:
                s_count = resultDict[owner][group][state]
                rList.append( s_count )
                count += s_count
            rList.append( count )
            records.append( rList )
            totalUser[owner] += count
    # Sort out records
    if sortItem != -1 :
        if sortOrder.lower() == "asc":
            records.sort( key = operator.itemgetter( sortItem ) )
        else:
            records.sort( key = operator.itemgetter( sortItem ), reverse = True )
    # Collect the final result
    finalDict = {}
    finalDict['ParameterNames'] = paramNames
    # Return all the records if maxItems == 0 or the specified number otherwise
    if maxItems:
        if startItem + maxItems > len( records ):
            finalDict['Records'] = records[startItem:]
        else:
            finalDict['Records'] = records[startItem:startItem + maxItems]
    else:
        finalDict['Records'] = records
    finalDict['TotalRecords'] = len( records )
    return S_OK( finalDict )
#####################################################################################
def setHeartBeatData( self, jobID, staticDataDict, dynamicDataDict ):
    """ Add the job's heart beat data to the database

    Updates the heartbeat timestamp (forcing the job to 'Running'),
    stores the static items as job parameters and appends the dynamic
    items to the HeartBeatLoggingInfo table.

    :param jobID: job identifier
    :param staticDataDict: name -> value items stored as job parameters
    :param dynamicDataDict: name -> value items logged with a timestamp
    :return: S_OK() if everything was stored, S_ERROR otherwise
    """
    # Set the time stamp first
    ret = self._escapeString( jobID )
    if not ret['OK']:
        return ret
    e_jobID = ret['Value']
    req = "UPDATE Jobs SET HeartBeatTime=UTC_TIMESTAMP(), Status='Running' WHERE JobID=%s" % e_jobID
    result = self._update( req )
    if not result['OK']:
        return S_ERROR( 'Failed to set the heart beat time: ' + result['Message'] )
    ok = True
    # FIXME: It is rather not optimal to use parameters to store the heartbeat info, must find a proper solution
    # Add static data items as job parameters
    result = self.setJobParameters( jobID, staticDataDict.items() )
    if not result['OK']:
        ok = False
        self.log.warn( result['Message'] )
    # Add dynamic data to the job heart beat log
    # start = time.time()
    valueList = []
    for key, value in dynamicDataDict.items():
        result = self._escapeString( key )
        if not result['OK']:
            # skip items that cannot be safely escaped, but keep going
            self.log.warn( 'Failed to escape string ' + key )
            continue
        e_key = result['Value']
        result = self._escapeString( value )
        if not result['OK']:
            self.log.warn( 'Failed to escape string ' + value )
            continue
        e_value = result['Value']
        valueList.append( "( %s, %s,%s,UTC_TIMESTAMP())" % ( e_jobID, e_key, e_value ) )
    if valueList:
        # Insert all dynamic items in a single multi-row statement
        valueString = ','.join( valueList )
        req = "INSERT INTO HeartBeatLoggingInfo (JobID,Name,Value,HeartBeatTime) VALUES "
        req += valueString
        result = self._update( req )
        if not result['OK']:
            ok = False
            self.log.warn( result['Message'] )
    if ok:
        return S_OK()
    else:
        return S_ERROR( 'Failed to store some or all the parameters' )
#####################################################################################
def getHeartBeatData( self, jobID ):
    """ Retrieve the job's heart beat data

    :param jobID: job identifier
    :return: S_OK( list ) of ( name, value, timestamp ) string tuples;
             values are rendered with one decimal digit
    """
    escaped = self._escapeString( jobID )
    if not escaped['OK']:
        return escaped
    e_jobID = escaped['Value']
    cmd = 'SELECT Name,Value,HeartBeatTime from HeartBeatLoggingInfo WHERE JobID=%s' % e_jobID
    res = self._query( cmd )
    if not res['OK']:
        return res
    rows = res['Value']
    if len( rows ) == 0:
        return S_OK ( [] )
    beats = []
    for name, value, stamp in rows:
        # values may come back quoted; strip quotes before converting
        beats.append( ( str( name ),
                        '%.01f' % float( value.replace( '"', '' ) ),
                        str( stamp ) ) )
    return S_OK( beats )
#####################################################################################
def setJobCommand( self, jobID, command, arguments = None ):
    """ Store a command to be passed to the job together with the
        next heart beat
    """
    escapedValues = []
    for raw in ( jobID, command ):
        ret = self._escapeString( raw )
        if not ret['OK']:
            return ret
        escapedValues.append( ret['Value'] )
    e_jobID, e_command = escapedValues
    if arguments:
        ret = self._escapeString( arguments )
        if not ret['OK']:
            return ret
        e_arguments = ret['Value']
    else:
        # store an explicit empty string when no arguments are given
        e_arguments = "''"
    req = "INSERT INTO JobCommands (JobID,Command,Arguments,ReceptionTime) "
    req += "VALUES (%s,%s,%s,UTC_TIMESTAMP())" % ( e_jobID, e_command, e_arguments )
    return self._update( req )
#####################################################################################
def getJobCommand( self, jobID, status = 'Received' ):
    """ Get a command to be passed to the job together with the
        next heart beat
    """
    ret = self._escapeString( jobID )
    if not ret['OK']:
        return ret
    e_jobID = ret['Value']
    ret = self._escapeString( status )
    if not ret['OK']:
        return ret
    e_status = ret['Value']
    req = "SELECT Command, Arguments FROM JobCommands WHERE JobID=%s AND Status=%s" % ( e_jobID, e_status )
    result = self._query( req )
    if not result['OK']:
        return result
    # Map each pending command to its arguments
    commands = {}
    for command, arguments in result['Value']:
        commands[command] = arguments
    return S_OK( commands )
#####################################################################################
def setJobCommandStatus( self, jobID, command, status ):
    """ Set the command status
    """
    escaped = []
    for raw in ( jobID, command, status ):
        ret = self._escapeString( raw )
        if not ret['OK']:
            return ret
        escaped.append( ret['Value'] )
    e_jobID, e_command, e_status = escaped
    req = "UPDATE JobCommands SET Status=%s WHERE JobID=%s AND Command=%s" % ( e_status, e_jobID, e_command )
    return self._update( req )
#####################################################################################
def getSummarySnapshot( self, requestedFields = False ):
    """ Get the summary snapshot for a given combination

    Groups the Jobs table by DIRACSetup plus the requested attribute
    fields and returns job counts and reschedule totals per group.
    """
    groupFields = [ 'DIRACSetup' ]
    if requestedFields:
        groupFields = groupFields + requestedFields
    else:
        groupFields = groupFields + [ 'Status', 'MinorStatus',
                                      'Site', 'Owner', 'OwnerGroup', 'JobGroup', 'HerdState' ]
    aggregates = [ 'COUNT(JobID)', 'SUM(RescheduleCounter)' ]
    groupClause = ", ".join( groupFields )
    sqlCmd = "SELECT %s, %s From Jobs GROUP BY %s" % ( groupClause,
                                                       ", ".join( aggregates ),
                                                       groupClause )
    queryRes = self._query( sqlCmd )
    if not queryRes[ 'OK' ]:
        return queryRes
    return S_OK( ( groupFields + aggregates, queryRes[ 'Value' ] ) )
| avedaee/DIRAC | WorkloadManagementSystem/DB/JobDB.py | Python | gpl-3.0 | 87,548 | [
"DIRAC"
] | 2879ce0191e15d8e8190696068a49147d3baea9e79dda41deea124644d86360f |
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
import sys
import random
import threading
import time
import Tank as DewTank
name = 'Galaxy War'
#Up Down Left Right
xMin = 400 - 50
yMin = 300 - 50
xMax = 400 + 50
yMax = 300 + 50
def main():
    """Entry point: build the renderer and start the GLUT main loop."""
    print("main hi")
    renderer = RenderPls()
    renderer.setupVariable()
class RenderPls ():
    """Renders the 'Galaxy War' scene and handles GLUT mouse input."""

    # Class-level state; `a` drives a slow per-frame animation.
    a = 0
    b = 0
    lx = 0
    ly = 0
    # NOTE(review): DewTank is the imported Tank module used as a
    # module-level singleton (setX/setY/getX/getY) — confirm Tank's API.
    myTank = DewTank

    def __init__(self):
        print("RenderPls init")

    def setupVariable(self):
        """Initialise GLUT, create the window and enter the main loop."""
        glutInit(sys.argv)
        glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
        glutInitWindowSize(800, 600)
        glutCreateWindow(name)
        glClearColor(0., 0., 0., 1.)
        glutDisplayFunc(self.display)
        # 2D projection, origin at the top-left corner of the window
        gluOrtho2D(0, 800, 600, 0)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glutMouseFunc(self.mouseEvent)
        glutMainLoop()
        print("end")
        return

    def mouseEvent(self, button, state, x, y):
        """Left click moves the tank to (x, y); right click only logs."""
        print("mouse Event ")
        if button == GLUT_LEFT_BUTTON:
            if state == GLUT_DOWN:
                print("click " + str(x) + " ," + str(y))
                # Bug fix: `myTank` was referenced without `self.`, which
                # raised a NameError on every left click.
                self.myTank.setX(x)
                self.myTank.setY(y)
        if button == GLUT_RIGHT_BUTTON:
            if state == GLUT_DOWN:
                print("right click test " + str(x) + " , " + str(y))

    def display(self):
        """Redraw the scene: boundary lines, target box, animated ray and tank."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glPushMatrix()
        glLineWidth(2.5)
        glBegin(GL_LINES)
        # left line
        glColor3f(1.0, 0.0, 0.0)
        self.a = self.a + 1  # advance the animation parameter each frame
        a = self.a
        b = self.b
        glVertex2f(-400, 300)
        glVertex2f(400, 300)
        # right line
        glColor3f(0.0, 1.0, 0.0)
        glVertex2f(400, 300)
        glVertex2f(800, 300)
        # target box edges (xMin/xMax/yMin/yMax module constants)
        glVertex2f(xMin, yMin)
        glVertex2f(xMin, yMax)
        glVertex2f(xMax, yMin)
        glVertex2f(xMax, yMax)
        glVertex2f(xMin, yMin)
        glVertex2f(xMax, yMin)
        glVertex2f(xMin, yMax)
        glVertex2f(xMax, yMax)
        # animated line from the screen centre
        glColor3f(0.0, 0.0, 1.0)
        glVertex2f(400, 300)
        glVertex2f(400 + (a - 400), 300 + (b - 300))
        # line from the tank position to the origin
        glVertex2f(self.myTank.getX(), self.myTank.getY())
        glVertex2f(0, 0)
        glEnd()
        glPopMatrix()
        glutSwapBuffers()
        glutPostRedisplay()
        return
class myThread (threading.Thread):
    # Background thread that ticks a counter every half second.
    # The display-forcing calls are left commented out, as in the original;
    # run() loops forever, so the final "Exiting" print is unreachable.
    def __init__(self, threadID, name, counter, renderPls):
        # renderPls: the RenderPls instance this thread was meant to refresh
        threading.Thread.__init__(self)
        self.renderPls = renderPls
        self.threadID = threadID
        self.name = name
        self.counter = counter
    def run(self):
        # Periodically increment and report the counter (0.5 s period).
        print "Starting " + self.name
        while(True):
            self.counter = self.counter + 1
            print "myThread Force display : " + str(self.counter)
            time.sleep(0.5)
            #self.renderPls.display()
            #glutPostRedisplay()
            print "myThread Force display 2"
        print "Exiting " + self.name
if __name__ == '__main__': main()  # run only when executed as a script
| dewtx29/python_ann | python/myGame/main.py | Python | gpl-3.0 | 3,743 | [
"Galaxy"
] | da69594623428bc2f3d39c51635f6cdbfbf4202b2e3060bd6b6c4a948cbe18a2 |
###############
# Brian Burns
# MATH 238
# ay'' + by' + cy = 0
# secondOrder.py
###############
import sys
import math
def main():
    """Solve the constant-coefficient ODE a*y'' + b*y' + c*y = 0.

    Reads a, b, c from the command line, classifies the discriminant of
    the auxiliary equation a*r^2 + b*r + c = 0 and prints the general
    solution.
    """
    # make sure we have enough arguments
    if len(sys.argv) != 4:
        print("Usage: \"python secondOrder.py [a] [b] [c]\"")
        sys.exit(0)
    # grab the arguments
    a = float(sys.argv[1])
    b = float(sys.argv[2])
    c = float(sys.argv[3])
    print("The solution to the equation is:\n")
    # discriminant of the auxiliary (characteristic) equation
    root = pow(b, 2) - (4*a*c)
    # y = c_1e^(r_1t) + c_2e^(r_2t)
    if root > 0:
        # Bug fix: `/ 2*a` divides by 2 and then MULTIPLIES by a because
        # * and / have equal precedence (left-to-right); the quadratic
        # formula requires / (2*a), as used in the other branches below.
        r1 = (-1*b + math.sqrt(root)) / (2*a)
        r2 = (-1*b - math.sqrt(root)) / (2*a)
        print("y = c_1e^("+str(r1)+"t) + c_2e^("+str(r2)+"t)")
    # y = c_1e^(rt) + c_2te^(rt)
    elif root == 0:
        r = (-1*b) / (2*a)
        print("y = c_1e^("+str(r)+"t) + c_2te^("+str(r)+"t)")
    else:
        # complex roots alpha +/- beta*i
        alpha = (-1*b) / (2*a)
        beta = math.sqrt((-1*root)) / (2*a)
        # y = c_1cos(Bt) + c_2sin(Bt)
        if alpha == 0.0:
            print("y = c_1cos("+str(beta)+"t) + c_2sin("+str(beta)+"t)")
        # y = c_1e^(at)cos(Bt) + c_2e^(at)sin(Bt)
        else:
            print("y = c_1e^("+str(alpha)+"t)cos("+str(beta)+"t) + c_2e^("+str(alpha)+"t)sin("+str(beta)+"t)")
if __name__ == "__main__":
    main()  # run only when executed as a script
| brnbrns/DifferentialEquations | secondOrder.py | Python | gpl-2.0 | 1,287 | [
"Brian"
] | e26edd8df316417522d8e90665c3e4758836bb4f5aae40a414f5303765ba51d3 |
#
# Copyright 2008, 2009 Brian R. D'Urso
#
# This file is part of Python Instrument Control System, also known as Pythics.
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
# Module by Elliot Jenner, updated 9/16/2013
import visa
class stepper_motor(visa.SerialInstrument):
def __init__(self, loc, mode='Jog', vel_max = 10.0, accel_max=400.0):
    """Open the serial link to the stepper drive and set defaults.

    :param loc: VISA resource name of the serial port
    :param mode: motion mode, e.g. 'Jog' or 'Step'
    :param vel_max: velocity cap in rev/sec (reported in Analog mode)
    :param accel_max: acceleration cap in rev/sec^2 (reported in Analog mode)
    """
    visa.SerialInstrument.__init__(self, loc)
    self.term_chars = visa.CR
    self.ask('PR4') #turn on acknowledgement
    # NOTE(review): 'IFD' presumably selects the drive's reply format —
    # confirm against the drive's command reference.
    self.write('IFD')
    self.accel_max = accel_max
    self.vel_max = vel_max
    self.is_moving = False
    self.__mode = mode
def __return_parser(self, responce):
    """Normalise a drive reply, consuming stray ack/nak characters.

    NOTE(review): the `or` below makes the first branch taken unless the
    reply contains BOTH '%' and '*'; an `and` ("neither ack character
    present: return the raw reply") may have been intended — confirm
    against the drive protocol before changing.
    """
    #buffering issues sometimes occur where the ack-nak charter does not get pulled and waits for next read. Must clear to get to proper return if expecting value.
    if responce.partition('%')[1] != '%' or responce.partition('*')[1] != '*':
        return responce
    elif responce.partition('?')[1] !='?':
        # ack/nak present without an error marker: interpret it
        return self.__acknowledge(responce, True)
    else:
        # error marker present: drain the buffer and parse the next reply
        return self.__return_parser(self.read())
def __acknowledge(self, input, check = False):
    """Interpret an ack/nak reply from the drive.

    '%' (immediate ack) and '*' (buffered ack) indicate success.  Any
    other reply is a failure: with check=False return False, with
    check=True translate the numeric code following '%' into a
    human-readable error string.
    """
    # Bug fix: `input == ('%' or '*')` compared against '%' only, since
    # ('%' or '*') evaluates to '%'; the buffered ack '*' was wrongly
    # treated as a failure.  Test membership instead.
    if input in ('%', '*'): #% for instant, * for buffered
        return True
    else:# failure includes a code indicating failure type. to check, check = True
        if not check:
            return False
        else:
            error = {
                '1' : 'Timeout',
                '2' : 'Parameter Too Long',
                '3' : 'Too Few Parameters',
                '4' : 'Too Many Parameters',
                '5' : 'Parameter Out Of Range',
                '6' : 'Buffer Full',
                '7' : 'Cannot Process Command',
                '8' : 'Program Running',
                '9' : 'Bad Password',
                '10': 'Comm Port Error',
                '11': 'Bad Character',
            }
            return error[input.partition('%')[2]]
#Process Commands - normal operation
#motion commands
def __get_mode(self):
    #mode defaults to Jog
    # Property getter: current motion mode string ('Jog', 'Step', ...).
    return self.__mode
def __set_mode(self, mode):
    # Property setter: switch the motion mode; always reports True.
    self.__mode = mode
    return True
mode = property(__get_mode, __set_mode)
def start(self):
    """Enable the motor and begin continuous motion ('Jog' mode only).

    Sets self.is_moving on a positive acknowledgement; returns the
    string 'wrong mode' in any other mode.
    """
    self.motor_enable()
    if self.mode == 'Jog':
        #accelerates with (accel) to speed set by (vel)
        stat = self.ask('CJ')
        if self.__acknowledge(stat):
            self.is_moving = True
    else:
        return 'wrong mode'
def set_move(self, dist):
    """Enable the motor and start a relative move of *dist* ('Step' mode only).

    Sets self.is_moving on a positive acknowledgement; returns the
    string 'wrong mode' in any other mode.
    """
    self.motor_enable()
    #make a stepwise move
    if self.mode == 'Step':
        command = 'FL'+ str(dist)
        stat = self.__return_parser(self.ask(command))
        if self.__acknowledge(stat):
            self.is_moving = True
    else:
        return 'wrong mode'
def set_position(self, pos):
    """Redefine the drive's current position counter as *pos*."""
    #set the current position to pos
    return self.ask('SP' + str(pos))
def stop(self):
    """Halt motion using the configured deceleration.

    'Jog' mode issues SJ (stop jog); 'Step' mode issues STD (stop with
    deceleration).  Clears self.is_moving on a positive acknowledgement.
    """
    if self.mode == 'Jog':
        #stops jog at decel rate (set decell)
        stat = self.ask('SJ')
    elif self.mode == 'Step':
        #stops steped movement with rate from (set_deccel)
        stat = self.ask('STD')
    else:
        # Bug fix: `stat` used to be unbound in any other mode, so the
        # method raised NameError instead of reporting the problem.
        return 'wrong mode'
    if self.__acknowledge(stat):
        self.is_moving = False
def __get_vel(self):
    """Return the currently programmed velocity in rev/sec.

    In Analog mode the configured cap (self.vel_max) is returned; if the
    drive's reply cannot be parsed as a float it is returned verbatim.
    """
    if self.mode == 'Jog':
        #get currently set velocity in rev/sec. commands are different depending on if the motor is already moving.
        if self.is_moving:
            reply = self.__return_parser(self.ask('CS'))
        else:
            reply = self.__return_parser(self.ask('JS'))
    elif self.mode == 'Step':
        #get currently set velocity in rev/sec
        reply = self.__return_parser(self.ask('VE'))
    elif self.mode == 'Analog':
        return self.vel_max
    try:
        return float(reply.partition('=')[2])
    except ValueError:
        return reply
def __set_vel(self, vel):
    """Program the velocity in rev/sec; in Jog mode the sign selects the
    direction (DI1 clockwise, DI-1 counter-clockwise).

    Set before start(): the last value set while stopped takes precedence.
    """
    vel = '%.4f' % vel
    if self.mode == 'Jog':
        if self.is_moving:
            command = 'CS' + str(vel)
        else:
            if float(vel) < 0:
                self.ask('DI-1') #set this to go counter clockwise
                # Bug fix: the slice result was discarded (`vel[1:]` on
                # its own line), so the '-' sign stayed in the JS command
                # that the drive does not understand.
                vel = vel[1:]
            else:
                self.ask('DI1') #set this to go clockwise
            command = 'JS'+ str(vel)
    elif self.mode == 'Step':
        if self.is_moving:
            command = 'VC'+ str(vel)
        else:
            command = 'VE'+ str(vel)
    else:
        # Bug fix: in any other mode `command` was unbound and the final
        # ask() raised NameError; report the problem like the siblings do.
        return 'wrong mode'
    return self.ask(command)
vel = property(__get_vel, __set_vel)
def get_current_vel(self): #returns the actual current velocity, rather than the set volocity as vel does. Replies in RPM, not RPS
    """Return the motor's actual (not programmed) velocity via 'RLJ'.

    NOTE(review): on a parse failure the fallback `reply*0.25` is applied
    to a string, which would raise TypeError — confirm intended behaviour.
    """
    if self.mode == 'Jog':
        #get currently set velocity in rev/min. commands are different depending on if the motor is already moving.
        reply = self.__return_parser(self.ask('RLJ')) #Checks the intermediate commands. It does not read back anything from the motor
        if 'RS' in reply: #buffering catch
            reply = self.read()
    elif self.mode == 'Step':
        #get currently set velocity in rev/min
        reply = self.__return_parser(self.ask('RLJ')) #Checks the intermediate commands. It does not read back anything from the motor
        if 'RS' in reply: #buffering catch
            reply = self.read()
    elif self.mode == 'Analog':
        return self.vel_max
    try:
        # scale the drive's reported value by 0.25
        return float(reply.partition('=')[2])*0.25
    except ValueError:
        return reply*0.25
def __get_accel(self):
    """Return the programmed acceleration in rev/sec^2.

    Analog mode reports the configured cap (self.accel_max); unparsable
    replies are returned verbatim.
    NOTE(review): `reply` is unbound in any other mode — confirm the mode
    set is closed before relying on this.
    """
    if self.mode == 'Jog':
        #get currently set Acceleration in rev/sec/sec.
        reply = self.__return_parser(self.ask('JA'))
    elif self.mode == 'Step':
        #get currently set Acceleration in rev/sec/sec
        reply = self.__return_parser(self.ask('AC'))
    elif self.mode == 'Analog':
        return self.accel_max
    try:
        return float(reply.partition('=')[2])
    except ValueError:
        return reply
def __set_accel(self, accel):
    """Program the acceleration in rev/sec^2 (Jog mode: not while moving)."""
    accel += 0.01 #due to truncation issues, it is necesary to increase the last digit
    accel = '%.4f' % accel
    if self.mode == 'Jog':
        #set acceleration in rev/sec/sec CANNOT BE CHANGED WHILE MOVING
        command = 'JA'+ str(accel)
    elif self.mode == 'Step':
        #set acceleration in rev/sec/sec
        command = 'AC'+ str(accel)
    return self.ask(command)
accel = property(__get_accel, __set_accel)
def __get_dccel(self):
    """Return the programmed deceleration in rev/sec^2.

    Analog mode reports the configured cap (self.accel_max); unparsable
    replies are returned verbatim.
    """
    if self.mode == 'Jog':
        #get currently set Decceleration in rev/sec/sec
        reply = self.__return_parser(self.ask('JL'))
    elif self.mode == 'Step':
        #get currently set Decceleration in rev/sec/sec
        reply = self.__return_parser(self.ask('DE'))
    elif self.mode == 'Analog':
        return self.accel_max
    try:
        return float(reply.partition('=')[2])
    except ValueError:
        return reply
def __set_dccel(self, dccel):
    """Program the deceleration in rev/sec^2 (Jog mode: not while moving)."""
    dccel += 0.01 #due to truncation issues, it is necesary to increase the last digit
    dccel = '%.4f' % dccel
    if self.mode == 'Jog':
        #set decceleration in rev/sec/sec. CANNOT BE CHANGED WHILE MOVING
        command = 'JL'+ str(dccel)
    elif self.mode == 'Step':
        #set decceleration in rev/sec/sec
        command = 'DE'+ str(dccel)
    elif self.mode == 'Analog':
        # no deceleration setting applies in Analog mode
        return 0
    return self.ask(command)
dccel = property(__get_dccel, __set_dccel)
# analog commands
def motor_mode_set(self, offset=0.0, max_voltage=5.0, deadband=0.0):
    """Switch the drive's command mode to match self.mode.

    NOTE: deadband is given in mV, offset and max_voltage in V.
    Analog modes disable the motor before reconfiguring; Jog/Step
    re-enable it after selecting normal mode.
    """
    if self.mode in ('Analog Velocity', 'Analog Position'):
        self.motor_disable()
        # CM11 selects analog velocity mode, CM22 analog position mode.
        self.ask('CM11' if self.mode == 'Analog Velocity' else 'CM22')
        self.deadband = deadband
        self.scaling = 'Single Ended 0-5'  # to match the capability of the ST5-S
        self.offset = offset
        self.max_voltage = max_voltage
    elif self.mode in ('Jog', 'Step'):
        self.ask('CM21')  # normal (non-analog) mode
        self.motor_enable()
    return True
def __get_pos_gain(self):
    """Position (in degrees) commanded by the full-scale analog input."""
    if self.mode != 'Analog Position':
        return 'wrong mode'
    reply = self.__return_parser(self.ask('AP'))
    counts = float(reply.partition('=')[2])
    # Drive works in encoder counts: one revolution = 20000 counts.
    return counts * (360.0 / 20000.0)
def __set_pos_gain(self, pos):
    """Set the position gain; *pos* is in degrees at full-scale input."""
    if self.mode != 'Analog Position':
        return 'wrong mode'
    # Convert degrees to encoder counts (1 rev = 20000 counts).
    counts = int(pos * (20000.0 / 360.0))
    return self.ask('AP' + str(counts))
pos_gain = property(__get_pos_gain, __set_pos_gain)
def __get_vel_gain(self):
    """Speed (RPS) commanded by the full-scale analog input."""
    if self.mode != 'Analog Velocity':
        return 'wrong mode'
    reply = self.__return_parser(self.ask('AG'))
    raw = float(reply.partition('=')[2])
    # Invert the setter's scaling: drive setting = 240 * RPS, with an
    # extra factor compensating for the configured zero offset.
    return (raw/240.0)/(4.0*self.max_voltage/(self.max_voltage-self.offset))
def __set_vel_gain(self, vel):
    """Set the velocity gain; *vel* is the RPS at full-scale input."""
    if self.mode != 'Analog Velocity':
        return 'wrong mode'
    # Scale up relative to the zero offset so the peak of an off-center
    # input wave still reaches the full requested velocity.
    scaled = 4.0*vel*(self.max_voltage/(self.max_voltage-self.offset))
    # The drive expects 240 * speed-in-RPS as the AG setting.
    return self.ask('AG' + str(int(scaled*240.0)))
vel_gain = property(__get_vel_gain, __set_vel_gain)
def __get_accel_max(self):
    """Drive-wide accel/decel ceiling in rev/sec/sec.

    Applies in every mode and also governs the e-stop rate.
    """
    reply = self.__return_parser(self.ask('AM'))
    value = reply.partition('=')[2]
    try:
        return float(value)
    except ValueError:
        # Unparseable reply -- return it verbatim for the caller.
        return reply
def __set_accel_max(self, accel):
    """Set the drive-wide accel/decel ceiling (rev/sec/sec).

    Also sets the maximum deceleration and the e-stop rate; usable
    in every mode.
    """
    # Nudge the value so drive-side truncation keeps the requested digit.
    return self.ask('AM' + ('%.2f' % (accel + 0.01)))
accel_max = property(__get_accel_max, __set_accel_max)
def __get_offset(self):
    """Analog-input offset in volts; this voltage is read as zero."""
    if self.mode not in ('Analog Position', 'Analog Velocity'):
        return 'wrong mode'
    reply = self.ask('AV')
    return float(reply.partition('=')[2])
def __set_offset(self, voltage=0.0):
    """Set the analog zero offset in volts.

    Pass the string 'seek' to have the drive sample the present input
    voltage and store it as the zero point.
    """
    if self.mode not in ('Analog Position', 'Analog Velocity'):
        return 'wrong mode'
    if voltage == 'seek':
        # AZ captures the current analog voltage as the zero input.
        return self.ask('AZ')
    return self.ask('AV' + str(voltage))
offset= property(__get_offset, __set_offset)
def __get_filter(self):
    """Analog-input filter cutoff in Hz; 0 means the filter is disabled."""
    if self.mode not in ('Analog Position', 'Analog Velocity'):
        return 'wrong mode'
    raw = float(self.ask('AF').partition('=')[2])
    if raw == 0:
        return 0
    # Convert the drive's internal AF setting back to a frequency in Hz
    # (inverse of the formula used by the setter).
    return 1400/((72090/raw) - 2.2)
def __set_filter(self, frequency=0.0):
    """Set the analog-input filter cutoff in Hz; 0 disables the filter."""
    if self.mode not in ('Analog Position', 'Analog Velocity'):
        return 'wrong mode'
    if frequency != 0:
        # Inverse of the conversion performed by the getter.
        setting = int(72090/((1400/frequency)+2.2))
    else:
        setting = 0
    return self.ask('AF' + str(setting))
filter = property(__get_filter, __set_filter)
def __get_deadband(self):
    """Dead zone (mV) around the zero setpoint treated as zero input."""
    if self.mode not in ('Analog Position', 'Analog Velocity'):
        return 'wrong mode'
    return float(self.ask('AD').partition('=')[2])
def __set_deadband(self, voltage):
    """Set the analog dead zone in mV around the zero setpoint."""
    if self.mode not in ('Analog Position', 'Analog Velocity'):
        return 'wrong mode'
    return self.ask('AD' + str(voltage))
deadband = property(__get_deadband, __set_deadband)
def __get_scaling(self):
    """Report the analog input range/wiring the drive is configured for."""
    if self.mode not in ('Analog Position', 'Analog Velocity'):
        return 'wrong mode'
    setting = float(self.ask('AS').partition('=')[2])
    # Map the AS code to its human-readable description.
    names = {
        0: 'Single Ended +-10',
        1: 'Single Ended 0-10',
        2: 'Single Ended +-5',
        3: 'Single Ended 0-5',
        4: 'Double Ended +-10',
        5: 'Double Ended 0-10',
        6: 'Double Ended +-5',
        7: 'Double Ended 0-5',
    }
    return names.get(setting)
def __set_scaling(self, setting):
    """Select the analog input range/wiring.

    *setting* is one of the human-readable names reported by the getter,
    e.g. 'Single Ended 0-5'. Returns the drive's reply, or 'wrong mode'
    outside the analog modes. Raises ValueError for an unknown name
    (previously this raised an opaque UnboundLocalError).
    """
    if self.mode not in ('Analog Position', 'Analog Velocity'):
        return 'wrong mode'
    codes = {
        'Single Ended +-10': 0,
        'Single Ended 0-10': 1,
        'Single Ended +-5': 2,
        'Single Ended 0-5': 3,
        'Double Ended +-10': 4,
        'Double Ended 0-10': 5,
        'Double Ended +-5': 6,
        'Double Ended 0-5': 7,
    }
    try:
        code = codes[setting]
    except KeyError:
        raise ValueError('unknown analog scaling setting: %r' % (setting,))
    # BUG FIX: previously sent 'AG' (the analog velocity gain command),
    # which does not match the 'AS' command the getter reads and would
    # clobber the velocity gain instead of setting the scaling.
    return self.ask('AS' + str(code))
scaling = property(__get_scaling, __set_scaling)
#auxiliary commands
def wait(self, time):
    """Pause command execution for *time* seconds (0.0-320 s, 0.01 s steps).

    NOTE: ONLY an e-stop overrides the wait; other stop commands queue
    behind it.
    """
    return self.ask('WT' + str(time))
def monitor_string(self, string):
    """Queue an SS command so the drive echoes *string* back when reached.

    Useful for monitoring command-buffer delays; returns the echoed reply.
    """
    self.write('SS' + str(string))
    return self.read()
#Setup Commands - These commands are normally set beforehand and left alone. Can be place in non-volitile momory in controller using save
def save(self):
    """Persist the current settings to the drive's non-volatile memory."""
    return self.ask('SA')
def motor_enable(self):
    """Energize the motor (at start of use, after motor_disable, or after
    an alarm shutdown)."""
    return self.ask('ME')
def motor_disable(self):
    """De-energize the motor; call motor_enable to reactivate."""
    return self.ask('MD')
#emergency commands
def e_brake(self):
    """Halt all motion at the maximum allowed acceleration (see accel_max).

    Also clears unexecuted commands from the drive's buffer and zeroes
    the analog velocity gain.
    """
    reply = self.ask('SK')
    self.vel_gain = 0
    # Only clear the moving flag once the drive acknowledges the stop.
    if self.__acknowledge(reply):
        self.is_moving = False
    return reply
def reset(self):
    """Restore startup parameters, leaving the motor disabled.

    Intended for accident recovery.
    """
    return self.ask('RE')
@property
def status(self):
    """Human-readable drive status string, e.g. 'Ready, '.

    Queries the drive with RS and expands each status letter; digits and
    dots in the reply are skipped. When an alarm is flagged, the decoded
    alarm text is appended after 'Alarm:'.
    """
    reply = self.ask('RS').partition('=')[2]
    states = {
        'A' : 'Alarm',
        'D' : 'Disabled',
        'E' : 'Drive Fault',
        'F' : 'Motor Moving',
        'H' : 'Homing',
        'J' : 'Jogging',
        'M' : 'Motion in progress',
        'P' : 'In position',
        'R' : 'Ready',
        'S' : 'Stopping a motion',
        'T' : 'Wait Time',
        'W' : 'Wait Input',
    }
    output = ''
    for ch in reply:
        if ch.isdigit() or ch == '.':
            continue
        state = states[ch]
        if state == 'Alarm':
            # BUG FIX: 'alarm' was referenced as a bare name, raising
            # NameError; it is the alarm property on this instance.
            state += ':' + self.alarm
        output += state + ', '
    return output
@property
def alarm(self):
    """Decode the drive's alarm register (AL) into a description.

    Use alarm_reset() to clear. A 'Comm Error' alarm is expanded with
    the detailed communication-error text from the comm_error property.
    """
    code = self.ask('AL').partition('=')[2]  # reply is 'AL=<hex code>'
    message = {
        '0000' : 'No Alarm',
        '0001' : 'Position Limit',
        '0002' : 'CCW Limit',
        '0004' : 'CW Limit',
        '0008' : 'Over Temp',
        '0010' : 'Internal Voltage',
        '0020' : 'Over Voltage',
        '0040' : 'Under Voltage',
        '0080' : 'Over Current',
        '0100' : 'Open Motor Winding',
        '0200' : 'Bad Encoder',
        '0400' : 'Comm Error',
        '0800' : 'Bad Flash',
        '1000' : 'No Move',
        '4000' : 'Blank Q Segment',
    }
    error = message[code]
    if error != 'Comm Error':
        return error
    # BUG FIX: 'comm_error' was a bare name (NameError); it is the
    # comm_error property on this instance.
    return 'Comm Error:' + self.comm_error
def alarm_reset(self):
    """Send AR to clear latched alarm codes and report the result.

    Clearing alarms does not re-enable a motor that was shut down; call
    motor_enable() for that. An empty reply is treated as 'no alarm'.
    Returns 'Alarm:<description>', or the detailed text for a comm error.
    """
    code = self.ask('AR').partition('=')[2]  # reply is 'AL=<hex code>'
    if code == '':
        code = '0000'
    message = {
        '0000' : 'No Alarm',
        '0001' : 'Position Limit',
        '0002' : 'CCW Limit',
        '0004' : 'CW Limit',
        '0008' : 'Over Temp',
        '0010' : 'Internal Voltage',
        '0020' : 'Over Voltage',
        '0040' : 'Under Voltage',
        '0080' : 'Over Current',
        '0100' : 'Open Motor Winding',
        '0200' : 'Bad Encoder',
        '0400' : 'Comm Error',
        '0800' : 'Bad Flash',
        '1000' : 'No Move',
        '4000' : 'Blank Q Segment',
    }
    error = message[code]
    if error != 'Comm Error':
        return 'Alarm:' + error
    # BUG FIX: 'comm_error' was a bare name (NameError); it is the
    # comm_error property on this instance.
    return 'Comm Error:' + self.comm_error
@property
def comm_error(self):
    """Decode the drive's communication-error register (CE).

    Communication errors have their own code space, separate from the
    alarm register.
    """
    code = self.ask('CE').partition('=')[2]
    descriptions = {
        '0000' : 'No Error',
        '0001' : 'parity flag error',
        '0002' : 'framing error',
        '0004' : 'noise flag error',
        '0008' : 'overrun error',
        '0010' : 'Rx buffer full',
        '0020' : 'Tx buffer full',
        '0040' : 'bad SPI op-code',
        '0080' : 'Tx time-out',
    }
    return descriptions[code]
| dursobr/Pythics | pythics/instruments/applied_motion_stepper.py | Python | gpl-3.0 | 20,942 | [
"Brian"
] | 50da430a364f4e5a4701cd071af0d8233786ca55a57d0e1b27b6529d22997cba |
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
This test compares the NEON recurrent layer against a numpy reference recurrent
implementation and compares the NEON recurrent bprop deltas to the gradients
estimated by finite differences.
The numpy reference recurrent layer contains static methods for forward pass
and backward pass.
The test runs a SINGLE layer of recurrent layer and compare numerical values
The reference model handles batch_size as 1 only
The following are made sure to be the same in both recurrent layers
- initial h values (all zeros)
- initial W, b (ones or random values)
- input data (random data matrix)
- input error (random data matrix)
- the data shape inside recurrent_ref is seq_len, input_size, 1
- the data shape inside recurrent (neon) is feature, seq_len * batch_size
"""
import itertools as itt
import numpy as np
from neon import NervanaObject
from neon.initializers.initializer import Constant, Gaussian
from neon.layers import Recurrent
from neon.transforms import Tanh
from tests.recurrent_ref import Recurrent as RefRecurrent
from tests.utils import allclose_with_out
def pytest_generate_tests(metafunc):
    """Parametrize the reference-comparison and gradient-check fixtures.

    Each fixture receives (seq_len, input_size, hidden_size, batch_size)
    tuples. A single small combination is used unless pytest was invoked
    with the --all option, in which case the full grid is generated.
    """
    batch_sizes = [1]
    grids = [
        ('refgruargs', ([2, 3, 4], [3, 5, 10], [3, 5, 10])),
        ('gradgruargs', ([2, 3], [5, 10], [3, 5, 10])),
    ]
    for fixture, (seq_all, inp_all, out_all) in grids:
        if fixture not in metafunc.fixturenames:
            continue
        if metafunc.config.option.all:
            seq_rng, inp_rng, out_rng = seq_all, inp_all, out_all
        else:
            seq_rng, inp_rng, out_rng = [3], [5], [10]
        fargs = itt.product(seq_rng, inp_rng, out_rng, batch_sizes)
        metafunc.parametrize(fixture, fargs)
def test_ref_compare_ones(backend_default, refgruargs):
    """Compare the neon RNN to the numpy reference with all-ones init."""
    # run comparison with reference code
    # for all ones init
    seq_len, input_size, hidden_size, batch_size = refgruargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    # inp_moms [1.0, 0.0]: constant input (std dev 0) to match the ref model.
    check_rnn(seq_len, input_size, hidden_size,
              batch_size, Constant(val=1.0), [1.0, 0.0])
def test_ref_compare_rand(backend_default, refgruargs):
    """Compare the neon RNN to the numpy reference with Gaussian init."""
    # run comparison with reference code
    # for Gaussian random init
    seq_len, input_size, hidden_size, batch_size = refgruargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    check_rnn(seq_len, input_size, hidden_size, batch_size,
              Gaussian())
# compare neon RNN to reference RNN implementation
def check_rnn(seq_len, input_size, hidden_size,
              batch_size, init_func, inp_moms=[0.0, 1.0]):
    """Run one fprop/bprop pass through neon's Recurrent layer and the
    numpy reference implementation and assert the gradients agree.

    seq_len/input_size/hidden_size/batch_size size the layer; init_func
    is the neon initializer for W and b. NOTE(review): the mutable
    default inp_moms is never mutated here, but a tuple would be safer.
    """
    # init_func is the initializer for the model params
    # inp_moms is the [ mean, std dev] of the random input
    input_shape = (input_size, seq_len * batch_size)
    output_shape = (hidden_size, seq_len * batch_size)
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    # ======== create models ========
    # neon RNN
    rnn = Recurrent(hidden_size, init_func, activation=Tanh())
    # reference numpy RNN
    rnn_ref = RefRecurrent(input_size, hidden_size)
    Wxh = rnn_ref.Wxh
    Whh = rnn_ref.Whh
    bh = rnn_ref.bh
    # ========= generate data =================
    # generate random input tensor
    inp = np.random.rand(*input_shape)*inp_moms[1] + inp_moms[0]
    inpa = rnn.be.array(inp)
    # generate random deltas tensor
    deltas = np.random.randn(*output_shape)
    # the reference code expects these shapes:
    # input_shape: (seq_len, input_size, batch_size)
    # output_shape: (seq_len, hidden_size, batch_size)
    inp_ref = inp.copy().T.reshape(
        seq_len, batch_size, input_size).swapaxes(1, 2)
    deltas_ref = deltas.copy().T.reshape(
        seq_len, batch_size, hidden_size).swapaxes(1, 2)
    # ========= running models ==========
    # run neon fprop
    rnn.configure((input_size, seq_len))
    rnn.prev_layer = True
    rnn.allocate()
    rnn.set_deltas([rnn.be.iobuf(rnn.in_shape)])
    rnn.fprop(inpa)
    # weights are only initialized after doing fprop, so now
    # make ref weights and biases the same with neon model
    Wxh[:] = rnn.W_input.get()
    Whh[:] = rnn.W_recur.get()
    bh[:] = rnn.b.get()
    (dWxh_ref, dWhh_ref, db_ref, h_ref_list,
     dh_ref_list, d_out_ref) = rnn_ref.lossFun(inp_ref, deltas_ref)
    # now test the bprop
    rnn.bprop(rnn.be.array(deltas))
    # grab the delta W from gradient buffer
    dWxh_neon = rnn.dW_input.get()
    dWhh_neon = rnn.dW_recur.get()
    db_neon = rnn.db.get()
    # comparing outputs
    # NOTE(review): the fprop (hidden state) comparison below is only
    # printed, not asserted -- a mismatch would not fail the test.
    print '====Verifying hidden states===='
    print allclose_with_out(rnn.outputs.get(),
                            h_ref_list,
                            rtol=0.0,
                            atol=1.0e-5)
    print 'fprop is verified'
    print '====Verifying update on W and b ===='
    print 'dWxh'
    assert allclose_with_out(dWxh_neon,
                             dWxh_ref,
                             rtol=0.0,
                             atol=1.0e-5)
    print 'dWhh'
    assert allclose_with_out(dWhh_neon,
                             dWhh_ref,
                             rtol=0.0,
                             atol=1.0e-5)
    print '====Verifying update on bias===='
    print 'db'
    assert allclose_with_out(db_neon,
                             db_ref,
                             rtol=0.0,
                             atol=1.0e-5)
    print 'bprop is verified'
    return
def reset_rnn(rnn):
    """Clear the RNN layer's cached buffers so fprop can be re-run.

    Needed between the perturbed fprop calls in the gradient checks.
    """
    for attr in ('x', 'xs', 'outputs'):
        setattr(rnn, attr, None)
    return
def test_gradient_neon_gru(backend_default, gradgruargs):
    """Finite-difference gradient check of the Recurrent layer's bprop."""
    seq_len, input_size, hidden_size, batch_size = gradgruargs
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    gradient_check(seq_len, input_size, hidden_size, batch_size)
def gradient_check(seq_len, input_size, hidden_size, batch_size,
                   threshold=1.0e-3):
    """Finite-difference check of the Recurrent layer's bprop deltas.

    Scans perturbation magnitudes 1e-5..1e-1 and asserts that the best
    (smallest) worst-case difference between the numerical gradient
    estimate and the bprop deltas is below *threshold*.
    """
    # 'threshold' is the max fractional difference
    # between gradient estimate and
    # bprop deltas (def is 5%)
    # for a given set of layer parameters calculate
    # the gradients and compare to the derivatives
    # obtained with the bprop function. repeat this
    # for a range of perturbations and use the
    # perturbation size with the best results.
    # This is necessary for 32 bit computations
    min_max_err = -1.0  # minimum max error; -1.0 is "not yet set"
    print 'Perturb mag, max grad diff'
    for pert_exp in range(-5, 0):
        # need to generate the scaling and input outside
        # having an issue with the random number generator
        # when these are generated inside the gradient_calc
        # function
        input_shape = (input_size, seq_len * batch_size)
        output_shape = (hidden_size, seq_len * batch_size)
        rand_scale = np.random.random(output_shape)*2.0 - 1.0
        inp = np.random.randn(*input_shape)
        pert_mag = 10.0**pert_exp
        (grad_est, deltas) = gradient_calc(seq_len,
                                           input_size,
                                           hidden_size,
                                           batch_size,
                                           epsilon=pert_mag,
                                           rand_scale=rand_scale,
                                           inp_bl=inp)
        # worst-case elementwise difference for this perturbation size
        dd = np.max(np.abs(grad_est-deltas))
        print '%e, %e' % (pert_mag, dd)
        if min_max_err < 0.0 or dd < min_max_err:
            min_max_err = dd
        # reset the seed so models are same in each run
        # allclose_with_out(grad_est,deltas, rtol=0.0, atol=0.0)
        NervanaObject.be.rng_reset()
    # check that best value of worst case error is less than threshold
    print 'Worst case error %e with perturbation %e' % (min_max_err, pert_mag)
    print 'Threshold %e' % (threshold)
    assert min_max_err < threshold
def gradient_calc(seq_len, input_size, hidden_size, batch_size,
                  epsilon=None, rand_scale=None, inp_bl=None):
    """Return (numerical gradient estimate, bprop deltas) for one RNN.

    A random linear "loss" sum(rand_scale * output) is used; the
    numerical estimate perturbs each input element by +/-epsilon
    (central differences) and re-runs fprop.
    """
    NervanaObject.be.bsz = NervanaObject.be.batch_size = batch_size
    input_shape = (input_size, seq_len * batch_size)
    # generate input if one is not given
    if inp_bl is None:
        inp_bl = np.random.randn(*input_shape)
    # neon rnn instance
    rnn = Recurrent(hidden_size, Gaussian(), activation=Tanh())
    inpa = rnn.be.array(np.copy(inp_bl))
    # run fprop on the baseline input
    rnn.configure((input_size, seq_len))
    rnn.prev_layer = True
    rnn.allocate()
    rnn.set_deltas([rnn.be.iobuf(rnn.in_shape)])
    out_bl = rnn.fprop(inpa).get()
    # random scaling/hash to generate fake loss
    if rand_scale is None:
        rand_scale = np.random.random(out_bl.shape) * 2.0 - 1.0
    # loss function would be:
    # loss_bl = np.sum(rand_scale * out_bl)
    # run back prop with rand_scale as the errors
    # use copy to avoid any interactions
    deltas_neon = rnn.bprop(rnn.be.array(np.copy(rand_scale))).get()
    # add a perturbation to each input element
    grads_est = np.zeros(inpa.shape)
    inp_pert = inp_bl.copy()
    for pert_ind in range(inpa.size):
        save_val = inp_pert.flat[pert_ind]
        # forward-difference point
        inp_pert.flat[pert_ind] = save_val + epsilon
        reset_rnn(rnn)
        rnn.allocate()
        out_pos = rnn.fprop(rnn.be.array(inp_pert)).get()
        # backward-difference point
        inp_pert.flat[pert_ind] = save_val - epsilon
        reset_rnn(rnn)
        rnn.allocate()
        out_neg = rnn.fprop(rnn.be.array(inp_pert)).get()
        # calculate the loss with perturbations
        loss_pos = np.sum(rand_scale*out_pos)
        loss_neg = np.sum(rand_scale*out_neg)
        # compute the gradient estimate
        grad = 0.5*(loss_pos-loss_neg)/epsilon
        grads_est.flat[pert_ind] = grad
        # reset the perturbed input element
        inp_pert.flat[pert_ind] = save_val
    del rnn
    return (grads_est, deltas_neon)
| coufon/neon-distributed | tests/test_recurrent.py | Python | apache-2.0 | 10,970 | [
"Gaussian"
] | 90c022c8436e40ad2702c9740053ca958afc7718b04cedd1f904f89f5623387c |
#***********************************************************************
# This code is part of CMPL
#
# Copyright (C) 2007, 2008, 2009, 2010, 2011
# Thomas Schleiff - Halle(Saale), Germany and
# Mike Steglich - Technical University of Applied Sciences
# Wildau, Germany
#
# CMPL is a project of the Technical University of
# Applied Sciences Wildau and the Institute for Operations Research
# and Business Management at the Martin Luther University
# Halle-Wittenberg.
# Please visit the project homepage <www.coliop.org>
#
# CMPL is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# CMPL is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
#**********************************************************************
#!/usr/bin/python
import sys
def module_exists(module_name):
    """Return True when *module_name* can be imported, False otherwise."""
    try:
        __import__(module_name)
        return True
    except ImportError:
        return False
def cleanBlanks(str):
    """Turn CMPL's '%' blank placeholders back into spaces and strip ends."""
    decoded = str.replace('%', ' ')
    return decoded.strip()
# Abort early when the gurobipy bindings are not importable.
if not module_exists('gurobipy'):
    print 'Cant find Gurobi'
    quit(-1)
from gurobipy import *
# Positional arguments (i counts 1-based over sys.argv, argv[0] = script):
#   argv[1] -- working directory ('%'-escaped blanks)
#   argv[2] -- solution-pool flag ('1' enables intermediate solutions)
#   argv[3] -- model file to read ('%'-escaped blanks)
#   argv[4] -- solution output file ('%'-escaped blanks)
#   argv[5:] -- Gurobi 'name=value' parameter assignments
i = 1
for arg in sys.argv:
    #print "%i %s\n" % (i, arg)
    if i == 2:
        os.chdir(cleanBlanks(arg))
    if i == 3:
        solutionPool = arg
    if i == 4:
        model = read(cleanBlanks(arg))
    if i == 5:
        solFile = cleanBlanks(arg)
    if i > 5:
        # NOTE(review): exec of a command-line argument -- only acceptable
        # because CMPL itself generates these arguments; never expose this
        # script to untrusted input.
        s = 'model.params.%s' % arg
        exec(s)
    i += 1
# Counter for solutions written to the XML output.
nrOfSolutions = 0
def writeSol():
    """Append the final solution as a <solution> element to the open file.

    Uses the module-level 'f' (output file) and 'model' (Gurobi model).
    Writes the objective value and status, then per-variable activities
    (plus reduced costs for LPs) and per-constraint activities (plus dual
    values for LPs) when the model solved to optimality.
    """
    f.write(' <solution>\n')
    f.write(' <header')
    s = ' idx="%g"' % model.SolCount
    if model.status == GRB.status.OPTIMAL:
        s = s + ' value="%g"' % model.objVal
        if model.isMIP == 1:
            s = s + ' status="integer optimal solution"/>\n'
        else:
            s = s + ' status="optimal solution"/>\n'
    else:
        s = s + ' value="0"'
        s = s + ' status="Infeasible or unbounded model"/>\n'
    f.write(s)
    if model.status == GRB.status.OPTIMAL:
        f.write(' <variables>\n')
        i=0
        for v in model.getVars():
            if model.isMIP == 1:
                s = ' <variable idx="%g" activity="%e"/>\n' % (i,v.x)
            else:
                # LPs also report the reduced cost (RC) as 'marginal'.
                s = ' <variable idx="%g" activity="%e" marginal="%e"/>\n' % (i,v.x, v.RC)
            f.write(s)
            i=i+1
        f.write(' </variables>\n')
        f.write(' <constraints>\n')
        i=0
        for c in model.getConstrs():
            if model.isMIP == 1:
                # Constraint activity = RHS - slack.
                s = ' <constraint idx="%g" activity="%e"/>\n' % (i,c.RHS-c.Slack)
            else:
                # LPs also report the dual value (Pi) as 'marginal'.
                s = ' <constraint idx="%g" activity="%e" marginal="%e"/>\n' % (i,c.RHS-c.Slack, c.Pi)
            f.write(s)
            i=i+1
        f.write(' </constraints>\n')
    f.write(' </solution>\n')
def mycallback(model, where):
    """Gurobi callback: stream intermediate MIP incumbents into the XML file.

    Only active when the solution pool was requested (solutionPool == '1').
    Each new incumbent found during branch & bound is written as its own
    <solution> element using the callback query interface (cbGet /
    cbGetSolution -- regular attribute access is not allowed here).
    """
    if solutionPool == "1":
        if where == GRB.callback.MIPSOL:
            f.write(' <solution>\n')
            f.write(' <header')
            s = ' idx="%g"' % int(model.cbGet(GRB.callback.MIPSOL_SOLCNT))
            s = s + ' value="%g"' % model.cbGet(GRB.callback.MIPSOL_OBJ)
            s = s + ' status="integer feasible solution"/>\n'
            f.write(s)
            f.write(' <variables>\n')
            #print model.cbGetSolution(model.getVars())
            vList = model.cbGetSolution(model.getVars())
            i=0
            for v in vList:
                s = ' <variable idx="%g" activity="%e"/>\n' % (i,v)
                f.write(s)
                i=i+1
            f.write(' </variables>\n')
            f.write(' </solution>\n')
# Open the output file, emit the XML prologue, solve, and write results.
f = open(solFile, 'w')
f.write('<?xml version = "1.0" encoding="UTF-8" standalone="yes"?>\n')
f.write('<CmplGurobiSolutions>\n')
# Intermediate solutions (if enabled) are streamed by mycallback during
# the solve; the final solution is appended by writeSol afterwards.
model.optimize(mycallback)
print 'Write solution'
nrOfSolutions = nrOfSolutions + 1
writeSol()
f.write('</CmplGurobiSolutions>')
f.close()
print '...done'
"VisIt"
] | df1209634e0194b45b01181aebd7161354a1f46142273f47fc4ba9f1783da729 |
# Author: Suyog Dutt Jain <suyog.jain@aero.iitb.ac.in>
# Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os.path import abspath
from StringIO import StringIO
import copy
import numpy
import unittest
import datasets
# Local imports.
from mayavi.core.null_engine import NullEngine
# Enthought library imports
from mayavi.sources.vtk_data_source import VTKDataSource
from mayavi.modules.outline import Outline
from mayavi.modules.iso_surface import IsoSurface
from mayavi.modules.contour_grid_plane import ContourGridPlane
from mayavi.modules.scalar_cut_plane import ScalarCutPlane
class TestContour(unittest.TestCase):
    """Integration test for Mayavi contour modules (ContourGridPlane,
    IsoSurface, ScalarCutPlane) on a structured grid, including
    save/restore and deepcopy of the visualization."""
    def setUp(self):
        """Initial setting up of test fixture, automatically called by TestCase before any other test method is invoked"""
        e = NullEngine()
        # Uncomment to see visualization for debugging etc.
        #e = Engine()
        e.start()
        e.new_scene()
        self.e=e
        sgrid=datasets.generateStructuredGrid()
        src = VTKDataSource(data = sgrid)
        e.add_source(src)
        # Create an outline for the data.
        o = Outline()
        e.add_module(o)
        # Create one ContourGridPlane normal to the 'x' axis.
        cgp1 = ContourGridPlane()
        e.add_module(cgp1)
        # Set the position to the middle of the data.
        cgp1.grid_plane.position = 15
        # Another with filled contours normal to 'y' axis.
        cgp2 = ContourGridPlane()
        cgp2.contour.filled_contours = True
        # Set the axis and position to the middle of the data.
        cgp2.grid_plane.axis = 'y'
        cgp2.grid_plane.position = 15
        e.add_module(cgp2)
        # An isosurface module.
        iso = IsoSurface(compute_normals=True)
        e.add_module(iso)
        iso.contour.contours = [5]
        # An interactive scalar cut plane.
        cp = ScalarCutPlane()
        e.add_module(cp)
        ip = cp.implicit_plane
        ip.normal = 0,0,1
        ip.origin = 0.5, 0.5, 1.0
        # Since this is running offscreen this seems necessary.
        ip.widget.origin = 0.5, 0.5, 1.0
        ip.widget.enabled = False
        self.scene = e.current_scene
        self.cgp2=cgp2
        self.iso=iso
        self.cp=cp
        return
    def tearDown(self):
        """For necessary clean up, automatically called by TestCase after the test methods have been invoked"""
        self.e.stop()
        return
    def check(self):
        """Do the actual testing."""
        scene = self.scene
        src = scene.children[0]
        mm = src.children[0]
        # Module order is [outline, cgp1, cgp2, iso, cut plane] -- the
        # indices below depend on the add_module order in setUp.
        cgp1 = mm.children[1]
        self.assertEqual(cgp1.grid_plane.position,15)
        cgp2 = mm.children[2]
        self.assertEqual(cgp2.contour.filled_contours,True)
        self.assertEqual(cgp2.grid_plane.axis, 'y')
        self.assertEqual(cgp2.grid_plane.position,15)
        iso = mm.children[3]
        ctr = iso.contour.contours
        self.assertEqual(iso.compute_normals,True)
        self.assertEqual(ctr, [5.0])
        # The isosurface output should contain only the contour value.
        rng = iso.actor.mapper.input.point_data.scalars.range
        self.assertEqual(rng[0],5.0)
        self.assertEqual(rng[1],5.0)
        cp = mm.children[4]
        ip = cp.implicit_plane
        # NOTE(review): comparing the summed deviation to 1e-16 rather
        # than to 0.0 looks suspicious -- confirm the intended tolerance.
        self.assertAlmostEqual(numpy.sum(ip.normal - (0,0,1)) , 1e-16)
        self.assertAlmostEqual(numpy.sum(ip.origin - (0.5, 0.5, 1.0)), 0.0)
        self.assertEqual(ip.widget.enabled,False)
    def test_contour(self):
        "Test if the test fixture works"
        #Now test.
        self.check()
        #from mayavi.tools.show import show
        #show()
    def test_components_changed(self):
        """Test if the modules respond correctly when the components
        are changed."""
        cgp2=self.cgp2
        cp =self.cp
        iso =self.iso
        # Swap components out and back in; the pipeline must rebuild and
        # still satisfy check().
        ctr = cgp2.contour
        cgp2.contour = ctr.__class__()
        cgp2.contour = ctr
        cgp2.actor = cgp2.actor.__class__()
        iso.contour = iso.contour.__class__()
        iso.contour.contours = [5.0]
        iso.actor = iso.actor.__class__()
        iso.normals = iso.normals.__class__()
        ip = cp.implicit_plane
        cp.implicit_plane = cp.implicit_plane.__class__()
        cp.implicit_plane = ip
        ip.widget.enabled = False
        cp.contour = cp.contour.__class__()
        cp.cutter = cp.cutter.__class__()
        cp.actor = cp.actor.__class__()
        # Now check.
        self.check()
    def test_save_and_restore(self):
        """Test if saving a visualization and restoring it works."""
        engine = self.e
        scene = self.scene
        # Save visualization.
        f = StringIO()
        f.name = abspath('test.mv2') # We simulate a file.
        engine.save_visualization(f)
        f.seek(0) # So we can read this saved data.
        # Remove existing scene.
        engine.close_scene(scene)
        # Load visualization
        engine.load_visualization(f)
        self.scene = engine.current_scene
        self.check()
    def test_deepcopied(self):
        """Test if the MayaVi2 visualization can be deep-copied."""
        ############################################################
        # Test if the MayaVi2 visualization can be deep-copied.
        # Pop the source object.
        s = self.scene
        source = s.children.pop()
        # Add it back to see if that works without error.
        s.children.append(source)
        cp = source.children[0].children[-1]
        cp.implicit_plane.widget.enabled = False
        self.check()
        # Now deepcopy the source and replace the existing one with
        # the copy.  This basically simulates cutting/copying the
        # object from the UI via the right-click menu on the tree
        # view, and pasting the copy back.
        source1 = copy.deepcopy(source)
        s.children[0] = source1
        cp = source1.children[0].children[-1]
        cp.implicit_plane.widget.enabled = False
        self.check()
# Allow running this test module directly with `python test_contour.py`.
if __name__ == '__main__':
    unittest.main()
| liulion/mayavi | mayavi/tests/test_contour.py | Python | bsd-3-clause | 6,068 | [
"Mayavi"
] | a87ccdf9bee0cf419f0707c2f99884bc863d68b51151b9f959c926996f1519b3 |
"""
Provides factory methods to assemble the Galaxy web application
"""
import logging, atexit
import os, os.path, sys
from inspect import isclass
from paste.request import parse_formvars
from paste.util import import_string
from paste import httpexceptions
import pkg_resources
from galaxy.util import asbool
import config
import galaxy.model
import galaxy.model.mapping
import galaxy.web.framework
log = logging.getLogger( __name__ )
class ReportsWebApplication( galaxy.web.framework.webapp.WebApplication ):
    """WSGI application for the Galaxy Reports webapp.

    Marker subclass: all behavior comes from the base WebApplication;
    a distinct type identifies the reports app.
    """
    pass
def add_ui_controllers( webapp, app ):
    """
    Search for controllers in the 'galaxy.webapps.controllers' module and add
    them to the webapp.
    """
    from galaxy.web.base.controller import BaseUIController
    from galaxy.web.base.controller import ControllerUnavailable
    import galaxy.webapps.reports.controllers
    # Scan the controllers package directory for importable .py modules,
    # skipping private "_"-prefixed files.
    controller_dir = galaxy.webapps.reports.controllers.__path__[0]
    for fname in os.listdir( controller_dir ):
        if not fname.startswith( "_" ) and fname.endswith( ".py" ):
            name = fname[:-3]
            module_name = "galaxy.webapps.reports.controllers." + name
            module = __import__( module_name )
            # __import__ returns the top-level package; walk down to the leaf.
            for comp in module_name.split( "." )[1:]:
                module = getattr( module, comp )
            # Look for a controller inside the modules
            for key in dir( module ):
                T = getattr( module, key )
                # Register every concrete BaseUIController subclass under
                # the containing module's name.
                if isclass( T ) and T is not BaseUIController and issubclass( T, BaseUIController ):
                    webapp.add_ui_controller( name, T( app ) )
def app_factory( global_conf, **kwargs ):
    """Return a wsgi application serving the root object.

    Builds (or receives via kwargs['app']) the reports UniverseApplication,
    registers the UI controllers and routes, and optionally wraps the app
    in middleware and static-file serving based on configuration.
    """
    # Create the Galaxy application unless passed in
    if 'app' in kwargs:
        app = kwargs.pop( 'app' )
    else:
        from galaxy.webapps.reports.app import UniverseApplication
        app = UniverseApplication( global_conf = global_conf, **kwargs )
        atexit.register( app.shutdown )
    # Create the universe WSGI application
    webapp = ReportsWebApplication( app, session_cookie='galaxyreportssession', name="reports" )
    add_ui_controllers( webapp, app )
    # These two routes handle our simple needs at the moment
    webapp.add_route( '/:controller/:action', controller="root", action='index' )
    webapp.add_route( '/:action', controller='root', action='index' )
    webapp.finalize_config()
    # Wrap the webapp in some useful middleware
    if kwargs.get( 'middleware', True ):
        webapp = wrap_in_middleware( webapp, global_conf, **kwargs )
    if kwargs.get( 'static_enabled', True ):
        webapp = wrap_in_static( webapp, global_conf, **kwargs )
    # Close any pooled database connections before forking
    try:
        galaxy.model.mapping.metadata.engine.connection_provider._pool.dispose()
    except Exception:
        # Best-effort cleanup; the pool layout differs between SQLAlchemy
        # versions. (BUG FIX: was a bare 'except:', which also swallowed
        # SystemExit and KeyboardInterrupt.)
        pass
    # Return
    return webapp
def wrap_in_middleware( app, global_conf, **local_conf ):
    """Based on the configuration wrap `app` in a set of common and useful middleware.

    Order matters: httpexceptions is innermost, then (optionally)
    recursive, debug aids, error handling, transaction logging, and
    X-Forwarded-Host handling outermost.
    """
    # Merge the global and local configurations
    conf = global_conf.copy()
    conf.update(local_conf)
    debug = asbool( conf.get( 'debug', False ) )
    # First put into place httpexceptions, which must be most closely
    # wrapped around the application (it can interact poorly with
    # other middleware):
    app = httpexceptions.make_middleware( app, conf )
    log.debug( "Enabling 'httpexceptions' middleware" )
    # The recursive middleware allows for including requests in other
    # requests or forwarding of requests, all on the server side.
    if asbool(conf.get('use_recursive', True)):
        from paste import recursive
        app = recursive.RecursiveMiddleware( app, conf )
        log.debug( "Enabling 'recursive' middleware" )
    # Various debug middleware that can only be turned on if the debug
    # flag is set, either because they are insecure or greatly hurt
    # performance
    if debug:
        # Middleware to check for WSGI compliance
        if asbool( conf.get( 'use_lint', True ) ):
            from paste import lint
            app = lint.make_middleware( app, conf )
            log.debug( "Enabling 'lint' middleware" )
        # Middleware to run the python profiler on each request
        if asbool( conf.get( 'use_profile', False ) ):
            # NOTE(review): the stdlib 'profile' module has no
            # ProfileMiddleware; this likely should be
            # 'from paste.debug import profile' -- confirm before enabling.
            import profile
            app = profile.ProfileMiddleware( app, conf )
            log.debug( "Enabling 'profile' middleware" )
        # Middleware that intercepts print statements and shows them on the
        # returned page
        if asbool( conf.get( 'use_printdebug', True ) ):
            from paste.debug import prints
            app = prints.PrintDebugMiddleware( app, conf )
            log.debug( "Enabling 'print debug' middleware" )
    if debug and asbool( conf.get( 'use_interactive', False ) ):
        # Interactive exception debugging, scary dangerous if publicly
        # accessible, if not enabled we'll use the regular error printing
        # middleware.
        pkg_resources.require( "WebError" )
        from weberror import evalexception
        app = evalexception.EvalException( app, conf,
                                           templating_formatters=build_template_error_formatters() )
        log.debug( "Enabling 'eval exceptions' middleware" )
    else:
        # Not in interactive debug mode, just use the regular error middleware
        from paste.exceptions import errormiddleware
        app = errormiddleware.ErrorMiddleware( app, conf )
        log.debug( "Enabling 'error' middleware" )
    # Transaction logging (apache access.log style)
    if asbool( conf.get( 'use_translogger', True ) ):
        from paste.translogger import TransLogger
        app = TransLogger( app )
        log.debug( "Enabling 'trans logger' middleware" )
    # X-Forwarded-Host handling
    from galaxy.web.framework.middleware.xforwardedhost import XForwardedHostMiddleware
    app = XForwardedHostMiddleware( app )
    log.debug( "Enabling 'x-forwarded-host' middleware" )
    return app
def wrap_in_static( app, global_conf, **local_conf ):
    """
    Mount the dynamic `app` at '/' and serve static content from the
    directories named in the configuration.
    """
    from paste.urlmap import URLMap
    from galaxy.web.framework.middleware.static import CacheableStaticURLParser as Static
    # Combine the global and local settings into a single dict.
    conf = dict( global_conf )
    conf.update( local_conf )
    # Optional cache lifetime (in seconds) for static responses.
    cache_time = conf.get( "static_cache_time", None )
    if cache_time is not None:
        cache_time = int( cache_time )
    mapping = URLMap()
    # Anything not matched by a static prefix goes to the dynamic app.
    mapping["/"] = app
    # Static mappings, driven by the configured directory keys.
    for url, conf_key in ( ( "/static", "static_dir" ),
                           ( "/images", "static_images_dir" ),
                           ( "/static/scripts", "static_scripts_dir" ),
                           ( "/static/style", "static_style_dir" ),
                           ( "/favicon.ico", "static_favicon_dir" ) ):
        mapping[url] = Static( conf.get( conf_key ), cache_time )
    # URL mapper becomes the root webapp
    return mapping
def build_template_error_formatters():
    """
    Build the list of template error formatters handed to WebError.  On an
    error, WebError calls each function in the list until one returns a
    value, which is then shown on the error page.
    """
    import mako.exceptions
    def mako_html_data( exc_value ):
        # Render mako compile/syntax failures (and mako's 'Undefined'
        # attribute errors) with mako's own HTML error template.
        mako_errors = ( mako.exceptions.CompileException,
                        mako.exceptions.SyntaxException )
        undefined_attr = ( isinstance( exc_value, AttributeError )
                           and exc_value.args[0].startswith( "'Undefined' object has no attribute" ) )
        if isinstance( exc_value, mako_errors ) or undefined_attr:
            return mako.exceptions.html_error_template().render( full=False, css=False )
    return [ mako_html_data ]
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/webapps/reports/buildapp.py | Python | gpl-3.0 | 7,937 | [
"Galaxy"
] | 07f44dea496b623512f3860c4473fb173d017c456beb7ac7a1eadf1f6238cd9e |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The 'gcloud test android run' command."""
import datetime
import random
import string
from googlecloudsdk.api_lib.test import arg_util
from googlecloudsdk.api_lib.test import ctrl_c_handler
from googlecloudsdk.api_lib.test import exit_code
from googlecloudsdk.api_lib.test import history_picker
from googlecloudsdk.api_lib.test import matrix_ops
from googlecloudsdk.api_lib.test import results_bucket
from googlecloudsdk.api_lib.test import results_summary
from googlecloudsdk.api_lib.test import tool_results
from googlecloudsdk.api_lib.test import util
from googlecloudsdk.api_lib.test.android import arg_manager
from googlecloudsdk.api_lib.test.android import matrix_creator
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
@base.UnicodeIsSupported
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class Run(base.ListCommand):
  """Invoke a test in Firebase Test Lab for Android and view test results."""

  # User-facing help text rendered by Calliope; {command} is substituted
  # with the concrete command path at display time.
  detailed_help = {
      'DESCRIPTION': """\
          *{command}* invokes and monitors tests in Firebase Test Lab for
          Android.

          Three main types of tests are currently supported:
          - *robo*: runs a smart, automated exploration of the activities in
            your Android app which records any installation failures or crashes
            and builds an activity map with associated screenshots and video.
          - *instrumentation*: runs automated unit or integration tests written
            using a testing framework. Firebase Test Lab for Android currently
            supports the Espresso, Robotium and UI Automator 2.0 testing
            frameworks.

          The type of test to run can be specified with the *--type* flag,
          although the type can often be inferred from other flags.
          Specifically, if the *--test* flag is present, the test *--type* will
          default to `instrumentation`. If *--test* is not present, then
          *--type* defaults to `robo`.

          All arguments for *{command}* may be specified on the command line
          and/or within an argument file. Run *$ gcloud topic arg-files* for
          more information about argument files.
          """,

      'EXAMPLES': """\
          To invoke a robo test lasting 100 seconds against the default device
          environment, run:

            $ {command} --app APP_APK --timeout 100s

          To invoke a robo test against a virtual Nexus9 device in
          landscape orientation, run:

            $ {command} --app APP_APK --device-ids Nexus9 --orientations landscape

          To invoke an instrumentation test (Espresso or Robotium) against a
          physical Nexus 4 device (DEVICE_ID: mako) which is running Android API
          level 18 in French, run:

            $ {command} --app APP_APK --test TEST_APK --device-ids mako --os-version-ids 18 --locales fr --orientations portrait

          To run the same test as above using short flags, run:

            $ {command} --app APP_APK --test TEST_APK -d mako -v 18 -l fr -o portrait

          To run a series of 5-minute robo tests against a comprehensive matrix
          of virtual and physical devices, OS versions and locales, run:

            $ {command} --app APP_APK --timeout 5m --device-ids mako,Nexus5,Nexus6,g3,zeroflte --os-version-ids 17,18,19,21,22 --locales de,en_US,en_GB,es,fr,it,ru,zh

          To run an instrumentation test against the default test environment,
          but using a specific Google Cloud Storage bucket to hold the raw test
          results and specifying the name under which the history of your tests
          will be collected and displayed in the Google Developers Console, run:

            $ {command} --app APP_APK --test TEST_APK --results-bucket excelsior-app-results-bucket --results-history-name 'Excelsior App Test History'

          All test arguments for a given test may alternatively be stored in an
          argument group within a YAML-formatted argument file. The _ARG_FILE_
          may contain one or more named argument groups, and argument groups may
          be combined using the `include:` attribute (Run *$ gcloud topic
          arg-files* for more information). The ARG_FILE can easily be shared
          with colleagues or placed under source control to ensure consistent
          test executions.

          To run a test using arguments loaded from an ARG_FILE named
          *excelsior_args*, which contains an argument group named *robo-args:*,
          use the following syntax:

            $ {command} path/to/excelsior_args:robo-args
          """,
  }

  @staticmethod
  def Args(parser):
    """Method called by Calliope to register flags for this command.

    Args:
      parser: An argparse parser used to add arguments that follow this
          command in the CLI. Positional arguments are allowed.
    """
    # Flags shared by every 'gcloud test ... run' command.
    arg_util.AddCommonTestRunArgs(parser)
    # Flags defining the device/OS-version/locale/orientation matrix.
    arg_util.AddMatrixArgs(parser)
    # Android-specific flags (e.g. --app, --test, --obb-files).
    arg_util.AddAndroidTestArgs(parser)

  def Run(self, args):
    """Run the 'gcloud test run' command to invoke a test in Firebase Test Lab.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation (i.e. group and command arguments combined).

    Returns:
      One of:
        - a list of TestOutcome tuples (if ToolResults are available).
        - a URL string pointing to the user's results in ToolResults or GCS.
    """
    # Validate the raw args against the Android device catalog and fill in
    # any unspecified defaults.
    device_catalog = util.GetAndroidCatalog(self.context)
    arg_manager.AndroidArgsManager(device_catalog).Prepare(args)

    project = util.GetProject()
    tr_client = self.context['toolresults_client']
    tr_messages = self.context['toolresults_messages']
    storage_client = self.context['storage_client']

    # Upload the app APK, optional test APK and any OBB files to a unique
    # object path in the results bucket.
    bucket_ops = results_bucket.ResultsBucketOps(
        project, args.results_bucket, _UniqueGcsObjectName(),
        tr_client, tr_messages, storage_client)
    bucket_ops.UploadFileToGcs(args.app)
    if args.test:
      bucket_ops.UploadFileToGcs(args.test)
    for obb_file in (args.obb_files or []):
      bucket_ops.UploadFileToGcs(obb_file)
    bucket_ops.LogGcsResultsUrl()

    # Resolve the Tool Results history under which results will be grouped.
    tr_history_picker = history_picker.ToolResultsHistoryPicker(
        project, tr_client, tr_messages)
    history_name = PickHistoryName(args)
    history_id = tr_history_picker.GetToolResultsHistoryId(history_name)

    # Create the server-side test matrix and a monitor to track it; the
    # CancellableTestSection lets Ctrl-C cancel the matrix cleanly.
    matrix = matrix_creator.CreateMatrix(
        args, self.context, history_id, bucket_ops.gcs_results_root)
    monitor = matrix_ops.MatrixMonitor(
        matrix.testMatrixId, args.type, self.context)

    with ctrl_c_handler.CancellableTestSection(monitor):
      supported_executions = monitor.HandleUnsupportedExecutions(matrix)
      tr_ids = tool_results.GetToolResultsIds(matrix, monitor)

      url = tool_results.CreateToolResultsUiUrl(project, tr_ids)
      log.status.Print('')
      # NOTE(review): 'async' became a reserved keyword in Python 3.7, so this
      # attribute access only parses under Python 2 / early Python 3.
      if args.async:
        # Asynchronous invocation: return the results URL without waiting.
        return url

      log.status.Print('Test results will be streamed to [{0}].'.format(url))

      # If we have exactly one testExecution, show detailed progress info.
      if len(supported_executions) == 1:
        monitor.MonitorTestExecutionProgress(supported_executions[0].id)
      else:
        monitor.MonitorTestMatrixProgress()

      log.status.Print('\nMore details are available at [{0}].'.format(url))

    # Fetch the per-dimension test outcomes list, and also the "rolled-up"
    # matrix outcome from the Tool Results service.
    summary_fetcher = results_summary.ToolResultsSummaryFetcher(
        project, tr_client, tr_messages, tr_ids)
    self.exit_code = exit_code.ExitCodeFromRollupOutcome(
        summary_fetcher.FetchMatrixRollupOutcome(),
        tr_messages.Outcome.SummaryValueValuesEnum)
    return summary_fetcher.CreateMatrixOutcomeSummary()

  def Collection(self):
    """Choose the default resource collection key used to format test outcomes.

    Returns:
      A collection string used as a key to select the default ResourceInfo
      from core.resources.resource_registry.RESOURCE_REGISTRY.
    """
    log.debug('gcloud test command exit_code is: {0}'.format(self.exit_code))
    return 'test.android.run.outcomes'
def _UniqueGcsObjectName():
  """Create a unique GCS object name to hold test results.

  The Testing back-end needs a unique GCS object name within the results
  bucket to prevent race conditions while processing test results. The gcloud
  client uses the current time down to the microsecond in ISO format plus a
  random 4-letter suffix. The format is: "YYYY-MM-DD_hh:mm:ss.ssssss_rrrr".

  Returns:
    A string with the unique GCS object name.
  """
  # Fixed: use string.ascii_letters instead of string.letters. The latter is
  # locale-dependent and was removed in Python 3; ascii_letters is equivalent
  # under the default locale and exists on both Python 2 and 3.
  return '{0}_{1}'.format(datetime.datetime.now().isoformat('_'),
                          ''.join(random.sample(string.ascii_letters, 4)))
def PickHistoryName(args):
  """Returns the results history name to use to look up a history ID.

  The history ID corresponds to a history name. A user-supplied history name
  takes precedence; failing that, an app-package name with ' (gcloud)'
  appended serves as the history name. With neither available, None is
  returned and the Testing service chooses an appropriate history ID itself.

  Args:
    args: an argparse namespace. All the arguments that were provided to the
      command invocation (i.e. group and command arguments combined).

  Returns:
    Either a string containing a history name derived from user-supplied data,
    or None if we lack the required information.
  """
  explicit_name = args.results_history_name
  if explicit_name:
    return explicit_name
  package = args.app_package
  return package + ' (gcloud)' if package else None
| Sorsly/subtle | google-cloud-sdk/lib/surface/test/android/run.py | Python | mit | 10,247 | [
"ESPResSo"
] | 9e315540bb2aadaa47025b049f01ea2e30000bf349dcdb06ef5f91ec32774a16 |
#!/usr/bin/env python
# Build/installation script for the 'futures' backport package.
# Prefer setuptools so the package can be flagged as not zip-safe; fall back
# to plain distutils when setuptools is not installed.
extras = {}
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
else:
    extras['zip_safe'] = False

setup(
    name='futures',
    version='3.0.3',
    description='Backport of the concurrent.futures package from Python 3.2',
    author='Brian Quinlan',
    author_email='brian@sweetapp.com',
    maintainer='Alex Gronholm',
    maintainer_email='alex.gronholm+pypi@nextday.fi',
    url='https://github.com/agronholm/pythonfutures',
    packages=['concurrent', 'concurrent.futures'],
    license='BSD',
    classifiers=[
        'License :: OSI Approved :: BSD License',
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 2 :: Only',
    ],
    **extras
)
| harlowja/pythonfutures | setup.py | Python | bsd-2-clause | 969 | [
"Brian"
] | f1e9b0c2c783d4a7668ec7d060e9dc71980fa8706fb9358db39033c7299d2afc |
"""Unit test for roman82.py
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.3 $"
__date__ = "$Date: 2004/05/05 21:57:20 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
import roman82
import unittest
class KnownValues(unittest.TestCase):
    """Spot-check toRoman and fromRoman against a table of known pairs."""

    # (integer, numeral) pairs: every single-symbol value, each subtractive
    # form, and a spread of values up to the supported maximum of 4999.
    knownValues = ( (1, 'I'),
                    (2, 'II'),
                    (3, 'III'),
                    (4, 'IV'),
                    (5, 'V'),
                    (6, 'VI'),
                    (7, 'VII'),
                    (8, 'VIII'),
                    (9, 'IX'),
                    (10, 'X'),
                    (50, 'L'),
                    (100, 'C'),
                    (500, 'D'),
                    (1000, 'M'),
                    (31, 'XXXI'),
                    (148, 'CXLVIII'),
                    (294, 'CCXCIV'),
                    (312, 'CCCXII'),
                    (421, 'CDXXI'),
                    (528, 'DXXVIII'),
                    (621, 'DCXXI'),
                    (782, 'DCCLXXXII'),
                    (870, 'DCCCLXX'),
                    (941, 'CMXLI'),
                    (1043, 'MXLIII'),
                    (1110, 'MCX'),
                    (1226, 'MCCXXVI'),
                    (1301, 'MCCCI'),
                    (1485, 'MCDLXXXV'),
                    (1509, 'MDIX'),
                    (1607, 'MDCVII'),
                    (1754, 'MDCCLIV'),
                    (1832, 'MDCCCXXXII'),
                    (1993, 'MCMXCIII'),
                    (2074, 'MMLXXIV'),
                    (2152, 'MMCLII'),
                    (2212, 'MMCCXII'),
                    (2343, 'MMCCCXLIII'),
                    (2499, 'MMCDXCIX'),
                    (2574, 'MMDLXXIV'),
                    (2646, 'MMDCXLVI'),
                    (2723, 'MMDCCXXIII'),
                    (2892, 'MMDCCCXCII'),
                    (2975, 'MMCMLXXV'),
                    (3051, 'MMMLI'),
                    (3185, 'MMMCLXXXV'),
                    (3250, 'MMMCCL'),
                    (3313, 'MMMCCCXIII'),
                    (3408, 'MMMCDVIII'),
                    (3501, 'MMMDI'),
                    (3610, 'MMMDCX'),
                    (3743, 'MMMDCCXLIII'),
                    (3844, 'MMMDCCCXLIV'),
                    (3888, 'MMMDCCCLXXXVIII'),
                    (3940, 'MMMCMXL'),
                    (3999, 'MMMCMXCIX'),
                    (4000, 'MMMM'),
                    (4500, 'MMMMD'),
                    (4888, 'MMMMDCCCLXXXVIII'),
                    (4999, 'MMMMCMXCIX'))

    def testToRomanKnownValues(self):
        """toRoman should give known result with known input"""
        for integer, numeral in self.knownValues:
            result = roman82.toRoman(integer)
            self.assertEqual(numeral, result)

    def testFromRomanKnownValues(self):
        """fromRoman should give known result with known input"""
        for integer, numeral in self.knownValues:
            result = roman82.fromRoman(numeral)
            self.assertEqual(integer, result)
class ToRomanBadInput(unittest.TestCase):
    """Verify toRoman rejects values outside 1..4999 and non-integers."""

    def testTooLarge(self):
        """toRoman should fail with large input"""
        # 5000 is the first value above the supported maximum of 4999.
        self.assertRaises(roman82.OutOfRangeError, lambda: roman82.toRoman(5000))

    def testZero(self):
        """toRoman should fail with 0 input"""
        # Roman numerals have no representation for zero.
        self.assertRaises(roman82.OutOfRangeError, lambda: roman82.toRoman(0))

    def testNegative(self):
        """toRoman should fail with negative input"""
        self.assertRaises(roman82.OutOfRangeError, lambda: roman82.toRoman(-1))

    def testNonInteger(self):
        """toRoman should fail with non-integer input"""
        self.assertRaises(roman82.NotIntegerError, lambda: roman82.toRoman(0.5))
class FromRomanBadInput(unittest.TestCase):
    """Verify fromRoman rejects malformed numeral strings."""

    def _assertInvalid(self, numeral):
        # Every malformed numeral must raise InvalidRomanNumeralError.
        self.assertRaises(roman82.InvalidRomanNumeralError,
                          roman82.fromRoman, numeral)

    def testTooManyRepeatedNumerals(self):
        """fromRoman should fail with too many repeated numerals"""
        for numeral in ('MMMMM', 'DD', 'CCCC', 'LL', 'XXXX', 'VV', 'IIII'):
            self._assertInvalid(numeral)

    def testRepeatedPairs(self):
        """fromRoman should fail with repeated pairs of numerals"""
        for numeral in ('CMCM', 'CDCD', 'XCXC', 'XLXL', 'IXIX', 'IVIV'):
            self._assertInvalid(numeral)

    def testMalformedAntecedent(self):
        """fromRoman should fail with malformed antecedents"""
        for numeral in ('IIMXCC', 'VX', 'DCM', 'CMM', 'IXIV',
                        'MCMC', 'XCX', 'IVI', 'LM', 'LD', 'LC'):
            self._assertInvalid(numeral)

    def testBlank(self):
        """fromRoman should fail with blank string"""
        self._assertInvalid("")
class SanityCheck(unittest.TestCase):
    """Round-trip consistency across the whole supported range."""

    def testSanity(self):
        """fromRoman(toRoman(n))==n for all n"""
        for value in range(1, 5000):
            # Converting to a numeral and back must recover the input.
            self.assertEqual(value,
                             roman82.fromRoman(roman82.toRoman(value)))
class CaseCheck(unittest.TestCase):
    """toRoman output must be uppercase; fromRoman input must be too."""

    def testToRomanCase(self):
        """toRoman should always return uppercase"""
        for value in range(1, 5000):
            numeral = roman82.toRoman(value)
            self.assertEqual(numeral.upper(), numeral)

    def testFromRomanCase(self):
        """fromRoman should only accept uppercase input"""
        for value in range(1, 5000):
            numeral = roman82.toRoman(value)
            # The uppercase form must be accepted without error...
            roman82.fromRoman(numeral.upper())
            # ...while the lowercase form must be rejected.
            self.assertRaises(roman82.InvalidRomanNumeralError,
                              roman82.fromRoman, numeral.lower())
if __name__ == "__main__":
    # Discover and run every TestCase in this module when executed directly.
    unittest.main()
| tapomayukh/projects_in_python | sandbox_tapo/src/refs/diveintopython-pdf-5.4/diveintopython-5.4/py/roman/stage8/romantest82.py | Python | mit | 5,848 | [
"VisIt"
] | d8a6b407f465400370d88a14d5e2ee064fac83c4d15cf3184b5b83e3a159f29d |
"""
Newick format (:mod:`skbio.io.newick`)
======================================
.. currentmodule:: skbio.io.newick
Newick format (``newick``) stores spanning-trees with weighted edges and node
names in a minimal file format [1]_. This is useful for representing
phylogenetic trees and taxonomies. Newick was created as an informal
specification on June 26, 1986 [2]_.
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |:mod:`skbio.tree.TreeNode` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
A Newick file represents a tree using the following grammar. See below for an
explanation of the format in plain English.
Formal Grammar
^^^^^^^^^^^^^^
.. code-block:: none
NEWICK ==> NODE ;
NODE ==> FORMATTING SUBTREE FORMATTING NODE_INFO FORMATTING
SUBTREE ==> ( CHILDREN ) | null
NODE_INFO ==> LABEL | LENGTH | LABEL FORMATTING LENGTH | null
FORMATTING ==> [ COMMENT_CHARS ] | whitespace | null
CHILDREN ==> NODE | CHILDREN , NODE
LABEL ==> ' ALL_CHARS ' | SAFE_CHARS
LENGTH ==> : FORMATTING NUMBER
COMMENT_CHARS ==> any
ALL_CHARS ==> any
SAFE_CHARS ==> any except: ,;:()[] and whitespace
NUMBER ==> a decimal or integer
.. note:: The ``_`` character inside of SAFE_CHARS will be converted to a
blank space in ``skbio.tree.TreeNode`` and vice versa.
``'`` is considered the escape character. To escape ``'`` use a
preceding ``'``.
The implementation of newick in scikit-bio allows nested comments. To
escape ``[`` or ``]`` from within COMMENT_CHARS, use a preceding ``'``.
Explanation
^^^^^^^^^^^
The Newick format defines a tree by creating a minimal representation of nodes
and their relationships to each other.
Basic Symbols
~~~~~~~~~~~~~
There are several symbols which define nodes, the first of which is the
semi-colon (``;``). The semi-colon creates a root node to its left. Recall that
there can only be one root in a tree.
The next symbol is the comma (``,``), which creates a node to its right.
However, these two alone are not enough. For example imagine the following
string: ``, , , ;``. It is evident that there is a root, but the other 3 nodes,
defined by commas, have no relationship. For this reason, it is not a valid
Newick string to have more than one node at the root level.
To provide these relationships, there is another structure:
paired parenthesis (``( )``). These are inserted at the location of an existing
node and give it the ability to have children. Placing ``( )`` in a node's
location will create a child inside the parenthesis on the left-most
inner edge.
Application of Rules
~~~~~~~~~~~~~~~~~~~~
Adding a comma within the parenthesis will create two children: ``( , )``
(also known as a bifurcating node). Notice that only one comma is needed
because the parenthesis have already created a child. Adding more commas will
create more children who are siblings to each other. For example, writing
``( , , , )`` will create a multifurcating node with 4 child nodes who are
siblings to each other.
The notation for a root can be used to create a complete tree. The ``;`` will
create a root node where parenthesis can be placed: ``( );``. Adding commas
will create more children: ``( , );``. These rules can be applied recursively
ad. infinitum: ``(( , ), ( , ));``.
Adding Node Information
~~~~~~~~~~~~~~~~~~~~~~~
Information about a node can be added to improve the clarity and meaning of a
tree. Each node may have a label and/or a length (to the parent). Newick always
places the node information at the right-most edge of a node's position.
Starting with labels, ``(( , ), ( , ));`` would become
``((D, E)B, (F, G)C)A;``. There is a named root ``A`` and the root's children
(from left to right) are ``B`` and ``C``. ``B`` has the children ``D`` and
``E``, and ``C`` has the children ``F`` and ``G``.
Length represents the distance (or weight of the edge) that connects a node to
its parent. This must be a decimal or integer. As an example, suppose ``D`` is
rather estranged from ``B``, and ``E`` is very close. That can be written as:
``((D:10, E:0.5)B, (F, G)C)A;``. Notice that the colon (``:``) separates the
label from the length. If the length is provided but the label is omitted, a
colon must still precede the length (``(:0.25,:0.5):0.0;``). Without this, the
length would be interpreted as a label (which happens to be a number).
.. note:: Internally scikit-bio will cast a length to ``float`` which
technically means that even exponent strings (``1e-3``) are supported)
Advanced Label and Length Rules
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
More characters can be used to create more descriptive labels. When creating a
label there are some rules that must be considered due to limitations in the
Newick format. The following characters are not allowed within a standard
label: parenthesis, commas, square-brackets, colon, semi-colon, and whitespace.
These characters are also disallowed from occurring within a length, which has
a much stricter format: decimal or integer. Many of these characters are
symbols which define the structure of a Newick tree and are thus disallowed for
obvious reasons. The symbols not yet mentioned are square-brackets (``[ ]``)
and whitespace (space, tab, and newline).
What if these characters are needed within a label? In the simple case of
spaces, an underscore (``_``) will be translated as a space on read and vice
versa on write.
What if a literal underscore or any of the others mentioned are needed?
A label can be escaped (meaning that its contents are understood as regular
text) using single-quotes (``'``). When a label is surrounded by single-quotes,
any character is permissible. If a single-quote is needed inside of an escaped
label or anywhere else, it can be escaped with another single-quote.
For example, ``A_1`` is written ``'A_1'`` and ``'A'_1`` would be ``'''A''_1'``.
Inline Comments
~~~~~~~~~~~~~~~
Square-brackets define a comment, which are the least commonly used part of
the Newick format. Comments are not included in the generated objects and exist
only as human readable text ignored by the parser. The implementation in
scikit-bio allows for nested comments (``[comment [nested]]``). Unpaired
square-brackets can be escaped with a single-quote preceding the bracket when
inside an existing comment. (This is identical to escaping a single-quote).
The single-quote has the highest operator precedence, so there is no need to
worry about starting a comment from within a properly escaped label.
Whitespace
~~~~~~~~~~
Whitespace is not allowed within any un-escaped label or in any length, but it
is permitted anywhere else.
Caveats
~~~~~~~
Newick cannot always provide a unique representation of any tree, in other
words, the same tree can be written multiple ways. For example: ``(A, B);`` is
isomorphic to ``(B, A);``. The implementation in scikit-bio maintains the given
sibling order in its object representations.
Newick has no representation of an unrooted tree. Some biological packages make
the assumption that when a trifurcated root exists in an otherwise bifurcated
tree that the tree must be unrooted. In scikit-bio, ``skbio.tree.TreeNode``
will always be rooted at the ``newick`` root (``;``).
Format Parameters
-----------------
The only supported format parameter is `convert_underscores`. This is `True` by
default. When `False`, underscores found in unescaped labels will not be
converted to spaces. This is useful when reading the output of an external
program in which the underscores were not escaped. This parameter only affects
`read` operations. It does not exist for `write` operations; they will always
properly escape underscores.
Examples
--------
This is a simple Newick string.
>>> from StringIO import StringIO
>>> from skbio import read
>>> from skbio.tree import TreeNode
>>> f = StringIO(u"((D, E)B, (F, G)C)A;")
>>> tree = read(f, format="newick", into=TreeNode)
>>> f.close()
>>> print(tree.ascii_art())
/-D
/B-------|
| \-E
-A-------|
| /-F
\C-------|
\-G
This is a complex Newick string.
>>> f = StringIO(u"[example](a:0.1, 'b_b''':0.2, (c:0.3, d_d:0.4)e:0.5)f:0.0;")
>>> tree = read(f, format="newick", into=TreeNode)
>>> f.close()
>>> print(tree.ascii_art())
/-a
|
-f-------|--b_b'
|
| /-c
\e-------|
\-d d
Notice that the node originally labeled ``d_d`` became ``d d``. Additionally
``'b_b'''`` became ``b_b'``. Note that the underscore was preserved in `b_b'`.
References
----------
.. [1] http://evolution.genetics.washington.edu/phylip/newick_doc.html
.. [2] http://evolution.genetics.washington.edu/phylip/newicktree.html
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import zip, range
from skbio.io import (register_reader, register_writer, register_sniffer,
NewickFormatError)
from skbio.tree import TreeNode
@register_sniffer("newick")
def _newick_sniffer(fh):
    """Guess whether `fh` contains newick data.

    A file is rejected as newick if any of the following hold:
      * it is empty
      * whitespace occurs inside a label (raised by the tokenizer)
      * ':' is followed by another operator
      * '(' is not immediately preceded by ',' or another '('
      * the parentheses are unbalanced when ';' is reached
    Scanning the first 100 tokens (or fewer at EOF) without a violation is
    taken as sufficient evidence that this is probably newick.
    """
    structure = set(",;:()")
    seen_any = False
    prev = ','
    depth = 0
    try:
        for position, token in enumerate(_tokenize_newick(fh)):
            # 100 tokens ought to be enough for anybody.
            if position >= 100:
                break
            if token in structure:
                if token == ':':
                    if prev == ':':
                        raise NewickFormatError()
                elif token == ',':
                    if prev == ':' or depth <= 0:
                        raise NewickFormatError()
                elif token == ';':
                    if prev == ':' or depth != 0:
                        raise NewickFormatError()
                elif token == ')':
                    if prev == ':':
                        raise NewickFormatError()
                    depth -= 1
                else:  # token == '('
                    if prev != '(' and prev != ',':
                        raise NewickFormatError()
                    depth += 1
            prev = token
            seen_any = True
    except NewickFormatError:
        return False, {}
    return seen_any, {}
@register_reader('newick', TreeNode)
def _newick_to_tree_node(fh, convert_underscores=True):
    """Parse a newick-formatted file handle into a ``TreeNode`` tree.

    Parameters
    ----------
    fh : file handle
        Open file handle containing newick data.
    convert_underscores : bool, optional
        Passed through to the tokenizer; when True, underscores in
        unescaped labels are converted to spaces.

    Returns
    -------
    TreeNode
        The root of the parsed tree.

    Raises
    ------
    NewickFormatError
        If a length is not numeric, parentheses are unbalanced, children
        are unnested, or the tree is missing its terminating ';'.
    """
    # Stack of (node, depth) pairs that have not yet been attached to a
    # parent; the root sits at the bottom with depth 0.
    tree_stack = []
    current_depth = 0
    last_token = ''
    next_is_distance = False
    root = TreeNode()
    tree_stack.append((root, current_depth))
    for token in _tokenize_newick(fh, convert_underscores=convert_underscores):
        # Check for a label
        if last_token not in '(,):':
            if not next_is_distance:
                # Newick treats '' and an absent label identically.
                tree_stack[-1][0].name = last_token if last_token else None
            else:
                next_is_distance = False
        # Check for a distance
        if token == ':':
            next_is_distance = True
        elif last_token == ':':
            try:
                tree_stack[-1][0].length = float(token)
            except ValueError:
                raise NewickFormatError("Could not read length as numeric type"
                                        ": %s." % token)
        elif token == '(':
            # Descend one level: the new node is the first child at the
            # deeper depth.
            current_depth += 1
            tree_stack.append((TreeNode(), current_depth))
        elif token == ',':
            # A sibling at the current depth.
            tree_stack.append((TreeNode(), current_depth))
        elif token == ')':
            if len(tree_stack) < 2:
                raise NewickFormatError("Could not parse file as newick."
                                        " Parenthesis are unbalanced.")
            children = []
            # Pop all nodes at this depth as they belong to the remaining
            # node on the top of the stack as children.
            while current_depth == tree_stack[-1][1]:
                node, _ = tree_stack.pop()
                children.insert(0, node)
            parent = tree_stack[-1][0]
            if parent.children:
                raise NewickFormatError("Could not parse file as newick."
                                        " Contains unnested children.")
            # This is much faster than TreeNode.extend
            for child in children:
                child.parent = parent
            parent.children = children
            current_depth -= 1
        elif token == ';':
            # A well-formed tree ends with ';' when only the root remains.
            if len(tree_stack) == 1:
                return root
            break
        last_token = token
    raise NewickFormatError("Could not parse file as newick."
                            " `(Parenthesis)`, `'single-quotes'`,"
                            " `[comments]` may be unbalanced, or tree may be"
                            " missing its root.")
@register_writer("newick", TreeNode)
def _tree_node_to_newick(obj, fh):
    """Serialize a ``TreeNode`` tree to `fh` in newick format.

    Performs an iterative depth-first traversal using an explicit stack of
    (node, depth) pairs, writing '(' on descent, ')' on ascent, and labels,
    lengths and ',' separators as nodes are finalized.
    """
    # Characters that force a label to be single-quote escaped on output.
    operators = set(",:_;()[]")
    current_depth = 0
    nodes_left = [(obj, 0)]
    while len(nodes_left) > 0:
        entry = nodes_left.pop()
        node, node_depth = entry
        if node.children and node_depth >= current_depth:
            # First visit of an internal node: open its subtree and requeue
            # the node so it is finalized after its children.
            fh.write('(')
            nodes_left.append(entry)
            nodes_left += ((child, node_depth + 1) for child in
                           reversed(node.children))
            current_depth = node_depth + 1
        else:
            if node_depth < current_depth:
                # Ascending: close the subtree just completed.
                fh.write(')')
                current_depth -= 1
            # Note we don't check for None because there is no way to represent
            # an empty string as a label in Newick. Therefore, both None and ''
            # are considered to be the absence of a label.
            if node.name:
                escaped = node.name.replace("'", "''")
                if any(t in operators for t in node.name):
                    # Label contains structural characters: quote it.
                    fh.write("'")
                    fh.write(escaped)
                    fh.write("'")
                else:
                    # Unquoted labels encode spaces as underscores.
                    fh.write(escaped.replace(" ", "_"))
            if node.length is not None:
                fh.write(':')
                fh.write(str(node.length))
            if nodes_left and nodes_left[-1][1] == current_depth:
                # A sibling at this depth follows: separate with ','.
                fh.write(',')
    fh.write(';\n')
def _tokenize_newick(fh, convert_underscores=True):
    """Yield tokens from a Newick-formatted file handle.

    Tokens are either single structure characters (one of ``( ) , ; :``)
    or label strings. Comments (``[...]``, possibly nested) are skipped
    entirely. Labels wrapped in single quotes are yielded verbatim (with
    ``''`` treated as an escaped quote); in unquoted labels, underscores
    are converted to spaces unless `convert_underscores` is False.
    Raises NewickFormatError on unescaped whitespace inside a label.
    """
    structure_tokens = set('(),;:')
    # True while we are OUTSIDE a quoted literal.
    not_escaped = True
    # True while characters are being accumulated into a label.
    label_start = False
    last_non_ws_char = ''
    last_char = ''
    comment_depth = 0
    # Accumulates the characters of the label currently being read.
    metadata_buffer = []
    # Strategy:
    # We will iterate by character.
    # Comments in newick are defined as:
    # [This is a comment]
    # Nested comments are allowed.
    #
    # The following characters indicate structure:
    #     ( ) , ; :
    #
    # Whitespace is never allowed in a newick label, so an exception will be
    # thrown.
    #
    # We use ' to indicate a literal string. It has the highest precedence of
    # any operator.
    for line in fh:
        for character in line:
            # We will start by handling the comment case.
            # This code branch will probably never execute in practice.
            # Using a comment_depth we can handle nested comments.
            # Additionally if we are inside an escaped literal string, then
            # we don't want to consider it a comment.
            if character == "[" and not_escaped:
                # Sometimes we might not want to nest a comment, so we will use
                # our escape character. This is not explicitly mentioned in
                # any format specification, but seems like what a reasonable
                # person might do.
                if last_non_ws_char != "'" or comment_depth == 0:
                    # Once again, only advance our depth if [ has not been
                    # escaped inside our comment.
                    comment_depth += 1
            if comment_depth > 0:
                # Same as above, but in reverse
                if character == "]" and last_non_ws_char != "'":
                    comment_depth -= 1
                last_non_ws_char = character
                continue
            # We are not in a comment block if we are below here.
            # If we are inside of an escaped string literal, then ( ) , ; are
            # meaningless to the structure.
            # Otherwise, we are ready to submit our metadata token.
            if not_escaped and character in structure_tokens:
                label_start = False
                metadata = ''.join(metadata_buffer)
                # If the following condition is True, then we must have just
                # closed a literal. We know this because last_non_ws_char is
                # either None or the last non-whitespace character.
                # last_non_ws_char is None when we have just escaped an escape
                # and at the first iteration.
                if last_non_ws_char == "'" or not convert_underscores:
                    # Make no modifications.
                    yield metadata
                elif metadata:
                    # Underscores are considered to be spaces when not in an
                    # escaped literal string.
                    yield metadata.replace('_', ' ')
                # Clear our buffer for the next metadata token and yield our
                # current structure token.
                metadata_buffer = []
                yield character
            # We will now handle escaped string literals.
            # They are inconvenient because any character inside of them is
            # valid, especially whitespace.
            # We also need to allow ' to be escaped by '. e.g. '' -> '
            elif character == "'":
                not_escaped = not not_escaped
                label_start = True
                if last_non_ws_char == "'":
                    # We are escaping our escape, so it should be added to our
                    # metadata_buffer which will represent some future token.
                    metadata_buffer.append(character)
                    # We do not want a running chain of overcounts, so we need
                    # to clear the last character and continue iteration from
                    # the top. Without this, the following would happen:
                    # ''' ' -> '' <open literal>
                    # What we want is:
                    # ''' ' -> '<open literal> <close literal>
                    last_non_ws_char = ''
                    last_char = ''
                    continue
            elif not character.isspace() or not not_escaped:
                # Ordinary label character (or any character, if quoted).
                if label_start and last_char.isspace() and not_escaped:
                    raise NewickFormatError("Newick files cannot have"
                                            " unescaped whitespace in their"
                                            " labels.")
                metadata_buffer.append(character)
                label_start = True
            # This is equivalent to an `else` however it prevents coverage from
            # mis-identifying the `continue` as uncalled because cpython will
            # optimize it to a jump that is slightly different from the normal
            # jump it would have done anyways.
            elif True:
                # Skip the last statement
                last_char = character
                continue
            last_char = character
            # This line is skipped in the following cases:
            #    * comment_depth > 0, i.e. we are in a comment.
            #    * We have just processed the sequence '' and we don't want
            #      the sequence ''' to result in ''.
            #    * We have encountered whitespace that is not properly escaped.
            last_non_ws_char = character
| Kleptobismol/scikit-bio | skbio/io/newick.py | Python | bsd-3-clause | 20,610 | [
"scikit-bio"
] | f525f7f889f34408657098302e67626d59b014c68f11d10c5d3cfe6779d1d21e |
# -*- coding: utf-8 -*-
import json
import magic
import copy
import re
import pytz
import hashlib
from sqlalchemy import or_
from mock import Mock, PropertyMock
from nose.tools import (
assert_raises,
assert_equal,
assert_not_equal,
assert_true)
from jsonschema.exceptions import ValidationError
from dateutil.parser import parse as du_parse
from tests.base import BaseTestCase
from splice.ingest import ingest_links, generate_artifacts, IngestError, distribute
from splice.models import Tile, Adgroup, AdgroupSite
# Pathname patterns for generated desktop distribution artifacts
# (country/locale adgroup and legacy payload files).
# NOTE(review): these constants appear unused within this module's visible
# tests — confirm against the full file before removing.
DESKTOP_LOCALE_DISTRO_PATTERN = re.compile(r'desktop/(.*)\..*.ag.json')
AG_DIST_PATHNAME = re.compile('desktop/([A-Z]{2}/([a-z]{2}-[A-Z]{2}))\.[a-z0-9]+\.ag\.json')
LEGACY_DIST_PATHNAME = re.compile('desktop/([A-Z]{2}/([a-z]{2}-[A-Z]{2}))\.[a-z0-9]+\.json')
class TestIngestLinks(BaseTestCase):
def test_invalid_data(self):
"""
Invalid data is sent for ingestion
"""
assert_raises(ValidationError, ingest_links, {"invalid": {"data": 1}}, self.channels[0].id)
def test_empty_data(self):
"""
Empty data input is not processed
"""
data = ingest_links({}, self.channels[0].id)
assert_equal(data, {})
def test_invalid_country_code(self):
"""
Invalid country code is rejected
"""
assert_raises(IngestError, ingest_links, {"INVALID/en-US": [
{
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF"
}
]}, self.channels[0].id)
def test_invalid_locale(self):
"""
Invalid locale is rejected
"""
assert_raises(IngestError, ingest_links, {"US/en-DE": [
{
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF"
}
]}, self.channels[0].id)
def test_invalid_related(self):
"""
Invalid suggested type is rejected
"""
assert_raises(ValidationError, ingest_links, {"US/en-US": [
{
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"frecent_sites": "not an array, really"
}
]}, self.channels[0].id)
def test_check_type_uniqueness(self):
"""
A test of type uniqueness
"""
tile_1 = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"type": "affiliate"
}
tile_2 = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"type": "sponsored"
}
dist = {"US/en-US": [tile_1, tile_1, tile_2]}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
data = ingest_links(dist, self.channels[0].id)
assert_equal(len(dist['US/en-US']), len(data['US/en-US']))
c = self.env.db.session.query(Adgroup).count()
assert_equal(30 + len(dist['US/en-US']) - 1, c)
def test_suggested_sites(self):
"""
just a simple suggested site tile
"""
tile = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"frecent_sites": ["http://abc.com", "https://xyz.com"]
}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
c = self.env.db.session.query(AdgroupSite).count()
assert_equal(0, c)
data = ingest_links({"US/en-US": [tile]}, self.channels[0].id)
assert_equal(1, len(data["US/en-US"]))
c = self.env.db.session.query(Adgroup).count()
assert_equal(31, c)
c = self.env.db.session.query(AdgroupSite).count()
assert_equal(2, c)
def test_sorted_suggested_sites(self):
"""
ensure suggested sites are sorted
"""
tile = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"frecent_sites": ["http://lmnop.org", "http://def.com", "http://abc.com", "http://def.com", "https://xyz.com"]
}
data = ingest_links({"CA/en-US": [tile]}, self.channels[0].id)
assert_equal(1, len(data["CA/en-US"]))
assert_equal(data["CA/en-US"][0]['frecent_sites'],
["http://abc.com", "http://def.com", "http://lmnop.org", "https://xyz.com"])
def test_ingest_suggested_sites(self):
"""
Test that there is no duplication when ingesting tiles
"""
with open(self.get_fixture_path("tiles_suggested.json"), 'r') as f:
tiles = json.load(f)
num_tiles = self.env.db.session.query(Tile).count()
data = ingest_links(tiles, self.channels[0].id)
assert_equal(len(data['STAR/en-US']), 5)
new_num_tiles = self.env.db.session.query(Tile).count()
assert_equal(num_tiles + 4, new_num_tiles)
# ingesting the same thing a second time should be idempotent
data = ingest_links(tiles, self.channels[0].id)
assert_equal(len(data['STAR/en-US']), 5)
new_num_tiles = self.env.db.session.query(Tile).count()
assert_equal(num_tiles + 4, new_num_tiles)
def test_ingest_compact_payload(self):
""" Test compact payload for ingest link
"""
image_uri = "data:image/png;base64,somedata foo"
enhanced_uri = "data:image/png;base64,somedata bar"
assets = {
"image 0": image_uri,
"enhanced image 0": enhanced_uri,
}
tile_us = {
"imageURI": "image 0",
"enhancedImageURI": "enhanced image 0",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
}
tile_ca = {
"imageURI": "image 0",
"url": "https://somewhere.ca",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
}
dist = {
"assets": assets,
"distributions": {
"US/en-US": [tile_us],
"CA/en-US": [tile_ca]
}
}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
data = ingest_links(dist, self.channels[0].id)
assert_equal(1, len(data["US/en-US"]))
assert_equal(1, len(data["CA/en-US"]))
c = self.env.db.session.query(Adgroup).count()
assert_equal(32, c)
# test tile for CA/en-US
tile = self.env.db.session.query(Tile).filter(Tile.id == 31).one()
ag = self.env.db.session.query(Adgroup).filter(Adgroup.id == 31).one()
assert_equal(tile.adgroup_id, ag.id)
assert_equal(tile.image_uri, hashlib.sha1(image_uri).hexdigest())
# test tile for US/en-US
tile = self.env.db.session.query(Tile).filter(Tile.id == 32).one()
ag = self.env.db.session.query(Adgroup).filter(Adgroup.id == 32).one()
assert_equal(tile.adgroup_id, ag.id)
assert_equal(tile.image_uri, hashlib.sha1(image_uri).hexdigest())
assert_equal(tile.enhanced_image_uri, hashlib.sha1(enhanced_uri).hexdigest())
def test_ingest_invalid_compact_payload(self):
""" Test invalid compact payload for ingest link
"""
image_uri = "data:image/png;base64,somedata foo"
tile_us = {
"imageURI": "image missing",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
}
invalid_dist_assets_missing = {
"distributions": {
"US/en-US": [tile_us],
}
}
invalid_dist_distributions_missing = {
"assets": {
"image 0": image_uri,
}
}
invalid_dist_uri_missing = {
"assets": {
"image 0": image_uri,
},
"distributions": {
"US/en-US": [tile_us]
}
}
assert_raises(ValidationError, ingest_links,
invalid_dist_assets_missing, self.channels[0].id)
assert_raises(ValidationError, ingest_links,
invalid_dist_distributions_missing, self.channels[0].id)
tiles = ingest_links(invalid_dist_uri_missing, self.channels[0].id)
assert_equal(len(tiles["US/en-US"]), 0)
def test_start_end_dates(self):
"""
a simple start/end date tile
"""
tile_no_tz = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"time_limits": {
"start": "2014-01-12T00:00:00.000",
"end": "2014-01-31T00:00:00.000"
}
}
dist = {"US/en-US": [tile_no_tz]}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
data = ingest_links(dist, self.channels[0].id)
assert_equal(1, len(data["US/en-US"]))
c = self.env.db.session.query(Adgroup).count()
assert_equal(31, c)
tile = self.env.db.session.query(Tile).filter(Tile.id == 31).one()
ag = self.env.db.session.query(Adgroup).filter(Adgroup.id == 31).one()
assert_equal(tile.adgroup_id, ag.id)
assert_equal(ag.start_date, dist["US/en-US"][0]['time_limits']['start'])
assert_equal(ag.end_date, dist["US/en-US"][0]['time_limits']['end'])
assert_equal(ag.start_date_dt, du_parse(dist["US/en-US"][0]['time_limits']['start']))
assert_equal(ag.end_date_dt, du_parse(dist["US/en-US"][0]['time_limits']['end']))
def test_start_end_dates_timezones(self):
"""
test start/end dates with timezones
"""
def parse_to_utc_notz(dt_str):
"""
Return a TZ unaware dt in UTC
"""
dt = du_parse(dt_str)
if dt.tzinfo:
dt = dt.astimezone(pytz.utc).replace(tzinfo=None)
return dt
tile_no_tz = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"time_limits": {
"start": "2014-01-12T00:00:00.000",
"end": "2014-01-31T00:00:00.000"
}
}
tile_with_tz = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhereelse.com",
"title": "Some Other Title",
"type": "organic",
"bgColor": "#FFFFFF",
"time_limits": {
"start": "2014-01-12T00:00:00.000Z",
"end": "2014-01-31T00:00:00.000Z"
}
}
tile_with_mixed_tz = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhereelseyet.com",
"title": "Yet Some Other Title",
"type": "organic",
"bgColor": "#FFFFFF",
"time_limits": {
"start": "2014-01-12T00:00:00.000",
"end": "2014-01-31T00:00:00.000Z"
}
}
dist = {"US/en-US": [tile_no_tz, tile_with_tz, tile_with_mixed_tz]}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
data = ingest_links(dist, self.channels[0].id)
assert_equal(len(dist["US/en-US"]), len(data["US/en-US"]))
c = self.env.db.session.query(Adgroup).count()
assert_equal(30 + len(dist["US/en-US"]), c)
tile_tested = 0
for i, tile_def in enumerate(dist["US/en-US"]):
obj_id = 30 + 1 + i
tile = self.env.db.session.query(Tile).filter(Tile.id == obj_id).one()
ag = self.env.db.session.query(Adgroup).filter(Adgroup.id == obj_id).one()
assert_equal(tile.adgroup_id, ag.id)
assert_equal(ag.start_date, tile_def['time_limits']['start'])
assert_equal(ag.end_date, tile_def['time_limits']['end'])
assert_equal(ag.start_date_dt, parse_to_utc_notz(tile_def['time_limits']['start']))
assert_equal(ag.end_date_dt, parse_to_utc_notz(tile_def['time_limits']['end']))
tile_tested += 1
assert_equal(len(dist["US/en-US"]), tile_tested)
def test_start_end_dates_optional(self):
"""
Ensure that start/end dates are optional
"""
tile_no_start = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"time_limits": {
"end": "2014-01-31T00:00:00.000"
}
}
tile_no_end = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhereelse.com",
"title": "Some Other Title",
"type": "organic",
"bgColor": "#FFFFFF",
"time_limits": {
"start": "2014-01-12T00:00:00.000",
}
}
tile_empty_limits = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://yetsomewhereelse.com",
"title": "Yet Some Other Title",
"type": "organic",
"bgColor": "#FFFFFF",
"time_limits": {}
}
dist = {"US/en-US": [tile_no_start, tile_no_end, tile_empty_limits]}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
data = ingest_links(dist, self.channels[0].id)
assert_equal(len(dist["US/en-US"]), len(data["US/en-US"]))
c = self.env.db.session.query(Adgroup).count()
assert_equal(30 + len(dist["US/en-US"]), c)
tile_tested = 0
for i, tile_def in enumerate(dist["US/en-US"]):
obj_id = 30 + 1 + i
tile = self.env.db.session.query(Tile).filter(Tile.id == obj_id).one()
ag = self.env.db.session.query(Adgroup).filter(Adgroup.id == obj_id).one()
assert_equal(tile.adgroup_id, ag.id)
if ag.start_date:
assert_equal(ag.start_date, tile_def['time_limits']['start'])
tile_tested += 1
if ag.end_date:
assert_equal(ag.end_date, tile_def['time_limits']['end'])
tile_tested += 1
# one tile not tested because it has neither start or end dates
assert_equal(len(dist["US/en-US"]) - 1, tile_tested)
def test_start_end_dates_uniqueness(self):
"""
Test that start/end are part of what make tiles unique
"""
tile = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"time_limits": {
"start": "2014-01-12T00:00:00.000",
"end": "2014-01-31T00:00:00.000"
}
}
tile_no_start = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"time_limits": {
"end": "2014-01-31T00:00:00.000"
}
}
tile_no_end = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"time_limits": {
"start": "2014-01-12T00:00:00.000",
}
}
tile_empty_limits = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Tile",
"type": "organic",
"bgColor": "#FFFFFF",
"time_limits": {}
}
dist = {"US/en-US": [tile, tile_no_start, tile_no_end, tile_empty_limits]}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
data = ingest_links(dist, self.channels[0].id)
assert_equal(len(dist["US/en-US"]), len(data["US/en-US"]))
c = self.env.db.session.query(Adgroup).count()
assert_equal(30 + len(dist["US/en-US"]), c)
def test_adgroups_channel_id_uniqueness(self):
"""
Test that channel_ids in adgroups are part of what makes Tiles unique
"""
tile = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
}
dist = {"US/en-US": [tile]}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
ingest_links(dist, self.channels[0].id)
ingest_links(dist, self.channels[1].id)
c = self.env.db.session.query(Adgroup).count()
assert_equal(32, c)
def test_title_bg_color(self):
"""
A simple test of title_bg_color
"""
tile = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"titleBgColor": "#FF00FF"
}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
data = ingest_links({"US/en-US": [tile]}, self.channels[0].id)
assert_equal(1, len(data["US/en-US"]))
c = self.env.db.session.query(Adgroup).count()
assert_equal(31, c)
tile = self.env.db.session.query(Tile).filter(Tile.id == 31).one()
ag = self.env.db.session.query(Adgroup).filter(Adgroup.id == 31).one()
assert_equal(tile.adgroup_id, ag.id)
assert_equal(tile.title_bg_color, "#FF00FF")
def test_adgroup_categories_invalid(self):
"""
A simple test of adgroup_categories with invalid data
"""
tile = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"adgroup_categories": "Technology_General" # should be a list here
}
assert_raises(ValidationError, ingest_links, {"US/en-US": [tile]}, self.channels[0].id)
def test_adgroup_categories_single(self):
"""
A simple test of adgroup_categories
"""
from splice.queries import get_categories_for_adgroup
tile_ = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"adgroup_categories": ["Technology_General"]
}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
data = ingest_links({"US/en-US": [tile_]}, self.channels[0].id)
assert_equal(1, len(data["US/en-US"]))
c = self.env.db.session.query(Adgroup).count()
assert_equal(31, c)
tile = self.env.db.session.query(Tile).filter(Tile.id == 31).one()
ag = self.env.db.session.query(Adgroup).filter(Adgroup.id == 31).one()
assert_equal(tile.adgroup_id, ag.id)
db_categories = get_categories_for_adgroup(self.env.db.session, ag.id)
assert_equal(tile_["adgroup_categories"], db_categories)
def test_adgroup_categories_multiple(self):
"""
A simple test of adgroup_categories with multiple categories
"""
from splice.queries import get_categories_for_adgroup
tile_ = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"adgroup_categories": ["Technology_General", "Technology_Mobile"]
}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
data = ingest_links({"US/en-US": [tile_]}, self.channels[0].id)
assert_equal(1, len(data["US/en-US"]))
c = self.env.db.session.query(Adgroup).count()
assert_equal(31, c)
tile = self.env.db.session.query(Tile).filter(Tile.id == 31).one()
ag = self.env.db.session.query(Adgroup).filter(Adgroup.id == 31).one()
assert_equal(tile.adgroup_id, ag.id)
db_categories = get_categories_for_adgroup(self.env.db.session, ag.id)
assert_equal(sorted(tile_["adgroup_categories"]), db_categories)
def test_frequency_caps(self):
"""
A simple test of frequency caps
"""
tile = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"frequency_caps": {
"daily": 3,
"total": 10
}
}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
data = ingest_links({"US/en-US": [tile]}, self.channels[0].id)
assert_equal(1, len(data["US/en-US"]))
c = self.env.db.session.query(Adgroup).count()
assert_equal(31, c)
tile = self.env.db.session.query(Tile).filter(Tile.id == 31).one()
ag = self.env.db.session.query(Adgroup).filter(Adgroup.id == 31).one()
assert_equal(tile.adgroup_id, ag.id)
assert_equal(ag.frequency_cap_daily, 3)
assert_equal(ag.frequency_cap_total, 10)
def test_frequency_cap_missing_data(self):
"""
Test caps with details missing
"""
def make_dist(caps):
tile = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"frequency_caps": caps
}
return {"US/en-US": [tile]}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
assert_raises(ValidationError, ingest_links, make_dist({}), self.channels[0].id)
assert_raises(ValidationError, ingest_links, make_dist({'daily': 3}), self.channels[0].id)
assert_raises(ValidationError, ingest_links, make_dist({'total': 10}), self.channels[0].id)
assert_raises(ValidationError, ingest_links, make_dist({'daily': "a number"}), self.channels[0].id)
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
def test_frequency_caps_uniqueness(self):
"""
A test of frequency caps uniqueness
"""
tile_1 = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"frequency_caps": {
"daily": 3,
"total": 10
}
}
tile_2 = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"frequency_caps": {
"daily": 4,
"total": 10
}
}
tile_3 = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"frequency_caps": {
"daily": 3,
"total": 11
}
}
dist = {"US/en-US": [tile_1, tile_1, tile_2, tile_3]}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
data = ingest_links(dist, self.channels[0].id)
assert_equal(len(dist['US/en-US']), len(data['US/en-US']))
c = self.env.db.session.query(Adgroup).count()
assert_equal(30 + len(dist['US/en-US']) - 1, c)
def test_explanation(self):
explanation = "Suggested for %1$S fans who visit site %2$S"
tile = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"adgroup_name": "Technology",
"explanation": explanation,
}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
data = ingest_links({"US/en-US": [tile]}, self.channels[0].id)
assert_equal(1, len(data["US/en-US"]))
c = self.env.db.session.query(Adgroup).count()
assert_equal(31, c)
tile = self.env.db.session.query(Tile).filter(Tile.id == 31).one()
ag = self.env.db.session.query(Adgroup).filter(Adgroup.id == 31).one()
assert_equal(tile.adgroup_id, ag.id)
assert_equal(ag.name, "Technology")
assert_equal(ag.explanation, explanation)
@staticmethod
def _make_dist(parts):
tile = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"adgroup_name": "Technology",
"explanation": "Suggested for %1$S fans who visit site %2$S",
}
tile.update(parts)
return {"US/en-US": [tile]}
def test_explanation_invalid_data(self):
tile = self._make_dist({"explanation": "A huge template %1$S, %2$S" * 100})
assert_raises(ValidationError, ingest_links, tile, self.channels[0].id)
def test_explanation_template_sanitization(self):
# test templates with html tags
tile = self._make_dist({
"adgroup_name": "<script>Technology</script>",
"explanation": "<br/>Suggested for %1$S, %2$S<br/>"})
ingest_links(tile, self.channels[0].id)
ag = self.env.db.session.query(Adgroup).filter(Adgroup.id == 31).one()
assert_equal(ag.name, "Technology")
assert_equal(ag.explanation, "Suggested for %1$S, %2$S")
# test templates with tags only and special characters
tile = self._make_dist({
"title": "Some Another Title",
"adgroup_name": "<script><script/>",
"explanation": "< Suggested for %1$S, %2$S >"})
ingest_links(tile, self.channels[0].id)
ag = self.env.db.session.query(Adgroup).filter(Adgroup.id == 32).one()
assert_equal(ag.name, None)
assert_equal(ag.explanation, "< Suggested for %1$S, %2$S >")
def test_explanation_uniqueness(self):
"""
A test of explanation uniqueness
"""
tile_1 = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"adgroup_name": "A",
"explanation": "B",
}
tile_2 = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"adgroup_name": "A",
"explanation": "C",
}
tile_3 = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"adgroup_name": "D",
"explanation": "B",
}
dist = {"US/en-US": [tile_1, tile_1, tile_2, tile_3]}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
data = ingest_links(dist, self.channels[0].id)
assert_equal(len(dist['US/en-US']), len(data['US/en-US']))
c = self.env.db.session.query(Adgroup).count()
assert_equal(30 + len(dist['US/en-US']) - 1, c)
def test_check_inadjacency(self):
"""
Simple inadjacency flag test
"""
suggested_tile = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title Suggested",
"type": "organic",
"bgColor": "#FFFFFF",
"frecent_sites": ["http://lmnop.org", "http://def.com", "http://abc.com", "http://def.com", "https://xyz.com"],
"check_inadjacency": True
}
directory_tile = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere-else.com",
"title": "Some Title Directory",
"type": "organic",
"bgColor": "#FFFFFF",
"check_inadjacency": True
}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
data = ingest_links({"US/en-US": [suggested_tile, directory_tile]}, self.channels[0].id)
assert_equal(2, len(data["US/en-US"]))
c = self.env.db.session.query(Adgroup).count()
assert_equal(32, c)
ag = self.env.db.session.query(Adgroup).filter(or_(Adgroup.id == 31, Adgroup.id == 32)).all()
asserted = 0
for a in ag:
assert(a.check_inadjacency)
asserted += 1
assert_equal(2, asserted)
def test_check_inadjacency_invalid(self):
tile = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title Suggested",
"type": "organic",
"bgColor": "#FFFFFF",
"frecent_sites": ["http://lmnop.org", "http://def.com", "http://abc.com", "http://def.com", "https://xyz.com"],
"check_inadjacency": "True"
}
dist = {"US/en-US": [tile]}
c = self.env.db.session.query(Adgroup).count()
assert_raises(ValidationError, ingest_links, dist, self.channels[0].id)
c2 = self.env.db.session.query(Adgroup).count()
assert_equal(c, c2)
def test_check_inadjacency_uniqueness(self):
"""
A test of inadjacency uniqueness
"""
tile_1 = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"check_inadjacency": True
}
tile_2 = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF",
"check_inadjacency": False
}
dist = {"US/en-US": [tile_1, tile_1, tile_2]}
c = self.env.db.session.query(Adgroup).count()
assert_equal(30, c)
data = ingest_links(dist, self.channels[0].id)
assert_equal(len(dist['US/en-US']), len(data['US/en-US']))
c = self.env.db.session.query(Adgroup).count()
assert_equal(30 + len(dist['US/en-US']) - 1, c)
def test_id_creation(self):
"""
Test an id is created for a valid tile
"""
tile = {
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF"
}
data = ingest_links({"STAR/en-US": [tile]}, self.channels[0].id)
directory_id = data["STAR/en-US"][0]["directoryId"]
# the biggest ID is 30 - next one should be 31
assert_equal(31, directory_id)
def test_id_not_duplicated(self):
"""
Test an id is created for a valid tile
"""
tiles_star = [
{
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF"
},
{
"imageURI": "data:image/png;base64,someotherdata",
"url": "https://somewhereelse.com",
"title": "Some Other Title",
"type": "organic",
"bgColor": "#FFFFFF"
},
]
tiles_ca = [
{
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF"
}
]
data = ingest_links({
"STAR/en-US": tiles_star,
"CA/en-US": tiles_ca,
}, self.channels[0].id)
directory_id_star = data["STAR/en-US"][0]["directoryId"]
directory_id_ca = data["CA/en-US"][0]["directoryId"]
assert_equal(31, directory_id_star)
assert_not_equal(data["STAR/en-US"][1]["directoryId"], directory_id_star)
assert_equal(directory_id_ca, directory_id_star)
def test_id_not_overwritten(self):
"""
Test an id is created for a valid tile
"""
tiles_star = [
{
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF"
}
]
data = ingest_links({"STAR/en-US": tiles_star}, self.channels[0].id)
directory_id = data["STAR/en-US"][0]["directoryId"]
assert_equal(31, directory_id)
data = ingest_links({"STAR/en-US": tiles_star}, self.channels[0].id)
directory_id = data["STAR/en-US"][0]["directoryId"]
assert_equal(31, directory_id)
def test_error_mid_ingestion(self):
"""
Test an error happening mid-ingestion
"""
tiles_star = [
{
"imageURI": "data:image/png;base64,somedata",
"url": "https://somewhere.com",
"title": "Some Title",
"type": "organic",
"bgColor": "#FFFFFF"
},
{
"imageURI": "data:image/png;base64,someotherdata",
"url": "https://somewhereelse.com",
"title": "Some Other Title",
"type": "organic",
"bgColor": "#FFFFFF"
},
]
tile_count_before = self.env.db.session.query(Tile).count()
import splice.ingest
insert_function = splice.ingest.insert_tile
# put counts in a dict to get around python's
# non-local scope restrictions on variables
# for access in mock_ingest
counts = {
'call': 0,
'exception_at': 2,
}
def mock_ingest(*args, **kwargs):
counts['call'] += 1
if counts['call'] < counts['exception_at']:
return insert_function(*args, **kwargs)
else:
raise Exception('Boom')
function_mock = Mock(side_effect=mock_ingest)
try:
splice.ingest.insert_tile = function_mock
assert_raises(Exception, ingest_links, {"STAR/en-US": tiles_star}, self.channels[0].id)
tile_count_after = self.env.db.session.query(Tile).count()
# None of two has been inserted, to test the "all or nothing" scenario
assert_equal(0, tile_count_after - tile_count_before)
finally:
# put the module function back to what it was
splice.ingest.insert_tile = insert_function
def test_ingest_dbpool(self):
    """
    Ingest a payload with a large number of tiles without exhausting the
    database connection pool.
    """
    fixture_path = self.get_fixture_path("2014-10-30.ja-pt.json")
    with open(fixture_path, 'r') as fixture:
        payload = json.load(fixture)
    ingest_links(payload, self.channels[0].id)
    # the fixture is big enough that well over 30 tiles should now exist
    assert(self.env.db.session.query(Tile).count() > 30)
def test_ingest_no_duplicates(self):
    """
    Ingesting a payload full of duplicate tiles must persist only one.
    """
    with open(self.get_fixture_path("tiles_duplicates.json"), 'r') as fixture:
        payload = json.load(fixture)
    count_before = self.env.db.session.query(Tile).count()
    ingest_links(payload, self.channels[0].id)
    count_after = self.env.db.session.query(Tile).count()
    # exactly one new row despite the duplicated input tiles
    assert_equal(count_before + 1, count_after)
class TestGenerateArtifacts(BaseTestCase):
    # Exercises generate_artifacts(): turning ingested link data into the
    # set of deployable artifacts (tile indexes, per-locale payloads, images).

    def test_generate_artifacts(self):
        """
        Tests that the correct number of artifacts are generated
        """
        with open(self.get_fixture_path("tiles_suggested.json"), 'r') as f:
            fixture = json.load(f)
        tile = fixture["STAR/en-US"][4]
        data = ingest_links({"STAR/en-US": [tile]}, self.channels[0].id)
        artifacts = generate_artifacts(data, self.channels[0].name, True)
        # tile index, v2, v3 and 2 image files are generated
        assert_equal(6, len(artifacts))
        data = ingest_links({
            "STAR/en-US": [tile],
            "CA/en-US": [tile]
        }, self.channels[0].id)
        artifacts = generate_artifacts(data, self.channels[0].name, True)
        # includes two more file: the locale data payload for each version
        assert_equal(8, len(artifacts))

    def test_generate_artifacts_compact(self):
        """
        Tests that the correct number of artifacts are generated for compact
        payload
        """
        image_uri = "data:image/png;base64,QSBwcmV0dHkgaW1hZ2UgOik="
        enhanced_uri = "data:image/png;base64,WWV0IGFub3RoZXIgcHJldHR5IGltYWdlIDop"
        # asset map: symbolic names -> data URIs, shared by all compact tiles
        assets = {
            "image 0": image_uri,
            "enhanced image 0": enhanced_uri,
        }
        tile_us = {
            "imageURI": "image 0",
            "enhancedImageURI": "enhanced image 0",
            "url": "https://somewhere.com",
            "title": "Some Title",
            "type": "organic",
            "bgColor": "#FFFFFF",
        }
        dist_us = {
            "assets": assets,
            "distributions": {
                "US/en-US": [tile_us]
            }
        }
        dist_us_ca = {
            "assets": assets,
            "distributions": {
                "US/en-US": [tile_us],
                "CA/en-US": [tile_us]
            }
        }
        data = ingest_links(dist_us, self.channels[0].id)
        artifacts = generate_artifacts(data, self.channels[0].name, True)
        # tile index, v2, v3 and 2 image files are generated
        assert_equal(6, len(artifacts))
        data = ingest_links(dist_us_ca, self.channels[0].id)
        artifacts = generate_artifacts(data, self.channels[0].name, True)
        # includes two more file: the locale data payload for each version
        assert_equal(8, len(artifacts))
        # verify the compact tiles
        artifact = artifacts[-1]
        payload = json.loads(artifact["data"])
        assert_true("assets" in payload)
        assert_true("distributions" in payload)
        assets = payload["assets"]
        # every tile's image reference must resolve via the shared asset map
        for _, tiles in payload["distributions"].iteritems():
            for tile in tiles:
                assert_true(tile["imageURI"] in assets)
                if tile.get("enhancedImageURI"):
                    assert_true(tile["enhancedImageURI"] in assets)

    def test_unknown_mime_type(self):
        """
        Tests that an unknown mime type is rejected
        """
        tiles_star = [
            {
                # "weirdimage" is not a recognized image subtype
                "imageURI": "data:image/weirdimage;base64,somedata",
                "url": "https://somewhere.com",
                "title": "Some Title",
                "type": "organic",
                "bgColor": "#FFFFFF"
            }
        ]
        data = ingest_links({"STAR/en-US": tiles_star}, self.channels[0].id)
        assert_raises(IngestError, generate_artifacts, data, self.channels[0].name, True)

    def test_malformed_data_uri_meta(self):
        """
        Tests that a malformed data uri declaration is rejected
        """
        tiles_star = [
            {
                # missing the ";base64,<data>" portion of the data URI
                "imageURI": "data:image/somedata",
                "url": "https://somewhere.com",
                "title": "Some Title",
                "type": "organic",
                "bgColor": "#FFFFFF"
            }
        ]
        data = ingest_links({"STAR/en-US": tiles_star}, self.channels[0].id)
        assert_raises(IngestError, generate_artifacts, data, self.channels[0].name, True)

    def test_image_content(self):
        """
        Each image artifact's declared mime type must match what libmagic
        detects from its actual bytes.
        """
        with open(self.get_fixture_path("valid_tile.json"), 'r') as f:
            tiles = json.load(f)
        data = ingest_links(tiles, self.channels[0].id)
        artifacts = generate_artifacts(data, self.channels[0].name, True)
        found_image = False
        for file in artifacts:
            # only image artifacts carry a "mime" entry
            if "mime" in file:
                found_image = True
                assert_equal(file["mime"], magic.from_buffer(file["data"], mime=True))
        # guard: make sure at least one image artifact was actually checked
        assert_true(found_image)

    def test_image_artifact_hash(self):
        """
        Test that the correct number of image artifacts are produced
        """
        with open(self.get_fixture_path("valid_tile.json"), 'r') as f:
            fixture = json.load(f)
        tile_1 = fixture["STAR/en-US"][0]
        # three tiles sharing the same images, differing only by title
        tile_2 = copy.deepcopy(tile_1)
        tile_2['title'] = 'Some Other Title'
        tile_3 = copy.deepcopy(tile_1)
        tile_3['title'] = 'Yet Another Title'
        tiles = {'STAR/en-US': [tile_1, tile_2, tile_3]}
        data = ingest_links(tiles, self.channels[0].id)
        artifacts = generate_artifacts(data, self.channels[0].name, True)
        # even if there are 3 tiles, there should only be 2 images
        image_count = 0
        for a in artifacts:
            mime = a.get('mime')
            if mime and mime == 'image/png':
                image_count += 1
        assert_equal(2, image_count)

    def test_generate_artifacts_tile_count(self):
        """
        Tests that the correct number of tiles are produced
        """
        with open(self.get_fixture_path('mozilla-tiles.fennec.sg.json'), 'r') as f:
            tiles = json.load(f)
        data = ingest_links(tiles, self.channels[0].id)
        artifacts = generate_artifacts(data, self.channels[0].name, True)
        assertions_run = False
        for a in artifacts:
            m = DESKTOP_LOCALE_DISTRO_PATTERN.match(a['key'])
            if m:
                country_locale = m.groups()[0]
                distro_data = json.loads(a['data'])
                # per locale: all tiles but one land in 'directory', one in 'suggested'
                assert_equal(len(tiles[country_locale]) - 1, len(distro_data['directory']))
                assert_equal(1, len(distro_data['suggested']))
                assertions_run = True
        # guard against the loop never matching a distribution key
        assert(assertions_run)
class TestDistribute(BaseTestCase):
    # Exercises distribute(): uploading generated artifacts to S3. setUp
    # replaces the boto Key/bucket with mocks that record every uploaded
    # key name (self.key_names) and payload (self.key_contents) in order.

    def setUp(self):
        """
        Mock out S3: every upload goes through self.key_mock so tests can
        count uploads and inspect key names and contents.
        """
        import splice.ingest
        self.key_mock = Mock()
        self.bucket_mock = Mock()

        def bucket_get_key_mock(*args, **kwargs):
            # pretend no key exists yet, so every artifact gets uploaded
            return None
        self.bucket_mock.get_key = Mock(side_effect=bucket_get_key_mock)

        def get_key_mock(*args, **kwargs):
            return self.key_mock
        # patch the Key class used inside the ingest module
        splice.ingest.Key = Mock(side_effect=get_key_mock)

        def get_bucket_mock(*args, **kwargs):
            return self.bucket_mock
        self.env.s3.get_bucket = Mock(side_effect=get_bucket_mock)

        # record the S3 key names assigned during distribution, in order
        self.key_names = []

        def key_set_name(name):
            self.key_names.append(name)
        type(self.key_mock).name = PropertyMock(side_effect=key_set_name)

        # record the uploaded payloads, in the same order as key_names
        self.key_contents = []

        def key_set_contents(data, **kwargs):
            self.key_contents.append(data)
        self.key_mock.set_contents_from_string = Mock(side_effect=key_set_contents)
        super(TestDistribute, self).setUp()

    def test_distribute(self):
        """
        Distributing uploads the expected number of files for one and for
        two country/locale distributions.
        """
        tiles_star = [
            {
                "imageURI": "data:image/png;base64,somedata",
                "enhancedImageURI": "data:image/png;base64,somemoredata",
                "url": "https://somewhere.com",
                "title": "Some Title",
                "type": "organic",
                "bgColor": "#FFFFFF"
            }
        ]
        tiles_ca = [
            {
                "imageURI": "data:image/png;base64,somedata",
                "url": "https://somewhere.com",
                "title": "Some Title",
                "type": "organic",
                "bgColor": "#FFFFFF"
            }
        ]
        data = ingest_links({"STAR/en-US": tiles_star}, self.channels[0].id)
        distribute(data, self.channels[0].id, True)
        # 6 files are uploaded, mirrors generate artifacts
        assert_equal(6, self.key_mock.set_contents_from_string.call_count)
        # reset the upload counter before the second distribution
        self.key_mock.set_contents_from_string = Mock()
        data = ingest_links({
            "STAR/en-US": tiles_star,
            "CA/en-US": tiles_ca,
        }, self.channels[0].id)
        distribute(data, self.channels[0].id, True)
        # includes two more upload: the locate data payload (for both versions)
        assert_equal(8, self.key_mock.set_contents_from_string.call_count)

    def test_distribute_suggested(self):
        """
        A suggested tile's frecent_sites list makes it into the distributed
        'suggested' payload.
        """
        tiles_star = [
            {
                "imageURI": "data:image/png;base64,somedata",
                "enhancedImageURI": "data:image/png;base64,somemoredata",
                "url": "https://somewhere.com",
                "title": "Some Title",
                "type": "organic",
                "bgColor": "#FFFFFF",
                # frecent_sites marks this as a suggested tile
                "frecent_sites": ['http://xyz.com', 'http://abc.com']
            }
        ]
        tiles_ca = [
            {
                "imageURI": "data:image/png;base64,somedata",
                "url": "https://somewhere.com",
                "title": "Some Other Title",
                "type": "organic",
                "bgColor": "#FFFFFF"
            }
        ]
        data = ingest_links({
            "STAR/en-US": tiles_star,
            "CA/en-US": tiles_ca,
        }, self.channels[0].id)
        distribute(data, self.channels[0].id, True)
        # in this case, the 3rd element should be the mock of the s3 upload for the 'ag' index
        frecents = json.loads(self.key_mock.set_contents_from_string.mock_calls[3][1][0])['suggested'][0]['frecent_sites']
        # the list is expected back in sorted order
        assert_equal(frecents, ['http://abc.com', 'http://xyz.com'])

    def test_distribute_compact(self):
        """
        Distributing a compact (assets + distributions) payload uploads the
        expected number of files.
        """
        image_uri = "data:image/png;base64,QSBwcmV0dHkgaW1hZ2UgOik="
        enhanced_uri = "data:image/png;base64,WWV0IGFub3RoZXIgcHJldHR5IGltYWdlIDop"
        assets = {
            "image 0": image_uri,
            "enhanced image 0": enhanced_uri,
        }
        tile_us = {
            "imageURI": "image 0",
            "enhancedImageURI": "enhanced image 0",
            "url": "https://somewhere.com",
            "title": "Some Title",
            "type": "organic",
            "bgColor": "#FFFFFF",
        }
        dist_ca_us = {
            "assets": assets,
            "distributions": {
                "US/en-US": [tile_us],
                "CA/en-US": [tile_us]
            }
        }
        data = ingest_links(dist_ca_us, self.channels[0].id)
        distribute(data, self.channels[0].id, True)
        assert_equal(8, self.key_mock.set_contents_from_string.call_count)

    def test_distribute_frequency_cap(self):
        """
        Tests if frequency cap makes it in distributions
        """
        tile_en_gb = {
            "imageURI": "data:image/png;base64,somedata",
            "url": "https://somewhere.com",
            "title": "Some Title CA",
            "type": "organic",
            "bgColor": "#FFFFFF",
            "frequency_caps": {
                "daily": 3,
                "total": 10
            }
        }
        tile_en_us = {
            "imageURI": "data:image/png;base64,somedata",
            "url": "https://somewhere_else.com",
            "title": "Some Title US",
            "type": "organic",
            "bgColor": "#FFFFFF",
            "frequency_caps": {
                "daily": 5,
                "total": 15
            }
        }
        tiles_en_us_suggested = {
            "imageURI": "data:image/png;base64,somedata",
            "url": "https://somewhere.com",
            "title": "Some Title US Suggested",
            "type": "organic",
            "bgColor": "#FFFFFF",
            "frecent_sites": ['http://xyz.com', 'http://abc.com'],
            "frequency_caps": {
                "daily": 7,
                "total": 20
            }
        }
        distribution = {
            "US/en-US": [tile_en_us, tiles_en_us_suggested],
            "GB/en-US": [tile_en_us],
            "GB/en-GB": [tile_en_gb]
        }
        data = ingest_links(distribution, self.channels[0].id)
        distribute(data, self.channels[0].id, True)
        # one image, 3 AG distributions, 3 legacy distributions, one index, one input distribution
        assert_equal(9, self.key_mock.set_contents_from_string.call_count)
        num_tiles_checked = 0
        # key_names[i] and key_contents[i] line up: both appended per upload
        for i, key in enumerate(self.key_names):
            ag = AG_DIST_PATHNAME.match(key)
            leg = LEGACY_DIST_PATHNAME.match(key)
            if ag:
                # AG distributions keep the frequency caps
                country_locale, locale = ag.groups()
                data = json.loads(self.key_contents[i])
                for tile in data['directory']:
                    # index 0 expected, only for US/en-US
                    assert_equal(distribution[country_locale][0]['frequency_caps'], tile.get('frequency_caps'))
                    num_tiles_checked += 1
                for tile in data['suggested']:
                    # index 1 expected, only for US/en-US
                    assert_equal(distribution[country_locale][1]['frequency_caps'], tile.get('frequency_caps'))
                    num_tiles_checked += 1
            elif leg:
                # legacy distributions must NOT carry frequency caps
                country_locale, locale = leg.groups()
                data = json.loads(self.key_contents[i])
                assert_equal(1, len(data[locale]))
                tile = data[locale][0]
                assert_equal(None, tile.get('frequency_caps'))
                num_tiles_checked += 1
        # guard: all 7 distributed tiles (4 AG + 3 legacy) were inspected
        assert_equal(7, num_tiles_checked)

    def test_distribute_adgroup_explanation(self):
        """
        adgroup_name/explanation (including non-ASCII unicode values) make it
        into AG distributions but are stripped from legacy ones.
        """
        tile_en_us = {
            "imageURI": "data:image/png;base64,somedata",
            "url": "https://somewhere_else.com",
            "title": "Some Title US",
            "type": "organic",
            "bgColor": "#FFFFFF",
            # .decode('utf-8') turns the Python 2 byte strings into unicode
            "adgroup_name": "Teçhnology".decode('utf-8'),
            "explanation": "推荐 for %1$S fans who also like %2$S".decode('utf-8')
        }
        tiles_en_us_suggested = {
            "imageURI": "data:image/png;base64,somedata",
            "url": "https://somewhere.com",
            "title": "Some Title US Suggested",
            "type": "organic",
            "bgColor": "#FFFFFF",
            "frecent_sites": ['http://xyz.com', 'http://abc.com'],
            "adgroup_name": "Technology",
            "explanation": "Suggested for %1$S fans who also like %2$S"
        }
        distribution = {
            "US/en-US": [tile_en_us, tiles_en_us_suggested],
            "GB/en-US": [tile_en_us],
        }
        data = ingest_links(distribution, self.channels[0].id)
        distribute(data, self.channels[0].id, True)
        # one image, 2 AG distributions, 2 legacy distributions, one index, one input distribution
        assert_equal(7, self.key_mock.set_contents_from_string.call_count)
        num_tiles_checked = 0
        for i, key in enumerate(self.key_names):
            ag = AG_DIST_PATHNAME.match(key)
            leg = LEGACY_DIST_PATHNAME.match(key)
            if ag:
                # AG distributions keep adgroup metadata
                country_locale, locale = ag.groups()
                data = json.loads(self.key_contents[i])
                for tile in data['directory']:
                    # index 0 expected, only for US/en-US
                    assert_equal(distribution[country_locale][0]['adgroup_name'], tile.get('adgroup_name'))
                    assert_equal(distribution[country_locale][0]['explanation'], tile.get('explanation'))
                    num_tiles_checked += 1
                for tile in data['suggested']:
                    # index 1 expected, only for US/en-US
                    assert_equal(distribution[country_locale][1]['adgroup_name'], tile.get('adgroup_name'))
                    assert_equal(distribution[country_locale][1]['explanation'], tile.get('explanation'))
                    num_tiles_checked += 1
            elif leg:
                # legacy distributions must NOT carry adgroup metadata
                country_locale, locale = leg.groups()
                data = json.loads(self.key_contents[i])
                assert_equal(1, len(data[locale]))
                tile = data[locale][0]
                assert_equal(None, tile.get('adgroup_name'))
                assert_equal(None, tile.get('explanation'))
                num_tiles_checked += 1
        assert_equal(5, num_tiles_checked)

    def test_distribute_inadjacency_check(self):
        """
        Test if check_inadjacency makes it in distributions
        """
        tile_en_gb = {
            "imageURI": "data:image/png;base64,somedata",
            "url": "https://somewhere.com",
            "title": "Some Title CA",
            "type": "organic",
            "bgColor": "#FFFFFF",
            "check_inadjacency": True
        }
        tile_en_us = {
            "imageURI": "data:image/png;base64,somedata",
            "url": "https://somewhere_else.com",
            "title": "Some Title US",
            "type": "organic",
            "bgColor": "#FFFFFF",
            "check_inadjacency": True
        }
        tiles_en_us_suggested = {
            "imageURI": "data:image/png;base64,somedata",
            "url": "https://somewhere.com",
            "title": "Some Title US Suggested",
            "type": "organic",
            "bgColor": "#FFFFFF",
            "frecent_sites": ['http://xyz.com', 'http://abc.com'],
            "check_inadjacency": True,
            "frequency_caps": {
                "daily": 7,
                "total": 20
            }
        }
        distribution = {
            "US/en-US": [tile_en_us, tiles_en_us_suggested],
            "GB/en-US": [tile_en_us],
            "GB/en-GB": [tile_en_gb]
        }
        data = ingest_links(distribution, self.channels[0].id)
        distribute(data, self.channels[0].id, True)
        # one image, 3 AG distributions, 3 legacy distributions, one index, one input distribution
        assert_equal(9, self.key_mock.set_contents_from_string.call_count)
        num_tiles_checked = 0
        for i, key in enumerate(self.key_names):
            ag = AG_DIST_PATHNAME.match(key)
            leg = LEGACY_DIST_PATHNAME.match(key)
            if ag:
                # AG distributions keep check_inadjacency
                country_locale, locale = ag.groups()
                data = json.loads(self.key_contents[i])
                for tile in data['directory']:
                    # index 0 expected, only for US/en-US
                    assert_equal(distribution[country_locale][0]['check_inadjacency'], tile.get('check_inadjacency'))
                    num_tiles_checked += 1
                for tile in data['suggested']:
                    # index 1 expected, only for US/en-US
                    assert_equal(distribution[country_locale][1]['check_inadjacency'], tile.get('check_inadjacency'))
                    num_tiles_checked += 1
            elif leg:
                # legacy distributions must NOT carry check_inadjacency
                country_locale, locale = leg.groups()
                data = json.loads(self.key_contents[i])
                assert_equal(1, len(data[locale]))
                tile = data[locale][0]
                assert_equal(None, tile.get('check_inadjacency'))
                num_tiles_checked += 1
        assert_equal(7, num_tiles_checked)

    def test_distribute_time_limits(self):
        """
        Test if time limits make it in distributions
        """
        tile_en_gb = {
            "imageURI": "data:image/png;base64,somedata",
            "url": "https://somewhere.com",
            "title": "Some Title CA",
            "type": "organic",
            "bgColor": "#FFFFFF",
            "time_limits": {
                "start": "2014-01-12T00:00:00.000",
                "end": "2014-01-31T00:00:00.000"
            }
        }
        tile_en_us = {
            "imageURI": "data:image/png;base64,somedata",
            "url": "https://somewhere_else.com",
            "title": "Some Title US",
            "type": "organic",
            "bgColor": "#FFFFFF",
            "time_limits": {
                "start": "2014-01-12T00:00:00.000",
                "end": "2014-01-31T00:00:00.000"
            }
        }
        tiles_en_us_suggested = {
            "imageURI": "data:image/png;base64,somedata",
            "url": "https://somewhere.com",
            "title": "Some Title US Suggested",
            "type": "organic",
            "bgColor": "#FFFFFF",
            "frecent_sites": ['http://xyz.com', 'http://abc.com'],
            "check_inadjacency": True,
            "frequency_caps": {
                "daily": 7,
                "total": 20
            },
            "time_limits": {
                "start": "2014-01-12T00:00:00.000",
                "end": "2014-01-31T00:00:00.000"
            }
        }
        distribution = {
            "US/en-US": [tile_en_us, tiles_en_us_suggested],
            "GB/en-US": [tile_en_us],
            "GB/en-GB": [tile_en_gb]
        }
        data = ingest_links(distribution, self.channels[0].id)
        distribute(data, self.channels[0].id, True)
        # one image, 3 AG distributions, 3 legacy distributions, one index, one input distribution
        assert_equal(9, self.key_mock.set_contents_from_string.call_count)
        num_tiles_checked = 0
        for i, key in enumerate(self.key_names):
            ag = AG_DIST_PATHNAME.match(key)
            leg = LEGACY_DIST_PATHNAME.match(key)
            if ag:
                # AG distributions keep time_limits
                country_locale, locale = ag.groups()
                data = json.loads(self.key_contents[i])
                for tile in data['directory']:
                    # index 0 expected, only for US/en-US
                    assert_equal(distribution[country_locale][0]['time_limits'], tile.get('time_limits'))
                    num_tiles_checked += 1
                for tile in data['suggested']:
                    # index 1 expected, only for US/en-US
                    assert_equal(distribution[country_locale][1]['time_limits'], tile.get('time_limits'))
                    num_tiles_checked += 1
            elif leg:
                # legacy distributions must NOT carry time_limits
                country_locale, locale = leg.groups()
                data = json.loads(self.key_contents[i])
                assert_equal(1, len(data[locale]))
                tile = data[locale][0]
                assert_equal(None, tile.get('time_limits'))
                num_tiles_checked += 1
        assert_equal(7, num_tiles_checked)

    def test_deploy_always_generates_tile_index(self):
        """A tiles index file should always be generated"""
        # this is a dict, because of a quirk in python's namespacing/scoping
        # https://docs.python.org/2/tutorial/classes.html#python-scopes-and-namespaces
        index_uploaded = {'count': 0}

        def key_set_name(name):
            # count only uploads of the channel's v3 tile index file
            if name == "{0}_tile_index.v3.json".format(self.channels[0].name):
                index_uploaded['count'] += 1
        # override the name recorder installed by setUp
        name_mock = PropertyMock(side_effect=key_set_name)
        type(self.key_mock).name = name_mock
        with open(self.get_fixture_path("mozilla-tiles.fennec.json"), 'r') as f:
            tiles = json.load(f)
        data = ingest_links(tiles, self.channels[0].id)
        distribute(data, self.channels[0].id, True)
        assert_equal(1, index_uploaded['count'])
        # deploying the same data again must upload the index again
        data = ingest_links(tiles, self.channels[0].id)
        distribute(data, self.channels[0].id, True)
        assert_equal(2, index_uploaded['count'])
class TestISOPattern(BaseTestCase):
    """Validation tests for the ISO 8061 datetime regular expression."""

    @staticmethod
    def _match(date_str):
        """Match *date_str* against the schema's ISO 8061 pattern."""
        from splice.schemas import ISO_8061_pattern
        return re.compile(ISO_8061_pattern).match(date_str)

    def test_relative_time_str(self):
        """
        Verify a relative ISO8061 time string validates
        """
        # no timezone suffix: the tz capture group must be empty
        match = self._match('2014-01-12T00:00:00.000')
        assert(match)
        assert_equal(None, match.groups()[-2])

    def test_absolute_time_str(self):
        """
        Verify a ISO8061 time string with Z time string validates
        """
        # Zulu suffix must be captured in the tz group
        match = self._match('2014-01-12T00:00:00.000Z')
        assert(match)
        assert_equal('Z', match.groups()[-2])

    def test_timezone_str(self):
        """
        Verify a ISO8061 time string with timezone time string validates
        """
        # all three numeric-offset spellings must validate and be captured
        for offset in ('-05:00', '-05', '-0500'):
            match = self._match('2015-05-05T14:19:58.359981' + offset)
            assert(match)
            assert_equal(offset, match.groups()[-2])
| mostlygeek/splice | tests/test_ingest.py | Python | mpl-2.0 | 62,667 | [
"VisIt"
] | 0c682eecf19a0077d1d985d78ebd0be052712c17ae9962c332a320ea963c9a15 |
# -*- coding: utf-8 -*-
#
# Collection of functions related to BAM and SAM files
#
# pysam uses 0-based coordinates
from collections import namedtuple
import os
import re
import sys
import pysam
from .chain import ChainFile
from .exceptions import G2GCigarFormatError, G2GBAMError, G2GValueError
from .g2g_utils import get_logger
import g2g_fileutils as g2g_fu
# SAM/BAM bitwise FLAG field values (see the SAM format specification).
# These are OR-ed together to describe an alignment record.
FLAG_NONE = 0x0  # base value
FLAG_PAIRED = 0x1  # template having multiple segments in sequencing
FLAG_PROPER_PAIR = 0x2  # each segment properly aligned according to the aligner
FLAG_UNMAP = 0x4  # segment unmapped
FLAG_MUNMAP = 0x8  # next segment in the template unmapped (mate unmapped)
FLAG_REVERSE = 0x10  # SEQ being reverse complemented
FLAG_MREVERSE = 0x20  # SEQ of the next segment in the template being reversed
FLAG_READ1 = 0x40  # the first segment in the template
FLAG_READ2 = 0x80  # the last segment in the template
FLAG_SECONDARY = 0x100  # secondary alignment
FLAG_QCFAIL = 0x200  # not passing quality controls
FLAG_DUP = 0x400  # PCR or optical duplicate
FLAG_SUPPLEMENTARY = 0x800  # supplementary alignment

# one CIGAR operation: a run length followed by an op code character
REGEX_CIGAR = re.compile("(\d+)([\w=])")
# matches any non-digit character (used to isolate CIGAR lengths)
REGEX_CIGAR_LENGTH = re.compile("\D")

# CIGAR operation codes as single characters
CIGAR_M = 'M'
CIGAR_I = 'I'
CIGAR_D = 'D'
CIGAR_N = 'N'
CIGAR_S = 'S'
CIGAR_H = 'H'
CIGAR_P = 'P'
CIGAR_E = '='
CIGAR_X = 'X'

# CIGAR operation codes as the numeric values pysam uses in cigar tuples
CIGAR_m = 0
CIGAR_i = 1
CIGAR_d = 2
CIGAR_n = 3
CIGAR_s = 4
CIGAR_h = 5
CIGAR_p = 6
CIGAR_e = 7
CIGAR_x = 8

# numeric (or numeric-string) CIGAR code -> character code
CIGAR_N2C = {
    0: 'M',  # alignment match (can be a sequence match or mismatch)
    1: 'I',  # insertion to the reference
    2: 'D',  # deletion from the reference
    3: 'N',  # skipped region from the reference
    4: 'S',  # soft clipping (clipped sequences present in SEQ)
    5: 'H',  # hard clipping (clipped sequences NOT present in SEQ)
    6: 'P',  # padding (silent deletion from padded reference)
    7: '=',  # sequence match
    8: 'X',  # sequence mismatch
    # string-keyed duplicates so lookups work on str-typed codes too
    '0': 'M',
    '1': 'I',
    '2': 'D',
    '3': 'N',
    '4': 'S',
    '5': 'H',
    '6': 'P',
    '7': '=',
    '8': 'X'
}

# character CIGAR code -> numeric code (inverse of CIGAR_N2C)
CIGAR_C2N = {
    'M': 0,
    'I': 1,
    'D': 2,
    'N': 3,
    'S': 4,
    'H': 5,
    'P': 6,
    '=': 7,
    'X': 8
}

# module-level logger
LOG = get_logger()

# one parsed CIGAR operation: op code, op length, and a start/end span
# (span semantics depend on the consumer; see the cigar conversion code)
Cigar = namedtuple('Cigar', ['code', 'length', 'start', 'end'])
def convert_bam_file(chain_file, file_in, file_out, reverse=False):
"""
Convert genome coordinates (in BAM/SAM format) between assemblies. These coordinates
are stored in the :class:`.chain.ChainFile` object.
:param chain_file: chain file used for conversion
:type chain_file: :class:`.chain.ChainFile`
:param str file_in: the input SAM or BAM file
:type file_in: string
:param file_out: the output SAM or file
:type file_out: string
:param reverse: reverse direction of original chain file
:type reverse: boolean
"""
if not isinstance(chain_file, ChainFile):
chain_file = g2g_fu.check_file(chain_file)
if not isinstance(file_in, pysam.Samfile):
file_in = g2g_fu.check_file(file_in)
output_file_name = g2g_fu.check_file(file_out, 'w')
unmapped_file_name = "{0}.unmapped".format(output_file_name)
LOG.info("CHAIN FILE: {0}".format(chain_file))
LOG.info("INPUT FILE: {0}".format(file_in))
LOG.info("OUTPUT FILE: {0}".format(output_file_name))
LOG.info("UNMAPPED FILE: {0}".format(unmapped_file_name))
if not isinstance(chain_file, ChainFile):
LOG.info("Parsing chain file...")
chain_file = ChainFile(chain_file, reverse=reverse)
LOG.info("Chain file parsed")
if not isinstance(file_in, pysam.Samfile):
try:
sam_file = pysam.Samfile(file_in, 'rb')
if len(sam_file.header) == 0:
raise G2GBAMError("BAM File has no header information")
except:
sam_file = pysam.Samfile(file_in, 'r')
if len(sam_file.header) == 0:
raise G2GBAMError("SAM File has no header information")
LOG.info("Converting BAM file")
new_header = sam_file.header
# replace 'HD'
new_header['HD'] = {'VN': 1.0, 'SO': 'coordinate'}
# replace SQ
tmp = []
name_to_id = {}
id = 0
for ref_name in sorted(chain_file.chrom_size_to):
tmp.append({'LN': chain_file.chrom_size_from[ref_name], 'SN': ref_name})
name_to_id[ref_name] = id
id += 1
new_header['SQ'] = tmp
if 'PG' not in new_header:
new_header['PG'] = []
new_header['PG'].append({'ID': 'gtgtools', 'VN': 1.0})
if 'CO' not in new_header:
new_header['CO'] = []
new_header['CO'].append("Original file: {0}".format(file_in))
new_header['CO'].append("Chain File: {0}".format(chain_file.file_name))
dir, temp_file_name = os.path.split(file_out)
parts = temp_file_name.split('.')
ext = parts[-1]
if ext.lower() == 'bam':
new_file = pysam.Samfile(file_out, 'wb', header=new_header)
new_file_unmapped = pysam.Samfile(unmapped_file_name, 'wb', template=sam_file)
elif ext.lower() == 'sam':
new_file = pysam.Samfile(file_out, 'wh', header=new_header)
new_file_unmapped = pysam.Samfile(unmapped_file_name, 'wh', template=sam_file)
else:
raise G2GBAMError("Unable to create new file based upon file extension")
total = 0
total_unmapped = 0
total_fail_qc = 0
map_statistics = {'total': 0,
'fail_cannot_map': 0,
'success_simple': 0,
'success_complex': 0}
map_statistics_pair = {'total': 0,
'fail_cannot_map': 0,
'success_1_fail_2_simple': 0,
'success_1_fail_2_complex': 0,
'success_1_simple_2_fail': 0,
'success_1_simple_2_simple': 0,
'success_1_simple_2_complex': 0,
'success_1_complex_2_fail': 0,
'success_1_complex_2_simple': 0,
'success_1_complex_2_complex': 0}
try:
while True:
if total and total % 10000 == 0:
status_success = 0
status_failed = 0
for k, v in map_statistics_pair.iteritems():
if k.startswith('success'):
status_success += v
elif k.startswith('fail'):
status_failed += v
LOG.info("Processed {0:,} reads, {1:,} successful, {2:,} failed".format(total, status_success, status_failed))
alignment = sam_file.next()
alignment_new = pysam.AlignedRead()
read_chr = sam_file.getrname(alignment.tid)
# READ ONLY
# aend aligned reference position of the read on the reference genome
# alen aligned length of the read on the reference genome.
# positions a list of reference positions that this read aligns to
# qend end index of the aligned query portion of the sequence (0-based, exclusive)
# qlen Length of the aligned query sequence
# qqual aligned query sequence quality values
# qstart start index of the aligned query portion of the sequence (0-based, inclusive)
# query aligned portion of the read and excludes any flanking bases that were soft clipped
# rlen length of the read
# TRUE / FALSE (setting effects flag)
# is_paired true if read is paired in sequencing
# is_proper_pair true if read is mapped in a proper pair
# is_qcfail true if QC failure
# is_read1 true if this is read1
# is_read2 true if this is read2
# is_reverse true if read is mapped to reverse strand
# is_secondary true if not primary alignment
# is_unmapped true if read itself is unmapped
# mate_is_reverse true is read is mapped to reverse strand
# mate_is_unmapped true if the mate is unmapped
# SET
# cigar cigar as list of tuples
# cigarstring alignment as a string
# flag properties flag
# mapq mapping quality
# pnext the position of the mate
# pos 0-based leftmost coordinate
# pnext the position of the mate
# qname the query name
# rnext the reference id of the mate
# seq read sequence bases, including soft clipped bases
# tid target id, contains the index of the reference sequence in the sequence dictionary
# DON'T NEED TO SET or SHOULD WE SET?
# qual read sequence base qualities, including soft clipped bases
# tags the tags in the AUX field
# tlen insert size
total += 1
LOG.debug('~'*80)
LOG.debug("Converting {0} {1} {2} {3}".format(alignment.qname, read_chr, alignment.pos, alignment.cigarstring))
if alignment.is_qcfail:
LOG.debug("\tFail due to qc of old alignment")
new_file_unmapped.write(alignment)
total_fail_qc += 1
continue
if alignment.is_unmapped:
LOG.debug("\tFail due to unmapped old alignment")
new_file_unmapped.write(alignment)
total_unmapped += 1
continue
if not alignment.is_paired:
LOG.debug("SINGLE END ALIGNMENT")
map_statistics['total'] += 1
alignment_new.seq = alignment.seq
alignment_new.flag = FLAG_NONE
alignment_new.mapq = alignment.mapq
alignment_new.qname = alignment.qname
alignment_new.qual = alignment.qual
alignment_new.tags = alignment.tags
read_start = alignment.pos
read_end = alignment.aend
read_strand = '-' if alignment.is_reverse else '+'
mappings = chain_file.find_mappings(read_chr, read_start, read_end)
# unmapped
if mappings is None:
LOG.debug("\tFail due to no mappings")
new_file_unmapped.write(alignment)
map_statistics['fail_cannot_map'] += 1
elif len(mappings) == 1:
if alignment.is_reverse:
alignment_new.flag |= FLAG_REVERSE
alignment_new.tid = name_to_id[mappings[0].to_chr]
alignment_new.pos = mappings[0].to_start
alignment_new.cigar = alignment.cigar
new_file.write(alignment_new)
LOG.debug("\tSuccess (simple): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
map_statistics['success_simple'] += 1
else:
LOG.debug("MAPPINGS: {0}".format(len(mappings)))
for m in mappings:
LOG.debug("> {0}".format(m))
if alignment.is_reverse:
alignment_new.flag |= FLAG_REVERSE
alignment_new.tid = name_to_id[mappings[0].to_chr]
alignment_new.pos = mappings[0].to_start
alignment_new.cigar = convert_cigar(alignment.cigar, read_chr, chain_file, alignment.seq, read_strand, alignment.pos)
new_file.write(alignment_new)
LOG.debug("\tSuccess (complex): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
map_statistics['success_complex'] += 1
else:
LOG.debug("PAIRED END ALIGNMENT")
map_statistics_pair['total'] += 1
alignment_new.seq = alignment.seq
alignment_new.flag = FLAG_PAIRED
alignment_new.mapq = alignment.mapq
alignment_new.qname = alignment.qname
alignment_new.qual = alignment.qual
alignment_new.tags = alignment.tags
if alignment.is_read1:
alignment_new.flag |= FLAG_READ1
if alignment.is_read2:
alignment_new.flag |= FLAG_READ2
if alignment.is_reverse:
alignment_new.flag |= FLAG_REVERSE
if alignment.mate_is_reverse:
alignment_new.flag |= FLAG_MREVERSE
read1_chr = sam_file.getrname(alignment.tid)
read1_start = alignment.pos
read1_end = alignment.aend
read1_strand = '-' if alignment.is_reverse else '+'
read1_mappings = chain_file.find_mappings(read1_chr, read1_start, read1_end) #, read1_strand)
read2_chr = None
read2_start = None
read2_end = None
read2_strand = None
read2_mappings = None
if alignment.mate_is_unmapped:
alignment_new.flag |= FLAG_MUNMAP
else:
read2_chr = sam_file.getrname(alignment.rnext)
read2_start = alignment.pnext
read2_end = read2_start + 1
read2_strand = '-' if alignment.mate_is_reverse else '+'
try:
read2_mappings = chain_file.find_mappings(read2_chr, read2_start, read2_end)
except:
read2_mappings = None
if read1_mappings is None and read2_mappings is None:
alignment_new.flag |= FLAG_UNMAP
alignment_new.flag |= FLAG_MUNMAP
LOG.debug("\tFail due to no mappings")
new_file_unmapped.write(alignment)
map_statistics_pair['fail_cannot_map'] += 1
elif read1_mappings is None and read2_mappings and len(read2_mappings) == 1:
alignment_new.flag |= FLAG_UNMAP
alignment_new.pos = 0
alignment_new.cigarstring = '0M'
alignment_new.rnext = name_to_id[read2_mappings[0].to_chr]
alignment_new.pnext = read2_mappings[0].to_start
alignment_new.tlen = 0
LOG.debug("\tPair Success (1:fail,2:simple): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_fail_2_simple'] += 1
elif read1_mappings is None and read2_mappings and len(read2_mappings) > 1:
alignment_new.flag |= FLAG_UNMAP
alignment_new.pos = 0
alignment_new.cigarstring = '0M'
alignment_new.rnext = name_to_id[read2_mappings[0].to_chr]
alignment_new.pnext = read2_mappings[0].to_start
alignment_new.tlen = 0
LOG.debug("\tPair Success (1:fail,2:complex): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_fail_2_complex'] += 1
elif read1_mappings and len(read1_mappings) == 1 and read2_mappings is None:
alignment_new.flag |= FLAG_MUNMAP
alignment_new.tid = name_to_id[read1_mappings[0].to_chr]
alignment_new.pos = read1_mappings[0].to_start
alignment_new.cigar = alignment.cigar
alignment_new.rnext = name_to_id[read1_mappings[0].to_chr]
alignment_new.pnext = 0
alignment_new.tlen = 0 # CHECK
LOG.debug("\tPair Success (1:simple,2:fail): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_simple_2_fail'] += 1
elif read1_mappings and len(read1_mappings) == 1 and read2_mappings and len(read2_mappings) == 1:
alignment_new.tid = name_to_id[read1_mappings[0].to_chr]
alignment_new.pos = read1_mappings[0].to_start
alignment_new.cigar = alignment.cigar
alignment_new.rnext = name_to_id[read2_mappings[0].to_chr]
alignment_new.pnext = read2_mappings[0].to_start
alignment_new.tlen = 0 # CHECK
LOG.debug("\tPair Success (1:simple,2:simple): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_simple_2_simple'] += 1
elif read1_mappings and len(read1_mappings) == 1 and read2_mappings and len(read2_mappings) > 1:
alignment_new.tid = name_to_id[read1_mappings[0].to_chr]
alignment_new.pos = read1_mappings[0].to_start
alignment_new.cigar = alignment.cigar
alignment_new.rnext = name_to_id[read2_mappings[0].to_chr]
alignment_new.pnext = read2_mappings[0].to_start
alignment_new.tlen = 0 # CHECK
LOG.debug("\tPair Success (1:simple,2:complex): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_simple_2_complex'] += 1
elif read1_mappings and len(read1_mappings) > 1 and read2_mappings is None:
alignment_new.flag |= FLAG_MUNMAP
alignment_new.tid = name_to_id[read1_mappings[0].to_chr]
alignment_new.pos = read1_mappings[0].to_start
alignment_new.cigar = convert_cigar(alignment.cigar, read_chr, chain_file, alignment.seq, read1_strand, alignment.pos)
alignment_new.rnext = name_to_id[read1_mappings[0].to_chr]
alignment_new.pnext = 0
alignment_new.tlen = 0 # CHECK
LOG.debug("\tPair Success (1:complex,2:fail): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_complex_2_fail'] += 1
elif read1_mappings and len(read1_mappings) > 1 and read2_mappings and len(read2_mappings) == 1:
alignment_new.tid = name_to_id[read1_mappings[0].to_chr]
alignment_new.pos = read1_mappings[0].to_start
alignment_new.cigar = convert_cigar(alignment.cigar, read_chr, chain_file, alignment.seq, read1_strand, alignment.pos)
alignment_new.rnext = name_to_id[read2_mappings[0].to_chr]
alignment_new.pnext = read2_mappings[0].to_start
alignment_new.tlen = 0 # CHECK
LOG.debug("\tPair Success (1:complex,2:simple): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_complex_2_simple'] += 1
elif read1_mappings and len(read1_mappings) > 1 and read2_mappings and len(read2_mappings) > 1:
alignment_new.tid = name_to_id[read1_mappings[0].to_chr]
alignment_new.pos = read1_mappings[0].to_start
alignment_new.cigar = convert_cigar(alignment.cigar, read_chr, chain_file, alignment.seq, read1_strand, alignment.pos)
alignment_new.rnext = name_to_id[read2_mappings[0].to_chr]
alignment_new.pnext = read2_mappings[0].to_start
alignment_new.tlen = 0 # CHECK
LOG.debug("\tPair Success (1:complex,2:complex): {0} {1}".format(alignment_new.pos, alignment_new.cigarstring))
new_file.write(alignment_new)
map_statistics_pair['success_1_complex_2_complex'] += 1
else:
raise G2GBAMError("Unknown BAM/SAM conversion/parse situation")
except StopIteration:
LOG.info("All reads processed")
LOG.info(" {:>10} TOTAL ENTRIES".format(total))
LOG.info(" {:>10} TOTAL UNMAPPED ".format(total_unmapped))
LOG.info(" {:>10} TOTAL FAIL QC ".format(total_fail_qc))
if map_statistics['total'] > 0:
LOG.info("")
LOG.info("Mapping Summary Single End")
LOG.info(" {:>10} TOTAL ENTRIES".format(map_statistics['total']))
LOG.info("")
LOG.info(" {:>10} TOTAL SUCCESS".format(map_statistics['success_simple'] + map_statistics['success_complex']))
LOG.info(" {:>10} Simple".format(map_statistics['success_simple']))
LOG.info(" {:>10} Complex".format(map_statistics['success_complex']))
LOG.info("")
LOG.info(" {:>10} TOTAL FAILURES".format(map_statistics['fail_cannot_map']))
LOG.info(" {:>10} Cannot Map ".format(map_statistics['fail_cannot_map']))
if map_statistics_pair['total'] > 0:
total_success = 0
for k, v in map_statistics_pair.iteritems():
if k.startswith('success'):
total_success += v
LOG.info("")
LOG.info("Mapping Summary Paired End")
LOG.info(" {:>10} TOTAL ENTRIES".format(map_statistics_pair['total']))
LOG.info("")
LOG.info(" {:>10} TOTAL SUCCESS".format(total_success))
LOG.info(" {:>10} Read 1 Failed, Read 2 Simple".format(map_statistics_pair['success_1_fail_2_simple']))
LOG.info(" {:>10} Read 1 Failed, Read 2 Complex".format(map_statistics_pair['success_1_fail_2_complex']))
LOG.info(" {:>10} Read 1 Simple, Read 2 Failed".format(map_statistics_pair['success_1_simple_2_fail']))
LOG.info(" {:>10} Read 1 Simple, Read 2 Simple".format(map_statistics_pair['success_1_simple_2_simple']))
LOG.info(" {:>10} Read 1 Simple, Read 2 Complex".format(map_statistics_pair['success_1_simple_2_complex']))
LOG.info(" {:>10} Read 1 Complex, Read 2 Failed".format(map_statistics_pair['success_1_complex_2_fail']))
LOG.info(" {:>10} Read 1 Complex, Read 2 Simple".format(map_statistics_pair['success_1_complex_2_simple']))
LOG.info(" {:>10} Read 1 Complex, Read 2 Complex".format(map_statistics_pair['success_1_complex_2_complex']))
LOG.info("")
LOG.info(" {:>10} TOTAL FAILURES".format(map_statistics_pair['fail_cannot_map']))
LOG.info(" {:>10} Cannot Map".format(map_statistics_pair['fail_cannot_map']))
LOG.info("")
LOG.info("BAM File Converted")
#
# Functions dealing with CIGAR strings
#
#
# BAM OP Description
# 0 M alignment match
# 1 I insertion to reference
# 2 D deletion from reference. region deleted from reference genome
# 3 N skipped region from the reference
# 4 S soft clipping (clipped sequence present in SEQ)
# 5 H hard clipping (clipped sequences NOT present in SEQ)
# 6 P padding (silent deletion from padded reference)
# 7 = sequence match
# 8 X sequence mismatch
#
def cigarlist_to_cigarstring(cigar_list):
    """
    Render cigar elements as a single cigar string.

    Accepts either a ``Cigar`` container (elements exposing ``length`` and
    ``code``) or a plain list of ``(code, length)`` tuples.

    Example::
        [ (0, 10), (1, 1), (0, 75), (2, 2), (0, 20) ]
        => 10M1I75M2D20M

    :param cigar_list: cigar elements (``Cigar`` or list of tuples)
    :return: the cigar string
    :rtype: string
    :raises: :class:`.exceptions.G2GCigarFormatError` on an invalid cigar code
    """
    pieces = []
    if isinstance(cigar_list, Cigar):
        # elements carry their own printable code letter
        try:
            for element in cigar_list:
                pieces.append(str(element.length) + element.code)
        except KeyError:
            raise G2GCigarFormatError("Invalid cigar code: " + str(element))
    else:
        # tuples are (numeric code, length); translate via the lookup table
        try:
            for element in cigar_list:
                pieces.append(str(element[1]) + CIGAR_N2C[element[0]])
        except KeyError:
            raise G2GCigarFormatError("Invalid cigar code: " + str(element))
    return ''.join(pieces)
def cigar_to_string(cigar):
    """
    Convert a list of (code, length) cigar tuples into a cigar string.

    Example::
        [ (0, 10), (1, 1), (0, 75), (2, 2), (0, 20) ]
        => 10M1I75M2D20M

    Bug fix: the original body reassigned the *cigar* parameter to ``''`` and
    then iterated over that empty string, so it always returned ``''``.  The
    only in-module caller (``convert_cigar``) passes a list of
    ``(numeric code, length)`` tuples, which is what the docstring documents,
    so that element shape is used here.

    :param cigar: a list of tuples (code, length)
    :type cigar: list
    :return: the cigar string
    :rtype: string
    :raises: :class:`.exceptions.G2GCigarFormatError` on an invalid cigar code
    """
    parts = []
    try:
        for op in cigar:
            # op is (numeric code, length) -> e.g. (0, 10) -> "10M"
            parts.append(str(op[1]) + CIGAR_N2C[op[0]])
    except KeyError:
        raise G2GCigarFormatError("Invalid cigar code: " + str(op))
    return ''.join(parts)
def _cigar_to_list(cigar_string):
    """
    Convert a cigar string into a list of (code, length) tuples.

    Example::
        10M1I75M2D20M
        => 10M 1I 75M 2D 20M
        => [ (0, 10), (1, 1), (0, 75), (2, 2), (0, 20) ]

    Bug fix: the loop body had been replaced by a debugging stub
    ``lst.append(1)`` with the real tuple left in a trailing comment; the
    documented behavior (appending ``(code, length)`` tuples) is restored.

    :param cigar_string: a cigar string
    :return: a list of tuples (code, length)
    :rtype: list
    :raises: :class:`.exceptions.G2GCigarFormatError` on invalid cigar string
    """
    matches = REGEX_CIGAR.findall(cigar_string)
    possible_length = len(REGEX_CIGAR_LENGTH.findall(cigar_string))
    # every length in the string must be paired with an operation letter
    if len(matches) != possible_length:
        raise G2GCigarFormatError("Invalid cigar string: {0}".format(cigar_string))
    lst = []
    try:
        for m in matches:
            # m is (length, op-letter); translate the letter to its numeric code
            lst.append((CIGAR_CODES_REV[m[1]], int(m[0])))
    except KeyError:
        raise G2GCigarFormatError("Invalid cigar string: {0} : {1} ".format(cigar_string, str(m)))
    return lst
def _cigar_convert(cigar, chromosome, chain, strand='+', position=0):
    """
    PHASE 1
    Convert each CIGAR element to new mappings and construct an array on NEW cigar elements
    For example, depending on the Intervals in the CHAIN file, let's say we have the following
    CIGAR string: 35M49N65M
    This could get converted into
    35M ==> 4M150D31M
    49N ==> -1N (remember, surrounding M's are used to find the length of N which is done on next pass)
    65M ==> 65M
    First pass yields: 35M49N65M => 4M150D31M-1N65M
    :param cigar: list of (numeric code, length) tuples in the source genome
    :param chromosome: chromosome name used for chain-file lookups
    :param chain: chain file object providing find_mappings()
    :param strand: '+' or '-'; NOTE(review): not used anywhere in this body
    :param position: 0-based start of the alignment in the source genome
    :return: list of Cigar namedtuple-like elements; D/N lengths may be -1,
             to be resolved by _cigar_fix_lengths
    """
    cigar_new = []
    current_pos = position
    cigar_no = 0
    for c in cigar:
        cigar_no += 1
        LOG.debug("Element #{0}, '{1}{2}' specified, location: {3}".format(cigar_no, c[1], CIGAR_N2C[c[0]], current_pos))
        # how far this element advances the source-genome cursor
        increment = c[1]
        if c[0] == CIGAR_m:
            # map the M block through the chain file
            new_mappings = chain.find_mappings(chromosome, current_pos, current_pos + c[1])
            if not new_mappings:
                # unmappable M becomes a soft clip of the same length
                LOG.debug("Mappings: None")
                cigar_new.append(Cigar(CIGAR_S, c[1], 0, 0))
            elif len(new_mappings) == 1:
                # single interval: emit one M spanning the target interval
                LOG.debug("Mappings: Easy: {0}".format(new_mappings[0]))
                cigar_new.append(Cigar(CIGAR_M, new_mappings[0].to_end - new_mappings[0].to_start, new_mappings[0].to_start, new_mappings[0].to_end))
            else:
                # multiple maps, not so easy: walk the intervals and emit
                # M/I/D pieces for the gaps between them
                last = None
                for m in new_mappings:
                    LOG.debug("Mappings: Multiple: {0}".format(m))
                    if not last:
                        last = m
                        if current_pos < m.from_start:
                            # special case of first match not in interval, handle accordingly
                            LOG.debug("Adding 'S', because {0} < {1}".format(current_pos, m.from_start))
                            cigar_new.append(Cigar(CIGAR_S, m.from_start - current_pos, 0, 0))
                    else:
                        if m.from_start != last.from_end:
                            # gap on the source side -> insertion relative to target
                            LOG.debug("Adding 'M' and 'I', because {0} != {1}".format(m.from_start, last.from_end))
                            cigar_new.append(Cigar(CIGAR_M, last.to_end - last.to_start, last.to_start, last.to_end))
                            cigar_new.append(Cigar(CIGAR_I, m.from_start - last.from_end, last.to_start, last.to_end))
                        elif m.to_start != last.to_end:
                            # gap on the target side -> deletion
                            LOG.debug("Adding 'M' and 'D', because {0} != {1}".format(m.to_start, last.to_end))
                            cigar_new.append(Cigar(CIGAR_M, last.to_end - last.to_start, last.to_start, last.to_end))
                            cigar_new.append(Cigar(CIGAR_D, m.to_start - last.to_end, 0, 0))
                        # NOTE(review): when consecutive intervals are contiguous on
                        # both sides, no M is emitted for `last` here; only the final
                        # interval's M is appended after the loop — confirm intended.
                        last = m
                LOG.debug("Adding 'M'")
                cigar_new.append(Cigar(CIGAR_M, last.to_end - last.to_start, last.to_start, last.to_end))
        elif c[0] == CIGAR_i:
            # insertions carry through; the -1 D is a placeholder fixed later
            LOG.debug("Adding 'I' and 'D'")
            cigar_new.append(Cigar(CIGAR_I, c[1], 0, 0))
            cigar_new.append(Cigar(CIGAR_D, -1, 0, 0))
            # insertions consume no reference bases
            increment = 0
        elif c[0] == CIGAR_d:
            # deletion length is unknown until surrounding M's are mapped
            LOG.debug("Adding 'D'")
            cigar_new.append(Cigar(CIGAR_D, -1, 0, 0))
        elif c[0] == CIGAR_n:
            # skipped region; length resolved in a later pass
            LOG.debug("Adding 'N'")
            cigar_new.append(Cigar(CIGAR_N, -1, 0, 0))
        elif c[0] in [CIGAR_s, CIGAR_h]:
            # clips are inherited verbatim
            LOG.debug("Adding '{0}'".format(CIGAR_N2C[c[0]]))
            cigar_new.append(Cigar(CIGAR_N2C[c[0]], c[1], 0, 0))
        else:
            # other
            LOG.debug("OTHER CODE '{0}' found, looking at {1} at {2}".format(CIGAR_N2C[c[0]], c, current_pos))
            raise G2GCigarFormatError("ERROR: Not handling the values in this cigar string: {0}".format(cigar))
        #current_pos += c[1]
        current_pos += increment
        LOG.debug("Current CIGAR: {0}".format(cigar_new))
    return cigar_new
def _cigar_combine_consecutive(cigar):
    """
    Collapse runs of consecutive cigar elements that share the same code
    into a single element (e.g. two adjacent N's become one N whose length
    is the sum and whose span covers both).

    :param cigar: list of Cigar elements
    :return: list of Cigar elements with no two adjacent equal codes
    """
    while True:
        # scan for the first adjacent pair with matching codes
        match_at = None
        for idx in xrange(0, len(cigar) - 1):
            LOG.debug("{0}={1}".format(idx, cigar[idx]))
            LOG.debug("{0}={1}".format(idx + 1, cigar[idx + 1]))
            if cigar[idx].code == cigar[idx + 1].code:
                match_at = idx
                break
        if match_at is None:
            # no adjacent duplicates remain
            return cigar
        left = cigar[match_at]
        right = cigar[match_at + 1]
        combined = Cigar(left.code, left.length + right.length, left.start, right.end)
        LOG.debug("Found consecutive elements {0} and {1}, combined into {2}".format(left, right, combined))
        # splice the merged element in and rescan from the top
        cigar = cigar[:match_at] + [combined] + cigar[match_at + 2:]
def _cigar_fix_pre_and_post_M(cigar):
    """
    Replace everything before the first M and after the last M with a single
    soft clip (S) whose length is the sum of the sequence-consuming elements
    in that region.

    :param cigar: list of Cigar elements
    :return: list of Cigar elements with clip-normalized ends
    """
    # pre M to S fix
    for i in xrange(0, len(cigar)):
        if cigar[i].code == CIGAR_M:
            break
    if i != 0:
        first_m = i
        length = 0
        # only I/S/H consume query sequence before the first M
        for i in xrange(0, first_m):
            if cigar[i].code in [CIGAR_I, CIGAR_S, CIGAR_H]:
                length += cigar[i].length
        temp_cigar = [Cigar(CIGAR_S, length, 0, 0)]
        # after the loop i == first_m - 1, so cigar[i+1:] == cigar[first_m:]
        temp_cigar.extend(cigar[i+1:])
        cigar = temp_cigar
    # post M to S fix
    for i in reversed(xrange(0, len(cigar))):
        if cigar[i].code == CIGAR_M:
            break
    if i > 0 and i != (len(cigar) - 1):
        last_m = i
        length = 0
        # NOTE(review): the tail sums M as well as I/S, unlike the head pass
        # which sums I/S/H — confirm this asymmetry is intended
        for i in xrange(last_m+1, len(cigar)):
            if cigar[i].code in [CIGAR_M, CIGAR_I, CIGAR_S]:
                length += cigar[i].length
        temp_cigar = []
        # NOTE(review): after the loop i == len(cigar) - 1, so cigar[:i-1]
        # drops the element just before the tail; cigar[:last_m+1] looks like
        # the intended slice — possible off-by-one, confirm
        temp_cigar.extend(cigar[:i-1])
        temp_cigar.append(Cigar(CIGAR_S, length, 0, 0))
        cigar = temp_cigar
    return cigar
def _cigar_remove_softs_between_m(cigar):
    """
    Remove soft if surrounded by Ms

    Repeatedly finds an interior S element; if there is an M anywhere
    before it and anywhere after it, the S is deleted and the scan restarts.

    :param cigar: list of Cigar elements
    :return: list of Cigar elements with interior S elements removed
    """
    done = False
    while not done:
        done = True
        # look for the first interior S (end elements are never considered)
        for i in xrange(1, len(cigar)-1):
            if cigar[i].code == CIGAR_S:
                done = False
                break
        if done:
            break
        before = None
        after = None
        # nearest M before the S
        for x in reversed(xrange(i)):
            if cigar[x].code == CIGAR_M:
                before = cigar[x]
                break
        # nearest M after the S
        for x in xrange(i+1, len(cigar)):
            if cigar[x].code == CIGAR_M:
                after = cigar[x]
                break
        if before and after:
            LOG.debug("Found 'S' between 'M' so removing 'S'")
            cigar_temp = []
            cigar_temp.extend(cigar[:i])
            cigar_temp.extend(cigar[i+1:])
            cigar = cigar_temp
            LOG.debug(cigar)
        else:
            # NOTE(review): bailing out on the FIRST unflanked S stops the
            # whole scan even if a later S is flanked by Ms — confirm intended
            done = True
    return cigar
def _cigar_fix_lengths(cigar, sequence):
    """
    Resolve the placeholder (-1) lengths left by the conversion pass.

    Since N's (and placeholder D's) aren't mapped, the surrounding M's are
    used to find their length: the gap between the previous M's end and the
    next M's start in target coordinates.  If the unmapped tail has no M on
    one side, the remainder of the read is emitted as a single soft clip
    sized so that the sequence-consuming ops sum to len(sequence).

    :param cigar: list of Cigar elements, some with length == -1
    :param sequence: the read sequence (used only for its length)
    :return: list of Cigar elements with all lengths resolved and
             zero-length elements removed
    """
    # Assign length to -1's
    #
    # Since N's aren't mapped we look at the surrounding M's to find the length of the N's
    #
    # Example: 35M49N65M ==> 4M150D31M-1N65M, the -1 will be corrected by finding the last position of the previous
    # M and first position of the next M
    #
    # there are a few special cases that are handled
    # since there were multiple mappings, we will need to figure out the location on the N's
    done = False
    while not done:
        done = True
        # find first element without a length
        i = 0
        for cm in cigar:
            if cm.length == -1:
                break
            i += 1
        if i == len(cigar):
            # nothing left to resolve
            done = True
            break
        LOG.debug("Found '{0}' at {1}: {2}".format(cm.code, i, cm))
        before = None
        after = None
        # Simple case is surrounded by mapping positions, but might not be the case
        for x in reversed(xrange(i)):
            if cigar[x].code == CIGAR_M:
                before = cigar[x]
                break
        for x in xrange(i+1, len(cigar)):
            if cigar[x].code == CIGAR_M:
                after = cigar[x]
                break
        # special case of 89M2000N11M
        # what happens when this is converted to 89M-1N11S (no M at end)
        # we should have 89M11S
        LOG.debug("Before: {0}".format(before))
        LOG.debug("After: {0}".format(after))
        # check if all cigar elements from here to end do not have a length
        a = i
        while a < len(cigar) - 1:
            if cigar[a].length != -1:
                break
            a += 1
        # if a == len(cigar_mapping) - 1 then all the rest have no length
        LOG.debug("a={0}, len(cigar_mapping) - 1={1}".format(a, len(cigar) - 1))
        if (a == len(cigar) - 1 and cigar[a].start == -1) or not after or not before:
            # take the rest as a clip: keep the resolved head and pad the
            # tail with an S so sequence-consuming ops total len(sequence)
            LOG.debug("Found a clip")
            temp_cigar_mappings = cigar[:i]
            temp_total = 0
            for t in temp_cigar_mappings:
                if t.code in [CIGAR_M, CIGAR_I, CIGAR_S]:
                    temp_total += t.length
            temp_cigar_mappings.append(Cigar(CIGAR_S, len(sequence) - temp_total, -1, -1))
            cigar = temp_cigar_mappings
            done = True
        else:
            # the gap between the flanking Ms gives this element its length
            c = cigar[i]
            new_c = Cigar(c.code, after.start - before.end, before.end, after.start)
            LOG.debug("Replacing, old = {0}, new = {1}".format(c, new_c))
            cigar[i] = new_c
            done = False
    LOG.debug("Removing 0 length elements, if any")
    new_cigar = []
    for cm in cigar:
        # cm[1] is the length field of the Cigar element
        if cm[1] == 0:
            LOG.debug("Removing {}".format(cm))
            continue
        new_cigar.append(cm)
    return new_cigar
def convert_cigar(cigar, chromosome, chain, sequence, strand='+', position=0):
    """
    Generate the cigar string of an old alignment.
    P1: Map M with bx; Inherit S and H; Inherit I but put -1D right behind it; Put -1D or -1N when it's there.
    P1a: Convert xM, if it has zero length after mapping, to xS
    P2: Remove S (including new S originate from unmapped M) if it is surrounded by any pair of consecutive Ms that
    survived P2
    P3: Adjust the size of D or N that are inbetween Ms. Remove it if they have zero length.
    P4: Combine duplicated entries (I guess mostly M or S)
    P5: Put yS for the unmapped regions before the first M and/or after the last M (I believe adding S, H, I's in
    those regions should get you y). In this phase remove the remaining -1D or -1N in those regions first.
    :param cigar: list of (numeric code, length) tuples (pysam-style cigar)
    :param chromosome: the chromosome
    :type chromosome: string
    :param chain: the chain file
    :type chain: the chain file
    :param sequence: the read sequence; its length is used to size clips
    :param strand: '+' or '-' (passed through to phase 1)
    :param position: 0-based alignment start in the source genome
    :return: a new cigar as a list of (numeric code, length) tuples
    :raises: :class:`.exceptions.G2GCigarFormatError` on invalid cigar string
    """
    old_cigar = cigarlist_to_cigarstring(cigar)
    LOG.debug("CIGAR CONVERSION : {0}".format(old_cigar))
    #
    # PHASE 1: Convert each CIGAR element to new mappings and construct an array on NEW cigar elements
    #
    LOG.debug("CIGAR CONVERSION : PHASE 1 : Converting cigar elements")
    new_cigar = _cigar_convert(cigar, chromosome, chain, strand, position)
    LOG.debug("AFTER PHASE 1 : {0} ".format(new_cigar))
    if len(new_cigar) == 1:
        # a single element needs no cleanup passes
        LOG.debug("CIGAR CONVERSION : Skipping to end since only 1 element")
    else:
        #
        # PHASE 2: Remove S if surrounded by M
        #
        LOG.debug("CIGAR CONVERSION : PHASE 2 : Remove S if surrounded by M")
        new_cigar = _cigar_remove_softs_between_m(new_cigar)
        LOG.debug("AFTER PHASE 2 : {0} ".format(new_cigar))
        #
        # PHASE 3: Fix element lengths
        #
        LOG.debug("CIGAR CONVERSION : PHASE 3 : Fix element lengths")
        new_cigar = _cigar_fix_lengths(new_cigar, sequence)
        LOG.debug("AFTER PHASE 3 : {0} ".format(new_cigar))
        #
        # PHASE 4: Combine consecutive matching elements
        #
        LOG.debug("CIGAR CONVERSION : PHASE 4 : Combining elements")
        new_cigar = _cigar_combine_consecutive(new_cigar)
        LOG.debug("AFTER PHASE 4 : {0} ".format(new_cigar))
        #
        # PHASE 5: Fix the regions before the first and after the last M
        #
        LOG.debug("CIGAR CONVERSION : PHASE 5 : Fix pre and post Ms")
        new_cigar = _cigar_fix_pre_and_post_M(new_cigar)
        LOG.debug("AFTER PHASE 5 : {0} ".format(new_cigar))
    #
    # Final pass through CIGAR string
    #
    # test cigar string length
    #
    # SEQ: segment SEQuence. This field can be a '*' when the sequence is not stored. If not a '*',
    # the length of the sequence must equal the sum of lengths of M/I/S/=/X operations in CIGAR.
    # An '=' denotes the base is identical to the reference base. No assumptions can be made on the
    # letter cases.
    #
    LOG.debug("CIGAR CONVERSION : PHASE 6 : Testing length and conversion")
    cigar_seq_length = 0
    # simplify the cigar, throw away the other stuff we used
    simple_cigar = []
    for c in new_cigar:
        simple_cigar.append((CIGAR_C2N[c.code], c.length))
        # M/I/S/=/X are the sequence-consuming operations per the SAM spec
        if c.code in [CIGAR_M, CIGAR_I, CIGAR_S, CIGAR_E, CIGAR_X]:
            cigar_seq_length += c.length
    if cigar_seq_length != len(sequence):
        LOG.debug("CIGAR SEQ LENGTH={0} != SEQ_LEN={1}".format(cigar_seq_length, len(sequence)))
        # not equal according to chain file format, add the clipping length
        simple_cigar.append((CIGAR_s, len(sequence) - cigar_seq_length))
    if old_cigar != cigar_to_string(simple_cigar):
        LOG.debug("old cigar != new cigar")
    else:
        LOG.debug("old cigar == new cigar")
    LOG.debug("CIGAR CONVERSION : {0} ==> {1}".format(old_cigar, cigar_to_string(simple_cigar)))
    LOG.debug(simple_cigar)
    return simple_cigar
if __name__ == '__main__':
    # Ad-hoc manual test of the cigar helpers.
    # NOTE(review): a relative import will fail when this file is run as a
    # script (no package context) — confirm how this block is invoked.
    from .g2g_utils import get_logger, configure_logging
    configure_logging(10)
    LOG = get_logger()
    cigarstring = '5I3D4M9D3S104M7D2I'
    cigarlist = _cigar_to_list(cigarstring)
    LOG.debug(cigarstring)
    # Python 2 print statements (this module predates Python 3)
    print cigarlist
    cigar_new = _cigar_remove_softs_between_m(cigarlist)
    #cigar_new = _cigar_fix_pre_and_post_M(cigarlist)
    print cigar_to_string(cigar_new)
    print cigar_new
| everestial/g2gtools | g2gtools/bamsam.py | Python | gpl-3.0 | 41,096 | [
"pysam"
] | 114bf5bee3b6a23008de3699c0e7758f486a5d92ced2ed5ba48c5dd33f1e0ffb |
#Cancer Sim
from numpy import *
import scipy as sp
import pylab as py
import math
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.colorbar as cb
import matplotlib.pyplot as plt
import cPickle as pickle
from scipy.spatial.distance import euclidean
from math import pow
from scipy.spatial import Delaunay
#from scipy.spatial import KDTree
from scipy.spatial import cKDTree
from hybridKDTree import KDTree
import random
import time
import pprint
#XSIZE = 20
#YSIZE = 20
from neighborlist import NeighborList
from helper import norm, unitize, disp_func, unitize_arr
import links, cells
from logger import logger
# module-level logger; CancerSim creates a child of this one
base_logger = logger.getChild('cancer')
base_logger.info('Inside the cancer.py module')
########################################################
### Simulation Class ###################################
########################################################
#try to speed things up a little bit
from scipy import zeros_like, nan_to_num, allclose
import numexpr as ne
import os
# Optional compiled force kernels: setting the CANCERC environment variable
# switches in the Cython implementations.  Note that the import below
# re-binds `norm` and `disp_func` (previously imported from helper) to the
# compiled versions.
if 'CANCERC' in os.environ:
    CANCERC = True
    #import pyximport
    #pyximport.install()
    from forcefunccelltypes import force_func_hertz, force_func_basal, norm, disp_func
    base_logger.info('CYTHON SUPPORT')
else:
    CANCERC = False
    # pure-Python fallback: callers must check CANCERC before using these
    force_func_basal = None
    force_func_hertz = None
    base_logger.info('NO CYTHON SUPPORT')
class CancerSim:
"""
The main Cancer Simulation Class.
Creates an array of Cells, allows for the designation of cancer cells
And the evolution of the cells thereafter.
"""
def __init__(self,config):
    """ Initialize the simulation

    :param config: dict-like configuration; see the keys read below for the
        required entries (sizes, seed, force parameters, basal-membrane
        geometry, output filenames and per-cell-type parameter dicts).
    """
    #load the configs
    self.config = config
    self.XSIZE = config['XSIZE']
    self.YSIZE = config['YSIZE']
    self.boxsize = (self.XSIZE,self.YSIZE)
    # fall back to the wall clock when no RNG seed is supplied
    if config['seed'] is None:
        self.seed = int(time.time())
    else:
        self.seed = config['seed']
    # force-law parameters
    self.xi = config['force_cutoff']
    self.a = config['force_magnitude']
    self.basalstrength = config['force_magnitude_basal']
    self.basalcutoff = config['force_cutoff_basal']
    # basal membrane geometry: y = basal_height + basal_amplitude*sin(k*x)
    self.basal_height = config['basal_height']
    self.basal_wavenumber = config['basal_wavenumber']
    self.basal_amplitude = config['basal_amplitude']
    # output files
    self.pressure_filename = config['pressure_filename']
    self.cancer_evolution_filename = config['cancer_evolution_filename']
    # seed both the scipy/numpy and stdlib RNGs for reproducibility
    sp.random.seed(self.seed)
    random.seed(self.seed)
    #KDTree
    #self._kdtree = None
    #self._kdtree_cache_T = -1
    self._updated = True
    self.T = 0
    # cell types (should be arguments)
    self.cancer = cells.CellType(**config['cancer_cell_params'])
    self.epidermal = cells.CellType(**config['epidermal_cell_params'])
    self.basal = cells.CellType(**config['basal_cell_params'])
    self.dermal = cells.CellType(**config['dermal_cell_params'])
    self.corneum = cells.CellType(**config['stratum_corneum_cell_params'])
    self.num_cells = 0
    # containers
    self.links = links.Links()
    self._cell_arr = sp.array([])
    self.cells = []
    # ghost cells mirror cells near the periodic x-boundary
    self._ghosts = []
    self._ghost_cutoff = 4
    self._ghost_offset = sp.array([self.boxsize[0],0.])
    self.cancer_cells = []
    self.logger = base_logger.getChild('CancerSim')
    self.logger.info('Initializing CancerSim')
    self.neighs = None
def _setup(self):
    """Build the lattice, jiggle it, triangulate, freeze the links, and
    seed the first cancer cell on the basal membrane."""
    self._triang_lattice()
    self.jiggle(sigma=self.config['jiggle_sigma'])
    self.delaunay()
    self._freeze_links()
    xsize, _ = self.boxsize
    wave = 2*3.141592*self.basal_wavenumber/xsize
    x_offset = self.config['first_cancer_cell_xoffset']
    seed_x = self.XSIZE/2. + x_offset
    # note: the sin argument intentionally keeps the original integer
    # division self.XSIZE/2 (Python 2 semantics)
    seed_y = (self.basal_height
              + self.basal_amplitude*sin((self.XSIZE/2 + x_offset)*wave)
              + self.config['first_cancer_cell_yoffset'])
    self.add_cancer_cell([seed_x, seed_y], self.config['first_cancer_cell_radius'])
def _triang_lattice(self):
    """ Create a triangular grid of points

    Lays out four populations: epidermal cells above the sinusoidal basal
    membrane, dermal cells below it, basal cells along the membrane curve
    y = basal_height + basal_amplitude*sin(period*x), and a row of stratum
    corneum cells above the box.  Cells within _ghost_cutoff of either
    periodic x-boundary also get GhostCell copies.
    """
    XSIZE, YSIZE = self.boxsize
    # wavenumber of the basal membrane in box coordinates
    period = 2*3.141592*self.basal_wavenumber/XSIZE
    self.logger.info('Setting up the Triangular Lattice...')
    #setup the epicells
    epispacing = self.epidermal.L
    # triangular lattice: rows are sqrt(3) apart, odd rows shifted half a step
    xspace,yspace = epispacing , epispacing * sp.sqrt(3)
    for i in sp.arange(0,XSIZE,xspace):
        for ind,j in enumerate(sp.arange(self.basal_height-self.basal_amplitude+5.0*self.basalcutoff,YSIZE,yspace)):
            if ind:
                pass
            # only place cells above the membrane (plus a safety margin)
            if j >= self.basal_height+self.basal_amplitude*sin(i*period)+5.0*self.basalcutoff :
                cell1 = cells.Cell([i,j],self.epidermal,self.num_cells)
                #print 'added epicell at', i, j
                self.add_cell(cell1)
            # the offset cell of the triangular pair
            if (j+0.5*yspace) > self.basal_height+self.basal_amplitude*sin((i+0.5*xspace)*period) :
                cell2 = cells.Cell([i+0.5*xspace,j+0.5*yspace],self.epidermal,self.num_cells)
                #print 'added epicell at', i+0.5*xspace, j+0.5*yspace
                self.add_cell(cell2)
            #add ghosts for first few layers
            if i<self._ghost_cutoff:
                if ind:
                    if j >= self.basal_height+self.basal_amplitude*sin(i*period)+5.0*self.basalcutoff :
                        ghost1 = cells.GhostCell(cell1,XSIZE,1)
                        self._ghosts.append(ghost1)
                    if (j+0.5*yspace) > self.basal_height+self.basal_amplitude*sin((i+0.5*xspace)*period) :
                        ghost2 = cells.GhostCell(cell2,XSIZE,1)
                        self._ghosts.append(ghost2)
            #add ghosts for last few layers
            if i>(XSIZE-self._ghost_cutoff):
                if ind:
                    if j >= self.basal_height+self.basal_amplitude*sin(i*period)+5.0*self.basalcutoff :
                        ghost1 = cells.GhostCell(cell1,XSIZE,-1)
                        self._ghosts.append(ghost1)
                    if (j+0.5*yspace) > self.basal_height+self.basal_amplitude*sin((i+0.5*xspace)*period) :
                        ghost2 = cells.GhostCell(cell2,XSIZE,-1)
                        self._ghosts.append(ghost2)
    #setup the bottom cells
    dermalspacing = self.dermal.L
    xspace,yspace = dermalspacing , dermalspacing*sp.sqrt(3)
    for i in sp.arange(0,XSIZE,xspace):
        # walk downward from just below the membrane to the box bottom
        for ind,j in enumerate(sp.arange(self.basal_height+self.basal_amplitude-5.0*self.basalcutoff,0,-yspace)):
            if j<= self.basal_height+self.basal_amplitude*sin(i*period)-5.0*self.basalcutoff :
                cell1 = cells.Cell([i,j],self.dermal,self.num_cells)
                #print 'added dermacell at', i, j
                self.add_cell(cell1)
            if ind and (j+0.5*yspace) <= self.basal_height+self.basal_amplitude*sin((i+0.5*xspace)*period)-5.0*self.basalcutoff:
                cell2 = cells.Cell([i+0.5*xspace,j+0.5*yspace],self.dermal,self.num_cells)
                #print 'added dermacell at', i+0.5*xspace, j+0.5*yspace
                self.add_cell(cell2)
            #add ghosts for first few layers
            # NOTE(review): ghost pair uses cell1/cell2 from the most recent
            # iteration even when only one branch above ran — confirm intended
            if i<self._ghost_cutoff:
                if j<= self.basal_height+self.basal_amplitude*sin(i*period)-5*self.basalcutoff :
                    ghost1 = cells.GhostCell(cell1,XSIZE,1)
                    ghost2 = cells.GhostCell(cell2,XSIZE,1)
                    self._ghosts.extend([ghost1,ghost2])
            #add ghosts for last few layers
            if i>(XSIZE-self._ghost_cutoff):
                if j<= self.basal_height+self.basal_amplitude*sin(i*period)-5.0*self.basalcutoff :
                    ghost1 = cells.GhostCell(cell1,XSIZE,-1)
                    ghost2 = cells.GhostCell(cell2,XSIZE,-1)
                    self._ghosts.extend([ghost1,ghost2])
    #setup the middle cells
    basalspacing = self.basal.L
    # basal cells sit directly on the membrane curve, at half spacing
    for i in sp.arange(0,XSIZE,basalspacing/2):
        cell = cells.Cell([i,self.basal_height+self.basal_amplitude*sin(i*period)],self.basal,self.num_cells)
        #print 'added basalcell at', i, self.basal_height+self.basal_amplitude*sin(i*period)
        self.add_cell(cell)
        if i<self._ghost_cutoff:
            ghost = cells.GhostCell(cell,XSIZE,1)
            self._ghosts.append(ghost)
        if i>(XSIZE-self._ghost_cutoff):
            ghost = cells.GhostCell(cell,XSIZE,-1)
            self._ghosts.append(ghost)
    #setup the corneum cells
    corneumspacing = self.corneum.L
    # a single row above the box acts as the stratum corneum boundary
    for i in sp.arange(0,XSIZE,corneumspacing):
        cell = cells.Cell([i,YSIZE+2.0*self.basalcutoff],self.corneum,self.num_cells)
        #print 'added corneumcell at', i, YSIZE
        self.add_cell(cell)
        if i<self._ghost_cutoff:
            ghost = cells.GhostCell(cell,XSIZE,1)
            self._ghosts.append(ghost)
        if i>(XSIZE-self._ghost_cutoff):
            ghost = cells.GhostCell(cell,XSIZE,-1)
            self._ghosts.append(ghost)
    self.logger.info('Set up the Triangular Lattice')
def get_pos_arr(self,force=False):
""" Get an array of all of the cell positions """
#if self._updated is False or force:
# return self._cell_arr
self._cell_arr = sp.zeros((len(self.cells),2))
for (i,cell) in enumerate(self.cells):
self._cell_arr[i] = cell.pos
self._updated = False
return self._cell_arr
def get_radius_arr(self):
rad_arr=sp.zeros(len(self.cells))
for (i,cell) in enumerate(self.cells):
rad_arr[i] = cell.radius
return rad_arr
def _get_kdtree(self, force=False, new=True):
    """Build a KDTree over the current cell positions for efficient
    geometric neighbor queries.

    *force* is forwarded to get_pos_arr; *new* is kept for interface
    compatibility (the caching it used to control is disabled)."""
    positions = self.get_pos_arr(force).copy()
    return KDTree(positions)
def _get_ckdtree(self,force=False):
""" Generate a cKDTree """
pos = self.get_pos_arr(force).copy()
return cKDTree(pos)
def _query_point(self, x, r, eps=None):
    """Return the cells within radius r of the point x.

    When *eps* is truthy it is passed to the KDTree as the approximate
    search tolerance."""
    tree = self._get_kdtree()
    if eps:
        found_inds = tree.query_ball_point(x, r, eps)
    else:
        found_inds = tree.query_ball_point(x, r)
    # translate tree indices back into cell objects
    return [self.cells[ind] for ind in found_inds]
def _get_vel_arr(self):
""" Get an array of all of the cell velocities """
vel_arr = sp.zeros((self.num_cells,2))
for (i,cell) in enumerate(self.cells):
vel_arr[i] = cell.vel
return vel_arr
def _update_pos(self,pos_arr):
""" Update all of the cell positions with an array """
for (pos,cell) in zip(pos_arr,self.cells):
#enact the periodic boundary conditions
pos[0] = pos[0]%self.XSIZE
cell.pos = pos
self._cell_arr = pos_arr
#self._updated = True
def _update_vel(self,vel_arr):
""" Update all of the cell velocities with an array """
for (vel,cell) in zip(vel_arr,self.cells):
cell.vel = vel
def _get_ghost_pos_arr(self):
""" Get all of the ghost positions """
arr = sp.zeros((len(self._ghosts),2))
for ind,cell in enumerate(self._ghosts):
arr[ind] = cell.pos
return arr
def _update_ghosts(self):
""" Update the positions of all of the ghost cells """
for ghost in self._ghosts:
ghost.update()
def jiggle(self,sigma=0.1,ghosts=True):
    """ Jiggle the atom positions

    Adds Gaussian noise to every cell position, scaled per-cell by
    sigma * (cell type's rest length L).

    :param sigma: noise amplitude as a fraction of each cell's L
    :param ghosts: when True, refresh the ghost cells afterwards
    """
    pos = self.get_pos_arr()
    # per-cell noise scale: the rest length of the cell's type
    sigarr = sp.array([cell.type.L for cell in self.cells])
    randn = sp.randn(self.num_cells,2)
    # broadcast the per-cell scale over both coordinates
    newpos = pos + sigma*(sigarr*randn.T).T
    self._update_pos(newpos)
    self._updated = True
    if ghosts:
        self._update_ghosts()
    self.logger.info('Jiggled the atoms')
def _set_radii(self):
""" set radii as the average of the links starting from each cell """
for cell in [cell for cell in self.cells if cell.type == self.epidermal]:
average_length=0.0
count=0.
for neigh in self.links.get_neighbors(cell):
average_length += self.links.get_link(cell,neigh).L/2.0
count += 1.
if count:
cell.radius=average_length/count
for cell in [cell for cell in self.cells if cell.type == self.dermal]:
cell.radius=self.epidermal.L/2.0
def _set_radii_min(self):
""" set radii as the smallest link size """
for cell in [cell for cell in self.cells if cell.type == self.epidermal]:
min_length = min([link.L/2. for link in self.links.get_links(cell)])
#rint min_length
cell.radius=min_length
for cell in [cell for cell in self.cells if cell.type == self.dermal]:
cell.radius=self.epidermal.L/2.0
def _freeze_links(self):
    """ Adjust all of the links to be their current extension

    Sets each link's rest length L to its current (capped) extension so the
    jiggled lattice becomes the stress-free state, then recomputes radii.
    """
    for link in self.links:
        link.L = link.extension_without_breaking()
        # NOTE(review): the two blocks below are Python 2 debug prints for
        # Dermal-Basal and Epidermal-Basal links; they only check link.one's
        # type against link.two's, not the symmetric pairing — likely
        # leftover debugging output
        if (link.one.type.name == 'Dermal'):
            if (link.two.type.name == 'Basal') :
                print link.one, link.two, link.L
        if (link.one.type.name == 'Epidermal'):
            if (link.two.type.name == 'Basal') :
                print link.one, link.two, link.L
    self._set_radii_min()
    self.logger.info('Froze the links in place')
def _filter_ghosts(self, one, two):
    """Map a possibly-ghost pair onto the underlying real cells.

    Raises Exception("DoubleGhost") when both members are ghosts, since a
    bond between two ghosts has no real counterpart."""
    one_is_ghost = isinstance(one, cells.GhostCell)
    two_is_ghost = isinstance(two, cells.GhostCell)
    if one_is_ghost and two_is_ghost:
        raise Exception("DoubleGhost")
    if one_is_ghost:
        return one.original, two
    if two_is_ghost:
        return one, two.original
    return one, two
def _clear_links(self):
    """Drop every existing bond by swapping in a fresh, empty Links
    container."""
    self.links = links.Links()
def delaunay(self):
    """ Delaunay routine, sets the initial links

    Triangulates cells plus ghost cells, then adds a bond for each triangle
    edge whose one-two edge is shorter than twice the expected rest length.
    Ghosts are mapped back to their originals; a ghost-ghost pair is skipped.

    Improvements over the original: the three identical try/except blocks
    are collapsed into one loop over the triangle's edges; the Python-2-only
    ``except Exception, e`` and ``e.message`` are replaced with the
    cross-version ``except ... as e`` and ``e.args`` (compatible with
    Python 2.6+); unused num_cells/num_ghosts locals are removed.
    """
    self.logger.debug('Running the Delaunay routine')
    # positions of all real cells followed by the boundary ghosts
    full_list = self.cells + self._ghosts
    arr = sp.zeros((len(full_list), 2))
    for ind, cell in enumerate(full_list):
        arr[ind] = cell.pos
    # get the Delaunay construction
    tri = Delaunay(arr)
    # add the links
    for i, j, k in tri.vertices:
        cellone = full_list[i]
        celltwo = full_list[j]
        cellthree = full_list[k]
        length_of_bond = norm(cellone.pos - celltwo.pos)
        expected_length = 0.5*(cellone.type.L + celltwo.type.L)
        # filter out spuriously long edges (measured on the one-two edge)
        if length_of_bond < 2*expected_length:
            for one, two in ((cellone, celltwo),
                             (celltwo, cellthree),
                             (cellthree, cellone)):
                try:
                    real_one, real_two = self._filter_ghosts(one, two)
                    self.add_bond(real_one, real_two)
                except Exception as e:
                    # ghost-ghost pairs are expected and skipped
                    if e.args and e.args[0] == "DoubleGhost":
                        pass
                    else:
                        raise
def add_cell(self,cell):
""" Add the cell: cell """
self.cells.append(cell)
self.num_cells += 1
self._updated = True
self.logger.debug('Adding the cell {cell}'.format(cell=cell))
def add_bond(self,one,two):
""" Add a bond between cells one and two """
self.links.add_link(one,two,xsize=self.XSIZE)
self.logger.debug('Adding a bond between {one} and {two}'.format(one=one,two=two))
def remove_bond(self,one,two):
""" Remove a bond between cells one and two """
self.links.remove_link(one,two)
self.logger.debug('Removed the link between {one} and {two}'.format(one=one,two=two))
def remove_cell(self,cell):
""" Remove the cell: cell, and all bonds for that cell """
self.cells.remove(cell)
self.links.remove_cell(cell)
self.logger.debug('Removed the cell {cell}'.format(cell=cell))
def get_neighbors(self,cell):
""" Get the linked neighbor cells of cell """
return self.links.get_neighbors(cell)
def add_cancer_cell(self,x,r,eps=None):
file=open(self.cancer_evolution_filename,'a')
""" randomly make a cell a cancer cell """
cells = self._query_point(x,r,eps)
cells = [cell for cell in cells if cell.type != self.basal]
if cells:
cell = random.choice(cells)
self.cancer_cells.append(cell)
self.links.remove_cell(cell)
cell.type = self.cancer
s = str(cell.pos[0]) + ' ' + str(cell.pos[1]) + '\n'
file.write(s)
self.logger.info('Added a cancer cell: {cell}'.format(cell=cell))
self._updated = True
else:
raise Exception("No targets found at {} within radius {}".format(x,r))
file.close
def duplicate_cancer_cell(self,cancer=None,disp_frac = 0.01):
""" Duplicate the cancer cell: cancer """
if cancer is None:
cancer = random.choice(self.cancer_cells)
file=open(self.cancer_evolution_filename,'a')
self.logger.info('Duplicating a cancer cell...')
#need to choose a random direction and do the relaxation
L = disp_frac * cancer.type.L
theta = sp.rand()*2*sp.pi
disp = L * sp.array([sp.sin(theta),sp.cos(theta)])
newcell = cells.Cell(cancer.pos + disp,self.cancer,self.num_cells)
newcell.radius = cancer.radius
cancer.pos = cancer.pos - disp
s = str(cancer.pos[0]) + ' ' + str(cancer.pos[1]) + '\n'
file.write(s)
self.cancer_cells.append(newcell)
self.add_cell(newcell)
"""
neighs = self.links.get_neighbors(cancer).copy()
for neigh in neighs:
link_disp = neigh.pos - cancer.pos
if sp.vdot(link_disp,disp) >= 0:
#remove old link, create new one.
self.links.remove_link(cancer,neigh)
self.links.add_link(newcell,neigh)
"""
#self.links.add_link(newcell,cancer)
self._updated = True
file.close
def time_step(self):
""" Run a time step, duplicate a cancer cell,
do a FIRE relaxation, and plot """
self.logger.info('Running a time step')
self.duplicate_cancer_cell()
self.fire()
self.plot_sized_cells()
self.T += 1
    def plot_cells(self,clf=True,fignum=1,ghosts=False,*args,**kwargs):
        """ Plot the current configuration """
        # Scatter-plot every cell coloured by its type; ghosts (periodic
        # images) are optionally drawn smaller and faded.
        # NOTE(review): extra *args are splatted into py.scatter AFTER the
        # keyword arguments -- they land as further positional arguments.
        self.logger.info('Plotting the cells')
        pos_arr = self.get_pos_arr()
        py.figure(fignum)
        if clf:
            py.clf()
        py.scatter(pos_arr[:,0],pos_arr[:,1],
                        c=[i.type.color for i in self.cells],
                        s=50,
                        zorder=10,
                        *args,**kwargs)
        if ghosts:
            ghost_arr = self._get_ghost_pos_arr()
            py.scatter(ghost_arr[:,0],ghost_arr[:,1],
                        c = [i.original.type.color for i in self._ghosts],
                        s = 30,
                        zorder=10,
                        alpha = 0.3,
                        *args,**kwargs)
        # Equal aspect ratio so circles/distances are not distorted.
        py.axis('equal')
def my_circle_scatter(self, axes, x_array, y_array, rad_array, col_array, **kwargs):
for x, y, R, c in zip(x_array, y_array , rad_array, col_array):
circle = py.Circle((x,y), radius=R, color = c, **kwargs)
axes.add_patch(circle)
return True
    def plot_sized_cells_old(self,clf=True,fignum=1,ghosts=False,*args, **kwargs):
        """ Plot the current configuration using circles"""
        # Each cell is drawn as a circle of its own radius, coloured by
        # cell type; ghost images are drawn as small faded dots.
        self.logger.info('Plotting Sized Cells')
        pos_arr = self.get_pos_arr()
        rad_arr = self.get_radius_arr()
        col_arr = [i.type.color for i in self.cells]
        py.figure(fignum)
        if clf:
            py.clf()
        axes=py.axes()
        self.my_circle_scatter(axes,
                    pos_arr[:,0],
                    pos_arr[:,1],
                    rad_arr, col_arr, alpha=0.6,**kwargs)
        if ghosts:
            ghost_arr = self._get_ghost_pos_arr()
            py.scatter(ghost_arr[:,0],ghost_arr[:,1],
                        c = [i.original.type.color for i in self._ghosts],
                        s = 30,
                        zorder=10,
                        alpha = 0.3,
                        *args,**kwargs)
        # Clamp the x range to the periodic box, keep aspect ratio square.
        py.xlim((0,self.XSIZE))
        py.axis('equal')
def plot_sized_cells(self,clf=True,fignum=1,ghosts=False,*args, **kwargs):
""" Plot the current configuration using circles"""
self.logger.info('Plotting Sized Cells')
pos_arr = self.get_pos_arr()
rad_arr = self.get_radius_arr()
pos = self.get_pos_arr(force=True)
pressure_arr = zeros_like(pos)
#kdtree = self._get_kdtree(force=True)
for i,j in self._get_npairs(): #kdtree.query_pairs(self.xi*1.0):
force = self.force_func_celltypes(self.cells[i], self.cells[j] )
pressure_arr[i] += fabs(force)
pressure_arr[j] += fabs(force)
pressure_arr = nan_to_num(pressure_arr)
#print "\n"
#print pressure_arr
#print "\n"
cancer_cell_pressures = empty(len(self.cancer_cells))
numero_cancer = 0
numero_cell = 0
for i in self.cells:
if i.type.name == 'Cancer' :
cancer_cell_pressures[numero_cancer]=norm(pressure_arr[numero_cell])/(3.141592*rad_arr[numero_cell]*rad_arr[numero_cell])
numero_cancer = numero_cancer + 1
numero_cell = numero_cell + 1
#printing stress on file
file=open(self.pressure_filename,'a')
#factor is 4/3( E/(1-nu^2)) = 3/2 kPa
factor = 1.5
for i in range(0,len(cancer_cell_pressures)):
s = str(i) + ' ' + str(cancer_cell_pressures[i]*factor) +'\n'
file.write(s)
s = '\n'
file.write(s)
#s = str(numero_cancer) + ' ' + str(cancer_cell_pressures.mean()) +'\n'
#file.write(s)
#s = '\n'
file.close
if len(cancer_cell_pressures)>1 :
cancer_cell_pressures = (cancer_cell_pressures-cancer_cell_pressures.min())/(cancer_cell_pressures.max()-cancer_cell_pressures.min())*0.9+0.1
#print "\n"
#print cancer_cell_pressures
#print "\n"
else :
cancer_cell_pressures[0] = 0.5
#print '\n'
#print cancer_cell_pressures
#print '\n'
col_arr = []
numero_cancer = 0
for i in self.cells:
if i.type.name == 'Cancer' :
rgb_color = cm.hot(1-cancer_cell_pressures[numero_cancer],1.0)
col_arr.append(rgb_color)
#print '\n'
#print rgb_color , cancer_cell_forces[numero_cancer]
#print '\n'
numero_cancer = numero_cancer + 1
else :
col_arr.append(i.type.color)
#cb.Colorbar(col_arr,kwargs)
#print '\n'
#print col_arr
#print '\n'
#file=open(self.screenshot_filename,'a')
#for i in range(0, len(pos_arr)):
# s = self.cells[i].type.name + ' ' + str(pos_arr[i][0]) + ' ' + str(pos_arr[i][1]) + ' ' + str(rad_arr[i]) + ' ' + str(col_arr[i]) +'\n'
# file.write(s)
#file.close
py.figure(fignum)
if clf:
py.clf()
axes=py.axes()
self.my_circle_scatter(axes,
pos_arr[:,0],
pos_arr[:,1],
rad_arr, col_arr, alpha=0.6,**kwargs)
if ghosts:
ghost_arr = self._get_ghost_pos_arr()
py.scatter(ghost_arr[:,0],ghost_arr[:,1],
c = [i.original.type.color for i in self._ghosts],
s = 30,
zorder=10,
alpha = 0.3,
*args,**kwargs)
py.xlim((0,self.XSIZE))
py.axis('equal')
def plot_links(self,clf=False,cutoff=None,fignum=1,ghosts=False,*args,**kwargs):
""" Plot the links between cells """
self.logger.info('Plotting Links')
if cutoff is None:
cutoff = self.XSIZE/2.
py.figure(fignum)
if clf:
py.clf()
#file=open(self.screenshot_filename,'a')
for link in self.links:
if link.C_10 > 0:
#s = 'Link' + ' ' + str(link.one.pos[0]) + ' ' + str(link.one.pos[1]) + ' ' + str(link.two.pos[0]) + ' ' + str(link.two.pos[1]) +'\n'
#file.write(s)
d12=link.one.pos-link.two.pos
abs_d12=norm(d12)
if abs_d12 < cutoff:
data = sp.array([ link.one.pos, link.two.pos ])
py.plot(data[:,0],data[:,1],
c=py.cm.jet( min(link.energy*30.,1.) ),
alpha=0.6,
*args, **kwargs )
#file.close
def _get_pairs(self):
kdtree = self._get_kdtree(force=True)
return kdtree.query_pairs(self.xi*1.0)
def _get_cpairs(self,num=100):
pos = self.get_pos_arr(force=True)
ckdtree = self._get_ckdtree(force=False)
ds,neighs = ckdtree.query(pos,num,distance_upper_bound=self.xi)
pairs = set()
N = len(neighs)
for (i,j),k in sp.ndenumerate(neighs):
# if cmp(i,k) < 1:
# pairs.add((i,k))
# else:
# pairs.add((k,i))
if k < N and (i,k) not in pairs and (k,i) not in pairs:
pairs.add((i,k))
return pairs
def _get_npairs(self):
if self.neighs is None:
self.neighs = NeighborList([self.xi]*self.num_cells)
self.neighs.update(self)
return ((i,j) for i in range(self.num_cells) for j in self.neighs.get_neighbors(i) )
    @property
    def forces(self):
        """ get the forces between cells, as array, both from links
        and from the native force_func
        """
        self.logger.info('Computing forces')
        pos = self.get_pos_arr(force=True)
        force_arr = zeros_like(pos)
        # Spring-like contributions from explicit links; Newton's third
        # law gives equal and opposite forces on the two endpoints.
        for link in self.links:
            force = link.force
            force_arr[link.one.index] += force
            force_arr[link.two.index] -= force
        #kdtree = self._get_kdtree(force=True)
        # Pairwise contact forces over the neighbour list.
        for i,j in self._get_npairs(): #kdtree.query_pairs(self.xi*1.0):
            force = self.force_func_celltypes(self.cells[i], self.cells[j] )
            #disp = self.cells[i].pos - self.cells[j].pos
            #L = norm(disp)
            #force = 2 * self.a**4 * ( 2 * self.xi**2 - 3 * self.xi * L + L**2 )/( self.xi**2 * L**6 ) * disp
            force_arr[i] += force
            force_arr[j] -= force
        # Replace NaNs (e.g. from coincident cells) with zeros.
        return nan_to_num(force_arr)
def force_func(self,cell1,cell2):
""" the native force function between two positions """
x1 = cell1.pos
x2 = cell2.pos
disp = x1 - x2
mod_disp = norm(disp)
force = 2 * self.a**4 * ( 2 * self.xi**2 - 3 * self.xi * mod_disp + mod_disp**2 )/( self.xi**2 * mod_disp**6 ) * disp
return force
def force_func2(self,cell1,cell2):
""" the native force function between two positions, second attempt """
x1 = cell1.pos
x2 = cell2.pos
r1 = cell1.radius
r2 = cell2.radius
disp = x1 - x2
mod_disp = norm(disp)
a1=self.a*(r1+r2)
xi1=self.xi*(r1+r2)
force = 2 * a1**4 * ( 2 * xi1**2 - 3 * xi1 * mod_disp + mod_disp**2 )/( xi1**2 * mod_disp**6 ) * disp
return force
def force_func_hertz(self,cell1,cell2):
""" the Hertz force between two cells """
x1 = cell1.pos
x2 = cell2.pos
r1 = cell1.radius
r2 = cell2.radius
disp = x1 - x2
mod_disp = norm(disp)
delta=(r1+r2)-mod_disp
if delta > 0.0:
force = self.a*delta**1.5*disp/mod_disp
else:
force= 0.0
return force
def force_func_celltypes_old(self,cell1,cell2):
""" Try to case out the cell types """
x1 = cell1.pos
x2 = cell2.pos
#use the Cython dispfunc
disp = disp_func(x1,x2,self.XSIZE)
mod_disp = norm(disp)
force = 0.0
if cell1.type==self.basal and cell2.type==self.basal:
#We have two basal cells
force = 0.0
elif cell1.type==self.basal or cell2.type==self.basal:
#We have one basal cell
if mod_disp <= self.basalcutoff:
oldexpr = '2 * self.basalstrength**4 * ( 2 * self.basalcutoff**2 - 3 * self.basalcutoff * mod_disp + mod_disp**2 )/( self.basalcutoff**2 * mod_disp**6 ) * disp'
basalstrength = self.basalstrength
basalcutoff = self.basalcutoff
forcestr = '2 * basalstrength**4 * ( 2 * basalcutoff**2 - 3 * basalcutoff * mod_disp + mod_disp**2 )/( basalcutoff**2 * mod_disp**6 ) * disp'
force = ne.evaluate(forcestr)
else:
#We have some other situation
r1 = cell1.radius
r2 = cell2.radius
delta=(r1+r2)-mod_disp
if delta > 0:
a = self.a
oldexp = 'sqrt(r1*r2/(r1+r2)) * self.a * delta**1.5*disp/mod_disp'
forcestr = 'sqrt(r1*r2/(r1+r2)) * a * delta**1.5*disp/mod_disp'
force = ne.evaluate(forcestr)
#print 'force', force
return force
def force_func_celltypes(self,cell1,cell2):
""" Try to case out the cell types """
x1 = cell1.pos
x2 = cell2.pos
#use the Cython dispfunc
disp = disp_func(x1,x2,self.XSIZE)
mod_disp = norm(disp)
force = 0.0
if cell1.type==self.basal and cell2.type==self.basal:
#We have two basal cells
force = 0.0
#elif cell1.type==self.basal or cell2.type==self.basal:
#We have one basal cell
# if mod_disp <= self.basalcutoff:
# oldexpr = '2 * self.basalstrength**4 * ( 2 * self.basalcutoff**2 - 3 * self.basalcutoff * mod_disp + mod_disp**2 )/( self.basalcutoff**2 * mod_disp**6 ) * disp'
# basalstrength = self.basalstrength
# basalcutoff = self.basalcutoff
# forcestr = '2 * basalstrength**4 * ( 2 * basalcutoff**2 - 3 * basalcutoff * mod_disp + mod_disp**2 )/( basalcutoff**2 * mod_disp**6 ) * disp'
# force = ne.evaluate(forcestr)
else:
#We have some other situation
r1 = cell1.radius
r2 = cell2.radius
min_radius = min(r1,r2)
renormalized_r = r1*r2/(r1+r2)
delta=(r1+r2)-mod_disp
if delta > 0:
omega = pow(delta/renormalized_r,1.5)
a = self.a
forcestr = 'sqrt(renormalized_r) * a * delta**1.5*(1 + 1.15*omega**0.34 +9.5*omega + 9.288*omega**2)/(1+2.3*omega)*disp/mod_disp'
force = ne.evaluate(forcestr)
#print cell1.type, cell2.type, 'delta:', delta
else :
if cell1.type==self.cancer and cell2.type==self.cancer:
#alpha = 0.1
#forcestr = '-6*(alpha/(-delta))**7'
if -delta<2.0 :
forcestr = '0.001*delta'
else :
forcestr = '0.0'
force = ne.evaluate(forcestr)
print '\n'
print cell1.type, cell2.type, 'delta<0:', delta, 'force:', force
print '\n'
return force
def force_func_celltypes_cython(self,cell1,cell2):
""" Try to case out the cell types """
x1 = cell1.pos
x2 = cell2.pos
if cell1.type==self.basal and cell2.type==self.basal:
#We have two basal cells
force = 0.0
elif cell1.type==self.basal or cell2.type==self.basal:
#We have one basal cell
force = force_func_basal(x1,x2,self.basalstrength,self.XSIZE)
else:
#We have some other situation
r1 = cell1.radius
r2 = cell2.radius
force = force_func_hertz(x1,x2,r1,r2,self.a,self.XSIZE)
return force
@property
def energy(self):
""" get the energy of the current configuration """
tot_energy = 0
for link in self.links:
tot_energy += link.energy
return tot_energy
    def fire(self):
        """ Do a fire relaxation """
        # FIRE (Fast Inertial Relaxation Engine) minimisation: integrate
        # damped dynamics, growing the timestep while the power P = F.v
        # stays positive and resetting velocity when it turns negative.
        #load params
        fmax = self.config['fmax']
        Nmin = self.config['Nmin']
        finc = self.config['finc']
        fdec = self.config['fdec']
        alphastart = self.config['alphastart']
        fa = self.config['fa']
        deltatmax = self.config['deltatmax']
        maxsteps = self.config['maxsteps']
        alpha = alphastart
        deltat = 0.1
        pos = self.get_pos_arr(force=True)
        v = sp.zeros_like(pos)
        self._update_vel(v)
        v = self._get_vel_arr()
        steps_since_negative = 0
        # Legacy helpers kept for reference; the live loop below uses the
        # module-level `norm` and `unitize_arr` instead.
        def norm_arr_old(vec):
            return sp.sqrt(sp.sum(vec**2,1))
        def unitize_arr_old(vec):
            return nan_to_num(((vec.T)/norm_arr(vec)).T)
        # NOTE(review): `norm_arr = norm` and the `unitize_arr` used below
        # must operate row-wise (per cell) for `max(norm_arr(forces))` to
        # make sense -- confirm the module-level definitions.
        norm_arr = norm
        forces = nan_to_num(sp.array([ [sp.inf,sp.inf]]))
        step_num = 0
        self.logger.info("Beginning FIRE Relaxation -- fmax={}".format(fmax))
        maxdpos = 100000.0
        # Iterate until every per-cell force is below fmax or we run out
        # of steps.
        while max(norm_arr(forces)) > fmax and step_num < maxsteps:
            forces = self.forces
            self.logger.debug("Computed forces: {forces}".format(forces=pprint.pformat(forces)))
            power = sp.vdot(forces,v)
            self.logger.info("Step: {}, max_force: {}, power: {}".format(step_num,
                            max(norm_arr(forces)),
                            power))
            #DEBUG PRINTING
            #print "Step: {}, max_force: {}, power: {}, deltat: {}".format(step_num,
            #                    max(norm_arr(forces)),
            #                    power, deltat)
            # Mix the velocity towards the force direction (FIRE step).
            v = nan_to_num( (1.0 - alpha)*v + alpha*(norm_arr(v)*unitize_arr(forces).T).T )
            if power>0.:
                # Downhill: after Nmin good steps, grow dt and shrink alpha.
                if steps_since_negative > Nmin:
                    deltat = min(deltat * finc, deltatmax)
                    alpha = alpha*fa
                steps_since_negative += 1
            else:
                # Uphill: freeze motion and restart the adaptive schedule.
                steps_since_negative = 0
                deltat = deltat * fdec
                v *= 0.
                alpha = alphastart
            # Semi-implicit Euler update of velocities and positions.
            v += forces*deltat
            pos += v*deltat
            self._update_pos(pos)
            step_num += 1
            #maxdpos = max(norm_arr(v*deltat))
            #DEBUG PRINTING
            #print "Maximum position change = {}".format(maxdpos)
            #DEBUG_PLOT
            #self.plot_sized_cells()
            #self.plot_links()
            #self.plot_forces()
            #py.draw()
        self._update_pos(pos)
        self._update_vel(v)
        self.logger.info("Relaxation finished...")
def save(self,filename):
self.logger.info("SAVING state to {}".format(filename))
with open(filename,'w') as f:
pickle.dump( (self.config, self.cells, self.links, self._ghosts, self.T ), f )
def vmd_out(self,filename):
""" Write a VMD compatible file to filename """
with open(filename,'w') as f:
positions = self.get_pos_arr(force=True)
formatstring = "{color} {x} {y} {z}\n"
for ind,row in enumerate(positions):
f.write(formatstring.format(x=row[0], y=row[1], z=0, color=self.cells[ind].type.type_ind))
def plot_forces(self,factor=5):
X,Y = self.get_pos_arr().T
FX,FY = self.forces.T
py.quiver(X,Y,FX,FY,scale=factor)
#Some code for ASE neighborlist functionality
def get_positions(self):
return sp.hstack(( self.get_pos_arr(), sp.zeros((self.num_cells,1)) ) )
def get_pbc(self):
return sp.array([True,False,False])
def get_cell(self):
return sp.array([[self.XSIZE,0,0],[0,self.YSIZE,0],[0,0,1]])
def __len__(self):
return self.num_cells
def load_from_file(filename):
    """Rebuild a CancerSim from a pickle written by CancerSim.save()."""
    # Binary mode matches pickle's requirements on Python 3.
    with open(filename, 'rb') as f:
        config, cells, links, ghosts, T = pickle.load(f)
    Q = CancerSim(config)
    Q.cells = cells
    # The class stores its ghost images in the private attribute
    # ``_ghosts``; the original assigned ``Q.ghosts``, which nothing reads.
    Q._ghosts = ghosts
    Q.T = T
    Q.links = links
    Q.cancer_cells = [cell for cell in cells if cell.type.name == "Cancer"]
    Q.num_cells = len(Q.cells)
    return Q
if __name__ == "__main__":
    # Smoke-test driver: build a triangular lattice, link it via Delaunay,
    # freeze the links, seed one cancer cell and draw the result.
    Q = CancerSim()
    Q._triang_lattice()
    Q.delaunay()
    Q._freeze_links()
    # NOTE(review): XSIZE/YSIZE are assumed to be module-level globals
    # (not visible in this chunk) -- confirm they exist at import time.
    Q.add_cancer_cell([XSIZE/2.,YSIZE/2 + 3],1)
    Q.plot_cells()
    # Convenience alias for interactive (e.g. IPython) debugging sessions.
    self = Q
    """
    TODO: have links know about periodic boundary conditions (maybe)
          freeze links (DONE)
          Ghost cells need update method. (DONE)
          fire relaxation (DONE)
          set and divide cancer cells (DONE)
          long range forces (DONE)
          cache the link calcs
          cache the KDTree calcs?
          allow more transparent custimization
          expose CellTypes
          use logging module
    """
| alexalemi/cancersim | code/cancer.py | Python | mit | 39,321 | [
"ASE",
"VMD"
] | 603caa6559b7876a06c99a04a3c4c1651095bc2a17f89a454ae8345175d6a4ad |
"""
Tests for the pipeline_info.py module
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2008, Prabhu Ramachandran Enthought, Inc.
# License: BSD Style.
import unittest
from tvtk.api import tvtk
from mayavi.core import pipeline_info
class TestPipelineInfoTest(unittest.TestCase):
    def test_tvtk_dataset_name(self):
        "Check that tvtk datasets are converted to names correctly."
        # Pair each input object with the name it should map to; the last
        # two entries are deliberately not datasets.
        cases = [
            (tvtk.ImageData(), 'image_data'),
            (tvtk.StructuredPoints(), 'image_data'),
            (tvtk.RectilinearGrid(), 'rectilinear_grid'),
            (tvtk.StructuredGrid(), 'structured_grid'),
            (tvtk.PolyData(), 'poly_data'),
            (tvtk.UnstructuredGrid(), 'unstructured_grid'),
            (tvtk.Property(), 'none'),   # not a dataset
            ('foo', 'none'),             # not a TVTK object
        ]
        result = [pipeline_info.get_tvtk_dataset_name(d) for d, _ in cases]
        expect = [name for _, name in cases]
        self.assertEqual(result, expect)

    def test_default_pipeline_info(self):
        "Is the default PipelineInfo class built right."
        info = pipeline_info.PipelineInfo()
        # A freshly-built PipelineInfo starts with all lists empty.
        self.assertEqual(len(info.datasets), 0)
        self.assertEqual(len(info.attribute_types), 0)
        self.assertEqual(len(info.attributes), 0)
# Run this module's test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| dmsurti/mayavi | mayavi/tests/test_pipeline_info.py | Python | bsd-3-clause | 1,524 | [
"Mayavi"
] | 260d4e4224de5be1f9d29a00b019db59342ff3e74ede643ad2a6835077575932 |
# pylint: disable=R0913,R0914,W0201,W0622,C0302,R0902,R0903,W1001,W0612,W0613
"""Useful functions and objects used more or less everywhere."""
from __future__ import print_function
from builtins import zip
from builtins import str
from builtins import range
from builtins import object
__author__ = "Lorenzo Bolla"
import numpy
import EMpy.constants
import EMpy.materials
import scipy.linalg
import scipy.interpolate
import scipy.optimize
import time
import sys
class Layer(object):
    """A layer is defined by a material (iso or aniso) and a thickness."""
    def __init__(self, mat, thickness):
        """Set the material and the thickness."""
        self.mat = mat
        self.thickness = thickness
    def isIsotropic(self):
        """Return True if the material is isotropic, False if anisotropic."""
        return self.mat.isIsotropic()
    def getEPSFourierCoeffs(self, wl, n, anisotropic=True):
        """Return the Fourier coefficients of eps and eps**-1, orders [-n,n]."""
        nood = 2 * n + 1
        hmax = nood - 1
        if not anisotropic:
            # isotropic
            # A homogeneous layer only has the zero-order (DC) Fourier
            # component, stored at the centre index `hmax`.
            EPS = numpy.zeros(2 * hmax + 1, dtype=complex)
            EPS1 = numpy.zeros_like(EPS)
            rix = self.mat.n(wl)
            EPS[hmax] = rix ** 2
            EPS1[hmax] = rix ** -2
            return EPS, EPS1
        else:
            # anisotropic
            # Same idea, but the DC component is the full (normalized)
            # 3x3 permittivity tensor and its matrix inverse.
            EPS = numpy.zeros((3, 3, 2 * hmax + 1), dtype=complex)
            EPS1 = numpy.zeros_like(EPS)
            EPS[:, :, hmax] = (
                numpy.squeeze(self.mat.epsilonTensor(wl)) / EMpy.constants.eps0
            )
            EPS1[:, :, hmax] = scipy.linalg.inv(EPS[:, :, hmax])
            return EPS, EPS1
    def capacitance(self, area=1.0, wl=0):
        """Capacitance = eps0 * eps_r * area / thickness."""
        if self.isIsotropic():
            eps = EMpy.constants.eps0 * numpy.real(self.mat.n(wl).item() ** 2)
        else:
            # suppose to compute the capacitance along the z-axis
            eps = self.mat.epsilonTensor(wl)[2, 2, 0]
        return eps * area / self.thickness
    def __str__(self):
        """Return the description of a layer."""
        return "%s, thickness: %g" % (self.mat, self.thickness)
class BinaryGrating(object):
    """A Binary Grating is defined by two materials (iso or aniso), a
    duty cycle, a pitch and a thickness."""
    def __init__(self, mat1, mat2, dc, pitch, thickness):
        """Set the materials, the duty cycle and the thickness."""
        self.mat1 = mat1
        self.mat2 = mat2
        self.dc = dc
        self.pitch = pitch
        self.thickness = thickness
    def isIsotropic(self):
        """Return True if both the materials are isotropic, False otherwise."""
        return self.mat1.isIsotropic() and self.mat2.isIsotropic()
    def getEPSFourierCoeffs(self, wl, n, anisotropic=True):
        """Return the Fourier coefficients of eps and eps**-1, orders [-n,n]."""
        nood = 2 * n + 1
        hmax = nood - 1
        if not anisotropic:
            # isotropic
            # Analytic Fourier series of a rectangular profile: the sinc
            # term gives the coefficients of a rect of width f = dc, and
            # the (h == 0) term adds mat2's background level.
            rix1 = self.mat1.n(wl)
            rix2 = self.mat2.n(wl)
            f = self.dc
            h = numpy.arange(-hmax, hmax + 1)
            EPS = (rix1 ** 2 - rix2 ** 2) * f * numpy.sinc(h * f) + rix2 ** 2 * (h == 0)
            EPS1 = (rix1 ** -2 - rix2 ** -2) * f * numpy.sinc(h * f) + rix2 ** -2 * (
                h == 0
            )
            return EPS, EPS1
        else:
            # anisotropic
            # Same series applied element-wise to the 3x3 permittivity
            # tensors (and, for EPS1, to their matrix inverses).
            EPS = numpy.zeros((3, 3, 2 * hmax + 1), dtype=complex)
            EPS1 = numpy.zeros_like(EPS)
            eps1 = numpy.squeeze(self.mat1.epsilonTensor(wl)) / EMpy.constants.eps0
            eps2 = numpy.squeeze(self.mat2.epsilonTensor(wl)) / EMpy.constants.eps0
            f = self.dc
            h = numpy.arange(-hmax, hmax + 1)
            for ih, hh in enumerate(h):
                EPS[:, :, ih] = (eps1 - eps2) * f * numpy.sinc(hh * f) + eps2 * (
                    hh == 0
                )
                EPS1[:, :, ih] = (
                    scipy.linalg.inv(eps1) - scipy.linalg.inv(eps2)
                ) * f * numpy.sinc(hh * f) + scipy.linalg.inv(eps2) * (hh == 0)
            return EPS, EPS1
    def capacitance(self, area=1.0, wl=0):
        """Capacitance = eps0 * eps_r * area / thickness."""
        # Effective permittivity is the duty-cycle-weighted average of
        # the two materials.
        if self.isIsotropic():
            eps = EMpy.constants.eps0 * numpy.real(
                self.mat1.n(wl) ** 2 * self.dc + self.mat2.n(wl) ** 2 * (1 - self.dc)
            )
        else:
            eps1 = self.mat1.epsilonTensor(wl)[2, 2, 0]
            eps2 = self.mat2.epsilonTensor(wl)[2, 2, 0]
            eps = numpy.real(eps1 * self.dc + eps2 * (1 - self.dc))
        return eps * area / self.thickness
    def __str__(self):
        """Return the description of a binary grating."""
        return "(%s, %s), dc: %g, pitch: %g, thickness: %g" % (
            self.mat1,
            self.mat2,
            self.dc,
            self.pitch,
            self.thickness,
        )
class SymmetricDoubleGrating(object):
    """A Symmetric Double Grating is defined by three materials (iso
    or aniso), two duty cycles, a pitch and a thickness.
    Inside the pitch there are two rects of width dc1*pitch of mat1 and
    dc2*pitch of mat2, with a spacer of fixed width made of mat3 between them.
    """
    def __init__(self, mat1, mat2, mat3, dc1, dc2, pitch, thickness):
        """Set the materials, the duty cycle and the thickness."""
        self.mat1 = mat1
        self.mat2 = mat2
        self.mat3 = mat3
        self.dc1 = dc1
        self.dc2 = dc2
        self.pitch = pitch
        self.thickness = thickness
    def isIsotropic(self):
        """Return True if all the materials are isotropic, False otherwise."""
        return (
            self.mat1.isIsotropic()
            and self.mat2.isIsotropic()
            and self.mat3.isIsotropic()
        )
    def getEPSFourierCoeffs(self, wl, n, anisotropic=True):
        """Return the Fourier coefficients of eps and eps**-1, orders [-n,n]."""
        nood = 2 * n + 1
        hmax = nood - 1
        if not anisotropic:
            # isotropic
            # Two shifted rect profiles on a mat3 background.  A and B are
            # the centre offsets of the two rects (in units of pitch/N);
            # each exp factor shifts the corresponding sinc series.
            rix1 = self.mat1.n(wl)
            rix2 = self.mat2.n(wl)
            rix3 = self.mat3.n(wl)
            f1 = self.dc1
            f2 = self.dc2
            h = numpy.arange(-hmax, hmax + 1)
            N = len(h)
            A = -N * f1 / 2.0
            B = N * f2 / 2.0
            EPS = (
                rix3 ** 2 * (h == 0)
                + (rix1 ** 2 - rix3 ** 2)
                * f1
                * numpy.sinc(h * f1)
                * numpy.exp(2j * numpy.pi * h / N * A)
                + (rix2 ** 2 - rix3 ** 2)
                * f2
                * numpy.sinc(h * f2)
                * numpy.exp(2j * numpy.pi * h / N * B)
            )
            EPS1 = (
                rix3 ** -2 * (h == 0)
                + (rix1 ** -2 - rix3 ** -2)
                * f1
                * numpy.sinc(h * f1)
                * numpy.exp(2j * numpy.pi * h / N * A)
                + (rix2 ** -2 - rix3 ** -2)
                * f2
                * numpy.sinc(h * f2)
                * numpy.exp(2j * numpy.pi * h / N * B)
            )
            return EPS, EPS1
        else:
            # anisotropic
            # Same shifted series applied element-wise to the 3x3 tensors
            # (and, for EPS1, to their matrix inverses).
            EPS = numpy.zeros((3, 3, 2 * hmax + 1), dtype=complex)
            EPS1 = numpy.zeros_like(EPS)
            eps1 = numpy.squeeze(self.mat1.epsilonTensor(wl)) / EMpy.constants.eps0
            eps2 = numpy.squeeze(self.mat2.epsilonTensor(wl)) / EMpy.constants.eps0
            eps3 = numpy.squeeze(self.mat3.epsilonTensor(wl)) / EMpy.constants.eps0
            f1 = self.dc1
            f2 = self.dc2
            h = numpy.arange(-hmax, hmax + 1)
            N = len(h)
            A = -N * f1 / 2.0
            B = N * f2 / 2.0
            for ih, hh in enumerate(h):
                EPS[:, :, ih] = (
                    (eps1 - eps3)
                    * f1
                    * numpy.sinc(hh * f1)
                    * numpy.exp(2j * numpy.pi * hh / N * A)
                    + (eps2 - eps3)
                    * f2
                    * numpy.sinc(hh * f2)
                    * numpy.exp(2j * numpy.pi * hh / N * B)
                    + eps3 * (hh == 0)
                )
                EPS1[:, :, ih] = (
                    (scipy.linalg.inv(eps1) - scipy.linalg.inv(eps3))
                    * f1
                    * numpy.sinc(hh * f1)
                    * numpy.exp(2j * numpy.pi * hh / N * A)
                    + (scipy.linalg.inv(eps2) - scipy.linalg.inv(eps3))
                    * f2
                    * numpy.sinc(hh * f2)
                    * numpy.exp(2j * numpy.pi * hh / N * B)
                    + scipy.linalg.inv(eps3) * (hh == 0)
                )
            return EPS, EPS1
    def capacitance(self, area=1.0, wl=0):
        """Capacitance = eps0 * eps_r * area / thickness."""
        # Effective permittivity is the fill-fraction-weighted average of
        # the three materials (mat3 fills the remaining 1 - dc1 - dc2).
        if self.isIsotropic():
            eps = EMpy.constants.eps0 * numpy.real(
                self.mat1.n(wl) ** 2 * self.dc1
                + self.mat2.n(wl) ** 2 * self.dc2
                + self.mat3.n(wl) ** 2 * (1 - self.dc1 - self.dc2)
            )
        else:
            eps1 = self.mat1.epsilonTensor(wl)[2, 2, 0]
            eps2 = self.mat2.epsilonTensor(wl)[2, 2, 0]
            eps3 = self.mat3.epsilonTensor(wl)[2, 2, 0]
            eps = numpy.real(
                eps1 * self.dc1 + eps2 * self.dc2 + eps3 * (1 - self.dc1 - self.dc2)
            )
        return eps * area / self.thickness
    def __str__(self):
        """Return the description of a symmetric double grating."""
        return "(%s, %s, %s), dc1: %g, dc2: %g, pitch: %g, thickness: %g" % (
            self.mat1,
            self.mat2,
            self.mat3,
            self.dc1,
            self.dc2,
            self.pitch,
            self.thickness,
        )
class AsymmetricDoubleGrating(SymmetricDoubleGrating):
    """An Asymmetric Double Grating is defined by three materials (iso
    or aniso), three duty cycles, a pitch and a thickness.
    Inside the pitch there are two rects of width dc1*pitch of mat1 and
    dc2*pitch of mat2, separated by dcM*pitch of mat3 (between mat1 and
    mat2, not between mat2 and mat1!).
    """
    def __init__(self, mat1, mat2, mat3, dc1, dc2, dcM, pitch, thickness):
        # Reuse the symmetric constructor and store the extra spacer
        # duty cycle dcM.
        SymmetricDoubleGrating.__init__(
            self, mat1, mat2, mat3, dc1, dc2, pitch, thickness
        )
        self.dcM = dcM
    def getEPSFourierCoeffs(self, wl, n, anisotropic=True):
        """Return the Fourier coefficients of eps and eps**-1, orders [-n,n]."""
        nood = 2 * n + 1
        hmax = nood - 1
        if not anisotropic:
            # isotropic
            # Like the symmetric case, but the rect centre offsets A and B
            # are pushed apart by the spacer duty cycle fM.
            rix1 = self.mat1.n(wl)
            rix2 = self.mat2.n(wl)
            rix3 = self.mat3.n(wl)
            f1 = self.dc1
            f2 = self.dc2
            fM = self.dcM
            h = numpy.arange(-hmax, hmax + 1)
            N = len(h)
            A = -N * (f1 + fM) / 2.0
            B = N * (f2 + fM) / 2.0
            EPS = (
                rix3 ** 2 * (h == 0)
                + (rix1 ** 2 - rix3 ** 2)
                * f1
                * numpy.sinc(h * f1)
                * numpy.exp(2j * numpy.pi * h / N * A)
                + (rix2 ** 2 - rix3 ** 2)
                * f2
                * numpy.sinc(h * f2)
                * numpy.exp(2j * numpy.pi * h / N * B)
            )
            EPS1 = (
                rix3 ** -2 * (h == 0)
                + (rix1 ** -2 - rix3 ** -2)
                * f1
                * numpy.sinc(h * f1)
                * numpy.exp(2j * numpy.pi * h / N * A)
                + (rix2 ** -2 - rix3 ** -2)
                * f2
                * numpy.sinc(h * f2)
                * numpy.exp(2j * numpy.pi * h / N * B)
            )
            return EPS, EPS1
        else:
            # anisotropic
            # Element-wise version for the 3x3 permittivity tensors.
            EPS = numpy.zeros((3, 3, 2 * hmax + 1), dtype=complex)
            EPS1 = numpy.zeros_like(EPS)
            eps1 = numpy.squeeze(self.mat1.epsilonTensor(wl)) / EMpy.constants.eps0
            eps2 = numpy.squeeze(self.mat2.epsilonTensor(wl)) / EMpy.constants.eps0
            eps3 = numpy.squeeze(self.mat3.epsilonTensor(wl)) / EMpy.constants.eps0
            f1 = self.dc1
            f2 = self.dc2
            fM = self.dcM
            h = numpy.arange(-hmax, hmax + 1)
            N = len(h)
            A = -N * (f1 + fM) / 2.0
            B = N * (f2 + fM) / 2.0
            for ih, hh in enumerate(h):
                EPS[:, :, ih] = (
                    (eps1 - eps3)
                    * f1
                    * numpy.sinc(hh * f1)
                    * numpy.exp(2j * numpy.pi * hh / N * A)
                    + (eps2 - eps3)
                    * f2
                    * numpy.sinc(hh * f2)
                    * numpy.exp(2j * numpy.pi * hh / N * B)
                    + eps3 * (hh == 0)
                )
                EPS1[:, :, ih] = (
                    (scipy.linalg.inv(eps1) - scipy.linalg.inv(eps3))
                    * f1
                    * numpy.sinc(hh * f1)
                    * numpy.exp(2j * numpy.pi * hh / N * A)
                    + (scipy.linalg.inv(eps2) - scipy.linalg.inv(eps3))
                    * f2
                    * numpy.sinc(hh * f2)
                    * numpy.exp(2j * numpy.pi * hh / N * B)
                    + scipy.linalg.inv(eps3) * (hh == 0)
                )
            return EPS, EPS1
    def capacitance(self, area=1.0, wl=0):
        """Capacitance = eps0 * eps_r * area / thickness."""
        # mat3 fills everything not occupied by mat1/mat2, i.e. the
        # fraction 1 - dc1 - dc2 (which includes the dcM spacer).
        if self.isIsotropic():
            eps = EMpy.constants.eps0 * numpy.real(
                self.mat1.n(wl) ** 2 * self.dc1
                + self.mat2.n(wl) ** 2 * self.dc2
                + self.mat3.n(wl) ** 2 * (1 - self.dc1 - self.dc2)
            )
        else:
            eps1 = self.mat1.epsilonTensor(wl)[2, 2, 0]
            eps2 = self.mat2.epsilonTensor(wl)[2, 2, 0]
            eps3 = self.mat3.epsilonTensor(wl)[2, 2, 0]
            eps = numpy.real(
                eps1 * self.dc1 + eps2 * self.dc2 + eps3 * (1 - self.dc1 - self.dc2)
            )
        return eps * area / self.thickness
    def __str__(self):
        """Return the description of an asymmetric double grating."""
        return (
            "(%s, %s, %s), dc1: %g, dc2: %g, dcM: %g, " "pitch: %g, thickness: %g"
        ) % (
            self.mat1,
            self.mat2,
            self.mat3,
            self.dc1,
            self.dc2,
            self.dcM,
            self.pitch,
            self.thickness,
        )
class LiquidCrystalCell(object):
"""Liquid Crystal Cell.
A liquid crystal cell is determined by a liquid crystal, a voltage
applied to it, a total thickness, an anchoring thickness. The
liquid crystal molecules are anchored to the cell with a given
pretilt angle (that, at zero volts, is constant throughout all the LC cell).
The cell is decomposed in nlayers homogeneous layers. The LC
characteristics in each layer is either read from file or deduced
by the LC physical parameters solving a boundary value problem
(bvp).
Inspiration from:
U{http://www.ee.ucl.ac.uk/~rjames/modelling/constant-order/oned/}.
@ivar lc: Liquid Crystal.
@ivar voltage: voltage applied.
@ivar t_tot: total thickness.
@ivar t_anchoring: anchoring thickness.
@ivar pretilt: LC angle pretilt.
@ivar totaltwist: LC angle total twist between the anchoring layers.
@ivar nlayers: number of layers to subdived the cell.
@ivar data_file: file with the angles for voltages applid to the cell.
"""
    def __init__(
        self,
        lc,
        voltage,
        t_tot,
        t_anchoring,
        pretilt=0,
        totaltwist=0,
        nlayers=100,
        data_file=None,
    ):
        """Build the layer decomposition, then apply ``voltage`` (which
        triggers the director-angle computation via the property setter)."""
        self.lc = lc
        self.t_tot = t_tot
        self.t_anchoring = t_anchoring
        self.pretilt = pretilt
        self.totaltwist = totaltwist
        self.nlayers = nlayers
        self.data_file = data_file
        # thicknesses of internal layers
        tlc_internal = (
            (self.t_tot - 2.0 * self.t_anchoring)
            / (self.nlayers - 2.0)
            * numpy.ones(self.nlayers - 2)
        )
        # thicknesses of layers
        self.tlc = numpy.r_[self.t_anchoring, tlc_internal, self.t_anchoring]
        # internal sample points
        lhs = numpy.r_[0, numpy.cumsum(tlc_internal)]
        # normalized sample points: at the center of internal layers, plus the
        # boundaries (i.e. the anchoring layers)
        self.normalized_sample_points = numpy.r_[
            0, (lhs[1:] + lhs[:-1]) / 2.0 / (self.t_tot - 2 * self.t_anchoring), 1
        ]
        # absolute sample points: midpoints of every layer, anchoring included
        tmp = numpy.r_[0, numpy.cumsum(self.tlc)]
        self.sample_points = 0.5 * (tmp[1:] + tmp[:-1])
        # finally, apply voltage
        self.voltage = voltage
    def getvoltage(self):
        # Accessor for the read/write `voltage` property below.
        return self.__voltage
    def setvoltage(self, v):
        # Setting the voltage re-solves the director angles, either from
        # the tabulated data file or from the boundary value problem.
        self.__voltage = v
        if self.data_file is not None:
            self.__angles = self._get_angles_from_file()
        else:
            self.__angles = self._get_angles_from_bvp()
    # `voltage` recomputes `angles` as a side effect of assignment.
    voltage = property(fget=getvoltage, fset=setvoltage)
    def getangles(self):
        # Accessor for the read-only `angles` property below.
        return self.__angles
    # `angles` is refreshed whenever `voltage` is set.
    angles = property(fget=getangles)
    def __ode_3k(self, z, f):
        """Right-hand side of the three-elastic-constant LC director ODE.

        Inspiration from:
        U{http://www.ee.ucl.ac.uk/~rjames/modelling/constant-order/oned/}."""
        # ------------------------------------------------------------
        # minimise Oseen Frank free energy and solve Laplace equation
        # ------------------------------------------------------------
        # [f(1..6)] = [theta theta' phi phi' u u']
        # theta = tilt, phi = twist, u = electric potential.
        theta2, dtheta2dz, phi2, dphi2dz, u2, du2dz = f
        # Frank elastic constants and chirality/dielectric parameters.
        K11 = self.lc.K11
        K22 = self.lc.K22
        K33 = self.lc.K33
        q0 = self.lc.q0
        epslow = self.lc.epslow
        deleps = self.lc.deleps
        e0 = EMpy.constants.eps0
        K1122 = K11 - K22
        K3322 = K33 - K22
        costheta1 = numpy.cos(theta2)
        sintheta1 = numpy.sin(theta2)
        # Local z-permittivity depends on the tilt angle.
        ezz = e0 * (epslow + deleps * sintheta1 ** 2)
        # maple generated (see lc3k.mws)
        ddtheta2dz = (
            costheta1
            * sintheta1
            * (
                K1122 * dtheta2dz ** 2
                + 2 * K3322 * costheta1 ** 2 * dphi2dz ** 2
                - K3322 * dtheta2dz ** 2
                - K22 * dphi2dz ** 2
                - e0 * deleps * du2dz ** 2
                + 2 * q0 * K22 * dphi2dz
                - K3322 * dphi2dz ** 2
            )
            / (K1122 * costheta1 ** 2 - K3322 * costheta1 ** 2 + K22 + K3322)
        )
        ddphi2dz = (
            2
            * sintheta1
            * dtheta2dz
            * (
                2 * K3322 * costheta1 ** 2 * dphi2dz
                - K22 * dphi2dz
                + q0 * K22
                - K3322 * dphi2dz
            )
            / costheta1
            / (K3322 * costheta1 ** 2 - K22 - K3322)
        )
        ddu2dz = -2 * e0 * deleps * sintheta1 * costheta1 * dtheta2dz * du2dz / ezz
        # Second derivatives of [theta, phi, u].
        return numpy.array([ddtheta2dz, ddphi2dz, ddu2dz])
    def __bc_nosplay(self, f):
        """Boundary-condition residuals for the strong-anchoring problem.

        Inspiration from:
        U{http://www.ee.ucl.ac.uk/~rjames/modelling/constant-order/oned/}."""
        theta2, dtheta2dz, phi2, dphi2dz, u2, du2dz = f
        # Fixed tilt (pretilt) and potential (0 / voltage) at z = 0 and
        # z = 1; twist goes from 0 to totaltwist across the cell.
        return numpy.array(
            [
                theta2[0] - self.pretilt,
                phi2[1] - 0,
                u2[2] - 0,
                theta2[3] - self.pretilt,
                phi2[4] - self.totaltwist,
                u2[5] - self.voltage,
            ]
        )
def __ic_nosplay(self, z):
    """Initial guess for the collocation solver: parabolic tilt profile,
    linear twist and linear potential across the cell.

    Returns (guess, rhs): the state vector and the ODE right-hand side
    evaluated on the guess (the format colnew expects — confirm against
    scikits.bvp1lg docs).

    Inspiration from:
    U{http://www.ee.ucl.ac.uk/~rjames/modelling/constant-order/oned/}."""
    # Maximum extra tilt attainable above the pretilt (up to 90 deg).
    self.maxtilt = 90 * numpy.pi / 180 - self.pretilt
    init = numpy.array(
        [
            self.pretilt + self.maxtilt * 4 * z * (1 - z),
            self.maxtilt * 4 * (1 - 2 * z),
            self.totaltwist * z,
            self.totaltwist * numpy.ones_like(z),
            self.voltage * z,
            self.voltage * numpy.ones_like(z),
        ]
    )
    return init, self.__ode_3k(z, init)
def __apply_tension(self):
    """Solve the no-splay boundary-value problem with scikits.bvp1lg's
    colnew collocation solver and cache the result in self.bvp_solution.

    Raises ImportError (after printing a warning) when the optional
    bvp1lg package is not installed.

    Inspiration from:
    U{http://www.ee.ucl.ac.uk/~rjames/modelling/constant-order/oned/}."""
    try:
        from scikits.bvp1lg import colnew
    except ImportError:
        warning("bvp module not found.")
        raise
    # One entry per boundary condition: three at z=0, three at z=1.
    boundary_points = numpy.array([0, 0, 0, 1, 1, 1])
    tol = 1e-6 * numpy.ones_like(boundary_points)
    # theta, phi and u each obey a second-order equation.
    degrees = numpy.array([2, 2, 2])
    solution = colnew.solve(
        boundary_points,
        degrees,
        self.__ode_3k,
        self.__bc_nosplay,
        is_linear=False,
        initial_guess=self.__ic_nosplay,
        tolerances=tol,
        vectorized=True,
        maximum_mesh_size=1000,
    )
    self.bvp_solution = solution
def get_parameters(self, z=None):
    """Sample the cached BVP solution.

    Returns (z, theta, phi, u): tilt and twist angles in degrees and the
    electric potential, evaluated at `z` (defaults to the solver mesh).

    Inspiration from:
    U{http://www.ee.ucl.ac.uk/~rjames/modelling/constant-order/oned/}."""
    if z is None:
        z = self.bvp_solution.mesh
    data = self.bvp_solution(z)
    # The BVP variable is the angle from the cell normal; convert to the
    # tilt from the plane, expressed in degrees.
    theta = EMpy.utils.rad2deg(numpy.pi / 2.0 - data[:, 0])
    phi = EMpy.utils.rad2deg(data[:, 2])
    u = data[:, 4]
    return z, theta, phi, u
def _get_angles_from_file(self):
    """Interpolate the tilt-angle profile from the tabulated data file.

    File layout as read here: the first row holds the tabulated voltages
    (one per column), the remaining rows the angle profile over the
    normalized cell coordinate — TODO confirm against the file producer."""
    # interpolate data file
    data = numpy.loadtxt(self.data_file)
    # Normalized z-positions of the tabulated rows (rows 1..end).
    data_x = numpy.linspace(0, 1, data.shape[0] - 1)
    data_y = data[0, :]
    x = self.normalized_sample_points
    y = [self.voltage]
    angles = interp2(x, y, data_x, data_y, data[1:, :])
    return angles.squeeze()
def _get_angles_from_bvp(self):
    """Compute the tilt-angle profile by solving the BVP at the current
    voltage; only theta (degrees) is returned."""
    # solve bvp
    self.__apply_tension()
    z_ = self.normalized_sample_points
    z, theta, phi, u = self.get_parameters(z_)
    return theta
def createMultilayer(self):
    """Split the cell in nlayers homogeneous layers.

    Each layer gets an anisotropic material whose permittivity tensor is
    the diagonal (nE, nO, nO)**2 tensor Euler-rotated by the local tilt
    angle; key 0 of epsilon_tensor_known holds the corresponding DC
    ("electrical") tensor built from nE_electrical/nO_electrical."""
    m = []
    # self.angles are in degrees (see get_parameters); tlc holds the
    # per-layer thicknesses.
    for a, t in zip(EMpy.utils.deg2rad(self.angles), self.tlc):
        epsT = EMpy.materials.EpsilonTensor(
            epsilon_tensor_const=EMpy.utils.euler_rotate(
                numpy.diag([self.lc.nE, self.lc.nO, self.lc.nO]) ** 2,
                0.0,
                numpy.pi / 2.0,
                numpy.pi / 2.0 - a,
            )
            * EMpy.constants.eps0,
            epsilon_tensor_known={
                0: EMpy.utils.euler_rotate(
                    numpy.diag(
                        [
                            self.lc.nE_electrical,
                            self.lc.nO_electrical,
                            self.lc.nO_electrical,
                        ]
                    )
                    ** 2,
                    0.0,
                    numpy.pi / 2.0,
                    numpy.pi / 2.0 - a,
                )
                * EMpy.constants.eps0,
            },
        )
        m.append(
            Layer(EMpy.materials.AnisotropicMaterial("LC", epsilon_tensor=epsT), t)
        )
    return Multilayer(m)
def capacitance(self, area=1.0, wl=0):
    """Capacitance = eps0 * eps_r * area / thickness.

    Delegates to the equivalent homogeneous multilayer of the cell."""
    return self.createMultilayer().capacitance(area, wl)
@staticmethod
def isIsotropic():
    """Return False: the cell material is modelled with distinct
    ordinary/extraordinary indices (see createMultilayer)."""
    return False
def __str__(self):
    """Return the description of a LiquidCrystal."""
    # Summarizes data file, drive voltage, thicknesses and the LC's
    # ordinary/extraordinary refractive indices.
    return (
        "datafile: %s, voltage: %g, t_tot: %g, "
        "t_anchoring: %g, (nO, nE) = (%g, %g)"
    ) % (
        self.data_file,
        self.voltage,
        self.t_tot,
        self.t_anchoring,
        self.lc.nO,
        self.lc.nE,
    )
class Multilayer(object):
    """A Multilayer is a list of layers with some more methods."""

    def __init__(self, data=None):
        """Initialize the layer list with a shallow copy of `data`."""
        if data is None:
            data = []
        self.data = data[:]

    def __delitem__(self, i):
        """Delete an item from list."""
        del self.data[i]

    def __getitem__(self, i):
        """Get an item of the list of layers."""
        return self.data[i]

    def __getslice__(self, i, j):
        """Get a Multilayer from a slice of layers (Python 2 slicing)."""
        return Multilayer(self.data[i:j])

    def __len__(self):
        """Return the number of layers."""
        return len(self.data)

    def __setitem__(self, i, item):
        """Set an item of the list of layers."""
        self.data[i] = item

    def __setslice__(self, i, j, other):
        """Set a slice of layers (Python 2 slicing)."""
        self.data[i:j] = other

    def append(self, item):
        """Append a layer to the layers list."""
        self.data.append(item)

    def extend(self, other):
        """Extend the layers list with other layers."""
        self.data.extend(other)

    def insert(self, i, item):
        """Insert a new layer in the layers list at the position i."""
        self.data.insert(i, item)

    def remove(self, item):
        """Remove item from layers list."""
        self.data.remove(item)

    def pop(self, i=-1):
        """Remove and return the layer at position i (default: last)."""
        return self.data.pop(i)

    def isIsotropic(self):
        """Return True if all the layers of the multilayers are
        isotropic, False otherwise."""
        return numpy.asarray([m.isIsotropic() for m in self.data]).all()

    def simplify(self):
        """Return a new flatten Multilayer, with expanded LiquidCrystalCells."""
        # make a tmp list, copy of self, to work with
        tmp = self.data[:]
        # expand the liquid crystals into their equivalent stacks
        for il, l in enumerate(tmp):
            if isinstance(l, LiquidCrystalCell):
                tmp[il] = l.createMultilayer()

        # flatten the tmp list
        def helper(multilayer):
            """Recurse to flatten all the nested Multilayers."""
            ret = []
            for layer in multilayer:
                if not isinstance(layer, Multilayer):
                    ret.append(layer)
                else:
                    ret.extend(helper(layer[:]))
            return ret

        return Multilayer(helper(tmp))

    def capacitance(self, area=1.0, wl=0):
        """Capacitance = eps0 * eps_r * area / thickness.

        Layers of infinite thickness (semi-infinite claddings) are
        skipped; the finite layers add up as series capacitors."""
        m = self.simplify()
        ctot_1 = 0.0
        for l in m:
            if numpy.isfinite(l.thickness):
                ctot_1 += 1.0 / l.capacitance(area, wl)
        return 1.0 / ctot_1

    def __str__(self):
        """Return a description of the Multilayer."""
        if self.__len__() == 0:
            # BUGFIX: was the typo "<emtpy>"
            list_str = "<empty>"
        else:
            list_str = "\n".join(
                ["%d: %s" % (il, l.__str__()) for il, l in enumerate(self.data)]
            )
        return "Multilayer\n----------\n" + list_str
class Slice(Multilayer):
    """A vertical slice of a 2D cross-section: a stack of layers plus a
    horizontal width."""

    def __init__(self, width, *argv):
        # Optional extra args are forwarded to Multilayer (the layer list).
        Multilayer.__init__(self, *argv)
        self.width = width

    def heights(self):
        # Thickness of each layer, in stacking order.
        return numpy.array([l.thickness for l in self])

    def ys(self):
        # Layer boundary ordinates, starting at 0.
        return numpy.r_[0.0, self.heights().cumsum()]

    def height(self):
        # Total stack height.
        return self.heights().sum()

    def find_layer(self, y):
        # Layer containing ordinate y, clamped to the first/last layer.
        l = numpy.where(self.ys() <= y)[0]
        if len(l) > 0:
            return self[min(l[-1], len(self) - 1)]
        else:
            return self[0]

    def plot(self, x0, x1, nmin, nmax, wl=1.55e-6):
        # Draw the slice between abscissae x0 and x1 as grey rectangles;
        # the grey level encodes the refractive index at wl within
        # [nmin, nmax] (white = nmin, black = nmax).
        try:
            import pylab
        except ImportError:
            warning("no pylab installed")
            return
        y0 = 0
        # ytot = sum([l.thickness for l in self])
        for l in self:
            y1 = y0 + l.thickness
            n = l.mat.n(wl)
            r = 1.0 - (1.0 * (n - nmin) / (nmax - nmin))
            pylab.fill(
                [x0, x1, x1, x0], [y0, y0, y1, y1], ec="yellow", fc=(r, r, r), alpha=0.5
            )
            y0 = y1
        pylab.axis("image")

    def __str__(self):
        # Width plus the inherited Multilayer description.
        return "width = %e\n%s" % (self.width, Multilayer.__str__(self))
class CrossSection(list):
    """A 2D cross-section: a list of vertical Slices laid side by side."""

    def __str__(self):
        return "\n".join("%s" % s for s in self)

    def widths(self):
        """Widths of all slices, left to right."""
        return numpy.array([sl.width for sl in self])

    def xs(self):
        """Cumulative x boundaries of the slices, starting at 0."""
        return numpy.r_[0.0, self.widths().cumsum()]

    def ys(self):
        """Union of all slices' y boundaries."""
        merged = numpy.concatenate([sl.ys() for sl in self])
        # get rid of numerical errors
        merged = numpy.round(merged * 1e10) * 1e-10
        return numpy.unique(merged)

    def width(self):
        """Total width of the cross-section."""
        return self.widths().sum()

    @staticmethod
    def _subdivide(bounds, n_per_region, label):
        """Split each [bounds[i], bounds[i+1]] region into n points."""
        nregions = len(bounds) - 1
        if numpy.isscalar(n_per_region):
            counts = (n_per_region,) * nregions
        elif len(n_per_region) != nregions:
            raise ValueError("wrong %s dim" % label)
        else:
            counts = n_per_region
        segments = [
            numpy.linspace(a, b, n + 1)[:-1]
            for a, b, n in zip(bounds[:-1], bounds[1:], counts)
        ]
        return numpy.r_[numpy.concatenate(segments), bounds[-1]]

    def grid(self, nx_per_region, ny_per_region):
        """Build a rectilinear mesh with the requested points per region."""
        X = self._subdivide(self.xs(), nx_per_region, "nx_per_region")
        Y = self._subdivide(self.ys(), ny_per_region, "ny_per_region")
        return X, Y

    def find_slice(self, x):
        """Slice containing abscissa x, clamped to the first/last slice."""
        hits = numpy.where(self.xs() <= x)[0]
        if len(hits) == 0:
            return self[0]
        return self[min(hits[-1], len(self) - 1)]

    def _epsfunc(self, x, y, wl):
        """Scalar relative permittivity at the point (x, y)."""
        if not (numpy.isscalar(x) and numpy.isscalar(y)):
            raise ValueError("only scalars, please!")
        return self.find_slice(x).find_layer(y).mat.n(wl) ** 2

    def epsfunc(self, x, y, wl):
        """Permittivity sampled on the tensor grid x × y (complex array)."""
        eps = numpy.ones((len(x), len(y)), dtype=complex)
        for ix, xx in enumerate(x):
            for iy, yy in enumerate(y):
                eps[ix, iy] = self._epsfunc(xx, yy, wl)
        return eps

    def plot(self, wl=1.55e-6):
        """Plot all slices in grayscale by refractive index (needs pylab)."""
        try:
            import pylab
        except ImportError:
            warning("no pylab installed")
            return
        ns = [[layer.mat.n(wl) for layer in sl] for sl in self]
        nmax = max(max(ns))
        nmin = min(min(ns))
        left = 0
        for sl in self:
            right = left + sl.width
            sl.plot(left, right, nmin, nmax, wl=wl)
            left = right
        pylab.axis("image")
class Peak(object):
    """A detected peak: refined position/value, the raw sample it came
    from, and its full width at half maximum."""

    def __init__(self, x, y, idx, x0, y0, xFWHM_1, xFWHM_2):
        # Refined (interpolated) peak coordinates.
        self.x = x
        self.y = y
        # Index and coordinates of the raw sample near the peak.
        self.idx = idx
        self.x0 = x0
        self.y0 = y0
        # Half-maximum crossing abscissae and the resulting width.
        self.xFWHM_1 = xFWHM_1
        self.xFWHM_2 = xFWHM_2
        self.FWHM = numpy.abs(xFWHM_2 - xFWHM_1)

    def __str__(self):
        fields = (self.x, self.y, self.idx, self.x0, self.y0, self.FWHM)
        return "(%g, %g) [%d, (%g, %g)] FWHM = %s" % fields
def deg2rad(x):
    """Convert an angle (scalar or ndarray) from degrees to radians."""
    return (x / 180.0) * numpy.pi


def rad2deg(x):
    """Convert an angle (scalar or ndarray) from radians to degrees."""
    return (x / numpy.pi) * 180.0
def norm(x):
    """Return the Euclidean norm of the 1D vector `x`."""
    squared = numpy.vdot(x, x)
    return numpy.sqrt(squared)


def normalize(x):
    """Return `x` scaled to unit Euclidean norm."""
    return x / norm(x)
def euler_rotate(X, phi, theta, psi):
    """Euler rotate.

    Apply the similarity transform A X A^-1, where A is the Euler
    rotation matrix for the angles phi, theta, psi.

    INPUT
    X = 2d numpy.array.
    phi, theta, psi = rotation angles.

    OUTPUT
    Rotated matrix = 2d numpy.array.

    NOTE
    see http://mathworld.wolfram.com/EulerAngles.html
    """
    # Precompute the six trigonometric factors once.
    cphi, sphi = numpy.cos(phi), numpy.sin(phi)
    cth, sth = numpy.cos(theta), numpy.sin(theta)
    cpsi, spsi = numpy.cos(psi), numpy.sin(psi)
    A = numpy.array(
        [
            [
                cpsi * cphi - cth * sphi * spsi,
                -spsi * cphi - cth * sphi * cpsi,
                sth * sphi,
            ],
            [
                cpsi * sphi + cth * cphi * spsi,
                -spsi * sphi + cth * cphi * cpsi,
                -sth * cphi,
            ],
            [
                sth * spsi,
                sth * cpsi,
                cth,
            ],
        ]
    )
    return numpy.dot(A, numpy.dot(X, scipy.linalg.inv(A)))
def snell(theta_inc, n):
    """Snell law.

    Propagate the incidence angle through a stack of refractive indices.

    INPUT
    theta_inc = angle of incidence.
    n = 1D numpy.array of refractive indices.

    OUTPUT
    theta = 1D numpy.array of propagation angles, one per layer.
    """
    angles = numpy.zeros_like(n)
    angles[0] = theta_inc
    # n[i-1] sin(theta[i-1]) = n[i] sin(theta[i]) at each interface.
    for i in range(1, n.size):
        ratio = n[i - 1] / n[i]
        angles[i] = numpy.arcsin(ratio * numpy.sin(angles[i - 1]))
    return angles
def group_delay_and_dispersion(wls, y):
    """Compute group delay and dispersion.
    INPUT
    wls = wavelengths (ndarray).
    y = function (ndarray).
    OUTPUT
    phi = phase of function in rad.
    tau = group delay in ps.
    Dpsnm = dispersion in ps/nm.
    NOTE
    wls and y must have the same shape.
    phi has the same shape as wls.
    tau has wls.shape - (..., 1)
    Dpsnm has wls.shape - (..., 2)
    """
    # transform the input in ndarrays
    wls = numpy.asarray(wls)
    y = numpy.asarray(y)
    # check for good input
    if wls.shape != y.shape:
        raise ValueError("wls and y must have the same shape.")
    f = EMpy.constants.c / wls
    df = numpy.diff(f)
    # ps-per-second over nm-per-meter conversion factor.
    toPSNM = 1e12 / 1e9
    cnmps = EMpy.constants.c / toPSNM
    # phase: unwrap on 4*angle then scale back — presumably to tolerate
    # phase jumps smaller than pi; TODO confirm rationale.
    phi = numpy.unwrap(4.0 * numpy.angle(y)) / 4.0
    # group delay tau = -(1/2pi) dphi/df, converted to ps.
    tau = -0.5 / numpy.pi * numpy.diff(phi) / df * 1e12
    # dispersion in ps/nm
    Dpsnm = -0.5 / numpy.pi / cnmps * f[1:-1] ** 2 * numpy.diff(phi, 2) / df[0:-1] ** 2
    return phi, tau, Dpsnm
def rix2losses(n, wl):
    """Return real(n), imag(n), alpha, alpha_cm1, alpha_dBcm1, given a
    complex refractive index.  Power goes as: P = P0 exp(-alpha*z)."""
    n_re = numpy.real(n)
    n_im = numpy.imag(n)
    # Power attenuation constant in 1/m, then 1/cm, then dB/cm.
    alpha = 4 * numpy.pi * n_im / wl
    alpha_cm1 = alpha / 100.0
    alpha_dBcm1 = 10 * numpy.log10(numpy.exp(1)) * alpha_cm1
    return n_re, n_im, alpha, alpha_cm1, alpha_dBcm1
def loss_cm2rix(n_real, alpha_cm1, wl):
    """Return complex refractive index, given real index (n_real), absorption
    coefficient (alpha_cm1) in cm^-1, and wavelength (wl) in meters.
    Do not pass more than one argument as array, will return erroneous result."""
    # Inverse of rix2losses: ni such that alpha = 4*pi*ni/wl (in 1/cm).
    imag_part = 100 * alpha_cm1 * wl / (numpy.pi * 4)
    return n_real - 1j * imag_part
def loss_m2rix(n_real, alpha_m1, wl):
    """Return complex refractive index, given real index (n_real), absorption
    coefficient (alpha_m1) in m^-1, and wavelength (wl) in meters.
    Do not pass more than one argument as array, will return erroneous result."""
    # Inverse of rix2losses: ni such that alpha = 4*pi*ni/wl (in 1/m).
    imag_part = alpha_m1 * wl / (numpy.pi * 4)
    return n_real - 1j * imag_part
def loss_dBcm2rix(n_real, alpha_dBcm1, wl):
    """Return complex refractive index, given real index (n_real), absorption
    coefficient (alpha_dBcm1) in dB/cm, and wavelength (wl) in meters.
    Do not pass more than one argument as array, will return erroneous result."""
    # Inverse of rix2losses' dB/cm figure.
    imag_part = 10 * alpha_dBcm1 * wl / (numpy.log10(numpy.exp(1)) * 4 * numpy.pi)
    return n_real - 1j * imag_part
def wl2f(wl0, dwl):
    """Convert a central wavelength and an interval to frequency.

    Returns (f0, df): the centre frequency (arithmetic mean of the edge
    frequencies) and the frequency width of the wavelength band
    [wl0 - dwl/2, wl0 + dwl/2]."""
    wl1 = wl0 - dwl / 2.0
    wl2 = wl0 + dwl / 2.0
    # Frequency edges swap order: longer wavelength -> lower frequency.
    f1 = EMpy.constants.c / wl2
    f2 = EMpy.constants.c / wl1
    f0 = (f1 + f2) / 2.0
    df = f2 - f1
    return f0, df
def f2wl(f0, df):
    """Convert a central frequency and an interval to wavelength.

    The same arithmetic (c/x on both band edges) converts in either
    direction, so this simply reuses wl2f.  Note the returned centre is
    the arithmetic mean of the converted edges, so a round trip is only
    approximate."""
    return wl2f(f0, df)
def find_peaks(x, y, threshold=1e-6):
    """Locate local maxima of y(x), spline-refined, sorted by height.

    Returns a list of Peak objects in descending y order.  If there is
    no interior maximum, the single global maximum is returned with a
    zero FWHM.  Peaks below threshold * y.max() are discarded.
    """
    # find peaks' candidates: where the derivative changes sign + -> -
    dy = numpy.diff(y)
    ddy = numpy.diff(numpy.sign(dy))
    # BUGFIX: numpy.where returns a tuple; take the index array and shift
    # by one so each entry points at the sample after the sign change.
    # The old code kept the raw tuple, so len(idxs) was always 1 and the
    # loop below received a whole index-array instead of scalar indices.
    idxs = numpy.where(ddy < 0)[0] + 1
    if len(idxs) == 0:
        # there is only 1 min in f, so the max is on either boundary
        # get the max and set FWHM = 0
        idx = numpy.argmax(y)
        p = Peak(x[idx], y[idx], idx, x[idx], y[idx], x[idx], x[idx])
        # return a list of one element
        return [p]
    # refine search with splines
    tck = scipy.interpolate.splrep(x, y)

    # look for zero derivative of the spline
    def absdy(x_):
        return numpy.abs(scipy.interpolate.splev(x_, tck, der=1))

    peaks = []
    for idx in idxs:
        # look around the candidate sample for the true extremum
        xtol = (x.max() - x.min()) * 1e-6
        xopt = scipy.optimize.fminbound(
            absdy, x[idx - 1], x[idx + 1], xtol=xtol, disp=False
        )
        yopt = scipy.interpolate.splev(xopt, tck)
        if yopt > threshold * y.max():
            # FWHM: half-maximum crossings bracketing the refined peak
            tckFWHM = scipy.interpolate.splrep(x, y - 0.5 * yopt)
            roots = scipy.interpolate.sproot(tckFWHM)
            idxFWHM = numpy.searchsorted(roots, xopt)
            if idxFWHM <= 0:
                xFWHM_1 = x[0]
            else:
                xFWHM_1 = roots[idxFWHM - 1]
            if idxFWHM >= len(roots):
                xFWHM_2 = x[-1]
            else:
                xFWHM_2 = roots[idxFWHM]
            p = Peak(xopt, yopt, idx, x[idx], y[idx], xFWHM_1, xFWHM_2)
            peaks.append(p)
    # BUGFIX: list.sort(cmp=...) was removed in Python 3; sort by height
    # descending with a key function instead.
    peaks.sort(key=lambda peak: peak.y, reverse=True)
    return peaks
def cond(M):
    """Return the condition number of the 2D array M (ratio of the
    largest to the smallest singular value)."""
    singular_values = scipy.linalg.svdvals(M)
    return singular_values.max() / singular_values.min()
def interp2(x, y, xp, yp, fp):
    """Interpolate a 2D complex array.

    `fp` is sampled on the grid (xp, yp); returns its bilinear
    interpolation at the grid (x, y).  Real and imaginary parts are
    interpolated separately.
    :rtype : numpy.array
    """
    # First pass: interpolate every source row along y.
    rows_re = numpy.zeros((len(xp), len(y)))
    rows_im = numpy.zeros((len(xp), len(y)))
    for i in range(len(xp)):
        rows_re[i, :] = numpy.interp(y, yp, numpy.real(fp[i, :]))
        rows_im[i, :] = numpy.interp(y, yp, numpy.imag(fp[i, :]))
    # Second pass: interpolate every target column along x.
    out_re = numpy.zeros((len(x), len(y)))
    out_im = numpy.zeros((len(x), len(y)))
    for j in range(len(y)):
        out_re[:, j] = numpy.interp(x, xp, rows_re[:, j])
        out_im[:, j] = numpy.interp(x, xp, rows_im[:, j])
    return out_re + 1j * out_im
def trapz2(f, x=None, y=None, dx=1.0, dy=1.0):
    """Double integrate f over its grid with the trapezoidal rule.

    Integrates along the last axis (samples at `y`, or spacing dy)
    first, then along the first axis (samples at `x`, or spacing dx).
    NOTE(review): numpy.trapz is removed in numpy 2.0 (renamed
    numpy.trapezoid) — this will need porting."""
    return numpy.trapz(numpy.trapz(f, x=y, dx=dy), x=x, dx=dx)
def centered1d(x):
    """Midpoints of consecutive samples of x (length len(x) - 1)."""
    left, right = x[:-1], x[1:]
    return (right + left) / 2.0
def centered2d(x):
    """Cell-centre average of a 2D nodal grid (shape (m-1, n-1))."""
    corners = x[1:, 1:] + x[1:, :-1] + x[:-1, 1:] + x[:-1, :-1]
    return corners / 4.0
def blackbody(f, T):
    """Planck black-body spectral radiance at frequency f and temperature T.

    B(f, T) = (2 h f^3 / c^2) * 1 / (exp(h f / (k T)) - 1),
    using the constants from EMpy.constants."""
    return (
        2
        * EMpy.constants.h
        * f ** 3
        / (EMpy.constants.c ** 2)
        * 1.0
        / (numpy.exp(EMpy.constants.h * f / (EMpy.constants.k * T)) - 1)
    )
def warning(s):
    """Print a warning on the stdout.

    :param s: warning message
    :type s: str
    :rtype : str
    """
    message = "WARNING --- {}".format(s)
    print(message)
class ProgressBar(object):
    """Creates a text-based progress bar. Call the object with the `print'
    command to see the progress bar, which looks something like this:
    [=======>          22%                  ]
    You may specify the progress bar's width, min and max values on init.
    """

    def __init__(self, minValue=0, maxValue=100, totalWidth=80):
        """Create a bar spanning [minValue, maxValue], totalWidth chars wide."""
        self.progBar = "[]"  # This holds the progress bar string
        self.min = minValue
        self.max = maxValue
        self.span = maxValue - minValue
        self.width = totalWidth
        self.reset()

    def reset(self):
        """Restart the timer and zero the progress."""
        self.start_time = time.time()
        self.amount = 0  # When amount == max, we are 100% done
        self.updateAmount(0)  # Build progress bar string

    def updateAmount(self, newAmount=0):
        """Update the progress bar with the new amount (with min and max
        values set at initialization; if it is over or under, it takes the
        min or max value as a default."""
        if newAmount < self.min:
            newAmount = self.min
        if newAmount > self.max:
            newAmount = self.max
        self.amount = newAmount
        # Figure out the new percent done, round to an integer
        diffFromMin = float(self.amount - self.min)
        percentDone = (diffFromMin / float(self.span)) * 100.0
        percentDone = int(round(percentDone))
        # Figure out how many hash bars the percentage should be
        # (interior width = total minus brackets minus the time field).
        allFull = self.width - 2 - 18
        numHashes = (percentDone / 100.0) * allFull
        numHashes = int(round(numHashes))
        # Build a progress bar with an arrow of equal signs; special cases for
        # empty and full
        if numHashes == 0:
            self.progBar = "[>%s]" % (" " * (allFull - 1))
        elif numHashes == allFull:
            self.progBar = "[%s]" % ("=" * allFull)
        else:
            self.progBar = "[%s>%s]" % (
                "=" * (numHashes - 1),
                " " * (allFull - numHashes),
            )
        # figure out where to put the percentage, roughly centered.
        # BUGFIX: use integer division; plain / yields a float under
        # Python 3 and made the string slicing below raise TypeError.
        percentPlace = (len(self.progBar) // 2) - len(str(percentDone))
        percentString = " " + str(percentDone) + "% "
        elapsed_time = time.time() - self.start_time
        # slice the percentage into the bar
        self.progBar = "".join(
            [
                self.progBar[0:percentPlace],
                percentString,
                self.progBar[percentPlace + len(percentString):],
            ]
        )
        if percentDone > 0:
            # Append elapsed / estimated-remaining seconds.
            self.progBar += " %6ds / %6ds" % (
                int(elapsed_time),
                int(elapsed_time * (100.0 / percentDone - 1)),
            )

    def update(self, value, every=1):
        """Updates the amount, and writes to stdout. Prints a carriage return
        first, so it will overwrite the current line in stdout."""
        if value % every == 0 or value >= self.max:
            print("\r", end=" ")
            self.updateAmount(value)
            sys.stdout.write(self.progBar)
            sys.stdout.flush()
| lbolla/EMpy | EMpy/utils.py | Python | mit | 42,390 | [
"CRYSTAL"
] | cb05930542d973a2367943c7ffc13c12eb7074a08f95d685418ef20f9b785c40 |
# -*- coding: utf-8 -*-
#LICENCE
#
# This File is part of the Webbouqueteditor plugin
# and licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported
# License if not stated otherwise in a files head. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc-sa/3.0/ or send a letter to Creative
# Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
from enigma import eServiceReference, eServiceCenter, eDVBDB
from Components.Sources.Source import Source
from Screens.ChannelSelection import service_types_tv, MODE_TV, MODE_RADIO
from Components.config import config
from os import remove, path, popen
from Screens.InfoBar import InfoBar
from ServiceReference import ServiceReference
from Components.ParentalControl import parentalControl
from re import compile as re_compile
from Components.NimManager import nimmanager
class BouquetEditor(Source):
    """Web API backend for editing enigma2 bouquets: add/remove/move
    bouquets and services, alternatives, markers, renaming, parental
    lock toggling and settings backup/restore."""

    # Operation codes: handleCommand() dispatches on the one selected at
    # construction time (see __init__'s `func` argument).
    ADD_BOUQUET = 0
    REMOVE_BOUQUET = 1
    MOVE_BOUQUET = 2
    ADD_SERVICE_TO_BOUQUET = 3
    REMOVE_SERVICE = 4
    MOVE_SERVICE = 5
    ADD_PROVIDER_TO_BOUQUETLIST = 6
    ADD_SERVICE_TO_ALTERNATIVE = 7
    REMOVE_ALTERNATIVE_SERVICES = 8
    TOGGLE_LOCK = 9
    BACKUP = 10
    RESTORE = 11
    RENAME_SERVICE = 12
    ADD_MARKER_TO_BOUQUET = 13

    # Destination of the settings backup produced by the BACKUP command.
    BACKUP_PATH = "/tmp"
    BACKUP_FILENAME = "webbouqueteditor_backup.tar"
def __init__(self, session, func=ADD_BOUQUET):
    """Create the web Source; `func` selects which operation
    handleCommand() will run (one of the class constants above)."""
    Source.__init__(self)
    self.func = func
    self.session = session
    self.command = None
    self.bouquet_rootstr = ""
    # Default (success, message) result until a command is handled.
    self.result = ( False, "one two three four unknown command" )
def handleCommand(self, cmd):
    """Dispatch `cmd` (the request parameter dict) to the operation
    selected by self.func and store the (success, message) tuple in
    self.result.  NOTE(review): the `is` comparisons work only because
    the codes are small interned ints."""
    print "[WebComponents.BouquetEditor] handleCommand with cmd = ", cmd
    if self.func is self.ADD_BOUQUET:
        self.result = self.addToBouquet(cmd)
    elif self.func is self.MOVE_BOUQUET:
        self.result = self.moveBouquet(cmd)
    elif self.func is self.MOVE_SERVICE:
        self.result = self.moveService(cmd)
    elif self.func is self.REMOVE_BOUQUET:
        self.result = self.removeBouquet(cmd)
    elif self.func is self.REMOVE_SERVICE:
        self.result = self.removeService(cmd)
    elif self.func is self.ADD_SERVICE_TO_BOUQUET:
        self.result = self.addServiceToBouquet(cmd)
    elif self.func is self.ADD_PROVIDER_TO_BOUQUETLIST:
        self.result = self.addProviderToBouquetlist(cmd)
    elif self.func is self.ADD_SERVICE_TO_ALTERNATIVE:
        self.result = self.addServiceToAlternative(cmd)
    elif self.func is self.REMOVE_ALTERNATIVE_SERVICES:
        self.result = self.removeAlternativeServices(cmd)
    elif self.func is self.TOGGLE_LOCK:
        self.result = self.toggleLock(cmd)
    elif self.func is self.BACKUP:
        self.result = self.backupFiles(cmd)
    elif self.func is self.RESTORE:
        self.result = self.restoreFiles(cmd)
    elif self.func is self.RENAME_SERVICE:
        self.result = self.renameService(cmd)
    elif self.func is self.ADD_MARKER_TO_BOUQUET:
        self.result = self.addMarkerToBouquet(cmd)
    else:
        self.result = ( False, "one two three four unknown command" )
def addToBouquet(self, param):
    """Create a new, empty bouquet named param["name"]; param["mode"]
    (optional) selects TV or radio.  Returns (success, message)."""
    print "[WebComponents.BouquetEditor] addToBouquet with param = ", param
    bName = param["name"]
    if bName is None:
        return (False, "No bouquet name given!")
    mode = MODE_TV # init
    if "mode" in param:
        if param["mode"] is not None:
            mode = int(param["mode"])
    return self.addBouquet(bName, mode, None)
def addBouquet(self, bName, mode, services):
    """Create a new userbouquet named after bName and optionally fill it
    with `services` (a list of service references, or None).
    Requires config.usage.multibouquet to be enabled."""
    if config.usage.multibouquet.value:
        mutableBouquetList = self.getMutableBouquetList(mode)
        if mutableBouquetList:
            # Service type 1 = TV, 2 = radio in the bouquet reference.
            if mode == MODE_TV:
                bName += " (TV)"
                sref = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET \"userbouquet.%s.tv\" ORDER BY bouquet'%(self.buildBouquetID(bName, "userbouquet.", mode))
            else:
                bName += " (Radio)"
                sref = '1:7:2:0:0:0:0:0:0:0:FROM BOUQUET \"userbouquet.%s.radio\" ORDER BY bouquet'%(self.buildBouquetID(bName, "userbouquet.", mode))
            new_bouquet_ref = eServiceReference(sref)
            # NOTE: addService evidently returns falsy on success (see
            # the failure print in the loop below).
            if not mutableBouquetList.addService(new_bouquet_ref):
                mutableBouquetList.flushChanges()
                eDVBDB.getInstance().reloadBouquets()
                mutableBouquet = self.getMutableList(new_bouquet_ref)
                if mutableBouquet:
                    mutableBouquet.setListName(bName)
                    if services is not None:
                        for service in services:
                            if mutableBouquet.addService(service):
                                print "add", service.toString(), "to new bouquet failed"
                    mutableBouquet.flushChanges()
                    self.setRoot(self.bouquet_rootstr)
                    return (True, "Bouquet %s created." % bName)
                else:
                    return (False, "Get mutable list for new created bouquet failed!")
            else:
                return (False, "Bouquet %s already exists." % bName)
        else:
            return (False, "Bouquetlist is not editable!")
    else:
        return (False, "Multi-Bouquet is not enabled!")
def addProviderToBouquetlist(self, param):
    """Create a new bouquet named after the provider referenced by
    param["sProviderRef"] and fill it with all of that provider's
    services."""
    print "[WebComponents.BouquetEditor] addProviderToBouquet with param = ", param
    refstr = sref = param["sProviderRef"]
    if refstr is None:
        return (False, "No provider given!")
    mode = MODE_TV # init
    if "mode" in param:
        if param["mode"] is not None:
            mode = int(param["mode"])
    ref = eServiceReference(refstr)
    provider = ServiceReference(ref)
    providerName = provider.getServiceName()
    serviceHandler = eServiceCenter.getInstance()
    services = serviceHandler.list(provider.ref)
    # getContent('R', True): presumably raw service references — confirm
    # against the eServiceCenter API.
    return self.addBouquet(providerName, mode, services and services.getContent('R', True))
def removeBouquet(self, param):
    """Delete a bouquet: remove it from the bouquet list and delete its
    userbouquet file.  Also used by removeService to drop alternative
    groups; then param["BouquetRefRoot"] names the parent bouquet."""
    print "[WebComponents.BouquetEditor] removeBouquet with param = ", param
    refstr = sref = param["sBouquetRef"]
    if refstr is None:
        return (False, "No bouquet name given!")
    mode = MODE_TV # init
    if "mode" in param:
        if param["mode"] is not None:
            mode = int(param["mode"])
    if param.has_key("BouquetRefRoot"):
        bouquet_root = param["BouquetRefRoot"] # only when removing alternative
    else:
        bouquet_root = None
    # Extract the userbouquet filename from the 'FROM BOUQUET "..."'
    # part of the service reference string.
    pos = refstr.find('FROM BOUQUET "')
    filename = None
    if pos != -1:
        refstr = refstr[pos+14:]
        pos = refstr.find('"')
        if pos != -1:
            filename = '/etc/enigma2/' + refstr[:pos] # FIXMEEE !!! HARDCODED /etc/enigma2
    ref = eServiceReference(sref)
    bouquetName = self.getName(ref)
    if not bouquetName:
        bouquetName = filename
    if bouquet_root:
        mutableList = self.getMutableList(eServiceReference(bouquet_root))
    else:
        mutableList = self.getMutableBouquetList(mode)
    if ref.valid() and mutableList is not None:
        # removeService evidently returns falsy on success.
        if not mutableList.removeService(ref):
            mutableList.flushChanges()
            self.setRoot(self.bouquet_rootstr)
        else:
            return (False, "Bouquet %s removed failed." % filename)
    else:
        return (False, "Bouquet %s removed failed, sevicerefence or mutable list is not valid." % filename)
    try:
        if filename is not None:
            remove(filename)
        return (True, "Bouquet %s deleted." % bouquetName)
    except OSError:
        return (False, "Error: Bouquet %s could not deleted, OSError." % filename)
def moveBouquet(self, param):
print "[WebComponents.BouquetEditor] moveBouquet with param = ", param
sBouquetRef = param["sBouquetRef"]
if sBouquetRef is None:
return (False, "No bouquet name given!")
mode = MODE_TV # init
if "mode" in param:
if param["mode"] is not None:
mode = int(param["mode"])
position = None
if "position" in param:
if param["position"] is not None:
position = int(param["position"])
if position is None:
return (False, "No position given!")
mutableBouquetList = self.getMutableBouquetList(mode)
if mutableBouquetList is not None:
ref = eServiceReference(sBouquetRef)
mutableBouquetList.moveService(ref, position)
mutableBouquetList.flushChanges()
self.setRoot(self.bouquet_rootstr)
return (True, "Bouquet %s moved." % self.getName(ref))
else:
return (False, "Bouquet %s can not be moved." % self.getName(ref))
def removeService(self, param):
    """Remove service param["sRef"] from bouquet param["sBouquetRef"].

    Alternative groups are deleted through removeBouquet so their
    backing bouquet file is removed as well."""
    print "[WebComponents.BouquetEditor] removeService with param = ", param
    sBouquetRef = param["sBouquetRef"]
    if sBouquetRef is None:
        return (False, "No bouquet given!")
    sRef = None
    if "sRef" in param:
        if param["sRef"] is not None:
            sRef = param["sRef"]
    if sRef is None:
        return (False, "No service given!")
    ref = eServiceReference(sRef)
    if ref.flags & eServiceReference.isGroup: # check if service is an alternative, if so delete it with removeBouquet
        new_param = {}
        new_param["sBouquetRef"] = sRef
        new_param["mode"] = None # of no interest when passing BouquetRefRoot
        new_param["BouquetRefRoot"] = sBouquetRef
        returnValue = self.removeBouquet(new_param)
        if returnValue[0]:
            return (True, "Service %s removed." % self.getName(ref))
    else:
        bouquetRef = eServiceReference(sBouquetRef)
        mutableBouquetList = self.getMutableList(bouquetRef)
        if mutableBouquetList is not None:
            # removeService evidently returns falsy on success.
            if not mutableBouquetList.removeService(ref):
                mutableBouquetList.flushChanges()
                self.setRoot(sBouquetRef)
                return (True, "Service %s removed from bouquet %s." % (self.getName(ref), self.getName(bouquetRef)))
    return (False, "Service %s can not be removed." % self.getName(ref))
def moveService(self, param):
    """Move a service inside bouquet param["sBouquetRef"] to index
    param["position"].  Returns (success, message)."""
    print "[WebComponents.BouquetEditor] moveService with param = ", param
    sBouquetRef = param["sBouquetRef"]
    if sBouquetRef is None:
        return (False, "No bouquet given!")
    sRef = None
    if "sRef" in param:
        if param["sRef"] is not None:
            sRef = param["sRef"]
    if sRef is None:
        return (False, "No service given!")
    position = None
    if "position" in param:
        if param["position"] is not None:
            position = int(param["position"])
    if position is None:
        return (False, "No position given!")
    mutableBouquetList = self.getMutableList(eServiceReference(sBouquetRef))
    if mutableBouquetList is not None:
        ref = eServiceReference(sRef)
        mutableBouquetList.moveService(ref, position)
        mutableBouquetList.flushChanges()
        self.setRoot(sBouquetRef)
        return (True, "Service %s moved." % self.getName(ref))
    return (False, "Service can not be moved.")
def addServiceToBouquet(self, param):
    """Add service param["sRef"] to bouquet param["sBouquetRef"],
    optionally renamed (param["Name"]) and inserted before
    param["sRefBefore"]."""
    print "[WebComponents.BouquetEditor] addService with param = ", param
    sBouquetRef = param["sBouquetRef"]
    if sBouquetRef is None:
        return (False, "No bouquet given!")
    sRef = None
    if "sRef" in param:
        if param["sRef"] is not None:
            sRef = param["sRef"]
    if sRef is None:
        return (False, "No service given!")
    sName = None
    if "Name" in param:
        if param["Name"] is not None:
            sName = param["Name"]
    # Empty reference means "append at the end".
    sRefBefore = eServiceReference()
    if "sRefBefore" in param:
        if param["sRefBefore"] is not None:
            sRefBefore = eServiceReference(param["sRefBefore"])
    bouquetRef = eServiceReference(sBouquetRef)
    mutableBouquetList = self.getMutableList(bouquetRef)
    if mutableBouquetList is not None:
        ref = eServiceReference(sRef)
        if sName:
            ref.setName(sName)
        # addService evidently returns falsy on success.
        if not mutableBouquetList.addService(ref, sRefBefore):
            mutableBouquetList.flushChanges()
            self.setRoot(sBouquetRef)
            return (True, "Service %s added." % self.getName(ref))
        else:
            bouquetName = self.getName(bouquetRef)
            return (False, "Service %s already exists in bouquet %s." % (self.getName(ref), bouquetName))
    return (False, "This service can not be added.")
def addMarkerToBouquet(self, param):
    """Insert a marker entry named param["Name"] into bouquet
    param["sBouquetRef"], optionally before param["sRefBefore"].

    The marker number is counted up until addService accepts the
    reference (i.e. until it no longer collides with an existing one)."""
    print "[WebComponents.BouquetEditor] addMarkerToBouquet with param = ", param
    sBouquetRef = param["sBouquetRef"]
    if sBouquetRef is None:
        return (False, "No bouquet given!")
    name = None
    if "Name" in param:
        if param["Name"] is not None:
            name = param["Name"]
    if name is None:
        return (False, "No marker-name given!")
    # Empty reference means "append at the end".
    sRefBefore = eServiceReference()
    if "sRefBefore" in param:
        if param["sRefBefore"] is not None:
            sRefBefore = eServiceReference(param["sRefBefore"])
    bouquet_ref = eServiceReference(sBouquetRef)
    mutableBouquetList = self.getMutableList(bouquet_ref)
    cnt = 0
    while mutableBouquetList:
        # 1:64:<n> is the marker entry type (0x40); bump n on collision.
        service_str = '1:64:%d:0:0:0:0:0:0:0::%s'%(cnt, name)
        ref = eServiceReference(service_str)
        if not mutableBouquetList.addService(ref, sRefBefore):
            mutableBouquetList.flushChanges()
            self.setRoot(sBouquetRef)
            return (True, "Marker added.")
        cnt+=1
    return (False, "Internal error!")
def renameService(self, param):
    """Rename a service or a bouquet.

    param keys: sRef (what to rename), newName, sBouquetRef (parent
    bouquet; required for plain services, given for alternatives),
    sRefBefore (optional position anchor when re-adding a renamed
    service), mode (TV/radio, used when renaming a top-level bouquet).
    Returns (success, message).
    """
    sRef = None
    if "sRef" in param:
        if param["sRef"] is not None:
            sRef = param["sRef"]
    if sRef is None:
        return (False, "No service given!")
    sName = None
    if "newName" in param:
        if param["newName"] is not None:
            sName = param["newName"]
    if sName is None:
        return (False, "No new servicename given!")
    sBouquetRef = None
    if "sBouquetRef" in param:
        if param["sBouquetRef"] is not None:
            sBouquetRef = param["sBouquetRef"]
    cur_ref = eServiceReference(sRef)
    if cur_ref.flags & eServiceReference.mustDescent:
        # bouquets or alternatives can be renamed with setListName directly
        mutableBouquetList = self.getMutableList(cur_ref)
        if mutableBouquetList:
            mutableBouquetList.setListName(sName)
            mutableBouquetList.flushChanges()
            if sBouquetRef: # BouquetRef is given when renaming alternatives
                self.setRoot(sBouquetRef)
            else:
                mode = MODE_TV # mode is given when renaming bouquet
                if "mode" in param:
                    if param["mode"] is not None:
                        mode = int(param["mode"])
                if mode == MODE_TV:
                    bouquet_rootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'
                else:
                    # BUGFIX: the radio bouquet root is service type 2
                    # (cf. addBouquet); it previously reused type 1 (TV),
                    # refreshing the wrong root after a radio rename.
                    bouquet_rootstr = '1:7:2:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet'
                self.setRoot(bouquet_rootstr)
            return (True, "Bouquet renamed successfully.")
    else: # service
        # services can not be renamed directly, so delete the current and add it again with new servicename
        sRefBefore = None
        if "sRefBefore" in param:
            if param["sRefBefore"] is not None:
                sRefBefore = param["sRefBefore"]
        new_param = {}
        new_param["sBouquetRef"] = sBouquetRef
        new_param["sRef"] = sRef
        new_param["Name"] = sName
        new_param["sRefBefore"] = sRefBefore
        returnValue = self.removeService(new_param)
        if returnValue[0]:
            returnValue = self.addServiceToBouquet(new_param)
            if returnValue[0]:
                return (True, "Service renamed successfully.")
    return (False, "Service can not be renamed.")
	def addServiceToAlternative(self, param):
		"""Add a service as an alternative of another service.

		param keys:
			sBouquetRef -- bouquet holding the current service (required)
			sRef        -- service to add as an alternative (required)
			sCurrentRef -- service receiving the alternative (required);
			               if it is not an alternatives group yet, one is
			               created for it first
			mode        -- MODE_TV (default) or radio, selects the
			               alternatives bouquet file suffix
		Returns a (success, message) tuple.
		"""
		sBouquetRef = param["sBouquetRef"]
		if sBouquetRef is None:
			return (False, "No bouquet given!")
		sRef = None
		if "sRef" in param:
			if param["sRef"] is not None:
				sRef =param["sRef"] # service to add to the alternative
		if sRef is None:
			return (False, "No service given!")
		sCurrentRef = param["sCurrentRef"] # alternative service
		if sCurrentRef is None:
			return (False, "No current service given!")
		cur_ref = eServiceReference(sCurrentRef)
		# check if service is already an alternative
		if not (cur_ref.flags & eServiceReference.isGroup):
			# sCurrentRef is not an alternative service yet, so do this and add itself to new alternative liste
			mode = MODE_TV # init
			if "mode" in param:
				if param["mode"] is not None:
					mode = int(param["mode"])
			mutableBouquetList = self.getMutableList(eServiceReference(sBouquetRef))
			if mutableBouquetList:
				cur_service = ServiceReference(cur_ref)
				name = cur_service.getServiceName()
				# build a 1:134 (alternatives group) reference pointing at a
				# new alternatives.* bouquet file named after the service
				if mode == MODE_TV:
					sref = '1:134:1:0:0:0:0:0:0:0:FROM BOUQUET \"alternatives.%s.tv\" ORDER BY bouquet'%(self.buildBouquetID(name, "alternatives.", mode))
				else:
					sref = '1:134:2:0:0:0:0:0:0:0:FROM BOUQUET \"alternatives.%s.radio\" ORDER BY bouquet'%(self.buildBouquetID(name, "alternatives.", mode))
				new_ref = eServiceReference(sref)
				# addService() returns 0/False on success here; insert the new
				# group where the plain service was, then drop the plain entry
				if not mutableBouquetList.addService(new_ref, cur_ref):
					mutableBouquetList.removeService(cur_ref)
					mutableBouquetList.flushChanges()
					eDVBDB.getInstance().reloadBouquets()
					mutableAlternatives = self.getMutableList(new_ref)
					if mutableAlternatives:
						mutableAlternatives.setListName(name)
						# move the original service into the new group
						if mutableAlternatives.addService(cur_ref):
							print "add", cur_ref.toString(), "to new alternatives failed"
						mutableAlternatives.flushChanges()
						self.setRoot(sBouquetRef)
						sCurrentRef = sref # currentRef is now an alternative (bouquet)
					else:
						return (False, "Get mutable list for new created alternative failed!")
				else:
					return (False, "Alternative %s created failed." % name)
			else:
				return (False, "Bouquetlist is not editable!")
		# add service to alternative-bouquet
		new_param = {}
		new_param["sBouquetRef"] = sCurrentRef
		new_param["sRef"] = sRef
		returnValue = self.addServiceToBouquet(new_param)
		if returnValue[0]:
			cur_ref = eServiceReference(sCurrentRef)
			cur_service = ServiceReference(cur_ref)
			name = cur_service.getServiceName()
			service_ref = ServiceReference(sRef)
			service_name = service_ref.getServiceName()
			return (True, "Added %s to alternative service %s." % (service_name,name))
		else:
			return returnValue
def removeAlternativeServices(self, param):
print "[WebComponents.BouquetEditor] removeAlternativeServices with param = ", param
sBouquetRef = param["sBouquetRef"]
if sBouquetRef is None:
return (False, "No bouquet given!")
sRef = None
if "sRef" in param:
if param["sRef"] is not None:
sRef =param["sRef"]
if sRef is None:
return (False, "No service given!")
cur_ref = eServiceReference(sRef)
# check if service is an alternative
if cur_ref.flags & eServiceReference.isGroup:
cur_service = ServiceReference(cur_ref)
list = cur_service.list()
first_in_alternative = list and list.getNext()
if first_in_alternative:
mutableBouquetList = self.getMutableList(eServiceReference(sBouquetRef))
if mutableBouquetList is not None:
if mutableBouquetList.addService(first_in_alternative, cur_service.ref):
print "couldn't add first alternative service to current root"
else:
print "couldn't edit current root"
else:
print "remove empty alternative list"
else:
return (False, "Service is not an alternative.")
new_param = {}
new_param["sBouquetRef"] = sRef
new_param["mode"] = None # of no interest when passing BouquetRefRoot
new_param["BouquetRefRoot"] = sBouquetRef
returnValue = self.removeBouquet(new_param)
if returnValue[0]:
self.setRoot(sBouquetRef)
return (True,"All alternative services deleted.")
else:
return returnValue
def toggleLock(self, param):
if not config.ParentalControl.configured.value:
return (False, "Parent Control is not activated.")
sRef = None
if "sRef" in param:
if param["sRef"] is not None:
sRef =param["sRef"]
if sRef is None:
return (False, "No service given!")
if "setuppinactive" in config.ParentalControl.dict().keys() and config.ParentalControl.setuppinactive.value:
password = None
if "password" in param:
if param["password"] is not None:
password =param["password"]
if password is None:
return (False, "No Parent Control Setup Pin given!")
else:
if password.isdigit():
if int(password) != config.ParentalControl.setuppin.value:
return (False, "Parent Control Setup Pin is wrong!")
else:
return (False, "Parent Control Setup Pin is wrong!")
cur_ref = eServiceReference(sRef)
protection = parentalControl.getProtectionLevel(cur_ref.toCompareString())
if protection:
parentalControl.unProtectService(cur_ref.toCompareString())
else:
parentalControl.protectService(cur_ref.toCompareString())
if cur_ref.flags & eServiceReference.mustDescent:
serviceType = "Bouquet"
else:
serviceType = "Service"
if protection:
if config.ParentalControl.type.value == "blacklist":
if parentalControl.blacklist.has_key(sref):
if "SERVICE" in parentalControl.blacklist.has_key(sref):
protectionText = "Service %s is locked." % self.getName(cur_ref)
elif "BOUQUET" in parentalControl.blacklist.has_key(sref):
protectionText = "Bouquet %s is locked." % self.getName(cur_ref)
else:
protectionText = "%s %s is locked." % (serviceType, self.getName(cur_ref))
else:
if hasattr(ParentalControl, "whitelist") and parentalControl.whitelist.has_key(sref):
if "SERVICE" in parentalControl.whitelist.has_key(sref):
protectionText = "Service %s is unlocked." % self.getName(cur_ref)
elif "BOUQUET" in parentalControl.whitelist.has_key(sref):
protectionText = "Bouquet %s is unlocked." % self.getName(cur_ref)
return (True, protectionText)
	def backupFiles(self, param):
		"""Create a tar backup of all bouquet/settings files.

		param is the (optional) backup base name; it is sanitized and the
		archive is written to self.BACKUP_PATH. A '.webouquetedit' marker
		file is included so restoreFiles() can verify the archive origin.
		Returns (True, tarFilename) or (False, message).
		"""
		filename = param
		if not filename:
			filename = self.BACKUP_FILENAME
		# strip anything that is not a safe file name character; this also
		# keeps the name safe for the shell command built below
		invalidCharacters= re_compile(r'[^A-Za-z0-9_. ]+|^\.|\.$|^ | $|^$')
		tarFilename= "%s.tar" % invalidCharacters.sub('_', filename)
		backupFilename = path.join(self.BACKUP_PATH, tarFilename)
		if path.exists(backupFilename):
			remove(backupFilename)
		# marker file proving the archive was made by this editor
		checkfile = path.join(self.BACKUP_PATH,'.webouquetedit')
		f = open(checkfile, 'w')
		if f:
			files = []
			f.write('created with WebBouquetEditor')
			f.close()
			files.append(checkfile)
			files.append("/etc/enigma2/bouquets.tv")
			files.append("/etc/enigma2/bouquets.radio")
			files.append("/etc/enigma2/userbouquet.favourites.tv")
			files.append("/etc/enigma2/userbouquet.favourites.radio")
			files.append("/etc/enigma2/lamedb")
			# optional tuner configuration files, wherever they exist
			for xml in ("/etc/enigma2/cables.xml", "/etc/enigma2/terrestrial.xml", "/etc/enigma2/satellites.xml", "/etc/tuxbox/cables.xml", "/etc/tuxbox/terrestrial.xml", "/etc/tuxbox/satellites.xml"):
				if path.exists(xml):
					files.append(xml)
			if config.ParentalControl.configured.value:
				if config.ParentalControl.type.value == "blacklist":
					files.append("/etc/enigma2/blacklist")
				else:
					files.append("/etc/enigma2/whitelist")
			# every userbouquet file referenced from the TV and radio roots
			files += self.getPhysicalFilenamesFromServicereference(eServiceReference('1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'))
			files += self.getPhysicalFilenamesFromServicereference(eServiceReference('1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet'))
			tarFiles = ""
			for arg in files:
				if not path.exists(arg):
					return (False, "Error while preparing backup file, %s does not exists." % arg)
				tarFiles += "%s " % arg
			# archive is created via the shell; file names above are fixed paths
			lines = popen("tar cvf %s %s" % (backupFilename,tarFiles)).readlines()
			remove(checkfile)
			return (True, tarFilename)
		else:
			return (False, "Error while preparing backup file.")
def getPhysicalFilenamesFromServicereference(self, ref):
files = []
serviceHandler = eServiceCenter.getInstance()
services = serviceHandler.list(ref)
servicelist = services and services.getContent("S", True)
for service in servicelist:
sref = service
pos = sref.find('FROM BOUQUET "')
filename = None
if pos != -1:
sref = sref[pos+14:]
pos = sref.find('"')
if pos != -1:
filename = '/etc/enigma2/' + sref[:pos] # FIXMEEE !!! HARDCODED /etc/enigma2
files.append(filename)
files += self.getPhysicalFilenamesFromServicereference(eServiceReference(service))
return files
	def restoreFiles(self, param):
		"""Restore bouquet settings from a tar backup created by backupFiles().

		param is the path of the tar archive. The archive must contain the
		'tmp/.webouquetedit' marker; existing bouquet files are deleted
		before extraction, then the service lists are reloaded.
		Returns a (success, message) tuple.
		"""
		tarFilename = param
		backupFilename = tarFilename #path.join(self.BACKUP_PATH, tarFilename)
		if path.exists(backupFilename):
			# verify the archive was produced by this editor (marker file)
			# NOTE(review): the file name is interpolated into a shell command
			# unquoted -- confirm callers only pass sanitized paths
			check_tar = False
			lines = popen('tar -tf %s' % backupFilename).readlines()
			for line in lines:
				pos = line.find('tmp/.webouquetedit')
				if pos != -1:
					check_tar = True
					break
			if check_tar:
				eDVBDB.getInstance().removeServices()
				# delete the currently referenced bouquet files before unpacking
				files = []
				files += self.getPhysicalFilenamesFromServicereference(eServiceReference('1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'))
				files += self.getPhysicalFilenamesFromServicereference(eServiceReference('1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet'))
				for bouquetfiles in files:
					if path.exists(bouquetfiles):
						remove(bouquetfiles)
				lines = popen('tar xvf %s -C / --exclude tmp/.webouquetedit' % backupFilename).readlines()
				# reload tuner and service databases from the restored files
				nimmanager.readTransponders()
				eDVBDB.getInstance().reloadServicelist()
				eDVBDB.getInstance().reloadBouquets()
				# refresh the on-screen service list, keeping the selection
				infoBarInstance = InfoBar.instance
				if infoBarInstance is not None:
					servicelist = infoBarInstance.servicelist
					root = servicelist.getRoot()
					currentref = servicelist.getCurrentSelection()
					servicelist.setRoot(root)
					servicelist.setCurrentSelection(currentref)
				remove(backupFilename)
				return (True, "Bouquet-settings were restored successfully")
			else:
				return (False, "Error, %s was not created with WebBouquetEditor..." % backupFilename)
		else:
			return (False, "Error, %s does not exists, restore is not possible..." % backupFilename)
def getMutableBouquetList(self, mode):
if mode == MODE_TV:
self.bouquet_rootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'
else:
self.bouquet_rootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet'
return self.getMutableList(eServiceReference(self.bouquet_rootstr))
	def getMutableList(self, ref):
		"""Return an editable (startEdit) list for the given bouquet reference.

		NOTE(review): eServiceCenter.list() may return None for an invalid
		reference, which would make startEdit() raise -- confirm callers only
		pass valid bouquet references.
		"""
		serviceHandler = eServiceCenter.getInstance()
		return serviceHandler.list(ref).startEdit()
def setRoot(self, bouquet_rootstr):
infoBarInstance = InfoBar.instance
if infoBarInstance is not None:
servicelist = infoBarInstance.servicelist
root = servicelist.getRoot()
if bouquet_rootstr == root.toString():
currentref = servicelist.getCurrentSelection()
servicelist.setRoot(root)
servicelist.setCurrentSelection(currentref)
def buildBouquetID(self, str, prefix, mode):
tmp = str.lower()
name = ''
for c in tmp:
if (c >= 'a' and c <= 'z') or (c >= '0' and c <= '9'):
name += c
else:
name += '_'
# check if file is unique
suffix = ""
if mode == MODE_TV:
suffix = ".tv"
else:
suffix = ".radio"
filename = '/etc/enigma2/' + prefix + name + suffix
if path.exists(filename):
i = 1
while True:
filename = "/etc/enigma2/%s%s_%d%s" %( prefix , name , i, suffix)
if path.exists(filename):
i += 1
else:
name = "%s_%d" % (name,i)
break
return name
def getName(self,ref):
serviceHandler = eServiceCenter.getInstance()
info = serviceHandler.info(ref)
if info:
name = info.getName(ref)
else:
name = ""
return name
| MOA-2011/e2openplugin-OpenWebif | plugin/controllers/BouquetEditor.py | Python | gpl-2.0 | 26,057 | [
"VisIt"
] | 6d26bbadb32a49693eb4948b1127e6d7261afa805437ba2b2ef6c7797fc94760 |
import blockbuster.bb_logging as log
import blockbuster.bb_dbconnector_factory
from blockbuster.messaging import bb_sms_handler
def go(smsrequest):
    """Handle the START command: record an analytics event, then greet the user."""
    connector = blockbuster.bb_dbconnector_factory.DBConnectorInterfaceFactory().create()
    connector.add_analytics_record("Count", "Command-START", smsrequest.instancename)
    send_welcome_message(smsrequest)
# This method simply sends a 'Welcome' text message to the user
def send_welcome_message(smsrequest):
    """Send the standard welcome SMS back to the requesting number.

    Uses the module-level ``log`` alias for logging; the previous
    ``blockbuster.bb_logging.logger`` spelling only resolved because an
    unrelated import happened to bind the ``blockbuster`` package name.
    """
    log.logger.info("Sending Welcome Message destination_mobile=\"%s\"",
                    smsrequest.requestormobile)
    message = "Welcome to Blockbuster! \n" \
              "\n" \
              "To register a car, text 'REGISTER AB05TYR Firstname Surname'. \n" \
              "\n" \
              "For more commands text '?'"
    bb_sms_handler.send_sms_notification(smsrequest.servicenumber,
                                         smsrequest.requestormobile,
                                         message)
    return
# This method is a WORK IN PROGRESS
def workflow_start(smsrequest):
    """Sketch of the full START workflow (registered user / blacklist / new user).

    WARNING: work in progress -- none of the branches below is actually
    conditional yet; every debug line runs and the SMS is sent twice with
    placeholder 'Joe Bloggs' content. Do not wire this up as-is.
    """
    print(str.format("Request from: {0}", smsrequest.requestormobile))

    # Is the user registered?
    log.logger.debug("Checking if the mobile number is already registered")

    # If so - do they have any vehicles registered?
    log.logger.debug("User already has registered vehicles.")
    message = "Welcome back, Joe Bloggs! \n" \
              "\n" \
              "You have the following vehicles registered: \n" \
              "\n" \
              "Vehicle 1\n" \
              "Vehicle 2\n" \
              "\n" \
              "Text 'REGISTER AB05TYR' to add a vehicle."
    bb_sms_handler.send_sms_notification(smsrequest.servicenumber,
                                         smsrequest.requestormobile,
                                         message)

    # If not - prompt them to add a vehicle
    log.logger.debug("User has no registered vehicles - prompting to add one.")
    message = "Welcome back, Joe Bloggs! \n" \
              "\n" \
              "You don't currently have any vehicles registered." \
              "\n" \
              "Text 'REGISTER AB05TYR' to add a vehicle."
    bb_sms_handler.send_sms_notification(smsrequest.servicenumber,
                                         smsrequest.requestormobile,
                                         message)

    # Is the user on the blacklist?
    log.logger.debug("Checking if the mobile number is blacklisted")
    # NOTE(review): the messages below are built but never sent
    message = "Welcome back!\n" \
              "\n" \
              "Messages from this service are currently 'Stopped'.\n" \
              "\n" \
              "Text 'RESTART' to remove the stop on this number."

    # In which case - welcome them!
    log.logger.debug("New user - sending welcome message")
    message = "Welcome to Blockbuster! \n" \
              "\n" \
              "To register a car, text 'REGISTER AB05TYR Firstname Surname'. \n" \
              "\n" \
              "For more info visit http://bit.ly/bbparking or reply 'HELP' for commands."
    bb_sms_handler.send_sms_notification(smsrequest.servicenumber,
                                         smsrequest.requestormobile,
                                         message)
"VisIt"
] | 1e82be6a04a31f39e5e642f8c9f3133a6e762877081584403d1f4cf4b34cddec |
""" The input data resolution module is a plugin that
allows to define VO input data policy in a simple way using existing
utilities in DIRAC or extension code supplied by the VO.
The arguments dictionary from the Job Wrapper includes the file catalogue
result and in principle has all the necessary information to resolve input data
for applications.
"""
__RCSID__ = "$Id$"
import types
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.ModuleFactory import ModuleFactory
from DIRAC.WorkloadManagementSystem.Client.PoolXMLSlice import PoolXMLSlice
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
COMPONENT_NAME = 'InputDataResolution'  # sub-logger name used by this module
CREATE_CATALOG = False  # when True, execute() also writes a POOL XML catalog slice
class InputDataResolution( object ):
  """ Defines the Input Data Policy

      Runs the input data policy modules configured for the site (or given
      in the job JDL) one after another until every requested replica is
      resolved or no module is left. The arguments dictionary from the Job
      Wrapper includes the file catalogue result and drives the resolution.
  """

  #############################################################################
  def __init__( self, argumentsDict ):
    """ Standard constructor

        :param argumentsDict: arguments from the Job Wrapper (file catalogue
                              result, configuration, job description)
    """
    self.arguments = argumentsDict
    self.name = COMPONENT_NAME
    self.log = gLogger.getSubLogger( self.name )
    # propagate the AllReplicas operations option down to the policy modules
    self.arguments.setdefault( 'Configuration', {} )['AllReplicas'] = Operations().getValue( 'InputDataPolicy/AllReplicas', False )

    # By default put input data into the current directory
    self.arguments.setdefault( 'InputDataDirectory', 'CWD' )

  #############################################################################
  def execute( self ):
    """Given the arguments from the Job Wrapper, this function calls existing
       utilities in DIRAC to resolve input data.

       :return: S_OK with a {'Successful': ..., 'Failed': ...} value, or
                S_ERROR when nothing was resolved or files are missing and
                'IgnoreMissing' is not set
    """
    resolvedInputData = self.__resolveInputData()
    if not resolvedInputData['OK']:
      self.log.error( 'InputData resolution failed with result:\n%s' % ( resolvedInputData['Message'] ) )
      return resolvedInputData

    # For local running of this module we can expose an option to ignore missing files
    ignoreMissing = self.arguments.get( 'IgnoreMissing', False )

    # Missing some of the input files is a fatal error unless ignoreMissing option is defined
    failedReplicas = resolvedInputData['Value'].get( 'Failed', {} )
    if failedReplicas and not ignoreMissing:
      self.log.error( 'Failed to obtain access to the following files:\n%s'
                      % ( '\n'.join( sorted( failedReplicas ) ) ) )
      return S_ERROR( 'Failed to access some of requested input data' )

    if not resolvedInputData['Value'].get( 'Successful' ):
      return S_ERROR( 'Could not access any requested input data' )

    if CREATE_CATALOG:
      res = self._createCatalog( resolvedInputData )
      if not res['OK']:
        return res

    return resolvedInputData

  #############################################################################
  def _createCatalog( self, resolvedInputData, catalogName = 'pool_xml_catalog.xml', pfnType = 'ROOT_All' ):
    """ By default uses PoolXMLSlice, VO extensions can modify at will

        NOTE(review): execute() passes the full S_OK structure here, but this
        method indexes 'Successful' at the top level -- confirm the expected
        argument shape before enabling CREATE_CATALOG.
    """
    resolvedData = resolvedInputData['Successful']
    tmpDict = {}
    # tag every resolved LFN with the PFN file type expected by the catalog
    for lfn, mdata in resolvedData.items():
      tmpDict[lfn] = mdata
      tmpDict[lfn]['pfntype'] = pfnType
      self.log.verbose( 'Adding PFN file type %s for LFN:%s' % ( pfnType, lfn ) )
    catalogName = self.arguments['Configuration'].get( 'CatalogName', catalogName )
    self.log.verbose( 'Catalog name will be: %s' % catalogName )
    resolvedData = tmpDict
    appCatalog = PoolXMLSlice( catalogName )
    return appCatalog.execute( resolvedData )

  #############################################################################
  def __resolveInputData( self ):
    """This method controls the execution of the DIRAC input data modules according
       to the VO policy defined in the configuration service.
    """
    site = self.arguments['Configuration'].get( 'SiteName', DIRAC.siteName() )
    self.arguments.setdefault( 'Job', {} )
    # a policy in the job JDL takes precedence over the site configuration
    policy = self.arguments['Job'].get( 'InputDataPolicy', [] )
    if policy:
      # In principle this can be a list of modules with the first taking precedence
      if type( policy ) in types.StringTypes:
        policy = [policy]
      self.log.info( 'Job has a specific policy setting: %s' % ( ', '.join( policy ) ) )
    else:
      self.log.debug( 'Attempting to resolve input data policy for site %s' % site )
      inputDataPolicy = Operations().getOptionsDict( 'InputDataPolicy' )
      if not inputDataPolicy['OK']:
        return S_ERROR( 'Could not resolve InputDataPolicy from Operations InputDataPolicy' )

      options = inputDataPolicy['Value']
      # site-specific policy, falling back to the Default option
      policy = options.get( site, options.get( 'Default', [] ) )
      if policy:
        policy = [x.strip() for x in policy.split( ',' )]
        if site in options:
          prStr = 'Found specific'
        else:
          prStr = 'Applying default'
        self.log.info( '%s input data policy for site %s:\n%s' % ( prStr, site, '\n'.join( policy ) ) )

    dataToResolve = [] # if none, all supplied input data is resolved
    successful = {}
    # run each module in turn on the replicas still left unresolved
    for modulePath in policy:
      result = self.__runModule( modulePath, dataToResolve )
      if not result['OK']:
        self.log.warn( 'Problem during %s execution' % modulePath )
        return result
      result = result['Value']

      successful.update( result.get( 'Successful', {} ) )
      dataToResolve = result.get( 'Failed', [] )
      if dataToResolve:
        self.log.info( '%s failed for the following files:\n%s'
                       % ( modulePath, '\n'.join( dataToResolve ) ) )
      else:
        self.log.info( 'All replicas resolved after %s execution' % ( modulePath ) )
        break

    if successful:
      self.log.verbose( 'Successfully resolved:', str( successful ) )

    return S_OK( {'Successful': successful, 'Failed':dataToResolve} )

  #############################################################################
  def __runModule( self, modulePath, remainingReplicas ):
    """This method provides a way to run the modules specified by the VO that
       govern the input data access policy for the current site. Using the
       InputDataPolicy section from Operations different modules can be defined for
       particular sites or for InputDataPolicy defined in the JDL of the jobs.
    """
    self.log.info( 'Attempting to run %s' % ( modulePath ) )
    moduleFactory = ModuleFactory()
    moduleInstance = moduleFactory.getModule( modulePath, self.arguments )
    if not moduleInstance['OK']:
      return moduleInstance

    module = moduleInstance['Value']
    result = module.execute( remainingReplicas )
    return result
return result
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| rajanandakumar/DIRAC | WorkloadManagementSystem/Client/InputDataResolution.py | Python | gpl-3.0 | 6,629 | [
"DIRAC"
] | 89d450ba9079dcb09c8ee6b8bf97d8246432b942138394b360cdb399b7ce58f6 |
# GLOBAL IMPORTS
#import pysam # from pysam 0.8.1
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
class Interval(object):
    """
    @class Interval
    @brief Genomic interval on a reference sequence with per-interval read
    tracking. Every created instance is registered in the class-level
    ``Instances`` list so the whole set can be inspected or reset globally.
    """

    #~~~~~~~CLASS FIELDS~~~~~~~#

    Instances = []  # class-level registry of every Interval created
    id_count = 1    # next id handed out by next_id()

    #~~~~~~~CLASS METHODS~~~~~~~#

    @classmethod
    def next_id(self):
        """Return the next unique interval id and advance the counter."""
        cur_id = self.id_count
        self.id_count += 1
        return cur_id

    @classmethod
    def countInstances(self):
        """Return the number of registered intervals."""
        return len(self.Instances)

    @classmethod
    def getInstances(self):
        """Return the list of registered intervals."""
        return self.Instances

    @classmethod
    def printInstances(self):
        """Print every registered interval."""
        for inter in self.Instances:
            print(inter)

    @classmethod
    def resetInstances(self):
        """Forget all registered intervals and restart id numbering.

        Bug fix: the id counter is reset to 1 (its initial value) so ids
        restart at 1 like on a fresh class, instead of the previous 0.
        """
        self.Instances = []
        self.id_count = 1

    @classmethod
    def get_read(self):
        """Return the reads of all registered intervals as one flat list."""
        read_list = []
        for i in self.Instances:
            read_list.extend(i.read_list)
        return read_list

    @classmethod
    def get_report(self):
        """Return a table (header row + one row per interval) of read counts."""
        report = [["ref_name", "start", "end", "name", "nread"]]
        report += [[i.ref_name, i.start, i.end, i.name, i.nread] for i in self.Instances]
        return report

    @classmethod
    def resetReadCount(self):
        """Set the read counter of every registered interval back to 0."""
        for inter in self.Instances:
            inter.nread = 0

    @classmethod
    def resetReadList(self):
        """Empty the read list of every registered interval."""
        for inter in self.Instances:
            inter.read_list = []

    #~~~~~~~FONDAMENTAL METHODS~~~~~~~#

    def __init__(self, ref_name, start, end, name="-"):
        """
        @param ref_name Name of the reference sequence
        @param start Start coordinates of the interval (INT)
        @param end End coordinates of the interval (INT)
        @param name Facultative name of the interval
        """
        self.id = self.next_id()
        self.ref_name = ref_name
        self.name = name
        # store coordinates in ascending order regardless of input order
        if start <= end:
            self.start, self.end = start, end
        else:
            self.start, self.end = end, start
        self.nread = 0
        self.read_list = []
        # register the instance for class-level tracking
        self.Instances.append(self)

    def __str__(self):
        return "{} [{}:{}] {} = {} reads found".format(
            self.ref_name,
            self.start,
            self.end,
            self.name,
            self.nread)

    def __repr__(self):
        return "<Instance of {} from {} >\n".format(self.__class__.__name__, self.__module__)

    def __len__(self):
        # number of reads collected so far
        return self.nread

    def get(self, key):
        """Return the instance attribute named *key*."""
        return self.__dict__[key]

    def set(self, key, value):
        """Set the instance attribute named *key* to *value*."""
        self.__dict__[key] = value

    #~~~~~~~PUBLIC METHODS~~~~~~~#

    def is_overlapping(self, ref_name, start, end):
        """Return True when [start, end] on *ref_name* fully covers this interval.

        NOTE: despite the name this tests containment of self inside the
        query interval, not partial overlap (kept for compatibility).
        """
        # reverse value order if given in descending order
        if start > end:
            start, end = end, start
        return ref_name == self.ref_name and start <= self.start and end >= self.end

    def add_read(self, read):
        """
        Add a read to read_list and update the counter
        """
        self.read_list.append(read)
        self.nread += 1

    def sort_read(self):
        """
        sort read in read_list according to their leftmost position
        """
        self.read_list.sort(key=lambda x: x.pos)
| a-slide/Find_overlap_reads | Find_overlap_read_src/Interval.py | Python | gpl-2.0 | 3,643 | [
"pysam"
] | 0e9bd3463fe1bff38903dc5cd0dfcb0b1a44a4035cdcd14fa3be1caa16cb2774 |
#! /usr/bin/env python
#
# Author: Sergey Savchenko (savchenko.s.s@gmail.com)
#
from threading import Thread
import subprocess
from math import hypot, log10, sqrt
from time import time, sleep
import urllib.request
import urllib
import sys
import os
import glob
import shutil
import argparse
import bz2
import numpy as np
from astropy.io import fits
try:
from astropy.wcs import WCS
wcsOK = True
except ImportError:
wcsOK = False
def move(src, dst):
    """Move *src* to *dst* by copy-then-delete (works across filesystems).

    If *src* does not exist, a warning is printed and nothing happens.
    """
    if not os.path.isfile(src):
        # bug fix: the original message never substituted the file name
        # (print("File %s not found and cannot be moved") without % src)
        print("File %s not found and cannot be moved" % src)
        return
    shutil.copy(src, dst)
    os.remove(src)
def findField2(objRA, objDEC, radius):
    """Query the SDSS DR16 SkyServer radial search around (objRA, objDEC).

    Returns (fieldList, objID): the list of (run, rerun, camcol, field)
    tuples covering the search radius (in arcmin; a minimum of 2 is used
    when radius < 0.01) and the objID of the nearest catalogued object.
    When radius < 0.001 only the field of the nearest object is returned.
    """
    request = "http://skyserver.sdss.org/dr16/en/tools/search/x_results.aspx?"
    request += "searchtool=Radial&uband=&gband=&rband=&iband=&zband=&jband=&hband=&kband="
    request += "&TaskName=Skyserver.Search.Radial&ReturnHtml=true&whichphotometry=optical&"
    request += "coordtype=equatorial&ra=%1.5f&dec=%1.5f" % (objRA, objDEC)
    if radius < 0.01:
        request += "&radius=2"
    else:
        request += "&radius=%1.2f" % (radius)
    request += "&min_u=0&max_u=20&min_g=0&max_g=20&min_r=0&max_r=20&min_i=0&max_i=20"
    request += "&min_z=0&max_z=20&min_j=0&max_j=20&min_h=0&max_h=20&min_k=0&max_k=20"
    request += "&format=csv&TableName=&limit=99999"
    u = urllib.request.urlopen(request)
    table = u.read().decode().split("\n")
    optRun = None
    optRerun = None
    optCamcol = None
    optField = None
    optObjID = None
    # Find the nearest object and the corresponding field
    minDist = 1e10
    for line in table:
        # ignore comments, header and footer of table
        if (len(line) == 0) or (not line[0].isdigit()):
            continue
        objParams = line.split(',')
        objID = int(objParams[0])
        run = int(objParams[1])
        rerun = int(objParams[2])
        camcol = int(objParams[3])
        field = int(objParams[4])
        ra = float(objParams[7])
        dec = float(objParams[8])
        # plane approximation of the angular distance is sufficient here
        dist = hypot(objRA - ra, objDEC - dec)
        if dist < minDist:
            minDist = dist
            optObjID = objID
            optRun = run
            optRerun = rerun
            optCamcol = camcol
            optField = field
    fList = [(optRun, optRerun, optCamcol, optField)]
    if radius < 0.001:
        return fList, optObjID
    else:
        # collect every distinct field covered by the returned objects
        for line in table:
            # ignore comments, header and footer of table
            if (len(line) == 0) or (not line[0].isdigit()):
                continue
            objParams = line.split(',')
            run = int(objParams[1])
            rerun = int(objParams[2])
            camcol = int(objParams[3])
            field = int(objParams[4])
            if (run, rerun, camcol, field) not in fList:
                fList.append((run, rerun, camcol, field))
        return fList, optObjID
def getUrl(run, rerun, camcol, field, band):
    """Return the SAS DR16 download URL for one SDSS frame (bz2-compressed)."""
    base = "http://dr16.sdss.org/sas/dr16/eboss/photoObj/frames"
    frame = "frame-%s-%.6i-%i-%.4i.fits.bz2" % (band, run, camcol, field)
    return "%s/%s/%i/%i/%s" % (base, rerun, run, camcol, frame)
def testUrlExists(url):
    """Return the Content-Length of *url* if it answers HTTP 200, else -1.

    NOTE(review): only HTTPError is caught; a network-level failure
    (urllib.error.URLError) would propagate -- confirm that is acceptable
    for the callers.
    """
    try:
        u = urllib.request.urlopen(url)
        code = u.code
        meta = u.info()
        file_size = int(meta["Content-Length"])
        if code == 200:
            return file_size
        return -1
    except urllib.request.HTTPError:
        return -1
def download(url, passband, file_name):
    """Download *url* to ./downloads/<file_name>_<passband>.fits[.bz2].

    Frame files are bz2-compressed on the SDSS servers, so '.bz2' is
    appended for every passband except 'ps' (PSF files). Returns -1 on an
    HTTP error, None on success.
    """
    fName = "./downloads/%s_%s.fits" % (file_name, passband)
    if passband != "ps": # PSF-files are not compressed on SDSS servers
        fName += ".bz2"
    try:
        urllib.request.urlretrieve(url, fName)
    except urllib.request.HTTPError:
        print("")
        return -1
def threadsAlive(listOfThreads):
    """Return True while at least one of the given threads is still running."""
    return any(th.is_alive() for th in listOfThreads)
# some sdss functions are below
def prep_ima(gal):
    """Rewrite the pixel values of an SDSS frame from nanomaggies to ADU, in place.

    The calibration vector stored in HDU 1 is broadcast over the image rows
    and divided out, then the original file is replaced by the converted one.
    """
    new_gal = "./prep_ima_tmp.fits"
    # This function re-writes pixel values given in NMgy to ADU
    hdulist = fits.open(gal, do_not_scale_image_data=True, mode='update')
    img = hdulist[0].data
    (dimy, dimx) = img.shape
    # expand the per-column calibration vector into a full-size image
    cimg = np.zeros(shape=(dimy, dimx))
    calib = hdulist[1].data
    for i in range(dimy):
        cimg[i] = calib
    dn = img/cimg
    shutil.copy(gal, new_gal)
    hdulist1 = fits.open(new_gal, do_not_scale_image_data=True,
                         mode='update')
    img_new = hdulist1[0].data
    # NOTE(review): element-wise copy keeps the original BZERO/BSCALE
    # scaling intact but is O(n*m) in Python -- img_new[:] = dn would be
    # the vectorized equivalent; confirm before changing
    for i in range(dimy):
        for k in range(dimx):
            img_new[i, k] = dn[i, k] # new fits-file img_new with ADU
    hdulist1.flush()
    os.remove(gal)
    move(new_gal, gal)
def change_m0(fitsName, oldM0Value, refM0):
    """Rescale a FITS image from zero point *oldM0Value* to *refM0*, in place.

    Fluxes are multiplied by 10**(0.4 * (refM0 - oldM0Value)) so that the
    image photometry matches the reference zero point.
    """
    # function changes value of magnitude zeropoint to a given one
    hdulist = fits.open(fitsName)
    data = hdulist[0].data
    header = hdulist[0].header
    deltaM0 = refM0 - oldM0Value
    data = data * (10.0**(0.4*deltaM0))
    outHDU = fits.PrimaryHDU(data=data, header=header)
    outHDU.writeto("tmp.fits")
    os.remove(fitsName)
    move("tmp.fits", fitsName)
def gain_dark_SDSS(camcol, band, run):
    """Return (gain, dark variance) for an SDSS camera column and band.

    Values follow the SDSS frames data model; for a few chips the dark
    variance (and one u-band gain) changed during the survey, so those
    entries additionally depend on the run number.

    Parameters:
        camcol -- camera column, 1..6
        band   -- photometric band, one of 'u', 'g', 'r', 'i', 'z'
        run    -- imaging run number

    Returns a (gain [e-/ADU], dark variance [ADU^2]) tuple.
    Raises KeyError for an unknown band/camcol combination (the original
    if-chain died with an UnboundLocalError instead).
    """
    # Plain entries are (gain, dark_variance); time-dependent chips are
    # stored as (run_cutoff, early_values, late_values). Bug fix: the
    # original used 'run < 1500' vs 'run > 1500', leaving run == 1500
    # undefined; here the late values apply from the cutoff run onwards.
    table = {
        ('u', 1): (1.62, 9.61),
        ('u', 2): (1100, (1.595, 12.6025), (1.825, 12.6025)),
        ('u', 3): (1.59, 8.7025),
        ('u', 4): (1.6, 12.6025),
        ('u', 5): (1.47, 9.3025),
        ('u', 6): (2.17, 7.0225),
        ('g', 1): (3.32, 15.6025),
        ('g', 2): (3.855, 1.44),
        ('g', 3): (3.845, 1.3225),
        ('g', 4): (3.995, 1.96),
        ('g', 5): (4.05, 1.1025),
        ('g', 6): (4.035, 1.8225),
        ('r', 1): (4.71, 1.8225),
        ('r', 2): (4.6, 1.00),
        ('r', 3): (4.72, 1.3225),
        ('r', 4): (4.76, 1.3225),
        ('r', 5): (4.725, 0.81),
        ('r', 6): (4.895, 0.9025),
        ('i', 1): (5.165, 7.84),
        ('i', 2): (1500, (6.565, 5.76), (6.565, 6.25)),
        ('i', 3): (4.86, 4.6225),
        ('i', 4): (1500, (4.885, 6.25), (4.885, 7.5625)),
        ('i', 5): (4.64, 7.84),
        ('i', 6): (4.76, 5.0625),
        ('z', 1): (4.745, 0.81),
        ('z', 2): (5.155, 1.0),
        ('z', 3): (4.885, 1.0),
        ('z', 4): (1500, (4.775, 9.61), (4.775, 12.6025)),
        ('z', 5): (1500, (3.48, 1.8225), (3.48, 2.1025)),
        ('z', 6): (4.69, 1.21),
    }
    entry = table[(band, camcol)]
    if len(entry) == 3:
        cutoff, early, late = entry
        gain, darkvar = early if run < cutoff else late
    else:
        gain, darkvar = entry
    return gain, darkvar
def SDSS_dr8(gal_image):
    """Read gain, read-out noise and magnitude zero point from an SDSS frame.

    The run/band/camcol are taken from the primary header; NMGY gives the
    nanomaggy-per-count factor from which the zero point is derived as
    m0 = 22.5 - 2.5*log10(NMGY). Returns (gain, read_out_noise [e-], m0).
    """
    # http://data.sdss3.org/datamodel/files/BOSS_PHOTOOBJ/
    # frames/RERUN/RUN/CAMCOL/frame.html
    hdulist = fits.open(gal_image)
    prihdr = hdulist[0].header
    run = int(prihdr['RUN'])
    band = str(prihdr['FILTER'])
    camcol = int(prihdr['CAMCOL'])
    kkk = prihdr['NMGY']
    m0 = 22.5 - 2.5*log10(kkk)
    GAIN, var = gain_dark_SDSS(camcol, band, run)
    # dark variance is in ADU^2; convert its sigma to electrons
    read_out_noise = sqrt(var)*GAIN # should be <~5 e
    return GAIN, read_out_noise, m0
def bunzip(zipName):
    """Decompress a '.bz2' file next to itself and delete the archive.

    The output name is *zipName* without its '.bz2' extension; an existing
    file with that name is overwritten.
    """
    outName = zipName[:-4]
    if os.path.isfile(outName):
        os.remove(outName)
    # Stream through context managers instead of slurping the whole archive
    # into memory; the original also leaked both file handles, which breaks
    # the remove() below on platforms that lock open files.
    with bz2.BZ2File(zipName) as src, open(outName, 'wb') as dst:
        shutil.copyfileobj(src, dst)
    os.remove(zipName)
def reduce_to_same_m0(listOfImages):
    """Bring a list of SDSS field images to a common photometric zeropoint.

    The first image defines the reference zeropoint ``refM0``; all other
    images are rescaled to it via ``change_m0``.  Every image gets its
    ``M0``/``EXPTIME`` header keywords updated in place.

    Parameters
    ----------
    listOfImages : list of str
        Paths (without the ``.bz2`` suffix) of the compressed field files.
        Must be non-empty, otherwise ``refM0`` is undefined.

    Returns
    -------
    tuple
        ``(GAINList, READOUTList, m0List, refM0)`` with per-image gain,
        readout noise and original zeropoint, plus the reference zeropoint.
    """
    GAINList = []
    READOUTList = []
    m0List = []
    for i, fName in enumerate(listOfImages):
        bunzip("%s.bz2" % fName)
        prep_ima(fName)
        GAIN, READOUT, m0 = SDSS_dr8(fName)
        if i == 0:
            # The first image defines the reference zeropoint.
            refM0 = m0
        else:
            change_m0(fName, m0, refM0)
        GAINList.append(GAIN)
        READOUTList.append(READOUT)
        # BUG FIX: m0List was declared and returned but never populated,
        # so callers always received an empty list.
        m0List.append(m0)
        hdu = fits.open(fName, do_not_scale_image_data=True,
                        mode="update")
        header = hdu[0].header
        header["M0"] = refM0
        header["EXPTIME"] = 1.0
        hdu.flush()
        # BUG FIX: close the handle after flushing; the original leaked
        # one open FITS file per image.
        hdu.close()
    return GAINList, READOUTList, m0List, refM0
# parsing the argument line here
parser = argparse.ArgumentParser(description="Download fits-files of fields for specified coordinates")
parser.add_argument("filters", help="List of filters (for example gri or uiz)")
parser.add_argument("-i", "--input", default="coordinates.dat",
                    help="File with coordinates to download")
parser.add_argument("-a", "--adjacent", action="store_true", default=False,
                    help="Download adjacent fields if any exist")
parser.add_argument("-s", "--swarp", action="store_true", default=False,
                    help="Concatenate fields using SWarp package")
parser.add_argument("-c", "--convert", action="store_true", default=False,
                    help="Convert fields to the same photometry zeropoint")
parser.add_argument("-f", "--free", action="store_true", default=False,
                    help="Remove individual fields after concatenation")
parser.add_argument("-t", "--trim", action="store_true", default=False,
                    help="Crop image to galaxy size")
parser.add_argument("--scatter", action="store_true", default=False,
                    help="Put every object in a separate directory")
parser.add_argument("--add_urls", default=None,
                    help="File with additional urls of fields for objects")
parser.add_argument("--add_fields", default=None,
                    # typo fix: "wuth" -> "with" in the user-visible help text
                    help="File with additional run,rerun,camcol,fields data for objects")
args = parser.parse_args()
# The positional 'filters' argument doubles as the list of passbands,
# iterated character by character (e.g. "gri" -> 'g', 'r', 'i').
bandlist = args.filters
# Prepare the download directory layout.  Without --scatter every image of
# a given passband goes into a shared per-band directory; with --scatter
# only the top-level downloads directory is created here (the per-object
# directories are created later, when each object is processed).
if args.scatter:
    if not os.path.exists("./downloads/"):
        os.makedirs("./downloads/")
else:
    for band in bandlist:
        bandDir = "./downloads/%s" % (band)
        if not os.path.exists(bandDir):
            os.makedirs(bandDir)
if args.swarp:
    # Find the name under which the SWarp binary is installed on this
    # system ('swarp' on most distributions, 'SWarp' on some).
    swarpName = None
    for candidate in ("swarp", "SWarp"):
        rCode = subprocess.call("which %s >/dev/null" % candidate, shell=True)
        if rCode == 0:
            swarpName = candidate
            break
    if swarpName is None:
        print("\033[31m Error: SWarp was not found on your system.\033[0m")
        print("\033[31m The command has to be either 'swarp' or 'SWarp'\033[0m")
        print("\033[31m Intall SWarp package or try to run this script without -s option.\033[0m")
        exit(1)
# Read the coordinate list, skipping comment lines that start with '#'.
# Each useful line is: name  ra  dec  [radius_arcmin]
listOfCoords = [lst for lst in open(args.input).readlines() if not lst.startswith("#")]
counter = 0
# Objects whose fields could not be located or downloaded are logged here.
# NOTE(review): errFile is never explicitly closed; it is line-buffered and
# closed implicitly at interpreter exit.
errFile = open("errors_404.dat", "w", buffering=1)
with open("fields.dat", "w", buffering=1) as outFile:
    for line in listOfCoords:
        counter += 1
        params = line.split()
        # NOTE(review): the second clause is redundant — len(params) == 4
        # already satisfies `len(params) in (3, 4)`.  The intent was
        # possibly `(len(params) == 3) or (len(params) == 4 and
        # args.adjacent)`; confirm before changing.
        if (len(params) in (3, 4)) or ((len(params) == 4)
                                       and (args.adjacent is True)):
            galName = params[0]
            ra = float(params[1])
            dec = float(params[2])
            # The optional 4th column (search radius, arcmin) is only read
            # when --adjacent is requested.
            if args.adjacent is True:
                r_adj = float(params[3])
            else:
                r_adj = 0.0
            msg = "\033[33m Downloading field for "
            msg += "%1.5f %1.5f: '%s' (%i/%i) \033[0m" % (ra, dec,
                                                          galName, counter,
                                                          len(listOfCoords))
            print(msg)
        else:
            print("Invalid number of columns in input file %s" % args.input)
            sys.exit(1)
        # findField2 returns the list of (run, rerun, camcol, field) tuples
        # covering the coordinates plus an object identifier (None when the
        # position falls outside the SDSS footprint) — see unpacking below.
        objFieldList, objID = findField2(ra, dec, r_adj)
        if objID is None:
            print("\033[31m Error! No object was found at given coordinates.\033[0m")
            print("\033[31m This area is probably outside of the SDSS footprint.\033[0m")
            errFile.write("%s %1.6f %1.6f \n" % (galName, ra, dec))
            continue
        if len(objFieldList) > 1:
            print("There are %i files for this object" % (len(objFieldList)))
        outFile.write("%s %1.6f %1.6f " % (galName, ra, dec))
        for ifield in range(len(objFieldList)):
            startTime = time()
            if len(objFieldList) > 1:
                print("Downloading (%i/%i)" % (ifield + 1, len(objFieldList)))
                curGalName = galName + "_" + str(ifield)
            else:
                curGalName = galName + "_0"
            run, rerun, camcol, field = objFieldList[ifield]
            # Check if fits files exist for all filters:
            print("   Checking file existense:")
            allExist = True
            urls = {}
            for band in bandlist:
                print("      %s" % (band), end="")
                url = getUrl(run, rerun, camcol, field, band)
                # testUrlExists returns -1 on failure, otherwise the
                # content length in bytes (see usage below).
                answer = testUrlExists(url)
                if answer == -1:
                    allExist = False
                    print("\033[31m [Fail!] \033[0m\n")
                    break
                print("\033[32m [OK] \033[0m (%i bytes)" % (answer))
                urls[band] = url
            if not allExist:
                # Skip the whole object if any passband is missing.
                errFile.write("%s %1.6f %1.6f \n" % (galName, ra, dec))
                continue
            downloadThreads = []
            # Downloading files in all passbands concurrently, one daemon
            # thread per band.
            for band in bandlist:
                dth = Thread(target=download,
                             args=(urls[band], band, curGalName))
                dth.daemon = True
                dth.start()
                downloadThreads.append(dth)
            print("   Downloading", end="")
            # Animated "..." progress indicator while any thread is alive.
            while threadsAlive(downloadThreads):
                sys.stdout.write(".")
                sys.stdout.flush()
                sleep(0.333)
                sys.stdout.write(".")
                sys.stdout.flush()
                sleep(0.333)
                sys.stdout.write(".")
                sys.stdout.flush()
                sleep(0.333)
                sys.stdout.write("\b\b\b   \b\b\b")
                sys.stdout.flush()
                sleep(0.333)
            msg = " \033[34m [DONE] \033[0m "
            msg += "(%1.2f sec)\n\n" % (time()-startTime)
            print(msg)
            outFile.write(" %s.fits " % (curGalName))
        outFile.write("\n")
        # If there are additional urls or fields requested for this object,
        # collect the extra downloaded file names per band in addNames.
        if (args.add_fields is not None) or (args.add_urls is not None):
            addNames = {band: [] for band in bandlist}
        else:
            addNames = None
        if args.add_urls is not None:
            # NOTE(review): this inner `line` shadows the outer loop
            # variable; harmless here since the outer value is not reused
            # afterwards, but fragile.
            for line in open(args.add_urls):
                if line.split()[0] == galName:
                    listOfAddUrls = line.split()[1:]
                    # NOTE(review): thereAreAddFields is assigned but never
                    # read anywhere in this script.
                    thereAreAddFields = True
                    for band in bandlist:
                        for url in listOfAddUrls:
                            # '*' in the url template is a placeholder for
                            # the passband letter.
                            urlBand = url.replace("*", band)
                            outNameAdd = "./downloads/%s_%s" % (galName, urlBand.split("/")[-1])
                            # [:-4] strips the '.bz2' extension.
                            addNames[band].append(outNameAdd[:-4])
                            print("Downloading additional field %s" % (outNameAdd))
                            subprocess.call("wget -nv -O %s %s" % (outNameAdd, urlBand), shell=True)
                    break
        if args.add_fields is not None:
            # Lines are formatted as "name: run rerun camcol field; ..."
            for line in open(args.add_fields):
                if line.split(":")[0] == galName:
                    listOfAddFields = line.split(":")[1]
                    for runData in listOfAddFields.split(";"):
                        # NOTE(review): these assignments shadow the
                        # run/rerun/camcol/field from the main field loop.
                        run = int(runData.split()[0])
                        rerun = int(runData.split()[1])
                        camcol = int(runData.split()[2])
                        field = int(runData.split()[3])
                        for band in bandlist:
                            url = getUrl(run, rerun, camcol, field, band)
                            outNameAdd = "./downloads/%s_%s" % (galName, url.split("/")[-1])
                            addNames[band].append(outNameAdd[:-4])
                            print("Downloading additional field %s" % (outNameAdd))
                            subprocess.call("wget -nv -O %s %s" % (outNameAdd, url), shell=True)
        # Concatenating fields with SWarp when there is more than one field
        # (or extra fields were added) for the object.
        if args.swarp and ((len(objFieldList) > 1) or (addNames is not None)):
            for band in bandlist:
                listOfImages = ["./downloads/%s_%i_%s.fits" % (galName, i, band) for i in range(len(objFieldList))]
                if (args.add_urls is not None) or (args.add_fields is not None):
                    listOfImages.extend(addNames[band])
                if args.convert:
                    GAINList, READOUTList, m0List, refM0 = reduce_to_same_m0(listOfImages)
                print("Running SWarp for %s band..." % (band))
                callSt = "%s -verbose_type quiet -BACK_TYPE MANUAL " % (swarpName)
                # '[0]' selects the primary HDU of each input file.
                callSt += " ".join(["%s[0]" % (s) for s in listOfImages])
                # NOTE(review): shell="True" passes a (truthy) string where
                # a boolean is expected; works, but shell=True is intended.
                subprocess.call(callSt, shell="True")
                # SWarp writes its result to ./coadd.fits plus auxiliary
                # files that are not needed afterwards.
                move("./coadd.fits", "./downloads/%s_%s.fits" % (galName, band))
                os.remove("coadd.weight.fits")
                os.remove("swarp.xml")
                if args.free:
                    for fN in listOfImages:
                        os.remove(fN)
                # store mean keywords to coadded file
                if args.convert:
                    hdu = fits.open("./downloads/%s_%s.fits" % (galName, band),
                                    do_not_scale_image_data=True,
                                    mode="update")
                    header = hdu[0].header
                    header["GAIN"] = np.mean(GAINList)
                    header["READOUT"] = np.mean(READOUTList)
                    header["M0"] = refM0
                    header["EXPTIME"] = 1.0
                    hdu.flush()
        # Convert singlefield images (no coaddition happened above).
        if args.convert and ((len(objFieldList) == 1) and (addNames is None)):
            for band in bandlist:
                fName = "./downloads/%s_0_%s.fits" % (galName, band)
                bunzip("%s.bz2" % (fName))
                prep_ima(fName)
                GAIN, READOUT, m0 = SDSS_dr8(fName)
                hdu = fits.open(fName, do_not_scale_image_data=True,
                                mode="update")
                header = hdu[0].header
                header["M0"] = m0
                header["EXPTIME"] = 1.0
                hdu.flush()
                # Drop the '_0' field index from the final file name.
                move("./downloads/%s_0_%s.fits" % (galName, band),
                     "./downloads/%s_%s.fits" % (galName, band))
        # Crop images around the object position (requires astropy WCS).
        if args.trim and (not wcsOK):
            print("Astropy module was not found. Images cannot be trimmed")
        elif args.trim and wcsOK:
            print("Cropping...")
            # At first determine the common size of cropping area
            # (so all images will be of the same size)
            pixelCoords = {}
            cropSizes = []
            for band in bandlist:
                fName = "./downloads/%s_%s.fits" % (galName, band)
                hdu = fits.open(fName)
                data = hdu[0].data
                wcs = WCS(fName)
                xGalPix, yGalPix = wcs.wcs_world2pix([[ra, dec]], 1)[0]
                # 0.396127 arcsec/pixel is the SDSS plate scale; clamp the
                # half-size so the crop stays inside the image.
                size = min(r_adj*60.0/0.396127, xGalPix, yGalPix,
                           data.shape[1]-xGalPix, data.shape[0]-yGalPix)
                hdu.close()
                pixelCoords[band] = (int(xGalPix), int(yGalPix))
                cropSizes.append(size)
            commonCropSize = int(min(cropSizes))
            for band in bandlist:
                fName = "./downloads/%s_%s.fits" % (galName, band)
                hdu = fits.open(fName)
                data = hdu[0].data
                header = hdu[0].header
                xCropMin = pixelCoords[band][0] - commonCropSize
                xCropMax = pixelCoords[band][0] + commonCropSize
                yCropMin = pixelCoords[band][1] - commonCropSize
                yCropMax = pixelCoords[band][1] + commonCropSize
                # NumPy image arrays are indexed [row, column] = [y, x].
                data = data[yCropMin:yCropMax, xCropMin:xCropMax]
                outHDU = fits.PrimaryHDU(data=data, header=header)
                fOutName = "./downloads/%s_%s_trim.fits" % (galName, band)
                if os.path.exists(fOutName):
                    os.remove(fOutName)
                outHDU.writeto(fOutName)
                hdu.close()
                if args.free:
                    os.remove(fName)
        # Downloading and processing are finished. Now we have to place files
        # to folders according to scatter option
        if args.scatter:
            # every object has a separate folder, where all files related
            # to this object are located
            fileList = glob.glob("./downloads/%s*.fits" % (galName))
            dst = "./downloads/%s/" % (galName)
            if not os.path.exists(dst):
                os.mkdir(dst)
            for src in fileList:
                move(src, dst)
        else:
            # scatter option is off, so all images taken in the same passband
            # will be in the same folder.
            for band in bandlist:
                fileList = glob.glob("./downloads/%s_*%s.fits" % (galName, band))
                if args.trim:
                    fileList.append("./downloads/%s_%s_trim.fits" % (galName, band))
                dst = "./downloads/%s/" % (band)
                for src in fileList:
                    if os.path.exists(src):
                        move(src, dst)
| latrop/sdss_downloader | sdss_downloader.py | Python | gpl-3.0 | 22,627 | [
"Galaxy"
] | dc4da2409a7cb493bc364dce9a57a6270c84382a63dd495d1daedda2b688df09 |
from typing import List, Tuple
import numpy as np
from deepchem.utils.typing import RDKitAtom, RDKitBond, RDKitMol
from deepchem.feat.graph_data import GraphData
from deepchem.feat.base_classes import MolecularFeaturizer
from deepchem.utils.molecule_feature_utils import get_atom_type_one_hot
from deepchem.utils.molecule_feature_utils import construct_hydrogen_bonding_info
from deepchem.utils.molecule_feature_utils import get_atom_hydrogen_bonding_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_hybridization_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_total_num_Hs_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_is_in_aromatic_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_chirality_one_hot
from deepchem.utils.molecule_feature_utils import get_atom_formal_charge
from deepchem.utils.molecule_feature_utils import get_atom_partial_charge
from deepchem.utils.molecule_feature_utils import get_atom_total_degree_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_type_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_is_in_same_ring_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_is_conjugated_one_hot
from deepchem.utils.molecule_feature_utils import get_bond_stereo_one_hot
def _construct_atom_feature(
    atom: RDKitAtom, h_bond_infos: List[Tuple[int, str]], use_chirality: bool,
    use_partial_charge: bool) -> np.ndarray:
  """Build the concatenated feature vector for one RDKit atom.

  Parameters
  ----------
  atom: rdkit.Chem.rdchem.Atom
    RDKit atom object
  h_bond_infos: List[Tuple[int, str]]
    A list of tuple `(atom_index, hydrogen_bonding_type)`.
    Basically, it is expected that this value is the return value of
    `construct_hydrogen_bonding_info`. The `hydrogen_bonding_type`
    value is "Acceptor" or "Donor".
  use_chirality: bool
    Whether to use chirality information or not.
  use_partial_charge: bool
    Whether to use partial charge data or not.

  Returns
  -------
  np.ndarray
    A one-hot vector of the atom feature.
  """
  # Collect the always-present feature pieces in a fixed order, then
  # append the optional pieces, and concatenate everything in one pass.
  pieces = [
      get_atom_type_one_hot(atom),
      get_atom_formal_charge(atom),
      get_atom_hybridization_one_hot(atom),
      get_atom_hydrogen_bonding_one_hot(atom, h_bond_infos),
      get_atom_is_in_aromatic_one_hot(atom),
      get_atom_total_degree_one_hot(atom),
      get_atom_total_num_Hs_one_hot(atom),
  ]
  if use_chirality:
    pieces.append(get_atom_chirality_one_hot(atom))
  if use_partial_charge:
    pieces.append(get_atom_partial_charge(atom))
  return np.concatenate(pieces)
def _construct_bond_feature(bond: RDKitBond) -> np.ndarray:
  """Build the concatenated feature vector for one RDKit bond.

  Parameters
  ---------
  bond: rdkit.Chem.rdchem.Bond
    RDKit bond object

  Returns
  -------
  np.ndarray
    A one-hot vector of the bond feature.
  """
  # Concatenate the four one-hot pieces directly, in the same order as
  # documented on MolGraphConvFeaturizer (type, ring, conjugation, stereo).
  return np.concatenate([
      get_bond_type_one_hot(bond),
      get_bond_is_in_same_ring_one_hot(bond),
      get_bond_is_conjugated_one_hot(bond),
      get_bond_stereo_one_hot(bond),
  ])
class MolGraphConvFeaturizer(MolecularFeaturizer):
  """This class is a featurizer of general graph convolution networks for molecules.
  The default node(atom) and edge(bond) representations are based on
  `WeaveNet paper <https://arxiv.org/abs/1603.00856>`_. If you want to use your own representations,
  you could use this class as a guide to define your original Featurizer. In many cases, it's enough
  to modify return values of `construct_atom_feature` or `construct_bond_feature`.
  The default node representation are constructed by concatenating the following values,
  and the feature length is 30.

  - Atom type: A one-hot vector of this atom, "C", "N", "O", "F", "P", "S", "Cl", "Br", "I", "other atoms".
  - Formal charge: Integer electronic charge.
  - Hybridization: A one-hot vector of "sp", "sp2", "sp3".
  - Hydrogen bonding: A one-hot vector of whether this atom is a hydrogen bond donor or acceptor.
  - Aromatic: A one-hot vector of whether the atom belongs to an aromatic ring.
  - Degree: A one-hot vector of the degree (0-5) of this atom.
  - Number of Hydrogens: A one-hot vector of the number of hydrogens (0-4) that this atom connected.
  - Chirality: A one-hot vector of the chirality, "R" or "S". (Optional)
  - Partial charge: Calculated partial charge. (Optional)

  The default edge representation are constructed by concatenating the following values,
  and the feature length is 11.

  - Bond type: A one-hot vector of the bond type, "single", "double", "triple", or "aromatic".
  - Same ring: A one-hot vector of whether the atoms in the pair are in the same ring.
  - Conjugated: A one-hot vector of whether this bond is conjugated or not.
  - Stereo: A one-hot vector of the stereo configuration of a bond.

  If you want to know more details about features, please check the paper [1]_ and
  utilities in deepchem.utils.molecule_feature_utils.py.

  Examples
  --------
  >>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
  >>> featurizer = MolGraphConvFeaturizer(use_edges=True)
  >>> out = featurizer.featurize(smiles)
  >>> type(out[0])
  <class 'deepchem.feat.graph_data.GraphData'>
  >>> out[0].num_node_features
  30
  >>> out[0].num_edge_features
  11

  References
  ----------
  .. [1] Kearnes, Steven, et al. "Molecular graph convolutions: moving beyond fingerprints."
     Journal of computer-aided molecular design 30.8 (2016):595-608.

  Notes
  -----
  This class requires RDKit to be installed.
  """

  def __init__(self,
               use_edges: bool = False,
               use_chirality: bool = False,
               use_partial_charge: bool = False):
    """
    Parameters
    ----------
    use_edges: bool, default False
      Whether to use edge features or not.
    use_chirality: bool, default False
      Whether to use chirality information or not.
      If True, featurization becomes slow.
    use_partial_charge: bool, default False
      Whether to use partial charge data or not.
      If True, this featurizer computes gasteiger charges.
      Therefore, there is a possibility to fail to featurize for some molecules
      and featurization becomes slow.
    """
    try:
      from rdkit.Chem import AllChem  # noqa
    except ModuleNotFoundError:
      raise ImportError("This method requires RDKit to be installed.")
    self.use_edges = use_edges
    self.use_partial_charge = use_partial_charge
    self.use_chirality = use_chirality

  def _featurize(self, mol: RDKitMol) -> GraphData:
    """Calculate molecule graph features from RDKit mol object.

    Parameters
    ----------
    mol: rdkit.Chem.rdchem.Mol
      RDKit mol object.

    Returns
    -------
    graph: GraphData
      A molecule graph with some features.
    """
    if self.use_partial_charge:
      try:
        mol.GetAtomWithIdx(0).GetProp('_GasteigerCharge')
      # BUG FIX: narrowed from a bare `except:` which also swallowed
      # KeyboardInterrupt/SystemExit.
      except Exception:
        # If partial charges were not computed
        from rdkit.Chem import AllChem
        AllChem.ComputeGasteigerCharges(mol)

    # construct atom (node) feature
    h_bond_infos = construct_hydrogen_bonding_info(mol)
    atom_features = np.asarray(
        [
            _construct_atom_feature(atom, h_bond_infos, self.use_chirality,
                                    self.use_partial_charge)
            for atom in mol.GetAtoms()
        ],
        # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin `float` is the documented equivalent (float64).
        dtype=float,
    )

    # construct edge (bond) index
    src, dest = [], []
    for bond in mol.GetBonds():
      # add edge list considering a directed graph
      start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
      src += [start, end]
      dest += [end, start]

    # construct edge (bond) feature
    bond_features = None  # default None
    if self.use_edges:
      bond_features = []
      for bond in mol.GetBonds():
        # each undirected bond contributes two directed edges
        bond_features += 2 * [_construct_bond_feature(bond)]
      # BUG FIX: `np.float` removed in NumPy 1.24 (see above).
      bond_features = np.asarray(bond_features, dtype=float)

    return GraphData(
        node_features=atom_features,
        # BUG FIX: `np.int` removed in NumPy 1.24; builtin `int` is the
        # documented equivalent.
        edge_index=np.asarray([src, dest], dtype=int),
        edge_features=bond_features)
| lilleswing/deepchem | deepchem/feat/molecule_featurizers/mol_graph_conv_featurizer.py | Python | mit | 8,431 | [
"RDKit"
] | 7a5083f9a02ff1ce94f69c1f353afb0a360b6866fca595f444e688a5fc25cebd |
#!/usr/bin/env python
# NOTE: multiprocessing import required for issues with nose tests.
# See: http://bugs.python.org/issue15881#msg170215
import multiprocessing
from setuptools import setup
# Package metadata for the reference_genome_maker library.  Sources live
# under src/ (see package_dir); tests are collected by nose (test_suite).
setup(
    name='reference_genome_maker',
    version='0.1',
    author='Church Lab',
    author_email='gleb@mit.edu',
    maintainer='Gleb Kuznetsov',
    maintainer_email='gleb@mit.edu',
    url='http://churchlab.github.io/millstone/',
    # Packages are looked up relative to src/, not the repository root.
    package_dir={'': 'src'},
    packages=['reference_genome_maker'],
    install_requires=[
        'biopython >= 1.6.1',
        'PyVCF >= 0.6.7'
    ],
    # Run with `python setup.py test`; delegates discovery to nose.
    test_suite='nose.collector',
)
| churchlab/reference_genome_maker | setup.py | Python | mit | 624 | [
"Biopython"
] | 8ee618dc982f4109d146048076b99971df8300dc262bbf91afbeaa667a29cb88 |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import re
import random
import shutil
import socket
import string
import json
import charms.leadership
from shlex import split
from subprocess import check_call
from subprocess import check_output
from subprocess import CalledProcessError
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import is_state
from charms.reactive import when, when_any, when_not
from charms.reactive.helpers import data_changed
from charms.kubernetes.common import get_version
from charms.kubernetes.common import retry
from charms.kubernetes.flagmanager import FlagManager
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import unitdata
from charmhelpers.core.templating import render
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
# FIX: use a raw string — '\.' in a normal string literal is an invalid
# escape sequence (DeprecationWarning since Python 3.6, SyntaxWarning in
# Python 3.12); the pattern itself is unchanged.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
# Make snap-installed binaries (e.g. /snap/bin/kubectl) resolvable from
# hooks, which do not inherit an interactive PATH.
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
def service_cidr():
    ''' Return the charm's service-cidr config.

    Once the CIDR has been frozen (see freeze_service_cidr) the stored
    value always wins over the live charm config. '''
    frozen = unitdata.kv().get('kubernetes-master.service-cidr')
    if frozen:
        return frozen
    return hookenv.config('service-cidr')
def freeze_service_cidr():
    ''' Freeze the service CIDR. Once the apiserver has started, we can no
    longer safely change this value. '''
    # Persist whichever CIDR is currently in effect; service_cidr() will
    # prefer this stored value from now on.
    unitdata.kv().set('kubernetes-master.service-cidr', service_cidr())
@hook('upgrade-charm')
def reset_states_for_delivery():
    '''An upgrade charm event was triggered by Juju, react to that here.'''
    # Move any pre-snap (debian-style) install to the snap layout, then
    # (re)install the snaps so a new channel/revision takes effect.
    migrate_from_pre_snaps()
    install_snaps()
    # Clearing these states makes the reactive handlers regenerate the
    # authentication material and restart the master components.
    remove_state('authentication.setup')
    remove_state('kubernetes-master.components.started')
def rename_file_idempotent(source, destination):
    """Move *source* to *destination*; do nothing when *source* is absent."""
    if not os.path.isfile(source):
        return
    os.rename(source, destination)
def migrate_from_pre_snaps():
    """One-time migration from the pre-snap (manually installed) layout.

    Clears obsolete reactive states, stops the legacy systemd services,
    moves auth material into /root/cdk, deletes the old binaries/config,
    and resets the flag managers so snap-based handlers start clean.
    All steps are idempotent, so re-running on upgrade is safe.
    """
    # remove old states
    remove_state('kubernetes.components.installed')
    remove_state('kubernetes.dashboard.available')
    remove_state('kube-dns.available')
    remove_state('kubernetes-master.app_version.set')

    # disable old services
    services = ['kube-apiserver',
                'kube-controller-manager',
                'kube-scheduler']
    for service in services:
        hookenv.log('Stopping {0} service.'.format(service))
        host.service_stop(service)

    # rename auth files into the new /root/cdk location (no-op when the
    # old files do not exist)
    os.makedirs('/root/cdk', exist_ok=True)
    rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
                           '/root/cdk/serviceaccount.key')
    rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
                           '/root/cdk/basic_auth.csv')
    rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
                           '/root/cdk/known_tokens.csv')

    # cleanup old files
    files = [
        "/lib/systemd/system/kube-apiserver.service",
        "/lib/systemd/system/kube-controller-manager.service",
        "/lib/systemd/system/kube-scheduler.service",
        "/etc/default/kube-defaults",
        "/etc/default/kube-apiserver.defaults",
        "/etc/default/kube-controller-manager.defaults",
        "/etc/default/kube-scheduler.defaults",
        "/srv/kubernetes",
        "/home/ubuntu/kubectl",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kube-apiserver",
        "/usr/local/bin/kube-controller-manager",
        "/usr/local/bin/kube-scheduler",
        "/etc/kubernetes"
    ]
    for file in files:
        if os.path.isdir(file):
            hookenv.log("Removing directory: " + file)
            shutil.rmtree(file)
        elif os.path.isfile(file):
            hookenv.log("Removing file: " + file)
            os.remove(file)

    # clear the flag managers so stale pre-snap CLI flags are not reused
    FlagManager('kube-apiserver').destroy_all()
    FlagManager('kube-controller-manager').destroy_all()
    FlagManager('kube-scheduler').destroy_all()
def install_snaps():
    """Install all master snaps from the configured channel, reporting a
    maintenance status message for each one."""
    channel = hookenv.config('channel')
    # (snap name, classic confinement?) — only kubectl needs classic.
    master_snaps = [
        ('kubectl', True),
        ('kube-apiserver', False),
        ('kube-controller-manager', False),
        ('kube-scheduler', False),
        ('cdk-addons', False),
    ]
    for snap_name, classic in master_snaps:
        hookenv.status_set('maintenance',
                           'Installing %s snap' % snap_name)
        if classic:
            snap.install(snap_name, channel=channel, classic=True)
        else:
            snap.install(snap_name, channel=channel)
    set_state('kubernetes-master.snaps.installed')
@when('config.changed.channel')
def channel_changed():
    # Re-install every snap whenever the 'channel' config option changes
    # so the deployment follows the newly selected snap channel.
    install_snaps()
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
    ''' Set master configuration on the CNI relation. This lets the CNI
    subordinate know that we're the master so it can respond accordingly. '''
    # The master does not run a kubelet, so no kubeconfig path is provided.
    cni.set_config(is_master=True, kubeconfig_path='')
@when('leadership.is_leader')
@when_not('authentication.setup')
def setup_leader_authentication():
    '''Setup basic authentication and token access for the cluster.

    Runs on the Juju leader only.  Reuses credentials from a previous
    leadership broadcast when available; otherwise generates basic-auth,
    token, and service-account key material, then broadcasts the file
    contents via leadership data so non-leader units can fetch them.
    '''
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')

    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'

    api_opts.add('basic-auth-file', basic_auth)
    api_opts.add('token-auth-file', known_tokens)
    hookenv.status_set('maintenance', 'Rendering authentication templates.')

    keys = [service_key, basic_auth, known_tokens]
    # Try first to fetch data from an old leadership broadcast.
    if not get_keys_from_leader(keys):
        # No broadcast available: generate whatever is still missing.
        if not os.path.isfile(basic_auth):
            setup_basic_auth('admin', 'admin', 'admin')
        if not os.path.isfile(known_tokens):
            setup_tokens(None, 'admin', 'admin')
            setup_tokens(None, 'kubelet', 'kubelet')
            setup_tokens(None, 'kube_proxy', 'kube_proxy')
        # Generate the default service account token key
        os.makedirs('/root/cdk', exist_ok=True)
        if not os.path.isfile(service_key):
            cmd = ['openssl', 'genrsa', '-out', service_key,
                   '2048']
            check_call(cmd)
    api_opts.add('service-account-key-file', service_key)
    controller_opts.add('service-account-private-key-file', service_key)

    # read service account key for syndication
    leader_data = {}
    for f in [known_tokens, basic_auth, service_key]:
        with open(f, 'r') as fp:
            leader_data[f] = fp.read()

    # this is slightly opaque, but we are sending file contents under its file
    # path as a key.
    # eg:
    # {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
    charms.leadership.leader_set(leader_data)

    set_state('authentication.setup')
@when_not('leadership.is_leader')
@when_not('authentication.setup')
def setup_non_leader_authentication():
    '''Fetch the cluster credentials broadcast by the leader and register
    them with the apiserver/controller-manager flag managers.

    When the leader has not broadcast all key material yet, this returns
    without setting 'authentication.setup' so the handler retries on the
    next reactive dispatch.
    '''
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')

    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'

    hookenv.status_set('maintenance', 'Rendering authentication templates.')

    keys = [service_key, basic_auth, known_tokens]
    if not get_keys_from_leader(keys):
        # the keys were not retrieved. Non-leaders have to retry.
        return

    api_opts.add('basic-auth-file', basic_auth)
    api_opts.add('token-auth-file', known_tokens)
    api_opts.add('service-account-key-file', service_key)
    controller_opts.add('service-account-private-key-file', service_key)

    set_state('authentication.setup')
def get_keys_from_leader(keys):
    """
    Gets the broadcasted keys from the leader and stores them in
    the corresponding files.

    Args:
        keys: list of keys. Keys are actually files on the FS.

    Returns: True if all key were fetched, False if not.
    """
    # This races with other codepaths, and seems to require being created
    # first.  This block may be extracted later, but for now seems to work
    # as intended.
    os.makedirs('/root/cdk', exist_ok=True)

    for key_path in keys:
        # Anything already on disk is assumed valid; only fetch the rest.
        if os.path.exists(key_path):
            continue
        # Fetch the file contents from the leadership broadcast (keyed by
        # the file path itself).
        contents = charms.leadership.leader_get(key_path)
        if contents is None:
            # Leader has not broadcast yet: report and let the caller retry.
            hookenv.status_set('waiting', "Waiting on leaders crypto keys.")
            hookenv.log('Missing content for file {}'.format(key_path))
            return False
        with open(key_path, 'w+') as fp:
            fp.write(contents)
    return True
@when('kubernetes-master.snaps.installed')
def set_app_version():
    ''' Declare the application version to juju '''
    version = check_output(['kube-apiserver', '--version'])
    # Everything after the last b' v' in the output is taken as the
    # version string; trailing whitespace/newline is stripped.
    hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('cdk-addons.configured')
def idle_status():
    ''' Signal at the end of the run that we are running. '''
    # Guard-clause form: report the first applicable condition and stop.
    if not all_kube_system_pods_running():
        hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
        return
    if hookenv.config('service-cidr') != service_cidr():
        # The CIDR is frozen once the apiserver starts; warn when the
        # charm config has diverged from the value actually in use.
        hookenv.status_set(
            'active',
            'WARN: cannot change service-cidr, still using ' + service_cidr())
        return
    hookenv.status_set('active', 'Kubernetes master running.')
@when('etcd.available', 'tls_client.server.certificate.saved',
      'authentication.setup')
@when_not('kubernetes-master.components.started')
def start_master(etcd):
    '''Run the Kubernetes master components.'''
    hookenv.status_set('maintenance',
                       'Configuring the Kubernetes master services.')
    # The service CIDR cannot change once the apiserver is running, so pin
    # the current value before starting anything.
    freeze_service_cidr()
    handle_etcd_relation(etcd)
    configure_master_services()
    hookenv.status_set('maintenance',
                       'Starting the Kubernetes master services.')

    services = ['kube-apiserver',
                'kube-controller-manager',
                'kube-scheduler']
    for service in services:
        # Snap-packaged services are named 'snap.<name>.daemon'.
        host.service_restart('snap.%s.daemon' % service)

    # 6443 is the apiserver's secure port.
    hookenv.open_port(6443)
    set_state('kubernetes-master.components.started')
@when('etcd.available')
def etcd_data_change(etcd):
    ''' Etcd scale events block master reconfiguration due to the
    kubernetes-master.components.started state. We need a way to
    handle these events consistenly only when the number of etcd
    units has actually changed '''

    # key off of the connection string
    connection_string = etcd.get_connection_string()

    # If the connection string changes, remove the started state to trigger
    # handling of the master components (start_master re-runs on the next
    # reactive dispatch).
    if data_changed('etcd-connect', connection_string):
        remove_state('kubernetes-master.components.started')
@when('kube-control.connected')
@when('cdk-addons.configured')
def send_cluster_dns_detail(kube_control):
    ''' Send cluster DNS info '''
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # where we're going to put it, though, so let's send the info anyway.
    dns_ip = get_dns_ip()
    # Port 53 is the standard DNS port.
    kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip)
@when_not('kube-control.connected')
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if operator is upgrading a
    a charm in a deployment that pre-dates the kube-control relation, it'll be
    missing.

    """
    # 'blocked' status surfaces the required `juju relate` command directly
    # in `juju status` output.
    hookenv.status_set(
        'blocked',
        'Relate {}:kube-control kubernetes-worker:kube-control'.format(
            hookenv.service_name()))
@when('kube-api-endpoint.available')
def push_service_data(kube_api):
    ''' Send configuration to the load balancer, and close access to the
    public interface '''
    # 6443 is the apiserver's secure port; the address is already known to
    # the relation, so only the port needs to be advertised.
    kube_api.configure(port=6443)
@when('certificates.available')
def send_data(tls):
    '''Send the data that is required to create a server certificate for
    this server.'''
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()

    # Get the SDN gateway based on the cidr address.
    kubernetes_service_ip = get_kubernetes_service_ip()

    domain = hookenv.config('dns_domain')
    # Create SANs that the tls layer will add to the server cert.
    # These cover every address/name a client might use to reach the
    # apiserver: public/private IPs, hostname, the in-cluster service IP,
    # and the standard in-cluster DNS names.
    sans = [
        hookenv.unit_public_ip(),
        hookenv.unit_private_ip(),
        socket.gethostname(),
        kubernetes_service_ip,
        'kubernetes',
        'kubernetes.{0}'.format(domain),
        'kubernetes.default',
        'kubernetes.default.svc',
        'kubernetes.default.svc.{0}'.format(domain)
    ]
    # Create a path safe name by removing path characters from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')
    # Request a server cert with this information.
    tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api.connected')
def push_api_data(kube_api):
    ''' Send configuration to remote consumer.'''
    # Since all relations already have the private ip address, only
    # send the port on the relation object to all consumers.
    # The kubernetes api-server uses 6443 for the default secure port.
    # (The port is sent as a string, matching the relation interface.)
    kube_api.set_api_port('6443')
@when('kubernetes-master.components.started')
def configure_cdk_addons():
    ''' Configure CDK addons '''
    # Drop the state first so a failure below leaves us unconfigured and
    # the handler re-runs on the next dispatch.
    remove_state('cdk-addons.configured')
    # snap config values are strings; lower-case the boolean for the snap.
    dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower()
    args = [
        'arch=' + arch(),
        'dns-ip=' + get_dns_ip(),
        'dns-domain=' + hookenv.config('dns_domain'),
        'enable-dashboard=' + dbEnabled
    ]
    check_call(['snap', 'set', 'cdk-addons'] + args)
    if not addons_ready():
        hookenv.status_set('waiting', 'Waiting to retry addon deployment')
        remove_state('cdk-addons.configured')
        return

    set_state('cdk-addons.configured')
@retry(times=3, delay_secs=20)
def addons_ready():
    """Probe whether the addons were applied successfully.

    Returns: True if `cdk-addons.apply` exited cleanly, False otherwise.
    """
    try:
        check_call(['cdk-addons.apply'])
    except CalledProcessError:
        hookenv.log("Addons are not ready yet.")
        return False
    return True
@when('loadbalancer.available', 'certificates.ca.available',
      'certificates.client.cert.available')
def loadbalancer_kubeconfig(loadbalancer, ca, client):
    '''Build the user kubeconfig against the load balancer endpoint.'''
    # The relation may expose several balancers; use the first entry's
    # public address and port as the cluster endpoint for users.
    hosts = loadbalancer.get_addresses_ports()
    first = hosts[0]
    server = 'https://{0}:{1}'.format(first.get('public-address'),
                                      first.get('port'))
    build_kubeconfig(server)
@when('certificates.ca.available', 'certificates.client.cert.available')
@when_not('loadbalancer.available')
def create_self_config(ca, client):
    '''Create a kubernetes configuration for the master unit.'''
    # No load balancer: point the kubeconfig at this unit directly.
    address = hookenv.unit_get('public-address')
    build_kubeconfig('https://{0}:{1}'.format(address, 6443))
@when('ceph-storage.available')
def ceph_state_control(ceph_admin):
    '''Clear the ceph-storage.configured state whenever the relation data
    used to render ceph configs, keys, or service pre-reqs has changed,
    forcing a re-render on a later hook.'''
    relation_snapshot = {
        'mon_hosts': ceph_admin.mon_hosts(),
        'fsid': ceph_admin.fsid(),
        'auth_supported': ceph_admin.auth(),
        'hostname': socket.gethostname(),
        'key': ceph_admin.key()
    }
    # data_changed caches the previous snapshot; True means re-render.
    if data_changed('ceph-config', relation_snapshot):
        remove_state('ceph-storage.configured')
@when('ceph-storage.available')
@when_not('ceph-storage.configured')
def ceph_storage(ceph_admin):
    '''Ceph on kubernetes will require a few things - namely a ceph
    configuration, and the ceph secret key file used for authentication.
    This method will install the client package, and render the requisit files
    in order to consume the ceph-storage relation.

    Sets ceph-storage.configured on success; returns early (so the handler
    re-executes later) when the key is missing or the secret enlistment
    fails.
    '''
    ceph_context = {
        'mon_hosts': ceph_admin.mon_hosts(),
        'fsid': ceph_admin.fsid(),
        'auth_supported': ceph_admin.auth(),
        'use_syslog': "true",
        'ceph_public_network': '',
        'ceph_cluster_network': '',
        'loglevel': 1,
        'hostname': socket.gethostname(),
    }
    # Install the ceph common utilities.
    apt_install(['ceph-common'], fatal=True)
    etc_ceph_directory = '/etc/ceph'
    if not os.path.isdir(etc_ceph_directory):
        os.makedirs(etc_ceph_directory)
    charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
    # Render the ceph configuration from the ceph conf template
    render('ceph.conf', charm_ceph_conf, ceph_context)
    # The key can rotate independently of other ceph config, so validate it
    admin_key = os.path.join(etc_ceph_directory,
                             'ceph.client.admin.keyring')
    try:
        with open(admin_key, 'w') as key_file:
            key_file.write("[client.admin]\n\tkey = {}\n".format(
                ceph_admin.key()))
    except IOError as err:
        hookenv.log("IOError writing admin.keyring: {}".format(err))
    # Enlist the ceph-admin key as a kubernetes secret
    if ceph_admin.key():
        encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
    else:
        # We didn't have a key, and cannot proceed. Do not set state and
        # allow this method to re-execute
        return
    context = {'secret': encoded_key.decode('ascii')}
    render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
    try:
        # At first glance this is deceptive. The apply stanza will create if
        # it doesn't exist, otherwise it will update the entry, ensuring our
        # ceph-secret is always reflective of what we have in /etc/ceph
        # assuming we have invoked this anytime that file would change.
        cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
        check_call(cmd)
        os.remove('/tmp/ceph-secret.yaml')
    except Exception:
        # Fix: was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt. Keep the best-effort behavior for ordinary
        # errors only: the enlistment failed, return and prepare for re-exec.
        return
    # when complete, set a state relating to configuration of the storage
    # backend that will allow other modules to hook into this and verify we
    # have performed the necessary pre-req steps to interface with a ceph
    # deployment.
    set_state('ceph-storage.configured')
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
    '''Perform the first-time NRPE configuration for this unit.'''
    # Flag first so this handler only ever runs once per relation.
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
@when('kubernetes-master.components.started')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
    '''(Re)write the NRPE checks for the master's systemd services.'''
    # Systemd units to monitor on the master.
    monitored = (
        'snap.kube-apiserver.daemon',
        'snap.kube-controller-manager.daemon',
        'snap.kube-scheduler.daemon'
    )
    host = nrpe.get_nagios_hostname()
    unit = nrpe.get_nagios_unit_name()
    checks = nrpe.NRPE(hostname=host)
    nrpe.add_init_service_checks(checks, monitored, unit)
    checks.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
    '''Drop the NRPE checks once the nagios relation goes away.'''
    remove_state('nrpe-external-master.initial-config')
    # List of systemd services for which the checks will be removed
    monitored = (
        'snap.kube-apiserver.daemon',
        'snap.kube-controller-manager.daemon',
        'snap.kube-scheduler.daemon'
    )
    # The current nrpe-external-master interface doesn't handle a lot of
    # logic, use the charm-helpers code for now.
    checks = nrpe.NRPE(hostname=nrpe.get_nagios_hostname())
    for svc in monitored:
        checks.remove_check(shortname=svc)
def is_privileged():
    """Return boolean indicating whether or not to set allow-privileged=true.
    """
    setting = hookenv.config('allow-privileged')
    # 'auto' defers to whether any worker advertised GPUs.
    if setting == 'auto':
        return is_state('kubernetes-master.gpu.enabled')
    return setting == 'true'
@when('config.changed.allow-privileged')
@when('kubernetes-master.components.started')
def on_config_allow_privileged_change():
    """React to changed 'allow-privileged' config value.
    """
    # Dropping the started state forces the master services to be
    # reconfigured with the new privilege setting.
    remove_state('kubernetes-master.components.started')
    remove_state('config.changed.allow-privileged')
@when('kube-control.gpu.available')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.gpu.enabled')
def on_gpu_available(kube_control):
    """The remote side (kubernetes-worker) is gpu-enabled.

    We need to run in privileged mode.
    """
    if hookenv.config()['allow-privileged'] == "false":
        # Privileged mode explicitly disabled; surface a hint instead.
        hookenv.status_set(
            'active',
            'GPUs available. Set allow-privileged="auto" to enable.'
        )
        return
    remove_state('kubernetes-master.components.started')
    set_state('kubernetes-master.gpu.enabled')
@when('kubernetes-master.gpu.enabled')
@when_not('kubernetes-master.privileged')
def disable_gpu_mode():
    """We were in gpu mode, but the operator has set allow-privileged="false",
    so we can't run in gpu mode anymore.
    """
    # Clearing the state lets on_gpu_available re-evaluate later.
    remove_state('kubernetes-master.gpu.enabled')
def arch():
    '''Return the package architecture as a string. Raise an exception if the
    architecture is not supported by kubernetes.'''
    # dpkg prints e.g. b'amd64\n'; decode to text and trim the newline.
    raw = check_output(['dpkg', '--print-architecture'])
    return raw.decode('utf-8').rstrip()
def build_kubeconfig(server):
    '''Gather the relevant data for Kubernetes configuration objects and create
    a config object with that information.'''
    # Paths to the tls material from the tls-client layer options.
    layer_options = layer.options('tls-client')
    ca = layer_options.get('ca_certificate_path')
    key = layer_options.get('client_key_path')
    cert = layer_options.get('client_certificate_path')
    # Bail out unless every credential file is present on disk.
    if not all(p and os.path.isfile(p) for p in (ca, key, cert)):
        return
    # Cache last server string to know if we need to regenerate the config.
    if not data_changed('kubeconfig.server', server):
        return
    # Absolute path for the kubeconfig file.
    kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
    # Create the kubeconfig on this system so users can access the cluster.
    create_kubeconfig(kubeconfig_path, server, ca, key, cert)
    # Make the config file readable by the ubuntu user so juju scp works.
    check_call(['chown', 'ubuntu:ubuntu', kubeconfig_path])
def create_kubeconfig(kubeconfig, server, ca, key, certificate, user='ubuntu',
                      context='juju-context', cluster='juju-cluster'):
    '''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.'''
    commands = [
        # Register the cluster with its server address and CA.
        'kubectl config --kubeconfig={0} set-cluster {1} '
        '--server={2} --certificate-authority={3} --embed-certs=true'.format(
            kubeconfig, cluster, server, ca),
        # Register the client credentials.
        'kubectl config --kubeconfig={0} set-credentials {1} '
        '--client-key={2} --client-certificate={3} --embed-certs=true'.format(
            kubeconfig, user, key, certificate),
        # Create a default context tying cluster and user together.
        'kubectl config --kubeconfig={0} set-context {1} '
        '--cluster={2} --user={3}'.format(kubeconfig, context, cluster, user),
        # Make the config use this new context.
        'kubectl config --kubeconfig={0} use-context {1}'.format(
            kubeconfig, context),
    ]
    for command in commands:
        check_call(split(command))
def get_dns_ip():
    '''Get an IP address for the DNS server on the provided cidr.'''
    # Network address portion of the service cidr (strip the '/NN' range).
    network = service_cidr().split('/')[0]
    # Replace the last octet with 10 for the DNS service address.
    return network.rsplit('.', 1)[0] + '.10'
def get_kubernetes_service_ip():
    '''Get the IP address for the kubernetes service based on the cidr.'''
    # Network address portion of the service cidr (strip the '/NN' range).
    network = service_cidr().split('/')[0]
    # Replace the last octet with 1 for the kubernetes service address.
    return network.rsplit('.', 1)[0] + '.1'
def handle_etcd_relation(reldata):
    ''' Save the client credentials and set appropriate daemon flags when
    etcd declares itself as available'''
    connection_string = reldata.get_connection_string()
    # Define where the etcd tls files will be kept.
    etcd_dir = '/root/cdk/etcd'
    # Create paths to the etcd client ca, key, and cert file locations.
    ca = os.path.join(etcd_dir, 'client-ca.pem')
    key = os.path.join(etcd_dir, 'client-key.pem')
    cert = os.path.join(etcd_dir, 'client-cert.pem')
    # Save the client credentials (in relation data) to the paths provided.
    reldata.save_client_credentials(key, cert, ca)
    api_opts = FlagManager('kube-apiserver')
    # Never use stale data, always prefer whats coming in during context
    # building. if its stale, its because whats in unitdata is stale
    data = api_opts.data
    if data.get('etcd-servers-strict') or data.get('etcd-servers'):
        # Drop any previously-stored etcd flags before re-adding them so
        # the apiserver never sees a mix of old and new etcd settings.
        api_opts.destroy('etcd-cafile')
        api_opts.destroy('etcd-keyfile')
        api_opts.destroy('etcd-certfile')
        api_opts.destroy('etcd-servers', strict=True)
        api_opts.destroy('etcd-servers')
    # Set the apiserver flags in the options manager
    api_opts.add('etcd-cafile', ca)
    api_opts.add('etcd-keyfile', key)
    api_opts.add('etcd-certfile', cert)
    api_opts.add('etcd-servers', connection_string, strict=True)
def configure_master_services():
    ''' Add remaining flags for the master services and configure snaps to use
    them '''
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')
    scheduler_opts = FlagManager('kube-scheduler')
    # NOTE(review): 'v' is added to scheduler_opts again near the end of
    # this function -- this first add looks redundant; confirm whether
    # FlagManager.add is idempotent before removing either one.
    scheduler_opts.add('v', '2')
    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')
    if is_privileged():
        api_opts.add('allow-privileged', 'true', strict=True)
        set_state('kubernetes-master.privileged')
    else:
        api_opts.add('allow-privileged', 'false', strict=True)
        remove_state('kubernetes-master.privileged')
    # Handle static options for now
    api_opts.add('service-cluster-ip-range', service_cidr())
    api_opts.add('min-request-timeout', '300')
    api_opts.add('v', '4')
    api_opts.add('client-ca-file', ca_cert_path)
    api_opts.add('tls-cert-file', server_cert_path)
    api_opts.add('tls-private-key-file', server_key_path)
    api_opts.add('kubelet-certificate-authority', ca_cert_path)
    api_opts.add('kubelet-client-certificate', client_cert_path)
    api_opts.add('kubelet-client-key', client_key_path)
    api_opts.add('logtostderr', 'true')
    # Insecure port is bound to localhost only; the controller-manager and
    # scheduler below are pointed at it via their 'master' flags.
    api_opts.add('insecure-bind-address', '127.0.0.1')
    api_opts.add('insecure-port', '8080')
    api_opts.add('storage-backend', 'etcd2') # FIXME: add etcd3 support
    admission_control = [
        'Initializers',
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'ResourceQuota',
        'DefaultTolerationSeconds'
    ]
    # DefaultTolerationSeconds is only available from kube-apiserver 1.6 on.
    if get_version('kube-apiserver') < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control.remove('DefaultTolerationSeconds')
    api_opts.add('admission-control', ','.join(admission_control), strict=True)
    # Default to 3 minute resync. TODO: Make this configureable?
    controller_opts.add('min-resync-period', '3m')
    controller_opts.add('v', '2')
    controller_opts.add('root-ca-file', ca_cert_path)
    controller_opts.add('logtostderr', 'true')
    controller_opts.add('master', 'http://127.0.0.1:8080')
    scheduler_opts.add('v', '2')
    scheduler_opts.add('logtostderr', 'true')
    scheduler_opts.add('master', 'http://127.0.0.1:8080')
    # Hand the accumulated flag strings to each snap via 'snap set'.
    cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
    check_call(cmd)
    cmd = (
        ['snap', 'set', 'kube-controller-manager'] +
        controller_opts.to_s().split(' ')
    )
    check_call(cmd)
    cmd = ['snap', 'set', 'kube-scheduler'] + scheduler_opts.to_s().split(' ')
    check_call(cmd)
def setup_basic_auth(username='admin', password='admin', user='admin'):
    '''Create the htacces file and the tokens.'''
    root_cdk = '/root/cdk'
    if not os.path.isdir(root_cdk):
        os.makedirs(root_cdk)
    # One CSV line consumed by the kube-apiserver basic-auth file.
    htaccess = os.path.join(root_cdk, 'basic_auth.csv')
    with open(htaccess, 'w') as handle:
        handle.write('{0},{1},{2}'.format(username, password, user))
def setup_tokens(token, username, user):
    '''Create a token file for kubernetes authentication.'''
    root_cdk = '/root/cdk'
    if not os.path.isdir(root_cdk):
        os.makedirs(root_cdk)
    known_tokens = os.path.join(root_cdk, 'known_tokens.csv')
    if not token:
        # Generate a random 32-character alphanumeric token.
        alphabet = string.ascii_letters + string.digits
        rng = random.SystemRandom()
        token = ''.join(rng.choice(alphabet) for _ in range(32))
    # Append so existing tokens for other users are preserved.
    with open(known_tokens, 'a') as handle:
        handle.write('{0},{1},{2}\n'.format(token, username, user))
@retry(times=3, delay_secs=10)
def all_kube_system_pods_running():
    ''' Check pod status in the kube-system namespace. Returns True if all
    pods are running, False otherwise. '''
    cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']
    try:
        output = check_output(cmd).decode('utf-8')
    except CalledProcessError:
        hookenv.log('failed to get kube-system pod status')
        return False
    pods = json.loads(output)['items']
    return all(pod['status']['phase'] == 'Running' for pod in pods)
def apiserverVersion():
    '''Return the local kube-apiserver version as a tuple of the first
    three integers found in `kube-apiserver --version` output.'''
    version_string = check_output(['kube-apiserver', '--version']).decode('utf-8')
    numbers = re.findall("[0-9]+", version_string)[:3]
    return tuple(int(q) for q in numbers)
| wonderfly/kubernetes | cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | Python | apache-2.0 | 32,431 | [
"CDK"
] | 7181a15e73d57b8ed5438e4e394aea229d5689bb0fb03103d08f5eb18164c509 |
from jasp import *
import uuid
import textwrap
# http://cms.mpi.univie.ac.at/vasp/vasp/Files_used_VASP.html
# File names copied by clone() and archived by archive() below.
# NOTE(review): 'stout' looks like a typo for 'stdout' -- confirm before
# changing, since renaming alters which file actually gets copied.
vaspfiles = ['INCAR', 'STOPCAR', 'stout', 'POTCAR',
             'OUTCAR', 'vasprun.xml',
             'KPOINTS', 'IBZKPT', 'POSCAR', 'CONTCAR',
             'EXHCAR', 'CHGCAR', 'CHG', 'WAVECAR',
             'TMPCAR', 'EIGENVAL', 'DOSCAR', 'PROCAR',
             'OSZICAR', 'PCDAT', 'XDATCAR', 'LOCPOT',
             'ELFCAR', 'PROOUT', 'ase-sort.dat', 'METADATA']
def clone(self, newdir, extra_files=None):
    '''copy a vasp directory to a new directory. Does not overwrite
    existing files. newdir is relative to the the directory the
    calculator was created from, not the current working directory,
    unless an absolute path is used.

    :param newdir: destination directory (created if needed).
    :param extra_files: optional extra file names to copy in addition
        to the standard VASP files in ``vaspfiles``.

    what to do about METADATA, the uuid will be wrong!
    '''
    if extra_files is None:
        extra_files = []
    if os.path.isabs(newdir):
        newdirpath = newdir
    else:
        newdirpath = os.path.join(self.cwd, newdir)
    import shutil
    if not os.path.isdir(newdirpath):
        os.makedirs(newdirpath)
    # Only copy files that exist here and are absent at the target
    # (no overwrites).
    for vf in vaspfiles+extra_files:
        if (not os.path.exists(os.path.join(newdirpath, vf))
            and os.path.exists(vf)):
            shutil.copy(vf, newdirpath)
    # if we are an neb calculation we need to copy the image
    # directories
    if hasattr(self, 'neb'):
        import glob
        for imagedir in glob.glob('0[0-9]'):
            dst = os.path.join(newdirpath, imagedir)
            if not os.path.exists(dst):
                shutil.copytree(imagedir, dst)
    # update metadata. remember we are in the vaspdir
    d = {}
    # Give the clone its own uuid and a creation timestamp.
    d['uuid'] = str(uuid.uuid1())
    d['cloned on'] = time.ctime(time.time())
    # chdir back so the (possibly relative) newdir resolves, then reopen
    # the clone with jasp to update its metadata.
    os.chdir(self.cwd)
    from jasp import jasp
    with jasp(newdir) as calc:
        if hasattr(calc, 'metadata'):
            calc.metadata.update(d)
            calc.write_metadata()
    # restore the calculation directory as the working directory
    os.chdir(self.vaspdir)
Vasp.clone = clone
def archive(self, archive='vasp', extra_files=None, append=False):
    '''
    Create an archive file (.tar.gz) of the vasp files in the current
    directory. This is a way to save intermediate results.

    :param archive: base archive name; '.tar.gz' is appended if missing.
    :param extra_files: optional list of extra file names to include.
    :param append: if True, append to an existing archive instead of
        refusing to overwrite it.
    '''
    import tarfile
    # Fix: the original signature used a mutable default ``extra_files=[]``
    # which is shared across calls and can accumulate entries.
    if extra_files is None:
        extra_files = []
    if not archive.endswith('.tar.gz'):
        archive = archive + '.tar.gz'
    if not append and os.path.exists(archive):
        # we do not overwrite existing archives except to append
        return None
    elif append and os.path.exists(archive):
        mode = 'a:gz'
    else:
        mode = 'w:gz'
    f = tarfile.open(archive, mode)
    for vf in vaspfiles + extra_files:
        if os.path.exists(vf):
            f.add(vf)
    # if we are an neb calculation we need to copy the image
    # directories
    if hasattr(self, 'neb'):
        import glob
        for imagedir in glob.glob('0[0-9]'):
            f.add(imagedir)
    f.close()
Vasp.archive = archive
def get_pseudopotentials(self):
from os.path import join, isfile, islink
''' this is almost the exact code from the original initialize
function, but all it does is get the pseudpotentials paths, and
the git-hash for each one
'''
atoms = self.get_atoms()
p = self.input_params
self.all_symbols = atoms.get_chemical_symbols()
self.natoms = len(atoms)
# jrk 10/21/2013 I commented this line out as it was causing an
# error in serialize by incorrectly resetting spinpol. I do not see
# why this should be set here. It is not used in the function.
# self.spinpol = atoms.get_initial_magnetic_moments().any()
atomtypes = atoms.get_chemical_symbols()
# Determine the number of atoms of each atomic species
# sorted after atomic species
special_setups = []
symbols = {}
if self.input_params['setups']:
for m in self.input_params['setups']:
try:
special_setups.append(int(m))
except:
continue
for m, atom in enumerate(atoms):
symbol = atom.symbol
if m in special_setups:
pass
else:
if symbol not in symbols:
symbols[symbol] = 1
else:
symbols[symbol] += 1
# Build the sorting list
self.sort = []
self.sort.extend(special_setups)
for symbol in symbols:
for m, atom in enumerate(atoms):
if m in special_setups:
pass
else:
if atom.symbol == symbol:
self.sort.append(m)
self.resort = range(len(self.sort))
for n in range(len(self.resort)):
self.resort[self.sort[n]] = n
self.atoms_sorted = atoms[self.sort]
# Check if the necessary POTCAR files exists and
# create a list of their paths.
self.symbol_count = []
for m in special_setups:
self.symbol_count.append([atomtypes[m], 1])
for m in symbols:
self.symbol_count.append([m, symbols[m]])
sys.stdout.flush()
xc = '/'
if p['xc'] == 'PW91':
xc = '_gga/'
elif p['xc'] == 'PBE':
xc = '_pbe/'
if 'VASP_PP_PATH' in os.environ:
pppaths = os.environ['VASP_PP_PATH'].split(':')
else:
pppaths = []
self.ppp_list = []
# Setting the pseudopotentials, first special setups and
# then according to symbols
for m in special_setups:
name = 'potpaw'+xc.upper() + p['setups'][str(m)] + '/POTCAR'
found = False
for path in pppaths:
filename = join(path, name)
if isfile(filename) or islink(filename):
found = True
self.ppp_list.append(filename)
break
elif isfile(filename + '.Z') or islink(filename + '.Z'):
found = True
self.ppp_list.append(filename+'.Z')
break
if not found:
log.debug('Looked for %s' % name)
print 'Looked for %s' % name
raise RuntimeError('No pseudopotential for %s:%s!' % (symbol,
name))
for symbol in symbols:
try:
name = 'potpaw' + xc.upper() + symbol + p['setups'][symbol]
except (TypeError, KeyError):
name = 'potpaw' + xc.upper() + symbol
name += '/POTCAR'
found = False
for path in pppaths:
filename = join(path, name)
if isfile(filename) or islink(filename):
found = True
self.ppp_list.append(filename)
break
elif isfile(filename + '.Z') or islink(filename + '.Z'):
found = True
self.ppp_list.append(filename+'.Z')
break
if not found:
print '''Looking for %s
The pseudopotentials are expected to be in:
LDA: $VASP_PP_PATH/potpaw/
PBE: $VASP_PP_PATH/potpaw_PBE/
PW91: $VASP_PP_PATH/potpaw_GGA/''' % name
log.debug('Looked for %s' % name)
print 'Looked for %s' % name
raise RuntimeError('No pseudopotential for %s:%s!' % (symbol,
name))
raise RuntimeError('No pseudopotential for %s!' % symbol)
# get sha1 hashes similar to the way git does it
# http://stackoverflow.com/questions/552659/assigning-git-sha1s-without-git
# git hash-object foo.txt will generate a command-line hash
hashes = []
for ppp in self.ppp_list:
f = open(ppp, 'r')
data = f.read()
f.close()
s = sha1()
s.update("blob %u\0" % len(data))
s.update(data)
hashes.append(s.hexdigest())
stripped_paths = [ppp.split(os.environ['VASP_PP_PATH'])[1]
for ppp in self.ppp_list]
return zip(symbols, stripped_paths, hashes)
Vasp.get_pseudopotentials = get_pseudopotentials
'''pre_run and post_run hooks
the idea here is that you can register some functions that will run
before and after running a Vasp calculation. These functions will have
the following signature: function(self). you might use them like this
def set_nbands(self):
do something if nbands is not set
calc.register_pre_run_hook(set_nbands)
def enter_calc_in_database(self):
do something
calc.register_post_run_hook(enter_calc_in_database)
maybe plugins
(http://www.luckydonkey.com/2008/01/02/python-style-plugins-made-easy/)
are a better way?
The calculator will store a list of hooks.
'''
def register_pre_run_hook(function):
    '''Register *function* to be called before each Vasp run.'''
    hooks = getattr(Vasp, 'pre_run_hooks', None)
    if hooks is None:
        hooks = []
        Vasp.pre_run_hooks = hooks
    hooks.append(function)
def register_post_run_hook(function):
    '''Register *function* to be called after each Vasp run.'''
    hooks = getattr(Vasp, 'post_run_hooks', None)
    if hooks is None:
        hooks = []
        Vasp.post_run_hooks = hooks
    hooks.append(function)

# Expose the registries as static methods on the calculator class.
Vasp.register_pre_run_hook = staticmethod(register_pre_run_hook)
Vasp.register_post_run_hook = staticmethod(register_post_run_hook)
def job_in_queue(self):
    '''Return True if the directory has a job in the queue, else False.

    Reads the job id from the ``jobid`` file in the current directory
    and checks it against the scheduler's queue. The scheduler type is
    taken from JASPRC['scheduler'] (PBS or SGE).
    '''
    if not os.path.exists('jobid'):
        return False
    # get the jobid
    jobid = open('jobid').readline().strip()
    if JASPRC['scheduler'] == 'PBS':
        # see if jobid is in queue
        jobids_in_queue = commands.getoutput('qselect').split('\n')
        if jobid in jobids_in_queue:
            # get details on specific jobid
            status, output = commands.getstatusoutput('qstat %s' % jobid)
            if status == 0:
                lines = output.split('\n')
                fields = lines[2].split()
                job_status = fields[4]
                # 'C' means completed -- no longer really in the queue.
                if job_status == 'C':
                    return False
                else:
                    return True
        return False
    if JASPRC['scheduler'] == 'SGE':
        # SGE qselect does not print a list of jobids, so we have to improvise
        jobids_in_queue = commands.getoutput("qstat | awk '{ print $1; }'").split('\n')[2:]
        if jobid in jobids_in_queue:
            # SGE apparently does not have jobstate == 'C', lets still get status and output for now
            status, output = commands.getstatusoutput('qstat | grep {0}'.format(jobid))
            if status == 0:
                fields = output.split()
                job_status = fields[4]
            # Fix: the original implicitly returned None when the qstat
            # grep failed even though the job id was in the listing; the
            # job is in the queue, so report True.
            return True
        return False
    # Fix: unknown scheduler values previously fell through and returned
    # None implicitly; be explicit that no queued job was found.
    return False
Vasp.job_in_queue = job_in_queue
def calculation_required(self, atoms, quantities):
'''Monkey-patch original function because (4,4,4) != [4,4,4] which
makes the test on input_params fail'''
if self.positions is None:
log.debug('self.positions is None')
return True
elif self.atoms != atoms:
log.debug('atoms have changed')
log.debug('self.atoms = ', self.atoms)
log.debug('atoms = ', self.atoms)
return True
elif self.float_params != self.old_float_params:
log.debug('float_params have changed')
return True
elif self.exp_params != self.old_exp_params:
log.debug('exp_params have changed')
return True
elif self.string_params != self.old_string_params:
log.debug('string_params have changed.')
log.debug('current: {0}'.format(self.string_params))
log.debug('old : {0}'.format(self.old_string_params))
return True
elif self.int_params != self.old_int_params:
log.debug('int_params have changed')
log.debug('current: {0}'.format(self.int_params))
log.debug('old : {0}'.format(self.old_int_params))
return True
elif self.bool_params != self.old_bool_params:
log.debug('bool_params have changed')
return True
elif self.dict_params != self.old_dict_params:
log.debug('current: {0}'.format(str(self.dict_params)))
log.debug('old: {0}'.format(str(self.old_dict_params)))
log.debug('dict_params have changed')
return True
for key in self.list_params:
if (self.list_params[key] is None
and self.old_list_params[key] is None):
# no check required
continue
elif (self.list_params[key] is None
or self.old_list_params[key] is None):
# handle this because one may be a list and the other is
# not, either way they are not the same. We cannot just
# cast each element as a list, like we do in the next case
# because list(None) raises an exception.
log.debug('odd list_param case:')
log.debug('current: {0} \n'.format(self.list_params[key]))
log.debug('old: {0} \n'.format(self.old_list_params[key]))
return True
# here we explicitly make both lists so we can compare them
if list(self.list_params[key]) != list(self.old_list_params[key]):
log.debug('list_params have changed')
log.debug('current: {0}'.format(self.list_params[key]))
log.debug('old: {0}'.format(self.old_list_params[key]))
return True
for key in self.input_params:
if key == 'kpts':
if (list(self.input_params[key])
!= list(self.old_input_params[key])):
log.debug('1. {}'.format(list(self.input_params[key])))
log.debug('2. {}'.format(list(self.old_input_params[key])))
log.debug('KPTS have changed.')
return True
else:
continue
elif key == 'setups':
log.warn('We do not know how to compare setups yet! '
'silently continuing.')
continue
elif key == 'txt':
log.warn('We do not know how to compare txt yet!'
'silently continuing.')
continue
else:
if self.input_params[key] != self.old_input_params[key]:
print '{0} FAILED'.format(key)
print self.input_params[key]
print self.old_input_params[key]
return True
if 'magmom' in quantities:
return not hasattr(self, 'magnetic_moment')
if self.converged is None:
self.converged = self.read_convergence()
if not self.converged:
if not JASPRC['restart_unconverged']:
raise VaspNotConverged("This calculation did not converge."
" Set JASPRC['restart_unconverged'] ="
" True to restart")
return True
return False
Vasp.calculation_required = calculation_required
# Keep a reference to the unpatched method so the patch can delegate to it.
original_calculate = Vasp.calculate
def calculate(self, atoms=None):
    '''
    monkeypatched function to avoid calling calculate unless we really
    want to run a job. If a job is queued or running, we should exit
    here to avoid reinitializing the input files.
    I also made it possible to not give an atoms here, since there
    should be one on the calculator.

    Raises VaspQueued/VaspRunning when a live job exists, and a plain
    Exception when JASPRC['mode'] is None (running is disabled).
    '''
    # These attributes are set elsewhere when a queue lookup finds the
    # job; raising keeps us from clobbering a live calculation's inputs.
    if hasattr(self, 'vasp_queued'):
        raise VaspQueued('Queued', os.getcwd())
    if hasattr(self, 'vasp_running'):
        raise VaspRunning('Running', os.getcwd())
    if atoms is None:
        atoms = self.get_atoms()
    # this may not catch magmoms
    if not self.calculation_required(atoms, []):
        return
    if 'mode' in JASPRC:
        if JASPRC['mode'] is None:
            log.debug(self)
            log.debug('self.converged" %s', self.converged)
            raise Exception('''JASPRC['mode'] is None. '''
                            '''we should not be running!''')
    # finally run the original function
    original_calculate(self, atoms)
Vasp.calculate = calculate
def run(self):
    '''monkey patch to submit job through the queue.
    If this is called, then the calculator thinks a job should be run.
    If we are in the queue, we should run it, otherwise, a job should
    be submitted.

    Returns the vasp exit code when running directly; raises
    VaspSubmitted after submitting a job to the scheduler.
    '''
    # Give registered pre-run hooks a chance to adjust the calculation.
    if hasattr(self, 'pre_run_hooks'):
        for hook in self.pre_run_hooks:
            hook(self)
    # if we are in the queue and jasp is called or if we want to use
    # mode='run' , we should just run the job. First, we consider how.
    if 'PBS_O_WORKDIR' in os.environ or JASPRC['mode'] == 'run':
        log.info('In the queue. determining how to run')
        if 'PBS_NODEFILE' in os.environ:
            # we are in the queue. determine if we should run serial
            # or parallel
            NPROCS = len(open(os.environ['PBS_NODEFILE']).readlines())
            log.debug('Found {0} PROCS'.format(NPROCS))
            if NPROCS == 1:
                # no question. running in serial.
                vaspcmd = JASPRC['vasp.executable.serial']
                log.debug('NPROCS = 1. running in serial')
                exitcode = os.system(vaspcmd)
                return exitcode
            else:
                # vanilla MPI run. multiprocessing does not work on more
                # than one node, and you must specify in JASPRC to use it
                if (JASPRC['queue.nodes'] > 1
                    or (JASPRC['queue.nodes'] == 1
                        and JASPRC['queue.ppn'] > 1
                        and (JASPRC['multiprocessing.cores_per_process']
                             == 'None'))):
                    log.debug('queue.nodes = {0}'.format(JASPRC['queue.nodes']))
                    log.debug('queue.ppn = {0}'.format(JASPRC['queue.ppn']))
                    log.debug('multiprocessing.cores_per_process'
                              '= {0}'.format(JASPRC['multiprocessing.cores_per_process']))
                    log.debug('running vanilla MPI job')
                    print 'MPI NPROCS = ', NPROCS
                    vaspcmd = JASPRC['vasp.executable.parallel']
                    parcmd = 'mpirun -np %i %s' % (NPROCS, vaspcmd)
                    exitcode = os.system(parcmd)
                    return exitcode
                else:
                    # we need to run an MPI job on cores_per_process
                    if JASPRC['multiprocessing.cores_per_process'] == 1:
                        log.debug('running single core multiprocessing job')
                        vaspcmd = JASPRC['vasp.executable.serial']
                        # NOTE(review): no return here -- control falls
                        # through to the submission code below after the
                        # run finishes; confirm whether that is intended.
                        exitcode = os.system(vaspcmd)
                    elif JASPRC['multiprocessing.cores_per_process'] > 1:
                        log.debug('running mpi multiprocessing job')
                        NPROCS = JASPRC['multiprocessing.cores_per_process']
                        vaspcmd = JASPRC['vasp.executable.parallel']
                        parcmd = 'mpirun -np %i %s' % (NPROCS, vaspcmd)
                        exitcode = os.system(parcmd)
                        return exitcode
        else:
            # probably running at cmd line, in serial.
            vaspcmd = JASPRC['vasp.executable.serial']
            exitcode = os.system(vaspcmd)
            return exitcode
        # end
    # if you get here, a job is getting submitted
    script = '#!/bin/{0}\n'.format(JASPRC['queue.shell'])
    script += 'module load {0}\n'.format(JASPRC['module'])
    script +='''cd {self.cwd} # this is the current working directory
cd {self.vaspdir} # this is the vasp directory
runjasp.py # this is the vasp command
#end'''.format(**locals())
    if JASPRC['scheduler'] == 'PBS':
        jobname = self.vaspdir
        log.debug('{0} will be the jobname.'.format(jobname))
        log.debug('-l nodes={0}:ppn={1}'.format(JASPRC['queue.nodes'],
                                                JASPRC['queue.ppn']))
        cmdlist = ['{0}'.format(JASPRC['queue.command'])]
        cmdlist += [option for option in JASPRC['queue.options'].split()]
        cmdlist += ['-N', '{0}'.format(jobname),
                    '-l walltime={0}'.format(JASPRC['queue.walltime']),
                    '-l nodes={0}:ppn={1}'.format(JASPRC['queue.nodes'],
                                                  JASPRC['queue.ppn']),
                    '-l mem={0}'.format(JASPRC['queue.mem'])]
    elif JASPRC['scheduler'] == 'SGE':
        jobname = (self.vaspdir).replace('/','|') # SGE does not allow '/' in job names
        log.debug('{0} will be the jobname.'.format(jobname))
        # SGE submission reads the script from the 'qscript' file.
        f = open('qscript','w')
        f.write(script)
        f.close()
        log.debug('-pe {0} {1}'.format(JASPRC['queue.pe'],
                                       JASPRC['queue.nprocs']))
        log.debug('-q {0}'.format(JASPRC['queue.q']))
        cmdlist = ['{0}'.format(JASPRC['queue.command'])]
        cmdlist += [option for option in JASPRC['queue.options'].split()]
        cmdlist += ['-N', '{0}'.format(jobname),
                    '-q {0}'.format(JASPRC['queue.q']),
                    '-pe {0} {1}'.format(JASPRC['queue.pe'], JASPRC['queue.nprocs'])
                    #'-l mem_free={0}'.format(JASPRC['queue.mem'])
                    ]
    # NOTE(review): 'qscript' is appended for both schedulers, but the
    # file is only written in the SGE branch above; for PBS the script is
    # also piped via stdin below. Confirm the PBS path actually works.
    cmdlist += ['qscript']
    log.debug('{0}'.format(' '.join(cmdlist)))
    p = Popen(cmdlist,
              stdin=PIPE, stdout=PIPE, stderr=PIPE)
    log.debug(script)
    out, err = p.communicate(script)
    if out == '' or err != '':
        raise Exception('something went wrong in qsub:\n\n{0}'.format(err))
    # SGE prints 'Your job <id> ...'; PBS prints the bare job id.
    if JASPRC['scheduler'] == 'SGE':
        jobid = out.split()[2]
    else:
        jobid = out
    f = open('jobid', 'w')
    f.write(jobid)
    f.close()
    raise VaspSubmitted(out)
Vasp.run = run
def prepare_input_files(self):
    """Initialize the calculator and write all VASP input files.

    Writes POSCAR, INCAR, POTCAR, KPOINTS, the sort file and the
    metadata for the atoms currently attached to this calculator.
    """
    from ase.io.vasp import write_vasp

    current_atoms = self.get_atoms()
    self.initialize(current_atoms)

    # POSCAR is written from the sorted atoms so it matches POTCAR order.
    write_vasp('POSCAR', self.atoms_sorted, symbol_count=self.symbol_count)

    # Remaining input files.
    self.write_incar(current_atoms)
    self.write_potcar()
    self.write_kpoints()
    self.write_sort_file()
    self.create_metadata()
Vasp.prepare_input_files = prepare_input_files
def pretty_print(self):
    '''
    __str__ function to print the calculator with a nice summary, e.g. jaspsum

    Summarizes convergence, unit cell (with a, b, c, alpha, beta,
    gamma and the volume), energy, per-atom forces/constraints, the
    INCAR parameters that are set, the pseudopotentials, and -- for
    IBRION in [5, 6, 7, 8] -- the vibrational frequencies.  NEB
    calculations get a short per-image energy summary instead.
    '''
    # special case for neb calculations
    if self.int_params['images'] is not None:
        # we have an neb.
        s = []
        s.append(': -----------------------------')
        s.append(' VASP NEB calculation from %s' % os.getcwd())
        try:
            images, energies = self.get_neb()
            for i, e in enumerate(energies):
                s += ['image {0}: {1: 1.3f}'.format(i, e)]
        except (VaspQueued):
            s += ['Job is in queue']
        return '\n'.join(s)
    s = []
    s.append(': -----------------------------')
    s.append(' VASP calculation from %s' % os.getcwd())
    if hasattr(self, 'converged'):
        s.append(' converged: %s' % self.converged)
    try:
        atoms = self.get_atoms()
        uc = atoms.get_cell()
        try:
            self.converged = self.read_convergence()
        except IOError:
            # eg no outcar
            self.converged = False
        if not self.converged:
            try:
                print(self.read_relaxed())
            except IOError:
                print(False)
        if self.converged:
            energy = atoms.get_potential_energy()
            forces = atoms.get_forces()
        else:
            # not converged: report NaNs rather than stale numbers
            energy = np.nan
            forces = [np.array([np.nan, np.nan, np.nan]) for atom in atoms]
        if self.converged:
            if hasattr(self, 'stress'):
                stress = self.stress
            else:
                stress = None
        else:
            stress = None
        # get a,b,c,alpha,beta, gamma
        A = uc[0, :]
        B = uc[1, :]
        C = uc[2, :]
        a = np.linalg.norm(A)
        b = np.linalg.norm(B)
        c = np.linalg.norm(C)
        # alpha = angle(b, c); beta = angle(a, c); gamma = angle(a, b).
        # gamma was previously computed from B and C, which silently
        # duplicated alpha -- fixed to use A and B.
        alpha = np.arccos(np.dot(B/np.linalg.norm(B),
                                 C/np.linalg.norm(C))) * 180/np.pi
        beta = np.arccos(np.dot(A/np.linalg.norm(A),
                                C/np.linalg.norm(C))) * 180/np.pi
        gamma = np.arccos(np.dot(A/np.linalg.norm(A),
                                 B/np.linalg.norm(B))) * 180/np.pi
        volume = np.abs(np.linalg.det(uc))
        s.append(' Energy = %f eV' % energy)
        s.append('\n Unit cell vectors (angstroms)')
        s.append(' x y z length')
        s.append(' a0 [% 3.3f % 3.3f % 3.3f] %3.3f' % (uc[0][0],
                                                       uc[0][1],
                                                       uc[0][2],
                                                       a))
        s.append(' a1 [% 3.3f % 3.3f % 3.3f] %3.3f' % (uc[1][0],
                                                       uc[1][1],
                                                       uc[1][2],
                                                       b))
        s.append(' a2 [% 3.3f % 3.3f % 3.3f] %3.3f' % (uc[2][0],
                                                       uc[2][1],
                                                       uc[2][2],
                                                       c))
        s.append(' a,b,c,alpha,beta,gamma (deg):'
                 '%1.3f %1.3f %1.3f %1.1f %1.1f %1.1f' % (a,
                                                          b,
                                                          c,
                                                          alpha,
                                                          beta,
                                                          gamma))
        s.append(' Unit cell volume = {0:1.3f} Ang^3'.format(volume))
        if stress is not None:
            s.append(' Stress (GPa):xx, yy, zz, yz, xz, xy')
            s.append(' % 1.3f % 1.3f % 1.3f'
                     '% 1.3f % 1.3f % 1.3f' % tuple(stress))
        else:
            s += [' Stress was not computed']
        constraints = None
        if hasattr(atoms, 'constraints'):
            from ase.constraints import FixAtoms, FixScaled
            constraints = [[None, None, None] for atom in atoms]
            for constraint in atoms.constraints:
                if isinstance(constraint, FixAtoms):
                    for i, constrained in enumerate(constraint.index):
                        if constrained:
                            constraints[i] = [True, True, True]
                if isinstance(constraint, FixScaled):
                    constraints[constraint.a] = constraint.mask.tolist()
        if constraints is None:
            s.append(' Atom# sym position [x,y,z]'
                     'tag rmsForce')
        else:
            s.append(' Atom# sym position [x,y,z]'
                     'tag rmsForce constraints')
        for i, atom in enumerate(atoms):
            rms_f = np.sum(forces[i]**2)**0.5
            ts = (' {0:^4d} {1:^4s} [{2:<9.3f}'
                  '{3:^9.3f}{4:9.3f}]'
                  '{5:^6d}{6:1.2f}'.format(i,
                                           atom.symbol,
                                           atom.x,
                                           atom.y,
                                           atom.z,
                                           atom.tag,
                                           rms_f))
            # VASP has the opposite convention of constrained
            # Think: F = frozen
            if constraints is not None:
                ts += ' {0} {1} {2}'.format('F' if constraints[i][0]
                                            is True else 'T',
                                            'F' if constraints[i][1]
                                            is True else 'T',
                                            'F' if constraints[i][2]
                                            is True else 'T')
            s.append(ts)
        s.append('--------------------------------------------------')
        if self.get_spin_polarized() and self.converged:
            s.append('Spin polarized: '
                     'Magnetic moment = %1.2f'
                     % self.get_magnetic_moment(atoms))
    except AttributeError:
        # no atoms
        pass
    if os.path.exists('INCAR'):
        # print all parameters that are set
        self.read_incar()
        ppp_list = self.get_pseudopotentials()
    else:
        ppp_list = [(None, None, None)]
    s += ['\nINCAR Parameters:']
    s += ['-----------------']
    for d in [self.int_params,
              self.float_params,
              self.exp_params,
              self.bool_params,
              self.list_params,
              self.dict_params,
              self.string_params,
              self.special_params,
              self.input_params]:
        for key in d:
            # 'magmom' is always printed, even when None.  Use '==':
            # the old 'is' comparison relied on CPython string
            # interning and is not a correctness guarantee.
            if key == 'magmom':
                np.set_printoptions(precision=3)
                value = textwrap.fill(str(d[key]),
                                      width=56,
                                      subsequent_indent=' '*17)
                s.append(' %12s: %s' % (key, value))
            elif d[key] is not None:
                value = textwrap.fill(str(d[key]),
                                      width=56,
                                      subsequent_indent=' '*17)
                s.append(' %12s: %s' % (key, value))
    s += ['\nPseudopotentials used:']
    s += ['----------------------']
    for sym, ppp, hash in ppp_list:
        s += ['{0}: {1} (git-hash: {2})'.format(sym, ppp, hash)]
    # if ibrion in [5,6,7,8] print frequencies
    if self.int_params['ibrion'] in [5, 6, 7, 8]:
        freq, modes = self.get_vibrational_modes()
        s += ['\nVibrational frequencies']
        s += ['mode frequency']
        s += ['------------------']
        for i, f in enumerate(freq):
            if isinstance(f, float):
                s += ['{0:4d}{1: 10.3f} eV'.format(i, f)]
            elif isinstance(f, complex):
                # imaginary modes are reported as negative real values
                s += ['{0:4d}{1: 10.3f} eV'.format(i, -f.real)]
    return '\n'.join(s)
Vasp.__str__ = pretty_print
#########################################################################
def vasp_changed_bands(calc):
    '''Check here if VASP changed nbands.

    When running in parallel VASP sometimes changes NBANDS itself (see
    the warning text below).  If the OUTCAR records such a change this
    function updates the calculator and rewrites the INCAR with the
    new value, then raises VaspWarning so the user also fixes the
    value in their script.
    '''
    log.debug('Checking if vasp changed nbands')
    if not os.path.exists('OUTCAR'):
        # no output produced yet, nothing to check
        return
    with open('OUTCAR') as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        if 'The number of bands has been changed from the values supplied' in line:
            s = lines[i + 5]  # this is where the new bands are found
            nbands_cur = calc.nbands
            # parse "... I found NBANDS = <original> ... = <new>" into
            # the two integers
            nbands_ori, nbands_new = [int(x) for x in
                                      re.search(r"I found NBANDS\s+ =\s+([0-9]*).*=\s+([0-9]*)", s).groups()]
            log.debug('Calculator nbands = {0}.\n'
                      'VASP found {1} nbands.\n'
                      'Changed to {2} nbands.'.format(nbands_cur,
                                                     nbands_ori,
                                                     nbands_new))
            # persist the corrected value so a restart is consistent
            calc.set(nbands=nbands_new)
            calc.write_incar(calc.get_atoms())
            log.debug('calc.kwargs: {0}'.format(calc.kwargs))
            # the user's script still carries the old value: warn with
            # the relevant OUTCAR context attached
            if calc.kwargs.get('nbands', None) != nbands_new:
                raise VaspWarning('The number of bands was changed by VASP. '
                                  'This happens sometimes when you run in '
                                  'parallel. It causes problems with jasp. '
                                  'I have already updated your INCAR. '
                                  'You need to change the number of bands '
                                  'in your script to match what VASP used '
                                  'to proceed.\n\n '
                                  + '\n'.join(lines[i - 9: i + 8]))
def checkerr_vasp(self):
''' Checks vasp output in OUTCAR for errors. adapted from atat code'''
error_strings = ['forrtl: severe', # seg-fault
'highest band is occupied at some k-points!',
'rrrr', # I think this is from Warning spelled
# out in ascii art
'cnorm',
'failed',
'non-integer']
# Check if VASP changed the bands
vasp_changed_bands(self)
errors = []
if os.path.exists('OUTCAR'):
f = open('OUTCAR')
for i, line in enumerate(f):
i += 1
for es in error_strings:
if es in line:
errors.append((i, line))
f.close()
converged = self.read_convergence()
if not converged:
errors.append(('Converged', converged))
# Then if ibrion > 0, check whether ionic relaxation condition been
# fulfilled, but we do not check ibrion >3 because those are vibrational
# type calculations.
if self.int_params['ibrion'] in [1, 2, 3]:
if not self.read_relaxed():
errors.append(('Ions/cell Converged', converged))
if len(errors) != 0:
f = open('error', 'w')
for i, line in errors:
f.write('{0}: {1}\n'.format(i, line))
f.close()
else:
# no errors found, lets delete any error file that had existed.
if os.path.exists('error'):
os.unlink('error')
if os.path.exists('error'):
with open('error') as f:
print 'Errors found:\n', f.read()
else:
if not hasattr(self, 'neb'):
raise Exception('no OUTCAR` found')
Vasp.register_post_run_hook(checkerr_vasp)
def strip(self, extrafiles=()):
    '''removes large uncritical output files from directory

    :param extrafiles: optional iterable of additional file names to
        remove besides CHG, CHGCAR and WAVECAR.
    '''
    # list(...) so any iterable works; the old code concatenated a
    # list with the default tuple (), which raises TypeError.
    files_to_remove = ['CHG', 'CHGCAR', 'WAVECAR'] + list(extrafiles)
    for f in files_to_remove:
        if os.path.exists(f):
            os.unlink(f)
Vasp.strip = strip
def set_nbands(self, N=None, f=1.5):
    ''' convenience function to set NBANDS to N or automatically
    compute nbands

    for non-spin-polarized calculations
    nbands = int(nelectrons/2 + nions*f)

    this formula is suggested at
    http://cms.mpi.univie.ac.at/vasp/vasp/NBANDS_tag.html

    for transition metals f may be as high as 2.
    '''
    # An explicit request wins over the estimate.
    if N is not None:
        self.set(nbands=int(N))
        return
    # Otherwise estimate from the electron and ion counts.
    natoms = len(self.get_atoms())
    nelectrons = self.get_valence_electrons()
    self.set(nbands=int(np.ceil(nelectrons / 2.) + natoms * f))
Vasp.set_nbands = set_nbands
def get_valence_electrons(self):
    '''Return all the valence electrons for the atoms.

    Calculated from the POTCAR file, which is generated first if it
    does not exist yet.
    '''
    if not os.path.exists('POTCAR'):
        # POTCAR is needed to look up the per-element electron counts.
        self.initialize(self.get_atoms())
        self.write_potcar()
    # map symbol -> number of valence electrons
    electrons_per_symbol = dict(self.get_default_number_of_electrons())
    return sum(electrons_per_symbol[atom.symbol]
               for atom in self.get_atoms())
Vasp.get_valence_electrons = get_valence_electrons
def get_elapsed_time(self):
    '''Return elapsed calculation time in seconds from the OUTCAR file.

    Returns None when no elapsed-time line is present (e.g. the run
    did not finish).  The old code indexed lines[-8] unconditionally,
    which raised IndexError on short files and AttributeError when the
    pattern did not match.
    '''
    import re
    regexp = re.compile('Elapsed time \(sec\):\s*(?P<time>[0-9]*\.[0-9]*)')
    with open('OUTCAR') as f:
        lines = f.readlines()
    # The timing line is normally near the end (8th from last), so
    # scan backwards and return the last match.
    for line in reversed(lines):
        m = regexp.search(line)
        if m is not None:
            return float(m.group('time'))
    return None
Vasp.get_elapsed_time = get_elapsed_time
old_read_ldau = Vasp.read_ldau
def read_ldau(self):
    '''Read LDA+U settings via the original Vasp.read_ldau, then clear
    the duplicated list parameters.

    Upon restart, Vasp.read_incar() stores ldauu, ldauj and ldaul as
    list params even though they should only be defined through the
    ldau_luj dict key.  When read_ldau later fills the dict from the
    OUTCAR the same tags live in two places, and continuation runs
    write them to the INCAR twice.  Resetting the list params here
    removes the duplication.

    Note: the same problem persists for continuation calculations
    with nbands and magmoms.
    '''
    ldau, ldauprint, ldautype, ldau_luj = old_read_ldau(self)
    # wipe the redundant list-param copies
    for key in ('ldauu', 'ldaul', 'ldauj'):
        self.set(**{key: None})
    return ldau, ldauprint, ldautype, ldau_luj
Vasp.read_ldau = read_ldau
def get_nearest_neighbor_table(self):
    """read the nearest neighbor table from OUTCAR

    returns a list of atom indices and the connecting neighbors. The
    list is not sorted according to self.sorted or self.resorted.
    """
    with open('OUTCAR') as f:
        lines = f.readlines()
    # find the header line of the table
    for i, line in enumerate(lines):
        if 'nearest neighbor table' in line:
            break
    # i contains index of line
    i += 1  # first line of the table
    # sometimes there is carriover to the next line.
    line_counter = 0  # NOTE(review): never used below
    NN = []
    while True:
        line = lines[i]
        # the table ends at a LATTYP line or a blank line
        if ('LATTYP' in line
            or line.strip() == ''):
            break
        line = lines[i].strip()
        # NOTE(review): this looks like a test for a wrapped
        # (continuation) row, but a single space matches almost any
        # line -- confirm the intended condition against a real OUTCAR
        if ' ' in lines[i+1]:
            # this was a continuation line
            line += lines[i+1]
            i += 2
        else:
            i += 1
        fields = line.split()
        # fields[0] is the atom index; fields[1:4] are skipped
        # (presumably the atom position -- TODO confirm); the rest are
        # alternating (neighbor index, distance) pairs
        atom_index = int(fields[0])
        nearest_neigbors = fields[4:]
        nn_indices = [int(nearest_neigbors[x])
                      for x in range(0, len(nearest_neigbors), 2)]
        # distances are parsed but not included in the return value
        nn_distances = [float(nearest_neigbors[x])
                        for x in range(1, len(nearest_neigbors), 2)]
        NN.append((atom_index, nn_indices))
    return NN
Vasp.get_nearest_neighbor_table = get_nearest_neighbor_table
# this fixes a bug in ase.calculators.vasp, which does not return a
# copy of the forces
def get_forces(self, atoms):
    '''Return a copy of the computed forces.

    Fixes a bug in ase.calculators.vasp, which hands back a reference
    to its internal forces array; callers mutating that array would
    corrupt the cached state.
    '''
    self.update(atoms)
    cached = self.forces
    return np.array(cached, copy=True)
Vasp.get_forces = get_forces
def get_energy_components(self, outputType=0):
    '''Returns all of the components of the energies.

    outputType = 0, returns each individual component
    outputType = 1, returns a major portion of the electrostatic
    energy and the total
    outputType = 2, returns a major portion of the electrostatic
    energy and the other components

    vasp forum may provide help:
    http://cms.mpi.univie.ac.at/vasp-forum/forum_viewtopic.php?4.273
    Contributed by Jason Marshall, 2014.

    Returns a list of [label, value] pairs.
    '''
    # self.calculate()
    with open('OUTCAR') as f:
        lines = f.readlines()
    lineNumbers = []
    for i, line in enumerate(lines):
        # note: this is tricky, the exact string to search for is not
        # the last energy line in OUTCAR, there are space differences
        # ... USER BEWARE: Be careful with this function ... may be
        # buggy depending on inputs
        if line.startswith(' free energy TOTEN ='):
            lineNumbers.append(i)
    # use the decomposition block preceding the last TOTEN line
    lastLine = lineNumbers[-1]
    data = lines[lastLine - 10:lastLine]
    energies = []  # NOTE(review): immediately overwritten below
    # fixed line offsets within the 10-line decomposition block --
    # assumes the standard OUTCAR energy block layout (TODO confirm
    # across VASP versions)
    alphaZ = float(data[0].split()[-1])
    ewald = float(data[1].split()[-1])
    halfHartree = float(data[2].split()[-1])
    exchange = float(data[3].split()[-1])
    xExchange = float(data[4].split()[-1])
    # the PAW double-counting line carries two values
    PAWDoubleCounting1 = float(data[5].split()[-2])
    PAWDoubleCounting2 = float(data[5].split()[-1])
    entropy = float(data[6].split()[-1])
    eigenvalues = float(data[7].split()[-1])
    atomicEnergy = float(data[8].split()[-1])
    if outputType == 1:
        # electrostatic lump, everything else, and the total
        energies = [['electro', alphaZ + ewald + halfHartree],
                    ['else', exchange + xExchange + PAWDoubleCounting1
                     + PAWDoubleCounting2 + entropy + eigenvalues
                     + atomicEnergy],
                    ['total', alphaZ + ewald + halfHartree + exchange
                     + xExchange
                     + PAWDoubleCounting1 + PAWDoubleCounting2 + entropy
                     + eigenvalues + atomicEnergy]]
    elif outputType == 2:
        # electrostatic lump plus each remaining component and total
        energies = [['electro', alphaZ + ewald + halfHartree],
                    ['exchange', exchange],
                    ['xExchange', xExchange],
                    ['PAW', PAWDoubleCounting1 + PAWDoubleCounting2],
                    ['entropy', entropy],
                    ['eigenvalues', eigenvalues],
                    ['atomicEnergy', atomicEnergy],
                    ['total', alphaZ + ewald + halfHartree + exchange
                     + xExchange
                     + PAWDoubleCounting1 + PAWDoubleCounting2 + entropy
                     + eigenvalues + atomicEnergy]]
    else:
        # every individual component, no total
        energies = [['alphaZ', alphaZ],
                    ['ewald', ewald],
                    ['halfHartree', halfHartree],
                    ['exchange', exchange],
                    ['xExchange', xExchange],
                    ['PAW', PAWDoubleCounting1 + PAWDoubleCounting2],
                    ['entropy', entropy],
                    ['eigenvalues', eigenvalues],
                    ['atomicEnergy', atomicEnergy]]
    return energies
Vasp.get_energy_components = get_energy_components
def get_beefens(self, n=-1):
    '''Get the BEEFens 2000 ensemble energies from the OUTCAR.
    This only works with Vasp 5.3.5 compiled with libbeef.

    I am pretty sure this array is the deviations from the total
    energy. There are usually 2000 of these, but it is not clear this will
    always be the case. I assume the 2000 entries are always in the same
    order, so you can calculate ensemble energy differences for reactions,
    as long as the number of samples in the ensemble is the same.

    There is usually more than one BEEFens section. By default we return
    the last one. Choose another one with the the :par: n.

    see http://suncat.slac.stanford.edu/facility/software/functional/
    '''
    beefens = []
    with open('OUTCAR') as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        if 'BEEFens' in line:
            # first integer on the header line is the sample count
            nsamples = int(re.search('(\d+)', line).groups()[0])
            # energies start on the line after the header; the old
            # slice [i + 1: i + nsamples] dropped the last sample
            beefens.append([float(x) for x in lines[i + 1: i + 1 + nsamples]])
    return np.array(beefens[n])
Vasp.get_beefens = get_beefens
def get_orbital_occupations(self):
    '''Read occupations from OUTCAR.

    Returns a numpy array of
    [[s, p, d, tot]] for each atom.

    You probably need to have used LORBIT=11 for this function to
    work.

    Raises Exception when no occupation table is present in OUTCAR.
    '''
    # this finds the last entry of occupations. Sometimes, this is
    # printed multiple times in the OUTCAR.
    with open('OUTCAR', 'r') as f:
        lines = f.readlines()
    start = None
    for i, line in enumerate(lines):
        if line.startswith(" total charge "):
            start = i
    # the old code tested the loop index ('if not i'), which only
    # fired for a one-line file; test the sentinel instead
    if start is None:
        raise Exception('Occupations not found')
    atoms = self.get_atoms()
    occupations = []
    # the per-atom rows begin 4 lines below the table header
    for j in range(len(atoms)):
        fields = lines[start + 4 + j].split()
        s, p, d, tot = [float(x) for x in fields[1:]]
        occupations.append(np.array((s, p, d, tot)))
    return np.array(occupations)
Vasp.get_orbital_occupations = get_orbital_occupations
def get_number_of_ionic_steps(self):
    """Returns number of ionic steps from the OUTCAR.

    Returns None when no iteration lines are present.
    """
    count = None
    with open('OUTCAR') as outcar:
        for line in outcar:
            # keep overwriting so we end up with the last iteration
            if '- Iteration' in line:
                count = int(line.split('(')[0].split()[-1].strip())
    return count
Vasp.get_number_of_ionic_steps = get_number_of_ionic_steps
def get_required_memory(self):
    ''' Returns the recommended memory needed for a VASP calculation

    Code retrieves memory estimate based on the following priority:
    1) METADATA
    2) existing OUTCAR
    3) run diagnostic calculation

    The final method determines the memory requirements from
    KPOINT calculations run locally before submission to the queue

    Side effects: may cache 'recommended.memory' into METADATA, and
    always sets JASPRC['queue.mem'] from the estimate.
    '''
    import json

    def get_memory():
        ''' Retrieves the recommended memory from the OUTCAR

        Returns the value in GB, or None when no OUTCAR / no memory
        line exists yet.
        '''
        if os.path.exists('OUTCAR'):
            with open('OUTCAR') as f:
                lines = f.readlines()
        else:
            return None
        for line in lines:
            # There can be multiple instances of this,
            # but they all seem to be identical
            if 'memory' in line:
                # Read the memory usage (kB in OUTCAR -> GB here;
                # NOTE(review): units assumed from the 1e6 divisor,
                # confirm against a real OUTCAR)
                required_mem = float(line.split()[-2]) / 1e6
                return required_mem

    # Attempt to get the recommended memory from METADATA
    # JASP automatically generates a METADATA file when
    # run, so there should be no instances where it does not exist
    with open('METADATA', 'r') as f:
        data = json.load(f)
    try:
        memory = data['recommended.memory']
    except(KeyError):
        # Check if an OUTCAR exists from a previous run
        if os.path.exists('OUTCAR'):
            memory = get_memory()
            # Write the recommended memory to the METADATA file
            with open('METADATA', 'r+') as f:
                data = json.load(f)
                data['recommended.memory'] = memory
                f.seek(0)
                json.dump(data, f)
        # If no OUTCAR exists, we run a 'dummy' calculation
        else:
            # IALGO = -1 makes VASP stop after setup, which is enough
            # to get the memory estimate printed
            original_ialgo = self.int_params.get('ialgo')
            self.int_params['ialgo'] = -1
            # Generate the base files needed for VASP calculation
            atoms = self.get_atoms()
            self.initialize(atoms)
            from ase.io.vasp import write_vasp
            write_vasp('POSCAR',
                       self.atoms_sorted,
                       symbol_count=self.symbol_count)
            self.write_incar(atoms)
            self.write_potcar()
            self.write_kpoints()
            self.write_sort_file()

            # Need to pass a function to Timer for delayed execution
            def kill():
                process.kill()

            # We only need the memory estimate, so we can greatly
            # accelerate the process by terminating after we have it
            process = Popen(JASPRC['vasp.executable.serial'],
                            stdout=PIPE)
            from threading import Timer
            # hard 20 s cap on the diagnostic run
            timer = Timer(20.0, kill)
            timer.start()
            while True:
                if timer.is_alive():
                    # poll the OUTCAR until the estimate appears
                    memory = get_memory()
                    if memory:
                        timer.cancel()
                        process.terminate()
                        break
                    else:
                        time.sleep(0.1)
                else:
                    raise RuntimeError('Memory estimate timed out')
            # return to original settings
            self.int_params['ialgo'] = original_ialgo
            self.write_incar(atoms)
            # Write the recommended memory to the METADATA file
            with open('METADATA', 'r+') as f:
                data = json.load(f)
                data['recommended.memory'] = memory
                f.seek(0)
                json.dump(data, f)
            # Remove all non-initialization files
            # NOTE(review): os.unlink raises if a file is missing --
            # confirm the aborted run always produces all of these
            files = ['CHG', 'CHGCAR', 'CONTCAR', 'DOSCAR',
                     'EIGENVAL', 'IBZKPT', 'OSZICAR', 'PCDAT',
                     'vasprun.xml', 'OUTCAR', 'WAVECAR', 'XDATCAR']
            for f in files:
                os.unlink(f)
    # Each node will require the memory read from the OUTCAR
    nodes = JASPRC['queue.nodes']
    ppn = JASPRC['queue.ppn']
    # Return an integer
    import math
    total_memory = int(math.ceil(nodes * ppn * memory))
    JASPRC['queue.mem'] = '{0}GB'.format(total_memory)
    # return the memory as read from the OUTCAR
    return memory
Vasp.get_required_memory = get_required_memory
def chgsum(self):
    '''Sum the AECCAR0 and AECCAR2 files with the chgsum.pl utility.

    Raises an Exception when the utility produced no stdout or wrote
    anything to stderr.
    '''
    proc = Popen(['chgsum.pl', 'AECCAR0', 'AECCAR2'],
                 stdin=PIPE, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    if out == '' or err != '':
        raise Exception('Cannot perform chgsum:\n\n{0}'.format(err))
Vasp.chgsum = chgsum
def bader(self, cmd=None, ref=False, verbose=False, overwrite=False):
    '''Performs bader analysis for a calculation.

    Follows defaults unless a full shell command is specified via cmd
    (a string or an argument list).  Does not overwrite existing
    files if overwrite=False.  If ref is True, tries to reference the
    charge density to the sum of AECCAR0 and AECCAR2.

    Requires the bader.pl (and chgsum.pl) script to be in the system
    PATH.
    '''
    # Reuse an existing analysis unless asked to redo it.
    if 'ACF.dat' in os.listdir('./') and not overwrite:
        self._get_calculated_charges()
        return
    # Build the command line.
    if cmd is None:
        if ref:
            self.chgsum()
            cmdlist = ['bader', 'CHGCAR', '-ref', 'CHGCAR_sum']
        else:
            cmdlist = ['bader', 'CHGCAR']
    elif type(cmd) is str:
        cmdlist = cmd.split()
    elif type(cmd) is list:
        cmdlist = cmd
    proc = Popen(cmdlist, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    if out == '' or err != '':
        raise Exception('Cannot perform Bader:\n\n{0}'.format(err))
    elif verbose:
        print('Bader completed for {0}'.format(self.vaspdir))
    # Now store calculated charges
    self._get_calculated_charges()
Vasp.bader = bader
def _get_calculated_charges(self,
                            atoms=None,
                            fileobj='ACF.dat',
                            displacement=1e-4):
    """Calculate the charges from the fileobj.

    This is a modified version of the attach_charges function in
    ase.io.bader to work better with VASP.

    Does not require the atom positions to be in Bohr and references
    the charge to the ZVAL in the POTCAR

    :param atoms: atoms to attach charges for; defaults to
        self.get_atoms()
    :param fileobj: ACF.dat path or an open file object
    :param displacement: maximum allowed mismatch between the atom
        positions and the positions in ACF.dat (None disables the
        check)

    Stores the resorted charges on self._calculated_charges; returns
    None when the ACF.dat file cannot be opened.
    """
    if isinstance(fileobj, str):
        try:
            fileobj = open(fileobj)
            f_open = True
        except(IOError):
            # no bader output available yet
            return None
    # NOTE(review): f_open is unbound when fileobj was passed as an
    # open file object -- the close below would raise NameError then;
    # confirm callers always pass a path
    if atoms is None:
        atoms = self.get_atoms()
    # Get the sorting and resorting lists
    sort = self.sort
    resort = self.resort
    # First get a dictionary of ZVALS from the pseudopotentials
    LOP = self.get_pseudopotentials()
    ppp = os.environ['VASP_PP_PATH']
    zval = {}
    for sym, ppath, hash in LOP:
        fullpath = ppp + ppath
        z = get_ZVAL(fullpath)
        zval[sym] = z
    # Get sorted symbols and positions according to POSCAR and ACF.dat
    symbols = np.array(atoms.get_chemical_symbols())[sort]
    positions = atoms.get_positions()[sort]
    charges = []
    sep = '---------------'
    i = 0  # Counter for the lines
    k = 0  # Counter of sep
    assume6columns = False
    for line in fileobj:
        if line[0] == '\n':  # check if there is an empty line in the
            i -= 1           # head of ACF.dat file
        if i == 0:
            # the first line carries the column headings; find the
            # column holding the bader charge
            headings = line
            if 'BADER' in headings.split():
                j = headings.split().index('BADER')
            elif 'CHARGE' in headings.split():
                j = headings.split().index('CHARGE')
            else:
                print('Can\'t find keyword "BADER" or "CHARGE".' \
                      + ' Assuming the ACF.dat file has 6 columns.')
                j = 4
                assume6columns = True
        if sep in line:  # Stop at last seperator line
            if k == 1:
                break
            k += 1
        if not i > 1:
            # still inside the header region
            pass
        else:
            words = line.split()
            if assume6columns is True:
                if len(words) != 6:
                    raise IOError('Number of columns in ACF file incorrect!\n'
                                  'Check that Bader program version >= 0.25')
            # charge = ZVAL - bader population for this atom
            sym = symbols[int(words[0]) - 1]
            charges.append(zval[sym] - float(words[j]))
            if displacement is not None:
                # check if the atom positions match
                xyz = np.array([float(w) for w in words[1:4]])
                assert np.linalg.norm(positions[int(words[0]) - 1] - xyz) < displacement
        i += 1
    if f_open:
        fileobj.close()
    # Now attach the resorted charges to the atom
    charges = np.array(charges)[resort]
    self._calculated_charges = charges
Vasp._get_calculated_charges = _get_calculated_charges
def get_charges(self, atoms=None):
    '''Return the cached charges from a previous call to bader().

    Useful for storing the charges to a database outside the context
    manager.  Returns None when no bader analysis has been run.
    '''
    if atoms is None:
        atoms = self.get_atoms()
    return getattr(self, '_calculated_charges', None)
Vasp.get_charges = get_charges
def get_property(self, name, atoms=None, allow_calculation=True):
    """A function meant to mimic the get_property() function
    already implemented for non-VASP calculators in ASE.

    This function is required for proper usage of the ASE database
    the way it is currently written.

    Raises NotImplementedError for unknown property names, and for
    'magmom' when no magnetic moment was calculated.
    """
    if atoms is None:
        atoms = self.get_atoms()
    if name == 'magmom':
        # only available when a magnetic moment was actually computed
        if hasattr(self, 'magnetic_moment'):
            return atoms.get_magnetic_moment()
        raise NotImplementedError
    # deferred lookups so missing methods on atoms only matter for
    # the property actually requested
    getters = {
        'energy': lambda: atoms.get_potential_energy(),
        'forces': lambda: atoms.get_forces(),
        'stress': lambda: atoms.get_stress(),
        'dipole': lambda: atoms.get_dipole_moment(),
        'magmoms': lambda: atoms.get_magnetic_moments(),
        'charges': lambda: atoms.get_charges(),
        'free_energy': lambda: atoms.get_potential_energy(force_consistent=True),
    }
    try:
        getter = getters[name]
    except KeyError:
        raise NotImplementedError
    return getter()
Vasp.get_property = get_property
# implemented_properties = ['energy', 'forces', 'stress', 'dipole',
# 'charges', 'magmom', 'magmoms']
# Vasp.implemented_properties = implemented_properties
# def get_property(self, name, atoms=None, allow_calculation=False):
# if name not in self.implemented_properties:
# raise NotImplementedError
# if atoms is None:
# atoms = self.atoms
# system_changes = []
# else:
# system_changes = self.check_state(atoms)
# if system_changes:
# # self.reset()
# # Vasp does not have a reset. Silently continue here...
# pass
# if name not in self.results:
# return None
# # if not allow_calculation:
# # return None
# # We don't want to calculate things here...
# # try:
# # self.calculate(atoms, [name], system_changes)
# # except Exception:
# # self.reset()
# # raise
# if name == 'magmom' and 'magmom' not in self.results:
# return 0.0
# if name == 'magmoms' and 'magmoms' not in self.results:
# return np.zeros(len(atoms))
# result = self.results[name]
# if isinstance(result, np.ndarray):
# result = result.copy()
# return result
# Vasp.get_property = get_property
# def attach_results(self):
# '''
# Attaches values of the main properties to
# self.results
# '''
# atoms = self.get_atoms()
# results = {}
# results['energy'] = atoms.get_potential_energy()
# results['forces'] = atoms.get_forces()
# self.results = results
# Vasp.attach_results = attach_results
| prtkm/jasp | jasp/jasp_extensions.py | Python | gpl-2.0 | 56,254 | [
"ASE",
"VASP"
] | 4604d3a9a61c85abbb7fc0d984bf871f317b9aad72da2f00f4826c492c90a288 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
# URL configuration: static template pages, the Django admin, user
# management and allauth.  Media files are served by static() --
# a development-only setup.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
    # Django Admin
    url(r'^admin/', include(admin.site.urls)),
    # User management
    url(r'^users/', include("invoices.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    # NOTE(review): string view references ('django.views.defaults...')
    # were removed in Django 1.10 -- confirm the pinned Django version
    # still supports them.
    urlpatterns += [
        url(r'^400/$', 'django.views.defaults.bad_request'),
        url(r'^403/$', 'django.views.defaults.permission_denied'),
        url(r'^404/$', 'django.views.defaults.page_not_found'),
        url(r'^500/$', 'django.views.defaults.server_error'),
    ]
| sztosz/invoices | config/urls.py | Python | bsd-3-clause | 1,230 | [
"VisIt"
] | 21db073c04536ab15980e1bec9b5a6c4884f3f2e25100e21a9cac507b907fc61 |
from rest_framework import serializers
from qmpy.analysis.vasp import Calculation
class CalculationSerializer(serializers.ModelSerializer):
    """Serializer exposing the summary fields of a VASP Calculation."""
    class Meta:
        model = Calculation
        # identifying information plus the headline results
        fields = (
            "id",
            "entry",
            "composition",
            "path",
            "label",
            "band_gap",
            "converged",
            "energy_pa",
        )
class CalculationRawSerializer(serializers.ModelSerializer):
    """Minimal serializer: just enough to locate a Calculation on disk."""
    class Meta:
        model = Calculation
        fields = ("id", "path", "label")
| wolverton-research-group/qmpy | qmpy/web/serializers/calculation.py | Python | mit | 544 | [
"VASP"
] | 8ab832468693f4516296a719f9e10fd6703ea7c64bf04179be4c92087352cef5 |
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
    """ Set X and Y appropriately and checks inputs

    If Y is None, it is set as a pointer to X (i.e. not a copy).
    If Y is given, this does not happen.
    All distance metrics should use this function first to assert that the
    given parameters are correct and safe to use.

    Specifically, this function first ensures that both X and Y are arrays,
    then checks that they are at least two dimensional while ensuring that
    their elements are floats. Finally, the function checks that the size
    of the second dimension of the two arrays is equal.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        An array equal to X, guaranteed to be a numpy array.

    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
        If Y was None, safe_Y will be a pointer to X.
    """
    X, Y, dtype = _return_float_dtype(X, Y)
    if Y is X or Y is None:
        # validate once and share the same object for both operands
        X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
    else:
        X = check_array(X, accept_sparse='csr', dtype=dtype)
        Y = check_array(Y, accept_sparse='csr', dtype=dtype)
    n_features_X, n_features_Y = X.shape[1], Y.shape[1]
    if n_features_X != n_features_Y:
        raise ValueError("Incompatible dimension for X and Y matrices: "
                         "X.shape[1] == %d while Y.shape[1] == %d" % (
                             n_features_X, n_features_Y))
    return X, Y
def check_paired_arrays(X, Y):
    """Validate X and Y for paired-distance computation.

    Runs the standard pairwise validation (conversion to float arrays,
    CSR for sparse input, equal feature counts) and additionally requires
    the two arrays to have exactly the same shape, since paired metrics
    match row i of X with row i of Y.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        A validated array equal to X.
    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        A validated array equal to Y (a pointer to safe_X if Y was None).

    Raises
    ------
    ValueError
        If X and Y do not have identical shapes.
    """
    X, Y = check_pairwise_arrays(X, Y)
    if X.shape == Y.shape:
        return X, Y
    raise ValueError("X and Y should be of same shape. They were "
                     "respectively %r and %r long." % (X.shape, Y.shape))
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
    """
    Considering the rows of X (and Y=X) as vectors, compute the
    distance matrix between each pair of vectors.
    For efficiency reasons, the euclidean distance between a pair of row
    vector x and y is computed as::
        dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
    This formulation has two advantages over other ways of computing distances.
    First, it is computationally efficient when dealing with sparse data.
    Second, if x varies but y remains unchanged, then the right-most dot
    product `dot(y, y)` can be pre-computed.
    However, this is not the most precise way of doing this computation, and
    the distance matrix returned by this function may not be exactly
    symmetric as required by, e.g., ``scipy.spatial.distance`` functions.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
    Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
    Y_norm_squared : array-like, shape (n_samples_2, ), optional
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``)
    squared : boolean, optional
        Return squared Euclidean distances.

    Returns
    -------
    distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)

    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> # distance between rows of X
    >>> euclidean_distances(X, X)
    array([[ 0.,  1.],
           [ 1.,  0.]])
    >>> # get distance to origin
    >>> euclidean_distances(X, [[0, 0]])
    array([[ 1.        ],
           [ 1.41421356]])

    See also
    --------
    paired_distances : distances betweens pairs of elements of X and Y.
    """
    # should not need X_norm_squared because if you could precompute that as
    # well as Y, then you should just pre-compute the output and not even
    # call this function.
    X, Y = check_pairwise_arrays(X, Y)
    if Y_norm_squared is not None:
        YY = check_array(Y_norm_squared)
        # check_array returns a 2-D array; the precomputed norms must be a
        # single row matching Y's sample count.
        if YY.shape != (1, Y.shape[0]):
            raise ValueError(
                "Incompatible dimensions for Y and Y_norm_squared")
    else:
        # Row-wise squared norms of Y, shaped (1, n_samples_2) so that they
        # broadcast across rows of the distance matrix below.
        YY = row_norms(Y, squared=True)[np.newaxis, :]
    if X is Y:  # shortcut in the common case euclidean_distances(X, X)
        XX = YY.T
    else:
        XX = row_norms(X, squared=True)[:, np.newaxis]
    # ||x - y||^2 = ||x||^2 - 2<x, y> + ||y||^2, accumulated in place to
    # avoid allocating intermediates.
    distances = safe_sparse_dot(X, Y.T, dense_output=True)
    distances *= -2
    distances += XX
    distances += YY
    # Floating point cancellation can produce tiny negative entries; clamp
    # them before the square root.
    np.maximum(distances, 0, out=distances)
    if X is Y:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        distances.flat[::distances.shape[0] + 1] = 0.0
    return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
                                  batch_size=500, metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.
    This function computes for each row in X, the index of the row of Y which
    is closest (according to the specified distance). The minimal distances are
    also returned.
    This is mostly equivalent to calling:
        (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
         pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
    but uses much less memory, and is faster for large arrays.

    Parameters
    ----------
    X, Y : {array-like, sparse matrix}
        Arrays containing points. Respective shapes (n_samples1, n_features)
        and (n_samples2, n_features)
    batch_size : integer
        To reduce memory consumption over the naive solution, data are
        processed in batches, comprising batch_size rows of X and
        batch_size rows of Y. The default value is quite conservative, but
        can be changed for fine-tuning. The larger the number, the larger the
        memory usage.
    metric : string or callable
        metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.
        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays as input and return one value indicating the
        distance between them. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.
        Distance matrices are not supported.
        Valid values for metric are:
        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']
        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
          'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
        See the documentation for scipy.spatial.distance for details on these
        metrics.
    metric_kwargs : dict, optional
        Keyword arguments to pass to specified metric function.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].
    distances : numpy.ndarray
        distances[i] is the distance between the i-th row in X and the
        argmin[i]-th row in Y.

    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin
    """
    dist_func = None
    if metric in PAIRWISE_DISTANCE_FUNCTIONS:
        dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif not callable(metric) and not isinstance(metric, str):
        raise ValueError("'metric' must be a string or a callable")
    X, Y = check_pairwise_arrays(X, Y)
    if metric_kwargs is None:
        metric_kwargs = {}
    if axis == 0:
        # Minimizing over X's rows instead of Y's is the same problem with
        # the roles of the two arrays swapped.
        X, Y = Y, X
    # Allocate output arrays
    indices = np.empty(X.shape[0], dtype=np.intp)
    values = np.empty(X.shape[0])
    values.fill(np.infty)
    for chunk_x in gen_batches(X.shape[0], batch_size):
        X_chunk = X[chunk_x, :]
        for chunk_y in gen_batches(Y.shape[0], batch_size):
            Y_chunk = Y[chunk_y, :]
            if dist_func is not None:
                if metric == 'euclidean':  # special case, for speed
                    # Squared euclidean distances computed directly from the
                    # dot-product expansion; the sqrt is deferred to the end.
                    d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
                                              dense_output=True)
                    d_chunk *= -2
                    d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
                    d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
                    np.maximum(d_chunk, 0, d_chunk)
                else:
                    d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
            else:
                d_chunk = pairwise_distances(X_chunk, Y_chunk,
                                             metric=metric, **metric_kwargs)
            # Update indices and minimum values using chunk
            min_indices = d_chunk.argmin(axis=1)
            min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
                                 min_indices]
            # flags marks rows whose running minimum this chunk improves.
            flags = values[chunk_x] > min_values
            # indices[chunk_x] is a slice-view, so the masked assignment
            # writes through to the full output array.
            indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
            values[chunk_x][flags] = min_values[flags]
    if metric == "euclidean" and not metric_kwargs.get("squared", False):
        # The fast euclidean path accumulated squared distances; take the
        # root in place unless the caller asked for squared output.
        np.sqrt(values, values)
    return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
                              batch_size=500, metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.
    This function computes for each row in X, the index of the row of Y which
    is closest (according to the specified distance).
    This is mostly equivalent to calling:
        pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
    but uses much less memory, and is faster for large arrays.
    This function works with dense 2D arrays only.

    Parameters
    ==========
    X, Y : array-like
        Arrays containing points. Respective shapes (n_samples1, n_features)
        and (n_samples2, n_features)
    batch_size : integer
        To reduce memory consumption over the naive solution, data are
        processed in batches, comprising batch_size rows of X and
        batch_size rows of Y. The default value is quite conservative, but
        can be changed for fine-tuning. The larger the number, the larger the
        memory usage.
    metric : string or callable
        metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.
        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays as input and return one value indicating the
        distance between them. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.
        Distance matrices are not supported.
        Valid values for metric are:
        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']
        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
          'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
        See the documentation for scipy.spatial.distance for details on these
        metrics.
    metric_kwargs : dict, optional
        keyword arguments to pass to specified metric function.

    Returns
    =======
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    See also
    ========
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin_min
    """
    # ``metric_kwargs`` defaults to None rather than ``{}``: a mutable
    # default dict would be shared across all calls, so a metric that
    # mutated it could leak state between unrelated callers.
    # pairwise_distances_argmin_min treats None as an empty dict.
    return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
                                         metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
                        size_threshold=5e8):
    """ Compute the L1 distances between the vectors in X and Y.
    With sum_over_features equal to False it returns the componentwise
    distances.

    Parameters
    ----------
    X : array_like
        An array with shape (n_samples_X, n_features).
    Y : array_like, optional
        An array with shape (n_samples_Y, n_features).
    sum_over_features : bool, default=True
        If True the function returns the pairwise distance matrix
        else it returns the componentwise L1 pairwise-distances.
        Not supported for sparse matrix inputs.
    size_threshold : int, default=5e8
        Unused parameter.

    Returns
    -------
    D : array
        If sum_over_features is False shape is
        (n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (ie. absolute difference),
        else shape is (n_samples_X, n_samples_Y) and D contains
        the pairwise L1 distances.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import manhattan_distances
    >>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
    array([[ 0.]])
    >>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances([[1, 2], [3, 4]],\
         [[1, 2], [0, 3]])#doctest:+ELLIPSIS
    array([[ 0.,  2.],
           [ 4.,  4.]])
    >>> import numpy as np
    >>> X = np.ones((1, 2))
    >>> y = 2 * np.ones((2, 2))
    >>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
    array([[ 1.,  1.],
           [ 1.,  1.]]...)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if issparse(X) or issparse(Y):
        # Sparse path: delegate to the Cython kernel, which fills a
        # preallocated dense result matrix in place.
        if not sum_over_features:
            raise TypeError("sum_over_features=%r not supported"
                            " for sparse matrices" % sum_over_features)
        X = csr_matrix(X, copy=False)
        Y = csr_matrix(Y, copy=False)
        D = np.zeros((X.shape[0], Y.shape[0]))
        _sparse_manhattan(X.data, X.indices, X.indptr,
                          Y.data, Y.indices, Y.indptr,
                          X.shape[1], D)
        return D
    if sum_over_features:
        # Dense summed distances: scipy's C implementation of cityblock.
        return distance.cdist(X, Y, 'cityblock')
    # Componentwise distances via broadcasting; |D| computed in place.
    D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    D = np.abs(D, D)
    return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
    """Compute the cosine distance between samples in X and Y.

    The cosine distance is defined as one minus the cosine similarity.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).
    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    distance matrix : array
        An array with shape (n_samples_X, n_samples_Y).

    See also
    --------
    sklearn.metrics.pairwise.cosine_similarity
    scipy.spatial.distance.cosine (dense matrices only)
    """
    distances = cosine_similarity(X, Y)
    # Transform similarity into distance in place (1 - S), avoiding a copy.
    np.subtract(1, distances, out=distances)
    return distances
# Paired distances
def paired_euclidean_distances(X, Y):
    """Compute the row-wise euclidean distances between X and Y.

    The i-th entry of the result is the euclidean distance between
    X[i] and Y[i].

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    diff = X - Y
    return np.sqrt(np.sum(diff * diff, axis=-1))
def paired_manhattan_distances(X, Y):
    """Compute the row-wise L1 distances between X and Y.

    The i-th entry of the result is the manhattan distance between
    X[i] and Y[i].

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    return np.sum(np.abs(X - Y), axis=-1)
def paired_cosine_distances(X, Y):
    """
    Computes the paired cosine distances between X and Y

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray, shape (n_samples, )

    Notes
    ------
    The cosine distance is equivalent to the half the squared
    euclidean distance if each sample is normalized to unit norm
    """
    X, Y = check_paired_arrays(X, Y)
    # Normalize both inputs to unit L2 norm (copies, so callers' data is
    # untouched), then use 0.5 * ||x - y||^2 == 1 - <x, y> for unit vectors.
    X_normalized = normalize(X, copy=True)
    X_normalized -= normalize(Y, copy=True)
    return .5 * (X_normalized ** 2).sum(axis=-1)
# Mapping from paired-distance metric names to implementations; several
# names are aliases ('l2'/'euclidean', 'l1'/'manhattan'/'cityblock') that
# share a single function.  Consumed by paired_distances() below.
PAIRED_DISTANCES = {
    'cosine': paired_cosine_distances,
    'euclidean': paired_euclidean_distances,
    'l2': paired_euclidean_distances,
    'l1': paired_manhattan_distances,
    'manhattan': paired_manhattan_distances,
    'cityblock': paired_manhattan_distances,
}
def paired_distances(X, Y, metric="euclidean", **kwds):
    """Compute the distances between (X[0], Y[0]), (X[1], Y[1]), etc...

    Parameters
    ----------
    X, Y : ndarray (n_samples, n_features)
    metric : string or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        specified in PAIRED_DISTANCES, including "euclidean",
        "manhattan", or "cosine".
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.

    Returns
    -------
    distances : ndarray (n_samples, )

    Examples
    --------
    >>> from sklearn.metrics.pairwise import paired_distances
    >>> X = [[0, 1], [1, 1]]
    >>> Y = [[0, 1], [2, 1]]
    >>> paired_distances(X, Y)
    array([ 0.,  1.])

    See also
    --------
    pairwise_distances : pairwise distances.
    """
    named_metric = PAIRED_DISTANCES.get(metric)
    if named_metric is not None:
        return named_metric(X, Y)
    if not callable(metric):
        raise ValueError('Unknown distance %s' % metric)
    # Validate once up front (a named metric would normally do this).
    X, Y = check_paired_arrays(X, Y)
    # Evaluate the callable row by row into a float array.
    return np.fromiter((metric(x, y) for x, y in zip(X, Y)),
                       dtype=float, count=len(X))
# Kernels
def linear_kernel(X, Y=None):
    """
    Compute the linear kernel between X and Y.

    The linear kernel is the plain dot product K(x, y) = <x, y>.

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)
    Y : array of shape (n_samples_2, n_features)

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    # dense_output=True forces a dense Gram matrix even for sparse inputs.
    return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
    """
    Compute the polynomial kernel between X and Y::
        K(X, Y) = (gamma <X, Y> + coef0)^degree

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)
    Y : array of shape (n_samples_2, n_features)
    degree : int, default 3
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
    coef0 : int, default 1

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    # Build (gamma <X, Y> + coef0)^degree with in-place ops on the Gram
    # matrix to avoid intermediate copies.
    K = linear_kernel(X, Y)
    K *= gamma
    K += coef0
    K **= degree
    return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """
    Compute the sigmoid kernel between X and Y::
        K(X, Y) = tanh(gamma <X, Y> + coef0)

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)
    Y : array of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
    coef0 : int, default 1

    Returns
    -------
    Gram matrix: array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    # tanh(gamma <X, Y> + coef0), accumulated in place on the Gram matrix.
    K = linear_kernel(X, Y)
    K *= gamma
    K += coef0
    np.tanh(K, K)  # compute tanh in-place
    return K
def rbf_kernel(X, Y=None, gamma=None):
    """
    Compute the rbf (gaussian) kernel between X and Y::
        K(x, y) = exp(-gamma ||x-y||^2)
    for each pair of rows x in X and y in Y.

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    # squared=True skips the sqrt inside euclidean_distances, since the
    # kernel only needs ||x - y||^2.
    K = euclidean_distances(X, Y, squared=True)
    K *= -gamma
    np.exp(K, K)  # exponentiate K in-place
    return K
def cosine_similarity(X, Y=None):
    """Compute cosine similarity between samples in X and Y.
    Cosine similarity, or the cosine kernel, computes similarity as the
    normalized dot product of X and Y:
        K(X, Y) = <X, Y> / (||X||*||Y||)
    On L2-normalized data, this function is equivalent to linear_kernel.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).
    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    kernel matrix : array
        An array with shape (n_samples_X, n_samples_Y).
    """
    X, Y = check_pairwise_arrays(X, Y)
    # L2-normalize each row (copy=True so the caller's data is untouched),
    # then the plain dot product equals the cosine similarity.
    X_normalized = normalize(X, copy=True)
    if X is Y:
        # Avoid normalizing the same data twice in the X-vs-X case.
        Y_normalized = X_normalized
    else:
        Y_normalized = normalize(Y, copy=True)
    K = linear_kernel(X_normalized, Y_normalized)
    return K
def additive_chi2_kernel(X, Y=None):
    """Computes the additive chi-squared kernel between observations in X and Y
    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.
    The chi-squared kernel is given by::
        k(x, y) = -Sum [(x - y)^2 / (x + y)]
    It can be interpreted as a weighted difference per entry.

    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf

    See also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.
    """
    if issparse(X) or issparse(Y):
        raise ValueError("additive_chi2 does not support sparse matrices.")
    X, Y = check_pairwise_arrays(X, Y)
    # The kernel formula divides by (x + y), so negative entries are
    # rejected up front.
    if (X < 0).any():
        raise ValueError("X contains negative values.")
    # When Y is X the first check already covered it.
    if Y is not X and (Y < 0).any():
        raise ValueError("Y contains negative values.")
    # The Cython kernel fills the preallocated result matrix in place.
    result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    _chi2_kernel_fast(X, Y, result)
    return result
def chi2_kernel(X, Y=None, gamma=1.):
    """Computes the exponential chi-squared kernel X and Y.
    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.
    The chi-squared kernel is given by::
        k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
    It can be interpreted as a weighted difference per entry.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float, default=1.
        Scaling parameter of the chi2 kernel.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf

    See also
    --------
    additive_chi2_kernel : The additive version of this kernel
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to the additive version of this kernel.
    """
    # additive_chi2_kernel returns -Sum[...], so multiplying by +gamma and
    # exponentiating (in place) yields exp(-gamma * Sum[...]).
    K = additive_chi2_kernel(X, Y)
    K *= gamma
    return np.exp(K, K)
# Helper functions - distance
# Mapping from string metric names to the distance implementations above;
# 'l1'/'manhattan'/'cityblock' and 'l2'/'euclidean' are aliases.
PAIRWISE_DISTANCE_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    'cityblock': manhattan_distances,
    'cosine': cosine_distances,
    'euclidean': euclidean_distances,
    'l2': euclidean_distances,
    'l1': manhattan_distances,
    'manhattan': manhattan_distances, }
def distance_metrics():
    """Valid metrics for pairwise_distances.
    This function simply returns the valid pairwise distance metrics.
    It exists to allow for a description of the mapping for
    each of the valid strings.
    The valid distance metrics, and the function they map to, are:
    ============     ====================================
    metric           Function
    ============     ====================================
    'cityblock'      metrics.pairwise.manhattan_distances
    'cosine'         metrics.pairwise.cosine_distances
    'euclidean'      metrics.pairwise.euclidean_distances
    'l1'             metrics.pairwise.manhattan_distances
    'l2'             metrics.pairwise.euclidean_distances
    'manhattan'      metrics.pairwise.manhattan_distances
    ============     ====================================

    Returns
    -------
    dict mapping metric name strings to distance functions.
    """
    return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
    """Break the pairwise matrix in n_jobs even slices
    and compute them in parallel.

    Each worker computes func(X, Y[s], **kwds) for one slice s of Y's rows;
    the column blocks are then stitched back together with hstack.
    """
    if n_jobs < 0:
        # Negative n_jobs counts back from the CPU total: -1 means all CPUs,
        # -2 all but one, etc.; never fewer than one job.
        n_jobs = max(cpu_count() + 1 + n_jobs, 1)
    if Y is None:
        Y = X
    ret = Parallel(n_jobs=n_jobs, verbose=0)(
        delayed(func)(X, Y[s], **kwds)
        for s in gen_even_slices(Y.shape[0], n_jobs))
    return np.hstack(ret)
# Metric names accepted by pairwise_distances: the scikit-learn
# implementations registered above plus the metric strings understood by
# scipy.spatial.distance.pdist/cdist.
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
                  'braycurtis', 'canberra', 'chebyshev', 'correlation',
                  'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
                  'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
                  'russellrao', 'seuclidean', 'sokalmichener',
                  'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
    """ Compute the distance matrix from a vector array X and optional Y.
    This method takes either a vector array or a distance matrix, and returns
    a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distances matrix, it is returned instead.
    This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.
    If Y is given (default is None), then the returned matrix is the pairwise
    distance between the arrays from both X and Y.
    Valid values for metric are:
    - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
      'manhattan']. These metrics support sparse matrix inputs.
    - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
      'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
      'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
      'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
      See the documentation for scipy.spatial.distance for details on these
      metrics. These metrics do not support sparse matrix inputs.
    Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
    valid scipy.spatial.distance metrics), the scikit-learn implementation
    will be used, which is faster and has support for sparse matrices (except
    for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
    function.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.
    Y : array [n_samples_b, n_features]
        A second feature array only if X has shape [n_samples_a, n_features].
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.
    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.

    Returns
    -------
    D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A distance matrix D such that D_{i, j} is the distance between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then D_{i, j} is the distance between the ith array
        from X and the jth array from Y.
    """
    if (metric not in _VALID_METRICS and
            not callable(metric) and metric != "precomputed"):
        raise ValueError("Unknown metric %s. "
                         "Valid metrics are %s, or 'precomputed', or a "
                         "callable" % (metric, _VALID_METRICS))
    if metric == "precomputed":
        # X already holds the distances; nothing to compute.
        return X
    elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
        # Fast scikit-learn implementation (supports sparse input).
        func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
        if n_jobs == 1:
            return func(X, Y, **kwds)
        else:
            return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
    elif callable(metric):
        # Check matrices first (this is usually done by the metric).
        X, Y = check_pairwise_arrays(X, Y)
        n_x, n_y = X.shape[0], Y.shape[0]
        # Calculate distance for each element in X and Y.
        # FIXME: can use n_jobs here too
        # FIXME: np.zeros can be replaced by np.empty
        D = np.zeros((n_x, n_y), dtype='float')
        for i in range(n_x):
            # In the X-vs-X case only the upper triangle is evaluated and
            # mirrored, halving the number of metric calls.
            start = 0
            if X is Y:
                start = i
            for j in range(start, n_y):
                # distance assumed to be symmetric.
                D[i][j] = metric(X[i], Y[j], **kwds)
                if X is Y:
                    D[j][i] = D[i][j]
        return D
    else:
        # Remaining string metrics are delegated to scipy.
        # Note: the distance module doesn't support sparse matrices!
        if type(X) is csr_matrix:
            raise TypeError("scipy distance metrics do not"
                            " support sparse matrices.")
        if Y is None:
            # pdist returns the condensed form; expand it to a square matrix.
            return distance.squareform(distance.pdist(X, metric=metric,
                                                      **kwds))
        else:
            if type(Y) is csr_matrix:
                raise TypeError("scipy distance metrics do not"
                                " support sparse matrices.")
            return distance.cdist(X, Y, metric=metric, **kwds)
# Helper functions - kernels
# Mapping from string kernel names to the kernel implementations above;
# 'poly'/'polynomial' are aliases.
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
    'additive_chi2': additive_chi2_kernel,
    'chi2': chi2_kernel,
    'linear': linear_kernel,
    'polynomial': polynomial_kernel,
    'poly': polynomial_kernel,
    'rbf': rbf_kernel,
    'sigmoid': sigmoid_kernel,
    'cosine': cosine_similarity, }
def kernel_metrics():
    """ Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise distance metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid distance metrics, and the function they map to, are:
      ===============   ========================================
      metric            Function
      ===============   ========================================
      'additive_chi2'   sklearn.pairwise.additive_chi2_kernel
      'chi2'            sklearn.pairwise.chi2_kernel
      'linear'          sklearn.pairwise.linear_kernel
      'poly'            sklearn.pairwise.polynomial_kernel
      'polynomial'      sklearn.pairwise.polynomial_kernel
      'rbf'             sklearn.pairwise.rbf_kernel
      'sigmoid'         sklearn.pairwise.sigmoid_kernel
      'cosine'          sklearn.pairwise.cosine_similarity
      ===============   ========================================

    Returns
    -------
    dict mapping kernel name strings to kernel functions.
    """
    return PAIRWISE_KERNEL_FUNCTIONS
# Legal keyword parameters for each kernel in PAIRWISE_KERNEL_FUNCTIONS,
# used by pairwise_kernels(..., filter_params=True) to drop extraneous
# keyword arguments before calling the kernel function.
KERNEL_PARAMS = {
    "additive_chi2": (),
    # chi2_kernel accepts ``gamma``; an empty entry here would make
    # filter_params=True silently discard it.
    "chi2": frozenset(["gamma"]),
    "cosine": (),
    "exp_chi2": frozenset(["gamma"]),
    "linear": (),
    "poly": frozenset(["gamma", "degree", "coef0"]),
    "polynomial": frozenset(["gamma", "degree", "coef0"]),
    "rbf": frozenset(["gamma"]),
    "sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
                     n_jobs=1, **kwds):
    """Compute the kernel between arrays X and optional array Y.
    This method takes either a vector array or a kernel matrix, and returns
    a kernel matrix. If the input is a vector array, the kernels are
    computed. If the input is a kernel matrix, it is returned instead.
    This method provides a safe way to take a kernel matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.
    If Y is given (default is None), then the returned matrix is the pairwise
    kernel between the arrays from both X and Y.
    Valid values for metric are::
        ['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise kernels between samples, or a feature array.
    Y : array [n_samples_b, n_features]
        A second feature array only if X has shape [n_samples_a, n_features].
    metric : string, or callable
        The metric to use when calculating kernel between instances in a
        feature array. If metric is a string, it must be one of the metrics
        in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.
    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    filter_params: boolean
        Whether to filter invalid parameters or not.
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the kernel function.

    Returns
    -------
    K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A kernel matrix K such that K_{i, j} is the kernel between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then K_{i, j} is the kernel between the ith array
        from X and the jth array from Y.

    Notes
    -----
    If metric is 'precomputed', Y is ignored and X is returned.
    """
    if metric == "precomputed":
        # X already holds the kernel matrix; nothing to compute.
        return X
    elif metric in PAIRWISE_KERNEL_FUNCTIONS:
        if filter_params:
            # Keep only the keyword arguments this kernel understands.
            kwds = dict((k, kwds[k]) for k in kwds
                        if k in KERNEL_PARAMS[metric])
        func = PAIRWISE_KERNEL_FUNCTIONS[metric]
        if n_jobs == 1:
            return func(X, Y, **kwds)
        else:
            return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
    elif callable(metric):
        # Check matrices first (this is usually done by the metric).
        X, Y = check_pairwise_arrays(X, Y)
        n_x, n_y = X.shape[0], Y.shape[0]
        # Calculate kernel for each element in X and Y.
        K = np.zeros((n_x, n_y), dtype='float')
        for i in range(n_x):
            # In the X-vs-X case only the upper triangle is evaluated and
            # mirrored, halving the number of metric calls.
            start = 0
            if X is Y:
                start = i
            for j in range(start, n_y):
                # Kernel assumed to be symmetric.
                K[i][j] = metric(X[i], Y[j], **kwds)
                if X is Y:
                    K[j][i] = K[i][j]
        return K
    else:
        raise ValueError("Unknown kernel %r" % metric)
| ankurankan/scikit-learn | sklearn/metrics/pairwise.py | Python | bsd-3-clause | 41,106 | [
"Gaussian"
] | 9d12d4c3849871bc76b9ce3cf3cb306efb30340ca97c4955cab29eb3a17a1468 |
# coding=utf-8
# Copyright (c) 2014 Merck KGaA
from __future__ import print_function
import os,re,gzip,json,requests,sys, optparse,csv
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import SDWriter
from rdkit.Chem import Descriptors
from rdkit.ML.Descriptors import MoleculeDescriptors
from scipy import interp
from scipy import stats
from sklearn import cross_validation
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.cross_validation import train_test_split
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import precision_score,recall_score
from sklearn import preprocessing
import cPickle
from pickle import Unpickler
import numpy as np
import math
from pylab import *
from sklearn.metrics import make_scorer
# printf-style template used by KappaResults.__str__; the mapping must supply
# 'kind', 'kappa', 'std_kappa', 'alpha_ci', 'kappa_low', 'kappa_upp',
# 'std_kappa0', 'z_value', 'pvalue_one_sided' and 'pvalue_two_sided'.
kappa_template = '''\
%(kind)s Kappa Coefficient
--------------------------------
Kappa %(kappa)6.4f
ASE %(std_kappa)6.4f
%(alpha_ci)s%% Lower Conf Limit %(kappa_low)6.4f
%(alpha_ci)s%% Upper Conf Limit %(kappa_upp)6.4f
Test of H0: %(kind)s Kappa = 0
ASE under H0 %(std_kappa0)6.4f
Z %(z_value)6.4f
One-sided Pr > Z %(pvalue_one_sided)6.4f
Two-sided Pr > |Z| %(pvalue_two_sided)6.4f
'''
# Example of the rendered (SAS-style) output.  This bare string literal is a
# no-op at runtime and is kept purely as reference documentation.
'''
Weighted Kappa Coefficient
--------------------------------
Weighted Kappa 0.4701
ASE 0.1457
95% Lower Conf Limit 0.1845
95% Upper Conf Limit 0.7558
Test of H0: Weighted Kappa = 0
ASE under H0 0.1426
Z 3.2971
One-sided Pr > Z 0.0005
Two-sided Pr > |Z| 0.0010
'''
def int_ifclose(x, dec=1, width=4):
    '''Format a number as a 4-character string, collapsing near-integers.

    If ``x`` is within 1e-14 of an integer it is returned as that integer
    together with a '%4d' rendering; otherwise ``x`` is returned unchanged
    together with a '%4.1f' rendering.

    Parameters
    ----------
    x : int or float
        value to format
    dec : 1
        decimals to print for non-integers; only dec=1 is implemented,
        the parameter exists for interface compatibility
    width : 4
        field width; only width=4 is implemented

    Returns
    -------
    value : int or float
        ``int(round(x))`` when x is numerically integral, else x unchanged
    text : str
        the formatted string
    '''
    nearest = int(round(x))
    if np.max(np.abs(nearest - x)) < 1e-14:
        return nearest, '%4d' % nearest
    return x, '%4.1f' % x
class KappaResults(dict):
    """Dict of Cohen's-kappa statistics that pretty-prints via ``kappa_template``.

    Expected keys in ``kwds``: 'kind', 'kappa', 'var_kappa', 'var_kappa0'
    (optionally 'alpha').  __init__ derives the standard errors, z statistic,
    one-/two-sided p-values and the confidence limits.
    """
    def __init__(self, **kwds):
        self.update(kwds)
        if not 'alpha' in self:
            self['alpha'] = 0.025
        if 'alpha_ci' not in self:
            # Bug fix: derive the CI label from the actual alpha instead of a
            # hard-coded 0.025, and also set it when a caller supplies alpha
            # explicitly (previously __str__ raised KeyError in that case).
            self['alpha_ci'] = int_ifclose(100 - self['alpha'] * 200)[1]
        self['std_kappa'] = np.sqrt(self['var_kappa'])
        self['std_kappa0'] = np.sqrt(self['var_kappa0'])
        # z statistic uses the standard error under H0 (kappa == 0)
        self['z_value'] = self['kappa'] / self['std_kappa0']
        self['pvalue_one_sided'] = stats.norm.sf(self['z_value'])
        self['pvalue_two_sided'] = self['pvalue_one_sided'] * 2
        # two-sided CI at level 1 - 2*alpha around the kappa estimate
        delta = stats.norm.isf(self['alpha']) * self['std_kappa']
        self['kappa_low'] = self['kappa'] - delta
        self['kappa_upp'] = self['kappa'] + delta

    def __str__(self):
        return kappa_template % self
def cohens_kappa(table, weights=None, return_results=True, wt=None):
    '''Compute Cohen's kappa with variance and equal-zero test
    Parameters
    ----------
    table : array_like, 2-Dim
        square array with results of two raters, one rater in rows, second
        rater in columns
    weights : array_like
        The interpretation of weights depends on the wt argument.
        If both are None, then the simple kappa is computed.
        see wt for the case when wt is not None
        If weights is two dimensional, then it is directly used as a weight
        matrix. For computing the variance of kappa, the maximum of the
        weights is assumed to be smaller or equal to one.
        TODO: fix conflicting definitions in the 2-Dim case for
    wt : None or string
        If wt and weights are None, then the simple kappa is computed.
        If wt is given, but weights is None, then the weights are set to
        be [0, 1, 2, ..., k].
        If weights is a one-dimensional array, then it is used to construct
        the weight matrix given the following options.
        wt in ['linear', 'ca' or None] : use linear weights, Cicchetti-Allison
            actual weights are linear in the score "weights" difference
        wt in ['quadratic', 'fc'] : use linear weights, Fleiss-Cohen
            actual weights are squared in the score "weights" difference
        wt = 'toeplitz' : weight matrix is constructed as a toeplitz matrix
            from the one dimensional weights.
    return_results : bool
        If True (default), then an instance of KappaResults is returned.
        If False, then only kappa is computed and returned.
    Returns
    -------
    results or kappa
        If return_results is True (default), then a results instance with all
        statistics is returned
        If return_results is False, then only kappa is calculated and returned.
    Notes
    -----
    There are two conflicting definitions of the weight matrix, Wikipedia
    versus SAS manual. However, the computation are invariant to rescaling
    of the weights matrix, so there is no difference in the results.
    Weights for 'linear' and 'quadratic' are interpreted as scores for the
    categories, the weights in the computation are based on the pairwise
    difference between the scores.
    Weights for 'toeplitz' are a interpreted as weighted distance. The distance
    only depends on how many levels apart two entries in the table are but
    not on the levels themselves.
    example:
    weights = '0, 1, 2, 3' and wt is either linear or toeplitz means that the
    weighting only depends on the simple distance of levels.
    weights = '0, 0, 1, 1' and wt = 'linear' means that the first two levels
    are zero distance apart and the same for the last two levels. This is
    the sampe as forming two aggregated levels by merging the first two and
    the last two levels, respectively.
    weights = [0, 1, 2, 3] and wt = 'quadratic' is the same as squaring these
    weights and using wt = 'toeplitz'.
    References
    ----------
    Wikipedia
    SAS Manual
    '''
    table = np.asarray(table, float) #avoid integer division
    agree = np.diag(table).sum()
    nobs = table.sum()
    probs = table / nobs
    freqs = probs #TODO: rename to use freqs instead of probs for observed
    probs_diag = np.diag(probs)
    # marginal frequencies of the two raters
    freq_row = table.sum(1) / nobs
    freq_col = table.sum(0) / nobs
    # expected joint probabilities under independence of the raters
    prob_exp = freq_col * freq_row[:, None]
    assert np.allclose(prob_exp.sum(), 1)
    #print prob_exp.sum()
    agree_exp = np.diag(prob_exp).sum() #need for kappa_max
    if weights is None and wt is None:
        kind = 'Simple'
        kappa = (agree / nobs - agree_exp) / (1 - agree_exp)
        if return_results:
            #variance
            # large-sample variance of simple kappa (Fleiss et al. formula)
            term_a = probs_diag * (1 - (freq_row + freq_col) * (1 - kappa))**2
            term_a = term_a.sum()
            term_b = probs * (freq_col[:, None] + freq_row)**2
            d_idx = np.arange(table.shape[0])
            term_b[d_idx, d_idx] = 0 #set diagonal to zero
            term_b = (1 - kappa)**2 * term_b.sum()
            term_c = (kappa - agree_exp * (1-kappa))**2
            var_kappa = (term_a + term_b - term_c) / (1 - agree_exp)**2 / nobs
            #term_c = freq_col * freq_row[:, None] * (freq_col + freq_row[:,None])
            # variance under H0: kappa == 0, used for the z test
            term_c = freq_col * freq_row * (freq_col + freq_row)
            var_kappa0 = (agree_exp + agree_exp**2 - term_c.sum())
            var_kappa0 /= (1 - agree_exp)**2 * nobs
    else:
        if weights is None:
            weights = np.arange(table.shape[0])
        #weights follows the Wikipedia definition, not the SAS, which is 1 -
        kind = 'Weighted'
        weights = np.asarray(weights, float)
        if weights.ndim == 1:
            # expand 1-dim category scores into a full weight matrix
            if wt in ['ca', 'linear', None]:
                weights = np.abs(weights[:, None] - weights) / \
                          (weights[-1] - weights[0])
            elif wt in ['fc', 'quadratic']:
                weights = (weights[:, None] - weights)**2 / \
                          (weights[-1] - weights[0])**2
            elif wt == 'toeplitz':
                #assume toeplitz structure
                from scipy.linalg import toeplitz
                #weights = toeplitz(np.arange(table.shape[0]))
                weights = toeplitz(weights)
            else:
                raise ValueError('wt option is not known')
        else:
            rows, cols = table.shape
            if (table.shape != weights.shape):
                raise ValueError('weights are not square')
        #this is formula from Wikipedia
        kappa = 1 - (weights * table).sum() / nobs / (weights * prob_exp).sum()
        #TODO: add var_kappa for weighted version
        if return_results:
            var_kappa = np.nan
            var_kappa0 = np.nan
            #switch to SAS manual weights, problem if user specifies weights
            #w is negative in some examples,
            #but weights is scale invariant in examples and rough check of source
            w = 1. - weights
            w_row = (freq_col * w).sum(1)
            w_col = (freq_row[:, None] * w).sum(0)
            agree_wexp = (w * freq_col * freq_row[:, None]).sum()
            term_a = freqs * (w - (w_col + w_row[:, None]) * (1 - kappa))**2
            fac = 1. / ((1 - agree_wexp)**2 * nobs)
            var_kappa = term_a.sum() - (kappa - agree_wexp * (1 - kappa))**2
            var_kappa *= fac
            freqse = freq_col * freq_row[:, None]
            var_kappa0 = (freqse * (w - (w_col + w_row[:, None]))**2).sum()
            var_kappa0 -= agree_wexp**2
            var_kappa0 *= fac
    # maximum achievable kappa given the marginals
    kappa_max = (np.minimum(freq_row, freq_col).sum() - agree_exp) / \
                (1 - agree_exp)
    if return_results:
        res = KappaResults( kind=kind,
                kappa=kappa,
                kappa_max=kappa_max,
                weights=weights,
                var_kappa=var_kappa,
                var_kappa0=var_kappa0
                )
        return res
    else:
        return kappa
def to_table(data, bins=None):
    '''Convert raw rating data (subject, rater) into a contingency table.

    Brings data into the format expected by ``cohens_kappa``.

    Parameters
    ----------
    data : array_like, 2-Dim
        category assignments, subjects in rows and raters in columns.
    bins : None, int or tuple of array_like
        If None, categories are relabelled to 0..n_cat-1 (only levels with
        non-zero counts appear).  If an integer, levels are assumed to
        already be 0..bins-1 and empty levels are kept.  If a tuple of
        array_like, it is passed straight to ``numpy.histogramdd`` (useful
        for merging categories).

    Returns
    -------
    arr : nd_array, (n_cat, n_cat)
        counts with rater1 in rows and rater2 in columns; for more than two
        raters the table has one dimension per rater.
    bins_ : ndarray
        the bin edges that were used.

    Notes
    -----
    No NaN handling -- drop incomplete rows beforehand.
    '''
    arr = np.asarray(data)
    n_raters = arr.shape[1]
    if bins is None:
        # relabel the observed category levels to consecutive integers
        levels, coded = np.unique(arr.ravel(), return_inverse=True)
        edges = np.arange(len(levels) + 1) - 0.5
        sample = coded.reshape(arr.shape)
    elif np.isscalar(bins):
        # levels assumed to already be 0..bins-1
        edges = np.arange(bins + 1) - 0.5
        sample = arr
    else:
        edges = bins
        sample = arr
    counts = np.histogramdd(sample, (edges,) * n_raters)
    return counts[0], edges
class p_con:
"""Class to create Models to classify Molecules active or inactive
using threshold for value in training-data"""
def __init__(self,acc_id=None,proxy={}):
"""Constructor to initialize Object, use proxy if neccessary"""
self.request_data={"acc_id":acc_id,"proxy":proxy}
self.acc_id = acc_id
self.proxy = proxy
self.model = []
self.verbous = False
def __str__(self):
"""String-Representation for Object"""
self.request_data["cmpd_count"] = len(self.sd_entries)
retString = ""
for key in self.request_data.keys():
retString += "%s: %s\n" % (key,self.request_data[key])
return retString.rstrip()
    def step_0_get_chembl_data(self):
        """Download Compound-Data for self.acc_id, these are available in self.sd_entries afterwards"""
        def looks_like_number(x):
            """Check for proper Float-Value"""
            try:
                float(x)
                return True
            except ValueError:
                return False
        # Accept either a UniProt accession (resolved to a ChEMBL target via
        # the web service) or a ready-made CHEMBL target id.
        if self.acc_id.find("CHEMBL") == -1:
            self.target_data = requests.get("https://www.ebi.ac.uk/chemblws/targets/uniprot/{}.json".format(self.acc_id),proxies=self.proxy).json()
        else:
            self.target_data = {}
            self.target_data['target'] = {}
            self.target_data['target']['chemblId'] = self.acc_id
        self.chembl_id = self.target_data['target']['chemblId']
        self.request_data["chembl_id"] = self.target_data['target']['chemblId']
        # print self.target_data
        self.bioactivity_data = requests.get("https://www.ebi.ac.uk/chemblws/targets/{}/bioactivities.json".format(self.target_data['target']['chemblId']),proxies=self.proxy).json()
        # counters of skipped/kept records (bookkeeping only, not reported)
        ic50_skip=0
        ki_skip=0
        inhb_skip=0
        count=0
        non_homo=0
        self.dr={}
        i = 0
        x = len(self.bioactivity_data['bioactivities'] )
        # keep only bioactivity records whose 'value' parses as a float
        for bioactivity in [record for record in self.bioactivity_data['bioactivities'] if looks_like_number(record['value']) ] :
            # crude textual progress indicator on stdout
            if i%100 == 0:
                sys.stdout.write('\r' + str(i) + '/' +str(x) + ' > <\b\b\b\b\b\b\b\b\b\b\b')
            elif (i%100)%10==0:
                sys.stdout.write('|')
            sys.stdout.flush()
            i += 1
            # if i > 5000: break
            if bioactivity['organism'] != 'Homo sapiens':
                non_homo+=1
                continue
            # NOTE(review): only IC50 entries measured in nM and 'Inhibition'
            # entries are kept; Ki entries are always skipped.  'inhb_skip'
            # actually counts *kept* Inhibition records -- confirm intent.
            if re.search('IC50', bioactivity['bioactivity_type']):
                if bioactivity['units'] != 'nM':
                    ic50_skip+=1
                    continue
            elif re.search('Ki', bioactivity['bioactivity_type']):
                ki_skip+=1
                continue
            elif re.search('Inhibition', bioactivity['bioactivity_type']):
                inhb_skip+=1
            else:
                continue
            # fetch the compound record to obtain its SMILES
            self.cmpd_data = requests.get("https://www.ebi.ac.uk/chemblws/compounds/{}.json".format(bioactivity['ingredient_cmpd_chemblid']),proxies=self.proxy).json()
            my_smiles = self.cmpd_data['compound']['smiles']
            bioactivity['Smiles']=my_smiles
            self.dr[count] = bioactivity
            count+=1
        SDtags = self.dr[0].keys()
        cpd_counter=0
        self.sd_entries = []
        # build RDKit molecules from the SMILES and copy every bioactivity
        # field onto the molecule as an SD property
        for x in range(len(self.dr)):
            entry = self.dr[x]
            cpd = Chem.MolFromSmiles(str(entry['Smiles']))
            AllChem.Compute2DCoords(cpd)
            cpd.SetProp("_Name",str(cpd_counter))
            cpd_counter += 1
            for tag in SDtags: cpd.SetProp(str(tag),str(entry[tag]))
            self.sd_entries.append(cpd)
        return True
def step_1_keeplargestfrag(self):
"""remove all smaller Fragments per compound, just keep the largest"""
result=[]
for cpd in self.sd_entries:
fragments = Chem.GetMolFrags(cpd,asMols=True)
list_cpds_fragsize = []
for frag in fragments:
list_cpds_fragsize.append(frag.GetNumAtoms())
largest_frag_index = list_cpds_fragsize.index(max(list_cpds_fragsize))
largest_frag = fragments[largest_frag_index]
result.append(largest_frag)
self.sd_entries = result
return True
def step_2_remove_dupl(self):
"""remove duplicates from self.sd_entries"""
result = []
all_struct_dict = {}
for cpd in self.sd_entries:
Chem.RemoveHs(cpd)
cansmi = Chem.MolToSmiles(cpd,canonical=True)
if not cansmi in all_struct_dict.keys():
all_struct_dict[cansmi] = []
all_struct_dict[cansmi].append(cpd)
for entry in all_struct_dict.keys():
if len(all_struct_dict[entry])==1:
all_struct_dict[entry][0].SetProp('cansmirdkit',entry)
result.append(all_struct_dict[entry][0])
self.sd_entries=result
return True
def step_3_merge_IC50(self):
"""merge IC50 of duplicates into one compound using mean of all values if:
min(IC50) => IC50_avg-3*IC50_stddev && max(IC50) <= IC50_avg+3*IC50_stddev && IC50_stddev <= IC50_avg"""
np_old_settings = np.seterr(invalid='ignore') #dirty way to ignore warnings from np.std
def get_mean_IC50(mol_list):
IC50 = 0
IC50_avg = 0
for bla in mol_list:
try:
IC50 += float(bla.GetProp("value"))
except:
print("no IC50 reported",bla.GetProp("_Name"))
IC50_avg = IC50 / len(mol_list)
return IC50_avg
def get_stddev_IC50(mol_list):
IC50_list = []
for mol in mol_list:
try:
IC50_list.append(round(float(mol.GetProp("value")),2))
except:
print("no IC50 reported",mol.GetProp("_Name"))
IC50_stddev = np.std(IC50_list,ddof=1)
return IC50_stddev,IC50_list
result = []
IC50_dict = {}
for cpd in self.sd_entries:
if not "cansmirdkit" in cpd.GetPropNames():
Chem.RemoveHs(cpd)
cansmi = Chem.MolToSmiles(cpd,canonical=True)
cpd.SetProp('cansmirdkit',cansmi)
cansmi = str(cpd.GetProp("cansmirdkit"))
IC50_dict[cansmi]={}
for cpd in self.sd_entries:
cansmi = str(cpd.GetProp("cansmirdkit"))
try:
IC50_dict[cansmi].append(cpd)
except:
IC50_dict[cansmi] = [cpd]
for entry in IC50_dict:
IC50_avg = str(get_mean_IC50(IC50_dict[entry]))
IC50_stddev,IC50_list = get_stddev_IC50(IC50_dict[entry])
IC50_dict[entry][0].SetProp("value_stddev",str(IC50_stddev))
IC50_dict[entry][0].SetProp("value",IC50_avg)
minimumvalue = float(IC50_avg)-3*float(IC50_stddev)
maximumvalue = float(IC50_avg)+3*float(IC50_stddev)
if round(IC50_stddev,1) == 0.0:
result.append(IC50_dict[entry][0])
elif IC50_stddev > float(IC50_avg):
runawaylist = []
for e in IC50_dict[entry]:
runawaylist.append(e.GetProp("_Name"))
print("stddev larger than mean", runawaylist, IC50_list, IC50_avg,IC50_stddev)
elif np.min(IC50_list) < minimumvalue or np.max(IC50_list) > maximumvalue:
pass
else:
result.append(IC50_dict[entry][0])
self.sd_entries=result
np.seterr(over=np_old_settings['over'],divide=np_old_settings['divide'],invalid=np_old_settings['invalid'],under=np_old_settings['under'])
return True
def step_4_set_TL(self,threshold,ic50_tag="value"):
"""set Property "TL"(TrafficLight) for each compound:
if ic50_tag (default:"value") > threshold: TL = 0, else 1"""
result = []
i,j = 0,0
for cpd in self.sd_entries:
if float(cpd.GetProp(ic50_tag))> float(threshold):
cpd.SetProp('TL','0')
i += 1
else:
cpd.SetProp('TL','1')
j += 1
result.append(cpd)
self.sd_entries = result
if self.verbous: print("## act: %d, inact: %d" % (j,i))
return True
def step_5_remove_descriptors(self):
"""remove list of Properties from each compound (hardcoded)
which would corrupt process of creating Prediction-Models"""
sd_tags = ['activity__comment','alogp','assay__chemblid','assay__description','assay__type','bioactivity__type','activity_comment','assay_chemblid','assay_description','assay_type','bioactivity_type','cansmirdkit','ingredient__cmpd__chemblid','ingredient_cmpd_chemblid','knownDrug','medChemFriendly','molecularFormula','name__in__reference','name_in_reference','numRo5Violations','operator','organism','parent__cmpd__chemblid','parent_cmpd_chemblid','passesRuleOfThree','preferredCompoundName','reference','rotatableBonds','smiles','Smiles','stdInChiKey','synonyms','target__chemblid','target_chemblid','target__confidence','target__name','target_confidence','target_name','units','value_avg','value_stddev'] + ['value']
result = []
for mol in self.sd_entries:
properties = mol.GetPropNames()
for tag in properties:
if tag in sd_tags: mol.ClearProp(tag)
result.append(mol)
self.sd_entries = result
return True
def step_6_calc_descriptors(self):
"""calculate descriptors for each compound, according to Descriptors._descList"""
nms=[x[0] for x in Descriptors._descList]
calc = MoleculeDescriptors.MolecularDescriptorCalculator(nms)
for i in range(len(self.sd_entries)):
descrs = calc.CalcDescriptors(self.sd_entries[i])
for j in range(len(descrs)):
self.sd_entries[i].SetProp(str(nms[j]),str(descrs[j]))
return True
    def step_7_train_models(self):
        """train models according to trafficlight using sklearn.ensamble.RandomForestClassifier
        self.model contains up to 10 models afterwards, use save_model_info(type) to create csv or html
        containing data for each model"""
        # first row of self.csv_text is the column header for the reports
        title_line = ["#","accuracy","MCC","precision","recall","f1","auc","kappa","prevalence","bias","pickel-File"]
        self.csv_text= [title_line]
        TL_list = []
        property_list_list = []
        directory = os.getcwd().split("/")[-2:]
        dir_string = ';'.join(directory)
        # Build the feature matrix: every numeric SD property except the
        # label ('TL') and the raw activity ('value') becomes a descriptor.
        for cpd in self.sd_entries:
            property_list = []
            property_name_list = []
            prop_name = cpd.GetPropNames()
            for property in prop_name:
                if property not in ['TL','value']:
                    try:
                        f = float(cpd.GetProp(property))
                        # NOTE(review): NaN/inf values are reported but still
                        # appended below -- confirm that is intended.
                        if math.isnan(f) or math.isinf(f):
                            print("invalid: %s" % property)
                    except ValueError:
                        print("valerror: %s" % property)
                        continue
                    property_list.append(f)
                    property_name_list.append(property)
                elif property == 'TL':
                    TL_list.append(int(cpd.GetProp(property)))
                else:
                    print(property)
                    pass
            property_list_list.append(property_list)
        dataDescrs_array = np.asarray(property_list_list)
        dataActs_array = np.array(TL_list)
        # Train one random forest per seed (1..10); each model is evaluated
        # via 5-fold cross validation on its 40% hold-out split.
        for randomseedcounter in range(1,11):
            if self.verbous:
                print("################################")
                print("try to calculate seed %d" % randomseedcounter)
            X_train,X_test,y_train,y_test = cross_validation.train_test_split(dataDescrs_array,dataActs_array,test_size=.4,random_state=randomseedcounter)
            # try:
            clf_RF = RandomForestClassifier(n_estimators=100,random_state=randomseedcounter)
            clf_RF = clf_RF.fit(X_train,y_train)
            cv_counter = 5
            # mean/stddev of each metric over the CV folds
            scores = cross_validation.cross_val_score( clf_RF, X_test,y_test, cv=cv_counter,scoring='accuracy')
            accuracy_CV = round(scores.mean(),3)
            accuracy_std_CV = round(scores.std(),3)
            # Matthews correlation coefficient needs a custom scorer
            calcMCC = make_scorer(metrics.matthews_corrcoef,greater_is_better=True,needs_threshold=False)
            scores = cross_validation.cross_val_score( clf_RF, X_test,y_test, cv=cv_counter,scoring=calcMCC)
            MCC_CV = round(scores.mean(),3)
            MCC_std_CV = round(scores.std(),3)
            scores = cross_validation.cross_val_score( clf_RF, X_test,y_test, cv=cv_counter,scoring='f1')
            scores_rounded = [round(x,3) for x in scores]
            f1_CV = round(scores.mean(),3)
            f1_std_CV = round(scores.std(),3)
            scores = cross_validation.cross_val_score( clf_RF, X_test,y_test, cv=cv_counter,scoring='precision')
            scores_rounded = [round(x,3) for x in scores]
            precision_CV = round(scores.mean(),3)
            precision_std_CV = round(scores.std(),3)
            scores = cross_validation.cross_val_score( clf_RF, X_test,y_test, cv=cv_counter,scoring='recall')
            scores_rounded = [round(x,3) for x in scores]
            recall_CV = round(scores.mean(),3)
            recall_std_CV = round(scores.std(),3)
            scores = cross_validation.cross_val_score( clf_RF, X_test,y_test, cv=cv_counter,scoring='roc_auc')
            scores_rounded = [round(x,3) for x in scores]
            auc_CV = round(scores.mean(),3)
            auc_std_CV = round(scores.std(),3)
            # Cohen's kappa plus prevalence/bias from the confusion matrix
            y_predict = clf_RF.predict(X_test)
            conf_matrix = metrics.confusion_matrix(y_test,y_predict)
            # coh_kappa = cohenskappa.cohens_kappa(conf_matrix)
            coh_kappa = cohens_kappa(conf_matrix)
            kappa = round(coh_kappa['kappa'],3)
            kappa_stdev = round(coh_kappa['std_kappa'],3)
            tp = conf_matrix[0][0]
            tn = conf_matrix[1][1]
            fp = conf_matrix[1][0]
            fn = conf_matrix[0][1]
            n = tn+fp
            p = tp+fn
            kappa_prevalence = round(float(abs(tp-tn))/float(n),3)
            kappa_bias = round(float(abs(fp-fn))/float(n),3)
            if self.verbous:
                print("test:")
                print("\tpos\tneg")
                print("true\t%d\t%d" % (tp,tn))
                print("false\t%d\t%d" % (fp,fn))
                print(conf_matrix)
                print("\ntrain:")
                y_predict2 = clf_RF.predict(X_train)
                conf_matrix2 = metrics.confusion_matrix(y_train,y_predict2)
                tp2 = conf_matrix2[0][0]
                tn2 = conf_matrix2[1][1]
                fp2 = conf_matrix2[1][0]
                fn2 = conf_matrix2[0][1]
                print("\tpos\tneg")
                print("true\t%d\t%d" % (tp2,tn2))
                print("false\t%d\t%d" % (fp2,fn2))
                print(conf_matrix2)
            # one result row per model; "mean_stddev" strings per metric
            result_string_cut = [randomseedcounter,
                    str(accuracy_CV)+"_"+str(accuracy_std_CV),
                    str(MCC_CV)+"_"+str(MCC_std_CV),
                    str(precision_CV)+"_"+str(precision_std_CV),
                    str(recall_CV)+"_"+str(recall_std_CV),
                    str(f1_CV)+"_"+str(f1_std_CV),
                    str(auc_CV)+"_"+str(auc_std_CV),
                    str(kappa)+"_"+str(kappa_stdev),
                    kappa_prevalence,kappa_bias,"model_file.pkl"]
            self.model.append(clf_RF)
            self.csv_text.append(result_string_cut)
            # except Exception as e:
            # print "got %d models" % len(self.model)
            # print e
            # sys.exit(-1)
            # break
        return True if len(self.model)>0 else False
def save_model_info(self,outfile,mode="html"):
"""create html- or csv-File for models according to mode (default: "html")"""
if mode=="csv":
if not outfile.endswith(".csv"): outfile += ".csv"
csv_file = open(outfile,"wb")
csv_file_writer = csv.writer(csv_file,delimiter=";",quotechar=' ')
for line in self.csv_text: csv_file_writer.writerow(line)
csv_file.flush()
csv_file.close()
elif mode=="html":
if not outfile.endswith(".html"): outfile += ".html"
def lines2list(lines):
return lines
def list2html(data,act,inact):
html_head = """<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title></title>
<style type="text/css">
table {
max-width: 100%;
background-color: transparent;
}
th {
text-align: left;
}
.table {
width: 100%;
margin-bottom: 20px;
}
.table > thead > tr > th,
.table > tbody > tr > th,
.table > tfoot > tr > th,
.table > thead > tr > td,
.table > tbody > tr > td,
.table > tfoot > tr > td {
padding: 8px;
line-height: 1.428571429;
vertical-align: top;
border-top: 1px solid #dddddd;
}
.table > thead > tr > th {MSC1013123
vertical-align: bottom;
border-bottom: 2px solid #dddddd;
}
.table > caption + thead > tr:first-child > th,
.table > colgroup + thead > tr:first-child > th,
.table > thead:first-child > tr:first-child > th,
.table > caption + thead > tr:first-child > td,
.table > colgroup + thead > tr:first-child > td,
.table > thead:first-child > tr:first-child > td {
border-top: 0;
}
.table > tbody + tbody {
border-top: 2px solid #dddddd;
}
.table .table {
background-color: #ffffff;
}
.table-condensed > thead > tr > th,
.table-condensed > tbody > tr > th,
.table-condensed > tfoot > tr > th,
.table-condensed > thead > tr > td,
.table-condensed > tbody > tr > td,
.table-condensed > tfoot > tr > td {
padding: 5px;
}
.table-bordered {
border: 1px solid #dddddd;
}
.table-bordered > thead > tr > th,
.table-bordered > tbody > tr > th,
.table-bordered > tfoot > tr > th,
.table-bordered > thead > tr > td,
.table-bordered > tbody > tr > td,
.table-bordered > tfoot > tr > td {
border: 1px solid #dddddd;
}
.table-bordered > thead > tr > th,
.table-bordered > thead > tr > td {
border-bottom-width: 2px;
}
.table-striped > tbody > tr:nth-child(odd) > td,
.table-striped > tbody > tr:nth-child(odd) > th {
background-color: #f9f9f9;
}
.table-hover > tbody > tr:hover > td,
.table-hover > tbody > tr:hover > th {
background-color: #f5f5f5;
}
table col[class*="col-"] {
position: static;
display: table-column;
float: none;
}
table td[class*="col-"],
table th[class*="col-"] {
display: table-cell;
float: none;
}
.table > thead > tr > .active,
.table > tbody > tr > .active,
.table > tfoot > tr > .active,
.table > thead > .active > td,
.table > tbody > .active > td,
.table > tfoot > .active > td,
.table > thead > .active > th,
.table > tbody > .active > th,
.table > tfoot > .active > th {
background-color: #f5f5f5;
}
.table-hover > tbody > tr > .active:hover,
.table-hover > tbody > .active:hover > td,
.table-hover > tbody > .active:hover > th {
background-color: #e8e8e8;
}
.table > thead > tr > .success,
.table > tbody > tr > .success,
.table > tfoot > tr > .success,
.table > thead > .success > td,
.table > tbody > .success > td,
.table > tfoot > .success > td,
.table > thead > .success > th,
.table > tbody > .success > th,
.table > tfoot > .success > th {
background-color: #dff0d8;
}
.table-hover > tbody > tr > .success:hover,
.table-hover > tbody > .success:hover > td,
.table-hover > tbody > .success:hover > th {
background-color: #d0e9c6;
}
.table > thead > tr > .danger,
.table > tbody > tr > .danger,
.table > tfoot > tr > .danger,
.table > thead > .danger > td,
.table > tbody > .danger > td,
.table > tfoot > .danger > td,
.table > thead > .danger > th,
.table > tbody > .danger > th,
.table > tfoot > .danger > th {
background-color: #f2dede;
}
.table-hover > tbody > tr > .danger:hover,
.table-hover > tbody > .danger:hover > td,
.table-hover > tbody > .danger:hover > th {
background-color: #ebcccc;
}
.table > thead > tr > .warning,
.table > tbody > tr > .warning,
.table > tfoot > tr > .warning,
.table > thead > .warning > td,
.table > tbody > .warning > td,
.table > tfoot > .warning > td,
.table > thead > .warning > th,
.table > tbody > .warning > th,
.table > tfoot > .warning > th {
background-color: #fcf8e3;
}
.table-hover > tbody > tr > .warning:hover,
.table-hover > tbody > .warning:hover > td,
.table-hover > tbody > .warning:hover > th {
background-color: #faf2cc;
}
@media (max-width: 767px) {
.table-responsive {
width: 100%;
margin-bottom: 15px;
overflow-x: scroll;
overflow-y: hidden;
border: 1px solid #dddddd;
-ms-overflow-style: -ms-autohiding-scrollbar;
-webkit-overflow-scrolling: touch;
}
.table-responsive > .table {
margin-bottom: 0;
}
.table-responsive > .table > thead > tr > th,
.table-responsive > .table > tbody > tr > th,
.table-responsive > .table > tfoot > tr > th,
.table-responsive > .table > thead > tr > td,
.table-responsive > .table > tbody > tr > td,
.table-responsive > .table > tfoot > tr > td {
white-space: nowrap;
}
.table-responsive > .table-bordered {
border: 0;
}
.table-responsive > .table-bordered > thead > tr > th:first-child,
.table-responsive > .table-bordered > tbody > tr > th:first-child,
.table-responsive > .table-bordered > tfoot > tr > th:first-child,
.table-responsive > .table-bordered > thead > tr > td:first-child,
.table-responsive > .table-bordered > tbody > tr > td:first-child,
.table-responsive > .table-bordered > tfoot > tr > td:first-child {
border-left: 0;
}
.table-responsive > .table-bordered > thead > tr > th:last-child,
.table-responsive > .table-bordered > tbody > tr > th:last-child,
.table-responsive > .table-bordered > tfoot > tr > th:last-child,
.table-responsive > .table-bordered > thead > tr > td:last-child,
.table-responsive > .table-bordered > tbody > tr > td:last-child,
.table-responsive > .table-bordered > tfoot > tr > td:last-child {
border-right: 0;
}
.table-responsive > .table-bordered > tbody > tr:last-child > th,
.table-responsive > .table-bordered > tfoot > tr:last-child > th,
.table-responsive > .table-bordered > tbody > tr:last-child > td,
.table-responsive > .table-bordered > tfoot > tr:last-child > td {
border-bottom: 0;
}
}
</style>
</head>
<body>
<p style="padding-left:10px;padding-top:10px;font-size:200%">Data for Models</p>
<p style="padding-left:10px;padding-right:10px;">"""
html_topPlot_start = """<table style="vertical-align:top; background-color=#CCCCCC">
<tr align="left" valign="top"><td><img src="pieplot.png"></td><td><H3>Distribution</H3><font color="#00C000">active %d</font><br><font color="#FF0000">inactive %d</td><td>"""
html_topPlot_bottom="""</td></tr></table>"""
html_tableStart="""<table class="table table-bordered table-condensed">
<thead>
<tr>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
</tr>
</thead>
<tbody>"""
html_tElements ="""
<tr bgcolor = "%s">
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td><a href="%s">model.pkl</a></td>
</tr>"""
html_bottomPlot = """</tbody>
</table>
<img src="barplot.png"><br>"""
html_foot ="""
</p>
</body>
</html>"""
html_kappa_table_head="""<table class="table table-bordered table-condensed">
<thead>
<tr>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
</tr>
</thead>
<tbody>"""
html_kappa_table_element="""<tr bgcolor = "%s">
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td><a href="%s">model.pkl</a></td>
</tr>"""
html_kappa_table_bottom="""</tbody>
</table>
<img src="barplot.png"><br>"""
best,worst = findBestWorst(data)
html = []
html.append(html_head)
html.append(html_topPlot_start % (act,inact))
html.append(html_topPlot_bottom)
html.append(html_tableStart % tuple(data[0]))
i = 0
for l in data[1:len(data)]:
l_replaced = []
for elem in l:
elem_string = str(elem)
if elem_string.find("pkl")==-1: l_replaced.append(elem_string.replace("_","±"))
else: l_replaced.append(elem_string)
c = ""
if i == best: c = "#9CC089"
if i == worst: c = "#FF3333"
html.append(html_tElements % tuple([c] + l_replaced))
i += 1
html.append(html_bottomPlot)
html.append(html_foot)
createBarPlot(data)
return html
def writeHtml(html,outf):
outf_h = open(outf,'w')
for block in html:
outf_h.write(block)
outf_h.flush()
outf_h.close()
return
def findBestWorst(data):
auc = [float(x[6].split("_")[0]) for x in data[1:]]
max_index,min_index = auc.index(max(auc)),auc.index(min(auc))
return (max_index,min_index)
def createPiePlot(cpds):
def getActInact(cpds):
act,inact=0,0
for cpd in cpds:
if int(cpd.GetProp('TL'))==0: inact+=1
else: act+=1
return act,inact
act_count,inact_count = getActInact(cpds)
print("act/inact from TL's %d/%d" % (act_count,inact_count))
fig = plt.figure(figsize=(2,2))
pie = plt.pie([inact_count,act_count],colors=('r','g'))
fig.savefig("pieplot.png",transparent=True)
return act_count,inact_count
def createBarPlot(data):
    # Render the per-model accuracy bar chart (barplot.png) used by the HTML
    # report.  `data` is the parsed CSV table: data[0] is the header row,
    # columns 1..7 hold "value_error" strings per model.
    # NOTE(review): relies on `self` from the enclosing method's scope
    # (closure); it cannot be moved to module level as-is.
    def getLists(data,col):
        # Split column `col` into parallel value/error lists, skipping cells
        # that lack the "value_error" form and cells containing .pkl links.
        accList = []
        errList = []
        for x in data[1:]:
            if x[col].find("_")==-1: continue
            if x[col].find(".pkl")!=-1:continue
            spl = x[col].split("_")
            accList.append(float(spl[0]))
            errList.append(float(spl[1]))
        return accList,errList
    def plotLists(cnt):
        # Draw one bar series (with error bars) per metric column 1..cnt-1.
        result=[]
        clr = ['#DD1E2F','#EBB035','#06A2CB','#218559','#D0C6B1','#192823','#DDAACC']
        # print ticks, list,errList,width
        # print ticks
        for i in range(1,cnt):
            list,errList = getLists(data,i)
            # print i,cnt,list,errList
            result.append(ax.bar(ticks+width*i,list,width,color=clr[i-1],yerr=errList))
        return result
    fig,ax = plt.subplots()
    fig.set_size_inches(15,6)
    # One tick group per model; a single-model run gets one tick only.
    ticks = np.arange(0.0,12.0,1.2)
    if len(self.model)==1: ticks = np.arange(0.0,1.0,1.5)
    width = 0.15
    plots = plotLists(8)
    ax.set_xticks(ticks+0.75)
    ax.set_xticklabels([str(x) for x in range(1,11,1)])
    ax.set_ylabel("Accuracy")
    ax.set_xlabel("# model")
    ax.set_xlim(-0.3,14)
    ax.set_ylim(-0.1,1.2)
    # Legend labels come from the header row's metric column names.
    ax.legend(tuple(plots),[x for x in data[0][1:8]],'upper right')
    best,worst = findBestWorst(data)
    # Annotate best/worst models only when there is more than one model.
    if len(self.model)>1:
        ax.annotate("best",xy=(ticks[best],0.85),xytext=(ticks[best]+0.25,1.1),color="green")
        ax.annotate("worst",xy=(ticks[worst],0.85),xytext=(ticks[worst]+0.25,1.10),color="red")
    fig.savefig("barplot.png",transparent=True)
    return
act,inact = createPiePlot(self.sd_entries)
lines = self.csv_text
data = lines2list(lines)
html = list2html(data,act,inact)
writeHtml(html,outfile)
return True
def load_mols(self,sd_file):
    """Read an SD file (.sdf, .sdf.gz or .sd.gz) into self.sd_entries."""
    # Gzipped input needs the forward-only supplier over a gzip stream.
    if sd_file.endswith((".sdf.gz", ".sd.gz")):
        supplier = Chem.ForwardSDMolSupplier(gzip.open(sd_file))
    else:
        supplier = Chem.SDMolSupplier(sd_file)
    self.sd_entries = list(supplier)
    return True
def save_mols(self,outfile,gzip=True):
    """Write self.sd_entries to an SD-File.

    :param outfile: destination path
    :param gzip: if True (default), gzip-compress the output
    """
    # BUG FIX: the boolean parameter `gzip` shadows the gzip *module*, so
    # the original `gzip.open(...)` raised AttributeError on the default
    # (compressed) path.  Import the module under an alias instead; the
    # parameter name is kept for backward compatibility with keyword callers.
    import gzip as gzip_module
    sdw = Chem.SDWriter(outfile+".tmp")
    for mol in self.sd_entries:
        sdw.write(mol)
    sdw.flush()
    sdw.close()
    if not gzip:
        os.rename(outfile+".tmp",outfile)
        return
    # Compress the temporary plain SD file into the final gzipped output.
    f_in = open(outfile+".tmp", 'rb')
    f_out = gzip_module.open(outfile, 'wb')
    f_out.writelines(f_in)
    f_out.flush()
    f_out.close()
    f_in.close()
    os.remove(outfile+".tmp")
    return
def save_model(self,outfile,model_number=0):
    """Save model #model_number to `outfile` using cPickle.dump.

    :param outfile: destination path for the pickled model
    :param model_number: index into self.model (default 0)
    """
    # Use open() in a context manager: the removed-in-Python-3 `file()`
    # builtin was used before, and the handle was never closed, which can
    # leave a truncated pickle on disk.
    with open(outfile, "wb") as handle:
        cPickle.dump(self.model[model_number], handle)
    return
def load_models(self,model_files):
    """Load one model (str path) or a list of models into self.model.

    :param model_files: path or list of paths to pickled classifiers
    :returns: number of models loaded
    """
    if isinstance(model_files, str):
        model_files = [model_files]
    count = 0
    for mod_file in model_files:
        # BUG FIX: pickle data is binary; the original text-mode 'r' open
        # corrupts it on Windows and fails outright on Python 3 — use 'rb'.
        with open(mod_file, 'rb') as handle:
            clf_RF = Unpickler(handle).load()
        self.model.append(clf_RF)
        count += 1
    return count
def predict(self,model_number):
    """Predict activity of self.sd_entries with model #model_number.

    Stores each prediction on the molecule as the 'TL_prediction'
    property and returns the aggregate counts.

    :param model_number: index into self.model; exits with -1 if invalid
    :returns: tuple (active_count, inactive_count)
    """
    if len(self.model)<=model_number:
        sys.stderr.write("\nModel-Number %d doesn't exist, there are just %d Models\n" % (model_number,len(self.model)))
        sys.exit(-1)
    descriptors = []
    active,inactive = 0,0
    for D in Descriptors._descList:
        descriptors.append(D[0])
    calculator = MoleculeDescriptors.MolecularDescriptorCalculator(descriptors)
    clf_RF = self.model[model_number]
    for sample in self.sd_entries:
        use = False
        try:
            pattern = calculator.CalcDescriptors(sample)
            use = True
        # BUG FIX: the original `except e:` raised NameError ('e' was never
        # defined) instead of catching the descriptor-calculation failure.
        except Exception:
            sys.stderr.write("Error computing descriptors for %s, skip" % sample)
        if use:
            dataDescrs_array = np.asarray(pattern)
            y_predict = int(clf_RF.predict(dataDescrs_array)[0])
            if y_predict==0: inactive += 1
            if y_predict==1: active += 1
            sample.SetProp("TL_prediction",str(y_predict))
    return (active,inactive)
if __name__ == "__main__":
    # Command-line driver: fetch or load compound data, run the filtering /
    # training pipeline, and (optionally) apply previously saved models.
    # Every fatal error exits the process with status -1.
    def step_error(step):
        # Uniform stderr report for a failed pipeline step.
        sys.stderr.write("Error in Step: %s" % step)
    usage = "usage: python master.py [--accession=<Acc_ID>] [--sdf=<sdf-File>] --dupl/--uniq [--rof] [--combine=<file1>,<file2>] [--IC50=<IC50_tag>] [--cutoff=<value>] [--remove_descr=<txt_file>] [--proxy=<https://user:pass@proxy.de:portnumber] [--verbous] [--check_models=<model.pkl>]"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--accession',action='store',type='string',dest='accession',help="Accession ID of Protein (hint: P43088 is Vitamin_D_Receptor with ~200 compounds)",default='')
    parser.add_option('--rof',action='store_true',dest='onefile',help='remove obsolete Files',default=False)
    parser.add_option('--dupl',action='store_true',dest='dupl',help='use only duplicates',default=False)
    parser.add_option('--uniq',action='store_true',dest='uniq',help='use only uniques',default=False)
    parser.add_option('--combine',action='store',type='string',dest='combine',help='Combine 2 SDF/SDF.GZ Files',default='')
    parser.add_option('--IC50',action='store',type='string',dest='SD_tag',help='name of IC50 field, default is \'value\'',default='value')
    parser.add_option('--cutoff',action='store',type='int',dest='cutoff',help='cutoff-value for hERG-trafficlight, default is \'5000\'',default=5000)
    parser.add_option('--remove_descr',action='store',type='string',dest='remove_descr',help='file with SDtags2remove, line-wise default:<internal list>',default='')
    parser.add_option('--proxy',action='store',type='string',dest='proxy',help='Use this Proxy',default='')
    parser.add_option('--sdf',action='store',type='string',dest='sdf',help='load this SDF-File',default='')
    parser.add_option('--verbous',action='store_true',dest='verbous',help='verbous',default=False)
    parser.add_option('--check_models',action='store',type='string',dest='modelfile',help='check compounds with this model',default='')
    (options,args) = parser.parse_args()
    # --combine short-circuit: merge the two given SD files and exit.
    combineItems = options.combine.split(',')
    if len(combineItems) == 1 and len(combineItems[0])>0:
        print('need 2 files to combine')
        print(usage)
        sys.exit(-1)
    elif len(combineItems) == 2 and len(combineItems[0])>0 and len(combineItems[1])>0:
        cur_file = _04.combine(combineItems[0],combineItems[1])
        print("File: %s" % cur_file)
        sys.exit(0)
    # The accession may be given as '<prefix>:<id>'; keep only the id part.
    code = options.accession.split(':')
    if len(code)==1:
        accession = code[0]
    else:
        accession = code[1]
    # Input validation: need a data source and a dupl/uniq choice.
    if options.accession == '' and options.sdf == '':
        print("please offer Accession-Number or SDF-File")
        print("-h for help")
        sys.exit(-1)
    if options.dupl==False and options.uniq==False:
        print("Please select uniq or dupl -h for help")
        print("-h for help")
        sys.exit(-1)
    pco = p_con(accession,proxy=options.proxy)
    pco.verbous = options.verbous
    # Data acquisition: local SD file, or download from ChEMBL by accession.
    if options.sdf != '':
        print("load sdf from File: %s" % options.sdf)
        result = pco.load_mols(options.sdf)
        if not result:
            step_error("load SDF-File")
            sys.exit(-1)
    else:
        print("gather Data for Accession-ID \'%s\'" % accession)
        result = pco.step_0_get_chembl_data()
        if not result:
            step_error("download ChEMBL-Data")
            sys.exit(-1)
    result = pco.step_1_keeplargestfrag()
    if not result:
        step_error("keep largest Fragment")
        sys.exit(-1)
    if options.uniq:
        result = pco.step_2_remove_dupl()
        if not result:
            step_error("remove duplicates")
            sys.exit(-1)
    result = pco.step_3_merge_IC50()
    if not result:
        step_error("merge IC50-Values for same Smiles")
        sys.exit(-1)
    # --check_models mode: only apply the given models, report, then exit.
    if options.modelfile != '':
        result = pco.load_models(options.modelfile.split(","))
        if not result:
            step_error("Load Model-Files")
            sys.exit(-1)
        print("\n#Model\tActive\tInactive")
        for i in range(len(pco.model)):
            act,inact = pco.predict(i)
            print("%d\t%d\t%d" % (i,act,inact))
        sys.exit(0)
    # Training pipeline: traffic-light labeling, descriptor pruning and
    # calculation, model training, reports, then per-model persistence.
    result = pco.step_4_set_TL(options.cutoff)
    if not result:
        step_error("set Trafficlight for cutoff")
        sys.exit(-1)
    result = pco.step_5_remove_descriptors()
    if not result:
        step_error("remove descriptors")
        sys.exit(-1)
    result = pco.step_6_calc_descriptors()
    if not result:
        step_error("calculate Descriptors")
        sys.exit(-1)
    result = pco.step_7_train_models()
    if not result:
        step_error("Training of Models")
        sys.exit(-1)
    pco.save_model_info("model_info.csv",mode="csv")
    pco.save_model_info("model_info.html",mode="html")
    for i in range(len(pco.model)):
        filename = "%s_%dnm_model_%d.pkl" % (accession,options.cutoff,i)
        pco.save_model(filename,i)
        print("Model %d saved into File: %s" % (i,filename))
    # Self-check: re-apply each freshly trained model to the training set.
    for i in range(len(pco.model)):
        act,inact = pco.predict(i)
        print("Model %d active: %d\tinactive: %d" % (i,act,inact))
| soerendip42/rdkit | Contrib/pzc/p_con.py | Python | bsd-3-clause | 48,985 | [
"ASE",
"RDKit"
] | 9522915bb86d4f8384aeb825c37beef196473656d4a423bd5073c4cf6698c963 |
# Zulip Settings intended to be set by a system administrator.
#
# See http://zulip.readthedocs.io/en/latest/settings.html for
# detailed technical documentation on the Zulip settings system.
#
### MANDATORY SETTINGS
#
# These settings MUST be set in production. In a development environment,
# sensible default values will be used.
# The user-accessible Zulip hostname for this installation, e.g.
# zulip.example.com
EXTERNAL_HOST = 'zulip.example.com'
# The email address for the person or team who maintain the Zulip
# Voyager installation. Will also get support emails. (e.g. zulip-admin@example.com)
ZULIP_ADMINISTRATOR = 'zulip-admin@example.com'
# Enable at least one of the following authentication backends.
# See http://zulip.readthedocs.io/en/latest/prod-authentication-methods.html
# for documentation on our authentication backends.
AUTHENTICATION_BACKENDS = (
# 'zproject.backends.EmailAuthBackend', # Email and password; see SMTP setup below
# 'zproject.backends.GoogleMobileOauth2Backend', # Google Apps, setup below
# 'zproject.backends.GitHubAuthBackend', # GitHub auth, setup below
# 'zproject.backends.ZulipLDAPAuthBackend', # LDAP, setup below
# 'zproject.backends.ZulipRemoteUserBackend', # Local SSO, setup docs on readthedocs
)
# To enable Google authentication, you need to do the following:
#
# (1) Visit https://console.developers.google.com, setup an
# Oauth2 client ID that allows redirects to
# e.g. https://zulip.example.com/accounts/login/google/done/.
#
# (2) Then click into the APIs and Auth section (in the sidebar on the
# left side of the page), APIs, then under "Social APIs" click on
# "Google+ API" and click the button to enable the API.
#
# (3) put your client secret as "google_oauth2_client_secret" in
# zulip-secrets.conf, and your client ID right here:
# GOOGLE_OAUTH2_CLIENT_ID=<your client ID from Google>
# To enable GitHub authentication, you will need to do the following:
#
# (1) Register an OAuth2 application with GitHub at one of:
# https://github.com/settings/applications
# https://github.com/organizations/ORGNAME/settings/applications
# Specify e.g. https://zulip.example.com/complete/github/ as the callback URL.
#
# (2) Put your "Client ID" as SOCIAL_AUTH_GITHUB_KEY below and your
# "Client secret" as social_auth_github_secret in
# /etc/zulip/zulip-secrets.conf.
# SOCIAL_AUTH_GITHUB_KEY = <your client ID from GitHub>
#
# (3) You can also configure the GitHub integration to only allow
# members of a particular GitHub team or organization to login to your
# Zulip server using GitHub authentication; to enable this, set one of the
# two parameters below:
# SOCIAL_AUTH_GITHUB_TEAM_ID = <your team id>
# SOCIAL_AUTH_GITHUB_ORG_NAME = <your org name>
# If you are using the ZulipRemoteUserBackend authentication backend,
# set this to your domain (e.g. if REMOTE_USER is "username" and the
# corresponding email address is "username@example.com", set
# SSO_APPEND_DOMAIN = "example.com")
SSO_APPEND_DOMAIN = None # type: str
# Configure the outgoing SMTP server below. For testing, you can skip
# sending emails entirely by commenting out EMAIL_HOST, but you will
# want to configure this to support email address confirmation emails,
# missed message emails, onboarding follow-up emails, etc. To
# configure SMTP, you will need to complete the following steps:
#
# (1) Fill out the outgoing email sending configuration below.
#
# (2) Put the SMTP password for EMAIL_HOST_USER in
# /etc/zulip/zulip-secrets.conf as email_password.
#
# (3) If you are using a gmail account to send outgoing email, you
# will likely need to read this Google support answer and configure
# that account as "less secure":
# https://support.google.com/mail/answer/14257.
#
# You can quickly test your sending email configuration using:
# ./manage.py send_test_email username@example.com
#
# A common problem is hosting providers that block outgoing SMTP traffic.
#
# With the exception of reading EMAIL_HOST_PASSWORD from
# email_password in the Zulip secrets file, Zulip uses Django's
# standard EmailBackend, so if you're having issues, you may want to
# search for documentation on using your email provider with Django.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = ''
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# The email From address to be used for automatically generated emails
DEFAULT_FROM_EMAIL = "Zulip <zulip@example.com>"
# The noreply address to be used as Reply-To for certain generated emails.
# Messages sent to this address should not be delivered anywhere.
NOREPLY_EMAIL_ADDRESS = "noreply@example.com"
# A comma-separated list of strings representing the host/domain names
# that your users will enter in their browsers to access your Zulip
# server. This is a security measure to prevent an attacker from
# poisoning caches and triggering password reset emails with links to
# malicious hosts by submitting requests with a fake HTTP Host
# header. See Django's documentation here:
# <https://docs.djangoproject.com/en/1.9/ref/settings/#allowed-hosts>.
# Zulip adds 'localhost' to the list automatically.
ALLOWED_HOSTS = [EXTERNAL_HOST]
### OPTIONAL SETTINGS
# Controls whether session cookies expire when the browser closes
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# Session cookie expiry in seconds after the last page load
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # 2 weeks
# Controls password strength requirements
PASSWORD_MIN_LENGTH = 6
PASSWORD_MIN_ZXCVBN_QUALITY = 0.4 # 0 to disable
# Controls whether or not there is a feedback button in the UI.
ENABLE_FEEDBACK = False
# By default, the feedback button will submit feedback to the Zulip
# developers. If you set FEEDBACK_EMAIL to be an email address
# (e.g. ZULIP_ADMINISTRATOR), feedback sent by your users will instead
# be sent to that email address.
FEEDBACK_EMAIL = ZULIP_ADMINISTRATOR
# Controls whether or not error reports are sent to Zulip. Error
# reports are used to improve the quality of the product and do not
# include message contents; please contact Zulip support with any
# questions.
ERROR_REPORTING = True
# Controls whether or not Zulip will provide inline image preview when
# a link to an image is referenced in a message.
INLINE_IMAGE_PREVIEW = True
# Controls whether or not Zulip will parse links starting with
# "file:///" as a hyperlink (useful if you have e.g. an NFS share).
ENABLE_FILE_LINKS = False
# By default, files uploaded by users and user avatars are stored
# directly on the Zulip server. If file storage in Amazon S3 is
# desired, you can configure that as follows:
#
# (1) Set s3_key and s3_secret_key in /etc/zulip/zulip-secrets.conf to
# be the S3 access and secret keys that you want to use, and setting
# the S3_AUTH_UPLOADS_BUCKET and S3_AVATAR_BUCKET to be the S3 buckets
# you've created to store file uploads and user avatars, respectively.
# Then restart Zulip (scripts/restart-zulip).
#
# (2) Edit /etc/nginx/sites-available/zulip-enterprise to comment out
# the nginx configuration for /user_uploads and /user_avatars (see
# https://github.com/zulip/zulip/issues/291 for discussion of a better
# solution that won't be automatically reverted by the Zulip upgrade
# script), and then restart nginx.
LOCAL_UPLOADS_DIR = "/home/zulip/uploads"
#S3_AUTH_UPLOADS_BUCKET = ""
#S3_AVATAR_BUCKET = ""
# Maximum allowed size of uploaded files, in megabytes. DO NOT SET
# ABOVE 80MB. The file upload implementation doesn't support chunked
# uploads, so browsers will crash if you try uploading larger files.
MAX_FILE_UPLOAD_SIZE = 25
# Controls whether name changes are completely disabled for this installation
# This is useful in settings where you're syncing names from an integrated LDAP/Active Directory
NAME_CHANGES_DISABLED = False
# Controls whether users who have not uploaded an avatar will receive an avatar
# from gravatar.com.
ENABLE_GRAVATAR = True
# To override the default avatar image if ENABLE_GRAVATAR is False, place your
# custom default avatar image at /home/zulip/local-static/default-avatar.png
# and uncomment the following line.
#DEFAULT_AVATAR_URI = '/local-static/default-avatar.png'
# To access an external postgres database you should define the host name in
# REMOTE_POSTGRES_HOST, you can define the password in the secrets file in the
# property postgres_password, and the SSL connection mode in REMOTE_POSTGRES_SSLMODE
# Different options are:
# disable: I don't care about security, and I don't want to pay the overhead of encryption.
# allow: I don't care about security, but I will pay the overhead of encryption if the server insists on it.
# prefer: I don't care about encryption, but I wish to pay the overhead of encryption if the server supports it.
# require: I want my data to be encrypted, and I accept the overhead. I trust that the network will make sure
# I always connect to the server I want.
# verify-ca: I want my data encrypted, and I accept the overhead. I want to be sure that I connect to a server
# that I trust.
# verify-full: I want my data encrypted, and I accept the overhead. I want to be sure that I connect to a server
# I trust, and that it's the one I specify.
#REMOTE_POSTGRES_HOST = 'dbserver.example.com'
#REMOTE_POSTGRES_SSLMODE = 'require'
# If you want to set custom TOS, set the path to your markdown file, and uncomment
# the following line.
# TERMS_OF_SERVICE = '/etc/zulip/terms.md'
### TWITTER INTEGRATION
# Zulip supports showing inline Tweet previews when a tweet is linked
# to in a message. To support this, Zulip must have access to the
# Twitter API via OAuth. To obtain the various access tokens needed
# below, you must register a new application under your Twitter
# account by doing the following:
#
# 1. Log in to http://dev.twitter.com.
# 2. In the menu under your username, click My Applications. From this page, create a new application.
# 3. Click on the application you created and click "create my access token".
# 4. Fill in the values for twitter_consumer_key, twitter_consumer_secret, twitter_access_token_key,
# and twitter_access_token_secret in /etc/zulip/zulip-secrets.conf.
### EMAIL GATEWAY INTEGRATION
# The Email gateway integration supports sending messages into Zulip
# by sending an email. This is useful for receiving notifications
# from third-party services that only send outgoing notifications via
# email.  Once this integration is configured, each stream will have
# an email address documented on the stream settings page, and emails
# sent to that address will be delivered into the stream.
#
# There are two ways to configure email mirroring in Zulip:
# 1. Local delivery: A MTA runs locally and passes mail directly to Zulip
# 2. Polling: Checks an IMAP inbox every minute for new messages.
#
# The local delivery configuration is preferred for production because
# it supports nicer looking email addresses and has no cron delay,
# while the polling mechanism is better for testing/developing this
# feature because it doesn't require a public-facing IP/DNS setup.
#
# The main email mirror setting is the email address pattern, where
# you specify the email address format you'd like the integration to
# use. It should be one of the following:
# %s@zulip.example.com (for local delivery)
# username+%s@example.com (for polling if EMAIL_GATEWAY_LOGIN=username@example.com)
EMAIL_GATEWAY_PATTERN = ""
#
# If you are using local delivery, EMAIL_GATEWAY_PATTERN is all you need
# to change in this file. You will also need to enable the Zulip postfix
# configuration to support local delivery by adding
# , zulip::postfix_localmail
# to puppet_classes in /etc/zulip/zulip.conf and then running
# `scripts/zulip-puppet-apply -f` to do the installation.
#
# If you are using polling, you will need to setup an IMAP email
# account dedicated to Zulip email gateway messages. The model is
# that users will send emails to that account via an address of the
# form username+%s@example.com (which is what you will set as
# EMAIL_GATEWAY_PATTERN); your email provider should deliver those
# emails to the username@example.com inbox. Then you run in a cron
# job `./manage.py email_mirror` (see puppet/zulip/files/cron.d/email-mirror),
# which will check that inbox and batch-process any new messages.
#
# You will need to configure authentication for the email mirror
# command to access the IMAP mailbox below and in zulip-secrets.conf.
#
# The IMAP login; username here and password as email_gateway_password in
# zulip-secrets.conf.
EMAIL_GATEWAY_LOGIN = ""
# The IMAP server & port to connect to
EMAIL_GATEWAY_IMAP_SERVER = ""
EMAIL_GATEWAY_IMAP_PORT = 993
# The IMAP folder name to check for emails. All emails sent to EMAIL_GATEWAY_PATTERN above
# must be delivered to this folder
EMAIL_GATEWAY_IMAP_FOLDER = "INBOX"
### LDAP integration configuration
# Zulip supports retrieving information about users via LDAP, and
# optionally using LDAP as an authentication mechanism.
#
# In either configuration, you will need to do the following:
#
# * Fill in the LDAP configuration options below so that Zulip can
# connect to your LDAP server
#
# * Setup the mapping between email addresses (used as login names in
# Zulip) and LDAP usernames. There are two supported ways to setup
# the username mapping:
#
# (A) If users' email addresses are in LDAP, set
# LDAP_APPEND_DOMAIN = None
# AUTH_LDAP_USER_SEARCH to lookup users by email address
#
# (B) If LDAP only has usernames but email addresses are of the form
# username@example.com, you should set:
# LDAP_APPEND_DOMAIN = example.com and
# AUTH_LDAP_USER_SEARCH to lookup users by username
#
# You can quickly test whether your configuration works by running:
# ./manage.py query_ldap username@example.com
# From the root of your Zulip installation; if your configuration is working
# that will output the full name for your user.
#
# -------------------------------------------------------------
#
# If you are using LDAP for authentication, you will need to enable
# the zproject.backends.ZulipLDAPAuthBackend auth backend in
# AUTHENTICATION_BACKENDS above. After doing so, you should be able
# to login to Zulip by entering your email address and LDAP password
# on the Zulip login form.
#
# If you are using LDAP to populate names in Zulip, once you finish
# configuring this integration, you will need to run:
# ./manage.py sync_ldap_user_data
# To sync names for existing users; you may want to run this in a cron
# job to pick up name changes made on your LDAP server.
import ldap
from django_auth_ldap.config import LDAPSearch, GroupOfNamesType
# URI of your LDAP server. If set, LDAP is used to prepopulate a user's name in
# Zulip. Example: "ldaps://ldap.example.com"
AUTH_LDAP_SERVER_URI = ""
# This DN will be used to bind to your server. If unset, anonymous
# binds are performed. If set, you need to specify the password as
# 'auth_ldap_bind_password' in zulip-secrets.conf.
AUTH_LDAP_BIND_DN = ""
# Specify the search base and the property to filter on that corresponds to the
# username.
AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=users,dc=example,dc=com",
ldap.SCOPE_SUBTREE, "(uid=%(user)s)")
# If the value of a user's "uid" (or similar) property is not their email
# address, specify the domain to append here.
LDAP_APPEND_DOMAIN = None # type: str
# This map defines how to populate attributes of a Zulip user from LDAP.
AUTH_LDAP_USER_ATTR_MAP = {
# Populate the Django user's name from the LDAP directory.
"full_name": "cn",
}
# The default CAMO_URI of '/external_content/' is served by the camo
# setup in the default Voyager nginx configuration. Setting CAMO_URI
# to '' will disable the Camo integration.
CAMO_URI = '/external_content/'
# RabbitMQ configuration
#
# By default, Zulip connects to rabbitmq running locally on the machine,
# but Zulip also supports connecting to RabbitMQ over the network;
# to use a remote RabbitMQ instance, set RABBITMQ_HOST here.
# RABBITMQ_HOST = "localhost"
# To use another rabbitmq user than the default 'zulip', set RABBITMQ_USERNAME here.
# RABBITMQ_USERNAME = 'zulip'
# Memcached configuration
#
# By default, Zulip connects to memcached running locally on the machine,
# but Zulip also supports connecting to memcached over the network;
# to use a remote Memcached instance, set MEMCACHED_LOCATION here.
# Format HOST:PORT
# MEMCACHED_LOCATION = 127.0.0.1:11211
# Redis configuration
#
# By default, Zulip connects to redis running locally on the machine,
# but Zulip also supports connecting to redis over the network;
# to use a remote Redis instance, set REDIS_HOST here.
# REDIS_HOST = '127.0.0.1'
# For a different redis port set the REDIS_PORT here.
# REDIS_PORT = 6379
# If you set redis_password in zulip-secrets.conf, Zulip will use that password
# to connect to the redis server.
# Controls whether Zulip will rate-limit user requests.
# RATE_LIMITING = True
| Diptanshu8/zulip | zproject/prod_settings_template.py | Python | apache-2.0 | 16,999 | [
"VisIt"
] | b69917858bc18c9ed2b93059c7136cb972ce23e00726f3ecd42e3b49d4a22b1f |
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from horizon import exceptions
from horizon import forms
import json
from crystal_dashboard.dashboards.crystal.controllers.instances import forms as instances_forms
from crystal_dashboard.api import controllers as api
class CreateInstanceView(forms.ModalFormView):
    """Modal form view for creating a new controller instance.

    Renders the CreateInstance form in a modal and returns to the
    controllers index on success.
    """
    form_class = instances_forms.CreateInstance
    form_id = "create_instance_form"
    modal_header = _("Create Instance")
    submit_label = _("Create Instance")
    submit_url = reverse_lazy("horizon:crystal:controllers:instances:create_instance")
    template_name = "crystal/controllers/instances/create.html"
    context_object_name = "instance"
    success_url = reverse_lazy("horizon:crystal:controllers:index")
    page_title = _("Create Instance")
class UpdateInstanceView(forms.ModalFormView):
    """Modal form view for editing an existing controller instance."""
    form_class = instances_forms.UpdateInstance
    # Unlike the create view, this is a URL *pattern name*: it is resolved
    # with the instance id in get_context_data() below.
    submit_url = "horizon:crystal:controllers:instances:update_instance"
    form_id = "update_instance_form"
    modal_header = _("Update an Instance")
    submit_label = _("Update Instance")
    template_name = "crystal/controllers/instances/update.html"
    context_object_name = 'instance'
    success_url = reverse_lazy('horizon:crystal:controllers:index')
    page_title = _("Update an Instance")

    def get_context_data(self, **kwargs):
        """Expose the instance id and the resolved submit URL to the template."""
        context = super(UpdateInstanceView, self).get_context_data(**kwargs)
        context['id'] = self.kwargs['id']
        args = (self.kwargs['id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    def _get_object(self, *args, **kwargs):
        """Fetch the instance from the controllers API.

        On failure, shows an error message and redirects to the index
        (returns None in that case).
        """
        instance_id = self.kwargs['id']
        try:
            instance = api.get_instance(self.request, instance_id)
            return instance
        except Exception:
            redirect = self.success_url
            msg = _('Unable to retrieve instance details.')
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        """Seed the form with the instance's current attributes.

        The API returns the instance data as JSON in ``instance.text``.
        """
        instance = self._get_object()
        initial = json.loads(instance.text)
        initial['id'] = self.kwargs['id']
        return initial
| Crystal-SDS/dashboard | crystal_dashboard/dashboards/crystal/controllers/instances/views.py | Python | gpl-3.0 | 2,229 | [
"CRYSTAL"
] | 15297b558f05ac0b24d5e55afbbbe13a9b65454365b676dd2d6ec481e8a6aeb9 |
# Copyright (C) 2007-2010 www.stani.be
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
import os
from cStringIO import StringIO
from itertools import cycle
from urllib import urlopen
from PIL import Image
from PIL import ImageDraw
from PIL import ImageEnhance
from PIL import ImageOps, ImageChops, ImageFilter
# All indices available in a 256-color ('P' mode) palette.
ALL_PALETTE_INDICES = set(range(256))
# Cache of generated checkerboard backgrounds (filled elsewhere in this module).
CHECKBOARD = {}
# Lookup table mapping values < 128 -> 255 and >= 128 -> 0; presumably used
# with Image.point() for thresholding — confirm against callers.
COLOR_MAP = [255] * 128 + [0] * 128
# Cache for content fetched over http (see urlopen import above).
WWW_CACHE = {}
# PIL format name -> file extensions that format is saved under.
EXT_BY_FORMATS = {
    'JPEG': ['JPG', 'JPEG', 'JPE'],
    'TIFF': ['TIF', 'TIFF'],
    'SVG': ['SVG', 'SVGZ'],
}
# Inverse lookup (extension -> format), derived from EXT_BY_FORMATS.
FORMATS_BY_EXT = {}
for format, exts in EXT_BY_FORMATS.items():
    for ext in exts:
        FORMATS_BY_EXT[ext] = format
# Corner style names used by the rounded-corner helpers below.
CROSS = 'Cross'
ROUNDED = 'Rounded'
SQUARE = 'Square'
CORNERS = [ROUNDED, SQUARE, CROSS]
# %-templates for the cache keys of rendered corners/rectangles.
CORNER_ID = 'rounded_corner_r%d_f%d'
CROSS_POS = (CROSS, CROSS, CROSS, CROSS)
ROUNDED_POS = (ROUNDED, ROUNDED, ROUNDED, ROUNDED)
ROUNDED_RECTANGLE_ID = 'rounded_rectangle_r%d_f%d_s%s_p%s'
class InvalidWriteFormatError(Exception):
    """Exception for invalid/unsupported image write formats."""
    pass
def drop_shadow(image, horizontal_offset=5, vertical_offset=5,
        background_color=(255, 255, 255, 0), shadow_color=0x444444,
        border=8, shadow_blur=3, force_background_color=False, cache=None):
    """Add a gaussian blur drop shadow to an image.

    :param image: The image to overlay on top of the shadow.
    :type image: PIL Image
    :param horizontal_offset:
        Horizontal offset of the shadow from the image.
        Can be positive or negative.
    :type horizontal_offset: int
    :param vertical_offset:
        Vertical offset of the shadow from the image.
        Can be positive or negative.
    :type vertical_offset: int
    :param background_color: Background color behind the image.
    :param shadow_color: Shadow color (darkness).
    :param border:
        Width of the border around the image. This must be wide
        enough to account for the blurring of the shadow.
    :param shadow_blur:
        Number of times to apply the blur filter. More shadow_blur
        produce a more blurred shadow, but increase processing time.
    :param force_background_color:
        If true, composite the transparent areas of the result onto
        background_color (the final alpha mask is preserved).
    :param cache:
        Optional dict used to memoize rendered shadow backdrops; a fresh
        dict is used when None.
    """
    if cache is None:
        cache = {}
    if has_transparency(image) and image.mode != 'RGBA':
        # Make sure 'LA' and 'P' with transparency are handled
        image = image.convert('RGBA')
    # get info
    size = image.size
    mode = image.mode
    back = None
    # normalize everything that is not RGBA down to RGB
    if mode != 'RGBA':
        if mode != 'RGB':
            image = image.convert('RGB')
            mode = 'RGB'
    # cache key covers every input that influences the rendered backdrop
    id = ''.join([str(x) for x in ['shadow_', size,
        horizontal_offset, vertical_offset, border, shadow_blur,
        background_color, shadow_color]])
    # look up in cache
    if id in cache:
        # retrieve from cache
        # NOTE(review): the cached backdrop is pasted onto directly below;
        # unless paste() copies, cache hits mutate the stored image — verify
        # against paste()'s implementation.
        back, back_size = cache[id]
    if back is None:
        # size of backdrop
        back_size = (size[0] + abs(horizontal_offset) + 2 * border,
            size[1] + abs(vertical_offset) + 2 * border)
        # create shadow mask
        if mode == 'RGBA':
            image_mask = get_alpha(image)
            shadow = Image.new('L', back_size, 0)
        else:
            image_mask = Image.new(mode, size, shadow_color)
            shadow = Image.new(mode, back_size, background_color)
        shadow_left = border + max(horizontal_offset, 0)
        shadow_top = border + max(vertical_offset, 0)
        paste(shadow, image_mask, (shadow_left, shadow_top,
            shadow_left + size[0], shadow_top + size[1]))
        del image_mask  # free up memory
        # blur shadow mask
        # Apply the filter to blur the edges of the shadow. Since a small
        # kernel is used, the filter must be applied repeatedly to get a
        # decent blur.
        n = 0
        while n < shadow_blur:
            shadow = shadow.filter(ImageFilter.BLUR)
            n += 1
        # create back
        if mode == 'RGBA':
            back = Image.new('RGBA', back_size, shadow_color)
            back.putalpha(shadow)
            del shadow  # free up memory
        else:
            back = shadow
        cache[id] = back, back_size
    # Paste the input image onto the shadow backdrop
    image_left = border - min(horizontal_offset, 0)
    image_top = border - min(vertical_offset, 0)
    if mode == 'RGBA':
        paste(back, image, (image_left, image_top), image)
        if force_background_color:
            mask = get_alpha(back)
            paste(back, Image.new('RGB', back.size, background_color),
                (0, 0), ImageChops.invert(mask))
            back.putalpha(mask)
    else:
        paste(back, image, (image_left, image_top))
    return back
# Shared default cache for round_image().  Keeps the cross-call memoization
# the old mutable-default argument provided, without the mutable-default
# pitfall (the shared dict is now explicit and module-level).
_round_image_cache = {}


def round_image(image, cache=None, round_all=True, rounding_type=None,
        radius=100, opacity=255, pos=ROUNDED_POS, back_color='#FFFFFF'):
    """Round the corners of an image.

    :param image: source image; converted to 'RGBA' if necessary
    :param cache: memoization dict for rendered masks; the module-level
        shared cache is used when None (matches the old default behaviour)
    :param round_all: if True, apply rounding_type to all four corners
        (overrides pos)
    :param rounding_type: corner style used when round_all is True
    :param radius: corner radius in pixels
    :param opacity: opacity of the rounded-rectangle mask (0-255)
    :param pos: per-corner styles, used when round_all is False
    :param back_color: color filled outside the rounded mask
    :returns: the image with the rounded-corner alpha mask applied
    """
    if cache is None:
        cache = _round_image_cache
    if image.mode != 'RGBA':
        image = image.convert('RGBA')
    if round_all:
        pos = 4 * (rounding_type, )
    mask = create_rounded_rectangle(image.size, cache, radius, opacity, pos)
    # Fill the area outside the mask with back_color, then apply the mask
    # as the image's alpha channel.
    paste(image, Image.new('RGB', image.size, back_color), (0, 0),
        ImageChops.invert(mask))
    image.putalpha(mask)
    return image
def create_rounded_rectangle(size=(600, 400), cache={}, radius=100,
        opacity=255, pos=ROUNDED_POS):
    # Build an 'L'-mode mask of a rectangle whose four corners follow `pos`
    # (ROUNDED / SQUARE / CROSS, ordered top-left, top-right, bottom-left,
    # bottom-right).  NOTE: `cache={}` is a shared mutable default — here it
    # is presumably intentional (cross-call memoization); confirm before
    # changing.
    im_x, im_y = size
    rounded_rectangle_id = ROUNDED_RECTANGLE_ID % (radius, opacity, size, pos)
    if rounded_rectangle_id in cache:
        return cache[rounded_rectangle_id]
    else:
        # The "cross" base shape (rectangle minus the four corner squares)
        # is shared by every corner-style combination, so cache it separately.
        cross_id = ROUNDED_RECTANGLE_ID % (radius, opacity, size, CROSS_POS)
        if cross_id in cache:
            cross = cache[cross_id]
        else:
            cross = cache[cross_id] = Image.new('L', size, 0)
            draw = ImageDraw.Draw(cross)
            draw.rectangle((radius, 0, im_x - radius, im_y), fill=opacity)
            draw.rectangle((0, radius, im_x, im_y - radius), fill=opacity)
        if pos == CROSS_POS:
            return cross
        # Single rounded-corner tile (top-left orientation), cached too.
        corner_id = CORNER_ID % (radius, opacity)
        if corner_id in cache:
            corner = cache[corner_id]
        else:
            corner = cache[corner_id] = create_corner(radius, opacity)
        # Square corner tile; chosen per corner when the style is not ROUNDED.
        rectangle = Image.new('L', (radius, radius), 255)
        rounded_rectangle = cross.copy()
        # Stamp each corner: flip the top-left tile horizontally for the
        # right-hand corners and vertically for the bottom corners.
        for index, angle in enumerate(pos):
            if angle == CROSS:
                continue
            if angle == ROUNDED:
                element = corner
            else:
                element = rectangle
            if index % 2:
                x = im_x - radius
                element = element.transpose(Image.FLIP_LEFT_RIGHT)
            else:
                x = 0
            if index < 2:
                y = 0
            else:
                y = im_y - radius
                element = element.transpose(Image.FLIP_TOP_BOTTOM)
            paste(rounded_rectangle, element, (x, y))
        cache[rounded_rectangle_id] = rounded_rectangle
        return rounded_rectangle
def create_corner(radius=100, opacity=255, factor=2):
    """Draw one rounded corner as a single-band ``L`` mask.

    The pie slice is drawn at ``factor`` times the final size and then
    resized down with antialiasing so the curved edge is smooth.
    """
    oversize = factor * radius
    corner = Image.new('L', (oversize, oversize), 0)
    draw = ImageDraw.Draw(corner)
    draw.pieslice((0, 0, 2 * oversize, 2 * oversize), 180, 270, fill=opacity)
    return corner.resize((radius, radius), Image.ANTIALIAS)
def get_format(ext):
    """Guess the image format by the file extension.

    .. warning::

        This is only meant to check before saving files. For existing
        files open the image with PIL and check its format attribute.

    :param ext: file extension
    :type ext: string
    :returns: image format
    :rtype: string

    >>> get_format('jpg')
    'JPEG'
    """
    key = ext.lstrip('.').upper()
    # Unknown extensions fall back to the uppercased extension itself.
    return FORMATS_BY_EXT.get(key, key)
def open_image_data(data):
    """Open an image from its raw format data.

    :param data: image format data
    :type data: string
    :returns: image
    :rtype: pil.Image
    """
    buffer = StringIO(data)
    return Image.open(buffer)
def open_image_exif(uri):
    """Open a local file or a remote file over http and transpose the
    image to its exif orientation.

    :param uri: image location
    :type uri: string
    :returns: image
    :rtype: pil.Image
    """
    image = open_image(uri)
    return transpose_exif(image)
class _ByteCounter:
"""Helper class to count how many bytes are written to a file.
.. see also:: :func:`get_size`
>>> bc = _ByteCounter()
>>> bc.write('12345')
>>> bc.bytes
5
"""
def __init__(self):
self.bytes = 0
def write(self, data):
self.bytes += len(data)
def get_size(im, format, **options):
    """Get the size in bytes the image would have if written to a file.

    :param format: image file format (e.g. ``'JPEG'``)
    :type format: string
    :returns: the file size in bytes
    :rtype: int
    """
    counter = _ByteCounter()
    try:
        im.save(counter, format, **options)
        return counter.bytes
    except AttributeError:
        # Some format writers need a real (seekable) file object, which
        # _ByteCounter is not; fall back on full in-memory compression.
        buffer = StringIO()
        im.save(buffer, format, **options)
        return len(buffer.getvalue())
def get_quality(im, size, format, down=0, up=100, delta=1000, options=None):
    """Figure out recursively the quality save parameter to obtain a
    certain image size.  This is mostly used for ``JPEG`` images.

    :param im: image
    :type im: pil.Image
    :param format: image file format (e.g. ``'JPEG'``)
    :type format: string
    :param down: minimum quality bound
    :type down: int
    :param up: maximum quality bound
    :type up: int
    :param delta: fault tolerance in bytes
    :type delta: int
    :param options: image save options
    :type options: dict
    :returns: save quality
    :rtype: int

    Example::

        filename = '/home/stani/sync/Desktop/IMGA3345.JPG'
        im = Image.open(filename)
        q = get_quality(im, 300000, "JPEG")
        im.save(filename.replace('.jpg', '_sized.jpg'))
    """
    if options is None:
        options = {}
    # Integer midpoint: quality must be an int, and floor division keeps
    # the bisection terminating (true division would produce floats and
    # q == down / q == up could never be reached on Python 3).
    q = options['quality'] = (down + up) // 2
    if q == down or q == up:
        # Bounds met without hitting the tolerance; never return 0.
        return max(q, 1)
    s = get_size(im, format, **options)
    if abs(s - size) < delta:
        return q
    elif s > size:
        return get_quality(im, size, format, down, up=q, options=options)
    else:
        return get_quality(im, size, format, down=q, up=up, options=options)
def fill_background_color(image, color):
    """Fills given image with background color.

    :param image: source image
    :type image: pil.Image
    :param color: background color
    :type color: tuple of int
    :returns: filled image
    :rtype: pil.Image
    """
    if image.mode == 'LA':
        image = image.convert('RGBA')
    elif image.mode != 'RGBA' and\
            not (image.mode == 'P' and 'transparency' in image.info):
        # Nothing transparent to fill; return the image untouched.
        return image
    # A semi-transparent fill color forces an RGBA backdrop.
    if len(color) == 4 and color[-1] != 255:
        mode = 'RGBA'
    else:
        mode = 'RGB'
    back = Image.new(mode, image.size, color)
    if (image.mode == 'P' and mode == 'RGBA'):
        image = image.convert('RGBA')
    if has_alpha(image):
        # Composite the image over the colored backdrop using its alpha.
        paste(back, image, mask=image)
    elif image.mode == 'P':
        # Palette image: overwrite the transparent palette entry with
        # the background color and drop the transparency info.
        palette = image.getpalette()
        index = image.info['transparency']
        palette[index * 3: index * 3 + 3] = color[:3]
        image.putpalette(palette)
        del image.info['transparency']
        back = image
    else:
        paste(back, image)
    return back
def generate_layer(image_size, mark, method,
        horizontal_offset, vertical_offset,
        horizontal_justification, vertical_justification,
        orientation, opacity):
    """Generate new layer for backgrounds or watermarks on which a given
    image ``mark`` can be positioned, scaled or repeated.

    :param image_size: size of the reference image
    :type image_size: tuple of int
    :param mark: image mark
    :type mark: pil.Image
    :param method: ``'Tile'``, ``'Scale'``, ``'By Offset'``
    :type method: string
    :param horizontal_offset: horizontal offset
    :type horizontal_offset: int
    :param vertical_offset: vertical offset
    :type vertical_offset: int
    :param horizontal_justification: ``'Left'``, ``'Middle'``, ``'Right'``
    :type horizontal_justification: string
    :param vertical_justification: ``'Top'``, ``'Middle'``, ``'Bottom'``
    :type vertical_justification: string
    :param orientation: mark orientation (e.g. ``'ROTATE_270'``)
    :type orientation: string
    :param opacity: opacity within ``[0, 100]``
    :type opacity: float
    :returns: generated layer
    :rtype: pil.Image

    .. see also:: :func:`reduce_opacity`
    """
    mark = convert_safe_mode(open_image(mark))
    # Callers pass a percentage; reduce_opacity expects [0, 1].
    opacity /= 100.0
    mark = reduce_opacity(mark, opacity)
    layer = Image.new('RGBA', image_size, (0, 0, 0, 0))
    if method == 'Tile':
        for y in range(0, image_size[1], mark.size[1]):
            for x in range(0, image_size[0], mark.size[0]):
                paste(layer, mark, (x, y))
    elif method == 'Scale':
        # scale, but preserve the aspect ratio
        ratio = min(float(image_size[0]) / mark.size[0],
            float(image_size[1]) / mark.size[1])
        w = int(mark.size[0] * ratio)
        h = int(mark.size[1] * ratio)
        mark = mark.resize((w, h))
        # Floor division (//) keeps the centering offsets ints on
        # Python 3 as well; identical to / for ints on Python 2.
        paste(layer, mark, ((image_size[0] - w) // 2,
            (image_size[1] - h) // 2))
    elif method == 'By Offset':
        location = calculate_location(
            horizontal_offset, vertical_offset,
            horizontal_justification, vertical_justification,
            image_size, mark.size)
        if orientation:
            orientation_value = getattr(Image, orientation)
            mark = mark.transpose(orientation_value)
        paste(layer, mark, location, force=True)
    else:
        raise ValueError('Unknown method "%s" for generate_layer.' % method)
    return layer
def identity_color(image, value=0):
    """Get a color with same color component values.

    >>> im = Image.new('RGB', (1,1))
    >>> identity_color(im, 2)
    (2, 2, 2)
    >>> im = Image.new('L', (1,1))
    >>> identity_color(im, 7)
    7
    """
    bands = image.getbands()
    # Single-band images take a scalar color, not a tuple.
    if len(bands) == 1:
        return value
    return (value,) * len(bands)
def blend(im1, im2, amount, color=None):
    """Blend two images with each other. If the images differ in size
    the color will be used for undefined pixels.

    :param im1: first image
    :type im1: pil.Image
    :param im2: second image
    :type im2: pil.Image
    :param amount: amount of blending
    :type amount: int
    :param color: color of undefined pixels
    :type color: tuple
    :returns: blended image
    :rtype: pil.Image
    """
    im2 = convert_safe_mode(im2)
    if im1.size == im2.size:
        im1 = convert(im1, im2.mode)
    else:
        # Sizes differ: center im1 on a canvas of im2's size, filled
        # with the given color (or the mode's default zero color).
        if color is None:
            expanded = Image.new(im2.mode, im2.size)
        elif im2.mode in ('1', 'L') and type(color) != int:
            # Single-band modes take a scalar fill color.
            expanded = Image.new(im2.mode, im2.size, color[0])
        else:
            expanded = Image.new(im2.mode, im2.size, color)
        im1 = im1.convert(expanded.mode)
        we, he = expanded.size
        wi, hi = im1.size
        # Floor division (//) keeps the centering offsets ints on
        # Python 3; identical to / for ints on Python 2.
        paste(expanded, im1, ((we - wi) // 2, (he - hi) // 2),
            im1.convert('RGBA'))
        im1 = expanded
    return Image.blend(im1, im2, amount)
def reduce_opacity(im, opacity):
    """Returns an image with reduced opacity if opacity is
    within ``[0, 1]``.

    :param im: source image
    :type im: pil.Image
    :param opacity: opacity within ``[0, 1]``
    :type opacity: float
    :returns im: image
    :rtype: pil.Image

    >>> im = Image.new('RGBA', (1, 1), (255, 255, 255))
    >>> im = reduce_opacity(im, 0.5)
    >>> im.getpixel((0,0))
    (255, 255, 255, 127)
    """
    # Out-of-range opacities are ignored, not clamped.
    if opacity < 0 or opacity > 1:
        return im
    alpha = get_alpha(im)
    # Scaling alpha brightness by the opacity multiplies every alpha
    # value by `opacity`.
    alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
    put_alpha(im, alpha)
    return im
def calculate_location(horizontal_offset, vertical_offset,
        horizontal_justification, vertical_justification,
        canvas_size, image_size):
    """Calculate location based on offset and justification. Offsets
    can be positive and negative.

    :param horizontal_offset: horizontal offset
    :type horizontal_offset: int
    :param vertical_offset: vertical offset
    :type vertical_offset: int
    :param horizontal_justification: ``'Left'``, ``'Middle'``, ``'Right'``
    :type horizontal_justification: string
    :param vertical_justification: ``'Top'``, ``'Middle'``, ``'Bottom'``
    :type vertical_justification: string
    :param canvas_size: size of the total canvas
    :type canvas_size: tuple of int
    :param image_size: size of the image/text which needs to be placed
    :type image_size: tuple of int
    :returns: location
    :rtype: tuple of int

    .. see also:: :func:`generate_layer`

    >>> calculate_location(50, 50, 'Left', 'Middle', (100,100), (10,10))
    (50, 45)
    """
    canvas_width, canvas_height = canvas_size
    image_width, image_height = image_size
    # Negative offsets are measured from the right/bottom canvas edge.
    if horizontal_offset < 0:
        horizontal_offset += canvas_width
    if vertical_offset < 0:
        vertical_offset += canvas_height
    # Floor division (//) keeps the deltas ints on Python 3 and matches
    # the old Python 2 behaviour of / for ints (both floor).
    if horizontal_justification == 'Left':
        horizontal_delta = 0
    elif horizontal_justification == 'Middle':
        horizontal_delta = -image_width // 2
    elif horizontal_justification == 'Right':
        horizontal_delta = -image_width
    if vertical_justification == 'Top':
        vertical_delta = 0
    elif vertical_justification == 'Middle':
        vertical_delta = -image_height // 2
    elif vertical_justification == 'Bottom':
        vertical_delta = -image_height
    return horizontal_offset + horizontal_delta, \
        vertical_offset + vertical_delta
####################################
#### PIL helper functions ####
####################################
def flatten(l):
    """Flatten a list one level deep.

    :param l: list to be flattened
    :type l: list
    :returns: flattened list
    :rtype: list

    >>> flatten([[1, 2], [3]])
    [1, 2, 3]
    """
    result = []
    for sublist in l:
        result.extend(sublist)
    return result
def has_alpha(image):
    """Check if the image has an alpha band,
    i.e. the image mode is either RGBA or LA.
    The transparency in the P mode doesn't count as an alpha band.

    :param image: the image to check
    :type image: PIL image object
    :returns: True or False
    :rtype: boolean
    """
    return image.mode[-1:] == 'A'


def has_transparency(image):
    """Check if the image has transparency: either an alpha band or a
    P mode with a transparency entry.

    :param image: the image to check
    :type image: PIL image object
    :returns: True or False
    :rtype: boolean
    """
    if has_alpha(image):
        return True
    return image.mode == 'P' and 'transparency' in image.info
# PIL 1.1.7 has a bug where split() on a not-yet-loaded image returns
# stale bands, so load() must be called first.  Pick the right wrapper
# at import time based on the installed PIL version.
if Image.VERSION == '1.1.7':
    def split(image):
        """Work around for bug in Pil 1.1.7

        :param image: input image
        :type image: PIL image object
        :returns: the different color bands of the image (eg R, G, B)
        :rtype: tuple
        """
        # Force the raster data to be decoded before splitting.
        image.load()
        return image.split()
else:
    def split(image):
        """Work around for bug in Pil 1.1.7

        :param image: input image
        :type image: PIL image object
        :returns: the different color bands of the image (eg R, G, B)
        :rtype: tuple
        """
        return image.split()
def get_alpha(image):
    """Gets the image alpha band. Can handle P mode images with
    transparency.
    Returns a band with all values set to 255 if no alpha band exists.

    :param image: input image
    :type image: PIL image object
    :returns: alpha as a band
    :rtype: single band image object
    """
    if has_alpha(image):
        # Alpha is always the last band (RGBA, LA).
        return split(image)[-1]
    if image.mode == 'P' and 'transparency' in image.info:
        # Materialize the palette transparency as a real alpha band.
        return image.convert('RGBA').split()[-1]
    # No alpha layer, create a fully opaque one.
    return Image.new('L', image.size, 255)
def get_format_data(image, format):
    """Convert the image into the file bytes of the image.  The byte
    data differs per chosen format (``JPEG``, ``TIFF``, ...).

    .. see also:: :func:`thumbnail.get_format_data`

    :param image: source image
    :type image: pil.Image
    :param format: image file type format
    :type format: string
    :returns: byte data of the image
    """
    buffer = StringIO()
    safe = convert_save_mode_by_format(image, format)
    safe.save(buffer, format)
    return buffer.getvalue()
def get_palette(image):
    """Gets the palette of an image as a sequence of (r, g, b) tuples.

    :param image: image with a palette
    :type image: pil.Image
    :returns: palette colors
    :rtype: a sequence of (r, g, b) tuples
    """
    # Trick: make a 256x1 copy whose pixel i holds palette index i, then
    # convert to RGB so PIL resolves each index to its palette color.
    palette = image.resize((256, 1))
    palette.putdata(range(256))
    return list(palette.convert("RGB").getdata())
def get_used_palette_indices(image):
    """Get used color indices in an image palette.

    :param image: image with a palette
    :type image: pil.Image
    :returns: used colors of the palette
    :rtype: set of integers (0-255)
    """
    return {index for index in image.getdata()}
def get_used_palette_colors(image):
    """Get used colors in an image palette as a sequence of (r, g, b)
    tuples.

    :param image: image with a palette
    :type image: pil.Image
    :returns: used colors of the palette
    :rtype: sequence of (r, g, b) tuples

    NOTE(review): ``putdata`` is fed a set, so the order of the returned
    colors is arbitrary -- confirm no caller relies on ordering.
    """
    used_indices = get_used_palette_indices(image)
    # The transparent index does not represent a real color.
    if 'transparency' in image.info:
        used_indices -= set([image.info['transparency']])
    n = len(used_indices)
    # Same trick as get_palette: one pixel per used index, resolved to
    # RGB by the conversion.
    palette = image.resize((n, 1))
    palette.putdata(used_indices)
    return palette.convert("RGB").getdata()
def get_unused_palette_indices(image):
    """Get unused color indices in an image palette.

    :param image: image with a palette
    :type image: pil.Image
    :returns: unused color indices of the palette
    :rtype: set of 0-255
    """
    used = get_used_palette_indices(image)
    return ALL_PALETTE_INDICES - used
def fit_color_in_palette(image, color):
    """Fit a color into a palette. If the color exists already in the palette
    return its current index, otherwise add the color to the palette if
    possible. Returns -1 for color index if all colors are used already.

    :param image: image with a palette
    :type image: pil.Image
    :param color: color to fit
    :type color: (r, g, b) tuple
    :returns: color index, (new) palette
    :rtype: (r, g, b) tuple, sequence of (r, g, b) tuples
    """
    palette = get_palette(image)
    try:
        index = palette.index(color)
    except ValueError:
        index = -1
    if index > -1:
        # Check if it is not the transparent index, as that doesn't qualify.
        try:
            transparent = index == image.info['transparency']
        except KeyError:
            transparent = False
        # If transparent, look further for another entry with this color.
        if transparent:
            try:
                index = palette[index + 1:].index(color) + index + 1
            except ValueError:
                index = -1
    if index == -1:
        # Color not present: claim the first unused palette slot.
        unused = list(get_unused_palette_indices(image))
        if unused:
            index = unused[0]
            palette[index] = color  # add color to palette
        else:
            palette = None  # palette is full
    return index, palette
def put_palette(image_to, image_from, palette=None):
    """Copies the palette and transparency of one image to another.

    :param image_to: image with a palette
    :type image_to: pil.Image
    :param image_from: image with a palette
    :type image_from: pil.Image
    :param palette: image palette
    :type palette: sequence of (r, g, b) tuples or None
    """
    # Idiomatic identity check: `is None` instead of `== None`.
    if palette is None:
        palette = get_palette(image_from)
    # putpalette expects a flat [r, g, b, r, g, b, ...] sequence.
    image_to.putpalette(flatten(palette))
    if 'transparency' in image_from.info:
        image_to.info['transparency'] = image_from.info['transparency']
def put_alpha(image, alpha):
    """Copies the given band to the alpha layer of the given image.

    :param image: input image
    :type image: PIL image object
    :param alpha: the alpha band to copy
    :type alpha: single band image object

    NOTE(review): for the modes below that require a convert(), the
    converted copy is a new local object -- the caller's image is not
    updated in that case.  Looks like callers are expected to pass
    RGB/L/RGBA/LA images, where putalpha mutates in place -- confirm.
    """
    if image.mode in ['CMYK', 'YCbCr', 'P']:
        image = image.convert('RGBA')
    elif image.mode in ['1', 'F']:
        image = image.convert('RGBA')
    image.putalpha(alpha)
def remove_alpha(image):
    """Returns a copy of the image after removing the alpha band or
    transparency

    :param image: input image
    :type image: PIL image object
    :returns: the input image after removing the alpha band or transparency
    :rtype: PIL image object
    """
    mode = image.mode
    if mode == 'RGBA':
        return image.convert('RGB')
    if mode == 'LA':
        return image.convert('L')
    if mode == 'P' and 'transparency' in image.info:
        # convert() copies the info dict, so drop the stale entry.
        opaque = image.convert('RGB')
        del opaque.info['transparency']
        return opaque
    # Nothing to strip; hand back the original object.
    return image
def paste(destination, source, box=(0, 0), mask=None, force=False):
    """Paste the source image into the destination image while using an
    alpha channel if available.

    :param destination: destination image
    :type destination:  PIL image object
    :param source: source image
    :type source:  PIL image object
    :param box:
        The box argument is either a 2-tuple giving the upper left corner,
        a 4-tuple defining the left, upper, right, and lower pixel
        coordinate, or None (same as (0, 0)). If a 4-tuple is given, the
        size of the pasted image must match the size of the region.
    :type box: tuple
    :param mask: mask or None
    :type mask: bool or PIL image object
    :param force:
        With mask: Force the invert alpha paste or not.

        Without mask:

        - If ``True`` it will overwrite the alpha channel of the
          destination with the alpha channel of the source image. So in
          that case the pixels of the destination layer will be abandoned
          and replaced by exactly the same pictures of the destination
          image. This is mostly what you need if you paste on a
          transparant canvas.
        - If ``False`` this will use a mask when the image has an alpha
          channel. In this case pixels of the destination image will
          appear through where the source image is transparent.
    :type force: bool
    """
    # Paste on top
    if mask and source == mask:
        if has_alpha(source):
            # invert_alpha = the transparant pixels of the destination
            if has_alpha(destination) and (destination.size == source.size
                    or force):
                invert_alpha = ImageOps.invert(get_alpha(destination))
                if invert_alpha.size != source.size:
                    # if sizes are not the same be careful!
                    # check the results visually
                    if len(box) == 2:
                        w, h = source.size
                        box = (box[0], box[1], box[0] + w, box[1] + h)
                    invert_alpha = invert_alpha.crop(box)
            else:
                invert_alpha = None
            # we don't want composite of the two alpha channels
            source_without_alpha = remove_alpha(source)
            # paste on top of the opaque destination pixels
            destination.paste(source_without_alpha, box, source)
            if invert_alpha != None:
                # the alpha channel is ok now, so save it
                destination_alpha = get_alpha(destination)
                # paste on top of the transparant destination pixels
                # the transparant pixels of the destination should
                # be filled with the color information from the source
                destination.paste(source_without_alpha, box, invert_alpha)
                # restore the correct alpha channel
                destination.putalpha(destination_alpha)
        else:
            # Source has no alpha: a plain paste is all that is needed.
            destination.paste(source, box)
    elif mask:
        destination.paste(source, box, mask)
    else:
        destination.paste(source, box)
        if force and has_alpha(source):
            # Overwrite the destination alpha with the source alpha so a
            # paste onto a fully transparent canvas stays visible.
            destination_alpha = get_alpha(destination)
            source_alpha = get_alpha(source)
            destination_alpha.paste(source_alpha, box)
            destination.putalpha(destination_alpha)
def auto_crop(image):
    """Crops all transparent or black background from the image

    :param image: input image
    :type image: PIL image object
    :returns: the cropped image
    :rtype: PIL image object
    """
    # getbbox() on the alpha band yields the smallest box containing
    # all non-zero (visible) pixels.
    bounding_box = get_alpha(image).getbbox()
    return convert_safe_mode(image).crop(bounding_box)
def convert(image, mode, *args, **keyw):
    """Returns a converted copy of an image

    :param image: input image
    :type image: PIL image object
    :param mode: the new mode
    :type mode: string
    :param args: extra options
    :type args: tuple of values
    :param keyw: extra keyword options
    :type keyw: dictionary of options
    :returns: the converted image
    :rtype: PIL image object
    """
    if mode == 'P':
        if image.mode == 'P':
            return image
        if image.mode in ['1', 'F']:
            return image.convert('L').convert(mode, *args, **keyw)
        if image.mode in ['RGBA', 'LA']:
            # Preserve transparency: quantize to 255 colors, then fill
            # palette entry 255 through the thresholded alpha mask and
            # mark it as the transparent index.
            alpha = get_alpha(image)
            output = image.convert('RGB').convert(
                mode, colors=255, *args, **keyw)
            paste(output,
                255, alpha.point(COLOR_MAP))
            output.info['transparency'] = 255
            return output
        return image.convert('RGB').convert(mode, *args, **keyw)
    if image.mode == 'P' and mode == 'LA':
        # A workaround for a PIL bug.
        # Converting from P to LA directly doesn't work.
        return image.convert('RGBA').convert('LA', *args, **keyw)
    if has_transparency(image) and (not mode in ['RGBA', 'LA']):
        # Target mode cannot carry transparency; drop it first.
        if image.mode == 'P':
            image = image.convert('RGBA')
            del image.info['transparency']
        #image = fill_background_color(image, (255, 255, 255, 255))
        image = image.convert(mode, *args, **keyw)
        return image
    return image.convert(mode, *args, **keyw)
def convert_safe_mode(image):
    """Converts image into a processing-safe mode.

    :param image: input image
    :type image: PIL image object
    :returns: the converted image
    :rtype: PIL image object
    """
    mode = image.mode
    if mode in ('1', 'F'):
        return image.convert('L')
    if mode == 'P' and 'transparency' in image.info:
        # convert() copies the info dict; drop the now-stale entry.
        rgba = image.convert('RGBA')
        del rgba.info['transparency']
        return rgba
    if mode in ('P', 'YCbCr', 'CMYK', 'RGBX'):
        return image.convert('RGB')
    # Already safe (L, LA, RGB, RGBA): return the original object.
    return image
def convert_save_mode_by_format(image, format):
    """Converts image into a saving-safe mode.

    Each branch maps the modes a given file format cannot store onto
    the nearest mode it can.  Formats not listed fall through and only
    get the final copy().

    :param image: input image
    :type image: PIL image object
    :param format: target format
    :type format: string
    :returns: the converted image
    :rtype: PIL image object
    """
    #TODO: Extend this helper function to support other formats as well
    if image.mode == 'P':
        # Make sure P is handled correctly
        if not format in ['GIF', 'PNG', 'TIFF', 'IM', 'PCX']:
            image = remove_alpha(image)
    if format == 'JPEG':
        if image.mode in ['RGBA', 'P']:
            return image.convert('RGB')
        if image.mode in ['LA']:
            return image.convert('L')
    elif format == 'BMP':
        if image.mode in ['LA']:
            return image.convert('L')
        if image.mode in ['P', 'RGBA', 'YCbCr', 'CMYK']:
            return image.convert('RGB')
    elif format == 'DIB':
        if image.mode in ['YCbCr', 'CMYK']:
            return image.convert('RGB')
    elif format == 'EPS':
        if image.mode in ['1', 'LA']:
            return image.convert('L')
        if image.mode in ['P', 'RGBA', 'YCbCr']:
            return image.convert('RGB')
    elif format == 'GIF':
        return convert(image, 'P', palette=Image.ADAPTIVE)
    elif format == 'PBM':
        if image.mode != '1':
            return image.convert('1')
    elif format == 'PCX':
        if image.mode in ['RGBA', 'CMYK', 'YCbCr']:
            return image.convert('RGB')
        if image.mode in ['LA', '1']:
            return image.convert('L')
    elif format == 'PDF':
        if image.mode in ['LA']:
            return image.convert('L')
        if image.mode in ['RGBA', 'YCbCr']:
            return image.convert('RGB')
    elif format == 'PGM':
        if image.mode != 'L':
            return image.convert('L')
    elif format == 'PPM':
        if image.mode in ['P', 'CMYK', 'YCbCr']:
            return image.convert('RGB')
        if image.mode in ['LA']:
            return image.convert('L')
    elif format == 'PS':
        if image.mode in ['1', 'LA']:
            return image.convert('L')
        if image.mode in ['P', 'RGBA', 'YCbCr']:
            return image.convert('RGB')
    elif format == 'XBM':
        if not image.mode in ['1']:
            return image.convert('1')
    elif format == 'TIFF':
        if image.mode in ['YCbCr']:
            return image.convert('RGB')
    elif format == 'PNG':
        if image.mode in ['CMYK', 'YCbCr']:
            return image.convert('RGB')
    #for consistency return a copy! (thumbnail.py depends on it)
    return image.copy()
def save_check_mode(image, filename, **options):
    """Save the image and report whether the file on disk ended up with
    a different mode.

    Returns the saved file's mode when it differs from the in-memory
    image's mode, otherwise an empty string.
    """
    #save image with pil
    save(image, filename, **options)
    #verify saved file
    try:
        image_file = Image.open(filename)
        image_file.verify()
    except IOError:
        # We can't verify the image mode with PIL, so issue no warnings.
        return ''
    if image.mode != image_file.mode:
        return image_file.mode
    return ''
def save_safely(image, filename):
    """Saves an image with a filename and raise the specific
    ``InvalidWriteFormatError`` in case of an error instead of a
    ``KeyError``. It can also save IM files with unicode.

    :param image: image
    :type image: pil.Image
    :param filename: image filename
    :type filename: string
    """
    extension = os.path.splitext(filename)[-1]
    # Drop the leading dot before looking up the format.
    target_format = get_format(extension[1:])
    image = convert_save_mode_by_format(image, target_format)
    save(image, filename)
def get_reverse_transposition(transposition):
    """Get the reverse transposition method.

    :param transposition: transpostion, e.g. ``Image.ROTATE_90``
    :returns: inverse transpostion, e.g. ``Image.ROTATE_270``
    """
    # Only the 90/270 rotations differ from their own inverse; flips
    # and 180-degree rotation undo themselves.
    reverse_map = {Image.ROTATE_90: Image.ROTATE_270,
                   Image.ROTATE_270: Image.ROTATE_90}
    return reverse_map.get(transposition, transposition)
def get_exif_transposition(orientation):
    """Get the transposition methods necessary to align the image to
    its exif orientation.

    Note: the trailing commas below build 1-tuples on purpose, so every
    branch yields tuples of transposition methods.

    :param orientation: exif orientation (1-8)
    :type orientation: int
    :returns: (transposition methods, reverse transposition methods)
    :rtype: tuple
    """
    #see EXIF.py
    if orientation == 1:
        # Normal orientation: nothing to do.
        transposition = transposition_reverse = ()
    elif orientation == 2:
        transposition = Image.FLIP_LEFT_RIGHT,
        transposition_reverse = Image.FLIP_LEFT_RIGHT,
    elif orientation == 3:
        transposition = Image.ROTATE_180,
        transposition_reverse = Image.ROTATE_180,
    elif orientation == 4:
        transposition = Image.FLIP_TOP_BOTTOM,
        transposition_reverse = Image.FLIP_TOP_BOTTOM,
    elif orientation == 5:
        # Mirrored + rotated: the reverse applies the inverse steps in
        # the opposite order.
        transposition = Image.FLIP_LEFT_RIGHT, \
            Image.ROTATE_90
        transposition_reverse = Image.ROTATE_270, \
            Image.FLIP_LEFT_RIGHT
    elif orientation == 6:
        transposition = Image.ROTATE_270,
        transposition_reverse = Image.ROTATE_90,
    elif orientation == 7:
        transposition = Image.FLIP_LEFT_RIGHT, \
            Image.ROTATE_270
        transposition_reverse = Image.ROTATE_90, \
            Image.FLIP_LEFT_RIGHT
    elif orientation == 8:
        transposition = Image.ROTATE_90,
        transposition_reverse = Image.ROTATE_270,
    else:
        # Unknown orientation values are treated as "no transposition".
        transposition = transposition_reverse = ()
    return transposition, transposition_reverse
def get_exif_orientation(image):
    """Gets the exif orientation of an image.

    Falls back to 1 (normal orientation) when the image has no exif
    support, no exif data, or no orientation tag.

    :param image: image
    :type image: pil.Image
    :returns: orientation
    :rtype: int
    """
    if not hasattr(image, '_getexif'):
        return 1
    try:
        exif = image._getexif()
        if not exif:
            return 1
        # 0x0112 is the standard exif Orientation tag.
        return exif[0x0112]
    except KeyError:
        return 1
def transpose(image, methods):
    """Transpose with a sequence of transformations, mainly useful
    for exif.

    :param image: image
    :type image: pil.Image
    :param methods: transposition methods
    :type methods: list
    :returns: transposed image
    :rtype: pil.Image
    """
    result = image
    for step in methods:
        result = result.transpose(step)
    return result
def transpose_exif(image, reverse=False):
    """Transpose an image to its exif orientation.

    :param image: image
    :type image: pil.Image
    :param reverse: False when opening, True when saving
    :type reverse: bool
    :returns: transposed image
    :rtype: pil.Image
    """
    orientation = get_exif_orientation(image)
    # Index 0 is the forward transposition, index 1 the reverse one.
    methods = get_exif_transposition(orientation)[int(reverse)]
    if not methods:
        return image
    return transpose(image, methods)
def checkboard(size, delta=8, fg=(128, 128, 128), bg=(204, 204, 204)):
    """Draw an n x n checkboard, which is often used as background
    for transparent images. The checkboards are stored in the
    ``CHECKBOARD`` cache.

    :param delta: dimension of one square
    :type delta: int
    :param fg: foreground color
    :type fg: tuple of int
    :param bg: background color
    :type bg: tuple of int
    :returns: checkboard image
    :rtype: pil.Image
    """
    if not (size in CHECKBOARD):
        dim = max(size)
        n = int(dim / delta) + 1  # FIXME: now acts like square->nx, ny
        def sq_start(i):
            "Return the x/y start coord of the square at column/row i."
            return i * delta
        def square(i, j):
            "Return the square corners"
            # list() keeps this a real sequence on Python 3, where map()
            # returns a lazy iterator that ImageDraw.rectangle rejects;
            # on Python 2 map() already returned a list.
            return list(map(sq_start, [i, j, i + 1, j + 1]))
        image = Image.new("RGB", size, bg)
        draw_square = ImageDraw.Draw(image).rectangle
        # Alternate the starting column per row to get the checkerboard.
        squares = (square(i, j)
                   for i_start, j in zip(cycle((0, 1)), range(n))
                   for i in range(i_start, n, 2))
        for sq in squares:
            draw_square(sq, fill=fg)
        CHECKBOARD[size] = image
    # Hand out a copy so callers cannot mutate the cached board.
    return CHECKBOARD[size].copy()
def add_checkboard(image):
    """If the image has a transparent mask, composite it over an RGB
    checkerboard background; otherwise return it unchanged.

    .. note::

        In case of a thumbnail, the resulting image can not be used for
        the cache, as it replaces the transparency layer with a non
        transparent checkboard.

    :param image: image
    :type image: pil.Image
    :returns: image, with checkboard if transparant
    :rtype: pil.Image
    """
    transparent = image.mode.endswith('A') or (
        image.mode == 'P' and 'transparency' in image.info)
    if not transparent:
        return image
    image = image.convert('RGBA')
    backdrop = checkboard(image.size)
    paste(backdrop, image, (0, 0), image)
    return backdrop
| Cobinja/Cinnamon | files/usr/share/cinnamon/cinnamon-settings/bin/imtools.py | Python | gpl-2.0 | 40,072 | [
"Gaussian"
] | 9277dea7c3d650f5fc230000313265ce899f8205bb4f88a9d174f3670032ad20 |
from pymol.wizard import Wizard
from pymol import cmd
import pymol
import types
import string
class Renaming(Wizard):
    """PyMOL wizard that renames an object or a scene key using
    keystrokes typed directly into the viewer.

    The new name is edited character by character; Enter commits the
    rename, Escape cancels it.
    """

    def __init__(self, old_name, mode='object', _self=cmd):
        Wizard.__init__(self, _self)
        # \999 is PyMOL prompt color markup, not an escape sequence.
        self.prefix = 'Renaming \\999%s\\--- to: \\999'%old_name
        self.old_name = old_name
        self.new_name = old_name
        self.mode = mode

    def get_event_mask(self):
        # Only keyboard events are of interest to this wizard.
        return Wizard.event_mask_key

    def do_key(self, k, x, y, m):
        if k in [8, 127]:  # backspace / delete removes the last char
            self.new_name = self.new_name[:-1]
        elif k == 27:  # escape cancels the rename
            cmd.set_wizard()
            cmd.refresh()
        elif k == 32:  # spaces are not valid in names
            self.new_name = self.new_name + "_"
        elif k > 32:  # any other printable character
            self.new_name = self.new_name + chr(k)
        elif k == 10 or k == 13:  # enter commits the rename
            # str.strip() replaces the Python-2-only string.strip();
            # identical behavior, also works on Python 3.
            self.new_name = self.new_name.strip()
            if self.mode == 'object':
                cmd.do("set_name %s,%s"%
                       (self.old_name, self.new_name), log=0)
            elif self.mode == 'scene':
                cmd.do("scene %s,rename,new_key=%s"%
                       (self.old_name, self.new_name), log=0)
            cmd.set_wizard()
            cmd.refresh()
            return 1
        cmd.refresh_wizard()
        return 1

    def get_prompt(self):
        # The trailing underscore acts as a text cursor.
        self.prompt = [ self.prefix + self.new_name + "_" ]
        return self.prompt

    def get_panel(self):
        return [
            [ 1, 'Renaming', '' ],
            [ 2, 'Cancel', 'cmd.set_wizard()' ]
            ]
| gratefulfrog/lib | python/pymol/wizard/renaming.py | Python | gpl-2.0 | 1,544 | [
"PyMOL"
] | ccdb97ab7209ee2b2ae190abaa5e8bec734e73ec84272c2dc831d43d12831820 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
import os
from stoqlib.domain.test.domaintest import DomainTest
from stoqlib.exporters.xlsexporter import XLSExporter
from kiwi.ui.objectlist import ObjectList, Column
class Fruit:
    """Simple row object; attribute names match the ObjectList columns."""
    def __init__(self, name, price):
        self.name = name
        self.price = price
class XLSExporterTest(DomainTest):
    """Smoke test: exporting a kiwi ObjectList produces a non-empty
    XLS file."""

    def test_export_from_object_list(self):
        fruits = ObjectList([Column('name', data_type=str),
                             Column('price', data_type=int)])
        for name, price in [('Apple', 4),
                            ('Pineapple', 2),
                            ('Kiwi', 8),
                            ('Banana', 3),
                            ('Melon', 5)]:
            fruits.append(Fruit(name, price))
        ofx = XLSExporter()
        ofx.add_from_object_list(fruits)
        # Create the temp file *before* entering the try block so the
        # finally clause never sees an unbound `temp_file` if save()
        # fails (the old code raised NameError, masking the real error).
        temp_file = ofx.save()
        try:
            # Close the file handle deterministically instead of
            # leaking it via open(...).read().
            with open(temp_file.name) as xls:
                data = xls.read()
            # We should use xlrd to 're-open' the spreadsheet and parse
            # its content.
            self.assertTrue(len(data) > 0)
        finally:
            temp_file.close()
            os.unlink(temp_file.name)
| tiagocardosos/stoq | stoqlib/lib/test/test_xlsexporter.py | Python | gpl-2.0 | 1,981 | [
"VisIt"
] | f71c0f9cfca8765778b307420eecbc95cb4836e03be9b52efa1d230032caea8a |
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, or ntpath
- os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
# Names of the modules compiled into this interpreter; used below to
# decide which platform-specific os module is available.
_names = sys.builtin_module_names

# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep",
           "defpath", "name", "path", "devnull",
           "SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Exactly one platform module is compiled into the interpreter.  Pick it,
# re-export its public names from os, and select the matching path module
# and line separator for the platform.
if 'posix' in _names:
    name = 'posix'
    linesep = '\n'
    from posix import *
    try:
        from posix import _exit
    except ImportError:
        pass
    import posixpath as path
    import posix
    __all__.extend(_get_exports_list(posix))
    del posix
elif 'nt' in _names:
    name = 'nt'
    linesep = '\r\n'
    from nt import *
    try:
        from nt import _exit
    except ImportError:
        pass
    import ntpath as path
    import nt
    __all__.extend(_get_exports_list(nt))
    del nt
elif 'os2' in _names:
    name = 'os2'
    linesep = '\r\n'
    from os2 import *
    try:
        from os2 import _exit
    except ImportError:
        pass
    if sys.version.find('EMX GCC') == -1:
        # Plain OS/2 build uses Windows-style paths.
        import ntpath as path
    else:
        # The EMX GCC build has its own path module and a link() emulation.
        import os2emxpath as path
        from _emx_link import link
    import os2
    __all__.extend(_get_exports_list(os2))
    del os2
elif 'ce' in _names:
    name = 'ce'
    linesep = '\r\n'
    from ce import *
    try:
        from ce import _exit
    except ImportError:
        pass
    # We can use the standard Windows path.
    import ntpath as path
    import ce
    __all__.extend(_get_exports_list(ce))
    del ce
elif 'riscos' in _names:
    name = 'riscos'
    linesep = '\n'
    from riscos import *
    try:
        from riscos import _exit
    except ImportError:
        pass
    import riscospath as path
    import riscos
    __all__.extend(_get_exports_list(riscos))
    del riscos
else:
    # No recognised platform module was built in (Python 2 raise syntax).
    raise ImportError, 'no os specific module found'
# Register the chosen path module as os.path so "import os.path" works,
# and pull its constants into the os namespace.
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
                     devnull)
del _names

# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0    # seek relative to the beginning of the file
SEEK_CUR = 1    # seek relative to the current position
SEEK_END = 2    # seek relative to the end of the file
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
    """makedirs(path [, mode=0777])

    Super-mkdir; create a leaf directory and all intermediate ones.
    Works like mkdir, except that any intermediate path segment (not
    just the rightmost) will be created if it does not exist.  This is
    recursive.
    """
    head, tail = path.split(name)
    if not tail:
        # A trailing separator left tail empty; split once more so tail
        # names the last real path component.
        head, tail = path.split(head)
    if head and tail and not path.exists(head):
        try:
            # Recursively create the missing ancestors first.
            makedirs(head, mode)
        except OSError, e:
            # be happy if someone already created the path
            if e.errno != errno.EEXIST:
                raise
        if tail == curdir:           # xxx/newdir/. exists if xxx/newdir exists
            return
    mkdir(name, mode)
def removedirs(name):
    """removedirs(path)

    Super-rmdir: remove the leaf directory *name*, then walk up the
    path pruning every ancestor directory that has become empty.
    Pruning stops silently at the first ancestor that cannot be
    removed -- that normally just means it was not empty.
    """
    rmdir(name)
    head, tail = path.split(name)
    if not tail:
        # A trailing separator left tail empty; split again.
        head, tail = path.split(head)
    while head and tail:
        try:
            rmdir(head)
        except error:
            break  # ancestor not empty (or not removable): stop pruning
        head, tail = path.split(head)
def renames(old, new):
    """renames(old, new)

    Super-rename: create any directories needed so that *new* is a
    valid pathname, rename *old* to *new*, and finally prune away the
    now-empty directories that used to contain *old*.

    Note: this function can fail with the new directory structure made
    if you lack permissions needed to unlink the leaf directory or
    file.
    """
    new_head, new_tail = path.split(new)
    if new_head and new_tail and not path.exists(new_head):
        makedirs(new_head)
    rename(old, new)
    old_head, old_tail = path.split(old)
    if old_head and old_tail:
        try:
            removedirs(old_head)
        except error:
            # Best effort only: a non-empty ancestor is not an error.
            pass

__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
    """Directory tree generator.

    For each directory in the tree rooted at *top* (including *top*
    itself, but excluding '.' and '..'), yield a 3-tuple

        dirpath, dirnames, filenames

    dirpath is a string, the path to the directory; dirnames and
    filenames are the bare names (no path components) of its
    subdirectories and non-directory files.  Use
    os.path.join(dirpath, name) to get a full path.

    If topdown is true (the default), a directory's triple is yielded
    before those of its subdirectories, and the caller may prune or
    reorder the traversal by modifying dirnames in place.  If topdown
    is false, the triple is yielded afterwards and modifying dirnames
    has no effect.

    Errors from the os.listdir() call are ignored by default.  If
    *onerror* is given, it is called with the os.error instance; it may
    report the error and continue, or raise to abort the walk.  The
    failing name is available as the exception's filename attribute.

    Symbolic links to directories are not followed unless *followlinks*
    is true.  Caution: if you pass a relative *top*, do not change the
    current working directory between resumptions of walk(); walk never
    changes it and assumes the caller doesn't either.
    """
    islink, join, isdir = path.islink, path.join, path.isdir

    # We may not have read permission for top, in which case we can't
    # get a list of the files the directory contains.  os.path.walk
    # always suppressed the exception then, rather than blow up for a
    # minor reason when (say) a thousand readable directories are still
    # left to visit.  That logic is copied here.
    try:
        # Note that listdir and error are globals in this module due
        # to earlier import-*.
        names = listdir(top)
    except error, err:
        if onerror is not None:
            onerror(err)
        return

    # Partition the entries into subdirectories and everything else.
    dirs, nondirs = [], []
    for name in names:
        if isdir(join(top, name)):
            dirs.append(name)
        else:
            nondirs.append(name)

    if topdown:
        yield top, dirs, nondirs
    for name in dirs:
        new_path = join(top, name)
        if followlinks or not islink(new_path):
            # Recurse and re-yield everything the subtree produces.
            for x in walk(new_path, topdown, onerror, followlinks):
                yield x
    if not topdown:
        yield top, dirs, nondirs

__all__.append("walk")
# Make sure os.environ exists, at least
try:
    environ
except NameError:
    # The platform module did not supply one; fall back to an empty dict.
    environ = {}
def execl(file, *args):
    """Replace the current process by executing *file* with the given
    positional arguments."""
    execv(file, args)

def execle(file, *args):
    """Like execl(), except the final positional argument is the
    environment mapping for the new process."""
    env = args[-1]
    execve(file, args[:-1], env)

def execlp(file, *args):
    """Like execl(), but *file* is searched for along $PATH."""
    execvp(file, args)

def execlpe(file, *args):
    """Like execle(), but *file* is searched for along $PATH."""
    env = args[-1]
    execvpe(file, args[:-1], env)

def execvp(file, args):
    """Execute *file* (searched for along $PATH) with the argument list
    *args* -- a list or tuple of strings -- replacing the current
    process."""
    _execvpe(file, args)

def execvpe(file, args, env):
    """Like execvp(), but run the new process with environment mapping
    *env*."""
    _execvpe(file, args, env)

__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
    # Shared implementation of execvp/execvpe: choose execv or execve and
    # search $PATH for the executable when no directory is given.
    if env is not None:
        func = execve
        argrest = (args, env)
    else:
        func = execv
        argrest = (args,)
        env = environ
    head, tail = path.split(file)
    if head:
        # An explicit directory component was supplied: no $PATH search.
        func(file, *argrest)
        return
    if 'PATH' in env:
        envpath = env['PATH']
    else:
        envpath = defpath
    PATH = envpath.split(pathsep)
    saved_exc = None
    saved_tb = None
    for dir in PATH:
        fullname = path.join(dir, file)
        try:
            func(fullname, *argrest)
        except error, e:
            tb = sys.exc_info()[2]
            # Remember the first failure that is not a simple
            # "not found here"; it is the most informative one.
            if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
                and saved_exc is None):
                saved_exc = e
                saved_tb = tb
    # Nothing on $PATH worked: re-raise the saved (or last) error,
    # Python 2 three-argument raise preserves the original traceback.
    if saved_exc:
        raise error, saved_exc, saved_tb
    raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
    # This will fail if there's no putenv
    putenv
except NameError:
    pass
else:
    import UserDict

    # Fake unsetenv() for Windows
    # not sure about os2 here but
    # I'm guessing they are the same.
    if name in ('os2', 'nt'):
        def unsetenv(key):
            # Setting a variable to the empty string removes it on
            # these platforms.
            putenv(key, "")

    if name == "riscos":
        # On RISC OS, all env access goes through getenv and putenv
        from riscosenviron import _Environ
    elif name in ('os2', 'nt'):  # Where Env Var Names Must Be UPPERCASE
        # But we store them as upper case
        class _Environ(UserDict.IterableUserDict):
            def __init__(self, environ):
                UserDict.UserDict.__init__(self)
                data = self.data
                # Normalise every key to upper case on the way in.
                for k, v in environ.items():
                    data[k.upper()] = v
            def __setitem__(self, key, item):
                # Keep the process environment and the dict in sync.
                putenv(key, item)
                self.data[key.upper()] = item
            def __getitem__(self, key):
                return self.data[key.upper()]
            # Deletion only propagates to the process when unsetenv exists.
            try:
                unsetenv
            except NameError:
                def __delitem__(self, key):
                    del self.data[key.upper()]
            else:
                def __delitem__(self, key):
                    unsetenv(key)
                    del self.data[key.upper()]
                def clear(self):
                    for key in self.data.keys():
                        unsetenv(key)
                        del self.data[key]
                def pop(self, key, *args):
                    unsetenv(key)
                    return self.data.pop(key.upper(), *args)
            def has_key(self, key):
                return key.upper() in self.data
            def __contains__(self, key):
                return key.upper() in self.data
            def get(self, key, failobj=None):
                return self.data.get(key.upper(), failobj)
            def update(self, dict=None, **kwargs):
                if dict:
                    try:
                        keys = dict.keys()
                    except AttributeError:
                        # List of (key, value)
                        for k, v in dict:
                            self[k] = v
                    else:
                        # got keys
                        # cannot use items(), since mappings
                        # may not have them.
                        for k in keys:
                            self[k] = dict[k]
                if kwargs:
                    self.update(kwargs)
            def copy(self):
                return dict(self)
    else:  # Where Env Var Names Can Be Mixed Case
        class _Environ(UserDict.IterableUserDict):
            def __init__(self, environ):
                UserDict.UserDict.__init__(self)
                # Case-sensitive platforms use the real dict directly.
                self.data = environ
            def __setitem__(self, key, item):
                putenv(key, item)
                self.data[key] = item
            def update(self, dict=None, **kwargs):
                if dict:
                    try:
                        keys = dict.keys()
                    except AttributeError:
                        # List of (key, value)
                        for k, v in dict:
                            self[k] = v
                    else:
                        # got keys
                        # cannot use items(), since mappings
                        # may not have them.
                        for k in keys:
                            self[k] = dict[k]
                if kwargs:
                    self.update(kwargs)
            try:
                unsetenv
            except NameError:
                pass
            else:
                def __delitem__(self, key):
                    unsetenv(key)
                    del self.data[key]
                def clear(self):
                    for key in self.data.keys():
                        unsetenv(key)
                        del self.data[key]
                def pop(self, key, *args):
                    unsetenv(key)
                    return self.data.pop(key, *args)
            def copy(self):
                return dict(self)

    environ = _Environ(environ)
def getenv(key, default=None):
    """Get an environment variable, return None if it doesn't exist.
    The optional second argument can specify an alternate default."""
    try:
        return environ[key]
    except KeyError:
        return default

__all__.append("getenv")
def _exists(name):
return name in globals()
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):

    P_WAIT = 0
    P_NOWAIT = P_NOWAITO = 1

    # XXX Should we support P_DETACH?  I suppose it could fork()**2
    # and close the std I/O streams.  Also, P_OVERLAY is the same
    # as execv*()?

    def _spawnvef(mode, file, args, env, func):
        # Internal helper; func is the exec*() function to use
        pid = fork()
        if not pid:
            # Child
            try:
                if env is None:
                    func(file, args)
                else:
                    func(file, args, env)
            except:
                # Never let the child return into the caller's frame:
                # any exec failure becomes exit status 127.
                _exit(127)
        else:
            # Parent
            if mode == P_NOWAIT:
                return pid # Caller is responsible for waiting!
            while 1:
                wpid, sts = waitpid(pid, 0)
                if WIFSTOPPED(sts):
                    # Child merely stopped (e.g. SIGSTOP): keep waiting.
                    continue
                elif WIFSIGNALED(sts):
                    return -WTERMSIG(sts)
                elif WIFEXITED(sts):
                    return WEXITSTATUS(sts)
                else:
                    raise error, "Not stopped, signaled or exited???"

    def spawnv(mode, file, args):
        """spawnv(mode, file, args) -> integer

        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execv)

    def spawnve(mode, file, args, env):
        """spawnve(mode, file, args, env) -> integer

        Execute file with arguments from args in a subprocess with the
        specified environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execve)

    # Note: spawnvp[e] is't currently supported on Windows

    def spawnvp(mode, file, args):
        """spawnvp(mode, file, args) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execvp)

    def spawnvpe(mode, file, args, env):
        """spawnvpe(mode, file, args, env) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execvpe)

if _exists("spawnv"):
    # These aren't supplied by the basic Windows code
    # but can be easily implemented in Python

    def spawnl(mode, file, *args):
        """spawnl(mode, file, *args) -> integer

        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnv(mode, file, args)

    def spawnle(mode, file, *args):
        """spawnle(mode, file, *args, env) -> integer

        Execute file with arguments from args in a subprocess with the
        supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        # The environment mapping is passed as the last positional arg.
        env = args[-1]
        return spawnve(mode, file, args[:-1], env)

    __all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])

if _exists("spawnvp"):
    # At the moment, Windows doesn't implement spawnvp[e],
    # so it won't have spawnlp[e] either.
    def spawnlp(mode, file, *args):
        """spawnlp(mode, file, *args) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnvp(mode, file, args)

    def spawnlpe(mode, file, *args):
        """spawnlpe(mode, file, *args, env) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        # The environment mapping is passed as the last positional arg.
        env = args[-1]
        return spawnvpe(mode, file, args[:-1], env)

    __all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if _exists("fork"):
    if not _exists("popen2"):
        def popen2(cmd, mode="t", bufsize=-1):
            """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
            may be a sequence, in which case arguments will be passed directly to
            the program without shell intervention (as with os.spawnv()). If 'cmd'
            is a string it will be passed to the shell (as with os.system()). If
            'bufsize' is specified, it sets the buffer size for the I/O pipes. The
            file objects (child_stdin, child_stdout) are returned."""
            import warnings
            msg = "os.popen2 is deprecated. Use the subprocess module."
            warnings.warn(msg, DeprecationWarning, stacklevel=2)

            import subprocess
            PIPE = subprocess.PIPE
            # shell=True only when cmd is a string, mirroring os.system().
            p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
                                 bufsize=bufsize, stdin=PIPE, stdout=PIPE,
                                 close_fds=True)
            return p.stdin, p.stdout
        __all__.append("popen2")

    if not _exists("popen3"):
        def popen3(cmd, mode="t", bufsize=-1):
            """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
            may be a sequence, in which case arguments will be passed directly to
            the program without shell intervention (as with os.spawnv()). If 'cmd'
            is a string it will be passed to the shell (as with os.system()). If
            'bufsize' is specified, it sets the buffer size for the I/O pipes. The
            file objects (child_stdin, child_stdout, child_stderr) are returned."""
            import warnings
            msg = "os.popen3 is deprecated. Use the subprocess module."
            warnings.warn(msg, DeprecationWarning, stacklevel=2)

            import subprocess
            PIPE = subprocess.PIPE
            # Separate pipe for stderr, unlike popen4 below.
            p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
                                 bufsize=bufsize, stdin=PIPE, stdout=PIPE,
                                 stderr=PIPE, close_fds=True)
            return p.stdin, p.stdout, p.stderr
        __all__.append("popen3")

    if not _exists("popen4"):
        def popen4(cmd, mode="t", bufsize=-1):
            """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
            may be a sequence, in which case arguments will be passed directly to
            the program without shell intervention (as with os.spawnv()). If 'cmd'
            is a string it will be passed to the shell (as with os.system()). If
            'bufsize' is specified, it sets the buffer size for the I/O pipes. The
            file objects (child_stdin, child_stdout_stderr) are returned."""
            import warnings
            msg = "os.popen4 is deprecated. Use the subprocess module."
            warnings.warn(msg, DeprecationWarning, stacklevel=2)

            import subprocess
            PIPE = subprocess.PIPE
            # stderr is merged into stdout, so a single output stream returns.
            p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
                                 bufsize=bufsize, stdin=PIPE, stdout=PIPE,
                                 stderr=subprocess.STDOUT, close_fds=True)
            return p.stdin, p.stdout
        __all__.append("popen4")
import copy_reg as _copy_reg

# Register pickle support for the stat_result / statvfs_result structseq
# types, when the platform module provides them.

def _make_stat_result(tup, dict):
    # Reconstructor used when unpickling a stat_result.
    return stat_result(tup, dict)

def _pickle_stat_result(sr):
    (type, args) = sr.__reduce__()
    return (_make_stat_result, args)

try:
    _copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
    pass

def _make_statvfs_result(tup, dict):
    # Reconstructor used when unpickling a statvfs_result.
    return statvfs_result(tup, dict)

def _pickle_statvfs_result(sr):
    (type, args) = sr.__reduce__()
    return (_make_statvfs_result, args)

try:
    _copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
                     _make_statvfs_result)
except NameError: # statvfs_result may not exist
    pass
if not _exists("urandom"):
def urandom(n):
"""urandom(n) -> str
Return a string of n random bytes suitable for cryptographic use.
"""
try:
_urandomfd = open("/dev/urandom", O_RDONLY)
except (OSError, IOError):
raise NotImplementedError("/dev/urandom (or equivalent) not found")
try:
bs = b""
while n > len(bs):
bs += read(_urandomfd, n - len(bs))
finally:
close(_urandomfd)
return bs
| aurofable/medhack-server | venv/lib/python2.7/os.py | Python | mit | 26,300 | [
"VisIt"
] | 1516262162cb0023c1b944fd8259cbd2b75a6fe6616c3e5df19d35d577f0d72f |
# Copyright (C) 2011 by Brandon Invergo (b.invergo@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import os
import os.path
from _paml import Paml, PamlError, _relpath
import _parse_codeml
#TODO - Restore use of with statement for closing handles automatically
#after dropping Python 2.4
class CodemlError(EnvironmentError):
    """Raised when CODEML fails; run with verbose=True to see CODEML's
    own error message."""
class Codeml(Paml):
    """This class implements an interface to CODEML, part of the PAML package."""

    def __init__(self, alignment = None, tree = None, working_dir = None,
                 out_file = None):
        """Initialize the codeml instance.

        The user may optionally pass in strings specifying the locations
        of the input alignment and tree files, the working directory and
        the final output file. Other options found in the CODEML control
        have typical settings by default to run site class models 0, 1 and
        2 on a nucleotide alignment.
        """
        Paml.__init__(self, alignment, working_dir, out_file)
        if tree is not None:
            if not os.path.exists(tree):
                raise IOError, "The specified tree file does not exist."
        self.tree = tree
        self.ctl_file = "codeml.ctl"
        # All recognised CODEML control-file options; None means
        # "omitted from the control file" (CODEML then uses its default).
        self._options = {"noisy": None,
                         "verbose": None,
                         "runmode": None,
                         "seqtype": None,
                         "CodonFreq": None,
                         "ndata": None,
                         "clock": None,
                         "aaDist": None,
                         "aaRatefile": None,
                         "model": None,
                         "NSsites": None,
                         "icode": None,
                         "Mgene": None,
                         "fix_kappa": None,
                         "kappa": None,
                         "fix_omega": None,
                         "omega": None,
                         "fix_alpha": None,
                         "alpha": None,
                         "Malpha": None,
                         "ncatG": None,
                         "getSE": None,
                         "RateAncestor": None,
                         "Small_Diff": None,
                         "cleandata": None,
                         "fix_blength": None,
                         "method": None}

    def write_ctl_file(self):
        """Dynamically build a CODEML control file from the options.

        The control file is written to the location specified by the
        ctl_file property of the codeml class.
        """
        # Make sure all paths are relative to the working directory
        self._set_rel_paths()
        if True: #Dummy statement to preserve indentation for diff
            ctl_handle = open(self.ctl_file, 'w')
            ctl_handle.write("seqfile = %s\n" % self._rel_alignment)
            ctl_handle.write("outfile = %s\n" % self._rel_out_file)
            ctl_handle.write("treefile = %s\n" % self._rel_tree)
            for option in self._options.items():
                if option[1] == None:
                    # If an option has a value of None, there's no need
                    # to write it in the control file; it's normally just
                    # commented out.
                    continue
                if option[0] == "NSsites":
                    # NSsites is stored in Python as a list but in the
                    # control file it is specified as a series of numbers
                    # separated by spaces.
                    NSsites = " ".join([str(site) for site in option[1]])
                    ctl_handle.write("%s = %s\n" % (option[0], NSsites))
                else:
                    ctl_handle.write("%s = %s\n" % (option[0], option[1]))
            ctl_handle.close()

    def read_ctl_file(self, ctl_file):
        """Parse a control file and load the options into the Codeml instance.
        """
        temp_options = {}
        if not os.path.isfile(ctl_file):
            raise IOError("File not found: %r" % ctl_file)
        else:
            ctl_handle = open(ctl_file)
            for line in ctl_handle:
                line = line.strip()
                # Anything after '*' is a comment in PAML control files.
                uncommented = line.split("*",1)[0]
                if uncommented != "":
                    if "=" not in uncommented:
                        ctl_handle.close()
                        raise AttributeError, \
                            "Malformed line in control file:\n%r" % line
                    (option, value) = uncommented.split("=")
                    option = option.strip()
                    value = value.strip()
                    # The file-location options map onto instance
                    # attributes rather than the options dict.
                    if option == "seqfile":
                        self.alignment = value
                    elif option == "treefile":
                        self.tree = value
                    elif option == "outfile":
                        self.out_file = value
                    elif option == "NSsites":
                        # Stored as a list of ints in Python.
                        site_classes = value.split(" ")
                        for n in range(len(site_classes)):
                            try:
                                site_classes[n] = int(site_classes[n])
                            except:
                                ctl_handle.close()
                                raise TypeError, \
                                    "Invalid site class: %s" % site_classes[n]
                        temp_options["NSsites"] = site_classes
                    elif option not in self._options:
                        ctl_handle.close()
                        raise KeyError, "Invalid option: %s" % option
                    else:
                        # Best-effort numeric conversion; fall back to the
                        # raw string when the value isn't a number.
                        if "." in value:
                            try:
                                converted_value = float(value)
                            except:
                                converted_value = value
                        else:
                            try:
                                converted_value = int(value)
                            except:
                                converted_value = value
                        temp_options[option] = converted_value
            ctl_handle.close()
        # Replace the full option set: options absent from the control
        # file are reset to None.
        for option in self._options.keys():
            if option in temp_options.keys():
                self._options[option] = temp_options[option]
            else:
                self._options[option] = None

    def print_options(self):
        """Print out all of the options and their current settings."""
        for option in self._options.items():
            if option[0] == "NSsites" and option[1] is not None:
                # NSsites is stored in Python as a list but in the
                # control file it is specified as a series of numbers
                # separated by spaces.
                NSsites = " ".join([str(site) for site in option[1]])
                print "%s = %s" % (option[0], NSsites)
            else:
                print "%s = %s" % (option[0], option[1])

    def _set_rel_paths(self):
        """Convert all file/directory locations to paths relative to the current working directory.

        CODEML requires that all paths specified in the control file be
        relative to the directory from which it is called rather than
        absolute paths.
        """
        Paml._set_rel_paths(self)
        if self.tree is not None:
            self._rel_tree = _relpath(self.tree, self.working_dir)

    def run(self, ctl_file = None, verbose = False, command = "codeml",
            parse = True):
        """Run codeml using the current configuration and then parse the results.

        Return a process signal so the user can determine if
        the execution was successful (return code 0 is successful, -N
        indicates a failure). The arguments may be passed as either
        absolute or relative paths, despite the fact that CODEML
        requires relative paths.
        """
        if self.tree is None:
            raise ValueError, "Tree file not specified."
        if not os.path.exists(self.tree):
            raise IOError, "The specified tree file does not exist."
        Paml.run(self, ctl_file, verbose, command)
        if parse:
            # Parse the results file CODEML just wrote.
            results = read(self.out_file)
        else:
            results = None
        return results
def read(results_file):
    """Parse a CODEML results file."""
    results = {}
    if not os.path.exists(results_file):
        raise IOError, "Results file does not exist."
    handle = open(results_file)
    lines = handle.readlines()
    handle.close()
    # Each parser pass fills in another section of the results dict.
    (results, multi_models) = _parse_codeml.parse_basics(lines, results)
    results = _parse_codeml.parse_nssites(lines, results, multi_models)
    results = _parse_codeml.parse_pairwise(lines, results)
    results = _parse_codeml.parse_distances(lines, results)
    # An empty dict means none of the parsers recognised anything.
    if len(results) == 0:
        raise ValueError, "Invalid results file"
    return results
| asherkhb/coge | bin/last_wrapper/Bio/Phylo/PAML/codeml.py | Python | bsd-2-clause | 9,214 | [
"Biopython"
] | fc10069f0e654f20f1d4157f949e8ff62a0757f09feb9b81dee41eee19645a29 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import absolute_import
from math import sqrt, pi
import numpy as np
from psi4 import core
from psi4.driver import constants
from psi4.driver.p4util.exceptions import *
def least_squares_fit_polynomial(xvals, fvals, localization_point, no_factorials=True, weighted=True, polynomial_order=4):
    """Least-squares fit of a polynomial of the given order to function
    values *fvals* sampled at *xvals*, expanded about *localization_point*.

    When *weighted* is True, samples are weighted so that points near the
    localization point dominate the fit (eqn (7) of
    http://dx.doi.org/10.1063/1.4862157).  When *no_factorials* is True
    the returned coefficients are rescaled by n! so that entry n is the
    n-th derivative rather than the Taylor coefficient.
    """
    shifted = np.array(xvals) - localization_point
    if weighted:
        # Weighting parameters from the reference: localization radius R,
        # power p_nu and regularizer epsilon.
        R, p_nu, epsilon = 1.0, 1, 1e-3
        z = np.square(shifted / R)
        weights = np.exp(-z) / (z ** p_nu + epsilon ** p_nu)
    else:
        weights = None
    coeffs = np.polynomial.polynomial.polyfit(shifted, fvals, polynomial_order, w=weights)
    if no_factorials:
        # Convert Taylor coefficients c_n into derivatives f^(n) = n! * c_n.
        factorial = 1.0
        for order in range(2, polynomial_order + 1):
            factorial *= order
            coeffs[order] *= factorial
    return coeffs
def anharmonicity(rvals, energies, plot_fit='', mol = None):
"""Generates spectroscopic constants for a diatomic molecules.
Fits a diatomic potential energy curve using a weighted least squares approach
(c.f. http://dx.doi.org/10.1063/1.4862157, particularly eqn. 7), locates the minimum
energy point, and then applies second order vibrational perturbation theory to obtain spectroscopic
constants. Any number of points greater than 4 may be provided, and they should bracket the minimum.
The data need not be evenly spaced, and can be provided in any order. The data are weighted such that
those closest to the minimum have highest impact.
A dictionary with the following keys, which correspond to spectroscopic constants, is returned:
:type rvals: list
:param rvals: The bond lengths (in Angstrom) for which energies are
provided, of length at least 5 and equal to the length of the energies array
:type energies: list
:param energies: The energies (Eh) computed at the bond lengths in the rvals list
:type plot_fit: string
:param plot_fit: A string describing where to save a plot of the harmonic and anharmonic fits, the
inputted data points, re, r0 and the first few energy levels, if matplotlib
is available. Set to 'screen' to generate an interactive plot on the screen instead. If a filename is
provided, the image type is determined by the extension; see matplotlib for supported file types.
:returns: (*dict*) Keys: "re", "r0", "we", "wexe", "nu", "ZPVE(harmonic)", "ZPVE(anharmonic)", "Be", "B0", "ae", "De"
corresponding to the spectroscopic constants in cm-1
"""
angstrom_to_bohr = 1.0 / constants.bohr2angstroms
angstrom_to_meter = 10e-10;
# Make sure the input is valid
if len(rvals) != len(energies):
raise ValidationError("The number of energies must match the number of distances")
npoints = len(rvals)
if npoints < 5:
raise ValidationError("At least 5 data points must be provided to compute anharmonicity")
core.print_out("\n\nPerforming a fit to %d data points\n" % npoints)
# Make sure the molecule the user provided is the active one
molecule = mol if mol is not None else core.get_active_molecule()
molecule.update_geometry()
natoms = molecule.natom()
if natoms != 2:
raise Exception("The current molecule must be a diatomic for this code to work!")
m1 = molecule.mass(0)
m2 = molecule.mass(1)
# Optimize the geometry, refitting the surface around each new geometry
core.print_out("\nOptimizing geometry based on current surface:\n\n");
re = np.mean(rvals)
maxit = 30
thres = 1.0e-9
for i in range(maxit):
derivs = least_squares_fit_polynomial(rvals,energies,localization_point=re)
e,g,H = derivs[0:3]
core.print_out(" E = %20.14f, x = %14.7f, grad = %20.14f\n" % (e, re, g))
if abs(g) < thres:
break
re -= g/H;
if i == maxit-1:
raise ConvergenceError("diatomic geometry optimization", maxit)
core.print_out(" Final E = %20.14f, x = %14.7f, grad = %20.14f\n" % (e, re, g));
if re < min(rvals):
raise Exception("Minimum energy point is outside range of points provided. Use a lower range of r values.")
if re > max(rvals):
raise Exception("Minimum energy point is outside range of points provided. Use a higher range of r values.")
# Convert to convenient units, and compute spectroscopic constants
d0,d1,d2,d3,d4 = derivs*constants.hartree2aJ
core.print_out("\nEquilibrium Energy %20.14f Hartrees\n" % e)
core.print_out("Gradient %20.14f\n" % g)
core.print_out("Quadratic Force Constant %14.7f MDYNE/A\n" % d2)
core.print_out("Cubic Force Constant %14.7f MDYNE/A**2\n" % d3)
core.print_out("Quartic Force Constant %14.7f MDYNE/A**3\n" % d4)
hbar = constants.h / (2.0 * pi)
mu = ((m1*m2)/(m1+m2))*constants.amu2kg
we = 5.3088375e-11*sqrt(d2/mu)
wexe = (1.2415491e-6)*(we/d2)**2 * ((5.0*d3*d3)/(3.0*d2)-d4)
# Rotational constant: Be
I = ((m1*m2)/(m1+m2)) * constants.amu2kg * (re * angstrom_to_meter)**2
B = constants.h / (8.0 * pi**2 * constants.c * I)
# alpha_e and quartic centrifugal distortion constant
ae = -(6.0 * B**2 / we) * ((1.05052209e-3*we*d3)/(sqrt(B * d2**3))+1.0)
de = 4.0*B**3 / we**2
# B0 and r0 (plus re check using Be)
B0 = B - ae / 2.0
r0 = sqrt(constants.h / (8.0 * pi**2 * mu * constants.c * B0))
recheck = sqrt(constants.h / (8.0 * pi**2 * mu * constants.c * B))
r0 /= angstrom_to_meter;
recheck /= angstrom_to_meter;
# Fundamental frequency nu
nu = we - 2.0 * wexe;
zpve_nu = 0.5 * we - 0.25 * wexe;
# Generate pretty pictures, if requested
if(plot_fit):
try:
import matplotlib.pyplot as plt
# Correct the derivatives for the missing factorial prefactors
dvals = np.zeros(5)
dvals[0:5] = derivs[0:5]
dvals[2] /= 2
dvals[3] /= 6
dvals[4] /= 24
# Default plot range, before considering energy levels
minE = np.min(energies)
maxE = np.max(energies)
minR = np.min(rvals)
maxR = np.max(rvals)
# Plot vibrational energy levels
we_au = we / constants.hartree2wavenumbers
wexe_au = wexe / constants.hartree2wavenumbers
coefs2 = [ dvals[2], dvals[1], dvals[0] ]
coefs4 = [ dvals[4], dvals[3], dvals[2], dvals[1], dvals[0] ]
for n in range(3):
Eharm = we_au*(n+0.5)
Evpt2 = Eharm - wexe_au*(n+0.5)**2
coefs2[-1] = -Eharm
coefs4[-1] = -Evpt2
roots2 = np.roots(coefs2)
roots4 = np.roots(coefs4)
xvals2 = roots2 + re
xvals4 = np.choose(np.where(np.isreal(roots4)), roots4)[0].real + re
Eharm += dvals[0]
Evpt2 += dvals[0]
plt.plot(xvals2, [Eharm, Eharm], 'b', linewidth=1)
plt.plot(xvals4, [Evpt2, Evpt2], 'g', linewidth=1)
maxE = Eharm
maxR = np.max([xvals2,xvals4])
minR = np.min([xvals2,xvals4])
# Find ranges for the plot
dE = maxE - minE
minE -= 0.2*dE
maxE += 0.4*dE
dR = maxR - minR
minR -= 0.2*dR
maxR += 0.2*dR
# Generate the fitted PES
xpts = np.linspace(minR, maxR, 1000)
xrel = xpts-re
xpows = xrel[:,np.newaxis] ** range(5)
fit2 = np.einsum('xd,d', xpows[:,0:3], dvals[0:3])
fit4 = np.einsum('xd,d', xpows, dvals)
# Make / display the plot
plt.plot(xpts, fit2, 'b', linewidth=2.5, label='Harmonic (quadratic) fit')
plt.plot(xpts, fit4, 'g', linewidth=2.5, label='Anharmonic (quartic) fit')
plt.plot([re, re], [minE, maxE], 'b--', linewidth=0.5)
plt.plot([r0, r0], [minE, maxE], 'g--', linewidth=0.5)
plt.scatter(rvals, energies, c='Black', linewidth=3, label='Input Data')
plt.legend()
plt.xlabel('Bond length (Angstroms)')
plt.ylabel('Energy (Eh)')
plt.xlim(minR, maxR)
plt.ylim(minE, maxE)
if plot_fit == 'screen':
plt.show()
else:
plt.savefig(plot_fit)
core.print_out("\n\tPES fit saved to %s.\n\n" % plot_fit)
except ImportError:
msg = "\n\tPlot not generated; matplotlib is not installed on this machine.\n\n"
print(msg)
core.print_out(msg)
core.print_out("\nre = %10.6f A check: %10.6f\n" % (re, recheck))
core.print_out("r0 = %10.6f A\n" % r0)
core.print_out("we = %10.4f cm-1\n" % we)
core.print_out("wexe = %10.4f cm-1\n" % wexe)
core.print_out("nu = %10.4f cm-1\n" % nu)
core.print_out("ZPVE(nu) = %10.4f cm-1\n" % zpve_nu)
core.print_out("Be = %10.4f cm-1\n" % B)
core.print_out("B0 = %10.4f cm-1\n" % B0)
core.print_out("ae = %10.4f cm-1\n" % ae)
core.print_out("De = %10.7f cm-1\n" % de)
results = {
"re" : re,
"r0" : r0,
"we" : we,
"wexe" : wexe,
"nu" : nu,
"ZPVE(harmonic)" : zpve_nu,
"ZPVE(anharmonic)" : zpve_nu,
"Be" : B,
"B0" : B0,
"ae" : ae,
"De" : de
}
return results
| rmcgibbo/psi4public | psi4/driver/diatomic.py | Python | lgpl-3.0 | 10,978 | [
"Psi4"
] | f83cf68e0eaa722564f91828ecc276bcc1330f0a695f4df0ce52be6545fced64 |
import re
from distutils.core import setup
def get_version_string(version_file="vqsr_cnn/_version.py"):
    """Extract the package version from a ``__version__ = "..."`` line.

    Args:
        version_file: Path of the file to scan. Defaults to the package's
            ``_version.py`` so existing callers are unaffected.

    Returns:
        The version string, e.g. ``"1.0.0"``.

    Raises:
        RuntimeError: If no ``__version__`` assignment is found.
    """
    version_regexp = r"^__version__ = ['\"]([^'\"]*)['\"]"
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(version_file, "rt") as handle:
        contents = handle.read()
    re_out = re.search(version_regexp, contents, re.M)
    if re_out is not None:
        return re_out.group(1)
    else:
        raise RuntimeError("Unable to find version string in %s." % (version_file,))
# Package registration.
# NOTE(review): distutils is deprecated (PEP 632) and removed in Python 3.12;
# switching to setuptools.setup would be a drop-in change here.
setup(name='vqsr_cnn',
      # Single source of truth for the version: vqsr_cnn/_version.py.
      version=get_version_string(),
      description='Variant quality score recalibration with Convolutional Neural Networks',
      author='Sam Friedman',
      author_email='sam@broadinstitute.org',
      license='LICENSE.txt',
      packages=['vqsr_cnn'],
      # Runtime dependencies with minimum supported versions.
      install_requires=[
          "keras >= 2.0",
          "numpy >= 1.13.1",
          "scipy >= 0.19.1",
          "pysam >= 0.13",
          "scikit-learn >= 0.19.1",
          "matplotlib >= 2.1.2",
          "pyvcf >= 0.6.8",
          "biopython >= 1.70"
      ]
      )
| magicDGS/gatk | src/main/python/org/broadinstitute/hellbender/setup_vqsr_cnn.py | Python | bsd-3-clause | 999 | [
"Biopython",
"pysam"
] | a39c152f5ff0d22d436b0190b240f2bfb35ea54b994f62fafc9ddcc72af28590 |
from build.management.commands.base_build import Command as BaseBuild
from django.conf import settings
from protein.models import Protein, ProteinSegment, ProteinConformation, ProteinState
from structure.models import Structure, Rotamer
from structure.functions import BlastSearch
from Bio.Blast import NCBIXML, NCBIWWW
from Bio import pairwise2
import subprocess, shlex, os
class Command(BaseBuild):
help = 'Blastp search custom dbs'
def add_arguments(self, parser):
super(Command, self).add_arguments(parser=parser)
parser.add_argument('-p1', help='Entry name or sequence of protein 1', default=False, type=str)
parser.add_argument('-p2', help='Entry name or sequence of protein 2', default=False, type=str)
def handle(self, *args, **options):
p1 = Protein.objects.filter(entry_name=options['p1'])
if len(p1)==0:
seq1 = options['p1']
else:
seq1 = p1[0].sequence
p2 = Protein.objects.filter(entry_name=options['p2'])
if len(p2)==0:
seq2 = options['p2']
else:
seq2 = p2[0].sequence
pw2 = pairwise2.align.localms(seq1, seq2, 3, -4, -3, -1)
ref_seq, temp_seq = str(pw2[0][0]), str(pw2[0][1])
for r,t in zip(ref_seq,temp_seq):
print(r,t) | protwis/protwis | protein/management/commands/pairwise.py | Python | apache-2.0 | 1,185 | [
"BLAST"
] | a230c68e510d232084d142b54608603e76480b48e4ab475b9f6e473c77b7bdc4 |
import shlex
import sys
import threading
import time
from optparse import OptionParser
from socket import socket
from subprocess import PIPE, Popen

import android
def set_parse_options():
    """Build the command-line parser for the adb/socat/serial bridge.

    All four options default to None; the caller treats any missing option
    as a usage error.  (Fix: OptionParser was used but never imported --
    now imported at the top of the file.)
    """
    parser = OptionParser()
    parser.add_option("-a", "--adb_lport", dest="adb_lport", type="int",
                      help="local port that adb will forward too")
    parser.add_option("-b", "--adb_rport", dest="adb_rport", type="int",
                      help="remote port that adb will forward too")
    parser.add_option("-s", "--socat_port", dest="stcp_port", type="int",
                      help="port socat will recieve and forward the data to the serial port")
    parser.add_option("-t", "--serial_name", dest="serialp_name", type="string",
                      help="port socat will recieve and forward the data to the serial port")
    parser.set_defaults(adb_lport=None,
                        adb_rport=None,
                        stcp_port=None,
                        serialp_name=None)
    return parser
def get_checksum(sentence):
    """Return the NMEA 0183 checksum of *sentence* as two hex digits.

    The checksum is the XOR of every character between '$' and '*'.
    Fix: NMEA requires exactly two uppercase hex digits ("%02X"); the old
    "%x" produced lowercase and dropped the leading zero for values < 0x10.
    """
    checksum = 0
    for char in sentence:
        if char == '$':
            continue
        if char == '*':
            break
        # XOR with an accumulator starting at 0 subsumes the old
        # "first character" special case (0 ^ c == c).
        checksum ^= ord(char)
    return "%02X" % checksum
def convert_DMlongitude_to_NMEA(result):
    """Convert a decimal-degree longitude to the NMEA 'dddmm.mmmm,<E|W>' field.

    *result* is the Android location event dict; only result['longitude']
    is read.  Fix: NMEA encodes degrees*100 + minutes, not degrees*100 --
    the old str(abs(longitude)*100) produced a meaningless value.
    """
    longitude = result['longitude']
    hemi = "W" if longitude < 0 else "E"
    degrees = int(abs(longitude))
    minutes = (abs(longitude) - degrees) * 60.0
    # Longitude uses three degree digits per NMEA 0183.
    return "%03d%07.4f,%s" % (degrees, minutes, hemi)
def convert_DMlatitude_to_NMEA(result):
    """Convert a decimal-degree latitude to the NMEA 'ddmm.mmmm,<N|S>' field.

    *result* is the Android location event dict; only result['latitude']
    is read.  Fixes: the old body referenced an undefined name
    ``longitude`` (NameError) and did not encode degrees-and-minutes.
    """
    latitude = result['latitude']
    hemi = "S" if latitude < 0 else "N"
    degrees = int(abs(latitude))
    minutes = (abs(latitude) - degrees) * 60.0
    # Latitude uses two degree digits per NMEA 0183.
    return "%02d%07.4f,%s" % (degrees, minutes, hemi)
def create_gpgll_sentence(result):
    """Build a $GPGLL NMEA sentence from an Android location event result.

    *result* must contain 'latitude', 'longitude' and 'time'.
    Fixes: the old code called undefined helpers
    convert_latitude_to_NMEA / convert_longitude_to_NMEA (NameError),
    omitted the comma between the latitude and longitude fields, and
    emitted a stray comma before '*'.
    """
    # Field order per NMEA 0183: lat,N/S,lon,E/W,time,status*checksum
    gll = "$GPGLL,"
    gll += convert_DMlatitude_to_NMEA(result)
    gll += ","
    gll += convert_DMlongitude_to_NMEA(result)
    gll += "," + str(result['time'])
    gll += ",A*"
    gll += get_checksum(gll)
    return gll + "\r\n"
def init_android_instance(port, accuracy="fine", minUpdateTimeMs=3000, minDistanceM=3):
    """Connect to the SL4A/ASE RPC server on localhost:*port* and start
    location updates with the given accuracy/rate settings.

    Returns the connected ``android.Android`` proxy object.
    """
    droid = android.Android(("localhost", int(port)))
    droid.startLocating(accuracy, minUpdateTimeMs, minDistanceM)
    return droid
def run_cmd(cmd_str):
    """Run a command line and return its raw stdout as bytes.

    Fixes: shlex.split handles quoted arguments correctly (plain
    str.split does not), and the child process is now reaped with
    wait() so it does not linger as a zombie.
    """
    proc = Popen(shlex.split(cmd_str), stdout=PIPE)
    output = proc.stdout.read()
    proc.wait()
    return output
# socat template: -d -d enables verbose logging; bridges TCP localhost:<port>
# to a pty symlinked at /dev/<name>, in raw mode with echo disabled.
socat_cmd_str = "socat -d -d tcp:localhost:%d pty,link=/dev/%s,raw,echo=0"
if __name__ == "__main__":
    # Fixed: was set_parser_options(), which does not exist.
    parser = set_parse_options()
    (options, args) = parser.parse_args()
    if options.adb_lport is None or \
       options.adb_rport is None or \
       options.stcp_port is None or \
       options.serialp_name is None:
        parser.print_help()
        sys.exit(-1)

    adb_cmd = "adb forward tcp:%u tcp:%u" % (options.adb_lport, options.adb_rport)
    # init adb
    print("Setting up adb for Python and ASE interation: %s" % adb_cmd)
    run_cmd(adb_cmd)

    # start socat in a separate thread
    socat_cmd = socat_cmd_str % (options.stcp_port, options.serialp_name)
    print("Starting socat with the following command: %s" % socat_cmd)
    socat_thread = threading.Thread(None, run_cmd, None, (socat_cmd,))
    socat_thread.daemon = True
    # Fixed: the thread was created but never started, so socat never ran
    # and the connect() below could not succeed.
    socat_thread.start()

    # connect our socket to the virtual serial port
    socat_socket = socket()
    socat_socket.connect(("localhost", options.stcp_port))

    # init the android ASE interface and the location facade manager
    droid = init_android_instance(options.adb_lport)

    # Now lets rock and roll
    while 1:
        try:
            event = droid.receiveEvent()
            # Drain any non-location events from the queue.
            while event.result is not None and event.result['name'] != "location":
                event = droid.receiveEvent()
            if event.result is None:
                # Fixed: previously fell through and indexed a None result.
                continue
            nmea_sentence = create_gpgll_sentence(event.result)
            # Fixed: the old print used a comma where a %-format was intended.
            print("Posting the following NMEA Sentence: %s" % nmea_sentence)
            # Fixed: Python 3 sockets require bytes, not str.
            socat_socket.send(nmea_sentence.encode("ascii"))
        except KeyboardInterrupt:
            print("Exitting the loop.")
            break
        except Exception:
            # Fixed: was sys.sys.exc_info (AttributeError); also narrowed the
            # bare except so KeyboardInterrupt handling above still works.
            print("The following Exception Occurred %s" % str(sys.exc_info()[0]))
# dont bother stopping socat
# when the script dies the thread will terminate | deeso/python_scrirpts | convert_android_result_to_gpgll.py | Python | apache-2.0 | 3,598 | [
"ASE"
] | bafcfd43fe86764babf655f2169c01bb5ccf80ff620a8c4bb67ff4333db48414 |
# AUTHOR: Diego Paris drp2121@columbia.edu
import os
from Bio.Blast import NCBIXML as xmlparse
# searchSpecies must be a string, the name of the species being searched for as reported by the classifier
# xmls must be an array of absolute file paths to .xmls
# speciesDict must be the output of the xmls having been run through the classifier with mode 1
# Returns dictionary with confusion matrix titles (strings) as keys
# number of files in each category as values
def getConfusionMatrix(searchSpecies, xmls, speciesDict):
    """Build a 2x2 confusion matrix comparing BLAST top hits to the classifier.

    Args:
        searchSpecies: species name to score, as reported by the classifier.
        xmls: absolute paths of BLAST-output .xml files.
        speciesDict: classifier output (mode 1): species -> list of files.
            Files under the "Unaligned" key are excluded from the matrix.

    Returns:
        Dict mapping the four human-readable category labels to counts.
    """
    # Map species name -> list of xml files whose top BLAST hit was that species.
    topSpecies = {}
    for xmlfile in xmls:
        # 'with' guarantees the handle is closed (it previously leaked);
        # parsing happens inside the block because parse() is lazy.
        with open(xmlfile) as xmlhandle:
            for blast_rec in xmlparse.parse(xmlhandle):
                # Top hit = maximum-confidence alignment as reported by BLAST.
                if len(blast_rec.alignments) == 0:
                    continue
                tophit = blast_rec.alignments[0]
                # Species name is the last '|'-separated field, up to a comma.
                description = str(tophit.hit_def).split("|")
                names = description[-1].split(",")[0].split()
                # Correct for known typos/inconsistencies in the dataset.
                if ":" in names[0]:
                    names.pop(0)
                if names[0] == "Pig":
                    name = "Sus scrofa"
                elif names[0] in ("Atlantic", "Altantic"):
                    name = "Salmo salar"
                elif names[0] == "Zebrafish":
                    name = "Danio rerio"
                else:
                    name = names[0] + " " + names[1]
                files = topSpecies.setdefault(name, [])
                if xmlfile not in files:
                    files.append(xmlfile)

    # sample -> [classifier species, BLAST top-hit species]
    tuplesDict = {}
    for species in speciesDict:
        if species == "Unaligned":
            continue
        for sample in speciesDict[species]:
            tuplesDict[sample] = [species]
    for species in topSpecies:
        for sample in topSpecies[species]:
            # Guard: a BLAST hit for a sample the classifier left unaligned
            # previously raised KeyError.
            if sample in tuplesDict:
                tuplesDict[sample].append(species)

    # Labels for the confusion matrix.
    TP = "Is " + searchSpecies + " in both BLAST and our classifier"
    TN = "Is not " + searchSpecies + " in both BLAST and our classifier"
    FP = "Is " + searchSpecies + " in BLAST but not our classifier"
    FN = "Is not " + searchSpecies + " in BLAST but is in our classifier"
    confusionMatrix = dict.fromkeys([TP, TN, FP, FN], 0)

    for sample, calls in tuplesDict.items():
        if len(calls) < 2:
            # No BLAST top hit for this sample; previously an IndexError.
            continue
        classifier_call, blast_call = calls[0], calls[1]
        if classifier_call == searchSpecies:
            if blast_call == searchSpecies:
                confusionMatrix[TP] += 1
            else:
                confusionMatrix[FN] += 1
        else:
            if blast_call == searchSpecies:
                confusionMatrix[FP] += 1
            else:
                confusionMatrix[TN] += 1
    return confusionMatrix
| blt2114/gen_snacks | Assignment_2/question3_pt1/confusionMat.py | Python | gpl-2.0 | 2,828 | [
"BLAST"
] | 23c9e9b97bb1da846f1e2e57826542cc50c7d4db82071ffd0a83255d6793771c |
import logging
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.dqn.dqn import GenericOffPolicyTrainer
from ray.rllib.agents.ddpg.ddpg_tf_policy import DDPGTFPolicy
logger = logging.getLogger(__name__)
# Default hyperparameter configuration for the DDPG trainer, merged over the
# framework-wide defaults by with_common_config().  Grouped by concern below.
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # === Twin Delayed DDPG (TD3) and Soft Actor-Critic (SAC) tricks ===
    # TD3: https://spinningup.openai.com/en/latest/algorithms/td3.html
    # In addition to settings below, you can use "exploration_noise_type" and
    # "exploration_gauss_act_noise" to get IID Gaussian exploration noise
    # instead of OU exploration noise.
    # twin Q-net
    "twin_q": False,
    # delayed policy update
    "policy_delay": 1,
    # target policy smoothing
    # (this also replaces OU exploration noise with IID Gaussian exploration
    # noise, for now)
    "smooth_target_policy": False,
    # gaussian stddev of target action noise for smoothing
    "target_noise": 0.2,
    # target noise limit (bound)
    "target_noise_clip": 0.5,

    # === Evaluation ===
    # Evaluate with epsilon=0 every `evaluation_interval` training iterations.
    # The evaluation stats will be reported under the "evaluation" metric key.
    # Note that evaluation is currently not parallelized, and that for Ape-X
    # metrics are already only reported for the lowest epsilon workers.
    "evaluation_interval": None,
    # Number of episodes to run per evaluation period.
    "evaluation_num_episodes": 10,

    # === Model ===
    # Apply a state preprocessor with spec given by the "model" config option
    # (like other RL algorithms). This is mostly useful if you have a weird
    # observation shape, like an image. Disabled by default.
    "use_state_preprocessor": False,
    # Postprocess the policy network model output with these hidden layers. If
    # use_state_preprocessor is False, then these will be the *only* hidden
    # layers in the network.
    "actor_hiddens": [400, 300],
    # Hidden layers activation of the postprocessing stage of the policy
    # network
    "actor_hidden_activation": "relu",
    # Postprocess the critic network model output with these hidden layers;
    # again, if use_state_preprocessor is True, then the state will be
    # preprocessed by the model specified with the "model" config option first.
    "critic_hiddens": [400, 300],
    # Hidden layers activation of the postprocessing state of the critic.
    "critic_hidden_activation": "relu",
    # N-step Q learning
    "n_step": 1,

    # === Exploration ===
    "exploration_config": {
        # DDPG uses OrnsteinUhlenbeck (stateful) noise to be added to NN-output
        # actions (after a possible pure random phase of n timesteps).
        "type": "OrnsteinUhlenbeckNoise",
        # For how many timesteps should we return completely random actions,
        # before we start adding (scaled) noise?
        "random_timesteps": 1000,
        # The OU-base scaling factor to always apply to action-added noise.
        "ou_base_scale": 0.1,
        # The OU theta param.
        "ou_theta": 0.15,
        # The OU sigma param.
        "ou_sigma": 0.2,
        # The initial noise scaling factor.
        "initial_scale": 1.0,
        # The final noise scaling factor.
        "final_scale": 1.0,
        # Timesteps over which to anneal scale (from initial to final values).
        "scale_timesteps": 10000,
    },
    # Number of env steps to optimize for before returning
    "timesteps_per_iteration": 1000,
    # Extra configuration that disables exploration.
    "evaluation_config": {
        "explore": False
    },

    # === Replay buffer ===
    # Size of the replay buffer. Note that if async_updates is set, then
    # each worker will have a replay buffer of this size.
    "buffer_size": 50000,
    # If True prioritized replay buffer will be used.
    "prioritized_replay": True,
    # Alpha parameter for prioritized replay buffer.
    "prioritized_replay_alpha": 0.6,
    # Beta parameter for sampling from prioritized replay buffer.
    "prioritized_replay_beta": 0.4,
    # Time steps over which the beta parameter is annealed.
    "prioritized_replay_beta_annealing_timesteps": 20000,
    # Final value of beta
    "final_prioritized_replay_beta": 0.4,
    # Epsilon to add to the TD errors when updating priorities.
    "prioritized_replay_eps": 1e-6,
    # Whether to LZ4 compress observations
    "compress_observations": False,
    # If set, this will fix the ratio of replayed from a buffer and learned on
    # timesteps to sampled from an environment and stored in the replay buffer
    # timesteps. Otherwise, the replay will proceed at the native ratio
    # determined by (train_batch_size / rollout_fragment_length).
    "training_intensity": None,

    # === Optimization ===
    # Learning rate for the critic (Q-function) optimizer.
    "critic_lr": 1e-3,
    # Learning rate for the actor (policy) optimizer.
    "actor_lr": 1e-3,
    # Update the target network every `target_network_update_freq` steps.
    "target_network_update_freq": 0,
    # Update the target by \tau * policy + (1-\tau) * target_policy
    "tau": 0.002,
    # If True, use huber loss instead of squared loss for critic network
    # Conventionally, no need to clip gradients if using a huber loss
    "use_huber": False,
    # Threshold of a huber loss
    "huber_threshold": 1.0,
    # Weights for L2 regularization
    "l2_reg": 1e-6,
    # If not None, clip gradients during optimization at this value
    "grad_clip": None,
    # How many steps of the model to sample before learning starts.
    "learning_starts": 1500,
    # Update the replay buffer with this many samples at once. Note that this
    # setting applies per-worker if num_workers > 1.
    "rollout_fragment_length": 1,
    # Size of a batched sampled from replay buffer for training. Note that
    # if async_updates is set, then each worker returns gradients for a
    # batch of this size.
    "train_batch_size": 256,

    # === Parallelism ===
    # Number of workers for collecting samples with. This only makes sense
    # to increase if your environment is particularly slow to sample, or if
    # you're using the Async or Ape-X optimizers.
    "num_workers": 0,
    # Whether to compute priorities on workers.
    "worker_side_prioritization": False,
    # Prevent iterations from going lower than this time span
    "min_iter_time_s": 1,
})
# __sphinx_doc_end__
# yapf: enable
def validate_config(config):
    """Validate (and patch in place) a DDPG trainer config dict.

    Raises:
        ValueError: if ``grad_clip`` is set but not strictly positive.
    """
    if config["model"]["custom_model"]:
        # A custom model implies we cannot skip preprocessing.
        logger.warning(
            "Setting use_state_preprocessor=True since a custom model "
            "was specified.")
        config["use_state_preprocessor"] = True

    grad_clip = config["grad_clip"]
    if grad_clip is not None and grad_clip <= 0.0:
        raise ValueError("`grad_clip` value must be > 0.0!")

    uses_param_noise = config["exploration_config"]["type"] == "ParameterNoise"
    if uses_param_noise and config["batch_mode"] != "complete_episodes":
        logger.warning(
            "ParameterNoise Exploration requires `batch_mode` to be "
            "'complete_episodes'. Setting batch_mode=complete_episodes.")
        config["batch_mode"] = "complete_episodes"
def get_policy_class(config):
    """Return the DDPG policy class matching ``config["framework"]``."""
    if config["framework"] == "torch":
        # Imported lazily so TF-only installs never pay the torch import cost.
        from ray.rllib.agents.ddpg.ddpg_torch_policy import DDPGTorchPolicy
        return DDPGTorchPolicy
    return DDPGTFPolicy
# Concrete DDPG trainer: the generic off-policy trainer specialized with the
# DDPG defaults, policy classes, and the config validator defined above.
DDPGTrainer = GenericOffPolicyTrainer.with_updates(
    name="DDPG",
    default_config=DEFAULT_CONFIG,
    default_policy=DDPGTFPolicy,
    get_policy_class=get_policy_class,
    validate_config=validate_config,
)
| richardliaw/ray | rllib/agents/ddpg/ddpg.py | Python | apache-2.0 | 7,642 | [
"Gaussian"
] | e7127c5702a063c17797469bfa6787ac144a1c250497f918de878df0269e771c |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
"""
This module implements various transmuter classes.
Transmuters are essentially classes that generate TransformedStructures from
various data sources. They enable the high-throughput generation of new
structures and input files.
It also includes the helper function, batch_write_vasp_input to generate an
entire directory of vasp input files for running.
"""
from six.moves import filter, map
__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 4, 2012"
import os
import re
from multiprocessing import Pool
from pymatgen.alchemy.materials import TransformedStructure
from pymatgen.io.vasp.sets import MPRelaxSet
class StandardTransmuter(object):
    """
    An example of a Transmuter object, which performs a sequence of
    transformations on many structures to generate TransformedStructures.

    .. attribute: transformed_structures

        List of all transformed structures.
    """

    def __init__(self, transformed_structures, transformations=None,
                 extend_collection=0, ncores=None):
        """
        Initializes a transmuter from an initial list of
        :class:`pymatgen.alchemy.materials.TransformedStructure`.

        Args:
            transformed_structures ([TransformedStructure]): Input transformed
                structures
            transformations ([Transformations]): New transformations to be
                applied to all structures.
            extend_collection (int): Whether to use more than one output
                structure from one-to-many transformations. extend_collection
                can be an int, which determines the maximum branching for each
                transformation.
            ncores (int): Number of cores to use for applying transformations.
                Uses multiprocessing.Pool. Default is None, which implies
                serial.
        """
        self.transformed_structures = transformed_structures
        self.ncores = ncores
        if transformations is not None:
            for trans in transformations:
                self.append_transformation(trans,
                                           extend_collection=extend_collection)

    def __getitem__(self, index):
        return self.transformed_structures[index]

    def __getattr__(self, name):
        # Delegate unknown attributes to the underlying structures and
        # return the per-structure values as a list.
        return [getattr(x, name) for x in self.transformed_structures]

    def undo_last_change(self):
        """
        Undo the last transformation in the TransformedStructure.

        Raises:
            IndexError if already at the oldest change.
        """
        for x in self.transformed_structures:
            x.undo_last_change()

    def redo_next_change(self):
        """
        Redo the last undone transformation in the TransformedStructure.

        Raises:
            IndexError if already at the latest change.
        """
        for x in self.transformed_structures:
            x.redo_next_change()

    def __len__(self):
        return len(self.transformed_structures)

    def append_transformation(self, transformation, extend_collection=False,
                              clear_redo=True):
        """
        Appends a transformation to all TransformedStructures.

        Args:
            transformation: Transformation to append
            extend_collection: Whether to use more than one output structure
                from one-to-many transformations. extend_collection can be a
                number, which determines the maximum branching for each
                transformation.
            clear_redo (bool): Whether to clear the redo list. By default,
                this is True, meaning any appends clears the history of
                undoing. However, when using append_transformation to do a
                redo, the redo list should not be cleared to allow multiple
                redos.

        Returns:
            List of booleans corresponding to initial transformed structures
            each boolean describes whether the transformation altered the
            structure
        """
        if self.ncores and transformation.use_multiprocessing:
            p = Pool(self.ncores)
            # need to condense arguments into single tuple to use map
            z = map(
                lambda x: (x, transformation, extend_collection, clear_redo),
                self.transformed_structures)
            new_tstructs = p.map(_apply_transformation, z, 1)
            self.transformed_structures = []
            for ts in new_tstructs:
                self.transformed_structures.extend(ts)
        else:
            new_structures = []
            for x in self.transformed_structures:
                new = x.append_transformation(transformation,
                                              extend_collection,
                                              clear_redo=clear_redo)
                if new is not None:
                    new_structures.extend(new)
            self.transformed_structures.extend(new_structures)

    def extend_transformations(self, transformations):
        """
        Extends a sequence of transformations to the TransformedStructure.

        Args:
            transformations: Sequence of Transformations
        """
        for t in transformations:
            self.append_transformation(t)

    def apply_filter(self, structure_filter):
        """
        Applies a structure_filter to the list of TransformedStructures
        in the transmuter.

        Args:
            structure_filter: StructureFilter to apply.
        """
        def test_transformed_structure(ts):
            return structure_filter.test(ts.final_structure)
        self.transformed_structures = list(filter(test_transformed_structure,
                                                  self.transformed_structures))
        for ts in self.transformed_structures:
            ts.append_filter(structure_filter)

    def write_vasp_input(self, **kwargs):
        """
        Batch write vasp input for a sequence of transformed structures to
        output_dir, following the format output_dir/{formula}_{number}.

        Args:
            \\*\\*kwargs: All kwargs supported by batch_write_vasp_input.
        """
        # Fix: resolved an unmerged git conflict in favor of the newer
        # **kwargs-based API, matching batch_write_vasp_input's signature.
        batch_write_vasp_input(self.transformed_structures, **kwargs)

    def set_parameter(self, key, value):
        """
        Add parameters to the transmuter. Additional parameters are stored in
        the as_dict() output.

        Args:
            key: The key for the parameter.
            value: The value for the parameter.
        """
        for x in self.transformed_structures:
            x.other_parameters[key] = value

    def add_tags(self, tags):
        """
        Add tags for the structures generated by the transmuter.

        Args:
            tags: A sequence of tags. Note that this should be a sequence of
                strings, e.g., ["My awesome structures", "Project X"].
        """
        self.set_parameter("tags", tags)

    def __str__(self):
        output = ["Current structures", "------------"]
        for x in self.transformed_structures:
            output.append(str(x.final_structure))
        return "\n".join(output)

    def append_transformed_structures(self, tstructs_or_transmuter):
        """
        Method is overloaded to accept either a list of transformed structures
        or transmuter, it which case it appends the second transmuter"s
        structures.

        Args:
            tstructs_or_transmuter: A list of transformed structures or a
                transmuter.
        """
        if isinstance(tstructs_or_transmuter, self.__class__):
            self.transformed_structures.extend(tstructs_or_transmuter
                                               .transformed_structures)
        else:
            for ts in tstructs_or_transmuter:
                assert isinstance(ts, TransformedStructure)
            self.transformed_structures.extend(tstructs_or_transmuter)

    @staticmethod
    def from_structures(structures, transformations=None, extend_collection=0):
        """
        Alternative constructor from structures rather than
        TransformedStructures.

        Args:
            structures: Sequence of structures
            transformations: New transformations to be applied to all
                structures
            extend_collection: Whether to use more than one output structure
                from one-to-many transformations. extend_collection can be a
                number, which determines the maximum branching for each
                transformation.

        Returns:
            StandardTransmuter
        """
        tstruct = [TransformedStructure(s, []) for s in structures]
        return StandardTransmuter(tstruct, transformations, extend_collection)
class CifTransmuter(StandardTransmuter):
    """
    Generates a Transmuter from a cif string, possibly containing multiple
    structures.
    """

    def __init__(self, cif_string, transformations=None, primitive=True,
                 extend_collection=False):
        """
        Generates a Transmuter from a cif string, possibly
        containing multiple structures.

        Args:
            cif_string: A string containing a cif or a series of cifs
            transformations: New transformations to be applied to all
                structures
            primitive: Whether to generate the primitive cell from the cif.
            extend_collection: Whether to use more than one output structure
                from one-to-many transformations. extend_collection can be a
                number, which determines the maximum branching for each
                transformation.
        """
        # Split the input into one chunk per "data_..." block; anything
        # before the first data block is ignored.
        structure_data = []
        reading = False
        for line in cif_string.split("\n"):
            if re.match(r"^\s*data", line):
                structure_data.append([])
                reading = True
            if reading:
                structure_data[-1].append(line)
        transformed_structures = [
            TransformedStructure.from_cif_string("\n".join(chunk), [],
                                                 primitive)
            for chunk in structure_data
        ]
        super(CifTransmuter, self).__init__(transformed_structures,
                                            transformations, extend_collection)

    @staticmethod
    def from_filenames(filenames, transformations=None, primitive=True,
                       extend_collection=False):
        """
        Generates a TransformedStructureCollection from a cif, possibly
        containing multiple structures.

        Args:
            filenames: List of strings of the cif files
            transformations: New transformations to be applied to all
                structures
            primitive: Same meaning as in __init__.
            extend_collection: Same meaning as in __init__.
        """
        contents = []
        for fname in filenames:
            with open(fname, "r") as f:
                contents.append(f.read())
        return CifTransmuter("\n".join(contents), transformations,
                             primitive=primitive,
                             extend_collection=extend_collection)
class PoscarTransmuter(StandardTransmuter):
    """
    Generates a transmuter from a sequence of POSCARs.

    Args:
        poscar_string: List of POSCAR strings
        transformations: New transformations to be applied to all
            structures.
        extend_collection: Whether to use more than one output structure
            from one-to-many transformations.
    """

    def __init__(self, poscar_string, transformations=None,
                 extend_collection=False):
        initial = TransformedStructure.from_poscar_string(poscar_string, [])
        super(PoscarTransmuter, self).__init__(
            [initial], transformations,
            extend_collection=extend_collection)

    @staticmethod
    def from_filenames(poscar_filenames, transformations=None,
                       extend_collection=False):
        """
        Convenient constructor to generates a POSCAR transmuter from a list of
        POSCAR filenames.

        Args:
            poscar_filenames: List of POSCAR filenames
            transformations: New transformations to be applied to all
                structures.
            extend_collection:
                Same meaning as in __init__.
        """
        tstructs = []
        for filename in poscar_filenames:
            with open(filename, "r") as f:
                contents = f.read()
            tstructs.append(TransformedStructure.from_poscar_string(contents,
                                                                    []))
        return StandardTransmuter(tstructs, transformations,
                                  extend_collection=extend_collection)
def batch_write_vasp_input(transformed_structures, vasp_input_set=MPRelaxSet,
                           output_dir=".", create_directory=True,
                           subfolder=None, include_cif=False, **kwargs):
    """
    Batch write vasp input for a sequence of transformed structures to
    output_dir, following the format output_dir/{group}/{formula}_{number}.

    Fix: resolved an unmerged git conflict in favor of the newer keyword-
    argument API (defaulting to MPRelaxSet and forwarding **kwargs), which
    matches StandardTransmuter.write_vasp_input.

    Args:
        transformed_structures: Sequence of TransformedStructures.
        vasp_input_set: pymatgen.io.vasp.sets.VaspInputSet that creates
            vasp input files from structures. Defaults to MPRelaxSet.
        output_dir: Directory to output files. Defaults to the current dir.
        create_directory (bool): Create the directory if not present.
            Defaults to True.
        subfolder: Function to create subdirectory name from
            transformed_structure.
            e.g., lambda x: x.other_parameters["tags"][0] to use the first
            tag.
        include_cif (bool): Boolean indication whether to output a CIF as
            well. CIF files are generally better supported in visualization
            programs.
        \\*\\*kwargs: Any further kwargs are passed through to each
            structure's write_vasp_input.
    """
    for i, s in enumerate(transformed_structures):
        formula = re.sub(r"\s+", "", s.final_structure.formula)
        if subfolder is not None:
            subdir = subfolder(s)
            dirname = os.path.join(output_dir, subdir,
                                   "{}_{}".format(formula, i))
        else:
            dirname = os.path.join(output_dir, "{}_{}".format(formula, i))
        s.write_vasp_input(vasp_input_set, dirname,
                           create_directory=create_directory, **kwargs)
        if include_cif:
            # Imported lazily to avoid the cif I/O dependency unless needed.
            from pymatgen.io.cif import CifWriter
            writer = CifWriter(s.final_structure)
            writer.write_file(os.path.join(dirname, "{}.cif".format(formula)))
def _apply_transformation(inputs):
"""
Helper method for multiprocessing of apply_transformation. Must not be
in the class so that it can be pickled.
Args:
inputs: Tuple containing the transformed structure, the transformation
to be applied, a boolean indicating whether to extend the
collection, and a boolean indicating whether to clear the redo
Returns:
List of output structures (the modified initial structure, plus
any new structures created by a one-to-many transformation)
"""
ts, transformation, extend_collection, clear_redo = inputs
new = ts.append_transformation(transformation, extend_collection,
clear_redo=clear_redo)
o = [ts]
if new:
o.extend(new)
return o
| Bismarrck/pymatgen | pymatgen/alchemy/transmuters.py | Python | mit | 16,738 | [
"VASP",
"pymatgen"
] | 1959a6bee3428b29125609a03ec3910d3a6c5921e20bac4443a45f33e319e9b7 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Airscript-ng - A script to conduct simple WiFi audits with ease.
# Copyright (C) 2018 Yudhajit N. (Sh3llcod3)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
# Contact: Please create a issue on my GitHub <https://github.com/Sh3llcod3>
#
#Import standard modules
import os
import subprocess
import time
import sys
import traceback
import csv
import sys
import atexit
#Import custom modules
import modules
import attacks
#Add some aliases for commonly used functions
ioStream = modules.ioStream  # run a shell pipeline, return its stdout as a string
bashRun = modules.bashRun  # run a shell command (output goes to the terminal)
bashReturnValue = modules.bashReturnValue  # run a shell command, return its exit status (as a string — see comparisons against '0' below)
clearTerm = modules.clearTerm  # clear the terminal screen
col = modules.col  # ANSI colour palette; switched to modules.blank_col by functions.switchColors()
#Script Version number
scriptVersion = "1.9_build_date_08/12/2018"
#Some variables which will find out some basic system info
#NOTE: each of these shells out once at import time; failures yield empty strings.
cpuModel = ioStream("cat /proc/cpuinfo | grep -i \"Model name\" | sort | uniq | awk -F ' ' {'print $4,$5,$6,$7,$8,$9,$10'}")
userName = ioStream("uname -n")
userDistro = ioStream("lsb_release -d | awk -F ':' {'print $2'} | tr -d \"\t\" ")
userKernel = ioStream("uname -r")
userArch = ioStream("uname -m")
userArchDpkg = ioStream("dpkg --print-architecture 2>/dev/null").upper()
invocationType = sys.argv[0]
#Determines a few values, useful when the -v argument is used
#scriptInvocation reconstructs the command line a user should type to re-run us.
if invocationType.lower().startswith('./'):
    scriptInvocation = ('%s' %(sys.argv[0]))
else:
    scriptInvocation = ('python3 %s' %(sys.argv[0]))
#A quick check to determine if dependencies are installed
def dependenciesInstalled():
    """Return a colourised status string describing dependency state."""
    deps_ok = modules.quickCheckDepsInstalled()
    colour, label = (col.green, "Installed") if deps_ok else (col.red, "Not installed")
    return '{}{}{}'.format(colour, label, col.endl)
#The GPLv3+ exclusion of warranty. Just in case.
def displayWarranty():
    """Print the GPLv3 warranty/redistribution banner shown at every menu."""
    banner_lines = (
        "Airscript-ng Copyright (C) 2017-2018 Sh3llcod3",
        "This program comes with ABSOLUTELY NO WARRANTY; for details visit: https://goo.gl/W1jcr5.",
        "This is free software, and you are welcome to redistribute it",
        "under certain conditions; visit: https://goo.gl/FSy6nc for details.",
    )
    for banner_line in banner_lines:
        print(" %s%s%s" %(col.red, banner_line, col.endl))
    print(" %sPlease follow all instructions that appear. Thanks for using this program.%s\n" %(col.red,col.endl))
#Create the functions class, where all the re-usable code for the main functions will reside
class functions:
#Exit the program after a functions has finshed running.
def silentExit():
clearTerm()
if modules.startNetworkManager():
bashRun("rm ./HANDSHAKES/TEMP_DUMP_* 2>/dev/null")
print("\n[{}+{}] Detected network-manager as inactive,"
" restarted it.".format(
col.green,
col.endl))
print("[{}+{}] Internet connection may take up"
" to 10 seconds to come back.\n".format(
col.green,
col.endl))
modules.normalQuit(0)
#Give the user the option to quit or head to menu.
def menuOrExit():
while True:
modules.createNewLine()
getUserChoice = input("[{0}+{2}] Head to [{1}m{2}]enu"
" or [{1}q{2}]uit? m/q >> ".format(
col.green,
col.red,
col.endl)).lower()
if getUserChoice.startswith('m'):
mainMenu()
elif getUserChoice.startswith('q'):
functions.silentExit()
#Check for missing dependencies and if there are any, fetch them.
def getDependencies():
print("{}[-] {}Checking for dependancies".format(
col.yellow_deep,
col.endl))
modules.checkDependencies()
modules.gitDeps()
print("{}[-] {}All dependancies are met."
" Make sure you have correct drivers! {}".format(
col.yellow_deep,
col.pink,
col.endl))
clearTerm()
#Downloads and builds hashcat and hashcat-utils, which are needed for GPU based cracking.
#Note: this doesn't install the Opencl-drivers, which still needs to be installed by hand.
def hashcatDownloadFunction():
try:
if not modules.connActive():
raise ConnectionError
hashcatDownloader = attacks.capObj(col)
hashcatDownloader.downloadHashcat()
except(ConnectionError):
clearTerm()
modules.createNewLine()
modules.printRed(col, "Internet connection not found!")
finally:
functions.menuOrExit()
#This function used to add the Kali-Rolling repository to the apt lists, however it was too dangerous, so its has been deprecated and removed.
#Why was it dangerous? Because it replaces Ubuntu's Coreutils with Kali's Coreutils,Display manager, and breaks GPU (OpenCL) drivers.
#Your system will refuse to boot properly if kali tools aren't installed correctly. Use Katoolin if your're on Ubuntu >> https://goo.gl/rykBwg
#This function switches the colors on/off in realtime, without changing programs or restarting.
def switchColors():
clearTerm()
global col
if col == modules.col:
col = modules.blank_col
else:
col = modules.col
#This function simply performs a full-upgrade of all system packages from apt, a nice to have add-on.
#It also pulls the latest version from GitHub as an added bonus.
def updateAptPackagesFull():
functions.getDependencies()
modules.createNewLine()
if modules.yesNo("Run apt update and full-upgrade?", col):
bashRun("apt update && apt full-upgrade -y"
" && apt autoremove -y && apt autoclean")
modules.createNewLine()
modules.stashAndPullUpdate(col)
#This will restore the /etc/apt/source.list file. It won't be needed as the option to append the kali sources has been removed.
#This mainly concerns users who used a very early build of this program. This is here for compatibility reasons.
def revertKaliSources():
clearTerm()
if bashReturnValue("ls backup-repos") == '0':
if not modules.pickYesOrNo("Restore sources.list",
"Restore repository sources list?"):
return None
bashRun("cp backup-repos/sources.list /etc/apt/sources.list")
modules.printSuccess(col,"Successfully reverted sources file back!")
bashRun("apt update")
printInfo(col,"If you don't see errors above then sources.list file is OK.")
functions.menuOrExit()
else:
modules.printRed(col, "Backup file not found!")
functions.menuOrExit()
#This will help in displaying input prompts.
def displayCurrentPlace(prompt,*args):
currentPlace = col.endl + "|" + \
modules.returnRed(col,"MENU")
BASH_PROMPT = modules.returnRed(col, " ~# ")
for i in args:
currentPlace += col.endl + "|" +\
modules.returnRed(col, i)
currentPlace += col.endl + "|(" +\
modules.returnGreen(col, prompt) +\
")" + BASH_PROMPT
return currentPlace
#A basic check to see if airscript-ng already exists in /usr/local/bin
def checkExistingSymlink():
if bashReturnValue("ls /usr/local/bin/airscript-ng") == "0":
return True
else:
return False
#Install all dependencies from GitHub instead of git.kali.org
#If dependencies are already built, remove and replace them.
def fetchGitHubDeps():
clearTerm()
try:
modules.createNewLine()
modules.printBlue(col, "This will clone aircrack-ng, reaver, pixiewps and mdk4 from GitHub.")
modules.printBlue(col, "This is not normally required, as the script will automatically get")
modules.printBlue(col, "any missing dependencies from git.kali.org. However, you can get them")
modules.printBlue(col, "from GitHub instead of git.kali.org, if you want.")
modules.createNewLine()
modules.printYellow(col, "Warning: This may result in compatibility or instability issues!")
modules.printYellow(col, "Warning: Compilation may take longer due to different procedures.")
modules.createNewLine()
if modules.yesNo("Confirm download from GitHub?", col):
modules.cloneAircrackGitHub()
modules.cloneReaverGitHub()
modules.clonePixiewpsGitHub()
modules.cloneMDK4Deps()
modules.createNewLine()
modules.printSuccess(col, "Successfully built all dependencies.")
except(KeyboardInterrupt, EOFError, Exception):
modules.createNewLine()
finally:
functions.menuOrExit()
#Delete the dependencies folder, if the user wants.
def removeResidualFiles():
clearTerm()
modules.createNewLine()
if modules.yesNo("Confirm delete all residual files?", col):
bashRun("rm ~/.airscriptNG/ -rf 2>/dev/null")
modules.printSuccess(col, "Successfully removed the files.")
functions.menuOrExit()
#Allow the use full manual control over their wireless cards.
def manualCardControl():
functions.getDependencies()
try:
interfaceMethod = attacks.interfaceObj(col)
while True:
interfaceMethod.displayMenu()
except(KeyboardInterrupt, EOFError, Exception):
modules.createNewLine()
finally:
functions.menuOrExit()
#Finish this function!
#Create the main class, which will store all the main functions of the program
class main:
#This function is responsible for hosting the evil-twin/fake AP
def EvilTwinFakeAP():
functions.getDependencies()
try:
apMethod = attacks.apObj(col)
apMethod.setInitialFiles()
apMethod.showLogo()
apMethod.selectOptions()
apMethod.setupConfigFiles()
apMethod.hostFakeAp()
apMethod.cleanupFakeAp()
except(KeyboardInterrupt, EOFError, Exception):
try:
apMethod.cleanupFakeAp()
except(KeyboardInterrupt, EOFError, Exception):
modules.createNewLine()
finally:
functions.menuOrExit()
#This is here to remind me of how XTERM handles its geometry. Interesting.
#xterm -geometry 125x35+4320 -bg '#FFFFFF' -fg '#000000' THIS MAKES IT APPEAR TOP RIGHT
#xterm -geometry 125x35+4320+7640 -bg '#FFFFFF' -fg '#000000' BOTTOM RIGHT
#xterm -geometry 125x35-4320+7640 -bg '#FFFFFF' -fg '#000000' BOTTOM LEFT
#xterm -geometry 125x35-7640 -bg '#FFFFFF' -fg '#000000' TOP LEFT
#This function will allow a user to crack a WPA/WPA2 handshake file (.cap file)
def crackCaptureFile():
functions.getDependencies()
try:
capMethod = attacks.capObj(col)
capMethod.showLogo()
modules.createNewLine()
print("{}Type [1] - Crack the WPA-HANDSHAKE with CPU".format(
col.blue_deep))
print("{}Type [2] - Crack the WPA-HANDSHAKE with GPU (Much faster)".format(
col.green_deep))
print("\n{}Type [99] - Return to menu {}".format(
col.highlight,
col.endl))
while True:
modules.createNewLine()
getOption = input(functions.displayCurrentPlace(
"ENTER CHOICE",
"PRE-EXISTING_HANDSHAKE"))
if getOption.isdigit() and int(getOption) in [1,2,99]:
break
if int(getOption) == 1:
capMethod.enumerateMissingOptions()
capMethod.cpuCrack()
elif int(getOption) == 2:
capMethod.enumerateMissingOptions()
capMethod.gpuCrack()
#FINISH THIS
except(KeyboardInterrupt, EOFError, Exception) as e:
bashRun("killall aircrack-ng 2>/dev/null")
bashRun("kill $(ps aux | grep -i hashcat |"
" awk -F ' ' {'print $2'}) 2>/dev/null")
finally:
functions.menuOrExit()
#This function used to revert a backup of the .bashrc file to its original location.
#However this was deprecated in favor of symbolic links.
#This is cleaner and easier to maintain as well as being a more efficient method.
#Now, this function removes any created symlinks from the function below.
def removeSymlink():
clearTerm()
modules.createNewLine()
modules.printBlue(col,"This option will remove the symlink created in option [7].")
modules.printBlue(col,"If you haven't used option [7], then this isn't necessary.")
modules.printBlue(col,"However, this should be safe to run.")
modules.printBlue(col,"Unless you already have another airscript-ng in /usr/local/bin.")
modules.createNewLine()
try:
if modules.yesNo("Remove the symbolic link?",col):
if functions.checkExistingSymlink():
modules.createNewLine()
modules.printSuccess(col, "Found airscript-ng in /usr/local/bin.")
if bashReturnValue("rm -f /usr/local/bin/airscript-ng") == "0":
modules.printSuccess(col, "Successfully deleted the symlink.")
modules.printGreen(col, "Typing 'airscript-ng' will no longer invoke it.")
else:
raise ValueError
else:
raise FileNotFoundError
except(FileNotFoundError):
modules.createNewLine()
modules.printYellow(col, "Symlink not found. No need to remove anything.")
except(ValueError):
modules.createNewLine()
modules.printYellow(col, "An error occured while removing the symlink.")
modules.printYellow(col, "Try running this manually: unlink /usr/local/bin/airscript-ng")
finally:
functions.menuOrExit()
#This function creates a symlink, which allows you to invoke this program like any other binary.
#Using lots of nested-ifs is far from ideal, however it's all I can think of at present.
def createSymlink():
clearTerm()
modules.createNewLine()
modules.printBlue(col,"Creating a symbolic link allows you to run airscript-ng from anywhere.")
modules.printBlue(col,"Essentially, this adds an entry in /usr/local/bin.")
modules.printBlue(col,"Next time you want to run airscript-ng, just type 'airscript-ng'.")
modules.printBlue(col,"You can run this from anywhere, any folder you want.")
modules.printBlue(col,"If you change your mind, you can always delete this using option [8].")
modules.createNewLine()
try:
if sys.argv[0].lower().startswith("./"):
FILE_NAME = sys.argv[0][2:]
elif sys.argv[0].lower().startswith("/usr/local/bin/"):
raise TypeError
else:
FILE_NAME = sys.argv[0]
if modules.yesNo("Create the symbolic link?",col):
if not functions.checkExistingSymlink():
modules.createNewLine()
modules.printSuccess(col, "Adding Symbolic link -> /usr/local/bin/airscript-ng")
if bashReturnValue("ls /usr/local/bin") == "0":
if bashReturnValue("echo $PATH | grep \"/usr/local/bin\"") == "0":
if bashReturnValue("ln -sf $(find $(pwd) -name {}) /usr/local/bin/airscript-ng".format(FILE_NAME)) == "0":
modules.printSuccess(col, "Successfully created the symlink.")
modules.printGreen(col, "Now you can type 'airscript-ng' from anywhere.")
modules.printGreen(col, "Go ahead, quit and try typing: airscript-ng")
else:
raise ValueError
else:
raise FileNotFoundError
else:
raise NotADirectoryError
else:
raise FileExistsError
except(FileExistsError):
modules.createNewLine()
modules.printYellow(col, "Symbolic link already exists. Will not overwrite.")
modules.printYellow(col, "Is this an error? You can delete the link manually.")
modules.printYellow(col, "Try running this: unlink /usr/local/bin/airscript-ng")
except(ValueError):
modules.createNewLine()
modules.printYellow(col, "An error occured while creating the symlink.")
modules.printYellow(col, "Run this manually:")
modules.printYellow(col, "ln -sf $(find $(pwd) -name {}) /usr/local/bin/airscript-ng".format(
FILE_NAME))
except(FileNotFoundError):
modules.createNewLine()
modules.printYellow(col, "Unable to find /usr/local/bin in the $PATH env variable.")
modules.printYellow(col, "If $PATH doesn't exist, then how are you running this?")
modules.printYellow(col, "Otherwise, you can add to your $PATH variable by following this guide.")
modules.printYellow(col, "Visit: https://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux-unix")
except(NotADirectoryError):
modules.createNewLine()
modules.printYellow(col, "Unable to find the /usr/local/bin directory.")
modules.printYellow(col, "Please choose a linux distro that utilises this directory.")
modules.printYellow(col, "Otherwise, I can't add the Symlink.")
except(TypeError):
modules.createNewLine()
modules.printYellow(col, "Wrong method of invocation.")
modules.printYellow(col, "You're probably running this using 'airscript-ng'.")
modules.printYellow(col, "That won't work. You'll have to run it manually for this occasion.")
modules.printYellow(col, "Please head to where Airscript-ng is stored.")
modules.printYellow(col, "Then run it manually with: ./airscript-ng")
modules.printYellow(col, "If you're confused, please consult the README.md file.")
finally:
functions.menuOrExit()
#This function is to use the reaver tool, to crack wps.
def reaver():
functions.getDependencies()
try:
reaverMethod = attacks.wpsObj(col)
reaverMethod.showLogo()
reaverMethod.getWpsTargets()
reaverMethod.parseWpsCsv()
reaverMethod.pixieDust()
reaverMethod.cleanupWpsFiles()
except(KeyboardInterrupt, EOFError, Exception) as e:
try:
reaverMethod.cleanupWpsFiles()
except(KeyboardInterrupt, EOFError, Exception):
modules.createNewLine()
finally:
functions.menuOrExit()
#This will handle Aircrack-ng, for most users this will be the go-to choice.
#Again, this isn't the most efficient way, and I will re-write this at some point.
def aircrackng():
functions.getDependencies()
try:
aircrackMethod = attacks.aircrackObj(col)
aircrackMethod.showLogo()
aircrackMethod.createTempFiles()
aircrackMethod.selectInitialCard()
aircrackMethod.gatherInitialData()
aircrackMethod.parseCsvData()
aircrackMethod.selectTargetNetwork()
aircrackMethod.callCompleteCleanup()
clearTerm()
modules.printDeepBlue(col, "{1}Did you see WPA Handshake: {0} at the top right?".format(
aircrackMethod.selectedNetworkTarget[1],
col.green_deep))
modules.printDeepBlue(col, modules.returnBold(col, "If you didn't see that,"
"then {}cleanup with CTRL+C{} and try again.".format(
col.red_deep, col.endl_deep)))
decryptHandshake = attacks.capObj(col, aircrackMethod.captureFilePath)
decryptHandshake.enumerateMissingOptions()
clearTerm()
modules.printDeepBlue(col, "Which method do you want to crack the Handshake with:\n")
while True:
choice_of_cpu_gpu = input("{0}Crack using{1}: CPU-->[c] (all CPUs)|"
" GPU-->[g] (GTX 9xx,10xx+/AMD ROCM GPU) {2}${1} ".format(
col.blue_deep,
col.endl,
col.green)).lower()
if choice_of_cpu_gpu.startswith(("c","g")):
break
if choice_of_cpu_gpu.startswith("c"):
decryptHandshake.cpuCrack()
elif choice_of_cpu_gpu.startswith("g"):
decryptHandshake.gpuCrack()
except(KeyboardInterrupt, EOFError, Exception) as e:
try:
aircrackMethod.callCompleteCleanup()
except(KeyboardInterrupt, EOFError, Exception):
modules.createNewLine()
finally:
functions.menuOrExit()
#This will handle mdk4. A very useful tool with quite a few interesting options.
def mdk4():
functions.getDependencies()
try:
mdkMethod = attacks.mdkObj(col)
mdkMethod.showLogo()
mdkMethod.selectAttackMode()
except(KeyboardInterrupt, EOFError, Exception) as e:
try:
mdkMethod.cleanupCard()
except(KeyboardInterrupt, EOFError, Exception):
modules.createNewLine()
finally:
functions.menuOrExit()
#Define the main menu, where user will be presented with options and function of script.
def mainMenu():
try:
clearTerm()
#Check if user has root permissions.
if os.getuid() != 0:
print("{}[?]{} Please make sure you have followed the steps:\n".format(
col.yellow_deep,
col.endl))
print("\t{0}->{1} [{0}i{1}] Made script executable with '{2}sudo chmod +x {3}{1}' ".format(
col.blue,
col.endl,
col.red,
sys.argv[0]))
print("\t{0}->{1} [{0}i{1}] Ran it with '{2}sudo {3}{1}' \n".format(
col.blue,
col.endl,
col.red,
scriptInvocation))
print(modules.returnYellow(col, "\tAlternatively,"))
print("\t{0}->{1} [{0}i{1}] If you have symlinked the program, just do '{2}sudo airscript-ng{1}' \n".format(
col.blue,
col.endl,
col.red))
modules.normalQuit(1)
print("Hello {}{}{}!\n".format(col.yellow,userName,col.endl))
displayWarranty() #Display the warranty information, as recommened by the GPLv3 license.
print("{0}Your CPU{1}: {2}{3}{1}".format(col.red,col.endl,col.green,cpuModel))
print("{0}Your OS{1}: {2}{3}{1}".format(col.red,col.endl,col.green,userDistro))
print("{0}Your Kernel{1}: {2}{3}{1}".format(col.red,col.endl,col.green,userKernel))
print("{0}Your Architecture{1}: {2}{3}\\{4}{1}".format(col.red,col.endl,col.green,userArch,userArchDpkg))
print("{0}Dependencies{1}: {2}{3}{1}".format(col.red,col.endl,col.green,dependenciesInstalled()))
print("{0}Script version{1}: {2}{3}{1}".format(col.red,col.endl,col.green,scriptVersion))
#A 2d list spanning across multiple lines that stores all the info for the menu.
#Probably not the most efficient solution here, but its simple to maintain.
menuTextItemArray = \
[[col.pink_deep,'Aircrack-ng to crack WPA/WPA2','1','main.aircrackng'],
[col.blue_deep,'Reaver with pixie dust to crack WPS','2','main.reaver'],
[col.endl_deep,'Host a Evil-Twin/MITM AP to phish credentials, sniff traffic and more.','3','main.EvilTwinFakeAP'],
[col.red_deep,'Crack an existing WPA/WPA2 handshake using CPU/GPU.','4','main.crackCaptureFile'],
[col.green_deep,'Use mdk4 to create a beacon flood, denial-of-service and more.','5','main.mdk4'],
[col.yellow_deep,'Manipulate the system\'s WiFi-cards with manual control.','6','functions.manualCardControl'],
[col.blue_deep,'Download and build the dependencies from GitHub.','7','functions.fetchGitHubDeps'],
[col.yellow_deep,'Update/upgrade all system packages and this script','8','functions.updateAptPackagesFull'],
[col.green_deep,'Setup Hashcat and Hashcat-utils to use GPU for cracking','9','functions.hashcatDownloadFunction'],
[col.light_blue,'Add a symlink to invoke from anywhere','10','main.createSymlink'],
[col.pink_deep,'Delete the symlink from option [10]','11','main.removeSymlink'],
[col.black_deep,'Turn the colors on/off','12','functions.switchColors'],
[col.endl_deep,'If apt is broken, use this to fix it','13','functions.revertKaliSources'],
[col.red_deep,'Delete the folder containing dependencies.','14','functions.removeResidualFiles'],
[col.highlight+'\n','Exit \033[0m'+col.endl,'99','functions.silentExit']]
#The menu, in short.
print("\n%s[?] %sWhat tool would you like to use? Please run as root." %(col.yellow_deep,col.endl))
print("\n{}-----------------------------------------ATTACKS-----------------------------------------{}\n".format(
col.yellow_deep,
col.endl))
for i in range(1,int(menuTextItemArray[-2][2])+2):
print("%sType [%s] - %s" %(menuTextItemArray[i-1][0],menuTextItemArray[i-1][2],menuTextItemArray[i-1][1]))
if i == 6:
print("\n{}----------------------------------------DOWNLOADS----------------------------------------{}\n".format(
col.green_deep,
col.endl))
if i == 9:
print("\n{}--------------------------------------INSTALLATIONS--------------------------------------{}\n".format(
col.blue_deep,
col.endl))
while True:
mainMenuChoice = input('\n|%sMENU%s|(Choose an option) >>' %(col.red,col.endl))
for n in menuTextItemArray:
if mainMenuChoice == n[2]:
functionLocation = n[3].split(".")[1]
if n[3].split(".")[0] == 'main':
getattr(main,functionLocation)()
elif n[3].split(".")[0] == 'functions':
getattr(functions,functionLocation)()
mainMenu()
except(KeyboardInterrupt,EOFError):
try:
mainMenu()
except(KeyboardInterrupt,EOFError):
mainMenu()
mainMenu()
| Sh3llcod3/Aircrack-ng | airscript-ng.py | Python | gpl-3.0 | 27,707 | [
"VisIt"
] | ca8400709c4c3bc3d2c23e1a28a1018d59a1570154e9971013a96ff45646c6c6 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copyright (C) 2008 Evan Martin <martine@danga.com>
"""A git-command for integrating reviews on Rietveld."""
from distutils.version import LooseVersion
from multiprocessing.pool import ThreadPool
import base64
import collections
import glob
import httplib
import json
import logging
import optparse
import os
import Queue
import re
import stat
import sys
import tempfile
import textwrap
import time
import traceback
import urllib2
import urlparse
import webbrowser
import zlib
try:
import readline # pylint: disable=F0401,W0611
except ImportError:
pass
from third_party import colorama
from third_party import httplib2
from third_party import upload
import auth
from luci_hacks import trigger_luci_job as luci_trigger
import breakpad # pylint: disable=W0611
import clang_format
import dart_format
import fix_encoding
import gclient_utils
import git_common
from git_footers import get_footer_svn_id
import owners
import owners_finder
import presubmit_support
import rietveld
import scm
import subcommand
import subprocess2
import watchlists
__version__ = '1.0'

# Rietveld server used when codereview.settings does not provide one.
DEFAULT_SERVER = 'https://codereview.appspot.com'
# Hook run after `git cl <verb>`; %s is the verb (e.g. 'dcommit').
POSTUPSTREAM_HOOK_PATTERN = '.git/hooks/post-cl-%s'
# Presumably where a CL description is saved as a fallback — verify at usage.
DESCRIPTION_BACKUP_FILE = '~/.git_cl_description_backup'
GIT_INSTRUCTIONS_URL = 'http://code.google.com/p/chromium/wiki/UsingGit'
# Footer key used to identify a change (Gerrit-style "Change-Id:" line).
CHANGE_ID = 'Change-Id:'
# Refs that are moving aliases of another ref (both point into master).
REFS_THAT_ALIAS_TO_OTHER_REFS = {
    'refs/remotes/origin/lkgr': 'refs/remotes/origin/master',
    'refs/remotes/origin/lkcr': 'refs/remotes/origin/master',
}

# Valid extensions for files we want to lint.
DEFAULT_LINT_REGEX = r"(.*\.cpp|.*\.cc|.*\.h)"
DEFAULT_LINT_IGNORE_REGEX = r"$^"

# Shortcut since it quickly becomes redundant.
Fore = colorama.Fore

# Initialized in main()
settings = None
def DieWithError(message):
  """Print *message* to stderr and abort the process with exit status 1."""
  # sys.stderr.write is used instead of a print statement so the helper is
  # byte-identical in effect (message + newline) on both Python 2 and 3.
  sys.stderr.write('%s\n' % message)
  sys.exit(1)
def GetNoGitPagerEnv():
  """Return a copy of the environment with git's pager disabled."""
  # 'cat' is a magical git string that disables pagers on all platforms.
  no_pager_env = os.environ.copy()
  no_pager_env.update(GIT_PAGER='cat')
  return no_pager_env
def RunCommand(args, error_ok=False, error_message=None, **kwargs):
  """Run *args* via subprocess2 and return its stdout.

  On failure: returns the captured stdout when error_ok is set, otherwise
  dies with a diagnostic built from error_message / the captured output.
  """
  try:
    return subprocess2.check_output(args, shell=False, **kwargs)
  except subprocess2.CalledProcessError as e:
    logging.debug('Failed running %s', args)
    if error_ok:
      return e.stdout
    # DieWithError never returns (sys.exit).
    DieWithError(
        'Command "%s" failed.\n%s' % (
          ' '.join(args), error_message or e.stdout or ''))
def RunGit(args, **kwargs):
  """Run a git subcommand and return its stdout (see RunCommand)."""
  git_cmd = ['git'] + args
  return RunCommand(git_cmd, **kwargs)
def RunGitWithCode(args, suppress_stderr=False):
  """Run a git subcommand; return a (return code, stdout) pair."""
  stderr_target = subprocess2.VOID if suppress_stderr else sys.stderr
  try:
    out, code = subprocess2.communicate(['git'] + args,
                                        env=GetNoGitPagerEnv(),
                                        stdout=subprocess2.PIPE,
                                        stderr=stderr_target)
    return code, out[0]
  except ValueError:
    # When the subprocess fails, it returns None. That triggers a ValueError
    # when trying to unpack the return value into (out, code).
    return 1, ''
def RunGitSilent(args):
  """Run a git subcommand, suppressing stderr and ignoring the exit code."""
  _, stdout = RunGitWithCode(args, suppress_stderr=True)
  return stdout
def IsGitVersionAtLeast(min_version):
  """True iff the installed git reports a version >= *min_version*."""
  prefix = 'git version '
  reported = RunGit(['--version']).strip()
  if not reported.startswith(prefix):
    return False
  return LooseVersion(reported[len(prefix):]) >= LooseVersion(min_version)
def BranchExists(branch):
  """Return True if the specified branch resolves to a valid ref."""
  exit_code, _ = RunGitWithCode(['rev-parse', '--verify', branch],
                                suppress_stderr=True)
  return not exit_code
def ask_for_data(prompt):
  """Read a line of interactive input; exit quietly on Ctrl-C."""
  try:
    return raw_input(prompt)
  except KeyboardInterrupt:
    # Hide the exception (no traceback) and abort the whole command instead.
    sys.exit(1)
def git_set_branch_value(key, value):
  """Write branch.<current>.<key> = value into git config.

  Silently does nothing when not on a branch. Integer values are stored
  with --int so git normalises them on read-back.
  """
  branch = Changelist().GetBranch()
  if not branch:
    return
  config_cmd = ['config']
  if isinstance(value, int):
    config_cmd.append('--int')
  git_key = 'branch.%s.%s' % (branch, key)
  RunGit(config_cmd + [git_key, str(value)])
def git_get_branch_default(key, default):
  """Read branch.<current>.<key> from git config as int, or *default*.

  Returns *default* when not on a branch or the value is absent/non-integer.
  """
  branch = Changelist().GetBranch()
  if not branch:
    return default
  git_key = 'branch.%s.%s' % (branch, key)
  _, stdout = RunGitWithCode(['config', '--int', '--get', git_key])
  try:
    return int(stdout.strip())
  except ValueError:
    return default
def add_git_similarity(parser):
  """Add --similarity/--find-copies options and wrap parser.parse_args.

  The wrapper persists the chosen values into per-branch git config
  (git-cl-similarity / git-find-copies) and reloads them as defaults on
  later invocations.
  """
  parser.add_option(
      '--similarity', metavar='SIM', type='int', action='store',
      help='Sets the percentage that a pair of files need to match in order to'
           ' be considered copies (default 50)')
  parser.add_option(
      '--find-copies', action='store_true',
      help='Allows git to look for copies.')
  parser.add_option(
      '--no-find-copies', action='store_false', dest='find_copies',
      help='Disallows git from looking for copies.')

  # Monkeypatch parse_args so every caller transparently gets the
  # load-from/save-to git-config behaviour.
  old_parser_args = parser.parse_args
  def Parse(args):
    options, args = old_parser_args(args)

    if options.similarity is None:
      options.similarity = git_get_branch_default('git-cl-similarity', 50)
    else:
      print('Note: Saving similarity of %d%% in git config.'
            % options.similarity)
      git_set_branch_value('git-cl-similarity', options.similarity)

    # Clamp to a valid percentage.
    options.similarity = max(0, min(options.similarity, 100))

    if options.find_copies is None:
      options.find_copies = bool(
          git_get_branch_default('git-find-copies', True))
    else:
      git_set_branch_value('git-find-copies', int(options.find_copies))

    print('Using %d%% similarity for rename/copy detection. '
          'Override with --similarity.' % options.similarity)

    return options, args
  parser.parse_args = Parse
def _get_properties_from_options(options):
  """Build a dict from repeated --properties KEY=VAL flags.

  Values that parse as JSON are decoded; anything else stays a raw string.
  """
  properties = dict(flag.split('=', 1) for flag in options.properties)
  for key, raw_value in properties.iteritems():
    try:
      properties[key] = json.loads(raw_value)
    except ValueError:
      pass  # If a value couldn't be evaluated, treat it as a string.
  return properties
def _prefix_master(master):
"""Convert user-specified master name to full master name.
Buildbucket uses full master name(master.tryserver.chromium.linux) as bucket
name, while the developers always use shortened master name
(tryserver.chromium.linux) by stripping off the prefix 'master.'. This
function does the conversion for buildbucket migration.
"""
prefix = 'master.'
if master.startswith(prefix):
return master
return '%s%s' % (prefix, master)
def trigger_luci_job(changelist, masters, options):
  """Send a job to run on LUCI.

  Schedules one build per builder in *masters* (a master -> {builder: tests}
  mapping) against the CL's issue/patchset at HEAD. *options* is currently
  unused here.
  """
  issue_props = changelist.GetIssueProperties()
  issue = changelist.GetIssue()
  patchset = changelist.GetMostRecentPatchset()
  for builders_and_tests in sorted(masters.itervalues()):
    for builder in sorted(builders_and_tests.iterkeys()):
      luci_trigger.trigger(
          builder, 'HEAD', issue, patchset, issue_props['project'])
def trigger_try_jobs(auth_config, changelist, options, masters, category):
  """Schedule try jobs on Buildbucket for every builder in *masters*.

  Builds one batch PUT request containing a build per (master, builder)
  pair, authenticated against the Rietveld host, and retries up to three
  times on 5xx responses. Raises BuildbucketResponseException on an error
  payload or invalid JSON, httplib2.HttpLib2Error on persistent failure.
  """
  rietveld_url = settings.GetDefaultServerUrl()
  rietveld_host = urlparse.urlparse(rietveld_url).hostname
  authenticator = auth.get_authenticator_for_host(rietveld_host, auth_config)
  http = authenticator.authorize(httplib2.Http())
  http.force_exception_to_status_code = True
  issue_props = changelist.GetIssueProperties()
  issue = changelist.GetIssue()
  patchset = changelist.GetMostRecentPatchset()
  properties = _get_properties_from_options(options)

  buildbucket_put_url = (
      'https://{hostname}/_ah/api/buildbucket/v1/builds/batch'.format(
          hostname=options.buildbucket_host))
  # Tag that ties every scheduled build back to this Rietveld patchset.
  buildset = 'patch/rietveld/{hostname}/{issue}/{patch}'.format(
      hostname=rietveld_host,
      issue=issue,
      patch=patchset)

  batch_req_body = {'builds': []}
  print_text = []
  print_text.append('Tried jobs on:')
  for master, builders_and_tests in sorted(masters.iteritems()):
    print_text.append('Master: %s' % master)
    bucket = _prefix_master(master)
    for builder, tests in sorted(builders_and_tests.iteritems()):
      print_text.append(' %s: %s' % (builder, tests))
      parameters = {
          'builder_name': builder,
          'changes': [{
              'author': {'email': issue_props['owner_email']},
              'revision': options.revision,
          }],
          'properties': {
              'category': category,
              'issue': issue,
              'master': master,
              'patch_project': issue_props['project'],
              'patch_storage': 'rietveld',
              'patchset': patchset,
              'reason': options.name,
              'rietveld': rietveld_url,
              'testfilter': tests,
          },
      }
      # User-supplied --properties override/extend the defaults above.
      if properties:
        parameters['properties'].update(properties)
      if options.clobber:
        parameters['properties']['clobber'] = True
      batch_req_body['builds'].append(
          {
              'bucket': bucket,
              'parameters_json': json.dumps(parameters),
              'tags': ['builder:%s' % builder,
                       'buildset:%s' % buildset,
                       'master:%s' % master,
                       'user_agent:git_cl_try']
          }
      )

  for try_count in xrange(3):
    response, content = http.request(
        buildbucket_put_url,
        'PUT',
        body=json.dumps(batch_req_body),
        headers={'Content-Type': 'application/json'},
    )
    content_json = None
    try:
      content_json = json.loads(content)
    except ValueError:
      pass

    # Buildbucket could return an error even if status==200.
    if content_json and content_json.get('error'):
      msg = 'Error in response. Code: %d. Reason: %s. Message: %s.' % (
          content_json['error'].get('code', ''),
          content_json['error'].get('reason', ''),
          content_json['error'].get('message', ''))
      raise BuildbucketResponseException(msg)

    if response.status == 200:
      if not content_json:
        raise BuildbucketResponseException(
            'Buildbucket returns invalid json content: %s.\n'
            'Please file bugs at crbug.com, label "Infra-BuildBucket".' %
            content)
      break
    if response.status < 500 or try_count >= 2:
      raise httplib2.HttpLib2Error(content)

    # status >= 500 means transient failures.
    logging.debug('Transient errors when triggering tryjobs. Will retry.')
    # Linear backoff: 0.5s, 2.0s between attempts.
    time.sleep(0.5 + 1.5*try_count)

  print '\n'.join(print_text)
def MatchSvnGlob(url, base_url, glob_spec, allow_wildcards):
  """Return the corresponding git ref if |base_url| together with |glob_spec|
  matches the full |url|.

  |glob_spec| has the form "fetch_suburl:as_ref". If |allow_wildcards| is
  true, |glob_spec| can contain wildcards (see below); a trailing '*' in
  as_ref is substituted with the matched path segment. Returns None when
  nothing matches.
  """
  fetch_suburl, as_ref = glob_spec.split(':')
  if allow_wildcards:
    # One wildcard segment: either '*' or a '{a,b,c}' alternation, with
    # optional literal path before (group 1) and after (group 3).
    glob_match = re.match('(.+/)?(\*|{[^/]*})(/.+)?', fetch_suburl)
    if glob_match:
      # Parse specs like "branches/*/src:refs/remotes/svn/*" or
      # "branches/{472,597,648}/src:refs/remotes/svn/*".
      branch_re = re.escape(base_url)
      if glob_match.group(1):
        branch_re += '/' + re.escape(glob_match.group(1))
      wildcard = glob_match.group(2)
      if wildcard == '*':
        branch_re += '([^/]*)'
      else:
        # Escape and replace surrounding braces with parentheses and commas
        # with pipe symbols, turning {a,b} into the regex group (a|b).
        wildcard = re.escape(wildcard)
        wildcard = re.sub('^\\\\{', '(', wildcard)
        wildcard = re.sub('\\\\,', '|', wildcard)
        wildcard = re.sub('\\\\}$', ')', wildcard)
        branch_re += wildcard
      if glob_match.group(3):
        branch_re += re.escape(glob_match.group(3))
      match = re.match(branch_re, url)
      if match:
        # Substitute the matched segment for a trailing '*' in the ref.
        return re.sub('\*$', match.group(1), as_ref)

  # Parse specs like "trunk/src:refs/remotes/origin/trunk".
  if fetch_suburl:
    full_url = base_url + '/' + fetch_suburl
  else:
    full_url = base_url
  if full_url == url:
    return as_ref
  return None
def print_stats(similarity, find_copies, args):
  """Shows the user a 'git diff --stat' summary of the change."""
  # --no-ext-diff is broken in some versions of Git, so try to work around
  # this by overriding the environment (but there is still a problem if the
  # git config key "diff.external" is used).
  diff_env = GetNoGitPagerEnv()
  diff_env.pop('GIT_EXTERNAL_DIFF', None)

  if find_copies:
    rename_opts = ['--find-copies-harder', '-l100000', '-C%s' % similarity]
  else:
    rename_opts = ['-M%s' % similarity]

  try:
    out_fd = sys.stdout.fileno()
  except AttributeError:
    # sys.stdout may have been replaced by an object without a real fd.
    out_fd = None

  cmd = ['git', 'diff', '--no-ext-diff', '--stat'] + rename_opts + args
  return subprocess2.call(cmd, stdout=out_fd, env=diff_env)
class BuildbucketResponseException(Exception):
  """Raised when a buildbucket response reports an error or is not valid JSON."""
  pass
class Settings(object):
  """Lazily-populated, git-config-backed settings for the current checkout.

  Most getters read a 'rietveld.*' git config key and cache the result on
  this object.  The first config read triggers LazyUpdateIfNeeded(), which
  may refresh the git config from a codereview.settings file.
  """

  def __init__(self):
    # Caches; each is populated on first access by the matching getter.
    self.default_server = None
    self.cc = None
    self.root = None
    self.is_git_svn = None
    self.svn_branch = None
    self.tree_status_url = None
    self.viewvc_url = None
    self.updated = False
    self.is_gerrit = None
    self.git_editor = None
    self.project = None
    self.force_https_commit_url = None
    self.pending_ref_prefix = None

  def LazyUpdateIfNeeded(self):
    """Updates the settings from a codereview.settings file, if available."""
    if not self.updated:
      # The only value that actually changes the behavior is
      # autoupdate = "false". Everything else means "true".
      autoupdate = RunGit(['config', 'rietveld.autoupdate'],
                          error_ok=True
                          ).strip().lower()

      cr_settings_file = FindCodereviewSettingsFile()
      if autoupdate != 'false' and cr_settings_file:
        LoadCodereviewSettingsFromFile(cr_settings_file)
        # set updated to True to avoid infinite calling loop
        # through DownloadHooks
        self.updated = True
        DownloadHooks(False)
      self.updated = True

  def GetDefaultServerUrl(self, error_ok=False):
    """Returns the rietveld server URL (cached, upgraded to https).

    With error_ok=False, a missing setting is re-read with an error message
    attached so the user is told to run "git cl config".
    """
    if not self.default_server:
      self.LazyUpdateIfNeeded()
      self.default_server = gclient_utils.UpgradeToHttps(
          self._GetRietveldConfig('server', error_ok=True))
      if error_ok:
        return self.default_server
      if not self.default_server:
        error_message = ('Could not find settings file. You must configure '
                         'your review setup by running "git cl config".')
        self.default_server = gclient_utils.UpgradeToHttps(
            self._GetRietveldConfig('server', error_message=error_message))
    return self.default_server

  @staticmethod
  def GetRelativeRoot():
    # 'git rev-parse --show-cdup' prints the relative path up to the repo root.
    return RunGit(['rev-parse', '--show-cdup']).strip()

  def GetRoot(self):
    """Returns the absolute path of the repository root (cached)."""
    if self.root is None:
      self.root = os.path.abspath(self.GetRelativeRoot())
    return self.root

  def GetIsGitSvn(self):
    """Return true if this repo looks like it's using git-svn."""
    if self.is_git_svn is None:
      if self.GetPendingRefPrefix():
        # If PENDING_REF_PREFIX is set then it's a pure git repo no matter what.
        self.is_git_svn = False
      else:
        # If you have any "svn-remote.*" config keys, we think you're using svn.
        self.is_git_svn = RunGitWithCode(
            ['config', '--local', '--get-regexp', r'^svn-remote\.'])[0] == 0
    return self.is_git_svn

  def GetSVNBranch(self):
    """Returns the svn branch this checkout is based on (cached).

    Dies if the repo is not git-svn or if no branch can be determined.
    """
    if self.svn_branch is None:
      if not self.GetIsGitSvn():
        DieWithError('Repo doesn\'t appear to be a git-svn repo.')

      # Try to figure out which remote branch we're based on.
      # Strategy:
      # 1) iterate through our branch history and find the svn URL.
      # 2) find the svn-remote that fetches from the URL.

      # regexp matching the git-svn line that contains the URL.
      git_svn_re = re.compile(r'^\s*git-svn-id: (\S+)@', re.MULTILINE)

      # We don't want to go through all of history, so read a line from the
      # pipe at a time.
      # The -100 is an arbitrary limit so we don't search forever.
      cmd = ['git', 'log', '-100', '--pretty=medium']
      proc = subprocess2.Popen(cmd, stdout=subprocess2.PIPE,
                               env=GetNoGitPagerEnv())
      url = None
      for line in proc.stdout:
        match = git_svn_re.match(line)
        if match:
          url = match.group(1)
          proc.stdout.close()  # Cut pipe.
          break

      if url:
        svn_remote_re = re.compile(r'^svn-remote\.([^.]+)\.url (.*)$')
        remotes = RunGit(['config', '--get-regexp',
                          r'^svn-remote\..*\.url']).splitlines()
        for remote in remotes:
          match = svn_remote_re.match(remote)
          if match:
            remote = match.group(1)
            base_url = match.group(2)
            rewrite_root = RunGit(
                ['config', 'svn-remote.%s.rewriteRoot' % remote],
                error_ok=True).strip()
            if rewrite_root:
              base_url = rewrite_root
            # Try the fetch spec (no wildcards), then branches and tags
            # (wildcards allowed), stopping at the first match.
            fetch_spec = RunGit(
                ['config', 'svn-remote.%s.fetch' % remote],
                error_ok=True).strip()
            if fetch_spec:
              self.svn_branch = MatchSvnGlob(url, base_url, fetch_spec, False)
              if self.svn_branch:
                break
            branch_spec = RunGit(
                ['config', 'svn-remote.%s.branches' % remote],
                error_ok=True).strip()
            if branch_spec:
              self.svn_branch = MatchSvnGlob(url, base_url, branch_spec, True)
              if self.svn_branch:
                break
            tag_spec = RunGit(
                ['config', 'svn-remote.%s.tags' % remote],
                error_ok=True).strip()
            if tag_spec:
              self.svn_branch = MatchSvnGlob(url, base_url, tag_spec, True)
              if self.svn_branch:
                break

      if not self.svn_branch:
        DieWithError('Can\'t guess svn branch -- try specifying it on the '
                     'command line')

    return self.svn_branch

  def GetTreeStatusUrl(self, error_ok=False):
    """Returns the 'tree-status-url' rietveld setting (cached)."""
    if not self.tree_status_url:
      error_message = ('You must configure your tree status URL by running '
                       '"git cl config".')
      self.tree_status_url = self._GetRietveldConfig(
          'tree-status-url', error_ok=error_ok, error_message=error_message)
    return self.tree_status_url

  def GetViewVCUrl(self):
    """Returns the 'viewvc-url' rietveld setting (cached)."""
    if not self.viewvc_url:
      self.viewvc_url = self._GetRietveldConfig('viewvc-url', error_ok=True)
    return self.viewvc_url

  def GetBugPrefix(self):
    """Returns the 'bug-prefix' rietveld setting."""
    return self._GetRietveldConfig('bug-prefix', error_ok=True)

  def GetIsSkipDependencyUpload(self, branch_name):
    """Returns true if specified branch should skip dep uploads."""
    return self._GetBranchConfig(branch_name, 'skip-deps-uploads',
                                 error_ok=True)

  def GetRunPostUploadHook(self):
    """Returns True iff 'run-post-upload-hook' is set to the string "True"."""
    run_post_upload_hook = self._GetRietveldConfig(
        'run-post-upload-hook', error_ok=True)
    return run_post_upload_hook == "True"

  def GetDefaultCCList(self):
    """Returns the 'cc' rietveld setting (default CC list)."""
    return self._GetRietveldConfig('cc', error_ok=True)

  def GetDefaultPrivateFlag(self):
    """Returns the 'private' rietveld setting (default private flag)."""
    return self._GetRietveldConfig('private', error_ok=True)

  def GetIsGerrit(self):
    """Return true if this repo is associated with gerrit code review system."""
    if self.is_gerrit is None:
      self.is_gerrit = self._GetConfig('gerrit.host', error_ok=True)
    return self.is_gerrit

  def GetGitEditor(self):
    """Return the editor specified in the git config, or None if none is."""
    if self.git_editor is None:
      self.git_editor = self._GetConfig('core.editor', error_ok=True)
    return self.git_editor or None

  def GetLintRegex(self):
    """Returns the 'cpplint-regex' setting, falling back to the default."""
    return (self._GetRietveldConfig('cpplint-regex', error_ok=True) or
            DEFAULT_LINT_REGEX)

  def GetLintIgnoreRegex(self):
    """Returns the 'cpplint-ignore-regex' setting, or the default."""
    return (self._GetRietveldConfig('cpplint-ignore-regex', error_ok=True) or
            DEFAULT_LINT_IGNORE_REGEX)

  def GetProject(self):
    """Returns the 'project' rietveld setting (cached)."""
    if not self.project:
      self.project = self._GetRietveldConfig('project', error_ok=True)
    return self.project

  def GetForceHttpsCommitUrl(self):
    """Returns the 'force-https-commit-url' rietveld setting (cached)."""
    if not self.force_https_commit_url:
      self.force_https_commit_url = self._GetRietveldConfig(
          'force-https-commit-url', error_ok=True)
    return self.force_https_commit_url

  def GetPendingRefPrefix(self):
    """Returns the 'pending-ref-prefix' rietveld setting (cached)."""
    if not self.pending_ref_prefix:
      self.pending_ref_prefix = self._GetRietveldConfig(
          'pending-ref-prefix', error_ok=True)
    return self.pending_ref_prefix

  def _GetRietveldConfig(self, param, **kwargs):
    # Reads the 'rietveld.<param>' git config key.
    return self._GetConfig('rietveld.' + param, **kwargs)

  def _GetBranchConfig(self, branch_name, param, **kwargs):
    # Reads the 'branch.<branch_name>.<param>' git config key.
    return self._GetConfig('branch.' + branch_name + '.' + param, **kwargs)

  def _GetConfig(self, param, **kwargs):
    # All config reads funnel through here so the settings file is applied
    # before the first lookup.
    self.LazyUpdateIfNeeded()
    return RunGit(['config', param], **kwargs).strip()
def ShortBranchName(branch):
  """Strips the 'refs/heads/' prefix, e.g. 'refs/heads/foo' -> 'foo'."""
  heads_prefix = 'refs/heads/'
  return branch.replace(heads_prefix, '')
class Changelist(object):
  """Represents the change list (CL) associated with a git branch.

  Lazily loads and caches the issue number, patchset, description and
  rietveld server from the branch's git config and the rietveld server.
  """

  def __init__(self, branchref=None, issue=None, auth_config=None):
    # Poke settings so we get the "configure your server" message if necessary.
    global settings
    if not settings:
      # Happens when git_cl.py is used as a utility library.
      settings = Settings()
      settings.GetDefaultServerUrl()
    self.branchref = branchref
    if self.branchref:
      self.branch = ShortBranchName(self.branchref)
    else:
      self.branch = None
    self.rietveld_server = None
    self.upstream_branch = None
    self.lookedup_issue = False
    self.issue = issue or None
    self.has_description = False
    self.description = None
    self.lookedup_patchset = False
    self.patchset = None
    self.cc = None
    self.watchers = ()
    self._auth_config = auth_config
    self._props = None
    self._remote = None
    self._rpc_server = None

  @property
  def auth_config(self):
    # Authentication configuration handed to the rietveld RPC server.
    return self._auth_config

  def GetCCList(self):
    """Return the users cc'd on this CL.

    Return is a string suitable for passing to gcl with the --cc flag.
    """
    if self.cc is None:
      base_cc = settings.GetDefaultCCList()
      more_cc = ','.join(self.watchers)
      self.cc = ','.join(filter(None, (base_cc, more_cc))) or ''
    return self.cc

  def GetCCListWithoutDefault(self):
    """Return the users cc'd on this CL excluding default ones."""
    if self.cc is None:
      self.cc = ','.join(self.watchers)
    return self.cc

  def SetWatchers(self, watchers):
    """Set the list of email addresses that should be cc'd based on the changed
    files in this CL.
    """
    self.watchers = watchers

  def GetBranch(self):
    """Returns the short branch name, e.g. 'master'."""
    if not self.branch:
      branchref = RunGit(['symbolic-ref', 'HEAD'],
                         stderr=subprocess2.VOID, error_ok=True).strip()
      if not branchref:
        # Detached HEAD: no branch to report.
        return None
      self.branchref = branchref
      self.branch = ShortBranchName(self.branchref)
    return self.branch

  def GetBranchRef(self):
    """Returns the full branch name, e.g. 'refs/heads/master'."""
    self.GetBranch()  # Poke the lazy loader.
    return self.branchref

  @staticmethod
  def FetchUpstreamTuple(branch):
    """Returns a tuple containing remote and remote ref,
       e.g. 'origin', 'refs/heads/master'
    """
    remote = '.'
    upstream_branch = RunGit(['config', 'branch.%s.merge' % branch],
                             error_ok=True).strip()
    if upstream_branch:
      remote = RunGit(['config', 'branch.%s.remote' % branch]).strip()
    else:
      upstream_branch = RunGit(['config', 'rietveld.upstream-branch'],
                               error_ok=True).strip()
      if upstream_branch:
        remote = RunGit(['config', 'rietveld.upstream-remote']).strip()
      else:
        # Fall back on trying a git-svn upstream branch.
        if settings.GetIsGitSvn():
          upstream_branch = settings.GetSVNBranch()
        else:
          # Else, try to guess the origin remote.
          remote_branches = RunGit(['branch', '-r']).split()
          if 'origin/master' in remote_branches:
            # Fall back on origin/master if it exists.
            remote = 'origin'
            upstream_branch = 'refs/heads/master'
          elif 'origin/trunk' in remote_branches:
            # Fall back on origin/trunk if it exists. Generally a shared
            # git-svn clone
            remote = 'origin'
            upstream_branch = 'refs/heads/trunk'
          else:
            DieWithError("""Unable to determine default branch to diff against.
Either pass complete "git diff"-style arguments, like
  git cl upload origin/master
or verify this branch is set up to track another (via the --track argument to
"git checkout -b ...").""")

    return remote, upstream_branch

  def GetCommonAncestorWithUpstream(self):
    """Returns the merge base of HEAD and the upstream branch."""
    upstream_branch = self.GetUpstreamBranch()
    if not BranchExists(upstream_branch):
      DieWithError('The upstream for the current branch (%s) does not exist '
                   'anymore.\nPlease fix it and try again.' % self.GetBranch())
    return git_common.get_or_create_merge_base(self.GetBranch(),
                                               upstream_branch)

  def GetUpstreamBranch(self):
    """Returns the upstream ref, mapped into refs/remotes/* (cached)."""
    if self.upstream_branch is None:
      remote, upstream_branch = self.FetchUpstreamTuple(self.GetBranch())
      # Compare by value: the old "is not '.'" identity check only worked by
      # accident of CPython string interning.
      if remote != '.':
        upstream_branch = upstream_branch.replace('refs/heads/',
                                                  'refs/remotes/%s/' % remote)
      upstream_branch = upstream_branch.replace('refs/branch-heads/',
                                                'refs/remotes/branch-heads/')
      self.upstream_branch = upstream_branch
    return self.upstream_branch

  def GetRemoteBranch(self):
    """Returns a (remote, ref) tuple for this CL's remote branch (cached)."""
    if not self._remote:
      remote, branch = None, self.GetBranch()
      seen_branches = set()
      # Walk the upstream chain until we hit a real remote (or loop).
      while branch not in seen_branches:
        seen_branches.add(branch)
        remote, branch = self.FetchUpstreamTuple(branch)
        branch = ShortBranchName(branch)
        if remote != '.' or branch.startswith('refs/remotes'):
          break
      else:
        remotes = RunGit(['remote'], error_ok=True).split()
        if len(remotes) == 1:
          remote, = remotes
        elif 'origin' in remotes:
          remote = 'origin'
          # Log |remote| (the default just chosen); the old code passed
          # self._remote, which is still None here, making the message
          # read 'defaulting to "None"'.
          logging.warning('Could not determine which remote this change is '
                          'associated with, so defaulting to "%s". This may '
                          'not be what you want. You may prevent this message '
                          'by running "git svn info" as documented here: %s',
                          remote,
                          GIT_INSTRUCTIONS_URL)
        else:
          logging.warn('Could not determine which remote this change is '
                       'associated with. You may prevent this message by '
                       'running "git svn info" as documented here: %s',
                       GIT_INSTRUCTIONS_URL)
        branch = 'HEAD'
      if branch.startswith('refs/remotes'):
        self._remote = (remote, branch)
      elif branch.startswith('refs/branch-heads/'):
        self._remote = (remote, branch.replace('refs/', 'refs/remotes/'))
      else:
        self._remote = (remote, 'refs/remotes/%s/%s' % (remote, branch))
    return self._remote

  def GitSanityChecks(self, upstream_git_obj):
    """Checks git repo status and ensures diff is from local commits."""

    if upstream_git_obj is None:
      if self.GetBranch() is None:
        print >> sys.stderr, (
            'ERROR: unable to determine current branch (detached HEAD?)')
      else:
        print >> sys.stderr, (
            'ERROR: no upstream branch')
      return False

    # Verify the commit we're diffing against is in our current branch.
    upstream_sha = RunGit(['rev-parse', '--verify', upstream_git_obj]).strip()
    common_ancestor = RunGit(['merge-base', upstream_sha, 'HEAD']).strip()
    if upstream_sha != common_ancestor:
      print >> sys.stderr, (
          'ERROR: %s is not in the current branch. You may need to rebase '
          'your tracking branch' % upstream_sha)
      return False

    # List the commits inside the diff, and verify they are all local.
    commits_in_diff = RunGit(
        ['rev-list', '^%s' % upstream_sha, 'HEAD']).splitlines()
    code, remote_branch = RunGitWithCode(['config', 'gitcl.remotebranch'])
    remote_branch = remote_branch.strip()
    if code != 0:
      _, remote_branch = self.GetRemoteBranch()

    commits_in_remote = RunGit(
        ['rev-list', '^%s' % upstream_sha, remote_branch]).splitlines()

    common_commits = set(commits_in_diff) & set(commits_in_remote)
    if common_commits:
      print >> sys.stderr, (
          'ERROR: Your diff contains %d commits already in %s.\n'
          'Run "git log --oneline %s..HEAD" to get a list of commits in '
          'the diff. If you are using a custom git flow, you can override'
          ' the reference used for this check with "git config '
          'gitcl.remotebranch <git-ref>".' % (
              len(common_commits), remote_branch, upstream_git_obj))
      return False
    return True

  def GetGitBaseUrlFromConfig(self):
    """Return the configured base URL from branch.<branchname>.baseurl.

    Returns None if it is not set.
    """
    return RunGit(['config', 'branch.%s.base-url' % self.GetBranch()],
                  error_ok=True).strip()

  def GetGitSvnRemoteUrl(self):
    """Return the configured git-svn remote URL parsed from git svn info.

    Returns None if it is not set.
    """
    # URL is dependent on the current directory.
    data = RunGit(['svn', 'info'], cwd=settings.GetRoot())
    if data:
      keys = dict(line.split(': ', 1) for line in data.splitlines()
                  if ': ' in line)
      return keys.get('URL', None)
    return None

  def GetRemoteUrl(self):
    """Return the configured remote URL, e.g. 'git://example.org/foo.git/'.

    Returns None if there is no remote.
    """
    remote, _ = self.GetRemoteBranch()
    url = RunGit(['config', 'remote.%s.url' % remote], error_ok=True).strip()

    # If URL is pointing to a local directory, it is probably a git cache.
    if os.path.isdir(url):
      url = RunGit(['config', 'remote.%s.url' % remote],
                   error_ok=True,
                   cwd=url).strip()
    return url

  def GetIssue(self):
    """Returns the issue number as a int or None if not set."""
    if self.issue is None and not self.lookedup_issue:
      issue = RunGit(['config', self._IssueSetting()], error_ok=True).strip()
      self.issue = int(issue) or None if issue else None
      self.lookedup_issue = True
    return self.issue

  def GetRietveldServer(self):
    """Returns this CL's rietveld server, falling back to the default."""
    if not self.rietveld_server:
      # If we're on a branch then get the server potentially associated
      # with that branch.
      if self.GetIssue():
        rietveld_server_config = self._RietveldServer()
        if rietveld_server_config:
          self.rietveld_server = gclient_utils.UpgradeToHttps(RunGit(
              ['config', rietveld_server_config], error_ok=True).strip())
      if not self.rietveld_server:
        self.rietveld_server = settings.GetDefaultServerUrl()
    return self.rietveld_server

  def GetIssueURL(self):
    """Get the URL for a particular issue."""
    if not self.GetIssue():
      return None
    return '%s/%s' % (self.GetRietveldServer(), self.GetIssue())

  def GetDescription(self, pretty=False):
    """Returns the CL description, fetched from rietveld on first use."""
    if not self.has_description:
      if self.GetIssue():
        issue = self.GetIssue()
        try:
          self.description = self.RpcServer().get_description(issue).strip()
        except urllib2.HTTPError as e:
          if e.code == 404:
            DieWithError(
                ('\nWhile fetching the description for issue %d, received a '
                 '404 (not found)\n'
                 'error. It is likely that you deleted this '
                 'issue on the server. If this is the\n'
                 'case, please run\n\n'
                 '    git cl issue 0\n\n'
                 'to clear the association with the deleted issue. Then run '
                 'this command again.') % issue)
          else:
            DieWithError(
                '\nFailed to fetch issue description. HTTP error %d' % e.code)
        except urllib2.URLError as e:
          print >> sys.stderr, (
              'Warning: Failed to retrieve CL description due to network '
              'failure.')
          self.description = ''

      self.has_description = True
    if pretty:
      wrapper = textwrap.TextWrapper()
      wrapper.initial_indent = wrapper.subsequent_indent = '  '
      return wrapper.fill(self.description)
    return self.description

  def GetPatchset(self):
    """Returns the patchset number as a int or None if not set."""
    if self.patchset is None and not self.lookedup_patchset:
      patchset = RunGit(['config', self._PatchsetSetting()],
                        error_ok=True).strip()
      self.patchset = int(patchset) or None if patchset else None
      self.lookedup_patchset = True
    return self.patchset

  def SetPatchset(self, patchset):
    """Set this branch's patchset. If patchset=0, clears the patchset."""
    if patchset:
      RunGit(['config', self._PatchsetSetting(), str(patchset)])
      self.patchset = patchset
    else:
      RunGit(['config', '--unset', self._PatchsetSetting()],
             stderr=subprocess2.PIPE, error_ok=True)
      self.patchset = None

  def GetMostRecentPatchset(self):
    """Returns the latest patchset number from the issue properties."""
    return self.GetIssueProperties()['patchsets'][-1]

  def GetPatchSetDiff(self, issue, patchset):
    """Fetches the raw diff for the given issue/patchset from rietveld."""
    return self.RpcServer().get(
        '/download/issue%s_%s.diff' % (issue, patchset))

  def GetIssueProperties(self):
    """Returns (and caches) the issue properties dict from rietveld."""
    if self._props is None:
      issue = self.GetIssue()
      if not issue:
        self._props = {}
      else:
        self._props = self.RpcServer().get_issue_properties(issue, True)
    return self._props

  def GetApprovingReviewers(self):
    """Returns the reviewers that approved this CL."""
    return get_approving_reviewers(self.GetIssueProperties())

  def AddComment(self, message):
    """Posts |message| as a comment on the issue."""
    return self.RpcServer().add_comment(self.GetIssue(), message)

  def SetIssue(self, issue):
    """Set this branch's issue. If issue=0, clears the issue."""
    if issue:
      self.issue = issue
      RunGit(['config', self._IssueSetting(), str(issue)])
      if self.rietveld_server:
        RunGit(['config', self._RietveldServer(), self.rietveld_server])
    else:
      current_issue = self.GetIssue()
      if current_issue:
        RunGit(['config', '--unset', self._IssueSetting()])
      self.issue = None
      self.SetPatchset(None)

  def GetChange(self, upstream_branch, author):
    """Builds a presubmit_support.GitChange for this branch's diff."""
    if not self.GitSanityChecks(upstream_branch):
      DieWithError('\nGit sanity check failure')

    root = settings.GetRelativeRoot()
    if not root:
      root = '.'
    absroot = os.path.abspath(root)

    # We use the sha1 of HEAD as a name of this change.
    name = RunGitWithCode(['rev-parse', 'HEAD'])[1].strip()
    # Need to pass a relative path for msysgit.
    try:
      files = scm.GIT.CaptureStatus([root], '.', upstream_branch)
    except subprocess2.CalledProcessError:
      DieWithError(
          ('\nFailed to diff against upstream branch %s\n\n'
           'This branch probably doesn\'t exist anymore. To reset the\n'
           'tracking branch, please run\n'
           '    git branch --set-upstream %s trunk\n'
           'replacing trunk with origin/master or the relevant branch') %
          (upstream_branch, self.GetBranch()))

    issue = self.GetIssue()
    patchset = self.GetPatchset()
    if issue:
      description = self.GetDescription()
    else:
      # If the change was never uploaded, use the log messages of all commits
      # up to the branch point, as git cl upload will prefill the description
      # with these log messages.
      args = ['log', '--pretty=format:%s%n%n%b', '%s...' % (upstream_branch)]
      description = RunGitWithCode(args)[1].strip()

    if not author:
      author = RunGit(['config', 'user.email']).strip() or None
    return presubmit_support.GitChange(
        name,
        description,
        absroot,
        files,
        issue,
        patchset,
        author,
        upstream=upstream_branch)

  def GetStatus(self):
    """Apply a rough heuristic to give a simple summary of an issue's review
    or CQ status, assuming adherence to a common workflow.

    Returns None if no issue for this branch, or one of the following keywords:
      * 'error'   - error from review tool (including deleted issues)
      * 'unsent'  - not sent for review
      * 'waiting' - waiting for review
      * 'reply'   - waiting for owner to reply to review
      * 'lgtm'    - LGTM from at least one approved reviewer
      * 'commit'  - in the commit queue
      * 'closed'  - closed
    """
    if not self.GetIssue():
      return None

    try:
      props = self.GetIssueProperties()
    except urllib2.HTTPError:
      return 'error'

    if props.get('closed'):
      # Issue is closed.
      return 'closed'
    if props.get('commit'):
      # Issue is in the commit queue.
      return 'commit'

    try:
      reviewers = self.GetApprovingReviewers()
    except urllib2.HTTPError:
      return 'error'
    if reviewers:
      # Was LGTM'ed.
      return 'lgtm'

    messages = props.get('messages') or []

    if not messages:
      # No message was sent.
      return 'unsent'
    if messages[-1]['sender'] != props.get('owner_email'):
      # Non-LGTM reply from non-owner
      return 'reply'
    return 'waiting'

  def RunHook(self, committing, may_prompt, verbose, change):
    """Calls sys.exit() if the hook fails; returns a HookResults otherwise."""
    try:
      return presubmit_support.DoPresubmitChecks(change, committing,
          verbose=verbose, output_stream=sys.stdout, input_stream=sys.stdin,
          default_presubmit=None, may_prompt=may_prompt,
          rietveld_obj=self.RpcServer())
    # 'as' form for consistency with the other handlers in this class.
    except presubmit_support.PresubmitFailure as e:
      DieWithError(
          ('%s\nMaybe your depot_tools is out of date?\n'
           'If all fails, contact maruel@') % e)

  def UpdateDescription(self, description):
    """Sets the CL description locally and on the rietveld server."""
    self.description = description
    return self.RpcServer().update_description(
        self.GetIssue(), self.description)

  def CloseIssue(self):
    """Updates the description and closes the issue."""
    return self.RpcServer().close_issue(self.GetIssue())

  def SetFlag(self, flag, value):
    """Patchset must match."""
    if not self.GetPatchset():
      DieWithError('The patchset needs to match. Send another patchset.')
    try:
      return self.RpcServer().set_flag(
          self.GetIssue(), self.GetPatchset(), flag, value)
    except urllib2.HTTPError as e:
      if e.code == 404:
        DieWithError('The issue %s doesn\'t exist.' % self.GetIssue())
      if e.code == 403:
        DieWithError(
            ('Access denied to issue %s. Maybe the patchset %s doesn\'t '
             'match?') % (self.GetIssue(), self.GetPatchset()))
      raise

  def RpcServer(self):
    """Returns an upload.RpcServer() to access this review's rietveld instance.
    """
    if not self._rpc_server:
      self._rpc_server = rietveld.CachingRietveld(
          self.GetRietveldServer(),
          self._auth_config or auth.make_auth_config())
    return self._rpc_server

  def _IssueSetting(self):
    """Return the git setting that stores this change's issue."""
    return 'branch.%s.rietveldissue' % self.GetBranch()

  def _PatchsetSetting(self):
    """Return the git setting that stores this change's most recent patchset."""
    return 'branch.%s.rietveldpatchset' % self.GetBranch()

  def _RietveldServer(self):
    """Returns the git setting that stores this change's rietveld server."""
    branch = self.GetBranch()
    if branch:
      return 'branch.%s.rietveldserver' % branch
    return None
def GetCodereviewSettingsInteractively():
  """Interactively prompts the user and persists review settings to git config."""
  # TODO(ukai): ask code review system is rietveld or gerrit?
  server = settings.GetDefaultServerUrl(error_ok=True)
  prompt = 'Rietveld server (host[:port])' + ' [%s]' % (server or DEFAULT_SERVER)
  answer = ask_for_data(prompt + ':')
  if not server and not answer:
    answer = DEFAULT_SERVER
  if answer:
    answer = gclient_utils.UpgradeToHttps(answer)
    if answer != server:
      RunGit(['config', 'rietveld.server', answer])

  def SetProperty(initial, caption, name, is_url):
    # Prompt for one value: 'x' clears the key, a changed value (possibly
    # upgraded to https) replaces it, anything else leaves it untouched.
    prompt = caption
    if initial:
      prompt += ' ("x" to clear) [%s]' % initial
    new_val = ask_for_data(prompt + ':')
    if new_val == 'x':
      RunGit(['config', '--unset-all', 'rietveld.' + name], error_ok=True)
      return
    if not new_val:
      return
    if is_url:
      new_val = gclient_utils.UpgradeToHttps(new_val)
    if new_val != initial:
      RunGit(['config', 'rietveld.' + name, new_val])

  SetProperty(settings.GetDefaultCCList(), 'CC list', 'cc', False)
  SetProperty(settings.GetDefaultPrivateFlag(),
              'Private flag (rietveld only)', 'private', False)
  SetProperty(settings.GetTreeStatusUrl(error_ok=True), 'Tree status URL',
              'tree-status-url', False)
  SetProperty(settings.GetViewVCUrl(), 'ViewVC URL', 'viewvc-url', True)
  SetProperty(settings.GetBugPrefix(), 'Bug Prefix', 'bug-prefix', False)
  SetProperty(settings.GetRunPostUploadHook(), 'Run Post Upload Hook',
              'run-post-upload-hook', False)
# TODO: configure a default branch to diff against, rather than this
# svn-based hackery.
class ChangeDescription(object):
  """Contains a parsed form of the change description."""
  # Matches an 'R=' or 'TBR=' line; group 1 is the tag, group 2 the value.
  R_LINE = r'^[ \t]*(TBR|R)[ \t]*=[ \t]*(.*?)[ \t]*$'
  # Matches a 'BUG=' line; group 2 is the value.
  BUG_LINE = r'^[ \t]*(BUG)[ \t]*=[ \t]*(.*?)[ \t]*$'

  def __init__(self, description):
    # Internally the description is a list of lines (no trailing newlines).
    self._description_lines = (description or '').strip().splitlines()

  @property               # www.logilab.org/ticket/89786
  def description(self):  # pylint: disable=E0202
    """The description as a single newline-joined string."""
    return '\n'.join(self._description_lines)

  def set_description(self, desc):
    """Replaces the description; trims blank lines at both ends.

    Accepts either a string or an iterable of lines.
    """
    if isinstance(desc, basestring):
      lines = desc.splitlines()
    else:
      lines = [line.rstrip() for line in desc]
    while lines and not lines[0]:
      lines.pop(0)
    while lines and not lines[-1]:
      lines.pop(-1)
    self._description_lines = lines

  def update_reviewers(self, reviewers, add_owners_tbr=False, change=None):
    """Rewrites the R=/TBR= line(s) as a single line each."""
    assert isinstance(reviewers, list), reviewers
    if not reviewers and not add_owners_tbr:
      return
    reviewers = reviewers[:]

    # Get the set of R= and TBR= lines and remove them from the description.
    regexp = re.compile(self.R_LINE)
    matches = [regexp.match(line) for line in self._description_lines]
    new_desc = [l for i, l in enumerate(self._description_lines)
                if not matches[i]]
    self.set_description(new_desc)

    # Construct new unified R= and TBR= lines.
    r_names = []
    tbr_names = []
    for match in matches:
      if not match:
        continue
      people = cleanup_list([match.group(2).strip()])
      if match.group(1) == 'TBR':
        tbr_names.extend(people)
      else:
        r_names.extend(people)
    # Preserve previously-listed reviewers that the caller didn't pass in.
    for name in r_names:
      if name not in reviewers:
        reviewers.append(name)
    if add_owners_tbr:
      # TBR any owners needed to cover files not covered by current reviewers.
      owners_db = owners.Database(change.RepositoryRoot(),
          fopen=file, os_path=os.path, glob=glob.glob)
      all_reviewers = set(tbr_names + reviewers)
      missing_files = owners_db.files_not_covered_by(change.LocalPaths(),
                                                     all_reviewers)
      tbr_names.extend(owners_db.reviewers_for(missing_files,
                                               change.author_email))
    new_r_line = 'R=' + ', '.join(reviewers) if reviewers else None
    new_tbr_line = 'TBR=' + ', '.join(tbr_names) if tbr_names else None

    # Put the new lines in the description where the old first R= line was.
    line_loc = next((i for i, match in enumerate(matches) if match), -1)
    if 0 <= line_loc < len(self._description_lines):
      if new_tbr_line:
        self._description_lines.insert(line_loc, new_tbr_line)
      if new_r_line:
        self._description_lines.insert(line_loc, new_r_line)
    else:
      # No previous R=/TBR= line: append as footers instead.
      if new_r_line:
        self.append_footer(new_r_line)
      if new_tbr_line:
        self.append_footer(new_tbr_line)

  def prompt(self):
    """Asks the user to update the description."""
    self.set_description([
      '# Enter a description of the change.',
      '# This will be displayed on the codereview site.',
      '# The first line will also be used as the subject of the review.',
      '#--------------------This line is 72 characters long'
      '--------------------',
    ] + self._description_lines)

    # Pre-fill a BUG= footer if the description has none yet.
    regexp = re.compile(self.BUG_LINE)
    if not any((regexp.match(line) for line in self._description_lines)):
      self.append_footer('BUG=%s' % settings.GetBugPrefix())
    content = gclient_utils.RunEditor(self.description, True,
                                      git_editor=settings.GetGitEditor())
    if not content:
      DieWithError('Running editor failed')
    lines = content.splitlines()

    # Strip off comments.
    clean_lines = [line.rstrip() for line in lines if not line.startswith('#')]
    if not clean_lines:
      DieWithError('No CL description, aborting')
    self.set_description(clean_lines)

  def append_footer(self, line):
    """Appends |line|, preceded by a blank line unless both are tag lines."""
    if self._description_lines:
      # Add an empty line if either the last line or the new line isn't a tag.
      last_line = self._description_lines[-1]
      if (not presubmit_support.Change.TAG_LINE_RE.match(last_line) or
          not presubmit_support.Change.TAG_LINE_RE.match(line)):
        self._description_lines.append('')
    self._description_lines.append(line)

  def get_reviewers(self):
    """Retrieves the list of reviewers."""
    matches = [re.match(self.R_LINE, line) for line in self._description_lines]
    reviewers = [match.group(2).strip() for match in matches if match]
    return cleanup_list(reviewers)
def get_approving_reviewers(props):
  """Returns the sorted list of reviewers whose messages approved the CL.

  Only senders listed in props['reviewers'] are counted; note the result may
  still include reviewers that are not committers, which the CQ ignores.
  """
  reviewers = props['reviewers']
  approvers = set()
  for message in props['messages']:
    if message['approval'] and message['sender'] in reviewers:
      approvers.add(message['sender'])
  return sorted(approvers)
def FindCodereviewSettingsFile(filename='codereview.settings'):
  """Finds the given file starting in the cwd and going up.

  Only looks up to the top of the repository unless an
  'inherit-review-settings-ok' file exists in the root of the repository.

  Returns an open file object for the settings file, or None if not found.
  """
  inherit_ok_file = 'inherit-review-settings-ok'
  cwd = os.getcwd()
  root = settings.GetRoot()
  if os.path.isfile(os.path.join(root, inherit_ok_file)):
    root = '/'
  while True:
    if filename in os.listdir(cwd):
      if os.path.isfile(os.path.join(cwd, filename)):
        return open(os.path.join(cwd, filename))
    if cwd == root:
      break
    parent = os.path.dirname(cwd)
    if parent == cwd:
      # Reached the filesystem root without ever equalling |root| (e.g. on
      # Windows, where |root| may have been set to '/'); stop instead of
      # looping forever on dirname().
      break
    cwd = parent
def LoadCodereviewSettingsFromFile(fileobj):
  """Parse a codereview.settings file and updates hooks."""
  keyvals = gclient_utils.ParseCodereviewSettingsContent(fileobj.read())

  def SetProperty(name, setting, unset_error_ok=False):
    # Mirrors one codereview.settings key into the 'rietveld.<name>' git
    # config key; unsets the key when absent from the settings file.
    fullname = 'rietveld.' + name
    if setting in keyvals:
      RunGit(['config', fullname, keyvals[setting]])
    else:
      RunGit(['config', '--unset-all', fullname], error_ok=unset_error_ok)

  SetProperty('server', 'CODE_REVIEW_SERVER')
  # Only server setting is required. Other settings can be absent.
  # In that case, we ignore errors raised during option deletion attempt.
  SetProperty('cc', 'CC_LIST', unset_error_ok=True)
  SetProperty('private', 'PRIVATE', unset_error_ok=True)
  SetProperty('tree-status-url', 'STATUS', unset_error_ok=True)
  SetProperty('viewvc-url', 'VIEW_VC', unset_error_ok=True)
  SetProperty('bug-prefix', 'BUG_PREFIX', unset_error_ok=True)
  SetProperty('cpplint-regex', 'LINT_REGEX', unset_error_ok=True)
  SetProperty('force-https-commit-url', 'FORCE_HTTPS_COMMIT_URL',
              unset_error_ok=True)
  SetProperty('cpplint-ignore-regex', 'LINT_IGNORE_REGEX', unset_error_ok=True)
  SetProperty('project', 'PROJECT', unset_error_ok=True)
  SetProperty('pending-ref-prefix', 'PENDING_REF_PREFIX', unset_error_ok=True)
  SetProperty('run-post-upload-hook', 'RUN_POST_UPLOAD_HOOK',
              unset_error_ok=True)

  if 'GERRIT_HOST' in keyvals:
    RunGit(['config', 'gerrit.host', keyvals['GERRIT_HOST']])

  if 'PUSH_URL_CONFIG' in keyvals and 'ORIGIN_URL_CONFIG' in keyvals:
    # should be of the form
    # PUSH_URL_CONFIG: url.ssh://gitrw.chromium.org.pushinsteadof
    # ORIGIN_URL_CONFIG: http://src.chromium.org/git
    RunGit(['config', keyvals['PUSH_URL_CONFIG'],
            keyvals['ORIGIN_URL_CONFIG']])
def urlretrieve(source, destination):
  """urllib is broken for SSL connections via a proxy therefore we
  can't use urllib.urlretrieve().

  Downloads |source| and writes it to the |destination| path.
  """
  # Make sure the response object is closed even if read() raises.
  response = urllib2.urlopen(source)
  try:
    data = response.read()
  finally:
    response.close()
  # Write in binary mode: the payload (e.g. the commit-msg hook script) must
  # not be newline-translated on Windows.
  with open(destination, 'wb') as f:
    f.write(data)
def hasSheBang(fname):
  """Returns True iff the file at |fname| begins with a '#!' line."""
  with open(fname) as script:
    first_two = script.read(2)
  return first_two.startswith('#!')
def DownloadHooks(force):
  """Downloads the Gerrit commit-msg hook into .git/hooks.

  No-op for non-Gerrit repos. The hook is only (re)installed when the current
  file is not executable; an existing non-executable file is left alone unless
  |force| is set.

  Args:
    force: True to update hooks. False to install hooks if not present.
  """
  if not settings.GetIsGerrit():
    return
  src = 'https://gerrit-review.googlesource.com/tools/hooks/commit-msg'
  dst = os.path.join(settings.GetRoot(), '.git', 'hooks', 'commit-msg')
  if not os.access(dst, os.X_OK):
    if os.path.exists(dst):
      # A non-executable hook file already exists; only overwrite on --force.
      if not force:
        return
    try:
      urlretrieve(src, dst)
      if not hasSheBang(dst):
        DieWithError('Not a script: %s\n'
                     'You need to download from\n%s\n'
                     'into .git/hooks/commit-msg and '
                     'chmod +x .git/hooks/commit-msg' % (dst, src))
      # Make the hook runnable by the owner only.
      os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
    except Exception:
      # Remove any partially-written hook so the next run retries cleanly.
      if os.path.exists(dst):
        os.remove(dst)
      DieWithError('\nFailed to download hooks.\n'
                   'You need to download from\n%s\n'
                   'into .git/hooks/commit-msg and '
                   'chmod +x .git/hooks/commit-msg' % src)
@subcommand.usage('[repo root containing codereview.settings]')
def CMDconfig(parser, args):
  """Edits configuration for this tree."""
  parser.add_option('--activate-update', action='store_true',
                    help='activate auto-updating [rietveld] section in '
                         '.git/config')
  parser.add_option('--deactivate-update', action='store_true',
                    help='deactivate auto-updating [rietveld] section in '
                         '.git/config')
  options, args = parser.parse_args(args)

  if options.deactivate_update:
    RunGit(['config', 'rietveld.autoupdate', 'false'])
    return

  if options.activate_update:
    RunGit(['config', '--unset', 'rietveld.autoupdate'])
    return

  if not args:
    # No settings URL given: configure interactively.
    GetCodereviewSettingsInteractively()
    DownloadHooks(True)
    return 0

  url = args[0]
  if not url.endswith('codereview.settings'):
    url = os.path.join(url, 'codereview.settings')

  # Load code review settings and download hooks (if available).
  LoadCodereviewSettingsFromFile(urllib2.urlopen(url))
  DownloadHooks(True)
  return 0
def CMDbaseurl(parser, args):
  """Gets or sets base-url for this branch."""
  branchref = RunGit(['symbolic-ref', 'HEAD']).strip()
  branch = ShortBranchName(branchref)
  _, args = parser.parse_args(args)
  if args:
    # With an argument: store the new base-url for this branch.
    print("Setting base-url to %s" % args[0])
    return RunGit(['config', 'branch.%s.base-url' % branch, args[0]],
                  error_ok=False).strip()
  # Without arguments: show the current value.
  print("Current base-url:")
  return RunGit(['config', 'branch.%s.base-url' % branch],
                error_ok=False).strip()
def color_for_status(status):
  """Maps a Changelist status to color, for CMDstatus and other tools."""
  status_colors = {
      'unsent': Fore.RED,
      'waiting': Fore.BLUE,
      'reply': Fore.YELLOW,
      'lgtm': Fore.GREEN,
      'commit': Fore.MAGENTA,
      'closed': Fore.CYAN,
      'error': Fore.WHITE,
  }
  # Unknown statuses fall back to white.
  return status_colors.get(status, Fore.WHITE)
def fetch_cl_status(branch, auth_config=None):
  """Fetches information for an issue and returns (branch, issue, status)."""
  cl = Changelist(branchref=branch, auth_config=auth_config)
  issue_url = cl.GetIssueURL()
  status = cl.GetStatus()

  if issue_url and (not status or status == 'error'):
    # The issue probably doesn't exist anymore.
    issue_url += ' (broken)'

  return (branch, issue_url, status)
def get_cl_statuses(
    branches, fine_grained, max_processes=None, auth_config=None):
  """Returns a blocking iterable of (branch, issue, color) for given branches.

  If fine_grained is true, this will fetch CL statuses from the server.
  Otherwise, simply indicate if there's a matching url for the given branches.

  If max_processes is specified, it is used as the maximum number of processes
  to spawn to fetch CL status from the server. Otherwise 1 process per branch
  is spawned.
  """
  # Silence upload.py otherwise it becomes unwieldy.
  upload.verbosity = 0

  if fine_grained:
    # Process one branch synchronously to work through authentication, then
    # spawn processes to process all the other branches in parallel.
    if branches:
      fetch = lambda branch: fetch_cl_status(branch, auth_config=auth_config)
      yield fetch(branches[0])

      branches_to_fetch = branches[1:]
      # Guard against ThreadPool(0): with a single branch there is nothing
      # left to fetch, and a zero-sized pool raises ValueError.
      if branches_to_fetch:
        pool = ThreadPool(
            min(max_processes, len(branches_to_fetch))
            if max_processes is not None
            else len(branches_to_fetch))
        for x in pool.imap_unordered(fetch, branches_to_fetch):
          yield x
  else:
    # Do not use GetApprovingReviewers(), since it requires an HTTP request.
    for b in branches:
      cl = Changelist(branchref=b, auth_config=auth_config)
      url = cl.GetIssueURL()
      yield (b, url, 'waiting' if url else 'error')
def upload_branch_deps(cl, args):
  """Uploads CLs of local branches that are dependents of the current branch.

  If the local branch dependency tree looks like:
  test1 -> test2.1 -> test3.1
                   -> test3.2
        -> test2.2 -> test3.3

  and you run "git cl upload --dependencies" from test1 then "git cl upload" is
  run on the dependent branches in this order:
  test2.1, test3.1, test3.2, test2.2, test3.3

  Note: This function does not rebase your local dependent branches. Use it
  when you make a change to the parent branch that will not conflict with its
  dependent branches, and you would like their dependencies updated in
  Rietveld.

  Args:
    cl: Changelist for the currently checked-out (root) branch.
    args: extra arguments forwarded to each "git cl upload" invocation.

  Returns:
    0 on success, 1 when the tree is dirty; individual upload failures are
    reported in the summary but do not change the return value.
  """
  if git_common.is_dirty_git_tree('upload-branch-deps'):
    return 1

  root_branch = cl.GetBranch()
  if root_branch is None:
    DieWithError('Can\'t find dependent branches from detached HEAD state. '
                 'Get on a branch!')
  if not cl.GetIssue() or not cl.GetPatchset():
    DieWithError('Current branch does not have an uploaded CL. We cannot set '
                 'patchset dependencies without an uploaded CL.')

  branches = RunGit(['for-each-ref',
                     '--format=%(refname:short) %(upstream:short)',
                     'refs/heads'])
  if not branches:
    print('No local branches found.')
    return 0

  # Create a dictionary of all local branches to the branches that are
  # dependent on it.
  tracked_to_dependents = collections.defaultdict(list)
  for b in branches.splitlines():
    tokens = b.split()
    if len(tokens) == 2:
      branch_name, tracked = tokens
      tracked_to_dependents[tracked].append(branch_name)

  print
  print 'The dependent local branches of %s are:' % root_branch
  dependents = []
  def traverse_dependents_preorder(branch, padding=''):
    # Depth-first preorder walk so each branch is uploaded after its parent.
    dependents_to_process = tracked_to_dependents.get(branch, [])
    padding += ' '
    for dependent in dependents_to_process:
      print '%s%s' % (padding, dependent)
      dependents.append(dependent)
      traverse_dependents_preorder(dependent, padding)
  traverse_dependents_preorder(root_branch)
  print

  if not dependents:
    print 'There are no dependent local branches for %s' % root_branch
    return 0

  print ('This command will checkout all dependent branches and run '
         '"git cl upload".')
  ask_for_data('[Press enter to continue or ctrl-C to quit]')

  # Add a default patchset title to all upload calls.
  args.extend(['-t', 'Updated patchset dependency'])

  # Record all dependents that failed to upload.
  failures = {}
  # Go through all dependents, checkout the branch and upload.
  try:
    for dependent_branch in dependents:
      print
      print '--------------------------------------'
      print 'Running "git cl upload" from %s:' % dependent_branch
      RunGit(['checkout', '-q', dependent_branch])
      print
      try:
        if CMDupload(OptionParser(), args) != 0:
          print 'Upload failed for %s!' % dependent_branch
          failures[dependent_branch] = 1
      except:  # pylint: disable=W0702
        # Best effort: record the failure and keep uploading the others.
        failures[dependent_branch] = 1
      print
  finally:
    # Swap back to the original root branch.
    RunGit(['checkout', '-q', root_branch])

  print
  print 'Upload complete for dependent branches!'
  for dependent_branch in dependents:
    upload_status = 'failed' if failures.get(dependent_branch) else 'succeeded'
    print ' %s : %s' % (dependent_branch, upload_status)
  print

  return 0
def CMDstatus(parser, args):
"""Show status of changelists.
Colors are used to tell the state of the CL unless --fast is used:
- Red not sent for review or broken
- Blue waiting for review
- Yellow waiting for you to reply to review
- Green LGTM'ed
- Magenta in the commit queue
- Cyan was committed, branch can be deleted
Also see 'git cl comments'.
"""
parser.add_option('--field',
help='print only specific field (desc|id|patch|url)')
parser.add_option('-f', '--fast', action='store_true',
help='Do not retrieve review status')
parser.add_option(
'-j', '--maxjobs', action='store', type=int,
help='The maximum number of jobs to use when retrieving review status')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
if args:
parser.error('Unsupported args: %s' % args)
auth_config = auth.extract_auth_config_from_options(options)
if options.field:
cl = Changelist(auth_config=auth_config)
if options.field.startswith('desc'):
print cl.GetDescription()
elif options.field == 'id':
issueid = cl.GetIssue()
if issueid:
print issueid
elif options.field == 'patch':
patchset = cl.GetPatchset()
if patchset:
print patchset
elif options.field == 'url':
url = cl.GetIssueURL()
if url:
print url
return 0
branches = RunGit(['for-each-ref', '--format=%(refname)', 'refs/heads'])
if not branches:
print('No local branch found.')
return 0
changes = (
Changelist(branchref=b, auth_config=auth_config)
for b in branches.splitlines())
branches = [c.GetBranch() for c in changes]
alignment = max(5, max(len(b) for b in branches))
print 'Branches associated with reviews:'
output = get_cl_statuses(branches,
fine_grained=not options.fast,
max_processes=options.maxjobs,
auth_config=auth_config)
branch_statuses = {}
alignment = max(5, max(len(ShortBranchName(b)) for b in branches))
for branch in sorted(branches):
while branch not in branch_statuses:
b, i, status = output.next()
branch_statuses[b] = (i, status)
issue_url, status = branch_statuses.pop(branch)
color = color_for_status(status)
reset = Fore.RESET
if not sys.stdout.isatty():
color = ''
reset = ''
status_str = '(%s)' % status if status else ''
print ' %*s : %s%s %s%s' % (
alignment, ShortBranchName(branch), color, issue_url, status_str,
reset)
cl = Changelist(auth_config=auth_config)
print
print 'Current branch:',
print cl.GetBranch()
if not cl.GetIssue():
print 'No issue assigned.'
return 0
print 'Issue number: %s (%s)' % (cl.GetIssue(), cl.GetIssueURL())
if not options.fast:
print 'Issue description:'
print cl.GetDescription(pretty=True)
return 0
def colorize_CMDstatus_doc():
  """To be called once in main() to add colors to git cl status help."""
  color_names = [name for name in dir(Fore) if name[0].isupper()]

  def _tint(line):
    upper_line = line.upper()
    for color_name in color_names:
      if color_name not in upper_line:
        continue
      # Extract whitespaces first and the leading '-'.
      indent = len(line) - len(line.lstrip(' ')) + 1
      return (line[:indent] + getattr(Fore, color_name) +
              line[indent:] + Fore.RESET)
    return line

  CMDstatus.__doc__ = '\n'.join(
      _tint(line) for line in CMDstatus.__doc__.splitlines())
@subcommand.usage('[issue_number]')
def CMDissue(parser, args):
"""Sets or displays the current code review issue number.
Pass issue number 0 to clear the current issue.
"""
parser.add_option('-r', '--reverse', action='store_true',
help='Lookup the branch(es) for the specified issues. If '
'no issues are specified, all branches with mapped '
'issues will be listed.')
options, args = parser.parse_args(args)
if options.reverse:
branches = RunGit(['for-each-ref', 'refs/heads',
'--format=%(refname:short)']).splitlines()
# Reverse issue lookup.
issue_branch_map = {}
for branch in branches:
cl = Changelist(branchref=branch)
issue_branch_map.setdefault(cl.GetIssue(), []).append(branch)
if not args:
args = sorted(issue_branch_map.iterkeys())
for issue in args:
if not issue:
continue
print 'Branch for issue number %s: %s' % (
issue, ', '.join(issue_branch_map.get(int(issue)) or ('None',)))
else:
cl = Changelist()
if len(args) > 0:
try:
issue = int(args[0])
except ValueError:
DieWithError('Pass a number to set the issue or none to list it.\n'
'Maybe you want to run git cl status?')
cl.SetIssue(issue)
print 'Issue number: %s (%s)' % (cl.GetIssue(), cl.GetIssueURL())
return 0
def CMDcomments(parser, args):
"""Shows or posts review comments for any changelist."""
parser.add_option('-a', '--add-comment', dest='comment',
help='comment to add to an issue')
parser.add_option('-i', dest='issue',
help="review issue id (defaults to current issue)")
parser.add_option('-j', '--json-file',
help='File to write JSON summary to')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
issue = None
if options.issue:
try:
issue = int(options.issue)
except ValueError:
DieWithError('A review issue id is expected to be a number')
cl = Changelist(issue=issue, auth_config=auth_config)
if options.comment:
cl.AddComment(options.comment)
return 0
data = cl.GetIssueProperties()
summary = []
for message in sorted(data.get('messages', []), key=lambda x: x['date']):
summary.append({
'date': message['date'],
'lgtm': False,
'message': message['text'],
'not_lgtm': False,
'sender': message['sender'],
})
if message['disapproval']:
color = Fore.RED
summary[-1]['not lgtm'] = True
elif message['approval']:
color = Fore.GREEN
summary[-1]['lgtm'] = True
elif message['sender'] == data['owner_email']:
color = Fore.MAGENTA
else:
color = Fore.BLUE
print '\n%s%s %s%s' % (
color, message['date'].split('.', 1)[0], message['sender'],
Fore.RESET)
if message['text'].strip():
print '\n'.join(' ' + l for l in message['text'].splitlines())
if options.json_file:
with open(options.json_file, 'wb') as f:
json.dump(summary, f)
return 0
def CMDdescription(parser, args):
"""Brings up the editor for the current CL's description."""
parser.add_option('-d', '--display', action='store_true',
help='Display the description instead of opening an editor')
auth.add_auth_options(parser)
options, _ = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
cl = Changelist(auth_config=auth_config)
if not cl.GetIssue():
DieWithError('This branch has no associated changelist.')
description = ChangeDescription(cl.GetDescription())
if options.display:
print description.description
return 0
description.prompt()
if cl.GetDescription() != description.description:
cl.UpdateDescription(description.description)
return 0
def CreateDescriptionFromLog(args):
  """Pulls out the commit log to use as a base for the CL description.

  Args:
    args: one revision ('rev' -> 'rev..', 'rev...' -> 'rev..'), two revisions
        ('a b' -> 'a..b'), or anything else which is passed to git log as-is.

  Returns:
    The 'git log' output (subject + body per commit) for the computed range.
  """
  # The initial dead assignment 'log_args = []' was removed: every branch
  # below reassigns it.
  if len(args) == 1 and not args[0].endswith('.'):
    log_args = [args[0] + '..']
  elif len(args) == 1 and args[0].endswith('...'):
    log_args = [args[0][:-1]]
  elif len(args) == 2:
    log_args = [args[0] + '..' + args[1]]
  else:
    log_args = args[:]  # Hope for the best!
  return RunGit(['log', '--pretty=format:%s\n\n%b'] + log_args)
def CMDlint(parser, args):
  """Runs cpplint on the current changelist.

  Returns 1 when cpplint is unavailable, the CL is empty, or any lint error
  is found; 0 otherwise.
  """
  parser.add_option('--filter', action='append', metavar='-x,+y',
                    help='Comma-separated list of cpplint\'s category-filters')
  auth.add_auth_options(parser)
  options, args = parser.parse_args(args)
  auth_config = auth.extract_auth_config_from_options(options)

  # Access to a protected member _XX of a client class
  # pylint: disable=W0212
  try:
    import cpplint
    import cpplint_chromium
  except ImportError:
    print "Your depot_tools is missing cpplint.py and/or cpplint_chromium.py."
    return 1

  # Change the current working directory before calling lint so that it
  # shows the correct base.
  previous_cwd = os.getcwd()
  os.chdir(settings.GetRoot())
  try:
    cl = Changelist(auth_config=auth_config)
    change = cl.GetChange(cl.GetCommonAncestorWithUpstream(), None)
    files = [f.LocalPath() for f in change.AffectedFiles()]
    if not files:
      print "Cannot lint an empty CL"
      return 1

    # Process cpplints arguments if any.
    command = args + files
    if options.filter:
      command = ['--filter=' + ','.join(options.filter)] + command
    filenames = cpplint.ParseArguments(command)

    # Only lint files matching the whitelist regex and not the blacklist
    # regex (both come from codereview.settings).
    white_regex = re.compile(settings.GetLintRegex())
    black_regex = re.compile(settings.GetLintIgnoreRegex())
    extra_check_functions = [cpplint_chromium.CheckPointerDeclarationWhitespace]
    for filename in filenames:
      if white_regex.match(filename):
        if black_regex.match(filename):
          print "Ignoring file %s" % filename
        else:
          cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level,
                              extra_check_functions)
      else:
        print "Skipping file %s" % filename
  finally:
    # Always restore the caller's working directory.
    os.chdir(previous_cwd)
  print "Total errors found: %d\n" % cpplint._cpplint_state.error_count
  if cpplint._cpplint_state.error_count != 0:
    return 1
  return 0
def CMDpresubmit(parser, args):
"""Runs presubmit tests on the current changelist."""
parser.add_option('-u', '--upload', action='store_true',
help='Run upload hook instead of the push/dcommit hook')
parser.add_option('-f', '--force', action='store_true',
help='Run checks even if tree is dirty')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
if not options.force and git_common.is_dirty_git_tree('presubmit'):
print 'use --force to check even if tree is dirty.'
return 1
cl = Changelist(auth_config=auth_config)
if args:
base_branch = args[0]
else:
# Default to diffing against the common ancestor of the upstream branch.
base_branch = cl.GetCommonAncestorWithUpstream()
cl.RunHook(
committing=not options.upload,
may_prompt=False,
verbose=options.verbose,
change=cl.GetChange(base_branch, None))
return 0
def AddChangeIdToCommitMessage(options, args):
"""Re-commits using the current message, assumes the commit hook is in
place.
"""
log_desc = options.message or CreateDescriptionFromLog(args)
git_command = ['commit', '--amend', '-m', log_desc]
RunGit(git_command)
new_log_desc = CreateDescriptionFromLog(args)
if CHANGE_ID in new_log_desc:
print 'git-cl: Added Change-Id to commit message.'
else:
print >> sys.stderr, 'ERROR: Gerrit commit-msg hook not available.'
def GerritUpload(options, args, cl, change):
"""upload the current branch to gerrit."""
# We assume the remote called "origin" is the one we want.
# It is probably not worthwhile to support different workflows.
gerrit_remote = 'origin'
remote, remote_branch = cl.GetRemoteBranch()
branch = GetTargetRef(remote, remote_branch, options.target_branch,
pending_prefix='')
change_desc = ChangeDescription(
options.message or CreateDescriptionFromLog(args))
if not change_desc.description:
print "Description is empty; aborting."
return 1
if options.squash:
# Try to get the message from a previous upload.
shadow_branch = 'refs/heads/git_cl_uploads/' + cl.GetBranch()
message = RunGitSilent(['show', '--format=%s\n\n%b', '-s', shadow_branch])
if not message:
if not options.force:
change_desc.prompt()
if CHANGE_ID not in change_desc.description:
# Run the commit-msg hook without modifying the head commit by writing
# the commit message to a temporary file and running the hook over it,
# then reading the file back in.
commit_msg_hook = os.path.join(settings.GetRoot(), '.git', 'hooks',
'commit-msg')
file_handle, msg_file = tempfile.mkstemp(text=True,
prefix='commit_msg')
try:
try:
with os.fdopen(file_handle, 'w') as fileobj:
fileobj.write(change_desc.description)
finally:
os.close(file_handle)
RunCommand([commit_msg_hook, msg_file])
change_desc.set_description(gclient_utils.FileRead(msg_file))
finally:
os.remove(msg_file)
if not change_desc.description:
print "Description is empty; aborting."
return 1
message = change_desc.description
remote, upstream_branch = cl.FetchUpstreamTuple(cl.GetBranch())
if remote is '.':
# If our upstream branch is local, we base our squashed commit on its
# squashed version.
parent = ('refs/heads/git_cl_uploads/' +
scm.GIT.ShortBranchName(upstream_branch))
# Verify that the upstream branch has been uploaded too, otherwise Gerrit
# will create additional CLs when uploading.
if (RunGitSilent(['rev-parse', upstream_branch + ':']) !=
RunGitSilent(['rev-parse', parent + ':'])):
print 'Upload upstream branch ' + upstream_branch + ' first.'
return 1
else:
parent = cl.GetCommonAncestorWithUpstream()
tree = RunGit(['rev-parse', 'HEAD:']).strip()
ref_to_push = RunGit(['commit-tree', tree, '-p', parent,
'-m', message]).strip()
else:
if CHANGE_ID not in change_desc.description:
AddChangeIdToCommitMessage(options, args)
ref_to_push = 'HEAD'
parent = '%s/%s' % (gerrit_remote, branch)
commits = RunGitSilent(['rev-list', '%s..%s' % (parent,
ref_to_push)]).splitlines()
if len(commits) > 1:
print('WARNING: This will upload %d commits. Run the following command '
'to see which commits will be uploaded: ' % len(commits))
print('git log %s..%s' % (parent, ref_to_push))
print('You can also use `git squash-branch` to squash these into a single '
'commit.')
ask_for_data('About to upload; enter to confirm.')
if options.reviewers or options.tbr_owners:
change_desc.update_reviewers(options.reviewers, options.tbr_owners, change)
receive_options = []
cc = cl.GetCCList().split(',')
if options.cc:
cc.extend(options.cc)
cc = filter(None, cc)
if cc:
receive_options += ['--cc=' + email for email in cc]
if change_desc.get_reviewers():
receive_options.extend(
'--reviewer=' + email for email in change_desc.get_reviewers())
git_command = ['push']
if receive_options:
git_command.append('--receive-pack=git receive-pack %s' %
' '.join(receive_options))
git_command += [gerrit_remote, ref_to_push + ':refs/for/' + branch]
RunGit(git_command)
if options.squash:
head = RunGit(['rev-parse', 'HEAD']).strip()
RunGit(['update-ref', '-m', 'Uploaded ' + head, shadow_branch, ref_to_push])
# TODO(ukai): parse Change-Id: and set issue number?
return 0
def GetTargetRef(remote, remote_branch, target_branch, pending_prefix):
  """Computes the remote branch ref to use for the CL.

  Args:
    remote (str): The git remote for the CL.
    remote_branch (str): The git remote branch for the CL.
    target_branch (str): The target branch specified by the user.
    pending_prefix (str): The pending prefix from the settings.
  """
  if not (remote and remote_branch):
    return None

  if target_branch:
    # Canonicalize branch references to the equivalent local full symbolic
    # refs, which are then translated into the remote full symbolic refs
    # below.
    if '/' not in target_branch:
      remote_branch = 'refs/remotes/%s/%s' % (remote, target_branch)
    else:
      prefix_replacements = (
          ('^((refs/)?remotes/)?branch-heads/', 'refs/remotes/branch-heads/'),
          ('^((refs/)?remotes/)?%s/' % remote, 'refs/remotes/%s/' % remote),
          ('^(refs/)?heads/', 'refs/remotes/%s/' % remote),
      )
      for regex, replacement in prefix_replacements:
        hit = re.search(regex, target_branch)
        if hit:
          remote_branch = target_branch.replace(hit.group(0), replacement)
          break
      else:
        # This is a branch path but not one we recognize; use as-is.
        remote_branch = target_branch
  elif remote_branch in REFS_THAT_ALIAS_TO_OTHER_REFS:
    # Handle the refs that need to land in different refs.
    remote_branch = REFS_THAT_ALIAS_TO_OTHER_REFS[remote_branch]

  # Create the true path to the remote branch.
  # Does the following translation:
  # * refs/remotes/origin/refs/diff/test -> refs/diff/test
  # * refs/remotes/origin/master -> refs/heads/master
  # * refs/remotes/branch-heads/test -> refs/branch-heads/test
  if remote_branch.startswith('refs/remotes/%s/refs/' % remote):
    remote_branch = remote_branch.replace('refs/remotes/%s/' % remote, '')
  elif remote_branch.startswith('refs/remotes/%s/' % remote):
    remote_branch = remote_branch.replace('refs/remotes/%s/' % remote,
                                          'refs/heads/')
  elif remote_branch.startswith('refs/remotes/branch-heads'):
    remote_branch = remote_branch.replace('refs/remotes/', 'refs/')

  # If a pending prefix exists then replace refs/ with it.
  if pending_prefix:
    remote_branch = remote_branch.replace('refs/', pending_prefix)
  return remote_branch
def RietveldUpload(options, args, cl, change):
"""upload the patch to rietveld."""
upload_args = ['--assume_yes'] # Don't ask about untracked files.
upload_args.extend(['--server', cl.GetRietveldServer()])
upload_args.extend(auth.auth_config_to_command_options(cl.auth_config))
if options.emulate_svn_auto_props:
upload_args.append('--emulate_svn_auto_props')
change_desc = None
if options.email is not None:
upload_args.extend(['--email', options.email])
if cl.GetIssue():
if options.title:
upload_args.extend(['--title', options.title])
if options.message:
upload_args.extend(['--message', options.message])
upload_args.extend(['--issue', str(cl.GetIssue())])
print ("This branch is associated with issue %s. "
"Adding patch to that issue." % cl.GetIssue())
else:
if options.title:
upload_args.extend(['--title', options.title])
message = options.title or options.message or CreateDescriptionFromLog(args)
change_desc = ChangeDescription(message)
if options.reviewers or options.tbr_owners:
change_desc.update_reviewers(options.reviewers,
options.tbr_owners,
change)
if not options.force:
change_desc.prompt()
if not change_desc.description:
print "Description is empty; aborting."
return 1
upload_args.extend(['--message', change_desc.description])
if change_desc.get_reviewers():
upload_args.append('--reviewers=' + ','.join(change_desc.get_reviewers()))
if options.send_mail:
if not change_desc.get_reviewers():
DieWithError("Must specify reviewers to send email.")
upload_args.append('--send_mail')
# We check this before applying rietveld.private assuming that in
# rietveld.cc only addresses which we can send private CLs to are listed
# if rietveld.private is set, and so we should ignore rietveld.cc only when
# --private is specified explicitly on the command line.
if options.private:
logging.warn('rietveld.cc is ignored since private flag is specified. '
'You need to review and add them manually if necessary.')
cc = cl.GetCCListWithoutDefault()
else:
cc = cl.GetCCList()
cc = ','.join(filter(None, (cc, ','.join(options.cc))))
if cc:
upload_args.extend(['--cc', cc])
if options.private or settings.GetDefaultPrivateFlag() == "True":
upload_args.append('--private')
upload_args.extend(['--git_similarity', str(options.similarity)])
if not options.find_copies:
upload_args.extend(['--git_no_find_copies'])
# Include the upstream repo's URL in the change -- this is useful for
# projects that have their source spread across multiple repos.
remote_url = cl.GetGitBaseUrlFromConfig()
if not remote_url:
if settings.GetIsGitSvn():
remote_url = cl.GetGitSvnRemoteUrl()
else:
if cl.GetRemoteUrl() and '/' in cl.GetUpstreamBranch():
remote_url = (cl.GetRemoteUrl() + '@'
+ cl.GetUpstreamBranch().split('/')[-1])
if remote_url:
upload_args.extend(['--base_url', remote_url])
remote, remote_branch = cl.GetRemoteBranch()
target_ref = GetTargetRef(remote, remote_branch, options.target_branch,
settings.GetPendingRefPrefix())
if target_ref:
upload_args.extend(['--target_ref', target_ref])
# Look for dependent patchsets. See crbug.com/480453 for more details.
remote, upstream_branch = cl.FetchUpstreamTuple(cl.GetBranch())
upstream_branch = ShortBranchName(upstream_branch)
if remote is '.':
# A local branch is being tracked.
local_branch = ShortBranchName(upstream_branch)
if settings.GetIsSkipDependencyUpload(local_branch):
print
print ('Skipping dependency patchset upload because git config '
'branch.%s.skip-deps-uploads is set to True.' % local_branch)
print
else:
auth_config = auth.extract_auth_config_from_options(options)
branch_cl = Changelist(branchref=local_branch, auth_config=auth_config)
branch_cl_issue_url = branch_cl.GetIssueURL()
branch_cl_issue = branch_cl.GetIssue()
branch_cl_patchset = branch_cl.GetPatchset()
if branch_cl_issue_url and branch_cl_issue and branch_cl_patchset:
upload_args.extend(
['--depends_on_patchset', '%s:%s' % (
branch_cl_issue, branch_cl_patchset)])
print
print ('The current branch (%s) is tracking a local branch (%s) with '
'an associated CL.') % (cl.GetBranch(), local_branch)
print 'Adding %s/#ps%s as a dependency patchset.' % (
branch_cl_issue_url, branch_cl_patchset)
print
project = settings.GetProject()
if project:
upload_args.extend(['--project', project])
if options.cq_dry_run:
upload_args.extend(['--cq_dry_run'])
try:
upload_args = ['upload'] + upload_args + args
logging.info('upload.RealMain(%s)', upload_args)
issue, patchset = upload.RealMain(upload_args)
issue = int(issue)
patchset = int(patchset)
except KeyboardInterrupt:
sys.exit(1)
except:
# If we got an exception after the user typed a description for their
# change, back up the description before re-raising.
if change_desc:
backup_path = os.path.expanduser(DESCRIPTION_BACKUP_FILE)
print '\nGot exception while uploading -- saving description to %s\n' \
% backup_path
backup_file = open(backup_path, 'w')
backup_file.write(change_desc.description)
backup_file.close()
raise
if not cl.GetIssue():
cl.SetIssue(issue)
cl.SetPatchset(patchset)
if options.use_commit_queue:
cl.SetFlag('commit', '1')
return 0
def cleanup_list(l):
  """Fixes a list so that comma separated items are put as individual items.

  So that "--reviewers joe@c,john@c --reviewers joa@c" results in
  options.reviewers == sorted(['joe@c', 'john@c', 'joa@c']).
  """
  split_items = []
  for entry in l:
    split_items.extend(entry.split(','))
  stripped = (item.strip() for item in split_items)
  # Drop the empty strings left by leading/trailing/duplicate commas.
  return sorted(item for item in stripped if item)
@subcommand.usage('[args to "git diff"]')
def CMDupload(parser, args):
  """Uploads the current changelist to codereview.

  Can skip dependency patchset uploads for a branch by running:
    git config branch.branch_name.skip-deps-uploads True
  To unset run:
    git config --unset branch.branch_name.skip-deps-uploads
  Can also set the above globally by using the --global flag.
  """
  parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
                    help='bypass upload presubmit hook')
  parser.add_option('--bypass-watchlists', action='store_true',
                    dest='bypass_watchlists',
                    help='bypass watchlists auto CC-ing reviewers')
  parser.add_option('-f', action='store_true', dest='force',
                    help="force yes to questions (don't prompt)")
  parser.add_option('-m', dest='message', help='message for patchset')
  parser.add_option('-t', dest='title', help='title for patchset')
  parser.add_option('-r', '--reviewers',
                    action='append', default=[],
                    help='reviewer email addresses')
  parser.add_option('--cc',
                    action='append', default=[],
                    help='cc email addresses')
  parser.add_option('-s', '--send-mail', action='store_true',
                    help='send email to reviewer immediately')
  parser.add_option('--emulate_svn_auto_props',
                    '--emulate-svn-auto-props',
                    action="store_true",
                    dest="emulate_svn_auto_props",
                    help="Emulate Subversion's auto properties feature.")
  parser.add_option('-c', '--use-commit-queue', action='store_true',
                    help='tell the commit queue to commit this patchset')
  parser.add_option('--private', action='store_true',
                    help='set the review private (rietveld only)')
  parser.add_option('--target_branch',
                    '--target-branch',
                    metavar='TARGET',
                    help='Apply CL to remote ref TARGET. ' +
                         'Default: remote branch head, or master')
  parser.add_option('--squash', action='store_true',
                    help='Squash multiple commits into one (Gerrit only)')
  parser.add_option('--email', default=None,
                    help='email address to use to connect to Rietveld')
  parser.add_option('--tbr-owners', dest='tbr_owners', action='store_true',
                    help='add a set of OWNERS to TBR')
  parser.add_option('--cq-dry-run', dest='cq_dry_run', action='store_true',
                    help='Send the patchset to do a CQ dry run right after '
                         'upload.')
  parser.add_option('--dependencies', action='store_true',
                    help='Uploads CLs of all the local branches that depend on '
                         'the current branch')

  # Keep a pristine copy of args: --dependencies is removed from it later and
  # the remainder is reused for the recursive dependent uploads.
  orig_args = args
  add_git_similarity(parser)
  auth.add_auth_options(parser)
  (options, args) = parser.parse_args(args)
  auth_config = auth.extract_auth_config_from_options(options)

  if git_common.is_dirty_git_tree('upload'):
    return 1

  options.reviewers = cleanup_list(options.reviewers)
  options.cc = cleanup_list(options.cc)

  cl = Changelist(auth_config=auth_config)
  if args:
    # TODO(ukai): is it ok for gerrit case?
    base_branch = args[0]
  else:
    if cl.GetBranch() is None:
      DieWithError('Can\'t upload from detached HEAD state. Get on a branch!')

    # Default to diffing against common ancestor of upstream branch
    base_branch = cl.GetCommonAncestorWithUpstream()
    args = [base_branch, 'HEAD']

  # Make sure authenticated to Rietveld before running expensive hooks. It is
  # a fast, best efforts check. Rietveld still can reject the authentication
  # during the actual upload.
  if not settings.GetIsGerrit() and auth_config.use_oauth2:
    authenticator = auth.get_authenticator_for_host(
        cl.GetRietveldServer(), auth_config)
    if not authenticator.has_cached_credentials():
      raise auth.LoginRequiredError(cl.GetRietveldServer())

  # Apply watchlists on upload.
  change = cl.GetChange(base_branch, None)
  watchlist = watchlists.Watchlists(change.RepositoryRoot())
  files = [f.LocalPath() for f in change.AffectedFiles()]
  if not options.bypass_watchlists:
    cl.SetWatchers(watchlist.GetWatchersForPaths(files))

  if not options.bypass_hooks:
    if options.reviewers or options.tbr_owners:
      # Set the reviewer list now so that presubmit checks can access it.
      change_description = ChangeDescription(change.FullDescriptionText())
      change_description.update_reviewers(options.reviewers,
                                          options.tbr_owners,
                                          change)
      change.SetDescriptionText(change_description.description)
    hook_results = cl.RunHook(committing=False,
                              may_prompt=not options.force,
                              verbose=options.verbose,
                              change=change)
    if not hook_results.should_continue():
      return 1
    if not options.reviewers and hook_results.reviewers:
      options.reviewers = hook_results.reviewers.split(',')

  if cl.GetIssue():
    # Warn when the local patchset marker is stale relative to the server.
    latest_patchset = cl.GetMostRecentPatchset()
    local_patchset = cl.GetPatchset()
    if latest_patchset and local_patchset and local_patchset != latest_patchset:
      print ('The last upload made from this repository was patchset #%d but '
             'the most recent patchset on the server is #%d.'
             % (local_patchset, latest_patchset))
      print ('Uploading will still work, but if you\'ve uploaded to this issue '
             'from another machine or branch the patch you\'re uploading now '
             'might not include those changes.')
      ask_for_data('About to upload; enter to confirm.')

  print_stats(options.similarity, options.find_copies, args)
  if settings.GetIsGerrit():
    return GerritUpload(options, args, cl, change)
  ret = RietveldUpload(options, args, cl, change)
  if not ret:
    git_set_branch_value('last-upload-hash',
                         RunGit(['rev-parse', 'HEAD']).strip())
    # Run post upload hooks, if specified.
    if settings.GetRunPostUploadHook():
      presubmit_support.DoPostUploadExecuter(
          change,
          cl,
          settings.GetRoot(),
          options.verbose,
          sys.stdout)

    # Upload all dependencies if specified.
    if options.dependencies:
      print
      print '--dependencies has been specified.'
      print 'All dependent local branches will be re-uploaded.'
      print
      # Remove the dependencies flag from args so that we do not end up in a
      # loop.
      orig_args.remove('--dependencies')
      upload_branch_deps(cl, orig_args)
  return ret
def IsSubmoduleMergeCommit(ref):
  """Returns True if |ref| is the special submodule merge commit.

  When submodules are added to the repo, we expect there to be a single
  non-git-svn merge commit at remote HEAD with a signature comment.
  """
  signature = '^SVN changes up to revision [0-9]*$'
  merges = RunGit(
      ['rev-list', '--merges', '--grep=%s' % signature, '%s^!' % ref])
  return merges != ''
def SendUpstream(parser, args, cmd):
  """Common code for CMDland and CmdDCommit

  Squashes branch into a single commit.
  Updates changelog with metadata (e.g. pointer to review).
  Pushes/dcommits the code upstream.
  Updates review and closes.

  Args:
    parser: optparse parser for the subcommand; options are added here.
    args: remaining command-line args; args[0], when given, is the base
        branch to land against.
    cmd: 'land' (push via git) or 'dcommit' (commit via git-svn); also
        interpolated into user-facing messages.

  Returns:
    0 on success, 1 on any failure or user abort.
  """
  parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
                    help='bypass upload presubmit hook')
  parser.add_option('-m', dest='message',
                    help="override review description")
  parser.add_option('-f', action='store_true', dest='force',
                    help="force yes to questions (don't prompt)")
  parser.add_option('-c', dest='contributor',
                    help="external contributor for patch (appended to " +
                         "description and used as author for git). Should be " +
                         "formatted as 'First Last <email@example.com>'")
  add_git_similarity(parser)
  auth.add_auth_options(parser)
  (options, args) = parser.parse_args(args)
  auth_config = auth.extract_auth_config_from_options(options)

  cl = Changelist(auth_config=auth_config)

  current = cl.GetBranch()
  remote, upstream_branch = cl.FetchUpstreamTuple(cl.GetBranch())
  # remote == '.' means the upstream is another local branch; landing there
  # only makes sense for git-svn checkouts.
  if not settings.GetIsGitSvn() and remote == '.':
    print
    print 'Attempting to push branch %r into another local branch!' % current
    print
    print 'Either reparent this branch on top of origin/master:'
    print ' git reparent-branch --root'
    print
    print 'OR run `git rebase-update` if you think the parent branch is already'
    print 'committed.'
    print
    print ' Current parent: %r' % upstream_branch
    return 1

  if not args or cmd == 'land':
    # Default to merging against our best guess of the upstream branch.
    args = [cl.GetUpstreamBranch()]

  if options.contributor:
    if not re.match('^.*\s<\S+@\S+>$', options.contributor):
      # NOTE(review): "contibutor" is a typo in this user-facing message;
      # left as-is here since this edit only adds comments.
      print "Please provide contibutor as 'First Last <email@example.com>'"
      return 1

  base_branch = args[0]
  base_has_submodules = IsSubmoduleMergeCommit(base_branch)

  if git_common.is_dirty_git_tree(cmd):
    return 1

  # This rev-list syntax means "show all commits not in my branch that
  # are in base_branch".
  upstream_commits = RunGit(['rev-list', '^' + cl.GetBranchRef(),
                             base_branch]).splitlines()
  if upstream_commits:
    print ('Base branch "%s" has %d commits '
           'not in this branch.' % (base_branch, len(upstream_commits)))
    print 'Run "git merge %s" before attempting to %s.' % (base_branch, cmd)
    return 1

  # This is the revision `svn dcommit` will commit on top of.
  svn_head = None
  if cmd == 'dcommit' or base_has_submodules:
    svn_head = RunGit(['log', '--grep=^git-svn-id:', '-1',
                       '--pretty=format:%H'])

  if cmd == 'dcommit':
    # If the base_head is a submodule merge commit, the first parent of the
    # base_head should be a git-svn commit, which is what we're interested in.
    base_svn_head = base_branch
    if base_has_submodules:
      base_svn_head += '^1'

    extra_commits = RunGit(['rev-list', '^' + svn_head, base_svn_head])
    if extra_commits:
      print ('This branch has %d additional commits not upstreamed yet.'
             % len(extra_commits.splitlines()))
      print ('Upstream "%s" or rebase this branch on top of the upstream trunk '
             'before attempting to %s.' % (base_branch, cmd))
      return 1

  merge_base = RunGit(['merge-base', base_branch, 'HEAD']).strip()
  if not options.bypass_hooks:
    author = None
    if options.contributor:
      # Extract the bare email address from 'First Last <email@example.com>'.
      author = re.search(r'\<(.*)\>', options.contributor).group(1)
    hook_results = cl.RunHook(
        committing=True,
        may_prompt=not options.force,
        verbose=options.verbose,
        change=cl.GetChange(merge_base, author))
    if not hook_results.should_continue():
      return 1

    # Check the tree status if the tree status URL is set.
    status = GetTreeStatus()
    if 'closed' == status:
      print('The tree is closed. Please wait for it to reopen. Use '
            '"git cl %s --bypass-hooks" to commit on a closed tree.' % cmd)
      return 1
    elif 'unknown' == status:
      print('Unable to determine tree status. Please verify manually and '
            'use "git cl %s --bypass-hooks" to commit on a closed tree.' % cmd)
      return 1
  else:
    # Record that the hooks were bypassed, for later auditing.
    breakpad.SendStack(
        'GitClHooksBypassedCommit',
        'Issue %s/%s bypassed hook when committing (tree status was "%s")' %
        (cl.GetRietveldServer(), cl.GetIssue(), GetTreeStatus()),
        verbose=False)

  # Description precedence: -m flag, then the review's description, then
  # (hooks bypassed, no issue) one generated from the git log.
  change_desc = ChangeDescription(options.message)
  if not change_desc.description and cl.GetIssue():
    change_desc = ChangeDescription(cl.GetDescription())

  if not change_desc.description:
    if not cl.GetIssue() and options.bypass_hooks:
      change_desc = ChangeDescription(CreateDescriptionFromLog([merge_base]))
    else:
      print 'No description set.'
      print 'Visit %s/edit to set it.' % (cl.GetIssueURL())
      return 1

  # Keep a separate copy for the commit message, because the commit message
  # contains the link to the Rietveld issue, while the Rietveld message
  # contains the commit viewvc url.
  if cl.GetIssue():
    change_desc.update_reviewers(cl.GetApprovingReviewers())

  commit_desc = ChangeDescription(change_desc.description)
  if cl.GetIssue():
    # Xcode won't linkify this URL unless there is a non-whitespace character
    # after it. Add a period on a new line to circumvent this. Also add a space
    # before the period to make sure that Gitiles continues to correctly resolve
    # the URL.
    commit_desc.append_footer('Review URL: %s .' % cl.GetIssueURL())
  if options.contributor:
    commit_desc.append_footer('Patch from %s.' % options.contributor)

  print('Description:')
  print(commit_desc.description)

  branches = [merge_base, cl.GetBranchRef()]
  if not options.force:
    print_stats(options.similarity, options.find_copies, branches)

  # We want to squash all this branch's commits into one commit with the proper
  # description. We do this by doing a "reset --soft" to the base branch (which
  # keeps the working copy the same), then dcommitting that. If origin/master
  # has a submodule merge commit, we'll also need to cherry-pick the squashed
  # commit onto a branch based on the git-svn head.
  MERGE_BRANCH = 'git-cl-commit'
  CHERRY_PICK_BRANCH = 'git-cl-cherry-pick'
  # Delete the branches if they exist.
  for branch in [MERGE_BRANCH, CHERRY_PICK_BRANCH]:
    showref_cmd = ['show-ref', '--quiet', '--verify', 'refs/heads/%s' % branch]
    result = RunGitWithCode(showref_cmd)
    if result[0] == 0:
      RunGit(['branch', '-D', branch])

  # We might be in a directory that's present in this branch but not in the
  # trunk. Move up to the top of the tree so that git commands that expect a
  # valid CWD won't fail after we check out the merge branch.
  rel_base_path = settings.GetRelativeRoot()
  if rel_base_path:
    os.chdir(rel_base_path)

  # Stuff our change into the merge branch.
  # We wrap in a try...finally block so if anything goes wrong,
  # we clean up the branches.
  retcode = -1
  pushed_to_pending = False
  pending_ref = None
  revision = None
  try:
    RunGit(['checkout', '-q', '-b', MERGE_BRANCH])
    RunGit(['reset', '--soft', merge_base])
    if options.contributor:
      RunGit(
          [
            'commit', '--author', options.contributor,
            '-m', commit_desc.description,
          ])
    else:
      RunGit(['commit', '-m', commit_desc.description])
    if base_has_submodules:
      # Replay the squashed commit onto a branch rooted at the git-svn head,
      # since the submodule merge commit itself cannot be dcommitted.
      cherry_pick_commit = RunGit(['rev-list', 'HEAD^!']).rstrip()
      RunGit(['branch', CHERRY_PICK_BRANCH, svn_head])
      RunGit(['checkout', CHERRY_PICK_BRANCH])
      RunGit(['cherry-pick', cherry_pick_commit])
    if cmd == 'land':
      remote, branch = cl.FetchUpstreamTuple(cl.GetBranch())
      pending_prefix = settings.GetPendingRefPrefix()
      if not pending_prefix or branch.startswith(pending_prefix):
        # If not using refs/pending/heads/* at all, or target ref is already set
        # to pending, then push to the target ref directly.
        retcode, output = RunGitWithCode(
            ['push', '--porcelain', remote, 'HEAD:%s' % branch])
        pushed_to_pending = pending_prefix and branch.startswith(pending_prefix)
      else:
        # Cherry-pick the change on top of pending ref and then push it.
        assert branch.startswith('refs/'), branch
        assert pending_prefix[-1] == '/', pending_prefix
        pending_ref = pending_prefix + branch[len('refs/'):]
        retcode, output = PushToGitPending(remote, pending_ref, branch)
        pushed_to_pending = (retcode == 0)
      if retcode == 0:
        revision = RunGit(['rev-parse', 'HEAD']).strip()
    else:
      # dcommit the merge branch.
      cmd_args = [
          'svn', 'dcommit',
          '-C%s' % options.similarity,
          '--no-rebase', '--rmdir',
      ]
      if settings.GetForceHttpsCommitUrl():
        # Allow forcing https commit URLs for some projects that don't allow
        # committing to http URLs (like Google Code).
        remote_url = cl.GetGitSvnRemoteUrl()
        if urlparse.urlparse(remote_url).scheme == 'http':
          remote_url = remote_url.replace('http://', 'https://')
        cmd_args.append('--commit-url=%s' % remote_url)
      _, output = RunGitWithCode(cmd_args)
      if 'Committed r' in output:
        # Parse the SVN revision number out of `svn dcommit`'s output.
        revision = re.match(
            '.*?\nCommitted r(\\d+)', output, re.DOTALL).group(1)
    logging.debug(output)
  finally:
    # And then swap back to the original branch and clean up.
    RunGit(['checkout', '-q', cl.GetBranch()])
    RunGit(['branch', '-D', MERGE_BRANCH])
    if base_has_submodules:
      RunGit(['branch', '-D', CHERRY_PICK_BRANCH])

  if not revision:
    print 'Failed to push. If this persists, please file a bug.'
    return 1

  killed = False
  if pushed_to_pending:
    try:
      revision = WaitForRealCommit(remote, revision, base_branch, branch)
      # We set pushed_to_pending to False, since it made it all the way to the
      # real ref.
      pushed_to_pending = False
    except KeyboardInterrupt:
      killed = True

  if cl.GetIssue():
    to_pending = ' to pending queue' if pushed_to_pending else ''
    viewvc_url = settings.GetViewVCUrl()
    if not to_pending:
      if viewvc_url and revision:
        change_desc.append_footer(
            'Committed: %s%s' % (viewvc_url, revision))
      elif revision:
        change_desc.append_footer('Committed: %s' % (revision,))
    print ('Closing issue '
           '(you may be prompted for your codereview password)...')
    cl.UpdateDescription(change_desc.description)
    cl.CloseIssue()
    props = cl.GetIssueProperties()
    patch_num = len(props['patchsets'])
    comment = "Committed patchset #%d (id:%d)%s manually as %s" % (
        patch_num, props['patchsets'][-1], to_pending, revision)
    if options.bypass_hooks:
      comment += ' (tree was closed).' if GetTreeStatus() == 'closed' else '.'
    else:
      comment += ' (presubmit successful).'
    cl.RpcServer().add_comment(cl.GetIssue(), comment)
    cl.SetIssue(None)

  if pushed_to_pending:
    _, branch = cl.FetchUpstreamTuple(cl.GetBranch())
    print 'The commit is in the pending queue (%s).' % pending_ref
    print (
        'It will show up on %s in ~1 min, once it gets a Cr-Commit-Position '
        'footer.' % branch)

  # Run the post-land/post-dcommit hook script, if the project provides one.
  hook = POSTUPSTREAM_HOOK_PATTERN % cmd
  if os.path.isfile(hook):
    RunCommand([hook, merge_base], error_ok=True)

  return 1 if killed else 0
def WaitForRealCommit(remote, pushed_commit, local_base_ref, real_ref):
  """Polls |remote| until the pending commit lands on |real_ref|.

  The commit that eventually lands may be a rewritten version of
  |pushed_commit| (e.g. with extra footers), so commits are matched by
  tree object rather than by commit hash.

  Args:
    remote: git remote to fetch from.
    pushed_commit: the commit that was pushed to the pending ref.
    local_base_ref: local ref to start scanning new commits from.
    real_ref: the final (non-pending) ref to watch.

  Returns:
    The hash of the landed commit. Loops forever until found (a single
    Ctrl-C propagates KeyboardInterrupt to the caller).
  """
  print
  print 'Waiting for commit to be landed on %s...' % real_ref
  print '(If you are impatient, you may Ctrl-C once without harm)'
  # '<rev>:' is git rev syntax for "the tree object of <rev>".
  target_tree = RunGit(['rev-parse', '%s:' % pushed_commit]).strip()
  current_rev = RunGit(['rev-parse', local_base_ref]).strip()

  # Iteration counter, used only for the progress display.
  loop = 0
  while True:
    sys.stdout.write('fetching (%d)... \r' % loop)
    sys.stdout.flush()
    loop += 1

    RunGit(['retry', 'fetch', remote, real_ref], stderr=subprocess2.VOID)
    to_rev = RunGit(['rev-parse', 'FETCH_HEAD']).strip()
    # Only examine commits that appeared since the previous poll.
    commits = RunGit(['rev-list', '%s..%s' % (current_rev, to_rev)])
    for commit in commits.splitlines():
      if RunGit(['rev-parse', '%s:' % commit]).strip() == target_tree:
        print 'Found commit on %s' % real_ref
        return commit

    current_rev = to_rev
def PushToGitPending(remote, pending_ref, upstream_ref):
  """Fetches pending_ref, cherry-picks current HEAD on top of it, pushes.

  Makes up to three attempts; fetch and push failures are retried (they
  may be flakes or non-fast-forward races), while a cherry-pick conflict
  aborts immediately since retrying cannot help.

  Args:
    remote: git remote to fetch from / push to.
    pending_ref: remote ref name (must start with 'refs/').
    upstream_ref: unused here; kept for the caller's signature.

  Returns:
    (retcode of last operation, output log of last operation).
  """
  assert pending_ref.startswith('refs/'), pending_ref
  # Mirror the remote pending ref under a private local namespace.
  local_pending_ref = 'refs/git-cl/' + pending_ref[len('refs/'):]
  cherry = RunGit(['rev-parse', 'HEAD']).strip()
  code = 0
  out = ''
  max_attempts = 3
  attempts_left = max_attempts
  while attempts_left:
    if attempts_left != max_attempts:
      print 'Retrying, %d attempts left...' % (attempts_left - 1,)
    attempts_left -= 1

    # Fetch. Retry fetch errors.
    print 'Fetching pending ref %s...' % pending_ref
    code, out = RunGitWithCode(
        ['retry', 'fetch', remote, '+%s:%s' % (pending_ref, local_pending_ref)])
    if code:
      print 'Fetch failed with exit code %d.' % code
      if out.strip():
        print out.strip()
      continue

    # Try to cherry pick. Abort on merge conflicts.
    print 'Cherry-picking commit on top of pending ref...'
    RunGitWithCode(['checkout', local_pending_ref], suppress_stderr=True)
    code, out = RunGitWithCode(['cherry-pick', cherry])
    if code:
      print (
          'Your patch doesn\'t apply cleanly to ref \'%s\', '
          'the following files have merge conflicts:' % pending_ref)
      print RunGit(['diff', '--name-status', '--diff-filter=U']).strip()
      print 'Please rebase your patch and try again.'
      RunGitWithCode(['cherry-pick', '--abort'])
      return code, out

    # Applied cleanly, try to push now. Retry on error (flake or non-ff push).
    print 'Pushing commit to %s... It can take a while.' % pending_ref
    code, out = RunGitWithCode(
        ['retry', 'push', '--porcelain', remote, 'HEAD:%s' % pending_ref])
    if code == 0:
      # Success.
      print 'Commit pushed to pending ref successfully!'
      return code, out

    print 'Push failed with exit code %d.' % code
    if out.strip():
      print out.strip()
    if IsFatalPushFailure(out):
      # No point retrying a permission/credential problem.
      print (
          'Fatal push error. Make sure your .netrc credentials and git '
          'user.email are correct and you have push access to the repo.')
      return code, out

  print 'All attempts to push to pending ref failed.'
  return code, out
def IsFatalPushFailure(push_stdout):
  """True if retrying push won't help."""
  # Permission errors from Gerrit are permanent; anything else is assumed
  # to be potentially transient.
  fatal_markers = ('(prohibited by Gerrit)',)
  return any(marker in push_stdout for marker in fatal_markers)
@subcommand.usage('[upstream branch to apply against]')
def CMDdcommit(parser, args):
  """Commits the current changelist via git-svn."""
  # Warn (and require confirmation) when the checkout does not look like a
  # configured git-svn repo, since dcommit would then be the wrong command.
  if not settings.GetIsGitSvn():
    if get_footer_svn_id():
      # If it looks like previous commits were mirrored with git-svn.
      message = """This repository appears to be a git-svn mirror, but no
upstream SVN master is set. You probably need to run 'git auto-svn' once."""
    else:
      # No trace of SVN at all: the user almost certainly wants 'git cl land'.
      message = """This doesn't appear to be an SVN repository.
If your project has a true, writeable git repository, you probably want to run
'git cl land' instead.
If your project has a git mirror of an upstream SVN master, you probably need
to run 'git svn init'.
Using the wrong command might cause your commit to appear to succeed, and the
review to be closed, without actually landing upstream. If you choose to
proceed, please verify that the commit lands upstream as expected."""
    print(message)
    ask_for_data('[Press enter to dcommit or ctrl-C to quit]')
  return SendUpstream(parser, args, 'dcommit')
@subcommand.usage('[upstream branch to apply against]')
def CMDland(parser, args):
  """Commits the current changelist via git."""
  # Landing with plain git in an SVN-backed checkout is usually a mistake;
  # warn and require explicit confirmation first.
  looks_like_svn = settings.GetIsGitSvn() or get_footer_svn_id()
  if looks_like_svn:
    print('This appears to be an SVN repository.')
    print('Are you sure you didn\'t mean \'git cl dcommit\'?')
    print('(Ignore if this is the first commit after migrating from svn->git)')
    ask_for_data('[Press enter to push or ctrl-C to quit]')
  return SendUpstream(parser, args, 'land')
def ParseIssueNum(arg):
  """Parses the issue number from args if present otherwise returns None."""
  # A leading run of digits is taken to be the issue number itself.
  looks_numeric = re.match(r'\d+', arg) is not None
  if looks_numeric:
    return arg
  if not arg.startswith('http'):
    return None
  # Extract the trailing "/<digits>/" component of an issue URL. When the
  # pattern fails to match, re.sub() returns the string unchanged; CMDpatch
  # relies on that to pass raw patch-download URLs through untouched.
  return re.sub(r'.*/(\d+)/?', r'\1', arg)
@subcommand.usage('<patch url or issue id or issue url>')
def CMDpatch(parser, args):
  """Patches in a code review."""
  parser.add_option('-b', dest='newbranch',
                    help='create a new branch off trunk for the patch')
  parser.add_option('-f', '--force', action='store_true',
                    help='with -b, clobber any existing branch')
  parser.add_option('-d', '--directory', action='store', metavar='DIR',
                    help='Change to the directory DIR immediately, '
                         'before doing anything else.')
  parser.add_option('--reject', action='store_true',
                    help='failed patches spew .rej files rather than '
                         'attempting a 3-way merge')
  parser.add_option('-n', '--no-commit', action='store_true', dest='nocommit',
                    help="don't commit after patch applies")
  auth.add_auth_options(parser)
  (options, args) = parser.parse_args(args)
  auth_config = auth.extract_auth_config_from_options(options)

  # Exactly one positional argument: an issue id, issue URL or patch URL.
  if len(args) != 1:
    parser.print_help()
    return 1

  issue_arg = ParseIssueNum(args[0])
  # The patch URL works because ParseIssueNum won't do any substitution
  # as the re.sub pattern fails to match and just returns it.
  if issue_arg == None:
    parser.print_help()
    return 1

  # We don't want uncommitted changes mixed up with the patch.
  if git_common.is_dirty_git_tree('patch'):
    return 1

  # TODO(maruel): Use apply_issue.py
  # TODO(ukai): use gerrit-cherry-pick for gerrit repository?

  if options.newbranch:
    if options.force:
      # -f with -b: delete any pre-existing branch of the same name first.
      RunGit(['branch', '-D', options.newbranch],
             stderr=subprocess2.PIPE, error_ok=True)
    RunGit(['checkout', '-b', options.newbranch,
            Changelist().GetUpstreamBranch()])

  return PatchIssue(issue_arg, options.reject, options.nocommit,
                    options.directory, auth_config)
def PatchIssue(issue_arg, reject, nocommit, directory, auth_config):
  """Fetches a review's patch and applies it to the working tree.

  Args:
    issue_arg: issue id (int or digit string) or a raw patch-download URL.
    reject: if True, `git apply --reject` (spew .rej files) instead of 3-way.
    nocommit: if True, apply to the index only; otherwise commit locally and
        associate the branch with the issue.
    directory: optional subdirectory to apply the patch into.
    auth_config: auth configuration for talking to the codereview server.

  Returns:
    0 on success, 1 if the patch failed to apply. Dies on malformed input.
  """
  # PatchIssue should never be called with a dirty tree. It is up to the
  # caller to check this, but just in case we assert here since the
  # consequences of the caller not checking this could be dire.
  assert(not git_common.is_dirty_git_tree('apply'))

  if type(issue_arg) is int or issue_arg.isdigit():
    # Input is an issue id. Figure out the URL.
    issue = int(issue_arg)
    cl = Changelist(issue=issue, auth_config=auth_config)
    patchset = cl.GetMostRecentPatchset()
    patch_data = cl.GetPatchSetDiff(issue, patchset)
  else:
    # Assume it's a URL to the patch. Default to https.
    issue_url = gclient_utils.UpgradeToHttps(issue_arg)
    match = re.match(r'(.*?)/download/issue(\d+)_(\d+).diff', issue_url)
    if not match:
      DieWithError('Must pass an issue ID or full URL for '
                   '\'Download raw patch set\'')
    issue = int(match.group(2))
    cl = Changelist(issue=issue, auth_config=auth_config)
    cl.rietveld_server = match.group(1)
    patchset = int(match.group(3))
    patch_data = urllib2.urlopen(issue_arg).read()

  # Switch up to the top-level directory, if necessary, in preparation for
  # applying the patch.
  top = settings.GetRelativeRoot()
  if top:
    os.chdir(top)

  # Git patches have a/ at the beginning of source paths. We strip that out
  # with a sed script rather than the -p flag to patch so we can feed either
  # Git or svn-style patches into the same apply command.
  # re.sub() should be used but flags=re.MULTILINE is only in python 2.7.
  try:
    patch_data = subprocess2.check_output(
        ['sed', '-e', 's|^--- a/|--- |; s|^+++ b/|+++ |'], stdin=patch_data)
  except subprocess2.CalledProcessError:
    DieWithError('Git patch mungling failed.')
  logging.info(patch_data)

  # We use "git apply" to apply the patch instead of "patch" so that we can
  # pick up file adds.
  # The --index flag means: also insert into the index (so we catch adds).
  cmd = ['git', 'apply', '--index', '-p0']
  if directory:
    cmd.extend(('--directory', directory))
  if reject:
    cmd.append('--reject')
  elif IsGitVersionAtLeast('1.7.12'):
    cmd.append('--3way')
  try:
    subprocess2.check_call(cmd, env=GetNoGitPagerEnv(),
                           stdin=patch_data, stdout=subprocess2.VOID)
  except subprocess2.CalledProcessError:
    print 'Failed to apply the patch'
    return 1

  # If we had an issue, commit the current state and register the issue.
  if not nocommit:
    RunGit(['commit', '-m', (cl.GetDescription() + '\n\n' +
                             'patch from issue %(i)s at patchset '
                             '%(p)s (http://crrev.com/%(i)s#ps%(p)s)'
                             % {'i': issue, 'p': patchset})])
    cl = Changelist(auth_config=auth_config)
    cl.SetIssue(issue)
    cl.SetPatchset(patchset)
    print "Committed patch locally."
  else:
    print "Patch applied to index."
  return 0
def CMDrebase(parser, args):
  """Rebases current branch on top of svn repo."""
  # Provide a wrapper for git svn rebase to help avoid accidental
  # git svn dcommit.
  # It's the only command that doesn't use parser at all since we just defer
  # execution to git-svn.
  #
  # Bug fix: RunGitWithCode() returns (exit_code, output) — see its other
  # call sites (e.g. `retcode, output = RunGitWithCode(...)`). The previous
  # code returned element [1], i.e. the captured output string, as this
  # subcommand's return value, while every other CMD* handler returns an
  # integer exit code. Return the exit code instead.
  return RunGitWithCode(['svn', 'rebase'] + args)[0]
def GetTreeStatus(url=None):
  """Fetches the tree status and returns either 'open', 'closed',
  'unknown' or 'unset'."""
  url = url or settings.GetTreeStatusUrl(error_ok=True)
  if not url:
    # No tree status URL configured for this project.
    return 'unset'
  status = urllib2.urlopen(url).read().lower()
  # The status endpoint may answer with prose or with a bare '0'/'1' flag.
  if 'closed' in status or status == '0':
    return 'closed'
  if 'open' in status or status == '1':
    return 'open'
  return 'unknown'
def GetTreeStatusReason():
  """Fetches the tree status from a json url and returns the message
  with the reason for the tree to be opened or closed."""
  status_url = settings.GetTreeStatusUrl()
  json_url = urlparse.urljoin(status_url, '/current?format=json')
  connection = urllib2.urlopen(json_url)
  payload = connection.read()
  connection.close()
  return json.loads(payload)['message']
def GetBuilderMaster(bot_list):
  """For a given builder, fetch the master from AE if available.

  Args:
    bot_list: iterable of 'builder' or 'builder:testfilter' strings.

  Returns:
    (master_name, None) on success, or (None, error_message) on failure.
  """
  map_url = 'https://builders-map.appspot.com/'
  try:
    builder_to_masters = json.load(urllib2.urlopen(map_url))
  except urllib2.URLError as e:
    return None, ('Failed to fetch builder-to-master map from %s. Error: %s.' %
                  (map_url, e))
  except ValueError as e:
    return None, ('Invalid json string from %s. Error: %s.' % (map_url, e))
  if not builder_to_masters:
    return None, 'Failed to build master map.'

  chosen_master = ''
  for bot in bot_list:
    # Entries may carry a ':testfilter' suffix; only the builder name
    # participates in the lookup.
    builder = bot.split(':', 1)[0]
    masters = builder_to_masters.get(builder, [])
    if not masters:
      return None, ('No matching master for builder %s.' % builder)
    if len(masters) > 1:
      return None, ('The builder name %s exists in multiple masters %s.' %
                    (builder, masters))
    this_master = masters[0]
    # All requested builders must resolve to a single common master.
    if not chosen_master:
      chosen_master = this_master
    elif chosen_master != this_master:
      return None, 'The builders do not belong to the same master.'
  return chosen_master, None
def CMDtree(parser, args):
"""Shows the status of the tree."""
_, args = parser.parse_args(args)
status = GetTreeStatus()
if 'unset' == status:
print 'You must configure your tree status URL by running "git cl config".'
return 2
print "The tree is %s" % status
print
print GetTreeStatusReason()
if status != 'open':
return 1
return 0
def CMDtry(parser, args):
  """Triggers a try job through BuildBucket.

  Builds a master -> {builder: [tests]} map from --bot/--master flags (or
  from PRESUBMIT.py defaults when no --bot is given), then dispatches the
  jobs via LUCI, buildbucket, or Rietveld depending on the flags.
  Returns 0 on success, 1 on error.
  """
  group = optparse.OptionGroup(parser, "Try job options")
  group.add_option(
      "-b", "--bot", action="append",
      help=("IMPORTANT: specify ONE builder per --bot flag. Use it multiple "
            "times to specify multiple builders. ex: "
            "'-b win_rel -b win_layout'. See "
            "the try server waterfall for the builders name and the tests "
            "available."))
  group.add_option(
      "-m", "--master", default='',
      help=("Specify a try master where to run the tries."))
  group.add_option( "--luci", action='store_true')
  group.add_option(
      "-r", "--revision",
      help="Revision to use for the try job; default: the "
           "revision will be determined by the try server; see "
           "its waterfall for more info")
  group.add_option(
      "-c", "--clobber", action="store_true", default=False,
      help="Force a clobber before building; e.g. don't do an "
           "incremental build")
  group.add_option(
      "--project",
      help="Override which project to use. Projects are defined "
           "server-side to define what default bot set to use")
  group.add_option(
      "-p", "--property", dest="properties", action="append", default=[],
      help="Specify generic properties in the form -p key1=value1 -p "
           "key2=value2 etc (buildbucket only). The value will be treated as "
           "json if decodable, or as string otherwise.")
  group.add_option(
      "-n", "--name", help="Try job name; default to current branch name")
  group.add_option(
      "--use-rietveld", action="store_true", default=False,
      help="Use Rietveld to trigger try jobs.")
  group.add_option(
      "--buildbucket-host", default='cr-buildbucket.appspot.com',
      help="Host of buildbucket. The default host is %default.")
  parser.add_option_group(group)
  auth.add_auth_options(parser)
  options, args = parser.parse_args(args)
  auth_config = auth.extract_auth_config_from_options(options)

  if options.use_rietveld and options.properties:
    parser.error('Properties can only be specified with buildbucket')

  # Make sure that all properties are prop=value pairs.
  bad_params = [x for x in options.properties if '=' not in x]
  if bad_params:
    parser.error('Got properties with missing "=": %s' % bad_params)

  if args:
    parser.error('Unknown arguments: %s' % args)

  cl = Changelist(auth_config=auth_config)
  if not cl.GetIssue():
    parser.error('Need to upload first')

  props = cl.GetIssueProperties()
  if props.get('closed'):
    parser.error('Cannot send tryjobs for a closed CL')

  if props.get('private'):
    parser.error('Cannot use trybots with private issue')

  if not options.name:
    options.name = cl.GetBranch()

  # With --bot but no --master, look the master up from the builders map.
  if options.bot and not options.master:
    options.master, err_msg = GetBuilderMaster(options.bot)
    if err_msg:
      parser.error('Tryserver master cannot be found because: %s\n'
                   'Please manually specify the tryserver master'
                   ', e.g. "-m tryserver.chromium.linux".' % err_msg)

  def GetMasterMap():
    # Builds the master -> {builder: [tests]} mapping used below.
    # Process --bot.
    if not options.bot:
      change = cl.GetChange(cl.GetCommonAncestorWithUpstream(), None)

      # Get try masters from PRESUBMIT.py files.
      masters = presubmit_support.DoGetTryMasters(
          change,
          change.LocalPaths(),
          settings.GetRoot(),
          None,
          None,
          options.verbose,
          sys.stdout)
      if masters:
        return masters

      # Fall back to deprecated method: get try slaves from PRESUBMIT.py files.
      options.bot = presubmit_support.DoGetTrySlaves(
          change,
          change.LocalPaths(),
          settings.GetRoot(),
          None,
          None,
          options.verbose,
          sys.stdout)
    if not options.bot:
      parser.error('No default try builder to try, use --bot')

    builders_and_tests = {}
    # TODO(machenbach): The old style command-line options don't support
    # multiple try masters yet.
    old_style = filter(lambda x: isinstance(x, basestring), options.bot)
    new_style = filter(lambda x: isinstance(x, tuple), options.bot)

    for bot in old_style:
      if ':' in bot:
        parser.error('Specifying testfilter is no longer supported')
      elif ',' in bot:
        parser.error('Specify one bot per --bot flag')
      else:
        builders_and_tests.setdefault(bot, []).append('defaulttests')

    for bot, tests in new_style:
      builders_and_tests.setdefault(bot, []).extend(tests)

    # Return a master map with one master to be backwards compatible. The
    # master name defaults to an empty string, which will cause the master
    # not to be set on rietveld (deprecated).
    return {options.master: builders_and_tests}

  masters = GetMasterMap()

  # Triggered bots need a parent job; reject them outright.
  for builders in masters.itervalues():
    if any('triggered' in b for b in builders):
      print >> sys.stderr, (
          'ERROR You are trying to send a job to a triggered bot. This type of'
          ' bot requires an\ninitial job from a parent (usually a builder). '
          'Instead send your job to the parent.\n'
          'Bot list: %s' % builders)
      return 1

  patchset = cl.GetMostRecentPatchset()
  if patchset and patchset != cl.GetPatchset():
    print(
        '\nWARNING Mismatch between local config and server. Did a previous '
        'upload fail?\ngit-cl try always uses latest patchset from rietveld. '
        'Continuing using\npatchset %s.\n' % patchset)
  # Dispatch: LUCI first, then buildbucket (the default), then Rietveld.
  if options.luci:
    trigger_luci_job(cl, masters, options)
  elif not options.use_rietveld:
    try:
      trigger_try_jobs(auth_config, cl, options, masters, 'git_cl_try')
    except BuildbucketResponseException as ex:
      print 'ERROR: %s' % ex
      return 1
    except Exception as e:
      stacktrace = (''.join(traceback.format_stack()) + traceback.format_exc())
      print 'ERROR: Exception when trying to trigger tryjobs: %s\n%s' % (
          e, stacktrace)
      return 1
  else:
    try:
      cl.RpcServer().trigger_distributed_try_jobs(
          cl.GetIssue(), patchset, options.name, options.clobber,
          options.revision, masters)
    except urllib2.HTTPError as e:
      if e.code == 404:
        print('404 from rietveld; '
              'did you mean to use "git try" instead of "git cl try"?')
        return 1
  print('Tried jobs on:')

  for (master, builders) in sorted(masters.iteritems()):
    if master:
      print 'Master: %s' % master
    # Right-align builder names for a tidy listing.
    length = max(len(builder) for builder in builders)
    for builder in sorted(builders):
      print ' %*s: %s' % (length, builder, ','.join(builders[builder]))
  return 0
@subcommand.usage('[new upstream branch]')
def CMDupstream(parser, args):
"""Prints or sets the name of the upstream branch, if any."""
_, args = parser.parse_args(args)
if len(args) > 1:
parser.error('Unrecognized args: %s' % ' '.join(args))
cl = Changelist()
if args:
# One arg means set upstream branch.
branch = cl.GetBranch()
RunGit(['branch', '--set-upstream', branch, args[0]])
cl = Changelist()
print "Upstream branch set to " + cl.GetUpstreamBranch()
# Clear configured merge-base, if there is one.
git_common.remove_merge_base(branch)
else:
print cl.GetUpstreamBranch()
return 0
def CMDweb(parser, args):
  """Opens the current CL in the web browser."""
  _, remaining = parser.parse_args(args)
  if remaining:
    parser.error('Unrecognized args: %s' % ' '.join(remaining))

  url = Changelist().GetIssueURL()
  if url:
    webbrowser.open(url)
    return 0
  # No issue is associated with the current branch.
  print >> sys.stderr, 'ERROR No issue to open'
  return 1
def CMDset_commit(parser, args):
  """Sets the commit bit to trigger the Commit Queue."""
  auth.add_auth_options(parser)
  options, args = parser.parse_args(args)
  auth_config = auth.extract_auth_config_from_options(options)
  if args:
    parser.error('Unrecognized args: %s' % ' '.join(args))
  changelist = Changelist(auth_config=auth_config)
  issue_props = changelist.GetIssueProperties()
  # Private issues cannot have the commit bit set.
  if issue_props.get('private'):
    parser.error('Cannot set commit on private issue')
  changelist.SetFlag('commit', '1')
  return 0
def CMDset_close(parser, args):
  """Closes the issue."""
  auth.add_auth_options(parser)
  options, args = parser.parse_args(args)
  auth_config = auth.extract_auth_config_from_options(options)
  if args:
    parser.error('Unrecognized args: %s' % ' '.join(args))
  changelist = Changelist(auth_config=auth_config)
  # Fetching the description doubles as a check that there actually is an
  # issue associated with the current branch before we try to close it.
  changelist.GetDescription()
  changelist.CloseIssue()
  return 0
def CMDdiff(parser, args):
  """Shows differences between local tree and last upload.

  Works by checking out a temporary branch at the merge-base, applying the
  latest Rietveld patchset to it, and diffing that against the current
  branch. Returns 0 on success, non-zero on a dirty tree or patch failure.
  """
  auth.add_auth_options(parser)
  options, args = parser.parse_args(args)
  auth_config = auth.extract_auth_config_from_options(options)
  if args:
    parser.error('Unrecognized args: %s' % ' '.join(args))

  # Uncommitted (staged and unstaged) changes will be destroyed by
  # "git reset --hard" if there are merging conflicts in PatchIssue().
  # Staged changes would be committed along with the patch from last
  # upload, hence counted toward the "last upload" side in the final
  # diff output, and this is not what we want.
  if git_common.is_dirty_git_tree('diff'):
    return 1

  cl = Changelist(auth_config=auth_config)
  issue = cl.GetIssue()
  branch = cl.GetBranch()
  if not issue:
    DieWithError('No issue found for current branch (%s)' % branch)
  TMP_BRANCH = 'git-cl-diff'
  base_branch = cl.GetCommonAncestorWithUpstream()

  # Create a new branch based on the merge-base
  RunGit(['checkout', '-q', '-b', TMP_BRANCH, base_branch])
  try:
    # Patch in the latest changes from rietveld.
    rtn = PatchIssue(issue, False, False, None, auth_config)
    if rtn != 0:
      # Drop any partially-applied patch before bailing out.
      RunGit(['reset', '--hard'])
      return rtn

    # Switch back to starting branch and diff against the temporary
    # branch containing the latest rietveld patch.
    subprocess2.check_call(['git', 'diff', TMP_BRANCH, branch, '--'])
  finally:
    # Always restore the user's branch and delete the temporary one.
    RunGit(['checkout', '-q', branch])
    RunGit(['branch', '-D', TMP_BRANCH])

  return 0
def CMDowners(parser, args):
  """Interactively find the owners for reviewing."""
  parser.add_option(
      '--no-color',
      action='store_true',
      help='Use this option to disable color output')
  auth.add_auth_options(parser)
  options, args = parser.parse_args(args)
  auth_config = auth.extract_auth_config_from_options(options)

  author = RunGit(['config', 'user.email']).strip() or None

  cl = Changelist(auth_config=auth_config)

  if args:
    if len(args) > 1:
      parser.error('Unknown args')
    base_branch = args[0]
  else:
    # Default to diffing against the common ancestor of the upstream branch.
    base_branch = cl.GetCommonAncestorWithUpstream()

  change = cl.GetChange(base_branch, None)
  # Fix: reuse |change| for the affected-file list; the previous code called
  # cl.GetChange() a second time, recomputing the whole change object just to
  # enumerate its files.
  return owners_finder.OwnersFinder(
      [f.LocalPath() for f in change.AffectedFiles()],
      change.RepositoryRoot(), author,
      fopen=file, os_path=os.path, glob=glob.glob,
      disable_color=options.no_color).run()
def BuildGitDiffCmd(diff_type, upstream_commit, args, extensions):
  """Generates a diff command.

  Produces the argument list for a `git diff` against |upstream_commit|,
  restricted to |extensions| (globally, or within the files/directories
  named in |args|). Dies on an argument that is neither file nor directory.
  """
  # Generate diff for the current branch's changes.
  cmd = ['diff', '--no-ext-diff', '--no-prefix', diff_type,
         upstream_commit, '--']

  if not args:
    # No explicit paths: match every extension anywhere in the tree.
    cmd.extend('*' + ext for ext in extensions)
    return cmd

  for path in args:
    if os.path.isdir(path):
      cmd.extend(os.path.join(path, '*' + ext) for ext in extensions)
    elif os.path.isfile(path):
      cmd.append(path)
    else:
      DieWithError('Argument "%s" is not a file or a directory' % path)

  return cmd
@subcommand.usage('[files or directories to diff]')
def CMDformat(parser, args):
  """Runs auto-formatting tools (clang-format etc.) on the diff."""
  CLANG_EXTS = ['.cc', '.cpp', '.h', '.mm', '.proto', '.java']
  parser.add_option('--full', action='store_true',
                    help='Reformat the full content of all touched files')
  parser.add_option('--dry-run', action='store_true',
                    help='Don\'t modify any file on disk.')
  parser.add_option('--python', action='store_true',
                    help='Format python code with yapf (experimental).')
  parser.add_option('--diff', action='store_true',
                    help='Print diff to stdout rather than modifying files.')
  opts, args = parser.parse_args(args)

  # git diff generates paths against the root of the repository.  Change
  # to that directory so clang-format can find files even within subdirs.
  rel_base_path = settings.GetRelativeRoot()
  if rel_base_path:
    os.chdir(rel_base_path)

  # Grab the merge-base commit, i.e. the upstream commit of the current
  # branch when it was created or the last time it was rebased. This is
  # to cover the case where the user may have called "git fetch origin",
  # moving the origin branch to a newer commit, but hasn't rebased yet.
  upstream_commit = None
  cl = Changelist()
  upstream_branch = cl.GetUpstreamBranch()
  if upstream_branch:
    upstream_commit = RunGit(['merge-base', 'HEAD', upstream_branch])
    upstream_commit = upstream_commit.strip()

  if not upstream_commit:
    DieWithError('Could not find base commit for this branch. '
                 'Are you in detached state?')

  if opts.full:
    # Only list the names of modified files.
    diff_type = '--name-only'
  else:
    # Only generate context-less patches.
    diff_type = '-U0'

  diff_cmd = BuildGitDiffCmd(diff_type, upstream_commit, args, CLANG_EXTS)
  diff_output = RunGit(diff_cmd)

  top_dir = os.path.normpath(
      RunGit(["rev-parse", "--show-toplevel"]).rstrip('\n'))

  # Locate the clang-format binary in the checkout
  try:
    clang_format_tool = clang_format.FindClangFormatToolInChromiumTree()
  except clang_format.NotFoundError as e:  # 'as' form is valid on 2.6+
    DieWithError(e)

  # Environment for the helper tools: clang-format-diff.py locates the
  # clang-format binary through PATH, and the dart formatter at the end of
  # this function runs with the same environment.  Define it unconditionally
  # up front -- it used to be created only on the incremental (non --full)
  # path, so running with --full and touched .dart files raised NameError.
  env = os.environ.copy()
  env['PATH'] = str(os.path.dirname(clang_format_tool))

  # Set to 2 to signal to CheckPatchFormatted() that this patch isn't
  # formatted. This is used to block during the presubmit.
  return_value = 0

  if opts.full:
    # diff_output is a list of files to send to clang-format.
    files = diff_output.splitlines()
    if files:
      cmd = [clang_format_tool]
      if not opts.dry_run and not opts.diff:
        cmd.append('-i')
      stdout = RunCommand(cmd + files, cwd=top_dir)
      if opts.diff:
        sys.stdout.write(stdout)
  else:
    # diff_output is a patch to send to clang-format-diff.py
    try:
      script = clang_format.FindClangFormatScriptInChromiumTree(
          'clang-format-diff.py')
    except clang_format.NotFoundError as e:
      DieWithError(e)
    cmd = [sys.executable, script, '-p0']
    if not opts.dry_run and not opts.diff:
      cmd.append('-i')
    stdout = RunCommand(cmd, stdin=diff_output, cwd=top_dir, env=env)
    if opts.diff:
      sys.stdout.write(stdout)
    if opts.dry_run and len(stdout) > 0:
      return_value = 2

  # Similar code to above, but using yapf on .py files rather than clang-format
  # on C/C++ files
  if opts.python:
    diff_cmd = BuildGitDiffCmd(diff_type, upstream_commit, args, ['.py'])
    diff_output = RunGit(diff_cmd)
    yapf_tool = gclient_utils.FindExecutable('yapf')
    if yapf_tool is None:
      DieWithError('yapf not found in PATH')

    if opts.full:
      files = diff_output.splitlines()
      if files:
        cmd = [yapf_tool]
        if not opts.dry_run and not opts.diff:
          cmd.append('-i')
        stdout = RunCommand(cmd + files, cwd=top_dir)
        if opts.diff:
          sys.stdout.write(stdout)
    else:
      # TODO(sbc): yapf --lines mode still has some issues.
      # https://github.com/google/yapf/issues/154
      DieWithError('--python currently only works with --full')

  # Build a diff command that only operates on dart files. dart's formatter
  # does not have the nice property of only operating on modified chunks, so
  # hard code full.
  dart_diff_cmd = BuildGitDiffCmd('--name-only', upstream_commit,
                                  args, ['.dart'])
  dart_diff_output = RunGit(dart_diff_cmd)
  if dart_diff_output:
    try:
      command = [dart_format.FindDartFmtToolInChromiumTree()]
      if not opts.dry_run and not opts.diff:
        command.append('-w')
      command.extend(dart_diff_output.splitlines())

      stdout = RunCommand(command, cwd=top_dir, env=env)
      if opts.dry_run and stdout:
        return_value = 2
    except dart_format.NotFoundError as e:
      # Best effort: dart formatting is optional when the SDK is absent.
      print ('Unable to check dart code formatting. Dart SDK is not in ' +
             'this checkout.')

  return return_value
@subcommand.usage('<codereview url or issue id>')
def CMDcheckout(parser, args):
"""Checks out a branch associated with a given Rietveld issue."""
_, args = parser.parse_args(args)
if len(args) != 1:
parser.print_help()
return 1
target_issue = ParseIssueNum(args[0])
if target_issue == None:
parser.print_help()
return 1
key_and_issues = [x.split() for x in RunGit(
['config', '--local', '--get-regexp', r'branch\..*\.rietveldissue'])
.splitlines()]
branches = []
for key, issue in key_and_issues:
if issue == target_issue:
branches.append(re.sub(r'branch\.(.*)\.rietveldissue', r'\1', key))
if len(branches) == 0:
print 'No branch found for issue %s.' % target_issue
return 1
if len(branches) == 1:
RunGit(['checkout', branches[0]])
else:
print 'Multiple branches match issue %s:' % target_issue
for i in range(len(branches)):
print '%d: %s' % (i, branches[i])
which = raw_input('Choose by index: ')
try:
RunGit(['checkout', branches[int(which)]])
except (IndexError, ValueError):
print 'Invalid selection, not checking out any branch.'
return 1
return 0
def CMDlol(parser, args):
  # This command is intentionally undocumented.
  # The payload is a base64-encoded zlib-compressed easter egg.
  blob = base64.b64decode(
      'eNptkLEOwyAMRHe+wupCIqW57v0Vq84WqWtXyrcXnCBsmgMJ+/SSAxMZgRB6NzE'
      'E2ObgCKJooYdu4uAQVffUEoE1sRQLxAcqzd7uK2gmStrll1ucV3uZyaY5sXyDd9'
      'JAnN+lAXsOMJ90GANAi43mq5/VeeacylKVgi8o6F1SC63FxnagHfJUTfUYdCR/W'
      'Ofe+0dHL7PicpytKP750Fh1q2qnLVof4w8OZWNY')
  # print(x) with a single argument prints identically under Python 2.
  print(zlib.decompress(blob))
  return 0
class OptionParser(optparse.OptionParser):
  """Creates the option parse and add --verbose support."""

  def __init__(self, *args, **kwargs):
    optparse.OptionParser.__init__(
        self, *args, prog='git cl', version=__version__, **kwargs)
    self.add_option(
        '-v', '--verbose', action='count', default=0,
        help='Use 2 times for more debugging info')

  def parse_args(self, args=None, values=None):
    """Parses arguments, then configures logging from the -v count."""
    options, positional = optparse.OptionParser.parse_args(
        self, args, values)
    # 0 -> WARNING, 1 -> INFO, 2 (or more) -> DEBUG.
    verbosity = min(options.verbose, 2)
    level = (logging.WARNING, logging.INFO, logging.DEBUG)[verbosity]
    logging.basicConfig(level=level)
    return options, positional
def main(argv):
  """Dispatches to the requested 'git cl' subcommand; returns exit code."""
  if sys.hexversion < 0x02060000:
    print >> sys.stderr, (
        '\nYour python version %s is unsupported, please upgrade.\n' %
        sys.version.split(' ', 1)[0])
    return 2

  # Reload settings.
  global settings
  settings = Settings()

  colorize_CMDstatus_doc()
  dispatcher = subcommand.CommandDispatcher(__name__)
  try:
    return dispatcher.execute(OptionParser(), argv)
  except auth.AuthenticationError as e:
    DieWithError(str(e))
  # 'as' form for consistency with the clause above (valid since 2.6,
  # and the interpreter-version check above already requires 2.6).
  except urllib2.HTTPError as e:
    if e.code != 500:
      raise
    DieWithError(
        ('AppEngine is misbehaving and returned HTTP %d, again. Keep faith '
         'and retry or visit go/isgaeup.\n%s') % (e.code, str(e)))
  return 0
if __name__ == '__main__':
  # These affect sys.stdout so do it outside of main() to simplify mocks in
  # unit testing.
  fix_encoding.fix_encoding()
  colorama.init()
  # Exit with main()'s return code; on Ctrl-C, exit 1 without a traceback.
  try:
    sys.exit(main(sys.argv[1:]))
  except KeyboardInterrupt:
    sys.stderr.write('interrupted\n')
    sys.exit(1)
| Midrya/chromium | git_cl.py | Python | bsd-3-clause | 133,721 | [
"VisIt"
] | 7205429025983a82faf9602b425c787a3ab8e15990a0ba7558b8e75bdb49154b |
#!/usr/bin/env python
import os
try:
    # Inside IPython the builtin __IPYTHON__ exists; strip any kernel /
    # notebook arguments so the option parsing below only sees our own.
    __IPYTHON__
    import sys
    del sys.argv[1:]
except:  # NameError outside IPython; deliberately best-effort
    pass
import srwl_bl
import srwlib
import srwlpy
import math
import srwl_uti_smp
def set_optics(v=None):
    """Assembles the SRW beamline container from the parsed options in v.

    Returns an srwlib.SRWLOptC holding the optical elements and their
    propagation-parameter lists, in beamline order.
    """
    elements = []
    prop_params = []

    # Aperture: aperture 33.1798m
    elements.append(srwlib.SRWLOptA(
        _shape=v.op_Aperture_shape,
        _ap_or_ob='a',
        _Dx=v.op_Aperture_Dx,
        _Dy=v.op_Aperture_Dy,
        _x=v.op_Aperture_x,
        _y=v.op_Aperture_y,
    ))
    prop_params.append(v.op_Aperture_pp)

    # Aperture_Watchpoint: drift 33.1798m
    elements.append(srwlib.SRWLOptD(
        _L=v.op_Aperture_Watchpoint_L,
    ))
    prop_params.append(v.op_Aperture_Watchpoint_pp)

    # Watchpoint: watch 45.0m -- a watchpoint contributes no optical
    # element, only the final post-propagation (resize) parameters.
    prop_params.append(v.op_fin_pp)

    return srwlib.SRWLOptC(elements, prop_params)
# Command-line parameter table consumed by srwl_bl.srwl_uti_parse_options().
# Each row is [name, type-code, default, help-text(, action)]; type codes:
# 's' string, 'f' float, 'i' int, '' flag (with a 'store_true' action).
varParam = [
    ['name', 's', 'Tabulated Undulator Example', 'simulation name'],

    #---Data Folder
    ['fdir', 's', '', 'folder (directory) name for reading-in input and saving output data files'],

    #---Electron Beam
    ['ebm_nm', 's', '', 'standard electron beam name'],
    ['ebm_nms', 's', '', 'standard electron beam name suffix: e.g. can be Day1, Final'],
    ['ebm_i', 'f', 0.5, 'electron beam current [A]'],
    ['ebm_e', 'f', 3.0, 'electron beam avarage energy [GeV]'],
    ['ebm_de', 'f', 0.0, 'electron beam average energy deviation [GeV]'],
    ['ebm_x', 'f', 0.0, 'electron beam initial average horizontal position [m]'],
    ['ebm_y', 'f', 0.0, 'electron beam initial average vertical position [m]'],
    ['ebm_xp', 'f', 0.0, 'electron beam initial average horizontal angle [rad]'],
    ['ebm_yp', 'f', 0.0, 'electron beam initial average vertical angle [rad]'],
    ['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'],
    ['ebm_dr', 'f', 0.0, 'electron beam longitudinal drift [m] to be performed before a required calculation'],
    ['ebm_ens', 'f', 0.0007, 'electron beam relative energy spread'],
    ['ebm_emx', 'f', 1.5e-09, 'electron beam horizontal emittance [m]'],
    ['ebm_emy', 'f', 8e-12, 'electron beam vertical emittance [m]'],
    # Definition of the beam through Twiss:
    ['ebm_betax', 'f', 1.84, 'horizontal beta-function [m]'],
    ['ebm_betay', 'f', 1.17, 'vertical beta-function [m]'],
    ['ebm_alphax', 'f', 0.0, 'horizontal alpha-function [rad]'],
    ['ebm_alphay', 'f', 0.0, 'vertical alpha-function [rad]'],
    ['ebm_etax', 'f', 0.0, 'horizontal dispersion function [m]'],
    ['ebm_etay', 'f', 0.0, 'vertical dispersion function [m]'],
    ['ebm_etaxp', 'f', 0.0, 'horizontal dispersion function derivative [rad]'],
    ['ebm_etayp', 'f', 0.0, 'vertical dispersion function derivative [rad]'],

    #---Undulator
    ['und_bx', 'f', 0.0, 'undulator horizontal peak magnetic field [T]'],
    ['und_by', 'f', 0.88770981, 'undulator vertical peak magnetic field [T]'],
    ['und_phx', 'f', 0.0, 'initial phase of the horizontal magnetic field [rad]'],
    ['und_phy', 'f', 0.0, 'initial phase of the vertical magnetic field [rad]'],
    ['und_b2e', '', '', 'estimate undulator fundamental photon energy (in [eV]) for the amplitude of sinusoidal magnetic field defined by und_b or und_bx, und_by', 'store_true'],
    ['und_e2b', '', '', 'estimate undulator field amplitude (in [T]) for the photon energy defined by w_e', 'store_true'],
    ['und_per', 'f', 0.02, 'undulator period [m]'],
    ['und_len', 'f', 3.0, 'undulator length [m]'],
    ['und_zc', 'f', 1.305, 'undulator center longitudinal position [m]'],
    ['und_sx', 'i', 1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
    ['und_sy', 'i', -1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
    ['und_g', 'f', 6.72, 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'],
    ['und_ph', 'f', 0.0, 'shift of magnet arrays [mm] for which the field should be set up'],
    ['und_mdir', 's', '', 'name of magnetic measurements sub-folder'],
    ['und_mfs', 's', '', 'name of magnetic measurements for different gaps summary file'],

    #---Calculation Types
    # Electron Trajectory
    ['tr', '', '', 'calculate electron trajectory', 'store_true'],
    ['tr_cti', 'f', 0.0, 'initial time moment (c*t) for electron trajectory calculation [m]'],
    ['tr_ctf', 'f', 0.0, 'final time moment (c*t) for electron trajectory calculation [m]'],
    ['tr_np', 'f', 10000, 'number of points for trajectory calculation'],
    ['tr_mag', 'i', 2, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'],
    ['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'],
    ['tr_pl', 's', '', 'plot the resulting trajectiry in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'],

    #Single-Electron Spectrum vs Photon Energy
    ['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'],
    ['ss_ei', 'f', 100.0, 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'],
    ['ss_ef', 'f', 20000.0, 'final photon energy [eV] for single-e spectrum vs photon energy calculation'],
    ['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'],
    ['ss_x', 'f', 0.0, 'horizontal position [m] for single-e spectrum vs photon energy calculation'],
    ['ss_y', 'f', 0.0, 'vertical position [m] for single-e spectrum vs photon energy calculation'],
    ['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
    ['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'],
    ['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
    ['ss_mag', 'i', 2, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'],
    ['ss_ft', 's', 'f', 'presentation/domain: "f"- frequency (photon energy), "t"- time'],
    ['ss_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
    ['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'],
    ['ss_pl', 's', '', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],

    #Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size)
    ['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'],
    ['sm_ei', 'f', 100.0, 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'],
    ['sm_ef', 'f', 20000.0, 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'],
    ['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'],
    ['sm_x', 'f', 0.0, 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'],
    ['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'],
    ['sm_nx', 'i', 1, 'number of points vs horizontal position for multi-e spectrum vs photon energy calculation'],
    ['sm_y', 'f', 0.0, 'vertical center position [m] for multi-e spectrum vs photon energy calculation'],
    ['sm_ry', 'f', 0.001, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'],
    ['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'],
    ['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum spectrum or intensity distribution: 1- approximate, 2- accurate'],
    ['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
    ['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
    ['sm_prl', 'f', 1.0, 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'],
    ['sm_pra', 'f', 1.0, 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'],
    ['sm_meth', 'i', -1, 'method to use for spectrum vs photon energy calculation in case of arbitrary input magnetic field: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler", -1- dont use this accurate integration method (rather use approximate if possible)'],
    ['sm_prec', 'f', 0.01, 'relative precision for spectrum vs photon energy calculation in case of arbitrary input magnetic field (nominal value is 0.01)'],
    ['sm_nm', 'i', 1, 'number of macro-electrons for calculation of spectrum in case of arbitrary input magnetic field'],
    ['sm_na', 'i', 5, 'number of macro-electrons to average on each node at parallel (MPI-based) calculation of spectrum in case of arbitrary input magnetic field'],
    ['sm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons) for intermediate intensity at calculation of multi-electron spectrum in case of arbitrary input magnetic field'],
    ['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'],
    ['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
    ['sm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
    ['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated milti-e spectrum vs photon energy'],
    ['sm_pl', 's', '', 'plot the resulting spectrum-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
    #to add options for the multi-e calculation from "accurate" magnetic field

    #Power Density Distribution vs horizontal and vertical position
    ['pw', '', '', 'calculate SR power density distribution', 'store_true'],
    ['pw_x', 'f', 0.0, 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
    ['pw_rx', 'f', 0.015, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
    ['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'],
    ['pw_y', 'f', 0.0, 'central vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
    ['pw_ry', 'f', 0.015, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
    ['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'],
    ['pw_pr', 'f', 1.0, 'precision factor for calculation of power density distribution'],
    ['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'],
    ['pw_zst', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
    ['pw_zfi', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
    ['pw_mag', 'i', 2, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'],
    ['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'],
    ['pw_pl', 's', '', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],

    #Single-Electron Intensity distribution vs horizontal and vertical position
    ['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'],
    #Single-Electron Wavefront Propagation
    ['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'],
    #Multi-Electron (partially-coherent) Wavefront Propagation
    ['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'],

    ['w_e', 'f', 8019.0, 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
    ['w_ef', 'f', -1.0, 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
    ['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'],
    ['w_x', 'f', 0.0, 'central horizontal position [m] for calculation of intensity distribution'],
    ['w_rx', 'f', 0.0015, 'range of horizontal position [m] for calculation of intensity distribution'],
    ['w_nx', 'i', 100, 'number of points vs horizontal position for calculation of intensity distribution'],
    ['w_y', 'f', 0.0, 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
    ['w_ry', 'f', 0.0015, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
    ['w_ny', 'i', 100, 'number of points vs vertical position for calculation of intensity distribution'],
    ['w_smpf', 'f', 0.3, 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'],
    ['w_meth', 'i', 1, 'method to use for calculation of intensity distribution vs horizontal and vertical position: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
    ['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'],
    ['w_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
    ['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
    ['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'],
    ['w_mag', 'i', 2, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'],

    ['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'],
    ['si_pl', 's', '', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
    ['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'],
    ['ws_pl', 's', '', 'plot the resulting intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],

    ['wm_nm', 'i', 1000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'],
    ['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node for parallel (MPI-based) calculation of multi-electron wavefront propagation'],
    ['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'],
    ['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y; 40- intensity(s0), mutual intensity cuts and degree of coherence vs X & Y'],
    ['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'],
    ['wm_x0', 'f', 0.0, 'horizontal center position for mutual intensity cut calculation'],
    ['wm_y0', 'f', 0.0, 'vertical center position for mutual intensity cut calculation'],
    ['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'],
    ['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
    ['wm_am', 'i', 0, 'multi-electron integration approximation method: 0- no approximation (use the standard 5D integration method), 1- integrate numerically only over e-beam energy spread and use convolution to treat transverse emittance'],
    ['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'],
    ['wm_fbk', '', '', 'create backup file(s) with propagated multi-e intensity distribution vs horizontal and vertical position and other radiation characteristics', 'store_true'],

    #to add options
    ['op_r', 'f', 20.0, 'longitudinal position of the first optical element [m]'],

    # Former appParam:
    ['rs_type', 's', 't', 'source type, (u) idealized undulator, (t), tabulated undulator, (m) multipole, (g) gaussian beam'],

#---Beamline optics:
    # Aperture: aperture
    ['op_Aperture_shape', 's', 'r', 'shape'],
    ['op_Aperture_Dx', 'f', 0.00025, 'horizontalSize'],
    ['op_Aperture_Dy', 'f', 0.00025, 'verticalSize'],
    ['op_Aperture_x', 'f', 0.0, 'horizontalOffset'],
    ['op_Aperture_y', 'f', 0.0, 'verticalOffset'],

    # Aperture_Watchpoint: drift
    ['op_Aperture_Watchpoint_L', 'f', 11.8202, 'length'],

#---Propagation parameters
    ['op_Aperture_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Aperture'],
    ['op_Aperture_Watchpoint_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Aperture_Watchpoint'],
    ['op_fin_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'final post-propagation (resize) parameters'],

    #[ 0]: Auto-Resize (1) or not (0) Before propagation
    #[ 1]: Auto-Resize (1) or not (0) After propagation
    #[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
    #[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
    #[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
    #[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
    #[ 6]: Horizontal Resolution modification factor at Resizing
    #[ 7]: Vertical Range modification factor at Resizing
    #[ 8]: Vertical Resolution modification factor at Resizing
    #[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
    #[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
    #[11]: New Vertical wavefront Center position after Shift (not yet implemented)
    #[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
    #[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
    #[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
    #[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
    #[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
]
def setup_magnetic_measurement_files(filename, v):
    """Extracts the magnetic-measurement zip and points v at its index file.

    Under MPI only rank 0 extracts |filename| (into the current directory);
    the index path is then broadcast to all ranks.  Sets v.und_mfs to the
    index file name and v.und_mdir to its directory.  Raises RuntimeError
    when the archive does not contain exactly one *.txt index file.
    """
    # Note: 'os' comes from the module-level import; the previous local
    # 'import os' here was redundant shadowing.
    c = None  # MPI communicator, when running under MPI with >1 rank
    f = None  # path of the *.txt index file inside the archive
    r = 0     # this process's rank (0 when not under MPI)
    try:
        import mpi4py.MPI
        if mpi4py.MPI.COMM_WORLD.Get_size() > 1:
            c = mpi4py.MPI.COMM_WORLD
            r = c.Get_rank()
    except Exception:
        # mpi4py is optional; fall through to single-process behavior.
        pass
    if r == 0:
        try:
            import zipfile
            z = zipfile.ZipFile(filename)
            f = [x for x in z.namelist() if x.endswith('.txt')]
            if len(f) != 1:
                raise RuntimeError(
                    '{} magnetic measurement index (*.txt) file={}'.format(
                        'too many' if len(f) > 0 else 'missing',
                        filename,
                    )
                )
            f = f[0]
            z.extractall()
        except Exception:
            # Abort all ranks rather than deadlocking them in bcast below.
            if c:
                c.Abort(1)
            raise
    if c:
        f = c.bcast(f, root=0)
    v.und_mfs = os.path.basename(f)
    v.und_mdir = os.path.dirname(f) or './'
def main():
    """Parses options, stages magnetic measurement data, and runs SRW."""
    v = srwl_bl.srwl_uti_parse_options(
        srwl_bl.srwl_uti_ext_options(varParam), use_sys_argv=True)
    setup_magnetic_measurement_files("magnetic_measurements.zip", v)
    op = set_optics(v)
    # Enable every report and select its default plot axes.
    for flag, plot_attr, axes in (
            ('ss', 'ss_pl', 'e'),
            ('sm', 'sm_pl', 'e'),
            ('pw', 'pw_pl', 'xy'),
            ('si', 'si_pl', 'xy'),
            ('tr', 'tr_pl', 'xz'),
            ('ws', 'ws_pl', 'xy')):
        setattr(v, flag, True)
        setattr(v, plot_attr, axes)
    mag = None
    if v.rs_type == 'm':
        # Multipole source: build an explicit magnetic field container.
        mag = srwlib.SRWLMagFldC()
        mag.arXc.append(0)
        mag.arYc.append(0)
        mag.arMagFld.append(srwlib.SRWLMagFldM(
            v.mp_field, v.mp_order, v.mp_distribution, v.mp_len))
        mag.arZc.append(v.mp_zc)
    srwl_bl.SRWLBeamline(_name=v.name, _mag_approx=mag).calc_all(v, op)
main()
| mkeilman/sirepo | tests/template/srw_generate_data/tabulated-undulator-example.py | Python | apache-2.0 | 22,553 | [
"Gaussian"
] | 2f3e8e700695fab650dc0965fb5e77275c5b2da75bb3fcb436967c0887d2aa0d |
# Copyright (c) 2000-2010 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""handle diagram generation options for class diagram or default diagrams
"""
from logilab import astng
from logilab.astng.utils import LocalsVisitor
from pylint.pyreverse.diagrams import PackageDiagram, ClassDiagram
# diagram generators ##########################################################
class DiaDefGenerator:
"""handle diagram generation options
"""
def __init__(self, linker, handler):
"""common Diagram Handler initialization"""
self.config = handler.config
self._set_default_options()
self.linker = linker
self.classdiagram = None # defined by subclasses
def get_title(self, node):
"""get title for objects"""
title = node.name
if self.module_names:
title = '%s.%s' % (node.root().name, title)
return title
def _set_option(self, option):
"""activate some options if not explicitly deactivated"""
# if we have a class diagram, we want more information by default;
# so if the option is None, we return True
if option is None:
if self.config.classes:
return True
else:
return False
return option
def _set_default_options(self):
"""set different default options with _default dictionary"""
self.module_names = self._set_option(self.config.module_names)
all_ancestors = self._set_option(self.config.all_ancestors)
all_associated = self._set_option(self.config.all_associated)
anc_level, ass_level = (0, 0)
if all_ancestors:
anc_level = -1
if all_associated:
ass_level = -1
if self.config.show_ancestors is not None:
anc_level = self.config.show_ancestors
if self.config.show_associated is not None:
ass_level = self.config.show_associated
self.anc_level, self.ass_level = anc_level, ass_level
def _get_levels(self):
"""help function for search levels"""
return self.anc_level, self.ass_level
def show_node(self, node):
"""true if builtins and not show_builtins"""
if self.config.show_builtin:
return True
return node.root().name != '__builtin__'
def add_class(self, node):
"""visit one class and add it to diagram"""
self.linker.visit(node)
self.classdiagram.add_object(self.get_title(node), node)
def get_ancestors(self, node, level):
"""return ancestor nodes of a class node"""
if level == 0:
return
for ancestor in node.ancestors(recurs=False):
if not self.show_node(ancestor):
continue
yield ancestor
def get_associated(self, klass_node, level):
"""return associated nodes of a class node"""
if level == 0:
return
for ass_nodes in klass_node.instance_attrs_type.values():
for ass_node in ass_nodes:
if isinstance(ass_node, astng.Instance):
ass_node = ass_node._proxied
if not (isinstance(ass_node, astng.Class)
and self.show_node(ass_node)):
continue
yield ass_node
def extract_classes(self, klass_node, anc_level, ass_level):
    """recursively add klass_node and its related classes to the diagram,
    exploring ancestors and associations down to the given depths
    """
    # skip nodes already drawn, and only then filtered-out nodes
    if self.classdiagram.has_node(klass_node):
        return
    if not self.show_node(klass_node):
        return
    self.add_class(klass_node)
    for parent in self.get_ancestors(klass_node, anc_level):
        self.extract_classes(parent, anc_level - 1, ass_level)
    for associated in self.get_associated(klass_node, ass_level):
        self.extract_classes(associated, anc_level, ass_level - 1)
class DefaultDiadefGenerator(LocalsVisitor, DiaDefGenerator):
    """generate minimum diagram definition for the project :

    * a package diagram including project's modules
    * a class diagram including project's classes
    """

    def __init__(self, linker, handler):
        DiaDefGenerator.__init__(self, linker, handler)
        LocalsVisitor.__init__(self)

    def visit_project(self, node):
        """visit an astng.Project node

        create the diagram definitions for the project
        """
        mode = self.config.mode
        # a package diagram is only useful with more than one module
        self.pkgdiagram = None
        if len(node.modules) > 1:
            self.pkgdiagram = PackageDiagram('packages %s' % node.name, mode)
        self.classdiagram = ClassDiagram('classes %s' % node.name, mode)

    def leave_project(self, node):
        """leave the astng.Project node

        return the generated diagram definitions as a tuple
        """
        if self.pkgdiagram is None:
            return (self.classdiagram,)
        return (self.pkgdiagram, self.classdiagram)

    def visit_module(self, node):
        """visit an astng.Module node

        add this module to the package diagram definition
        """
        if self.pkgdiagram is None:
            return
        self.linker.visit(node)
        self.pkgdiagram.add_object(node.name, node)

    def visit_class(self, node):
        """visit an astng.Class node

        add this class (and its related classes) to the class diagram
        """
        anc_level, ass_level = self._get_levels()
        self.extract_classes(node, anc_level, ass_level)

    def visit_from(self, node):
        """visit astng.From and catch imported modules for the package
        diagram
        """
        if self.pkgdiagram is not None:
            self.pkgdiagram.add_from_depend(node, node.modname)
class ClassDiadefGenerator(DiaDefGenerator):
    """generate a class diagram definition including all classes related to a
    given class
    """

    def __init__(self, linker, handler):
        DiaDefGenerator.__init__(self, linker, handler)

    def class_diagram(self, project, klass):
        """return a class diagram definition for the given klass (a dotted
        name) and its related klasses
        """
        self.classdiagram = ClassDiagram(klass, self.config.mode)
        if len(project.modules) > 1:
            # qualified name: split off the module part to locate the class
            module_name, klass = klass.rsplit('.', 1)
            module = project.get_module(module_name)
        else:
            # single-module project: the class lives in that module
            module = project.modules[0]
            klass = klass.split('.')[-1]
        klass = module.ilookup(klass).next()
        anc_level, ass_level = self._get_levels()
        self.extract_classes(klass, anc_level, ass_level)
        return self.classdiagram
# diagram handler #############################################################
class DiadefsHandler:
    """handle diagram definitions :

    get it from user (i.e. xml files) or generate them
    """

    def __init__(self, config):
        self.config = config

    def get_diadefs(self, project, linker):
        """get the diagrams configuration data

        :param linker: astng.inspector.Linker(IdGeneratorMixIn, LocalsVisitor)
        :param project: astng.manager.Project
        """
        # one explicit diagram per class requested in the configuration
        generator = ClassDiadefGenerator(linker, self)
        diagrams = [generator.class_diagram(project, kls)
                    for kls in self.config.classes]
        # no explicit request: fall back on the default project diagrams
        if not diagrams:
            diagrams = DefaultDiadefGenerator(linker, self).visit(project)
        for diagram in diagrams:
            diagram.extract_relationships()
        return diagrams
| dbbhattacharya/kitsune | vendor/packages/pylint/pyreverse/diadefslib.py | Python | bsd-3-clause | 8,190 | [
"VisIt"
] | 03fd1753a0f0648fecbc5c49116e892ebe699b4b8ac1ecf0a6b6cb7391674992 |
"""
[2015-03-13] Challenge #205 [Hard] DNA and Protein Sequence Alignment
https://www.reddit.com/r/dailyprogrammer/comments/2yx8b8/20150313_challenge_205_hard_dna_and_protein/
#Description
If you are studying a particular pair of genes or proteins, an important question is to what extent the two sequences
are similar. To quantify similarity, it is necessary to align the two sequences, and then you can calculate a
similarity score based on the alignment.
There are two types of alignment in general. A global alignment is an alignment of the full length of two sequences,
for example, of two protein sequences or of two DNA sequences. A local alignment is an alignment of part of one
sequence to part of another sequence.
Alignment treats the two inputs as a linear sequence to be lined up as much as possible, with optional gaps and
conversions allowed. The goal is to minimize these differences.
The first step in computing a sequence alignment is to decide on a scoring system. For this exercise, we'll simplify
this and give a score of +2 to a match and a penalty of -1 to a mismatch, and a penalty of -2 to a gap.
Here's a small example. Our two DNA sequences to align:
CTCTAGCATTAG
GTGCACCCA
One alignment might look like this:
CTCTAGCATTAG
GT---GCACCCA
But that one adds three gaps. We can do a bit better with only one gap added (and a small shift in starting position):
CTCTAGCATTAG
GT-GCACCCA
While not an exact match, it now minimizes the conversion penalties between the two and aligns them as best we can.
For more information and how to do this using an R package, see the chapter ["Pairwise Sequence
Alignment"](http://a-little-book-of-r-for-bioinformatics.readthedocs.org/en/latest/src/chapter4.html), or [this set of
lecture notes from George Washington University](http://www.seas.gwu.edu/~simhaweb/cs151/lectures/module12/align.html).
The key algorithm is [Needleman-Wunsch](http://en.wikipedia.org/wiki/Needleman%E2%80%93Wunsch_algorithm).
For this challenge your task is to write a program that accepts two sequences and globally aligns them. If you want to
make this harder and integrate the BLOSUM matrices, you may.
#Input Description
You'll be given two sequences on two lines, one line per sequence. They'll be the same type of input, DNA or protein.
#Output Description
Your program should emit the aligned sequences with gaps introduced represented by dashes ("-").
#Input
DNA example
GACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC
ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC
Protein example
MTNRTLSREEIRKLDRDLRILVATNGTLTRVLNVVANEEIVVDIINQQLLDVAPKIPELENLKIGRILQRDILLKGQKSGILFVAAESLIVIDLLPTAITTYLTKTHHPIGEIMAASRIETYKEDAQVWIGDLPCWLADYGYWDLPKRAVGRRYRIIAGGQPVIITTEYFLRSVFQDTPREELDRCQYSNDIDTRSGDRFVLHGRVFK
MLAVLPEKREMTECHLSDEEIRKLNRDLRILIATNGTLTRILNVLANDEIVVEIVKQQIQDAAPEMDGCDHSSIGRVLRRDIVLKGRRSGIPFVAAESFIAIDLLPPEIVASLLETHRPIGEVMAASCIETFKEEAKVWAGESPAWLELDRRRNLPPKVVGRQYRVIAEGRPVIIITEYFLRSVFEDNSREEPIRHQRSVGTSARSGRSIC
#Output
DNA example
GACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC
ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC
Protein example
MTNRTLSREEIRKLDRDLRILVATNGTLTRVLNVVANEEIVVDIINQQLLDVAPKIPELENLKIGRILQRDILLKGQKSGILFVAAESLIVIDLLPTAITTYLTKTHHPIGEIMAASRIETYKEDAQVWIGDLPCWLADYGYWDLPKRAVGRRYRIIAGGQPVIITTEYFLRSVFQDTPREELDRCQYSNDIDTRSGDRFVLHGRVFK
MLAVLPEKREMTECHLSDEEIRKLNRDLRILIATNGTLTRILNVLANDEIVVEIVKQQIQDAAPEMDGCDHSSIGRVLRRDIVLKGRRSGIPFVAAESFIAIDLLPPEIVASLLETHRPIGEVMAASCIETFKEEAKVWAGESPAWLELDRRRNLPPKVVGRQYRVIAEGRPVIIITEYFLRSVFEDNSREEPIRHQRS--VGT-SA-R---SGRSIC
#Notes
Once you have a simple NW algorithm implemented, you can alter the cost matrices. In the bioinformatics field, the PAM
and BLOSUM matrices are the standards. You can find them here: ftp://ftp.ncbi.nih.gov/blast/matrices/
Have a cool challenge idea? Post it to /r/DailyProgrammer_Ideas!
"""
def main():
    """entry point stub; the sequence-alignment solution is not implemented"""
    pass
# Run the (currently unimplemented) solution when executed as a script.
if __name__ == "__main__":
    main()
| DayGitH/Python-Challenges | DailyProgrammer/DP20150313C.py | Python | mit | 3,971 | [
"BLAST"
] | 2532a2960f88066980a566ad2658762d1031c107fb6f52b2cf2d9b6aa7d7939a |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.