repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
theonion/django-bulbs | tests/contributions/test_contributions_utils.py | 1 | 5195 | from django.contrib.auth import get_user_model
from bulbs.content.models import Content, FeatureType
from bulbs.contributions.models import (
Contribution, ContributorRole, FeatureTypeRate, FeatureTypeOverride, FlatRate,
FlatRateOverride, HourlyRate, HourlyOverride, OverrideProfile
)
from bulbs.contributions.utils import merge_roles
from bulbs.utils.test import BaseIndexableTestCase
class MergeRoleTestCase(BaseIndexableTestCase):
def setUp(self):
super(MergeRoleTestCase, self).setUp()
user_cls = get_user_model()
self.dominant = ContributorRole.objects.create(name="Dominator")
self.deprecated = ContributorRole.objects.create(name="Deprecated")
self.fella = user_cls.objects.create(first_name="fella", last_name="guy")
self.feature_types = [
FeatureType.objects.create(name="surf"),
FeatureType.objects.create(name="turf"),
FeatureType.objects.create(name="smurf"),
FeatureType.objects.create(name="burf")
]
for i in range(100):
content = Content.objects.create(title="a{}".format(i))
if i % 2 == 0:
content.contributions.create(role=self.dominant, contributor=self.fella)
else:
content.contributions.create(role=self.deprecated, contributor=self.fella)
profile = OverrideProfile.objects.create(contributor=self.fella, role=self.deprecated)
FlatRate.objects.create(role=self.deprecated, rate=40)
FlatRateOverride.objects.create(profile=profile, rate=100)
HourlyRate.objects.create(role=self.deprecated, rate=6)
HourlyOverride.objects.create(profile=profile, rate=42)
for feature_type in self.feature_types:
rate, _ = FeatureTypeRate.objects.get_or_create(
role=self.deprecated, feature_type=feature_type
)
rate.rate = 21
rate.save()
FeatureTypeOverride.objects.create(
profile=profile, feature_type=feature_type
)
def test_contributions_merge(self):
merge_roles(self.dominant.name, self.deprecated.name)
for contribution in Contribution.objects.all():
self.assertNotEqual(contribution.role, self.deprecated)
def test_flat_rate_merge(self):
self.assertFalse(self.dominant.flat_rates.exists())
merge_roles(self.dominant.name, self.deprecated.name)
self.assertTrue(self.dominant.flat_rates.exists())
def test_flat_rate_merge_rate_exists(self):
rate = FlatRate.objects.create(role=self.dominant, rate=20)
merge_roles(self.dominant.name, self.deprecated.name)
self.assertEqual(self.dominant.flat_rates.first().id, rate.id)
self.assertEqual(self.dominant.flat_rates.count(), 1)
def test_hourly_rate_merge(self):
self.assertFalse(self.dominant.hourly_rates.exists())
merge_roles(self.dominant.name, self.deprecated.name)
self.assertTrue(self.dominant.hourly_rates.exists())
def test_hourly_rate_merge_rate_exists(self):
rate = HourlyRate.objects.create(role=self.dominant, rate=3)
merge_roles(self.dominant.name, self.deprecated.name)
self.assertEqual(self.dominant.hourly_rates.first().id, rate.id)
self.assertEqual(self.dominant.hourly_rates.count(), 1)
def test_feature_type_rate_merge(self):
merge_roles(self.dominant.name, self.deprecated.name)
self.assertEqual(self.dominant.feature_type_rates.count(), len(self.feature_types))
def test_feature_type_rate_merge_zero(self):
for feature_type in self.feature_types:
FeatureTypeRate.objects.get_or_create(
rate=0, role=self.dominant, feature_type=feature_type
)
merge_roles(self.dominant.name, self.deprecated.name)
for feature_type_rate in self.dominant.feature_type_rates.all():
self.assertGreater(feature_type_rate.rate, 0)
def test_feature_type_rate_merge_not_zero(self):
for feature_type in self.feature_types:
rate, _ = FeatureTypeRate.objects.get_or_create(
role=self.dominant, feature_type=feature_type
)
rate.rate = 1
rate.save()
merge_roles(self.dominant.name, self.deprecated.name)
for feature_type_rate in self.dominant.feature_type_rates.all():
self.assertEqual(feature_type_rate.rate, 1)
def test_override_merge(self):
self.assertFalse(self.dominant.overrides.exists())
merge_roles(self.dominant.name, self.deprecated.name)
self.assertTrue(self.dominant.overrides.exists())
def test_override_merge_exists_no_overrides(self):
OverrideProfile.objects.create(role=self.dominant, contributor=self.fella)
merge_roles(self.dominant.name, self.deprecated.name)
self.assertEquals(
self.dominant.overrides.first().override_feature_type.count(), len(self.feature_types)
)
self.assertTrue(self.dominant.overrides.first().override_flatrate.exists())
self.assertTrue(self.dominant.overrides.first().override_hourly.exists())
| mit | ee3ce10ff2b4189ac9ec58cc325aa016 | 44.570175 | 98 | 0.676035 | 3.632867 | false | true | false | false |
theonion/django-bulbs | bulbs/campaigns/migrations/0001_initial.py | 2 | 1592 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import djbetty.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Campaign',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sponsor_name', models.CharField(max_length=255)),
('sponsor_logo', djbetty.fields.ImageField(default=None, null=True, blank=True)),
('sponsor_url', models.URLField(null=True, blank=True)),
('start_date', models.DateTimeField(null=True, blank=True)),
('end_date', models.DateTimeField(null=True, blank=True)),
('campaign_label', models.CharField(max_length=255)),
('impression_goal', models.IntegerField(null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CampaignPixel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.URLField()),
('campaign_type', models.IntegerField(default=0, choices=[(0, b'Logo')])),
('campaign', models.ForeignKey(related_name='pixels', to='campaigns.Campaign')),
],
options={
},
bases=(models.Model,),
),
]
| mit | f41c5590a06cb2748bed1482e879394f | 36.904762 | 114 | 0.546482 | 4.326087 | false | false | false | false |
theonion/django-bulbs | bulbs/api/metadata.py | 1 | 1057 | from rest_framework.metadata import SimpleMetadata
from bulbs.infographics.metadata import InfographicMetadata
from bulbs.infographics.utils import get_infographics_serializer
from bulbs.super_features.metadata import BaseSuperFeatureMetadata
from bulbs.super_features.utils import get_superfeature_serializer
SUPERFEATURE_SERIALIZER = get_superfeature_serializer()
INFOGRAPHICS_SERIALIZER = get_infographics_serializer()
class PolymorphicContentMetadata(SimpleMetadata):
serializer_lookup = {
INFOGRAPHICS_SERIALIZER: InfographicMetadata(),
SUPERFEATURE_SERIALIZER: BaseSuperFeatureMetadata()
}
def determine_metadata(self, request, view):
if hasattr(view, "get_serializer_class"):
serializer_class = view.get_serializer_class()
metadata = self.serializer_lookup.get(serializer_class, None)
if metadata:
return metadata.determine_metadata(request, view)
# TODO: Spike out why we included this generic (and bad) response.
return {"status": "ok"}
| mit | 064afc30acff58513402c82671703bd8 | 38.148148 | 74 | 0.742668 | 4.177866 | false | false | false | false |
wireservice/csvkit | csvkit/convert/fixed.py | 1 | 5779 | #!/usr/bin/env python
from codecs import iterdecode
from collections import namedtuple
import agate
import six
def fixed2csv(f, schema, output=None, skip_lines=0, **kwargs):
"""
Convert a fixed-width file to csv using a CSV-formatted schema description.
A schema CSV must start with a header row with (at least) columns labeled
"column","start", and "length". (Other columns will be ignored.) For each
subsequent row, therefore, those columns will be used to identify a column
name, the starting index of the column (an integer), and the length of the
column (also an integer).
Values in the 'start' column are assumed to be zero-based, unless the first
value for 'start' is 1, in which case all values are assumed to be
one-based.
If output is specified, rows will be written to that object, otherwise the
complete data will be returned.
:param skip_lines:
The number of lines to skip from the top of the file.
"""
streaming = True if output else False
if not streaming:
output = six.StringIO()
try:
encoding = kwargs['encoding']
except KeyError:
encoding = None
if isinstance(skip_lines, int):
while skip_lines > 0:
f.readline()
skip_lines -= 1
else:
raise ValueError('skip_lines argument must be an int')
writer = agate.csv.writer(output)
reader = FixedWidthReader(f, schema, encoding=encoding)
writer.writerows(reader)
if not streaming:
data = output.getvalue()
output.close()
return data
# Return empty string when streaming
return ''
class FixedWidthReader(six.Iterator):
"""
Given a fixed-width file and a schema file, produce an analog to a csv
reader that yields a row of strings for each line in the fixed-width file,
preceded with a row of headers as provided in the schema. (This might be
problematic if fixed-width-files ever have header rows also, but I haven't
seen that.)
The schema_file should be in CSV format with a header row which has columns
'column', 'start', and 'length'. (Other columns will be ignored.) Values
in the 'start' column are assumed to be "zero-based" unless the first value
is "1" in which case all values are assumed to be "one-based."
"""
def __init__(self, f, schema, encoding=None):
if encoding is not None:
f = iterdecode(f, encoding)
self.file = f
self.parser = FixedWidthRowParser(schema)
self.header = True
def __iter__(self):
return self
def __next__(self):
if self.header:
self.header = False
return self.parser.headers
return self.parser.parse(next(self.file))
FixedWidthField = namedtuple('FixedWidthField', ['name', 'start', 'length'])
class FixedWidthRowParser(object):
"""
Instantiated with a schema, able to return a sequence of trimmed strings
representing fields given a fixed-length line. Flexible about where the
columns are, as long as they are headed with the literal names 'column',
'start', and 'length'.
"""
def __init__(self, schema):
self.fields = [] # A list of FixedWidthFields
schema_reader = agate.csv.reader(schema)
schema_decoder = SchemaDecoder(next(schema_reader))
for i, row in enumerate(schema_reader):
try:
self.fields.append(schema_decoder(row))
except Exception as e:
raise ValueError("Error reading schema at line %i: %s" % (i + 2, e))
def parse(self, line):
values = []
for field in self.fields:
values.append(line[field.start:field.start + field.length].strip())
return values
def parse_dict(self, line):
"""
Convenience method returns a dict. Equivalent to
``dict(zip(self.headers,self.parse(line)))``.
"""
return dict(zip(self.headers, self.parse(line)))
@property
def headers(self):
return [field.name for field in self.fields]
class SchemaDecoder(object):
"""
Extracts column, start, and length columns from schema rows. Once
instantiated, each time the instance is called with a row, a
``(column,start,length)`` tuple will be returned based on values in that
row and the constructor kwargs.
"""
REQUIRED_COLUMNS = [('column', None), ('start', int), ('length', int)]
start = None
length = None
column = None
one_based = None
def __init__(self, header):
"""
Constructs a schema row decoder.
"""
for p, val_type in self.REQUIRED_COLUMNS:
try:
if val_type:
setattr(self, p, val_type(header.index(p)))
else:
setattr(self, p, header.index(p))
except ValueError:
raise ValueError('A column named "%s" must exist in the schema file.' % (p))
def __call__(self, row):
"""
Return a tuple (column, start, length) based on this instance's
parameters. If the first time this is called, the row's 'start'
value is 1, then all 'start' values including the first will be one
less than in the actual input data, to adjust for one-based
specifications. Values for 'start' and 'length' will be cast to
integers.
"""
if self.one_based is None:
self.one_based = (int(row[self.start]) == 1)
if self.one_based:
adjusted_start = int(row[self.start]) - 1
else:
adjusted_start = int(row[self.start])
return FixedWidthField(row[self.column], adjusted_start, int(row[self.length]))
| mit | d9c147543074adf14f1af00d51f0cdab | 30.752747 | 92 | 0.623983 | 4.199855 | false | false | false | false |
cole/aiosmtplib | tests/test_sendmail.py | 1 | 19384 | """
SMTP.sendmail and SMTP.send_message method testing.
"""
import asyncio
import copy
import email.generator
import email.header
import email.message
from typing import Any, Callable, Coroutine, List, Tuple, Type
import pytest
from aiosmtpd.smtp import SMTP as SMTPD
from aiosmtplib import (
SMTP,
SMTPNotSupported,
SMTPRecipientsRefused,
SMTPResponseException,
SMTPStatus,
)
from aiosmtplib.email import formataddr
pytestmark = pytest.mark.asyncio()
async def test_sendmail_simple_success(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
sender_str: str,
recipient_str: str,
message_str: str,
) -> None:
async with smtp_client:
errors, response = await smtp_client.sendmail(
sender_str, [recipient_str], message_str
)
assert not errors
assert isinstance(errors, dict)
assert response != ""
async def test_sendmail_binary_content(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
sender_str: str,
recipient_str: str,
message_str: str,
) -> None:
async with smtp_client:
errors, response = await smtp_client.sendmail(
sender_str, [recipient_str], bytes(message_str, "ascii")
)
assert not errors
assert isinstance(errors, dict)
assert response != ""
async def test_sendmail_with_recipients_string(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
sender_str: str,
recipient_str: str,
message_str: str,
) -> None:
async with smtp_client:
errors, response = await smtp_client.sendmail(
sender_str, recipient_str, message_str
)
assert not errors
assert response != ""
async def test_sendmail_with_mail_option(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
sender_str: str,
recipient_str: str,
message_str: str,
) -> None:
async with smtp_client:
errors, response = await smtp_client.sendmail(
sender_str, [recipient_str], message_str, mail_options=["BODY=8BITMIME"]
)
assert not errors
assert response != ""
async def test_sendmail_without_size_option(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
smtpd_class: Type[SMTPD],
smtpd_mock_response_done: Callable,
monkeypatch: pytest.MonkeyPatch,
sender_str: str,
recipient_str: str,
message_str: str,
) -> None:
monkeypatch.setattr(smtpd_class, "smtp_EHLO", smtpd_mock_response_done)
async with smtp_client:
errors, response = await smtp_client.sendmail(
sender_str, [recipient_str], message_str
)
assert not errors
assert response != ""
async def test_sendmail_with_invalid_mail_option(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
sender_str: str,
recipient_str: str,
message_str: str,
) -> None:
async with smtp_client:
with pytest.raises(SMTPResponseException) as excinfo:
await smtp_client.sendmail(
sender_str,
[recipient_str],
message_str,
mail_options=["BADDATA=0x00000000"],
)
assert excinfo.value.code == SMTPStatus.syntax_error
async def test_sendmail_with_rcpt_option(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
sender_str: str,
recipient_str: str,
message_str: str,
) -> None:
async with smtp_client:
with pytest.raises(SMTPRecipientsRefused) as excinfo:
await smtp_client.sendmail(
sender_str,
[recipient_str],
message_str,
rcpt_options=["NOTIFY=FAILURE,DELAY"],
)
recipient_exc = excinfo.value.recipients[0]
assert recipient_exc.code == SMTPStatus.syntax_error
assert (
recipient_exc.message
== "RCPT TO parameters not recognized or not implemented"
)
async def test_sendmail_simple_failure(
smtp_client: SMTP, smtpd_server: asyncio.AbstractServer
) -> None:
async with smtp_client:
with pytest.raises(SMTPRecipientsRefused):
# @@ is an invalid recipient.
await smtp_client.sendmail("test@example.com", ["@@"], "blah")
async def test_sendmail_error_silent_rset_handles_disconnect(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
smtpd_class: Type[SMTPD],
smtpd_mock_response_error_disconnect: Callable,
monkeypatch: pytest.MonkeyPatch,
sender_str: str,
recipient_str: str,
message_str: str,
) -> None:
monkeypatch.setattr(smtpd_class, "smtp_DATA", smtpd_mock_response_error_disconnect)
async with smtp_client:
with pytest.raises(SMTPResponseException):
await smtp_client.sendmail(sender_str, [recipient_str], message_str)
async def test_rset_after_sendmail_error_response_to_mail(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
received_commands: List[Tuple[str, Tuple[Any, ...]]],
) -> None:
"""
If an error response is given to the MAIL command in the sendmail method,
test that we reset the server session.
"""
async with smtp_client:
response = await smtp_client.ehlo()
assert response.code == SMTPStatus.completed
with pytest.raises(SMTPResponseException) as excinfo:
await smtp_client.sendmail(">foobar<", ["test@example.com"], "Hello World")
assert excinfo.value.code == SMTPStatus.unrecognized_parameters
assert received_commands[-1][0] == "RSET"
async def test_rset_after_sendmail_error_response_to_rcpt(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
received_commands: List[Tuple[str, Tuple[Any, ...]]],
) -> None:
"""
If an error response is given to the RCPT command in the sendmail method,
test that we reset the server session.
"""
async with smtp_client:
response = await smtp_client.ehlo()
assert response.code == SMTPStatus.completed
with pytest.raises(SMTPRecipientsRefused) as excinfo:
await smtp_client.sendmail(
"test@example.com", [">not an addr<"], "Hello World"
)
assert excinfo.value.recipients[0].code == SMTPStatus.unrecognized_parameters
assert received_commands[-1][0] == "RSET"
async def test_rset_after_sendmail_error_response_to_data(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
smtpd_class: Type[SMTPD],
monkeypatch: pytest.MonkeyPatch,
error_code: int,
sender_str: str,
recipient_str: str,
message_str: str,
received_commands: List[Tuple[str, Tuple[Any, ...]]],
smtpd_mock_response_error_with_code: Callable[[SMTPD], Coroutine[Any, Any, None]],
) -> None:
"""
If an error response is given to the DATA command in the sendmail method,
test that we reset the server session.
"""
monkeypatch.setattr(smtpd_class, "smtp_DATA", smtpd_mock_response_error_with_code)
async with smtp_client:
response = await smtp_client.ehlo()
assert response.code == SMTPStatus.completed
with pytest.raises(SMTPResponseException) as excinfo:
await smtp_client.sendmail(sender_str, [recipient_str], message_str)
assert excinfo.value.code == error_code
assert received_commands[-1][0] == "RSET"
async def test_send_message(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
message: email.message.Message,
) -> None:
async with smtp_client:
errors, response = await smtp_client.send_message(message)
assert not errors
assert isinstance(errors, dict)
assert response != ""
async def test_send_message_with_sender_and_recipient_args(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
message: email.message.Message,
received_messages: List[email.message.EmailMessage],
) -> None:
sender = "sender2@example.com"
recipients = ["recipient1@example.com", "recipient2@example.com"]
async with smtp_client:
errors, response = await smtp_client.send_message(
message, sender=sender, recipients=recipients
)
assert not errors
assert isinstance(errors, dict)
assert response != ""
assert len(received_messages) == 1
assert received_messages[0]["X-MailFrom"] == sender
assert received_messages[0]["X-RcptTo"] == ", ".join(recipients)
async def test_send_message_with_cc_recipients(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
recipient_str: str,
message: email.message.Message,
received_messages: List[email.message.EmailMessage],
received_commands: List[Tuple[str, Tuple[Any, ...]]],
) -> None:
cc_recipients = ["recipient1@example.com", "recipient2@example.com"]
message["Cc"] = ", ".join(cc_recipients)
async with smtp_client:
errors, _ = await smtp_client.send_message(message)
assert not errors
assert len(received_messages) == 1
assert (
received_messages[0]["X-RcptTo"]
== f'{recipient_str}, {", ".join(cc_recipients)}'
)
assert received_commands[2][0] == "RCPT"
assert received_commands[2][1][0] == recipient_str
assert received_commands[3][0] == "RCPT"
assert received_commands[3][1][0] == cc_recipients[0]
assert received_commands[4][0] == "RCPT"
assert received_commands[4][1][0] == cc_recipients[1]
async def test_send_message_with_bcc_recipients(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
recipient_str: str,
message: email.message.Message,
received_messages: List[email.message.EmailMessage],
received_commands: List[Tuple[str, Tuple[Any, ...]]],
) -> None:
bcc_recipients = ["recipient1@example.com", "recipient2@example.com"]
message["Bcc"] = ", ".join(bcc_recipients)
async with smtp_client:
errors, _ = await smtp_client.send_message(message)
assert not errors
assert len(received_messages) == 1
assert received_commands[2][0] == "RCPT"
assert received_commands[2][1][0] == recipient_str
assert received_commands[3][0] == "RCPT"
assert received_commands[3][1][0] == bcc_recipients[0]
assert received_commands[4][0] == "RCPT"
assert received_commands[4][1][0] == bcc_recipients[1]
async def test_send_message_with_cc_and_bcc_recipients(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
recipient_str: str,
message: email.message.Message,
received_messages: List[email.message.EmailMessage],
received_commands: List[Tuple[str, Tuple[Any, ...]]],
) -> None:
cc_recipient = "recipient2@example.com"
message["Cc"] = cc_recipient
bcc_recipient = "recipient2@example.com"
message["Bcc"] = bcc_recipient
async with smtp_client:
errors, _ = await smtp_client.send_message(message)
assert not errors
assert len(received_messages) == 1
assert received_messages[0]["To"] == recipient_str
assert received_messages[0]["Cc"] == cc_recipient
# BCC shouldn't be passed through
assert received_messages[0]["Bcc"] is None
assert received_commands[2][0] == "RCPT"
assert received_commands[2][1][0] == recipient_str
assert received_commands[3][0] == "RCPT"
assert received_commands[3][1][0] == cc_recipient
assert received_commands[4][0] == "RCPT"
assert received_commands[4][1][0] == bcc_recipient
async def test_send_message_recipient_str(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
message: email.message.Message,
received_commands: List[Tuple[str, Tuple[Any, ...]]],
) -> None:
recipient_str = "1234@example.org"
async with smtp_client:
errors, response = await smtp_client.send_message(
message, recipients=recipient_str
)
assert not errors
assert isinstance(errors, dict)
assert response != ""
assert received_commands[2][1][0] == recipient_str
async def test_send_message_mail_options(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
message: email.message.Message,
) -> None:
async with smtp_client:
errors, response = await smtp_client.send_message(
message, mail_options=["BODY=8BITMIME"]
)
assert not errors
assert isinstance(errors, dict)
assert response != ""
async def test_send_multiple_messages_in_sequence(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
message: email.message.Message,
) -> None:
message1 = copy.copy(message)
message2 = copy.copy(message)
del message2["To"]
message2["To"] = "recipient2@example.com"
async with smtp_client:
errors1, response1 = await smtp_client.send_message(message1)
assert not errors1
assert isinstance(errors1, dict)
assert response1 != ""
errors2, response2 = await smtp_client.send_message(message2)
assert not errors2
assert isinstance(errors2, dict)
assert response2 != ""
async def test_send_message_without_recipients(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
message: email.message.Message,
) -> None:
del message["To"]
async with smtp_client:
with pytest.raises(ValueError):
await smtp_client.send_message(message)
async def test_send_message_without_sender(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
message: email.message.Message,
) -> None:
del message["From"]
async with smtp_client:
with pytest.raises(ValueError):
await smtp_client.send_message(message)
async def test_send_message_smtputf8_sender(
smtp_client_smtputf8: SMTP,
smtpd_server_smtputf8: asyncio.AbstractServer,
message: email.message.Message,
received_commands: List[Tuple[str, Tuple[Any, ...]]],
received_messages: List[email.message.EmailMessage],
) -> None:
del message["From"]
message["From"] = "séndër@exåmple.com"
async with smtp_client_smtputf8:
errors, response = await smtp_client_smtputf8.send_message(message)
assert not errors
assert response != ""
assert received_commands[1][0] == "MAIL"
assert received_commands[1][1][0] == message["From"]
# Size varies depending on the message type
assert received_commands[1][1][1][0].startswith("SIZE=")
assert received_commands[1][1][1][1:] == ["SMTPUTF8", "BODY=8BITMIME"]
assert len(received_messages) == 1
assert received_messages[0]["X-MailFrom"] == message["From"]
async def test_send_mime_message_smtputf8_recipient(
smtp_client_smtputf8: SMTP,
smtpd_server_smtputf8: asyncio.AbstractServer,
mime_message: email.message.EmailMessage,
received_commands: List[Tuple[str, Tuple[Any, ...]]],
received_messages: List[email.message.EmailMessage],
) -> None:
mime_message["To"] = "reçipïént@exåmple.com"
async with smtp_client_smtputf8:
errors, response = await smtp_client_smtputf8.send_message(mime_message)
assert not errors
assert response != ""
assert received_commands[2][0] == "RCPT"
assert received_commands[2][1][0] == mime_message["To"]
assert len(received_messages) == 1
assert received_messages[0]["X-RcptTo"] == ", ".join(mime_message.get_all("To"))
async def test_send_compat32_message_smtputf8_recipient(
smtp_client_smtputf8: SMTP,
smtpd_server_smtputf8: asyncio.AbstractServer,
compat32_message: email.message.Message,
received_commands: List[Tuple[str, Tuple[Any, ...]]],
received_messages: List[email.message.EmailMessage],
) -> None:
recipient_bytes = bytes("reçipïént@exåmple.com", "utf-8")
compat32_message["To"] = email.header.Header(recipient_bytes, "utf-8")
async with smtp_client_smtputf8:
errors, response = await smtp_client_smtputf8.send_message(compat32_message)
assert not errors
assert response != ""
assert received_commands[2][0] == "RCPT"
assert received_commands[2][1][0] == compat32_message["To"]
assert len(received_messages) == 1
assert (
received_messages[0]["X-RcptTo"]
== "recipient@example.com, reçipïént@exåmple.com"
)
async def test_send_message_smtputf8_not_supported(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
message: email.message.Message,
) -> None:
message["To"] = "reçipïént2@exåmple.com"
async with smtp_client:
with pytest.raises(SMTPNotSupported):
await smtp_client.send_message(message)
async def test_send_message_with_formataddr(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
message: email.message.Message,
) -> None:
message["To"] = formataddr(("æøå", "someotheruser@example.com"))
async with smtp_client:
errors, response = await smtp_client.send_message(message)
assert not errors
assert response != ""
async def test_send_compat32_message_utf8_text_without_smtputf8(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
compat32_message: email.message.Message,
received_commands: List[Tuple[str, Tuple[Any, ...]]],
received_messages: List[email.message.EmailMessage],
) -> None:
compat32_message["To"] = email.header.Header(
"reçipïént <recipient2@example.com>", "utf-8"
)
async with smtp_client:
errors, response = await smtp_client.send_message(compat32_message)
assert not errors
assert response != ""
assert received_commands[2][0] == "RCPT"
assert received_commands[2][1][0] == compat32_message["To"].encode()
assert len(received_messages) == 1
assert (
received_messages[0]["X-RcptTo"]
== "recipient@example.com, recipient2@example.com"
)
# Name should be encoded
assert received_messages[0].get_all("To") == [
"recipient@example.com",
"=?utf-8?b?cmXDp2lww6/DqW50IDxyZWNpcGllbnQyQGV4YW1wbGUuY29tPg==?=",
]
async def test_send_mime_message_utf8_text_without_smtputf8(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
mime_message: email.message.EmailMessage,
received_commands: List[Tuple[str, Tuple[Any, ...]]],
received_messages: List[email.message.EmailMessage],
) -> None:
mime_message["To"] = "reçipïént <recipient2@example.com>"
async with smtp_client:
errors, response = await smtp_client.send_message(mime_message)
assert not errors
assert response != ""
assert received_commands[2][0] == "RCPT"
assert received_commands[2][1][0] == mime_message["To"]
assert len(received_messages) == 1
assert (
received_messages[0]["X-RcptTo"]
== "recipient@example.com, recipient2@example.com"
)
# Name should be encoded
assert received_messages[0].get_all("To") == [
"recipient@example.com",
"=?utf-8?b?cmXDp2lww6/DqW50IDxyZWNpcGllbnQyQGV4YW1wbGUuY29tPg==?=",
]
async def test_sendmail_empty_sender(
smtp_client: SMTP,
smtpd_server: asyncio.AbstractServer,
recipient_str: str,
message_str: str,
) -> None:
async with smtp_client:
errors, response = await smtp_client.sendmail("", [recipient_str], message_str)
assert not errors
assert isinstance(errors, dict)
assert response != ""
| mit | ed9261dc18d7cd7050148fbc567a122a | 29.626582 | 87 | 0.659692 | 3.571877 | false | true | false | false |
theislab/scib | tests/metrics/test_all.py | 1 | 1171 | import scanpy as sc
import scib
from tests.common import LOGGER
def test_fast(adata_neighbors):
metrics_df = scib.me.metrics_fast(
adata_neighbors,
adata_neighbors,
batch_key="batch",
label_key="celltype",
embed="X_pca",
)
for score in metrics_df:
LOGGER.info(f"score: {score}")
assert 0 <= score <= 1
def test_slim(adata_paul15):
sc.pp.pca(adata_paul15)
sc.pp.neighbors(adata_paul15)
sc.tl.dpt(adata_paul15)
metrics_df = scib.me.metrics_slim(
adata_paul15,
adata_paul15,
batch_key="batch",
label_key="celltype",
embed="X_pca",
)
for score in metrics_df:
LOGGER.info(f"score: {score}")
assert 0 <= score <= 1
# def test_all(adata_paul15):
# sc.pp.pca(adata_paul15)
# sc.pp.neighbors(adata_paul15)
# sc.tl.dpt(adata_paul15)
#
# metrics_df = scib.me.metrics_all(
# adata_paul15,
# adata_paul15,
# batch_key='batch',
# label_key='celltype',
# embed='X_pca',
# )
#
# for score in metrics_df:
# LOGGER.info(f"score: {score}")
# assert 0 <= score <= 1
| mit | 6b91dd19d645de3bc32ed3d5962dd865 | 20.685185 | 39 | 0.566183 | 2.891358 | false | true | false | false |
agronholm/apscheduler | src/apscheduler/triggers/combining.py | 1 | 5734 | from __future__ import annotations
from abc import abstractmethod
from datetime import datetime, timedelta
from typing import Any
import attrs
from .._exceptions import MaxIterationsReached
from .._validators import as_timedelta, require_state_version
from ..abc import Trigger
from ..marshalling import marshal_object, unmarshal_object
@attrs.define
class BaseCombiningTrigger(Trigger):
triggers: list[Trigger]
_next_fire_times: list[datetime | None] = attrs.field(
init=False, eq=False, factory=list
)
def __getstate__(self) -> dict[str, Any]:
return {
"version": 1,
"triggers": [marshal_object(trigger) for trigger in self.triggers],
"next_fire_times": self._next_fire_times,
}
@abstractmethod
def __setstate__(self, state: dict[str, Any]) -> None:
self.triggers = [
unmarshal_object(*trigger_state) for trigger_state in state["triggers"]
]
self._next_fire_times = state["next_fire_times"]
@attrs.define
class AndTrigger(BaseCombiningTrigger):
    """
    Fires on times produced by the enclosed triggers whenever the fire times are within
    the given threshold.

    If the produced fire times are not within the given threshold of each other, the
    trigger(s) that produced the earliest fire time will be asked for their next fire
    time and the iteration is restarted. If instead all the triggers agree on a fire
    time, all the triggers are asked for their next fire times and the earliest of the
    previously produced fire times will be returned.

    This trigger will be finished when any of the enclosed trigger has finished.

    :param triggers: triggers to combine
    :param threshold: maximum time difference between the next fire times of the
        triggers in order for the earliest of them to be returned from :meth:`next` (in
        seconds, or as timedelta)
    :param max_iterations: maximum number of iterations of fire time calculations before
        giving up
    """

    threshold: timedelta = attrs.field(converter=as_timedelta, default=1)
    max_iterations: int | None = 10000

    def next(self) -> datetime | None:
        if not self._next_fire_times:
            # Fill out the fire times on the first run
            self._next_fire_times = [t.next() for t in self.triggers]

        # NOTE(review): max_iterations is annotated as int | None, but
        # range(None) raises TypeError -- confirm whether "no limit"
        # (None) needs to be supported here.
        for _ in range(self.max_iterations):
            # Find the earliest and latest fire times
            earliest_fire_time: datetime | None = None
            latest_fire_time: datetime | None = None
            for fire_time in self._next_fire_times:
                # If any of the fire times is None, this trigger is finished
                if fire_time is None:
                    return None

                if earliest_fire_time is None or earliest_fire_time > fire_time:
                    earliest_fire_time = fire_time

                if latest_fire_time is None or latest_fire_time < fire_time:
                    latest_fire_time = fire_time

            # Replace all the fire times that were within the threshold
            for i, _trigger in enumerate(self.triggers):
                if self._next_fire_times[i] - earliest_fire_time <= self.threshold:
                    self._next_fire_times[i] = self.triggers[i].next()

            # If all the fire times were within the threshold, return the
            # earliest one. The loop above has already advanced every such
            # trigger, so they must NOT be advanced again here -- the previous
            # extra "[t.next() for t in self.triggers]" at this point advanced
            # each trigger twice, silently skipping one fire time per trigger.
            if latest_fire_time - earliest_fire_time <= self.threshold:
                return earliest_fire_time
        else:
            raise MaxIterationsReached

    def __getstate__(self) -> dict[str, Any]:
        state = super().__getstate__()
        state["threshold"] = self.threshold.total_seconds()
        state["max_iterations"] = self.max_iterations
        return state

    def __setstate__(self, state: dict[str, Any]) -> None:
        require_state_version(self, state, 1)
        super().__setstate__(state)
        self.threshold = timedelta(seconds=state["threshold"])
        self.max_iterations = state["max_iterations"]

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}({self.triggers}, "
            f"threshold={self.threshold.total_seconds()}, "
            f"max_iterations={self.max_iterations})"
        )
@attrs.define
class OrTrigger(BaseCombiningTrigger):
    """
    Fires on every fire time of every trigger in chronological order.

    If two or more triggers produce the same fire time, it will only be used once.

    This trigger will be finished when none of the enclosed triggers can produce any new
    fire times.

    :param triggers: triggers to combine
    """

    def next(self) -> datetime | None:
        # Prime the cached fire times on first use
        if not self._next_fire_times:
            self._next_fire_times = [trigger.next() for trigger in self.triggers]

        # The earliest pending fire time wins; None means every child trigger
        # is exhausted and this trigger is finished.
        pending = [ft for ft in self._next_fire_times if ft is not None]
        earliest: datetime | None = min(pending, default=None)
        if earliest is not None:
            # Advance every trigger that produced the winning time so the
            # same instant is never yielded twice.
            for index, fire_time in enumerate(self._next_fire_times):
                if fire_time == earliest:
                    self._next_fire_times[index] = self.triggers[index].next()

        return earliest

    def __setstate__(self, state: dict[str, Any]) -> None:
        require_state_version(self, state, 1)
        super().__setstate__(state)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.triggers})"
| mit | b0fae89c69fe784be8618027f6b37668 | 37.226667 | 88 | 0.62766 | 4.225497 | false | false | false | false |
agronholm/apscheduler | examples/web/wsgi_noframework.py | 1 | 1413 | """
Example demonstrating use with WSGI (raw WSGI application, no framework).
Requires the "postgresql" and "redis" services to be running.
To install prerequisites: pip install sqlalchemy psycopg2 uwsgi
To run: uwsgi -T --http :8000 --wsgi-file wsgi_noframework.py
It should print a line on the console on a one-second interval while running a
basic web app at http://localhost:8000.
"""
from __future__ import annotations
from datetime import datetime
from sqlalchemy.future import create_engine
from apscheduler.datastores.sqlalchemy import SQLAlchemyDataStore
from apscheduler.eventbrokers.redis import RedisEventBroker
from apscheduler.schedulers.sync import Scheduler
from apscheduler.triggers.interval import IntervalTrigger
def tick():
    """Print a timestamped greeting; scheduled to run once per second."""
    now = datetime.now()
    print("Hello, the time is", now)
def application(environ, start_response):
    """Minimal WSGI application: always respond 200 with a plain-text greeting."""
    body = b"Hello, World!"
    headers = [
        ("Content-Type", "text/plain"),
        ("Content-Length", str(len(body))),
    ]
    start_response("200 OK", headers)
    # WSGI expects an iterable of byte strings.
    return [body]
# Wire the scheduler to its persistent job store (PostgreSQL) and to a
# cross-process event broker (Redis).
engine = create_engine("postgresql+psycopg2://postgres:secret@localhost/testdb")
data_store = SQLAlchemyDataStore(engine)
event_broker = RedisEventBroker.from_url("redis://localhost")
scheduler = Scheduler(data_store, event_broker)
# id="tick" makes the schedule idempotent across restarts: re-running the
# script replaces the existing schedule instead of adding a duplicate.
scheduler.add_schedule(tick, IntervalTrigger(seconds=1), id="tick")
# Run in a background thread so uWSGI can keep serving `application`.
scheduler.start_in_background()
| mit | eb493b295c1041aeb94a159c52831713 | 31.860465 | 80 | 0.7615 | 3.881868 | false | false | false | false |
agronholm/apscheduler | src/apscheduler/_validators.py | 1 | 5069 | from __future__ import annotations
import sys
from datetime import date, datetime, timedelta, timezone, tzinfo
from typing import Any
from attrs import Attribute
from tzlocal import get_localzone
from ._exceptions import DeserializationError
from .abc import Trigger
if sys.version_info >= (3, 9):
from zoneinfo import ZoneInfo
else:
from backports.zoneinfo import ZoneInfo
def as_int(value) -> int | None:
    """Coerce *value* to an ``int``, passing ``None`` through unchanged."""
    return None if value is None else int(value)
def as_timezone(value: str | tzinfo | None) -> tzinfo:
    """
    Convert the value into a tzinfo object.

    ``None`` and the special string ``'local'`` both resolve to the local
    timezone.

    :param value: the value to be converted
    :return: a timezone object
    """
    if value is None or value == "local":
        return get_localzone()

    if isinstance(value, str):
        return ZoneInfo(value)

    if isinstance(value, tzinfo):
        # Normalize the stdlib UTC singleton to a ZoneInfo for consistency
        return ZoneInfo("UTC") if value is timezone.utc else value

    raise TypeError(
        f"Expected tzinfo instance or timezone name, got "
        f"{value.__class__.__qualname__} instead"
    )
def as_date(value: date | str | None) -> date | None:
    """
    Convert the value to a date.

    :param value: an ISO 8601 date string, a date, or ``None``
    :return: a date object, or ``None`` if ``None`` was given
    """
    if value is None:
        return None

    # A str is never a date instance, so the order of these checks does
    # not affect behaviour.
    if isinstance(value, date):
        return value

    if isinstance(value, str):
        return date.fromisoformat(value)

    raise TypeError(
        f"Expected string or date, got {value.__class__.__qualname__} instead"
    )
def as_timestamp(value: datetime | None) -> float | None:
    """Return the POSIX timestamp of *value*, or ``None`` for ``None``."""
    return None if value is None else value.timestamp()
def as_ordinal_date(value: date | None) -> int | None:
    """Return the proleptic Gregorian ordinal of *value*, or ``None`` for ``None``."""
    return None if value is None else value.toordinal()
def as_aware_datetime(value: datetime | str | None) -> datetime | None:
    """
    Convert the value to a timezone aware datetime.

    :param value: a datetime, an ISO 8601 representation of a datetime, or ``None``
    :return: a timezone aware datetime, or ``None`` if ``None`` was given
    """
    if value is None:
        return None

    if isinstance(value, str):
        # Rewrite a trailing "Z" as "+00:00" because fromisoformat() does not
        # accept the "Z" suffix on older Python versions.
        if value.upper().endswith("Z"):
            value = value[:-1] + "+00:00"

        value = datetime.fromisoformat(value)

    if isinstance(value, datetime):
        # Naive datetimes are interpreted in the local timezone.
        return value if value.tzinfo else value.replace(tzinfo=get_localzone())

    raise TypeError(
        f"Expected string or datetime, got {value.__class__.__qualname__} instead"
    )
def positive_number(instance, attribute, value) -> None:
    """attrs-style validator: reject values that are not strictly positive."""
    if value <= 0:
        raise ValueError(f"Expected positive number, got {value} instead")
def non_negative_number(instance, attribute, value) -> None:
    """attrs-style validator: reject negative values (zero is allowed)."""
    if value < 0:
        raise ValueError(f"Expected non-negative number, got {value} instead")
def as_positive_integer(value, name: str) -> int:
    """Validate that *value* is a strictly positive ``int`` and return it.

    :raises TypeError: if *value* is not an integer
    :raises ValueError: if *value* is an integer but not positive
    """
    if not isinstance(value, int):
        raise TypeError(
            f"{name} must be an integer, got {value.__class__.__name__} instead"
        )

    if value <= 0:
        raise ValueError(f"{name} must be positive")

    return value
def as_timedelta(value: timedelta | float) -> timedelta:
    """
    Convert the value into a ``timedelta``.

    Numbers are interpreted as a number of seconds. ``None`` is passed
    through unchanged (some callers use this converter on optional fields).

    :param value: a timedelta, a number of seconds, or ``None``
    :return: a timedelta (or ``None`` if ``None`` was given)
    :raises TypeError: if the value is of any other type
    """
    if value is None or isinstance(value, timedelta):
        return value

    if isinstance(value, (int, float)):
        return timedelta(seconds=value)

    # Previously this fell through (the raise was commented out) and the
    # function silently returned None for invalid input, deferring the
    # failure to some distant later comparison.
    raise TypeError(
        f"Expected a timedelta or number of seconds, got "
        f"{value.__class__.__name__} instead"
    )
def as_list(value, element_type: type, name: str) -> list:
    """
    Convert *value* to a list and check the type of every element.

    :param value: an iterable of elements
    :param element_type: the type each element must be an instance of
    :param name: name of the value, used in error messages
    :return: a new list containing the elements
    :raises TypeError: if any element is not an instance of *element_type*
    """
    value = list(value)
    for i, element in enumerate(value):
        if not isinstance(element, element_type):
            # The closing parenthesis after the type name was previously
            # missing, producing a malformed error message.
            raise TypeError(
                f"Element at index {i} of {name} is not of the expected type "
                f"({element_type.__name__})"
            )

    return value
def aware_datetime(instance: Any, attribute: Attribute, value: datetime) -> None:
    """attrs validator: require *value* to be a timezone aware datetime."""
    # tzinfo instances are always truthy, so "is None" is equivalent to the
    # truthiness check.
    if value.tzinfo is None:
        raise ValueError(f"{attribute.name} must be a timezone aware datetime")
def require_state_version(
    trigger: Trigger, state: dict[str, Any], max_version: int
) -> None:
    """
    Ensure a serialized trigger state is not newer than this class supports.

    :raises DeserializationError: if the ``version`` key is missing or the
        stored version exceeds *max_version*
    """
    try:
        version = state["version"]
    except KeyError as exc:
        raise DeserializationError(
            'Missing "version" key in the serialized state'
        ) from exc

    if version > max_version:
        raise DeserializationError(
            f"{trigger.__class__.__name__} received a serialized state with "
            f'version {state["version"]}, but it only supports up to version '
            f"{max_version}. This can happen when an older version of APScheduler "
            f"is being used with a data store that was previously used with a "
            f"newer APScheduler version."
        )
| mit | a90ed389bf2a06c4c4cff034ddc0e10c | 27.161111 | 88 | 0.618465 | 4.151515 | false | false | false | false |
lmjohns3/theanets | test/util.py | 1 | 2671 | '''Helper code for theanets unit tests.'''
import numpy as np
np.random.seed(13)  # deterministic fixtures across test runs
# Shared dimensions for all generated test networks and datasets.
NUM_EXAMPLES = 64
NUM_INPUTS = 7
NUM_HID1 = 8
NUM_HID2 = 12
NUM_OUTPUTS = 5
NUM_CLASSES = 6
# Random feature / target arrays; weights are absolute values so they are
# valid non-negative sample weights.
INPUTS = np.random.randn(NUM_EXAMPLES, NUM_INPUTS).astype('f')
INPUT_WEIGHTS = abs(np.random.randn(NUM_EXAMPLES, NUM_INPUTS)).astype('f')
OUTPUTS = np.random.randn(NUM_EXAMPLES, NUM_OUTPUTS).astype('f')
OUTPUT_WEIGHTS = abs(np.random.randn(NUM_EXAMPLES, NUM_OUTPUTS)).astype('f')
CLASSES = np.random.randint(NUM_CLASSES, size=NUM_EXAMPLES).astype('i')
CLASS_WEIGHTS = abs(np.random.rand(NUM_EXAMPLES)).astype('f')
# Dataset bundles: AE = autoencoder, CLF = classifier, REG = regressor;
# the W-prefixed variants carry per-sample/per-output weights.
AE_DATA = [INPUTS]
WAE_DATA = [INPUTS, INPUT_WEIGHTS]
AE_LAYERS = [NUM_INPUTS, NUM_HID1, NUM_HID2, NUM_INPUTS]
CLF_DATA = [INPUTS, CLASSES]
WCLF_DATA = [INPUTS, CLASSES, CLASS_WEIGHTS]
CLF_LAYERS = [NUM_INPUTS, NUM_HID1, NUM_HID2, NUM_CLASSES]
REG_DATA = [INPUTS, OUTPUTS]
WREG_DATA = [INPUTS, OUTPUTS, OUTPUT_WEIGHTS]
REG_LAYERS = [NUM_INPUTS, NUM_HID1, NUM_HID2, NUM_OUTPUTS]
class RNN:
    """Fixtures for recurrent models: arrays gain a time axis of NUM_TIMES."""

    NUM_TIMES = 31
    INPUTS = np.random.randn(NUM_EXAMPLES, NUM_TIMES, NUM_INPUTS).astype('f')
    INPUT_WEIGHTS = abs(
        np.random.randn(NUM_EXAMPLES, NUM_TIMES, NUM_INPUTS)).astype('f')
    OUTPUTS = np.random.randn(NUM_EXAMPLES, NUM_TIMES, NUM_OUTPUTS).astype('f')
    OUTPUT_WEIGHTS = abs(
        np.random.randn(NUM_EXAMPLES, NUM_TIMES, NUM_OUTPUTS)).astype('f')
    # NOTE(review): randn(...).astype('i') yields labels in roughly
    # {-2..2} including negatives, unlike the module-level CLASSES which
    # uses randint -- confirm this is intentional for the RNN tests.
    CLASSES = np.random.randn(NUM_EXAMPLES, NUM_TIMES).astype('i')
    CLASS_WEIGHTS = abs(np.random.rand(NUM_EXAMPLES, NUM_TIMES)).astype('f')
    AE_DATA = [INPUTS]
    WAE_DATA = [INPUTS, INPUT_WEIGHTS]
    CLF_DATA = [INPUTS, CLASSES]
    WCLF_DATA = [INPUTS, CLASSES, CLASS_WEIGHTS]
    REG_DATA = [INPUTS, OUTPUTS]
    WREG_DATA = [INPUTS, OUTPUTS, OUTPUT_WEIGHTS]
class CNN:
    """Fixtures for convolutional models: 2-D inputs plus filter geometry."""

    NUM_WIDTH = 13
    NUM_HEIGHT = 15
    FILTER_WIDTH = 4
    FILTER_HEIGHT = 3
    FILTER_SIZE = (FILTER_WIDTH, FILTER_HEIGHT)
    INPUTS = np.random.randn(
        NUM_EXAMPLES, NUM_WIDTH, NUM_HEIGHT, NUM_INPUTS).astype('f')
    # Targets are reused from the module-level (non-spatial) fixtures.
    CLF_DATA = [INPUTS, CLASSES]
    WCLF_DATA = [INPUTS, CLASSES, CLASS_WEIGHTS]
    REG_DATA = [INPUTS, OUTPUTS]
    WREG_DATA = [INPUTS, OUTPUTS, OUTPUT_WEIGHTS]
def assert_progress(model, data, algo='sgd'):
    """Train *model* for two steps and check that the loss improves.

    NOTE(review): the first assertion compares the second *training* loss
    against the first *validation* loss -- presumably intentional because
    validation only runs on the first step (second assertion); confirm
    against the theanets trainer semantics.
    """
    trainer = model.itertrain(
        data, algo=algo, momentum=0.5, batch_size=3, max_gradient_norm=1)
    train0, valid0 = next(trainer)
    train1, valid1 = next(trainer)
    assert train1['loss'] < valid0['loss']   # should have made progress!
    assert valid1['loss'] == valid0['loss']  # no new validation occurred
def assert_shape(actual, expected):
    """Check an array shape; a bare int expands to (NUM_EXAMPLES, int)."""
    full_expected = expected if isinstance(expected, tuple) else (NUM_EXAMPLES, expected)
    assert actual == full_expected
| mit | cbfb54dfac6a8eef6d67420ac4d6c02d | 30.05814 | 79 | 0.673156 | 2.835456 | false | true | false | false |
uccser/cs-unplugged | csunplugged/tests/topics/TopicsTestDataGenerator.py | 1 | 12032 | """Create test data for topic tests."""
import os.path
import yaml
from topics.models import (
Topic,
Lesson,
LessonNumber,
AgeGroup,
CurriculumIntegration,
CurriculumArea,
ProgrammingChallenge,
ProgrammingChallengeDifficulty,
ProgrammingChallengeLanguage,
ProgrammingChallengeImplementation,
ProgrammingChallengeNumber,
LearningOutcome,
GlossaryTerm,
ResourceDescription,
ClassroomResource,
)
from plugging_it_in.models import TestCase
class TopicsTestDataGenerator:
    """Class for generating test data for topics."""

    def __init__(self):
        """Create TopicsTestDataGenerator object."""
        self.BASE_PATH = "tests/topics/"
        self.LOADER_ASSET_PATH = os.path.join(self.BASE_PATH, "loaders/assets/")

    def load_yaml_file(self, yaml_file_path):
        """Load a yaml file.

        Args:
            yaml_file_path: The path to a given yaml file (str).

        Returns:
            Contents of a yaml file.
        """
        # Use a context manager so the file handle is always closed, and
        # safe_load so arbitrary Python objects cannot be constructed from
        # the YAML (calling yaml.load without a Loader is also deprecated).
        with open(yaml_file_path, encoding="UTF-8") as yaml_file:
            return yaml.safe_load(yaml_file)

    def create_integration(self, topic, number, lessons=None, curriculum_areas=None):
        """Create curriculum integration object.

        Args:
            topic: The related Topic object (Topic).
            number: Identifier of the topic (int).
            lessons: List of prerequisite lessons (list).
            curriculum_areas: List of curriculum areas (list).

        Returns:
            CurriculumIntegration object.
        """
        integration = CurriculumIntegration(
            topic=topic,
            slug="integration-{}".format(number),
            name="Integration {}".format(number),
            number=number,
            content="<p>Content for integration {}.</p>".format(number),
        )
        integration.save()
        if lessons:
            for lesson in lessons:
                integration.prerequisite_lessons.add(lesson)
        if curriculum_areas:
            for curriculum_area in curriculum_areas:
                integration.curriculum_areas.add(curriculum_area)
        return integration

    def create_curriculum_area(self, number, parent=None):
        """Create curriculum area object.

        Args:
            number: Identifier of the area (int).
            parent: Parent of the curriculum area (CurriculumArea).

        Returns:
            CurriculumArea object.
        """
        area = CurriculumArea(
            slug="area-{}".format(number),
            name="Area {}".format(number),
            colour="colour-{}".format(number),
            number=number,
            parent=parent,
            languages=["en"],
        )
        area.save()
        return area

    def create_topic(self, number):
        """Create topic object.

        Args:
            number: Identifier of the topic (int).

        Returns:
            Topic object.
        """
        topic = Topic(
            slug="topic-{}".format(number),
            name="Topic {}".format(number),
            content="<p>Content for topic {}.</p>".format(number),
            languages=["en"],
        )
        topic.save()
        return topic

    def create_lesson(self, topic, number, age_groups=None):
        """Create lesson object.

        Args:
            topic: The related Topic object (Topic).
            number: Identifier of the topic (int).
            age_groups: AgeGroup object(s) the lesson belongs to; a single
                object is wrapped into a list.

        Returns:
            Lesson object.
        """
        if age_groups and not isinstance(age_groups, list):
            age_groups = [age_groups]
        lesson = Lesson(
            topic=topic,
            slug="lesson-{}".format(number),
            name="Lesson {} ({} to {})".format(
                number,
                age_groups[0].ages[0] if age_groups else "none",
                age_groups[-1].ages[1] if age_groups else "none"
            ),
            duration=number,
            content="<p>Content for lesson {}.</p>".format(number),
            languages=["en"],
        )
        lesson.save()
        if age_groups:
            for age_group in age_groups:
                # The through-model records the lesson number per age group.
                LessonNumber(
                    age_group=age_group,
                    lesson=lesson,
                    number=number,
                ).save()
        return lesson

    def create_age_group(self, min_age, max_age):
        """Create AgeGroup object.

        Args:
            min_age: the minumum age for the group (int).
            max_age: the maximum age for the group (int).

        Returns:
            AgeGroup object.
        """
        age_group = AgeGroup(
            slug="{}-{}".format(min_age, max_age),
            ages=(min_age, max_age),
            languages=["en"],
        )
        age_group.save()
        return age_group

    def create_difficulty_level(self, number):
        """Create difficulty level object.

        Args:
            number: Identifier of the level (int).

        Returns:
            ProgrammingChallengeDifficulty object.
        """
        difficulty = ProgrammingChallengeDifficulty(
            # Use the given number as the level; it was previously
            # hard-coded to "1", which made every generated difficulty
            # share the same level despite distinct names.
            level=number,
            name="Difficulty-{}".format(number),
            languages=["en"],
        )
        difficulty.save()
        return difficulty

    def create_programming_language(self, number):
        """Create programming language object.

        Args:
            number: Identifier of the language (int).

        Returns:
            ProgrammingChallengeLanguage object.
        """
        language = ProgrammingChallengeLanguage(
            slug="language-{}".format(number),
            name="Language {}".format(number),
            number=number,
            languages=["en"],
        )
        language.save()
        return language

    def create_programming_challenge(self, topic, number,
                                     difficulty,
                                     challenge_set_number=1,
                                     challenge_number=1,
                                     content="<p>Example content.</p>",
                                     testing_examples="<p>Testing example</p>",
                                     extra_challenge="<p>Example challenge.</p>",
                                     ):
        """Create programming challenge object.

        Args:
            topic: Topic related to the challenge.
            number: Identifier of the challenge (int).
            difficulty: Difficulty related to the challenge
                (ProgrammingChallengeDifficulty).
            challenge_set_number: Integer of challenge set number (int).
            challenge_number: Integer of challenge number (int).
            content: Text of challenge (str).
            testing_examples: Text of testing examples (str).
            extra_challenge: Text of extra challenge (str).

        Returns:
            ProgrammingChallenge object.
        """
        challenge = ProgrammingChallenge(
            topic=topic,
            slug="challenge-{}".format(number),
            name="Challenge {}.{}: {}".format(
                challenge_set_number,
                challenge_number,
                number,
            ),
            challenge_set_number=challenge_set_number,
            challenge_number=challenge_number,
            content=content,
            testing_examples=testing_examples,
            extra_challenge=extra_challenge,
            difficulty=difficulty,
            languages=["en"],
        )
        challenge.save()
        return challenge

    def create_programming_challenge_implementation(self, topic,
                                                    language,
                                                    challenge,
                                                    expected_result="<p>Example result.</p>",
                                                    hints="<p>Example hints.</p>",
                                                    solution="<p>Example solution.</p>",
                                                    ):
        """Create programming challenge implementation object.

        Args:
            topic: Topic related to the implementation.
            language: Language related to the implementation
                (ProgrammingChallengeLanguage).
            challenge: Challenge related to the implementation
                (ProgrammingChallenge).
            expected_result: Text of expected_result (str).
            hints: Text of hints (str).
            solution: Text of solution (str).

        Returns:
            ProgrammingChallengeImplementation object.
        """
        implementation = ProgrammingChallengeImplementation(
            topic=topic,
            language=language,
            challenge=challenge,
            expected_result=expected_result,
            hints=hints,
            solution=solution,
            languages=["en"],
        )
        implementation.save()
        return implementation

    def create_learning_outcome(self, number):
        """Create learning outcome object.

        Args:
            number: Identifier of the outcome (int).

        Returns:
            LearningOutcome object.
        """
        outcome = LearningOutcome(
            slug="outcome-{}".format(number),
            text="Outcome {}".format(number),
            languages=["en"],
        )
        outcome.save()
        return outcome

    def create_classroom_resource(self, number):
        """Create classroom resource object.

        Args:
            number: Identifier of the resource (int).

        Returns:
            ClassroomResource object.
        """
        resource = ClassroomResource(
            slug="resource-{}".format(number),
            description="Resource {}".format(number),
            languages=["en"],
        )
        resource.save()
        return resource

    def add_challenge_lesson_relationship(self, challenge, lesson, set_number, number):
        """Add relationship between challenge and lesson objects.

        Args:
            challenge: Challenge to add relationship between
                (ProgrammingChallenge).
            lesson: Lesson to add relationship between (Lesson).
            set_number: Number to display as challenge set number (int).
            number: Number to display as challenge number (int).
        """
        relationship = ProgrammingChallengeNumber(
            programming_challenge=challenge,
            lesson=lesson,
            challenge_set_number=set_number,
            challenge_number=number,
        )
        relationship.save()

    def create_glossary_term(self, number):
        """Create glossary term object.

        Args:
            number: Identifier of the glossary term (int).

        Returns:
            GlossaryTerm object.
        """
        term = GlossaryTerm(
            slug="term-{}".format(number),
            term="Term {}".format(number),
            definition="Defintion for term {}".format(number),
        )
        term.save()
        return term

    def add_lesson_resource_relationship(self, lesson, resource, number):
        """Add relationship between challenge and lesson objects.

        Args:
            lesson (Lesson): Lesson to add relationship between.
            resource (Resource): Resource to add relationship between.
            number (int): Number to display in description.
        """
        relationship = ResourceDescription(
            lesson=lesson,
            resource=resource,
            description="Description {}".format(number),
        )
        relationship.save()

    def create_programming_challenge_test_case(self, number, challenge):
        """Create test case object.

        Args:
            number: Identifier of the test case (int).
            challenge: Challenge that the test case is for (Challenge object).

        Returns:
            TestCase object.
        """
        test_case = TestCase(
            number=number,
            challenge=challenge,
        )
        test_case.save()
        return test_case
| mit | 417acf73f3859f14b6ae6374f92445ed | 31.085333 | 93 | 0.53499 | 4.957561 | false | true | false | false |
uccser/cs-unplugged | csunplugged/topics/management/commands/_LessonsLoader.py | 1 | 10682 | """Custom loader for loading lessons."""
from django.core.exceptions import ObjectDoesNotExist
from utils.TranslatableModelLoader import TranslatableModelLoader
from utils.convert_heading_tree_to_dict import convert_heading_tree_to_dict
from utils.errors.MissingRequiredFieldError import MissingRequiredFieldError
from utils.errors.KeyNotFoundError import KeyNotFoundError
from utils.errors.InvalidYAMLValueError import InvalidYAMLValueError
from topics.models import (
ProgrammingChallenge,
ProgrammingChallengeNumber,
LearningOutcome,
Resource,
ResourceDescription,
ClassroomResource
)
class LessonsLoader(TranslatableModelLoader):
    """Custom loader for loading lessons."""

    def __init__(self, topic, **kwargs):
        """Create the loader for loading lessons.

        Args:
            topic: Object of Topic model (Topic) that the loaded lessons
                belong to.
        """
        super().__init__(**kwargs)
        self.topic = topic

    def load(self):
        """Load the content for a single lesson.

        Raises:
            KeyNotFoundError: when no object can be found with the matching attribute.
            InvalidYAMLValueError: when provided value is not valid.
            MissingRequiredFieldError: when a value for a required model field cannot be
                found in the config file.
        """
        lessons_structure = self.load_yaml_file(self.structure_file_path)
        for (lesson_slug, lesson_structure) in lessons_structure.items():
            # A slug with no mapping underneath it means the lesson entry
            # is empty in the YAML structure file.
            if lesson_structure is None:
                raise MissingRequiredFieldError(
                    self.structure_file_path,
                    ["number"],
                    "Lesson"
                )

            lesson_translations = self.get_blank_translation_dictionary()

            # The lesson's name and content come from the per-language
            # Markdown file named after the lesson slug.
            content_filename = "{}.md".format(lesson_slug)
            content_translations = self.get_markdown_translations(content_filename)
            for language, content in content_translations.items():
                lesson_translations[language]["content"] = content.html_string
                lesson_translations[language]["name"] = content.title
                if content.heading_tree:
                    heading_tree = convert_heading_tree_to_dict(content.heading_tree)
                    lesson_translations[language]["heading_tree"] = heading_tree

            # Optional Markdown sections (no title heading required).
            if "computational-thinking-links" in lesson_structure:
                filename = lesson_structure["computational-thinking-links"]
                ct_links_translations = self.get_markdown_translations(
                    filename,
                    heading_required=False,
                    remove_title=False,
                )
                for language, content in ct_links_translations.items():
                    lesson_translations[language]["computational_thinking_links"] = content.html_string

            if "programming-challenges-description" in lesson_structure:
                filename = lesson_structure["programming-challenges-description"]
                pcd_translations = self.get_markdown_translations(
                    filename,
                    heading_required=False,
                    remove_title=False,
                )
                for language, content in pcd_translations.items():
                    lesson_translations[language]["programming_challenges_description"] = content.html_string

            if "duration" in lesson_structure:
                lesson_duration = lesson_structure["duration"]
            else:
                lesson_duration = None

            lesson, lesson_created = self.topic.lessons.update_or_create(
                slug=lesson_slug,
                defaults={
                    'duration': lesson_duration,
                }
            )

            self.populate_translations(lesson, lesson_translations)
            self.mark_translation_availability(lesson, required_fields=["name", "content"])
            lesson.save()

            # Add programming challenges
            # (skipped by lite loads, which only need the core lesson data)
            if "programming-challenges" in lesson_structure and not self.lite_loader:
                lesson.programming_challenges.clear()
                programming_challenge_slugs = lesson_structure["programming-challenges"]
                if programming_challenge_slugs is not None:
                    # Check all slugs are valid
                    for programming_challenge_slug in programming_challenge_slugs:
                        try:
                            ProgrammingChallenge.objects.get(
                                slug=programming_challenge_slug,
                                topic=self.topic
                            )
                        except ObjectDoesNotExist:
                            raise KeyNotFoundError(
                                self.structure_file_path,
                                programming_challenge_slug,
                                "Programming Challenges"
                            )

                    # Store number of challenge in relationship with lesson.
                    # If three linked challenges have numbers 1.1, 4.2, and 4.5
                    # They will be stored as 1.1, 2.1, and 2.2 respectively.

                    # Order challenges for numbering.
                    programming_challenges = ProgrammingChallenge.objects.filter(
                        slug__in=programming_challenge_slugs,
                        topic=self.topic
                    ).order_by("challenge_set_number", "challenge_number")

                    # Setup variables for numbering.
                    display_set_number = 0
                    last_set_number = -1
                    display_number = 0
                    last_number = -1

                    # For each challenge, increment number variables if original
                    # numbers are different.
                    for programming_challenge in programming_challenges:
                        if programming_challenge.challenge_set_number > last_set_number:
                            display_set_number += 1
                            display_number = 0
                            last_number = -1
                        if programming_challenge.challenge_number > last_number:
                            display_number += 1
                        last_set_number = programming_challenge.challenge_set_number
                        last_number = programming_challenge.challenge_number

                        # Create and save relationship between lesson and
                        # challenge that contains challenge number.
                        relationship = ProgrammingChallengeNumber(
                            programming_challenge=programming_challenge,
                            lesson=lesson,
                            challenge_set_number=display_set_number,
                            challenge_number=display_number,
                        )
                        relationship.save()

            # Add learning outcomes
            if "learning-outcomes" in lesson_structure:
                learning_outcome_slugs = lesson_structure.get("learning-outcomes", None)
                # The key being present with an empty value is an error.
                if learning_outcome_slugs is None:
                    raise InvalidYAMLValueError(
                        self.structure_file_path,
                        ["learning-outcomes"],
                        "Lesson"
                    )
                else:
                    for learning_outcome_slug in learning_outcome_slugs:
                        try:
                            learning_outcome = LearningOutcome.objects.get(
                                slug=learning_outcome_slug
                            )
                            lesson.learning_outcomes.add(learning_outcome)
                        except ObjectDoesNotExist:
                            raise KeyNotFoundError(
                                self.structure_file_path,
                                learning_outcome_slug,
                                "Learning Outcomes"
                            )

            # Add classroom resources
            if "classroom-resources" in lesson_structure:
                classroom_resources_slugs = lesson_structure["classroom-resources"]
                if classroom_resources_slugs is not None:
                    for classroom_resources_slug in classroom_resources_slugs:
                        try:
                            classroom_resource = ClassroomResource.objects.get(
                                slug=classroom_resources_slug
                            )
                            lesson.classroom_resources.add(classroom_resource)
                        except ObjectDoesNotExist:
                            raise KeyNotFoundError(
                                self.structure_file_path,
                                classroom_resources_slug,
                                "Classroom Resources"
                            )

            # Add generated resources
            if "generated-resources" in lesson_structure:
                resources = lesson_structure["generated-resources"]
                if resources is not None:
                    # Per-lesson YAML file holding the translated description
                    # shown next to each generated resource.
                    relationship_strings_filename = "{}-resource-descriptions.yaml".format(lesson_slug)
                    relationship_translations = self.get_yaml_translations(
                        relationship_strings_filename,
                    )
                    for resource_slug in resources:
                        relationship_translation = relationship_translations.get(resource_slug, dict())
                        try:
                            resource = Resource.objects.get(
                                slug=resource_slug
                            )
                        except ObjectDoesNotExist:
                            raise KeyNotFoundError(
                                self.structure_file_path,
                                resource_slug,
                                "Resources"
                            )
                        relationship, created = ResourceDescription.objects.update_or_create(
                            resource=resource,
                            lesson=lesson,
                        )
                        self.populate_translations(relationship, relationship_translation)
                        self.mark_translation_availability(relationship, required_fields=["description"])
                        relationship.save()

            if lesson_created:
                term = 'Created'
            else:
                term = 'Updated'
            self.log(f'{term} lesson: {lesson.__str__()}', 2)
| mit | 6827c93deccadccd97f38a5a5090390b | 45.043103 | 109 | 0.521251 | 5.5956 | false | false | false | false |
uccser/cs-unplugged | csunplugged/resources/utils/BaseResourceGenerator.py | 1 | 9233 | """Class for generator for a resource."""
import os.path
from abc import ABC, abstractmethod
from resources.utils.resize_encode_resource_images import resize_encode_resource_images
from utils.errors.ThumbnailPageNotFoundError import ThumbnailPageNotFoundError
from utils.errors.MoreThanOneThumbnailPageFoundError import MoreThanOneThumbnailPageFoundError
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from resources.utils.resource_parameters import (
EnumResourceParameter,
TextResourceParameter,
IntegerResourceParameter,
)
# Translated display names for the supported paper sizes, keyed by the value
# stored in the "paper_size" resource parameter.
PAPER_SIZE_VALUES = {
    "a4": _("A4"),
    "letter": _("US Letter"),
}
class BaseResourceGenerator(ABC):
    """Class for generator for a resource."""

    # Set to True in subclasses that support printing multiple copies of the
    # resource in one PDF; enables the "copies" local option and the
    # per-copy loop in pdf().
    copies = False  # Default
def __init__(self, requested_options=None):
"""Construct BaseResourceGenerator instance.
Args:
requested_options: QueryDict of requested_options (QueryDict).
"""
self.options = self.get_options()
self.options.update(self.get_local_options())
if requested_options:
self.process_requested_options(requested_options)
@classmethod
def get_options(cls):
"""Get options dictionary, including additional subclass options.
Returns:
Dictionary, of form {option name: ResourceParameter object, ...}
"""
options = cls.get_additional_options()
options.update({
"paper_size": EnumResourceParameter(
name="paper_size",
description=_("Paper Size"),
values=PAPER_SIZE_VALUES,
default="a4"
),
})
return options
@classmethod
def get_option_defaults(cls):
"""Get dictionary of default option values.
Returns:
Dictionary of option IDs to default values.
"""
defaults = dict()
for key, values in cls.get_options().items():
defaults[key] = values.default
return defaults
@classmethod
def get_local_options(cls):
"""Get local options dictionary, including additional subclass local options.
These options are only included when running locally.
Returns:
Dictionary, of form {option name: ResourceParameter object, ...}
"""
local_options = cls.get_additional_local_options()
local_options = {
"header_text": TextResourceParameter(
name="header_text",
description=_("Header Text"),
placeholder=_("Example School: Room Four"),
required=False
),
}
if cls.copies:
local_options.update({
"copies": IntegerResourceParameter(
name="copies",
description=_("Number of Copies"),
min_val=1,
max_val=50,
default=1,
required=False
),
})
return local_options
@classmethod
def get_additional_options(cls):
"""Return additional options, for use on subclass.
Returns:
Dictionary, of form {option name: ResourceParameter object, ...}
"""
return {}
@classmethod
def get_additional_local_options(cls):
"""Return additional options, for use on subclass.
Returns:
Dictionary, of form {option name: ResourceParameter object, ...}
"""
return {}
    @abstractmethod
    def data(self):
        """Abstract method to be implemented by subclasses.

        Returns:
            Either a dict of template context for one page, or a list of
            such dicts (one per page); see pdf() for how it is consumed.
        """
        raise NotImplementedError  # pragma: no cover
@property
def subtitle(self):
"""Return the subtitle string of the resource.
Used after the resource name in the filename, and
also on the resource image.
Returns:
Text for subtitle (str).
"""
return str(self.options["paper_size"].value)
def process_requested_options(self, requested_options):
"""Convert requested options to usable types.
Args:
requested_options: QueryDict of requested_options (QueryDict).
Method does the following:
- Update all values through str_to_bool utility function.
- Raises 404 error is requested option cannot be found.
- Raises 404 is option given with invalid value.
Returns:
QueryDict of converted requested options (QueryDict).
"""
# requested_options = requested_options.copy()
for option_name, option in self.options.items():
values = requested_options.getlist(option_name)
option.process_requested_values(values)
def pdf(self, resource_name):
    """Return PDF for resource request.

    The PDF is returned (compared to the thumbnail which is directly saved)
    as the PDF may be either saved to the disk, or returned in a HTTP
    response.

    Args:
        resource_name: Name of the resource (str).

    Returns:
        Tuple of (PDF file contents, filename).
    """
    # Only import weasyprint when required as production environment
    # does not have it installed.
    from weasyprint import HTML, CSS
    context = dict()
    context["resource"] = resource_name
    context["header_text"] = self.options["header_text"].value
    context["paper_size"] = self.options["paper_size"].value

    if self.copies:
        num_copies = self.options["copies"].value
    else:
        num_copies = 1
    context["all_data"] = []
    # self.data() is called once per copy, so each copy can contain
    # freshly generated data.
    for _ in range(num_copies):
        copy_data = self.data()
        if not isinstance(copy_data, list):
            copy_data = [copy_data]
        copy_data = resize_encode_resource_images(
            self.options["paper_size"].value,
            copy_data
        )
        context["all_data"].append(copy_data)

    filename = "{} ({})".format(resource_name, self.subtitle)
    context["filename"] = filename

    pdf_html = render_to_string("resources/base-resource-pdf.html", context)
    html = HTML(string=pdf_html, base_url=settings.BUILD_ROOT)
    css_file = os.path.join(settings.BUILD_ROOT, "css/print-resource-pdf.css")
    # BUGFIX: close the CSS file promptly instead of leaking the handle.
    with open(css_file, encoding="UTF-8") as css_source:
        css_string = css_source.read()
    base_css = CSS(string=css_string)
    return (html.write_pdf(stylesheets=[base_css]), filename)
def save_thumbnail(self, resource_name, path):
    """Generate the thumbnail for a resource request and write it to disk.

    Args:
        resource_name: Name of the resource (str).
        path: The path to write the thumbnail to (str).
    """
    self.write_thumbnail(self.generate_thumbnail(), resource_name, path)
def generate_thumbnail(self):
    """Create thumbnail for resource request.

    Raises:
        ThumbnailPageNotFoundError: If resource with more than one page does
            not provide a thumbnail page.
        MoreThanOneThumbnailPageFoundError: If resource provides more than
            one page as the thumbnail.

    Returns:
        Dictionary of thumbnail data.
    """
    pages = self.data()
    if not isinstance(pages, list):
        pages = [pages]
    if len(pages) > 1:
        # A multi-page resource must flag exactly one page as the thumbnail.
        pages = [page for page in pages if page.get("thumbnail")]
        if len(pages) == 0:
            raise ThumbnailPageNotFoundError(self)
        if len(pages) > 1:
            raise MoreThanOneThumbnailPageFoundError(self)
    encoded_pages = resize_encode_resource_images(
        self.options["paper_size"].value,
        pages
    )
    return encoded_pages[0]
def write_thumbnail(self, thumbnail_data, resource_name, path):
    """Save generated thumbnail.

    Args:
        thumbnail_data: Data of generated thumbnail.
        resource_name: Name of the resource (str).
        path: The path to write the thumbnail to (str).
    """
    # Only import weasyprint when required as production environment
    # does not have it installed.
    from weasyprint import HTML, CSS
    context = dict()
    context["resource"] = resource_name
    context["paper_size"] = self.options["paper_size"].value
    context["all_data"] = [[thumbnail_data]]
    pdf_html = render_to_string("resources/base-resource-pdf.html", context)
    html = HTML(string=pdf_html, base_url=settings.BUILD_ROOT)
    css_file = os.path.join(settings.BUILD_ROOT, "css/print-resource-pdf.css")
    # BUGFIX: use context managers so neither the CSS source nor the
    # thumbnail output file handle is leaked if rendering/writing raises.
    with open(css_file, encoding="UTF-8") as css_source:
        css_string = css_source.read()
    base_css = CSS(string=css_string)
    thumbnail = html.write_png(stylesheets=[base_css], resolution=72)
    with open(path, "wb") as thumbnail_file:
        thumbnail_file.write(thumbnail)
| mit | b9c56ece8b66bca7669eee27dcf6fd89 | 34.106464 | 113 | 0.601213 | 4.63504 | false | false | false | false |
uccser/cs-unplugged | csunplugged/general/views.py | 1 | 1414 | """Views for the general application."""
from django.views.generic import TemplateView
class GeneralIndexView(TemplateView):
    """View for the homepage that renders from a template.

    Static page: renders ``general/index.html`` with no extra context.
    """
    template_name = "general/index.html"
class GeneralAboutView(TemplateView):
    """View for the about page that renders from a template.

    Static page: renders ``general/about.html`` with no extra context.
    """
    template_name = "general/about.html"
class GeneralContactView(TemplateView):
    """View for the contact page that renders from a template.

    Static page: renders ``general/contact.html`` with no extra context.
    """
    template_name = "general/contact.html"
class GeneralPeopleView(TemplateView):
    """View for the people page that renders from a template.

    Static page: renders ``general/people.html`` with no extra context.
    """
    template_name = "general/people.html"
class GeneralPrinciplesView(TemplateView):
    """View for the principles page that renders from a template.

    Static page: renders ``general/principles.html`` with no extra context.
    """
    template_name = "general/principles.html"
class WhatIsCSView(TemplateView):
    """View for the 'What is Computer Science?' page that renders from a template.

    Static page: renders ``general/what-is-computer-science.html``.
    """
    template_name = "general/what-is-computer-science.html"
class ComputationalThinkingView(TemplateView):
    """View for the Computational Thinking page that renders from a template.

    Static page: renders ``general/computational-thinking.html``.
    """
    template_name = "general/computational-thinking.html"
class HowDoITeachCSUnpluggedView(TemplateView):
    """View for the 'How do I teach CS Unplugged?' page that renders from a template.

    Static page: renders ``general/how-do-i-teach-cs-unplugged.html``.
    """
    template_name = "general/how-do-i-teach-cs-unplugged.html"
| mit | 088a9ce54c68cc277cf01e70c3eea59b | 26.72549 | 88 | 0.732673 | 3.811321 | false | false | false | false |
uccser/cs-unplugged | csunplugged/resources/generators/LeftRightCardsResourceGenerator.py | 1 | 2209 | """Class for Left and Right Cards resource generator."""
from PIL import Image, ImageDraw
from math import pi
from utils.TextBoxDrawer import TextBoxDrawer, TextBox
from django.utils.translation import ugettext_lazy as _
from resources.utils.BaseResourceGenerator import BaseResourceGenerator
from resources.utils.coords import calculate_box_vertices

# Font used for the translated "Left"/"Right" labels drawn onto the cards.
FONT_PATH = "static/fonts/PatrickHand-Regular.ttf"
FONT_SIZE = 300

# Placement data per label: translated text, a list of
# (top-left corner, width, height) boxes the label is written into (pixel
# coordinates on the card template image), and the text rotation in radians.
# NOTE(review): "right" labels are rotated pi radians (upside down),
# presumably so the folded card reads correctly — confirm against the
# card template image.
LABEL_DATA = {
    "left": {
        "text": _("Left"),
        "areas": [
            ((13, 84), 660, 315),
            ((701, 84), 660, 315),
            ((1389, 84), 660, 315),
            ((2077, 84), 660, 315),
        ],
        "rotation": 0,
    },
    "right": {
        "text": _("Right"),
        "areas": [
            ((13, 1660), 660, 315),
            ((701, 1660), 660, 315),
            ((1389, 1660), 660, 315),
            ((2077, 1660), 660, 315),
        ],
        "rotation": pi,
    },
}
class LeftRightCardsResourceGenerator(BaseResourceGenerator):
    """Class for Left and Right Cards resource generator."""

    def data(self):
        """Create data for a copy of the Left and Right Cards resource.

        Returns:
            A dictionary of the one page for the resource.
        """
        image = Image.open("static/img/resources/left-right-cards/left-right-cards.png")
        writer = TextBoxDrawer(image, ImageDraw.Draw(image))
        # Write every "Left"/"Right" label into each of its target boxes.
        for label_data in LABEL_DATA.values():
            label_text = label_data["text"]
            rotation = label_data["rotation"]
            for (top_left, box_width, box_height) in label_data["areas"]:
                textbox = TextBox(
                    calculate_box_vertices(top_left, box_width, box_height),
                    box_width,
                    box_height,
                    font_path=FONT_PATH,
                    font_size=FONT_SIZE,
                    angle=rotation
                )
                writer.write_text_box(
                    textbox,
                    label_text,
                    horiz_just="center",
                )
        # The template is stored sideways; rotate into final orientation.
        return {"type": "image", "data": image.rotate(90, expand=True)}
| mit | 019f01aa20275d9b80919c2d6ab0c4a9 | 31.970149 | 81 | 0.53101 | 3.923623 | false | false | false | false |
uccser/cs-unplugged | csunplugged/tests/resources/generators/test_searching_cards.py | 1 | 11613 | from django.http import QueryDict
from django.test import tag
from resources.generators.SearchingCardsResourceGenerator import SearchingCardsResourceGenerator
from tests.resources.generators.utils import BaseGeneratorTest
@tag("resource")
class SearchingCardsResourceGeneratorTest(BaseGeneratorTest):
    """Smoke and subtitle tests for SearchingCardsResourceGenerator."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.language = "en"
        self.base_valid_query = QueryDict("number_cards=15&max_number=cards&help_sheet=yes&paper_size=a4")

    def test_number_cards_values(self):
        generator = SearchingCardsResourceGenerator(self.base_valid_query)
        self.run_parameter_smoke_tests(generator, "number_cards")

    def test_max_number_values(self):
        generator = SearchingCardsResourceGenerator(self.base_valid_query)
        self.run_parameter_smoke_tests(generator, "max_number")

    def test_help_sheet_values(self):
        generator = SearchingCardsResourceGenerator(self.base_valid_query)
        self.run_parameter_smoke_tests(generator, "help_sheet")

    def test_subtitle_all_option_combinations(self):
        """Check the subtitle for every combination of generator options.

        Replaces 32 near-identical ``test_subtitle_*`` methods with one
        data-driven test; each combination runs in its own subTest so a
        failure reports the offending combination.
        """
        for number_cards in ("15", "31"):
            for max_number in ("cards", "99", "999", "blank"):
                for (help_sheet, help_text) in (("yes", "with"), ("no", "without")):
                    for paper_size in ("a4", "letter"):
                        query = QueryDict(
                            "number_cards={}&max_number={}&help_sheet={}&paper_size={}".format(
                                number_cards, max_number, help_sheet, paper_size
                            )
                        )
                        if max_number == "blank":
                            number_range = "Blank"
                        else:
                            # "cards" means the range runs up to the card count.
                            upper = number_cards if max_number == "cards" else max_number
                            number_range = "1 to {}".format(upper)
                        expected = "{} cards - {} - {} helper sheet - {}".format(
                            number_cards, number_range, help_text, paper_size
                        )
                        with self.subTest(expected=expected):
                            generator = SearchingCardsResourceGenerator(query)
                            self.assertEqual(generator.subtitle, expected)
| mit | 05eeb86fa8e971f3af5d7e47b82b33e8 | 40.327402 | 106 | 0.6429 | 3.780273 | false | true | false | false |
uccser/cs-unplugged | csunplugged/tests/utils/translatable_model_loader/test_TranslatableModelLoader.py | 1 | 12476 | """Test class for TranslatableModelLoader."""
import imp
from unittest import mock
from modeltranslation import settings as mt_settings
from modeltranslation.translator import translator, TranslationOptions
from django.db import models
from django.test import SimpleTestCase
from django.utils import translation
from utils.TranslatableModelLoader import TranslatableModelLoader
from utils.TranslatableModel import TranslatableModel
from utils.errors.MissingRequiredModelsError import MissingRequiredModelsError
from utils.errors.MissingRequiredFieldError import MissingRequiredFieldError
from utils.errors.CouldNotFindYAMLFileError import CouldNotFindYAMLFileError
from utils.errors.CouldNotFindMarkdownFileError import CouldNotFindMarkdownFileError
from utils.language_utils import get_available_languages
class MockTranslatableModel(TranslatableModel):
    """Mock TranslatableModel for testing TranslatableModelLoader functionality."""

    # Fields with fallback to english disabled
    nofallback1 = models.CharField(default="")
    nofallback2 = models.CharField(default="")
    nofallback3 = models.CharField(default="")

    # Fields with fallback to english enabled
    fallback1 = models.CharField(default="")
    fallback2 = models.CharField(default="")

    class Meta:
        # BUGFIX: a trailing comma previously made this the one-element
        # tuple ("test",); Django expects app_label to be a plain string.
        app_label = "test"
class MockTranslatableModelTranslationOptions(TranslationOptions):
    """Translation options for MockTranslatableModel model."""

    # All translatable fields on the mock model.
    fields = ("nofallback1", "nofallback2", "nofallback3", "fallback1", "fallback2")
    # Fields mapped to None here do NOT fall back to the default language
    # when a translation is missing; fields not listed keep the default
    # fallback behaviour.
    fallback_undefined = {
        "nofallback1": None,
        "nofallback2": None,
        "nofallback3": None,
    }
class TranslatableModelLoaderTest(SimpleTestCase):
    """Test class for TranslatableModelLoader."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.base_path = "tests/utils/translatable_model_loader/assets"

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # NOTE(review): the ``imp`` module is deprecated (importlib.reload is
        # the modern equivalent).  The reload lets modeltranslation pick up
        # any overridden settings before the mock model is registered.
        imp.reload(mt_settings)
        translator.register(MockTranslatableModel, MockTranslatableModelTranslationOptions)

    def _loader(self):
        """Return a TranslatableModelLoader rooted at the test asset directory."""
        return TranslatableModelLoader(base_path=self.base_path)

    def _model_with_translations(self, de_fields):
        """Return a mock model with full English values plus the given German fields."""
        model = MockTranslatableModel()
        model.fallback1 = "english value 1"
        model.nofallback1 = "english value 2"
        with translation.override("de"):
            for field, value in de_fields.items():
                setattr(model, field, value)
        return model

    def test_get_yaml_translations_english(self):
        translations = self._loader().get_yaml_translations("basic.yaml")
        self.assertIsInstance(translations, dict)
        self.assertSetEqual(set(["model1", "model2"]), set(translations.keys()))
        # Both models should carry English-only values for both fields.
        for slug, value1, value2 in (("model1", "value 1-1", "value 1-2"),
                                     ("model2", "value 2-1", "value 2-2")):
            model_translations = translations[slug]
            self.assertIsInstance(model_translations, dict)
            self.assertSetEqual(set(["en"]), set(model_translations.keys()))
            english = model_translations["en"]
            self.assertIsInstance(english, dict)
            self.assertSetEqual(set(["field1", "field2"]), set(english.keys()))
            self.assertEqual(value1, english["field1"])
            self.assertEqual(value2, english["field2"])

    def test_get_yaml_translations_english_missing_reqd_field(self):
        with self.assertRaises(MissingRequiredFieldError):
            self._loader().get_yaml_translations(
                "missingreqdfield.yaml", required_fields=["field1"])

    def test_get_yaml_translations_english_missing_reqd_slug(self):
        with self.assertRaises(MissingRequiredModelsError):
            self._loader().get_yaml_translations(
                "missingreqdslug.yaml", required_slugs=["model1", "model2"])

    def test_get_yaml_translations_english_missing_file_with_reqd_slugs(self):
        # With required slugs, a missing english yaml file should raise Exception
        with self.assertRaises(CouldNotFindYAMLFileError):
            self._loader().get_yaml_translations(
                "doesnotexist.yaml", required_slugs=["model1", "model2"])

    def test_get_yaml_translations_english_missing_yaml_no_reqd_slugs(self):
        # If no required slugs, no error should be raised
        self._loader().get_yaml_translations("doesnotexist.yaml")

    def test_get_yaml_translations_field_map(self):
        translations = self._loader().get_yaml_translations(
            "basic.yaml",
            field_map={"field1": "new_field1"}
        )
        model1 = translations["model1"]["en"]
        self.assertSetEqual(set(["new_field1", "field2"]), set(model1.keys()))
        self.assertEqual("value 1-1", model1["new_field1"])

    def test_get_yaml_translations_translated(self):
        translations = self._loader().get_yaml_translations("translation.yaml")
        self.assertIsInstance(translations, dict)
        self.assertSetEqual(set(["model1", "model2"]), set(translations.keys()))
        model1_translations = translations["model1"]
        self.assertIsInstance(model1_translations, dict)
        self.assertSetEqual(set(["en", "de"]), set(model1_translations.keys()))
        # The asset prefixes each value with its language code.
        for language in ("en", "de"):
            values = model1_translations[language]
            self.assertIsInstance(values, dict)
            self.assertSetEqual(set(["field1", "field2"]), set(values.keys()))
            self.assertEqual("{} value 1-1".format(language), values["field1"])
            self.assertEqual("{} value 1-2".format(language), values["field2"])

    def test_get_yaml_translations_translated_missing_reqd_field(self):
        # required fields only apply to default language (en) so no error should be raised
        translations = self._loader().get_yaml_translations(
            "translationmissingreqdfield.yaml", required_fields=["field1"])
        self.assertSetEqual(set(["field1", "field2"]), set(translations["model2"]["en"].keys()))
        self.assertSetEqual(set(["field2"]), set(translations["model2"]["de"].keys()))

    def test_get_yaml_translations_translated_missing_reqd_slug(self):
        # required slugs only apply to default language (en) so no error should be raised
        translations = self._loader().get_yaml_translations(
            "translationmissingreqdslug.yaml", required_slugs=["model1", "model2"])
        self.assertSetEqual(set(["en", "de"]), set(translations["model1"].keys()))
        self.assertSetEqual(set(["en"]), set(translations["model2"].keys()))

    def test_get_markdown_translations_english(self):
        translations = self._loader().get_markdown_translations("basic.md")
        self.assertSetEqual(set(["en"]), set(translations.keys()))
        self.assertIn("Basic Content", translations["en"].html_string)
        self.assertIn("Heading", translations["en"].title)

    def test_get_markdown_translation_english_missing_file_required(self):
        with self.assertRaises(CouldNotFindMarkdownFileError):
            self._loader().get_markdown_translations("doesnotexist.md", required=True)

    def test_get_markdown_translation_english_missing_file_not_required(self):
        # Should not raise error if required is False
        self._loader().get_markdown_translations("doesnotexist.md", required=False)

    def test_get_markdown_translations_translated(self):
        translations = self._loader().get_markdown_translations("translation.md")
        self.assertSetEqual(set(["en", "de"]), set(translations.keys()))
        self.assertIn("English Content", translations["en"].html_string)
        self.assertIn("English Heading", translations["en"].title)
        self.assertIn("German Content", translations["de"].html_string)
        self.assertIn("German Heading", translations["de"].title)

    def test_populate_translations(self):
        model = MockTranslatableModel()
        translations = {
            "en": {
                "fallback1": "english value 1",
                "nofallback1": "english value 2"
            },
            "de": {
                "fallback1": "german value 1",
                "nofallback1": "german value 2"
            }
        }
        TranslatableModelLoader.populate_translations(model, translations)
        self.assertEqual(model.fallback1, "english value 1")
        self.assertEqual(model.nofallback1, "english value 2")
        with translation.override("de"):
            self.assertEqual(model.fallback1, "german value 1")
            self.assertEqual(model.nofallback1, "german value 2")

    def test_mark_translation_availability_all_required_fields_present(self):
        model = self._model_with_translations(
            {"fallback1": "german value 1", "nofallback1": "german value 2"})
        TranslatableModelLoader.mark_translation_availability(
            model, required_fields=["fallback1", "nofallback1"])
        self.assertSetEqual(set(["en", "de"]), set(model.languages))

    def test_mark_translation_availability_required_fallback_field_missing(self):
        # Don't populate the field "fallback1" which has fallback enabled
        model = self._model_with_translations({"nofallback1": "german value 2"})
        TranslatableModelLoader.mark_translation_availability(
            model, required_fields=["fallback1", "nofallback1"])
        self.assertSetEqual(set(["en"]), set(model.languages))

    def test_mark_translation_availability_required_no_fallback_field_missing(self):
        # Don't populate the field "nofallback1" which does not have fallback enabled
        model = self._model_with_translations({"fallback1": "german value 1"})
        TranslatableModelLoader.mark_translation_availability(
            model, required_fields=["fallback1", "nofallback1"])
        self.assertSetEqual(set(["en"]), set(model.languages))

    def test_mark_translation_availability_required_fields_not_given(self):
        model = MockTranslatableModel()
        with mock.patch("utils.language_utils.get_available_languages",
                        return_value=["en", "de", "fr"]):
            TranslatableModelLoader.mark_translation_availability(model)
        self.assertSetEqual(set(get_available_languages()), set(model.languages))

    def test_get_blank_translation_dictionary(self):
        translation_dict = TranslatableModelLoader.get_blank_translation_dictionary()
        self.assertSetEqual(set(get_available_languages()), set(translation_dict.keys()))
        self.assertDictEqual(translation_dict["en"], {})
        # Check to make sure it's not a dictionary of references to the same dictionary
        self.assertFalse(translation_dict["en"] is translation_dict["de"])
| mit | d4258c7bebac97952b9699f3e1bb557e | 46.984615 | 114 | 0.689243 | 4.091833 | false | true | false | false |
uccser/cs-unplugged | csunplugged/topics/migrations/0016_auto_20170226_2234.py | 1 | 1187 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-26 22:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django; avoid editing by hand beyond comments.

    dependencies = [
        ('resources', '0004_auto_20170226_0758'),
        ('topics', '0015_auto_20170226_2133'),
    ]

    operations = [
        # Through-model linking a Lesson to a generated Resource, carrying a
        # free-text description of how the resource relates to the lesson.
        migrations.CreateModel(
            name='ConnectedGeneratedResource',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(max_length=300)),
                ('lesson', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='topics.Lesson')),
                ('resource', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='resources.Resource')),
            ],
        ),
        # Expose the relation on Lesson via the through-model defined above.
        migrations.AddField(
            model_name='lesson',
            name='generated_resources',
            field=models.ManyToManyField(related_name='lesson_generated_resources', through='topics.ConnectedGeneratedResource', to='resources.Resource'),
        ),
    ]
| mit | be3e25eb7a5e068326f8470cdef9a39a | 37.290323 | 154 | 0.625948 | 4.179577 | false | false | false | false |
jewettaij/moltemplate | examples/coarse_grained/DNA_models/dsDNA_only/2strands/3bp_2particles/simple_dna_example/measure_persistence_length/merge_lines_periodic.py | 4 | 10846 | #!/usr/bin/env python
import sys, math

# Derive the module/program names from the file name so that version and
# error messages stay correct if the script is renamed.
g_filename = __file__.split('/')[-1]
g_module_name = g_filename
g_program_name = g_filename
if g_filename.rfind('.py') != -1:
    # Strip the ".py" suffix for the module name.
    g_module_name = g_filename[:g_filename.rfind('.py')]
g_date_str = '2020-1-19'
g_version_str = '0.3.0'
usage_example = g_program_name + """
merge_lines_periodic.py i1 i2 i3... \
[-p natoms_per_monomer] \
[-s nskip] [-d delim_atom] [-D delim_monomer] \
< crds_input.raw \
> multiple_atom_crds_per_line.dat
Explanation: This script splits a text file into equally sized "blocks" (aka "monomers")
and pastes the text text from different lines in each block into the
same line (with optional delimeters).
The i1 i2 i3,... indices select the lines in each block (of atom
coordinates in each monomer) that you want to merge together.
Indexing begins at 0, not 1. (The first line in a block has i=0)
-Negative numbers correspond to atoms in the previous block(monomer).
-Numbers larger than natoms_per_monomer lie in the next block(monomer).
If any of these indices lie out of range, then the entire list
of lines in this block is ignored.
-The -p argument indicates the number of lines in each block (aka "monomer")
If the -p argument is skipped, then it is assumed to be infinity. (In other
words, it is equal to the number of lines in the polymer conformation.)
-The -s nskip argument allows you to skip over lines at the begining
of the file. (NOTE: Comments and lines beginning with comments
are ignored already, so don't include them in the nskip argument.)
-The -d and -D delimeters allow you to change the string which
separates text belonging to different atoms(lines), and different
monomers (blocks). By default, they are " " and "\\n", respectively.
-Blank lines (if present) in the input file are interpreted as delimeters
separating different "polymer conformations". When encountered, each
"polymer conformation" is processed separately, with the output for
different polymer conformations delimted by blank lines.
"""
class InputError(Exception):
    """Error raised when the caller supplies a faulty input file or argument.

    The human-readable message is stored in the ``err_msg`` attribute.
    """

    def __init__(self, err_msg):
        self.err_msg = err_msg

    def __str__(self):
        return self.err_msg

    def __repr__(self):
        return self.__str__()
def EscCharStrToChar(s_in, escape='\\'):
    """
    Replace escape sequences in a string by the single characters they
    stand for ('\\n' -> newline, '\\t' -> tab, escaped quotes, a doubled
    escape character, ...).  Unrecognised sequences are kept verbatim,
    escape string included.
    """
    assert(len(escape) > 0)
    simple_escapes = {'n': '\n', 't': '\t', 'r': '\r', 'f': '\f',
                      '\'': '\'', '\"': '\"'}
    pieces = []
    pending_escape = False
    for ch in s_in:
        if pending_escape:
            if ch in simple_escapes:
                pieces.append(simple_escapes[ch])
            elif ch in escape:
                # An escaped escape character yields the character itself.
                pieces.append(ch)
            else:
                # Unknown sequence: keep both the escape string and the char.
                pieces.append(escape + ch)
            pending_escape = False
        elif ch in escape:
            pending_escape = True
        else:
            pieces.append(ch)
    return ''.join(pieces)
def SafelyEncodeString(in_str,
                       quotes='\'\"',
                       delimiters=' \t\r\f\n',
                       escape='\\',
                       comment_char='#'):
    """
    Encode in_str so that it can be written out and later re-parsed
    unambiguously: control characters are replaced by two-character
    escape sequences ('\\n', '\\t', ...), quote characters are prefixed
    with the escape character, escape characters are doubled, and the
    whole string is wrapped in double quotes if it contains any
    delimiter characters.
    """
    assert(len(escape) > 0)
    # control characters checked first, so '\n','\t','\r','\f' never
    # trigger the outer-quote test even though they are delimiters:
    control_map = {'\n': '\\n', '\t': '\\t', '\r': '\\r', '\f': '\\f'}
    needs_quoting = False
    encoded = []
    for ch in in_str:
        if ch in control_map:
            token = control_map[ch]
        elif ch in quotes:
            token = escape[0] + ch
        elif ch in escape:
            token = ch + ch
        else:
            if ch in delimiters:
                needs_quoting = True
            token = ch
        encoded.append(token)
    body = ''.join(encoded)
    if needs_quoting:
        return '\"' + body + '\"'
    return body
def ProcessSnapshot(lines,
                    out_file,
                    offsets,
                    period,
                    nskip,
                    delimeter_atom,
                    delimeter_monomer):
    """
    Write selected lines from one "polymer conformation" to out_file.
    The conformation is divided into blocks ("monomers") of "period"
    lines each (period == 0 means a single monomer).  For monomer I the
    line chosen by offset j is lines[I*period + nskip + j].  Lines
    within a monomer are separated by delimeter_atom; monomers are
    separated by delimeter_monomer.
    """
    offsets_min = min(offsets)
    offsets_max = max(offsets)
    if period == 0:
        num_monomers = 1
    else:
        num_monomers = int(math.floor((len(lines)-nskip)/period))
    for I in range(0, num_monomers):
        # If any of the entries would be missing, then ignore the entire
        # list of lines in this monomer (block).
        # BUGFIX: these bounds tests now use the same index formula,
        #   i = I*period + nskip + j,
        # as the write loop below.  Previously the tests omitted "nskip",
        # so when nskip > 0 some valid monomers were skipped and some
        # out-of-range monomers were partially written.
        if (I*period + nskip + offsets_min < nskip):
            continue
        if (I*period + nskip + offsets_max >= len(lines)):
            continue
        for J in range(0, len(offsets)):
            j = offsets[J]
            i = (I*period + nskip) + j
            if (nskip <= i) and (i < len(lines)):
                out_file.write(lines[i])
                if J+1 < len(offsets):
                    out_file.write(delimeter_atom)
                else:
                    out_file.write(delimeter_monomer)
# Default values of the global settings which can be overridden by the
# command-line flags parsed below (-p, -s, -d, -D):
g_period = 0
g_nskip = 0
g_delimeter_atom = ' '
g_delimeter_monomer = '\n'
g_delimeter_snapshot = '\n'
g_offsets = []
####### Main Code Below: #######
# Print a version banner to stderr (g_program_name, g_version_str and
# g_date_str are defined near the top of this file):
sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+' ')
if sys.version < '3':
    sys.stderr.write(' (python version < 3)\n')
else:
    sys.stderr.write('\n')
try:
    argv = [arg for arg in sys.argv]
    # Loop over the remaining arguments not processed yet.
    # These arguments are specific to the lttree.py program
    # and are not understood by ttree.py:
    i = 1
    while i < len(argv):
        #sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
        if ((argv[i].lower() == '-?') or
            (argv[i].lower() == '--?') or
            (argv[i].lower() == '-help') or
            (argv[i].lower() == '--help')):  # BUGFIX: was '-help' twice
            # BUGFIX: print the usage message unconditionally.  (It used
            # to be printed only when the flag was the final argument; in
            # every other case "i" was never advanced, so the loop spun
            # forever.)
            sys.stdout.write("\n    Usage:\n\n"+usage_example+'\n')
            sys.exit(0)
        elif argv[i].lower() == '-p':
            # -p: number of lines per monomer (block)
            if i+1 >= len(argv):
                raise InputError('Error: '+argv[i]+' flag should be followed by a number.\n')
            g_period = int(argv[i+1])
            sys.stderr.write('  period = '+str(g_period)+'\n')
            del(argv[i:i+2])
        elif argv[i].lower() == '-s':
            # -s: number of (non-comment) lines to skip at the beginning
            if i+1 >= len(argv):
                raise InputError('Error: '+argv[i]+' flag should be followed by a number.\n')
            # BUGFIX: parse as int, not float.  g_nskip is later used as
            # a list index (lines[I*period + nskip + j]), which raises a
            # TypeError in python 3 when nskip is a float.
            g_nskip = int(argv[i+1])
            sys.stderr.write('  skip first '+str(g_nskip)+' non-comment lines\n')
            del(argv[i:i+2])
        # BUGFIX: the -d/-D comparisons are now case-SENSITIVE.
        # (Previously both branches compared argv[i].lower(), so '-D'
        # always matched the '-d' branch and the '-D' branch was
        # unreachable.)
        elif argv[i] == '-d':
            # -d: string separating atoms (lines) within one monomer
            if i+1 >= len(argv):
                raise InputError('Error: '+argv[i]+' flag should be followed by a string.\n')
            g_delimeter_atom = EscCharStrToChar(argv[i+1])
            sys.stderr.write('  delimeter_atom = \"'+SafelyEncodeString(g_delimeter_atom)+'\"\n')
            del(argv[i:i+2])
        elif argv[i] == '-D':
            # -D: string separating different monomers (blocks)
            if i+1 >= len(argv):
                raise InputError('Error: '+argv[i]+' flag should be followed by string.\n')
            # BUGFIX: this used to (re)assign g_delimeter_atom, leaving
            # g_delimeter_monomer unchanged, so -D had no effect.
            g_delimeter_monomer = EscCharStrToChar(argv[i+1])
            sys.stderr.write('  delimeter_monomer = \"'+SafelyEncodeString(g_delimeter_monomer)+'\"\n')
            del(argv[i:i+2])
        elif argv[i][0] == '-':
            # Note: It could be a negative integer, so check for
            #       that before printing an error message
            if not argv[i][1:].isdigit():
                raise InputError('Error('+g_program_name+'):\n'
                                 'Unrecogized command line argument \"'+argv[i]+'\"\n')
            i += 1
        else:
            i += 1
    if len(argv) == 1:
        raise InputError("Error: Expected a list of integers.\n\n"+
                         "Usage: \n\n"+
                         "       "+usage_example+"\n")
    # The remaining arguments are the line offsets within each monomer:
    g_offsets = [int(arg) for arg in argv[1:]]
    # --- Now (finally) read the lines in the standard input ----
    n_snapshots = 0
    lines = []
    in_file = sys.stdin
    for line_orig in in_file:
        # strip comments (text following '#') and trailing newlines
        ic = line_orig.find('#')
        if ic != -1:
            line = line_orig[:ic]
        else:
            line = line_orig.rstrip('\n')
        # Blank lines in a trajectory file usually signal the end of the
        # coordinates for that snapshot in the trajectory, and the beginning
        # of the next snapshot.
        if len(line_orig.strip()) == 0:
            if n_snapshots > 0:
                sys.stdout.write(g_delimeter_snapshot)
            if len(lines) > 0:
                ProcessSnapshot(lines,
                                sys.stdout,
                                g_offsets,
                                g_period,
                                g_nskip,
                                g_delimeter_atom,
                                g_delimeter_monomer)
                n_snapshots += 1
            # Clear the lines buffer to begin reading the new snapshot
            del lines[:]
        else:
            if len(line.strip()) > 0:
                lines.append(line)
    if len(lines) > 0:
        if n_snapshots > 0:
            sys.stdout.write(g_delimeter_snapshot)
        # After reading all of the lines in the file, deal with any lines
        # left over since reading the last frame
        ProcessSnapshot(lines,
                        sys.stdout,
                        g_offsets,
                        g_period,
                        g_nskip,
                        g_delimeter_atom,
                        g_delimeter_monomer)
except (ValueError, InputError) as err:
    sys.stderr.write('\n\n'+str(err)+'\n')
    sys.exit(-1)
| mit | 17984489aa310e279f151bb4937e6154 | 32.68323 | 104 | 0.51466 | 3.810963 | false | false | false | false |
jewettaij/moltemplate | moltemplate/nbody_Angles.py | 2 | 2608 | # Author: Andrew Jewett (jewett.aij at g mail)
# License: MIT License (See LICENSE.md)
# Copyright (c) 2013
try:
from .nbody_graph_search import Ugraph
except (ImportError, SystemError, ValueError):
# not installed as a package
from nbody_graph_search import Ugraph
# This file defines how 3-body angle interactions are generated by moltemplate
# by default. It can be overridden by supplying your own custom file.
# To find 3-body "angle" interactions, we would use this subgraph:
#
#
# *---*---* => 1st bond connects atoms 0 and 1
# 0 1 2 2nd bond connects atoms 1 and 2
#
# bond_pattern is the subgraph searched for in the system's bond graph:
# a linear chain of two bonds sharing the middle atom (index 1).
bond_pattern = Ugraph([(0, 1), (1, 2)])
# (Ugraph atom indices begin at 0, not 1)
def canonical_order(match):
    """
    Return a canonical representation of a 3-body angle match.

    The energy of an angle interaction between three consecutively
    bonded atoms 0-1-2 is unchanged when the two end atoms are swapped
    (2-1-0), so GraphMatcher can report the same physical interaction
    twice, once in each order.  To let the caller detect such
    duplicates, every match is re-oriented so that the first end atom
    has the smaller atom ID; the two bonds are reversed along with the
    atoms.  The central atom (index 1) never moves.
    """
    # match[0] holds the 3 atom IDs, match[1] holds the 2 bond IDs
    atoms = match[0]
    bonds = match[1]
    if atoms[0] < atoms[2]:
        # already in canonical order
        return match
    # reverse the chain: 2-1-0, and swap the bonds accordingly
    return ((atoms[2], atoms[1], atoms[0]), (bonds[1], bonds[0]))
| mit | 62f62324ba0944200cce28c6d3a87007 | 41.064516 | 79 | 0.684433 | 3.647552 | false | false | false | false |
jewettaij/moltemplate | moltemplate/depreciated/oplsaa_moltemplate.py | 2 | 17508 | #! /usr/bin/env python
#
# Author: Jason Lambert
# (some additional corrections by Miguel Gonzalez, Yue Chun Chiu and others)
# License: MIT License (See LICENSE.md)
# Copyright (c) 2014
#
# The purpose of this script is to create a moltemplate lt file for the oplsaa.
# forcefield. This will assist researchers in building complex simulations using
# this OPLS-UA and the OPLS-AA forcefields.
# Module metadata (displayed in the version banner below):
__author__="Jason Lambert"
# (some additional corrections by Miguel Gonzalez, Yue Chun Chiu, and Andrew Jewett)
__version__="0.21"
import sys
import os
from operator import itemgetter
g_program_name = __file__.split('/')[-1]  # program name used in messages
# First make a copy of the \"oplsaa.prm\" file
# (which can be downloaded from the TINKER web site).
# The lines in this file beginning with the word \"atoms\" should
# define the atoms which you plan to put in your simulation. All other
# lines beginning with the word \"atoms\" should be deleted.
# (Leave the other sections of this file alone.)
#""")
# python-2/python-3 compatibility imports:
if sys.version > '3':
    import io
else:
    import cStringIO
# Refuse to run on python versions older than 2.7:
try:
    if sys.version < '2.7':
        raise Exception('Error: Using python '+sys.version+'\n'+
                        '       Alas, your version of python is too old.\n'
                        '       You must upgrade to a newer version of python (2.7 or later).')
except Exception as err:
    sys.stderr.write('\n\n'+str(err)+'\n')
    sys.exit(-1)
#input data from file containing oplsaa force field parameters.
try:
    f=open(sys.argv[1],"r")
except:
    sys.stderr.write("Error: \n"
                     "       You need to specify a file name as an input argument:\n"
                     "       python oplsaa_moltemplate.py <forcefield file name>\n"
                     "       (or the file name is specified incorrectly)\n")
    sys.exit()
sys.stderr.write(g_program_name+", version "+__version__+"\n"
                 "Reading parameter file...\n")
#output lt file
g=open("oplsaa.lt","w")
lines = f.readlines()
# Ignore/Comment out lines before the "## Atom Type Definitions ##" section.
for i in range(0, len(lines)):
    if (lines[i].find("## Atom Type Definitions ##") != -1):
        break
    else:
        lines[i] = '# ' + lines[i]
# As of late 2014, there appear to be 906 atom types, but we don't assume this.
# First try to infer out how many atom types there were in the original
# oplsaa.prm file, or at least find an upper bound on the atom-type numbers.
# (Keep track of the maximum value of the first column in the "atom" section.)
max_atomType = 0
num_atomTypes = 0
for line in lines:
    # skip over text after a # comment character
    ic = line.find('#')
    if ic != -1:
        line = (line[:ic]).strip()
    else:
        line = line.strip()
    # now look for lines beginning with the word "atom"
    tokens = line.split()
    if ((len(tokens)>2) and (tokens[0] == "atom")):
        num_atomTypes += 1
        if (int(tokens[1]) > max_atomType):
            max_atomType = int(tokens[1])
# Warn the user if it looks like they forgot to prune the .prm file down
# to the subset of atom types they actually need:
if num_atomTypes > 25:
    sys.stderr.write("\n"
                     "(Note: If your computer freezes while running "+g_program_name+",\n"
                     "    it could be because you forgot to edit the .prm file.\n"
                     "    The original \"oplsaa.prm\" file distributed with TINKER has over 900 atom\n"
                     "    types.  If you run "+g_program_name+" on this file, it may freeze or\n"
                     "    crash.   Instead, run "+g_program_name+" on a SUBSET of the OPLS atoms\n"
                     "    relevant to your problem.  To do that, delete the lines from the .prm\n"
                     "    file beginning with \"atom\" which you do not need.)\n\n")
#temporary storage file
atom_lookup={} #this dictionary contains all the atom ffid's as a key and the number of atoms with that key
#atom=[[10000,10000] for i in range(906)] <- don't assume there are 906 atoms
# Entries left at [-10000,-10000] mark atom-type numbers which never
# appear in the (pruned) .prm file; they are skipped everywhere below.
atom=[[-10000,-10000] for i in range(0,max_atomType+1)]
#charge_by_type={} # lookup charge by atom type
#vdw_by_type={} # lookup epsilon & sigma paramters by atom type
charge_by_type=[0.0 for i in range(0,max_atomType+1)] # lookup charge by atom
vdw_by_type=[(0.0,0.0) for i in range(0,max_atomType+1)] # lookup epsilon & sigma
#atom is declared this way so for sorting purposes.
#atom contains the following data upon allocation
#atom[][0]=atom_id( Important for partial charges and non_bonded interactions)
#atom[][1]=atom_ffid( Important for stretches, bending, torsions and impropers)
#atom[][2]=atom_mass
#atom[][3]=partial charge
#atom[][4]=non_bonding sigma
#atom[][5]=non_bonding epsilon
#atom[][6]=atom comment
bond=[]
#bond contains the following data
#bond[0]=atom 1 ffid
#bond[1]=atom 2 ffid
#bond[2]=bond spring constant(OPLS-aa compatible)
#bond[3]=equilibrium bond distance(Angstrom)
angle=[]
#angle contains the following data
#angle[0]=atom 1 ffid
#angle[1]=atom 2 ffid
#angle[2]=atom 3 ffid
#angle[3]=spring constant
#angle[4]=equilibrium angle (degrees)
dihedral=[]
#dihedral contains the following data
#dihedral[0]=atom 1 ffid
#dihedral[1]=atom 2 ffid
#dihedral[2]=atom 3 ffid
#dihedral[3]=atom 4 ffid
#dihedral[4]=v1
#dihedral[5]=v2
#dihedral[6]=v3
#dihedral[7]=v4
improper=[]
#improper[0]=atom 1 ffid
#improper[1]=atom 2 ffid(central atom)
#improper[2]=atom 3 ffid
#improper[3]=atom 4 ffid
#improper[4]=spring coefficient
#improper[5]=equilibrium angle
#This section gets all the parameters from the force field file
# (one record per line; the first word selects the record type)
for line in lines:
    # skip over text after a # comment character
    ic = line.find('#')
    if ic != -1:
        line = (line[:ic]).strip()
    else:
        line = line.strip()
    if line.find("atom") == 0:
        line=line.split()
        # stored at index (type-1); fields: id, ffid, mass, q, sigma, eps, comment
        atom[int(line[1])-1]=[int(line[1]),int(line[2]),float(line[-2]),
                              0.0,0.0,0.0," ".join(line[3:-2])]
    elif line.find("vdw") == 0:
        line=line.split()
        #vdw_temp.append([float(line[1]),float(line[2]),float(line[3])])
        if (int(line[1]) <= max_atomType):
            vdw_by_type[int(line[1])] = (float(line[2]),float(line[3]))
    elif line.find("bond") == 0:
        line=line.split()
        bond.append([int(line[1]),int(line[2]),float(line[3]),float(line[4])])
    elif line.find("angle") == 0:
        line=line.split()
        angle.append([int(line[1]),int(line[2]),int(line[3]),
                      float(line[4]),float(line[5])])
    elif line.find("torsion") == 0:
        line=line.split()
        # columns 5, 8, 11 hold the V1, V2, V3 Fourier coefficients
        dihedral.append([int(line[1]),int(line[2]),int(line[3]),int(line[4]),
                         float(line[5]),float(line[8]), float(line[11]), 0.0])
    elif line.find("charge") == 0:
        line=line.split()
        #charge_temp.append([int(line[1]),float(line[2])])
        if (int(line[1]) <= max_atomType):
            charge_by_type[int(line[1])] = float(line[2])
    elif line.find("imptors") == 0:
        line=line.split()
        improper.append([int(line[1]), int(line[2]),
                         int(line[3]), int(line[4]), float(line[5]), float(line[6])])
#if len(atom) > 600:
#    sys.stderr.write("WARNING: The number of atom types in your file exceeds 600\n"
#                     "         (You were supposed to edit out the atoms you don't need.\n"
#                     "          Not doing this may crash your computer.)\n"
#                     "\n"
#                     "          Proceed? (Y/N): ")
#    reply = sys.stdin.readline()
#    if find(reply.strip().lower(), 'y') != 0:
#        exit(0)
#adding the charge and Lennard Jones parameters to
#to each atom type.
#----------------------------------------------#
system_is_charged = False
for i in range(0,len(atom)):
    atom_type_num = atom[i][0]
    #q = charge_by_type.get(atomTypeNum)
    #if q:
    #    atom[i][3] = q
    if atom_type_num != -10000:
        q = charge_by_type[atom_type_num]
        atom[i][3] = q
        if q != 0.0:
            # the system has some charged atoms
            system_is_charged = True
for i in range(0,len(atom)):
    atom_type_num = atom[i][0]
    #vdw_params = vdw_by_type.get(atomTypeNum)
    #if vdw_params:
    #    atom[i][4] = vdw_params[0]
    #    atom[i][5] = vdw_params[1]
    if atom_type_num != -10000:
        vdw_params = vdw_by_type[atom_type_num]
        atom[i][4] = vdw_params[0]
        atom[i][5] = vdw_params[1]
del(charge_by_type)
del(vdw_by_type)
# Choose the LAMMPS pair style: long-range electrostatics (and a kspace
# solver) are only needed if at least one atom carries a partial charge.
if system_is_charged:
    pair_style = "lj/cut/coul/long"
    pair_style_params = "10.0 10.0"
    kspace_style = "    kspace_style pppm 0.0001\n"
else:
    pair_style = "lj/cut"
    pair_style_params = "10.0"
    kspace_style = ""
pair_style_command = "    pair_style hybrid "+pair_style+" "+pair_style_params+"\n"
#begin writing content to lt file
g.write("# NOTE: This file was created automatically using:\n"
"# "+g_program_name+" \""+sys.argv[1]+"\"\n\n\n")
g.write("OPLSAA {\n\n" )
#write out the atom masses
#----------------------------------------------------------#
g.write(" write_once(\"Data Masses\"){\n")#checked with gaff
for i,x in enumerate(atom):
if x[0] != -10000:
g.write(" @atom:{} {} #{} partial charge={}\n".format(
x[0],x[2],x[6],x[3]))
g.write(" } #(end of atom masses)\n\n")
#----------------------------------------------------------#
#write out the pair coefficients
#----------------------------------------------------------#
g.write(" write_once(\"In Settings\"){\n")#checked with gaff
for i,x in enumerate(atom):
if x[0] != -10000:
fmt = " pair_coeff @atom:{0} @atom:{0} "+pair_style+" {1} {2}\n"
g.write(fmt.format(x[0],x[5],x[4]))
g.write(" } #(end of pair coeffs)\n\n")
g.write(" write_once(\"In Charges\"){\n")#checked with gaff
for i,x in enumerate(atom):
if x[0] != -10000:
g.write(" set type @atom:{0} charge {1}\n".format(x[0],x[3]))
g.write(" } #(end of atom charges)\n\n")
#-----------------------------------------------------------#
# This part of the code creates a lookup dictionary
# that allows you to find every type of atom by its
# force field id. force field id is the id number
# relevant to bonds, angles, dihedrals, and impropers.
# This greatly increases the speed of angle, bond, dihedral
# and improper assignment.
#------------------------------------------------------------#
# (atom is sorted by ffid so that equal ffid entries are adjacent)
atom=sorted(atom,key=itemgetter(1))
atom_ffid=0
for x in atom:
    if x[1]==atom_ffid:
        atom_lookup[x[1]].append(x[0])
    elif x[1]>atom_ffid:
        atom_lookup[x[1]]=[x[0]]
        atom_ffid=x[1]
# ffid 0 is a wildcard matching any atom type:
atom_lookup[0]=["*"]
#-------------------------------------------------------------#
#writing out the bond coefficients and bond parameters#
#-------------------------------------------------------------#
# First check if the atoms in system can potentially form bonds
might_have_bonds = False
for x in bond:
    for y in atom_lookup.get(x[0],[]):
        for z in atom_lookup.get(x[1],[]):
            might_have_bonds = True
if might_have_bonds:
    # temp.txt is used to buffer one of the two output sections so that
    # both can be generated from the same double loop:
    h=open("temp.txt","w+")
    g.write("  write_once(\"In Settings\") {\n")
    index1=0
    for x in bond:
        for y in atom_lookup.get(x[0],[]):
            for z in atom_lookup.get(x[1],[]):
                #g.write("    bond_coeff @bond:{}-{} harmonic {} {}\n".format(y,z,x[2]/2,x[3]))
                # Miguel Gonzales corrected this line to:
                g.write("    bond_coeff @bond:{}-{} harmonic {} {}\n".format(y,z,x[2],x[3]))
                h.write("    @bond:{0}-{1} @atom:{0} @atom:{1}\n".format(y,z))
    g.write("  } #(end of bond_coeffs)\n\n")
    h.seek(0,0)
    g.write("  write_once(\"Data Bonds By Type\") {\n")
    for line in h.readlines():
        g.write(line)
    g.write("  } #(end of bonds by type)\n\n")
    del(bond)
    h.close()
#-----------------------------------------------------------#
#writing out angle coefficients and angles by type.---------#
#-----------------------------------------------------------#
# First check if the atoms in system can potentially form angle interactions
might_have_angles = False
for x in angle:
    for y in atom_lookup.get(x[0],[]):
        for z in atom_lookup.get(x[1],[]):
            for u in atom_lookup.get(x[2],[]):
                might_have_angles = True
if might_have_angles:
    h=open("temp.txt","w+")
    g.write("  write_once(\"Data Angles By Type\"){\n")
    for x in angle:
        for y in atom_lookup.get(x[0],[]):
            for z in atom_lookup.get(x[1],[]):
                for u in atom_lookup.get(x[2],[]):
                    #print(y,z,u,x)
                    #h.write("    angle_coeff @angle:{}-{}-{} harmonic {} {}\n".format(y,z,u,x[3]/2.0,x[4]))
                    # Miguel Gonzales corrected this line:
                    h.write("    angle_coeff @angle:{}-{}-{} harmonic {} {}\n".format(y,z,u,x[3],x[4]))
                    g.write("    @angle:{0}-{1}-{2} @atom:{0} @atom:{1} @atom:{2}\n".format(y,z,u))
    g.write("  } #(end of angles by type)\n\n")
    h.seek(0,0)
    g.write("  write_once(\"In Settings\" ){\n")
    for line in h.readlines():
        g.write(line)
    g.write("  } #(end of angle_coeffs)\n\n")
    del(angle)
    h.close()
#----------------------------------------------------------#
#writing dihedrals by type and dihedral coefficients-------#
#----------------------------------------------------------#
# First check if the atoms in system can potentially form dihedral interactions
might_have_dihedrals = False
for x in dihedral:
    for y in atom_lookup.get(x[0],[]):
        for z in atom_lookup.get(x[1],[]):
            for u in atom_lookup.get(x[2],[]):
                for v in atom_lookup.get(x[3],[]):
                    might_have_dihedrals = True
if might_have_dihedrals:
    h=open("temp.txt","w+")
    g.write("  write_once(\"Data Dihedrals By Type\") {\n")
    #print(atom_lookup)
    # ffid 0 acts as a wildcard ("*") on the first and/or last atom.
    # NOTE(review): a torsion of the form A-B-C-0 (wildcard ONLY on the
    # 4th atom, x[0]!=0 and x[3]==0) matches none of the three branches
    # below and would be silently dropped -- confirm the oplsaa.prm file
    # never contains such entries.
    for x in dihedral:
        for y in atom_lookup.get(x[0],[]):
            for z in atom_lookup.get(x[1],[]):
                for u in atom_lookup.get(x[2],[]):
                    for v in atom_lookup.get(x[3],[]):
                        if x[0]!=0 and x[3]!=0:
                            g.write("    @dihedral:{0}-{1}-{2}-{3} @atom:{0} @atom:{1} @atom:{2} @atom:{3}\n".format(y,z,u,v))
                            h.write("    dihedral_coeff @dihedral:{}-{}-{}-{} opls {} {} {} {}\n".format(y,z,u,v,x[4],x[5],x[6],x[7]))
                        elif x[0]==0 and x[3]!=0:
                            g.write("    @dihedral:0-{1}-{2}-{3} @atom:{0} @atom:{1} @atom:{2} @atom:{3}\n".format(
                                    y,z,u,v))
                            h.write("    dihedral_coeff @dihedral:0-{}-{}-{} opls {} {} {} {}\n".format(z,u,v,x[4],x[5],x[6],x[7]))
                        elif x[0]==0 and x[3]==0:
                            g.write("    @dihedral:0-{1}-{2}-0 @atom:{0} @atom:{1} @atom:{2} @atom:{3}\n".format(y,z,u,v))
                            #h.write("    dihedral_coeff @dihedral:0-{}-{}-0 harmonic {} {} {} {}\n".format(z,u,x[4],x[5],x[6],x[7]))
                            h.write("    dihedral_coeff @dihedral:0-{}-{}-0 opls {} {} {} {}\n".format(z,u,x[4],x[5],x[6],x[7]))
    del(dihedral)
    g.write("  } #(end of Dihedrals by type)\n\n")
    h.seek(0,0)
    g.write("  write_once(\"In Settings\") {\n")
    for line in h.readlines():
        g.write(line)
    g.write("  } #(end of dihedral_coeffs)\n\n")
    h.close()
#-----------------------------------------------------------------------#
#----writing out improper coefficients and impropers by type------------#
#-----------------------------------------------------------------------#
# First check if the atoms in system can potentially form improper interactions
might_have_impropers = False
for x in improper:
    for y in atom_lookup.get(x[0],[]):
        for z in atom_lookup.get(x[1],[]):
            for u in atom_lookup.get(x[2],[]):
                for v in atom_lookup.get(x[3],[]):
                    might_have_impropers = True
if might_have_impropers:
    h=open("temp.txt","w+")
    g.write("  write_once(\"Data Impropers By Type (opls_imp.py)\") {\n")
    for x in improper:
        for y in atom_lookup.get(x[0],[]):
            for z in atom_lookup.get(x[1],[]):
                for u in atom_lookup.get(x[2],[]):
                    for v in atom_lookup.get(x[3],[]):
                        # Notation: let I,J,K,L denote the atom types ("biotypes")
                        # listed in the order they appear in the "oplsaa.prm" file.
                        # (I think J and L are represented by "u" and "v" in the code here.)
                        # It looks like the "oplsaa.prm" file distributed with tinker
                        # treats the third atom ("K") as the central atom.
                        # After checking the code, it appears that the improper angle is
                        # calculated as the angle between the I,J,K and the J,K,L planes
                        # (The harmonic coefficient is x[4]/2 with a 180 degree
                        #  equilibrium angle in both branches below.)
                        if x[0]==0 and x[1]==0 and x[3]==0:
                            g.write("    @improper:0-0-{2}-0 @atom:{0} @atom:{1} @atom:{2} @atom:{3}\n".format(y,z,u,v))
                            h.write("    improper_coeff @improper:0-0-{2}-0 harmonic {4} {5} \n".format(y,z,u,v,x[4]/2,180))
                        else:
                            g.write("    @improper:0-0-{2}-{3} @atom:{0} @atom:{1} @atom:{2} @atom:{3}\n".format(y,z,u,v))
                            h.write("    improper_coeff @improper:0-0-{2}-{3} harmonic {4} {5} \n".format(y,z,u,v,x[4]/2,180))
    g.write("  } #(end of impropers by type)\n\n")
    h.seek(0,0)
    g.write("  write_once(\"In Settings\") {\n")
    for line in h.readlines():
        g.write(line)
    g.write("  } #(end of improp_coeffs)\n\n")
    h.close()
#-----------------------------------------------------------------------#
#This section writes out the input parameters required for an opls-aa simulation
# lammps.
g.write(" write_once(\"In Init\") {\n")
g.write(" units real\n")
g.write(" atom_style full\n")
g.write(" bond_style hybrid harmonic\n")
g.write(" angle_style hybrid harmonic\n")
g.write(" dihedral_style hybrid opls\n")
g.write(" improper_style hybrid harmonic\n")
g.write(pair_style_command)
g.write(" pair_modify mix geometric\n")
g.write(" special_bonds lj/coul 0.0 0.0 0.5\n")
g.write(kspace_style)
g.write(" } #end of init parameters\n\n")
g.write("} # OPLSAA\n")
f.close()
g.close()
os.remove("temp.txt")
sys.stderr.write("...finished.\n")
| mit | a503bc436c2a96b171619bc36fa738d1 | 34.513185 | 120 | 0.563914 | 2.982115 | false | false | false | false |
jewettaij/moltemplate | moltemplate/nbody_alt_symmetry/cenJsortIKL.py | 13 | 1732 | try:
from ..nbody_graph_search import Ugraph
except:
# not installed as a module
from nbody_graph_search import Ugraph
# To find 4-body "improper" interactions, we would use this subgraph:
#            3
#            *       1st bond connects atoms 1 and 0
#            |   =>  2nd bond connects atoms 1 and 2
#          _.*._     3rd bond connects atoms 1 and 3
#        *'  1  `*
#       0         2
#
# bond_pattern lists (hub, peripheral) pairs: atom 1 is the central atom
# bonded to each of the three peripheral atoms 0, 2, and 3.
bond_pattern = Ugraph([(1,0), (1,2), (1,3)])
# (Note: Ugraph atom-index counters begin at 0, not 1)
def canonical_order(match):
    """
    Canonicalize a 4-body improper match whose central ("hub") atom is
    the second atom (index 1).

    GraphMatcher can report the same physical interaction several times
    with the peripheral atoms permuted.  Because the interaction energy
    is invariant under any permutation of the three peripheral atoms
    (indices 0, 2, 3), we sort those atoms -- and the bonds connecting
    them to the hub -- into increasing order, so duplicates compare
    equal to matches found earlier.
    """
    atoms = match[0]
    bonds = match[1]
    hub = atoms[1]
    # pair each peripheral atom ID with the index of its hub-bond,
    # then sort by atom ID (ties broken by bond index):
    periph = sorted([(atoms[0], 0), (atoms[2], 1), (atoms[3], 2)])
    return ((periph[0][0], hub, periph[1][0], periph[2][0]),
            (bonds[periph[0][1]], bonds[periph[1][1]], bonds[periph[2][1]]))
| mit | 870b6f4854561e2fa8ff615e9785bd29 | 38.363636 | 75 | 0.624134 | 3.491935 | false | false | false | false |
jewettaij/moltemplate | examples/coarse_grained/protein_folding_examples/1bead+chaperone/unfrustrated+chaperonin/moltemplate_files/generate_tables/calc_chaperone_table.py | 39 | 3038 | #!/usr/bin/env python
# Calculate a table of pairwise energies and forces between atoms in the
# protein and a chaperone provided in the supplemental materials section of:
# AI Jewett, A Baumketner and J-E Shea, PNAS, 101 (36), 13192-13197, (2004)
# This is stored in a tabulated force field with a singularity at a distance R.
#
# To calculate the table for interaction between
# ...the chaperone and a hydrophobic bead (2004 PNAS paper), use this table:
# ./calc_chaperone_table.py 1.0 1.0 6.0 0.475 0.0 5.9 1181
# ...the chaperone and a hydrophilic bead (2004 PNAS paper), use this table:
# ./calc_chaperone_table.py 1.0 1.0 6.0 0.0 0.0 5.9 1181
# ...the chaperone and a hydrophobic bead (2006 JMB paper), use this table:
# ./calc_chaperone_table.py 1.0 1.0 3.0 0.60 3.1 8.0 981 True
# ...the chaperone and a hydrophilic bead (2006 JMB paper), use this table:
# ./calc_chaperone_table.py 1.0 1.0 3.0 0.0 3.1 8.0 981 True
from math import *
import sys
def U(r, eps, sigma, R, h):
    """
    Pairwise energy between a bead and a hollow spherical shell of
    radius R at center-to-center distance r, obtained by integrating a
    Lennard-Jones interaction (attractive part scaled by h) over the
    shell surface.  The formula has a removable singularity at r=0;
    for r <= 0 the limiting value is returned.
    """
    if r <= 0:
        # limit of the expression below as r -> 0:
        return 4.0*pi*R*R*4.0*eps*((sigma/R)**12.0 - h*(sigma/R)**6.0)
    x_plus = sigma/(r+R)
    x_minus = sigma/(r-R)
    t10 = x_minus**10.0 - x_plus**10.0
    t4 = x_minus**4.0 - x_plus**4.0
    return 4.0*pi*eps*(R/r) * (0.2*t10 - 0.5*h*t4)
def F(r, eps, sigma, R, h):
    """
    Radial force corresponding to U(r, eps, sigma, R, h), evaluated
    analytically via the product rule on U = (1/r) * f(r).
    Returns 0 at the r <= 0 limit.
    """
    if r <= 0:
        return 0.0
    # first product-rule term: U(r)/r
    term_a = U(r, eps, sigma, R, h) / r
    # second term: derivative of the bracketed polynomial part
    inv_xp = (r+R)/sigma
    inv_xm = (r-R)/sigma
    t10 = (10.0/sigma)*(inv_xm**-11.0 - inv_xp**-11.0)
    t4 = (4.0/sigma)*(inv_xm**-5.0 - inv_xp**-5.0)
    term_b = 4.0*eps*pi*(R/r) * (0.2*t10 - 0.5*h*t4)
    return term_a + term_b
class InputError(Exception):
    """ A generic exception object containing a string for error reporting.
    """
    def __init__(self, err_msg):
        self.err_msg = err_msg

    def __str__(self):
        return self.err_msg

    # repr() shows the same message as str():
    __repr__ = __str__
# Parse the 7 required command-line arguments (plus an optional 8th
# flag which truncates and shifts the potential at r = rmax):
if len(sys.argv) < 8:
    sys.stderr.write("Error: expected 7 arguments:\n"
                     "\n"
                     "Usage: "+sys.argv[0]+" epsilon sigma R h rmin rmax N\n\n")
    sys.exit(-1)
epsilon = float(sys.argv[1])
sigma = float(sys.argv[2])
R = float(sys.argv[3])
h = float(sys.argv[4])
rmin = float(sys.argv[5])
rmax = float(sys.argv[6])
N = int(sys.argv[7])
subtract_Urcut = False
if len(sys.argv) == 9:
    subtract_Urcut = True
    rcut = rmax
# Print the table: index, distance, energy, force (one line per entry)
for i in range(0,N):
    r = rmin + i*(rmax-rmin)/(N-1)
    U_r = U(r, epsilon, sigma, R, h)
    F_r = F(r, epsilon, sigma, R, h)
    if subtract_Urcut:
        # shift the energy to zero at the cutoff, and force both U and F
        # to vanish at (and beyond) the cutoff
        U_r -= U(rcut, epsilon, sigma, R, h)
        # BUGFIX: this truncation test used to run unconditionally, which
        # raised a NameError ("rcut" is only defined when the optional 8th
        # argument is supplied), so the documented 7-argument invocations
        # in the header comments crashed on the first iteration.
        if (r >= rcut) or (i==N-1):
            U_r = 0.0
            F_r = 0.0
    print(str(i+1)+' '+str(r)+' '+str(U_r)+' '+str(F_r))
| mit | 99876f3d212891a83ffdcb3252a37b0f | 33.91954 | 83 | 0.575379 | 2.548658 | false | false | false | false |
jewettaij/moltemplate | examples/coarse_grained/membrane_examples/membrane_Cooke_Kremer_DesernoPRE2005/moltemplate_files/calc_table/gen_potential-cooke.py | 37 | 4383 | #!/usr/bin/python2.7
import os,sys
from fractions import Fraction
from numpy import *
### PARAMETERS ###
# Energy/length units and the per-pair size parameters ("b") used by the
# WCA repulsion for head-head, head-tail and tail-tail bead pairs:
sigma = 1.00
epsilon = 1.00
b_hh = 0.95 * sigma
b_ht = 0.95 * sigma
b_tt = 1.00 * sigma
# table extent and spacing (distances from r_init to r_max in r_space steps)
r_init = 0.000001
r_max = sigma * 3.
r_space = 0.01
##################
### INPUTS ###
# optional first argument: width w_cut of the attractive tail-tail ramp
if len(sys.argv) == 2:
    w_cut = float(sys.argv[1])
else:
    w_cut = 1.6
# 1.6 seems to be 'good' for vesicles, bilayers 1.4
##############
def WCA_energy(b, r):
    """
    Purely repulsive WCA (truncated & shifted Lennard-Jones) pair energy
    at separation r with size parameter b.  Uses the module-level
    "epsilon" as the well depth.  Callers are responsible for only
    evaluating this inside the WCA cutoff (r <= 2^(1/6)*b).
    """
    x = b / r
    repulsive = math.pow(x, 12)
    attractive = -math.pow(x, 6)
    return 4 * epsilon * (repulsive + attractive + 0.25)
def WCA_forces(b, r):
    """
    Radial force column for the WCA pair table at separation r with
    size parameter b (the negated derivative of the LJ core terms).
    NOTE(review): unlike WCA_energy, "epsilon" does not appear in this
    expression, so it equals -dE/dr only when epsilon == 1 (which is
    the value set at the top of this script) -- confirm.
    """
    d_attr = 24 * math.pow(b, 6) / math.pow(r, 7)
    d_rep = -48 * math.pow(b, 12) / math.pow(r, 13)
    return -(d_attr + d_rep)
def Tail_energy(b, r, r_cut):
    """
    Attractive tail of the Cooke lipid tail-tail potential: a flat well
    of depth epsilon for r < r_cut which decays smoothly to zero over a
    width w_cut via a cos^2 ramp.  Uses the module-level "epsilon" and
    "w_cut".  (The parameter b is unused; it is kept for signature
    symmetry with WCA_energy.)
    """
    if r < r_cut:
        return -1 * epsilon
    ramp = math.cos((math.pi * (r - r_cut)) / (2 * w_cut))
    return -1 * epsilon * math.pow(ramp, 2)
def Tail_forces(b, r, r_cut):
    """
    Force column corresponding to Tail_energy's cos^2 ramp: zero inside
    r_cut, and proportional to sin(pi*(r-r_cut)/w_cut) beyond it.
    Uses the module-level "w_cut".  (The parameter b is unused; kept
    for signature symmetry with Tail_energy.)
    NOTE(review): the "epsilon" prefactor is absent here, matching
    -dE/dr only when epsilon == 1 -- confirm.
    """
    if r < r_cut:
        return 0
    s = math.sin((math.pi * (r - r_cut)) / w_cut)
    return -math.pi * s / (2 * w_cut)
##############
ofile = open('tabulated_potential.dat', 'w')
tot_potential_hh = zeros((int(r_max / r_space) + 1, 4))
tot_potential_ht = zeros((int(r_max / r_space) + 1, 4))
tot_potential_tt = zeros((int(r_max / r_space) + 1, 4))
# Setup up formatting & distances in all arrays
for i in range(int(r_max / r_space)+1):
tot_potential_hh[:,0][i] = i+1
tot_potential_ht[:,0][i] = i+1
tot_potential_tt[:,0][i] = i+1
for i in range(1, int(r_max / r_space)+1):
tot_potential_hh[:,1][i] = tot_potential_hh[:,1][i-1] + r_space
tot_potential_ht[:,1][i] = tot_potential_ht[:,1][i-1] + r_space
tot_potential_tt[:,1][i] = tot_potential_tt[:,1][i-1] + r_space
tot_potential_hh[:,1][0] = r_init
tot_potential_ht[:,1][0] = r_init
tot_potential_tt[:,1][0] = r_init
ofile.write("# Tabulated potential for Cooke 3-bead lipid model, Wc = %f\n\n" % w_cut)
num = len(tot_potential_hh[:,0])
### Calcaulte first potential, H-H
ofile.write("HEAD_HEAD\n")
r_cut = 2**Fraction('1/6') * b_hh
rmax = int(r_cut / r_space)
ofile.write("N %d R %f %f\n\n" % (num, r_init, r_max))
ofile.write("1 %f %f %f\n" % (tot_potential_hh[:,1][0], tot_potential_hh[:,2][0], tot_potential_hh[:,3][0]))
for i in range(1, rmax+1):
tot_potential_hh[:,2][i] = WCA_energy(b_hh, tot_potential_hh[:,1][i])
tot_potential_hh[:,3][i] = WCA_forces(b_hh, tot_potential_hh[:,1][i])
for i in range(1, int(r_max / r_space)+1):
ofile.write("%d %f %f %f\n" % (i+1, tot_potential_hh[:,1][i], tot_potential_hh[:,2][i], tot_potential_hh[:,3][i]))
ofile.write("\n")
### Calcaulte second potential, H-T
ofile.write("HEAD_TAIL\n")
r_cut = 2**Fraction('1/6') * b_ht
rmax = int(r_cut / r_space)
ofile.write("N %d R %f %f\n\n" % (num, r_init, r_max))
ofile.write("1 %f %f %f\n" % (tot_potential_ht[:,1][0], tot_potential_ht[:,2][0], tot_potential_ht[:,3][0]))
for i in range(1, rmax+1):
tot_potential_ht[:,2][i] = WCA_energy(b_ht, tot_potential_ht[:,1][i])
tot_potential_ht[:,3][i] = WCA_forces(b_ht, tot_potential_ht[:,1][i])
for i in range(1, int(r_max / r_space)+1):
ofile.write("%d %f %f %f\n" % (i+1, tot_potential_ht[:,1][i], tot_potential_ht[:,2][i], tot_potential_ht[:,3][i]))
ofile.write("\n")
### Calcaulte third potential, T-T
# Also include extra tail-tail attraction term
ofile.write("TAIL_TAIL\n")
r_cut = 2**Fraction('1/6') * b_tt
rmax = int(r_cut / r_space)
ofile.write("N %d R %f %f\n\n" % (num, r_init, r_max))
ofile.write("1 %f %f %f\n" % (tot_potential_tt[:,1][0], tot_potential_tt[:,2][0], tot_potential_tt[:,3][0]))
for i in range(1, rmax+1):
tot_potential_tt[:,2][i] = WCA_energy(b_tt, tot_potential_tt[:,1][i])
tot_potential_tt[:,3][i] = WCA_forces(b_tt, tot_potential_tt[:,1][i])
max2 = int( (r_cut + w_cut) / r_space)
for i in range(1, max2+1):
tot_potential_tt[:,2][i] = tot_potential_tt[:,2][i] + Tail_energy(b_tt, tot_potential_tt[:,1][i], r_cut)
tot_potential_tt[:,3][i] = tot_potential_tt[:,3][i] + Tail_forces(b_tt, tot_potential_tt[:,1][i], r_cut)
for i in range(1, int(r_max / r_space)+1):
ofile.write("%d %f %f %f\n" % (i+1, tot_potential_tt[:,1][i], tot_potential_tt[:,2][i], tot_potential_tt[:,3][i]))
ofile.write("\n")
sys.exit()
| mit | 53e24897adbb5291e0f71b70d935eccb | 29.4375 | 116 | 0.585672 | 2.300787 | false | false | false | false |
jewettaij/moltemplate | moltemplate/ttree_lex.py | 1 | 90925 | # -*- coding: iso-8859-1 -*-
### -*- coding: utf-8 -*-
# Authors: Eric S. Raymond, 21 Dec 1998
# Andrew Jewett (jewett.aij at g mail)
# LICENSE: The PSF license:
# https://docs.python.org/3/license.html
# The PSF license is compatible with the GPL license. It is not a copyleft
# license. It is apparently similar to the BSD and MIT licenses.
#
# Contributions:
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
# Unicode support hack ("wordterminators") and numerous other hideous
# ttree-specific hacks added by Andrew Jewett September 2011.
"""A lexical analyzer class for simple shell-like syntaxes.
This version has been modified slightly to work better with unicode.
It was forked from the version of shlex that ships with python 3.2.2.
A few minor features and functions have been added. -Andrew Jewett 2011 """
import os.path
import sys
from collections import deque
import re
import fnmatch
import string
#import gc
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# Public API of this module (the names exported by "from ttree_lex import *").
__all__ = ["TtreeShlex",
           "split",
           "LineLex",
           "SplitQuotedString",
           "ExtractVarName",
           "GetVarName",
           "EscCharStrToChar",
           "SafelyEncodeString",
           "RemoveOuterQuotes",
           "MaxLenStr",
           "VarNameToRegex",
           "HasRE",
           "HasWildcard",
           "MatchesPattern",
           "InputError",
           "ErrorLeader",
           "SrcLoc",
           "OSrcLoc",
           "TextBlock",
           "VarRef",
           "VarNPtr",
           "VarBinding",
           "SplitTemplate",
           "SplitTemplateMulti",
           "TableFromTemplate",
           "ExtractCatName",
           #"_TableFromTemplate",
           #"_DeleteLineFromTemplate",
           "DeleteLinesWithBadVars",
           "TemplateLexer"]
class TtreeShlex(object):
    """ A lexical analyzer class for simple shell-like syntaxes.
    TtreeShlex is a backwards-compatible version of python's standard shlex
    module. It has the additional member: "self.wordterminators", which
    overrides the "self.wordchars" member.  This enables better handling of
    unicode characters by allowing a much larger variety of characters to
    appear in words or tokens parsed by TtreeShlex.
    """

    def __init__(self,
                 instream=None,
                 infile=None,
                 posix=False):
        # "instream" may be a file-like object or a string (automatically
        # wrapped in a StringIO).  If omitted, tokens are read from stdin.
        if isinstance(instream, str):
            instream = StringIO(instream)
        if instream is not None:
            self.instream = instream
            self.infile = infile
        else:
            self.instream = sys.stdin
            self.infile = None
        self.posix = posix
        # In posix mode, EOF is reported as None; otherwise as ''.
        if posix:
            self.eof = None
        else:
            self.eof = ''
        self.commenters = '#'
        self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
                          'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
        if self.posix:
            # Allow latin-1 accented characters to appear inside words.
            # (BUGFIX: these two string literals had been corrupted into
            #  U+FFFD replacement characters by an encoding mishap; the
            #  intended latin-1 characters are restored here.)
            self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
                               'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
        # When non-empty, "wordterminators" takes precedence over
        # "wordchars": every character NOT in wordterminators is then
        # treated as a word character.  (See _belongs_to().)
        self.wordterminators = set([])
        # The whitespace character(s) which most recently terminated a
        # token, so that push_token() can faithfully re-insert them.
        self.prev_space_terminator = ''
        self.whitespace = ' \t\r\f\n'
        self.whitespace_split = False
        self.quotes = '\'"'
        self.escape = '\\'
        self.escapedquotes = '"'
        self.operators = '='  # binary numeric operators like +-*/ might be added
        self.state = ' '
        self.pushback = deque()  # <- a stack of characters (NOT tokens)
        self.lineno = 1
        self.debug = 0
        self.token = ''
        self.filestack = deque()
        # self.source_triggers
        # are tokens which allow the seamless insertion of other
        # files into the file being read.
        self.source_triggers = set(['source'])
        self.source_triggers_x = set([])
        #  self.source_triggers_x is a subset of self.source_triggers.
        #  In this case file inclusion is exclusive.
        #  In other words, the file is only included
        #  if it has not been included already.  It does this
        #  by checking if one of these tokens has been encountered.
        self.source_files_restricted = set([])
        # Directories searched (after the current directory) whenever a
        # sourced file can not be opened directly.
        self.include_path = []
        if 'TTREE_PATH' in os.environ:
            include_path_list = os.environ['TTREE_PATH'].split(':')
            self.include_path += [d for d in include_path_list if len(d) > 0]
        if self.debug:
            sys.stderr.write('TtreeShlex: reading from %s, line %d'
                             % (self.instream, self.lineno))
        self.end_encountered = False

    @staticmethod
    def _belongs_to(char, include_chars, exclude_chars):
        """Return True if char counts as a word character.  A non-empty
        exclude_chars set overrides include_chars entirely."""
        if ((not exclude_chars) or (len(exclude_chars) == 0)):
            return char in include_chars
        else:
            return char not in exclude_chars

    def push_raw_text(self, text):
        """Push a block of raw text back onto the character stack consumed
        by read_char().  The "self.lineno" counter is also adjusted,
        depending on the number of newline characters in "text".

        Do not strip off the newline, or other line terminators
        at the end of the text block before using push_raw_text()!
        """
        if self.debug >= 1:
            sys.stderr.write("TtreeShlex: pushing token " + repr(text))
        # Push the characters in reverse so they pop off in original order.
        for c in reversed(text):
            self.pushback.appendleft(c)
            if c == '\n':
                self.lineno -= 1
        if len(text) > 0:
            self.end_encountered = False

    def push_token(self, text):
        "Push a token onto the stack popped by the get_token method"
        # Re-attach the whitespace which originally terminated this token,
        # so that re-reading it reproduces the original character stream.
        self.push_raw_text(text + self.prev_space_terminator)

    def push_source(self, newstream, newfile=None):
        "Push an input source onto the lexer's input source stack."
        if isinstance(newstream, str):
            newstream = StringIO(newstream)
        self.filestack.appendleft((self.infile, self.instream, self.lineno))
        self.infile = newfile
        self.instream = newstream
        self.lineno = 1
        if self.debug:
            if newfile is not None:
                sys.stderr.write('TtreeShlex: pushing to file %s' % (self.infile,))
            else:
                sys.stderr.write('TtreeShlex: pushing to stream %s' % (self.instream,))

    def pop_source(self):
        "Pop the input source stack."
        self.instream.close()
        (self.infile, self.instream, self.lineno) = self.filestack.popleft()
        if self.debug:
            sys.stderr.write('TtreeShlex: popping to %s, line %d'
                             % (self.instream, self.lineno))
        self.state = ' '

    def get_token(self):
        """Get a token from the input stream (or from stack if it's nonempty).
        Transparently handles file-inclusion requests (tokens listed in
        self.source_triggers) and popping back to the parent file at EOF."""
        raw = self.read_token()
        # Handle inclusions
        if self.source_triggers is not None:
            while raw in self.source_triggers:
                fname = self.read_token()
                spec = self.sourcehook(fname)
                if spec:
                    (newfile, newstream) = spec
                    if ((raw not in self.source_triggers_x) or
                            (newfile not in self.source_files_restricted)):
                        self.push_source(newstream, newfile)
                        if raw in self.source_triggers_x:
                            self.source_files_restricted.add(newfile)
                    else:
                        if self.debug >= 1:
                            sys.stderr.write(
                                '\ndebug warning: duplicate attempt to import file:\n \"' + newfile + '\"\n')
                raw = self.get_token()
        # Maybe we got EOF instead?
        while raw == self.eof:
            if not self.filestack:
                return self.eof
            else:
                self.pop_source()
                raw = self.get_token()
        # Neither inclusion nor EOF
        if self.debug >= 1:
            if raw != self.eof:
                sys.stderr.write("TtreeShlex: token=" + repr(raw))
            else:
                sys.stderr.write("TtreeShlex: token=EOF")
        if raw == self.eof:
            self.end_encountered = True
        return raw

    def read_char(self):
        """Read one character, preferring characters pushed back earlier."""
        if self.pushback:
            nextchar = self.pushback.popleft()
            assert((type(nextchar) is str) and (len(nextchar) == 1))
        else:
            nextchar = self.instream.read(1)
        return nextchar

    def read_token(self):
        """Read a single raw token.  (A finite-state machine inherited from
        shlex.  self.state is one of: ' ' between tokens, 'a' inside a word,
        a quote character inside a quoted string, an escape character
        immediately after an escape, or None past end of file.)"""
        self.prev_space_terminator = ''
        quoted = False
        escapedstate = ' '
        while True:
            nextchar = self.read_char()
            if nextchar == '\n':
                self.lineno = self.lineno + 1
            if self.debug >= 3:
                # BUGFIX: sys.stderr.write() accepts only a single string
                # argument.  (Previously this was invoked with 4 arguments,
                # raising a TypeError whenever self.debug >= 3.)
                sys.stderr.write("TtreeShlex: in state %r I see character: %r"
                                 % (self.state, nextchar))
            if self.state is None:
                self.token = ''  # past end of file
                break
            elif self.state == ' ':
                if not nextchar:
                    self.state = None  # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        sys.stderr.write("TtreeShlex: I see whitespace in whitespace state")
                    if self.token or (self.posix and quoted):
                        # Keep track of which whitespace
                        # character terminated the token.
                        self.prev_space_terminator = nextchar
                        break  # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    # Discard the rest of the commented line.
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                elif self.posix and nextchar in self.escape:
                    escapedstate = 'a'
                    self.state = nextchar
                elif TtreeShlex._belongs_to(nextchar,
                                            self.wordchars,
                                            self.wordterminators):
                    self.token = nextchar
                    self.state = 'a'
                elif nextchar in self.quotes:
                    if not self.posix:
                        self.token = nextchar
                    self.state = nextchar
                elif self.whitespace_split:
                    self.token = nextchar
                    self.state = 'a'
                else:
                    # Punctuation characters become single-character tokens.
                    self.token = nextchar
                    if self.token or (self.posix and quoted):
                        break  # emit current token
                    else:
                        continue
            elif self.state in self.quotes:
                quoted = True
                if not nextchar:  # end of file
                    if self.debug >= 2:
                        sys.stderr.write("TtreeShlex: I see EOF in quotes state")
                    # XXX what error should be raised here?
                    raise ValueError("Error at or before " + self.error_leader() + "\n"
                                     " No closing quotation.")
                if nextchar == self.state:
                    if not self.posix:
                        self.token = self.token + nextchar
                        self.state = ' '
                        break
                    else:
                        self.state = 'a'
                elif self.posix and nextchar in self.escape and \
                        self.state in self.escapedquotes:
                    escapedstate = self.state
                    self.state = nextchar
                else:
                    self.token = self.token + nextchar
            elif self.state in self.escape:
                if not nextchar:  # end of file
                    if self.debug >= 2:
                        sys.stderr.write("TtreeShlex: I see EOF in escape state")
                    # What error should be raised here?
                    raise InputError('File terminated immediately following an escape character.')
                # In posix shells, only the quote itself or the escape
                # character may be escaped within quotes.
                if escapedstate in self.quotes and \
                        nextchar != self.state and nextchar != escapedstate:
                    self.token = self.token + self.state
                self.token = self.token + nextchar
                self.state = escapedstate
            elif self.state == 'a':
                if not nextchar:
                    self.state = None  # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        sys.stderr.write("TtreeShlex: I see whitespace in word state")
                    self.state = ' '
                    if self.token or (self.posix and quoted):
                        # Keep track of which whitespace
                        # character terminated the token.
                        self.prev_space_terminator = nextchar
                        break  # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    comment_contents = self.instream.readline()
                    self.lineno = self.lineno + 1
                    if self.posix:
                        self.state = ' '
                        if self.token or (self.posix and quoted):
                            # Keep track of which character(s) terminated
                            # the token (including whitespace and comments).
                            self.prev_space_terminator = nextchar + comment_contents
                            break  # emit current token
                        else:
                            continue
                elif self.posix and nextchar in self.quotes:
                    self.state = nextchar
                elif self.posix and nextchar in self.escape:
                    escapedstate = 'a'
                    self.state = nextchar
                elif (TtreeShlex._belongs_to(nextchar,
                                             self.wordchars,
                                             self.wordterminators)
                      or (nextchar in self.quotes)
                      or (self.whitespace_split)):
                    self.token = self.token + nextchar
                else:
                    # Punctuation terminates the word; push it back so it
                    # becomes the next token.
                    self.pushback.appendleft(nextchar)
                    if self.debug >= 2:
                        sys.stderr.write("TtreeShlex: I see punctuation in word state")
                    self.state = ' '
                    if self.token:
                        break  # emit current token
                    else:
                        continue
        result = self.token
        self.token = ''
        if self.posix and not quoted and result == '':
            result = None
        if self.debug > 1:
            if result:
                sys.stderr.write("TtreeShlex: raw token=" + repr(result))
            else:
                sys.stderr.write("TtreeShlex: raw token=EOF")
        return result

    def sourcehook(self, newfile):
        "Hook called on a filename to be sourced."
        newfile = RemoveOuterQuotes(newfile)
        # This implements cpp-like semantics for relative-path inclusion.
        newfile_full = newfile
        if isinstance(self.infile, str) and not os.path.isabs(newfile):
            newfile_full = os.path.join(os.path.dirname(self.infile), newfile)
        try:
            f = open(newfile_full, "r")
        except IOError:
            # If not found,
            err = True
            # ...then check to see if the file is in one of the
            # directories in the self.include_path list.
            for d in self.include_path:
                newfile_full = os.path.join(d, newfile)
                try:
                    f = open(newfile_full, "r")
                    err = False
                    break
                except IOError:
                    err = True
            if err:
                raise InputError('Error at ' + self.error_leader() + '\n'
                                 ' unable to open file \"' + newfile + '\"\n'
                                 ' for reading.\n')
        return (newfile, f)

    def error_leader(self, infile=None, lineno=None):
        "Emit a C-compiler-like, Emacs-friendly error-message leader."
        if infile is None:
            infile = self.infile
        if lineno is None:
            lineno = self.lineno
        return "\"%s\", line %d: " % (infile, lineno)

    def __iter__(self):
        return self

    def __next__(self):
        token = self.get_token()
        if token == self.eof:
            raise StopIteration
        return token

    # Python 2.x iterator-protocol alias (consistent with __nonzero__ below).
    next = __next__

    def __bool__(self):
        return not self.end_encountered

    # For compatibility with python 2.x, I must also define:
    def __nonzero__(self):
        return self.__bool__()
# The split() function was originally from shlex
# It is included for backwards compatibility.
def split(s, comments=False, posix=True):
    """Split the string s into a list of tokens, shlex-style.
    (Kept for backwards compatibility with the standard shlex module.)
    Unless comments=True, '#' comment handling is disabled entirely.
    """
    lexer = TtreeShlex(s, posix=posix)
    lexer.whitespace_split = True
    if not comments:
        lexer.commenters = ''
    return [tok for tok in lexer]
##################### NEW ADDITIONS (may be removed later) #################
#"""
# -- linelex.py --
# linelex.py defines the LineLex class, which inherits from, and further
# augments the capabilities of TtreeShlex by making it easier to parse
# individual lines one at a time. (The original shlex's "source" inclusion
# ability still works when reading entire lines, and lines are still counted.)
#
#"""
#import sys
class InputError(Exception):
    """Exception carrying a human-readable message describing faulty user
    input.  (Raising it implies the caller supplied a bad input file or
    argument, as opposed to an internal program error.)
    """

    def __init__(self, err_msg):
        # Keep the message so __str__()/__repr__() can report it later.
        self.err_msg = err_msg

    def __str__(self):
        return self.err_msg

    def __repr__(self):
        # repr() and str() intentionally yield the same text.
        return self.__str__()
def ErrorLeader(infile, lineno):
    """Format a '"file", line N' prefix for error messages."""
    return '\"%s\", line %s' % (infile, lineno)
class SrcLoc(object):
    """A minimal (file-name, line-number) pair identifying a location in
    an input file.  (Essentially nothing more than a 2-tuple.)
    """
    __slots__ = ["infile", "lineno"]

    def __init__(self, infile='', lineno=-1):
        # lineno defaults to -1, meaning "line unknown".
        self.infile = infile
        self.lineno = lineno
def SplitQuotedString(string,
                      quotes='\'\"',
                      delimiters=' \t\r\f\n',
                      escape='\\',
                      comment_char='#',
                      endquote=None):
    """Split "string" into a list of tokens, respecting quotes, escapes,
    and comments.  Delimiter characters separate tokens unless they appear
    inside a quoted region or after an escape character.  An (unescaped,
    unquoted) comment character ends parsing immediately.  Two-character
    escape sequences \\n, \\t, \\r, \\f are converted to single characters.
    If "endquote" is given, it is used as the closing-quote character
    instead of whatever character opened the quote.
    NOTE(review): the parameter named "string" shadows the imported
    "string" module inside this function (harmless here, since the module
    is not used in this scope).
    """
    tokens = []
    token = ''
    reading_token = True
    escaped_state = False
    quote_state = None  # the character which will close the current quote (or None)
    for c in string:
        if (c in comment_char) and (not escaped_state) and (quote_state == None):
            # Comment: emit the token accumulated so far and stop.
            tokens.append(token)
            return tokens
        elif (c in delimiters) and (not escaped_state) and (quote_state == None):
            # Unquoted delimiter: terminate the current token (if any).
            if reading_token:
                tokens.append(token)
                token = ''
                reading_token = False
        elif c in escape:
            if escaped_state:
                # An escaped escape character is kept literally.
                token += c
                reading_token = True
                escaped_state = False
            else:
                escaped_state = True
                # and leave c (the '\' character) out of token
        elif (c == quote_state) and (not escaped_state) and (quote_state != None):
            # Closing quote: leave quoted mode.
            quote_state = None
            if include_endquote:
                token += c
        elif (c in quotes) and (not escaped_state):
            if quote_state == None:
                if endquote != None:
                    quote_state = endquote
                else:
                    quote_state = c
                # Now deal with strings like
                #    a "b" "c d" efg"h i j"
                # Assuming quotes='"', then we want this to be split into:
                #    ['a', 'b', 'c d', 'efg"h i j"']
                # ...in other words, include the end quote if the token did
                # not begin with a quote
                include_endquote = False
                if token != '':
                    # if this is not the first character in the token
                    include_endquote = True
            token += c
            reading_token = True
        else:
            # Translate the common two-character escape sequences.
            if (c == 'n') and (escaped_state == True):
                c = '\n'
            elif (c == 't') and (escaped_state == True):
                c = '\t'
            elif (c == 'r') and (escaped_state == True):
                c = '\r'
            elif (c == 'f') and (escaped_state == True):
                c = '\f'
            token += c
            reading_token = True
            escaped_state = False
    # Remove any empty strings from the front or back of the list,
    # just in case SplitQuotedString() fails to remove them.
    # (Possible bug in SplitQuotedString(), but too lazy to investigate.)
    if (len(tokens) > 0) and (tokens[0] == ''):
        del tokens[0]
    if (len(tokens) > 0) and (tokens[-1] == ''):
        del tokens[-1]
    # Append the final (non-empty) token, if any.
    if (len(string) > 0) and (token != ''):
        tokens.append(token)
    return tokens
def GetVarName(lex):
    """ Read a string like 'atom:A ' or '{/atom:A B/C/../D }ABC '
    and return ('','atom:A',' ') or ('{','/atom:A B/C/../D ','}ABC')
    These are 3-tuples containing the portion of the text containing
    only the variable's name (assumed to be within the text),
    ...in addition to the text on either side of the variable name.

    The "lex" argument must provide the TtreeShlex character-level
    interface: read_char(), push_raw_text(), and the escape / whitespace /
    commenters attributes (wordterminators and the paren attributes are
    optional and probed with hasattr()).
    NOTE(review): despite the docstring, this function returns only the
    variable-name string itself (not a 3-tuple); ExtractVarName() builds
    the 3-tuple from it.
    """
    # Fall back to defaults for attributes the lexer may not define.
    escape = '\''
    lparen = '{'
    rparen = '}'
    if hasattr(lex, 'escape'):
        escape = lex.escape
    if hasattr(lex, 'var_open_paren'):
        lparen = lex.var_open_paren
    if hasattr(lex, 'var_close_paren'):
        rparen = lex.var_close_paren
    nextchar = lex.read_char()
    # Skip past the left-hand side paren '{'
    paren_depth = 0
    escaped = False
    if nextchar == lparen:
        paren_depth = 1
    # NOTE(review): the remaining checks use lex.escape directly rather
    # than the local "escape" default computed above -- preserved as-is.
    elif nextchar in lex.escape:
        escaped = True
    elif (hasattr(lex, 'wordterminators') and
          (nextchar in lex.wordterminators)):
        # A terminator before any name characters: empty variable name.
        lex.push_raw_text(nextchar)
        return ''
    else:
        lex.push_raw_text(nextchar)
    # Now read the variable name:
    var_name_l = []
    while lex:
        nextchar=lex.read_char()
        if nextchar == '':
            break
        elif nextchar == '\n':
            lex.lineno += 1
            # Newlines are only allowed inside a {...}-delimited name.
            if paren_depth > 0:
                var_name_l.append(nextchar)
            else:
                lex.push_raw_text(nextchar)
                break
        elif escaped:
            # The previous character was an escape: keep this one literally.
            var_name_l.append(nextchar)
            escaped = False
        elif nextchar in lex.escape:
            escaped = True
        elif nextchar == lparen:
            paren_depth += 1
            if (hasattr(lex, 'wordterminators') and
                (nextchar in lex.wordterminators)):
                lex.push_raw_text(nextchar)
                break
            else:
                var_name_l.append(nextchar)
        elif nextchar == rparen:
            paren_depth -= 1
            if paren_depth == 0:
                # The matching close-paren ends the name (and is consumed).
                break
            elif (hasattr(lex, 'wordterminators') and
                  (nextchar in lex.wordterminators)):
                lex.push_raw_text(nextchar)
                break
            else:
                var_name_l.append(nextchar)
        elif paren_depth > 0:
            # Inside {...}, whitespace/terminators are part of the name.
            var_name_l.append(nextchar)
            escaped = False
        elif nextchar in lex.whitespace:
            lex.push_raw_text(nextchar)
            break
        elif (hasattr(lex, 'wordterminators') and
              (nextchar in lex.wordterminators) and
              (paren_depth == 0)):
            lex.push_raw_text(nextchar)
            break
        elif nextchar in lex.commenters:
            # Comment: discard the rest of the line and stop.
            lex.instream.readline()
            lex.lineno += 1
            break
        else:
            var_name_l.append(nextchar)
            escaped = False
    var_name = ''.join(var_name_l)
    return var_name
def ExtractVarName(text,
                   commenters = '#',
                   whitespace = ' \t\r\f\n'):
    """ Read a string like 'atom:A ' or '{/atom:A B/C/../D }ABC '
    and return ('','atom:A',' ') or ('{','/atom:A B/C/../D ','}ABC')
    These are 3-tuples containing the portion of the text containing
    only the variable's name (assumed to be within the text),
    ...in addition to the text on either side of the variable name.

    NOTE(review): the "commenters" and "whitespace" parameters are
    accepted but never used in this implementation; comment/whitespace
    handling comes from the TtreeShlex defaults instead.
    NOTE(review): "text" is assumed to be non-empty (text[0] is accessed
    unconditionally) -- confirm with callers.
    """
    ibegin = 0  # NOTE(review): assigned but unused below
    left_paren = ''
    if text[0] == '{':
        ibegin = 1
        left_paren = text[0] #(GetVarName() strips the leading '{' character)
    # The best way to insure consistency with other code is to use
    # lex.GetVarName() to figure out where the variable name ends.
    lex = TtreeShlex(StringIO(text))
    var_name = GetVarName(lex)
    # Any text following the end of the variable name should be returned as well
    text_after_list = []
    if left_paren:
        text_after_list.append('}') #(GetVarName() strips the trailing '}' char)
    # Drain the remaining (unconsumed) characters from the lexer.
    while lex:
        c = lex.read_char()
        if c == '':
            break
        text_after_list.append(c)
    text_after = ''.join(text_after_list)
    return (left_paren, var_name, text_after)
def EscCharStrToChar(s_in, escape='\\'):
    """
    Replace two-character escape sequences in s_in (such as '\\n', '\\t',
    escaped quotes, and a doubled escape character) with the single
    characters they represent.  Unrecognized escape sequences are kept
    verbatim (both characters are preserved).
    """
    assert(len(escape) > 0)
    simple_escapes = {'n': '\n', 't': '\t', 'r': '\r', 'f': '\f',
                      '\'': '\'', '\"': '\"'}
    result = []
    pending_escape = False
    for c in s_in:
        if pending_escape:
            if c in simple_escapes:
                result.append(simple_escapes[c])
            elif c in escape:
                # A doubled escape character collapses to one.
                result.append(c)
            else:
                # Unknown sequence: keep both characters.
                result.append(escape + c)
            pending_escape = False
        elif c in escape:
            pending_escape = True
        else:
            result.append(c)
    return ''.join(result)
def SafelyEncodeString(in_str,
                       quotes='\'\"',
                       delimiters=' \t\r\f\n',
                       escape='\\',
                       comment_char='#'):
    """
    Return a version of in_str in which problematic characters (newlines,
    tabs, quotes, escape characters) have been replaced by two-character
    backslashed equivalents ('\\n', '\\t', an escaped quote, a doubled
    escape).  If the string contains any remaining delimiter characters,
    the entire result is additionally wrapped in double-quotes.
    The escape character is the backslash by default, but it can be
    overridden (this does not affect the encoding of '\\n', '\\t', etc.).
    """
    assert(len(escape) > 0)
    special = {'\n': '\\n', '\t': '\\t', '\r': '\\r', '\f': '\\f'}
    pieces = []
    needs_outer_quotes = False
    for c in in_str:
        if c in special:
            pieces.append(special[c])
        elif c in quotes:
            pieces.append(escape[0] + c)
        elif c in escape:
            pieces.append(c + c)
        else:
            if c in delimiters:
                # Delimiters are kept as-is, but force outer quoting.
                needs_outer_quotes = True
            pieces.append(c)
    encoded = ''.join(pieces)
    if needs_outer_quotes:
        encoded = '\"' + encoded + '\"'
    return encoded
def RemoveOuterQuotes(text, quotes='\"\''):
    """Strip one layer of matching outer quote characters from text,
    if present; otherwise return text unchanged."""
    if len(text) >= 2 and text[0] == text[-1] and text[0] in quotes:
        return text[1:-1]
    return text
def MaxLenStr(s1, s2):
    """Return whichever of the two strings is longer (s1 wins ties)."""
    return s2 if len(s2) > len(s1) else s1
def VarNameToRegex(s):
    """
    Returns the portion of a TTREE-style variable name (eg "@atom:re.C[1-5]")
    that corresponds to a regular expression (eg "C[1-5]"). A variable name
    is assumed to encode a regular expression if "re." appears immediately
    after a ':' or a '/' character.
    If so, the text in s (excluding "re.") is assumed to be a regular
    expression and is returned to the caller.
    If not, the empty string ('') is returned.
    If the first or second character is a '{', and the final character
    is '}', they will be deleted.  Consequently:
       VarNameToRegex('@atom:C')             returns ''
       VarNameToRegex('@atom:re.C[1-5]')     returns '@atom:C[1-5]'
       VarNameToRegex('@{/atom:re.C[1-5]}')  returns '@/atom:C[1-5]'
       VarNameToRegex('@bond:AB')            returns ''
       VarNameToRegex('@bond:re.A*B')        returns '@bond:A*B'
       VarNameToRegex('bond:re.A*B')         returns 'bond:A*B'
       VarNameToRegex('{bond:re.A*B}')       returns 'bond:A*B'
       VarNameToRegex('@{bond:re.A*B}')      returns '@bond:A*B'
    (BUGFIX: the original docstring examples incorrectly showed the
     regular-expression text converted to lower case, eg 'a*b'; this
     function never changes the case of its argument.)
    """
    # First, deal with curly brackets: strip a "{...}" wrapper, keeping an
    # optional single leading character (such as '@' or '$') in front.
    iparen_L = s.find('{')
    iparen_R = s.rfind('}')
    if (((iparen_L == 0) or (iparen_L == 1)) and (iparen_R == len(s) - 1)):
        optional_char = ''
        if iparen_L == 1:
            optional_char = s[0]
        s = optional_char + s[iparen_L + 1:iparen_R]
    # If 're.' is not found immediately after a ':' character
    # or following a '/' character, then there is no regular expression.
    # In that case, return ''.
    ire = s.find('re.')
    if ((ire == -1) or
            (not ((ire > 0) and ((s[ire - 1] == ':') or (s[ire - 1] == '/'))))):
        return ''
    # Otherwise remove the "re." marker and return the rest.
    return s[0:ire] + s[ire + 3:]
def HasRE(pat):
    """
    Return True if the variable name pat encodes a regular expression,
    ie. if it contains an "re." marker recognized by VarNameToRegex().
    """
    return VarNameToRegex(pat) != ''
def HasWildcard(pat):
    """
    Return True if pat contains at least one '*' or '?' wildcard character.
    """
    return ('*' in pat) or ('?' in pat)
# def HasWildcard(pat):
# """
# Returns true if a string (pat) contains a non-backslash-protected
# * or ? character.
#
# """
# N=len(pat)
# i=0
# while i < N:
# i = pat.find('*', i, N)
# if i == -1:
# break
# elif (i==0) or (pat[i-1] != '\\'):
# return True
# i += 1
# i=0
# while i < N:
# i = pat.find('?', i, N)
# if i == -1:
# break
# elif (i==0) or (pat[i-1] != '\\'):
# return True
# i += 1
# return False
def MatchesPattern(s, pattern):
    """
    Return True if the string s matches pattern.  The pattern may either
    be a plain string -- in which case '*' and '?' wildcards are honored
    with fnmatch-style glob semantics, and otherwise an exact comparison
    is performed -- or a precompiled regular expression object, in which
    case pattern.search() decides the match.
    """
    if type(pattern) is str:
        # (Wildcard test inlined from HasWildcard() -- same semantics.)
        has_glob = ('*' in pattern) or ('?' in pattern)
        if has_glob:
            return fnmatch.fnmatchcase(s, pattern)
        return s == pattern
    # Otherwise assume pattern = re.compile(some_reg_expr)
    return pattern.search(s) is not None
def MatchesAll(multi_string, pattern):
    """Return True only if every element of multi_string matches the
    corresponding element of pattern (see MatchesPattern()).  Both
    sequences must have the same length."""
    assert(len(multi_string) == len(pattern))
    return all(MatchesPattern(s, p)
               for s, p in zip(multi_string, pattern))
class LineLex(TtreeShlex):
    """ This class extends the TtreeShlex module (a slightly modified
    version of the python 3.2.2 version of shlex).  LineLex has the
    ability to read one line at a time (in addition to one token at a time).
    (Many files and scripts must be parsed one line at a time instead of one
    token at a time.  In these cases, the whitespace position also matters.)

    Arguably, this class might not be necessary.
    I could get rid of this class completely.  That would be nice.  To do that
    we would need to augment and generalize shlex's get_token() member function
    to make it read lines, not just tokens.  Of course, you can always
    change the wordchars (or wordterminators).  Even so, there are two other
    difficulties using the current version of shlex.get_token() to read lines:
    1) File inclusion happen whenever the beginning of a line/token matches one
       of the "source_triggers" (not the whole line as required by get_token()).
    2) Lines ending in a special character (by default the backslash character)
       continue on to the next line.
    This code seems to work on our test files, but I'm sure there are bugs.
    Andrew 2012-3-25
    """

    def __init__(self,
                 instream=None,
                 infile=None,
                 posix=False):
        TtreeShlex.__init__(self, instream, infile, posix)
        # Characters which end a line, and which continue it onto the next.
        self.line_terminators = '\n'
        self.line_extend_chars = '\\'
        self.skip_comments_during_readline = True

    def _StripComments(self, line):
        """Return line truncated at the first unescaped comment character
        (or unchanged, when comment skipping is disabled)."""
        if self.skip_comments_during_readline:
            for i in range(0, len(line)):
                if ((line[i] in self.commenters) and
                        ((i == 0) or (line[i - 1] not in self.escape))):
                    return line[:i]
        return line

    def _ReadLine(self,
                  recur_level=0):
        """
        This function retrieves a block of text, halting at a
        terminal character.  Escape sequences are respected.
        The self.lineno (newline counter) is also maintained.

        The main difference between Readline and get_token()
        is the way they handle the "self.source_triggers" member.
        Both Readline() and get_token() insert text from other files when they
        encounter a string in "self.source_triggers" in the text they read.
        However ReadLine() ONLY inserts text from other files if the token which
        matches with self.source_triggers appears at the beginning of the line.
        get_token() inserts text only if lex.source matches the entire token.

        Returns a 4-tuple: (line-so-far with comments stripped,
        the terminating character ('' at EOF), the first whitespace-delimited
        token on the line, and whether any whitespace was seen yet).

        comment-to-self:
        At some point, once I'm sure this code is working, I should replace
        shlex.get_token() with the code from ReadLine() which is more general.
        It would be nice to get rid of "class LineLex" entirely.  ReadLine()
        is the only new feature that LineLex which was lacking in shlex.
        To do this I would need to add a couple optional arguments to
        "get_token()", allowing it to mimic ReadLine(), such as:
          "override_wordterms" argument (which we can pass a '\n'), and
          "token_extender" argument (like '\' for extending lines)
        """
        first_token = ''
        line = ''
        escaped_state = False
        found_space = False
        while True:
            nextchar = self.read_char()
            # sys.stderr.write('nextchar=\"'+nextchar+'\"\n')
            while nextchar == '':
                # EOF: pop back to the including file, if any.
                if not self.filestack:
                    return self._StripComments(line), '', first_token, found_space
                else:
                    self.pop_source()
                    nextchar = self.read_char()
            if nextchar == '\n':
                self.lineno += 1
            if escaped_state:
                escaped_state = False
            else:
                if nextchar in self.escape:
                    line += nextchar
                    escaped_state = True
                else:
                    escaped_state = False
            if not escaped_state:
                if (nextchar in self.whitespace):
                    found_space = True
                    # File inclusion: only when the trigger token appears
                    # at the very beginning of the line.
                    while first_token in self.source_triggers:
                        fname = RemoveOuterQuotes(self.get_token())
                        if (fname == '') or (fname in self.source_triggers):
                            raise InputError('Error: near ' + self.error_leader() + '\n'
                                             ' Nonsensical file inclusion request.\n')
                        if self.debug >= 0:
                            sys.stderr.write((' ' * recur_level) +
                                             'reading file \"' + fname + '\"\n')
                        spec = self.sourcehook(fname)
                        if spec:
                            (fname, subfile) = spec
                            if ((first_token not in self.source_triggers_x) or
                                    (fname not in self.source_files_restricted)):
                                self.push_source(subfile, fname)
                                if first_token in self.source_triggers_x:
                                    self.source_files_restricted.add(fname)
                            else:
                                if self.debug >= 0:
                                    sys.stderr.write('\nWarning at ' + self.error_leader() + ':\n'
                                                     ' duplicate attempt to import file:\n \"' + fname + '\"\n')
                        # Restart reading from the top of the included file.
                        line, nextchar, first_token, found_space = \
                            self._ReadLine(recur_level + 1)
                if nextchar in self.line_terminators:
                    line_nrw = line.rstrip(self.whitespace)
                    # sys.stderr.write('line_nrw=\"'+line_nrw+'\"\n')
                    # A trailing (unescaped) line-extend character joins
                    # this line with the next one.
                    if ((len(line_nrw) > 0) and
                            (line_nrw[-1] in self.line_extend_chars) and
                            ((len(line_nrw) < 2) or (line_nrw[-2] not in self.escape))):
                        # delete the line_extend character
                        line = line_nrw[:-1]
                        # from the end of that line and keep reading...
                    else:
                        return self._StripComments(line), nextchar, first_token, found_space
                else:
                    line += nextchar
                    if not found_space:
                        first_token += nextchar

    def ReadLine(self, recur_level=0):
        """Read and return one (comment-stripped) line, including its
        terminating character.  Sets self.end_encountered at EOF."""
        line, nextchar, first_token, found_space = \
            self._ReadLine(recur_level)
        if nextchar == self.eof:
            self.end_encountered = True
        return line + nextchar

    @staticmethod
    def TextBlock2Lines(text, delimiters, keep_delim=True):
        """ This splits a string into a list of sub-strings split by delimiter
        characters.  This function is different from the standard str.split()
        function: The string is split at every character which belongs to the
        "delimiters" argument (which can be a string or some other container).
        This character is included at the end of every substring.  Example:
           TextBlock2Lines('\nabc\nde^fg\nhi j\n', '^\n')
        returns:
           ['\n', 'abc\n', 'de^', 'fg\n', 'hi j\n']
        """
        ls = []
        i = 0
        i_prev = 0
        while i < len(text):
            if text[i] in delimiters:
                if keep_delim:
                    ls.append(text[i_prev:i + 1])
                else:
                    ls.append(text[i_prev:i])
                i_prev = i + 1
            i += 1
        # Append any trailing text which did not end with a delimiter.
        if (i_prev < len(text)):
            ls.append(text[i_prev:i + 1])
        return ls

    def __iter__(self):
        return self

    def __next__(self):
        # Iterating over a LineLex yields lines (not tokens).
        line = self.ReadLine()
        if line == self.eof:
            raise StopIteration
        return line
class OSrcLoc(object):
    """ OSrcLoc is barely more than a 2-tuple containing the name of a file
    (a string) and a particular line number inside that file (an integer).
    These objects are passed around and stored in the nodes of
    every tree, so that if a syntax error or broken link in that node
    is discovered, an error message can be provided to the user.

    Unlike SrcLoc, every OSrcLoc is additionally stamped with a global
    creation-sequence number ("order"), so instances can be sorted in
    the order in which they were created.
    """
    __slots__ = ["infile", "lineno", "order"]
    count = 0  # class-wide tally of OSrcLoc instances created so far

    def __init__(self, infile='', lineno=-1):
        self.infile = infile
        self.lineno = lineno
        # Stamp this instance with its creation sequence number.
        OSrcLoc.count += 1
        self.order = OSrcLoc.count

    def __lt__(self, x):
        # Earlier-created locations sort before later-created ones.
        return self.order < x.order

    # def __repr__(self):
    #    return repr((self.infile, self.lineno, self.order))
class TextBlock(object):
    """A chunk of text paired with the OSrcLoc describing where in the
    original input file(s) that text was read from."""
    __slots__ = ["text", "srcloc"]

    def __init__(self, text, srcloc):  # srcloc_end):
        self.text = text
        # Fall back to a default OSrcLoc when no location was supplied.
        self.srcloc = OSrcLoc() if srcloc is None else srcloc
        # if srcloc_end == None:
        #    self.srcloc_end = OSrcLoc()
        # else:
        #    self.srcloc_end = srcloc_end

    def __repr__(self):
        return '\"' + self.text + '\"'
class VarRef(object):
    """VarRef stores one occurrence of a ttree variable: the descriptor
    string which names it, the literal text on either side of it
    (prefix/suffix), the location in the file where it appears (srcloc),
    and links to the variable's binding and name-pointer (see VarNPtr)."""
    __slots__ = ["prefix", "descr_str", "suffix", "srcloc", "binding", "nptr"]

    def __init__(self,
                 prefix='',     # '$' or '${'
                 descr_str='',  # <- descriptor string: "cpath/category:lpath"
                 suffix='',     # '}'
                 srcloc=None,   # location in file where defined
                 binding=None,  # a pointer to a tuple storing the value
                 nptr=None):    # <- see class VarNPtr
        self.prefix = prefix  # Any text before the descriptor string goes here
        self.suffix = suffix  # Any text after the descriptor string goes here
        self.descr_str = descr_str
        if srcloc is None:  # <- Location in text file where variable appears
            self.srcloc = OSrcLoc()
        else:
            self.srcloc = srcloc
        self.binding = binding
        if nptr is None:
            self.nptr = VarNPtr()
        else:
            self.nptr = nptr

    def __lt__(self, x):
        # BUGFIX: this previously compared "self.order", an attribute which
        # VarRef never defines (and which __slots__ forbids assigning), so
        # any attempt to sort VarRefs raised AttributeError.  Delegate to
        # the source locations instead (OSrcLoc orders by creation order).
        return self.srcloc < x.srcloc

    # def __repr__(self):
    #    return repr((self.prefix + self.descr_str + self.suffix, srcloc))
class VarNPtr(object):
    """
    Every variable occurring in a template has a "descriptor", for example
    "$atom:CA", which encodes three pieces of information.  VarNPtr keeps
    those three pieces together (it is simply a 3-tuple):

    1) cat_name:  the category name, which essentially indicates the
       variable's type ("atom" in the example above).
    2) cat_node:  the root node of the portion of the tree in which this
       variable's type makes sense.  If this is the tree's root node, the
       category is relevant everywhere (not molecule- or class-specific).
       All variables have a category node, although users often do not
       define it explicitly.  (The root "/" in the example above.)
    3) leaf_node: a node whose ".name" member matches the name of the
       variable.  This node is created for this purpose, and its position
       in the tree reflects the variable's intended scope.  In a molecule,
       this name might be an atom type, an atom ID, or a bond type found
       in a particular molecule.  ("CA" in the example above.)
    """
    __slots__ = ["cat_name", "cat_node", "leaf_node"]

    def __init__(self, cat_name='', cat_node=None, leaf_node=None):
        self.cat_name = cat_name
        self.cat_node = cat_node
        self.leaf_node = leaf_node

    # def __repr__(self):
    #    return repr((self.cat_name, self.cat_node.name, self.leaf_node.name))
class VarBinding(object):
    """One variable binding: (full_name, value, refs) plus bookkeeping.

    full_name
        The canonical name for this variable: the full path leading to the
        category node (beginning with '/'), the category name (followed by
        ':'), and the leaf node, including the path from the category node
        up to it.  This triplet identifies the variable uniquely.
    nptr
        The VarNPtr (category name / category node / leaf node) triple.
    value
        The data the variable refers to (usually a string).
    refs
        A list of VarRefs mentioning this same variable from the various
        templates scattered throughout the tree.
    order
        An integer used to sort bindings into file-appearance order.
    category
        The Category object this binding belongs to.
    """

    __slots__ = ["full_name", "nptr", "value", "refs", "order", "category"]

    def __init__(self,
                 full_name='',
                 nptr=None,
                 value=None,
                 refs=None,
                 order=-1,
                 category=None):
        self.full_name = full_name
        self.nptr = nptr
        self.value = value
        self.refs = refs
        self.order = order
        self.category = category

    def __lt__(self, x):
        # Sort bindings by their order of appearance.
        return self.order < x.order

    def __repr__(self):
        summary = (self.full_name, self.value, self.order)
        return repr(summary)
def ExtractCatName(descr_str):
    """Return the category name encoded in a variable descriptor string.

    When applied to a VarRef's "descr_str" member, this extracts the
    "cat_name" of its corresponding "nptr" member: for a descriptor such
    as "mol/atom:CA" it returns the text between the last '/' and the
    first ':' (ie "atom").  Useful for error reporting, eg to check that
    the user supplied the correct counter variable type at a given
    location in their input files.

    NOTE(review): when descr_str contains no ':' but does contain a '/',
    the returned slice keeps the leading '/' (unlike the colon branch,
    which strips it).  That asymmetry is preserved here -- confirm
    whether it is intentional.
    """
    colon_pos = descr_str.find(':')
    if colon_pos == -1:
        # No category separator: take everything from the last '/' onward.
        slash_pos = descr_str.rfind('/')
        if slash_pos == -1:
            slash_pos = 0
        return descr_str[slash_pos:]
    head = descr_str[:colon_pos]
    slash_pos = head.rfind('/')
    if slash_pos == -1:
        return head
    return head[slash_pos + 1:]
def _DeleteLineFromTemplate(tmpl_list,
                            i_entry,  # index into tmpl_list
                            newline_delimiter='\n'):
    """ Delete a single line from tmpl_list.
    tmpl_list is an alternating list of VarRefs and TextBlocks.
    To identify the line, the index corresponding to one of the
    entries in the tmpl_list is used.  (Usually it is a VarRef)
    The text after the preceeding newline, and the text up to the next newline
    (starting from the beginning of the current entry, if a TextBlock)
    is deleted, including any VarRef (variables) located in between.

    It returns the index corresponding to the next
    entry in the list (after deletion).
    """
    # Scan backwards for the nearest TextBlock containing a newline;
    # truncate that block just after the newline (keeping the newline).
    i_prev_newline = i_entry
    while i_prev_newline >= 0:
        entry = tmpl_list[i_prev_newline]
        if isinstance(entry, TextBlock):
            i_char_newline = entry.text.rfind(newline_delimiter)
            if i_char_newline != -1:  # then newline found
                # Delete the text after this newline
                entry.text = entry.text[:i_char_newline + 1]
                break
        i_prev_newline -= 1
    # If no preceding newline exists, i_prev_newline ends up at -1, so the
    # deletion below starts from index 0 (the beginning of the template).

    first_var = True
    #i_next_newline = i_entry
    # Scan forward for the next TextBlock containing a newline; strip that
    # block's text up to and including the newline.
    i_next_newline = i_prev_newline + 1
    while i_next_newline < len(tmpl_list):
        entry = tmpl_list[i_next_newline]
        if isinstance(entry, TextBlock):
            i_char_newline = entry.text.find(newline_delimiter)
            if i_char_newline != -1:  # then newline found
                # Delete the text before this newline (including the newline)
                entry.text = entry.text[i_char_newline + 1:]
                break
        # Invoke DeleteSelf() on the first variables on this line.  This will
        # insure that it is deleted from the ttree_assignments.txt file.
        elif isinstance(entry, VarRef):
            if first_var:
                entry.nptr.leaf_node.DeleteSelf()
            first_var = False
        i_next_newline += 1
    # Remove every entry strictly between the two boundary TextBlocks.
    del tmpl_list[i_prev_newline + 1: i_next_newline]
    return i_prev_newline + 1
def DeleteLinesWithBadVars(tmpl_list,
                           delete_entire_template=False,
                           newline_delimiter='\n'):
    """
    Loop through the entries in a template,
    an alternating list of TextBlocks and VarRefs (tmpl_list).
    If a VarRef points to a leaf_node which no longer exists
    (ie. no longer in the corresponding category's .bindings list),
    then delete the line it came from from the template (tmpl_list),
    or, if delete_entire_template is True, empty the whole template.

    Returns 0 if the entire template was deleted, otherwise None.
    """
    i = 0
    while i < len(tmpl_list):
        entry = tmpl_list[i]
        if isinstance(entry, VarRef):
            # This lookup also validates that the variable's category exists
            # (it raises KeyError otherwise); it was used by the
            # commented-out membership test below and is kept for that
            # side effect.
            var_bindings = entry.nptr.cat_node.categories[
                entry.nptr.cat_name].bindings
            # if entry.nptr.leaf_node not in var_bindings:
            if entry.nptr.leaf_node.IsDeleted():
                if delete_entire_template:
                    del tmpl_list[:]
                    return 0
                # _DeleteLineFromTemplate() removes the surrounding line and
                # returns the index of the next entry to examine.
                i = _DeleteLineFromTemplate(tmpl_list,
                                            i,
                                            newline_delimiter)
                continue
        i += 1
def SplitTemplate(ltmpl, delim, delete_blanks=False):
    """
    Split a template "ltmpl" into a list of "tokens" (sub-templates)
    using a single delimiter string "delim".

    INPUT arguments:
    "ltmpl" should be an list of TextBlocks and VarRefs.
    "delim" should be a simple string (type str)
    "delete_blanks" should be a boolean True/False value.
    When true, successive occurrences of the delimiter
    should not create blank entries in the output list.

    OUTPUT:
    A list of tokens.
    Each "token" is either a TextBlock, a VarRef,
    or a (flat, 1-dimensional) list containing more than one of these objects.
    The number of "tokens" returned equals the number of times the delimiter
    is encountered in any of the TextBlocks in the "ltmpl" argument, plus one.
    (... Unless "delete_blanks" is set to True.
    Again, in that case, empty entries in this list are deleted.)
    """
    assert(type(delim) is str)
    if not hasattr(ltmpl, '__len__'):
        ltmpl = [ltmpl]

    tokens_lltmpl = []   # the list of tokens to be returned
    token_ltmpl = []     # the token currently being accumulated
    i = 0
    while i < len(ltmpl):
        entry = ltmpl[i]
        #sys.stderr.write('ltmpl['+str(i)+'] = '+str(entry)+'\n')
        if isinstance(entry, TextBlock):
            # if hasattr(entry, 'text'):
            # Split this TextBlock's text on the delimiter; each piece may
            # close the current token and/or start a new one.
            prev_src_loc = entry.srcloc
            tokens_str = entry.text.split(delim)
            lineno = entry.srcloc.lineno
            j = 0
            while j < len(tokens_str):
                token_str = tokens_str[j]
                delim_found = False
                if (j < len(tokens_str) - 1):
                    delim_found = True
                if token_str == '':
                    # An empty piece means two delimiters were adjacent
                    # (or a delimiter fell at the start/end of the text).
                    if delete_blanks:
                        if delim == '\n':
                            lineno += 1
                        if len(token_ltmpl) > 0:
                            if len(token_ltmpl) == 1:
                                tokens_lltmpl.append(token_ltmpl[0])
                            else:
                                tokens_lltmpl.append(token_ltmpl)
                            del token_ltmpl
                            token_ltmpl = []
                        j += 1
                        continue
                # Build a TextBlock for this piece, tracking line numbers so
                # later error messages can point at the right source line.
                new_src_loc = OSrcLoc(prev_src_loc.infile, lineno)
                new_src_loc.order = prev_src_loc.order
                for c in token_str:
                    # Reminder to self:  c != delim  (so c!='\n' if delim='\n')
                    # (We keep track of '\n' characters in delimiters above.)
                    if c == '\n':
                        lineno += 1
                new_src_loc.lineno = lineno
                text_block = TextBlock(token_str,
                                       new_src_loc)
                prev_src_loc = new_src_loc
                if len(token_ltmpl) == 0:
                    if delim_found:
                        # This piece is a complete single-entry token.
                        tokens_lltmpl.append(text_block)
                        del token_ltmpl
                        token_ltmpl = []
                    else:
                        token_ltmpl.append(text_block)
                else:
                    if delim_found:
                        # This piece closes the token accumulated so far.
                        if len(token_str) > 0:
                            token_ltmpl.append(text_block)
                        tokens_lltmpl.append(token_ltmpl)
                        del token_ltmpl
                        token_ltmpl = []
                    else:
                        assert(not delete_blanks)
                        if (isinstance(token_ltmpl[-1], VarRef)
                            and
                            ((j > 0)
                             or
                             ((j == len(tokens_str) - 1) and
                              (i == len(ltmpl) - 1))
                             )):
                            # In that case, this empty token_str corresponds
                            # to a delimiter which was located immediately
                            # after the variable name,
                            #  AND
                            # -there is more text to follow,
                            #  OR
                            # -we are at the end of the template.
                            token_ltmpl.append(text_block)
                            if len(token_ltmpl) == 1:
                                tokens_lltmpl.append(token_ltmpl[0])
                            else:
                                tokens_lltmpl.append(token_ltmpl)
                            del token_ltmpl
                            token_ltmpl = []
                        else:
                            token_ltmpl.append(text_block)
                if (delim_found and (delim == '\n')):
                    lineno += 1
                j += 1
        elif isinstance(entry, VarRef):
            # elif hasattr(entry, 'descr_str'):
            lineno = entry.srcloc.lineno
            if ((len(token_ltmpl) == 1) and
                isinstance(token_ltmpl[0], TextBlock) and
                (len(token_ltmpl[0].text) == 0)):
                # special case: if the previous entry was "", then it means
                # the delimeter appeared at the end of the previous text block
                # leading up to this variable.  It separates the variable from
                # the previous text block.  It is not a text block of length 0.
                token_ltmpl[0] = entry
            else:
                token_ltmpl.append(entry)
        elif entry == None:  # NOTE(review): "entry is None" is the idiomatic test
            token_ltmpl.append(entry)
        else:
            assert(False)
        i += 1
    # Append left over remains of the last token
    if len(token_ltmpl) == 1:
        tokens_lltmpl.append(token_ltmpl[0])
    elif len(token_ltmpl) > 1:
        tokens_lltmpl.append(token_ltmpl)
    del token_ltmpl
    return tokens_lltmpl
def SplitTemplateMulti(ltmpl, delims, delete_blanks=False):
    """Split a template into a list of sub-templates using several delimiters.

    Splitting begins with the first delimiter string in "delims"; every
    token produced is then split again by the next delimiter, and so on
    until the delimiter strings run out.

    "ltmpl" should be a list of TextBlocks and VarRefs.
    "delims" should be a string or a list of strings.
    "delete_blanks", when True, removes blank entries from the resulting
    list of tokens (sub-templates).

    NOTE(review): a plain string also has __len__, so a multi-character
    string passed as "delims" is iterated character by character here;
    wrap it in a list ([delims]) to treat it as one delimiter -- confirm
    callers rely on the single-character case only.
    """
    delim_list = delims if hasattr(delims, '__len__') else [delims]
    tokens = [ltmpl]
    for delim in delim_list:
        assert(type(delim) is str)
        next_tokens = []
        for tok in tokens:
            for sub in SplitTemplate(tok, delim, delete_blanks):
                # Skip empty list-tokens when blanks are being discarded.
                if (delete_blanks and hasattr(sub, '__len__')
                        and len(sub) == 0):
                    continue
                next_tokens.append(sub)
        tokens = next_tokens
    return tokens
def _TableFromTemplate(d, ltmpl, delimiters, delete_blanks):
    """Recursive workhorse behind TableFromTemplate().

    Splits "ltmpl" along delimiters[d], then recurses with d-1 on each
    resulting token until dimension 0 is reached.  See the docstring of
    TableFromTemplate() for the full explanation.
    """
    output = SplitTemplateMulti(ltmpl, delimiters[d], delete_blanks[d])
    if d > 0:
        idx = 0
        # Manual index loop: entries may be deleted while iterating.
        while idx < len(output):
            output[idx] = _TableFromTemplate(d - 1,
                                             output[idx],
                                             delimiters,
                                             delete_blanks)
            is_empty_list = (hasattr(output[idx], '__len__') and
                             len(output[idx]) == 0)
            if delete_blanks[d] and is_empty_list:
                del output[idx]
            else:
                idx += 1
    return output
def TableFromTemplate(ltmpl, delimiters, delete_blanks=True):
    """Split a template into an n-dimensional table.

    Arguments:

    ltmpl
        An alternating list of TextBlocks and VarRefs containing the
        contents of this text template.

    delimiters
        A list or tuple of delimiters: one per table dimension, with
        low-priority delimiters (such as spaces ' ') first and
        higher-priority delimiters (such as newlines '\\n') later.  The
        template is first split by the highest-priority delimiter; each
        entry of the result is then split by the next highest-priority
        delimiter, and so on, until an n-dimensional list-of-lists
        remains.

    delete_blanks
        Whether to delete blank entries in the table (which occur when
        two delimiters appear next to each other).  It should be either
        None (default) or an array of booleans matching the size of the
        "delimiters" argument, which allows the merge settings to be
        customised per dimension (for example: merging whitespace within
        a line without ignoring blank lines).

    Details:

    1) Multi-character delimiters ARE allowed (like '\\n\\n').
    2) If an entry of "delimiters" is not a string but a tuple (or list)
       of strings, the text is split according to any of those delimiters
       (starting from the last entry).  This lets callers split on
       multiple whitespace characters at once, eg
       delimiters[0] = (' ', '\\t'); combined with delete_blanks[0] ==
       True this divides a line without regard to whitespace.
       Example:

           table2D = TableFromTemplate(ltmpl,
                                       delimiters=((' ', '\\t'), '\\n'),
                                       delete_blanks=(True, False))

       This mimics awk's default field splitting: whitespace between
       fields is ignored, but blank lines are kept.
    3) Any text contained in variable names is ignored.
    """
    # _TableFromTemplate() modifies its "ltmpl" argument in place, so hand
    # it a shallow copy to leave the caller's list untouched.
    shallow_copy = list(ltmpl)
    top_dim = len(delimiters) - 1
    return _TableFromTemplate(top_dim, shallow_copy, delimiters, delete_blanks)
class TemplateLexer(TtreeShlex):
    """ This class extends the standard python lexing module, shlex, adding a
    new member function (ReadTemplate()), which can read in a block of raw text,
    (halting at an (non-escaped) terminal character), and split the text into
    alternating blocks of text and variables.  (As far as this lexer is
    concerned, "variables" are simply tokens preceeded by $ or @ characters,
    and surrounded by optional curly-brackets {}.)
    """

    def __init__(self,
                 instream=None,
                 infile=None,
                 posix=False):
        TtreeShlex.__init__(self, instream, infile, posix)
        self.var_delim = '$@'  # characters which can begin a variable name
        self.var_open_paren = '{'   # optional parenthesis surround a variable
        self.var_close_paren = '}'  # optional parenthesis surround a variable
        self.newline = '\n'
        self.comment_skip_var = '#'

        #   Which characters belong in words?
        #
        # We want to allow these characters:
        #     ./$@&%^!*~`-_:;?<>[]()
        # to appear inside the tokens that TtreeShlex.get_token()
        # retrieves (TtreeShlex.get_token() is used to read class
        # names, and instance names, and variable names)
        #
        # settings.lex.wordchars+='./$@&%^!*~`-_+:;?<>[]'   #Allow these chars
        #
        # Ommisions:
        # Note: I left out quotes, whitespace, comment chars ('#'), and escape
        #       characters ('\\') because they are also dealt with separately.
        #       Those characters should not overlap with settings.lex.wordchars.
        #
        # Enabling unicode support requires that we override this choice
        # by specifying "lex.wordterminators" instead of "wordchars".
        #
        # lex.wordterminators should be the (printable) set inverse of lex.wordchars
        # I'm not sure which ascii characters are NOT included in the string above
        # (We need to figure that out, and put them in settings.lex.wordterminators)
        # To figure that out, uncomment the 8 lines below:
        #
        # self.wordterminators=''
        # for i in range(0,256):
        #    c = chr(i)
        #    if c not in self.wordchars:
        #        self.wordterminators += c
        #sys.stderr.write('-------- wordterminators = --------\n')
        # sys.stderr.write(self.wordterminators+'\n')
        # sys.stderr.write('-----------------------------------\n')
        #
        # Here is the result:
        self.wordterminators = '(){|}' + \
            self.whitespace + \
            self.quotes + \
            self.operators + \
            self.escape + \
            self.commenters
        # Note:
        # self.whitespace = ' \t\r\f\n'
        # self.quotes     = '\'"'
        # self.escape     = '\\'
        # self.commenters = '#'
        # Note: I do not terminate on these characters: +-=*'"`
        # because they appear in the names of atom types in many force-fields.
        # Also * characters are needed for variables containing wildcards
        # in the name (which will be dealt with later).

        self.source_triggers = set(['include', 'import'])
        self.source_triggers_x = set(['import'])

    def GetSrcLoc(self):
        # Snapshot of the current (file, line) position, used to tag
        # TextBlocks and VarRefs with their origin.
        return OSrcLoc(self.infile, self.lineno)

    def ReadTemplate(self,
                     simplify_output=False,
                     terminators='}',
                     remove_esc_preceeding='{\\',  # explained below
                     var_terminators='{}(),',  # (var_delim, spaces also included)
                     keep_terminal_char=True):
        """
        ReadTemplate() reads a block of text (between terminators)
        and divides it into variables (tokens following a '$' or '@' character)
        and raw text.  This is similar to pythons string.Template(),
        however it reads from streams (files), not strings, and it allows use
        of more complicated variable names with multiple variable delimiters
        (eg '$' and '@').
        This readline()-like member function terminates when reaching a
        user-specified terminator character character (second argument),
        or when variable (eg: "$var"$ is encountered).  The result is
        a list of variable-separated text-blocks (stored in the first
        argument).   For example, the string:
        "string with $var1 and $var2 variables.}"  contains:
                "string with ",
                $var1,
                " and ",
                $var2,
                " variables.}"
        This simplifies the final process of rendering
        (substituting text into) the text blocks later on.

        Output:
        This function returns a list of (alternating) blocks of
        text, and variable names.  Each entry in the list is either:
        1) a text block:
              Raw text is copied from the source, verbatim, along with
              some additional data (filename and line numbers), to
              help retroactively identify where the text came from
              (in case a syntax error in the text is discovered later).
              In this case, the list entry is stored as a list
              The format (TextBlock) is similar to:
                 [text_string, ((filenameA,lineBegin), (filenameB,lineEnd))],
              where the tuples, (filenameA,lineBegin) and (filenameB,lineEnd)
              denote the source file(s) from which the text was read, and
              line number at the beginning and ending of the text block.
              (This information is useful for generating helpful error
              messages.  Note that the "TtreeShlex" class allows users to
              combine multiple files transparently into one stream using
              the "source" (or "sourcehook()") member.  For this reason, it
              is possible, although unlikely, that the text-block
              we are reading could span multiple different files.)
        2) a variable (for example "$var" or "${var}"):
              In this case, the list entry is stored in the "VarRef" format
              which is essentialy shown below:
                 [[var_prefix, var_nptr, var_suffix], (filename,lineno)]
              where var_prefix and var_suffix are strings containing brackets
              and other text enclosing the variable name (and may be empty).

        As an example, we consider a file named "datafile" which
        contains the text containing 2 text blocks and 1 variable:
               "some\n text\n before ${var}. Text after\n".
        ReadTemplate() will read this and return a list with 3 entries:
             [ ['some\n text\n before', (('datafile', 1), ('datafile', 3))],
               [['${', 'var', '}'], ('datafile', 3, 3)],
               ['Text after\n', (('datafile', 3), ('datafile', 4))] ]

        Note that while parsing the text, self.lineno counter is
        incremented whenever a newline character is encountered.
        (Also: Unlike shlex.get_token(), this function does not
        delete commented text, or insert text from other files.)

        Exceptional Cases:
        Terminator characters are ignored if they are part of a variable
        reference. (For example, the '}' in "${cat:var}", is used to denote a
        bracketed variable, and does not cause ReadTemplate() to stop reading)
           OR if they are part of a two-character escape sequence
        (for example, '}' in "\}" does not cause terminate parsing).
        In that case, the text is considered normal text.  (However the
        \ character is also stripped out.  It is also stripped out if it
        preceeds any characters in "remove_esc_preceeding", which is
        the second argument.  Otherwise it is left in the text block.)

        What is the purpose of "remove_esc_preceeding"?  To force ReadTemplate()
        to remove the preceeding \ when it otherwise would not.  For example,
        we want to remove \ whenever it preceeds another \ character, so we
        include it in the remove_esc_preceeding string variable.  We alse include
        '{' because we want to remove \ when it preceeds the '{' character.
        That way the \ gets deleted when it preceeds either '{' or '}'.
        (The \ character is already removed before the '}' character.)
        We want consistent behavior that people expect, so that
        "\{abc\}" -> ReadTemplate() -> "{abc}"  (instead of "\{abc}").
        In retrospect, perhaps this is a confusing way to implement this.
        """
        #sys.stderr.write('    ReadTemplate('+terminators+') invoked at '+self.error_leader())

        # The main loop of the parser reads only one variable at time.
        # The following variables keep track of where we are in the template.
        reading_var = False  # Are we currently reading in the name of a variable?
        prev_char_delim = False  # True iff we just read a var_delim character like '$'
        # True iff we just read a (non-escaped) esc character '\'
        escaped_state = False
        # True iff we are in a region of text where vars should be ignored
        commented_state = False
        var_paren_depth = 0  # This is non-zero iff we are inside a
        # bracketed variable's name for example: "${var}"
        var_terminators += self.whitespace + self.newline + self.var_delim

        tmpl_list = []  # List of alternating tuples of text_blocks and
        # variable names (see format comment above)
        # This list will be returned to the caller.

        # sys.stderr.write('report_progress='+str(report_progress))

        prev_filename = self.infile
        prev_lineno = self.lineno
        var_prefix = ''
        var_descr_plist = []
        var_suffix = ''
        text_block_plist = []

        done_reading = False

        while not done_reading:

            terminate_text = False
            terminate_var = False
            #delete_prior_escape = False

            nextchar = self.read_char()
            #sys.stderr.write('    ReadTemplate() nextchar=\''+nextchar+'\' at '+self.error_leader()+'  esc='+str(escaped_state)+', pvar='+str(prev_char_delim)+', paren='+str(var_paren_depth))

            # Count newlines:
            if nextchar in self.newline:
                commented_state = False
                self.lineno += 1
            elif ((nextchar in self.comment_skip_var) and
                  (not escaped_state)):
                commented_state = True

            # Check for end-of-file:
            if nextchar == '':
                if escaped_state:
                    raise InputError('Error: in ' + self.error_leader() + '\n\n'
                                     'File terminated immediately following an escape character.')
                    # NOTE(review): the next line is unreachable (after raise)
                    terminate_var = True
                else:
                    terminate_text = True
                done_reading = True

            # --- Now process the character: ---

            # What we do next depends on which "mode" we are in.
            #  If we are reading a regular text block (reading_var == False),
            #   then we keep appending characters onto the end of "text_block",
            #   checking for terminal characters, or variable delimiters.
            #  If we are reading a variable name (reading_var == True),
            #   then we append characters to the end of "var_descr_plist[]",
            #   checking for variable terminator characters, as well as
            #   parenthesis (some variables are surrounded by parenthesis).

            elif reading_var:

                if nextchar in terminators:
                    #sys.stdout.write('   ReadTemplate() readmode found terminator.\n')
                    if escaped_state:
                        # In this case, the '\' char was only to prevent terminating
                        # string prematurely, so delete the '\' character.
                        #delete_prior_escape = True
                        del var_descr_plist[-1]
                        var_descr_plist.append(nextchar)
                        #escaped_state = False
                    elif not ((var_paren_depth > 0) and
                              (nextchar in self.var_close_paren)):
                        terminate_var = True
                        done_reading = True

                if nextchar in self.var_open_paren:  # eg: nextchar == '{'
                    #sys.stdout.write('   ReadTemplate() readmode found {\n')
                    if escaped_state:
                        var_descr_plist.append(nextchar)
                        #escaped_state = False
                    else:
                        # "${var}" is a valid way to refer to a variable
                        if prev_char_delim:
                            var_prefix += nextchar
                            var_paren_depth = 1
                        # "${{var}}" is also a valid way to refer to a variable,
                        # (although strange), but "$va{r}" is not.
                        # Parenthesis (in bracketed variable names) must
                        # immediately follow the '$' character (as in "${var}")
                        elif var_paren_depth > 0:
                            var_paren_depth += 1
                            var_descr_plist.append(nextchar)
                elif nextchar in self.var_close_paren:
                    #sys.stdout.write('   ReadTemplate() readmode found }.\n')
                    if escaped_state:
                        # In this case, the '\' char was only to prevent
                        # interpreting '}' as a variable suffix,
                        # delete_prior_escape=True  #so skip the '\' character
                        del var_descr_plist[-1]
                        var_descr_plist.append(nextchar)
                        #escaped_state = False
                    else:
                        if var_paren_depth > 0:
                            var_paren_depth -= 1
                            if var_paren_depth == 0:
                                var_suffix = nextchar
                                terminate_var = True
                        else:
                            var_descr_plist.append(nextchar)
                elif nextchar in var_terminators:
                    #sys.stdout.write('   ReadTemplate() readmode found var_terminator \"'+nextchar+'\"\n')
                    if (escaped_state or (var_paren_depth > 0)):
                        # In that case ignore the terminator
                        # and append it to the variable name
                        if escaped_state:
                            # In this case, the '\' char was only to prevent
                            # interpreting nextchar as a variable terminator
                            # delete_prior_escape = True # so skip the '\'
                            # # character
                            del var_descr_plist[-1]
                            #escaped_state = False
                        var_descr_plist.append(nextchar)
                    else:
                        terminate_var = True
                elif nextchar in self.var_delim:  # such as '$'
                    #sys.stdout.write('   ReadTemplate() readmode found var_delim.\n')
                    if escaped_state:
                        # In this case, the '\' char was only to prevent
                        # interpreting '$' as a new variable name
                        # delete_prior_escape = True  # so skip the '\'
                        # character
                        del var_descr_plist[-1]
                        var_descr_plist.append(nextchar)
                        #escaped_state = False
                    else:
                        # NOTE(review): prev_var_delim is assigned here and
                        # below but never read anywhere in this class;
                        # prev_char_delim is the flag actually checked.
                        # Possible typo -- confirm.
                        prev_var_delim = True
                        # Then we are processing a new variable name
                        terminate_var = True
                else:
                    var_descr_plist.append(nextchar)
                # Reset the "just saw a '$'" flag after each character read
                # while in variable mode.
                prev_char_delim = False

            else:  # begin else clause for "if reading_var:"

                # Then we are reading a text_block

                if nextchar in terminators:
                    if escaped_state:
                        # In this case, the '\' char was only to prevent terminating
                        # string prematurely, so delete the '\' character.
                        #delete_prior_escape = True
                        del text_block_plist[-1]
                        text_block_plist.append(nextchar)
                    elif commented_state:
                        text_block_plist.append(nextchar)
                    else:
                        terminate_text = True
                        done_reading = True
                elif nextchar in self.var_delim:  # such as '$'
                    if escaped_state:
                        # In this case, the '\' char was only to prevent
                        # interpreting '$' as a variable prefix.
                        # delete_prior_escape=True  #so delete the '\'
                        # character
                        del text_block_plist[-1]
                        text_block_plist.append(nextchar)
                    elif commented_state:
                        text_block_plist.append(nextchar)
                    else:
                        prev_char_delim = True
                        reading_var = True
                        # NOTE TO SELF: IN THE FUTURE, USE GetVarName(self)
                        #               TO PARSE TEXT ASSOCIATED WITH A VARIABLE
                        #   THIS WILL SIMPLIFY THE CODE AND ENSURE CONSISTENCY.
                        var_paren_depth = 0
                        terminate_text = True
                else:
                    text_block_plist.append(nextchar)
                    # TO DO: use "list_of_chars.join()" instead of '+='
                    prev_char_delim = False  # the previous character was not '$'

            # Now deal with "remove_esc_preceeding". (See explanation above.)
            if escaped_state and (nextchar in remove_esc_preceeding):
                if reading_var:
                    #sys.stdout.write('   ReadTemplate: var_descr_str=\''+''.join(var_descr_plist)+'\'\n')
                    assert(var_descr_plist[-2] in self.escape)
                    del var_descr_plist[-2]
                else:
                    #sys.stdout.write('   ReadTemplate: text_block=\''+''.join(text_block_plist)+'\'\n')
                    assert(text_block_plist[-2] in self.escape)
                    del text_block_plist[-2]

            if terminate_text:
                # Flush the accumulated text characters into a TextBlock
                # (or a plain string when simplify_output is requested).
                #sys.stdout.write('ReadTemplate() appending: ')
                # sys.stdout.write(text_block)

                # tmpl_list.append( [text_block,
                #                   ((prev_filename, prev_lineno),
                #                    (self.infile, self.lineno))] )

                if simplify_output:
                    tmpl_list.append(''.join(text_block_plist))
                else:
                    tmpl_list.append(TextBlock(''.join(text_block_plist),
                                               OSrcLoc(prev_filename, prev_lineno)))
                    #, OSrcLoc(self.infile, self.lineno)))
                if not done_reading:
                    # The character that ended the text block
                    # was a variable delimiter (like '$'), in which case
                    # we should put it (nextchar) in the variable's prefix.
                    var_prefix = nextchar
                else:
                    var_prefix = ''
                var_descr_plist = []
                var_suffix = ''
                prev_filename = self.infile
                prev_lineno = self.lineno
                del text_block_plist
                text_block_plist = []
                # gc.collect()

            elif terminate_var:
                # Print an error if we terminated in the middle of
                # an incomplete variable name:
                if prev_char_delim:
                    raise InputError('Error: near ' + self.error_leader() + '\n\n'
                                     'Null variable name.')
                if var_paren_depth > 0:
                    raise InputError('Error: near ' + self.error_leader() + '\n\n'
                                     'Incomplete bracketed variable name.')

                var_descr_str = ''.join(var_descr_plist)

                # Now check for variable format modifiers,
                # like python's ".rjust()" and ".ljust()".
                # If present, then put these in the variable suffix.
                if ((len(var_descr_plist) > 0) and (var_descr_plist[-1] == ')')):
                    #i = len(var_descr_plist)-1
                    # while i >= 0:
                    #    if var_descr_plist[i] == '(':
                    #        break
                    #    i -= 1
                    i = var_descr_str.rfind('(')
                    if (((i - 6) >= 0) and
                        ((var_descr_str[i - 6:i] == '.rjust') or
                         (var_descr_str[i - 6:i] == '.ljust'))):
                        var_suffix = ''.join(
                            var_descr_plist[i - 6:]) + var_suffix
                        #var_descr_plist = var_descr_plist[:i-6]
                        var_descr_str = var_descr_str[:i - 6]

                # Process any special characters in the variable name
                var_descr_str = EscCharStrToChar(var_descr_str)

                # tmpl_list.append( [[var_prefix, var_descr_str, var_suffix],
                #                   (self.infile, self.lineno)] )
                if simplify_output:
                    tmpl_list.append(var_prefix + var_descr_str + var_suffix)
                else:
                    tmpl_list.append(VarRef(var_prefix, var_descr_str, var_suffix,
                                            OSrcLoc(self.infile, self.lineno)))

                # if report_progress:
                #sys.stderr.write('  parsed variable '+var_prefix+var_descr_str+var_suffix+'\n')

                #sys.stdout.write('ReadTemplate() appending: ')
                #sys.stderr.write(var_prefix + var_descr_str + var_suffix)

                del var_descr_plist
                del var_descr_str

                prev_filename = self.infile
                prev_lineno = self.lineno
                var_prefix = ''
                var_descr_plist = []
                var_suffix = ''
                # Special case: Variable delimiters like '$'
                #               terminate the reading of variables,
                #               but they also signify that a new
                #               variable is being read.
                if nextchar in self.var_delim:
                    # Then we are processing a new variable name
                    prev_var_delim = True
                    reading_var = True
                    # NOTE TO SELF: IN THE FUTURE, USE GetVarName(self)
                    #               TO PARSE TEXT ASSOCIATED WITH A VARIABLE
                    #   THIS WILL SIMPLIFY THE CODE AND ENSURE CONSISTENCY.
                    var_paren_depth = 0
                    var_prefix = nextchar
                elif nextchar in self.var_close_paren:
                    del text_block_plist
                    text_block_plist = []
                    # gc.collect()
                    prev_var_delim = False
                    reading_var = False
                else:
                    # Generally, we don't want to initialize the next text block
                    # with the empty string.  Consider that whatever character
                    # caused us to stop reading the previous variable and append
                    # it to the block of text that comes after.
                    del text_block_plist
                    text_block_plist = [nextchar]
                    # gc.collect()
                    prev_var_delim = False
                    reading_var = False

            # If we reached the end of the template (and the user requests it),
            # then the terminal character can be included in the list
            # of text_blocks to be returned to the caller.
            if done_reading and keep_terminal_char:
                #sys.stdout.write('ReadTemplate() appending: \''+nextchar+'\'\n')
                # Here we create a new text block which contains only the
                # terminal character (nextchar).
                # tmpl_list.append( [nextchar,
                #                   ((self.infile, self.lineno),
                #                    (self.infile, self.lineno))] )
                if simplify_output:
                    tmpl_list.append(nextchar)
                else:
                    tmpl_list.append(TextBlock(nextchar,
                                               OSrcLoc(self.infile, self.lineno)))
                    #, OSrcLoc(self.infile, self.lineno)))

            if escaped_state:
                escaped_state = False
            else:
                if nextchar in self.escape:
                    escaped_state = True

        #sys.stderr.write("*** TMPL_LIST0 = ***", tmpl_list)
        return tmpl_list  # <- return value stored here

    def GetParenExpr(self, prepend_str='', left_paren='(', right_paren=')'):
        """ GetParenExpr() is useful for reading in strings
            with nested parenthesis and spaces.
            This function can read in the entire string:

              .trans(0, 10.0*sin(30), 10.0*cos(30))

            (Because I was too lazy to write this correctly...)
            Spaces are currently stripped out of the expression.
            (...unless surrounded by quotes) The string above becomes:

              ".trans(0,10.0*sin(30),10.0*cos(30))"

            Sometimes the caller wants to prepend some text to the beginning
            of the expression (which may contain parenthesis).  For this
            reason, an optional first argument ("prepend_str") can be
            provided.  By default it is empty.
        """
        # NOTE(review): this uses SrcLoc(...) while GetSrcLoc() above uses
        # OSrcLoc(...) -- confirm SrcLoc is defined elsewhere in this file.
        src_loc_begin = SrcLoc(self.infile, self.lineno)
        # Temporarily allow parenthesis characters inside tokens.
        orig_wordterm = self.wordterminators
        self.wordterminators = self.wordterminators.replace(
            left_paren, '').replace(right_paren, '')

        token = self.get_token()
        if ((token == '') or
            (token == self.eof)):
            return prepend_str

        expr_str = prepend_str + token

        # if (expr_str.find(left_paren) == -1):
        #    raise InputError('Error near or before '+self.error_leader()+'\n'
        #                     'Expected an open-paren (\"'+prepend_str+left_paren+'\") before this point.\n')
        #    return expr_str

        paren_depth = expr_str.count(left_paren) - expr_str.count(right_paren)
        # Keep consuming tokens until every open paren has been closed.
        # NOTE(review): if InputError is raised here, self.wordterminators is
        # NOT restored to orig_wordterm -- confirm whether that matters to
        # callers that catch the exception.
        while ((len(expr_str) == 0) or (paren_depth > 0)):
            token = self.get_token()
            if ((type(token) is not str) or
                (token == '')):
                raise InputError('Error somewhere between ' +
                                 self.error_leader(src_loc_begin.infile,
                                                   src_loc_begin.lineno)
                                 + 'and ' + self.error_leader() + '\n'
                                 'Invalid expression: \"' + expr_str[0:760] + '\"')
            expr_str += token
            paren_depth = expr_str.count(
                left_paren) - expr_str.count(right_paren)
        if (paren_depth != 0):
            raise InputError('Error somewhere between ' +
                             self.error_leader(src_loc_begin.infile,
                                               src_loc_begin.lineno)
                             + 'and ' + self.error_leader() + '\n'
                             'Invalid expression: \"' + expr_str[0:760] + '\"')
        self.wordterminators = orig_wordterm
        return expr_str
if __name__ == '__main__':
    # Smoke test: tokenize stdin (or the file named in argv[1]) with
    # TtreeShlex and print each token to stderr until the stream ends.
    if len(sys.argv) == 1:
        lexer = TtreeShlex()
    else:
        # Renamed from "file" to avoid shadowing the builtin name.
        path = sys.argv[1]
        lexer = TtreeShlex(open(path), path)
    while True:
        tt = lexer.get_token()
        if not tt:
            break
        sys.stderr.write("Token: " + repr(tt))
| mit | 365f052b6db5d1cd56933917cddb880e | 39.285778 | 192 | 0.521815 | 4.319477 | false | false | false | false |
gawel/irc3 | irc3/plugins/userlist.py | 1 | 6805 | # -*- coding: utf-8 -*-
from irc3 import plugin
from irc3 import utils
from irc3 import rfc
from irc3.dec import event
from irc3.utils import IrcString
from collections import defaultdict
__doc__ = '''
==============================================
:mod:`irc3.plugins.userlist` User list plugin
==============================================
This plugin maintain a known user list and a channel list.
..
>>> from irc3.testing import IrcBot
Usage::
>>> bot = IrcBot()
>>> bot.include('irc3.plugins.userlist')
>>> bot.test(':gawel!user@host JOIN #chan')
>>> print(list(bot.channels['#chan'])[0])
gawel
>>> print(list(bot.nicks.keys())[0])
gawel
>>> bot.test(':gawel!user@host MODE #chan +o gawel')
>>> print(list(bot.channels['#chan'].modes['@'])[0])
gawel
Api
===
.. autoclass:: Channel
'''
class Channel(set):
"""A set like object which contains nicknames that are on the channel and
user modes:
.. code-block:: python
>>> channel = Channel()
>>> channel.add('gawel', modes='@')
>>> 'gawel' in channel
True
>>> 'gawel' in channel.modes['@']
True
>>> channel.remove('gawel')
>>> 'gawel' in channel
False
>>> 'gawel' in channel.modes['@']
False
"""
def __init__(self):
set.__init__(self)
self.modes = defaultdict(set)
self.topic = None
def add(self, item, modes=''):
set.add(self, item)
for mode in modes:
self.modes[mode].add(item)
def remove(self, item):
try:
set.remove(self, item)
except KeyError:
pass
for items in self.modes.values():
if item in items:
items.remove(item)
def __repr__(self):
return repr(sorted(self))
@plugin
class Userlist:
def __init__(self, context):
self.context = context
self.connection_lost()
def connection_lost(self, client=None):
self.channels = defaultdict(Channel)
self.context.channels = self.channels
self.nicks = {}
self.context.nicks = self.nicks
def broadcast(self, *args, **kwargs):
# only usefull for servers
pass
@event(rfc.JOIN_PART_QUIT)
def on_join_part_quit(self, mask=None, event=None, **kwargs):
getattr(self, event.lower())(mask.nick, mask, **kwargs)
@event(rfc.KICK)
def on_kick(self, mask=None, event=None, target=None, **kwargs):
self.part(target.nick, mask=None, **kwargs)
def join(self, nick, mask, client=None, **kwargs):
channel = self.channels[kwargs['channel']]
if nick != self.context.nick:
channel.add(mask.nick)
self.nicks[mask.nick] = client or mask
if client:
self.broadcast(client=client, clients=channel, **kwargs)
def part(self, nick, mask=None, channel=None, client=None, **kwargs):
if nick == self.context.nick:
self.channels.pop(channel, None)
else:
channel = self.channels[channel]
self.broadcast(client=client, clients=channel, **kwargs)
channel.remove(nick)
if client is None and all(
nick not in c for c in self.channels.values()):
self.nicks.pop(nick, None)
def quit(self, nick, mask, channel=None, client=None, **kwargs):
if nick == self.context.nick:
self.connection_lost()
else:
clients = set()
for channel in self.channels.values():
if nick in channel:
clients.update(channel)
channel.remove(nick)
self.broadcast(client=client, clients=clients, **kwargs)
self.nicks.pop(nick, None)
@event(rfc.NEW_NICK)
def new_nick(self, nick=None, new_nick=None, client=None, **kwargs):
"""update list on new nick"""
if client is None:
self.nicks[new_nick] = new_nick + '!' + nick.host
nick = nick.nick
clients = {new_nick}
for channel in self.channels.values():
if nick in channel:
for nicknames in channel.modes.values():
if nick in nicknames:
nicknames.add(new_nick)
channel.remove(nick)
clients.update(channel)
channel.add(new_nick)
self.nicks.pop(nick, None)
self.broadcast(client=client, clients=clients, **kwargs)
@event(rfc.RPL_NAMREPLY)
def names(self, channel=None, data=None, **kwargs):
"""Initialise channel list and channel.modes"""
statusmsg = self.context.server_config['STATUSMSG']
nicknames = data.split(' ')
channel = self.channels[channel]
for item in nicknames:
nick = item.strip(statusmsg)
channel.add(nick, modes=item[:-len(nick)])
self.nicks[nick] = nick
@event(rfc.RPL_WHOREPLY)
def who(self, channel=None, nick=None, username=None, server=None, **kw):
"""Set nick mask"""
self.channels[channel].add(nick)
mask = IrcString(nick + '!' + username + '@' + server)
self.nicks[nick] = mask
@event(rfc.MODE)
def mode(self, target=None, modes=None, data=None, client=None, **kw):
"""Add nicknames to channel.modes"""
if target[0] not in self.context.server_config['CHANTYPES'] \
or not data:
# not a channel or no user target
return
noargs = self.context.server_config['CHANMODES'].split(',')[-1]
if not isinstance(data, list):
data = [d for d in data.split(' ') if d]
if not modes.startswith(('+', '-')):
modes = '+' + modes
modes = utils.parse_modes(modes, data, noargs)
prefix = self.context.server_config['PREFIX']
prefix = dict(zip(*prefix.strip('(').split(')')))
channel = self.channels[target]
for char, mode, tgt in modes:
if mode in prefix:
nicknames = channel.modes[prefix[mode]]
if char == '+':
nicknames.add(tgt)
elif tgt in nicknames:
nicknames.remove(tgt)
if client is not None:
broadcast = (
':{mask} MODE {target} {char}{mode} {tgt}').format(
char=char, mode=mode, target=target, tgt=tgt,
**client.data)
self.broadcast(client=client, broadcast=broadcast,
clients=channel)
@event(rfc.RPL_TOPIC)
def topic(self, channel=None, data=None, client=None, **kwargs):
self.channels[channel].topic = data
| mit | fe2d00583f9360b2f1e8a5ff9f8a4add | 31.559809 | 77 | 0.545334 | 3.91092 | false | false | false | false |
gawel/irc3 | irc3/_gen_doc.py | 1 | 2996 | # -*- coding: utf-8 -*-
from . import rfc
from . import template
import os
def render_attrs(title, attrs, out):
out.write(title + '\n')
out.write(len(title) * '=' + '\n')
out.write('\n')
for attr in attrs:
name = attr.name
title = name
if isinstance(attr, int):
title = '%s - %s' % (attr, title)
out.write(title + '\n')
out.write(len(title) * '-' + '\n\n')
if hasattr(attr, 'tpl'):
out.write('Format ``%s``\n\n' % attr.tpl.replace('{c.', '{'))
out.write('Match ``%s``\n\n' % attr.re)
out.write('Example:\n\n')
out.write('.. code-block:: python\n\n')
out.write(' @irc3.event(rfc.%s)\n' % name)
params = getattr(attr, 'params', [])
if params:
params = '=None, '.join(params)
out.write(' def myevent(bot, %s=None):\n' % params)
else:
out.write(' def myevent(bot):\n' % params)
out.write(' # do something\n')
out.write('\n')
re_out = getattr(attr, 're_out', None)
if re_out is not None:
out.write('Out Match ``%s``\n\n' % re_out.re)
out.write('Example:\n\n')
out.write('.. code-block:: python\n\n')
out.write(' @irc3.event(rfc.%s, iotype="out")\n' % name)
params = getattr(re_out, 'params', [])
if params:
params = '=None, '.join(params)
out.write(' def myevent(bot, %s=None):\n' % params)
else:
raise RuntimeError('regexp %s as no params' % re_out)
out.write(' # do something\n')
out.write('\n')
def main():
print('Generate docs...')
attrs = [getattr(rfc, attr) for attr in dir(rfc)
if attr.isupper() and attr not in ('RETCODES',)]
repls = [attr for attr in attrs if attr.name.startswith('RPL_')]
errs = [attr for attr in attrs if attr.name.startswith('ERR_')]
misc = [attr for attr in attrs
if not attr.name.startswith(('ERR_', 'RPL_'))]
out = open('docs/rfc.rst', 'w')
out.write('========================\n')
out.write(':mod:`irc3.rfc` RFC1459\n')
out.write('========================\n\n')
render_attrs('Replies (REPL)', repls, out)
render_attrs('Errors (ERR)', errs, out)
render_attrs('Misc', misc, out)
try:
os.makedirs('docs/plugins')
except OSError:
pass
for filename in os.listdir('irc3/plugins'):
if filename.startswith('_'):
continue
if not filename.endswith('.py'):
continue
filename = filename.replace('.py', '')
modname = 'irc3.plugins.%s' % filename
out = open('docs/plugins/' + filename + '.rst', 'w')
out.write('.. automodule:: ' + modname + '\n')
out.write('\n')
template.main(nick='mybot',
dest=os.path.join(os.getcwd(), 'examples'))
if __name__ == '__main__':
main()
| mit | 2135126fa6b59074a0b0578e5d3c9c00 | 33.837209 | 73 | 0.497997 | 3.471611 | false | false | false | false |
miguelgrinberg/python-engineio | src/engineio/server.py | 1 | 33822 | import base64
import gzip
import importlib
import io
import logging
import secrets
import urllib
import zlib
from . import exceptions
from . import packet
from . import payload
from . import socket
default_logger = logging.getLogger('engineio.server')
class Server(object):
"""An Engine.IO server.
This class implements a fully compliant Engine.IO web server with support
for websocket and long-polling transports.
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are "threading",
"eventlet", "gevent" and "gevent_uwsgi". If this
argument is not given, "eventlet" is tried first, then
"gevent_uwsgi", then "gevent", and finally "threading".
The first async mode that has all its dependencies
installed is the one that is chosen.
:param ping_interval: The interval in seconds at which the server pings
the client. The default is 25 seconds. For advanced
control, a two element tuple can be given, where
the first number is the ping interval and the second
is a grace period added by the server.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default
is 20 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 1,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: If set to a string, it is the name of the HTTP cookie the
server sends back tot he client containing the client
session id. If set to a dictionary, the ``'name'`` key
contains the cookie name and other keys define cookie
attributes, where the value of each attribute can be a
string, a callable with no arguments, or a boolean. If set
to ``None`` (the default), a cookie is not sent to the
client.
:param cors_allowed_origins: Origin or list of origins that are allowed to
connect to this server. Only the same origin
is allowed by default. Set this argument to
``'*'`` to allow all origins, or to ``[]`` to
disable CORS handling.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default
is ``True``.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``. Note that fatal errors are logged even when
``logger`` is ``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
:param async_handlers: If set to ``True``, run message event handlers in
non-blocking threads. To run handlers synchronously,
set to ``False``. The default is ``True``.
:param monitor_clients: If set to ``True``, a background task will ensure
inactive clients are closed. Set to ``False`` to
disable the monitoring task (not recommended). The
default is ``True``.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. Defaults to
``['polling', 'websocket']``.
:param kwargs: Reserved for future extensions, any additional parameters
given as keyword arguments will be silently ignored.
"""
compression_methods = ['gzip', 'deflate']
event_names = ['connect', 'disconnect', 'message']
valid_transports = ['polling', 'websocket']
_default_monitor_clients = True
sequence_number = 0
def __init__(self, async_mode=None, ping_interval=25, ping_timeout=20,
max_http_buffer_size=1000000, allow_upgrades=True,
http_compression=True, compression_threshold=1024,
cookie=None, cors_allowed_origins=None,
cors_credentials=True, logger=False, json=None,
async_handlers=True, monitor_clients=None, transports=None,
**kwargs):
self.ping_timeout = ping_timeout
if isinstance(ping_interval, tuple):
self.ping_interval = ping_interval[0]
self.ping_interval_grace_period = ping_interval[1]
else:
self.ping_interval = ping_interval
self.ping_interval_grace_period = 0
self.max_http_buffer_size = max_http_buffer_size
self.allow_upgrades = allow_upgrades
self.http_compression = http_compression
self.compression_threshold = compression_threshold
self.cookie = cookie
self.cors_allowed_origins = cors_allowed_origins
self.cors_credentials = cors_credentials
self.async_handlers = async_handlers
self.sockets = {}
self.handlers = {}
self.log_message_keys = set()
self.start_service_task = monitor_clients \
if monitor_clients is not None else self._default_monitor_clients
if json is not None:
packet.Packet.json = json
if not isinstance(logger, bool):
self.logger = logger
else:
self.logger = default_logger
if self.logger.level == logging.NOTSET:
if logger:
self.logger.setLevel(logging.INFO)
else:
self.logger.setLevel(logging.ERROR)
self.logger.addHandler(logging.StreamHandler())
modes = self.async_modes()
if async_mode is not None:
modes = [async_mode] if async_mode in modes else []
self._async = None
self.async_mode = None
for mode in modes:
try:
self._async = importlib.import_module(
'engineio.async_drivers.' + mode)._async
asyncio_based = self._async['asyncio'] \
if 'asyncio' in self._async else False
if asyncio_based != self.is_asyncio_based():
continue # pragma: no cover
self.async_mode = mode
break
except ImportError:
pass
if self.async_mode is None:
raise ValueError('Invalid async_mode specified')
if self.is_asyncio_based() and \
('asyncio' not in self._async or not
self._async['asyncio']): # pragma: no cover
raise ValueError('The selected async_mode is not asyncio '
'compatible')
if not self.is_asyncio_based() and 'asyncio' in self._async and \
self._async['asyncio']: # pragma: no cover
raise ValueError('The selected async_mode requires asyncio and '
'must use the AsyncServer class')
if transports is not None:
if isinstance(transports, str):
transports = [transports]
transports = [transport for transport in transports
if transport in self.valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or self.valid_transports
self.logger.info('Server initialized for %s.', self.async_mode)
def is_asyncio_based(self):
return False
def async_modes(self):
return ['eventlet', 'gevent_uwsgi', 'gevent', 'threading']
def on(self, event, handler=None):
"""Register an event handler.
:param event: The event name. Can be ``'connect'``, ``'message'`` or
``'disconnect'``.
:param handler: The function that should be invoked to handle the
event. When this parameter is not given, the method
acts as a decorator for the handler function.
Example usage::
# as a decorator:
@eio.on('connect')
def connect_handler(sid, environ):
print('Connection request')
if environ['REMOTE_ADDR'] in blacklisted:
return False # reject
# as a method:
def message_handler(sid, msg):
print('Received message: ', msg)
eio.send(sid, 'response')
eio.on('message', message_handler)
The handler function receives the ``sid`` (session ID) for the
client as first argument. The ``'connect'`` event handler receives the
WSGI environment as a second argument, and can return ``False`` to
reject the connection. The ``'message'`` handler receives the message
payload as a second argument. The ``'disconnect'`` handler does not
take a second argument.
"""
if event not in self.event_names:
raise ValueError('Invalid event')
def set_handler(handler):
self.handlers[event] = handler
return handler
if handler is None:
return set_handler
set_handler(handler)
def send(self, sid, data):
"""Send a message to a client.
:param sid: The session id of the recipient client.
:param data: The data to send to the client. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
"""
try:
socket = self._get_socket(sid)
except KeyError:
# the socket is not available
self.logger.warning('Cannot send to sid %s', sid)
return
socket.send(packet.Packet(packet.MESSAGE, data=data))
def get_session(self, sid):
"""Return the user session for a client.
:param sid: The session id of the client.
The return value is a dictionary. Modifications made to this
dictionary are not guaranteed to be preserved unless
``save_session()`` is called, or when the ``session`` context manager
is used.
"""
socket = self._get_socket(sid)
return socket.session
def save_session(self, sid, session):
"""Store the user session for a client.
:param sid: The session id of the client.
:param session: The session dictionary.
"""
socket = self._get_socket(sid)
socket.session = session
def session(self, sid):
"""Return the user session for a client with context manager syntax.
:param sid: The session id of the client.
This is a context manager that returns the user session dictionary for
the client. Any changes that are made to this dictionary inside the
context manager block are saved back to the session. Example usage::
@eio.on('connect')
def on_connect(sid, environ):
username = authenticate_user(environ)
if not username:
return False
with eio.session(sid) as session:
session['username'] = username
@eio.on('message')
def on_message(sid, msg):
with eio.session(sid) as session:
print('received message from ', session['username'])
"""
class _session_context_manager(object):
def __init__(self, server, sid):
self.server = server
self.sid = sid
self.session = None
def __enter__(self):
self.session = self.server.get_session(sid)
return self.session
def __exit__(self, *args):
self.server.save_session(sid, self.session)
return _session_context_manager(self, sid)
def disconnect(self, sid=None):
"""Disconnect a client.
:param sid: The session id of the client to close. If this parameter
is not given, then all clients are closed.
"""
if sid is not None:
try:
socket = self._get_socket(sid)
except KeyError: # pragma: no cover
# the socket was already closed or gone
pass
else:
socket.close()
if sid in self.sockets: # pragma: no cover
del self.sockets[sid]
else:
for client in self.sockets.values():
client.close()
self.sockets = {}
def transport(self, sid):
"""Return the name of the transport used by the client.
The two possible values returned by this function are ``'polling'``
and ``'websocket'``.
:param sid: The session of the client.
"""
return 'websocket' if self._get_socket(sid).upgraded else 'polling'
def handle_request(self, environ, start_response):
"""Handle an HTTP request from the client.
This is the entry point of the Engine.IO application, using the same
interface as a WSGI application. For the typical usage, this function
is invoked by the :class:`Middleware` instance, but it can be invoked
directly when the middleware is not used.
:param environ: The WSGI environment.
:param start_response: The WSGI ``start_response`` function.
This function returns the HTTP response body to deliver to the client
as a byte sequence.
"""
if self.cors_allowed_origins != []:
# Validate the origin header if present
# This is important for WebSocket more than for HTTP, since
# browsers only apply CORS controls to HTTP.
origin = environ.get('HTTP_ORIGIN')
if origin:
allowed_origins = self._cors_allowed_origins(environ)
if allowed_origins is not None and origin not in \
allowed_origins:
self._log_error_once(
origin + ' is not an accepted origin.', 'bad-origin')
r = self._bad_request('Not an accepted origin.')
start_response(r['status'], r['headers'])
return [r['response']]
method = environ['REQUEST_METHOD']
query = urllib.parse.parse_qs(environ.get('QUERY_STRING', ''))
jsonp = False
jsonp_index = None
# make sure the client uses an allowed transport
transport = query.get('transport', ['polling'])[0]
if transport not in self.transports:
self._log_error_once('Invalid transport', 'bad-transport')
r = self._bad_request('Invalid transport')
start_response(r['status'], r['headers'])
return [r['response']]
# make sure the client speaks a compatible Engine.IO version
sid = query['sid'][0] if 'sid' in query else None
if sid is None and query.get('EIO') != ['4']:
self._log_error_once(
'The client is using an unsupported version of the Socket.IO '
'or Engine.IO protocols', 'bad-version')
r = self._bad_request(
'The client is using an unsupported version of the Socket.IO '
'or Engine.IO protocols')
start_response(r['status'], r['headers'])
return [r['response']]
if 'j' in query:
jsonp = True
try:
jsonp_index = int(query['j'][0])
except (ValueError, KeyError, IndexError):
# Invalid JSONP index number
pass
if jsonp and jsonp_index is None:
self._log_error_once('Invalid JSONP index number',
'bad-jsonp-index')
r = self._bad_request('Invalid JSONP index number')
elif method == 'GET':
if sid is None:
# transport must be one of 'polling' or 'websocket'.
# if 'websocket', the HTTP_UPGRADE header must match.
upgrade_header = environ.get('HTTP_UPGRADE').lower() \
if 'HTTP_UPGRADE' in environ else None
if transport == 'polling' \
or transport == upgrade_header == 'websocket':
r = self._handle_connect(environ, start_response,
transport, jsonp_index)
else:
self._log_error_once('Invalid websocket upgrade',
'bad-upgrade')
r = self._bad_request('Invalid websocket upgrade')
else:
if sid not in self.sockets:
self._log_error_once('Invalid session ' + sid, 'bad-sid')
r = self._bad_request('Invalid session')
else:
socket = self._get_socket(sid)
try:
packets = socket.handle_get_request(
environ, start_response)
if isinstance(packets, list):
r = self._ok(packets, jsonp_index=jsonp_index)
else:
r = packets
except exceptions.EngineIOError:
if sid in self.sockets: # pragma: no cover
self.disconnect(sid)
r = self._bad_request()
if sid in self.sockets and self.sockets[sid].closed:
del self.sockets[sid]
elif method == 'POST':
if sid is None or sid not in self.sockets:
self._log_error_once(
'Invalid session ' + (sid or 'None'), 'bad-sid')
r = self._bad_request('Invalid session')
else:
socket = self._get_socket(sid)
try:
socket.handle_post_request(environ)
r = self._ok(jsonp_index=jsonp_index)
except exceptions.EngineIOError:
if sid in self.sockets: # pragma: no cover
self.disconnect(sid)
r = self._bad_request()
except: # pragma: no cover
# for any other unexpected errors, we log the error
# and keep going
self.logger.exception('post request handler error')
r = self._ok(jsonp_index=jsonp_index)
elif method == 'OPTIONS':
r = self._ok()
else:
self.logger.warning('Method %s not supported', method)
r = self._method_not_found()
if not isinstance(r, dict):
return r or []
if self.http_compression and \
len(r['response']) >= self.compression_threshold:
encodings = [e.split(';')[0].strip() for e in
environ.get('HTTP_ACCEPT_ENCODING', '').split(',')]
for encoding in encodings:
if encoding in self.compression_methods:
r['response'] = \
getattr(self, '_' + encoding)(r['response'])
r['headers'] += [('Content-Encoding', encoding)]
break
cors_headers = self._cors_headers(environ)
start_response(r['status'], r['headers'] + cors_headers)
return [r['response']]
def start_background_task(self, target, *args, **kwargs):
"""Start a background task using the appropriate async model.
This is a utility function that applications can use to start a
background task using the method that is compatible with the
selected async mode.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object that represents the background task,
on which the ``join()`` methond can be invoked to wait for the task to
complete.
"""
th = self._async['thread'](target=target, args=args, kwargs=kwargs)
th.start()
return th # pragma: no cover
def sleep(self, seconds=0):
"""Sleep for the requested amount of time using the appropriate async
model.
This is a utility function that applications can use to put a task to
sleep without having to worry about using the correct call for the
selected async mode.
"""
return self._async['sleep'](seconds)
def create_queue(self, *args, **kwargs):
"""Create a queue object using the appropriate async model.
This is a utility function that applications can use to create a queue
without having to worry about using the correct call for the selected
async mode.
"""
return self._async['queue'](*args, **kwargs)
def get_queue_empty_exception(self):
"""Return the queue empty exception for the appropriate async model.
This is a utility function that applications can use to work with a
queue without having to worry about using the correct call for the
selected async mode.
"""
return self._async['queue_empty']
def create_event(self, *args, **kwargs):
"""Create an event object using the appropriate async model.
This is a utility function that applications can use to create an
event without having to worry about using the correct call for the
selected async mode.
"""
return self._async['event'](*args, **kwargs)
def generate_id(self):
"""Generate a unique session id."""
id = base64.b64encode(
secrets.token_bytes(12) + self.sequence_number.to_bytes(3, 'big'))
self.sequence_number = (self.sequence_number + 1) & 0xffffff
return id.decode('utf-8').replace('/', '_').replace('+', '-')
def _generate_sid_cookie(self, sid, attributes):
"""Generate the sid cookie."""
cookie = attributes.get('name', 'io') + '=' + sid
for attribute, value in attributes.items():
if attribute == 'name':
continue
if callable(value):
value = value()
if value is True:
cookie += '; ' + attribute
else:
cookie += '; ' + attribute + '=' + value
return cookie
def _handle_connect(self, environ, start_response, transport,
jsonp_index=None):
"""Handle a client connection request."""
if self.start_service_task:
# start the service task to monitor connected clients
self.start_service_task = False
self.start_background_task(self._service_task)
sid = self.generate_id()
s = socket.Socket(self, sid)
self.sockets[sid] = s
pkt = packet.Packet(packet.OPEN, {
'sid': sid,
'upgrades': self._upgrades(sid, transport),
'pingTimeout': int(self.ping_timeout * 1000),
'pingInterval': int(
self.ping_interval + self.ping_interval_grace_period) * 1000})
s.send(pkt)
s.schedule_ping()
# NOTE: some sections below are marked as "no cover" to workaround
# what seems to be a bug in the coverage package. All the lines below
# are covered by tests, but some are not reported as such for some
# reason
ret = self._trigger_event('connect', sid, environ, run_async=False)
if ret is not None and ret is not True: # pragma: no cover
del self.sockets[sid]
self.logger.warning('Application rejected connection')
return self._unauthorized(ret or None)
if transport == 'websocket': # pragma: no cover
ret = s.handle_get_request(environ, start_response)
if s.closed and sid in self.sockets:
# websocket connection ended, so we are done
del self.sockets[sid]
return ret
else: # pragma: no cover
s.connected = True
headers = None
if self.cookie:
if isinstance(self.cookie, dict):
headers = [(
'Set-Cookie',
self._generate_sid_cookie(sid, self.cookie)
)]
else:
headers = [(
'Set-Cookie',
self._generate_sid_cookie(sid, {
'name': self.cookie, 'path': '/', 'SameSite': 'Lax'
})
)]
try:
return self._ok(s.poll(), headers=headers,
jsonp_index=jsonp_index)
except exceptions.QueueEmpty:
return self._bad_request()
def _upgrades(self, sid, transport):
"""Return the list of possible upgrades for a client connection."""
if not self.allow_upgrades or self._get_socket(sid).upgraded or \
transport == 'websocket':
return []
if self._async['websocket'] is None: # pragma: no cover
self._log_error_once(
'The WebSocket transport is not available, you must install a '
'WebSocket server that is compatible with your async mode to '
'enable it. See the documentation for details.',
'no-websocket')
return []
return ['websocket']
def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
if event in self.handlers:
if run_async:
return self.start_background_task(self.handlers[event], *args)
else:
try:
return self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
def _get_socket(self, sid):
"""Return the socket object for a given session."""
try:
s = self.sockets[sid]
except KeyError:
raise KeyError('Session not found')
if s.closed:
del self.sockets[sid]
raise KeyError('Session is disconnected')
return s
def _ok(self, packets=None, headers=None, jsonp_index=None):
"""Generate a successful HTTP response."""
if packets is not None:
if headers is None:
headers = []
headers += [('Content-Type', 'text/plain; charset=UTF-8')]
return {'status': '200 OK',
'headers': headers,
'response': payload.Payload(packets=packets).encode(
jsonp_index=jsonp_index).encode('utf-8')}
else:
return {'status': '200 OK',
'headers': [('Content-Type', 'text/plain')],
'response': b'OK'}
def _bad_request(self, message=None):
"""Generate a bad request HTTP error response."""
if message is None:
message = 'Bad Request'
message = packet.Packet.json.dumps(message)
return {'status': '400 BAD REQUEST',
'headers': [('Content-Type', 'text/plain')],
'response': message.encode('utf-8')}
def _method_not_found(self):
"""Generate a method not found HTTP error response."""
return {'status': '405 METHOD NOT FOUND',
'headers': [('Content-Type', 'text/plain')],
'response': b'Method Not Found'}
def _unauthorized(self, message=None):
"""Generate a unauthorized HTTP error response."""
if message is None:
message = 'Unauthorized'
message = packet.Packet.json.dumps(message)
return {'status': '401 UNAUTHORIZED',
'headers': [('Content-Type', 'application/json')],
'response': message.encode('utf-8')}
def _cors_allowed_origins(self, environ):
default_origins = []
if 'wsgi.url_scheme' in environ and 'HTTP_HOST' in environ:
default_origins.append('{scheme}://{host}'.format(
scheme=environ['wsgi.url_scheme'], host=environ['HTTP_HOST']))
if 'HTTP_X_FORWARDED_PROTO' in environ or \
'HTTP_X_FORWARDED_HOST' in environ:
scheme = environ.get(
'HTTP_X_FORWARDED_PROTO',
environ['wsgi.url_scheme']).split(',')[0].strip()
default_origins.append('{scheme}://{host}'.format(
scheme=scheme, host=environ.get(
'HTTP_X_FORWARDED_HOST', environ['HTTP_HOST']).split(
',')[0].strip()))
if self.cors_allowed_origins is None:
allowed_origins = default_origins
elif self.cors_allowed_origins == '*':
allowed_origins = None
elif isinstance(self.cors_allowed_origins, str):
allowed_origins = [self.cors_allowed_origins]
elif callable(self.cors_allowed_origins):
origin = environ.get('HTTP_ORIGIN')
allowed_origins = [origin] \
if self.cors_allowed_origins(origin) else []
else:
allowed_origins = self.cors_allowed_origins
return allowed_origins
def _cors_headers(self, environ):
"""Return the cross-origin-resource-sharing headers."""
if self.cors_allowed_origins == []:
# special case, CORS handling is completely disabled
return []
headers = []
allowed_origins = self._cors_allowed_origins(environ)
if 'HTTP_ORIGIN' in environ and \
(allowed_origins is None or environ['HTTP_ORIGIN'] in
allowed_origins):
headers = [('Access-Control-Allow-Origin', environ['HTTP_ORIGIN'])]
if environ['REQUEST_METHOD'] == 'OPTIONS':
headers += [('Access-Control-Allow-Methods', 'OPTIONS, GET, POST')]
if 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS' in environ:
headers += [('Access-Control-Allow-Headers',
environ['HTTP_ACCESS_CONTROL_REQUEST_HEADERS'])]
if self.cors_credentials:
headers += [('Access-Control-Allow-Credentials', 'true')]
return headers
def _gzip(self, response):
"""Apply gzip compression to a response."""
bytesio = io.BytesIO()
with gzip.GzipFile(fileobj=bytesio, mode='w') as gz:
gz.write(response)
return bytesio.getvalue()
def _deflate(self, response):
"""Apply deflate compression to a response."""
return zlib.compress(response)
def _log_error_once(self, message, message_key):
"""Log message with logging.ERROR level the first time, then log
with given level."""
if message_key not in self.log_message_keys:
self.logger.error(message + ' (further occurrences of this error '
'will be logged with level INFO)')
self.log_message_keys.add(message_key)
else:
self.logger.info(message)
def _service_task(self): # pragma: no cover
"""Monitor connected clients and clean up those that time out."""
while True:
if len(self.sockets) == 0:
# nothing to do
self.sleep(self.ping_timeout)
continue
# go through the entire client list in a ping interval cycle
sleep_interval = float(self.ping_timeout) / len(self.sockets)
try:
# iterate over the current clients
for s in self.sockets.copy().values():
if not s.closing and not s.closed:
s.check_ping_timeout()
self.sleep(sleep_interval)
except (SystemExit, KeyboardInterrupt):
self.logger.info('service task canceled')
break
except:
# an unexpected exception has occurred, log it and continue
self.logger.exception('service task exception')
| mit | 1f7664aa60b9313758b36999c284d0bf | 42.867704 | 79 | 0.548578 | 4.813149 | false | false | false | false |
miguelgrinberg/python-engineio | tests/asyncio/test_async_aiohttp.py | 1 | 1902 | import unittest
from unittest import mock
from engineio.async_drivers import aiohttp as async_aiohttp
class AiohttpTests(unittest.TestCase):
def test_create_route(self):
app = mock.MagicMock()
mock_server = mock.MagicMock()
async_aiohttp.create_route(app, mock_server, '/foo')
app.router.add_get.assert_any_call('/foo', mock_server.handle_request)
app.router.add_post.assert_any_call('/foo', mock_server.handle_request)
def test_translate_request(self):
request = mock.MagicMock()
request._message.method = 'PUT'
request._message.path = '/foo/bar?baz=1'
request._message.version = (1, 1)
request._message.headers = {
'a': 'b',
'c-c': 'd',
'c_c': 'e',
'content-type': 'application/json',
'content-length': 123,
}
request._payload = b'hello world'
environ = async_aiohttp.translate_request(request)
expected_environ = {
'REQUEST_METHOD': 'PUT',
'PATH_INFO': '/foo/bar',
'QUERY_STRING': 'baz=1',
'CONTENT_TYPE': 'application/json',
'CONTENT_LENGTH': 123,
'HTTP_A': 'b',
# 'HTTP_C_C': 'd,e',
'RAW_URI': '/foo/bar?baz=1',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.input': b'hello world',
'aiohttp.request': request,
}
for k, v in expected_environ.items():
assert v == environ[k]
assert environ['HTTP_C_C'] == 'd,e' or environ['HTTP_C_C'] == 'e,d'
# @mock.patch('async_aiohttp.aiohttp.web.Response')
def test_make_response(self):
rv = async_aiohttp.make_response(
'202 ACCEPTED', {'foo': 'bar'}, b'payload', {}
)
assert rv.status == 202
assert rv.headers['foo'] == 'bar'
assert rv.body == b'payload'
| mit | df7ef82677e3dfd828da5a638f2ffc36 | 34.886792 | 79 | 0.542587 | 3.575188 | false | true | false | false |
miguelgrinberg/python-engineio | src/engineio/async_drivers/asgi.py | 1 | 10181 | import os
import sys
import asyncio
from engineio.static_files import get_static_file
class ASGIApp:
"""ASGI application middleware for Engine.IO.
This middleware dispatches traffic to an Engine.IO application. It can
also serve a list of static files to the client, or forward unrelated
HTTP traffic to another ASGI application.
:param engineio_server: The Engine.IO server. Must be an instance of the
``engineio.AsyncServer`` class.
:param static_files: A dictionary with static file mapping rules. See the
documentation for details on this argument.
:param other_asgi_app: A separate ASGI app that receives all other traffic.
:param engineio_path: The endpoint where the Engine.IO application should
be installed. The default value is appropriate for
most cases.
:param on_startup: function to be called on application startup; can be
coroutine
:param on_shutdown: function to be called on application shutdown; can be
coroutine
Example usage::
import engineio
import uvicorn
eio = engineio.AsyncServer()
app = engineio.ASGIApp(eio, static_files={
'/': {'content_type': 'text/html', 'filename': 'index.html'},
'/index.html': {'content_type': 'text/html',
'filename': 'index.html'},
})
uvicorn.run(app, '127.0.0.1', 5000)
"""
def __init__(self, engineio_server, other_asgi_app=None,
static_files=None, engineio_path='engine.io',
on_startup=None, on_shutdown=None):
self.engineio_server = engineio_server
self.other_asgi_app = other_asgi_app
self.engineio_path = engineio_path
if not self.engineio_path.startswith('/'):
self.engineio_path = '/' + self.engineio_path
if not self.engineio_path.endswith('/'):
self.engineio_path += '/'
self.static_files = static_files or {}
self.on_startup = on_startup
self.on_shutdown = on_shutdown
async def __call__(self, scope, receive, send):
if scope['type'] in ['http', 'websocket'] and \
scope['path'].startswith(self.engineio_path):
await self.engineio_server.handle_request(scope, receive, send)
else:
static_file = get_static_file(scope['path'], self.static_files) \
if scope['type'] == 'http' and self.static_files else None
if scope['type'] == 'lifespan':
await self.lifespan(scope, receive, send)
elif static_file and os.path.exists(static_file['filename']):
await self.serve_static_file(static_file, receive, send)
elif self.other_asgi_app is not None:
await self.other_asgi_app(scope, receive, send)
else:
await self.not_found(receive, send)
async def serve_static_file(self, static_file, receive,
send): # pragma: no cover
event = await receive()
if event['type'] == 'http.request':
with open(static_file['filename'], 'rb') as f:
payload = f.read()
await send({'type': 'http.response.start',
'status': 200,
'headers': [(b'Content-Type', static_file[
'content_type'].encode('utf-8'))]})
await send({'type': 'http.response.body',
'body': payload})
async def lifespan(self, scope, receive, send):
if self.other_asgi_app is not None and self.on_startup is None and \
self.on_shutdown is None:
# let the other ASGI app handle lifespan events
await self.other_asgi_app(scope, receive, send)
return
while True:
event = await receive()
if event['type'] == 'lifespan.startup':
if self.on_startup:
try:
await self.on_startup() \
if asyncio.iscoroutinefunction(self.on_startup) \
else self.on_startup()
except:
await send({'type': 'lifespan.startup.failed'})
return
await send({'type': 'lifespan.startup.complete'})
elif event['type'] == 'lifespan.shutdown':
if self.on_shutdown:
try:
await self.on_shutdown() \
if asyncio.iscoroutinefunction(self.on_shutdown) \
else self.on_shutdown()
except:
await send({'type': 'lifespan.shutdown.failed'})
return
await send({'type': 'lifespan.shutdown.complete'})
return
async def not_found(self, receive, send):
"""Return a 404 Not Found error to the client."""
await send({'type': 'http.response.start',
'status': 404,
'headers': [(b'Content-Type', b'text/plain')]})
await send({'type': 'http.response.body',
'body': b'Not Found'})
async def translate_request(scope, receive, send):
class AwaitablePayload(object): # pragma: no cover
def __init__(self, payload):
self.payload = payload or b''
async def read(self, length=None):
if length is None:
r = self.payload
self.payload = b''
else:
r = self.payload[:length]
self.payload = self.payload[length:]
return r
event = await receive()
payload = b''
if event['type'] == 'http.request':
payload += event.get('body') or b''
while event.get('more_body'):
event = await receive()
if event['type'] == 'http.request':
payload += event.get('body') or b''
elif event['type'] == 'websocket.connect':
pass
else:
return {}
raw_uri = scope['path'].encode('utf-8')
if 'query_string' in scope and scope['query_string']:
raw_uri += b'?' + scope['query_string']
environ = {
'wsgi.input': AwaitablePayload(payload),
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
'wsgi.async': True,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'SERVER_SOFTWARE': 'asgi',
'REQUEST_METHOD': scope.get('method', 'GET'),
'PATH_INFO': scope['path'],
'QUERY_STRING': scope.get('query_string', b'').decode('utf-8'),
'RAW_URI': raw_uri.decode('utf-8'),
'SCRIPT_NAME': '',
'SERVER_PROTOCOL': 'HTTP/1.1',
'REMOTE_ADDR': '127.0.0.1',
'REMOTE_PORT': '0',
'SERVER_NAME': 'asgi',
'SERVER_PORT': '0',
'asgi.receive': receive,
'asgi.send': send,
'asgi.scope': scope,
}
for hdr_name, hdr_value in scope['headers']:
hdr_name = hdr_name.upper().decode('utf-8')
hdr_value = hdr_value.decode('utf-8')
if hdr_name == 'CONTENT-TYPE':
environ['CONTENT_TYPE'] = hdr_value
continue
elif hdr_name == 'CONTENT-LENGTH':
environ['CONTENT_LENGTH'] = hdr_value
continue
key = 'HTTP_%s' % hdr_name.replace('-', '_')
if key in environ:
hdr_value = '%s,%s' % (environ[key], hdr_value)
environ[key] = hdr_value
environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
return environ
async def make_response(status, headers, payload, environ):
headers = [(h[0].encode('utf-8'), h[1].encode('utf-8')) for h in headers]
if environ['asgi.scope']['type'] == 'websocket':
if status.startswith('200 '):
await environ['asgi.send']({'type': 'websocket.accept',
'headers': headers})
else:
if payload:
reason = payload.decode('utf-8') \
if isinstance(payload, bytes) else str(payload)
await environ['asgi.send']({'type': 'websocket.close',
'reason': reason})
else:
await environ['asgi.send']({'type': 'websocket.close'})
return
await environ['asgi.send']({'type': 'http.response.start',
'status': int(status.split(' ')[0]),
'headers': headers})
await environ['asgi.send']({'type': 'http.response.body',
'body': payload})
class WebSocket(object): # pragma: no cover
"""
This wrapper class provides an asgi WebSocket interface that is
somewhat compatible with eventlet's implementation.
"""
def __init__(self, handler):
self.handler = handler
self.asgi_receive = None
self.asgi_send = None
async def __call__(self, environ):
self.asgi_receive = environ['asgi.receive']
self.asgi_send = environ['asgi.send']
await self.asgi_send({'type': 'websocket.accept'})
await self.handler(self)
async def close(self):
await self.asgi_send({'type': 'websocket.close'})
async def send(self, message):
msg_bytes = None
msg_text = None
if isinstance(message, bytes):
msg_bytes = message
else:
msg_text = message
await self.asgi_send({'type': 'websocket.send',
'bytes': msg_bytes,
'text': msg_text})
async def wait(self):
event = await self.asgi_receive()
if event['type'] != 'websocket.receive':
raise IOError()
return event.get('bytes') or event.get('text')
_async = {
'asyncio': True,
'translate_request': translate_request,
'make_response': make_response,
'websocket': WebSocket,
}
| mit | c882552251925af79497a0b504bfc647 | 37.711027 | 79 | 0.530793 | 4.187988 | false | false | false | false |
miguelgrinberg/python-engineio | src/engineio/async_drivers/tornado.py | 1 | 5901 | import asyncio
import sys
from urllib.parse import urlsplit
from .. import exceptions
import tornado.web
import tornado.websocket
def get_tornado_handler(engineio_server):
class Handler(tornado.websocket.WebSocketHandler): # pragma: no cover
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if isinstance(engineio_server.cors_allowed_origins, str):
if engineio_server.cors_allowed_origins == '*':
self.allowed_origins = None
else:
self.allowed_origins = [
engineio_server.cors_allowed_origins]
else:
self.allowed_origins = engineio_server.cors_allowed_origins
self.receive_queue = asyncio.Queue()
async def get(self, *args, **kwargs):
if self.request.headers.get('Upgrade', '').lower() == 'websocket':
ret = super().get(*args, **kwargs)
if asyncio.iscoroutine(ret):
await ret
else:
await engineio_server.handle_request(self)
async def open(self, *args, **kwargs):
# this is the handler for the websocket request
asyncio.ensure_future(engineio_server.handle_request(self))
async def post(self, *args, **kwargs):
await engineio_server.handle_request(self)
async def options(self, *args, **kwargs):
await engineio_server.handle_request(self)
async def on_message(self, message):
await self.receive_queue.put(message)
async def get_next_message(self):
return await self.receive_queue.get()
def on_close(self):
self.receive_queue.put_nowait(None)
def check_origin(self, origin):
if self.allowed_origins is None or origin in self.allowed_origins:
return True
return super().check_origin(origin)
def get_compression_options(self):
# enable compression
return {}
return Handler
def translate_request(handler):
"""This function takes the arguments passed to the request handler and
uses them to generate a WSGI compatible environ dictionary.
"""
class AwaitablePayload(object):
def __init__(self, payload):
self.payload = payload or b''
async def read(self, length=None):
if length is None:
r = self.payload
self.payload = b''
else:
r = self.payload[:length]
self.payload = self.payload[length:]
return r
payload = handler.request.body
uri_parts = urlsplit(handler.request.path)
full_uri = handler.request.path
if handler.request.query: # pragma: no cover
full_uri += '?' + handler.request.query
environ = {
'wsgi.input': AwaitablePayload(payload),
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
'wsgi.async': True,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'SERVER_SOFTWARE': 'aiohttp',
'REQUEST_METHOD': handler.request.method,
'QUERY_STRING': handler.request.query or '',
'RAW_URI': full_uri,
'SERVER_PROTOCOL': 'HTTP/%s' % handler.request.version,
'REMOTE_ADDR': '127.0.0.1',
'REMOTE_PORT': '0',
'SERVER_NAME': 'aiohttp',
'SERVER_PORT': '0',
'tornado.handler': handler
}
for hdr_name, hdr_value in handler.request.headers.items():
hdr_name = hdr_name.upper()
if hdr_name == 'CONTENT-TYPE':
environ['CONTENT_TYPE'] = hdr_value
continue
elif hdr_name == 'CONTENT-LENGTH':
environ['CONTENT_LENGTH'] = hdr_value
continue
key = 'HTTP_%s' % hdr_name.replace('-', '_')
environ[key] = hdr_value
environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
path_info = uri_parts.path
environ['PATH_INFO'] = path_info
environ['SCRIPT_NAME'] = ''
return environ
def make_response(status, headers, payload, environ):
"""This function generates an appropriate response object for this async
mode.
"""
tornado_handler = environ['tornado.handler']
try:
tornado_handler.set_status(int(status.split()[0]))
except RuntimeError: # pragma: no cover
# for websocket connections Tornado does not accept a response, since
# it already emitted the 101 status code
return
for header, value in headers:
tornado_handler.set_header(header, value)
tornado_handler.write(payload)
tornado_handler.finish()
class WebSocket(object): # pragma: no cover
"""
This wrapper class provides a tornado WebSocket interface that is
somewhat compatible with eventlet's implementation.
"""
def __init__(self, handler):
self.handler = handler
self.tornado_handler = None
async def __call__(self, environ):
self.tornado_handler = environ['tornado.handler']
self.environ = environ
await self.handler(self)
async def close(self):
self.tornado_handler.close()
async def send(self, message):
try:
self.tornado_handler.write_message(
message, binary=isinstance(message, bytes))
except tornado.websocket.WebSocketClosedError:
raise exceptions.EngineIOError()
async def wait(self):
msg = await self.tornado_handler.get_next_message()
if not isinstance(msg, bytes) and \
not isinstance(msg, str):
raise IOError()
return msg
_async = {
'asyncio': True,
'translate_request': translate_request,
'make_response': make_response,
'websocket': WebSocket,
}
| mit | 13da81d5e05d6d28fc0d0ececd996892 | 31.423077 | 78 | 0.59312 | 4.24838 | false | false | false | false |
miguelgrinberg/python-engineio | src/engineio/asyncio_socket.py | 1 | 10248 | import asyncio
import sys
import time
from . import exceptions
from . import packet
from . import payload
from . import socket
class AsyncSocket(socket.Socket):
async def poll(self):
"""Wait for packets to send to the client."""
try:
packets = [await asyncio.wait_for(
self.queue.get(),
self.server.ping_interval + self.server.ping_timeout)]
self.queue.task_done()
except (asyncio.TimeoutError, asyncio.CancelledError):
raise exceptions.QueueEmpty()
if packets == [None]:
return []
while True:
try:
pkt = self.queue.get_nowait()
self.queue.task_done()
if pkt is None:
self.queue.put_nowait(None)
break
packets.append(pkt)
except asyncio.QueueEmpty:
break
return packets
async def receive(self, pkt):
"""Receive packet from the client."""
self.server.logger.info('%s: Received packet %s data %s',
self.sid, packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes)
else '<binary>')
if pkt.packet_type == packet.PONG:
self.schedule_ping()
elif pkt.packet_type == packet.MESSAGE:
await self.server._trigger_event(
'message', self.sid, pkt.data,
run_async=self.server.async_handlers)
elif pkt.packet_type == packet.UPGRADE:
await self.send(packet.Packet(packet.NOOP))
elif pkt.packet_type == packet.CLOSE:
await self.close(wait=False, abort=True)
else:
raise exceptions.UnknownPacketError()
async def check_ping_timeout(self):
"""Make sure the client is still sending pings."""
if self.closed:
raise exceptions.SocketIsClosedError()
if self.last_ping and \
time.time() - self.last_ping > self.server.ping_timeout:
self.server.logger.info('%s: Client is gone, closing socket',
self.sid)
# Passing abort=False here will cause close() to write a
# CLOSE packet. This has the effect of updating half-open sockets
# to their correct state of disconnected
await self.close(wait=False, abort=False)
return False
return True
async def send(self, pkt):
"""Send a packet to the client."""
if not await self.check_ping_timeout():
return
else:
await self.queue.put(pkt)
self.server.logger.info('%s: Sending packet %s data %s',
self.sid, packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes)
else '<binary>')
async def handle_get_request(self, environ):
"""Handle a long-polling GET request from the client."""
connections = [
s.strip()
for s in environ.get('HTTP_CONNECTION', '').lower().split(',')]
transport = environ.get('HTTP_UPGRADE', '').lower()
if 'upgrade' in connections and transport in self.upgrade_protocols:
self.server.logger.info('%s: Received request to upgrade to %s',
self.sid, transport)
return await getattr(self, '_upgrade_' + transport)(environ)
if self.upgrading or self.upgraded:
# we are upgrading to WebSocket, do not return any more packets
# through the polling endpoint
return [packet.Packet(packet.NOOP)]
try:
packets = await self.poll()
except exceptions.QueueEmpty:
exc = sys.exc_info()
await self.close(wait=False)
raise exc[1].with_traceback(exc[2])
return packets
async def handle_post_request(self, environ):
"""Handle a long-polling POST request from the client."""
length = int(environ.get('CONTENT_LENGTH', '0'))
if length > self.server.max_http_buffer_size:
raise exceptions.ContentTooLongError()
else:
body = (await environ['wsgi.input'].read(length)).decode('utf-8')
p = payload.Payload(encoded_payload=body)
for pkt in p.packets:
await self.receive(pkt)
async def close(self, wait=True, abort=False):
"""Close the socket connection."""
if not self.closed and not self.closing:
self.closing = True
await self.server._trigger_event('disconnect', self.sid)
if not abort:
await self.send(packet.Packet(packet.CLOSE))
self.closed = True
if wait:
await self.queue.join()
def schedule_ping(self):
async def send_ping():
self.last_ping = None
await asyncio.sleep(self.server.ping_interval)
if not self.closing and not self.closed:
self.last_ping = time.time()
await self.send(packet.Packet(packet.PING))
self.server.start_background_task(send_ping)
async def _upgrade_websocket(self, environ):
"""Upgrade the connection from polling to websocket."""
if self.upgraded:
raise IOError('Socket has been upgraded already')
if self.server._async['websocket'] is None:
# the selected async mode does not support websocket
return self.server._bad_request()
ws = self.server._async['websocket'](self._websocket_handler)
return await ws(environ)
async def _websocket_handler(self, ws):
"""Engine.IO handler for websocket transport."""
async def websocket_wait():
data = await ws.wait()
if data and len(data) > self.server.max_http_buffer_size:
raise ValueError('packet is too large')
return data
if self.connected:
# the socket was already connected, so this is an upgrade
self.upgrading = True # hold packet sends during the upgrade
try:
pkt = await websocket_wait()
except IOError: # pragma: no cover
return
decoded_pkt = packet.Packet(encoded_packet=pkt)
if decoded_pkt.packet_type != packet.PING or \
decoded_pkt.data != 'probe':
self.server.logger.info(
'%s: Failed websocket upgrade, no PING packet', self.sid)
self.upgrading = False
return
await ws.send(packet.Packet(packet.PONG, data='probe').encode())
await self.queue.put(packet.Packet(packet.NOOP)) # end poll
try:
pkt = await websocket_wait()
except IOError: # pragma: no cover
self.upgrading = False
return
decoded_pkt = packet.Packet(encoded_packet=pkt)
if decoded_pkt.packet_type != packet.UPGRADE:
self.upgraded = False
self.server.logger.info(
('%s: Failed websocket upgrade, expected UPGRADE packet, '
'received %s instead.'),
self.sid, pkt)
self.upgrading = False
return
self.upgraded = True
self.upgrading = False
else:
self.connected = True
self.upgraded = True
# start separate writer thread
async def writer():
while True:
packets = None
try:
packets = await self.poll()
except exceptions.QueueEmpty:
break
if not packets:
# empty packet list returned -> connection closed
break
try:
for pkt in packets:
await ws.send(pkt.encode())
except:
break
writer_task = asyncio.ensure_future(writer())
self.server.logger.info(
'%s: Upgrade to websocket successful', self.sid)
while True:
p = None
wait_task = asyncio.ensure_future(websocket_wait())
try:
p = await asyncio.wait_for(
wait_task,
self.server.ping_interval + self.server.ping_timeout)
except asyncio.CancelledError: # pragma: no cover
# there is a bug (https://bugs.python.org/issue30508) in
# asyncio that causes a "Task exception never retrieved" error
# to appear when wait_task raises an exception before it gets
# cancelled. Calling wait_task.exception() prevents the error
# from being issued in Python 3.6, but causes other errors in
# other versions, so we run it with all errors suppressed and
# hope for the best.
try:
wait_task.exception()
except:
pass
break
except:
break
if p is None:
# connection closed by client
break
pkt = packet.Packet(encoded_packet=p)
try:
await self.receive(pkt)
except exceptions.UnknownPacketError: # pragma: no cover
pass
except exceptions.SocketIsClosedError: # pragma: no cover
self.server.logger.info('Receive error -- socket is closed')
break
except: # pragma: no cover
# if we get an unexpected exception we log the error and exit
# the connection properly
self.server.logger.exception('Unknown receive error')
await self.queue.put(None) # unlock the writer task so it can exit
await asyncio.wait_for(writer_task, timeout=None)
await self.close(wait=False, abort=True)
| mit | 14a7398738665a231ec2bee1912e3694 | 39.828685 | 79 | 0.540203 | 4.782081 | false | false | false | false |
miguelgrinberg/python-engineio | src/engineio/async_drivers/sanic.py | 1 | 4468 | import sys
from urllib.parse import urlsplit
try: # pragma: no cover
from sanic.response import HTTPResponse
try:
from sanic.server.protocols.websocket_protocol import WebSocketProtocol
except ImportError:
print('yay')
from sanic.websocket import WebSocketProtocol
except ImportError:
HTTPResponse = None
WebSocketProtocol = None
def create_route(app, engineio_server, engineio_endpoint): # pragma: no cover
"""This function sets up the engine.io endpoint as a route for the
application.
Note that both GET and POST requests must be hooked up on the engine.io
endpoint.
"""
app.add_route(engineio_server.handle_request, engineio_endpoint,
methods=['GET', 'POST', 'OPTIONS'])
try:
app.enable_websocket()
except AttributeError:
# ignore, this version does not support websocket
pass
def translate_request(request): # pragma: no cover
"""This function takes the arguments passed to the request handler and
uses them to generate a WSGI compatible environ dictionary.
"""
class AwaitablePayload(object):
def __init__(self, payload):
self.payload = payload or b''
async def read(self, length=None):
if length is None:
r = self.payload
self.payload = b''
else:
r = self.payload[:length]
self.payload = self.payload[length:]
return r
uri_parts = urlsplit(request.url)
environ = {
'wsgi.input': AwaitablePayload(request.body),
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
'wsgi.async': True,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'SERVER_SOFTWARE': 'sanic',
'REQUEST_METHOD': request.method,
'QUERY_STRING': uri_parts.query or '',
'RAW_URI': request.url,
'SERVER_PROTOCOL': 'HTTP/' + request.version,
'REMOTE_ADDR': '127.0.0.1',
'REMOTE_PORT': '0',
'SERVER_NAME': 'sanic',
'SERVER_PORT': '0',
'sanic.request': request
}
for hdr_name, hdr_value in request.headers.items():
hdr_name = hdr_name.upper()
if hdr_name == 'CONTENT-TYPE':
environ['CONTENT_TYPE'] = hdr_value
continue
elif hdr_name == 'CONTENT-LENGTH':
environ['CONTENT_LENGTH'] = hdr_value
continue
key = 'HTTP_%s' % hdr_name.replace('-', '_')
if key in environ:
hdr_value = '%s,%s' % (environ[key], hdr_value)
environ[key] = hdr_value
environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
path_info = uri_parts.path
environ['PATH_INFO'] = path_info
environ['SCRIPT_NAME'] = ''
return environ
def make_response(status, headers, payload, environ): # pragma: no cover
"""This function generates an appropriate response object for this async
mode.
"""
headers_dict = {}
content_type = None
for h in headers:
if h[0].lower() == 'content-type':
content_type = h[1]
else:
headers_dict[h[0]] = h[1]
return HTTPResponse(body=payload, content_type=content_type,
status=int(status.split()[0]), headers=headers_dict)
class WebSocket(object): # pragma: no cover
"""
This wrapper class provides a sanic WebSocket interface that is
somewhat compatible with eventlet's implementation.
"""
def __init__(self, handler):
self.handler = handler
self._sock = None
async def __call__(self, environ):
request = environ['sanic.request']
protocol = request.transport.get_protocol()
self._sock = await protocol.websocket_handshake(request)
self.environ = environ
await self.handler(self)
async def close(self):
await self._sock.close()
async def send(self, message):
await self._sock.send(message)
async def wait(self):
data = await self._sock.recv()
if not isinstance(data, bytes) and \
not isinstance(data, str):
raise IOError()
return data
_async = {
'asyncio': True,
'create_route': create_route,
'translate_request': translate_request,
'make_response': make_response,
'websocket': WebSocket if WebSocketProtocol else None,
}
| mit | 3e9dbfcd1a3c20811a5c8411e087d989 | 29.394558 | 79 | 0.598254 | 4.110396 | false | false | false | false |
yourlabs/django-autocomplete-light | src/dal/test/utils.py | 1 | 1926 | """Utils for testing autocompletes."""
from django.apps import apps
class Fixtures(object):
"""Callback for post_migrate to create many objects."""
def __init__(self, model_name=None):
"""Preset a model name, ie. 'auth.user'."""
self.model_name = model_name
def get_model(self, sender):
"""Return either the preset model, either the sender's TestModel."""
if self.model_name is None:
return sender.get_model('TModel')
else:
return apps.get_model(self.model_name)
def __call__(self, sender, **kwargs):
"""Call function, calls install_fixtures."""
model = self.get_model(sender)
self.install_fixtures(model)
def install_fixtures(self, model):
"""Install fixtures for model."""
for n in range(1, 50):
try:
model.objects.get(pk=n)
except model.DoesNotExist:
model.objects.create(name='test %s' % n, pk=n)
class OwnedFixtures(Fixtures):
"""Fixtures for models with an "owner" relation to User."""
installed_auth = False
def install_fixtures(self, model):
"""Install owners and fixtures."""
User = apps.get_model('auth.user') # noqa
self.test, created = User.objects.get_or_create(
username='test',
is_staff=True,
is_superuser=True
)
if created:
self.test.set_password('test')
self.test.save()
self.other, created = User.objects.get_or_create(username='other')
if created:
self.other.set_password('test')
self.other.save()
for n in range(1, 3):
for u in [self.test, self.other]:
model.objects.update_or_create(
name='test #%s for %s' % (n, u),
defaults=dict(owner=u),
)
fixtures = Fixtures()
| mit | eb802d58629e8c42a40e7774927aaf4d | 29.09375 | 76 | 0.556075 | 3.987578 | false | true | false | false |
iclab/centinel | centinel/primitives/foctor_core/foctor_authentication.py | 3 | 16818 | __author__ = 'rishabn'
import time
import signal
import errno
import os
from functools import wraps
from selenium.common.exceptions import StaleElementReferenceException, ElementNotSelectableException, \
NoSuchElementException, ElementNotVisibleException, TimeoutException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
def set_union(list_of_lists):
    """Return the union of all the given lists as a single set.

    Duplicates within or across the sub-lists collapse to one entry;
    an empty outer list yields the empty set.  Items must be hashable,
    exactly as in the original implementation.
    """
    # set().union(*iterables) performs the whole union in one C-level
    # call instead of the previous nested append-then-set dance.
    return set().union(*list_of_lists)
def compare_record(rec_1, rec_2):
    """Return 1 when the two records compare equal, -1 otherwise.

    Two None records count as equal (the original special-cased this,
    but ``None == None`` already holds), so the whole four-branch
    cascade reduces to a single equality test.
    """
    return 1 if rec_1 == rec_2 else -1
def get_record(element, tag):
    """Snapshot the interesting attributes of a WebElement into a dict.

    Returns None when *element* is None or has gone stale mid-read.
    Otherwise the dict holds each attribute as a unicode string, plus
    two derived summaries: 'string' (lower-cased concatenation of the
    first lines, used for substring exclusion checks) and 'key-string'
    (tab/comma separated first lines, used for display), and the raw
    element under 'element'.
    """
    if element is None:
        return None
    try:
        record = {'tag': tag}
        # (record key, DOM attribute name) pairs read straight off the
        # element; every value is coerced to unicode, so a missing
        # attribute (None) becomes the string u'None' as before.
        attribute_pairs = (
            ('title', 'title'), ('type', 'type'), ('label', 'label'),
            ('value', 'value'), ('id', 'id'), ('name', 'name'),
            ('aria', 'aria-label'), ('maxlen', 'maxlength'),
            ('disabled', 'disabled'), ('href', 'href'),
            ('autofocus', 'autofocus'),
        )
        for key, attr in attribute_pairs:
            record[key] = unicode(element.get_attribute(attr))
        record['displayed'] = unicode(element.is_displayed())
        record['enabled'] = unicode(element.is_enabled())
        record['text'] = unicode(element.text)

        def head(key):
            # First line of the recorded value.
            return record[key].split("\n")[0]

        record['string'] = (head('title') + head('type') +
                            head('label') + head('value')).lower()
        record['string'] += (head('id') + head('aria') +
                             head('name') + head('text')).lower()
        record['key-string'] = "\t " + ", \t".join(
            [head('text'), head('name'), head('id')])
        record['key-string'] += ", \t" + ", \t".join(
            [head('aria'), head('label'), record['tag']])
        record['element'] = element
        return record
    except StaleElementReferenceException:
        # Element was detached from the DOM while we were reading it.
        return None
def scan_page_for_element(driver, keyword_list, tag_list, exclude_list):
    """Check the page for one matching element per keyword list.

    For every index i this looks for a visible, enabled element whose
    tag is in tag_list[i], whose normalized id/text/value/type equals
    one of the keywords in keyword_list[i], and whose attribute string
    contains none of the exclude_list[i] terms.

    Returns a list of booleans, one per keyword list, True when such
    an element was found.  Returns early once every entry is True.
    """
    def _normalized(record, key):
        # Lower-cased, stripped, space-free form of a record field;
        # empty unicode string when the field is unusable.  Narrowed
        # from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        try:
            return record[key].lower().strip().replace(" ", "")
        except Exception:
            return unicode("")

    all_tags = set_union(tag_list)
    kw_list_len = len(keyword_list)
    status = [False] * kw_list_len
    for tag in all_tags:
        elements = driver.find_elements_by_tag_name(tag)
        try:
            # Sort for a deterministic scan order across page loads.
            elements.sort(key=lambda x: (x.get_attribute("text"),
                                         x.get_attribute("value"),
                                         x.get_attribute("title")))
        except StaleElementReferenceException:
            pass
        for index in range(0, kw_list_len):
            if tag not in tag_list[index]:
                continue
            for e in elements:
                if False not in status:
                    # Every keyword list already satisfied.
                    return status
                if status[index] is True:
                    break
                try:
                    if e.tag_name != tag:
                        continue
                except (StaleElementReferenceException, NoSuchElementException, ElementNotVisibleException):
                    continue
                element_record = get_record(e, tag)
                if element_record is None:
                    continue
                # Skip hidden or disabled elements.
                if (element_record['displayed'] == unicode(False)) or (element_record['enabled'] == unicode(False)):
                    continue
                if any(x in element_record['string'] for x in exclude_list[index]):
                    continue
                value = _normalized(element_record, 'value')
                text = _normalized(element_record, 'text')
                # Renamed from ``type`` to avoid shadowing the builtin.
                type_ = _normalized(element_record, 'type')
                id_ = _normalized(element_record, 'id')
                k = keyword_list[index]
                if (id_ in k) or (text in k) or (value in k) or (type_ in k):
                    status[index] = True
    return status
def click_closest_match(driver, keywords, tags, exclusions):
min_length = 100
keywords.sort()
for tag in tags:
elements = driver.find_elements_by_tag_name(tag)
try:
elements.sort(key=lambda x: (x.get_attribute("text"), x.get_attribute("value"), x.get_attribute("title")))
except StaleElementReferenceException:
pass
for kw in keywords:
for e in elements:
try:
if e.tag_name != tag:
continue
except (StaleElementReferenceException, NoSuchElementException):
continue
exclude_element, element_record = False, get_record(e, tag)
if element_record is None:
continue
if (element_record['displayed'] == unicode(False)) or (element_record['enabled'] == unicode(False)):
continue
for x in exclusions:
if x in element_record['string']:
exclude_element = True
if exclude_element is True:
continue
try:
value = element_record['value'].lower().strip().replace(" ", "")
except:
value = unicode("")
try:
text = element_record['text'].lower().strip().replace(" ", "")
except:
text = unicode("")
try:
id_ = element_record['id_'].lower().strip().replace(" ", "")
except:
id_ = unicode("")
if (text == unicode(kw)) or (value == unicode(kw)) or (id_ == unicode(kw)):
try:
element_record['element'].click()
return element_record
except (StaleElementReferenceException, ElementNotSelectableException, ElementNotVisibleException):
continue
elif (unicode(kw) in text) or (unicode(kw) in value) or (unicode(kw) in id_):
if min_length > len(text):
min_record = element_record
min_length = len(text)
if min_length < 100:
try:
min_record['element'].click()
except (StaleElementReferenceException, ElementNotSelectableException, ElementNotVisibleException):
return None
return min_record
def scan_page_for_login_status(driver):
keyword_list = [["password", "pass"], ["signin", "login"], ["next, continue"]]
tag_list = [["input"], ["input", "a", "button"], ["input", "a", "button"]]
exclude_list = [["hidden"], ["hidden", "next"], ["hidden"]]
password_status, login_status, next_status = scan_page_for_element(driver, keyword_list, tag_list, exclude_list)
return password_status, login_status, next_status
def front_page_login(driver):
keyword_list, tag_list, exclude_list = [["signin", "login"]], [["input", "a", "button"]], [["hidden", "next"]]
login_status = scan_page_for_element(driver, keyword_list, tag_list, exclude_list)
return login_status[0]
def complete_signin(driver, uname, password, clicks):
print "Regular sign-in page detected..."
uname_status, clicks = fill_all_uname(driver, uname, clicks)
print uname_status
if uname_status is None:
return -1, clicks
pwd_status, clicks = fill_all_password(driver, password, clicks)
print pwd_status
if pwd_status is None:
return -1, clicks
return True, clicks
def google_signin(driver, uname, password, clicks):
print "I think we found a google style (multi-page) sign-in..."
uname_status, clicks = fill_all_uname(driver, uname, clicks)
if uname_status is None:
return -1, clicks
c = click_closest_match(driver, ["next"], ["input", "a", "button"], ["hidden"])
if c is None:
return -1, clicks
else:
clicks.append("click, \t" + c['key-string'])
pwd_status, clicks = fill_all_password(driver, password, clicks)
if pwd_status is None:
return -1, clicks
return True, clicks
def make_next_move(driver, pw, si, n_, uname, password, clicks):
print "Password field: ", pw, "\t Sign in field: ", si, "\t Next field: ", n_, "\n"
# If there is a password field and a sign in button, then fill in the from and hit enter.
if pw is True:
return complete_signin(driver, uname, password, clicks)
# If there is a sign in button, but nothing else. Click it and see what happens next.
elif (pw is False) and (si is True) and (n_ is False):
ce = click_closest_match(driver, ["signin", "login"], ["a", "button", "input"], ["hidden"])
clicks.append("click, \t" + ce['key-string'])
return False, clicks
# If there is a next button, but no password field: It's a multi-page sign in form.
# Fill in what ever you can and then click next.
elif ((pw is False) and (n_ is True)) or ((clicks[0] == "None") and (pw is False) and (n_ is True)):
return google_signin(driver, uname, password, clicks)
else:
return -1, clicks
def record_login_elements(driver, uname, password):
print "Recording elements required to login..."
keywords, tags, exclusions, recorded_clicks = ["signin", "login"], ["a", "button", "input"], ["hidden"], []
ce = click_closest_match(driver, keywords, tags, exclusions)
if ce is None:
recorded_clicks.append("None")
else:
recorded_clicks.append("click, \t" + ce['key-string'])
print recorded_clicks
login_status, iterations = False, 0
while (login_status is False) and (iterations <= 5):
iterations += 1
page_status = scan_page_for_login_status(driver)
if len(page_status) != 3:
login_status = -1
break
else:
password_status, signin_status, next_status = page_status[0], page_status[1], page_status[2]
login_status, recorded_clicks = make_next_move(driver, password_status, signin_status, next_status, uname,
password, recorded_clicks)
if login_status == -1:
print "Aborting login... Foctor failed you :("
return None
else:
return recorded_clicks
def fill_all_uname(driver, uname, clicks):
print "Entering username..."
ret_rec = None
elements = driver.find_elements_by_tag_name("input")
keywords = ["email", "user", "name", "id"]
for e in elements:
try:
e.tag_name
except (StaleElementReferenceException, ElementNotVisibleException, ElementNotSelectableException,
NoSuchElementException):
continue
rec = get_record(e, "input")
if rec is None:
continue
if (rec['enabled'] == unicode(False)) or (rec['displayed'] == unicode(False)):
continue
if "hidden" in rec['string']:
continue
try:
value = rec['value'].lower().strip().replace(" ", "")
except:
value = unicode("")
try:
text = rec['text'].lower().strip().replace(" ", "")
except:
text = unicode("")
try:
type_ = rec['type'].lower().strip().replace(" ", "")
except:
type_ = unicode("")
try:
id_ = rec['id'].lower().strip().replace(" ", "")
except:
id_ = unicode("")
try:
name = rec['name'].lower().strip().replace(" ","")
except:
name = unicode("")
for k in keywords:
if (k in id_) or (k in type_) or (k in text) or (k in value) or (k in name):
e.send_keys(uname)
ret_rec = rec
clicks.append("username, \t" + ret_rec['key-string'])
break
return ret_rec, clicks
def fill_all_password(driver, password, clicks):
print "Entering password..."
ret_rec = None
time.sleep(2)
elements = driver.find_elements_by_tag_name("input")
keywords = ["password", "pass"]
for e in elements:
rec = get_record(e, "input")
if (rec is None) or (compare_record(rec, ret_rec) == 1):
continue
if (rec['enabled'] == unicode(False)) or (rec['displayed'] == unicode(False)):
continue
if "hidden" in rec['string']:
continue
try:
value = rec['value'].lower().strip().replace(" ", "")
except:
value = unicode("")
try:
text = rec['text'].lower().strip().replace(" ", "")
except:
text = unicode("")
try:
type_ = rec['type'].lower().strip().replace(" ", "")
except:
type_ = unicode("")
try:
id_ = rec['id'].lower().strip().replace(" ", "")
except:
id_ = unicode("")
for k in keywords:
if (k in id_) or (k in type_) or (k in text) or (k in value):
ret_rec = rec
e.clear()
e.send_keys(password)
e.send_keys(Keys.RETURN)
clicks.append("password, \t" + rec['key-string'])
return ret_rec, clicks
return ret_rec, clicks
def local_timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
def decorator(func):
def _handle_timeout(signum, frame):
raise LocalTimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wraps(func)(wrapper)
return decorator
class LocalTimeoutError(Exception):
pass
@local_timeout(60)
def find_element_by_record(driver, record):
incomplete = 0
if any(c.isalpha() for c in record['id']):
try:
print "Getting element with ID: " + record['id']
e = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, record['id'])))
return e
except (StaleElementReferenceException, ElementNotSelectableException, ElementNotVisibleException,
NoSuchElementException, TimeoutException, LocalTimeoutError):
incomplete = 1
if (incomplete == 1) or (record['id'] == "None") or not(any(c.isalpha() for c in record['id'])):
try:
print "Getting element with record: ", record
elements = driver.find_elements_by_tag_name(record['tag'])
except (StaleElementReferenceException, ElementNotSelectableException, ElementNotVisibleException,
NoSuchElementException):
return None
for e in elements:
try:
r = get_record(e, record['tag'])
if (r['text'].split("\n")[0] == record['text']) and (r['name'].split("\n")[0] == record['name']):
return e
except (StaleElementReferenceException, ElementNotSelectableException, ElementNotVisibleException,
NoSuchElementException, TimeoutException, LocalTimeoutError):
return None
return None
| mit | 5a7e0d7a9a9283a8505b584f643cbd9c | 39.72155 | 120 | 0.532881 | 4.223506 | false | false | false | false |
iclab/centinel | centinel/primitives/foctor_core/foctor_args_error.py | 3 | 3834 | __author__ = 'rishabn'
def fp_crawler_mode_error():
str_err = "Please specify a crawl mode: standard, tor, search_log, or login_log \n"
str_err += "python front-page-crawler.py <crawl-mode>"
print str_err
raise SystemExit
def fp_crawler_standard_mode_error():
str_err = "Usage for standard crawl: python front-page-crawler.py standard <site-list> <start-index> "
str_err += "<end-index> <capture-path> <display 0/1> <process-tag>"
print str_err
raise SystemExit
def fp_crawler_tor_mode_error():
str_err = "Usage for tor crawl: python front-page-crawler.py tor <site-list> <start-index> "
str_err += "<end-index> <capture-path> <display 0/1> <process-tag> <exit-ip> <tor-port>"
print str_err
raise SystemExit
def fp_crawler_search_mode_error():
str_err = "Usage for search-log crawl: python front-page-crawler.py search_log <site-list> <start-index> "
str_err += "<end-index> <capture-path> <display 0/1> <process-tag> <output-rule-log>"
print str_err
raise SystemExit
def fp_crawler_login_mode_error():
str_err = "Usage for login-log crawl: python front-page-crawler.py login_log <site-list> <start-index> "
str_err += "<end-index> <capture-path> <display 0/1> <process-tag>"
print str_err
raise SystemExit
def search_crawler_mode_error():
str_err = "Please specify a search crawl mode: generate rules (rule-gen), " \
"search from existing rules (search-tor/search-standard)"
str_err += "\npython search-crawler.py <search-crawl-mode>"
print str_err
raise SystemExit
def search_crawler_gen_rules_error():
str_err = "Usage for search-log crawl: python search-crawler.py rule-gen <site-list> <start-index> "
str_err += "<end-index> <capture-path> <display 0/1> <process-tag> <output-rule-log>"
print str_err
raise SystemExit
def search_crawler_tor_mode_error():
str_err = "Usage for search-log crawl: python search-crawler.py search-tor <rule-list> <start-index> "
str_err += "<end-index> <capture-path> <display 0/1> <process-tag> <exit-ip> <tor-port>"
print str_err
raise SystemExit
def search_crawler_standard_mode_error():
str_err = "Usage for search-log crawl: python search-crawler.py search-standard <rule-list> <start-index> "
str_err += "<end-index> <capture-path> <display 0/1> <process-tag>"
print str_err
raise SystemExit
def login_crawler_mode_error():
str_err = "Please specify a login crawl mode: generate rules (rule-gen), " \
"search from existing rules (login-tor/login-standard)"
str_err += "\npython login-crawler.py <login-crawl-mode>"
print str_err
raise SystemExit
def login_crawler_compatible_sites_error():
str_err = "Usage for login crawl: python login-crawler.py login-standard <site-list> <credentials-file> "
str_err += "<start-index> <end-index> <capture-path> <display 0/1> <process-tag>"
print str_err
raise SystemExit
def login_crawler_gen_rules_error():
str_err = "Usage for login crawl: python login-crawler.py rule-gen <credentials-file> <start-index> "
str_err += "<end-index> <capture-path> <display 0/1> <process-tag> <output-rule-log>"
print str_err
raise SystemExit
def login_crawler_standard_playback_error():
str_err = "Usage for login crawl: python login-crawler.py standard-playback <rule-list> <credentials-file> " \
"<start-index> <end-index> <capture-path> <display 0/1> <process-tag>"
print str_err
raise SystemExit
def login_crawler_tor_playback_error():
str_err = "Usage for login crawl: python login-crawler.py tor-playback <rule-list> <credentials-file> " \
"<start-index> <end-index> <capture-path> <display 0/1> <process-tag> <exit-ip> <tor-port>"
print str_err
raise SystemExit
| mit | b3037d3306a40535cab99ce9945bbf6f | 36.223301 | 114 | 0.673709 | 3.246401 | false | false | false | false |
usgo/agagd | agagd/agagd_core/views/players_profile.py | 1 | 4733 | # Date Imports
from datetime import date
# AGAGD Models Imports
import agagd_core.models as agagd_models
from agagd_core.tables.games import GamesTable
# AGAGD Django Tables Imports
from agagd_core.tables.players import (
PlayersInformationTable,
PlayersOpponentTable,
PlayersTournamentTable,
)
# Django Imports
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.http import Http404
from django.template.response import TemplateResponse
from django.views.generic.detail import DetailView
# Django Tables 2 Imports
from django_tables2 import RequestConfig
class PlayersProfilePageView(DetailView):
template_name = "players_profile_page.html"
def get(self, request, *args, **kwargs):
player_id = self.kwargs.get("player_id")
try:
player = (
agagd_models.Member.objects.exclude(status="pending")
.exclude(type="chapter")
.get(member_id=player_id)
)
except ObjectDoesNotExist:
raise Http404("Player Profile Not Found.")
player_games = agagd_models.Game.objects.filter(
Q(pin_player_1__exact=player_id) | Q(pin_player_2__exact=player_id)
).order_by("-game_date")
player_rating = agagd_models.Players.objects.filter(
Q(pin_player__exact=player_id)
).values("pin_player", "rating", "sigma")
# compute additional tables for opponents & tournament info. here
# TODO: refactor this into something nicer.
opponent_data = {}
tourney_data = {}
for game in player_games:
try:
t_dat = tourney_data.get(game.tournament_code.pk, {})
t_dat["tournament"] = game.tournament_code
t_dat["won"] = t_dat.get("won", 0)
t_dat["lost"] = t_dat.get("lost", 0)
# Set default game_date to None
game_date = None
# Check for 0000-00-00 dates
if game.game_date != "0000-00-00":
game_date = game.game_date
t_dat["date"] = t_dat.get("date", game_date)
op = game.player_other_than(player)
opp_dat = opponent_data.get(op, {})
opp_dat["opponent"] = op
opp_dat["total"] = opp_dat.get("total", 0) + 1
opp_dat["won"] = opp_dat.get("won", 0)
opp_dat["lost"] = opp_dat.get("lost", 0)
if game.won_by(player):
opp_dat["won"] += 1
t_dat["won"] += 1
else:
opp_dat["lost"] += 1
t_dat["lost"] += 1
opponent_data[op] = opp_dat
tourney_data[game.tournament_code.pk] = t_dat
except ObjectDoesNotExist:
print("failing game_id: %s" % game.pk)
opp_table = PlayersOpponentTable(opponent_data.values())
RequestConfig(request, paginate={"per_page": 10}).configure(opp_table)
t_table = PlayersTournamentTable(
tourney_data.values(),
sorted(
tourney_data.values(),
key=lambda d: d.get("date", date.today()) or date.today(),
reverse=True,
),
prefix="ts_played",
)
RequestConfig(request, paginate={"per_page": 10}).configure(t_table)
player_games_table = GamesTable(
player_games.values(
"game_date",
"handicap",
"pin_player_1",
"pin_player_2",
"tournament_code",
"result",
)
)
player_information_dict = player.__dict__
player_information_dict["rating"] = player_rating[0]["rating"]
try:
chapter_name = agagd_models.Chapters.objects.get(
member_id=player.chapter_id_id
).name
player_information_dict["members_chapter_name"] = chapter_name
except:
player_information_dict["members_chapter_name"] = None
players_information_table = PlayersInformationTable([player_information_dict])
context = locals()
context["page_title"] = "Player Profile | {}".format(player.full_name)
context["player"] = player
context["player_rating"] = player_rating[0]
context["player_games_table"] = player_games_table
context["players_information_table"] = players_information_table
context["player_opponents_table"] = opp_table
context["player_tournaments_table"] = t_table
return TemplateResponse(request, self.template_name, context)
| mit | e662d8e43e9a455b2832aeb04e48d90c | 34.856061 | 86 | 0.563702 | 3.885878 | false | false | false | false |
usgo/agagd | agagd/agagd_core/tables/players.py | 1 | 1944 | import agagd_core.defaults.styles.django_tables2 as django_tables2_styles
import django_tables2 as tables
from agagd_core.models import Chapters
from django.utils.html import format_html
class PlayersInformationTable(tables.Table):
full_name = tables.Column()
members_chapter_name = tables.Column(
verbose_name="Chapter",
linkify={"viewname": "chapter_detail", "args": [tables.A("chapter_id_id")]},
)
member_id = tables.Column()
status = tables.Column()
rating = tables.Column()
renewal_due = tables.Column()
class Meta:
template_name = "player_profile_information.html"
class PlayersOpponentTable(tables.Table):
opponent = tables.Column(
orderable=False,
linkify={
"viewname": "players_profile",
"args": [tables.A("opponent.member_id")],
},
)
total = tables.Column(verbose_name="Games")
won = tables.Column(verbose_name="Won", default=0)
lost = tables.Column(verbose_name="Lost")
ratio = tables.Column(verbose_name="Rate", default=0, empty_values=(-1,))
def render_ratio(self, record):
ratio = record["won"] / record["total"]
return f"{ratio:.2f}"
class Meta:
attrs = django_tables2_styles.default_bootstrap_header_column_attrs
orderable = False
template_name = "django_tables2/bootstrap4.html"
class PlayersTournamentTable(tables.Table):
tournament = tables.Column(
linkify=("tournament_detail", [tables.A("tournament.pk")])
)
date = tables.Column(default="Unknown")
won = tables.Column(verbose_name="Won", default=0)
lost = tables.Column(verbose_name="Lost", default=0)
class Meta:
attrs = django_tables2_styles.default_bootstrap_header_column_attrs
fields = ("date", "tournament", "won", "lost")
orderable = False
template_name = "django_tables2/bootstrap4.html"
sequence = fields
| mit | 769445fa7061d63635266a56103679b8 | 31.949153 | 84 | 0.653807 | 3.633645 | false | false | false | false |
usgo/agagd | agagd/agagd_core/views/api.py | 1 | 3806 | from datetime import datetime, timedelta
from agagd_core.json_response import JsonResponse
from agagd_core.models import Game, Member
from django.db.models import Avg, Count
from django.db.models.functions import TruncMonth, TruncWeek
from django.http import HttpResponse
from django.views import View
class ApiStatusView(View):
def get(self, request):
response = {"health_status_code": 200, "health_status": "The AGAGD is running."}
return JsonResponse(response)
class ApiGameCountView(View):
def get(self, request):
games_by_date = []
for game_obj in Game.objects.values("game_date").annotate(Count("game_date")):
try:
game_date = datetime.strptime(str(game_obj["game_date"]), "%Y-%m-%d")
games_by_date.append(
{
"date": game_date.strftime("%Y-%m-%d"),
"count": game_obj["game_date__count"],
}
)
except ValueError:
pass
sorted_games_by_date = sorted(games_by_date, key=lambda d: d["date"])
return JsonResponse(sorted_games_by_date)
class ApiPlayerRatings(View):
def __get_ratings_json(self, ratings_obj):
ratings_json = []
for rating in ratings_obj:
elab_date = None
sigma = None
players_rating = None
if isinstance(rating, dict):
if "week_date" in rating:
elab_date = rating["week_date"]
elif "month_date" in rating:
elab_date = rating["month_date"]
sigma = rating["sigma__avg"]
players_rating = rating["rating__avg"]
else:
elab_date = rating.elab_date
sigma = rating.sigma
players_rating = rating.rating
if elab_date != None:
ratings_json.append(
{"sigma": sigma, "elab_date": elab_date, "rating": players_rating}
)
return ratings_json
def __get_less_current_date(self, number_of_weeks):
return datetime.now() - timedelta(weeks=number_of_weeks)
def get(self, request, *args, **kwargs):
member_id = self.kwargs.get("player_id")
time_period = self.kwargs.get("time_period")
player = Member.objects.get(pk=member_id)
ratings = None
min_ratings = 3
if time_period == 1:
ratings = (
player.ratings_set.all()
.filter(elab_date__year__gte=self.__get_less_current_date(52).year)
.order_by("elab_date")
)
elif time_period == 5:
ratings = (
player.ratings_set.all()
.filter(elab_date__year__gte=self.__get_less_current_date(260).year)
.annotate(week_date=TruncWeek("elab_date"))
.values("week_date")
.annotate(Avg("rating"), Avg("sigma"))
.order_by("week_date")
)
elif time_period == 10:
ratings = (
player.ratings_set.all()
.filter(elab_date__year__gte=self.__get_less_current_date(520).year)
.annotate(month_date=TruncMonth("elab_date"))
.values("month_date")
.annotate(Avg("rating"), Avg("sigma"))
.order_by("month_date")
)
if ratings == None or ratings.count() < min_ratings:
return JsonResponse(
{
"status": "not enough data",
"status_message": "Not enough data to produce a rating graph.",
}
)
return JsonResponse(self.__get_ratings_json(ratings))
| mit | 273f332138770e5e2a6c9ccc8438f8b5 | 34.240741 | 88 | 0.524172 | 4.023256 | false | false | false | false |
usgo/agagd | agagd/agagd/settings/base.py | 1 | 6757 | import os
import django.conf.global_settings as DEFAULT_SETTINGS
PROJECT_ROOT = os.environ["PROJECT_ROOT"]
# AGAGD Administrators Configuration:
#
# This assumes AGAGD_ADMINS environmental variable is formated as follows:
# 'webmaster:webmaster@usgo.org,agagd_webmaster:agagd+webmaster@usgo.org'
#
# For local configurations the ADMINS defaults to [('Admin', 'admin@localhost.local')]
ADMINS = []
if os.getenv("AGAGD_ADMINS") == None:
ADMINS.append(("Admin", "admin@localhost.local"))
else:
ADMINS = [
(value.split(":")[0], value.split(":")[1])
for value in os.environ.get("AGAGD_ADMINS").split(",")
]
MANAGERS = ADMINS
# Allow only the default ALLOWED_HOSTS ['.localhost', '127.0.0.1', '[::1]'].
ALLOWED_HOSTS = []
# Only one allowed host is expected here.
if os.getenv("ALLOWED_HOSTS") != None:
ALLOWED_HOSTS.append(os.getenv("ALLOWED_HOSTS"))
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = "America/Chicago"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = "/media/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = "/tmp/static/"
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Additional locations of static files
STATICFILES_DIRS = (os.path.join(PROJECT_ROOT, "static"),)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"django.contrib.staticfiles.finders.DefaultStorageFinder",
)
MIDDLEWARE = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
# Uncomment the next line for simple clickjacking protection:
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "agagd.urls"
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = "agagd.wsgi.application"
# Changed TEMPLATES because existing template related settings are depreciated
# https://docs.djangoproject.com/en/1.11/ref/templates/upgrading/#the-templates-settings
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(PROJECT_ROOT, "templates")],
"OPTIONS": {
"context_processors": [
# Standard context_processors
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
# Custom context_processors
"django.template.context_processors.request",
"agagd_core.context_processors.google_analytics_tracking_id",
],
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
# 'django.template.loaders.eggs.Loader',
],
},
},
{
"BACKEND": "django.template.backends.jinja2.Jinja2",
"DIRS": [os.path.join(PROJECT_ROOT, "jinja2")],
"OPTIONS": {
"environment": "agagd_core.jinga2.environment",
"context_processors": [
# Standard context_processors
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
# Custom context_processors
"django.template.context_processors.request",
"agagd_core.context_processors.google_analytics_tracking_id",
],
},
},
]
INSTALLED_APPS = [
"agagd_core",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"django_extensions",
"django_tables2",
]
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
}
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
}
},
}
| mit | 4be9fa20bac6e92269589a286a8063a4 | 35.524324 | 88 | 0.667308 | 3.86335 | false | false | false | false |
pytest-dev/execnet | doc/example/sysinfo.py | 1 | 4881 | # -*- coding: utf-8 -*-
"""
sysinfo.py [host1] [host2] [options]
obtain system info from remote machine.
(c) Holger Krekel, MIT license
"""
import optparse
import re
import sys
import execnet
import py
parser = optparse.OptionParser(usage=__doc__)
parser.add_option(
"-f",
"--sshconfig",
action="store",
dest="ssh_config",
default=None,
help="use given ssh config file,"
" and add info all contained hosts for getting info",
)
parser.add_option(
"-i",
"--ignore",
action="store",
dest="ignores",
default=None,
help="ignore hosts " "(useful if the list of hostnames come from a file list)",
)
def parsehosts(path):
path = py.path.local(path)
l = []
rex = re.compile(r"Host\s*(\S+)")
for line in path.readlines():
m = rex.match(line)
if m is not None:
(sshname,) = m.groups()
l.append(sshname)
return l
class RemoteInfo:
def __init__(self, gateway):
self.gw = gateway
self._cache = {}
def exreceive(self, execstring):
if execstring not in self._cache:
channel = self.gw.remote_exec(execstring)
self._cache[execstring] = channel.receive()
return self._cache[execstring]
def getmodattr(self, modpath):
module = modpath.split(".")[0]
return self.exreceive(
"""
import %s
channel.send(%s)
"""
% (module, modpath)
)
def islinux(self):
return self.getmodattr("sys.platform").find("linux") != -1
def getfqdn(self):
return self.exreceive(
"""
import socket
channel.send(socket.getfqdn())
"""
)
def getmemswap(self):
if self.islinux():
return self.exreceive(
r"""
import commands, re
out = commands.getoutput("free")
mem = re.search(r"Mem:\s+(\S*)", out).group(1)
swap = re.search(r"Swap:\s+(\S*)", out).group(1)
channel.send((mem, swap))
"""
)
def getcpuinfo(self):
if self.islinux():
return self.exreceive(
"""
# a hyperthreaded cpu core only counts as 1, although it
# is present as 2 in /proc/cpuinfo. Counting it as 2 is
# misleading because it is *by far* not as efficient as
# two independent cores.
cpus = {}
cpuinfo = {}
f = open("/proc/cpuinfo")
lines = f.readlines()
f.close()
for line in lines + ['']:
if line.strip():
key, value = line.split(":", 1)
cpuinfo[key.strip()] = value.strip()
else:
corekey = (cpuinfo.get("physical id"),
cpuinfo.get("core id"))
cpus[corekey] = 1
numcpus = len(cpus)
model = cpuinfo.get("model name")
channel.send((numcpus, model))
"""
)
def debug(*args):
print >> sys.stderr, " ".join(map(str, args))
def error(*args):
debug("ERROR", args[0] + ":", *args[1:])
def getinfo(sshname, ssh_config=None, loginfo=sys.stdout):
if ssh_config:
spec = "ssh=-F {} {}".format(ssh_config, sshname)
else:
spec += "ssh=%s" % sshname
debug("connecting to", repr(spec))
try:
gw = execnet.makegateway(spec)
except IOError:
error("could not get sshgatway", sshname)
else:
ri = RemoteInfo(gw)
# print "%s info:" % sshname
prefix = sshname.upper() + " "
print >> loginfo, prefix, "fqdn:", ri.getfqdn()
for attr in ("sys.platform", "sys.version_info"):
loginfo.write("{} {}: ".format(prefix, attr))
loginfo.flush()
value = ri.getmodattr(attr)
loginfo.write(str(value))
loginfo.write("\n")
loginfo.flush()
memswap = ri.getmemswap()
if memswap:
mem, swap = memswap
print >> loginfo, prefix, "Memory:", mem, "Swap:", swap
cpuinfo = ri.getcpuinfo()
if cpuinfo:
numcpu, model = cpuinfo
print >> loginfo, prefix, "number of cpus:", numcpu
print >> loginfo, prefix, "cpu model", model
return ri
if __name__ == "__main__":
options, args = parser.parse_args()
hosts = list(args)
ssh_config = options.ssh_config
if ssh_config:
hosts.extend(parsehosts(ssh_config))
ignores = options.ignores or ()
if ignores:
ignores = ignores.split(",")
for host in hosts:
if host not in ignores:
getinfo(host, ssh_config=ssh_config)
| mit | 4d8c478fea1690acc585229e385b62c7 | 27.051724 | 83 | 0.509527 | 3.879968 | false | true | false | false |
hhatto/autopep8 | test/inspect_example.py | 15 | 104371 | """Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues(), getcallargs() - get info about function arguments
getfullargspec() - same, with support for Python-3000 features
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
signature() - get a Signature object for the callable
"""
# This module is in the public domain. No warranties.
__author__ = ('Ka-Ping Yee <ping@lfw.org>',
'Yury Selivanov <yselivanov@sprymix.com>')
import ast
import importlib.machinery
import itertools
import linecache
import os
import re
import sys
import tokenize
import token
import types
import warnings
import functools
import builtins
from operator import attrgetter
from collections import namedtuple, OrderedDict
# Create constants for the compiler flags in Include/code.h
# We try to get them from dis to avoid duplication, but fall
# back to hard-coding so the dependency is optional
try:
    # Preferred path: reuse the flag-name table maintained by the dis module.
    from dis import COMPILER_FLAG_NAMES as _flag_names
except ImportError:
    # Fallback: hard-code the values from Include/code.h.
    CO_OPTIMIZED, CO_NEWLOCALS = 0x1, 0x2
    CO_VARARGS, CO_VARKEYWORDS = 0x4, 0x8
    CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
else:
    # Export every flag as a module-level CO_<NAME> constant.
    mod_dict = globals()
    for k, v in _flag_names.items():
        mod_dict["CO_" + v] = k
# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
    """Return true if the object is a module.

    Modules expose __doc__ and __cached__, plus __file__ for modules that
    were loaded from a file (built-in modules lack it)."""
    result = isinstance(object, types.ModuleType)
    return result
def isclass(object):
    """Return true if the object is a class.

    Classes expose __doc__ and __module__ (the module in which the class
    was defined)."""
    result = isinstance(object, type)
    return result
def ismethod(object):
    """Return true if the object is a bound instance method.

    Bound methods expose __doc__, __name__, __func__ (the underlying
    function) and __self__ (the instance the method is bound to)."""
    result = isinstance(object, types.MethodType)
    return result
def ismethoddescriptor(object):
    """Return true if the object is a method descriptor, i.e. an object
    whose type defines __get__ but not __set__ (for example int.__add__).

    Objects already recognised by isclass(), ismethod() or isfunction()
    are excluded, because those tests promise strictly more (e.g. the
    presence of a __func__ attribute for bound methods)."""
    if isclass(object) or ismethod(object) or isfunction(object):
        return False  # reported by the more specific predicates instead
    kind = type(object)
    has_get = hasattr(kind, "__get__")
    has_set = hasattr(kind, "__set__")
    return has_get and not has_set
def isdatadescriptor(object):
    """Return true if the object is a data descriptor, i.e. an object whose
    type defines both __get__ and __set__ (properties, getsets, members).

    Such objects usually also carry __name__ and __doc__, but that is not
    guaranteed.  Classes, bound methods and plain functions are excluded."""
    if isclass(object) or ismethod(object) or isfunction(object):
        return False  # reported by the more specific predicates instead
    descriptor_type = type(object)
    return hasattr(descriptor_type, "__set__") and hasattr(descriptor_type, "__get__")
if hasattr(types, 'MemberDescriptorType'):
    # CPython and equivalent implementations expose the concrete type.
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are the specialized descriptors created by
        extension modules (and by __slots__)."""
        return isinstance(object, types.MemberDescriptorType)
else:
    # Other implementations have no such type, so nothing can match.
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are the specialized descriptors created by
        extension modules (and by __slots__)."""
        return False
if hasattr(types, 'GetSetDescriptorType'):
    # CPython and equivalent implementations expose the concrete type.
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are the specialized descriptors defined in
        extension modules (e.g. frame.f_locals)."""
        return isinstance(object, types.GetSetDescriptorType)
else:
    # Other implementations have no such type, so nothing can match.
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are the specialized descriptors defined in
        extension modules (e.g. frame.f_locals)."""
        return False
def isfunction(object):
    """Return true if the object is a user-defined (pure Python) function.

    Such functions expose __doc__, __name__, __code__, __defaults__,
    __globals__, __annotations__ and __kwdefaults__."""
    result = isinstance(object, types.FunctionType)
    return result
def isgeneratorfunction(object):
    """Return true if the object is a user-defined generator function,
    i.e. a function or method whose code object carries the CO_GENERATOR
    compiler flag."""
    if not (isfunction(object) or ismethod(object)):
        return False
    return bool(object.__code__.co_flags & CO_GENERATOR)
def isgenerator(object):
    """Return true if the object is a generator (an already-created
    generator iterator, not a generator function).

    Generators support iteration and expose gi_code, gi_frame, gi_running
    plus the send()/throw()/close() protocol."""
    result = isinstance(object, types.GeneratorType)
    return result
def istraceback(object):
    """Return true if the object is a traceback.

    Tracebacks expose tb_frame, tb_lasti, tb_lineno and tb_next."""
    result = isinstance(object, types.TracebackType)
    return result
def isframe(object):
    """Return true if the object is an execution frame.

    Frames expose f_back, f_builtins, f_code, f_globals, f_lasti,
    f_lineno, f_locals and f_trace."""
    result = isinstance(object, types.FrameType)
    return result
def iscode(object):
    """Return true if the object is a code object.

    Code objects expose co_argcount, co_code, co_consts, co_filename,
    co_firstlineno, co_flags, co_lnotab, co_name, co_names, co_nlocals,
    co_stacksize and co_varnames."""
    result = isinstance(object, types.CodeType)
    return result
def isbuiltin(object):
    """Return true if the object is a built-in function or built-in method.

    Built-ins expose __doc__, __name__ and __self__ (the bound instance,
    or None)."""
    result = isinstance(object, types.BuiltinFunctionType)
    return result
def isroutine(object):
    """Return true if the object is any kind of function or method
    (built-in, user-defined, bound method, or method descriptor)."""
    checks = (isbuiltin, isfunction, ismethod, ismethoddescriptor)
    return any(check(object) for check in checks)
def isabstract(object):
    """Return true if the object is an abstract base class (ABC), i.e. a
    class whose type flags include Py_TPFLAGS_IS_ABSTRACT."""
    if not isinstance(object, type):
        return False
    return bool(object.__flags__ & TPFLAGS_IS_ABSTRACT)
def getmembers(object, predicate=None):
    """Return all members of an object as (name, value) pairs sorted by name.
    Optionally, only return members that satisfy a given predicate."""
    if isclass(object):
        mro = (object,) + getmro(object)
    else:
        mro = ()
    results = []
    processed = set()
    names = dir(object)
    # Add any DynamicClassAttributes to the list of names if object is a class;
    # this may result in duplicate entries if, for example, a virtual
    # attribute with the same name as a DynamicClassAttribute exists
    try:
        for base in object.__bases__:
            for k, v in base.__dict__.items():
                if isinstance(v, types.DynamicClassAttribute):
                    names.append(k)
    except AttributeError:
        # object has no __bases__ (it is not a class); nothing to add.
        pass
    for key in names:
        # First try to get the value via getattr. Some descriptors don't
        # like calling their __get__ (see bug #1785), so fall back to
        # looking in the __dict__.
        try:
            value = getattr(object, key)
            # handle the duplicate key: force the __dict__ lookup below
            if key in processed:
                raise AttributeError
        except AttributeError:
            for base in mro:
                if key in base.__dict__:
                    value = base.__dict__[key]
                    break
            else:
                # could be a (currently) missing slot member, or a buggy
                # __dir__; discard and move on
                continue
        if not predicate or predicate(value):
            results.append((key, value))
        processed.add(key)
    results.sort(key=lambda pair: pair[0])
    return results
# Record type returned (in a list) by classify_class_attrs().
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.
    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:
        0. The name (a string).
        1. The kind of attribute this is, one of these strings:
               'class method'    created via classmethod()
               'static method'   created via staticmethod()
               'property'        created via property()
               'method'          any other flavor of method or descriptor
               'data'            not a method
        2. The class which defined this attribute (a class).
        3. The object as obtained by calling getattr; if this fails, or if the
           resulting object does not live anywhere in the class' mro (including
           metaclasses) then the object is looked up in the defining class's
           dict (found by walking the mro).
    If one of the items in dir(cls) is stored in the metaclass it will now
    be discovered and not have None be listed as the class in which it was
    defined.  Any items whose home class cannot be discovered are skipped.
    """
    mro = getmro(cls)
    metamro = getmro(type(cls)) # for attributes stored in the metaclass
    metamro = tuple([cls for cls in metamro if cls not in (type, object)])
    class_bases = (cls,) + mro
    all_bases = class_bases + metamro
    names = dir(cls)
    # Add any DynamicClassAttributes to the list of names;
    # this may result in duplicate entries if, for example, a virtual
    # attribute with the same name as a DynamicClassAttribute exists.
    for base in mro:
        for k, v in base.__dict__.items():
            if isinstance(v, types.DynamicClassAttribute):
                names.append(k)
    result = []
    processed = set()
    for name in names:
        # Get the object associated with the name, and where it was defined.
        # Normal objects will be looked up with both getattr and directly in
        # its class' dict (in case getattr fails [bug #1785], and also to look
        # for a docstring).
        # For DynamicClassAttributes on the second pass we only look in the
        # class's dict.
        #
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr.  Static and class methods are dramatic examples.
        homecls = None
        get_obj = None
        dict_obj = None
        if name not in processed:
            try:
                if name == '__dict__':
                    raise Exception("__dict__ is special, don't want the proxy")
                get_obj = getattr(cls, name)
            except Exception as exc:
                # getattr failed; we fall back to the __dict__ search below.
                pass
            else:
                homecls = getattr(get_obj, "__objclass__", homecls)
                if homecls not in class_bases:
                    # if the resulting object does not live somewhere in the
                    # mro, drop it and search the mro manually
                    homecls = None
                    last_cls = None
                    # first look in the classes
                    for srch_cls in class_bases:
                        srch_obj = getattr(srch_cls, name, None)
                        if srch_obj == get_obj:
                            last_cls = srch_cls
                    # then check the metaclasses
                    for srch_cls in metamro:
                        try:
                            srch_obj = srch_cls.__getattr__(cls, name)
                        except AttributeError:
                            continue
                        if srch_obj == get_obj:
                            last_cls = srch_cls
                    if last_cls is not None:
                        homecls = last_cls
        for base in all_bases:
            if name in base.__dict__:
                dict_obj = base.__dict__[name]
                if homecls not in metamro:
                    homecls = base
                break
        if homecls is None:
            # unable to locate the attribute anywhere, most likely due to
            # buggy custom __dir__; discard and move on
            continue
        obj = get_obj or dict_obj
        # Classify the object or its descriptor.
        if isinstance(dict_obj, staticmethod):
            kind = "static method"
            obj = dict_obj
        elif isinstance(dict_obj, classmethod):
            kind = "class method"
            obj = dict_obj
        elif isinstance(dict_obj, property):
            kind = "property"
            obj = dict_obj
        elif isroutine(obj):
            kind = "method"
        else:
            kind = "data"
        result.append(Attribute(name, kind, homecls, obj))
        processed.add(name)
    return result
# ----------------------------------------------------------- class helpers
def getmro(cls):
    """Return the tuple of base classes of *cls* — including *cls* itself —
    in method resolution order."""
    return cls.__mro__
# -------------------------------------------------------- function helpers
def unwrap(func, *, stop=None):
    """Return the object wrapped by *func*.

    Follows the chain of __wrapped__ attributes and returns the last object
    in the chain.

    *stop* is an optional callback taking an object from the wrapper chain;
    when it returns a true value, unwrapping terminates early at that
    object.  If it never returns true, the end of the chain is returned as
    usual (signature() uses this to stop at the first __signature__).

    Raises ValueError if the wrapper chain contains a cycle.
    """
    if stop is None:
        def keep_unwrapping(candidate):
            return hasattr(candidate, '__wrapped__')
    else:
        def keep_unwrapping(candidate):
            return hasattr(candidate, '__wrapped__') and not stop(candidate)
    original = func   # kept only for the error message below
    # Memoise by id so non-hashable wrappers are tolerated.
    seen = {id(func)}
    while keep_unwrapping(func):
        func = func.__wrapped__
        key = id(func)
        if key in seen:
            raise ValueError('wrapper loop when unwrapping {!r}'.format(original))
        seen.add(key)
    return func
# -------------------------------------------------- source code extraction
def indentsize(line):
    """Return the number of leading whitespace columns in *line*, with
    tabs expanded to spaces."""
    expanded = line.expandtabs()
    return len(expanded) - len(expanded.lstrip())
def getdoc(object):
    """Return the cleaned-up documentation string for an object, or None.

    Tabs are expanded and any indentation that can be uniformly removed
    from the second line onwards is stripped (see cleandoc)."""
    doc = getattr(object, '__doc__', None)
    if not isinstance(doc, str):
        return None
    return cleandoc(doc)
def cleandoc(doc):
    """Clean up indentation from docstring *doc*.

    Tabs are expanded, the first line is left-stripped, and any whitespace
    that can be uniformly removed from the second line onwards is removed.
    Leading and trailing blank lines are dropped.  Returns None if the
    string cannot be expanded (UnicodeError)."""
    try:
        lines = doc.expandtabs().split('\n')
    except UnicodeError:
        return None
    # Minimum indentation over all non-blank lines after the first.
    margin = sys.maxsize
    for line in lines[1:]:
        body_len = len(line.lstrip())
        if body_len:
            margin = min(margin, len(line) - body_len)
    if lines:
        lines[0] = lines[0].lstrip()
    if margin < sys.maxsize:
        lines[1:] = [line[margin:] for line in lines[1:]]
    # Drop trailing and then leading blank lines.
    while lines and not lines[-1]:
        lines.pop()
    while lines and not lines[0]:
        lines.pop(0)
    return '\n'.join(lines)
def getfile(object):
    """Work out which source or compiled file an object was defined in.

    Raises TypeError for objects (such as built-ins) whose defining file
    cannot be determined.
    """
    if ismodule(object):
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in module'.format(object))
    if isclass(object):
        if hasattr(object, '__module__'):
            module = sys.modules.get(object.__module__)
            if hasattr(module, '__file__'):
                return module.__file__
        raise TypeError('{!r} is a built-in class'.format(object))
    # Normalize methods, functions, tracebacks and frames down to the
    # underlying code object, which records its file name.
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        return object.co_filename
    raise TypeError('{!r} is not a module, class, method, '
                    'function, traceback, frame, or code object'.format(object))
# Record type returned by the deprecated getmoduleinfo().
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')
def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file.

    Deprecated; emits a DeprecationWarning on every call.  Returns None
    when no known suffix matches.
    NOTE(review): relies on the ``imp`` module, which was removed in
    Python 3.12 — this function cannot work there.
    """
    warnings.warn('inspect.getmoduleinfo() is deprecated', DeprecationWarning,
                  2)
    with warnings.catch_warnings():
        # Importing imp itself raises a PendingDeprecationWarning; hide it.
        warnings.simplefilter('ignore', PendingDeprecationWarning)
        import imp
    filename = os.path.basename(path)
    suffixes = [(-len(suffix), suffix, mode, mtype)
                    for suffix, mode, mtype in imp.get_suffixes()]
    suffixes.sort() # try longest suffixes first, in case they overlap
    for neglen, suffix, mode, mtype in suffixes:
        if filename[neglen:] == suffix:
            return ModuleInfo(filename[:neglen], suffix, mode, mtype)
def getmodulename(path):
    """Return the module name for a given file, or None if the file's
    suffix is not a recognised module suffix."""
    fname = os.path.basename(path)
    # Sort candidate suffixes longest-first, in case they overlap.
    candidates = sorted((-len(s), s) for s in importlib.machinery.all_suffixes())
    for neglen, suffix in candidates:
        if fname.endswith(suffix):
            return fname[:neglen]
    return None
def getsourcefile(object):
    """Return the filename that can be used to locate an object's source.
    Return None if no way can be identified to get the source.
    """
    filename = getfile(object)
    bytecode_suffixes = (importlib.machinery.DEBUG_BYTECODE_SUFFIXES +
                         importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES)
    if filename.endswith(tuple(bytecode_suffixes)):
        # Map a .pyc/.pyo path back to the corresponding .py path.
        filename = (os.path.splitext(filename)[0] +
                    importlib.machinery.SOURCE_SUFFIXES[0])
    elif filename.endswith(tuple(importlib.machinery.EXTENSION_SUFFIXES)):
        return None  # extension modules have no Python source
    if os.path.exists(filename):
        return filename
    # Only return a non-existent filename if the module has a PEP 302 loader,
    if getattr(getmodule(object, filename), '__loader__', None) is not None:
        return filename
    # or it is in the linecache.
    if filename in linecache.cache:
        return filename
    return None
def getabsfile(object, _filename=None):
    """Return an absolute, case-normalized path to the source or compiled
    file for an object.

    The idea is for each object to have a unique origin, so the result is
    normalized as much as possible."""
    filename = _filename
    if filename is None:
        filename = getsourcefile(object) or getfile(object)
    return os.path.normcase(os.path.abspath(filename))
# Caches maintained by getmodule(): absolute file name -> module name,
# and module name -> file name, respectively.
modulesbyfile = {}
_filesbymodname = {}
def getmodule(object, _filename=None):
    """Return the module an object was defined in, or None if not found."""
    if ismodule(object):
        return object
    if hasattr(object, '__module__'):
        return sys.modules.get(object.__module__)
    # Try the filename to modulename cache
    if _filename is not None and _filename in modulesbyfile:
        return sys.modules.get(modulesbyfile[_filename])
    # Try the cache again with the absolute file name
    try:
        file = getabsfile(object, _filename)
    except TypeError:
        return None
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Update the filename to module name cache and check yet again
    # Copy sys.modules in order to cope with changes while iterating
    for modname, module in list(sys.modules.items()):
        if ismodule(module) and hasattr(module, '__file__'):
            f = module.__file__
            if f == _filesbymodname.get(modname, None):
                # Have already mapped this module, so skip it
                continue
            _filesbymodname[modname] = f
            f = getabsfile(module)
            # Always map to the name the module knows itself by
            modulesbyfile[f] = modulesbyfile[
                os.path.realpath(f)] = module.__name__
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Check the main module
    main = sys.modules['__main__']
    if not hasattr(object, '__name__'):
        return None
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # Check builtins
    builtin = sys.modules['builtins']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
    # Falls off the end (returning None) when the module cannot be found.
def findsource(object):
    """Return the entire source file and starting line number for an object.
    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An OSError
    is raised if the source code cannot be retrieved."""
    file = getsourcefile(object)
    if file:
        # Invalidate cache if needed.
        linecache.checkcache(file)
    else:
        file = getfile(object)
        # Allow filenames in form of "<something>" to pass through.
        # `doctest` monkeypatches `linecache` module to enable
        # inspection, so let `linecache.getlines` to be called.
        if not (file.startswith('<') and file.endswith('>')):
            raise OSError('source code not available')
    module = getmodule(object, file)
    if module:
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise OSError('could not get source code')
    if ismodule(object):
        return lines, 0
    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise OSError('could not find class definition')
    # Normalize methods/functions/tracebacks/frames down to a code object.
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise OSError('could not find function definition')
        lnum = object.co_firstlineno - 1
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        # Walk backwards from the reported first line so that decorator
        # lines preceding the definition are included.
        while lnum > 0:
            if pat.match(lines[lnum]): break
            lnum = lnum - 1
        return lines, lnum
    raise OSError('could not find code object')
def getcomments(object):
    """Get lines of comments immediately preceding an object's source code.
    Returns None when source can't be found.
    """
    try:
        lines, lnum = findsource(object)
    except (OSError, TypeError):
        return None
    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        if lines and lines[0][:2] == '#!': start = 1
        while start < len(lines) and lines[start].strip() in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(lines[end].expandtabs())
                end = end + 1
            return ''.join(comments)
    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and lines[end].lstrip()[:1] == '#' and \
            indentsize(lines[end]) == indent:
            comments = [lines[end].expandtabs().lstrip()]
            if end > 0:
                # Collect consecutive comment lines, walking upwards.
                end = end - 1
                comment = lines[end].expandtabs().lstrip()
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = lines[end].expandtabs().lstrip()
            # Strip bare '#' separator lines from both ends of the block.
            while comments and comments[0].strip() == '#':
                comments[:1] = []
            while comments and comments[-1].strip() == '#':
                comments[-1:] = []
            return ''.join(comments)
    # Falls off the end (returning None) when no comment block is found.
class EndOfBlock(Exception):
    """Raised by BlockFinder.tokeneater() to signal that the code block
    being scanned has ended."""
class BlockFinder:
    """Provide a tokeneater() method to detect the end of a code block."""
    def __init__(self):
        self.indent = 0        # indentation depth relative to the block start
        self.islambda = False  # True when the scanned block is a lambda
        self.started = False   # True once def/class/lambda has been seen
        self.passline = False  # True while skipping to the end of a line
        self.last = 1          # last line number known to belong to the block
    def tokeneater(self, type, token, srowcol, erowcol, line):
        # Feed tokens (as produced by tokenize.generate_tokens) one at a
        # time; raises EndOfBlock once the block is known to be complete.
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srowcol[0]
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            # not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock
def getblock(lines):
    """Extract the block of code at the top of the given list of lines."""
    finder = BlockFinder()
    token_stream = tokenize.generate_tokens(iter(lines).__next__)
    try:
        for tok in token_stream:
            finder.tokeneater(*tok)
    except (EndOfBlock, IndentationError):
        # EndOfBlock marks a clean stop; IndentationError means the input
        # is truncated mid-block, so we keep what we have.
        pass
    return lines[:finder.last]
def getsourcelines(object):
    """Return a list of source lines and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The returned line number is where the object's first
    source line was found (0 for a module, whose full file is returned).
    Raises OSError if the source code cannot be retrieved."""
    lines, lnum = findsource(object)
    if ismodule(object):
        return lines, 0
    return getblock(lines[lnum:]), lnum + 1
def getsource(object):
    """Return the text of the source code for an object as a single string.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  Raises OSError if the source cannot be retrieved."""
    source_lines, _ = getsourcelines(object)
    return ''.join(source_lines)
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
    """Recursive helper for getclasstree(): emit (class, bases) entries,
    nesting each class's children in a sub-list right after it.

    Note: sorts *classes* in place by (module, name)."""
    classes.sort(key=attrgetter('__module__', '__name__'))
    results = []
    for klass in classes:
        results.append((klass, klass.__bases__))
        if klass in children:
            results.append(walktree(children[klass], children, klass))
    return results
def getclasstree(classes, unique=False):
    """Arrange the given list of classes into a hierarchy of nested lists.

    A nested list contains classes derived from the class whose entry
    immediately precedes it.  Each entry is a 2-tuple of a class and the
    tuple of its base classes.  With unique=True, exactly one entry appears
    for each class; otherwise classes using multiple inheritance (and their
    descendants) may appear several times."""
    children = {}
    roots = []
    for klass in classes:
        bases = klass.__bases__
        if bases:
            for parent in bases:
                children.setdefault(parent, [])
                if klass not in children[parent]:
                    children[parent].append(klass)
                if unique and parent in classes:
                    break
        elif klass not in roots:
            roots.append(klass)
    # Parents that are not themselves in *classes* become extra roots.
    for parent in children:
        if parent not in classes:
            roots.append(parent)
    return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
# Record type returned by getargs().
Arguments = namedtuple('Arguments', 'args, varargs, varkw')
def getargs(co):
    """Get information about the arguments accepted by a code object.

    Returns Arguments(args, varargs, varkw): 'args' is the list of argument
    names with keyword-only names appended; 'varargs' and 'varkw' are the
    names of the * and ** arguments, or None."""
    positional, star_args, kwonly, star_kwargs = _getfullargs(co)
    return Arguments(positional + kwonly, star_args, star_kwargs)
def _getfullargs(co):
"""Get information about the arguments accepted by a code object.
Four things are returned: (args, varargs, kwonlyargs, varkw), where
'args' and 'kwonlyargs' are lists of argument names, and 'varargs'
and 'varkw' are the names of the * and ** arguments or None."""
if not iscode(co):
raise TypeError('{!r} is not a code object'.format(co))
nargs = co.co_argcount
names = co.co_varnames
nkwargs = co.co_kwonlyargcount
args = list(names[:nargs])
kwonlyargs = list(names[nargs:nargs+nkwargs])
step = 0
nargs += nkwargs
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return args, varargs, kwonlyargs, varkw
# Record type returned by the legacy getargspec().
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
def getargspec(func):
    """Get the names and default values of a function's arguments.

    Returns ArgSpec(args, varargs, varkw, defaults): 'args' lists the
    argument names (including keyword-only names), 'varargs'/'varkw' name
    the * and ** arguments or are None, and 'defaults' holds the default
    values of the last n arguments.

    Use getfullargspec() for Python-3000 code: this function raises
    ValueError when *func* has keyword-only arguments or annotations.
    """
    spec = getfullargspec(func)
    if spec.kwonlyargs or spec.annotations:
        raise ValueError("Function has keyword-only arguments or annotations"
                         ", use getfullargspec() API which can support them")
    return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults)
# Record type returned by getfullargspec().
FullArgSpec = namedtuple('FullArgSpec',
    'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
def getfullargspec(func):
    """Get the names and default values of a callable object's arguments.
    A tuple of seven things is returned:
    (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
    'args' is a list of the argument names.
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.
    'kwonlyargs' is a list of keyword-only argument names.
    'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
    'annotations' is a dictionary mapping argument names to annotations.
    The first four items in the tuple correspond to getargspec().
    Raises TypeError when the signature of *func* cannot be determined.
    """
    try:
        # Re: `skip_bound_arg=False`
        #
        # There is a notable difference in behaviour between getfullargspec
        # and Signature: the former always returns 'self' parameter for bound
        # methods, whereas the Signature always shows the actual calling
        # signature of the passed object.
        #
        # To simulate this behaviour, we "unbind" bound methods, to trick
        # inspect.signature to always return their first parameter ("self",
        # usually)
        # Re: `follow_wrapper_chains=False`
        #
        # getfullargspec() historically ignored __wrapped__ attributes,
        # so we ensure that remains the case in 3.3+
        sig = _signature_internal(func,
                                  follow_wrapper_chains=False,
                                  skip_bound_arg=False)
    except Exception as ex:
        # Most of the times 'signature' will raise ValueError.
        # But, it can also raise AttributeError, and, maybe something
        # else. So to be fully backwards compatible, we catch all
        # possible exceptions here, and reraise a TypeError.
        raise TypeError('unsupported callable') from ex
    args = []
    varargs = None
    varkw = None
    kwonlyargs = []
    annotations = {}
    # Fix: `defaults` used to be initialized twice; one redundant
    # assignment has been removed.
    defaults = ()
    kwdefaults = {}
    if sig.return_annotation is not sig.empty:
        annotations['return'] = sig.return_annotation
    # Translate each Parameter back into the legacy tuple fields.
    for param in sig.parameters.values():
        kind = param.kind
        name = param.name
        if kind is _POSITIONAL_ONLY:
            args.append(name)
        elif kind is _POSITIONAL_OR_KEYWORD:
            args.append(name)
            if param.default is not param.empty:
                defaults += (param.default,)
        elif kind is _VAR_POSITIONAL:
            varargs = name
        elif kind is _KEYWORD_ONLY:
            kwonlyargs.append(name)
            if param.default is not param.empty:
                kwdefaults[name] = param.default
        elif kind is _VAR_KEYWORD:
            varkw = name
        if param.annotation is not param.empty:
            annotations[name] = param.annotation
    if not kwdefaults:
        # compatibility with 'func.__kwdefaults__'
        kwdefaults = None
    if not defaults:
        # compatibility with 'func.__defaults__'
        defaults = None
    return FullArgSpec(args, varargs, varkw, defaults,
                       kwonlyargs, kwdefaults, annotations)
# Record type returned by getargvalues().
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
def getargvalues(frame):
    """Get information about arguments passed into a particular frame.
    A tuple of four things is returned: (args, varargs, varkw, locals).
    'args' is a list of the argument names.
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'locals' is the locals dictionary of the given frame."""
    arg_names, star_name, dstar_name = getargs(frame.f_code)
    return ArgInfo(arg_names, star_name, dstar_name, frame.f_locals)
def formatannotation(annotation, base_module=None):
    """Render an annotation for display in a formatted signature.

    Classes are shown by name, qualified with their defining module unless
    that module is 'builtins' or matches *base_module*; any non-class
    annotation falls back to its repr().
    """
    if not isinstance(annotation, type):
        return repr(annotation)
    module = annotation.__module__
    if module in ('builtins', base_module):
        return annotation.__name__
    return '{}.{}'.format(module, annotation.__name__)
def formatannotationrelativeto(object):
    """Return a one-argument annotation formatter bound to *object*'s module.

    Annotations defined in the same module as *object* will render without
    the module qualifier.
    """
    owner_module = getattr(object, '__module__', None)
    def _formatannotation(annotation):
        return formatannotation(annotation, owner_module)
    return _formatannotation
# NOTE(review): kwonlydefaults/annotations use mutable {} defaults; they are
# only ever read below, never mutated, so the shared objects are harmless.
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  kwonlyargs=(), kwonlydefaults={}, annotations={},
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  formatreturns=lambda text: ' -> ' + text,
                  formatannotation=formatannotation):
    """Format an argument spec from the values returned by getargspec
    or getfullargspec.
    The first seven arguments are (args, varargs, varkw, defaults,
    kwonlyargs, kwonlydefaults, annotations). The other five arguments
    are the corresponding optional formatting functions that are called to
    turn names and values into strings. The last argument is an optional
    function to format the sequence of arguments."""
    def formatargandannotation(arg):
        # Render a single name, appending ': <annotation>' when one exists.
        result = formatarg(arg)
        if arg in annotations:
            result += ': ' + formatannotation(annotations[arg])
        return result
    specs = []
    if defaults:
        # Index of the first positional argument that carries a default.
        firstdefault = len(args) - len(defaults)
    for i, arg in enumerate(args):
        spec = formatargandannotation(arg)
        if defaults and i >= firstdefault:
            spec = spec + formatvalue(defaults[i - firstdefault])
        specs.append(spec)
    if varargs is not None:
        specs.append(formatvarargs(formatargandannotation(varargs)))
    else:
        if kwonlyargs:
            # No *args, but keyword-only args follow: emit the bare '*'
            # separator that introduces them.
            specs.append('*')
    if kwonlyargs:
        for kwonlyarg in kwonlyargs:
            spec = formatargandannotation(kwonlyarg)
            if kwonlydefaults and kwonlyarg in kwonlydefaults:
                spec += formatvalue(kwonlydefaults[kwonlyarg])
            specs.append(spec)
    if varkw is not None:
        specs.append(formatvarkw(formatargandannotation(varkw)))
    result = '(' + ', '.join(specs) + ')'
    if 'return' in annotations:
        result += formatreturns(formatannotation(annotations['return']))
    return result
def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value)):
    """Format an argument spec from the 4 values returned by getargvalues.
    The first four arguments are (args, varargs, varkw, locals). The
    next four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings. The ninth
    argument is an optional function to format the sequence of arguments."""
    def render(name):
        # 'name=value' using the caller-supplied formatters.
        return formatarg(name) + formatvalue(locals[name])
    pieces = [render(name) for name in args]
    if varargs:
        pieces.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        pieces.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    return '(' + ', '.join(pieces) + ')'
def _missing_arguments(f_name, argnames, pos, values):
names = [repr(name) for name in argnames if name not in values]
missing = len(names)
if missing == 1:
s = names[0]
elif missing == 2:
s = "{} and {}".format(*names)
else:
tail = ", {} and {}".format(*names[-2:])
del names[-2:]
s = ", ".join(names) + tail
raise TypeError("%s() missing %i required %s argument%s: %s" %
(f_name, missing,
"positional" if pos else "keyword-only",
"" if missing == 1 else "s", s))
def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
atleast = len(args) - defcount
kwonly_given = len([arg for arg in kwonly if arg in values])
if varargs:
plural = atleast != 1
sig = "at least %d" % (atleast,)
elif defcount:
plural = True
sig = "from %d to %d" % (atleast, len(args))
else:
plural = len(args) != 1
sig = str(len(args))
kwonly_sig = ""
if kwonly_given:
msg = " positional argument%s (and %d keyword-only argument%s)"
kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,
"s" if kwonly_given != 1 else ""))
raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
(f_name, sig, "s" if plural else "", given, kwonly_sig,
"was" if given == 1 and not kwonly_given else "were"))
# The target callable is smuggled in as the first positional so that no
# keyword argument of the inspected function (even one literally named
# "func") can collide with this function's own parameters.
def getcallargs(*func_and_positional, **named):
    """Get the mapping of arguments to values.
    A dict is returned, with keys the function argument names (including the
    names of the * and ** arguments, if any), and values the respective bound
    values from 'positional' and 'named'."""
    func = func_and_positional[0]
    positional = func_and_positional[1:]
    spec = getfullargspec(func)
    args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec
    f_name = func.__name__
    arg2value = {}
    if ismethod(func) and func.__self__ is not None:
        # implicit 'self' (or 'cls' for classmethods) argument
        positional = (func.__self__,) + positional
    num_pos = len(positional)
    num_args = len(args)
    num_defaults = len(defaults) if defaults else 0
    # Bind positionals to named parameters first; overflow goes to *args.
    n = min(num_pos, num_args)
    for i in range(n):
        arg2value[args[i]] = positional[i]
    if varargs:
        arg2value[varargs] = tuple(positional[n:])
    possible_kwargs = set(args + kwonlyargs)
    if varkw:
        arg2value[varkw] = {}
    for kw, value in named.items():
        if kw not in possible_kwargs:
            # Unknown keyword: either collect it in **kwargs or error out.
            if not varkw:
                raise TypeError("%s() got an unexpected keyword argument %r" %
                                (f_name, kw))
            arg2value[varkw][kw] = value
            continue
        if kw in arg2value:
            # Already bound positionally above.
            raise TypeError("%s() got multiple values for argument %r" %
                            (f_name, kw))
        arg2value[kw] = value
    if num_pos > num_args and not varargs:
        _too_many(f_name, args, kwonlyargs, varargs, num_defaults,
                  num_pos, arg2value)
    if num_pos < num_args:
        # Remaining positionals: required ones must have been supplied by
        # keyword; the rest are filled from the defaults tuple.
        req = args[:num_args - num_defaults]
        for arg in req:
            if arg not in arg2value:
                _missing_arguments(f_name, req, True, arg2value)
        for i, arg in enumerate(args[num_args - num_defaults:]):
            if arg not in arg2value:
                arg2value[arg] = defaults[i]
    missing = 0
    for kwarg in kwonlyargs:
        if kwarg not in arg2value:
            if kwonlydefaults and kwarg in kwonlydefaults:
                arg2value[kwarg] = kwonlydefaults[kwarg]
            else:
                missing += 1
    if missing:
        _missing_arguments(f_name, kwonlyargs, False, arg2value)
    return arg2value
# Record type returned by getclosurevars(): mappings of the function's
# nonlocal, global and builtin references, plus the set of unresolved names.
ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound')
def getclosurevars(func):
    """
    Get the mapping of free variables to their current values.
    Returns a named tuple of dicts mapping the current nonlocal, global
    and builtin references as seen by the body of the function. A final
    set of unbound names that could not be resolved is also provided.
    """
    if ismethod(func):
        # Inspect the underlying function of a bound method.
        func = func.__func__
    if not isfunction(func):
        raise TypeError("'{!r}' is not a Python function".format(func))
    code = func.__code__
    # Nonlocal references are named in co_freevars and resolved
    # by looking them up in __closure__ by positional index
    if func.__closure__ is None:
        nonlocal_vars = {}
    else:
        nonlocal_vars = {
            var : cell.cell_contents
            for var, cell in zip(code.co_freevars, func.__closure__)
        }
    # Global and builtin references are named in co_names and resolved
    # by looking them up in __globals__ or __builtins__
    global_ns = func.__globals__
    builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
    if ismodule(builtin_ns):
        # __builtins__ may be either the module or its dict; normalize.
        builtin_ns = builtin_ns.__dict__
    global_vars = {}
    builtin_vars = {}
    unbound_names = set()
    for name in code.co_names:
        if name in ("None", "True", "False"):
            # Because these used to be builtins instead of keywords, they
            # may still show up as name references. We ignore them.
            continue
        try:
            global_vars[name] = global_ns[name]
        except KeyError:
            try:
                builtin_vars[name] = builtin_ns[name]
            except KeyError:
                unbound_names.add(name)
    return ClosureVars(nonlocal_vars, global_vars,
                       builtin_vars, unbound_names)
# -------------------------------------------------- stack frame extraction
# Record type returned by getframeinfo(): source location plus a window of
# context lines and the current line's index within that window.
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.
    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line."""
    if istraceback(frame):
        # A traceback carries its own line number; unwrap to the frame.
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not isframe(frame):
        raise TypeError('{!r} is not a frame or traceback object'.format(frame))
    filename = getsourcefile(frame) or getfile(frame)
    if context > 0:
        # Center the context window on the current line, then clamp it to
        # the bounds of the available source.
        start = lineno - 1 - context//2
        try:
            lines, lnum = findsource(frame)
        except OSError:
            lines = index = None
        else:
            start = max(start, 1)
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None
    return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
    """Get the line number from a frame object, allowing for optimization."""
    # f_lineno is a descriptor computed on demand from the code object's
    # line table, so reading the attribute is always current and correct.
    return frame.f_lineno
def getouterframes(frame, context=1):
    """Get a list of records for a frame and all higher (calling) frames.
    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    current = frame
    while current:
        # Prepend the frame object itself to the Traceback tuple fields.
        records.append((current,) + getframeinfo(current, context))
        current = current.f_back
    return records
def getinnerframes(tb, context=1):
    """Get a list of records for a traceback's frame and all lower frames.
    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    current = tb
    while current:
        # Prepend the frame object itself to the Traceback tuple fields.
        records.append((current.tb_frame,) + getframeinfo(current, context))
        current = current.tb_next
    return records
def currentframe():
    """Return the frame of the caller or None if this is not possible."""
    # sys._getframe is a CPython implementation detail; other interpreters
    # may not provide it, in which case we report "unavailable" with None.
    if hasattr(sys, "_getframe"):
        return sys._getframe(1)
    return None
def stack(context=1):
    """Return a list of records for the stack above the caller's frame."""
    caller = sys._getframe(1)
    return getouterframes(caller, context)
def trace(context=1):
    """Return a list of records for the stack below the current exception."""
    current_tb = sys.exc_info()[2]
    return getinnerframes(current_tb, context)
# ------------------------------------------------ static version of getattr
# Unique marker meaning "attribute not found" for the static-getattr helpers
# below (None could be a legitimate attribute value).
_sentinel = object()
def _static_getmro(klass):
return type.__dict__['__mro__'].__get__(klass)
def _check_instance(obj, attr):
    # Look up attr in the instance __dict__ without invoking descriptors.
    # dict.get is called unbound on purpose so that a dict subclass with an
    # overridden get() cannot run arbitrary code here.
    try:
        instance_dict = object.__getattribute__(obj, "__dict__")
    except AttributeError:
        instance_dict = {}
    return dict.get(instance_dict, attr, _sentinel)
def _check_class(klass, attr):
    # Walk the statically-obtained MRO; consult only classes whose __dict__
    # is the genuine slot descriptor, and read it directly to stay clear of
    # any descriptor machinery.
    for entry in _static_getmro(klass):
        if _shadowed_dict(type(entry)) is not _sentinel:
            continue
        entry_dict = entry.__dict__
        if attr in entry_dict:
            return entry_dict[attr]
    return _sentinel
def _is_type(obj):
    # An object is a type exactly when the type.__dict__['__mro__']
    # descriptor accepts it; anything else raises TypeError.
    try:
        _static_getmro(obj)
    except TypeError:
        return False
    else:
        return True
def _shadowed_dict(klass):
    # Determine whether any class in klass's MRO defines a '__dict__' entry
    # that shadows the normal per-class getset slot descriptor.  Returns the
    # shadowing object if found, else _sentinel.
    dict_attr = type.__dict__["__dict__"]
    for entry in _static_getmro(klass):
        try:
            class_dict = dict_attr.__get__(entry)["__dict__"]
        except KeyError:
            pass
        else:
            # The genuine slot descriptor is a GetSetDescriptor named
            # '__dict__' whose __objclass__ is this very class; anything
            # else means '__dict__' has been shadowed.
            if not (type(class_dict) is types.GetSetDescriptorType and
                    class_dict.__name__ == "__dict__" and
                    class_dict.__objclass__ is entry):
                return class_dict
    return _sentinel
def getattr_static(obj, attr, default=_sentinel):
    """Retrieve attributes without triggering dynamic lookup via the
    descriptor protocol, __getattr__ or __getattribute__.
    Note: this function may not be able to retrieve all attributes
    that getattr can fetch (like dynamically created attributes)
    and may find attributes that getattr can't (like descriptors
    that raise AttributeError). It can also return descriptor objects
    instead of instance members in some cases. See the
    documentation for details.
    """
    instance_result = _sentinel
    if not _is_type(obj):
        klass = type(obj)
        # Only trust the instance __dict__ when it is not shadowed (or is
        # the member descriptor created for __slots__ classes).
        dict_attr = _shadowed_dict(klass)
        if (dict_attr is _sentinel or
            type(dict_attr) is types.MemberDescriptorType):
            instance_result = _check_instance(obj, attr)
    else:
        klass = obj
    klass_result = _check_class(klass, attr)
    if instance_result is not _sentinel and klass_result is not _sentinel:
        # A data descriptor (has both __get__ and __set__) on the class
        # takes precedence over the instance attribute, mirroring normal
        # attribute lookup precedence.
        if (_check_class(type(klass_result), '__get__') is not _sentinel and
            _check_class(type(klass_result), '__set__') is not _sentinel):
            return klass_result
    if instance_result is not _sentinel:
        return instance_result
    if klass_result is not _sentinel:
        return klass_result
    if obj is klass:
        # for types we check the metaclass too
        for entry in _static_getmro(type(klass)):
            if _shadowed_dict(type(entry)) is _sentinel:
                try:
                    return entry.__dict__[attr]
                except KeyError:
                    pass
    if default is not _sentinel:
        return default
    raise AttributeError(attr)
# ------------------------------------------------ generator introspection
GEN_CREATED = 'GEN_CREATED'
GEN_RUNNING = 'GEN_RUNNING'
GEN_SUSPENDED = 'GEN_SUSPENDED'
GEN_CLOSED = 'GEN_CLOSED'
def getgeneratorstate(generator):
    """Get current state of a generator-iterator.

    Possible states are:
      GEN_CREATED: Waiting to start execution.
      GEN_RUNNING: Currently being executed by the interpreter.
      GEN_SUSPENDED: Currently suspended at a yield expression.
      GEN_CLOSED: Execution has completed.
    """
    if generator.gi_running:
        return GEN_RUNNING
    frame = generator.gi_frame
    if frame is None:
        # The frame is discarded once the generator finishes or is closed.
        return GEN_CLOSED
    if frame.f_lasti == -1:
        # No bytecode has executed yet.
        return GEN_CREATED
    return GEN_SUSPENDED
def getgeneratorlocals(generator):
    """
    Get the mapping of generator local variables to their current values.
    A dict is returned, with the keys the local variable names and values the
    bound values."""
    if not isgenerator(generator):
        raise TypeError("'{!r}' is not a Python generator".format(generator))
    # A finished/closed generator sets gi_frame to None; report no locals.
    frame = getattr(generator, "gi_frame", None)
    if frame is not None:
        # Use the frame we already fetched instead of re-reading
        # generator.gi_frame, which could have become None in the meantime.
        return frame.f_locals
    else:
        return {}
###############################################################################
### Function Signature Object (PEP 362)
###############################################################################
# C-level callable types that can never carry a user-defined signature.
_WrapperDescriptor = type(type.__call__)  # unbound slot wrapper
_MethodWrapper = type(all.__call__)  # bound method-wrapper
_ClassMethodWrapper = type(int.__dict__['from_bytes'])  # builtin classmethod
_NonUserDefinedCallables = (_WrapperDescriptor,
                            _MethodWrapper,
                            _ClassMethodWrapper,
                            types.BuiltinFunctionType)
def _signature_get_user_defined_method(cls, method_name):
    # Return cls.<method_name> only when it is a user-defined callable;
    # C-level callables yield None.
    meth = getattr(cls, method_name, None)
    if meth is None or isinstance(meth, _NonUserDefinedCallables):
        # Once '__signature__' is added to C-level callables this
        # type-based filtering will no longer be necessary.
        return None
    return meth
def _signature_get_partial(wrapped_sig, partial, extra_args=()):
    # Internal helper to calculate how 'wrapped_sig' signature will
    # look like after applying a 'functools.partial' object (or alike)
    # on it.  'extra_args' lets partialmethod prepend an implicit slot for
    # the not-yet-bound self/cls argument.
    old_params = wrapped_sig.parameters
    new_params = OrderedDict(old_params.items())
    partial_args = partial.args or ()
    partial_keywords = partial.keywords or {}
    if extra_args:
        partial_args = extra_args + partial_args
    try:
        # Bind against the wrapped signature first so argument errors in
        # the partial surface here as a ValueError.
        ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords)
    except TypeError as ex:
        msg = 'partial object {!r} has incorrect arguments'.format(partial)
        raise ValueError(msg) from ex
    transform_to_kwonly = False
    for param_name, param in old_params.items():
        try:
            arg_value = ba.arguments[param_name]
        except KeyError:
            pass
        else:
            if param.kind is _POSITIONAL_ONLY:
                # If positional-only parameter is bound by partial,
                # it effectively disappears from the signature
                new_params.pop(param_name)
                continue
            if param.kind is _POSITIONAL_OR_KEYWORD:
                if param_name in partial_keywords:
                    # This means that this parameter, and all parameters
                    # after it should be keyword-only (and var-positional
                    # should be removed). Here's why. Consider the following
                    # function:
                    #     foo(a, b, *args, c):
                    #         pass
                    #
                    # "partial(foo, a='spam')" will have the following
                    # signature: "(*, a='spam', b, c)". Because attempting
                    # to call that partial with "(10, 20)" arguments will
                    # raise a TypeError, saying that "a" argument received
                    # multiple values.
                    transform_to_kwonly = True
                    # Set the new default value
                    new_params[param_name] = param.replace(default=arg_value)
                else:
                    # was passed as a positional argument
                    new_params.pop(param.name)
                    continue
            if param.kind is _KEYWORD_ONLY:
                # Set the new default value
                new_params[param_name] = param.replace(default=arg_value)
        if transform_to_kwonly:
            # Once triggered, every later POSITIONAL_OR_KEYWORD parameter is
            # demoted to KEYWORD_ONLY and moved to the end to keep ordering
            # valid; *args is dropped entirely.
            assert param.kind is not _POSITIONAL_ONLY
            if param.kind is _POSITIONAL_OR_KEYWORD:
                new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY)
                new_params[param_name] = new_param
                new_params.move_to_end(param_name)
            elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD):
                new_params.move_to_end(param_name)
            elif param.kind is _VAR_POSITIONAL:
                new_params.pop(param.name)
    return wrapped_sig.replace(parameters=new_params.values())
def _signature_bound_method(sig):
    # Internal helper to transform signatures for unbound
    # functions to bound methods: the first (self/cls) parameter is
    # consumed by the binding, unless the signature starts with *args.
    params = tuple(sig.parameters.values())
    if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
        # Nothing positional to bind 'self' to.
        raise ValueError('invalid method signature')
    kind = params[0].kind
    if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):
        # Drop first parameter:
        # '(p1, p2[, ...])' -> '(p2[, ...])'
        params = params[1:]
    else:
        if kind is not _VAR_POSITIONAL:
            # Unless we add a new parameter type we never
            # get here
            raise ValueError('invalid argument type')
        # It's a var-positional parameter.
        # Do nothing. '(*args[, ...])' -> '(*args[, ...])'
    return sig.replace(parameters=params)
def _signature_is_builtin(obj):
    """True if `obj` is a callable that may support Argument Clinic's
    __text_signature__ protocol."""
    return (isbuiltin(obj) or
            ismethoddescriptor(obj) or
            isinstance(obj, _NonUserDefinedCallables) or
            # isinstance(obj, type) would also match ordinary Python
            # classes, so type/object are special-cased by identity.
            obj in (type, object))
def _signature_is_functionlike(obj):
    """True if `obj` duck-types FunctionType (e.g. a Cython-compiled
    function that carries all the usual function attributes)."""
    if not callable(obj) or isclass(obj):
        # Function-like objects are always callables and never classes.
        return False
    name = getattr(obj, '__name__', None)
    code = getattr(obj, '__code__', None)
    # _void (rather than None) as the getattr fallback: None is a valid
    # value for __defaults__/__kwdefaults__ and must stay distinguishable
    # from "attribute missing".
    defaults = getattr(obj, '__defaults__', _void)
    kwdefaults = getattr(obj, '__kwdefaults__', _void)
    annotations = getattr(obj, '__annotations__', None)
    if not isinstance(code, types.CodeType):
        return False
    if not isinstance(name, str):
        return False
    if defaults is not None and not isinstance(defaults, tuple):
        return False
    if kwdefaults is not None and not isinstance(kwdefaults, dict):
        return False
    return isinstance(annotations, dict)
def _signature_get_bound_param(spec):
# Internal helper to get first parameter name from a
# __text_signature__ of a builtin method, which should
# be in the following format: '($param1, ...)'.
# Assumptions are that the first argument won't have
# a default value or an annotation.
assert spec.startswith('($')
pos = spec.find(',')
if pos == -1:
pos = spec.find(')')
cpos = spec.find(':')
assert cpos == -1 or cpos > pos
cpos = spec.find('=')
assert cpos == -1 or cpos > pos
return spec[2:pos]
def _signature_strip_non_python_syntax(signature):
    """
    Takes a signature in Argument Clinic's extended signature format.
    Returns a tuple of three things:
      * that signature re-rendered in standard Python syntax,
      * the index of the "self" parameter (generally 0), or None if
        the function does not have a "self" parameter, and
      * the index of the last "positional only" parameter,
        or None if the signature has no positional-only parameters.
    """
    if not signature:
        return signature, None, None
    self_parameter = None
    last_positional_only = None
    # tokenize needs a bytes readline-like callable.
    lines = [l.encode('ascii') for l in signature.split('\n')]
    generator = iter(lines).__next__
    token_stream = tokenize.tokenize(generator)
    # State: delayed_comma holds back a comma until we know the next token
    # is not ')'; skip_next_comma swallows the comma that follows a '/'.
    delayed_comma = False
    skip_next_comma = False
    text = []
    add = text.append
    current_parameter = 0
    OP = token.OP
    ERRORTOKEN = token.ERRORTOKEN
    # token stream always starts with ENCODING token, skip it
    t = next(token_stream)
    assert t.type == tokenize.ENCODING
    for t in token_stream:
        type, string = t.type, t.string
        if type == OP:
            if string == ',':
                if skip_next_comma:
                    skip_next_comma = False
                else:
                    assert not delayed_comma
                    delayed_comma = True
                    current_parameter += 1
                continue
            if string == '/':
                # '/' marks the end of the positional-only section; it and
                # its trailing comma are dropped from the clean rendering.
                assert not skip_next_comma
                assert last_positional_only is None
                skip_next_comma = True
                last_positional_only = current_parameter - 1
                continue
        if (type == ERRORTOKEN) and (string == '$'):
            # '$' prefixes the bound (self/module) parameter and is not
            # valid Python, so it arrives as an ERRORTOKEN.
            assert self_parameter is None
            self_parameter = current_parameter
            continue
        if delayed_comma:
            delayed_comma = False
            if not ((type == OP) and (string == ')')):
                add(', ')
        add(string)
        if (string == ','):
            add(' ')
    clean_signature = ''.join(text)
    return clean_signature, self_parameter, last_positional_only
def _signature_fromstr(cls, obj, s, skip_bound_arg=True):
    # Internal helper to parse content of '__text_signature__'
    # and return a Signature based on it.  The text is wrapped in a fake
    # 'def foo...' and parsed with ast; defaults are recovered either as
    # literals or by evaluating dotted names against obj's module.
    Parameter = cls._parameter_cls
    clean_signature, self_parameter, last_positional_only = \
        _signature_strip_non_python_syntax(s)
    program = "def foo" + clean_signature + ": pass"
    try:
        module = ast.parse(program)
    except SyntaxError:
        module = None
    if not isinstance(module, ast.Module):
        raise ValueError("{!r} builtin has invalid signature".format(obj))
    f = module.body[0]
    parameters = []
    empty = Parameter.empty
    invalid = object()
    # 'module' is reused below to mean obj's defining module (for resolving
    # symbolic default values), not the just-parsed AST module.
    module = None
    module_dict = {}
    module_name = getattr(obj, '__module__', None)
    if module_name:
        module = sys.modules.get(module_name, None)
        if module:
            module_dict = module.__dict__
    sys_module_dict = sys.modules
    def parse_name(node):
        # Extract a parameter name; annotations are rejected outright.
        assert isinstance(node, ast.arg)
        # NOTE(review): '!= None' would normally be 'is not None'; behavior
        # is the same here since AST nodes use default equality.
        if node.annotation != None:
            raise ValueError("Annotations are not currently supported")
        return node.arg
    def wrap_value(s):
        # Evaluate a symbolic default (e.g. 'sys.maxsize') and re-wrap the
        # result as an AST literal so literal_eval can consume it.
        try:
            value = eval(s, module_dict)
        except NameError:
            try:
                value = eval(s, sys_module_dict)
            except NameError:
                raise RuntimeError()
        if isinstance(value, str):
            return ast.Str(value)
        if isinstance(value, (int, float)):
            return ast.Num(value)
        if isinstance(value, bytes):
            return ast.Bytes(value)
        if value in (True, False, None):
            return ast.NameConstant(value)
        raise RuntimeError()
    class RewriteSymbolics(ast.NodeTransformer):
        # Replace Name/Attribute default expressions with literal nodes.
        def visit_Attribute(self, node):
            a = []
            n = node
            while isinstance(n, ast.Attribute):
                a.append(n.attr)
                n = n.value
            if not isinstance(n, ast.Name):
                raise RuntimeError()
            a.append(n.id)
            value = ".".join(reversed(a))
            return wrap_value(value)
        def visit_Name(self, node):
            if not isinstance(node.ctx, ast.Load):
                raise ValueError()
            return wrap_value(node.id)
    def p(name_node, default_node, default=empty):
        # Append one Parameter; reads the enclosing 'kind' variable, which
        # is re-assigned between the sections below.
        name = parse_name(name_node)
        if name is invalid:
            return None
        if default_node and default_node is not _empty:
            try:
                default_node = RewriteSymbolics().visit(default_node)
                o = ast.literal_eval(default_node)
            except ValueError:
                o = invalid
            if o is invalid:
                return None
            default = o if o is not invalid else default
        parameters.append(Parameter(name, kind, default=default, annotation=empty))
    # non-keyword-only parameters
    # Pair names with defaults from the right (defaults align to the tail),
    # then restore declaration order.
    # NOTE(review): 'iter' shadows the builtin within this function.
    args = reversed(f.args.args)
    defaults = reversed(f.args.defaults)
    iter = itertools.zip_longest(args, defaults, fillvalue=None)
    if last_positional_only is not None:
        kind = Parameter.POSITIONAL_ONLY
    else:
        kind = Parameter.POSITIONAL_OR_KEYWORD
    for i, (name, default) in enumerate(reversed(list(iter))):
        p(name, default)
        if i == last_positional_only:
            kind = Parameter.POSITIONAL_OR_KEYWORD
    # *args
    if f.args.vararg:
        kind = Parameter.VAR_POSITIONAL
        p(f.args.vararg, empty)
    # keyword-only arguments
    kind = Parameter.KEYWORD_ONLY
    for name, default in zip(f.args.kwonlyargs, f.args.kw_defaults):
        p(name, default)
    # **kwargs
    if f.args.kwarg:
        kind = Parameter.VAR_KEYWORD
        p(f.args.kwarg, empty)
    if self_parameter is not None:
        # Possibly strip the bound argument:
        # - We *always* strip first bound argument if
        #   it is a module.
        # - We don't strip first bound argument if
        #   skip_bound_arg is False.
        assert parameters
        _self = getattr(obj, '__self__', None)
        self_isbound = _self is not None
        self_ismodule = ismodule(_self)
        if self_isbound and (self_ismodule or skip_bound_arg):
            parameters.pop(0)
        else:
            # for builtins, self parameter is always positional-only!
            p = parameters[0].replace(kind=Parameter.POSITIONAL_ONLY)
            parameters[0] = p
    return cls(parameters, return_annotation=cls.empty)
def _signature_from_builtin(cls, func, skip_bound_arg=True):
    """Build a signature for a builtin callable from __text_signature__."""
    if not _signature_is_builtin(func):
        raise TypeError("{!r} is not a Python builtin "
                        "function".format(func))
    text_sig = getattr(func, "__text_signature__", None)
    if not text_sig:
        # Missing or empty text signature means we cannot introspect it.
        raise ValueError("no signature found for builtin {!r}".format(func))
    return _signature_fromstr(cls, func, text_sig, skip_bound_arg)
def _signature_internal(obj, follow_wrapper_chains=True, skip_bound_arg=True):
    # Core implementation behind signature(): dispatch on the kind of
    # callable (bound method, wrapped function, partial(method), plain or
    # function-like object, builtin, partial, class, callable instance) and
    # recurse until a Signature can be constructed.
    if not callable(obj):
        raise TypeError('{!r} is not a callable object'.format(obj))
    if isinstance(obj, types.MethodType):
        # In this case we skip the first parameter of the underlying
        # function (usually `self` or `cls`).
        sig = _signature_internal(obj.__func__,
                                  follow_wrapper_chains,
                                  skip_bound_arg)
        if skip_bound_arg:
            return _signature_bound_method(sig)
        else:
            return sig
    # Was this function wrapped by a decorator?
    if follow_wrapper_chains:
        obj = unwrap(obj, stop=(lambda f: hasattr(f, "__signature__")))
    try:
        sig = obj.__signature__
    except AttributeError:
        pass
    else:
        # An explicit __signature__ attribute always wins.
        if sig is not None:
            if not isinstance(sig, Signature):
                raise TypeError(
                    'unexpected object {!r} in __signature__ '
                    'attribute'.format(sig))
            return sig
    try:
        partialmethod = obj._partialmethod
    except AttributeError:
        pass
    else:
        if isinstance(partialmethod, functools.partialmethod):
            # Unbound partialmethod (see functools.partialmethod)
            # This means, that we need to calculate the signature
            # as if it's a regular partial object, but taking into
            # account that the first positional argument
            # (usually `self`, or `cls`) will not be passed
            # automatically (as for boundmethods)
            wrapped_sig = _signature_internal(partialmethod.func,
                                              follow_wrapper_chains,
                                              skip_bound_arg)
            sig = _signature_get_partial(wrapped_sig, partialmethod, (None,))
            first_wrapped_param = tuple(wrapped_sig.parameters.values())[0]
            new_params = (first_wrapped_param,) + tuple(sig.parameters.values())
            return sig.replace(parameters=new_params)
    if isfunction(obj) or _signature_is_functionlike(obj):
        # If it's a pure Python function, or an object that is duck type
        # of a Python function (Cython functions, for instance), then:
        return Signature.from_function(obj)
    if _signature_is_builtin(obj):
        return _signature_from_builtin(Signature, obj,
                                       skip_bound_arg=skip_bound_arg)
    if isinstance(obj, functools.partial):
        wrapped_sig = _signature_internal(obj.func,
                                          follow_wrapper_chains,
                                          skip_bound_arg)
        return _signature_get_partial(wrapped_sig, obj)
    sig = None
    if isinstance(obj, type):
        # obj is a class or a metaclass
        # First, let's see if it has an overloaded __call__ defined
        # in its metaclass
        call = _signature_get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = _signature_internal(call,
                                      follow_wrapper_chains,
                                      skip_bound_arg)
        else:
            # Now we check if the 'obj' class has a '__new__' method
            new = _signature_get_user_defined_method(obj, '__new__')
            if new is not None:
                sig = _signature_internal(new,
                                          follow_wrapper_chains,
                                          skip_bound_arg)
            else:
                # Finally, we should have at least __init__ implemented
                init = _signature_get_user_defined_method(obj, '__init__')
                if init is not None:
                    sig = _signature_internal(init,
                                              follow_wrapper_chains,
                                              skip_bound_arg)
        if sig is None:
            # At this point we know, that `obj` is a class, with no user-
            # defined '__init__', '__new__', or class-level '__call__'
            for base in obj.__mro__[:-1]:
                # Since '__text_signature__' is implemented as a
                # descriptor that extracts text signature from the
                # class docstring, if 'obj' is derived from a builtin
                # class, its own '__text_signature__' may be 'None'.
                # Therefore, we go through the MRO (except the last
                # class in there, which is 'object') to find the first
                # class with non-empty text signature.
                try:
                    text_sig = base.__text_signature__
                except AttributeError:
                    pass
                else:
                    if text_sig:
                        # If 'obj' class has a __text_signature__ attribute:
                        # return a signature based on it
                        return _signature_fromstr(Signature, obj, text_sig)
            # No '__text_signature__' was found for the 'obj' class.
            # Last option is to check if its '__init__' is
            # object.__init__ or type.__init__.
            if type not in obj.__mro__:
                # We have a class (not metaclass), but no user-defined
                # __init__ or __new__ for it
                if obj.__init__ is object.__init__:
                    # Return a signature of 'object' builtin.
                    return signature(object)
    elif not isinstance(obj, _NonUserDefinedCallables):
        # An object with __call__
        # We also check that the 'obj' is not an instance of
        # _WrapperDescriptor or _MethodWrapper to avoid
        # infinite recursion (and even potential segfault)
        call = _signature_get_user_defined_method(type(obj), '__call__')
        if call is not None:
            try:
                sig = _signature_internal(call,
                                          follow_wrapper_chains,
                                          skip_bound_arg)
            except ValueError as ex:
                msg = 'no signature found for {!r}'.format(obj)
                raise ValueError(msg) from ex
    if sig is not None:
        # For classes and objects we skip the first parameter of their
        # __call__, __new__, or __init__ methods
        if skip_bound_arg:
            return _signature_bound_method(sig)
        else:
            return sig
    if isinstance(obj, types.BuiltinFunctionType):
        # Raise a nicer error message for builtins
        msg = 'no signature found for builtin function {!r}'.format(obj)
        raise ValueError(msg)
    raise ValueError('callable {!r} is not supported by signature'.format(obj))
def signature(obj):
    '''Get a signature object for the passed callable.'''
    # Public entry point: default behavior follows __wrapped__ chains and
    # strips bound arguments (self/cls).
    return _signature_internal(obj)
class _void:
    '''A private marker - used in Parameter & Signature'''
# Marker for "no default / no annotation"; exposed as Parameter.empty.
class _empty:
    pass
class _ParameterKind(int):
def __new__(self, *args, name):
obj = int.__new__(self, *args)
obj._name = name
return obj
def __str__(self):
return self._name
def __repr__(self):
return '<_ParameterKind: {!r}>'.format(self._name)
# The five parameter kinds defined by PEP 362, numbered in the order they
# may legally appear within a signature.
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter:
    '''Represents a parameter in a function signature.
    Has the following public attributes:
    * name : str
        The name of the parameter as a string.
    * default : object
        The default value for the parameter if specified. If the
        parameter has no default value, this attribute is set to
        `Parameter.empty`.
    * annotation
        The annotation for the parameter if specified. If the
        parameter has no annotation, this attribute is set to
        `Parameter.empty`.
    * kind : str
        Describes how argument values are bound to the parameter.
        Possible values: `Parameter.POSITIONAL_ONLY`,
        `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
        `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
    '''
    __slots__ = ('_name', '_kind', '_default', '_annotation')
    POSITIONAL_ONLY = _POSITIONAL_ONLY
    POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
    VAR_POSITIONAL = _VAR_POSITIONAL
    KEYWORD_ONLY = _KEYWORD_ONLY
    VAR_KEYWORD = _VAR_KEYWORD
    empty = _empty
    def __init__(self, name, kind, *, default=_empty, annotation=_empty):
        if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
                        _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
            raise ValueError("invalid value for 'Parameter.kind' attribute")
        self._kind = kind
        if default is not _empty:
            # *args / **kwargs can never carry a default value.
            if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
                msg = '{} parameters cannot have default values'.format(kind)
                raise ValueError(msg)
        self._default = default
        self._annotation = annotation
        if name is _empty:
            raise ValueError('name is a required attribute for Parameter')
        if not isinstance(name, str):
            raise TypeError("name must be a str, not a {!r}".format(name))
        if not name.isidentifier():
            raise ValueError('{!r} is not a valid parameter name'.format(name))
        self._name = name
    @property
    def name(self):
        return self._name
    @property
    def default(self):
        return self._default
    @property
    def annotation(self):
        return self._annotation
    @property
    def kind(self):
        return self._kind
    def replace(self, *, name=_void, kind=_void,
                annotation=_void, default=_void):
        '''Creates a customized copy of the Parameter.'''
        # _void distinguishes "not supplied" from legitimate values such
        # as _empty.
        if name is _void:
            name = self._name
        if kind is _void:
            kind = self._kind
        if annotation is _void:
            annotation = self._annotation
        if default is _void:
            default = self._default
        return type(self)(name, kind, default=default, annotation=annotation)
    def __str__(self):
        kind = self.kind
        formatted = self._name
        # Add annotation and default value
        if self._annotation is not _empty:
            formatted = '{}:{}'.format(formatted,
                                       formatannotation(self._annotation))
        if self._default is not _empty:
            formatted = '{}={}'.format(formatted, repr(self._default))
        if kind == _VAR_POSITIONAL:
            formatted = '*' + formatted
        elif kind == _VAR_KEYWORD:
            formatted = '**' + formatted
        return formatted
    def __repr__(self):
        return '<{} at {:#x} {!r}>'.format(self.__class__.__name__,
                                           id(self), self.name)
    # NOTE(review): defining __eq__ without __hash__ makes Parameter
    # instances unhashable (Python sets __hash__ to None); confirm this is
    # intended before using parameters as dict keys or set members.
    def __eq__(self, other):
        return (issubclass(other.__class__, Parameter) and
                self._name == other._name and
                self._kind == other._kind and
                self._default == other._default and
                self._annotation == other._annotation)
    def __ne__(self, other):
        # Redundant in Python 3 (derived from __eq__) but kept explicit.
        return not self.__eq__(other)
class BoundArguments:
    '''Result of `Signature.bind` call. Holds the mapping of arguments
    to the function's parameters.
    Has the following public attributes:
    * arguments : OrderedDict
        An ordered mutable mapping of parameters' names to arguments' values.
        Does not contain arguments' default values.
    * signature : Signature
        The Signature object that created this instance.
    * args : tuple
        Tuple of positional arguments values.
    * kwargs : dict
        Dict of keyword arguments values.
    '''
    def __init__(self, signature, arguments):
        self.arguments = arguments
        self._signature = signature
    @property
    def signature(self):
        return self._signature
    @property
    def args(self):
        # Reconstruct the positional-argument tuple by walking the
        # signature's parameters in declaration order, stopping at the
        # first keyword-only/VAR_KEYWORD parameter or the first
        # parameter with no bound value.
        args = []
        for param_name, param in self._signature.parameters.items():
            if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                break
            try:
                arg = self.arguments[param_name]
            except KeyError:
                # We're done here. Other arguments
                # will be mapped in 'BoundArguments.kwargs'
                break
            else:
                if param.kind == _VAR_POSITIONAL:
                    # *args
                    args.extend(arg)
                else:
                    # plain argument
                    args.append(arg)
        return tuple(args)
    @property
    def kwargs(self):
        # Everything that `args` did not consume is rendered as keyword
        # arguments. kwargs_started flips True at the first keyword-only
        # parameter or the first unbound positional parameter.
        kwargs = {}
        kwargs_started = False
        for param_name, param in self._signature.parameters.items():
            if not kwargs_started:
                if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                    kwargs_started = True
                else:
                    if param_name not in self.arguments:
                        kwargs_started = True
                        continue
            if not kwargs_started:
                continue
            try:
                arg = self.arguments[param_name]
            except KeyError:
                # Parameter has no bound value; its default applies.
                pass
            else:
                if param.kind == _VAR_KEYWORD:
                    # **kwargs
                    kwargs.update(arg)
                else:
                    # plain keyword argument
                    kwargs[param_name] = arg
        return kwargs
    def __eq__(self, other):
        return (issubclass(other.__class__, BoundArguments) and
                self.signature == other.signature and
                self.arguments == other.arguments)
    def __ne__(self, other):
        return not self.__eq__(other)
class Signature:
    '''A Signature object represents the overall signature of a function.
    It stores a Parameter object for each parameter accepted by the
    function, as well as information specific to the function itself.
    A Signature object has the following public attributes and methods:
    * parameters : OrderedDict
        An ordered mapping of parameters' names to the corresponding
        Parameter objects (keyword-only arguments are in the same order
        as listed in `code.co_varnames`).
    * return_annotation : object
        The annotation for the return type of the function if specified.
        If the function has no annotation for its return type, this
        attribute is set to `Signature.empty`.
    * bind(*args, **kwargs) -> BoundArguments
        Creates a mapping from positional and keyword arguments to
        parameters.
    * bind_partial(*args, **kwargs) -> BoundArguments
        Creates a partial mapping from positional and keyword arguments
        to parameters (simulating 'functools.partial' behavior.)
    '''
    __slots__ = ('_return_annotation', '_parameters')
    # Subclasses may override these to customize the classes used for
    # parameters and for bind()/bind_partial() results.
    _parameter_cls = Parameter
    _bound_arguments_cls = BoundArguments
    empty = _empty
    def __init__(self, parameters=None, *, return_annotation=_empty,
                 __validate_parameters__=True):
        '''Constructs Signature from the given list of Parameter
        objects and 'return_annotation'. All arguments are optional.
        '''
        if parameters is None:
            params = OrderedDict()
        else:
            if __validate_parameters__:
                params = OrderedDict()
                # top_kind tracks the highest parameter kind seen so far;
                # kinds must be non-decreasing along the parameter list.
                top_kind = _POSITIONAL_ONLY
                kind_defaults = False
                for idx, param in enumerate(parameters):
                    kind = param.kind
                    name = param.name
                    if kind < top_kind:
                        msg = 'wrong parameter order: {!r} before {!r}'
                        msg = msg.format(top_kind, kind)
                        raise ValueError(msg)
                    elif kind > top_kind:
                        kind_defaults = False
                        top_kind = kind
                    if kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD):
                        if param.default is _empty:
                            if kind_defaults:
                                # No default for this parameter, but the
                                # previous parameter of the same kind had
                                # a default
                                msg = 'non-default argument follows default ' \
                                      'argument'
                                raise ValueError(msg)
                        else:
                            # There is a default for this parameter.
                            kind_defaults = True
                    if name in params:
                        msg = 'duplicate parameter name: {!r}'.format(name)
                        raise ValueError(msg)
                    params[name] = param
            else:
                # Fast path: caller vouches for ordering/defaults.
                params = OrderedDict(((param.name, param)
                                                for param in parameters))
        # Read-only view; Signature instances are meant to be immutable.
        self._parameters = types.MappingProxyType(params)
        self._return_annotation = return_annotation
    @classmethod
    def from_function(cls, func):
        '''Constructs Signature for the given python function'''
        is_duck_function = False
        if not isfunction(func):
            if _signature_is_functionlike(func):
                is_duck_function = True
            else:
                # If it's not a pure Python function, and not a duck type
                # of pure function:
                raise TypeError('{!r} is not a Python function'.format(func))
        Parameter = cls._parameter_cls
        # Parameter information.
        func_code = func.__code__
        pos_count = func_code.co_argcount
        arg_names = func_code.co_varnames
        positional = tuple(arg_names[:pos_count])
        keyword_only_count = func_code.co_kwonlyargcount
        keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
        annotations = func.__annotations__
        defaults = func.__defaults__
        kwdefaults = func.__kwdefaults__
        if defaults:
            pos_default_count = len(defaults)
        else:
            pos_default_count = 0
        parameters = []
        # Non-keyword-only parameters w/o defaults.
        non_default_count = pos_count - pos_default_count
        for name in positional[:non_default_count]:
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD))
        # ... w/ defaults.
        for offset, name in enumerate(positional[non_default_count:]):
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD,
                                        default=defaults[offset]))
        # *args
        if func_code.co_flags & CO_VARARGS:
            name = arg_names[pos_count + keyword_only_count]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_POSITIONAL))
        # Keyword-only parameters.
        for name in keyword_only:
            default = _empty
            if kwdefaults is not None:
                default = kwdefaults.get(name, _empty)
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_KEYWORD_ONLY,
                                        default=default))
        # **kwargs
        if func_code.co_flags & CO_VARKEYWORDS:
            index = pos_count + keyword_only_count
            if func_code.co_flags & CO_VARARGS:
                index += 1
            name = arg_names[index]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_KEYWORD))
        # Is 'func' is a pure Python function - don't validate the
        # parameters list (for correct order and defaults), it should be OK.
        return cls(parameters,
                   return_annotation=annotations.get('return', _empty),
                   __validate_parameters__=is_duck_function)
    @classmethod
    def from_builtin(cls, func):
        # Delegates to the module-level helper for builtin callables.
        return _signature_from_builtin(cls, func)
    @property
    def parameters(self):
        return self._parameters
    @property
    def return_annotation(self):
        return self._return_annotation
    def replace(self, *, parameters=_void, return_annotation=_void):
        '''Creates a customized copy of the Signature.
        Pass 'parameters' and/or 'return_annotation' arguments
        to override them in the new copy.
        '''
        if parameters is _void:
            parameters = self.parameters.values()
        if return_annotation is _void:
            return_annotation = self._return_annotation
        return type(self)(parameters,
                          return_annotation=return_annotation)
    def __eq__(self, other):
        """Signatures are equal when their return annotations match and
        their parameters match: keyword-only parameters by name only,
        all other parameters by name *and* position."""
        if (not issubclass(type(other), Signature) or
                    self.return_annotation != other.return_annotation or
                    len(self.parameters) != len(other.parameters)):
            return False
        other_positions = {param: idx
                           for idx, param in enumerate(other.parameters.keys())}
        for idx, (param_name, param) in enumerate(self.parameters.items()):
            if param.kind == _KEYWORD_ONLY:
                # Keyword-only parameters are order-insensitive.
                try:
                    other_param = other.parameters[param_name]
                except KeyError:
                    return False
                else:
                    if param != other_param:
                        return False
            else:
                try:
                    other_idx = other_positions[param_name]
                except KeyError:
                    return False
                else:
                    if (idx != other_idx or
                                    param != other.parameters[param_name]):
                        return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def _bind(self, args, kwargs, *, partial=False):
        '''Private method. Don't use directly.'''
        arguments = OrderedDict()
        parameters = iter(self.parameters.values())
        # parameters_ex carries at most one parameter that was pulled
        # from the iterator but not consumed, so the kwargs phase can
        # start with it.
        parameters_ex = ()
        arg_vals = iter(args)
        while True:
            # Let's iterate through the positional arguments and corresponding
            # parameters
            try:
                arg_val = next(arg_vals)
            except StopIteration:
                # No more positional arguments
                try:
                    param = next(parameters)
                except StopIteration:
                    # No more parameters. That's it. Just need to check that
                    # we have no `kwargs` after this while loop
                    break
                else:
                    if param.kind == _VAR_POSITIONAL:
                        # That's OK, just empty *args. Let's start parsing
                        # kwargs
                        break
                    elif param.name in kwargs:
                        if param.kind == _POSITIONAL_ONLY:
                            msg = '{arg!r} parameter is positional only, ' \
                                  'but was passed as a keyword'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
                        parameters_ex = (param,)
                        break
                    elif (param.kind == _VAR_KEYWORD or
                                                param.default is not _empty):
                        # That's fine too - we have a default value for this
                        # parameter. So, lets start parsing `kwargs`, starting
                        # with the current parameter
                        parameters_ex = (param,)
                        break
                    else:
                        # No default, not VAR_KEYWORD, not VAR_POSITIONAL,
                        # not in `kwargs`
                        if partial:
                            parameters_ex = (param,)
                            break
                        else:
                            msg = '{arg!r} parameter lacking default value'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
            else:
                # We have a positional argument to process
                try:
                    param = next(parameters)
                except StopIteration:
                    raise TypeError('too many positional arguments') from None
                else:
                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                        # Looks like we have no parameter for this positional
                        # argument
                        raise TypeError('too many positional arguments')
                    if param.kind == _VAR_POSITIONAL:
                        # We have an '*args'-like argument, let's fill it with
                        # all positional arguments we have left and move on to
                        # the next phase
                        values = [arg_val]
                        values.extend(arg_vals)
                        arguments[param.name] = tuple(values)
                        break
                    if param.name in kwargs:
                        raise TypeError('multiple values for argument '
                                        '{arg!r}'.format(arg=param.name))
                    arguments[param.name] = arg_val
        # Now, we iterate through the remaining parameters to process
        # keyword arguments
        kwargs_param = None
        for param in itertools.chain(parameters_ex, parameters):
            if param.kind == _VAR_KEYWORD:
                # Memorize that we have a '**kwargs'-like parameter
                kwargs_param = param
                continue
            if param.kind == _VAR_POSITIONAL:
                # Named arguments don't refer to '*args'-like parameters.
                # We only arrive here if the positional arguments ended
                # before reaching the last parameter before *args.
                continue
            param_name = param.name
            try:
                arg_val = kwargs.pop(param_name)
            except KeyError:
                # We have no value for this parameter. It's fine though,
                # if it has a default value, or it is an '*args'-like
                # parameter, left alone by the processing of positional
                # arguments.
                if (not partial and param.kind != _VAR_POSITIONAL and
                                                param.default is _empty):
                    raise TypeError('{arg!r} parameter lacking default value'. \
                                    format(arg=param_name)) from None
            else:
                if param.kind == _POSITIONAL_ONLY:
                    # This should never happen in case of a properly built
                    # Signature object (but let's have this check here
                    # to ensure correct behaviour just in case)
                    raise TypeError('{arg!r} parameter is positional only, '
                                    'but was passed as a keyword'. \
                                    format(arg=param.name))
                arguments[param_name] = arg_val
        if kwargs:
            if kwargs_param is not None:
                # Process our '**kwargs'-like parameter
                arguments[kwargs_param.name] = kwargs
            else:
                raise TypeError('too many keyword arguments')
        return self._bound_arguments_cls(self, arguments)
    # bind/bind_partial take the instance via *args -- presumably so a
    # keyword argument literally named 'self' can still be bound; confirm.
    def bind(*args, **kwargs):
        '''Get a BoundArguments object, that maps the passed `args`
        and `kwargs` to the function's signature. Raises `TypeError`
        if the passed arguments can not be bound.
        '''
        return args[0]._bind(args[1:], kwargs)
    def bind_partial(*args, **kwargs):
        '''Get a BoundArguments object, that partially maps the
        passed `args` and `kwargs` to the function's signature.
        Raises `TypeError` if the passed arguments can not be bound.
        '''
        return args[0]._bind(args[1:], kwargs, partial=True)
    def __str__(self):
        """Render the signature, e.g. "(a, b=1, *, c) -> int"."""
        result = []
        render_pos_only_separator = False
        render_kw_only_separator = True
        for param in self.parameters.values():
            formatted = str(param)
            kind = param.kind
            if kind == _POSITIONAL_ONLY:
                render_pos_only_separator = True
            elif render_pos_only_separator:
                # It's not a positional-only parameter, and the flag
                # is set to 'True' (there were pos-only params before.)
                result.append('/')
                render_pos_only_separator = False
            if kind == _VAR_POSITIONAL:
                # OK, we have an '*args'-like parameter, so we won't need
                # a '*' to separate keyword-only arguments
                render_kw_only_separator = False
            elif kind == _KEYWORD_ONLY and render_kw_only_separator:
                # We have a keyword-only parameter to render and we haven't
                # rendered an '*args'-like parameter before, so add a '*'
                # separator to the parameters list ("foo(arg1, *, arg2)" case)
                result.append('*')
                # This condition should be only triggered once, so
                # reset the flag
                render_kw_only_separator = False
            result.append(formatted)
        if render_pos_only_separator:
            # There were only positional-only parameters, hence the
            # flag was not reset to 'False'
            result.append('/')
        rendered = '({})'.format(', '.join(result))
        if self.return_annotation is not _empty:
            anno = formatannotation(self.return_annotation)
            rendered += ' -> {}'.format(anno)
        return rendered
def _main():
    """ Logic for inspecting an object given at command line """
    import argparse
    import importlib

    parser = argparse.ArgumentParser()
    parser.add_argument(
        'object',
        help="The object to be analysed. "
             "It supports the 'module:qualname' syntax")
    parser.add_argument(
        '-d', '--details', action='store_true',
        help='Display info about the module rather than its source code')

    options = parser.parse_args()
    spec = options.object
    module_name, sep, qualname = spec.partition(":")

    # Import the module; report any failure on stderr and exit with 2.
    try:
        target = module = importlib.import_module(module_name)
    except Exception as exc:
        print("Failed to import {} ({}: {})".format(module_name,
                                                    type(exc).__name__,
                                                    exc),
              file=sys.stderr)
        exit(2)

    # Walk the dotted qualname that followed ':' (if any).
    if sep:
        for attr in qualname.split("."):
            target = getattr(target, attr)

    if module.__name__ in sys.builtin_module_names:
        print("Can't get info for builtin modules.", file=sys.stderr)
        exit(1)

    if options.details:
        print('Target: {}'.format(spec))
        print('Origin: {}'.format(getsourcefile(module)))
        print('Cached: {}'.format(module.__cached__))
        if target is module:
            print('Loader: {}'.format(repr(module.__loader__)))
            if hasattr(module, '__path__'):
                print('Submodule search path: {}'.format(module.__path__))
        else:
            try:
                __, lineno = findsource(target)
            except Exception:
                pass
            else:
                print('Line: {}'.format(lineno))
        print('\n')
    else:
        print(getsource(target))
# Allow running this module directly from the command line.
if __name__ == "__main__":
    _main()
"""Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
import sys, os, zipimport, time, re, imp, types
from urlparse import urlparse, urlunparse
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib._bootstrap as importlib_bootstrap
else:
importlib_bootstrap = None
# This marker is used to simplify the process that checks whether the
# setuptools package was installed by the Setuptools project
# or by the Distribute project, in case Setuptools creates
# a distribution with the same version.
#
# The bootstrapping script, for instance, will check if this
# attribute is present to decide whether to reinstall the package
_distribute = True
def _bypass_ensure_directory(name, mode=0777):
    # Sandbox-bypassing version of ensure_directory()
    # Uses the `split`, `isdir` and `mkdir` callables captured at import
    # time so it keeps working after setuptools' sandbox patches `os`.
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    dirname, filename = split(name)
    if dirname and filename and not isdir(dirname):
        # Create missing ancestors first, then this directory.
        _bypass_ensure_directory(dirname)
        mkdir(dirname, mode)
_state_vars = {}
def _declare_state(vartype, **kw):
    """Define module-level state variables and register them (by name)
    in `_state_vars`, keyed to the `vartype` that selects the matching
    `_sget_*`/`_sset_*` helpers used by __getstate__/__setstate__."""
    g = globals()
    for name, val in kw.iteritems():
        g[name] = val
        _state_vars[name] = vartype
def __getstate__():
    """Snapshot all registered module-level state into a dict."""
    state = {}
    g = globals()
    for k, v in _state_vars.iteritems():
        # Dispatch to the _sget_<vartype> helper for each variable.
        state[k] = g['_sget_'+v](g[k])
    return state
def __setstate__(state):
    """Restore module-level state from a __getstate__() snapshot."""
    g = globals()
    for k, v in state.iteritems():
        # Dispatch to the _sset_<vartype> helper for each variable.
        g['_sset_'+_state_vars[k]](k, g[k], v)
    return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
    # Snapshot helper for objects exposing the __getstate__ protocol.
    return val.__getstate__()
def _sset_object(key, ob, state):
    # Restore helper: hand the snapshot back to the object's __setstate__.
    ob.__setstate__(state)
# "none" vartype: nothing is saved or restored.
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
    """Return this platform's maximum compatible version.
    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils. But what we want when checking compatibility is to know the
    version of Mac OS X that we are *running*. To allow usage of packages that
    explicitly require a newer version of Mac OS X, we must also know the
    current version of the OS.
    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    """
    plat = get_build_platform(); m = macosVersionString.match(plat)
    if m is not None and sys.platform == "darwin":
        # Substitute the *running* OS X version for the build-time one.
        try:
            plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
        except ValueError:
            pass # not Mac OS X
    return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
'ExtractionError',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
    """Abstract base for dependency resolution errors"""
    def __repr__(self):
        # Render as e.g. "DistributionNotFound(Requirement(...),)".
        cls_name = self.__class__.__name__
        return cls_name + repr(self.args)
# Concrete resolution errors; VersionConflict and DistributionNotFound
# are raised from WorkingSet.find()/resolve() below.
class VersionConflict(ResolutionError):
    """An already-installed version conflicts with the requested version"""
class DistributionNotFound(ResolutionError):
    """A requested distribution was not found"""
class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`
    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
    """
    # The registry is consulted via _find_adapter() in get_provider().
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq,Requirement):
        # For a Requirement, return the active distribution (activating
        # one via require() if none is active yet).
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    try:
        module = sys.modules[moduleOrReq]
    except KeyError:
        # Not imported yet -- import it, then re-fetch from sys.modules.
        __import__(moduleOrReq)
        module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
    # Return the running Mac OS X version as a list of string components,
    # e.g. ['10', '8', '2'].  The mutable default argument is an
    # intentional memoization cache: the detection runs only once.
    if not _cache:
        import platform
        version = platform.mac_ver()[0]
        # fallback for MacPorts
        if version == '':
            import plistlib
            plist = '/System/Library/CoreServices/SystemVersion.plist'
            if os.path.exists(plist):
                if hasattr(plistlib, 'readPlist'):
                    plist_content = plistlib.readPlist(plist)
                    if 'ProductVersion' in plist_content:
                        version = plist_content['ProductVersion']
        _cache.append(version.split('.'))
    return _cache[0]
def _macosx_arch(machine):
return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
def get_build_platform():
    """Return this platform's string for platform-specific distributions
    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    try:
        from distutils.util import get_platform
    except ImportError:
        from sysconfig import get_platform
    plat = get_platform()
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        # Older Pythons report a 'darwin-...' string; normalize it to the
        # 'macosx-<major>.<minor>-<arch>' form used by egg platform tags.
        try:
            version = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
                _macosx_arch(machine))
        except ValueError:
            # if someone is running a non-Mac darwin system, this will fall
            # through to the default implementation
            pass
    return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
get_platform = get_build_platform # XXX backward compat
def compatible_platforms(provided,required):
    """Can code for the `provided` platform run on the `required` platform?
    Returns true if either platform is ``None``, or the platforms are equal.
    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided==required:
        return True # easy case
    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if reqMac:
        provMac = macosVersionString.match(provided)
        # is this a Mac package?
        if not provMac:
            # this is backwards compatibility for packages built before
            # setuptools 0.6. All packages built after this point will
            # use the new macosx designation.
            provDarwin = darwinVersionString.match(provided)
            if provDarwin:
                dversion = int(provDarwin.group(1))
                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
                # Darwin 7 corresponds to OS X 10.3, Darwin 8 to 10.4.
                if dversion == 7 and macosversion >= "10.3" or \
                    dversion == 8 and macosversion >= "10.4":
                    #import warnings
                    #warnings.warn("Mac eggs should be rebuilt to "
                    #    "use the macosx designation instead of darwin.",
                    #    category=DeprecationWarning)
                    return True
            return False # egg isn't macosx or legacy darwin
        # are they the same major version and machine type?
        if provMac.group(1) != reqMac.group(1) or \
            provMac.group(3) != reqMac.group(3):
            return False
        # is the required OS major update >= the provided one?
        if int(provMac.group(2)) > int(reqMac.group(2)):
            return False
        return True
    # XXX Linux and other platforms' special cases should go here
    return False
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # Reuse the *caller's* global namespace (cleared except __name__) so
    # the script executes as if it were the invoking module.
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)
run_main = run_script # backward compatibility
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    # Successively coerce: string -> Requirement -> Distribution.
    if isinstance(dist,basestring): dist = Requirement.parse(dist)
    if isinstance(dist,Requirement): dist = get_provider(dist)
    if not isinstance(dist,Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", dist)
    return dist
# Convenience wrappers: each accepts a string, Requirement, or
# Distribution and forwards to the corresponding Distribution method.
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
    # Interface specification only: the methods deliberately omit `self`
    # and have no bodies beyond their docstrings; implementations are
    # provided by the *Provider classes named in __all__.
    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""
    def get_metadata(name):
        """The named metadata resource as a string"""
    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines
       Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""
    def metadata_isdir(name):
        """Is the named metadata a directory?  (like ``os.path.isdir()``)"""
    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""
    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""
    # Interface specification only (methods omit `self`); implemented by
    # the NullProvider/EggProvider/ZipProvider family named in __all__.
    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`
        `manager` must be an ``IResourceManager``"""
    def has_resource(resource_name):
        """Does the package contain the named resource?"""
    def resource_isdir(resource_name):
        """Is the named resource a directory?  (like ``os.path.isdir()``)"""
    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        self.entries = []          # path entries, in order
        self.entry_keys = {}       # path entry -> list of project keys
        self.by_key = {}           # project key -> active Distribution
        self.callbacks = []        # subscribers notified of new dists
        if entries is None:
            entries = sys.path
        for entry in entries:
            self.add_entry(entry)
    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it
        ``find_distributions(entry,True)`` is used to find distributions
        corresponding to the path entry, and they are added. `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            # insert=False: the entry is already in self.entries.
            self.add(dist, entry, False)
    def __contains__(self,dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist
    def find(self, req):
        """Find a distribution matching requirement `req`
        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`.  But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            # Active distribution's version does not satisfy `req`.
            raise VersionConflict(dist,req) # XXX add more info
        else:
            return dist
    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`
        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]
    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # Same caller-globals trick as the module-level run_script():
        # the script executes in the invoking module's (cleared) namespace.
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)
    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set
        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue
            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key]=1
                    yield self.by_key[key]
    def add(self, dist, entry=None, insert=True):
        """Add `dist` to working set, associated with `entry`
        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).
        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set.  If it's added, any
        callbacks registered with the ``subscribe()`` method will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry)
        if entry is None:
            entry = dist.location
        # Record the project key under both the given entry and the
        # distribution's own location.
        keys = self.entry_keys.setdefault(entry,[])
        keys2 = self.entry_keys.setdefault(dist.location,[])
        if dist.key in self.by_key:
            return      # ignore hidden distros
        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        # Notify subscribed callbacks of the newly activated dist.
        self._added_new(dist)
    def resolve(self, requirements, env=None, installer=None, replacement=True):
        """List all distributions needed to (recursively) meet `requirements`
        `requirements` must be a sequence of ``Requirement`` objects. `env`,
        if supplied, should be an ``Environment`` instance. If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set. `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.
        `replacement`, if true, rewrites requirements on setuptools into
        requirements on 'distribute' (see ``_override_setuptools``).
        Raises ``DistributionNotFound`` if a requirement cannot be
        satisfied, or ``VersionConflict`` if an already-selected
        distribution does not satisfy a later requirement.
        """
        # Requirements are reversed so pop(0)/extend(...[::-1]) together
        # give a breadth-first walk of the dependency graph.
        requirements = list(requirements)[::-1] # set up the stack
        processed = {} # set of processed requirements
        best = {} # key -> dist
        to_activate = []
        while requirements:
            req = requirements.pop(0) # process dependencies breadth-first
            if _override_setuptools(req) and replacement:
                req = Requirement.parse('distribute')
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None:
                    if env is None:
                        env = Environment(self.entries)
                    dist = best[req.key] = env.best_match(req, self, installer)
                    if dist is None:
                        #msg = ("The '%s' distribution was not found on this "
                        # "system, and is required by this application.")
                        #raise DistributionNotFound(msg % req)
                        # unfortunately, zc.buildout uses a str(err)
                        # to get the name of the distribution here..
                        raise DistributionNotFound(req)
            to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                raise VersionConflict(dist,req) # XXX put more info here
            # Push this distribution's own requirements for processing.
            requirements.extend(dist.requires(req.extras)[::-1])
            processed[req] = True
        return to_activate # return list of distros to activate
    def find_plugins(self,
        plugin_env, full_env=None, installer=None, fallback=True
        ):
        """Find all activatable distributions in `plugin_env`
        Example usage::
            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            map(working_set.add, distributions) # add plugins+libs to sys.path
            print 'Could not load', errors # display errors
        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions. If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.
        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.
        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies. `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        plugin_projects.sort() # scan project names in alphabetic order
        error_info = {}
        distributions = {}
        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env
        # Resolve against a scratch copy of this working set so that a
        # failed plugin never mutates the real set.
        shadow_set = self.__class__([])
        map(shadow_set.add, self) # put all our entries in shadow_set
        for project_name in plugin_projects:
            # plugin_env[project_name] is newest-to-oldest, so the first
            # version that resolves wins.
            for dist in plugin_env[project_name]:
                req = [dist.as_requirement()]
                try:
                    resolvees = shadow_set.resolve(req, env, installer)
                except ResolutionError,v:
                    error_info[dist] = v # save error info
                    if fallback:
                        continue # try the next older version of project
                    else:
                        break # give up on this project, keep going
                else:
                    map(shadow_set.add, resolvees)
                    # dict used as an ordered-insertion set of distributions
                    distributions.update(dict.fromkeys(resolvees))
                    # success, no need to try any more versions of this project
                    break
        distributions = list(distributions)
        distributions.sort()
        return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:])
def __setstate__(self, (entries, keys, by_key, callbacks)):
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class Environment(object):
    """Searchable snapshot of distributions on a search path"""
    # NOTE: the platform/python defaults below are evaluated once, at
    # import time — they snapshot the *current* interpreter, which is
    # the intended behavior.
    def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
        """Snapshot distributions available on a search path
        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used.
        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with. If
        unspecified, it defaults to the current platform. `python` is an
        optional string naming the desired version of Python (e.g. ``'2.4'``);
        it defaults to the current version.
        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        # _distmap: project key -> list of distributions (unsorted).
        # _cache: project key -> the same list, once sorted by __getitem__.
        self._distmap = {}
        self._cache = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)
    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?
        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        return (self.python is None or dist.py_version is None
            or dist.py_version==self.python) \
            and compatible_platforms(dist.platform,self.platform)
    def remove(self, dist):
        """Remove `dist` from the environment"""
        # Raises KeyError/ValueError if `dist` was never added.
        self._distmap[dist.key].remove(dist)
    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment
        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used. Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path
        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)
    def __getitem__(self,project_name):
        """Return a newest-to-oldest list of distributions for `project_name`
        """
        try:
            # Fast path: exact-case hit on the sorted cache.
            return self._cache[project_name]
        except KeyError:
            # Project keys are stored lowercased.
            project_name = project_name.lower()
            if project_name not in self._distmap:
                return []
        if project_name not in self._cache:
            # Sort lazily, on first lookup of this project.
            dists = self._cache[project_name] = self._distmap[project_name]
            _sort_dists(dists)
        return self._cache[project_name]
    def add(self,dist):
        """Add `dist` if we ``can_add()`` it and it isn't already added"""
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key,[])
            if dist not in dists:
                dists.append(dist)
                # Keep the cached list sorted if it has been built.
                if dist.key in self._cache:
                    _sort_dists(self._cache[dist.key])
    def best_match(self, req, working_set, installer=None):
        """Find distribution best matching `req` and usable on `working_set`
        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active. (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.) If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`. If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        dist = working_set.find(req)
        if dist is not None:
            return dist
        # self[req.key] is newest-to-oldest, so the first match is best.
        for dist in self[req.key]:
            if dist in req:
                return dist
        return self.obtain(req, installer) # try and download/install
    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)
        Obtain a distro that matches requirement (e.g. via download). In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead. This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)
    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            # Only yield keys that still have at least one distribution.
            if self[key]: yield key
    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other,Distribution):
            self.add(other)
        elif isinstance(other,Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self
    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # The result accepts everything (platform/python filters off),
        # since the operands may have been built with different filters.
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
# Legacy alias retained so code written against older pkg_resources
# releases keeps working.
AvailableDistributions = Environment # XXX backward compatibility
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource
    The following attributes are available from instances of this exception:
    manager
        The resource manager that raised this exception
    cache_path
        The base directory for resource extraction
    original_error
        The exception instance that caused extraction to fail
    """
    # Attributes are assigned by ResourceManager.extraction_error();
    # this class adds no behavior of its own.
class ResourceManager:
    """Manage resource extraction and packages"""
    # Base directory for extracted resources; None means "use
    # get_default_cache()".  Shared across instances unless set.
    extraction_path = None
    def __init__(self):
        # Paths handed out by get_cache_path(), tracked for cleanup.
        self.cached_files = {}
    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)
    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )
    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )
    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )
    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )
    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )
    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        # Wrap the in-flight exception in an ExtractionError that tells
        # the user how to fix the most common cause (an unwritable cache
        # directory), then re-raise.
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()
        err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
        )
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err
    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`
        The parent directory of the resulting path will be created if it does
        not already exist. `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension. `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.
        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        except:
            # Translate any directory-creation failure into a
            # user-friendly ExtractionError (which re-raises).
            self.extraction_error()
        # Remember what we handed out so cleanup_resources() could
        # remove it later.
        self.cached_files[target_path] = 1
        return target_path
    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`
        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.
        Resource providers should call this method ONLY after successfully
        extracting a compressed resource. They must NOT call it on resources
        that are already in the filesystem.
        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """
        if os.name == 'posix':
            # Make the resource executable
            mode = ((os.stat(tempname).st_mode) | 0555) & 07777
            os.chmod(tempname, mode)
    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.
        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``. (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks. See that routine's documentation for more
        details.)
        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``. You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done. There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.
        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )
        self.extraction_path = path
    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process. This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # Intentionally unimplemented here (returns None).
        # XXX
def get_default_cache():
    """Determine the default cache location
    This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
    Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
    "Application Data" directory. On all other systems, it's "~/.python-eggs".
    Raises ``RuntimeError`` on Windows when no suitable home directory
    can be derived from the environment.
    """
    try:
        return os.environ['PYTHON_EGG_CACHE']
    except KeyError:
        pass
    if os.name!='nt':
        return os.path.expanduser('~/.python-eggs')
    app_data = 'Application Data' # XXX this may be locale-specific!
    # Candidate (environment variables, subdirectory) pairs, in order of
    # decreasing preference; every variable in a tuple must be set for
    # that candidate to be usable.
    app_homes = [
        (('APPDATA',), None), # best option, should be locale-safe
        (('USERPROFILE',), app_data),
        (('HOMEDRIVE','HOMEPATH'), app_data),
        (('HOMEPATH',), app_data),
        (('HOME',), None),
        (('WINDIR',), app_data), # 95/98/ME
    ]
    for keys, subdir in app_homes:
        dirname = ''
        for key in keys:
            if key in os.environ:
                dirname = os.path.join(dirname, os.environ[key])
            else:
                break
        else:
            # All variables for this candidate were present; use it.
            if subdir:
                dirname = os.path.join(dirname,subdir)
            return os.path.join(dirname, 'Python-Eggs')
    else:
        # BUGFIX: the message previously misspelled "environment".
        raise RuntimeError(
            "Please set the PYTHON_EGG_CACHE environment variable"
        )
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name
    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    # Anything outside [A-Za-z0-9.] is illegal; collapse each run of
    # illegal characters into one dash.
    illegal_run = re.compile('[^A-Za-z0-9.]+')
    return illegal_run.sub('-', name)
def safe_version(version):
    """Convert an arbitrary string to a standard version string
    Spaces become dots, and all other non-alphanumeric characters become
    dashes, with runs of multiple dashes condensed to a single dash.
    """
    # Spaces are mapped to dots first so they never fall through to the
    # dash substitution below.
    dotted = version.replace(' ', '.')
    return re.compile('[^A-Za-z0-9.]+').sub('-', dotted)
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name
    Any runs of non-alphanumeric characters are replaced with a single '_',
    and the result is always lowercased.
    """
    # Substitute first, lowercase second (same order as historically),
    # so case folding never affects which characters get replaced.
    normalized = re.compile('[^A-Za-z0-9.]+').sub('_', extra)
    return normalized.lower()
def to_filename(name):
    """Convert a project or version name to its filename-escaped form
    Any '-' characters are currently replaced with '_'.
    """
    # Split on dashes and rejoin with underscores — equivalent to a
    # character-for-character replace.
    return '_'.join(name.split('-'))
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
    # Subclasses fill these in; egg_name/egg_info stay None for
    # non-egg modules.
    egg_name = None
    egg_info = None
    loader = None
    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))
    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)
    def get_resource_stream(self, manager, resource_name):
        return StringIO(self.get_resource_string(manager, resource_name))
    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))
    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))
    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info,name))
    # Metadata is raw bytes on Python 2, decoded text on Python 3;
    # the appropriate get_metadata is chosen at class-creation time.
    if sys.version_info <= (3,):
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info,name))
    else:
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info,name)).decode("utf-8")
    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
    def resource_isdir(self,resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))
    def metadata_isdir(self,name):
        return self.egg_info and self._isdir(self._fn(self.egg_info,name))
    def resource_listdir(self,resource_name):
        return self._listdir(self._fn(self.module_path,resource_name))
    def metadata_listdir(self,name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info,name))
        return []
    def run_script(self,script_name,namespace):
        # Scripts live under the egg's metadata as 'scripts/<name>'.
        script = 'scripts/'+script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        # Normalize all line endings to '\n' before compiling.
        script_text = self.get_metadata(script).replace('\r\n','\n')
        script_text = script_text.replace('\r','\n')
        script_filename = self._fn(self.egg_info,script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            execfile(script_filename, namespace, namespace)
        else:
            # No real file (e.g. zipped egg): seed linecache so
            # tracebacks can still show source lines.
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text,script_filename,'exec')
            exec script_code in namespace, namespace
    # The _has/_isdir/_listdir/_get primitives must be supplied by a
    # registered subclass; the base class cannot know the loader's
    # storage format.
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _fn(self, base, resource_name):
        # Resource names use '/' separators; join them using the
        # platform's path rules.
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base
    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )
# Fallback registration: any loader type without a more specific
# provider gets NullProvider (whose primitives raise NotImplementedError).
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""
    def __init__(self,module):
        NullProvider.__init__(self,module)
        self._setup_prefix()
    def _setup_prefix(self):
        # we assume here that our metadata may be nested inside a "basket"
        # of multiple eggs; that's why we use module_path instead of .archive
        # Walk up the path until a component ending in '.egg' is found;
        # that component is the egg root.
        path = self.module_path
        old = None
        while path!=old:
            if path.lower().endswith('.egg'):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                break
            # os.path.split eventually returns its input unchanged at
            # the filesystem root, which terminates the loop.
            old = path
            path, base = os.path.split(path)
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""
    # Implements NullProvider's storage primitives in terms of the
    # real local filesystem.
    def _has(self, path):
        return os.path.exists(path)
    def _isdir(self,path):
        return os.path.isdir(path)
    def _listdir(self,path):
        return os.listdir(path)
    def get_resource_stream(self, manager, resource_name):
        return open(self._fn(self.module_path, resource_name), 'rb')
    def _get(self, path):
        stream = open(path, 'rb')
        try:
            return stream.read()
        finally:
            # try/finally rather than `with`: this file still supports
            # Python versions predating the with-statement.
            stream.close()
# Modules imported by the classic (non-PEP-302) machinery have no
# __loader__, so NullProvider.__init__ records None — map that case to
# the filesystem provider.
register_loader_type(type(None), DefaultProvider)
# Also cover importlib's source-file loader when it is available.
if importlib_bootstrap is not None:
    register_loader_type(importlib_bootstrap.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""
    # There is no backing storage: nothing exists, nothing is a
    # directory, every resource reads as empty, and every listing is
    # empty.
    module_path = None
    def __init__(self):
        # Deliberately skip NullProvider.__init__, which would try to
        # inspect a real module object.
        pass
    def _has(self, path):
        return False
    def _isdir(self, path):
        return False
    def _get(self, path):
        return ''
    def _listdir(self, path):
        return []
# The provider is stateless, so a single shared instance suffices.
empty_provider = EmptyProvider()
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""
    # Cached list of "eager" resource names; built lazily.
    eagers = None
    def __init__(self, module):
        EggProvider.__init__(self,module)
        # Reuse zipimport's already-parsed directory for our archive.
        self.zipinfo = zipimport._zip_directory_cache[self.loader.archive]
        self.zip_pre = self.loader.archive+os.sep
    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.zip_pre)
        )
    def _parts(self,zip_path):
        # Convert a zipfile subpath into an egg-relative path part list
        fspath = self.zip_pre+zip_path # pseudo-fs path
        if fspath.startswith(self.egg_root+os.sep):
            return fspath[len(self.egg_root)+1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.egg_root)
        )
    def get_resource_filename(self, manager, resource_name):
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            # An eager resource forces extraction of *all* eager
            # resources (e.g. native libraries that load one another).
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)
    def _extract_resource(self, manager, zip_path):
        # Directories (present in the archive index) are extracted by
        # recursively extracting each child.
        if zip_path in self._index():
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            return os.path.dirname(last) # return the extracted directory name
        zip_stat = self.zipinfo[zip_path]
        # zipimport's cache rows hold DOS time/date words and the
        # uncompressed size at these indexes.
        t,d,size = zip_stat[5], zip_stat[6], zip_stat[3]
        date_time = (
            (d>>9)+1980, (d>>5)&0xF, d&0x1F, # ymd
            (t&0xFFFF)>>11, (t>>5)&0x3F, (t&0x1F) * 2, 0, 0, -1 # hms, etc.
        )
        timestamp = time.mktime(date_time)
        try:
            if not WRITE_SUPPORT:
                raise IOError('"os.rename" and "os.unlink" are not supported '
                    'on this platform')
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )
            if os.path.isfile(real_path):
                stat = os.stat(real_path)
                if stat.st_size==size and stat.st_mtime==timestamp:
                    # size and stamp match, don't bother extracting
                    return real_path
            # Extract to a unique temp name, then rename into place —
            # this keeps concurrent extractors from trampling each other.
            outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            # Stamp the temp file with the archive's timestamp so the
            # freshness check above works on later calls.
            utime(tmpnam, (timestamp,timestamp))
            manager.postprocess(tmpnam, real_path)
            try:
                rename(tmpnam, real_path)
            except os.error:
                if os.path.isfile(real_path):
                    stat = os.stat(real_path)
                    if stat.st_size==size and stat.st_mtime==timestamp:
                        # size and stamp match, somebody did it just ahead of
                        # us, so we're done
                        return real_path
                    elif os.name=='nt': # Windows, del old file and retry
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise
        except os.error:
            manager.extraction_error() # report a user-friendly error
        return real_path
    def _get_eager_resources(self):
        if self.eagers is None:
            eagers = []
            # These two metadata files each list resource names, one
            # per line, that must all be extracted together.
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers
    def _index(self):
        # Lazily build (and cache) a directory index mapping each
        # parent path to the names of its immediate children.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind
    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        # A name "exists" if it is a file in the archive or a directory
        # in the derived index.
        return zip_path in self.zipinfo or zip_path in self._index()
    def _isdir(self,fspath):
        return self._zipinfo_name(fspath) in self._index()
    def _listdir(self,fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))
    def _eager_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.egg_root,resource_name))
    def _resource_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.module_path,resource_name))
# Modules loaded straight out of a zip/egg archive use ZipProvider.
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files
    Usage::
        metadata = FileMetadata("/path/to/PKG-INFO")
    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    """
    def __init__(self,path):
        self.path = path
    def has_metadata(self,name):
        # PKG-INFO is the only metadata this provider knows about.
        return name=='PKG-INFO'
    def get_metadata(self,name):
        if name=='PKG-INFO':
            f = open(self.path,'rU')
            try:
                # BUGFIX: close the file even if read() raises, so the
                # descriptor is not leaked on I/O errors.
                metadata = f.read()
            finally:
                f.close()
            return metadata
        raise KeyError("No metadata except PKG-INFO is available")
    def get_metadata_lines(self,name):
        return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories
    Usage::
        # Development eggs:
        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir,project_name=dist_name,metadata=metadata)
        # Unpacked egg directories:
        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """
    # Unlike EggProvider, the module path and metadata directory are
    # supplied explicitly, so no prefix discovery is needed.
    def __init__(self, path, egg_info):
        self.module_path = path
        self.egg_info = egg_info
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""
    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        # Mirror ZipProvider.__init__ but take the zipimporter directly
        # instead of deriving it from a module object.
        self.zipinfo = zipimport._zip_directory_cache[importer.archive]
        self.zip_pre = importer.archive+os.sep
        self.loader = importer
        if importer.prefix:
            # Importer rooted at a subdirectory inside the archive.
            self.module_path = os.path.join(importer.archive, importer.prefix)
        else:
            self.module_path = importer.archive
        self._setup_prefix()
class ImpWrapper:
    """PEP 302 Importer that wraps Python's "normal" import algorithm"""
    # NOTE: on Python 2.5+, this name is rebound to pkgutil.ImpImporter
    # further down in this module; this fallback exists for 2.3/2.4.
    def __init__(self, path=None):
        self.path = path
    def find_module(self, fullname, path=None):
        subname = fullname.split(".")[-1]
        # A path-less wrapper handles only top-level modules; dotted
        # names belong to a per-directory wrapper.
        if subname != fullname and self.path is None:
            return None
        if self.path is None:
            path = None
        else:
            path = [self.path]
        try:
            file, filename, etc = imp.find_module(subname, path)
        except ImportError:
            return None
        return ImpLoader(file, filename, etc)
class ImpLoader:
    """PEP 302 Loader that wraps Python's "normal" import algorithm"""
    def __init__(self, file, filename, etc):
        # `etc` is the (suffix, mode, type) triple from imp.find_module.
        self.file = file
        self.filename = filename
        self.etc = etc
    def load_module(self, fullname):
        try:
            mod = imp.load_module(fullname, self.file, self.filename, self.etc)
        finally:
            # imp.find_module opened the file; always close it.
            if self.file: self.file.close()
        # Note: we don't set __loader__ because we want the module to look
        # normal; i.e. this is just a wrapper for standard import machinery
        return mod
def get_importer(path_item):
    """Retrieve a PEP 302 "importer" for the given path item
    If there is no importer, this returns a wrapper around the builtin import
    machinery. The returned importer is only cached if it was created by a
    path hook.
    """
    # NOTE: on Python 2.5+ this function is replaced by
    # pkgutil.get_importer (see the try/except below this definition).
    try:
        importer = sys.path_importer_cache[path_item]
    except KeyError:
        # Cache miss: ask each registered path hook in turn; the
        # for/else records None when no hook accepts the item.
        for hook in sys.path_hooks:
            try:
                importer = hook(path_item)
            except ImportError:
                pass
            else:
                break
        else:
            importer = None
        sys.path_importer_cache.setdefault(path_item,importer)
    if importer is None:
        # Fall back to the builtin import machinery; this fallback is
        # deliberately NOT cached.
        try:
            importer = ImpWrapper(path_item)
        except ImportError:
            pass
    return importer
# Prefer the stdlib implementations when available (Python 2.5+); the
# definitions above are only needed on 2.3/2.4.
try:
    from pkgutil import get_importer, ImpImporter
except ImportError:
    pass # Python 2.3 or 2.4, use our own implementation
else:
    ImpWrapper = ImpImporter # Python 2.5, use pkgutil's implementation
    del ImpLoader, ImpImporter
# Registry mapping importer types to distribution-finder callables;
# declared via _declare_state (presumably so module state survives
# re-import — TODO confirm against _declare_state's definition).
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items
    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item. See ``pkg_resources.find_on_path`` for an example."""
    # Later registrations for the same importer type overwrite earlier ones.
    _distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    # Dispatch to the finder registered for this path item's importer
    # type (falling back through the type's MRO via _find_adapter).
    importer = get_importer(path_item)
    matched_finder = _find_adapter(_distribution_finders, importer)
    return matched_finder(importer, path_item, only)
def find_in_zip(importer, path_item, only=False):
    """Yield distributions found in the zip archive behind `importer`.
    The archive itself is yielded when it carries PKG-INFO metadata;
    unless `only` is true, nested ``*.egg`` entries inside the archive
    are searched recursively as well.
    """
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        return # don't yield nested distros
    for subitem in metadata.resource_listdir('/'):
        if subitem.endswith('.egg'):
            subpath = os.path.join(path_item, subitem)
            # Recurse with a fresh zipimporter rooted at the nested egg.
            for dist in find_in_zip(zipimport.zipimporter(subpath), subpath):
                yield dist
# Zip archives (including eggs) on sys.path are searched by find_in_zip.
register_finder(zipimport.zipimporter, find_in_zip)
def StringIO(*args, **kw):
    """Thunk to load the real StringIO on demand"""
    # Self-replacing stub: the first call rebinds the module-global
    # `StringIO` to the real class (cStringIO when available), so
    # subsequent calls skip this function entirely.
    global StringIO
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    return StringIO(*args,**kw)
def find_nothing(importer, path_item, only=False):
    """Finder that never yields a distribution, whatever the path item.
    Used as the fallback for importer types with no specific finder.
    """
    return ()
# Fallback: importer types with no more specific finder yield nothing.
register_finder(object,find_nothing)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)
    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if path_item.lower().endswith('.egg'):
            # unpacked egg
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item,'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            for entry in os.listdir(path_item):
                lower = entry.lower()
                if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
                    fullpath = os.path.join(path_item, entry)
                    if os.path.isdir(fullpath):
                        # egg-info directory, allow getting metadata
                        metadata = PathMetadata(path_item, fullpath)
                    else:
                        # A plain .egg-info file is treated as PKG-INFO.
                        metadata = FileMetadata(fullpath)
                    yield Distribution.from_location(
                        path_item,entry,metadata,precedence=DEVELOP_DIST
                    )
                elif not only and lower.endswith('.egg'):
                    # Nested egg (file or directory): recurse through
                    # the generic finder dispatch.
                    for dist in find_distributions(os.path.join(path_item, entry)):
                        yield dist
                elif not only and lower.endswith('.egg-link'):
                    # An .egg-link file names, one per line, locations
                    # to search; only the first non-blank line is used.
                    entry_file = open(os.path.join(path_item, entry))
                    try:
                        entry_lines = entry_file.readlines()
                    finally:
                        entry_file.close()
                    for line in entry_lines:
                        if not line.strip(): continue
                        for item in find_distributions(os.path.join(path_item,line.rstrip())):
                            yield item
                        break
# Ordinary filesystem directories on sys.path are searched by
# find_on_path, both for the classic import wrapper and (when present)
# importlib's FileFinder.
register_finder(ImpWrapper,find_on_path)
if importlib_bootstrap is not None:
    register_finder(importlib_bootstrap.FileFinder, find_on_path)
# State for namespace-package support: handler registry and the set of
# declared namespace packages (see register_namespace_handler below).
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `namespace_handler` is a callable like this::

        def namespace_handler(importer, path_entry, moduleName, module):
            # return a path_entry to use for child packages

    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath.  For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""
    importer = get_importer(path_item)
    if importer is None:
        # no importer can handle this path item at all
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        # this path item does not contain the package; nothing to add
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # first sighting: create an empty namespace module and attach it
        # as an attribute of its parent package (if any)
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []; _set_parent_ns(packageName)
    elif not hasattr(module,'__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer,path_item,packageName,module)
    if subpath is not None:
        # append the new subpath, let the loader (re)initialize the module,
        # then restore the accumulated __path__ the loader may have reset
        path = module.__path__; path.append(subpath)
        loader.load_module(packageName); module.__path__ = path
    return subpath
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            # already declared; nothing to do
            return
        path, parent = sys.path, None
        if '.' in packageName:
            # declare the parent namespace first, then search only the
            # parent's __path__ for this child package
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)
        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent,[]).append(packageName)
        _namespace_packages.setdefault(packageName,[])
        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)
    finally:
        # always release the import lock, even on error
        imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    imp.acquire_lock()
    try:
        # give every namespace package under `parent` a chance to pick up a
        # subpath of the newly-added path item, recursing into children
        for package in _namespace_packages.get(parent,()):
            subpath = _handle_ns(package, path_item)
            if subpath: fixup_namespace_packages(subpath,package)
    finally:
        imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    for item in module.__path__:
        if _normalize_cached(item)==normalized:
            # an equivalent path (possibly spelled differently) is already
            # on the package's __path__; contribute nothing
            break
    else:
        # Only return the path if it's not already there
        return subpath
register_namespace_handler(ImpWrapper,file_ns_handler)
register_namespace_handler(zipimport.zipimporter,file_ns_handler)
if importlib_bootstrap is not None:
register_namespace_handler(importlib_bootstrap.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    """Namespace handler that declines to contribute any subpath.

    Returning None tells the namespace machinery there is nothing to add
    for this importer type.
    """
    result = None
    return result
register_namespace_handler(object,null_ns_handler)
def normalize_path(filename):
    """Return a canonical form of `filename` for comparison purposes.

    Symlinks are resolved first, then case is folded on case-insensitive
    filesystems, so two spellings of the same file compare equal.
    """
    resolved = os.path.realpath(filename)
    return os.path.normcase(resolved)
def _normalize_cached(filename,_cache={}):
    # NOTE: the mutable default argument is deliberate -- it serves as a
    # per-process memo table so each path is normalized (realpath+normcase)
    # only once.
    try:
        return _cache[filename]
    except KeyError:
        _cache[filename] = result = normalize_path(filename)
        return result
def _set_parent_ns(packageName):
    # Bind a freshly-created namespace module as an attribute of its parent
    # package, e.g. make ``sys.modules['a'].b`` refer to sys.modules['a.b'].
    parts = packageName.split('.')
    name = parts.pop()
    if parts:
        parent = '.'.join(parts)
        setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
    # `strs` may be a single string or an arbitrarily nested iterable of
    # strings (Python 2: ``basestring`` covers both str and unicode).
    if isinstance(strs,basestring):
        for s in strs.splitlines():
            s = s.strip()
            if s and not s.startswith('#'):     # skip blank lines/comments
                yield s
    else:
        # recurse into nested sequences of strings
        for ss in strs:
            for s in yield_lines(ss):
                yield s
LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation
DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra
VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info
COMMA = re.compile(r"\s*,").match # comma between items
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"(?P<name>[^-]+)"
r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
re.VERBOSE | re.IGNORECASE
).match
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
    # Split `s` on runs of digits, runs of letters, '.', and '-' (see
    # `component_re`), mapping special spellings via `replace`
    # ('pre'/'preview'/'rc' -> 'c', 'dev' -> '@', '-' -> 'final-').
    for part in component_re.split(s):
        part = replace(part,part)
        if not part or part=='.':
            continue
        if part[:1] in '0123456789':
            yield part.zfill(8)    # pad for numeric comparison
        else:
            # '*' prefix makes alpha parts sort before padded numerics
            yield '*'+part
    yield '*final'  # ensure that alpha/beta/candidate are before final
def parse_version(s):
    """Convert a version string to a chronologically-sortable key

    This is a rough cross between distutils' StrictVersion and LooseVersion;
    if you give it versions that would work with StrictVersion, then it behaves
    the same; otherwise it acts like a slightly-smarter LooseVersion. It is
    *possible* to create pathological version coding schemes that will fool
    this parser, but they should be very rare in practice.

    The returned value will be a tuple of strings.  Numeric portions of the
    version are padded to 8 digits so they will compare numerically, but
    without relying on how numbers compare relative to strings.  Dots are
    dropped, but dashes are retained.  Trailing zeros between alpha segments
    or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
    "2.4". Alphanumeric parts are lower-cased.

    The algorithm assumes that strings like "-" and any alpha string that
    alphabetically follows "final" represents a "patch level".  So, "2.4-1"
    is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
    considered newer than "2.4-1", which in turn is newer than "2.4".

    Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
    come before "final" alphabetically) are assumed to be pre-release versions,
    so that the version "2.4" is considered newer than "2.4a1".

    Finally, to handle miscellaneous cases, the strings "pre", "preview", and
    "rc" are treated as if they were "c", i.e. as though they were release
    candidates, and therefore are not as new as a version string that does not
    contain them, and "dev" is replaced with an '@' so that it sorts lower
    than any other pre-release tag.
    """
    parts = []
    for part in _parse_version_parts(s.lower()):
        if part.startswith('*'):
            if part<'*final':   # remove '-' before a prerelease tag
                while parts and parts[-1]=='*final-': parts.pop()
            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1]=='00000000':
                parts.pop()
        parts.append(part)
    return tuple(parts)
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, env=None, installer=None):
if require: self.require(env, installer)
entry = __import__(self.module_name, globals(),globals(), ['__name__'])
for attr in self.attrs:
try:
entry = getattr(entry,attr)
except AttributeError:
raise ImportError("%r has no %r attribute" % (entry,attr))
return entry
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
map(working_set.add,
working_set.resolve(self.dist.requires(self.extras),env,installer))
#@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1,extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
try:
attrs = extras = ()
name,value = src.split('=',1)
if '[' in value:
value,extras = value.split('[',1)
req = Requirement.parse("x["+extras)
if req.specs: raise ValueError
extras = req.extras
if ':' in value:
value,attrs = value.split(':',1)
if not MODULE(attrs.rstrip()):
raise ValueError
attrs = attrs.rstrip().split('.')
except ValueError:
raise ValueError(
"EntryPoint must be in 'name=module:attrs [extras]' format",
src
)
else:
return cls(name.strip(), value.strip(), attrs, extras, dist)
parse = classmethod(parse)
#@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name]=ep
return this
parse_group = classmethod(parse_group)
#@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data,dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
parse_map = classmethod(parse_map)
def _remove_md5_fragment(location):
    # Strip a '#md5=...' checksum fragment from a URL so that otherwise
    # identical download locations compare equal; falsy locations normalize
    # to the empty string, and other fragments are preserved.
    if not location:
        return ''
    parsed = urlparse(location)
    if parsed[-1].startswith('md5='):
        # rebuild the URL with an empty fragment
        return urlunparse(parsed[:-1] + ('',))
    return location
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self,
location=None, metadata=None, project_name=None, version=None,
py_version=PY_MAJOR, platform=None, precedence = EGG_DIST
):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
#@classmethod
def from_location(cls,location,basename,metadata=None,**kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
# .dist-info gets much metadata differently
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name','ver','pyver','plat'
)
cls = _distributionImpl[ext.lower()]
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
from_location = classmethod(from_location)
hashcmp = property(
lambda self: (
getattr(self,'parsed_version',()),
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version,
self.platform
)
)
def __hash__(self): return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
#@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
key = property(key)
#@property
def parsed_version(self):
try:
return self._parsed_version
except AttributeError:
self._parsed_version = pv = parse_version(self.version)
return pv
parsed_version = property(parsed_version)
#@property
def version(self):
try:
return self._version
except AttributeError:
for line in self._get_metadata(self.PKG_INFO):
if line.lower().startswith('version:'):
self._version = safe_version(line.split(':',1)[1].strip())
return self._version
else:
raise ValueError(
"Missing 'Version:' header and/or %s file" % self.PKG_INFO, self
)
version = property(version)
#@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra,reqs in split_sections(self._get_metadata(name)):
if extra: extra = safe_extra(extra)
dm.setdefault(extra,[]).extend(parse_requirements(reqs))
return dm
_dep_map = property(_dep_map)
def requires(self,extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None,()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self,name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self,path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None: path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
map(declare_namespace, self._get_metadata('namespace_packages.txt'))
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-'+self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self,self.location)
else:
return str(self)
def __str__(self):
try: version = getattr(self,'version',None)
except ValueError: version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name,version)
def __getattr__(self,attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError,attr
return getattr(self._provider, attr)
#@classmethod
def from_filename(cls,filename,metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
from_filename = classmethod(from_filename)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
return Requirement.parse('%s==%s' % (self.project_name, self.version))
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group,name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group,name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc = None):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if self.project_name == 'setuptools':
try:
version = self.version
except ValueError:
version = ''
if '0.7' in version:
raise ValueError(
"A 0.7-series setuptools cannot be installed "
"with distribute. Found one at %s" % str(self.location))
if not loc:
return
if path is sys.path:
self.check_version_conflict()
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath= map(_normalize_cached, path)
bp = None
for p, item in enumerate(npath):
if item==nloc:
break
elif item==bdir and self.precedence==EGG_DIST:
# if it's an .egg, give it precedence over its directory
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while 1:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
p = np # ha!
return
def check_version_conflict(self):
if self.key=='distribute':
return # ignore the inevitable setuptools self-conflicts :(
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages
):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for "+repr(self))
return False
return True
def clone(self,**kw):
"""Copy this distribution, substituting in any changed keyword args"""
for attr in (
'project_name', 'version', 'py_version', 'platform', 'location',
'precedence'
):
kw.setdefault(attr, getattr(self,attr,None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
#@property
def extras(self):
return [dep for dep in self._dep_map if dep]
extras = property(extras)
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
from email.parser import Parser
self._pkg_info = Parser().parsestr(self.get_metadata(self.PKG_INFO))
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _preparse_requirement(self, requires_dist):
"""Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
Split environment marker, add == prefix to version specifiers as
necessary, and remove parenthesis.
"""
parts = requires_dist.split(';', 1) + ['']
distvers = parts[0].strip()
mark = parts[1].strip()
distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
distvers = distvers.replace('(', '').replace(')', '')
return (distvers, mark)
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from _markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = parse_requirements(distvers).next()
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {'.egg': Distribution,
'.egg-info': Distribution,
'.dist-info': DistInfoDistribution }
def issue_warning(*args,**kw):
    # Emit a warning attributed to the first stack frame *outside* this
    # module, so users see their own call site flagged rather than a line
    # inside pkg_resources.
    level = 1
    g = globals()
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(level).f_globals is g:
            level += 1
    except ValueError:
        # ran off the top of the stack; fall back to the outermost frame
        pass
    from warnings import warn
    warn(stacklevel = level+1, *args, **kw)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`

    `strs` must be an instance of ``basestring``, or a (possibly-nested)
    iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))
    def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
        # Generic scanner: repeatedly match ITEM starting at position `p`
        # of `line` (pulling in continuation lines as needed, advancing past
        # commas) until TERMINATOR matches.  Returns the (possibly new)
        # line, the updated position, and the captured items.
        items = []
        while not TERMINATOR(line,p):
            if CONTINUE(line,p):
                try:
                    line = lines.next(); p = 0
                except StopIteration:
                    raise ValueError(
                        "\\ must not appear on the last nonblank line"
                    )
            match = ITEM(line,p)
            if not match:
                raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
            items.append(match.group(*groups))
            p = match.end()
            match = COMMA(line,p)
            if match:
                p = match.end() # skip the comma
            elif not TERMINATOR(line,p):
                raise ValueError(
                    "Expected ',' or end-of-list in",line,"at",line[p:]
                )
        match = TERMINATOR(line,p)
        if match: p = match.end()   # skip the terminator, if any
        return line, p, items
    for line in lines:
        match = DISTRO(line)
        if not match:
            raise ValueError("Missing distribution spec", line)
        project_name = match.group(1)
        p = match.end()
        extras = []
        # optional "[extra1,extra2]" list of extras
        match = OBRACKET(line,p)
        if match:
            p = match.end()
            line, p, extras = scan_list(
                DISTRO, CBRACKET, line, p, (1,), "'extra' name"
            )
        # optional comma-separated version specifiers, e.g. ">=1.0,<2.0"
        line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
        specs = [(op,safe_version(val)) for op,val in specs]
        yield Requirement(project_name, specs, extras)
def _sort_dists(dists):
    # Sort `dists` in place from best to worst match: decorate-sort by
    # hashcmp ascending, then write the result back reversed via the
    # extended-slice assignment, yielding descending order.
    tmp = [(dist.hashcmp,dist) for dist in dists]
    tmp.sort()
    dists[::-1] = [d for hc,d in tmp]
class Requirement:
def __init__(self, project_name, specs, extras):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
self.unsafe_name, project_name = project_name, safe_name(project_name)
self.project_name, self.key = project_name, project_name.lower()
index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
index.sort()
self.specs = [(op,ver) for parsed,trans,op,ver in index]
self.index, self.extras = index, tuple(map(safe_extra,extras))
self.hashCmp = (
self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
frozenset(self.extras)
)
self.__hash = hash(self.hashCmp)
def __str__(self):
specs = ','.join([''.join(s) for s in self.specs])
extras = ','.join(self.extras)
if extras: extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, specs)
def __eq__(self,other):
return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
def __contains__(self,item):
if isinstance(item,Distribution):
if item.key <> self.key: return False
if self.index: item = item.parsed_version # only get if we need it
elif isinstance(item,basestring):
item = parse_version(item)
last = None
compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1
for parsed,trans,op,ver in self.index:
action = trans[compare(item,parsed)] # Indexing: 0, 1, -1
if action=='F': return False
elif action=='T': return True
elif action=='+': last = True
elif action=='-' or last is None: last = False
if last is None: last = True # no rules encountered
return last
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
#@staticmethod
def parse(s, replacement=True):
reqs = list(parse_requirements(s))
if reqs:
if len(reqs) == 1:
founded_req = reqs[0]
# if asked for setuptools distribution
# and if distribute is installed, we want to give
# distribute instead
if _override_setuptools(founded_req) and replacement:
distribute = list(parse_requirements('distribute'))
if len(distribute) == 1:
return distribute[0]
return founded_req
else:
return founded_req
raise ValueError("Expected only one requirement", s)
raise ValueError("No requirements found", s)
parse = staticmethod(parse)
state_machine = {
# =><
'<' : '--T',
'<=': 'T-T',
'>' : 'F+F',
'>=': 'T+F',
'==': 'T..',
'!=': 'F++',
}
def _override_setuptools(req):
    """Return True when distribute wants to override a setuptools dependency.

    We want to override when the requirement is setuptools and the version is
    a variant of 0.6.
    """
    if req.project_name == 'setuptools':
        if not len(req.specs):
            # Just setuptools: ok
            return True
        # only '==', '>=' or '>' pins can force a 0.7-series setuptools
        for comparator, version in req.specs:
            if comparator in ['==', '>=', '>']:
                if '0.7' in version:
                    # We want some setuptools not from the 0.6 series.
                    return False
        return True
    return False
def _get_mro(cls):
    """Get an mro for a type or classic class"""
    if not isinstance(cls,type):
        # Python 2 classic class: graft it onto a new-style subclass so it
        # gains an __mro__, then drop the synthetic subclass itself ([1:]).
        class cls(cls,object): pass
        return cls.__mro__[1:]
    return cls.__mro__
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`"""
    # walk the MRO of ob's class so the most specific registered type wins;
    # returns None implicitly if no registered type matches
    for t in _get_mro(getattr(ob, '__class__', type(ob))):
        if t in registry:
            return registry[t]
def ensure_directory(path):
    """Ensure that the parent directory of `path` exists.

    `path` is a file name; its directory component (including any missing
    intermediate directories) is created if absent.  A bare filename with
    no directory component is a no-op instead of raising, and losing a
    creation race to another process no longer causes a spurious error.
    """
    dirname = os.path.dirname(path)
    # guard: os.path.dirname('file.txt') == '' and os.makedirs('') raises
    if dirname and not os.path.isdir(dirname):
        try:
            os.makedirs(dirname)
        except OSError:
            # another process/thread may have created it between the check
            # and the makedirs call; only re-raise if it still isn't there
            if not os.path.isdir(dirname):
                raise
def split_sections(s):
    """Split a string or iterable thereof into (section,content) pairs

    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines.  If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    section = None
    content = []
    for line in yield_lines(s):
        if line.startswith("["):
            if line.endswith("]"):
                # flush the previous section before starting a new one
                if section or content:
                    yield section, content
                section = line[1:-1].strip()
                content = []
            else:
                raise ValueError("Invalid section heading", line)
        else:
            content.append(line)
    # wrap up last segment
    yield section, content
def _mkstemp(*args,**kw):
    # Create a temporary file while temporarily restoring the real os.open
    # (the sandbox machinery may have patched it out); `os_open` is the
    # saved original bound elsewhere in this module.
    from tempfile import mkstemp
    old_open = os.open
    try:
        os.open = os_open   # temporarily bypass sandboxing
        return mkstemp(*args,**kw)
    finally:
        os.open = old_open  # and then put it back
# Set up global resource manager (deliberately not state-saved)
_manager = ResourceManager()
def _initialize(g):
for name in dir(_manager):
if not name.startswith('_'):
g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
_declare_state('object', working_set = WorkingSet())
try:
# Does the main program list any requirements?
from __main__ import __requires__
except ImportError:
pass # No: just use the default working set based on sys.path
else:
# Yes: ensure the requirements are met, by prefixing sys.path if necessary
try:
working_set.require(__requires__)
except VersionConflict: # try it without defaults already on sys.path
working_set = WorkingSet([]) # by starting with an empty path
for dist in working_set.resolve(
parse_requirements(__requires__), Environment()
):
working_set.add(dist)
for entry in sys.path: # add any missing entries from sys.path
if entry not in working_set.entries:
working_set.add_entry(entry)
sys.path[:] = working_set.entries # then copy back to sys.path
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script # backward compatibility
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]; map(working_set.add_entry,sys.path) # match order
| mit | defabfc0db382a114d5dfebe9eeb51ad | 32.037849 | 94 | 0.600816 | 4.238813 | false | false | false | false |
mfenniak/heroku-buildpack-python-libffi | vendor/distribute-0.6.36/setuptools/__init__.py | 132 | 3744 | """Extensions to the 'distutils' for large or complex distributions"""
from setuptools.extension import Extension, Library
from setuptools.dist import Distribution, Feature, _get_unpatched
import distutils.core, setuptools.command
from setuptools.depends import Require
from distutils.core import Command as _Command
from distutils.util import convert_path
import os
import sys
__version__ = '0.6'
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages'
]
# This marker is used to simplify the process that checks is the
# setuptools package was installed by the Setuptools project
# or by the Distribute project, in case Setuptools creates
# a distribution with the same version.
#
# The distribute_setup script for instance, will check if this
# attribute is present to decide whether to reinstall the package
# or not.
_distribute = True
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
def find_packages(where='.', exclude=()):
    """Return a list all Python packages found within directory 'where'

    'where' should be supplied as a "cross-platform" (i.e. URL-style) path; it
    will be converted to the appropriate local path syntax.  'exclude' is a
    sequence of package names to exclude; '*' can be used as a wildcard in the
    names, such that 'foo.*' will exclude all subpackages of 'foo' (but not
    'foo' itself).
    """
    # hoisted out of the exclusion loop below, where the original
    # re-executed the import once per pattern
    from fnmatch import fnmatchcase
    out = []
    stack = [(convert_path(where), '')]
    # breadth-first walk: a directory is a package only if it contains
    # an __init__.py and has no '.' in its name
    while stack:
        where, prefix = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if ('.' not in name and os.path.isdir(fn) and
                os.path.isfile(os.path.join(fn, '__init__.py'))
            ):
                out.append(prefix + name)
                stack.append((fn, prefix + name + '.'))
    # filter out explicit excludes plus the bootstrap helper modules
    for pat in list(exclude) + ['ez_setup', 'distribute_setup']:
        out = [item for item in out if not fnmatchcase(item, pat)]
    return out
setup = distutils.core.setup
_Command = _get_unpatched(_Command)
class Command(_Command):
    # Setuptools' drop-in replacement for distutils' Command: inherits the
    # distutils docstring and adds keyword-argument support to __init__
    # and reinitialize_command.
    __doc__ = _Command.__doc__
    command_consumes_arguments = False
    def __init__(self, dist, **kw):
        # Add support for keyword arguments
        _Command.__init__(self,dist)
        for k,v in kw.items():
            setattr(self,k,v)
    def reinitialize_command(self, command, reinit_subcommands=0, **kw):
        # Same as distutils' version, but applies `kw` as attributes on the
        # reinitialized command before returning it.
        cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
        for k,v in kw.items():
            setattr(cmd,k,v)    # update command with keywords
        return cmd
import distutils.core
distutils.core.Command = Command # we can't patch distutils.cmd, alas
def findall(dir=os.curdir):
    """Find all files under 'dir' and return the list of full filenames
    (relative to 'dir').
    """
    found = []
    for base, dirs, files in os.walk(dir):
        # os.walk reports './x' when started from os.curdir; strip the
        # leading './' so results stay relative to `dir`.
        if base == os.curdir or base.startswith(os.curdir + os.sep):
            base = base[2:]
        for name in files:
            candidate = os.path.join(base, name) if base else name
            if os.path.isfile(candidate):
                found.append(candidate)
    return found
import distutils.filelist
distutils.filelist.findall = findall # fix findall bug in distutils.
# sys.dont_write_bytecode was introduced in Python 2.6.
if ((hasattr(sys, "dont_write_bytecode") and sys.dont_write_bytecode) or
(not hasattr(sys, "dont_write_bytecode") and os.environ.get("PYTHONDONTWRITEBYTECODE"))):
_dont_write_bytecode = True
else:
_dont_write_bytecode = False
| mit | ef765426b638829921015795f4fa860a | 35 | 93 | 0.672543 | 3.736527 | false | false | false | false |
mfenniak/heroku-buildpack-python-libffi | vendor/distribute-0.6.36/_markerlib/markers.py | 71 | 3878 | # -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.
EXPR [in|==|!=|not in] EXPR [or|and] ...
where EXPR belongs to any of those:
python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
python_full_version = sys.version.split()[0]
os.name = os.name
sys.platform = sys.platform
platform.version = platform.version()
platform.machine = platform.machine()
platform.python_implementation = platform.python_implementation()
a free string, like '2.6', or 'win32'
"""
__all__ = ['default_environment', 'compile', 'interpret']
import ast
import os
import platform
import sys
import weakref
_builtin_compile = compile
# Prefer the stdlib helper; fall back to a stub on Jython 2.5, which has
# the ast module but not platform.python_implementation().
try:
    from platform import python_implementation
except ImportError:
    if os.name == "java":
        # Jython 2.5 has ast module, but not platform.python_implementation() function.
        def python_implementation():
            # We only reach here under Jython, so report it directly.
            return "Jython"
    else:
        raise
# restricted set of variables
# Only these names may appear in a marker expression (enforced by
# ASTWhitelist below); values are captured once at import time.
_VARS = {'sys.platform': sys.platform,
         'python_version': '%s.%s' % sys.version_info[:2],
         # FIXME parsing sys.platform is not reliable, but there is no other
         # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
         'python_full_version': sys.version.split(' ', 1)[0],
         'os.name': os.name,
         'platform.version': platform.version(),
         'platform.machine': platform.machine(),
         'platform.python_implementation': python_implementation(),
         'extra': None # wheel extension
        }
def default_environment():
    """Return a fresh copy of the default PEP 345 marker environment dict."""
    return dict(_VARS)
class ASTWhitelist(ast.NodeTransformer):
    """Reject any AST node that may not appear in a PEP 345 marker."""

    # Structural nodes, boolean operators and comparison operators that a
    # marker expression may legally contain.
    ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load,
               ast.Str, ast.And, ast.Or, ast.Eq, ast.Gt, ast.GtE, ast.In,
               ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)

    def __init__(self, statement):
        # Keep the original statement text around for error messages.
        self.statement = statement

    def visit(self, node):
        """Ensure statement only contains allowed nodes."""
        if isinstance(node, self.ALLOWED):
            return ast.NodeTransformer.visit(self, node)
        pointer = (' ' * node.col_offset) + '^'
        raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
                          (self.statement, pointer))

    def visit_Attribute(self, node):
        """Flatten one level of attribute access."""
        flat = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
        return ast.copy_location(flat, node)
def parse_marker(marker):
    """Parse *marker* into an AST, whitelisting the nodes it may contain."""
    raw_tree = ast.parse(marker, mode='eval')
    return ASTWhitelist(marker).generic_visit(raw_tree)
def compile_marker(parsed_marker):
    """Compile a whitelisted marker AST into an evaluable code object."""
    # dont_inherit keeps the caller's __future__ flags from leaking in.
    return _builtin_compile(
        parsed_marker, '<environment marker>', 'eval', dont_inherit=True)
# Cache of compiled marker functions keyed by marker source text; entries
# disappear automatically once the compiled function is garbage collected.
_cache = weakref.WeakValueDictionary()
def compile(marker):
    """Return compiled marker as a function accepting an environment dict."""
    # Fast path: return the cached function if we already compiled this marker.
    try:
        return _cache[marker]
    except KeyError:
        pass
    if not marker.strip():
        # An empty/blank marker always applies.
        def marker_fn(environment=None, override=None):
            """"""
            return True
    else:
        compiled_marker = compile_marker(parse_marker(marker))
        def marker_fn(environment=None, override=None):
            """override updates environment"""
            if override is None:
                override = {}
            if environment is None:
                environment = default_environment()
            environment.update(override)
            # Evaluate the whitelisted expression with the environment as globals.
            return eval(compiled_marker, environment)
    # Expose the marker source on the function and cache it before returning.
    marker_fn.__doc__ = marker
    _cache[marker] = marker_fn
    return _cache[marker]
def interpret(marker, environment=None):
    """Compile *marker* and evaluate it against *environment*."""
    marker_fn = compile(marker)
    return marker_fn(environment)
| mit | 01abb7653d4b69109df88af585008b58 | 32.721739 | 106 | 0.625064 | 3.917172 | false | false | false | false |
mfenniak/heroku-buildpack-python-libffi | vendor/pip-1.3.1/pip/commands/search.py | 63 | 4751 | import sys
import textwrap
import pkg_resources
import pip.download
from pip.basecommand import Command, SUCCESS
from pip.util import get_terminal_size
from pip.log import logger
from pip.backwardcompat import xmlrpclib, reduce, cmp
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from distutils.version import StrictVersion, LooseVersion
class SearchCommand(Command):
    """Search for PyPI packages whose name or summary contains <query>."""
    name = 'search'
    usage = """
%prog [options] <query>"""
    summary = 'Search PyPI for packages.'

    def __init__(self, *args, **kw):
        super(SearchCommand, self).__init__(*args, **kw)
        # --index lets the user search an alternative package index.
        self.cmd_opts.add_option(
            '--index',
            dest='index',
            metavar='URL',
            default='https://pypi.python.org/pypi',
            help='Base URL of Python Package Index (default %default)')
        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        # A query is mandatory; bail out early with a usage error.
        if not args:
            raise CommandError('Missing required argument (search query).')
        query = args
        index_url = options.index
        pypi_hits = self.search(query, index_url)
        hits = transform_hits(pypi_hits)
        # Only wrap output to the terminal width when attached to a tty.
        terminal_width = None
        if sys.stdout.isatty():
            terminal_width = get_terminal_size()[0]
        print_results(hits, terminal_width=terminal_width)
        if pypi_hits:
            return SUCCESS
        return NO_MATCHES_FOUND

    def search(self, query, index_url):
        # XML-RPC search over both the name and summary fields ("or" match).
        pypi = xmlrpclib.ServerProxy(index_url, pip.download.xmlrpclib_transport)
        hits = pypi.search({'name': query, 'summary': query}, 'or')
        return hits
def transform_hits(hits):
    """
    The list from pypi is really a list of versions. We want a list of
    packages with the list of versions stored inline. This converts the
    list from pypi into one we can use.
    """
    packages = {}
    for hit in hits:
        name = hit['name']
        summary = hit['summary']
        version = hit['version']
        score = hit['_pypi_ordering']
        # some index entries carry no ordering; treat them as lowest score
        if score is None:
            score = 0
        # idiom fix: membership test directly on the dict (was `.keys()`)
        if name not in packages:
            packages[name] = {'name': name, 'summary': summary,
                              'versions': [version], 'score': score}
        else:
            packages[name]['versions'].append(version)
            # if this is the highest version, replace summary and score
            if version == highest_version(packages[name]['versions']):
                packages[name]['summary'] = summary
                packages[name]['score'] = score
    # each record has a unique name now, so we will convert the dict into a
    # list sorted by score (descending)
    package_list = sorted(packages.values(), key=lambda x: x['score'],
                          reverse=True)
    return package_list
def print_results(hits, name_column_width=25, terminal_width=None):
    # Print one line per package; for installed packages also show the
    # installed vs. latest version, indented beneath the package line.
    installed_packages = [p.project_name for p in pkg_resources.working_set]
    for hit in hits:
        name = hit['name']
        summary = hit['summary'] or ''
        if terminal_width is not None:
            # wrap and indent summary to fit terminal
            summary = textwrap.wrap(summary, terminal_width - name_column_width - 5)
            summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
        line = '%s - %s' % (name.ljust(name_column_width), summary)
        try:
            logger.notify(line)
            if name in installed_packages:
                dist = pkg_resources.get_distribution(name)
                # indent the INSTALLED/LATEST lines under the package line
                logger.indent += 2
                try:
                    latest = highest_version(hit['versions'])
                    if dist.version == latest:
                        logger.notify('INSTALLED: %s (latest)' % dist.version)
                    else:
                        logger.notify('INSTALLED: %s' % dist.version)
                        logger.notify('LATEST: %s' % latest)
                finally:
                    logger.indent -= 2
        except UnicodeEncodeError:
            # the terminal encoding cannot represent this entry; skip it
            pass
def compare_versions(version1, version2):
    # cmp-style result: 1 if version1 > version2, 0 if equal, -1 otherwise.
    # Try the strict PEP 386-ish comparison first.
    try:
        return cmp(StrictVersion(version1), StrictVersion(version2))
    # in case of abnormal version number, fall back to LooseVersion
    except ValueError:
        pass
    try:
        return cmp(LooseVersion(version1), LooseVersion(version2))
    except TypeError:
        # certain LooseVersion comparisons raise due to unorderable types,
        # fallback to string comparison
        return cmp([str(v) for v in LooseVersion(version1).version],
                   [str(v) for v in LooseVersion(version2).version])
def highest_version(versions):
    """Return the highest of *versions* according to compare_versions.

    Fix: the original reduced with the ``cond and a or b`` idiom, which
    wrongly yields ``v2`` whenever ``v1`` is falsy (e.g. an empty version
    string); a real conditional expression has no such pitfall.
    """
    return reduce(
        lambda v1, v2: v1 if compare_versions(v1, v2) == 1 else v2, versions)
| mit | e200a7704eaf7f3f07c36a81a9163172 | 35.546154 | 102 | 0.604925 | 4.257168 | false | false | false | false |
abelfunctions/abelfunctions | abelfunctions/tests/test_riemann_surface_path.py | 1 | 18004 | import unittest
import numpy
from numpy import pi, sqrt
from sage.all import QQ, QQbar, e, I
from abelfunctions.complex_path import (
ComplexLine,
ComplexArc,
ComplexRay,
)
from abelfunctions.puiseux import puiseux
from abelfunctions.riemann_surface import RiemannSurface
from abelfunctions.riemann_surface_path import(
ordered_puiseux_series,
RiemannSurfacePathPuiseux,
RiemannSurfacePathSmale,
)
class TestOrderedPuiseuxSeries(unittest.TestCase):
    """Ordering of Puiseux series roots for the model curves y^2 - x, y^3 - x."""

    def setUp(self):
        # Two curves ramified over x = 0: a square-root branch (f1) and a
        # cube-root branch (f2).
        R = QQ['x,y']
        x,y = R.gens()
        f1 = y**2 - x
        self.f1 = f1
        self.X1 = RiemannSurface(f1)
        f2 = y**3 - x
        self.f2 = f2
        self.X2 = RiemannSurface(f2)

    def test_example_puiseux(self):
        # leading terms of the y = +/- sqrt(x) branches
        p = puiseux(self.f1, 0)[0]
        px = p.xseries()
        R = px[0].parent()
        x = R.gen()
        half = QQ(1)/2
        self.assertEqual(px[0].truncate(1), -x**half)
        self.assertEqual(px[1].truncate(1), x**half)
        # leading terms of the three cube-root branches; alpha, beta, gamma
        # are the cube roots of unity as returned by Sage
        p = puiseux(self.f2, 0)[0]
        px = p.xseries()
        R = px[0].parent()
        x = R.gen()
        third = QQ(1)/3
        S = QQ['t']; t = S.gen()
        alpha,beta,gamma = (t**3 - 1).roots(ring=QQbar, multiplicities=False)
        self.assertEqual(px[0].truncate(1), alpha*x**third)
        self.assertEqual(px[1].truncate(1), gamma*x**third)
        self.assertEqual(px[2].truncate(1), beta*x**third)

    def test_example_puiseux_extend(self):
        # extending the series must not change the (exact) leading terms
        p = puiseux(self.f1, 0)[0]
        p.extend(10)
        px = p.xseries()
        R = px[0].parent()
        x = R.gen()
        half = QQ(1)/2
        self.assertEqual(px[0].truncate(9), -x**half)
        self.assertEqual(px[1].truncate(9), x**half)
        p = puiseux(self.f2, 0)[0]
        p.extend(10)
        px = p.xseries()
        R = px[0].parent()
        x = R.gen()
        third = QQ(1)/3
        S = QQ['t']; t = S.gen()
        alpha,beta,gamma = (t**3 - 1).roots(ring=QQbar, multiplicities=False)
        self.assertEqual(px[0].truncate(9), alpha*x**third)
        self.assertEqual(px[1].truncate(9), gamma*x**third)
        self.assertEqual(px[2].truncate(9), beta*x**third)

    def test_example_puiseux_extend_to_x(self):
        # extend_to_x should behave like extend for these exact series
        p = puiseux(self.f1, 0)[0]
        p.extend_to_x(0.5)
        px = p.xseries()
        R = px[0].parent()
        x = R.gen()
        half = QQ(1)/2
        self.assertEqual(px[0].truncate(2), -x**half)
        self.assertEqual(px[1].truncate(2), x**half)
        p = puiseux(self.f2, 0)[0]
        p.extend_to_x(0.5)
        px = p.xseries()
        R = px[0].parent()
        x = R.gen()
        third = QQ(1)/3
        S = QQ['t']; t = S.gen()
        alpha,beta,gamma = (t**3 - 1).roots(ring=QQbar, multiplicities=False)
        self.assertEqual(px[0].truncate(2), alpha*x**third)
        self.assertEqual(px[1].truncate(2), gamma*x**third)
        self.assertEqual(px[2].truncate(2), beta*x**third)

    def test_ordered_puiseux_series_discriminant(self):
        # the returned series must follow the ordering of the y-fibre y0
        # testing f1
        target_point = 0
        gammax = ComplexLine(1, target_point)
        y0 = [-1,1]
        p, place = ordered_puiseux_series(self.X1, gammax, y0, target_point)
        P = p[0].parent()
        x = P.gen()
        half = QQ(1)/2
        self.assertEqual(p[0].truncate(1), -x**half)
        self.assertEqual(p[1].truncate(1), x**half)
        y0 = [1,-1]
        p, place = ordered_puiseux_series(self.X1, gammax, y0, target_point)
        self.assertEqual(p[0].truncate(1), x**half)
        self.assertEqual(p[1].truncate(1), -x**half)
        # testing f2
        S = QQ['t']; t = S.gen()
        alpha,beta,gamma = (t**3 - 1).roots(ring=QQbar, multiplicities=False)
        third = QQ(1)/3
        y0 = [alpha, beta, gamma]
        p, place = ordered_puiseux_series(self.X2, gammax, y0, target_point)
        self.assertEqual(p[0].truncate(1), alpha*x**third)
        self.assertEqual(p[1].truncate(1), beta*x**third)
        self.assertEqual(p[2].truncate(1), gamma*x**third)
        y0 = [beta, gamma, alpha]
        p, place = ordered_puiseux_series(self.X2, gammax, y0, target_point)
        self.assertEqual(p[0].truncate(1), beta*x**third)
        self.assertEqual(p[1].truncate(1), gamma*x**third)
        self.assertEqual(p[2].truncate(1), alpha*x**third)
        y0 = [beta, alpha, gamma]
        p, place = ordered_puiseux_series(self.X2, gammax, y0, target_point)
        self.assertEqual(p[0].truncate(1), beta*x**third)
        self.assertEqual(p[1].truncate(1), alpha*x**third)
        self.assertEqual(p[2].truncate(1), gamma*x**third)

    def test_ordered_puiseux_series_regular(self):
        # expansions about a regular (non-discriminant) point x = 4
        # testing f1
        target_point = 4
        gammax = ComplexLine(1, target_point)
        y0 = [-1,1]
        p, place = ordered_puiseux_series(self.X1, gammax, y0, target_point)
        P = p[0].parent()
        x = P.gen()
        self.assertEqual(p[0].truncate(3), -2 - QQ(1)/4*x + QQ(1)/64*x**2)
        self.assertEqual(p[1].truncate(3), 2 + QQ(1)/4*x - QQ(1)/64*x**2)
        y0 = [1,-1]
        p, place = ordered_puiseux_series(self.X1, gammax, y0, target_point)
        self.assertEqual(p[0].truncate(3), 2 + QQ(1)/4*x - QQ(1)/64*x**2)
        self.assertEqual(p[1].truncate(3), -2 - QQ(1)/4*x + QQ(1)/64*x**2)
class TestRiemannSurfacePathPuiseux(unittest.TestCase):
    """Analytic continuation into discriminant points via Puiseux series."""

    def setUp(self):
        R = QQ['x,y']
        x,y = R.gens()
        f1 = y**2 - x
        self.f1 = f1
        self.X1 = RiemannSurface(f1)
        f2 = y**3 - x
        self.f2 = f2
        self.X2 = RiemannSurface(f2)

    def test_construction(self):
        # construction alone should not raise
        gammax = ComplexLine(1,0)
        y0 = [-1,1]
        gamma = RiemannSurfacePathPuiseux(self.X1, gammax, y0)

    def test_analytic_continuation_X1(self):
        # continue y = +/- sqrt(x) along a line into the branch point x = 0
        gammax = ComplexLine(1,0)
        y0 = [-1,1]
        gamma = RiemannSurfacePathPuiseux(self.X1, gammax, y0)
        y = gamma.get_y(0)
        self.assertAlmostEqual(y[0], -1)
        self.assertAlmostEqual(y[1], 1)
        y = gamma.get_y(0.5)
        self.assertAlmostEqual(y[0], -sqrt(complex(0.5)))
        self.assertAlmostEqual(y[1], sqrt(complex(0.5)))
        y = gamma.get_y(0.75)
        self.assertAlmostEqual(y[0], -sqrt(complex(0.25)))
        self.assertAlmostEqual(y[1], sqrt(complex(0.25)))
        y = gamma.get_y(1)
        self.assertAlmostEqual(y[0], 0)
        self.assertAlmostEqual(y[1], 0)
        # same target along an arc ending at x = 0
        gammax = ComplexArc(2,2,0,pi)
        y0 = [-2,2]
        gamma = RiemannSurfacePathPuiseux(self.X1, gammax, y0)
        y = gamma.get_y(0)
        self.assertAlmostEqual(y[0], -2)
        self.assertAlmostEqual(y[1], 2)
        y = gamma.get_y(1)
        self.assertAlmostEqual(y[0], 0)
        self.assertAlmostEqual(y[1], 0)

    def test_analytic_continuation_X2(self):
        # the three cube-root sheets each scale by (x)^(1/3) along the line
        S = QQ['t']; t = S.gen()
        a,b,c = (t**3 - 1).roots(ring=QQbar, multiplicities=False)
        gammax = ComplexLine(1,0)
        y0 = [a,b,c]
        gamma = RiemannSurfacePathPuiseux(self.X2, gammax, y0)
        y = gamma.get_y(0)
        self.assertAlmostEqual(y[0], a)
        self.assertAlmostEqual(y[1], b)
        self.assertAlmostEqual(y[2], c)
        scale = (0.5)**(1/3.)
        y = gamma.get_y(0.5)
        self.assertAlmostEqual(y[0], scale*a)
        self.assertAlmostEqual(y[1], scale*b)
        self.assertAlmostEqual(y[2], scale*c)
        y = gamma.get_y(1)
        self.assertAlmostEqual(y[0], 0)
        self.assertAlmostEqual(y[1], 0)
        self.assertAlmostEqual(y[2], 0)

    def test_rays(self):
        # test that analytic continuation to places at infinity work
        gammax = ComplexRay(-9)
        y0 = [-3.j,3.j]
        gamma = RiemannSurfacePathPuiseux(self.X1, gammax, y0)
        y = gamma.get_y(0)
        self.assertAlmostEqual(y[0], -3.j)
        self.assertAlmostEqual(y[1], 3.j)
        # note: the infinity behavior may change in the future
        y = gamma.get_y(1)
        self.assertTrue(numpy.isnan(y[0]))
        self.assertTrue(numpy.isnan(y[1]))
class TestRiemannSurfacePathSmale(unittest.TestCase):
    """Analytic continuation away from discriminant points via Smale's method."""

    def setUp(self):
        R = QQ['x,y']
        x,y = R.gens()
        f1 = y**2 - x
        self.f1 = f1
        self.X1 = RiemannSurface(f1)
        f2 = y**3 - x
        self.f2 = f2
        self.X2 = RiemannSurface(f2)

    def test_construction(self):
        # construction alone should not raise
        gammax = ComplexLine(1,4)
        y0 = [-1,1]
        gamma = RiemannSurfacePathSmale(self.X1, gammax, y0)

    def test_analytic_continuation_X1(self):
        # continue y = +/- sqrt(x) along the regular segment x: 1 -> 4
        gammax = ComplexLine(1,4)
        y0 = [-1,1]
        gamma = RiemannSurfacePathSmale(self.X1, gammax, y0)
        y = gamma.get_y(0)
        self.assertAlmostEqual(y[0], -1)
        self.assertAlmostEqual(y[1], 1)
        y = gamma.get_y(0.5)
        self.assertAlmostEqual(y[0], -sqrt(2.5))
        self.assertAlmostEqual(y[1], sqrt(2.5))
        y = gamma.get_y(0.75)
        self.assertAlmostEqual(y[0], -sqrt(3.25))
        self.assertAlmostEqual(y[1], sqrt(3.25))
        y = gamma.get_y(1)
        self.assertAlmostEqual(y[0], -2)
        self.assertAlmostEqual(y[1], 2)

    def test_analytic_continuation_X1_big_jump(self):
        # tests that smale will handle the case when checkpoints don't exist or
        # are far away from each other
        gammax = ComplexLine(1,9)
        y0 = [-1,1]
        gamma = RiemannSurfacePathSmale(self.X1, gammax, y0,
                                        ncheckpoints=1)
        y = gamma.get_y(0)
        self.assertAlmostEqual(y[0], -1)
        self.assertAlmostEqual(y[1], 1)
        y = gamma.get_y(0.5)
        self.assertAlmostEqual(y[0], -sqrt(5))
        self.assertAlmostEqual(y[1], sqrt(5))
        y = gamma.get_y(0.75)
        self.assertAlmostEqual(y[0], -sqrt(7))
        self.assertAlmostEqual(y[1], sqrt(7))
        y = gamma.get_y(1)
        self.assertAlmostEqual(y[0], -3)
        self.assertAlmostEqual(y[1], 3)

    def tests_monodromy(self):
        # encircling the branch point x = 0 once should swap the two sheets
        gammax = ComplexArc(1, 0, 0, 2*pi)
        y0 = [-1,1]
        gamma = RiemannSurfacePathSmale(self.X1, gammax, y0)
        y = gamma.get_y(0.0)
        self.assertAlmostEqual(y[0], -1)
        self.assertAlmostEqual(y[1], 1)
        y = gamma.get_y(1.0)
        self.assertAlmostEqual(y[0], 1)
        self.assertAlmostEqual(y[1], -1)
class TestRiemannSurfacePathComposite(unittest.TestCase):
    """Behavior of composite paths formed by adding path segments."""

    def setUp(self):
        R = QQ['x,y']
        x,y = R.gens()
        f1 = y**2 - x
        self.f1 = f1
        self.X1 = RiemannSurface(f1)
        f2 = y**3 - x
        self.f2 = f2
        self.X2 = RiemannSurface(f2)

    def test_singleton_segment(self):
        # a single path is its own (only) segment
        gamma1x = ComplexLine(1,4)
        y01 = [-1,1]
        gamma1 = RiemannSurfacePathSmale(self.X1, gamma1x, y01)
        self.assertEqual(gamma1.segments[0], gamma1)
        gamma2x = ComplexLine(4,9)
        y02 = [-2,2]
        gamma2 = RiemannSurfacePathSmale(self.X1, gamma2x, y02)
        self.assertEqual(gamma2.segments[0], gamma2)

    def test_segments(self):
        # segments of a sum are accessible both via .segments and indexing
        gamma1x = ComplexLine(1,4)
        y01 = [-1,1]
        gamma1 = RiemannSurfacePathSmale(self.X1, gamma1x, y01)
        gamma2x = ComplexLine(4,9)
        y02 = [-2,2]
        gamma2 = RiemannSurfacePathSmale(self.X1, gamma2x, y02)
        gamma = gamma1 + gamma2
        self.assertEqual(gamma.segments[0], gamma1)
        self.assertEqual(gamma.segments[1], gamma2)
        self.assertEqual(gamma[0], gamma1)
        self.assertEqual(gamma[1], gamma2)

    def test_get_x(self):
        # the composite parameter s in [0,1] is split evenly over segments
        gamma1x = ComplexLine(1,4)
        y01 = [-1,1]
        gamma1 = RiemannSurfacePathSmale(self.X1, gamma1x, y01)
        gamma2x = ComplexLine(4,9)
        y02 = [-2,2]
        gamma2 = RiemannSurfacePathSmale(self.X1, gamma2x, y02)
        gamma = gamma1 + gamma2
        x = gamma.get_x(0)
        self.assertAlmostEqual(x, 1)
        x = gamma.get_x(0.25)
        self.assertAlmostEqual(x, 2.5)
        x = gamma.get_x(0.5)
        self.assertAlmostEqual(x, 4)
        x = gamma.get_x(0.75)
        self.assertAlmostEqual(x, 6.5)
        x = gamma.get_x(1.0)
        self.assertAlmostEqual(x, 9)

    def test_analytic_continuation(self):
        # method 1: adding two RSPs
        gamma1x = ComplexLine(1,4)
        y01 = [-1,1]
        gamma1 = RiemannSurfacePathSmale(self.X1, gamma1x, y01)
        gamma2x = ComplexLine(4,9)
        y02 = [-2,2]
        gamma2 = RiemannSurfacePathSmale(self.X1, gamma2x, y02)
        gamma = gamma1 + gamma2
        y = gamma.get_y(0)
        self.assertAlmostEqual(y[0], -1)
        self.assertAlmostEqual(y[1], 1)
        y = gamma.get_y(0.25)
        self.assertAlmostEqual(y[0], -sqrt(2.5))
        self.assertAlmostEqual(y[1], sqrt(2.5))
        y = gamma.get_y(0.5)
        self.assertAlmostEqual(y[0], -2)
        self.assertAlmostEqual(y[1], 2)
        y = gamma.get_y(0.75)
        self.assertAlmostEqual(y[0], -sqrt(6.5))
        self.assertAlmostEqual(y[1], sqrt(6.5))
        y = gamma.get_y(1.0)
        self.assertAlmostEqual(y[0], -3)
        self.assertAlmostEqual(y[1], 3)
        # method 2: composite ComplexPath with one RSP
        gammax = gamma1x + gamma2x
        y0 = [-1,1]
        gamma = RiemannSurfacePathSmale(self.X1, gammax, y0)
        y = gamma.get_y(0)
        self.assertAlmostEqual(y[0], -1)
        self.assertAlmostEqual(y[1], 1)
        y = gamma.get_y(0.25)
        self.assertAlmostEqual(y[0], -sqrt(2.5))
        self.assertAlmostEqual(y[1], sqrt(2.5))
        y = gamma.get_y(0.5)
        self.assertAlmostEqual(y[0], -2)
        self.assertAlmostEqual(y[1], 2)
        y = gamma.get_y(0.75)
        self.assertAlmostEqual(y[0], -sqrt(6.5))
        self.assertAlmostEqual(y[1], sqrt(6.5))
        y = gamma.get_y(1.0)
        self.assertAlmostEqual(y[0], -3)
        self.assertAlmostEqual(y[1], 3)

    def test_addition_fails(self):
        # case 1: the x-points don't match
        gamma1x = ComplexLine(1,4)
        y01 = [-1,1]
        gamma1 = RiemannSurfacePathSmale(self.X1, gamma1x, y01)
        gamma2x = ComplexLine(9,10)
        y02 = [-3,3]
        gamma2 = RiemannSurfacePathSmale(self.X1, gamma2x, y02)
        with self.assertRaises(ValueError):
            gamma = gamma1 + gamma2
        # case 2: x-points match but y-fibre doesn't
        gamma1x = ComplexLine(1,4)
        y01 = [-1,1]
        gamma1 = RiemannSurfacePathSmale(self.X1, gamma1x, y01)
        gamma2x = ComplexLine(4,9)
        y02 = [2,-2] # swapped: gamma1 ends at [-2,2]
        gamma2 = RiemannSurfacePathSmale(self.X1, gamma2x, y02)
        with self.assertRaises(ValueError):
            gamma = gamma1 + gamma2
class TestParameterize(unittest.TestCase):
    """Parameterization of one-forms nu(x,y) along surface paths.

    The parameterized value at s is nu(x(s), y(s)) * dx/ds, where dx/ds is
    the derivative of the underlying complex x-path.
    """

    def setUp(self):
        R = QQ['x,y']
        x,y = R.gens()
        f1 = y**2 - x
        self.f1 = f1
        self.X1 = RiemannSurface(f1)
        f2 = y**3 - x
        self.f2 = f2
        self.X2 = RiemannSurface(f2)

    def test_simple_line_smale(self):
        # dx/ds = 1 here, so the parameterization is just y(s)
        gammax = ComplexLine(1,2)
        gamma = RiemannSurfacePathSmale(self.X1, gammax, [-1,1])
        nu = lambda x,y: y
        nu_gamma = gamma.parameterize(nu)
        val = nu_gamma(0.0)
        self.assertAlmostEqual(val, -1)
        val = nu_gamma(0.5)
        self.assertAlmostEqual(val, -sqrt(1.5))
        val = nu_gamma(1.0)
        self.assertAlmostEqual(val, -sqrt(2.0))

    @unittest.skip('Skip until differentials and localization are worked out.')
    def test_simple_line_puiseux_discriminant(self):
        gammax = ComplexLine(2,0) #dxds = 2
        y0 = [-sqrt(2.0),sqrt(2.0)]
        gamma = RiemannSurfacePathPuiseux(self.X1, gammax, y0)
        nu = lambda x,y: y
        nu_gamma = gamma.parameterize(nu)
        val = nu_gamma(0.0)
        self.assertAlmostEqual(val, -sqrt(2.0))
        val = nu_gamma(0.5)
        self.assertAlmostEqual(val, -sqrt(1.0))
        val = nu_gamma(1.0)
        self.assertAlmostEqual(val, 0)

    def test_simple_line_dxds(self):
        # dx/ds = 2, so each value is scaled by 2
        gammax = ComplexLine(1,3) # dx/ds = 2
        gamma = RiemannSurfacePathSmale(self.X1, gammax, [-1,1])
        nu = lambda x,y: y
        nu_gamma = gamma.parameterize(nu)
        val = nu_gamma(0.0)
        self.assertAlmostEqual(val, -2)
        val = nu_gamma(0.5)
        self.assertAlmostEqual(val, -2*sqrt(2.0))
        val = nu_gamma(1.0)
        self.assertAlmostEqual(val, -2*sqrt(3.0))

    def test_simple_arc(self):
        # on an arc, dx/ds varies with s; compare against the path derivative
        gammax = ComplexArc(1,0,0,pi)
        gamma = RiemannSurfacePathSmale(self.X1, gammax, [-1,1])
        nu = lambda x,y: y
        nu_gamma = gamma.parameterize(nu)
        val = nu_gamma(0.0)
        test = gammax.derivative(0.0)*(-1)
        self.assertAlmostEqual(val, test)
        val = nu_gamma(0.5)
        test = gammax.derivative(0.5)*(-sqrt(1.j))
        self.assertAlmostEqual(val, test)
        val = nu_gamma(1.0)
        test = gammax.derivative(1.0)*(-1.j)
        self.assertAlmostEqual(val, test)

    def test_simple_composite(self):
        # across a composite path the segment parameter runs twice as fast,
        # and the value should be continuous at the segment boundary s = 0.5
        gammax1 = ComplexLine(4,1)
        gamma1 = RiemannSurfacePathSmale(self.X1, gammax1, [-2,2])
        gammax2 = ComplexArc(1,0,0,pi)
        gamma2 = RiemannSurfacePathSmale(self.X1, gammax2, [-1,1])
        gamma = gamma1 + gamma2
        nu = lambda x,y: y
        nu_gamma = gamma.parameterize(nu)
        val = nu_gamma(0.0)
        test = gammax1.derivative(0.0)*(-2)
        self.assertAlmostEqual(val, test)
        val = nu_gamma(0.25)
        test = gammax1.derivative(0.5)*(-sqrt(2.5))
        self.assertAlmostEqual(val, test)
        eps = 1e-12
        val = nu_gamma(0.5-eps)
        test = gammax1.derivative(1.0-eps/2)*(-1)
        self.assertAlmostEqual(val, test)
        val = nu_gamma(0.5)
        test = gammax2.derivative(0.0)*(-1)
        self.assertAlmostEqual(val, test)
        val = nu_gamma(0.5+eps)
        test = gammax2.derivative(eps/2)*(-1)
        self.assertAlmostEqual(val, test)
        val = nu_gamma(0.75)
        test = gammax2.derivative(0.5)*(-sqrt(1.j))
        self.assertAlmostEqual(val, test)
        val = nu_gamma(1.0)
        test = gammax2.derivative(1.0)*(-1.j)
        self.assertAlmostEqual(val, test)
| mit | 1c5a6a19d4035260e241d9ab08c1fccd | 29.828767 | 79 | 0.564041 | 2.958265 | false | true | false | false |
abelfunctions/abelfunctions | abelfunctions/complex_path_factory.py | 1 | 27469 | r"""X-Path Factory :mod:`abelfunctions.complex_path_factory`
=================================================
Module for computing the monodromy group of the set of discriminant points
of a complex plane algebraic curve.
"""
import numpy
from numpy import double, floor, angle
from sage.all import infinity, QQbar, scatter_plot
from abelfunctions.complex_path import (
ComplexLine,
ComplexArc,
ComplexPath,
)
class ComplexPathFactory(object):
r"""Factory for computing complex paths on the x-projection of a Riemann surface
determined by an algebraic curve :math:`C : f(x,y) = 0`.
Since paths on a Riemann surface are computed via analytic continuation
care needs to be taken when the x-part of the path gets close to a
discriminant point of the algebraic curve from which the Riemann surface is
derived. This is because some of the y-sheets of the curve, when considered
as a covering of the complex x-plane, coalesce at the discriminant points.
Therefore, "bounding circles" need to be computed at each discriminant
point.
Attributes
----------
riemann_surface : RiemannSurface
The Riemann surface on which to construct the x-paths.
base_point : complex
If a base point isn't provided, one will be chosen.
kappa : double (default: 3/5)
A scaling factor between 0.5 and 1.0 used to modify the radius
of the bounding circles.
discriminant_points
The discriminant points of the curve.
discriminant_points_complex
Floating point approximations of the discriminant points. Used for
computational efficiency since converting from QQbar to CDF is slow
Methods
-------
.. autosummary::
closest_discriminant_point
radius
intersecting_discriminant_points
intersects_discriminant_points
intersection_points
path_to_monodromy_point
path
monodromy_path
monodromy_path_infinity
show_paths
"""
@property
def base_point(self):
    # Base x-point from which monodromy paths begin.
    return self._base_point

@property
def discriminant_points(self):
    # Exact (QQbar) discriminant points, sorted by argument then distance
    # from the base point (see _compute_discriminant_points).
    return self._discriminant_points

@property
def discriminant_points_complex(self):
    # Floating point approximations of the discriminant points, in the
    # same order as `discriminant_points`.
    return self._discriminant_points_complex

@property
def radii(self):
    # Bounding circle radii; radii[k] belongs to discriminant_points[k].
    return self._radii
def __init__(self, f, base_point=None, kappa=3./5.):
    """Initialize a complex path factory.

    Complex path factories require a base point from which most complex
    paths begin on a Riemann surface. In particular, this base point is
    used as the base point in constructing the monodromy group of the
    Riemann surface.

    Parameters
    ----------
    f : polynomial
        The plane algebraic curve defining the Riemann surface.
    base_point : complex
        The base point of the factory and of the monodromy group of the
        Riemann surface. If not provided one will be chosen based on the
        discriminant point placement.
    kappa : double
        A scaling factor used to determine the radii of the "bounding
        circles" around each discriminant point. `kappa = 1.0` means the
        bounding circles are made as large as possible without overlapping.
    """
    self.f = f
    # compute the discriminant points and determine a base point if none
    # was provided
    b,d,dc = self._compute_discriminant_points(base_point)
    self._base_point = b
    self._discriminant_points = d
    self._discriminant_points_complex = dc
    # compute the bounding circle radii from the discriminant points
    r = self._compute_radii(kappa)
    self._radii = r
def _compute_discriminant_points(self, base_point):
    r"""Computes and stores the discriminant points of the underlying curve.

    A discriminant point :math:`x=b` is an x-point where at least one
    y-root lying above has multiplicity greater than one. A
    :class:`PuiseuxTSeries` is required to represent a place on the Riemann
    surface whose x-projection is a discriminant point. These kinds of
    places are of type :class:`DiscriminantPlace`.

    .. note::

        The ordering of the discriminant points is important for the
        purposes of computing the monodromy group, which is done in the
        :class:`RiemannSurfacePathFactory` attribute, `PathFactory`.

    Parameters
    ----------
    base_point : complex or None
        User-supplied base point, or a falsy value to have one chosen
        automatically (one unit left of the leftmost discriminant point,
        rounded down to an integer real part).

    Returns
    -------
    tuple
        `(base_point, discriminant_points, discriminant_points_complex)`
        where the two point arrays (exact and floating point) are sorted
        by argument, then distance, with respect to the base point.
    """
    # compute the symbolic and numerical discriminant points
    f = self.f
    x,y = f.parent().gens()
    res = f.resultant(f.derivative(y), y).univariate_polynomial()
    rts = res.roots(ring=QQbar, multiplicities=False)
    discriminant_points = numpy.array(rts)
    discriminant_points_complex = numpy.array(rts, dtype=complex)
    # determine a base_point, if not specified
    if not base_point:
        a = min(complex(bi).real for bi in discriminant_points)
        a = a - 1
        aint = complex(floor(a))
        base_point = aint
    # sort the discriminant points first by argument with the base point
    # and then by distance from the base point. the points need to be exact
    centered_points = discriminant_points_complex - base_point
    distances = abs(centered_points)
    arguments = angle(centered_points)
    sort_index = numpy.lexsort((distances, arguments))
    # sort and return
    discriminant_points = discriminant_points[sort_index]
    discriminant_points_complex = discriminant_points_complex[sort_index]
    return base_point, discriminant_points, discriminant_points_complex
def closest_discriminant_point(self, x, exact=True):
    r"""Returns the closest discriminant point to a point x.

    An often-used helper function by several components of
    :class:`RiemannSurface`.

    Parameters
    ----------
    x : complex
        A complex x-point.
    exact : boolean
        If `True`, returns the stored exact (QQbar) discriminant point.
        Otherwise, returns its floating point approximation.

    Returns
    -------
    complex or QQbar element
        The discriminant point, either exact or numerical.
    """
    # use floating points approximations for performance
    b = self.discriminant_points_complex
    x = complex(x)
    idx = numpy.argmin(abs(b - x))
    if exact:
        return self.discriminant_points[idx]
    return self.discriminant_points_complex[idx]
def _compute_radii(self, kappa):
    """Returns the radii of the bounding circles.

    Parameters
    ----------
    kappa : double
        A scaling factor between 0.5 and 1.0. `kappa = 1.0` means that the
        bounding circles are taken to be as large as possible without
        overlapping.

    Returns
    -------
    radii : array
        An ordered list of radii. The radius at index `k` is associated
        with the discriminant point at index `k` in
        `self.discriminant_points`.

    Raises
    ------
    ValueError
        If the base point falls inside one of the bounding circles.
    """
    # special case when there is only one finite discriminant point: take
    # the distance from the base point to the discriminant point (scaled by
    # kappa, of course)
    if len(self.discriminant_points_complex) == 1:
        b = self.discriminant_points_complex[0]
        radius = numpy.abs(self.base_point - b)
        radius *= kappa/2.0
        radii = numpy.array([radius], dtype=double)
        return radii
    # when there is more than one discriminant point we scale disctances
    # accordingly. coerce to numerical.
    radii = []
    b = self.discriminant_points_complex
    for bi in b:
        # half the distance to the nearest other discriminant point,
        # scaled by kappa
        dists = [abs(bi - bj) for bj in self.discriminant_points_complex
                 if bi != bj]
        rho = min(dists)
        radius = rho*kappa/2.0
        radii.append(radius)
    radii = numpy.array(radii, dtype=double)
    # final check: assert that the base point is sufficiently far away from
    # the discriminant points
    dists = [abs(bi - self.base_point) for bi in b]
    dists = numpy.array(dists, dtype=double) - radii
    if any(dists < 0):
        raise ValueError('Base point lies in the bounding circles of the '
                         'discriminant points. Use different base point or '
                         'circle scaling factor kappa.')
    return radii
def radius(self, bi):
"""Returns the radius of the bounding circle around `bi`.
Parameters
----------
bi : complex
A discriminant point of the algebraic curve.
Returns
-------
radius : double
The radius of the bounding circle.
"""
# find the index where bi appears in the list of discriminant points.
# it's done numerically in case a numerical approximation bi is given
bi = complex(bi)
index = 0
for z in self.discriminant_points_complex:
if abs(z-bi) < 1e-14:
break
index += 1
# raise an error if not found
if index == len(self.discriminant_points_complex):
raise ValueError('%s is not a discriminant point of %s' % (bi, self.f))
radius = self.radii[index]
return radius
def intersecting_discriminant_points(self, z0, z1, exact=False):
r"""Return the discriminant points which are too close to the line from
`z0` to `z1` along with the corresponding orientations.
Parameters
----------
z0 : complex
Line start.
z1 : complex
Line end.
Returns
-------
"""
if exact:
points = [bi for bi in self.discriminant_points
if self.intersects_discriminant_point(z0, z1, bi)]
else:
points = [bi for bi in self.discriminant_points_complex
if self.intersects_discriminant_point(z0, z1, bi)]
return points
def intersects_discriminant_point(self, z0, z1, bi):
    """Returns `True` if the line from `z0` to `z1` intersects the bounding circle
    around the discriminant point `bi`.

    Parameters
    ----------
    z0 : complex
        Line starting point.
    z1 : complex
        Line ending point.
    bi : complex
        A discriminant point.

    Returns
    -------
    is_intersecting : bool
        `True` if the line from `z0` to `z1` gets too close to `bi`.
    """
    # first check the perpendicular distance from bi to the line
    # passing through z0 and z1
    z0 = complex(z0)
    z1 = complex(z1)
    bi = complex(bi)
    # direction is +/-1 according to which side of the ray z0->z1 the
    # point bi lies on, and 0 when collinear
    direction = numpy.sign(angle(z1-z0) - angle(bi-z0))
    normv = abs(z1-z0)
    # v is z1-z0 rotated 90 degrees toward bi; used to project bi onto
    # the line below
    v = 1.0j*direction*(z1 - z0)
    r = z0 - bi
    # degenerate case: the line through z0 and z1 crosses bi. in this case
    # just check if the branch point lies in between
    if direction == 0:
        if (abs(bi - z0) <= normv) and (abs(bi - z1) <= normv):
            return True
        return False
    # return False if the distance from the _line_ passing through
    # z0 and z1 to bi is greater than the radius of the bounding
    # circle.
    distance = (v.real*r.real + v.imag*r.imag)
    distance = distance / normv
    if distance > self.radius(bi):
        return False
    # also need to check if bi "lies between" the _line segment_
    # between z0 and z1. use the distance vector w = d*v/|v|. the
    # distance from w to z0 and z1 should be less than the
    # distance between z0 and z1
    w = distance*v/normv + bi
    if (abs(w - z0) <= normv) and (abs(w - z1) <= normv):
        return True
    return False
def intersection_points(self, z0, z1, b, R):
    """Returns the complex points `w0,w1` where the line from `z0` to `z1`
    intersects the bounding circle around `b`.

    Parameters
    ----------
    z0 : complex
        Line starting point.
    z1 : complex
        Line ending point.
    b : complex
        A discriminant point.
    R : double
        The radius of the bounding circle around `b`.

    Returns
    -------
    w0, w1 : complex
        Points on the bounding circle of `b` where the line z0-z1
        intersects, ordered by increasing parameter along the line.
        When `z1` equals `b` the single entry point is returned twice.
    """
    # special case when z1 = b:
    if abs(z1 - b) < 1e-14:
        R = self.radius(b)
        b = complex(b)
        # parameterize the segment and stop a distance R short of b
        l = lambda s: z0 + (b - z0)*s
        s = 1.0 - R/abs(z0 - b)
        z = l(s)
        return z,z
    # construct the polynomial giving the distance from the line l(t),
    # parameterized by t in [0,1], to b.
    z0 = complex(z0)
    z1 = complex(z1)
    b = complex(b)
    R = double(R)
    v = z1 - z0
    w = z0 - b
    p2 = v.real**2 + v.imag**2
    p1 = 2*(v.real*w.real + v.imag*w.imag)
    p0 = w.real**2 + w.imag**2 - R**2 # solving |l(t) - b| = R
    # find the roots of this polynomial and sort by increasing t
    p = numpy.poly1d([p2, p1, p0])
    t = numpy.roots(p)
    t.sort()
    # compute ordered intersection points
    w0 = v*t[0] + z0 # first intersection point
    w1 = v*t[1] + z0 # second intersection point
    return w0,w1
def path_to_discriminant_point(self, bi):
    r"""Returns the complex path to the bounding circle around `bi` which avoids
    other discriminant points.

    This is a specific implementation of the routine used in
    :meth:`path_to_point`. Although similar, this routine takes branch
    point ordering into account when determining whether to go above or
    below intersecting discriminant points. (See
    :meth:`intersecting_discriminant_points`)

    Parameters
    ----------
    bi : complex
        A discriminant / branch point of the curve.

    Returns
    -------
    gamma : ComplexPath
        The corresponding monodromy path.

    Raises
    ------
    ValueError
        If `bi` is not (close to) a discriminant point of the curve.

    See Also
    --------
    intersecting_discriminant_points
    path_to_point
    """
    # make sure we have the discriminant point exactly
    point = self.closest_discriminant_point(bi, exact=True)
    if abs(complex(point) - complex(bi)) > 1e-4:
        raise ValueError('%s is not a discriminant point of %s'%(bi,self.f))
    bi = point
    Ri = self.radius(bi)

    # compute the list points we need to stay sufficiently away from and
    # sort them in increasing distance from the base point
    z0 = self.base_point
    _,z1 = self.intersection_points(z0, complex(bi), bi, Ri)
    points_to_avoid = self.intersecting_discriminant_points(z0, z1, exact=False)
    points_to_avoid.sort(key=lambda bj: abs(bj-z0))

    # determine the relative orientations of the avoiding discriminant
    # points with the point bi. recall that the ordering of discriminant
    # points establishes the orientation. (points earlier in the list lie
    # below those later in the list.)
    #
    # positive/negative orientation with a given bj means we need to go
    # above/below bj, respectively.
    orientations = []
    i = numpy.argwhere(self.discriminant_points_complex == complex(bi)).item(0)
    for bj in points_to_avoid:
        j = numpy.argwhere(self.discriminant_points_complex == bj).item(0)
        # bi earlier in the ordering than bj => pass below bj (-1),
        # otherwise pass above it (+1)
        if i < j:
            orientations.append(-1)
        else:
            orientations.append(1)

    # we now have sorted orientations and points to avoid. for each such
    # point:
    #
    # 1. determine the points of intersection with the bounding circle
    # 2. determine the appropriate arc along the bounding circle
    # 3. construct the path segment using a line (if necessary) and the arc
    segments = []
    for j in range(len(points_to_avoid)):
        bj = points_to_avoid[j]
        oj = orientations[j]
        Rj = self.radius(bj)
        w0,w1 = self.intersection_points(z0,z1,bj,Rj)
        arc = self.avoiding_arc(w0,w1,bj,Rj,orientation=oj)
        # only insert a connecting line when z0 is not already on the circle
        if abs(z0-w0) > 1e-14:
            segments.append(ComplexLine(z0,w0))
        segments.append(arc)

        # repeat by setting the new "start point" to be w1, the last point
        # reached on the arc.
        z0 = w1

    # build the avoiding path from the segments
    segments.append(ComplexLine(z0,z1))
    if len(segments) == 1:
        path = segments[0]
    else:
        path = ComplexPath(segments)
    return path
def path(self, z0, z1):
    r"""Returns a complex path from `z0` to `z1` which avoids the bounding
    circles of the discriminant points of the curve.

    Any discriminant point whose bounding circle intersects the straight
    line from `z0` to `z1` is avoided by following an arc along its
    bounding circle. Unlike :meth:`path_to_discriminant_point`, no branch
    point ordering information is used when deciding whether to pass
    above or below an intersected point.

    Parameters
    ----------
    z0 : complex
        The starting point of the path.
    z1 : complex
        The ending point of the path.

    Returns
    -------
    gamma : ComplexPath
        A path from `z0` to `z1` avoiding all discriminant points.

    See Also
    --------
    intersecting_discriminant_points
    path_to_discriminant_point
    """
    # compute the list points we need to stay sufficiently away from and
    # sort them in increasing distance from the base point
    points_to_avoid = self.intersecting_discriminant_points(z0, z1, exact=False)
    points_to_avoid.sort(key=lambda bj: abs(bj-z0))

    # for each points we want to avoid
    #
    # 1. determine the points of intersection with the bounding circle
    # 2. determine the appropriate arc along the bounding circle
    # 3. construct the path segment using a line (if necessary) and the arc
    segments = []
    for j in range(len(points_to_avoid)):
        bj = points_to_avoid[j]
        Rj = self.radius(bj)
        w0,w1 = self.intersection_points(z0,z1,bj,Rj)
        arc = self.avoiding_arc(w0,w1,bj,Rj)
        # only insert a connecting line when z0 is not already on the circle
        if abs(z0-w0) > 1e-14:
            segments.append(ComplexLine(z0,w0))
        segments.append(arc)

        # repeat by setting the new "start point" to be w1, the last point
        # reached on the arc.
        z0 = w1

    # append the final line and build the avoiding path from the segments
    segments.append(ComplexLine(z0,z1))
    if len(segments) == 1:
        path = segments[0]
    else:
        path = ComplexPath(segments)
    return path
def monodromy_path(self, bi, nrots=1):
    """Returns the complex path starting from the base point, going around the
    discriminant point `bi` `nrots` times, and returning to the base
    x-point.

    The sign of `nrots` indicates the sign of the direction.

    Parameters
    ----------
    bi : complex
        A discriminant point.
    nrots : integer (default `1`)
        A number of rotations around this discriminant point.

    Returns
    -------
    path : ComplexPath
        A complex path representing the monodromy path with `nrots`
        rotations about the discriminant point `bi`.
    """
    # paths around the point at infinity are handled separately.
    # (numpy.inf is used instead of the numpy.Infinity alias, which was
    # removed in NumPy 2.0; both denote the same float value.)
    if bi in [infinity, numpy.inf, 'oo']:
        return self.monodromy_path_infinity(nrots=nrots)

    path_to_bi = self.path_to_discriminant_point(bi)

    # determine the rotational path around the discriminant point: a full
    # circle built from two half-circle arcs starting where the approach
    # path meets the bounding circle
    z = path_to_bi(1.0)
    bi = complex(bi)
    Ri = self.radius(bi)
    theta = angle(z - bi)
    dtheta = numpy.pi if nrots > 0 else -numpy.pi
    circle = ComplexArc(Ri, bi, theta, dtheta) + \
        ComplexArc(Ri, bi, theta + dtheta, dtheta)
    path_around_bi = circle
    for _ in range(abs(nrots) - 1):
        path_around_bi += circle

    # the monodromy path is the sum of the path to the point, the
    # rotational part, and the return path to the base point
    path = path_to_bi + path_around_bi + path_to_bi.reverse()
    return path
def monodromy_path_infinity(self, nrots=1):
    """Returns the complex path starting at the base point, going around
    infinity `nrots` times, and returning to the base point.

    This path is sure to not only encircle all of the discriminant
    points but also stay sufficiently outside the bounding circles
    of the points.

    Parameters
    ----------
    nrots : integer, (default `1`)
        The number of rotations around infinity.

    Returns
    -------
    path : ComplexPath
        The complex path encircling infinity.
    """
    # determine the radius R of the circle, centered at the origin,
    # encircling all of the discriminant points and their bounding circles
    b = self.discriminant_points
    R = numpy.abs(self.base_point)
    for bi in b:
        radius = self.radius(bi)
        Ri = numpy.abs(bi) + 2*radius  # to be safely away
        R = max(R, Ri)

    # the path begins with a line starting at the base point and ending at
    # the point -R (where the circle will begin)
    path = ComplexLine(self.base_point, -R)

    # the positive direction around infinity is equal to the
    # negative direction around the origin
    dtheta = -numpy.pi if nrots > 0 else numpy.pi
    for _ in range(abs(nrots)):
        path += ComplexArc(R, 0, numpy.pi, dtheta)
        path += ComplexArc(R, 0, 0, dtheta)

    # return to the base point
    path += ComplexLine(-R, self.base_point)

    # determine if the circle actually touches the base point. this occurs
    # when the base point is further away from the origin than the bounding
    # circles of discriminant points. in this case, the path only consists
    # of the arcs defining the circle
    if abs(self.base_point + R) < 1e-15:
        path = ComplexPath(path.segments[1:-1])
    return path
def show_paths(self, *args, **kwds):
    """Plots all of the monodromy paths of the curve.

    Returns
    -------
    plt
        The plot object containing the base point, the discriminant
        points, and every monodromy path of the curve.
    """
    # gather the base point and the discriminant points of the curve
    a = complex(self.base_point)
    b = numpy.array(self.discriminant_points, dtype=complex)

    # plot the base point and the discriminant points
    pts = [(a.real, a.imag)]
    plt = scatter_plot(pts, facecolor='red', **kwds)
    pts = list(zip(b.real, b.imag))
    plt += scatter_plot(pts, facecolor='black', **kwds)

    # plot the monodromy paths
    for bi in b:
        path = self.monodromy_path(bi)
        plt += path.plot(**kwds)
    return plt
def avoiding_arc(self, w0, w1, b, R, orientation=None):
    """Returns the arc `(radius, center, starting_theta, dtheta)`, from the points
    `w0` and `w1` on the bounding circle around `b`.

    The arc is constructed in such a way so that the monodromy properties
    of the path are conserved.

    Parameters
    ----------
    w0 : complex
        The starting point of the arc on the bounding circle of `b`.
    w1 : complex
        The ending point of the arc on the bounding circle of `b`.
    b : complex
        The discriminant point to avoid.
    R : double
        The radius of the bounding circle.
    orientation : +1, -1, or None (default None)
        If given, forces the arc to go above (+1) or below (-1) the
        discriminant point `b`.

    Returns
    -------
    arc : ComplexArc
        An arc from `w0` to `w1` around `b`.

    Raises
    ------
    ValueError
        If `w0` does not lie strictly left of `w1`, if either point is
        not on the bounding circle, or if the computed arc angle is
        not less than pi in magnitude.
    """
    w0 = complex(w0)
    w1 = complex(w1)
    b = complex(b)
    R = double(R)

    # ASSUMPTION: Re(w0) < Re(w1)
    if w0.real >= w1.real:
        raise ValueError('Cannot construct avoiding arc: all paths must '
                         'travel from left to right unless "reversed".')

    # ASSERTION: w0 and w1 lie on the circle of radius R centered at b
    R0 = abs(w0 - b)
    R1 = abs(w1 - b)
    if abs(R0 - R) > 1e-13 or abs(R1 - R) > 1e-13:
        raise ValueError('Cannot construct avoiding arc: '
                         '%s and %s must lie on the bounding circle of '
                         'radius %s centered at %s'%(w0,w1,R,b))

    # degenerate case: w0, b, w1 are co-linear
    #
    # if no orientation is provided then go above. otherwise, adhere to the
    # orientation: orientation = +1/-1 means the path goes above/below
    phi_w0_w1 = numpy.angle(w1-w0)
    phi_w0_b = numpy.angle(b-w0)
    if abs(phi_w0_w1 - phi_w0_b) < 1e-13:
        theta0 = numpy.angle(w0-b)
        dtheta = -numpy.pi  # default above
        if orientation is not None:
            dtheta *= orientation
        return ComplexArc(R, b, theta0, dtheta)

    # otherwise: w0, b, w1 are not co-linear
    #
    # first determine if the line from w0 to w1 is above or below the
    # branch point b. this will determine if dtheta is negative or
    # positive, respectively
    if phi_w0_b <= phi_w0_w1:
        dtheta_sign = -1
    else:
        dtheta_sign = 1

    # now determine the angle between w0 and w1 on the circle. since w0,
    # b, and w1 are not colinear this angle must be normalized to be in
    # the interval (-pi,pi)
    theta0 = numpy.angle(w0 - b)
    theta1 = numpy.angle(w1 - b)
    dtheta = theta1 - theta0
    if dtheta > numpy.pi:
        dtheta = 2*numpy.pi - dtheta
    elif dtheta < -numpy.pi:
        dtheta = 2*numpy.pi + dtheta

    # sanity check: |dtheta| should be less than pi
    if abs(dtheta) >= numpy.pi:
        raise ValueError('Cannot construct avoiding arc: '
                         '|dtheta| must be less than pi.')
    dtheta = dtheta_sign * abs(dtheta)

    # finally, take orientation into account. orientation is a stronger
    # condition than the above computations.
    #
    # in the case when the signs of the orientation and the dtheta are
    # opposite then do nothing since: orientation = +1/-1 implies go
    # above/below implies dtheta negative/positive.
    #
    # when the signs are the same then make adjustments:
    if orientation is not None:
        if orientation == 1 and dtheta > 0:
            dtheta = dtheta - 2*numpy.pi
        elif orientation == -1 and dtheta < 0:
            dtheta = 2*numpy.pi + dtheta

    # the arc from w0 to w1 going around b
    arc = ComplexArc(R, b, theta0, dtheta)
    return arc
| mit | 3eb8b59840c7f190639f19a86f452d19 | 35.143421 | 86 | 0.585897 | 4.044317 | false | false | false | false |
pmorissette/bt | bt/algos.py | 1 | 76303 | """
A collection of Algos used to create Strategy logic.
"""
from __future__ import division
import abc
import random
import re
import numpy as np
import pandas as pd
import sklearn.covariance
import bt
from bt.core import Algo, AlgoStack, SecurityBase, is_zero
def run_always(f):
    """
    Decorator for Algos.

    Marks the decorated Algo so that an AlgoStack executes it on every
    pass, even when an earlier Algo in the stack has failed.
    """
    setattr(f, "run_always", True)
    return f
class PrintDate(Algo):
    """
    Algo that prints the target's current date (``target.now``).

    Handy for debugging a strategy's algo stack. Always returns True.
    """

    def __call__(self, target):
        current_date = target.now
        print(current_date)
        return True
class PrintTempData(Algo):
    """
    Algo that prints the target's temp data. Useful for debugging.

    Args:
        * fmt_string (str): A string that will be formatted with the
          target's temp dict, so reference the temp keys you want to
          examine inside curly braces ( { } ). If omitted (or empty),
          the whole temp dict is printed.
    """

    def __init__(self, fmt_string=None):
        super(PrintTempData, self).__init__()
        self.fmt_string = fmt_string

    def __call__(self, target):
        # no (or empty) format string -> dump the entire temp dict
        if not self.fmt_string:
            print(target.temp)
        else:
            print(self.fmt_string.format(**target.temp))
        return True
class PrintInfo(Algo):
    """
    Prints out info associated with the target strategy. Useful for debugging
    purposes.

    Args:
        * fmt_string (str): A string that is formatted with the target
          object's __dict__ attribute. Reference the attributes you want
          to examine inside curly braces ( { } ).

    Ex:
        PrintInfo('Strategy {name} : {now}')

    This will print out the name and the date (now) on each call.
    Basically, you provide a string that will be formatted with target.__dict__
    """

    def __init__(self, fmt_string="{name} {now}"):
        super(PrintInfo, self).__init__()
        self.fmt_string = fmt_string

    def __call__(self, target):
        message = self.fmt_string.format(**target.__dict__)
        print(message)
        return True
class Debug(Algo):
    """
    Utility Algo that drops into a pdb debugging session when triggered.

    Inside the session, 'target' is available and can be examined through
    the StrategyBase interface.
    """

    def __call__(self, target):
        from pdb import set_trace

        set_trace()
        return True
class RunOnce(Algo):
    """
    Returns True on first run then returns False.

    As the name says, the algo only runs once. Useful in situations
    where we want to run the logic once (buy and hold for example).
    """

    def __init__(self):
        super(RunOnce, self).__init__()
        # flips to True after the first call
        self.has_run = False

    def __call__(self, target):
        # if it hasn't run then we will
        # run it and set flag
        if not self.has_run:
            self.has_run = True
            return True

        # return false to stop future execution
        return False
class RunPeriod(Algo):
    """
    Base class for the Run* period algos (RunDaily, RunWeekly, ...).

    Subclasses implement :meth:`compare_dates`, which decides whether a
    period boundary lies between the current date and a neighboring date
    on the target's timeline.

    Args:
        * run_on_first_date (bool): run on the first eligible date
        * run_on_end_of_period (bool): compare against the next date
          (end of period) instead of the previous one
        * run_on_last_date (bool): run on the last date of the timeline
    """

    def __init__(
        self, run_on_first_date=True, run_on_end_of_period=False, run_on_last_date=False
    ):
        super(RunPeriod, self).__init__()
        self._run_on_first_date = run_on_first_date
        self._run_on_end_of_period = run_on_end_of_period
        self._run_on_last_date = run_on_last_date

    def __call__(self, target):
        # get last date
        now = target.now

        # if none nothing to do - return false
        if now is None:
            return False

        # not a known date in our universe
        if now not in target.data.index:
            return False

        # get index of the current date
        index = target.data.index.get_loc(target.now)

        result = False

        # index 0 is a date added by the Backtest Constructor
        if index == 0:
            return False
        # first date
        if index == 1:
            if self._run_on_first_date:
                result = True
        # last date
        elif index == (len(target.data.index) - 1):
            if self._run_on_last_date:
                result = True
        else:
            # create pandas.Timestamp for useful .week,.quarter properties
            now = pd.Timestamp(now)

            # compare against the previous date by default, or the next
            # date when running at the end of the period
            index_offset = -1
            if self._run_on_end_of_period:
                index_offset = 1

            date_to_compare = target.data.index[index + index_offset]
            date_to_compare = pd.Timestamp(date_to_compare)

            result = self.compare_dates(now, date_to_compare)

        return result

    @abc.abstractmethod
    def compare_dates(self, now, date_to_compare):
        # subclasses must decide whether the two dates fall in different periods
        raise (NotImplementedError("RunPeriod Algo is an abstract class!"))
class RunDaily(RunPeriod):
    """
    Returns True on day change.

    Args:
        * run_on_first_date (bool): determines if it runs the first time the algo is called
        * run_on_end_of_period (bool): determines if it should run at the end of the period
          or the beginning
        * run_on_last_date (bool): determines if it runs on the last time the algo is called

    Returns True if target.now falls on a different calendar day than the
    last (or next, if run_on_end_of_period) date; False otherwise. Useful
    for daily rebalancing strategies.
    """

    def compare_dates(self, now, date_to_compare):
        # a day boundary was crossed iff the calendar dates differ
        return now.date() != date_to_compare.date()
class RunWeekly(RunPeriod):
    """
    Returns True on week change.

    Args:
        * run_on_first_date (bool): determines if it runs the first time the algo is called
        * run_on_end_of_period (bool): determines if it should run at the end of the period
          or the beginning
        * run_on_last_date (bool): determines if it runs on the last time the algo is called

    Returns True if target.now falls in a different (year, week) than the
    last (or next) date; False otherwise. Useful for weekly rebalancing
    strategies.
    """

    def compare_dates(self, now, date_to_compare):
        # a week boundary was crossed iff the (year, week) pairs differ
        return (now.year, now.week) != (date_to_compare.year, date_to_compare.week)
class RunMonthly(RunPeriod):
    """
    Returns True on month change.

    Args:
        * run_on_first_date (bool): determines if it runs the first time the algo is called
        * run_on_end_of_period (bool): determines if it should run at the end of the period
          or the beginning
        * run_on_last_date (bool): determines if it runs on the last time the algo is called

    Returns True if target.now falls in a different (year, month) than the
    last (or next) date; False otherwise. Useful for monthly rebalancing
    strategies.
    """

    def compare_dates(self, now, date_to_compare):
        # a month boundary was crossed iff the (year, month) pairs differ
        return (now.year, now.month) != (date_to_compare.year, date_to_compare.month)
class RunQuarterly(RunPeriod):
    """
    Returns True on quarter change.

    Args:
        * run_on_first_date (bool): determines if it runs the first time the algo is called
        * run_on_end_of_period (bool): determines if it should run at the end of the period
          or the beginning
        * run_on_last_date (bool): determines if it runs on the last time the algo is called

    Returns True if target.now falls in a different (year, quarter) than
    the last (or next) date; False otherwise. Useful for quarterly
    rebalancing strategies.
    """

    def compare_dates(self, now, date_to_compare):
        # a quarter boundary was crossed iff the (year, quarter) pairs differ
        return (now.year, now.quarter) != (date_to_compare.year, date_to_compare.quarter)
class RunYearly(RunPeriod):
    """
    Returns True on year change.

    Args:
        * run_on_first_date (bool): determines if it runs the first time the algo is called
        * run_on_end_of_period (bool): determines if it should run at the end of the period
          or the beginning
        * run_on_last_date (bool): determines if it runs on the last time the algo is called

    Returns True if target.now falls in a different year than the last
    (or next) date; False otherwise. Useful for yearly rebalancing
    strategies.
    """

    def compare_dates(self, now, date_to_compare):
        # a year boundary was crossed iff the years differ
        return now.year != date_to_compare.year
class RunOnDate(Algo):
    """
    Returns True on a specific set of dates.

    Args:
        * dates (list): List of dates to run Algo on.
    """

    def __init__(self, *dates):
        """
        Args:
            * dates (*args): A list of dates. Dates will be parsed
                by pandas.to_datetime so pass anything that it can
                parse. Typically, you will pass a string 'yyyy-mm-dd'.
        """
        super(RunOnDate, self).__init__()
        # parse every date up front so __call__ is a simple membership test
        self.dates = list(map(pd.to_datetime, dates))

    def __call__(self, target):
        return target.now in self.dates
class RunAfterDate(Algo):
    """
    Returns True once a given date has passed.

    Args:
        * date: Date after which to start trading

    Note:
        This is useful for algos that rely on trailing averages where you
        don't want to start trading until some amount of data has been built up
    """

    def __init__(self, date):
        """
        Args:
            * date: Date after which to start trading
        """
        super(RunAfterDate, self).__init__()
        # parse dates and save
        self.date = pd.to_datetime(date)

    def __call__(self, target):
        return self.date < target.now
class RunAfterDays(Algo):
    """
    Returns True after a specific number of 'warmup' trading days have passed

    Args:
        * days (int): Number of trading days to wait before starting

    Note:
        This is useful for algos that rely on trailing averages where you
        don't want to start trading until some amount of data has been built up
    """

    def __init__(self, days):
        """
        Args:
            * days (int): Number of trading days to wait before starting
        """
        super(RunAfterDays, self).__init__()
        self.days = days

    def __call__(self, target):
        # once the countdown hits zero the algo fires on every call
        if self.days <= 0:
            return True
        self.days -= 1
        return False
class RunIfOutOfBounds(Algo):
    """
    This algo returns true if any of the target weights deviate by an amount greater
    than tolerance. For example, it will be run if the tolerance is set to 0.5 and
    a security grows from a target weight of 0.2 to greater than 0.3.

    A strategy where rebalancing is performed quarterly or whenever any
    security's weight deviates by more than 20% could be implemented by:

        Or([runQuarterlyAlgo,runIfOutOfBoundsAlgo(0.2)])

    Args:
        * tolerance (float): Allowed deviation of each security weight.

    Requires:
        * Weights
    """

    def __init__(self, tolerance):
        self.tolerance = float(tolerance)
        super(RunIfOutOfBounds, self).__init__()

    def __call__(self, target):
        # no target weights recorded yet - run so they can be established
        if "weights" not in target.temp:
            return True

        targets = target.temp["weights"]

        # compare each child's actual weight to its target weight
        for cname in target.children:
            if cname in targets:
                c = target.children[cname]
                deviation = abs((c.weight - targets[cname]) / targets[cname])
                if deviation > self.tolerance:
                    return True

        if "cash" in target.temp:
            # NOTE(review): `targets` is temp["weights"]; the `targets.value`
            # attribute access below looks suspicious for a plain dict -
            # confirm the expected type of temp["weights"] on this code path.
            cash_deviation = abs(
                (target.capital - targets.value) / targets.value - target.temp["cash"]
            )
            if cash_deviation > self.tolerance:
                return True

        return False
class RunEveryNPeriods(Algo):
    """
    This algo runs every n periods.

    Args:
        * n (int): Run each n periods
        * offset (int): Applies to the first run. If 0, this algo will run the
            first time it is called.

    This Algo can be useful for the following type of strategy:
        Each month, select the top 5 performers. Hold them for 3 months.

    You could then create 3 strategies with different offsets and create a
    master strategy that would allocate equal amounts of capital to each.
    """

    def __init__(self, n, offset=0):
        super(RunEveryNPeriods, self).__init__()
        self.n = n
        self.offset = offset
        # counter seeded so that the first run happens `offset` periods in
        self.idx = n - offset - 1
        # timestamp of the last call; used to ignore repeat calls in a period
        self.lcall = 0

    def __call__(self, target):
        # ignore multiple calls on same period
        if self.lcall == target.now:
            return False
        else:
            self.lcall = target.now

            # run when idx == (n-1)
            if self.idx == (self.n - 1):
                self.idx = 0
                return True
            else:
                self.idx += 1
                return False
class SelectAll(Algo):
    """
    Sets temp['selected'] with all securities (based on universe).

    Selects all the securities and saves them in temp['selected'].
    By default, SelectAll does not include securities that have no
    data (nan) on current date or those whose price is zero or negative.

    Args:
        * include_no_data (bool): Include securities that do not have data?
        * include_negative (bool): Include securities that have negative
          or zero prices?

    Sets:
        * selected
    """

    def __init__(self, include_no_data=False, include_negative=False):
        super(SelectAll, self).__init__()
        self.include_no_data = include_no_data
        self.include_negative = include_negative

    def __call__(self, target):
        if self.include_no_data:
            target.temp["selected"] = target.universe.columns
            return True

        # drop securities without data on the current date
        universe = target.universe.loc[target.now].dropna()
        if self.include_negative:
            selected = list(universe.index)
        else:
            # additionally drop zero/negative prices
            selected = list(universe[universe > 0].index)
        target.temp["selected"] = selected
        return True
class SelectThese(Algo):
    """
    Sets temp['selected'] with a set list of tickers.

    Args:
        * tickers (list): List of tickers to select.
        * include_no_data (bool): Include securities that do not have data?
        * include_negative (bool): Include securities that have negative
          or zero prices?

    Sets:
        * selected
    """

    def __init__(self, tickers, include_no_data=False, include_negative=False):
        super(SelectThese, self).__init__()
        self.tickers = tickers
        self.include_no_data = include_no_data
        self.include_negative = include_negative

    def __call__(self, target):
        if self.include_no_data:
            target.temp["selected"] = self.tickers
            return True

        # drop requested tickers without data on the current date
        universe = target.universe.loc[target.now, self.tickers].dropna()
        if self.include_negative:
            selected = list(universe.index)
        else:
            # additionally drop zero/negative prices
            selected = list(universe[universe > 0].index)
        target.temp["selected"] = selected
        return True
class SelectHasData(Algo):
    """
    Sets temp['selected'] based on all items in universe that meet
    data requirements.

    This is a more advanced version of SelectAll. Useful for selecting
    tickers that need a certain amount of data for future algos to run
    properly.

    For example, if we need the items with 3 months of data or more,
    we could use this Algo with a lookback period of 3 months.

    When providing a lookback period, it is also wise to provide a min_count.
    This is basically the number of data points needed within the lookback
    period for a series to be considered valid. For example, in our 3 month
    lookback above, we might want to specify the min_count as being
    57 -> a typical trading month has give or take 20 trading days. If we
    factor in some holidays, we can use 57 or 58. It's really up to you.

    If you don't specify min_count, min_count will default to ffn's
    get_num_days_required.

    Args:
        * lookback (DateOffset): A DateOffset that determines the lookback
          period.
        * min_count (int): Minimum number of days required for a series to be
          considered valid. If not provided, ffn's get_num_days_required is
          used to estimate the number of points required.
        * include_no_data (bool): Include securities that do not have data?
        * include_negative (bool): Include securities that have negative
          or zero prices?

    Sets:
        * selected
    """

    def __init__(
        self,
        lookback=pd.DateOffset(months=3),
        min_count=None,
        include_no_data=False,
        include_negative=False,
    ):
        super(SelectHasData, self).__init__()
        self.lookback = lookback
        if min_count is None:
            # estimate how many observations the lookback window should hold
            min_count = bt.ffn.get_num_days_required(lookback)
        self.min_count = min_count
        self.include_no_data = include_no_data
        self.include_negative = include_negative

    def __call__(self, target):
        # start from previously selected tickers when available
        if "selected" in target.temp:
            selected = target.temp["selected"]
        else:
            selected = target.universe.columns
        # count non-null observations within the lookback window ending now
        filt = target.universe.loc[target.now - self.lookback :, selected]
        cnt = filt.count()
        cnt = cnt[cnt >= self.min_count]
        # optionally drop tickers with no data on the current date
        if not self.include_no_data:
            cnt = cnt[~target.universe.loc[target.now, selected].isnull()]
        # optionally drop tickers with zero/negative prices on the current date
        if not self.include_negative:
            cnt = cnt[target.universe.loc[target.now, selected] > 0]
        target.temp["selected"] = list(cnt.index)
        return True
class SelectN(Algo):
    """
    Sets temp['selected'] based on ranking temp['stat'].

    Selects the top or bottom N items based on temp['stat'].
    This is usually some kind of metric that will be computed in a
    previous Algo and will be used for ranking purposes. Can select
    top or bottom N based on sort_descending parameter.

    Args:
        * n (int): select top n items. Values below 1 are treated as a
          fraction of the available items.
        * sort_descending (bool): Should the stat be sorted in descending order
          before selecting the first n items?
        * all_or_none (bool): If true, only populates temp['selected'] if we
          have n items. If we have less than n, then temp['selected'] = [].
        * filter_selected (bool): If True, will only select from the existing
          'selected' list.

    Sets:
        * selected

    Requires:
        * stat
    """

    def __init__(
        self, n, sort_descending=True, all_or_none=False, filter_selected=False
    ):
        super(SelectN, self).__init__()
        if n < 0:
            raise ValueError("n cannot be negative")
        self.n = n
        self.ascending = not sort_descending
        self.all_or_none = all_or_none
        self.filter_selected = filter_selected

    def __call__(self, target):
        stat = target.temp["stat"].dropna()
        if self.filter_selected and "selected" in target.temp:
            stat = stat.loc[stat.index.intersection(target.temp["selected"])]
        stat = stat.sort_values(ascending=self.ascending)

        # n < 1 means "keep the top n as a fraction of the ranked items"
        keep_n = self.n if self.n >= 1 else int(self.n * len(stat))

        sel = list(stat[:keep_n].index)
        if self.all_or_none and len(sel) < keep_n:
            sel = []

        target.temp["selected"] = sel
        return True
class SelectMomentum(AlgoStack):
    """
    Sets temp['selected'] based on a simple momentum filter.

    Selects the top n securities based on the total return over
    a given lookback period. This is just a wrapper around an
    AlgoStack with two algos: StatTotalReturn and SelectN.

    Note, that SelectAll() or similar should be called before
    SelectMomentum(), as StatTotalReturn uses values of temp['selected']

    Args:
        * n (int): select first N elements
        * lookback (DateOffset): lookback period for total return
          calculation
        * lag (DateOffset): Lag interval for total return calculation
        * sort_descending (bool): Sort descending (highest return is best)
        * all_or_none (bool): If true, only populates temp['selected'] if we
          have n items. If we have less than n, then temp['selected'] = [].

    Sets:
        * selected

    Requires:
        * selected
    """

    def __init__(
        self,
        n,
        lookback=pd.DateOffset(months=3),
        lag=pd.DateOffset(days=0),
        sort_descending=True,
        all_or_none=False,
    ):
        # compute the return stat first, then rank and select from it
        stat_algo = StatTotalReturn(lookback=lookback, lag=lag)
        select_algo = SelectN(
            n=n, sort_descending=sort_descending, all_or_none=all_or_none
        )
        super(SelectMomentum, self).__init__(stat_algo, select_algo)
class SelectWhere(Algo):
    """
    Selects securities based on an indicator DataFrame.

    Selects securities where the value is True on the current date
    (target.now) only if current date is present in signal DataFrame.

    For example, this could be the result of a pandas boolean comparison such
    as data > 100.

    Args:
        * signal (str|DataFrame): Boolean DataFrame containing selection logic.
          If a string is passed, frame is accessed using target.get_data
          This is the preferred way of using the algo.
        * include_no_data (bool): Include securities that do not have data?
        * include_negative (bool): Include securities that have negative
          or zero prices?

    Sets:
        * selected
    """

    def __init__(self, signal, include_no_data=False, include_negative=False):
        super(SelectWhere, self).__init__()
        # either a concrete DataFrame or the name of one to fetch lazily
        if isinstance(signal, pd.DataFrame):
            self.signal_name = None
            self.signal = signal
        else:
            self.signal_name = signal
            self.signal = None
        self.include_no_data = include_no_data
        self.include_negative = include_negative

    def __call__(self, target):
        # get signal Series at target.now
        if self.signal_name is None:
            signal = self.signal
        else:
            # lazily resolve the named frame from the target's data
            signal = target.get_data(self.signal_name)

        # only select when the current date exists in the signal frame;
        # otherwise temp['selected'] is left untouched
        if target.now in signal.index:
            sig = signal.loc[target.now]
            # get tickers where True
            # selected = sig.index[sig]
            selected = sig[sig == True].index  # noqa: E712
            # save as list
            if not self.include_no_data:
                universe = target.universe.loc[target.now, list(selected)].dropna()
                if self.include_negative:
                    selected = list(universe.index)
                else:
                    selected = list(universe[universe > 0].index)
            target.temp["selected"] = list(selected)

        return True
class SelectRandomly(AlgoStack):
    """
    Sets temp['selected'] based on a random subset of
    the items currently in temp['selected'].

    Selects n random elements from the list stored in temp['selected'].
    This is useful for benchmarking against a strategy where we believe
    the selection algorithm is adding value.

    For example, if we are testing a momentum strategy and we want to see if
    selecting securities based on momentum is better than just selecting
    securities randomly, we could use this Algo to create a random Strategy
    used for random benchmarking.

    Note:
        Another selection algorithm should be use prior to this Algo to
        populate temp['selected']. This will typically be SelectAll.

    Args:
        * n (int): Select N elements randomly.
        * include_no_data (bool): Include securities that do not have data?
        * include_negative (bool): Include securities that have negative
          or zero prices?

    Sets:
        * selected

    Requires:
        * selected
    """

    def __init__(self, n=None, include_no_data=False, include_negative=False):
        super(SelectRandomly, self).__init__()
        self.n = n
        self.include_no_data = include_no_data
        self.include_negative = include_negative

    def __call__(self, target):
        # start from the previous selection, else the whole universe
        if "selected" in target.temp:
            sel = target.temp["selected"]
        else:
            sel = list(target.universe.columns)

        if not self.include_no_data:
            universe = target.universe.loc[target.now, sel].dropna()
            if self.include_negative:
                sel = list(universe.index)
            else:
                sel = list(universe[universe > 0].index)

        if self.n is not None:
            # never sample more elements than are available
            n = min(self.n, len(sel))
            sel = random.sample(sel, int(n))

        target.temp["selected"] = sel
        return True
class SelectRegex(Algo):
    """
    Sets temp['selected'] based on a regex on their names.
    Useful when working with a large universe of different kinds of securities

    Args:
        * regex (str): regular expression on the name

    Sets:
        * selected

    Requires:
        * selected
    """

    def __init__(self, regex):
        super(SelectRegex, self).__init__()
        # compile once so each call is a cheap search
        self.regex = re.compile(regex)

    def __call__(self, target):
        matches = [
            name for name in target.temp["selected"] if self.regex.search(name)
        ]
        target.temp["selected"] = matches
        return True
class ResolveOnTheRun(Algo):
    """
    Looks at securities set in temp['selected'] and searches for names that
    match the names of "aliases" for on-the-run securities in the provided
    data. Then replaces the alias with the name of the underlying security
    appropriate for the given date, and sets it back on temp['selected']
    Args:
        * on_the_run (str): Name of a Data frame with
            - columns set to "on the run" ticker names
            - index set to the timeline for the backtest
            - values are the actual security name to use for the given date
        * include_no_data (bool): Include securities that do not have data?
        * include_negative (bool): Include securities that have negative
            or zero prices?
    Requires:
        * selected
    Sets:
        * selected
    """
    def __init__(self, on_the_run, include_no_data=False, include_negative=False):
        super(ResolveOnTheRun, self).__init__()
        # Name of the data frame mapping alias -> actual security per date.
        self.on_the_run = on_the_run
        self.include_no_data = include_no_data
        self.include_negative = include_negative
    def __call__(self, target):
        # Resolve real tickers based on OTR
        on_the_run = target.get_data(self.on_the_run)
        selected = target.temp["selected"]
        # Only names that appear as columns of the OTR frame are aliases.
        aliases = [s for s in selected if s in on_the_run.columns]
        # Map each alias to the concrete security name for the current date.
        resolved = on_the_run.loc[target.now, aliases].tolist()
        if not self.include_no_data:
            # Filter the resolved names by data availability / price sign.
            universe = target.universe.loc[target.now, resolved].dropna()
            if self.include_negative:
                resolved = list(universe.index)
            else:
                resolved = list(universe[universe > 0].index)
        # Non-alias selections pass through unchanged, after the resolved names.
        target.temp["selected"] = resolved + [
            s for s in selected if s not in on_the_run.columns
        ]
        return True
class SetStat(Algo):
    """
    Sets temp['stat'] for use by downstream algos (such as SelectN).
    Args:
        * stat (str|DataFrame): A dataframe of the same dimension as target.universe
            If a string is passed, frame is accessed using target.get_data
            This is the preferred way of using the algo.
    Sets:
        * stat
    """
    def __init__(self, stat):
        # Consistency fix: invoke the Algo base initializer like every other
        # algo subclass in this module (the original omitted this call).
        super(SetStat, self).__init__()
        if isinstance(stat, pd.DataFrame):
            # Direct frame supplied: no lazy lookup needed at call time.
            self.stat_name = None
            self.stat = stat
        else:
            # Name supplied: resolved via target.get_data on each call.
            self.stat_name = stat
            self.stat = None
    def __call__(self, target):
        """Set temp['stat'] to the stat row for the current date; returns True."""
        if self.stat_name is None:
            stat = self.stat
        else:
            stat = target.get_data(self.stat_name)
        target.temp["stat"] = stat.loc[target.now]
        return True
class StatTotalReturn(Algo):
    """
    Sets temp['stat'] with total returns over a given period.
    Sets the 'stat' based on the total return of each element in
    temp['selected'] over a given lookback period. The total return
    is determined by ffn's calc_total_return.
    Args:
        * lookback (DateOffset): lookback period.
        * lag (DateOffset): Lag interval. Total return is calculated in
            the interval [now - lookback - lag, now - lag]
    Sets:
        * stat
    Requires:
        * selected
    """
    def __init__(self, lookback=pd.DateOffset(months=3), lag=pd.DateOffset(days=0)):
        super(StatTotalReturn, self).__init__()
        self.lookback = lookback
        self.lag = lag
    def __call__(self, target):
        selected = target.temp["selected"]
        # Window ends `lag` before now and spans `lookback` back from there.
        t0 = target.now - self.lag
        prc = target.universe.loc[t0 - self.lookback : t0, selected]
        # calc_total_return is an ffn helper attached to price DataFrames.
        target.temp["stat"] = prc.calc_total_return()
        return True
class WeighEqually(Algo):
    """
    Sets temp['weights'] to an equal weight of 1/n for each item in
    temp['selected'], where n is the number of selected items.
    Sets:
        * weights
    Requires:
        * selected
    """
    def __init__(self):
        super(WeighEqually, self).__init__()
    def __call__(self, target):
        selected = target.temp["selected"]
        count = len(selected)
        # No selection -> no weights.
        if count == 0:
            target.temp["weights"] = {}
            return True
        target.temp["weights"] = dict.fromkeys(selected, 1.0 / count)
        return True
class WeighSpecified(Algo):
    """
    Sets temp['weights'] from a pre-specified dict of ticker: weight pairs.
    Args:
        * weights (dict): target weights -> ticker: weight
    Sets:
        * weights
    """
    def __init__(self, **weights):
        super(WeighSpecified, self).__init__()
        self.weights = weights
    def __call__(self, target):
        # Hand out a fresh dict so downstream algos cannot mutate the
        # configured weights in place.
        target.temp["weights"] = dict(self.weights)
        return True
class ScaleWeights(Algo):
    """
    Multiplies every entry of temp['weights'] by a constant factor.
    Useful for going short, or scaling up/down when using
    :class:`FixedIncomeStrategy <bt.core.FixedIncomeStrategy>`.
    Args:
        * scale (float): the scaling factor
    Sets:
        * weights
    Requires:
        * weights
    """
    def __init__(self, scale):
        super(ScaleWeights, self).__init__()
        self.scale = scale
    def __call__(self, target):
        scaled = {}
        for name, weight in target.temp["weights"].items():
            scaled[name] = self.scale * weight
        target.temp["weights"] = scaled
        return True
class WeighTarget(Algo):
    """
    Sets target weights based on a target weight DataFrame.
    If the target weight dataFrame is of same dimension
    as the target.universe, the portfolio will effectively be rebalanced on
    each period. For example, if we have daily data and the target DataFrame
    is of the same shape, we will have daily rebalancing.
    However, if we provide a target weight dataframe that has only month end
    dates, then rebalancing only occurs monthly.
    Basically, if a weight is provided on a given date, the target weights are
    set and the algo moves on (presumably to a Rebalance algo). If not, no
    target weights are set.
    Args:
        * weights (str|DataFrame): DataFrame containing the target weights
            If a string is passed, frame is accessed using target.get_data
            This is the preferred way of using the algo.
    Sets:
        * weights
    """
    def __init__(self, weights):
        super(WeighTarget, self).__init__()
        if isinstance(weights, pd.DataFrame):
            # Direct frame supplied: no lazy lookup needed at call time.
            self.weights_name = None
            self.weights = weights
        else:
            # Name supplied: resolved via target.get_data on each call.
            self.weights_name = weights
            self.weights = None
    def __call__(self, target):
        # get current target weights
        if self.weights_name is None:
            weights = self.weights
        else:
            weights = target.get_data(self.weights_name)
        if target.now in weights.index:
            w = weights.loc[target.now]
            # dropna and save
            target.temp["weights"] = w.dropna()
            return True
        else:
            # No row for this date: signal downstream algos not to run.
            return False
class WeighInvVol(Algo):
    """
    Sets temp['weights'] based on the inverse volatility Algo.
    Sets the target weights based on ffn's calc_inv_vol_weights. This
    is a commonly used technique for risk parity portfolios. The least
    volatile elements receive the highest weight under this scheme. Weights
    are proportional to the inverse of their volatility.
    Args:
        * lookback (DateOffset): lookback period for estimating volatility
        * lag (DateOffset): lag interval. Volatility is estimated over
            [now - lookback - lag, now - lag]
    Sets:
        * weights
    Requires:
        * selected
    """
    def __init__(self, lookback=pd.DateOffset(months=3), lag=pd.DateOffset(days=0)):
        super(WeighInvVol, self).__init__()
        self.lookback = lookback
        self.lag = lag
    def __call__(self, target):
        selected = target.temp["selected"]
        # Degenerate cases: nothing selected, or a single security gets 100%.
        if len(selected) == 0:
            target.temp["weights"] = {}
            return True
        if len(selected) == 1:
            target.temp["weights"] = {selected[0]: 1.0}
            return True
        t0 = target.now - self.lag
        prc = target.universe.loc[t0 - self.lookback : t0, selected]
        # ffn computes weights proportional to 1/vol from the return history.
        tw = bt.ffn.calc_inv_vol_weights(prc.to_returns().dropna())
        target.temp["weights"] = tw.dropna()
        return True
class WeighERC(Algo):
    """
    Sets temp['weights'] based on equal risk contribution algorithm.
    Sets the target weights based on ffn's calc_erc_weights. This
    is an extension of the inverse volatility risk parity portfolio in
    which the correlation of asset returns is incorporated into the
    calculation of risk contribution of each asset.
    The resulting portfolio is similar to a minimum variance portfolio
    subject to a diversification constraint on the weights of its components
    and its volatility is located between those of the minimum variance and
    equally-weighted portfolios (Maillard 2008).
    See:
        https://en.wikipedia.org/wiki/Risk_parity
    Args:
        * lookback (DateOffset): lookback period for estimating covariance
        * initial_weights (list): Starting asset weights [default inverse vol].
        * risk_weights (list): Risk target weights [default equal weight].
        * covar_method (str): method used to estimate the covariance. See ffn's
            calc_erc_weights for more details. (default ledoit-wolf).
        * risk_parity_method (str): Risk parity estimation method. see ffn's
            calc_erc_weights for more details. (default ccd).
        * maximum_iterations (int): Maximum iterations in iterative solutions
            (default 100).
        * tolerance (float): Tolerance level in iterative solutions (default 1E-8).
        * lag (DateOffset): lag interval. Covariance is estimated over
            [now - lookback - lag, now - lag]
    Sets:
        * weights
    Requires:
        * selected
    """
    def __init__(
        self,
        lookback=pd.DateOffset(months=3),
        initial_weights=None,
        risk_weights=None,
        covar_method="ledoit-wolf",
        risk_parity_method="ccd",
        maximum_iterations=100,
        tolerance=1e-8,
        lag=pd.DateOffset(days=0),
    ):
        super(WeighERC, self).__init__()
        self.lookback = lookback
        self.initial_weights = initial_weights
        self.risk_weights = risk_weights
        self.covar_method = covar_method
        self.risk_parity_method = risk_parity_method
        self.maximum_iterations = maximum_iterations
        self.tolerance = tolerance
        self.lag = lag
    def __call__(self, target):
        selected = target.temp["selected"]
        # Degenerate cases: nothing selected, or a single security gets 100%.
        if len(selected) == 0:
            target.temp["weights"] = {}
            return True
        if len(selected) == 1:
            target.temp["weights"] = {selected[0]: 1.0}
            return True
        t0 = target.now - self.lag
        prc = target.universe.loc[t0 - self.lookback : t0, selected]
        # Delegate the ERC optimization to ffn on the return history.
        tw = bt.ffn.calc_erc_weights(
            prc.to_returns().dropna(),
            initial_weights=self.initial_weights,
            risk_weights=self.risk_weights,
            covar_method=self.covar_method,
            risk_parity_method=self.risk_parity_method,
            maximum_iterations=self.maximum_iterations,
            tolerance=self.tolerance,
        )
        target.temp["weights"] = tw.dropna()
        return True
class WeighMeanVar(Algo):
    """
    Sets temp['weights'] based on mean-variance optimization.
    Sets the target weights based on ffn's calc_mean_var_weights. This is a
    Python implementation of Markowitz's mean-variance optimization.
    See:
        http://en.wikipedia.org/wiki/Modern_portfolio_theory#The_efficient_frontier_with_no_risk-free_asset
    Args:
        * lookback (DateOffset): lookback period for estimating volatility
        * bounds ((min, max)): tuple specifying the min and max weights for
            each asset in the optimization.
        * covar_method (str): method used to estimate the covariance. See ffn's
            calc_mean_var_weights for more details.
        * rf (float): risk-free rate used in optimization.
        * lag (DateOffset): lag interval. Inputs are estimated over
            [now - lookback - lag, now - lag]
    Sets:
        * weights
    Requires:
        * selected
    """
    def __init__(
        self,
        lookback=pd.DateOffset(months=3),
        bounds=(0.0, 1.0),
        covar_method="ledoit-wolf",
        rf=0.0,
        lag=pd.DateOffset(days=0),
    ):
        super(WeighMeanVar, self).__init__()
        self.lookback = lookback
        self.lag = lag
        self.bounds = bounds
        self.covar_method = covar_method
        self.rf = rf
    def __call__(self, target):
        selected = target.temp["selected"]
        # Degenerate cases: nothing selected, or a single security gets 100%.
        if len(selected) == 0:
            target.temp["weights"] = {}
            return True
        if len(selected) == 1:
            target.temp["weights"] = {selected[0]: 1.0}
            return True
        t0 = target.now - self.lag
        prc = target.universe.loc[t0 - self.lookback : t0, selected]
        # Delegate the optimization to ffn on the return history.
        tw = bt.ffn.calc_mean_var_weights(
            prc.to_returns().dropna(),
            weight_bounds=self.bounds,
            covar_method=self.covar_method,
            rf=self.rf,
        )
        target.temp["weights"] = tw.dropna()
        return True
class WeighRandomly(Algo):
    """
    Sets temp['weights'] based on a random weight vector.
    Sets random target weights for each security in 'selected'.
    This is useful for benchmarking against a strategy where we believe
    the weighing algorithm is adding value.
    For example, if we are testing a low-vol strategy and we want to see if
    our weighing strategy is better than just weighing
    securities randomly, we could use this Algo to create a random Strategy
    used for random benchmarking.
    This is an Algo wrapper around ffn's random_weights function.
    Args:
        * bounds ((low, high)): Tuple including low and high bounds for each
            security
        * weight_sum (float): What should the weights sum up to?
    Sets:
        * weights
    Requires:
        * selected
    """
    def __init__(self, bounds=(0.0, 1.0), weight_sum=1):
        super(WeighRandomly, self).__init__()
        self.bounds = bounds
        self.weight_sum = weight_sum
    def __call__(self, target):
        sel = target.temp["selected"]
        n = len(sel)
        w = {}
        try:
            rw = bt.ffn.random_weights(n, self.bounds, self.weight_sum)
            w = dict(zip(sel, rw))
        except ValueError:
            # Deliberate best-effort: infeasible bounds/weight_sum combinations
            # simply leave the weights empty rather than aborting the stack.
            pass
        target.temp["weights"] = w
        return True
class LimitDeltas(Algo):
    """
    Modifies temp['weights'] based on weight delta limits.
    Basically, this can be used if we want to restrict how much a security's
    target weight can change from day to day. Useful when we want to be more
    conservative about how much we could actually trade on a given day without
    affecting the market.
    For example, if we have a strategy that is currently long 100% one
    security, and the weighing Algo sets the new weight to 0%, but we
    use this Algo with a limit of 0.1, the new target weight will
    be 90% instead of 0%.
    Args:
        * limit (float, dict): Weight delta limit. If float, this will be a
            global limit for all securities. If dict, you may specify by-ticker
            limit.
    Sets:
        * weights
    Requires:
        * weights
    """
    def __init__(self, limit=0.1):
        super(LimitDeltas, self).__init__()
        self.limit = limit
        # determine if global or specific
        self.global_limit = True
        if isinstance(limit, dict):
            self.global_limit = False
    def __call__(self, target):
        tw = target.temp["weights"]
        # Consider both targeted securities and currently-held children, so
        # positions being closed (target 0) are also delta-limited.
        all_keys = set(list(target.children.keys()) + list(tw.keys()))
        for k in all_keys:
            # Missing target weight means we are closing the position.
            tgt = tw[k] if k in tw else 0.0
            cur = target.children[k].weight if k in target.children else 0.0
            delta = tgt - cur
            # check if we need to limit
            if self.global_limit:
                if abs(delta) > self.limit:
                    # Clamp the move to `limit` in the direction of the delta.
                    tw[k] = cur + (self.limit * np.sign(delta))
            else:
                # make sure we have a limit defined in case of limit dict
                if k in self.limit:
                    lmt = self.limit[k]
                    if abs(delta) > lmt:
                        tw[k] = cur + (lmt * np.sign(delta))
        return True
class LimitWeights(Algo):
    """
    Caps each entry of temp['weights'] at a maximum weight.
    This is an Algo wrapper around ffn's limit_weights. The purpose of this
    Algo is to limit the weight of any one specific asset. Excess weight is
    redistributed to the other assets, proportionally to their current
    weights. See ffn's limit_weights for more information.
    Args:
        * limit (float): Weight limit.
    Sets:
        * weights
    Requires:
        * weights
    """
    def __init__(self, limit=0.1):
        super(LimitWeights, self).__init__()
        self.limit = limit
    def __call__(self, target):
        # Nothing to do when no weights were set upstream.
        if "weights" not in target.temp:
            return True
        weights = target.temp["weights"]
        if len(weights) == 0:
            return True
        if self.limit < 1.0 / len(weights):
            # Infeasible: cap below equal weight -> clear all weights.
            target.temp["weights"] = {}
        else:
            target.temp["weights"] = bt.ffn.limit_weights(weights, self.limit)
        return True
class TargetVol(Algo):
    """
    Updates temp['weights'] based on the target annualized volatility desired.
    Args:
        * target_volatility: annualized volatility to target. Either a single
            number applied to every security, or a dict of security -> vol.
        * lookback (DateOffset): lookback period for estimating volatility
        * lag (DateOffset): amount of time to wait to calculate the covariance
        * covar_method: method of calculating volatility ('standard' or
            'ledoit-wolf')
        * annualization_factor: number of periods to annualize by.
          It is assumed that target volatility is already annualized by this factor.
    Updates:
        * weights
    Requires:
        * temp['weights']
    """
    def __init__(
        self,
        target_volatility,
        lookback=pd.DateOffset(months=3),
        lag=pd.DateOffset(days=0),
        covar_method="standard",
        annualization_factor=252,
    ):
        super(TargetVol, self).__init__()
        self.target_volatility = target_volatility
        self.lookback = lookback
        self.lag = lag
        self.covar_method = covar_method
        self.annualization_factor = annualization_factor
    def __call__(self, target):
        current_weights = target.temp["weights"]
        selected = current_weights.keys()
        # if there were no weights already set then skip
        if len(selected) == 0:
            return True
        t0 = target.now - self.lag
        prc = target.universe.loc[t0 - self.lookback : t0, selected]
        returns = bt.ffn.to_returns(prc)
        # calc covariance matrix
        if self.covar_method == "ledoit-wolf":
            # Bug fix: sklearn's ledoit_wolf returns a (covariance ndarray,
            # shrinkage) tuple, so the original `covar.columns`/`covar.values`
            # accesses crashed on this path. Wrap the matrix in a DataFrame so
            # both covariance methods expose the same interface below.
            covar = pd.DataFrame(
                sklearn.covariance.ledoit_wolf(returns)[0],
                index=returns.columns,
                columns=returns.columns,
            )
        elif self.covar_method == "standard":
            covar = returns.cov()
        else:
            raise NotImplementedError("covar_method not implemented")
        weights = pd.Series(
            [current_weights[x] for x in covar.columns], index=covar.columns
        )
        # Annualized portfolio volatility implied by the current weights.
        vol = np.sqrt(
            np.matmul(weights.values.T, np.matmul(covar.values, weights.values))
            * self.annualization_factor
        )
        # Bug fix: build the per-security vol targets locally on every call
        # instead of overwriting self.target_volatility on the first call.
        # The old in-place conversion froze the key set at the first call's
        # securities, so anything selected on later dates never got scaled.
        if isinstance(self.target_volatility, dict):
            vol_targets = self.target_volatility
        else:
            vol_targets = {k: self.target_volatility for k in current_weights}
        # Scale each weight so the portfolio tracks its target volatility.
        for k in current_weights.keys():
            if k in vol_targets:
                current_weights[k] = current_weights[k] * vol_targets[k] / vol
        return True
class PTE_Rebalance(Algo):
    """
    Triggers a rebalance when predicted tracking error (PTE) versus a set of
    static target weights exceeds a cap.
    Args:
        * PTE_volatility_cap: annualized tracking-error volatility cap
        * target_weights: dataframe of weights that needs to have the same index as the price dataframe
        * lookback (DateOffset): lookback period for estimating volatility
        * lag (DateOffset): amount of time to wait to calculate the covariance
        * covar_method: method of calculating volatility ('standard' or
            'ledoit-wolf')
        * annualization_factor: number of periods to annualize by.
          It is assumed that the volatility cap is already annualized by this factor.
    """
    def __init__(
        self,
        PTE_volatility_cap,
        target_weights,
        lookback=pd.DateOffset(months=3),
        lag=pd.DateOffset(days=0),
        covar_method="standard",
        annualization_factor=252,
    ):
        super(PTE_Rebalance, self).__init__()
        self.PTE_volatility_cap = PTE_volatility_cap
        self.target_weights = target_weights
        self.lookback = lookback
        self.lag = lag
        self.covar_method = covar_method
        self.annualization_factor = annualization_factor
    def __call__(self, target):
        # Cannot measure tracking error before the backtest has started.
        if target.now is None:
            return False
        # No positions at all yet: signal a rebalance to get invested.
        if target.positions.shape == (0, 0):
            return True
        positions = target.positions.loc[target.now]
        if positions is None:
            return True
        prices = target.universe.loc[target.now, positions.index]
        if prices is None:
            return True
        # Actual weights implied by current positions and prices.
        current_weights = positions * prices / target.value
        target_weights = self.target_weights.loc[target.now, :]
        # Union of currently-held and targeted securities.
        cols = list(current_weights.index.copy())
        for c in target_weights.keys():
            if c not in cols:
                cols.append(c)
        # Active weights: current minus target, per security.
        weights = pd.Series(np.zeros(len(cols)), index=cols)
        for c in cols:
            if c in current_weights:
                weights[c] = current_weights[c]
            if c in target_weights:
                weights[c] -= target_weights[c]
        t0 = target.now - self.lag
        prc = target.universe.loc[t0 - self.lookback : t0, cols]
        returns = bt.ffn.to_returns(prc)
        # calc covariance matrix
        if self.covar_method == "ledoit-wolf":
            # Bug fix: sklearn's ledoit_wolf returns a (covariance ndarray,
            # shrinkage) tuple, so the original `covar.values` access crashed
            # on this path. Keep just the matrix.
            covar_values = sklearn.covariance.ledoit_wolf(returns)[0]
        elif self.covar_method == "standard":
            covar_values = returns.cov().values
        else:
            raise NotImplementedError("covar_method not implemented")
        # Annualized predicted tracking error of the active weights.
        PTE_vol = np.sqrt(
            np.matmul(weights.values.T, np.matmul(covar_values, weights.values))
            * self.annualization_factor
        )
        if pd.isnull(PTE_vol):
            return False
        # Rebalance only when tracking error breaches the cap. (The original
        # trailing `return True` after this branch was unreachable dead code.)
        if PTE_vol > self.PTE_volatility_cap:
            return True
        else:
            return False
class CapitalFlow(Algo):
    """
    Models capital flows into or out of the target node.
    For example, a pension fund might have inflows every month or year due to
    contributions. This Algo adjusts the capital of the target node without
    affecting its returns. Since this is modeled as an adjustment, the capital
    remains in the strategy until a re-allocation/rebalancement is made.
    Args:
        * amount (float): Amount of adjustment (positive inflow,
          negative outflow)
    """
    def __init__(self, amount):
        """Store the flow amount, coerced to float."""
        super(CapitalFlow, self).__init__()
        self.amount = float(amount)
    def __call__(self, target):
        """Apply the capital adjustment to the target; always returns True."""
        target.adjust(self.amount)
        return True
class CloseDead(Algo):
    """
    Closes all positions for which prices are equal to zero (we assume
    that these stocks are dead) and removes them from temp['weights'] if
    they enter it by any chance.
    To be called before Rebalance().
    In a normal workflow it is not needed, as those securities will not
    be selected by SelectAll(include_no_data=False) or similar method, and
    Rebalance() closes positions that are not in temp['weights'] anyway.
    However in case when for some reasons include_no_data=False could not
    be used or some modified weighting method is used, CloseDead() will
    allow to avoid errors.
    Requires:
        * weights
    """
    def __init__(self):
        super(CloseDead, self).__init__()
    def __call__(self, target):
        # Nothing to do when no weights were set upstream.
        if "weights" not in target.temp:
            return True
        targets = target.temp["weights"]
        for c in target.children:
            # A zero/negative price as of target.now marks a dead security.
            if target.universe[c].loc[target.now] <= 0:
                target.close(c)
                # Also drop it from the target weights so Rebalance ignores it.
                if c in targets:
                    del targets[c]
        return True
class SetNotional(Algo):
    """
    Sets temp['notional_value'], the base used for rebalancing
    :class:`FixedIncomeStrategy <bt.core.FixedIncomeStrategy>` targets.
    Args:
        * notional_value (str): Name of a pd.Series object containing the
          target notional values of the strategy over time.
    Sets:
        * notional_value
    """
    def __init__(self, notional_value):
        self.notional_value = notional_value
        super(SetNotional, self).__init__()
    def __call__(self, target):
        series = target.get_data(self.notional_value)
        # No value for this date: signal downstream algos not to run.
        if target.now not in series.index:
            return False
        target.temp["notional_value"] = series.loc[target.now]
        return True
class Rebalance(Algo):
    """
    Rebalances capital based on temp['weights']
    Rebalances capital based on temp['weights']. Also closes
    positions if open but not in target_weights. This is typically
    the last Algo called once the target weights have been set.
    Requires:
        * weights
        * cash (optional): You can set a 'cash' value on temp. This should be a
            number between 0-1 and determines the amount of cash to set aside.
            For example, if cash=0.3, the strategy will allocate 70% of its
            value to the provided weights, and the remaining 30% will be kept
            in cash. If this value is not provided (default), the full value
            of the strategy is allocated to securities.
        * notional_value (optional): Required only for fixed_income targets. This is the base
            value of total notional that will apply to the weights.
    """
    def __init__(self):
        super(Rebalance, self).__init__()
    def __call__(self, target):
        # Nothing to do when no weights were set upstream.
        if "weights" not in target.temp:
            return True
        targets = target.temp["weights"]
        # save value because it will change after each call to allocate
        # use it as base in rebalance calls
        # call it before de-allocation so that notional_value is correct
        if target.fixed_income:
            if "notional_value" in target.temp:
                base = target.temp["notional_value"]
            else:
                base = target.notional_value
        else:
            base = target.value
        # de-allocate children that are not in targets and have non-zero value
        # (open positions)
        for cname in target.children:
            # if this child is in our targets, we don't want to close it out
            if cname in targets:
                continue
            # get child and value
            c = target.children[cname]
            if target.fixed_income:
                v = c.notional_value
            else:
                v = c.value
            # if non-zero and non-null, we need to close it out
            if v != 0.0 and not np.isnan(v):
                # update=False defers bookkeeping until the final root update.
                target.close(cname, update=False)
        # If cash is set (it should be a value between 0-1 representing the
        # proportion of cash to keep), calculate the new 'base'
        if "cash" in target.temp and not target.fixed_income:
            base = base * (1 - target.temp["cash"])
        # Turn off updating while we rebalance each child
        for item in targets.items():
            target.rebalance(item[1], child=item[0], base=base, update=False)
        # Now update
        target.root.update(target.now)
        return True
class RebalanceOverTime(Algo):
    """
    Similar to Rebalance but rebalances to target
    weight over n periods.
    Rebalances towards a target weight over a n periods. Splits up the weight
    delta over n periods.
    This can be useful if we want to make more conservative rebalacing
    assumptions. Some strategies can produce large swings in allocations. It
    might not be reasonable to assume that this rebalancing can occur at the
    end of one specific period. Therefore, this algo can be used to simulate
    rebalancing over n periods.
    This has typically been used in monthly strategies where we want to spread
    out the rebalancing over 5 or 10 days.
    Note:
        This Algo will require the run_always wrapper in the above case. For
        example, the RunMonthly will return True on the first day, and
        RebalanceOverTime will be 'armed'. However, RunMonthly will return
        False the rest days of the month. Therefore, we must specify that we
        want to always run this algo.
    Args:
        * n (int): number of periods over which rebalancing takes place.
    Requires:
        * weights
    """
    def __init__(self, n=10):
        super(RebalanceOverTime, self).__init__()
        self.n = float(n)
        # Inner Rebalance instance reused for the actual allocation work.
        self._rb = Rebalance()
        # Stateful across calls: the armed target weights and periods left.
        self._weights = None
        self._days_left = None
    def __call__(self, target):
        # new weights specified - update rebalance data
        if "weights" in target.temp:
            self._weights = target.temp["weights"]
            self._days_left = self.n
        # if _weights are not None, we have some work to do
        if self._weights is not None:
            tgt = {}
            # scale delta relative to # of periods left and set that as the new
            # target
            for cname in self._weights.keys():
                curr = (
                    target.children[cname].weight if cname in target.children else 0.0
                )
                dlt = (self._weights[cname] - curr) / self._days_left
                tgt[cname] = curr + dlt
            # mock weights and call real Rebalance
            target.temp["weights"] = tgt
            self._rb(target)
            # dec _days_left. If 0, set to None & set _weights to None
            self._days_left -= 1
            if self._days_left == 0:
                # Disarm: the full target has been reached.
                self._days_left = None
                self._weights = None
        return True
class Require(Algo):
    """
    Flow control Algo.
    Returns the value of a predicate applied to an entry in temp. Useful
    for controlling flow.
    For example, we might want to make sure we have some items selected.
    We could pass a lambda function that checks the len of 'selected':
        pred=lambda x: len(x) == 0
        item='selected'
    Args:
        * pred (Algo): Function that returns a Bool given the strategy. This
            is the definition of an Algo. However, this is typically used
            with a simple lambda function.
        * item (str): An item within temp.
        * if_none (bool): Result if the item required is not in temp or if its
            value is None
    """
    def __init__(self, pred, item, if_none=False):
        super(Require, self).__init__()
        self.item = item
        self.pred = pred
        self.if_none = if_none
    def __call__(self, target):
        # A missing key and an explicit None are treated identically.
        value = target.temp.get(self.item)
        if value is None:
            return self.if_none
        return self.pred(value)
class Not(Algo):
    """
    Flow control Algo that inverts the result of another algo.
    Useful for negating other flow-control algos, for example
    Not(RunAfterDate(...)), Not(RunAfterDays(...)), etc.
    Args:
        * algo (Algo): The algo to run and invert the return value of
    """
    def __init__(self, algo):
        super(Not, self).__init__()
        self._algo = algo
    def __call__(self, target):
        result = self._algo(target)
        return not result
class Or(Algo):
    """
    Flow control Algo that combines multiple signals into one.
    For example, we might want two different rebalance signals to work together:
        runOnDateAlgo = bt.algos.RunOnDate(pdf.index[0]) # where pdf.index[0] is the first date in our time series
        runMonthlyAlgo = bt.algos.RunMonthly()
        orAlgo = Or([runMonthlyAlgo,runOnDateAlgo])
    orAlgo will return True if it is the first date or if it is 1st of the month
    Args:
        * list_of_algos: Iterable list of algos. Each algo is always run;
          the result is True if any of them returned True.
    """
    def __init__(self, list_of_algos):
        super(Or, self).__init__()
        self._list_of_algos = list_of_algos
    def __call__(self, target):
        # Run every algo unconditionally (no short-circuiting), so each one
        # still gets a chance to mutate state, then OR the outcomes.
        outcomes = [algo(target) for algo in self._list_of_algos]
        return any(outcomes)
class SelectTypes(Algo):
    """
    Sets temp['selected'] based on node type.
    If temp['selected'] is already set, the existing selection is filtered
    rather than replaced.
    Args:
        * include_types (list): Types of nodes to include
        * exclude_types (list): Types of nodes to exclude
    Sets:
        * selected
    """
    def __init__(self, include_types=(bt.core.Node,), exclude_types=()):
        super(SelectTypes, self).__init__()
        self.include_types = include_types
        # An empty exclusion tuple is replaced by (NoneType,), which matches
        # no real node, so isinstance checks below stay valid.
        self.exclude_types = exclude_types or (type(None),)
    def __call__(self, target):
        chosen = []
        for name, node in target.children.items():
            if not isinstance(node, self.include_types):
                continue
            if isinstance(node, self.exclude_types):
                continue
            chosen.append(name)
        if "selected" in target.temp:
            previous = target.temp["selected"]
            chosen = [name for name in chosen if name in previous]
        target.temp["selected"] = chosen
        return True
class ClosePositionsAfterDates(Algo):
    """
    Close positions on securities after a given date.
    This can be used to make sure positions on matured/redeemed securities are
    closed. It can also be used as part of a strategy to, i.e. make sure
    the strategy doesn't hold any securities with time to maturity less than a year
    Note that if placed after a RunPeriod algo in the stack, that the actual
    closing of positions will occur after the provided date. For this to work,
    the "price" of the security (even if matured) must exist up until that date.
    Alternatively, run this with the @run_always decorator to close the positions
    immediately.
    Also note that this algo does not operate using temp['weights'] and Rebalance.
    This is so that hedges (which are excluded from that workflow) will also be
    closed as necessary.
    Args:
        * close_dates (str): the name of a dataframe indexed by security name, with columns
          "date": the date after which we want to close the position ASAP
    Sets:
        * target.perm['closed'] : to keep track of which securities have already closed
    """
    def __init__(self, close_dates):
        super(ClosePositionsAfterDates, self).__init__()
        self.close_dates = close_dates
    def __call__(self, target):
        # Lazily initialize the permanent record of already-closed securities.
        if "closed" not in target.perm:
            target.perm["closed"] = set()
        close_dates = target.get_data(self.close_dates)["date"]
        # Find securities that are candidate for closing
        sec_names = [
            sec_name
            for sec_name, sec in target.children.items()
            if isinstance(sec, SecurityBase)
            and sec_name in close_dates.index
            and sec_name not in target.perm["closed"]
        ]
        # Check whether closed
        is_closed = close_dates.loc[sec_names] <= target.now
        # Close position
        # is_closed[is_closed] keeps only the securities whose date has passed.
        for sec_name in is_closed[is_closed].index:
            # update=False defers bookkeeping until the final root update.
            target.close(sec_name, update=False)
            target.perm["closed"].add(sec_name)
        # Now update
        target.root.update(target.now)
        return True
class RollPositionsAfterDates(Algo):
    """
    Roll securities based on the provided map.
    This can be used for any securities which have "On-The-Run" and "Off-The-Run"
    versions (treasury bonds, index swaps, etc).
    Also note that this algo does not operate using temp['weights'] and Rebalance.
    This is so that hedges (which are excluded from that workflow) will also be
    rolled as necessary.
    Args:
        * roll_data (str): the name of a dataframe indexed by security name, with columns
            - "date": the first date at which the roll can occur
            - "target": the security name we are rolling into
            - "factor": the conversion factor. One unit of the original security
              rolls into "factor" units of the new one.
    Sets:
        * target.perm['rolled'] : to keep track of which securities have already rolled
    """
    def __init__(self, roll_data):
        super(RollPositionsAfterDates, self).__init__()
        self.roll_data = roll_data
    def __call__(self, target):
        # Lazily initialize the permanent record of already-rolled securities.
        if "rolled" not in target.perm:
            target.perm["rolled"] = set()
        roll_data = target.get_data(self.roll_data)
        # target security name -> aggregate quantity to transact.
        transactions = {}
        # Find securities that are candidate for roll
        sec_names = [
            sec_name
            for sec_name, sec in target.children.items()
            if isinstance(sec, SecurityBase)
            and sec_name in roll_data.index
            and sec_name not in target.perm["rolled"]
        ]
        # Calculate new transaction and close old position
        for sec_name, sec_fields in roll_data.loc[sec_names].iterrows():
            if sec_fields["date"] <= target.now:
                target.perm["rolled"].add(sec_name)
                # Convert the old position into units of the roll target.
                new_quantity = sec_fields["factor"] * target[sec_name].position
                new_sec = sec_fields["target"]
                if new_sec in transactions:
                    transactions[new_sec] += new_quantity
                else:
                    transactions[new_sec] = new_quantity
                # update=False defers bookkeeping until the final root update.
                target.close(sec_name, update=False)
        # Do all the new transactions at the end, to do any necessary aggregations first
        for new_sec, quantity in transactions.items():
            target.transact(quantity, new_sec, update=False)
        # Now update
        target.root.update(target.now)
        return True
class SelectActive(Algo):
    """
    Filters temp['selected'] to exclude securities that have been closed or
    rolled after a certain date using ClosePositionsAfterDates or
    RollPositionsAfterDates. This makes sure not to select them again for
    weighting (even if they still have prices).
    Requires:
        * selected
        * perm['closed'] or perm['rolled']
    Sets:
        * selected
    """
    def __call__(self, target):
        # Securities retired by either mechanism are no longer selectable.
        inactive = target.perm.get("rolled", set()) | target.perm.get("closed", set())
        target.temp["selected"] = [
            name for name in target.temp["selected"] if name not in inactive
        ]
        return True
class ReplayTransactions(Algo):
"""
Replay a list of transactions that were executed.
This is useful for taking a blotter of actual trades that occurred,
and measuring performance against hypothetical strategies.
In particular, one can replay the outputs of backtest.Result.get_transactions
Note that this allows the timestamps and prices of the reported transactions
to be completely arbitrary, so while the strategy may track performance
on a daily basis, it will accurately account for the actual PNL of
the trades based on where they actually traded, and the bidofferpaid
attribute on the strategy will capture the "slippage" as measured
against the daily prices.
Args:
* transactions (str): name of a MultiIndex dataframe with format
Date, Security | quantity, price.
Note this schema follows the output of backtest.Result.get_transactions
"""
def __init__(self, transactions):
super(ReplayTransactions, self).__init__()
self.transactions = transactions
def __call__(self, target):
timeline = target.data.index
index = timeline.get_loc(target.now)
end = target.now
if index == 0:
start = pd.Timestamp.min
else:
start = timeline[index - 1]
# Get the transactions since the last update
all_transactions = target.get_data(self.transactions)
timestamps = all_transactions.index.get_level_values("Date")
transactions = all_transactions[(timestamps > start) & (timestamps <= end)]
for (_, security), transaction in transactions.iterrows():
c = target[security]
c.transact(
transaction["quantity"], price=transaction["price"], update=False
)
# Now update
target.root.update(target.now)
return True
class SimulateRFQTransactions(Algo):
"""
An algo that simulates the outcomes from RFQs (Request for Quote)
using a "model" that determines which ones becomes transactions and at what price
those transactions happen. This can be used from the perspective of the sender of the
RFQ or the receiver.
Args:
* rfqs (str): name of a dataframe with columns
Date, Security | quantity, *additional columns as required by model
* model (object): a function/callable object with arguments
- rfqs : data frame of rfqs to respond to
- target : the strategy object, for access to position and value data
and which returns a set of transactions, a MultiIndex DataFrame with:
Date, Security | quantity, price
"""
def __init__(self, rfqs, model):
super(SimulateRFQTransactions, self).__init__()
self.rfqs = rfqs
self.model = model
def __call__(self, target):
timeline = target.data.index
index = timeline.get_loc(target.now)
end = target.now
if index == 0:
start = pd.Timestamp.min
else:
start = timeline[index - 1]
# Get the RFQs since the last update
all_rfqs = target.get_data(self.rfqs)
timestamps = all_rfqs.index.get_level_values("Date")
rfqs = all_rfqs[(timestamps > start) & (timestamps <= end)]
# Turn the RFQs into transactions
transactions = self.model(rfqs, target)
for (_, security), transaction in transactions.iterrows():
c = target[security]
c.transact(
transaction["quantity"], price=transaction["price"], update=False
)
# Now update
target.root.update(target.now)
return True
def _get_unit_risk(security, data, index=None):
try:
unit_risks = data[security]
unit_risk = unit_risks.values[index]
except Exception:
# No risk data, assume zero
unit_risk = 0.0
return unit_risk
class UpdateRisk(Algo):
"""
Tracks a risk measure on all nodes of the strategy. To use this node, the
``additional_data`` argument on :class:`Backtest <bt.backtest.Backtest>` must
have a "unit_risk" key. The value should be a dictionary, keyed
by risk measure, of DataFrames with a column per security that is sensitive to that measure.
Args:
* name (str): the name of the risk measure (IR01, PVBP, IsIndustials, etc).
The name must coincide with the keys of the dictionary passed to additional_data as the
"unit_risk" argument.
* history (int): The level of depth in the tree at which to track the time series of risk numbers.
i.e. 0=no tracking, 1=first level only, etc. More levels is more expensive.
Modifies:
* The "risk" attribute on the target and all its children
* If history==True, the "risks" attribute on the target and all its children
"""
def __init__(self, measure, history=0):
super(UpdateRisk, self).__init__(name="UpdateRisk>%s" % measure)
self.measure = measure
self.history = history
def _setup_risk(self, target, set_history):
"""Setup risk attributes on the node in question"""
target.risk = {}
if set_history:
target.risks = pd.DataFrame(index=target.data.index)
def _setup_measure(self, target, set_history):
"""Setup a risk measure within the risk attributes on the node in question"""
target.risk[self.measure] = np.NaN
if set_history:
target.risks[self.measure] = np.NaN
def _set_risk_recursive(self, target, depth, unit_risk_frame):
set_history = depth < self.history
# General setup of risk on nodes
if not hasattr(target, "risk"):
self._setup_risk(target, set_history)
if self.measure not in target.risk:
self._setup_measure(target, set_history)
if isinstance(target, bt.core.SecurityBase):
# Use target.root.now as non-traded securities may not have been updated yet
# and there is no need to update them here as we only use position
index = unit_risk_frame.index.get_loc(target.root.now)
unit_risk = _get_unit_risk(target.name, unit_risk_frame, index)
if is_zero(target.position):
risk = 0.0
else:
risk = unit_risk * target.position * target.multiplier
else:
risk = 0.0
for child in target.children.values():
self._set_risk_recursive(child, depth + 1, unit_risk_frame)
risk += child.risk[self.measure]
target.risk[self.measure] = risk
if depth < self.history:
target.risks.loc[target.now, self.measure] = risk
def __call__(self, target):
unit_risk_frame = target.get_data("unit_risk")[self.measure]
self._set_risk_recursive(target, 0, unit_risk_frame)
return True
class PrintRisk(Algo):
"""
This Algo prints the risk data.
Args:
* fmt_string (str): A string that will later be formatted with the
target object's risk attributes. Therefore, you should provide
what you want to examine within curly braces ( { } )
If not provided, will print the entire dictionary with no formatting.
"""
def __init__(self, fmt_string=""):
super(PrintRisk, self).__init__()
self.fmt_string = fmt_string
def __call__(self, target):
if hasattr(target, "risk"):
if self.fmt_string:
print(self.fmt_string.format(**target.risk))
else:
print(target.risk)
return True
class HedgeRisks(Algo):
"""
Hedges risk measures with selected instruments.
Make sure that the UpdateRisk algo has been called beforehand.
Args:
* measures (list): the names of the risk measures to hedge
* pseudo (bool): whether to use the pseudo-inverse to compute
the inverse Jacobian. If False, will fail if the number
of selected instruments is not equal to the number of
measures, or if the Jacobian is singular
* strategy (StrategyBase): If provided, will hedge the risk
from this strategy in addition to the risk from target.
This is to allow separate tracking of hedged and unhedged
performance. Note that risk_strategy must occur earlier than
'target' in a depth-first traversal of the children of the root,
otherwise hedging will occur before positions of risk_strategy are
updated.
* throw_nan (bool): Whether to throw on nan hedge notionals, rather
than simply not hedging.
Requires:
* selected
"""
def __init__(self, measures, pseudo=False, strategy=None, throw_nan=True):
super(HedgeRisks, self).__init__()
if len(measures) == 0:
raise ValueError("Must pass in at least one measure to hedge")
self.measures = measures
self.pseudo = pseudo
self.strategy = strategy
self.throw_nan = throw_nan
def _get_target_risk(self, target, measure):
if not hasattr(target, "risk"):
raise ValueError("risk not set up on target %s" % target.name)
if measure not in target.risk:
raise ValueError("measure %s not set on target %s" % (measure, target.name))
return target.risk[measure]
def __call__(self, target):
securities = target.temp["selected"]
# Get target risk
target_risk = np.array(
[self._get_target_risk(target, m) for m in self.measures]
)
if self.strategy is not None:
# Add the target risk of the strategy to the risk of the target
# (which contains existing hedges)
target_risk += np.array(
[self._get_target_risk(self.strategy, m) for m in self.measures]
)
# Turn target_risk into a column array
target_risk = target_risk.reshape(len(self.measures), 1)
# Get hedge risk as a Jacobian matrix
data = []
for m in self.measures:
d = target.get_data("unit_risk").get(m)
if d is None:
raise ValueError(
"unit_risk for %s not present in temp on %s"
% (self.measure, target.name)
)
i = d.index.get_loc(target.now)
data.append((i, d))
hedge_risk = np.array(
[[_get_unit_risk(s, d, i) for (i, d) in data] for s in securities]
)
# Get hedge ratios
if self.pseudo:
inv = np.linalg.pinv(hedge_risk).T
else:
inv = np.linalg.inv(hedge_risk).T
notionals = np.matmul(inv, -target_risk).flatten()
# Hedge
for notional, security in zip(notionals, securities):
if np.isnan(notional) and self.throw_nan:
raise ValueError("%s has nan hedge notional" % security)
target.transact(notional, security)
return True
| mit | ce644626aca7b452c6e043b3bb31655a | 30.067997 | 114 | 0.60514 | 4.130068 | false | false | false | false |
abelfunctions/abelfunctions | abelfunctions/tests/test_puiseux_series_ring.py | 2 | 10394 | import unittest
from .test_abelfunctions import AbelfunctionsTestCase
from abelfunctions.puiseux_series_ring import PuiseuxSeriesRing
from abelfunctions.puiseux_series_ring_element import LaurentSeries_V
from sage.all import SR, CC
from sage.rings.laurent_series_ring import LaurentSeriesRing
from sage.rings.rational_field import QQ
from sage.rings.qqbar import QQbar
from sage.rings.infinity import infinity
class TestPuiseuxSeriesRing(unittest.TestCase):
def test_construction_QQ(self):
R = PuiseuxSeriesRing(QQ, 'x')
x = R.gen()
def test_construction_SR(self):
R = PuiseuxSeriesRing(SR, 'x')
x = R.gen()
def test_construction_QQbar(self):
R = PuiseuxSeriesRing(QQbar, 'x')
x = R.gen()
def test_change_ring(self):
R = PuiseuxSeriesRing(QQ, 'x')
S = R.change_ring(QQbar)
self.assertEqual(R.base_ring(), QQ)
self.assertEqual(S.base_ring(), QQbar)
T = R.change_ring(SR)
self.assertEqual(T.base_ring(), SR)
B = QQ['a,b']
U = R.change_ring(B)
self.assertEqual(U.base_ring(), B)
class TestPuiseuxSeries(unittest.TestCase):
def setUp(self):
pass
def test_laurent_ramification(self):
R = PuiseuxSeriesRing(QQ, 'x')
x = R.gen()
y = R.laurent_series_ring().gen()
p = x
self.assertEqual(p.laurent_part, y)
self.assertEqual(p.ramification_index, 1)
p = x**2
self.assertEqual(p.laurent_part, y**2)
self.assertEqual(p.ramification_index, 1)
p = x**(QQ(1)/2)
self.assertEqual(p.laurent_part, y)
self.assertEqual(p.ramification_index, 2)
p = x**(QQ(2)/3)
self.assertEqual(p.laurent_part, y**2)
self.assertEqual(p.ramification_index, 3)
p = 1 + 42*x**(QQ(1)/2) + 99*x**(QQ(1)/3)
self.assertEqual(p.laurent_part, 1 + 99*y**2 + 42*y**3)
self.assertEqual(p.ramification_index, 6)
def test_LaurentSeries_V(self):
L = LaurentSeriesRing(QQ,'t')
t = L.gen()
l = 1*t**(-3) + 2 + 3*t**1 + 4*t**2 + 5*t**9
m = LaurentSeries_V(l,1)
self.assertEqual(l, m)
m = LaurentSeries_V(l, 2)
self.assertEqual(m.exponents(), [-6,0,2,4,18])
self.assertEqual(m.coefficients(), [1,2,3,4,5])
m = LaurentSeries_V(l, -1)
self.assertEqual(m.exponents(), [-9,-2,-1,0,3])
self.assertEqual(m.coefficients(), [5,4,3,2,1])
m = LaurentSeries_V(l, -3)
self.assertEqual(m.exponents(), [-27,-6,-3,0,9])
self.assertEqual(m.coefficients(), [5,4,3,2,1])
def test_repr(self):
R = PuiseuxSeriesRing(QQ, 't')
t = R.gen()
p = R(1)
s = '1'
self.assertEqual(str(p), s)
p = t
s = 't'
self.assertEqual(str(p), s)
p = t**2
s = 't^2'
self.assertEqual(str(p), s)
half = QQ(1)/QQ(2)
p = t**half
s = 't^(1/2)'
self.assertEqual(str(p), s)
p = t**(-half)
s = 't^(-1/2)'
self.assertEqual(str(p), s)
def test_add(self):
R = PuiseuxSeriesRing(QQ, 't')
t = R.gen()
half = QQ(1)/QQ(2)
p = 1
q = t
r = 1 + t
self.assertEqual(p + q, r)
p = 1
q = t**half
r = 1 + t**half
self.assertEqual(p + q, r)
p = 1 + t
q = 1 + t + t**2
r = 2 + 2*t + t**2
self.assertEqual(p + q, r)
p = 1 + t**(QQ(1)/2)
q = 1 + t**(QQ(1)/2) + t
r = 2 + 2*t**(QQ(1)/2) + t
self.assertEqual(p + q, r)
def test_sub(self):
R = PuiseuxSeriesRing(QQ, 't')
t = R.gen()
half = QQ(1)/QQ(2)
p = 1 + t
q = 1 + t + t**2
r = t**2
self.assertEqual(q - p, r)
p = 1 + t**half
q = 1 + t**half + t
r = t
self.assertEqual(q - p, r)
def test_mul(self):
R = PuiseuxSeriesRing(QQ, 't')
t = R.gen()
half = QQ(1)/QQ(2)
p = t**half
q = t**half
r = t
self.assertEqual(p * q, r)
p = 1 + t
q = 1 + t + t**2
r = 1 + 2*t + 2*t**2 + t**3
self.assertEqual(p * q, r)
p = 1 + t**half
q = 1 + t**half + t
r = 1 + 2*t**half + 2*t + t**(half+1)
self.assertEqual(p * q, r)
def test_change_ring(self):
R = PuiseuxSeriesRing(QQ, 'x')
x = R.gen()
half = QQ(1)/2
p = x**(-half) + 1 + x + half*x**(5*half)
S = PuiseuxSeriesRing(CC, 'x')
q = p.change_ring(CC)
self.assertEqual(q.parent(), S)
T = PuiseuxSeriesRing(SR, 'x')
r = p.change_ring(SR)
self.assertEqual(r.parent(), T)
B = QQ['a,b']
U = PuiseuxSeriesRing(B, 'x')
s = p.change_ring(B)
self.assertEqual(s.parent(), U)
def test_bigoh(self):
R = PuiseuxSeriesRing(QQ, 'x')
x = R.gen()
half = QQ(1)/QQ(2)
p = x**(3*half)
q = p.add_bigoh(half)
self.assertEqual(q.prec(), half)
self.assertEqual(q.laurent_part.prec(), 1)
p = x**(3*half)
q = p.add_bigoh(4)
self.assertEqual(q.prec(), 4)
self.assertEqual(q.laurent_part.prec(), 8)
# def test_div(self):
# L = LaurentSeriesRing(QQ, 't')
# t = L.gen()
# p = PuiseuxXSeries(L(1), e=1)
# q = PuiseuxXSeries(t, e=1)
# r = PuiseuxXSeries(t, e=1)
# s = PuiseuxXSeries(t**(-1), e=1)
# self.assertEqual(q/p, r)
# self.assertEqual(p/q, s)
# p = PuiseuxXSeries(t, e=1)
# q = PuiseuxXSeries(t**2, e=1)
# r = PuiseuxXSeries(t, e=1)
# s = PuiseuxXSeries(t**(-1), e=1)
# self.assertEqual(q/p, r)
# self.assertEqual(p/q, s)
# p = PuiseuxXSeries(L(1), e=3)
# q = PuiseuxXSeries(t, e=3)
# r = PuiseuxXSeries(t, e=3)
# s = PuiseuxXSeries(t**(-1), e=3)
# self.assertEqual(q/p, r)
# self.assertEqual(p/q, s)
# p = PuiseuxXSeries(t, e=3)
# q = PuiseuxXSeries(t**2, e=3)
# r = PuiseuxXSeries(t, e=3)
# s = PuiseuxXSeries(t**(-1), e=3)
# self.assertEqual(q/p, r)
# self.assertEqual(p/q, s)
# p = PuiseuxXSeries(t, e=3)
# q = PuiseuxXSeries(t, e=2)
# r = PuiseuxXSeries(t**1, e=6)
# s = PuiseuxXSeries(t**(-1), e=6)
# self.assertEqual(q/p, r)
# self.assertEqual(p/q, s)
# def test_pow(self):
# from sage.rings.big_oh import O
# L = LaurentSeriesRing(QQ, 't')
# t = L.gen()
# n = 2
# p = PuiseuxXSeries(1 + t, e=1)
# r = PuiseuxXSeries(1 + 2*t + t**2, e=1)
# self.assertEqual(p**n, r)
# p = PuiseuxXSeries(1 + t + O(t**2), e=1)
# r = PuiseuxXSeries(1 + 2*t, e=1)
# self.assertEqual(p**n, r)
# def test_valuation(self):
# L = LaurentSeriesRing(QQ, 't')
# t = L.gen()
# p = PuiseuxXSeries(t + t**3, e=1)
# self.assertEqual(p.valuation(), QQ(1))
# p = PuiseuxXSeries(t + t**3, e=2)
# self.assertEqual(p.valuation(), QQ(1)/2)
# def test_prec(self):
# L = LaurentSeriesRing(QQ, 't')
# t = L.gen()
# p = PuiseuxXSeries(t + t**3, e=1)
# self.assertEqual(p.valuation(), QQ(1))
# p = PuiseuxXSeries(t + t**3, e=2)
# self.assertEqual(p.valuation(), QQ(1)/2)
# def test_list(self):
# L = LaurentSeriesRing(QQ, 't')
# t = L.gen()
# p = PuiseuxXSeries(2*t**(-1) + 3 + 5*t**3 + 7*t**8, e=3)
# exponents = p.exponents()
# coefficients = p.coefficients()
# list = p.list()
# self.assertEqual(exponents, [QQ(-1)/3, 0, 1, QQ(8)/3])
# self.assertEqual(coefficients, [2, 3, 5, 7])
# self.assertEqual(list, [2,3,0,0,5,0,0,0,0,7])
# def test_different_parents(self):
# L = LaurentSeriesRing(QQ, 't')
# t = L.gen()
# M = LaurentSeriesRing(SR, 't')
# s = M.gen()
# p = PuiseuxXSeries(t, e=1)
# q = PuiseuxXSeries(s, e=1)
# print p
# print q
# r = PuiseuxXSeries(2*t, e=1)
# print r
# print p+q
# self.assertEqual(p+q, r)
# # r = PuiseuxXSeries(t**2, e=1)
# # self.assertEqual(p*q, r)
# # r = PuiseuxXSeries(L(1), e=1)
# # self.assertEqual(p/q, r)
# def test_symbolic(self):
# from sage.all import SR
# L = LaurentSeriesRing(SR, 't')
# t = L.gen()
# a = SR('a')
# p = PuiseuxXSeries(t**(-1) + a + 5*t + t**3 + 9*t**5, e=3, a=1)
# self.assertTrue(a in p.list())
# def test_prec(self):
# L = LaurentSeriesRing(QQ, 't')
# t = L.gen()
# p = PuiseuxXSeries(L(1), 0, 1, order=5)
# q = PuiseuxXSeries(t**3, 0, 1, order=5)
# r = PuiseuxXSeries(t**(-1) + t**2, 0, 1, order=5)
# s = PuiseuxXSeries(t**(-2) + t**(-1), 0, 1, order=5)
# self.assertEqual((p*p).prec(), 5)
# self.assertEqual((q*q).prec(), 8)
# self.assertEqual((r*r).prec(), 4)
# self.assertEqual((s*s).prec(), 3)
# # ramified
# p = PuiseuxXSeries(L(1), 0, 2, order=5)
# q = PuiseuxXSeries(t**3, 0, 2, order=5)
# r = PuiseuxXSeries(t**(-1) + t**2, 0, 2, order=5)
# s = PuiseuxXSeries(t**(-2) + t**(-1), 0, 2, order=5)
# self.assertEqual((p*p).prec(), QQ(5)/2)
# self.assertEqual((q*q).prec(), QQ(8)/2)
# self.assertEqual((r*r).prec(), QQ(4)/2)
# self.assertEqual((s*s).prec(), QQ(3)/2)
# def test_prec_bigoh(self):
# from sage.rings.big_oh import O
# L = LaurentSeriesRing(QQ, 't')
# t = L.gen()
# # same as test_prec, but using bigoh notation instead
# p = PuiseuxXSeries(L(1) + O(t**5), 0, 2)
# q = PuiseuxXSeries(t**3 + O(t**5), 0, 2)
# r = PuiseuxXSeries(t**(-1) + t**2 + O(t**5), 0, 2)
# s = PuiseuxXSeries(t**(-2) + t**(-1) + O(t**5), 0, 2)
# self.assertEqual((p*p).prec(), QQ(5)/2)
# self.assertEqual((q*q).prec(), QQ(8)/2)
# self.assertEqual((r*r).prec(), QQ(4)/2)
# self.assertEqual((s*s).prec(), QQ(3)/2)
if __name__ == '__main__':
unittest.main()
| mit | 2a998244f0ac508f07120ccac0b62de2 | 27.398907 | 73 | 0.489513 | 2.630061 | false | true | false | false |
abelfunctions/abelfunctions | abelfunctions/integralbasis.py | 1 | 11914 | r"""Integral Basis :mod:`abelfunctions.integralbasis`
=================================================
A module for computing integral bases of algebraic function fields of the form
:math:`O(X) = \mathbb{C}[x,y] / (f(x,y))` where :math:`X : f(x,y) = 0`.
For polynomials over :math:`\mathbb{Q}[x,y]` we use Singular's very fast
implementation.
The slow / general-purpose algorithm is based off of the paper "An Algorithm
for Computing an Integral Basis in an Algebraic Function Field" by Mark van
Hoeij [vHoeij]_.
An integral basis for :math:`O(X)` is a set of :math:`\beta_i \in
\mathbb{C}(x,y)` such that
.. math::
\overline{O(X)} = \beta_1\mathbb{C}[x,y] + \cdots + \beta_g\mathbb{C}[x,y].
This data is necessary for computing a basis for the space of holomorphic
differentials :math:`\Omega_X^1` defined on the Riemann surface :math:`X` which
is implemented in ``differentials``.
Functions
---------
.. autosummary::
integral_basis
References
----------
.. [vHoeij] Mark van Hoeij. "An Algorithm for Computing an Integral Basis in an
Algebraic Function Field". J. Symbolic Computation. (1994) 18, p. 353-363
.. [Singular] Wolfram Decker, Gert-Martin Greuel, Gerhard Pfister, and Hans
Schonemann. "Singular: library for computing the normalization of affine
rings". (2015).
Examples
--------
Contents
--------
"""
from abelfunctions.puiseux import puiseux
from abelfunctions.puiseux_series_ring import PuiseuxSeriesRing
from sage.all import cached_function
from sage.functions.other import ceil
from sage.matrix.constructor import Matrix
from sage.rings.polynomial.all import PolynomialRing
from sage.rings.rational_field import QQ
from sage.rings.qqbar import QQbar
import warnings
def Int(i, px):
r"""Computes :math:`Int_i = \sum_{k \neq i} v(p_i-p_k)`.
``Int`` is used in :func:`compute_expansion_bounds` for determining
sufficient bounds on Puiseux series expansions.
Parameters
----------
i : int
Index of the Puiseux series in the list, `px`, to compute `Int` of.
p : list, PuiseuxXSeries
A list of :class:`PuiseuxXSeries`.
Returns
-------
val : rational
The `Int` of the `i`th element of `px`.
"""
n = len(px)
pxi = px[i]
val = QQ(0)
for k in range(n):
if k != i:
val += (pxi-px[k]).valuation()
return val
def compute_expansion_bounds(px):
r"""Returns a list of necessary bounds on each Puiseux series in ``px``.
Computes the expansion bounds :math:`N_1, \ldots, N_n` such that for
all polynomials :math:`G \in L[x,y]` the truncation :math:`r_i` of
the Puiseux series :math:`p_i` satisfying :math:`v(r_i - p_i) > N_i`
satisfies the relation
.. math::
\forall M,i, v(G(r_i)) > M
if and only if
.. math::
\forall M,i, v(G(p_i)) > M.
That is, the truncations :math:`r_i` are sufficiently long so that
polynomial evaluation of :math:`r_i` and :math:`p_i` has the same
valuation.
Parameters
----------
px : list, PuiseuxXSeries
Returns
-------
list, int
A list of degree bounds for each PuiseuxXSeries in ``px``.
"""
n = len(px)
N = []
max_Int = max([Int(k, px) for k in range(n)])
for i in range(n):
pairwise_diffs = [(px[k] - px[i]).valuation()
for k in range(n) if k != i]
Ni = max(pairwise_diffs) + max_Int - Int(i, px) + 1
N.append(Ni)
return N
def compute_series_truncations(f, alpha):
r"""Computes Puiseux series at :math:`x=\alpha` with necessary terms.
The Puiseux series expansions of :math:`f = f(x,y)` centered at
:math:`\alpha` are computed up to the number of terms needed for the
integral basis algorithm to be successful. The expansion degree bounds are
determined by :func:`compute_expansion_bounds`.
Parameters
----------
f : polynomial
alpha : complex
Returns
-------
list : PuiseuxXSeries
A list of Puiseux series expansions centered at :math:`x = \alpha` with
enough terms to compute integral bases as SymPy expressions.
"""
# compute the parametric Puiseix series with the minimal number of terms
# needed to distinguish them.
pt = puiseux(f,alpha)
px = [p for P in pt for p in P.xseries()]
# compute the orders necessary for the integral basis algorithm. the orders
# are on the Puiseux x-series (non-parametric) so scale by the ramification
# index of each series
N = compute_expansion_bounds(px)
for i in range(len(N)):
e = px[i].ramification_index
N[i] = ceil(N[i]*e)
order = max(N) + 1
for pti in pt:
pti.extend(order=order)
# recompute the corresponding x-series with the extened terms
px = [p for P in pt for p in P.xseries()]
return px
def integral_basis(f):
r"""Returns the integral basis of the algebraic function field of `f`.
An integral basis for the algebraic function field :math:`O(X)` is a
set of :math:`\beta_i \in \mathbb{C}(x,y)` such that
.. math::
\overline{O(X)} = \beta_1 \mathbb{C}[x,y] + \cdots + \beta_g
\mathbb{C}[x,y].
Parameters
----------
f : sympy.Expr
x : sympy.Symbol
y : sympy.Symbol
Returns
-------
list, sympy.Expr
A list of rational functions representing an integral basis.
"""
R = f.parent()
x,y = R.gens()
# The base algorithm assumes f is monic. If this is not the case then
# monicize by applying the map `y -> y/lc(x), f -> lc^(d-1) f` where lc(x)
# is the leading coefficient of f.
d = f.degree(y)
lc = f.polynomial(y).leading_coefficient()
if lc.degree() > 0:
# we have to carefully manage rings here. the path is:
# R(x)[y] -> R[x][y] -> R[x,y]
fmonic = f(x,y/lc)*lc**(d-1) # element of R(x)[y]
B = R.base_ring()
fmonic = fmonic.change_ring(B[x]) # element of R[x][y]
fmonic = R(fmonic) # element of R[x,y]
else:
fmonic = f/R.base_ring()(lc)
lc = 1
# if the curve lives in QQ[x,y] then use singular. otherwise, use slow
# self-implemented version
try:
fmonic = fmonic.change_ring(QQ)
except:
warnings.warn('using slower integral basis algorithm: '
'cannot coerce curve %s to QQ[%s,%s]'%(fmonic,x,y))
b = _integral_basis_monic(fmonic)
else:
b = _integral_basis_monic_singular(fmonic)
# reverse leading coefficient scaling
for i in range(1,len(b)):
b[i] = b[i](x,lc*y)
return b
def _integral_basis_monic_singular(f):
r"""Computes an integral basis using singular.
Note that singular can only compute integral bases of algebraic function
fields over :math:`\mathbb{Q}[x,y]`. It will fail over other extensions.
Parameters
----------
f : curve
Returns
-------
b : list
A list of integral basis elements.
"""
from sage.all import singular
singular.load('integralbasis.lib')
l = singular.integralBasis(f,2)
ideal, denom = l.sage()
numerators = ideal.gens()
b = [numer/denom for numer in numerators]
return b
def _integral_basis_monic(f):
r"""Returns the integral basis of a monic curve.
Called by :func:`integral_basis` after monicizing its input curve.
Parameters
----------
f : polynomial
Returns
-------
list : rational functions
A list of rational functions representing an integral basis of the
monic curve.
See Also
--------
integral_basis : generic integral basis function
"""
R = f.parent()
x,y = R.gens()
# compute df: the set of monic, irreducible polynomials k such that k**2
# divides the resultant
n = f.degree(y)
res = f.resultant(f.derivative(y),y).univariate_polynomial()
factor = res.squarefree_decomposition()
df = [k for k,deg in factor
if (deg > 1) and (k.leading_coefficient() == 1)]
# for each element k of df, take any root of k and compute the
# corresponding Puisuex series centered at that point
r = []
alpha = []
for k in df:
alphak = k.roots(ring=QQbar, multiplicities=False)[0]
alpha.append(alphak)
rk = compute_series_truncations(f,alphak)
r.append(rk)
# main loop
b = [R.fraction_field()(1)]
for d in range(1,n):
bd = compute_bd(f,b,df,r,alpha)
b.append(bd)
return b
def compute_bd(f, b, df, r, alpha):
"""Determine the next integral basis element form those already computed."""
# obtain the ring of Puiseux series in which the truncated series
# live. these should already be such that the base ring is SR, the symbolic
# ring. (below we will have to introduce symbolic indeterminants)
R = f.parent()
F = R.fraction_field()
x,y = R.gens()
# construct a list of indeterminants and a guess for the next integral
# basis element. to make computations uniform in the univariate and
# multivariate cases an additional generator of the underlying polynomial
# ring is introduced.
d = len(b)
Q = PolynomialRing(QQbar, ['a%d'%n for n in range(d)] + ['dummy'])
a = tuple(Q.gens())
b = tuple(b)
P = PuiseuxSeriesRing(Q, str(x))
xx = P.gen()
bd = F(y*b[-1])
# XXX HACK
for l in range(len(r)):
for k in range(len(r[l])):
r[l][k] = r[l][k].change_ring(Q)
# sufficiently singularize the current integral basis element guess at each
# of the singular points of df
for l in range(len(df)):
k = df[l] # factor
# alphak = alpha[l] # point at which the truncated series are centered
rk = r[l] # truncated puiseux series
# singularize the current guess at the current point using each
# truncated Puiseux seriesx
sufficiently_singular = False
while not sufficiently_singular:
# from each puiseux series, rki, centered at alphak construct a
# system of equations from the negative exponent terms appearing in
# the expression A(x,rki))
equations = []
for rki in rk:
# A = sum(a[j] * b[j](xx,rki) for j in range(d))
A = evaluate_A(a,b,xx,rki,d)
A += bd(xx, rki)
# implicit division by x-alphak, hence truncation to x^1
terms = A.truncate(1).coefficients()
equations.extend(terms)
# attempt to solve this linear system of equations. if a (unique)
# solution exists then the integral basis element is not singular
# enough at alphak
sols = solve_coefficient_system(Q, equations, a)
if not sols is None:
bdm1 = sum(F(sols[i][0])*b[i] for i in range(d))
bd = F(bdm1 + bd)/ F(k)
else:
sufficiently_singular = True
return bd
@cached_function
def evaluate_A(a,b,xx,rki,d):
A = sum(a[j] * b[j](xx,rki) for j in range(d))
return A
def solve_coefficient_system(Q, equations, vars):
# NOTE: to make things easier (and uniform) in the univariate case a dummy
# variable is added to the polynomial ring. See compute_bd()
a = Q.gens()[:-1]
B = Q.base_ring()
# construct the coefficient system and right-hand side
system = [[e.coefficient({ai:1}) for ai in a] for e in equations]
rhs = [-e.constant_coefficient() for e in equations]
system = Matrix(B, system)
rhs = Matrix(B, rhs).transpose()
# we only allow unique solutions. return None if there are infinitely many
# solutions or if no solution exists. Sage will raise a ValueError in both
# circumstances
try:
sol = system.solve_right(rhs)
except ValueError:
return None
return sol
| mit | e57f9250b2f4e867c1f67b1fb6a3c66f | 29.392857 | 80 | 0.61113 | 3.465387 | false | false | false | false |
thefactory/marathon-python | marathon/models/queue.py | 1 | 1946 | from .base import MarathonResource
from .app import MarathonApp
class MarathonQueueItem(MarathonResource):
"""Marathon queue item.
See: https://mesosphere.github.io/marathon/docs/rest-api.html#queue
List all the tasks queued up or waiting to be scheduled. This is mainly
used for troubleshooting and occurs when scaling changes are requested and the
volume of scaling changes out paces the ability to schedule those tasks. In
addition to the application in the queue, you see also the task count that
needs to be started.
If the task has a rate limit, then a delay to the start gets applied. You
can see this delay for every application with the seconds to wait before
the next launch will be tried.
:param app:
:type app: :class:`marathon.models.app.MarathonApp` or dict
:param delay: queue item delay
:type delay: :class:`marathon.models.app.MarathonQueueItemDelay` or dict
:param bool overdue:
"""
def __init__(self, app=None, overdue=None, count=None, delay=None, since=None,
processed_offers_summary=None, last_unused_offers=None):
self.app = app if isinstance(
app, MarathonApp) else MarathonApp().from_json(app)
self.overdue = overdue
self.count = count
self.delay = delay if isinstance(
delay, MarathonQueueItemDelay) else MarathonQueueItemDelay().from_json(delay)
self.since = since
self.processed_offers_summary = processed_offers_summary
self.last_unused_offers = last_unused_offers
class MarathonQueueItemDelay(MarathonResource):
"""Marathon queue item delay.
:param int time_left_seconds: Seconds to wait before the next launch will be tried.
:param bool overdue: Is the queue item overdue.
"""
def __init__(self, time_left_seconds=None, overdue=None):
self.time_left_seconds = time_left_seconds
self.overdue = overdue
| mit | e8a699281b2278f15321332ad3357a9b | 37.156863 | 89 | 0.701439 | 3.97955 | false | false | false | false |
vnpy/vnpy | vnpy/trader/constant.py | 1 | 4083 | """
General constant enums used in the trading platform.
"""
from enum import Enum
class Direction(Enum):
"""
Direction of order/trade/position.
"""
LONG = "多"
SHORT = "空"
NET = "净"
class Offset(Enum):
"""
Offset of order/trade.
"""
NONE = ""
OPEN = "开"
CLOSE = "平"
CLOSETODAY = "平今"
CLOSEYESTERDAY = "平昨"
class Status(Enum):
"""
Order status.
"""
SUBMITTING = "提交中"
NOTTRADED = "未成交"
PARTTRADED = "部分成交"
ALLTRADED = "全部成交"
CANCELLED = "已撤销"
REJECTED = "拒单"
class Product(Enum):
"""
Product class.
"""
EQUITY = "股票"
FUTURES = "期货"
OPTION = "期权"
INDEX = "指数"
FOREX = "外汇"
SPOT = "现货"
ETF = "ETF"
BOND = "债券"
WARRANT = "权证"
SPREAD = "价差"
FUND = "基金"
class OrderType(Enum):
"""
Order type.
"""
LIMIT = "限价"
MARKET = "市价"
STOP = "STOP"
FAK = "FAK"
FOK = "FOK"
RFQ = "询价"
class OptionType(Enum):
"""
Option type.
"""
CALL = "看涨期权"
PUT = "看跌期权"
class Exchange(Enum):
"""
Exchange.
"""
# Chinese
CFFEX = "CFFEX" # China Financial Futures Exchange
SHFE = "SHFE" # Shanghai Futures Exchange
CZCE = "CZCE" # Zhengzhou Commodity Exchange
DCE = "DCE" # Dalian Commodity Exchange
INE = "INE" # Shanghai International Energy Exchange
GFEX = "GFEX" # Guangzhou Futures Exchange
SSE = "SSE" # Shanghai Stock Exchange
SZSE = "SZSE" # Shenzhen Stock Exchange
BSE = "BSE" # Beijing Stock Exchange
SGE = "SGE" # Shanghai Gold Exchange
WXE = "WXE" # Wuxi Steel Exchange
CFETS = "CFETS" # CFETS Bond Market Maker Trading System
XBOND = "XBOND" # CFETS X-Bond Anonymous Trading System
# Global
SMART = "SMART" # Smart Router for US stocks
NYSE = "NYSE" # New York Stock Exchnage
NASDAQ = "NASDAQ" # Nasdaq Exchange
ARCA = "ARCA" # ARCA Exchange
EDGEA = "EDGEA" # Direct Edge Exchange
ISLAND = "ISLAND" # Nasdaq Island ECN
BATS = "BATS" # Bats Global Markets
IEX = "IEX" # The Investors Exchange
AMEX = "AMEX" # American Stock Exchange
TSE = "TSE" # Toronto Stock Exchange
NYMEX = "NYMEX" # New York Mercantile Exchange
COMEX = "COMEX" # COMEX of CME
GLOBEX = "GLOBEX" # Globex of CME
IDEALPRO = "IDEALPRO" # Forex ECN of Interactive Brokers
CME = "CME" # Chicago Mercantile Exchange
ICE = "ICE" # Intercontinental Exchange
SEHK = "SEHK" # Stock Exchange of Hong Kong
HKFE = "HKFE" # Hong Kong Futures Exchange
SGX = "SGX" # Singapore Global Exchange
CBOT = "CBT" # Chicago Board of Trade
CBOE = "CBOE" # Chicago Board Options Exchange
CFE = "CFE" # CBOE Futures Exchange
DME = "DME" # Dubai Mercantile Exchange
EUREX = "EUX" # Eurex Exchange
APEX = "APEX" # Asia Pacific Exchange
LME = "LME" # London Metal Exchange
BMD = "BMD" # Bursa Malaysia Derivatives
TOCOM = "TOCOM" # Tokyo Commodity Exchange
EUNX = "EUNX" # Euronext Exchange
KRX = "KRX" # Korean Exchange
OTC = "OTC" # OTC Product (Forex/CFD/Pink Sheet Equity)
IBKRATS = "IBKRATS" # Paper Trading Exchange of IB
# Special Function
LOCAL = "LOCAL" # For local generated data
class Currency(Enum):
    """
    Currency in which a contract is priced/settled.
    """
    USD = "USD"     # US dollar
    HKD = "HKD"     # Hong Kong dollar
    CNY = "CNY"     # Chinese yuan (renminbi)
    CAD = "CAD"     # Canadian dollar
class Interval(Enum):
    """
    Interval of bar data.
    """
    MINUTE = "1m"   # one-minute bars
    HOUR = "1h"     # one-hour bars
    DAILY = "d"     # daily bars
    WEEKLY = "w"    # weekly bars
    TICK = "tick"   # raw ticks (pseudo-interval, not an aggregated bar)
| mit | 8e68175bbadd5b1d621286d2896ac493 | 25.046053 | 71 | 0.51907 | 2.928254 | false | false | false | false |
stamparm/maltrail | core/httpd.py | 1 | 32994 | #!/usr/bin/env python
"""
Copyright (c) 2014-2022 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from __future__ import print_function
import datetime
import glob
import gzip
import hashlib
import io
import json
import mimetypes
import os
import re
import socket
import subprocess
import sys
import threading
import time
import traceback
from core.addr import addr_to_int
from core.addr import int_to_addr
from core.addr import make_mask
from core.attribdict import AttribDict
from core.common import get_regex
from core.common import ipcat_lookup
from core.common import worst_asns
from core.compat import xrange
from core.enums import HTTP_HEADER
from core.settings import config
from core.settings import CONTENT_EXTENSIONS_EXCLUSIONS
from core.settings import DATE_FORMAT
from core.settings import DISABLED_CONTENT_EXTENSIONS
from core.settings import DISPOSED_NONCES
from core.settings import HTML_DIR
from core.settings import HTTP_TIME_FORMAT
from core.settings import IS_WIN
from core.settings import MAX_NOFILE
from core.settings import NAME
from core.settings import PING_RESPONSE
from core.settings import SESSION_COOKIE_NAME
from core.settings import SESSION_COOKIE_FLAG_SAMESITE
from core.settings import SESSION_EXPIRATION_HOURS
from core.settings import SESSION_ID_LENGTH
from core.settings import SESSIONS
from core.settings import UNAUTHORIZED_SLEEP_TIME
from core.settings import UNICODE_ENCODING
from core.settings import VERSION
from thirdparty import six
from thirdparty.six.moves import BaseHTTPServer as _BaseHTTPServer
from thirdparty.six.moves import http_client as _http_client
from thirdparty.six.moves import socketserver as _socketserver
from thirdparty.six.moves import urllib as _urllib
try:
    # Reference: https://bugs.python.org/issue7980
    # Reference: http://code-trick.com/python-bug-attribute-error-_strptime/
    # Pre-import _strptime here to dodge a thread-safety bug where the first
    # concurrent calls to time.strptime() can raise AttributeError
    import _strptime
except ImportError:
    pass

try:
    # Raise the process' open-file-descriptor limit so the threaded HTTP
    # server can hold many simultaneous connections
    import resource
    resource.setrlimit(resource.RLIMIT_NOFILE, (MAX_NOFILE, MAX_NOFILE))
except:
    # `resource` is unavailable on Windows and setrlimit can fail without
    # sufficient privileges; in both cases keep the system defaults
    pass
# Cache for the /fail2ban endpoint: the last rendered response body and the
# coarse time key (8-second buckets; see `int(time.time()) >> 3`) it was
# computed for
_fail2ban_cache = None
_fail2ban_key = None
def start_httpd(address=None, port=None, join=False, pem=None):
    """
    Starts the (optionally TLS-wrapped) reporting HTTP server.

    :param address: bind address (IPv6 literals may be given in brackets);
                    empty/None binds to all interfaces
    :param port:    bind port (0/None lets the OS choose)
    :param join:    if True, serve in the current thread (blocks); otherwise
                    serve from a daemon thread and return immediately
    :param pem:     path to a PEM file holding both key and certificate;
                    when given, HTTPS is served instead of HTTP
    """

    class ThreadingServer(_socketserver.ThreadingMixIn, _BaseHTTPServer.HTTPServer):
        # Plain threaded HTTP server; one thread per request
        def server_bind(self):
            # Allow quick restarts on the same port
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            _BaseHTTPServer.HTTPServer.server_bind(self)

        def finish_request(self, *args, **kwargs):
            # Swallow per-request errors so one bad connection can't kill the server
            try:
                _BaseHTTPServer.HTTPServer.finish_request(self, *args, **kwargs)
            except:
                if config.SHOW_DEBUG:
                    traceback.print_exc()

    class SSLThreadingServer(ThreadingServer):
        # TLS variant; uses pyOpenSSL on Python 2 and stdlib ssl on Python 3
        def __init__(self, server_address, pem, HandlerClass):
            if six.PY2:
                import OpenSSL  # pyopenssl

                ThreadingServer.__init__(self, server_address, HandlerClass)
                # Pick the newest TLS method this pyOpenSSL build offers
                for method in ("TLSv1_2_METHOD", "TLSv1_1_METHOD", "TLSv1_METHOD", "TLS_METHOD", "SSLv23_METHOD", "SSLv2_METHOD"):
                    if hasattr(OpenSSL.SSL, method):
                        ctx = OpenSSL.SSL.Context(getattr(OpenSSL.SSL, method))
                        break
                ctx.use_privatekey_file(pem)
                ctx.use_certificate_file(pem)
                self.socket = OpenSSL.SSL.Connection(ctx, socket.socket(self.address_family, self.socket_type))
                self.server_bind()
                self.server_activate()
            else:
                import ssl

                # NOTE(review): the HandlerClass argument is ignored here and
                # ReqHandler is hardcoded — presumably because SSLReqHandler's
                # setup() relies on the Python 2-only socket._fileobject; confirm
                ThreadingServer.__init__(self, server_address, ReqHandler)
                ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
                ctx.load_cert_chain(pem, pem)
                self.socket = ctx.wrap_socket(socket.socket(self.address_family, self.socket_type), server_side=True)
                self.server_bind()
                self.server_activate()

        def shutdown_request(self, request):
            try:
                request.shutdown()
            except:
                pass

    class ReqHandler(_BaseHTTPServer.BaseHTTPRequestHandler):
        def do_GET(self):
            # Dispatches either to a dynamic endpoint (a "_<name>" method on this
            # class) or to a static file under HTML_DIR
            path, query = self.path.split('?', 1) if '?' in self.path else (self.path, "")
            params = {}
            content = None
            skip = False

            # do_POST stashes the urldecoded body into self.data before delegating here
            if hasattr(self, "data"):
                params.update(_urllib.parse.parse_qs(self.data))

            if query:
                params.update(_urllib.parse.parse_qs(query))

            # parse_qs yields lists; keep only the last value for each key
            for key in params:
                if params[key]:
                    params[key] = params[key][-1]

            if path == '/':
                path = "index.html"

            path = path.strip('/')
            extension = os.path.splitext(path)[-1].lower()

            if hasattr(self, "_%s" % path):
                # Dynamic endpoint (e.g. /login -> self._login)
                content = getattr(self, "_%s" % path)(params)
            else:
                path = path.replace('/', os.path.sep)
                path = os.path.abspath(os.path.join(HTML_DIR, path)).strip()

                # Allow extensionless references to .html pages
                if not os.path.isfile(path) and os.path.isfile("%s.html" % path):
                    path = "%s.html" % path

                # Inject configured IP aliases into the served main.js
                if any((config.IP_ALIASES,)) and self.path.split('?')[0] == "/js/main.js":
                    content = open(path, 'r').read()
                    content = re.sub(r"\bvar IP_ALIASES =.+", "var IP_ALIASES = {%s};" % ", ".join('"%s": "%s"' % (_.split(':', 1)[0].strip(), _.split(':', 1)[-1].strip()) for _ in config.IP_ALIASES), content)

                # Serve only files inside HTML_DIR (the relpath check blocks
                # path traversal) with allowed extensions
                if ".." not in os.path.relpath(path, HTML_DIR) and os.path.isfile(path) and (extension not in DISABLED_CONTENT_EXTENSIONS or os.path.split(path)[-1] in CONTENT_EXTENSIONS_EXCLUSIONS):
                    mtime = time.gmtime(os.path.getmtime(path))
                    if_modified_since = self.headers.get(HTTP_HEADER.IF_MODIFIED_SINCE)

                    # Conditional GET support for non-HTML content
                    if if_modified_since and extension not in (".htm", ".html"):
                        if_modified_since = [_ for _ in if_modified_since.split(';') if _.upper().endswith("GMT")][0]
                        if time.mktime(mtime) <= time.mktime(time.strptime(if_modified_since, HTTP_TIME_FORMAT)):
                            self.send_response(_http_client.NOT_MODIFIED)
                            self.send_header(HTTP_HEADER.CONNECTION, "close")
                            skip = True

                    if not skip:
                        content = content or open(path, "rb").read()
                        last_modified = time.strftime(HTTP_TIME_FORMAT, mtime)
                        self.send_response(_http_client.OK)
                        self.send_header(HTTP_HEADER.CONNECTION, "close")
                        self.send_header(HTTP_HEADER.CONTENT_TYPE, mimetypes.guess_type(path)[0] or "application/octet-stream")
                        self.send_header(HTTP_HEADER.LAST_MODIFIED, last_modified)
                        # For CSP policy directives see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/
                        self.send_header(HTTP_HEADER.CONTENT_SECURITY_POLICY, "default-src 'self'; style-src 'self' 'unsafe-inline'; img-src * blob:; script-src 'self' 'unsafe-eval' https://stat.ripe.net; frame-src *; object-src 'none'; block-all-mixed-content;")

                        if os.path.basename(path) == "index.html":
                            # Strip the demo-mode script from the production page
                            content = re.sub(b'\s*<script[^>]+src="js/demo.js"></script>', b'', content)

                        if extension not in (".htm", ".html"):
                            self.send_header(HTTP_HEADER.EXPIRES, "Sun, 17-Jan-2038 19:14:07 GMT")  # Reference: http://blog.httpwatch.com/2007/12/10/two-simple-rules-for-http-caching/
                            self.send_header(HTTP_HEADER.CACHE_CONTROL, "max-age=3600, must-revalidate")  # Reference: http://stackoverflow.com/a/5084555
                        else:
                            self.send_header(HTTP_HEADER.CACHE_CONTROL, "no-cache")
                else:
                    self.send_response(_http_client.NOT_FOUND)
                    self.send_header(HTTP_HEADER.CONNECTION, "close")
                    # NOTE(review): bytes %-formatting with a str operand raises
                    # TypeError on Python 3 — verify self.path handling here
                    content = b'<!DOCTYPE html><html lang="en"><head><title>404 Not Found</title></head><body><h1>Not Found</h1><p>The requested URL %s was not found on this server.</p></body></html>' % self.path.split('?')[0]

            if content is not None:
                if isinstance(content, six.text_type):
                    content = content.encode(UNICODE_ENCODING)

                # Expand template placeholders of the form <!NAME!> using
                # matching "_name" provider methods
                for match in re.finditer(b"<\\!(\\w+)\\!>", content):
                    name = match.group(1).decode(UNICODE_ENCODING)
                    _ = getattr(self, "_%s" % name.lower(), None)
                    if _:
                        content = self._format(content, **{name: _()})

                if "gzip" in self.headers.get(HTTP_HEADER.ACCEPT_ENCODING, ""):
                    self.send_header(HTTP_HEADER.CONTENT_ENCODING, "gzip")
                    _ = six.BytesIO()
                    compress = gzip.GzipFile("", "w+b", 9, _)
                    compress._stream = _
                    compress.write(content)
                    compress.flush()
                    compress.close()
                    content = compress._stream.getvalue()

                self.send_header(HTTP_HEADER.CONTENT_LENGTH, str(len(content)))

            self.end_headers()

            try:
                if content:
                    self.wfile.write(content)

                self.wfile.flush()
            except:
                pass

        def do_POST(self):
            # Read and urldecode the body, stash it for do_GET's param parsing
            # NOTE(review): int(length) raises TypeError if Content-Length is
            # missing — confirm this is acceptable (connection simply drops)
            length = self.headers.get(HTTP_HEADER.CONTENT_LENGTH)
            data = self.rfile.read(int(length)).decode(UNICODE_ENCODING)
            data = _urllib.parse.unquote_plus(data)
            self.data = data
            self.do_GET()

        def get_session(self):
            # Returns the AttribDict session for the request's cookie, or None;
            # sessions are bound to the client IP and expire after a timeout
            retval = None
            cookie = self.headers.get(HTTP_HEADER.COOKIE)

            if cookie:
                match = re.search(r"%s\s*=\s*([^;]+)" % SESSION_COOKIE_NAME, cookie)
                if match:
                    session = match.group(1)
                    if session in SESSIONS:
                        if SESSIONS[session].client_ip != self.client_address[0]:
                            # IP mismatch: silently treat as unauthenticated
                            pass
                        elif SESSIONS[session].expiration > time.time():
                            retval = SESSIONS[session]
                        else:
                            del SESSIONS[session]

            # Without configured users everybody is an anonymous valid session
            if retval is None and not config.USERS:
                retval = AttribDict({"username": "?"})

            return retval

        def delete_session(self):
            cookie = self.headers.get(HTTP_HEADER.COOKIE)

            if cookie:
                match = re.search(r"%s=(.+)" % SESSION_COOKIE_NAME, cookie)
                if match:
                    session = match.group(1)
                    if session in SESSIONS:
                        del SESSIONS[session]

        def version_string(self):
            # Value of the Server response header
            return "%s/%s" % (NAME, self._version())

        def end_headers(self):
            # Guard against double-ending headers (handlers and do_GET may both call it)
            if not hasattr(self, "_headers_ended"):
                _BaseHTTPServer.BaseHTTPRequestHandler.end_headers(self)
                self._headers_ended = True

        def log_message(self, format, *args):
            # Suppress default per-request stderr logging
            return

        def finish(self):
            try:
                _BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
            except:
                if config.SHOW_DEBUG:
                    traceback.print_exc()

        def _version(self):
            # Read the current VERSION out of settings.py (falls back to the
            # value imported at startup)
            version = VERSION

            try:
                for line in open(os.path.join(os.path.dirname(__file__), "settings.py"), 'r'):
                    match = re.search(r'VERSION = "([^"]*)', line)
                    if match:
                        version = match.group(1)
                        break
            except:
                pass

            return version

        def _statics(self):
            # Template helper: date suffix of the newest static malware trails file
            latest = max(glob.glob(os.path.join(os.path.dirname(__file__), "..", "trails", "static", "malware", "*.txt")), key=os.path.getmtime)
            return "/%s" % datetime.datetime.fromtimestamp(os.path.getmtime(latest)).strftime(DATE_FORMAT)

        def _logo(self):
            # Template helper: configurable header logo HTML
            if config.HEADER_LOGO:
                retval = config.HEADER_LOGO
            else:
                retval = '<img src="images/mlogo.png" style="width: 25px">altrail'

            return retval

        def _format(self, content, **params):
            # Replace <!KEY!> placeholders in a bytes payload
            if content:
                for key, value in params.items():
                    content = content.replace(b"<!%s!>" % key.encode(UNICODE_ENCODING), value.encode(UNICODE_ENCODING))

            return content

        def _login(self, params):
            # Nonce-based login: client sends sha256(stored_hash + nonce); on
            # success a session cookie is issued and optional per-user network
            # filters are expanded into addresses/netmasks/regexes
            valid = False

            if params.get("username") and params.get("hash") and params.get("nonce"):
                if params.get("nonce") not in DISPOSED_NONCES:
                    # Each nonce is single-use (replay protection)
                    DISPOSED_NONCES.add(params.get("nonce"))
                    for entry in (config.USERS or []):
                        entry = re.sub(r"\s", "", entry)
                        username, stored_hash, uid, netfilter = entry.split(':')
                        try:
                            uid = int(uid)
                        except ValueError:
                            uid = None
                        if username == params.get("username"):
                            try:
                                if params.get("hash") == hashlib.sha256((stored_hash.strip() + params.get("nonce")).encode(UNICODE_ENCODING)).hexdigest():
                                    valid = True
                                    break
                            except:
                                if config.SHOW_DEBUG:
                                    traceback.print_exc()

            if valid:
                _ = os.urandom(SESSION_ID_LENGTH)
                session_id = _.hex() if hasattr(_, "hex") else _.encode("hex")
                expiration = time.time() + 3600 * SESSION_EXPIRATION_HOURS
                self.send_response(_http_client.OK)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                cookie = "%s=%s; expires=%s; path=/; HttpOnly" % (SESSION_COOKIE_NAME, session_id, time.strftime(HTTP_TIME_FORMAT, time.gmtime(expiration)))
                if config.USE_SSL:
                    cookie += "; Secure"
                if SESSION_COOKIE_FLAG_SAMESITE:
                    cookie += "; SameSite=strict"
                self.send_header(HTTP_HEADER.SET_COOKIE, cookie)

                # Expand the user's netfilter spec into concrete addresses,
                # CIDR netmasks and/or a combined regex
                if netfilter in ("", '*', "::", "0.0.0.0/0"):
                    netfilters = None
                else:
                    addresses = set()
                    netmasks = set()

                    for item in set(re.split(r"[;,]", netfilter)):
                        item = item.strip()
                        if '/' in item:
                            _ = item.split('/')[-1]
                            if _.isdigit() and int(_) >= 16:
                                # /16 or tighter: enumerate every address
                                lower = addr_to_int(item.split('/')[0])
                                mask = make_mask(int(_))
                                upper = lower | (0xffffffff ^ mask)
                                while lower <= upper:
                                    addresses.add(int_to_addr(lower))
                                    lower += 1
                            else:
                                netmasks.add(item)
                        elif '-' in item:
                            # Explicit address range
                            _ = item.split('-')
                            lower, upper = addr_to_int(_[0]), addr_to_int(_[1])
                            while lower <= upper:
                                addresses.add(int_to_addr(lower))
                                lower += 1
                        elif re.search(r"\d+\.\d+\.\d+\.\d+", item):
                            addresses.add(item)

                    netfilters = netmasks
                    if addresses:
                        netfilters.add(get_regex(addresses))

                SESSIONS[session_id] = AttribDict({"username": username, "uid": uid, "netfilters": netfilters, "mask_custom": config.ENABLE_MASK_CUSTOM and uid >= 1000, "expiration": expiration, "client_ip": self.client_address[0]})
            else:
                # Slow down brute-force attempts before answering 401
                time.sleep(UNAUTHORIZED_SLEEP_TIME)
                self.send_response(_http_client.UNAUTHORIZED)
                self.send_header(HTTP_HEADER.CONNECTION, "close")

            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
            content = "Login %s" % ("success" if valid else "failed")

            if not IS_WIN:
                # Mirror the attempt into syslog (auth facility) for fail2ban et al.
                try:
                    subprocess.check_output("logger -p auth.info -t \"%s[%d]\" \"%s password for %s from %s port %s\"" % (NAME.lower(), os.getpid(), "Accepted" if valid else "Failed", params.get("username"), self.client_address[0], self.client_address[1]), stderr=subprocess.STDOUT, shell=True)
                except Exception:
                    if config.SHOW_DEBUG:
                        traceback.print_exc()

            return content

        def _logout(self, params):
            # Drop the session and bounce back to the index page
            self.delete_session()
            self.send_response(_http_client.FOUND)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.LOCATION, "/")

        def _whoami(self, params):
            session = self.get_session()
            username = session.username if session else ""

            self.send_response(_http_client.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")

            return username

        def _check_ip(self, params):
            # JSON(P) lookup of an address against worst-ASN and ipcat data
            session = self.get_session()

            if session is None:
                self.send_response(_http_client.UNAUTHORIZED)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                return None

            self.send_response(_http_client.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")

            try:
                result_worst = worst_asns(params.get("address"))
                if result_worst:
                    result_ipcat = result_worst
                else:
                    _ = (ipcat_lookup(params.get("address")) or "").lower().split(' ')
                    result_ipcat = _[1] if _[0] == 'the' else _[0]
                return ("%s" if not params.get("callback") else "%s(%%s)" % params.get("callback")) % json.dumps({"ipcat": result_ipcat, "worst_asns": str(result_worst is not None).lower()})
            except:
                if config.SHOW_DEBUG:
                    traceback.print_exc()

        def _trails(self, params):
            # Serves the raw trails file
            # NOTE(review): unlike _events/_counts this endpoint performs no
            # session check — confirm it is meant to be public (sensor feed)
            self.send_response(_http_client.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")

            return open(config.TRAILS_FILE, "rb").read()

        def _ping(self, params):
            # Unauthenticated liveness probe
            self.send_response(_http_client.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")

            return PING_RESPONSE

        def _fail2ban(self, params):
            # Returns the source addresses of today's events matching
            # config.FAIL2BAN_REGEX; results are cached in 8-second buckets
            global _fail2ban_cache
            global _fail2ban_key

            self.send_response(_http_client.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")

            content = ""
            key = int(time.time()) >> 3

            if config.FAIL2BAN_REGEX:
                try:
                    re.compile(config.FAIL2BAN_REGEX)
                except re.error:
                    content = "invalid regular expression used in option FAIL2BAN_REGEX"
                else:
                    if key == _fail2ban_key:
                        content = _fail2ban_cache
                    else:
                        result = set()
                        _ = os.path.join(config.LOG_DIR, "%s.log" % datetime.datetime.now().strftime("%Y-%m-%d"))
                        if os.path.isfile(_):
                            for line in open(_, "r"):
                                if re.search(config.FAIL2BAN_REGEX, line, re.I):
                                    # 4th whitespace-separated field of a log line
                                    # (presumably the source IP — verify format)
                                    result.add(line.split()[3])
                        content = "\n".join(result)

                        _fail2ban_cache = content
                        _fail2ban_key = key
            else:
                content = "configuration option FAIL2BAN_REGEX not set"

            return content

        def _events(self, params):
            # Serves event log content for a date ("YYYY-mm-dd") or a date range
            # ("YYYY-mm-dd_YYYY-mm-dd"), honoring HTTP Range requests and
            # filtering lines through the session's per-user network filters
            session = self.get_session()

            if session is None:
                self.send_response(_http_client.UNAUTHORIZED)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                return None

            start, end, size, total = None, None, -1, None
            content = None
            log_exists = False
            dates = params.get("date", "")

            if ".." in dates:
                # Reject path traversal attempts in the date parameter
                pass
            elif '_' not in dates:
                try:
                    date = datetime.datetime.strptime(dates, "%Y-%m-%d").strftime("%Y-%m-%d")
                    event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date)
                    if os.path.exists(event_log_path):
                        range_handle = open(event_log_path, "rb")
                        log_exists = True
                except ValueError:
                    print("[!] invalid date format in request")
                    log_exists = False
            else:
                # Date interval: concatenate all per-day logs into one buffer
                logs_data = ""
                date_interval = dates.split("_", 1)
                try:
                    start_date = datetime.datetime.strptime(date_interval[0], "%Y-%m-%d").date()
                    end_date = datetime.datetime.strptime(date_interval[1], "%Y-%m-%d").date()
                    for i in xrange(int((end_date - start_date).days) + 1):
                        date = start_date + datetime.timedelta(i)
                        event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date.strftime("%Y-%m-%d"))
                        if os.path.exists(event_log_path):
                            log_handle = open(event_log_path, "rb")
                            # NOTE(review): appending bytes read from an "rb"
                            # handle to a str accumulator raises TypeError on
                            # Python 3 — verify (should start as b"")
                            logs_data += log_handle.read()
                            log_handle.close()

                    range_handle = io.BytesIO(logs_data)
                    log_exists = True
                except ValueError:
                    print("[!] invalid date format in request")
                    log_exists = False

            if log_exists:
                range_handle.seek(0, 2)
                total = range_handle.tell()
                range_handle.seek(0)

                if self.headers.get(HTTP_HEADER.RANGE):
                    match = re.search(r"bytes=(\d+)-(\d+)", self.headers[HTTP_HEADER.RANGE])
                    if match:
                        start, end = int(match.group(1)), int(match.group(2))
                        max_size = end - start + 1
                        end = min(total - 1, end)
                        size = end - start + 1

                        # Keep the handle on the session so subsequent chunked
                        # requests continue where the previous one stopped
                        if start == 0 or not session.range_handle:
                            session.range_handle = range_handle

                        if session.netfilters is None and not session.mask_custom:
                            # Unfiltered user: plain byte-range answer
                            session.range_handle.seek(start)
                            self.send_response(_http_client.PARTIAL_CONTENT)
                            self.send_header(HTTP_HEADER.CONNECTION, "close")
                            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
                            self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, total))
                            content = session.range_handle.read(size)
                        else:
                            # Filtered user: stream line by line, keeping only
                            # lines matching the session's address filters
                            self.send_response(_http_client.OK)
                            self.send_header(HTTP_HEADER.CONNECTION, "close")
                            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")

                            buffer, addresses, netmasks, regex = io.StringIO(), set(), [], ""

                            for netfilter in session.netfilters or []:
                                if not netfilter:
                                    continue
                                if '/' in netfilter:
                                    netmasks.append(netfilter)
                                elif re.search(r"\A[\d.]+\Z", netfilter):
                                    addresses.add(netfilter)
                                elif "\\." in netfilter:
                                    regex = r"\b(%s)\b" % netfilter
                                else:
                                    print("[!] invalid network filter '%s'" % netfilter)
                                    return

                            for line in session.range_handle:
                                display = session.netfilters is None
                                ip = None
                                line = line.decode(UNICODE_ENCODING, "ignore")

                                if regex:
                                    match = re.search(regex, line)
                                    if match:
                                        ip = match.group(1)
                                        display = True

                                if not display and (addresses or netmasks):
                                    for match in re.finditer(r"\b(\d+\.\d+\.\d+\.\d+)\b", line):
                                        if not display:
                                            ip = match.group(1)
                                        else:
                                            break
                                        if ip in addresses:
                                            display = True
                                            break
                                        elif netmasks:
                                            for _ in netmasks:
                                                prefix, mask = _.split('/')
                                                if addr_to_int(ip) & make_mask(int(mask)) == addr_to_int(prefix):
                                                    # Cache the hit so the next
                                                    # occurrence short-circuits
                                                    addresses.add(ip)
                                                    display = True
                                                    break

                                if session.mask_custom and "(custom)" in line:
                                    # Hide details of custom trails from low-privilege users
                                    line = re.sub(r'("[^"]+"|[^ ]+) \(custom\)', "- (custom)", line)

                                if display:
                                    if ",%s" % ip in line or "%s," % ip in line:
                                        line = re.sub(r" ([\d.,]+,)?%s(,[\d.,]+)? " % re.escape(ip), " %s " % ip, line)
                                    buffer.write(line)
                                    if buffer.tell() >= max_size:
                                        break

                            content = buffer.getvalue()
                            end = start + len(content) - 1
                            self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, end + 1 + max_size * (len(content) >= max_size)))

                        if len(content) < max_size:
                            session.range_handle.close()
                            session.range_handle = None

                if size == -1:
                    # No (valid) Range header: stream the whole log
                    self.send_response(_http_client.OK)
                    self.send_header(HTTP_HEADER.CONNECTION, "close")
                    self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
                    self.end_headers()

                    with range_handle as f:
                        while True:
                            data = f.read(io.DEFAULT_BUFFER_SIZE)
                            if not data:
                                break
                            else:
                                self.wfile.write(data)
            else:
                self.send_response(_http_client.OK)  # instead of _http_client.NO_CONTENT (compatibility reasons)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                if self.headers.get(HTTP_HEADER.RANGE):
                    self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes 0-0/0")

            return content

        def _counts(self, params):
            # Returns {day_timestamp: approximate_event_count} as JSON for the
            # requested (inclusive) date window
            counts = {}

            session = self.get_session()

            if session is None:
                self.send_response(_http_client.UNAUTHORIZED)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                return None

            self.send_response(_http_client.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "application/json")

            match = re.search(r"\d+\-\d+\-\d+", params.get("from", ""))
            if match:
                min_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT)
            else:
                min_ = datetime.datetime.fromtimestamp(0)

            match = re.search(r"\d+\-\d+\-\d+", params.get("to", ""))
            if match:
                max_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT)
            else:
                max_ = datetime.datetime.now()

            min_ = min_.replace(hour=0, minute=0, second=0, microsecond=0)
            max_ = max_.replace(hour=23, minute=59, second=59, microsecond=999999)

            for filepath in sorted(glob.glob(os.path.join(config.LOG_DIR, "*.log"))):
                filename = os.path.basename(filepath)
                if not re.search(r"\A\d{4}-\d{2}-\d{2}\.log\Z", filename):
                    continue
                try:
                    current = datetime.datetime.strptime(os.path.splitext(filename)[0], DATE_FORMAT)
                except:
                    if config.SHOW_DEBUG:
                        traceback.print_exc()
                else:
                    if min_ <= current <= max_:
                        timestamp = int(time.mktime(current.timetuple()))
                        size = os.path.getsize(filepath)
                        with open(filepath, "rb") as f:
                            content = f.read(io.DEFAULT_BUFFER_SIZE)
                            if size >= io.DEFAULT_BUFFER_SIZE:
                                # Big file: extrapolate the line count from the
                                # first buffer and round to the nearest hundred
                                total = 1.0 * (1 + content.count(b'\n')) * size / io.DEFAULT_BUFFER_SIZE
                                counts[timestamp] = int(round(total / 100.0) * 100)
                            else:
                                counts[timestamp] = content.count(b'\n')

            return json.dumps(counts)

    class SSLReqHandler(ReqHandler):
        # Python 2 only: wrap the pyOpenSSL connection in file objects
        # (socket._fileobject does not exist on Python 3)
        def setup(self):
            self.connection = self.request
            self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
            self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)

    # IPv6 support
    if ':' in (address or ""):
        address = address.strip("[]")
        _BaseHTTPServer.HTTPServer.address_family = socket.AF_INET6

        # Reference: https://github.com/squeaky-pl/zenchmarks/blob/master/vendor/twisted/internet/tcp.py
        _AI_NUMERICSERV = getattr(socket, "AI_NUMERICSERV", 0)
        _NUMERIC_ONLY = socket.AI_NUMERICHOST | _AI_NUMERICSERV

        _address = socket.getaddrinfo(address, int(port) if str(port or "").isdigit() else 0, 0, 0, 0, _NUMERIC_ONLY)[0][4]
    else:
        _address = (address or '', int(port) if str(port or "").isdigit() else 0)

    try:
        if pem:
            server = SSLThreadingServer(_address, pem, SSLReqHandler)
        else:
            server = ThreadingServer(_address, ReqHandler)
    except Exception as ex:
        # Translate the common bind failures into friendly exit messages
        if "Address already in use" in str(ex):
            sys.exit("[!] another instance already running")
        elif "Name or service not known" in str(ex):
            sys.exit("[!] invalid configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS)
        elif "Cannot assign requested address" in str(ex):
            sys.exit("[!] can't use configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS)
        else:
            raise

    print("[i] starting HTTP%s server at http%s://%s:%d/" % ('S' if pem else "", 's' if pem else "", server.server_address[0], server.server_address[1]))
    print("[^] running...")

    if join:
        server.serve_forever()
    else:
        thread = threading.Thread(target=server.serve_forever)
        thread.daemon = True
        thread.start()
| mit | 0b1a8f3a86f01d4ee3492aa91e941b6a | 43.109626 | 294 | 0.490907 | 4.42101 | false | false | false | false |
marcwebbie/passpie | passpie/process.py | 2 | 1376 | import logging
import os
from subprocess import Popen, PIPE
from ._compat import basestring
# Shared sink used to discard subprocess stderr when debug logging is off
DEVNULL = open(os.devnull, 'w')
class Proc(Popen):
    """Popen subclass that UTF-8-encodes text passed as ``input`` to
    :meth:`communicate` and that works as a context manager even on Python
    versions where ``Popen`` itself does not implement the protocol."""

    def communicate(self, **kwargs):
        stdin_data = kwargs.get('input')
        # Encode textual input so callers may pass plain strings
        if stdin_data and isinstance(stdin_data, basestring):
            kwargs['input'] = stdin_data.encode('utf-8')
        return super(Proc, self).communicate(**kwargs)

    def __exit__(self, *args, **kwargs):
        parent = super(Proc, self)
        # Delegate only when the base class provides __exit__ (Python 3.2+)
        if hasattr(parent, '__exit__'):
            parent.__exit__(*args, **kwargs)

    def __enter__(self, *args, **kwargs):
        parent = super(Proc, self)
        if hasattr(parent, '__enter__'):
            return parent.__enter__(*args, **kwargs)
        # Older Popen has no context-manager support: act as our own
        return self
def call(*args, **kwargs):
    """Run a command through :class:`Proc` and return ``(stdout, stderr)``
    decoded as UTF-8 text (values stay ``None`` for undirected streams)."""
    # Surface stderr only when debug logging is enabled; discard it otherwise
    debug_enabled = logging.getLogger().getEffectiveLevel() == logging.DEBUG
    kwargs.setdefault('stderr', PIPE if debug_enabled else DEVNULL)
    kwargs.setdefault('stdout', PIPE)
    kwargs.setdefault('stdin', PIPE)
    kwargs.setdefault('shell', False)
    stdin_data = kwargs.pop('input', None)

    with Proc(*args, **kwargs) as proc:
        logging.debug(" ".join(args[0]))
        output, error = proc.communicate(input=stdin_data)
        try:
            output = output.decode('utf-8')
            error = error.decode('utf-8')
        except AttributeError:
            # Stream was not captured (None), leave it untouched
            pass
        return output, error
| mit | 21852a88d9ce300b3c9d1c1c5a44de9a | 27.666667 | 75 | 0.594477 | 4.035191 | false | false | false | false |
hips/autograd | autograd/scipy/special.py | 2 | 5135 | from __future__ import absolute_import
import scipy.special
import autograd.numpy as np
from autograd.extend import primitive, defvjp, defjvp
from autograd.numpy.numpy_vjps import unbroadcast_f, repeat_to_match_shape
### Beta function ###
beta = primitive(scipy.special.beta)
betainc = primitive(scipy.special.betainc)
betaln = primitive(scipy.special.betaln)

# dB(a,b)/da = B(a,b) * (psi(a) - psi(a+b)); symmetric for b
defvjp(beta,
       lambda ans, a, b: unbroadcast_f(a, lambda g: g * ans * (psi(a) - psi(a + b))),
       lambda ans, a, b: unbroadcast_f(b, lambda g: g * ans * (psi(b) - psi(a + b))))
# Gradient only w.r.t. x (argnums=[2]): d/dx I_x(a,b) = x^(a-1) (1-x)^(b-1) / B(a,b)
defvjp(betainc,
       lambda ans, a, b, x: unbroadcast_f(x, lambda g: g * np.power(x, a - 1) * np.power(1 - x, b - 1) / beta(a, b)),
       argnums=[2])
# d ln B(a,b) / da = psi(a) - psi(a+b); symmetric for b
defvjp(betaln,
       lambda ans, a, b: unbroadcast_f(a, lambda g: g * (psi(a) - psi(a + b))),
       lambda ans, a, b: unbroadcast_f(b, lambda g: g * (psi(b) - psi(a + b))))
### Gamma functions ###
polygamma = primitive(scipy.special.polygamma)
psi = primitive(scipy.special.psi)        # psi(x) is just polygamma(0, x)
digamma = primitive(scipy.special.digamma)  # digamma is another name for psi.
gamma = primitive(scipy.special.gamma)
gammaln = primitive(scipy.special.gammaln)
gammainc = primitive(scipy.special.gammainc)
gammaincc = primitive(scipy.special.gammaincc)
gammasgn = primitive(scipy.special.gammasgn)
rgamma  = primitive(scipy.special.rgamma)
multigammaln = primitive(scipy.special.multigammaln)

# gammasgn is piecewise constant, so its derivative is zero (no VJP)
defvjp(gammasgn, None)
# d/dx polygamma(n, x) = polygamma(n+1, x); no gradient w.r.t. the order n
defvjp(polygamma, None, lambda ans, n, x: lambda g: g * polygamma(n + 1, x))
defvjp(psi,      lambda ans, x: lambda g: g * polygamma(1, x))
defvjp(digamma,  lambda ans, x: lambda g: g * polygamma(1, x))
# d/dx Gamma(x) = Gamma(x) * psi(x)
defvjp(gamma,    lambda ans, x: lambda g: g * ans * psi(x))
defvjp(gammaln,  lambda ans, x: lambda g: g * psi(x))
# d/dx 1/Gamma(x) = -psi(x) / Gamma(x)
defvjp(rgamma,   lambda ans, x: lambda g: g * psi(x) / -gamma(x))
# Multivariate gammaln: sum of digammas over the dimension offsets
defvjp(multigammaln,lambda ans, a, d: lambda g:
       g * np.sum(digamma(np.expand_dims(a, -1) - np.arange(d)/2.), -1),
       None)
def make_gammainc_vjp_arg1(sign):
    """Build the VJP w.r.t. ``x`` for the regularized incomplete gamma
    functions: ``sign=1`` for gammainc (P), ``sign=-1`` for gammaincc (Q),
    since Q = 1 - P."""
    def vjp(ans, a, x):
        # d/dx P(a, x) = exp(-x) * x**(a-1) / Gamma(a)
        density = sign * np.exp(-x) * np.power(x, a - 1) / gamma(a)
        return unbroadcast_f(x, lambda g: g * density)
    return vjp

defvjp(gammainc, make_gammainc_vjp_arg1(1), argnums=[1])
defvjp(gammaincc, make_gammainc_vjp_arg1(-1), argnums=[1])
### Bessel functions ###
j0 = primitive(scipy.special.j0)
y0 = primitive(scipy.special.y0)
j1 = primitive(scipy.special.j1)
y1 = primitive(scipy.special.y1)
jn = primitive(scipy.special.jn)
yn = primitive(scipy.special.yn)

# Standard Bessel recurrences: J0' = -J1, Jn' = (J_{n-1} - J_{n+1}) / 2
# (and identically for Y); the integer order n gets no gradient
defvjp(j0,lambda ans, x: lambda g: -g * j1(x))
defvjp(y0,lambda ans, x: lambda g: -g * y1(x))
defvjp(j1,lambda ans, x: lambda g: g * (j0(x) - jn(2, x)) / 2.0)
defvjp(y1,lambda ans, x: lambda g: g * (y0(x) - yn(2, x)) / 2.0)
defvjp(jn, None, lambda ans, n, x: lambda g: g * (jn(n - 1, x) - jn(n + 1, x)) / 2.0)
defvjp(yn, None, lambda ans, n, x: lambda g: g * (yn(n - 1, x) - yn(n + 1, x)) / 2.0)

### Faster versions of common Bessel functions ###
i0 = primitive(scipy.special.i0)
i1 = primitive(scipy.special.i1)
iv = primitive(scipy.special.iv)
ive = primitive(scipy.special.ive)

# Modified Bessel recurrences: I0' = I1, Iv' = (I_{v-1} + I_{v+1}) / 2;
# ive(v, x) = iv(v, x) * exp(-|x|), hence the extra (v/x - sign(x)) term
defvjp(i0, lambda ans, x: lambda g: g * i1(x))
defvjp(i1, lambda ans, x: lambda g: g * (i0(x) + iv(2, x)) / 2.0)
defvjp(iv, None, lambda ans, n, x: lambda g: g * (iv(n - 1, x) + iv(n + 1, x)) / 2.0)
defvjp(ive, None, lambda ans, n, x: lambda g: g * (ans * (n / x - np.sign(x)) + ive(n + 1, x)))
### Error Function ###
inv_root_pi = 0.56418958354775627928  # 1 / sqrt(pi)
erf = primitive(scipy.special.erf)
erfc = primitive(scipy.special.erfc)

# d/dx erf(x) = 2/sqrt(pi) * exp(-x^2); erfc = 1 - erf, so its VJP is negated
defvjp(erf, lambda ans, x: lambda g: 2.*g*inv_root_pi*np.exp(-x**2))
defvjp(erfc,lambda ans, x: lambda g: -2.*g*inv_root_pi*np.exp(-x**2))

### Inverse error function ###
root_pi = 1.7724538509055159  # sqrt(pi)
erfinv = primitive(scipy.special.erfinv)
erfcinv = primitive(scipy.special.erfcinv)

# Inverse-function rule: erfinv'(x) = sqrt(pi)/2 * exp(erfinv(x)^2)
defvjp(erfinv,lambda ans, x: lambda g: g * root_pi / 2 * np.exp(erfinv(x)**2))
defvjp(erfcinv,lambda ans, x: lambda g: -g * root_pi / 2 * np.exp(erfcinv(x)**2))

### Logit and Expit ###
logit = primitive(scipy.special.logit)
expit = primitive(scipy.special.expit)

# logit'(x) = 1 / (x (1-x)); expit'(x) = expit(x) (1 - expit(x))
defvjp(logit,lambda ans, x: lambda g: g / ( x * (1 - x)))
defvjp(expit,lambda ans, x: lambda g: g * ans * (1 - ans))
### logsumexp ###
logsumexp = primitive(scipy.special.logsumexp)

def make_grad_logsumexp(ans, x, axis=None, b=1.0, keepdims=False):
    # VJP of logsumexp: d/dx_i log sum_j b*exp(x_j) = b * exp(x_i - ans);
    # g and ans must first be broadcast back to x's shape when the
    # reduction collapsed `axis` (and keepdims is False)
    shape, dtype = np.shape(x), np.result_type(x)
    def vjp(g):
        g_repeated,   _ = repeat_to_match_shape(g,   shape, dtype, axis, keepdims)
        ans_repeated, _ = repeat_to_match_shape(ans, shape, dtype, axis, keepdims)
        return g_repeated * b * np.exp(x - ans_repeated)
    return vjp

defvjp(logsumexp, make_grad_logsumexp)
def fwd_grad_logsumexp(g, ans, x, axis=None, b=1.0, keepdims=False):
    # JVP of logsumexp: the tangent g weighted by the softmax b*exp(x - ans),
    # reduced over the same axes as the primal
    if not keepdims:
        # Re-insert the reduced axes so `ans` broadcasts against `x`
        if isinstance(axis, int):
            ans = np.expand_dims(ans, axis)
        elif isinstance(axis, tuple):
            for ax in sorted(axis):
                ans = np.expand_dims(ans, ax)
    return np.sum(g * b * np.exp(x - ans), axis=axis, keepdims=keepdims)

defjvp(logsumexp, fwd_grad_logsumexp)
| mit | b408a8f734951b3ad126429d7552cd18 | 39.433071 | 117 | 0.643817 | 2.532051 | false | false | false | false |
hips/autograd | examples/negative_binomial_maxlike.py | 3 | 2159 | from __future__ import division, print_function
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.scipy.special import gammaln
from autograd import grad
import scipy.optimize
# The code in this example implements a method for finding a stationary point of
# the negative binomial likelihood via Newton's method, described here:
# https://en.wikipedia.org/wiki/Negative_binomial_distribution#Maximum_likelihood_estimation
def newton(f, x0):
    """Find a root of ``f`` starting from ``x0`` with scipy's Newton solver,
    supplying the first and second derivatives automatically via autograd."""
    fprime = grad(f)
    fprime2 = grad(fprime)
    return scipy.optimize.newton(f, x0, fprime=fprime, fprime2=fprime2)
def negbin_loglike(r, p, x):
    """Log pmf of the negative binomial distribution with shape ``r`` and
    success probability ``p``, evaluated at count(s) ``x``."""
    # log C(x + r - 1, x) expressed through log-gamma functions
    log_binom = gammaln(r + x) - gammaln(r) - gammaln(x + 1)
    return log_binom + x * np.log(p) + r * np.log(1 - p)
def negbin_sample(r, p, size):
    """Draw negative binomial samples via the gamma-Poisson mixture
    (a negative binomial is a gamma-compound-Poisson)."""
    rates = npr.gamma(r, p / (1 - p), size=size)
    return npr.poisson(rates)
def fit_maxlike(x, r_guess):
    """Maximum-likelihood fit of negative binomial parameters (r, p) to the
    counts ``x``, following Wikipedia's section on negative binomial max
    likelihood.  ``r_guess`` seeds the Newton iteration."""
    # Parameters only exist for overdispersed data (variance > mean)
    assert np.var(x) > np.mean(x), "Likelihood-maximizing parameters don't exist!"

    def loglike(r, p):
        return np.sum(negbin_loglike(r, p, x))

    def p_of_r(r):
        # Closed-form p that maximizes the likelihood for a fixed r
        return np.sum(x) / np.sum(r + x)

    def score(r):
        # Derivative of the profile log likelihood with respect to r
        return grad(loglike)(r, p_of_r(r))

    r_hat = newton(score, r_guess)
    return r_hat, p_of_r(r_hat)
if __name__ == "__main__":
    # Demo: sample synthetic negative binomial data, recover the parameters
    # by maximum likelihood, verify stationarity, and plot the fit.

    # generate data
    npr.seed(0)
    data = negbin_sample(r=5, p=0.5, size=1000)

    # fit likelihood-extremizing parameters
    r, p = fit_maxlike(data, r_guess=1)

    # report fit
    print('Fit parameters:')
    print('r={r}, p={p}'.format(r=r, p=p))

    # both partial derivatives should be (numerically) zero at the optimum
    print('Check that we are at a local stationary point:')
    loglike = lambda r, p: np.sum(negbin_loglike(r, p, data))
    grad_both = grad(loglike, argnum=(0, 1))
    print(grad_both(r, p))

    import matplotlib.pyplot as plt
    xm = data.max()
    plt.figure()
    # `density=True` replaces the deprecated/removed `normed=True` kwarg of
    # matplotlib's hist() and produces the same normalized histogram
    plt.hist(data, bins=np.arange(xm+1)-0.5, density=True, label='normed data counts')
    plt.xlim(0,xm)
    plt.plot(np.arange(xm), np.exp(negbin_loglike(r, p, np.arange(xm))), label='maxlike fit')
    plt.xlabel('k')
    plt.ylabel('p(k)')
    plt.legend(loc='best')
    plt.show()
| mit | dfebfe93899cc218c620d7f562367ff1 | 31.712121 | 93 | 0.670218 | 2.941417 | false | false | false | false |
hips/autograd | autograd/misc/flatten.py | 3 | 1120 | """
Handy functions for flattening nested containers containing numpy
arrays. The main purpose is to make examples and optimizers simpler.
"""
from autograd import make_vjp
from autograd.builtins import type
import autograd.numpy as np
def flatten(value):
    """Flattens any nesting of tuples, lists, or dicts, with numpy arrays or
    scalars inside.  Returns a 1D numpy array and an unflatten function.
    Doesn't preserve mixed numeric types (e.g. floats and ints).  Assumes dict
    keys are sortable."""
    # make_vjp returns (vjp, output); the vjp of _flatten maps a flat vector
    # back onto the original container structure, i.e. it *is* "unflatten"
    vjp_fun, flat_value = make_vjp(_flatten)(value)
    return flat_value, vjp_fun
def _flatten(value):
    """Recursively flatten one container into a 1-D array.

    Dicts are traversed in sorted-key order so flattening is deterministic.
    """
    kind = type(value)
    if kind is dict:
        return _concatenate(_flatten(value[key]) for key in sorted(value))
    if kind in (list, tuple):
        return _concatenate(map(_flatten, value))
    return np.ravel(value)
def _concatenate(lst):
lst = list(lst)
return np.concatenate(lst) if lst else np.array([])
def flatten_func(func, example):
    """Wrap `func` so it maps flat vectors to flat vectors.

    `example` fixes the container structure. Returns the wrapped function,
    the unflattener for its argument, and the flattened example.
    """
    flat_example, unflatten = flatten(example)

    def flat_func(flat_args, *rest):
        return flatten(func(unflatten(flat_args), *rest))[0]

    return flat_func, unflatten, flat_example
| mit | 50073291c6a1fa823b7a8361c86bd1be | 32.939394 | 77 | 0.69375 | 3.708609 | false | false | false | false |
conan-io/conan-package-tools | cpt/packager.py | 1 | 40145 | import os
import platform
import re
import sys
import copy
from collections import defaultdict
from itertools import product
import six
from conans import tools
from conans.client.conan_api import Conan
from conans.client.runner import ConanRunner
from conans.model.ref import ConanFileReference
from conans.model.version import Version
from cpt import get_client_version
from cpt.auth import AuthManager
from cpt.builds_generator import BuildConf, BuildGenerator
from cpt.ci_manager import CIManager
from cpt.printer import Printer
from cpt.profiles import get_profiles, save_profile_to_tmp
from cpt.remotes import RemotesManager
from cpt.runner import CreateRunner, DockerCreateRunner
from cpt.tools import get_bool_from_env, get_custom_bool_from_env
from cpt.tools import split_colon_env
from cpt.uploader import Uploader
from cpt.config import ConfigManager
def load_cf_class(path, conan_api):
    """Load the conanfile class at `path` using whichever private loader API
    matches the installed Conan client version.

    Conan reorganized its loader/python-requires internals repeatedly between
    1.7 and 1.21, so each version range needs a different call sequence.
    Returns the conanfile class (not an instance).
    """
    client_version = get_client_version()
    client_version = Version(client_version)
    if client_version < Version("1.7.0"):
        # Oldest clients expose a standalone parse helper.
        from conans.client.loader_parse import load_conanfile_class
        return load_conanfile_class(path)
    elif client_version < Version("1.14.0"):
        return conan_api._loader.load_class(path)
    elif client_version < Version("1.15.0"):
        # python_requires must be told about every remote individually here.
        remotes = conan_api._cache.registry.remotes.list
        for remote in remotes:
            conan_api.python_requires.enable_remotes(remote_name=remote)
        return conan_api._loader.load_class(path)
    elif client_version < Version("1.16.0"):
        remotes = conan_api._cache.registry.load_remotes()
        conan_api.python_requires.enable_remotes(remotes=remotes)
        return conan_api._loader.load_class(path)
    elif client_version < Version("1.18.0"):
        # Attribute renamed to _python_requires in 1.16.
        remotes = conan_api._cache.registry.load_remotes()
        conan_api._python_requires.enable_remotes(remotes=remotes)
        return conan_api._loader.load_class(path)
    else:
        # 1.18+ moved everything under the lazily-created `app` object.
        if not conan_api.app:
            conan_api.create_app()
        remotes = conan_api.app.cache.registry.load_remotes()
        conan_api.app.python_requires.enable_remotes(remotes=remotes)
        if client_version < Version("1.20.0"):
            return conan_api.app.loader.load_class(path)
        elif client_version < Version("1.21.0"):
            return conan_api.app.loader.load_basic(path)
        else:
            conan_api.app.pyreq_loader.enable_remotes(remotes=remotes)
            return conan_api.app.loader.load_named(path, None, None, None, None)
class PlatformInfo(object):
    """Easily mockable provider of the host operating-system name (tests
    substitute a fake implementation)."""

    @staticmethod
    def system():
        # Imported locally so test doubles can patch the module lookup.
        import platform as _platform
        return _platform.system()
class ConanOutputRunner(ConanRunner):
    """ConanRunner variant that tees command output: everything written is
    echoed to stdout and also accumulated in a string for later inspection."""

    class _Sink(object):
        """File-like object that captures writes while forwarding to stdout."""

        def __init__(self):
            self.output = ""

        def write(self, data):
            self.output += str(data)
            sys.stdout.write(data)

    def __init__(self):
        super(ConanOutputRunner, self).__init__()
        self._output = ConanOutputRunner._Sink()

    @property
    def output(self):
        """All text the last command(s) wrote through this runner."""
        return self._output.output

    def __call__(self, command):
        return super(ConanOutputRunner, self).__call__(command, output=self._output)
class ConanMultiPackager(object):
""" Help to generate common builds (setting's combinations), adjust the environment,
and run conan create command in docker containers"""
    def __init__(self, username=None, channel=None, runner=None,
                 gcc_versions=None, visual_versions=None, visual_runtimes=None,
                 visual_toolsets=None,
                 apple_clang_versions=None,
                 msvc_versions=None, msvc_runtimes=None, msvc_runtime_types=None,
                 archs=None, options=None,
                 use_docker=None, curpage=None, total_pages=None,
                 docker_image=None, reference=None, password=None,
                 remotes=None,
                 upload=None, stable_branch_pattern=None,
                 vs10_x86_64_enabled=False,
                 mingw_configurations=None,
                 stable_channel=None,
                 platform_info=None,
                 upload_retry=None,
                 clang_versions=None,
                 login_username=None,
                 upload_only_when_stable=None,
                 upload_only_when_tag=None,
                 upload_only_recipe=None,
                 upload_force=None,
                 build_types=None,
                 cppstds=None,
                 skip_check_credentials=False,
                 allow_gcc_minors=False,
                 exclude_vcvars_precommand=False,
                 docker_run_options=None,
                 docker_image_skip_update=False,
                 docker_image_skip_pull=False,
                 docker_entry_script=None,
                 docker_32_images=None,
                 docker_conan_home=None,
                 docker_shell=None,
                 pip_install=None,
                 build_policy=None,
                 require_overrides=None,
                 always_update_conan_in_docker=False,
                 conan_api=None,
                 client_cache=None,
                 conanfile=None,
                 ci_manager=None,
                 out=None,
                 test_folder=None,
                 cwd=None,
                 config_url=None,
                 config_args=None,
                 upload_dependencies=None,
                 force_selinux=None,
                 skip_recipe_export=False,
                 update_dependencies=None,
                 lockfile=None,
                 global_conf=None):
        """Configure the packager.

        Nearly every parameter falls back to a CONAN_* environment variable
        when not given explicitly (e.g. `username` -> CONAN_USERNAME), so CI
        systems can drive everything through the environment. Statement order
        below matters: later attributes (e.g. `self.channel`, `self.reference`)
        depend on earlier ones.
        """
        conan_version = get_client_version()
        self.printer = Printer(out)
        self.printer.print_rule()
        self.printer.print_ascci_art()
        self.cwd = cwd or os.getcwd()
        # Build (or adopt) the Conan API object and its cache.
        if not conan_api:
            self.conan_api, _, _ = Conan.factory()
            self.conan_api.create_app()
            self.client_cache = self.conan_api.app.cache
        else:
            self.conan_api = conan_api
            self.client_cache = client_cache
        self.ci_manager = ci_manager or CIManager(self.printer)
        self.remotes_manager = RemotesManager(self.conan_api, self.printer, remotes, upload)
        self.username = username or os.getenv("CONAN_USERNAME", None)
        self.skip_check_credentials = skip_check_credentials or get_bool_from_env("CONAN_SKIP_CHECK_CREDENTIALS")
        self.auth_manager = AuthManager(self.conan_api, self.printer, login_username, password,
                                        default_username=self.username,
                                        skip_check_credentials=self.skip_check_credentials)
        # Upload related variables
        self.upload_retry = upload_retry or os.getenv("CONAN_UPLOAD_RETRY", 3)
        if upload_only_when_stable is not None:
            self.upload_only_when_stable = upload_only_when_stable
        else:
            self.upload_only_when_stable = get_bool_from_env("CONAN_UPLOAD_ONLY_WHEN_STABLE")
        if upload_only_when_tag is not None:
            self.upload_only_when_tag = upload_only_when_tag
        else:
            self.upload_only_when_tag = get_bool_from_env("CONAN_UPLOAD_ONLY_WHEN_TAG")
        self.upload_only_recipe = upload_only_recipe or get_bool_from_env("CONAN_UPLOAD_ONLY_RECIPE")
        self.upload_force = upload_force if upload_force is not None \
            else get_custom_bool_from_env("CONAN_UPLOAD_FORCE", True)
        self.remotes_manager.add_remotes_to_conan()
        self.uploader = Uploader(self.conan_api, self.remotes_manager, self.auth_manager,
                                 self.printer, self.upload_retry, self.upload_force)
        self._builds = []
        self._named_builds = {}
        self._packages_summary = []
        self._update_conan_in_docker = always_update_conan_in_docker or get_bool_from_env("CONAN_ALWAYS_UPDATE_CONAN_DOCKER")
        self._platform_info = platform_info or PlatformInfo()
        self.stable_branch_pattern = stable_branch_pattern or \
            os.getenv("CONAN_STABLE_BRANCH_PATTERN",
                      "master$ main$ release.* stable.*")
        self.stable_channel = stable_channel or os.getenv("CONAN_STABLE_CHANNEL", "stable")
        self.stable_channel = self.stable_channel.rstrip()
        self.partial_reference = reference or os.getenv("CONAN_REFERENCE", None)
        self.channel = self._get_specified_channel(channel, reference)
        self.conanfile = conanfile or os.getenv("CONAN_CONANFILE", "conanfile.py")
        # Resolve the full package reference: either from the given/env
        # reference string, or by loading name/version from the recipe.
        if self.partial_reference:
            if "@" in self.partial_reference:
                self.reference = ConanFileReference.loads(self.partial_reference)
            else:
                name, version = self.partial_reference.split("/")
                self.reference = ConanFileReference(name, version, self.username, self.channel)
        else:
            if not os.path.exists(os.path.join(self.cwd, self.conanfile)):
                raise Exception("Conanfile not found, specify a 'reference' "
                                "parameter with name and version")
            conanfile = load_cf_class(os.path.join(self.cwd, self.conanfile), self.conan_api)
            name, version = conanfile.name, conanfile.version
            if name and version:
                self.reference = ConanFileReference(name, version, self.username, self.channel)
            else:
                self.reference = None
        self._docker_image = docker_image or os.getenv("CONAN_DOCKER_IMAGE", None)
        # If CONAN_DOCKER_IMAGE is specified, then use docker is True
        self.use_docker = (use_docker or get_bool_from_env("CONAN_USE_DOCKER") or
                           self._docker_image is not None)
        self.docker_conan_home = docker_conan_home or os.getenv("CONAN_DOCKER_HOME", None)
        # Builds generated for Docker always target Linux containers.
        os_name = self._platform_info.system() if not self.use_docker else "Linux"
        self.build_generator = BuildGenerator(reference, os_name, gcc_versions,
                                              apple_clang_versions, clang_versions,
                                              visual_versions, visual_runtimes, visual_toolsets,
                                              vs10_x86_64_enabled,
                                              msvc_versions, msvc_runtimes, msvc_runtime_types,
                                              mingw_configurations, archs, allow_gcc_minors,
                                              build_types, options, cppstds)
        self.build_policy = (build_policy or
                             self.ci_manager.get_commit_build_policy() or
                             split_colon_env("CONAN_BUILD_POLICY"))
        if isinstance(self.build_policy, list):
            self.build_policy = ",".join(self.build_policy)
        self.require_overrides = require_overrides or split_colon_env("CONAN_REQUIRE_OVERRIDES") or None
        # sudo is used by default for docker/pip on non-Windows hosts unless
        # explicitly disabled via the corresponding env variable.
        self.sudo_docker_command = ""
        if "CONAN_DOCKER_USE_SUDO" in os.environ:
            self.sudo_docker_command = "sudo -E" if get_bool_from_env("CONAN_DOCKER_USE_SUDO") else ""
        elif platform.system() != "Windows":
            self.sudo_docker_command = "sudo -E"
        self.sudo_pip_command = ""
        if "CONAN_PIP_USE_SUDO" in os.environ:
            self.sudo_pip_command = "sudo -E" if get_bool_from_env("CONAN_PIP_USE_SUDO") else ""
        elif platform.system() != "Windows" and self._docker_image and 'conanio/' not in str(self._docker_image):
            self.sudo_pip_command = "sudo -E"
        self.pip_command = os.getenv("CONAN_PIP_COMMAND", "pip")
        pip_found = True if tools.os_info.is_windows else tools.which(self.pip_command)
        if not pip_found or not "pip" in self.pip_command:
            raise Exception("CONAN_PIP_COMMAND: '{}' is not a valid pip command.".format(self.pip_command))
        self.docker_pip_command = os.getenv("CONAN_DOCKER_PIP_COMMAND", "pip")
        self.docker_shell = docker_shell or os.getenv("CONAN_DOCKER_SHELL")
        # Container home/shell defaults depend on the container OS (WCOW vs LCOW).
        if self.is_wcow:
            if self.docker_conan_home is None:
                self.docker_conan_home = "C:/Users/ContainerAdministrator"
            self.docker_shell = docker_shell or "cmd /C"
        else:
            if self.docker_conan_home is None:
                self.docker_conan_home = "/home/conan"
            self.docker_shell = docker_shell or "/bin/sh -c"
        self.docker_platform_param = ""
        self.lcow_user_workaround = ""
        if self.is_lcow:
            self.docker_platform_param = "--platform=linux"
            # With LCOW, Docker doesn't respect USER directive in dockerfile yet
            self.lcow_user_workaround = "sudo su conan && "
        self.exclude_vcvars_precommand = exclude_vcvars_precommand or \
            get_bool_from_env("CONAN_EXCLUDE_VCVARS_PRECOMMAND")
        self._docker_image_skip_update = docker_image_skip_update or \
            get_bool_from_env("CONAN_DOCKER_IMAGE_SKIP_UPDATE")
        self._docker_image_skip_pull = docker_image_skip_pull or \
            get_bool_from_env("CONAN_DOCKER_IMAGE_SKIP_PULL")
        self.runner = runner or os.system
        self.output_runner = ConanOutputRunner()
        self.docker_run_options = docker_run_options or split_colon_env("CONAN_DOCKER_RUN_OPTIONS")
        if isinstance(self.docker_run_options, list):
            self.docker_run_options = " ".join(self.docker_run_options)
        self.docker_entry_script = docker_entry_script or os.getenv("CONAN_DOCKER_ENTRY_SCRIPT")
        self.pip_install = pip_install or split_colon_env("CONAN_PIP_INSTALL")
        self.upload_dependencies = upload_dependencies or split_colon_env("CONAN_UPLOAD_DEPENDENCIES") or ""
        if isinstance(self.upload_dependencies, list):
            self.upload_dependencies = ",".join(self.upload_dependencies)
        if "all" in self.upload_dependencies and self.upload_dependencies != "all":
            raise Exception("Upload dependencies only accepts or 'all' or package references. Do not mix both!")
        self.update_dependencies = update_dependencies or get_bool_from_env("CONAN_UPDATE_DEPENDENCIES")
        if self.channel:
            os.environ["CONAN_CHANNEL"] = self.channel
        if docker_32_images is not None:
            self.docker_32_images = docker_32_images
        else:
            self.docker_32_images = os.getenv("CONAN_DOCKER_32_IMAGES", False)
        self.force_selinux = force_selinux or get_bool_from_env("CONAN_FORCE_SELINUX")
        self.curpage = curpage or os.getenv("CONAN_CURRENT_PAGE", 1)
        self.total_pages = total_pages or os.getenv("CONAN_TOTAL_PAGES", 1)
        self.conan_pip_package = os.getenv("CONAN_PIP_PACKAGE", "conan==%s" % conan_version)
        if self.conan_pip_package in ("0", "False"):
            self.conan_pip_package = ""
        self.vs10_x86_64_enabled = vs10_x86_64_enabled
        self.builds_in_current_page = []
        self.global_conf = global_conf or os.getenv("CONAN_GLOBAL_CONF")
        self.test_folder = test_folder or os.getenv("CPT_TEST_FOLDER")
        self.config_url = config_url or os.getenv("CONAN_CONFIG_URL")
        self.skip_recipe_export = skip_recipe_export or \
            get_bool_from_env("CONAN_SKIP_RECIPE_EXPORT")
        self.config_args = config_args or os.getenv("CONAN_CONFIG_ARGS")
        self.lockfile = lockfile or os.getenv("CONAN_LOCKFILE")
        # Dump all simple, non-private, non-secret settings for CI logs.
        def valid_pair(var, value):
            return (isinstance(value, six.string_types) or
                    isinstance(value, bool) or
                    isinstance(value, list)) and not var.startswith("_") and "password" not in var
        with self.printer.foldable_output("local_vars"):
            self.printer.print_dict({var: value
                                     for var, value in self.__dict__.items()
                                     if valid_pair(var, value)})
# For Docker on Windows, including Linux containers on Windows
@property
def is_lcow(self):
return self.container_os == "linux" and platform.system() == "Windows"
@property
def is_wcow(self):
return self.container_os == "windows" and platform.system() == "Windows"
@property
def container_os(self):
# CONAN_DOCKER_PLATFORM=linux must be specified for LCOW
if self.use_docker:
if "CONAN_DOCKER_PLATFORM" in os.environ:
return os.getenv("CONAN_DOCKER_PLATFORM", "windows").lower()
else:
return "windows"
else:
return ""
    @property
    def packages_summary(self):
        """Read-only list of per-build result records appended by run_builds()."""
        return self._packages_summary
def save_packages_summary(self, file):
self.printer.print_message("Saving packages summary to " + file)
import json
import datetime
def default(o):
if isinstance(o, (datetime.date, datetime.datetime)):
return o.isoformat()
with open(file, 'w') as outfile:
json.dump(self.packages_summary, outfile, default = default)
    @property
    def items(self):
        """The list of scheduled build configurations (5-element BuildConf tuples)."""
        return self._builds
    @items.setter
    def items(self, confs):
        # Delegates to the `builds` setter, which normalizes tuple lengths.
        self.builds = confs
@property
def builds(self):
# Retrocompatibility iterating
self.printer.print_message("WARNING",
"\n\n\n******* ITERATING THE CONAN_PACKAGE_TOOLS BUILDS WITH "
".builds is deprecated use '.items' instead (unpack 5 elements: "
"settings, options, env_vars, build_requires, reference *******"
"**\n\n\n")
return [elem[0:4] for elem in self._builds]
@builds.setter
def builds(self, confs):
"""For retro compatibility directly assigning builds"""
self._named_builds = {}
self._builds = []
for values in confs:
if len(values) == 2:
self._builds.append(BuildConf(values[0], values[1], {}, {}, self.reference))
elif len(values) == 4:
self._builds.append(BuildConf(values[0], values[1], values[2], values[3],
self.reference))
elif len(values) != 5:
raise Exception("Invalid build configuration, has to be a tuple of "
"(settings, options, env_vars, build_requires, reference)")
else:
self._builds.append(BuildConf(*values))
    @property
    def named_builds(self):
        """Builds grouped by page name; mutually exclusive with `.items`
        (run_builds raises when both are populated)."""
        return self._named_builds
@named_builds.setter
def named_builds(self, confs):
self._builds = []
self._named_builds = {}
for key, pages in confs.items():
for values in pages:
if len(values) == 2:
bc = BuildConf(values[0], values[1], {}, {}, self.reference)
self._named_builds.setdefault(key, []).append(bc)
elif len(values) == 4:
bc = BuildConf(values[0], values[1], values[2], values[3], self.reference)
self._named_builds.setdefault(key, []).append(bc)
elif len(values) != 5:
raise Exception("Invalid build configuration, has to be a tuple of "
"(settings, options, env_vars, build_requires, reference)")
else:
self._named_builds.setdefault(key, []).append(BuildConf(*values))
    def login(self, remote_name):
        """Authenticate against `remote_name` via the configured AuthManager."""
        self.auth_manager.login(remote_name)
    def add_common_builds(self, shared_option_name=None, pure_c=None,
                          dll_with_static_runtime=False, reference=None, header_only=True,
                          build_all_options_values=None):
        """Generate and append the standard matrix of build configurations.

        Most arguments fall back to CONAN_* environment variables. When the
        recipe declares `shared` / `header_only` options they are expanded
        automatically; `build_all_options_values` lists extra options whose
        value combinations (cartesian product) should all be built.
        """
        # Resolve the reference: explicit argument, or the one detected in
        # __init__ from the recipe / CONAN_REFERENCE.
        if reference:
            if "@" in reference:
                reference = ConanFileReference.loads(reference)
            else:
                name, version = reference.split("/")
                reference = ConanFileReference(name, version, self.username, self.channel)
        else:
            reference = self.reference
        if not reference:
            raise Exception("Specify a CONAN_REFERENCE or name and version fields in the recipe")
        if shared_option_name is None:
            env_shared_option_name = os.getenv("CONAN_SHARED_OPTION_NAME", None)
            shared_option_name = env_shared_option_name if str(env_shared_option_name).lower() != "false" else False
        if pure_c is None:
            pure_c = get_custom_bool_from_env("CONAN_PURE_C", True)
        build_all_options_values = build_all_options_values or split_colon_env("CONAN_BUILD_ALL_OPTIONS_VALUES") or []
        if not isinstance(build_all_options_values, list):
            raise Exception("'build_all_options_values' must be a list. e.g. ['foo:opt', 'foo:bar']")
        # Inspect the recipe (if present) to auto-detect shared/header_only.
        conanfile = None
        if os.path.exists(os.path.join(self.cwd, self.conanfile)):
            conanfile = load_cf_class(os.path.join(self.cwd, self.conanfile), self.conan_api)
        header_only_option = None
        if conanfile:
            if hasattr(conanfile, "options") and conanfile.options and "header_only" in conanfile.options:
                header_only_option = "%s:header_only" % reference.name
        if shared_option_name is None:
            if conanfile:
                if hasattr(conanfile, "options") and conanfile.options and "shared" in conanfile.options:
                    shared_option_name = "%s:shared" % reference.name
        # filter only valid options
        raw_options_for_building = [opt[opt.find(":") + 1:] for opt in build_all_options_values]
        for raw_option in reversed(raw_options_for_building):
            if hasattr(conanfile, "options") and conanfile.options and \
               not isinstance(conanfile.options.get(raw_option), list):
                raw_options_for_building.remove(raw_option)
        if raw_options_for_building and conanfile:
            # get option and its values
            cloned_options = copy.copy(conanfile.options)
            for key, value in conanfile.options.items():
                if key == "shared" and shared_option_name:
                    continue
                elif key not in raw_options_for_building:
                    del cloned_options[key]
            cloned_options2 = {}
            for key, value in cloned_options.items():
                # add package reference to the option name
                if not key.startswith("{}:".format(reference.name)):
                    cloned_options2["{}:{}".format(reference.name, key)] = value
            # combine all options x values (cartesian product)
            build_all_options_values = [dict(zip(cloned_options2, v)) for v in product(*cloned_options2.values())]
        builds = self.build_generator.get_builds(pure_c, shared_option_name,
                                                 dll_with_static_runtime, reference,
                                                 build_all_options_values)
        # Duplicate the matrix for the opposite header_only value so both
        # variants get built (only one extra build when defaulting to False).
        if header_only_option and header_only:
            if conanfile.default_options.get("header_only"):
                cloned_builds = copy.deepcopy(builds)
                for settings, options, env_vars, build_requires, reference in cloned_builds:
                    options.update({header_only_option: False})
                builds.extend(cloned_builds)
            else:
                settings, options, env_vars, build_requires, reference = builds[0]
                cloned_options = copy.copy(options)
                cloned_options.update({header_only_option: True})
                builds.append(BuildConf(copy.copy(settings), cloned_options, copy.copy(env_vars),
                                        copy.copy(build_requires), reference))
        self._builds.extend(builds)
def add(self, settings=None, options=None, env_vars=None, build_requires=None, reference=None):
settings = settings or {}
options = options or {}
env_vars = env_vars or {}
build_requires = build_requires or {}
if reference:
reference = ConanFileReference.loads("%s@%s/%s" % (reference,
self.username, self.channel))
reference = reference or self.reference
self._builds.append(BuildConf(settings, options, env_vars, build_requires, reference))
def remove_build_if(self, predicate):
filtered_builds = []
for build in self.items:
if not predicate(build):
filtered_builds.append(build)
self._builds = filtered_builds
def update_build_if(self, predicate, new_settings=None, new_options=None, new_env_vars=None,
new_build_requires=None, new_reference=None):
updated_builds = []
for build in self.items:
if predicate(build):
if new_settings:
build.settings.update(new_settings)
if new_options:
build.options.update(new_options)
if new_build_requires:
build.build_requires.update(new_build_requires)
if new_env_vars:
build.env_vars.update(new_env_vars)
if new_reference:
build.reference = new_reference
updated_builds.append(build)
self._builds = updated_builds
    def run(self, base_profile_name=None, summary_file=None, base_profile_build_name=None):
        """Entry point: prepare the environment, install pip packages, run all
        builds, and optionally dump a JSON summary.

        Returns 99 when builds are skipped via a [skip ci] commit message;
        otherwise returns None.
        """
        # Credentials/remotes are exposed to subprocesses via env variables
        # only for the duration of the run.
        env_vars = self.auth_manager.env_vars()
        env_vars.update(self.remotes_manager.env_vars())
        with tools.environment_append(env_vars):
            self.printer.print_message("Running builds...")
            if self.ci_manager.skip_builds():
                self.printer.print_message("Skipped builds due [skip ci] commit message")
                return 99
            if not self.skip_check_credentials and self._upload_enabled():
                self.auth_manager.login(self.remotes_manager.upload_remote_name)
            # Without Docker the conan client itself is (re)installed here;
            # inside Docker the runner handles that.
            if self.conan_pip_package and not self.use_docker:
                with self.printer.foldable_output("pip_update"):
                    self.runner('%s %s install -q %s' % (self.sudo_pip_command,
                                                         self.pip_command,
                                                         self.conan_pip_package))
            if self.pip_install:
                packages = " ".join(self.pip_install)
                self.printer.print_message("Install extra python packages: {}".format(packages))
                self.runner('%s %s install -q %s' % (self.sudo_pip_command,
                                                     self.pip_command, packages))
            self.run_builds(base_profile_name=base_profile_name,
                            base_profile_build_name=base_profile_build_name)
        summary_file = summary_file or os.getenv("CPT_SUMMARY_FILE", None)
        if summary_file:
            self.save_packages_summary(summary_file)
    def _upload_enabled(self):
        """Decide whether packages should be uploaded after building.

        Returns False when there is no upload remote/credentials, when
        tag/stable-only policies don't match the current CI state, or when
        building a pull request. Raises when an upload would be possible but
        channel or username are missing.
        """
        if not self.remotes_manager.upload_remote_name:
            return False
        if not self.auth_manager.credentials_ready(self.remotes_manager.upload_remote_name):
            return False
        if self.upload_only_when_tag and not self.ci_manager.is_tag():
            self.printer.print_message("Skipping upload, not tag branch")
            return False
        st_channel = self.stable_channel or "stable"
        if self.upload_only_when_stable and self.channel != st_channel and not self.upload_only_when_tag:
            self.printer.print_message("Skipping upload, not stable channel")
            return False
        # CONAN_TEST_SUITE bypasses the PR check (used by CPT's own tests).
        if not os.getenv("CONAN_TEST_SUITE", False):
            if self.ci_manager.is_pull_request():
                # PENDING! can't found info for gitlab/bamboo
                self.printer.print_message("Skipping upload, this is a Pull Request")
                return False
        def raise_error(field):
            raise Exception("Upload not possible, '%s' is missing!" % field)
        # A full reference with '@' already carries user/channel; otherwise both
        # must have been configured explicitly.
        if not self.channel and "@" not in self.partial_reference:
            raise_error("channel")
        if not self.username and "@" not in self.partial_reference:
            raise_error("username")
        return True
    def run_builds(self, curpage=None, total_pages=None, base_profile_name=None,
                   base_profile_build_name=None):
        """Select this page's builds and execute each one, natively or in Docker.

        Bulk builds (`items`) are distributed round-robin over `total_pages`;
        named builds select the page by key. Each build runs through a
        CreateRunner (native) or DockerCreateRunner (containerized).
        """
        if len(self.named_builds) > 0 and len(self.items) > 0:
            raise Exception("Both bulk and named builds are set. Only one is allowed.")
        self.builds_in_current_page = []
        if len(self.items) > 0:
            curpage = curpage or int(self.curpage)
            total_pages = total_pages or int(self.total_pages)
            # Round-robin assignment of builds to 1-based pages.
            for index, build in enumerate(self.items):
                if curpage is None or total_pages is None or (index % total_pages) + 1 == curpage:
                    self.builds_in_current_page.append(build)
        elif len(self.named_builds) > 0:
            curpage = curpage or self.curpage
            if curpage not in self.named_builds:
                raise Exception("No builds set for page %s" % curpage)
            for build in self.named_builds[curpage]:
                self.builds_in_current_page.append(build)
        self.printer.print_current_page(curpage, total_pages)
        self.printer.print_jobs(self.builds_in_current_page)
        # Each Docker image is pulled at most once per run.
        pulled_docker_images = defaultdict(lambda: False)
        # The recipe is exported on the first build; subsequent builds may
        # skip it depending on self.skip_recipe_export (set at loop end).
        skip_recipe_export = False
        base_profile_build_name = base_profile_build_name or os.getenv("CONAN_BASE_PROFILE_BUILD")
        if base_profile_build_name is not None:
            if get_client_version() < Version("1.24.0"):
                raise Exception("Conan Profile Build requires >= 1.24")
            self.printer.print_message("**************************************************")
            self.printer.print_message("Using specified "
                                       "build profile: %s" % base_profile_build_name)
            self.printer.print_message("**************************************************")
        # FIXME: Remove in Conan 1.3, https://github.com/conan-io/conan/issues/2787
        for index, build in enumerate(self.builds_in_current_page):
            self.printer.print_message("Build: %s/%s" % (index+1, len(self.builds_in_current_page)))
            base_profile_name = base_profile_name or os.getenv("CONAN_BASE_PROFILE")
            if base_profile_name:
                self.printer.print_message("**************************************************")
                self.printer.print_message("Using specified default "
                                           "base profile: %s" % base_profile_name)
                self.printer.print_message("**************************************************")
            if self.config_url:
                ConfigManager(self.conan_api, self.printer).install(url=self.config_url, args=self.config_args)
            profile_text, base_profile_text = get_profiles(self.client_cache, build,
                                                           base_profile_name)
            profile_build_text, base_profile_build_text = get_profiles(self.client_cache, build,
                                                                       base_profile_build_name, True)
            if not self.use_docker:
                # Native build: profiles are materialized as temp files.
                profile_abs_path = save_profile_to_tmp(profile_text)
                if base_profile_build_text:
                    profile_build_abs_path = save_profile_to_tmp(profile_build_text)
                else:
                    profile_build_abs_path = None
                r = CreateRunner(profile_abs_path, build.reference, self.conan_api,
                                 self.uploader,
                                 exclude_vcvars_precommand=self.exclude_vcvars_precommand,
                                 build_policy=self.build_policy,
                                 require_overrides=self.require_overrides,
                                 runner=self.runner,
                                 cwd=self.cwd,
                                 printer=self.printer,
                                 upload=self._upload_enabled(),
                                 upload_only_recipe=self.upload_only_recipe,
                                 test_folder=self.test_folder,
                                 config_url=self.config_url,
                                 config_args=self.config_args,
                                 upload_dependencies=self.upload_dependencies,
                                 conanfile=self.conanfile,
                                 lockfile=self.lockfile,
                                 skip_recipe_export=skip_recipe_export,
                                 update_dependencies=self.update_dependencies,
                                 profile_build_abs_path=profile_build_abs_path,
                                 global_conf=self.global_conf,
                                 )
                r.run()
                self._packages_summary.append({"configuration": build, "package" : r.results})
            else:
                # Docker build: profile texts are passed through to the container.
                if not base_profile_build_text:
                    profile_build_text = None
                docker_image = self._get_docker_image(build)
                r = DockerCreateRunner(profile_text, base_profile_text, base_profile_name,
                                       build.reference,
                                       conan_pip_package=self.conan_pip_package,
                                       docker_image=docker_image,
                                       sudo_docker_command=self.sudo_docker_command,
                                       sudo_pip_command=self.sudo_pip_command,
                                       docker_image_skip_update=self._docker_image_skip_update,
                                       docker_image_skip_pull=self._docker_image_skip_pull,
                                       build_policy=self.build_policy,
                                       require_overrides=self.require_overrides,
                                       always_update_conan_in_docker=self._update_conan_in_docker,
                                       upload=self._upload_enabled(),
                                       upload_retry=self.upload_retry,
                                       upload_only_recipe=self.upload_only_recipe,
                                       upload_force=self.upload_force,
                                       runner=self.runner,
                                       docker_shell=self.docker_shell,
                                       docker_conan_home=self.docker_conan_home,
                                       docker_platform_param=self.docker_platform_param,
                                       docker_run_options=self.docker_run_options,
                                       lcow_user_workaround=self.lcow_user_workaround,
                                       test_folder=self.test_folder,
                                       pip_install=self.pip_install,
                                       docker_pip_command=self.docker_pip_command,
                                       config_url=self.config_url,
                                       config_args=self.config_args,
                                       printer=self.printer,
                                       upload_dependencies=self.upload_dependencies,
                                       conanfile=self.conanfile,
                                       lockfile=self.lockfile,
                                       force_selinux=self.force_selinux,
                                       skip_recipe_export=skip_recipe_export,
                                       update_dependencies=self.update_dependencies,
                                       profile_build_text=profile_build_text,
                                       base_profile_build_text=base_profile_build_text,
                                       global_conf=self.global_conf,
                                       cwd=self.cwd)
                r.run(pull_image=not pulled_docker_images[docker_image],
                      docker_entry_script=self.docker_entry_script)
                pulled_docker_images[docker_image] = True
            skip_recipe_export = self.skip_recipe_export
def _get_docker_image(self, build):
if self._docker_image:
docker_image = self._docker_image
else:
compiler_name = build.settings.get("compiler", "")
compiler_version = build.settings.get("compiler.version", "")
docker_image = self._autodetect_docker_base_image(compiler_name, compiler_version)
arch = build.settings.get("arch", "") or build.settings.get("arch_build", "")
if self.docker_32_images and arch == "x86":
build.settings["arch_build"] = "x86"
docker_arch_suffix = "x86"
elif arch != "x86" and arch != "x86_64":
docker_arch_suffix = arch
else:
docker_arch_suffix = None
if docker_arch_suffix and "-" not in docker_image:
docker_image = "%s-%s" % (docker_image, docker_arch_suffix)
return docker_image
@staticmethod
def _autodetect_docker_base_image(compiler_name, compiler_version):
if compiler_name not in ["clang", "gcc"]:
raise Exception("Docker image cannot be autodetected for "
"the compiler %s" % compiler_name)
if compiler_name == "gcc" and Version(compiler_version) > Version("5"):
compiler_version = Version(compiler_version).major(fill=False)
return "conanio/%s%s" % (compiler_name, compiler_version.replace(".", ""))
def _get_channel(self, specified_channel, stable_channel, upload_when_tag):
if not specified_channel:
return
branch = self.ci_manager.get_branch()
self.printer.print_message("Branch detected", branch)
for pattern in self.stable_branch_pattern.split(" "):
prog = re.compile(pattern)
if branch and prog.match(branch):
self.printer.print_message("Info",
"Redefined channel by CI branch matching with '%s', "
"setting CONAN_CHANNEL to '%s'" % (pattern,
stable_channel))
return stable_channel
if self.ci_manager.is_tag() and upload_when_tag:
self.printer.print_message("Info",
"Redefined channel by branch tag, "
"setting CONAN_CHANNEL to '%s'" % stable_channel)
return stable_channel
return specified_channel
def _get_specified_channel(self, channel, reference):
partial_reference = reference or os.getenv("CONAN_REFERENCE", None)
specified_channel = None
# without name/channel e.g. zlib/1.2.11@
if partial_reference:
if "@" in partial_reference:
specified_channel = channel or os.getenv("CONAN_CHANNEL", None)
else:
specified_channel = channel or os.getenv("CONAN_CHANNEL", "testing")
specified_channel = specified_channel.rstrip()
else:
if self.username:
specified_channel = channel or os.getenv("CONAN_CHANNEL", "testing")
specified_channel = specified_channel.rstrip()
else:
specified_channel = channel or os.getenv("CONAN_CHANNEL", None)
return self._get_channel(specified_channel, self.stable_channel, self.upload_only_when_tag)
| mit | 1b73eb038461b2a7c4320f5f8da99749 | 47.020335 | 125 | 0.558849 | 4.226679 | false | false | false | false |
euske/pdfminer | pdfminer/pdffont.py | 1 | 27098 | #!/usr/bin/env python
import sys
import struct
from io import BytesIO
from .cmapdb import CMapDB
from .cmapdb import CMapParser
from .cmapdb import FileUnicodeMap
from .cmapdb import CMap
from .encodingdb import EncodingDB
from .encodingdb import name2unicode
from .psparser import PSStackParser
from .psparser import PSEOF
from .psparser import LIT
from .psparser import KWD
from .psparser import STRICT
from .psparser import PSLiteral
from .psparser import literal_name
from .pdftypes import PDFException
from .pdftypes import resolve1
from .pdftypes import int_value
from .pdftypes import num_value
from .pdftypes import bytes_value
from .pdftypes import list_value
from .pdftypes import dict_value
from .pdftypes import stream_value
from .fontmetrics import FONT_METRICS
from .utils import apply_matrix_norm
from .utils import nunpack
from .utils import choplist
from .utils import isnumber
def get_widths(seq):
    """Build a {cid: width} map from a PDF /W-style array.

    The sequence mixes numbers and nested lists: ``c [w1 w2 ...]`` assigns
    consecutive widths starting at cid ``c``; ``c1 c2 w`` assigns width ``w``
    to every cid in the inclusive range [c1, c2].
    """
    widths = {}
    pending = []
    for item in seq:
        if isinstance(item, list):
            if pending:
                start = pending[-1]
                for offset, width in enumerate(item):
                    widths[start + offset] = width
                pending = []
        elif isnumber(item):
            pending.append(item)
            if len(pending) == 3:
                (first, last, width) = pending
                for cid in range(first, last + 1):
                    widths[cid] = width
                pending = []
    return widths
#assert get_widths([1]) == {}
#assert get_widths([1,2,3]) == {1:3, 2:3}
#assert get_widths([1,[2,3],6,[7,8]]) == {1:2,2:3, 6:7,7:8}
def get_widths2(seq):
    """Build a {cid: (width, (vx, vy))} map for vertical-writing metrics.

    Like get_widths, but each entry carries a displacement pair:
    ``c [w1 vx1 vy1 w2 vx2 vy2 ...]`` for consecutive cids, or
    ``c1 c2 w vx vy`` for the inclusive cid range [c1, c2].
    """
    widths = {}
    pending = []
    for item in seq:
        if isinstance(item, list):
            if pending:
                start = pending[-1]
                for offset, (width, vx, vy) in enumerate(choplist(3, item)):
                    widths[start + offset] = (width, (vx, vy))
                pending = []
        elif isnumber(item):
            pending.append(item)
            if len(pending) == 5:
                (first, last, width, vx, vy) = pending
                for cid in range(first, last + 1):
                    widths[cid] = (width, (vx, vy))
                pending = []
    return widths
#assert get_widths2([1]) == {}
#assert get_widths2([1,2,3,4,5]) == {1:(3, (4,5)), 2:(3, (4,5))}
#assert get_widths2([1,[2,3,4,5],6,[7,8,9]]) == {1:(2, (3,4)), 6:(7, (8,9))}
## FontMetricsDB
##
class FontMetricsDB:
    """Lookup table for the metrics of the built-in (standard 14) fonts."""

    @classmethod
    def get_metrics(cls, fontname):
        """Return ``(descriptor, widths)`` for *fontname*; raise KeyError if unknown."""
        return FONT_METRICS[fontname]
## Type1FontHeaderParser
##
class Type1FontHeaderParser(PSStackParser):
    """Parses the cleartext header of a Type1 font program to recover its
    built-in encoding (character code -> glyph name -> unicode).
    """

    KEYWORD_BEGIN = KWD(b'begin')
    KEYWORD_END = KWD(b'end')
    KEYWORD_DEF = KWD(b'def')
    KEYWORD_PUT = KWD(b'put')
    KEYWORD_DICT = KWD(b'dict')
    KEYWORD_ARRAY = KWD(b'array')
    KEYWORD_READONLY = KWD(b'readonly')
    # NOTE: the original declared KEYWORD_FOR twice; the duplicate
    # assignment was redundant and has been removed.
    KEYWORD_FOR = KWD(b'for')

    def __init__(self, data):
        PSStackParser.__init__(self, data)
        self._cid2unicode = {}
        return

    def get_encoding(self):
        """Parse the whole stream and return a cid -> unicode mapping.

        Glyph names with no known unicode equivalent are silently skipped.
        """
        while 1:
            try:
                (cid, name) = self.nextobject()
            except PSEOF:
                break
            try:
                self._cid2unicode[cid] = name2unicode(name)
            except KeyError:
                pass
        return self._cid2unicode

    def do_keyword(self, pos, token):
        # Only ``put`` matters here: ``code /glyphname put`` entries of the
        # /Encoding array.  Everything else is discarded by the base parser.
        if token is self.KEYWORD_PUT:
            ((_, key), (_, value)) = self.pop(2)
            if (isinstance(key, int) and
                    isinstance(value, PSLiteral)):
                self.add_results((key, literal_name(value)))
        return
NIBBLES = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', 'e', 'e-', None, '-')
## CFFFont
## (Format specified in Adobe Technical Note: #5176
## "The Compact Font Format Specification")
##
def getdict(data):
    """Decode a raw CFF DICT blob into ``{operator: [operands]}``.

    *data* is the bytes of a Top DICT / Private DICT.  Operator bytes are
    values <= 21; everything else is an operand encoding (BCD reals, and
    1/2/3/5-byte integer forms per Adobe TN #5176, section 4).
    """
    d = {}
    fp = BytesIO(data)
    stack = []
    while 1:
        c = fp.read(1)
        if not c:
            break
        b0 = ord(c)
        if b0 <= 21:
            # Operator byte: flush the accumulated operand stack.
            d[b0] = stack
            stack = []
            continue
        if b0 == 30:
            # Real number: packed BCD nibbles, terminated by nibble 0xf.
            s = ''
            loop = True
            while loop:
                b = ord(fp.read(1))
                for n in (b >> 4, b & 15):
                    if n == 15:
                        loop = False
                    else:
                        s += NIBBLES[n]
            value = float(s)
        elif 32 <= b0 and b0 <= 246:
            # One-byte integer.
            value = b0-139
        else:
            b1 = ord(fp.read(1))
            if 247 <= b0 and b0 <= 250:
                # Two-byte positive integer.
                value = ((b0-247) << 8)+b1+108
            elif 251 <= b0 and b0 <= 254:
                # Two-byte negative integer.
                value = -((b0-251) << 8)-b1-108
            else:
                b2 = ord(fp.read(1))
                if 128 <= b1:
                    b1 -= 256
                if b0 == 28:
                    # Three-byte form: 16-bit signed integer.
                    value = b1 << 8 | b2
                else:
                    # Five-byte form: 32-bit signed integer (b0 == 29 expected).
                    # NOTE(review): reserved b0 values 22-27/31 also fall
                    # through to this branch -- confirm inputs never use them.
                    value = b1 << 24 | b2 << 16 | struct.unpack('>H', fp.read(2))[0]
        stack.append(value)
    return d
class CFFFont:
STANDARD_STRINGS = (
'.notdef', 'space', 'exclam', 'quotedbl', 'numbersign',
'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft',
'parenright', 'asterisk', 'plus', 'comma', 'hyphen', 'period',
'slash', 'zero', 'one', 'two', 'three', 'four', 'five', 'six',
'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal',
'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G',
'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a',
'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'braceleft', 'bar', 'braceright', 'asciitilde', 'exclamdown',
'cent', 'sterling', 'fraction', 'yen', 'florin', 'section',
'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash',
'dagger', 'daggerdbl', 'periodcentered', 'paragraph', 'bullet',
'quotesinglbase', 'quotedblbase', 'quotedblright',
'guillemotright', 'ellipsis', 'perthousand', 'questiondown',
'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve',
'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut',
'ogonek', 'caron', 'emdash', 'AE', 'ordfeminine', 'Lslash',
'Oslash', 'OE', 'ordmasculine', 'ae', 'dotlessi', 'lslash',
'oslash', 'oe', 'germandbls', 'onesuperior', 'logicalnot', 'mu',
'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn',
'onequarter', 'divide', 'brokenbar', 'degree', 'thorn',
'threequarters', 'twosuperior', 'registered', 'minus', 'eth',
'multiply', 'threesuperior', 'copyright', 'Aacute',
'Acircumflex', 'Adieresis', 'Agrave', 'Aring', 'Atilde',
'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave',
'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde',
'Oacute', 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde',
'Scaron', 'Uacute', 'Ucircumflex', 'Udieresis', 'Ugrave',
'Yacute', 'Ydieresis', 'Zcaron', 'aacute', 'acircumflex',
'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', 'eacute',
'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex',
'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex',
'odieresis', 'ograve', 'otilde', 'scaron', 'uacute',
'ucircumflex', 'udieresis', 'ugrave', 'yacute', 'ydieresis',
'zcaron', 'exclamsmall', 'Hungarumlautsmall', 'dollaroldstyle',
'dollarsuperior', 'ampersandsmall', 'Acutesmall',
'parenleftsuperior', 'parenrightsuperior', 'twodotenleader',
'onedotenleader', 'zerooldstyle', 'oneoldstyle', 'twooldstyle',
'threeoldstyle', 'fouroldstyle', 'fiveoldstyle', 'sixoldstyle',
'sevenoldstyle', 'eightoldstyle', 'nineoldstyle',
'commasuperior', 'threequartersemdash', 'periodsuperior',
'questionsmall', 'asuperior', 'bsuperior', 'centsuperior',
'dsuperior', 'esuperior', 'isuperior', 'lsuperior', 'msuperior',
'nsuperior', 'osuperior', 'rsuperior', 'ssuperior', 'tsuperior',
'ff', 'ffi', 'ffl', 'parenleftinferior', 'parenrightinferior',
'Circumflexsmall', 'hyphensuperior', 'Gravesmall', 'Asmall',
'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall',
'Hsmall', 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall',
'Nsmall', 'Osmall', 'Psmall', 'Qsmall', 'Rsmall', 'Ssmall',
'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', 'Ysmall',
'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall',
'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall',
'Zcaronsmall', 'Dieresissmall', 'Brevesmall', 'Caronsmall',
'Dotaccentsmall', 'Macronsmall', 'figuredash', 'hypheninferior',
'Ogoneksmall', 'Ringsmall', 'Cedillasmall', 'questiondownsmall',
'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths',
'onethird', 'twothirds', 'zerosuperior', 'foursuperior',
'fivesuperior', 'sixsuperior', 'sevensuperior', 'eightsuperior',
'ninesuperior', 'zeroinferior', 'oneinferior', 'twoinferior',
'threeinferior', 'fourinferior', 'fiveinferior', 'sixinferior',
'seveninferior', 'eightinferior', 'nineinferior',
'centinferior', 'dollarinferior', 'periodinferior',
'commainferior', 'Agravesmall', 'Aacutesmall',
'Acircumflexsmall', 'Atildesmall', 'Adieresissmall',
'Aringsmall', 'AEsmall', 'Ccedillasmall', 'Egravesmall',
'Eacutesmall', 'Ecircumflexsmall', 'Edieresissmall',
'Igravesmall', 'Iacutesmall', 'Icircumflexsmall',
'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall',
'Oacutesmall', 'Ocircumflexsmall', 'Otildesmall',
'Odieresissmall', 'OEsmall', 'Oslashsmall', 'Ugravesmall',
'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall',
'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000',
'001.001', '001.002', '001.003', 'Black', 'Bold', 'Book',
'Light', 'Medium', 'Regular', 'Roman', 'Semibold',
)
class INDEX:
def __init__(self, fp):
self.fp = fp
self.offsets = []
(count, offsize) = struct.unpack('>HB', self.fp.read(3))
for i in range(count+1):
self.offsets.append(nunpack(self.fp.read(offsize)))
self.base = self.fp.tell()-1
self.fp.seek(self.base+self.offsets[-1])
return
def __repr__(self):
return '<INDEX: size=%d>' % len(self)
def __len__(self):
return len(self.offsets)-1
def __getitem__(self, i):
self.fp.seek(self.base+self.offsets[i])
return self.fp.read(self.offsets[i+1]-self.offsets[i])
def __iter__(self):
return iter(self[i] for i in range(len(self)))
def __init__(self, name, fp):
self.name = name
self.fp = fp
# Header
(_major, _minor, hdrsize, offsize) = struct.unpack('BBBB', self.fp.read(4))
self.fp.read(hdrsize-4)
# Name INDEX
self.name_index = self.INDEX(self.fp)
# Top DICT INDEX
self.dict_index = self.INDEX(self.fp)
# String INDEX
self.string_index = self.INDEX(self.fp)
# Global Subr INDEX
self.subr_index = self.INDEX(self.fp)
# Top DICT DATA
self.top_dict = getdict(self.dict_index[0])
(charset_pos,) = self.top_dict.get(15, [0])
(encoding_pos,) = self.top_dict.get(16, [0])
(charstring_pos,) = self.top_dict.get(17, [0])
# CharStrings
self.fp.seek(charstring_pos)
self.charstring = self.INDEX(self.fp)
self.nglyphs = len(self.charstring)
# Encodings
self.code2gid = {}
self.gid2code = {}
self.fp.seek(encoding_pos)
format = self.fp.read(1)
if format == b'\x00':
# Format 0
(n,) = struct.unpack('B', self.fp.read(1))
for (code, gid) in enumerate(struct.unpack('B'*n, self.fp.read(n))):
self.code2gid[code] = gid
self.gid2code[gid] = code
elif format == b'\x01':
# Format 1
(n,) = struct.unpack('B', self.fp.read(1))
code = 0
for i in range(n):
(first, nleft) = struct.unpack('BB', self.fp.read(2))
for gid in range(first, first+nleft+1):
self.code2gid[code] = gid
self.gid2code[gid] = code
code += 1
else:
raise ValueError('unsupported encoding format: %r' % format)
# Charsets
self.name2gid = {}
self.gid2name = {}
self.fp.seek(charset_pos)
format = self.fp.read(1)
if format == b'\x00':
# Format 0
n = self.nglyphs-1
for (gid, sid) in enumerate(struct.unpack('>'+'H'*n, self.fp.read(2*n))):
gid += 1
name = self.getstr(sid)
self.name2gid[name] = gid
self.gid2name[gid] = name
elif format == b'\x01':
# Format 1
(n,) = struct.unpack('B', self.fp.read(1))
sid = 0
for i in range(n):
(first, nleft) = struct.unpack('BB', self.fp.read(2))
for gid in range(first, first+nleft+1):
name = self.getstr(sid)
self.name2gid[name] = gid
self.gid2name[gid] = name
sid += 1
elif format == b'\x02':
# Format 2
assert 0
else:
raise ValueError('unsupported charset format: %r' % format)
#print(self.code2gid)
#print(self.name2gid)
#assert 0
return
def getstr(self, sid):
if sid < len(self.STANDARD_STRINGS):
return self.STANDARD_STRINGS[sid]
return self.string_index[sid-len(self.STANDARD_STRINGS)]
## TrueTypeFont
##
class TrueTypeFont:
class CMapNotFound(Exception):
pass
def __init__(self, name, fp):
self.name = name
self.fp = fp
self.tables = {}
self.fonttype = fp.read(4)
(ntables, _1, _2, _3) = struct.unpack('>HHHH', fp.read(8))
for _ in range(ntables):
(name, tsum, offset, length) = struct.unpack('>4sLLL', fp.read(16))
self.tables[name] = (offset, length)
return
def create_unicode_map(self):
if 'cmap' not in self.tables:
raise TrueTypeFont.CMapNotFound
(base_offset, length) = self.tables['cmap']
fp = self.fp
fp.seek(base_offset)
(version, nsubtables) = struct.unpack('>HH', fp.read(4))
subtables = []
for i in range(nsubtables):
subtables.append(struct.unpack('>HHL', fp.read(8)))
char2gid = {}
# Only supports subtable type 0, 2 and 4.
for (_1, _2, st_offset) in subtables:
fp.seek(base_offset+st_offset)
(fmttype, fmtlen, fmtlang) = struct.unpack('>HHH', fp.read(6))
if fmttype == 0:
char2gid.update(enumerate(struct.unpack('>256B', fp.read(256))))
elif fmttype == 2:
subheaderkeys = struct.unpack('>256H', fp.read(512))
firstbytes = [0]*8192
for (i, k) in enumerate(subheaderkeys):
firstbytes[k//8] = i
nhdrs = max(subheaderkeys)//8 + 1
hdrs = []
for i in range(nhdrs):
(firstcode, entcount, delta, offset) = struct.unpack('>HHhH', fp.read(8))
hdrs.append((i, firstcode, entcount, delta, fp.tell()-2+offset))
for (i, firstcode, entcount, delta, pos) in hdrs:
if not entcount:
continue
first = firstcode + (firstbytes[i] << 8)
fp.seek(pos)
for c in range(entcount):
gid = struct.unpack('>H', fp.read(2))
if gid:
gid += delta
char2gid[first+c] = gid
elif fmttype == 4:
(segcount, _1, _2, _3) = struct.unpack('>HHHH', fp.read(8))
segcount //= 2
ecs = struct.unpack('>%dH' % segcount, fp.read(2*segcount))
fp.read(2)
scs = struct.unpack('>%dH' % segcount, fp.read(2*segcount))
idds = struct.unpack('>%dh' % segcount, fp.read(2*segcount))
pos = fp.tell()
idrs = struct.unpack('>%dH' % segcount, fp.read(2*segcount))
for (ec, sc, idd, idr) in zip(ecs, scs, idds, idrs):
if idr:
fp.seek(pos+idr)
for c in range(sc, ec+1):
char2gid[c] = (struct.unpack('>H', fp.read(2))[0] + idd) & 0xffff
else:
for c in range(sc, ec+1):
char2gid[c] = (c + idd) & 0xffff
else:
assert 0
# create unicode map
unicode_map = FileUnicodeMap()
for (char, gid) in char2gid.items():
unicode_map.add_cid2unichr(gid, char)
return unicode_map
## Fonts
##
class PDFFontError(PDFException):
    """Raised when a PDF font dictionary is malformed or incomplete."""
    pass
class PDFUnicodeNotDefined(PDFFontError):
    """Raised when a character id has no unicode mapping in this font."""
    pass
LITERAL_STANDARD_ENCODING = LIT('StandardEncoding')
LITERAL_TYPE1C = LIT('Type1C')
# PDFFont
class PDFFont:
    """Base class for all PDF font objects.

    Holds the state shared by every font type: the /FontDescriptor values,
    a per-character-id width table, and the scale factors used to convert
    glyph-space units (1/1000 em for simple fonts) into text space.
    """

    def __init__(self, descriptor, widths, default_width=None):
        # descriptor: resolved /FontDescriptor dictionary.
        # widths: mapping of cid -> width (glyph space units).
        # default_width: fallback width; when falsy, /MissingWidth is used.
        self.descriptor = descriptor
        self.widths = widths
        self.fontname = resolve1(descriptor.get('FontName', 'unknown'))
        if isinstance(self.fontname, PSLiteral):
            self.fontname = literal_name(self.fontname)
        self.flags = int_value(descriptor.get('Flags', 0))
        self.ascent = num_value(descriptor.get('Ascent', 0))
        self.descent = num_value(descriptor.get('Descent', 0))
        self.italic_angle = num_value(descriptor.get('ItalicAngle', 0))
        self.default_width = default_width or num_value(descriptor.get('MissingWidth', 0))
        self.leading = num_value(descriptor.get('Leading', 0))
        self.bbox = list_value(descriptor.get('FontBBox', (0, 0, 0, 0)))
        # Glyph space is 1/1000 of text space for simple fonts; CID and
        # Type3 subclasses override these scales.
        self.hscale = self.vscale = .001
        return
    def __repr__(self):
        return '<PDFFont>'
    def is_vertical(self):
        """True if the font uses vertical writing mode (overridden by CID fonts)."""
        return False
    def is_multibyte(self):
        """True if character codes span multiple bytes (overridden by CID fonts)."""
        return False
    def decode(self, data):
        """Split a raw string into a list of character ids (one byte each here)."""
        return list(data)
    def get_ascent(self):
        """Ascent in text space units."""
        return self.ascent * self.vscale
    def get_descent(self):
        """Descent in text space units (typically negative)."""
        return self.descent * self.vscale
    def get_width(self):
        w = self.bbox[2]-self.bbox[0]
        if w == 0:
            # NOTE(review): the negative sign appears to flag the value as a
            # fallback for downstream layout code -- confirm with callers.
            w = -self.default_width
        return w * self.hscale
    def get_height(self):
        h = self.bbox[3]-self.bbox[1]
        if h == 0:
            h = self.ascent - self.descent
        return h * self.vscale
    def char_width(self, cid):
        """Width of *cid* in text space, falling back to the default width."""
        try:
            return self.widths[cid] * self.hscale
        except KeyError:
            try:
                # AFM-derived width tables are keyed by character, not cid.
                return self.widths[self.to_unichr(cid)] * self.hscale
            except (KeyError, PDFUnicodeNotDefined):
                return self.default_width * self.hscale
    def char_disp(self, cid):
        """Displacement of *cid*; 0 for horizontal fonts (see PDFCIDFont)."""
        return 0
    def string_width(self, s):
        """Total advance width of the raw string *s* in text space."""
        return sum(self.char_width(cid) for cid in self.decode(s))
# PDFSimpleFont
class PDFSimpleFont(PDFFont):
    """A single-byte-encoded font (Type1, TrueType, Type3).

    Resolves the /Encoding entry into a cid -> unicode table, optionally
    augmented by an embedded /ToUnicode CMap stream.
    """

    def __init__(self, descriptor, widths, spec):
        # Font encoding is specified either by a name of
        # built-in encoding or a dictionary that describes
        # the differences.
        if 'Encoding' in spec:
            encoding = resolve1(spec['Encoding'])
        else:
            encoding = LITERAL_STANDARD_ENCODING
        if isinstance(encoding, dict):
            # Base encoding plus a /Differences override array.
            name = literal_name(encoding.get('BaseEncoding', LITERAL_STANDARD_ENCODING))
            diff = list_value(encoding.get('Differences', None))
            self.cid2unicode = EncodingDB.get_encoding(name, diff)
        else:
            self.cid2unicode = EncodingDB.get_encoding(literal_name(encoding))
        self.unicode_map = None
        if 'ToUnicode' in spec:
            # An embedded CMap takes priority over the encoding table.
            strm = stream_value(spec['ToUnicode'])
            self.unicode_map = FileUnicodeMap()
            CMapParser(self.unicode_map, BytesIO(strm.get_data())).run()
        PDFFont.__init__(self, descriptor, widths)
        return
    def to_unichr(self, cid):
        """Map a character id to unicode; raises PDFUnicodeNotDefined on failure."""
        if self.unicode_map:
            try:
                return self.unicode_map.get_unichr(cid)
            except KeyError:
                pass
        try:
            return self.cid2unicode[cid]
        except KeyError:
            raise PDFUnicodeNotDefined(None, cid)
# PDFType1Font
class PDFType1Font(PDFSimpleFont):
    """A Type1 font: metrics come from the builtin AFM database when the
    base font is one of the standard 14, otherwise from the font spec.
    """

    def __init__(self, rsrcmgr, spec):
        try:
            self.basefont = literal_name(spec['BaseFont'])
        except KeyError:
            if STRICT:
                raise PDFFontError('BaseFont is missing')
            self.basefont = 'unknown'
        try:
            # Standard-14 fonts carry no widths in the PDF; use builtin metrics.
            (descriptor, widths) = FontMetricsDB.get_metrics(self.basefont)
        except KeyError:
            descriptor = dict_value(spec.get('FontDescriptor', {}))
            firstchar = int_value(spec.get('FirstChar', 0))
            #lastchar = int_value(spec.get('LastChar', 255))
            widths = list_value(spec.get('Widths', [0]*256))
            # /Widths is indexed from /FirstChar; re-key by character code.
            widths = dict((i+firstchar, w) for (i, w) in enumerate(widths))
        PDFSimpleFont.__init__(self, descriptor, widths, spec)
        if 'Encoding' not in spec and 'FontFile' in descriptor:
            # try to recover the missing encoding info from the font file.
            self.fontfile = stream_value(descriptor.get('FontFile'))
            # /Length1 is the size of the cleartext portion of the program.
            length1 = int_value(self.fontfile['Length1'])
            data = self.fontfile.get_data()[:length1]
            parser = Type1FontHeaderParser(BytesIO(data))
            self.cid2unicode = parser.get_encoding()
        return
    def __repr__(self):
        return '<PDFType1Font: basefont=%r>' % self.basefont
# PDFTrueTypeFont
class PDFTrueTypeFont(PDFType1Font):
    """A TrueType font; construction is identical to PDFType1Font."""
    def __repr__(self):
        return '<PDFTrueTypeFont: basefont=%r>' % self.basefont
# PDFType3Font
class PDFType3Font(PDFSimpleFont):
    """A Type3 font: glyphs are content streams, and glyph space is mapped
    to text space by an explicit /FontMatrix instead of the 1/1000 scale.
    """

    def __init__(self, rsrcmgr, spec):
        firstchar = int_value(spec.get('FirstChar', 0))
        #lastchar = int_value(spec.get('LastChar', 0))
        widths = list_value(spec.get('Widths', [0]*256))
        widths = dict((i+firstchar, w) for (i, w) in enumerate(widths))
        if 'FontDescriptor' in spec:
            descriptor = dict_value(spec['FontDescriptor'])
        else:
            # Type3 fonts may omit the descriptor; synthesise a minimal one.
            descriptor = {'Ascent': 0, 'Descent': 0,
                          'FontBBox': spec['FontBBox']}
        PDFSimpleFont.__init__(self, descriptor, widths, spec)
        self.matrix = tuple(list_value(spec.get('FontMatrix')))
        # Derive vertical metrics from the bounding box, and the scale
        # factors from the font matrix.
        (_, self.descent, _, self.ascent) = self.bbox
        (self.hscale, self.vscale) = apply_matrix_norm(self.matrix, (1, 1))
        return
    def __repr__(self):
        return '<PDFType3Font>'
# PDFCIDFont
class PDFCIDFont(PDFFont):
    """A composite (Type0/CID) font with multi-byte character codes.

    Character codes are decoded to cids through a CMap; unicode mappings
    come from /ToUnicode, from the embedded TrueType cmap table, or from
    the predefined CMap database, in that order of preference.
    """

    def __init__(self, rsrcmgr, spec):
        try:
            self.basefont = literal_name(spec['BaseFont'])
        except KeyError:
            if STRICT:
                raise PDFFontError('BaseFont is missing')
            self.basefont = 'unknown'
        self.cidsysteminfo = dict_value(spec.get('CIDSystemInfo', {}))
        registry = bytes_value(self.cidsysteminfo.get('Registry', b'unknown'))
        ordering = bytes_value(self.cidsysteminfo.get('Ordering', b'unknown'))
        # e.g. 'Adobe-Japan1' or 'Adobe-Identity'.
        self.cidcoding = (registry + b'-' + ordering).decode('ascii')
        try:
            name = literal_name(spec['Encoding'])
        except KeyError:
            if STRICT:
                raise PDFFontError('Encoding is unspecified')
            name = 'unknown'
        try:
            self.cmap = CMapDB.get_cmap(name)
        except CMapDB.CMapNotFound as e:
            if STRICT:
                raise PDFFontError(e)
            # Fall back to an empty CMap in lenient mode.
            self.cmap = CMap()
        try:
            descriptor = dict_value(spec['FontDescriptor'])
        except KeyError:
            if STRICT:
                raise PDFFontError('FontDescriptor is missing')
            descriptor = {}
        ttf = None
        if 'FontFile2' in descriptor:
            # Embedded TrueType program: keep it around for unicode recovery.
            self.fontfile = stream_value(descriptor.get('FontFile2'))
            ttf = TrueTypeFont(self.basefont,
                               BytesIO(self.fontfile.get_data()))
        self.unicode_map = None
        if 'ToUnicode' in spec:
            strm = stream_value(spec['ToUnicode'])
            self.unicode_map = FileUnicodeMap()
            CMapParser(self.unicode_map, BytesIO(strm.get_data())).run()
        elif self.cidcoding in ('Adobe-Identity', 'Adobe-UCS'):
            # Identity encodings carry no predefined unicode mapping; try
            # the embedded TrueType cmap table instead.
            if ttf:
                try:
                    self.unicode_map = ttf.create_unicode_map()
                except TrueTypeFont.CMapNotFound:
                    pass
        else:
            try:
                self.unicode_map = CMapDB.get_unicode_map(self.cidcoding, self.cmap.is_vertical())
            except CMapDB.CMapNotFound as e:
                # NOTE(review): deliberately ignored (best effort); `e` unused.
                pass
        self.vertical = self.cmap.is_vertical()
        if self.vertical:
            # writing mode: vertical
            widths = get_widths2(list_value(spec.get('W2', [])))
            self.disps = dict((cid, (vx, vy)) for (cid, (_, (vx, vy))) in widths.items())
            # /DW2 = [vy w]: default position vector y and vertical advance.
            (vy, w) = spec.get('DW2', [880, -1000])
            self.default_disp = (None, vy)
            widths = dict((cid, w) for (cid, (w, _)) in widths.items())
            default_width = w
        else:
            # writing mode: horizontal
            self.disps = {}
            self.default_disp = 0
            widths = get_widths(list_value(spec.get('W', [])))
            default_width = spec.get('DW', 1000)
        PDFFont.__init__(self, descriptor, widths, default_width=default_width)
        return
    def __repr__(self):
        return '<PDFCIDFont: basefont=%r, cidcoding=%r>' % (self.basefont, self.cidcoding)
    def is_vertical(self):
        return self.vertical
    def is_multibyte(self):
        return True
    def decode(self, data):
        """Decode raw bytes into a sequence of cids via this font's CMap."""
        return self.cmap.decode(data)
    def char_disp(self, cid):
        "Returns an integer for horizontal fonts, a tuple for vertical fonts."
        return self.disps.get(cid, self.default_disp)
    def to_unichr(self, cid):
        """Map *cid* to unicode; raises PDFUnicodeNotDefined when unmapped."""
        try:
            if not self.unicode_map:
                raise KeyError(cid)
            return self.unicode_map.get_unichr(cid)
        except KeyError:
            raise PDFUnicodeNotDefined(self.cidcoding, cid)
# main
def main(argv):
    """Load each file named on the command line as a CFF font and print it."""
    for fname in argv[1:]:
        with open(fname, 'rb') as fp:
            print(CFFFont(fname, fp))
    return
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit | 2ad22a810be76b93701d9a2b944aeb2f | 35.718157 | 98 | 0.539449 | 3.452414 | false | false | false | false |
euske/pdfminer | tools/runapp.py | 1 | 3528 | #!/usr/bin/env python
##
## WebApp class runner
##
## usage:
## $ runapp.py pdf2html.cgi
##
import sys
import urllib
from httplib import responses
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
## WebAppHandler
##
class WebAppHandler(SimpleHTTPRequestHandler):
APP_CLASS = None
def do_POST(self):
return self.run_cgi()
def send_head(self):
return self.run_cgi()
def run_cgi(self):
rest = self.path
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = '/' + script
scriptfile = self.translate_path(scriptname)
env = {}
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.getheader('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.getheaders('cookie'))
if co:
env['HTTP_COOKIE'] = ', '.join(co)
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE'):
env.setdefault(k, "")
app = self.APP_CLASS(infp=self.rfile, outfp=self.wfile, environ=env)
status = app.setup()
self.send_response(status, responses[status])
app.run()
return
# main
def main(argv):
    """Serve a WebApp class over HTTP (Python 2 only: uses ``imp``).

    Usage: runapp.py [-h host] [-p port] [-n name] path/to/module.py
    Loads attribute *name* (default ``WebApp``) from the given module,
    installs it on WebAppHandler, and serves requests forever.
    """
    import getopt, imp
    def usage():
        print('usage: %s [-h host] [-p port] [-n name] module.class' % argv[0])
        return 100
    try:
        (opts, args) = getopt.getopt(argv[1:], 'h:p:n:')
    except getopt.GetoptError:
        return usage()
    # Defaults: all interfaces not bound (empty host), port 8080.
    host = ''
    port = 8080
    name = 'WebApp'
    for (k, v) in opts:
        if k == '-h': host = v
        elif k == '-p': port = int(v)
        elif k == '-n': name = v
    if not args: return usage()
    path = args.pop(0)
    # Import the target module from an explicit file path.
    module = imp.load_source('app', path)
    WebAppHandler.APP_CLASS = getattr(module, name)
    print('Listening %s:%d...' % (host,port))
    httpd = HTTPServer((host,port), WebAppHandler)
    httpd.serve_forever()
    return
if __name__ == '__main__': sys.exit(main(sys.argv))
| mit | 744c045482fe81937576d0f2e09fffea | 30.221239 | 79 | 0.549036 | 3.644628 | false | false | false | false |
plamere/spotipy | examples/title_chain.py | 1 | 1871 | from spotipy.oauth2 import SpotifyClientCredentials
import spotipy
import random
'''
generates a list of songs where the first word in each subsequent song
matches the last word of the previous song.
usage: python title_chain.py [song name]
'''
# Client-credentials auth: requires the SPOTIPY_CLIENT_ID /
# SPOTIPY_CLIENT_SECRET environment variables to be set.
client_credentials_manager = SpotifyClientCredentials()
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
# Last words that make poor chain links (version/remix markers).
skiplist = set(['dm', 'remix'])
# NOTE(review): shadowed by a local ``max_offset`` inside
# find_songs_that_start_with_word, so this module-level value is unused.
max_offset = 500
# Titles already examined across the whole run (never offered twice).
seen = set()
def find_songs_that_start_with_word(word):
    """Return up to 20 unseen, plainly-titled tracks starting with *word*.

    Searches Spotify in pages of 50, scanning at most 200 results.
    Titles containing '(', '-' or '/' are skipped, as are titles whose
    last word is in the module-level ``skiplist``.  Every title examined
    is added to the global ``seen`` set so it is never offered twice.
    """
    max_titles = 20
    # Local page budget; renamed from ``max_offset``, which silently
    # shadowed the (unused) module-level constant of the same name.
    offset_limit = 200
    offset = 0
    out = []
    while offset < offset_limit and len(out) < max_titles:
        results = sp.search(q=word, type='track', limit=50, offset=offset)
        if len(results['tracks']['items']) == 0:
            break
        for item in results['tracks']['items']:
            name = item['name'].lower()
            if name in seen:
                continue
            seen.add(name)
            # Skip decorated titles (remix markers, multi-part names, etc.).
            if '(' in name or '-' in name or '/' in name:
                continue
            words = name.split()
            if len(words) > 1 and words[0] == word \
                    and words[-1] not in skiplist:
                out.append(item)
        offset += 50
    return out
def make_chain(word):
    """Print a numbered chain of songs, each title starting with the
    previous title's last word, until no candidate can be found."""
    step = 1
    while True:
        candidates = find_songs_that_start_with_word(word)
        if not candidates:
            break
        pick = random.choice(candidates)
        print(step, pick['name'] + " by " + pick['artists'][0]['name'])
        step += 1
        word = pick['name'].lower().split()[-1]
if __name__ == '__main__':
    import sys
    # Seed the chain with the first CLI argument, lower-cased.
    # (A ' '.join of all arguments was previously computed into ``title``
    # but never used -- dead code, removed.)
    make_chain(sys.argv[1].lower())
| mit | aed01a7df6c8542116fb9c77bc7b1094 | 26.115942 | 76 | 0.538215 | 3.818367 | false | false | false | false |
fuzeman/trakt.py | trakt/core/context_collection.py | 2 | 3772 | from __future__ import absolute_import, division, print_function
from trakt.core.helpers import synchronized
from six.moves import xrange
from six.moves import _thread as thread
from threading import RLock
import logging
log = logging.getLogger(__name__)
class ListCollection(object):
    """Several lists presented as one flat, list-like sequence.

    Entries in ``_lists`` may be plain lists or zero-argument callables
    returning a list (resolved lazily on every access).  Mutations only
    touch the *last* underlying collection, which must be a plain list.
    Most operations are serialised on a re-entrant lock.
    """

    def __init__(self, *lists):
        # ``lists`` is a tuple, so ``lists or []`` only substitutes the
        # empty list when no collections were supplied.
        self._lists = lists or []
        self._lock = RLock()
    @synchronized(lambda self: self._lock)
    def append(self, value):
        """Append to the last underlying collection (must be a real list)."""
        collection = self._lists[-1]
        if type(collection) is not list:
            raise ValueError()
        collection.append(value)
    @synchronized(lambda self: self._lock)
    def find_list(self, index):
        """Map a flat *index* to ``(underlying_list, local_index)``.

        Supports negative indices; raises IndexError when out of range.
        """
        count = len(self)
        if index >= count:
            raise IndexError()
        if index < 0:
            index += count
        pos = 0
        for lst in self.lists():
            l_len = len(lst)
            if pos <= index < pos + l_len:
                return lst, index - pos
            else:
                pos += l_len
        # Unreachable for valid indices; kept as a defensive fallback.
        return None, None
    @synchronized(lambda self: self._lock)
    def lists(self, resolve=True):
        """Yield each underlying collection, calling callables when *resolve*."""
        for collection in self._lists:
            if resolve and callable(collection):
                collection = collection()
            yield collection
    @synchronized(lambda self: self._lock)
    def pop(self, index=None):
        """Remove and return the item at flat *index* (default: last item)."""
        if index is None:
            index = len(self) - 1
        list, index = self.find_list(index)
        if list is None:
            raise IndexError()
        return list.pop(index)
    @synchronized(lambda self: self._lock)
    def __eq__(self, other):
        # Element-wise comparison against any indexable of the same length.
        if len(self) != len(other):
            return False
        for x in xrange(len(self)):
            if self[x] != other[x]:
                return False
        return True
    @synchronized(lambda self: self._lock)
    def __contains__(self, value):
        for x in self:
            if x == value:
                return True
        return False
    # NOTE(review): unlike most accessors, __getitem__/__setitem__ are not
    # decorated with @synchronized (find_list still takes the lock) --
    # confirm whether that asymmetry is intentional.
    def __getitem__(self, index):
        list, index = self.find_list(index)
        if list is None:
            raise IndexError()
        return list[index]
    @synchronized(lambda self: self._lock)
    def __iter__(self):
        for lst in self.lists():
            # Yield items from each list
            for x in lst:
                yield x
    @synchronized(lambda self: self._lock)
    def __len__(self):
        return sum([len(lst) for lst in self.lists()])
    def __setitem__(self, index, value):
        list, index = self.find_list(index)
        if list is None:
            raise IndexError()
        list[index] = value
    def __repr__(self):
        return '[%s]' % ', '.join(repr(x) for x in self)
    # Mutable container: explicitly unhashable.
    __hash__ = None
class ContextCollection(object):
    """Per-thread list collections layered over a shared *base* list.

    Each thread sees ``base`` (shared, read-only by convention) followed by
    its own private list; mutations go to the private list only.
    """

    def __init__(self, base=None):
        self.base = base or []
        self._lock = RLock()
        # Maps thread ident -> that thread's ListCollection.
        self._threads = {}
    @synchronized(lambda self: self._lock)
    def build(self, ident):
        """Create (once) and return the collection for thread *ident*."""
        if ident not in self._threads:
            # The lambda defers resolution so later changes to ``base`` show up.
            self._threads[ident] = ListCollection(lambda: self.base, [])
        return self._threads[ident]
    @property
    def current(self):
        """The calling thread's collection, created on first access."""
        ident = thread.get_ident()
        try:
            return self._threads[ident]
        except KeyError:
            return self.build(ident)
    def append(self, value):
        self.current.append(value)
    @synchronized(lambda self: self._lock)
    def clear(self):
        """Drop the calling thread's private collection (no-op if absent)."""
        ident = thread.get_ident()
        if ident not in self._threads:
            return
        del self._threads[ident]
    def pop(self, index=None):
        return self.current.pop(index)
    def __getitem__(self, index):
        return self.current[index]
    def __len__(self):
        return len(self.current)
| mit | 1806cd82d09f46425a9859b945527b49 | 22.141104 | 72 | 0.550901 | 4.167956 | false | false | false | false |
fuzeman/trakt.py | trakt/interfaces/sync/core/mixins.py | 2 | 2967 | from __future__ import absolute_import, division, print_function
from trakt.core.helpers import dictfilter
from trakt.core.pagination import PaginationIterator
from trakt.interfaces.base import Interface, authenticated
from trakt.mapper.sync import SyncMapper
import requests
class Get(Interface):
    """Mixin providing the "get items" sync call plus media shortcuts.

    Subclasses set ``flags`` to pass extra options into SyncMapper.
    """

    # Extra keyword flags forwarded to SyncMapper.process by subclasses.
    flags = {}
    @authenticated
    def get(self, media=None, store=None, params=None, query=None, flat=False, **kwargs):
        """GET ``<media>/<params...>`` and map the response into *store*.

        Returns mapped objects, a raw Response passthrough, or None when
        the response body is not a list.  With ``pagination=True`` in
        kwargs, ``flat=True`` is required and an iterator is returned.
        """
        if not params:
            params = []
        # NOTE(review): a caller-supplied ``params`` list is mutated in
        # place here -- confirm callers do not reuse the list.
        params.insert(0, media)
        # Request resource
        response = self.http.get(
            params=params,
            query=query,
            **dictfilter(kwargs, get=[
                'exceptions'
            ], pop=[
                'authenticated',
                'pagination',
                'validate_token'
            ])
        )
        # Parse response
        items = self.get_data(response, **kwargs)
        if isinstance(items, PaginationIterator):
            if not flat:
                raise ValueError('Pagination is only supported with `flat=True`')
            # Map lazily, page by page, as the iterator is consumed.
            return items.with_mapper(lambda items: SyncMapper.process(
                self.client, store, items,
                media=media,
                flat=flat,
                **self.flags
            ))
        if isinstance(items, requests.Response):
            # Raw response requested (e.g. exceptions/raw mode): pass through.
            return items
        if type(items) is not list:
            return None
        # Map items
        return SyncMapper.process(
            self.client, store, items,
            media=media,
            flat=flat,
            **self.flags
        )
    #
    # Shortcut methods
    #
    @authenticated
    def movies(self, store=None, **kwargs):
        """Shortcut for ``get('movies', ...)``."""
        return self.get(
            'movies',
            store=store,
            **kwargs
        )
    @authenticated
    def shows(self, store=None, **kwargs):
        """Shortcut for ``get('shows', ...)``."""
        return self.get(
            'shows',
            store=store,
            **kwargs
        )
class Add(Interface):
    """Mixin providing the "add items" sync call."""

    @authenticated
    def add(self, items, **kwargs):
        """POST *items* to the endpoint and return the parsed response."""
        request_kwargs = dictfilter(kwargs, pop=['authenticated', 'validate_token'])
        response = self.http.post(data=items, **request_kwargs)
        return self.get_data(response, **kwargs)
class Remove(Interface):
    """Mixin providing the "remove items" sync call."""

    @authenticated
    def remove(self, items, **kwargs):
        """POST *items* to the ``remove`` sub-path and return the parsed response."""
        request_kwargs = dictfilter(kwargs, pop=['authenticated', 'validate_token'])
        response = self.http.post('remove', data=items, **request_kwargs)
        return self.get_data(response, **kwargs)
class Delete(Interface):
    """Mixin providing playback-item deletion."""

    @authenticated
    def delete(self, playbackid, **kwargs):
        """DELETE the playback item; True on any 2xx status code."""
        request_kwargs = dictfilter(kwargs, pop=['authenticated', 'validate_token'])
        response = self.http.delete(path=str(playbackid), **request_kwargs)
        return response.status_code in range(200, 300)
| mit | b555edc5c05aeb79c08c013549dde5e6 | 23.121951 | 89 | 0.519717 | 4.65047 | false | false | false | false |
fuzeman/trakt.py | tests/sync/watchlist/test_mixed.py | 1 | 5667 | # flake8: noqa: F403, F405
from tests.core import mock
from trakt import Trakt
from trakt.objects import Movie, Show, Season, Episode
from datetime import datetime
from dateutil.tz import tzutc
from hamcrest import *
from httmock import HTTMock
def test_basic():
with HTTMock(mock.sync_watchlist, mock.unknown):
with Trakt.configuration.auth('mock', 'mock'):
watchlist = Trakt['sync/watchlist'].get(pagination=True, per_page=3)
# Ensure collection is valid
assert_that(watchlist, not_none())
# Resolve all pages
items = list(watchlist)
# Validate items
assert_that(items, contains(
# TRON: Legacy (2010)
all_of(
instance_of(Movie),
has_properties({
'pk': ('imdb', 'tt1104001'),
'title': 'TRON: Legacy',
'year': 2010,
# Timestamps
'listed_at': datetime(2014, 9, 1, 9, 10, 11, tzinfo=tzutc()),
# Keys
'keys': [
('imdb', 'tt1104001'),
('tmdb', '20526'),
('slug', 'tron-legacy-2010'),
('trakt', '1')
]
})
),
# Breaking Bad (2008)
all_of(
instance_of(Show),
has_properties({
'pk': ('tvdb', '81189'),
'title': 'Breaking Bad',
'year': 2008,
# Timestamps
'listed_at': datetime(2014, 9, 1, 9, 10, 11, tzinfo=tzutc()),
# Keys
'keys': [
('tvdb', '81189'),
('tmdb', '1396'),
('imdb', 'tt0903747'),
('tvrage', '18164'),
('slug', 'breaking-bad'),
('trakt', '1')
]
})
),
# Breaking Bad (2008) - S03
all_of(
instance_of(Season),
has_properties({
'pk': 3,
# Timestamps
'listed_at': datetime(2014, 9, 1, 9, 10, 11, tzinfo=tzutc()),
# Keys
'keys': [
3,
('tvdb', '171641'),
('tmdb', '3575')
]
})
),
# Breaking Bad (2008) - S04E01
all_of(
instance_of(Episode),
has_properties({
'pk': (4, 1),
'title': 'Box Cutter',
# Timestamps
'listed_at': datetime(2014, 9, 1, 9, 10, 11, tzinfo=tzutc()),
# Keys
'keys': [
(4, 1),
('tvdb', '2639411'),
('tmdb', '62118'),
('imdb', 'tt1683084'),
('trakt', '49')
]
})
)
))
def test_pagination_disabled():
    """With pagination disabled the iterator still yields every mixed item."""
    with HTTMock(mock.sync_watchlist, mock.unknown):
        with Trakt.configuration.auth('mock', 'mock'):
            watchlist = Trakt['sync/watchlist'].get(page=None, per_page=None)

            # A collection object must have been returned
            assert_that(watchlist, not_none())

            # Resolve the iterator into a flat list of items
            items = list(watchlist)

    # All fixture items share the same "listed_at" timestamp
    listed_at = datetime(2014, 9, 1, 9, 10, 11, tzinfo=tzutc())

    assert_that(items, contains(
        # TRON: Legacy (2010)
        all_of(instance_of(Movie), has_properties({
            'pk': ('imdb', 'tt1104001'),
            'title': 'TRON: Legacy',
            'year': 2010,
            'listed_at': listed_at,
            'keys': [
                ('imdb', 'tt1104001'),
                ('tmdb', '20526'),
                ('slug', 'tron-legacy-2010'),
                ('trakt', '1')
            ]
        })),

        # Breaking Bad (2008)
        all_of(instance_of(Show), has_properties({
            'pk': ('tvdb', '81189'),
            'title': 'Breaking Bad',
            'year': 2008,
            'listed_at': listed_at,
            'keys': [
                ('tvdb', '81189'),
                ('tmdb', '1396'),
                ('imdb', 'tt0903747'),
                ('tvrage', '18164'),
                ('slug', 'breaking-bad'),
                ('trakt', '1')
            ]
        })),

        # Breaking Bad (2008) - S03
        all_of(instance_of(Season), has_properties({
            'pk': 3,
            'listed_at': listed_at,
            'keys': [
                3,
                ('tvdb', '171641'),
                ('tmdb', '3575')
            ]
        })),

        # Breaking Bad (2008) - S04E01
        all_of(instance_of(Episode), has_properties({
            'pk': (4, 1),
            'title': 'Box Cutter',
            'listed_at': listed_at,
            'keys': [
                (4, 1),
                ('tvdb', '2639411'),
                ('tmdb', '62118'),
                ('imdb', 'tt1683084'),
                ('trakt', '49')
            ]
        }))
    ))
| mit | d979f876eb3b758619a9d866e880ec05 | 26.509709 | 80 | 0.369861 | 4.296437 | false | false | false | false |
fuzeman/trakt.py | tests/oauth/test_oauth.py | 1 | 5552 | from __future__ import absolute_import, division, print_function
from tests.core import mock
from tests.core.helpers import assert_url
from trakt import Trakt, TraktClient
from httmock import HTTMock
from threading import Event
import calendar
import datetime
import pytest
def test_authorize_url():
    """OAuth authorize urls carry the client id, response type and redirect uri."""
    Trakt.site_url = 'http://mock'

    base_query = {
        'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob',
        'response_type': 'code',
        'client_id': 'mock-client_id'
    }

    # Minimal call: only the redirect uri is supplied
    assert_url(Trakt['oauth'].authorize_url('urn:ietf:wg:oauth:2.0:oob'),
               '/oauth/authorize', base_query)

    # Optional "state" and "username" parameters are passed through to the url
    extended_query = dict(base_query, state='state', username='username')

    assert_url(
        Trakt['oauth'].authorize_url('urn:ietf:wg:oauth:2.0:oob', state='state', username='username'),
        '/oauth/authorize', extended_query
    )

    # A client without application credentials cannot build the url
    with pytest.raises(ValueError):
        TraktClient()['oauth'].authorize_url('urn:ietf:wg:oauth:2.0:oob')
def test_pin_url():
    """The PIN url embeds the configured application id; without one it fails."""
    with Trakt.configuration.app(id=1234):
        pin_url = Trakt['oauth/pin'].url()
        assert_url(pin_url, '/pin/1234')

    # No application id configured -> url construction must raise
    with pytest.raises(ValueError):
        Trakt['oauth/pin'].url()
def test_token():
    # NOTE(review): this test body is identical to test_token_exchange below --
    # presumably kept as a legacy alias of the renamed method; consider
    # removing one of the two.
    with HTTMock(mock.oauth_token, mock.unknown):
        # Validate `token_exchange` request/response
        assert Trakt['oauth'].token_exchange('ABCD1234', 'urn:ietf:wg:oauth:2.0:oob') == {
            'access_token': 'mock-access_token',
            'token_type': 'bearer',
            'expires_in': 7200,
            'refresh_token': 'mock-refresh_token',
            'scope': 'public'
        }

        # Ensure `token_exchange` raises a `ValueError` on incorrect configuration
        # (a fresh TraktClient has no client id/secret configured)
        with pytest.raises(ValueError):
            assert TraktClient()['oauth'].token_exchange('ABCD1234', 'urn:ietf:wg:oauth:2.0:oob')
def test_token_exchange():
    """Exchange an authorization code for an access token."""
    expected = {
        'access_token': 'mock-access_token',
        'token_type': 'bearer',
        'expires_in': 7200,
        'refresh_token': 'mock-refresh_token',
        'scope': 'public'
    }

    with HTTMock(mock.oauth_token, mock.unknown):
        # A configured client receives the token response as a dict
        response = Trakt['oauth'].token_exchange('ABCD1234', 'urn:ietf:wg:oauth:2.0:oob')
        assert response == expected

        # An unconfigured client refuses to perform the exchange
        with pytest.raises(ValueError):
            assert TraktClient()['oauth'].token_exchange('ABCD1234', 'urn:ietf:wg:oauth:2.0:oob')
def test_token_refresh():
    """Refresh an access token from a refresh token."""
    expected = {
        'access_token': 'mock-access_token',
        'token_type': 'bearer',
        'expires_in': 7200,
        'refresh_token': 'mock-refresh_token',
        'scope': 'public'
    }

    with HTTMock(mock.oauth_token, mock.unknown):
        # A configured client receives the refreshed token as a dict
        response = Trakt['oauth'].token_refresh('mock-refresh_token', 'urn:ietf:wg:oauth:2.0:oob')
        assert response == expected

        # An unconfigured client cannot refresh tokens
        with pytest.raises(ValueError):
            assert TraktClient()['oauth'].token_refresh('mock-refresh_token', 'urn:ietf:wg:oauth:2.0:oob')
def test_request():
    """Authenticated requests honour token validity and optional refreshing."""
    with HTTMock(mock.oauth_token, mock.fixtures, mock.unknown):
        # Build a mock authorization created "now"
        created_at = calendar.timegm(datetime.datetime.utcnow().utctimetuple())

        authorization = {
            'access_token': 'mock',
            'token_type': 'bearer',
            'created_at': created_at,
            'expires_in': 7 * 24 * 60 * 60,
            'refresh_token': 'mock-refresh_token',
            'scope': 'public'
        }

        # A valid (non-expired) token allows the request
        with Trakt.configuration.oauth.from_response(authorization):
            assert Trakt['sync/collection'].movies() is not None

        # An expired token causes the request to return nothing
        authorization['expires_in'] = 0

        with Trakt.configuration.oauth.from_response(authorization):
            assert Trakt['sync/collection'].movies() is None

        # With refreshing enabled the expired token is renewed transparently
        with Trakt.configuration.oauth.from_response(authorization, refresh=True):
            assert Trakt['sync/collection'].movies() is not None
def test_refresh_deadlock():
    # Regression test: a request made from inside the "oauth.refresh" event
    # handler must not trigger another token refresh (no refresh recursion).
    with HTTMock(mock.oauth_token, mock.sync_get, mock.unknown):
        # Construct client
        client = TraktClient()

        # Configure client
        client.configuration.defaults.client(
            id='mock-client_id',
            secret='mock-client_secret'
        )

        # Bind to events
        refreshed = Event()  # set on the first refresh
        looped = Event()     # set only if a second (recursive) refresh occurs

        @client.on('oauth.refresh')
        def on_token_refreshed(username, authorization):
            if refreshed.is_set():
                # A second refresh means we recursed -> flag the loop
                looped.set()
                return

            refreshed.set()

            # Test refresh recursion: a request made while the token is being
            # refreshed should return None instead of refreshing again
            assert client['sync/collection'].movies() is None

        # Attempt request with expired authorization ("expires_in" is zero)
        expired_authorization = {
            'access_token': 'mock-access_token',
            'token_type': 'bearer',
            'created_at': calendar.timegm(datetime.datetime.utcnow().utctimetuple()),
            'expires_in': 0,
            'refresh_token': 'mock-refresh_token',
            'scope': 'public'
        }

        with client.configuration.oauth.from_response(expired_authorization, refresh=True, username='mock'):
            assert client['sync/collection'].movies() is not None

        # Ensure requests inside "oauth.refresh" don't cause refresh loops
        assert not looped.is_set()
| mit | 56c332f9d38f37b1c43f5e29c9efe539 | 33.271605 | 108 | 0.597442 | 3.893408 | false | true | false | false |
fuzeman/trakt.py | trakt/interfaces/sync/history.py | 2 | 2373 | from __future__ import absolute_import, division, print_function
from trakt.core.helpers import to_iso8601_datetime
from trakt.interfaces.base import authenticated
from trakt.interfaces.sync.core.mixins import Get, Add, Remove
class SyncHistoryInterface(Get, Add, Remove):
    """Interface for the authenticated user's watched history (``sync/history``)."""

    path = 'sync/history'
    flags = {'is_watched': True}

    def get(self, media=None, id=None, start_at=None, end_at=None, store=None,
            extended=None, page=None, per_page=None, **kwargs):
        """Retrieve watched history, optionally restricted to one media type or item.

        :param media: media type ('movies', 'shows', 'seasons', 'episodes')
        :param id: identifier of a single item (requires `media` to be set)
        :param start_at: only include plays after this datetime
        :param end_at: only include plays before this datetime
        :param store: dictionary to update with the retrieved items
        """
        if id and not media:
            raise ValueError('The "id" parameter also requires the "media" parameter to be defined')

        # Positional url parameters (the optional item identifier)
        params = [id] if id else []

        # Query-string parameters
        query = {
            'extended': extended,
            'page': page,
            'limit': per_page
        }

        if start_at:
            query['start_at'] = to_iso8601_datetime(start_at)

        if end_at:
            query['end_at'] = to_iso8601_datetime(end_at)

        # Delegate the actual request to the shared `Get` mixin
        return super(SyncHistoryInterface, self).get(
            media, store, params,
            flat=True,
            query=query,
            **kwargs
        )

    #
    # Shortcut methods
    #

    @authenticated
    def movies(self, id=None, start_at=None, end_at=None, store=None, **kwargs):
        """Shortcut for :code:`get('movies', ...)`."""
        return self.get('movies', id=id, start_at=start_at,
                        end_at=end_at, store=store, **kwargs)

    @authenticated
    def shows(self, id=None, start_at=None, end_at=None, store=None, **kwargs):
        """Shortcut for :code:`get('shows', ...)`."""
        return self.get('shows', id=id, start_at=start_at,
                        end_at=end_at, store=store, **kwargs)

    @authenticated
    def seasons(self, id=None, start_at=None, end_at=None, store=None, **kwargs):
        """Shortcut for :code:`get('seasons', ...)`."""
        return self.get('seasons', id=id, start_at=start_at,
                        end_at=end_at, store=store, **kwargs)

    @authenticated
    def episodes(self, id=None, start_at=None, end_at=None, store=None, **kwargs):
        """Shortcut for :code:`get('episodes', ...)`."""
        return self.get('episodes', id=id, start_at=start_at,
                        end_at=end_at, store=store, **kwargs)
| mit | bc45f35144f033088aa152761cbd1be3 | 25.076923 | 100 | 0.524231 | 3.871126 | false | false | false | false |
fuzeman/trakt.py | trakt/interfaces/__init__.py | 2 | 2427 | from __future__ import absolute_import, division, print_function
from trakt.interfaces import auth
from trakt.interfaces import calendars
from trakt.interfaces import lists
from trakt.interfaces import movies
from trakt.interfaces import oauth
from trakt.interfaces import scrobble
from trakt.interfaces import search
from trakt.interfaces import shows
from trakt.interfaces import sync
from trakt.interfaces import users
# Registry of every interface exposed through the client's item lookup
# (e.g. Trakt['sync/history']). Each interface declares its url path in a
# `path` attribute; `get_interfaces()` splits those paths into components
# and `construct_map()` nests them into a lookup dictionary.
INTERFACES = [
    # /
    auth.AuthInterface,
    oauth.OAuthInterface,
    oauth.DeviceOAuthInterface,
    oauth.PinOAuthInterface,

    scrobble.ScrobbleInterface,
    search.SearchInterface,

    # /calendars/
    calendars.AllCalendarsInterface,
    calendars.MyCalendarsInterface,

    # /lists/
    lists.ListsInterface,

    # /sync/
    sync.SyncInterface,

    sync.SyncCollectionInterface,
    sync.SyncHistoryInterface,
    sync.SyncPlaybackInterface,
    sync.SyncRatingsInterface,
    sync.SyncWatchedInterface,
    sync.SyncWatchlistInterface,

    # /shows/
    shows.ShowsInterface,

    # /movies/
    movies.MoviesInterface,

    # /users/
    users.UsersInterface,

    users.UsersProfileInterface,
    users.UsersSettingsInterface,

    # /users/following
    users.UsersFollowingInterface,

    # /users/friends
    users.UsersFriendsInterface,

    # /users/history
    users.UsersHistoryInterface,

    # /users/lists/
    users.UsersListsInterface,
    users.UsersListInterface,

    # /users/ratings
    users.UsersRatingsInterface,

    # /users/watched
    users.UsersWatchedInterface,

    # /users/watchlist
    users.UsersWatchlistInterface
]
def get_interfaces():
    """Yield ``(path_components, interface)`` pairs for every registered interface."""
    for interface in INTERFACES:
        if not interface.path:
            # An interface without a path cannot be routed
            continue

        stripped = interface.path.strip('/')
        components = stripped.split('/') if stripped else []

        yield components, interface
def construct_map(client, d=None, interfaces=None):
    """Recursively build a nested dictionary of path components to interface instances.

    A path like ``['users', 'lists']`` produces ``d['users']['lists']``; if a
    leaf instance already occupies an intermediate key, it is demoted into the
    ``None`` slot of the new sub-dictionary.
    """
    if d is None:
        d = {}

    if interfaces is None:
        interfaces = get_interfaces()

    for path, interface in interfaces:
        if not path:
            continue

        key = path.pop(0)

        if not path:
            # Leaf reached: instantiate the interface at this key
            d[key] = interface(client)
            continue

        # Descend: make sure the node at `key` is a dictionary, moving any
        # previously stored leaf instance to the `None` slot
        node = d.get(key, {})

        if type(node) is not dict:
            node = {None: node}

        construct_map(client, node, [(path, interface)])

        d[key] = node

    return d
| mit | 62bfb41b1905b102c4a7b805c42146fc | 19.567797 | 64 | 0.658838 | 4.099662 | false | false | false | false |
fuzeman/trakt.py | trakt/interfaces/users/history.py | 2 | 3409 | from __future__ import absolute_import, division, print_function
from trakt.core.helpers import clean_username, dictfilter, to_iso8601_datetime
from trakt.core.pagination import PaginationIterator
from trakt.interfaces.base import Interface, authenticated
from trakt.mapper import SyncMapper
import requests
class UsersHistoryInterface(Interface):
    # Interface for another user's watched history (``users/*/history``).
    path = 'users/*/history'
    flags = {'is_watched': True}

    def get(self, username, media=None, id=None, start_at=None, end_at=None, store=None,
            extended=None, page=None, per_page=None, **kwargs):
        """Retrieve a user's watched history, optionally filtered by media type/item.

        Returns a mapped pagination iterator, mapped items, the raw
        :class:`requests.Response` (pass-through), or ``None`` on failure.
        """
        if not media and id:
            raise ValueError('The "id" parameter also requires the "media" parameter to be defined')

        # Build parameters (positional url segments: media type, then item id)
        params = []

        if media:
            params.append(media)

        if id:
            params.append(id)

        # Build query (datetime filters are serialized to ISO-8601)
        query = {
            'extended': extended,
            'page': page,
            'limit': per_page
        }

        if start_at:
            query['start_at'] = to_iso8601_datetime(start_at)

        if end_at:
            query['end_at'] = to_iso8601_datetime(end_at)

        # Send request (only whitelisted kwargs are forwarded to the http layer)
        response = self.http.get(
            '/users/%s/history' % (clean_username(username)),
            params=params,
            query=query,
            **dictfilter(kwargs, get=[
                'exceptions'
            ], pop=[
                'authenticated',
                'pagination',
                'validate_token'
            ])
        )

        # Parse response
        items = self.get_data(response, **kwargs)

        if isinstance(items, PaginationIterator):
            # Paginated request: map each page lazily as it is fetched
            return items.with_mapper(lambda items: SyncMapper.process(
                self.client, store, items,
                media=media,
                flat=True,
                **self.flags
            ))

        if isinstance(items, requests.Response):
            # Raw-response mode requested by the caller
            return items

        if type(items) is not list:
            # Request failed (or returned an unexpected payload)
            return None

        # Non-paginated request: map the complete item list at once
        return SyncMapper.process(
            self.client, store, items,
            media=media,
            flat=True,
            **self.flags
        )

    #
    # Shortcut methods
    #

    @authenticated
    def movies(self, username, id=None, start_at=None, end_at=None, store=None, **kwargs):
        """Shortcut for :code:`get(username, 'movies', ...)`."""
        return self.get(
            username, 'movies',
            id=id,
            start_at=start_at,
            end_at=end_at,
            store=store,
            **kwargs
        )

    @authenticated
    def shows(self, username, id=None, start_at=None, end_at=None, store=None, **kwargs):
        """Shortcut for :code:`get(username, 'shows', ...)`."""
        return self.get(
            username, 'shows',
            id=id,
            start_at=start_at,
            end_at=end_at,
            store=store,
            **kwargs
        )

    @authenticated
    def seasons(self, username, id=None, start_at=None, end_at=None, store=None, **kwargs):
        """Shortcut for :code:`get(username, 'seasons', ...)`."""
        return self.get(
            username, 'seasons',
            id=id,
            start_at=start_at,
            end_at=end_at,
            store=store,
            **kwargs
        )

    @authenticated
    def episodes(self, username, id=None, start_at=None, end_at=None, store=None, **kwargs):
        """Shortcut for :code:`get(username, 'episodes', ...)`."""
        return self.get(
            username, 'episodes',
            id=id,
            start_at=start_at,
            end_at=end_at,
            store=store,
            **kwargs
        )
| mit | 0fe8c777d4f4e3e842020b448ae1170b | 25.84252 | 100 | 0.523614 | 4.172583 | false | false | false | false |
fuzeman/trakt.py | trakt/objects/comment.py | 6 | 3241 | from __future__ import absolute_import, division, print_function
from trakt.core.helpers import from_iso8601_datetime
from trakt.objects.core.helpers import update_attributes
class Comment(object):
    """A comment (or review) on a Trakt item, together with its author details."""

    def __init__(self, client, keys):
        self._client = client

        # Keys for the various services, e.g. [('trakt', <id>)]
        self.keys = keys

        self.parent_id = None    # id of the parent comment (for replies)
        self.comment = None      # comment body text
        self.spoiler = None      # True if flagged as containing a spoiler
        self.review = None       # True if the comment is a review
        self.replies = None      # number of replies
        self.likes = None        # number of likes
        self.created_at = None   # datetime the comment was created
        self.liked_at = None     # datetime the comment was liked
        self.user = None         # author details
        self.user_rating = None  # author's rating for the item

    @property
    def id(self):
        """Trakt identifier of this comment, or ``None`` when unavailable."""
        pk = self.pk

        if pk is None:
            return None

        return pk[1]

    @property
    def pk(self):
        """Primary key ``("trakt", <id>)``, or ``None`` when no keys exist."""
        if not self.keys:
            return None

        return self.keys[0]

    def _update(self, info=None):
        if not info:
            return

        # Timestamps arrive as ISO-8601 strings and are parsed to datetimes
        for name in ('created_at', 'liked_at'):
            if name in info:
                setattr(self, name, from_iso8601_datetime(info.get(name)))

        # Remaining attributes are copied verbatim from the response
        update_attributes(self, info, [
            'parent_id',
            'comment',
            'spoiler',
            'review',
            'replies',
            'likes',
            'user',
            'user_rating'
        ])

    @classmethod
    def _construct(cls, client, keys, info, **kwargs):
        if not info:
            return None

        comment = cls(client, keys, **kwargs)
        comment._update(info)

        return comment

    def __repr__(self):
        return '<Comment %r (%s)>' % (self.comment, self.id)

    def __str__(self):
        return repr(self)
| mit | a7cc918a62c846277f4bdec6ad8149d2 | 19.25625 | 87 | 0.487812 | 4.225554 | false | false | false | false |
lneuhaus/pyrpl | pyrpl/software_modules/lockbox/output.py | 1 | 16258 | from __future__ import division
import numpy as np
from scipy import interpolate
from ...software_modules.lockbox.input import Signal
from ...attributes import BoolProperty, FloatProperty, SelectProperty, \
FilterProperty, FrequencyProperty, IntProperty
from ...curvedb import CurveDB
from ...hardware_modules.asg import Asg0, Asg1
from ...hardware_modules.pid import Pid
from ...widgets.module_widgets import OutputSignalWidget
class AdditionalFilterAttribute(FilterProperty):
    # proxy to the pid inputfilter attribute that emits a signal when changed
    def valid_frequencies(self, obj):
        # Delegate to the Pid class' own inputfilter descriptor for the set
        # of realizable cutoff frequencies.
        return obj.pid.__class__.inputfilter.valid_frequencies(obj.pid)

    def get_value(self, obj):
        # Read the filter setting straight from the underlying pid module.
        return obj.pid.inputfilter

    def set_value(self, obj, value):
        # Write through to the pid, then ask the lockbox to redraw the
        # transfer function since the loop shape changed.
        obj.pid.inputfilter = value
        obj.lockbox._signal_launcher.update_transfer_function.emit([obj])
class OutputSignal(Signal):
    """
    As many output signals as desired can be added to the lockbox. Each
    output defines:
      - name: the name of the output.
      - dc_gain: how much the model's variable is expected to change for 1 V
        on the output (in *unit*)
      - unit: see above, should be one of the units available in the model.
      - sweep_amplitude/offset/frequency/waveform: what properties to use when
        sweeping the output
      - output_channel: what physical output is used.
      - p/i: the gains to use in a loop: those values are to be understood as
        full loop gains (p in [1], i in [Hz])
      - additional_filter: a filter (4 cut-off frequencies) to add to the loop
        (in sweep and lock mode)
      - extra_module: extra module to add just before the output (usually iir).
      - extra_module_state: name of the state to use for the extra_module.
      - tf_curve: the index of the curve describing the analog transfer
        function behind the output.
      - tf_filter: alternatively, the analog transfer function can be specified
        by a filter (4 cut-off frequencies).
      - desired_unity_gain_frequency: desired value for unity gain frequency.
      - tf_type: ["flat", "curve", "filter"], how is the analog transfer
        function specified.
    """
    _widget_class = OutputSignalWidget
    _gui_attributes = ['unit',
                       'sweep_amplitude',
                       'sweep_offset',
                       'sweep_frequency',
                       'sweep_waveform',
                       'dc_gain',
                       'output_channel',
                       'p',
                       'i',
                       'additional_filter',
                       'analog_filter_cutoff',
                       'extra_module',
                       'extra_module_state',
                       'desired_unity_gain_frequency',
                       'max_voltage',
                       'min_voltage']
    _setup_attributes = _gui_attributes + ['assisted_design', 'tf_curve',
                                           'tf_type']

    # main attributes
    dc_gain = FloatProperty(default=1.0, min=-1e10, max=1e10, call_setup=True)
    output_channel = SelectProperty(options=['out1', 'out2',
                                             'pwm0', 'pwm1'])
    unit = SelectProperty(default='V/V',
                          options=lambda inst:
                          [u+"/V" for u in inst.lockbox._output_units],
                          call_setup=True,
                          ignore_errors=True)
    tf_type = SelectProperty(["flat", "filter", "curve"],
                             default="filter",
                             call_setup=True)
    tf_curve = IntProperty(call_setup=True)
    # sweep properties
    sweep_amplitude = FloatProperty(default=1., min=-1, max=1, call_setup=True)
    sweep_offset = FloatProperty(default=0.0, min=-1, max=1, call_setup=True)
    sweep_frequency = FrequencyProperty(default=50.0, call_setup=True)
    sweep_waveform = SelectProperty(options=Asg1.waveforms, default='ramp', call_setup=True)
    # gain properties
    assisted_design = BoolProperty(default=True, call_setup=True)
    desired_unity_gain_frequency = FrequencyProperty(default=100.0, min=0, max=1e10, call_setup=True)
    analog_filter_cutoff = FrequencyProperty(default=0, min=0, max=1e10, increment=0.1, call_setup=True)
    p = FloatProperty(min=-1e10, max=1e10, call_setup=True)
    i = FloatProperty(min=-1e10, max=1e10, call_setup=True)
    # additional filter properties
    additional_filter = AdditionalFilterAttribute() #call_setup=True)
    extra_module = SelectProperty(['None', 'iir', 'pid', 'iq'], call_setup=True)
    extra_module_state = SelectProperty(options=['None'], call_setup=True)
    # internal state of the output
    current_state = SelectProperty(options=['lock', 'unlock', 'sweep'],
                                   default='unlock')
    max_voltage = FloatProperty(default=1.0, min=-1.0, max=1.0,
                                call_setup=True,
                                doc="positive saturation voltage")
    min_voltage = FloatProperty(default=-1.0,
                                min=-1.0, max=1.0,
                                call_setup=True,
                                doc="negative saturation voltage")

    def signal(self):
        """Name of the pid module that carries this output's signal."""
        return self.pid.name

    @property
    def pid(self):
        # Lazily reserve a pid module from the pool on first access and
        # route it to the configured physical output.
        if not hasattr(self, '_pid') or self._pid is None:
            self._pid = self.pyrpl.pids.pop(self.name)
            self._setup_pid_output()
        return self._pid

    @property
    def is_saturated(self):
        """
        Returns
        -------
        True: if the output has saturated
        False: otherwise
        """
        ival, max, min = self.pid.ival, self.max_voltage, \
                         self.min_voltage
        sample = getattr(self.pyrpl.rp.sampler, self.pid.name)
        # criterion for saturation: integrator value saturated
        # and current value (including pid) as well
        if (ival > max or ival < min) and (sample > max or sample < min):
            return True
        else:
            return False

    def _setup_pid_output(self):
        """Route the pid module to the configured analog or pwm output."""
        self.pid.max_voltage = self.max_voltage
        self.pid.min_voltage = self.min_voltage
        if self.output_channel.startswith('out'):
            self.pid.output_direct = self.output_channel
            # disconnect this pid from any pwm channel it was driving before
            for pwm in [self.pyrpl.rp.pwm0, self.pyrpl.rp.pwm1]:
                if pwm.input == self.pid.name:
                    pwm.input = 'off'
        elif self.output_channel.startswith('pwm'):
            # pwm outputs are driven through the pwm module instead of
            # the pid's direct output
            self.pid.output_direct = 'off'
            pwm = getattr(self.pyrpl.rp, self.output_channel)
            pwm.input = self.pid
        else:
            raise NotImplementedError(
                "Selected output_channel '%s' is not implemented"
                % self.output_channel)

    def _clear(self):
        """
        Free up resources associated with the output
        """
        self.pyrpl.pids.free(self.pid)
        self._pid = None
        super(OutputSignal, self)._clear()

    def unlock(self, reset_offset=False):
        """Open the loop by zeroing the gains; optionally reset the integrator."""
        self.pid.p = 0
        self.pid.i = 0
        if reset_offset:
            self.pid.ival = 0
        self.current_state = 'unlock'
        # benefit from the occasion and do proper initialization
        self._setup_pid_output()

    def sweep(self):
        """Drive the output with the configured asg sweep waveform."""
        self.unlock(reset_offset=True)
        self.pid.input = self.lockbox.asg
        self.lockbox.asg.setup(amplitude=self.sweep_amplitude,
                               offset=self.sweep_offset,
                               frequency=self.sweep_frequency,
                               waveform=self.sweep_waveform,
                               trigger_source='immediately',
                               cycles_per_burst=0)
        # unity proportional gain simply forwards the asg to the output
        self.pid.setpoint = 0.
        self.pid.p = 1.
        self.current_state = 'sweep'

    def lock(self, input=None, setpoint=None, offset=None, gain_factor=None):
        """
        Closes the lock loop, using the required p and i parameters.

        Arguments left at None keep their previously stored value.
        """
        # store lock parameters in case an update is requested
        self._lock_input = self._lock_input if input is None else input
        self._lock_setpoint = self._lock_setpoint if setpoint is None else setpoint
        self._lock_gain_factor = self._lock_gain_factor if gain_factor is None else gain_factor
        # Parameter 'offset' is not internally stored because another call to 'lock()'
        # shouldnt reset the offset by default as this would un-lock an existing lock
        #self._setup_pid_output()  # optional to ensure that pid output is properly set
        self._setup_pid_lock(input=self._lock_input,
                             setpoint=self._lock_setpoint,
                             offset=offset,
                             gain_factor=self._lock_gain_factor)
        self.current_state = 'lock'

    def _setup_pid_lock(self, input, setpoint, offset=None, gain_factor=1.0):
        """
        If current mode is "lock", updates the gains of the underlying pid module such that:
        - input.gain * pid.p * output.dc_gain = output.p
        - input.gain * pid.i * output.dc_gain = output.i
        """
        if isinstance(input, str):  # used to be basestring
            input = self.lockbox.inputs[input]
        # The total loop is composed of the pid and external components.
        # The external parts are 1) the output with the predefined gain and 2)
        # the input (error signal) with a setpoint-dependent slope.
        # 1) model the output: dc_gain converted into units of setpoint_unit_per_V
        output_unit = self.unit.split('/')[0]
        external_loop_gain = self.dc_gain * self.lockbox._unit_in_setpoint_unit(output_unit)
        # 2) model the input: slope comes in units of V_per_setpoint_unit,
        # which cancels previous unit and we end up with a dimensionless ext. gain.
        external_loop_gain *= input.expected_slope(setpoint)
        # we should avoid setting gains to infinity
        if external_loop_gain == 0:
            self._logger.warning("External loop gain for output %s is zero. "
                                 "Skipping pid lock for this step. ",
                                 self.name)
            if offset is not None:
                self.pid.ival = offset
        else:  # write values to pid module
            # set gains to zero before switching setpoint and input,
            # to avoid huge gains while transiting
            self.pid.p = 0
            self.pid.i = 0
            self.pid.setpoint = input.expected_signal(setpoint) + input.calibration_data._analog_offset
            self.pid.input = input.signal()
            # set offset if applicable
            if offset is not None:
                self.pid.ival = offset
            # set gains
            self.pid.p = self.p / external_loop_gain * gain_factor
            self.pid.i = self.i / external_loop_gain * gain_factor

    def _setup_offset(self, offset):
        # directly preset the integrator value (output voltage) of the pid
        self.pid.ival = offset

    def _setup(self):
        """Synchronize gain settings and re-apply the current output state."""
        # synchronize assisted_design parameters with p/i setting
        self._setup_ongoing = True
        if self.assisted_design:
            # derive p/i from the assisted-design parameters
            self.i = self.desired_unity_gain_frequency
            if self.analog_filter_cutoff == 0:
                self.p = 0
            else:
                self.p = self.i / self.analog_filter_cutoff
        else:
            # derive the assisted-design parameters from p/i
            self.desired_unity_gain_frequency = self.i
            if self.p == 0:
                self.analog_filter_cutoff = 0
            else:
                self.analog_filter_cutoff = self.i / self.p
        self._setup_ongoing = False
        # re-enable lock/sweep/unlock with new parameters
        if self.current_state == 'sweep':
            self.sweep()
        elif self.current_state == 'unlock':
            self.unlock()
        elif self.current_state == 'lock':
            self.lock()
        # plot current transfer function
        self.lockbox._signal_launcher.update_transfer_function.emit([self])

    ##############################
    # transfer function plotting #
    ##############################
    def tf_freqs(self):
        """
        Frequency values to plot the transfer function. Frequency (abcissa) of
        the tf_curve if tf_type=="curve", else: logspace(0, 6, 2000)
        """
        if self.tf_type == 'curve':  # req axis should be that of the curve
            try:
                c = CurveDB.get(self.tf_curve)
            except:
                self._logger.warning("Cannot load specified transfer function %s",
                                     self.tf_curve)
            else:
                return c.data.index
        # by default
        return np.logspace(0, 6, 2000)

    def transfer_function(self, freqs):
        """
        Returns the design transfer function for the output
        (analog model multiplied by the pid transfer function).
        """
        analog_tf = np.ones(len(freqs), dtype=complex)
        if self.tf_type == 'filter':
            # use logic implemented in PID to simulate analog filters
            analog_tf = Pid._filter_transfer_function(freqs, self.analog_filter_cutoff)
        if self.tf_type == 'curve':
            curve = CurveDB.get(self.tf_curve)
            x = curve.data.index
            y = curve.data.values
            # sample the curve transfer function at the requested frequencies
            ampl = interpolate.interp1d(x, abs(y))(freqs)
            phase = interpolate.interp1d(x, np.unwrap(np.angle(y)))(freqs)
            analog_tf = ampl * np.exp(1j * phase)
        # multiply by PID transfer function to get the loop transfer function
        # same as Pid.transfer_function(freqs) but avoids reading registers form FPGA
        result = analog_tf * Pid._transfer_function(
            freqs, p=self.p, i=self.i,
            frequency_correction=self.pid._frequency_correction,
            filter_values=self.additional_filter)
        return result

    # TODO: re-implement this function for if an iir filter is set
    # def setup_iir(self, **kwargs):
    #     """
    #     Inserts an iir filter before the output pid. For correct routing,
    #     the pid input must be set correctly, as the iir filter will reuse
    #     the pid input setting as its own input and send its output through
    #     the pid.
    #
    #     Parameters
    #     ----------
    #     kwargs: dict
    #         Any kwargs that are accepted by IIR.setup(). By default,
    #         the output's iir section in the config file is used for these
    #         parameters.
    #
    #     Returns
    #     -------
    #     None
    #     """
    #     # load data from config file
    #     try:
    #         iirconfig = self._config.iir._dict
    #     except KeyError:
    #         logger.debug("No iir filter was defined for output %s. ",
    #                      self._name)
    #         return
    #     else:
    #         logger.debug("Setting up IIR filter for output %s. ", self._name)
    #     # overwrite defaults with kwargs
    #     iirconfig.update(kwargs)
    #     if 'curve' in iirconfig:
    #         iirconfig.update(bodefit.iirparams_from_curve(
    #             id=iirconfig.pop('curve')))
    #     else:
    #         # workaround for complex numbers from yaml
    #         iirconfig["zeros"] = [complex(n) for n in iirconfig.pop("zeros")]
    #         iirconfig["poles"] = [complex(n) for n in iirconfig.pop("poles")]
    #     # get module
    #     if not hasattr(self, "iir"):
    #         self.iir = self._rp.iirs.pop()
    #         logger.debug("IIR filter retrieved for output %s. ", self._name)
    #     # output_direct off, since iir goes through pid
    #     iirconfig["output_direct"] = "off"
    #     # input setting -> copy the pid input if it is not erroneously on iir
    #     pidinput = self.pid.input
    #     if pidinput != 'iir':
    #         iirconfig["input"] = pidinput
    #     # setup
    #     self.iir.setup(**iirconfig)
    #     # route iir output through pid
    #     self.pid.input = self.iir.name
class PiezoOutput(OutputSignal):
    # Same as OutputSignal, but with a default unit of 'm/V' (piezo
    # displacement per volt) for the dc gain.
    unit = SelectProperty(default='m/V',
                          options=lambda inst:
                          [u + "/V" for u in inst.lockbox._output_units],
                          call_setup=True)
| mit | bef9d14127dfc71a973e4311bf0373ad | 42.587131 | 104 | 0.570119 | 4.054364 | false | false | false | false |
lneuhaus/pyrpl | pyrpl/redpitaya.py | 1 | 23600 | ###############################################################################
# pyrpl - DSP servo controller for quantum optics with the RedPitaya
# Copyright (C) 2014-2016 Leonhard Neuhaus (neuhaus@spectro.jussieu.fr)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
from . import redpitaya_client
from . import hardware_modules as rp
from .sshshell import SshShell
from .pyrpl_utils import get_unique_name_list_from_class_list, update_with_typeconversion
from .memory import MemoryTree
from .errors import ExpectedPyrplError
from .widgets.startup_widget import HostnameSelectorWidget
import logging
import os
import random
import socket
from time import sleep
import numpy as np
from paramiko import SSHException
from scp import SCPClient, SCPException
from collections import OrderedDict
# input is the wrong function in python 2
try:
raw_input
except NameError: # Python 3
raw_input = input
# default parameters for redpitaya object creation
defaultparameters = dict(
hostname='', #'192.168.1.100', # the ip or hostname of the board, '' triggers gui
port=2222, # port for PyRPL datacommunication
sshport=22, # port of ssh server - default 22
user='root',
password='root',
delay=0.05, # delay between ssh commands - console is too slow otherwise
autostart=True, # autostart the client?
reloadserver=False, # reinstall the server at startup if not necessary?
reloadfpga=True, # reload the fpga bitfile at startup?
serverbinfilename='fpga.bin', # name of the binfile on the server
serverdirname = "//opt//pyrpl//", # server directory for server app and bitfile
leds_off=True, # turn off all GPIO lets at startup (improves analog performance)
frequency_correction=1.0, # actual FPGA frequency is 125 MHz * frequency_correction
timeout=1, # timeout in seconds for ssh communication
monitor_server_name='monitor_server', # name of the server program on redpitaya
silence_env=False, # suppress all environment variables that may override the configuration?
gui=True # show graphical user interface or work on command-line only?
)
class RedPitaya(object):
cls_modules = [rp.HK, rp.AMS, rp.Scope, rp.Sampler, rp.Asg0, rp.Asg1] + \
[rp.Pwm] * 2 + [rp.Iq] * 3 + [rp.Pid] * 3 + [rp.Trig] + [ rp.IIR]
def __init__(self, config=None, # configfile is needed to store parameters. None simulates one
             **kwargs):
    """ this class provides the basic interface to the redpitaya board
    The constructor installs and starts the communication interface on the RedPitaya
    at 'hostname' that allows remote control and readout
    'config' is the config file or MemoryTree of the config file. All keyword arguments
    may be specified in the branch 'redpitaya' of this config file. Alternatively,
    they can be overwritten by keyword arguments at the function call.
    'config=None' specifies that no persistent config file is saved on the disc.
    Possible keyword arguments and their defaults are:
        hostname='192.168.1.100', # the ip or hostname of the board
        port=2222,  # port for PyRPL datacommunication
        sshport=22,  # port of ssh server - default 22
        user='root',
        password='root',
        delay=0.05,  # delay between ssh commands - console is too slow otherwise
        autostart=True,  # autostart the client?
        reloadserver=False,  # reinstall the server at startup if not necessary?
        reloadfpga=True,  # reload the fpga bitfile at startup?
        filename='fpga//red_pitaya.bin',  # name of the bitfile for the fpga, None is default file
        serverbinfilename='fpga.bin',  # name of the binfile on the server
        serverdirname = "//opt//pyrpl//",  # server directory for server app and bitfile
        leds_off=True,  # turn off all GPIO lets at startup (improves analog performance)
        frequency_correction=1.0,  # actual FPGA frequency is 125 MHz * frequency_correction
        timeout=3,  # timeout in seconds for ssh communication
        monitor_server_name='monitor_server',  # name of the server program on redpitaya
        silence_env=False,  # suppress all environment variables that may override the configuration?
        gui=True # show graphical user interface or work on command-line only?
    if you are experiencing problems, try to increase delay, or try
    logging.getLogger().setLevel(logging.DEBUG)"""
    self.logger = logging.getLogger(name=__name__)
    #self.license()
    # make or retrieve the config file
    if isinstance(config, MemoryTree):
        self.c = config
    else:
        self.c = MemoryTree(config)
    # get the parameters right (in order of increasing priority):
    # 1. defaults
    # 2. environment variables
    # 3. config file
    # 4. command line arguments
    # 5. (if missing information) request from GUI or command-line
    self.parameters = defaultparameters # BEWARE: By not copying the
    # dictionary, defaultparameters are modified in the session (which
    # can be advantageous for instance with hostname in unit_tests)
    # get parameters from os.environment variables
    if not self.parameters['silence_env']:
        for k in self.parameters.keys():
            if "REDPITAYA_"+k.upper() in os.environ:
                # environment variable REDPITAYA_<PARAM> overrides the default;
                # the new value is coerced to the type of the default value
                newvalue = os.environ["REDPITAYA_"+k.upper()]
                oldvalue = self.parameters[k]
                self.parameters[k] = type(oldvalue)(newvalue)
                if k == "password": # do not show the password on the screen
                    oldvalue = "********"
                    newvalue = "********"
                self.logger.debug("Variable %s with value %s overwritten "
                                  "by environment variable REDPITAYA_%s "
                                  "with value %s. Use argument "
                                  "'silence_env=True' if this is not "
                                  "desired!",
                                  k, oldvalue, k.upper(), newvalue)
    # settings from config file
    try:
        update_with_typeconversion(self.parameters, self.c._get_or_create('redpitaya')._data)
    except BaseException as e:
        self.logger.warning("An error occured during the loading of your "
                            "Red Pitaya settings from the config file: %s",
                            e)
    # settings from class initialisation / command line
    update_with_typeconversion(self.parameters, kwargs)
    # get missing connection settings from gui/command line
    if self.parameters['hostname'] is None or self.parameters['hostname']=='':
        gui = 'gui' not in self.c._keys() or self.c.gui
        if gui:
            self.logger.info("Please choose the hostname of "
                             "your Red Pitaya in the hostname "
                             "selector window!")
            startup_widget = HostnameSelectorWidget(config=self.parameters)
            hostname_kwds = startup_widget.get_kwds()
        else:
            # NOTE(review): raw_input suggests python-2 style input; presumably
            # aliased for python 3 elsewhere in the package - confirm
            hostname = raw_input('Enter hostname [192.168.1.100]: ')
            hostname = '192.168.1.100' if hostname == '' else hostname
            hostname_kwds = dict(hostname=hostname)
            if not "sshport" in kwargs:
                sshport = raw_input('Enter sshport [22]: ')
                sshport = 22 if sshport == '' else int(sshport)
                hostname_kwds['sshport'] = sshport
            if not 'user' in kwargs:
                user = raw_input('Enter username [root]: ')
                user = 'root' if user == '' else user
                hostname_kwds['user'] = user
            if not 'password' in kwargs:
                password = raw_input('Enter password [root]: ')
                password = 'root' if password == '' else password
                hostname_kwds['password'] = password
        self.parameters.update(hostname_kwds)
    # optional: write configuration back to config file
    self.c["redpitaya"] = self.parameters
    # save default port definition for possible automatic port change
    self.parameters['defaultport'] = self.parameters['port']
    # frequency_correction is accessed by child modules
    self.frequency_correction = self.parameters['frequency_correction']
    # memorize whether server is running - nearly obsolete
    self._serverrunning = False
    self.client = None # client class
    self._slaves = [] # slave interfaces to same redpitaya
    self.modules = OrderedDict() # all submodules
    # provide option to simulate a RedPitaya
    if self.parameters['hostname'] in ['_FAKE_REDPITAYA_', '_FAKE_']:
        self.startdummyclient()
        self.logger.warning("Simulating RedPitaya because (hostname=="
                            +self.parameters["hostname"]+"). Incomplete "
                            "functionality possible. ")
        return
    elif self.parameters['hostname'] in ['_NONE_']:
        self.modules = []
        self.logger.warning("No RedPitaya created (hostname=="
                            + self.parameters["hostname"] + ")."
                            " No hardware modules are available. ")
        return
    # connect to the redpitaya board
    self.start_ssh()
    # start other stuff
    if self.parameters['reloadfpga']: # flash fpga
        self.update_fpga()
    if self.parameters['reloadserver']: # reinstall server app
        self.installserver()
    if self.parameters['autostart']: # start client
        self.start()
    self.logger.info('Successfully connected to Redpitaya with hostname '
                     '%s.'%self.ssh.hostname)
    self.parent = self
def start_ssh(self, attempt=0):
    """
    Establishes an ssh connection to the RedPitaya board
    returns True if a successful connection has been established
    """
    try:
        # close pre-existing connection if necessary
        self.end_ssh()
    except:
        pass  # no connection was open - nothing to close
    if self.parameters['hostname'] == "_FAKE_REDPITAYA_":
        # simulation mode - start without connecting
        self.logger.warning("(Re-)starting client in dummy mode...")
        self.startdummyclient()
        return True
    else: # normal mode - establish ssh connection and
        try:
            # start ssh connection
            self.ssh = SshShell(hostname=self.parameters['hostname'],
                                sshport=self.parameters['sshport'],
                                user=self.parameters['user'],
                                password=self.parameters['password'],
                                delay=self.parameters['delay'],
                                timeout=self.parameters['timeout'])
            # test ssh connection for exceptions
            self.ssh.ask()
        except BaseException as e: # connection problem
            if attempt < 3:
                # try to connect up to 3 times (recursive retry)
                return self.start_ssh(attempt=attempt+1)
            else: # even multiple attempts did not work
                raise ExpectedPyrplError(
                    "\nCould not connect to the Red Pitaya device with "
                    "the following parameters: \n\n"
                    "\thostname: %s\n"
                    "\tssh port: %s\n"
                    "\tusername: %s\n"
                    "\tpassword: ****\n\n"
                    "Please confirm that the device is reachable by typing "
                    "its hostname/ip address into a web browser and "
                    "checking that a page is displayed. \n\n"
                    "Error message: %s" % (self.parameters["hostname"],
                                           self.parameters["sshport"],
                                           self.parameters["user"],
                                           e))
        else:
            # everything went well, connection is established
            # also establish scp connection
            self.ssh.startscp()
            return True
def switch_led(self, gpiopin=0, state=False):
    """Drive GPIO pin *gpiopin* on the board high (state=True) or low (state=False)
    through the Linux sysfs gpio interface, pausing between ssh commands."""
    pause = self.parameters['delay']
    # expose the pin through sysfs
    self.ssh.ask("echo " + str(gpiopin) + " > /sys/class/gpio/export")
    sleep(pause)
    # configure the pin as an output
    self.ssh.ask(
        "echo out > /sys/class/gpio/gpio" +
        str(gpiopin) +
        "/direction")
    sleep(pause)
    # write the requested logic level
    level = "1" if state else "0"
    self.ssh.ask("echo " + level + " > /sys/class/gpio/gpio" +
                 str(gpiopin) + "/value")
    sleep(pause)
def update_fpga(self, filename=None):
    """
    Flashes the RedPitaya's FPGA with a bitfile.
    filename: path of the bitfile to flash. If None, the configuration
    parameter 'filename' is used, falling back to the default bitfile
    shipped with pyrpl if that is missing or does not exist.
    """
    if filename is None:
        try:
            source = self.parameters['filename']
        except KeyError:
            source = None
    else:
        # bugfix: 'source' was previously left undefined when an explicit
        # filename was passed, causing a NameError below
        source = filename
    self.end()
    sleep(self.parameters['delay'])
    self.ssh.ask('rw')  # remount the filesystem read-write
    sleep(self.parameters['delay'])
    self.ssh.ask('mkdir ' + self.parameters['serverdirname'])
    sleep(self.parameters['delay'])
    # fall back to the bitfile shipped with pyrpl if needed
    if source is None or not os.path.isfile(source):
        if source is not None:
            self.logger.warning('Desired bitfile "%s" does not exist. Using default file.',
                                source)
        source = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'fpga', 'red_pitaya.bin')
    if not os.path.isfile(source):
        raise IOError("Wrong filename",
                      "The fpga bitfile was not found at the expected location. Try passing the arguments "
                      "dirname=\"c://github//pyrpl//pyrpl//\" adapted to your installation directory of pyrpl "
                      "and filename=\"red_pitaya.bin\"! Current dirname: "
                      + self.parameters['dirname'] +
                      " current filename: "+self.parameters['filename'])
    # upload the bitfile; retry up to 3 times after re-establishing ssh
    for i in range(3):
        try:
            self.ssh.scp.put(source,
                             os.path.join(self.parameters['serverdirname'],
                                          self.parameters['serverbinfilename']))
        except (SCPException, SSHException):
            # try again before failing
            self.start_ssh()
            sleep(self.parameters['delay'])
        else:
            break
    # kill all other servers to prevent reading while fpga is flashed
    self.end()
    self.ssh.ask('killall nginx')
    self.ssh.ask('systemctl stop redpitaya_nginx') # for 0.94 and higher
    # write the bitfile into the FPGA configuration device
    self.ssh.ask('cat '
                 + os.path.join(self.parameters['serverdirname'], self.parameters['serverbinfilename'])
                 + ' > //dev//xdevcfg')
    sleep(self.parameters['delay'])
    self.ssh.ask('rm -f '+ os.path.join(self.parameters['serverdirname'], self.parameters['serverbinfilename']))
    self.ssh.ask("nginx -p //opt//www//")
    self.ssh.ask('systemctl start redpitaya_nginx') # for 0.94 and higher #needs test
    sleep(self.parameters['delay'])
    self.ssh.ask('ro')  # remount the filesystem read-only
def fpgarecentlyflashed(self):
    """
    Returns True if the bitfile on the board was flashed less than
    10 seconds ago, False otherwise (or if the age cannot be determined).
    """
    self.ssh.ask()
    # ask the board for the age (in seconds) of the uploaded bitfile
    result = self.ssh.ask("echo $(($(date +%s) - $(date +%s -r \""
                          + os.path.join(self.parameters['serverdirname'], self.parameters['serverbinfilename']) + "\")))")
    age = None
    for line in result.split('\n'):
        try:
            age = int(line.strip())
        except:
            pass
        else:
            break
    # bugfix: use "is None" instead of truthiness - an age of 0 seconds
    # (bitfile flashed within the last second) is a valid, recent age and
    # was previously misreported as "could not retrieve"
    if age is None:
        self.logger.debug("Could not retrieve bitfile age from: %s",
                          result)
        return False
    elif age > 10:
        self.logger.debug("Found expired bitfile. Age: %s", age)
        return False
    else:
        self.logger.debug("Found recent bitfile. Age: %s", age)
        return True
def installserver(self):
    """
    Uploads the monitor_server binary to the board and starts it.
    Two binary versions are tried in turn; if neither starts, one retry
    with a random port is attempted before giving up.
    Returns the port number on success, None on failure.
    """
    self.endserver()
    sleep(self.parameters['delay'])
    self.ssh.ask('rw')  # remount filesystem read-write for the upload
    sleep(self.parameters['delay'])
    self.ssh.ask('mkdir ' + self.parameters['serverdirname'])
    sleep(self.parameters['delay'])
    self.ssh.ask("cd " + self.parameters['serverdirname'])
    #try both versions
    for serverfile in ['monitor_server','monitor_server_0.95']:
        sleep(self.parameters['delay'])
        try:
            self.ssh.scp.put(
                os.path.join(os.path.abspath(os.path.dirname(__file__)), 'monitor_server', serverfile),
                self.parameters['serverdirname'] + self.parameters['monitor_server_name'])
        except (SCPException, SSHException):
            self.logger.exception("Upload error. Try again after rebooting your RedPitaya..")
        sleep(self.parameters['delay'])
        self.ssh.ask('chmod 755 ./'+self.parameters['monitor_server_name'])
        sleep(self.parameters['delay'])
        self.ssh.ask('ro')  # remount read-only again
        result = self.ssh.ask("./"+self.parameters['monitor_server_name']+" "+ str(self.parameters['port']))
        sleep(self.parameters['delay'])
        result += self.ssh.ask()
        # "sh" in the console output indicates the shell reported an error
        if not "sh" in result:
            self.logger.debug("Server application started on port %d",
                              self.parameters['port'])
            return self.parameters['port']
        else: # means we tried the wrong binary version. make sure server is not running and try again with next file
            self.endserver()
    #try once more on a different port
    if self.parameters['port'] == self.parameters['defaultport']:
        self.parameters['port'] = random.randint(self.parameters['defaultport'],50000)
        self.logger.warning("Problems to start the server application. Trying again with a different port number %d",self.parameters['port'])
        return self.installserver()
    self.logger.error("Server application could not be started. Try to recompile monitor_server on your RedPitaya (see manual). ")
    return None
def startserver(self):
    """
    (Re-)starts the monitor_server application on the board.
    Returns the port number on success; falls back to installserver()
    if the installed binary fails to start.
    """
    self.endserver()
    sleep(self.parameters['delay'])
    if self.fpgarecentlyflashed():
        # give the FPGA time to finish configuring before talking to it
        self.logger.info("FPGA is being flashed. Please wait for 2 "
                         "seconds.")
        sleep(2.0)
    result = self.ssh.ask(self.parameters['serverdirname']+"/"+self.parameters['monitor_server_name']
                          +" "+ str(self.parameters['port']))
    if not "sh" in result: # sh in result means we tried the wrong binary version
        self.logger.debug("Server application started on port %d",
                          self.parameters['port'])
        self._serverrunning = True
        return self.parameters['port']
    #something went wrong
    return self.installserver()
def endserver(self):
    """Terminates any running monitor_server process on the board."""
    try:
        self.ssh.ask('\x03') #exit running server application
    except:
        self.logger.exception("Server not responding...")
    if 'pitaya' in self.ssh.ask():
        self.logger.debug('>') # formerly 'console ready'
    sleep(self.parameters['delay'])
    # make sure no other monitor_server blocks the port
    self.ssh.ask('killall ' + self.parameters['monitor_server_name'])
    self._serverrunning = False
def endclient(self):
    """Discards the data communication client."""
    del self.client
    self.client = None

def start(self):
    """Starts server and client (optionally turning off the board leds first)."""
    if self.parameters['leds_off']:
        self.switch_led(gpiopin=0, state=False)
        self.switch_led(gpiopin=7, state=False)
    self.startserver()
    sleep(self.parameters['delay'])
    self.startclient()

def end(self):
    """Stops both the server and the client."""
    self.endserver()
    self.endclient()

def end_ssh(self):
    """Closes the ssh channel to the board."""
    self.ssh.channel.close()

def end_all(self):
    """Stops server, client and the ssh connection."""
    self.end()
    self.end_ssh()

def restart(self):
    """Stops and restarts server and client."""
    self.end()
    self.start()
def restartserver(self, port=None):
    """restart the server. usually executed when client encounters an error

    port: if None, keep the current port; if negative, pick a random port;
    otherwise use the given port number.
    Returns the port the server was started on.
    """
    if port is not None:
        if port < 0: #code to try a random port
            self.parameters['port'] = random.randint(2223,50000)
        else:
            self.parameters['port'] = port
    return self.startserver()
def license(self):
    """Logs the license / no-warranty notice."""
    self.logger.info("""\r\n pyrpl Copyright (C) 2014-2017 Leonhard Neuhaus
This program comes with ABSOLUTELY NO WARRANTY; for details read the file
"LICENSE" in the source directory. This is free software, and you are
welcome to redistribute it under certain conditions; read the file
"LICENSE" in the source directory for details.\r\n""")
def startclient(self):
    """Connects a MonitorClient to the running monitor_server and creates the modules."""
    self.client = redpitaya_client.MonitorClient(
        self.parameters['hostname'], self.parameters['port'], restartserver=self.restartserver)
    self.makemodules()
    self.logger.debug("Client started successfully. ")

def startdummyclient(self):
    """Starts a simulated client that requires no hardware, then creates the modules."""
    self.client = redpitaya_client.DummyClient()
    self.makemodules()
def makemodule(self, name, cls):
    """Instantiates module class *cls*, binds it as attribute *name* and
    registers it in self.modules."""
    module = cls(self, name)
    setattr(self, name, module)
    self.modules[name] = module

def makemodules(self):
    """
    Automatically generates modules from the list RedPitaya.cls_modules
    """
    names = get_unique_name_list_from_class_list(self.cls_modules)
    for cls, name in zip(self.cls_modules, names):
        self.makemodule(name, cls)
def make_a_slave(self, port=None, monitor_server_name=None, gui=False):
    """
    Creates a secondary RedPitaya interface to the same board.
    The slave reuses the master's connection parameters but runs its own
    monitor_server on a separate port; the fpga is not re-flashed.
    Returns the new RedPitaya instance (also appended to self._slaves).
    """
    if port is None:
        # derive a port unlikely to collide with the master or other slaves
        port = self.parameters['port'] + len(self._slaves)*10 + 1
    if monitor_server_name is None:
        monitor_server_name = self.parameters['monitor_server_name'] + str(port)
    slaveparameters = dict(self.parameters)  # copy so the master's parameters stay untouched
    slaveparameters.update(dict(
        port=port,
        autostart=True,
        reloadfpga=False,  # fpga is already flashed by the master
        reloadserver=False,
        monitor_server_name=monitor_server_name,
        silence_env=True))
    r = RedPitaya(**slaveparameters) #gui=gui)
    r._master = self
    self._slaves.append(r)
    return r
| mit | 1752ef5eb6e89fd280bf11d18f133142 | 45.825397 | 145 | 0.572415 | 4.410391 | false | false | false | false |
lneuhaus/pyrpl | pyrpl/acquisition_module.py | 1 | 21707 | """
Everything involving asynchronicity in acquisition instruments is in this file.
In particular, this includes getting curves and continuously averaging curves.
Using the coroutine syntax introduced in python 3.4+ would make the code
more elegant, but it would not be compatible with python 2.7. Hence we have
chosen to implement all asynchronous methods such that a promise is returned (a
Future-object in python). The promise implements the following methods:
- await_result(): returns the acquisition result once it is ready.
- add_done_callback(func): the function func(value) is used as "done-callback)
All asynchronous methods also have a blocking equivalent that directly
returns the result once it is ready:
- curve_async <---> curve
- single_async <---> single
Finally, this implementation using standard python Futures makes it
possible to use transparently pyrpl asynchronous methods inside python 3.x
coroutines.
Example:
This example shows a typical acquisition use case where a sequence of
n acquisitions of simultaneous oscilloscope and network analyzer traces
are launched
::
from asyncio import ensure_future, event_loop
async def my_acquisition_routine(n):
for i in range(n):
print("acquiring scope")
fut = ensure_future(p.rp.scope.run_single())
print("acquiring na")
data2 = await p.networkanalyzer.run_single()
# both acquisitions are carried out simultaneously
data1 = await fut
print("loop %i"%i, data1, data2)
ensure_future(my_acquisition_routine(10))
eventloop.run_until_complete()
"""
from copy import copy
from .module_attributes import *
from .async_utils import PyrplFuture, Future, MainThreadTimer, CancelledError
class AcquisitionError(ValueError):
    """Raised when a curve acquisition fails; caught by RunFuture to abort
    or ignore a failed curve depending on the module's running_state."""
    pass
class CurveFuture(PyrplFuture):
    """
    The basic acquisition of instruments is an asynchronous process:
    For instance, when the scope acquisition has been launched, we know
    that the curve won't be ready before duration(), but if the scope is
    waiting for a trigger event, this could take much longer. Of course,
    we want the event loop to stay alive while waiting for a pending curve.
    That's the purpose of this future object.
    After its creation, it will perform the following actions:
    1. stay inactive for a time given by instrument._remaining_time()
    2. after that, it will check every min_delay_ms if a new curve is ready with instrument._data_ready()
    3. when data is ready, its result will be set with the instrument data, as returned by instrument._get_curve()
    """
    def __init__(self, module, min_delay_ms=20):
        self._module = module
        self.min_delay_ms = min_delay_ms
        super(CurveFuture, self).__init__()
        self._init_timer()
        # launching the hardware acquisition is part of the constructor
        self._module._start_acquisition()

    def _init_timer(self):
        # schedule the first poll of _data_ready()
        if self.min_delay_ms == 0:
            # make sure 1st instrument interrogation occurs before time
            delay = self._module._remaining_time() * 1000 - 1
        else:
            # 1 ms loss due to timer inaccuracy is acceptable
            delay = max(self.min_delay_ms,
                        self._module._remaining_time() * 1000)
        self._timer = MainThreadTimer(max(0, delay)) # avoid negative times
        # delays
        self._timer.timeout.connect(self._set_data_as_result)
        self._timer.start()

    def _get_one_curve(self):
        # returns the instrument curve if ready, None otherwise
        if self._module._data_ready():
            return self._module._get_curve()
        else:
            return None

    def _set_data_as_result(self):
        # timer callback: either resolve the future or re-arm the timer
        data = self._get_one_curve()
        if data is not None:
            self.set_result(data)
            if self._module.running_state in ["paused", "stopped"]:
                self._module._free_up_resources()
        else:
            # not ready yet: poll again after min_delay_ms
            self._timer.setInterval(self.min_delay_ms)
            self._timer.start()

    def set_exception(self, exception): # pragma: no cover
        # stop polling before failing the future
        self._timer.stop()
        super(CurveFuture, self).set_exception(exception)

    def cancel(self):
        # stop polling before cancelling the future
        self._timer.stop()
        super(CurveFuture, self).cancel()
class RunFuture(PyrplFuture):
    """
    Uses several CurveFuture to perform an average.
    2 extra functions are provided to control the acquisition:
    pause(): stalls the acquisition
    start(): (re-)starts the acquisition (needs to be called at the beginning)
    The format for curves are:
    - Scope:
        - data_x : self.times
        - data_avg: np.array((ch1, ch2))
    - Specan or NA:
        - data_x : frequencies
        - data_avg: np.array(y_complex)
    """
    def __init__(self, module, min_delay_ms):
        self._run_continuous = False
        self._module = module
        self._min_delay_ms = min_delay_ms
        super(RunFuture, self).__init__()
        self.data_avg = None
        self.data_x = copy(self._module.data_x) # in case it is saved later
        self._fut = None  # the pending CurveFuture
        self.current_avg = 0
        self._paused = True

    def _new_curve_arrived(self, curve):
        # done-callback of the underlying CurveFuture
        try:
            result = curve.result()
        except (AcquisitionError, CancelledError):
            if self._module.running_state in ["running_continuous",
                                              "running_single"]:
                # failed curve while running: silently skip it
                return
            else:
                self.cancel()
                # bugfix: without this return, execution continued below
                # after the future was cancelled, which could still mutate
                # running_state or restart the acquisition
                return
        if self._module.running_state in ["running_continuous",
                                          "running_single"]:
            # moving average over at most trace_average curves
            self.current_avg = min(self.current_avg + 1,
                                   self._module.trace_average)
            if self.data_avg is None:
                self.data_avg = result
            else:
                self.data_avg = (self.data_avg * (self.current_avg - 1) +
                                 result) / self.current_avg
            self._module._emit_signal_by_name('display_curve',
                                              [self._module.data_x,
                                               self.data_avg])
            if self._is_run_over():
                if not self.done():
                    self.set_result(self.data_avg)
                self._module.running_state = "stopped" # should be 'paused'
                # if we want to average over the single run, but for
                # scope and specan, it is more convenient to restart
                # averaging (basically saves the button stop in the GUI)
            else:
                if not self._paused:
                    self.start()

    def _is_run_over(self):
        # a continuous run never terminates by itself
        if self._run_continuous:
            return False
        else:
            return self.current_avg >= self._module.trace_average

    def cancel(self):
        self.pause()
        super(RunFuture, self).cancel()

    def pause(self):
        """Stalls the acquisition (cancels the pending curve)."""
        self._paused = True
        self._module._free_up_resources()
        if self._fut is not None:
            self._fut.cancel()

    def start(self):
        """(Re-)starts the acquisition by launching a new curve."""
        self._paused = False
        if self._fut is not None:
            self._fut.cancel()
        self._fut = self._module._curve_async(self._min_delay_ms)
        self._fut.add_done_callback(self._new_curve_arrived)

    def _set_run_continuous(self):
        """
        Makes the RunFuture continuous (used when setting "running_continuous")
        """
        self._run_continuous = True
        self._min_delay_ms = self._module.MIN_DELAY_CONTINUOUS_MS
class RunningStateProperty(SelectProperty):
    def __init__(self, options=["running_single", "running_continuous", "paused", "stopped"], **kwargs):
        """
        A property to indicate whether the instrument is currently running or not.
        Changing the running_state performs the necessary actions to enable the
        selected state. The state can be one of the following:
        - 'running_single': takes a single acquisition (trace_average averages). Acquisitions are automatically restarted until the desired number of averages is acquired.
        - 'running_continuous': continuously takes acquisitions, eternally averages and restarts automatically.
        - 'paused': acquisition interrupted, but no need to restart averaging at next call of running_continuous.
        - 'stopped': acquisition interrupted, averaging will restart at next call of running_continuous.
        """
        # NOTE(review): mutable default 'options' is shared across instances;
        # harmless as long as SelectProperty does not mutate it - confirm
        super(RunningStateProperty, self).__init__(options=options, **kwargs)
    # Changing running_state is handled here instead of inside _setup()
    # (with a call_setup=True option) because the precise actions to be
    # taken depend on the previous state of running_state. Such a behavior
    # would not be straightforward to implement in _setup()
    def set_value(self, obj, val):
        """
        This is the master property: changing this value triggers all the logic
        to change the acquisition mode
        """
        # touching the running_state cancels the pending curve_future object
        # (no effect if future is already done)
        obj._curve_future.cancel()
        previous_state = obj.running_state
        SelectProperty.set_value(self, obj, val)
        if val == "running_single":
            # acquire as fast as possible trace_average curves
            obj.setup()
        elif val == "running_continuous":
            if previous_state == 'stopped': # restart averaging...
                obj.setup()
            else:
                obj._run_future._set_run_continuous() # if previous run was
                # "running_single" keep averaging in the same run, simply make
                # it continuous
            obj._run_future.start()
        elif val in ["paused", "stopped"]:
            if hasattr(obj, '_run_future'):
                obj._run_future.cancel() # single cannot be resumed
                # on the other hand, continuous can still be started again
                # eventhough it is cancelled. Basically, the result will never
                # be set, but the acquisition can still be going on indefinitely.
class SignalLauncherAcquisitionModule(SignalLauncher):
    """ class that takes care of emitting signals to update all possible
    displays"""
    display_curve = QtCore.Signal(list) # This signal is emitted when
    # curves need to be displayed the argument is [array(times),
    # array(curve1), array(curve2)] or [times, None, array(curve2)]
    autoscale_x = QtCore.Signal()  # request to rescale the x-axis of the display
    # For now, the following signals are only implemented with NA.
    update_point = QtCore.Signal(int) # used in NA only
    scan_finished = QtCore.Signal() # used in NA only
    clear_curve = QtCore.Signal() # NA only
    x_log_toggled = QtCore.Signal() # logscale changed
    # Following signal only implemented in spec an
    unit_changed = QtCore.Signal()
class AcquisitionModule(Module):
    """
    The asynchronous mode is supported by a sub-object "run"
    of the module. When an asynchronous acquisition is running
    and the widget is visible, the current averaged data are
    automatically displayed. Also, the run object provides a
    function save_curve to store the current averaged curve
    on the hard-drive.
    The full API of the "run" object is the following.
    Methods:
        *(All methods return immediately)*
        single(): performs an asynchronous acquisition of trace_average curves.
            The function returns a promise of the result:
            an object with a ready() function, and a get() function that
            blocks until data is ready.
        continuous(): continuously acquires curves, and performs a
            moving average over the trace_average last ones.
        pause(): stops the current acquisition without restarting the
            averaging
        stop(): stops the current acquisition and restarts the averaging.
        save_curve(): saves the currently averaged curve (or curves for scope)
        curve(): the currently averaged curve
    Attributes:
        curve_name (str): name of the curve to create upon saving
        trace_average (int): number of averages in single (not to confuse with
            averaging per point)
        data_avg (array of numbers): array containing the current averaged curve
        current_avg (int): current number of averages
    """
    # The averaged data are stored in a RunFuture object _run_future
    #
    # _setup() recreates from scratch _run_future by calling _new_run_future()
    #
    # It is necessary to setup the AcquisitionModule on startup to start
    # with clean arrays
    #
    # Changing any attribute in callback_attribute (mostly every
    # setup_attribute except running_state) will force a restart of the
    # averaging by calling setup
    #
    # On the other hand, "running_state" has a customized behavior: it will
    # only call setup() when needed and perform customized actions otherwise:
    #  - paused/stopped -> running_single: start acquisition on new future
    #  - paused -> running_continuous: start acquisition on same future + set
    #    future to run_continuous (irreversible)
    #  - stopped -> running_continuous: start acquisition on new future +
    #    set future to run_continuous (irreversible) == call setup()
    #  - running_single/running_continuous -> pause/stop: pause acquisition
    _gui_attributes = ['trace_average', 'curve_name']
    _setup_on_load = True # acquisition_modules need to be setup() once
    # they are loaded
    _signal_launcher = SignalLauncherAcquisitionModule
    _setup_attributes = ['running_state', 'trace_average', 'curve_name']
    _run_future_cls = RunFuture
    _curve_future_cls = CurveFuture
    MIN_DELAY_SINGLE_MS = 0 # async acquisition should be as fast as
    # possible
    MIN_DELAY_CONTINUOUS_MS = 40 # leave time for the event loop in
    # continuous
    running_state = RunningStateProperty(
        default='stopped',
        doc="Indicates whether the instrument is running acquisitions or not. "
            "See :class:`RunningStateProperty` for available options. ")
    trace_average = IntProperty(doc="number of curves to average in single mode. In "
                                    "continuous mode, a moving window average is "
                                    "performed.",
                                default=1,
                                min=1)
    curve_name = StringProperty(doc="name of the curve to save.")

    def __init__(self, parent, name=None):
        # The curve promise is initialized with a dummy Future, because
        # instantiating CurveFuture launches a curve acquisition
        self._curve_future = Future()
        super(AcquisitionModule, self).__init__(parent, name=name)
        self.curve_name = self.name + " curve"
        self._run_future = self._run_future_cls(self,
                                                min_delay_ms=self.MIN_DELAY_SINGLE_MS)
        # On the other hand, RunFuture has a start method and is not started
        # at instanciation.

    def _new_curve_future(self, min_delay_ms):
        # cancel the pending curve (if any) and launch a new one
        self._curve_future.cancel()
        self._curve_future = self._curve_future_cls(self,
                                                    min_delay_ms=min_delay_ms)

    def _new_run_future(self):
        # recreate the RunFuture matching the current running_state
        if hasattr(self, "_run_future"):
            self._run_future.cancel()
        if self.running_state == "running_continuous":
            self._run_future = self._run_future_cls(self,
                                                    min_delay_ms=self.MIN_DELAY_CONTINUOUS_MS)
            self._run_future._set_run_continuous()
        else:
            self._run_future = self._run_future_cls(self,
                                                    min_delay_ms=self.MIN_DELAY_SINGLE_MS)

    def _emit_signal_by_name(self, signal_name, *args, **kwds):
        """Let's the module's signal_launcher emit signal name"""
        self._signal_launcher.emit_signal_by_name(signal_name, *args, **kwds)

    def _curve_async(self, min_delay_ms):
        """
        Same as curve_async except this function can be used in any
        running_state.
        """
        self._start_acquisition()
        self._new_curve_future(min_delay_ms=min_delay_ms)
        return self._curve_future

    def curve_async(self):
        """
        Launches the acquisition for one curve with the current parameters.
        - If running_state is not "stopped", stops the current acquisition.
        - If rolling_mode is True, raises an exception.
        - Immediately returns a future object representing the curve.
        - The curve can be retrieved by calling result(timeout) on the future object.
        - The future is cancelled if the instrument's state is changed before the end of the acquisition, or another call to curve_async() or curve() is made on the same instrument.
        """
        # bugfix: 'is not' compares identity; '!=' is the correct string
        # comparison (the old form relied on CPython string interning and
        # raises a SyntaxWarning on recent Python versions)
        if self.running_state != "stopped":
            self.stop()
        return self._curve_async(0)

    def curve(self, timeout=None):
        """
        Same as curve_async, except:
        - the function will not return until the curve is ready or timeout occurs.
        - the function directly returns an array with the curve instead of a future object
        """
        return self.curve_async().await_result(timeout)

    def single_async(self):
        """
        Performs an asynchronous acquisition of trace_average curves.
        - If running_state is not stop, stops the current acquisition.
        - Immediately returns a future object representing the curve.
        - The curve can be retrieved by calling result(timeout) on the future object.
        - The future is cancelled if the instrument's state is changed before the end of the acquisition.
        """
        self.running_state = 'running_single'
        return self._run_future

    def single(self, timeout=None):
        """
        Same as single_async, except:
        - the function will not return until the averaged curve is ready or timeout occurs.
        - the function directly returns an array with the curve instead of a future object.
        """
        return self.single_async().await_result(timeout)

    def continuous(self):
        """
        continuously acquires curves, and performs a moving
        average over the trace_average last ones.
        """
        self.running_state = 'running_continuous'
        # return self._continuous_future

    def pause(self):
        """
        Stops the current acquisition without restarting the averaging
        """
        self.running_state = 'paused'

    def stop(self):
        """
        Stops the current acquisition and averaging will be restarted
        at next run.
        """
        self.running_state = 'stopped'

    def save_curve(self):
        """
        Saves the curve(s) that is (are) currently displayed in the gui in
        the db_system. Also, returns the list [curve_ch1, curve_ch2]...
        """
        params = self.setup_attributes
        params.update(name=self.curve_name)
        curve = self._save_curve(self._run_future.data_x,
                                 self._run_future.data_avg,
                                 **params)
        return curve

    def _clear(self):
        super(AcquisitionModule, self)._clear()
        self._curve_future.cancel()
        self._run_future.cancel()

    def _setup(self):
        # the _run_future is renewed to match the requested type of run (
        # rolling_mode or triggered)
        # This is how we make sure changing duration or rolling_mode won't
        # freeze the acquisition.
        self._new_run_future()
        if self.running_state in ["running_single", "running_continuous"]:
            self._run_future.start()
        self._emit_signal_by_name("autoscale_x")

    # Methods to implement in derived class:
    # --------------------------------------
    def _remaining_time(self):
        """
        remaining time (in seconds) until the data has a chance to be ready.
        In the case of scope, where trigger might delay the acquisition,
        this is the minimum time to wait in the "best case scenario" where
        the acquisition would have started immediately after setup().
        """
        raise NotImplementedError("To implement in derived class") # pragma: no cover

    def _data_ready(self):
        """
        :return: True or False
        """
        raise NotImplementedError('To implement in derived class') # pragma: no cover

    def _get_curve(self):
        """
        get the curve from the instrument.
        a 1D array for single channel instruments
        a 2*n array for the scope
        """
        raise NotImplementedError # pragma: no cover

    @property
    def data_x(self):
        """
        x-axis of the curves to plot.
        :return:
        """
        raise NotImplementedError("To implement in derived class") # pragma: no cover

    def _start_acquisition(self):
        """
        If anything has to be communicated to the hardware (such as make
        trigger ready...) to start the acquisition, it should be done here.
        This function will be called only be called by the init-function of
        the _curve_future()
        Only non-blocking operations are allowed.
        """
        pass # pragma: no cover

    def _free_up_resources(self):
        pass # pragma: no cover

    # Shortcut to the RunFuture data (for plotting):
    # ----------------------------------------------
    @property
    def data_avg(self):
        return self._run_future.data_avg

    @property
    def current_avg(self):
        return self._run_future.current_avg
| mit | 02a5909dac36e825cab59578a28db904 | 38.253165 | 181 | 0.616806 | 4.321521 | false | false | false | false |
lneuhaus/pyrpl | pyrpl/hardware_modules/sampler.py | 1 | 2772 | import numpy as np
from ..pyrpl_utils import time
from ..attributes import FloatRegister
from ..modules import HardwareModule
from . import DSP_INPUTS
class Sampler(HardwareModule):
    """ this module provides a sample of each signal.
    This is a momentary workaround, will be improved later on with an upgraded FPGA version """
    addr_base = 0x40300000

    def stats(self, signal="in1", t=1e-2):
        """
        computes the mean, standard deviation, min and max of the chosen signal over duration t
        Parameters
        ----------
        signal: input signal (a string, or a module whose name is used)
        t: duration (in seconds) over which to average
        Returns
        -------
        mean, stddev, max, min: statistics of all samples taken during t
        """
        try: # signal can be a string, or a module (whose name is the name of the signal we'll use)
            signal = signal.name
        except AttributeError:
            pass
        n_samples = 0
        total = 0
        total_sq = 0
        # renamed from 'max'/'min' to avoid shadowing the builtins
        max_value = -np.inf
        min_value = np.inf
        t0 = time() # get start time
        while n_samples == 0 or time() < t0 + t: # take at least one sample
            n_samples += 1
            value = getattr(self, signal)  # reads the current register value
            total += value
            total_sq += (value ** 2.0)
            if value > max_value:
                max_value = value
            if value < min_value:
                min_value = value
        n_samples = float(n_samples)
        mean = total / n_samples
        variance = (total_sq / n_samples - mean ** 2.0)
        # while mathematically nonsense, this can happen numerically
        if variance < 0:
            # this means the variance is tiny and can be assumed zero
            variance = 0
        stddev = variance ** 0.5
        return mean, stddev, max_value, min_value

    def mean_stddev(self, signal="in1", t=1e-2):
        """
        computes the mean and standard deviation of the chosen signal
        Deprecated: use stats() instead.
        Parameters
        ----------
        signal: input signal
        t: duration over which to average
        Returns
        -------
        mean, stddev: mean and standard deviation of all samples
        """
        self._logger.warning("Sampler.mean_stddev() is obsolete. Please use "
                             "Sampler.stats() instead!")
        mean, stddev, max_value, min_value = self.stats(signal=signal, t=t)
        return mean, stddev
# generate one attribute in Sampler for each DSP signal:
# each signal's current value lives at offset 0x10 within its own
# 0x10000-wide register block (14-bit value; norm presumably converts
# raw counts to volts — confirm against FloatRegister)
for inp, num in DSP_INPUTS.items():
    setattr(Sampler,
            inp,
            FloatRegister(
                0x10 + num * 0x10000,
                bits=14,
                norm=2 ** 13 - 1,
                doc="current value of " + inp))
| mit | 25b9829bc35e8928f4d1aa42f64e811a | 29.8 | 100 | 0.550144 | 4.428115 | false | false | false | false |
lneuhaus/pyrpl | pyrpl/software_modules/lockbox/input.py | 1 | 24829 | from __future__ import division
import scipy
import numpy as np
import logging
from ...attributes import SelectProperty, FloatProperty, FrequencyProperty, \
PhaseProperty, FilterProperty, FrequencyRegister, ProxyProperty
from ...widgets.module_widgets import LockboxInputWidget
from ...hardware_modules.dsp import DSP_INPUTS, InputSelectProperty, all_inputs
from ...pyrpl_utils import time, recursive_getattr
from ...module_attributes import ModuleProperty
from ...software_modules.lockbox import LockboxModule, LockboxModuleDictProperty
from ...modules import SignalModule
from ...software_modules.module_managers import InsufficientResourceError
logger = logging.getLogger(__name__)
class CalibrationData(LockboxModule):
    """
    Class to hold the calibration data of an input signal.

    Stores the min/max/mean/rms of the signal over a lockbox sweep,
    together with the analog offset of the acquisition channel.
    """
    _setup_attributes = ["min", "max", "mean", "rms", "_analog_offset", "_asg_phase"]
    _gui_attributes = []
    min = FloatProperty(doc="min of the signal in V over a lockbox sweep")
    max = FloatProperty(doc="max of the signal in V over a lockbox sweep")
    mean = FloatProperty(doc="mean of the signal in V over a lockbox sweep")
    rms = FloatProperty(min=0, max=2, doc="rms of the signal in V over a "
                                          "lockbox sweep")
    _analog_offset = FloatProperty(default=0.0, doc="analog offset of the signal")
    _analog_offset_rms = FloatProperty(default=0.0, doc="rms of the analog offset of the signal")
    _asg_phase = PhaseProperty(doc="Phase of the asg when error signal is centered "
                                   "in calibration. Not used by all signals. ")

    @property
    def amplitude(self):
        """half the peak-to-peak amplitude (helper for expected_signal)"""
        return 0.5 * (self.max - self.min)

    @property
    def peak_to_peak(self):
        """peak-to-peak amplitude of the signal (helper for expected_signal)"""
        return self.max - self.min

    @property
    def offset(self):
        """mid-point between min and max (helper for expected_signal)"""
        return 0.5 * (self.max + self.min)

    def get_stats_from_curve(self, curve):
        """
        Gets the mean, min, max, rms value of curve (into the corresponding
        self's attributes). Does nothing but warn if curve is None.
        """
        if curve is None:
            # bugfix: was `self.logger`, which is not defined on modules;
            # every other module in this file uses `self._logger`
            self._logger.warning("Curve object for calibration is None. No calibration will be performed.")
        else:
            self.mean = curve.mean()
            self.rms = curve.std()
            self.min = curve.min()
            self.max = curve.max()
class Signal(LockboxModule, SignalModule):
    """
    Representation of a physical signal.

    Can be either an input or an output signal.
    """
    _widget = None
    calibration_data = ModuleProperty(CalibrationData)

    def signal(self):
        """ derived class should define this method which yields the scope-
        compatible signal that can be used to monitor this signal"""
        # bugfix: the name was passed as a second argument to ValueError
        # and never interpolated into the message; also removed the
        # unreachable `return 'off'` that followed the raise
        raise ValueError("Please define the method 'signal()' of the Signal "
                         "%s to return a valid scope-compatible input."
                         % self.name)

    def get_analog_offset(self, duration=1.0):
        """ function to acquire the analog offset of the signal (with nothing connected).
        This offset is subtracted from all raw signals"""
        # make sure the lockbox is unlocked, just in case
        self.lockbox.unlock()
        # sample the input with a rather long duration to get a good average
        self.stats(t=duration)
        current_residual_offset, current_rms = self.mean, self.rms
        last_offset = self.calibration_data._analog_offset
        # the sampled mean is the residual offset on top of the offset
        # already being subtracted: new offset = old offset + residual
        current_offset = last_offset + current_residual_offset
        self.calibration_data._analog_offset = current_offset
        self.calibration_data._analog_offset_rms = current_rms
        self._logger.info("Calibrated analog offset of signal %s. "
                          "Old value: %s, new value: %s, difference: %s. "
                          "Rms of the measurement: %s.",
                          self.name,
                          last_offset,
                          self.calibration_data._analog_offset,
                          current_residual_offset,
                          current_rms)

    ##################################################
    # Sampler routines for diagnostics of the signal #
    ##################################################
    @property
    def sampler_time(self):
        """ specifies the duration over which to sample a signal """
        # per-signal setting takes precedence over the lockbox-wide one;
        # fall back to 10 ms if neither is set
        if getattr(self, '_sampler_time', None) is not None:
            return self._sampler_time
        elif getattr(self.lockbox, '_sampler_time', None) is not None:
            return self.lockbox._sampler_time
        else:
            return 0.01

    def stats(self, t=None):
        """
        Returns a tuple containing the mean, rms, max, and min of the signal.

        Parameters
        ----------
        t: float or None
            duration over which to sample; None reuses recent cached
            samples if available, sampling for sampler_time otherwise.
        """
        # generate new samples for mean, rms, max, min if
        # a) executed for the first time,
        # b) nonstandard sampler time,
        # c) last sample older than sampler time.
        # Point c) ensures that we can call stats several times in
        # immediate succession, e.g. to get mean and rms
        if not hasattr(self, '_lasttime') or t is not None or \
                time() - self._lasttime >= self.sampler_time:
            # choose sampler time
            if t is None:
                t = self.sampler_time
            # get fresh data
            self._lastmean, self._lastrms, self._lastmax, self._lastmin \
                = self.pyrpl.rp.sampler.stats(self.signal(), t=t)
            # subtract analog offset from all non-relative values
            self._lastmean -= self.calibration_data._analog_offset
            self._lastmax -= self.calibration_data._analog_offset
            self._lastmin -= self.calibration_data._analog_offset
            # save a timestamp and the employed sampler time
            self._lasttime = time()
            self._lastt = t
        return self._lastmean, self._lastrms, self._lastmax, self._lastmin

    @property
    def mean(self):
        """mean of the signal (in V) over the last sampling interval"""
        return self.stats()[0]

    @property
    def rms(self):
        """rms of the signal (in V) over the last sampling interval"""
        return self.stats()[1]

    @property
    def max(self):
        """max of the signal (in V) over the last sampling interval"""
        return self.stats()[2]

    @property
    def min(self):
        """min of the signal (in V) over the last sampling interval"""
        return self.stats()[3]

    @property
    def relative_mean(self):
        """
        returns the ratio between the measured mean value and the
        calibrated signal amplitude.
        """
        return self.mean / self.calibration_data.amplitude

    @property
    def relative_rms(self):
        """
        returns the ratio between the measured rms value and the
        calibrated signal amplitude.
        """
        return self.rms / self.calibration_data.amplitude

    def diagnostics(self, duration=1.0):
        """
        example code for lock diagnostics:

        Parameters
        ----------
        duration: duration over which to average

        Returns
        -------
        rms of the signal over `duration`, normalized by the calibrated
        signal amplitude (i.e. relative_rms).
        """
        # samples the input over duration
        self.stats(t=duration)
        return self.relative_rms
class InputSignal(Signal):
    """
    A Signal that corresponds to an input signal of the DSPModule inside the
    RedPitaya. Moreover, the signal should provide a function to convert the
    measured voltage into the value of the model's physical variable in
    *unit*. The signal can be calibrated by taking a curve while scanning
    an output.

    module attributes (see BaseModule):
    -----------------------------------
    - input_channel: the redpitaya dsp input representing the signal
    - min: min of the signal in V over a lockbox sweep
    - max: max of the signal in V over a lockbox sweep
    - mean: mean of the signal in V over a lockbox sweep
    - rms: rms of the signal in V over a lockbox sweep

    public methods:
    ---------------
    - acquire(): returns an experimental curve in V obtained from a sweep of
      the lockbox.
    - calibrate(): acquires a curve and determines all constants needed by
      expected_signal
    - expected_signal(variable): to be reimplemented in concrete derived class:
      Returns the value of the expected signal in V, depending on the variable
      value.
    - expected_slope: returns the slope of the expected signal wrt variable at
      a given value of the variable.
    - relative_mean(self): returns the ratio between the measured mean value
      and the expected one.
    - relative_rms(self): returns the ratio between the measured rms value and
      the expected mean.
    - variable(): Estimates the model variable from the current value of
      the input.
    """
    _setup_attributes = ["input_signal"]
    _gui_attributes = ["input_signal"]
    _widget_class = LockboxInputWidget
    plot_range = np.linspace(-5, 5, 200)  # range of setpoint values over which to plot signal
    input_signal = InputSelectProperty(call_setup=True,
                                       doc="the dsp module or lockbox "
                                           "signal used as input signal")

    def __init__(self, parent, name=None):
        # self.parameters = dict()
        # initialize the sample timestamp far in the past so that the first
        # call to stats() always acquires fresh data
        self._lasttime = -1e10
        super(InputSignal, self).__init__(parent, name=name)

    def _input_signal_dsp_module(self):
        """ returns the dsp signal corresponding to input_signal"""
        signal = self.input_signal
        # problem arises if there is a long loop of logical signals -> iterate
        for i in range(5):  # try at most 5 hierarchy levels
            try:
                signal = recursive_getattr(self.pyrpl, signal).signal()
            except:  # do not insist on this to work as signal may be a str
                pass
            if signal in DSP_INPUTS:
                return signal
        # no break ever occured
        self._logger.warning("Input signal of input %s cannot be traced "
                             "to a valid dsp input (it yields %s). Input "
                             "will be turned 'off'.",
                             self.name, signal)
        return 'off'

    def signal(self):
        """ returns the signal corresponding to this module that can be used to connect the signal to other modules.
        By default, this is the direct input signal. """
        return self._input_signal_dsp_module()

    def sweep_acquire(self):
        """
        returns an experimental curve in V obtained from a sweep of the
        lockbox.

        Returns (curve, times), or (None, None) if no scope was available.
        """
        try:
            # reserve a scope from the pool for the duration of the sweep
            with self.pyrpl.scopes.pop(self.name) as scope:
                self.lockbox._sweep()
                if "sweep" in scope.states:
                    scope.load_state("sweep")
                else:
                    # ch1: this input signal; ch2: the sweep output;
                    # trigger on the sweep generator, one full sweep period
                    scope.setup(input1=self.signal(),
                                input2=self.lockbox.outputs[self.lockbox.default_sweep_output].pid.output_direct,
                                trigger_source=self.lockbox.asg.name,
                                trigger_delay=0,
                                duration=1./self.lockbox.asg.frequency,
                                ch1_active=True,
                                ch2_active=True,
                                average=True,
                                trace_average=1,
                                running_state='stopped',
                                rolling_mode=False)
                    scope.save_state("autosweep")
                curve1, curve2 = scope.curve(timeout=1./self.lockbox.asg.frequency+scope.duration)
                times = scope.times
                # remove the calibrated analog offset from the raw curve
                curve1 -= self.calibration_data._analog_offset
                return curve1, times
        except InsufficientResourceError:
            # scope is blocked
            self._logger.warning("No free scopes left for sweep_acquire. ")
            return None, None

    def calibrate(self, autosave=False):
        """
        This function should be reimplemented to measure whatever property of
        the curve is needed by expected_signal.
        """
        curve, times = self.sweep_acquire()
        if curve is None:
            self._logger.warning('Aborting calibration because no scope is available...')
            return None
        self.calibration_data.get_stats_from_curve(curve)
        # log calibration values
        self._logger.info("%s calibration successful - Min: %.3f Max: %.3f Mean: %.3f Rms: %.3f",
                          self.name,
                          self.calibration_data.min,
                          self.calibration_data.max,
                          self.calibration_data.mean,
                          self.calibration_data.rms)
        # update graph in lockbox
        self.lockbox._signal_launcher.input_calibrated.emit([self])
        # save data if desired
        if autosave:
            params = self.calibration_data.setup_attributes
            params['name'] = self.name + "_calibration"
            newcurve = self._save_curve(times, curve, **params)
            self.calibration_data.curve = newcurve
            return newcurve
        else:
            return None

    def expected_signal(self, variable):
        """
        Returns the value of the expected signal in V, depending on the
        setpoint value "variable".
        """
        raise NotImplementedError("Formula relating variable and parameters to output should be implemented in derived "
                                  "class")

    def expected_slope(self, variable):
        """
        Returns the slope of the expected signal wrt variable at a given value
        of the variable. May be overwritten by a more efficient (analytical) method
        in a derived class.
        """
        # numerical first derivative of expected_signal around `variable`
        return scipy.misc.derivative(self.expected_signal,
                                     variable,
                                     dx=1e-9,
                                     n=1,  # first derivative
                                     order=3)

    def is_locked(self, loglevel=logging.INFO):
        """ returns whether the input is locked at the current stage """
        # supposed to be locked at this value
        setpoint = self.lockbox.current_stage.setpoint
        # current values
        actmean, actrms = self.mean, self.rms
        # get max, min of acceptable error signals
        error_threshold = self.lockbox.is_locked_threshold
        min = self.expected_signal(setpoint-error_threshold)
        max = self.expected_signal(setpoint+error_threshold)
        startslope = self.expected_slope(setpoint - error_threshold)
        stopslope = self.expected_slope(setpoint + error_threshold)
        # no guarantee that min<max
        if max < min:
            # swap them in this case
            max, min = min, max
        # now min < max
        # if slopes have unequal signs, the signal has a max/min in the
        # interval
        if startslope*stopslope <= 0:
            if startslope > stopslope:  # maximum in between, ignore upper limit
                max = np.inf
            elif startslope < stopslope:  # minimum, ignore lower limit
                min = -np.inf
        if actmean > max or actmean < min:
            self._logger.log(loglevel,
                             "Not locked at stage %s: "
                             "input %s value of %.2f +- %.2f (setpoint %.2f)"
                             "is not in error interval [%.2f, %.2f].",
                             self.lockbox.current_stage.name,
                             self.name,
                             actmean,
                             actrms,
                             self.expected_signal(setpoint),
                             min,
                             max)
            return False
        # lock seems ok
        self._logger.log(loglevel,
                         "Locked at stage %s: "
                         "input %s value is %.2f +- %.2f (setpoint %.2f).",
                         self.lockbox.current_stage.name,
                         self.name,
                         actmean,
                         actrms,
                         self.expected_signal(setpoint))
        return True

    # inverse is temporarily broken
    #
    # def inverse(self, func, y, x0, args=()):
    #     """
    #     Finds a solution x to the equation y = func(x) in the vicinity of x0.
    #
    #     Parameters
    #     ----------
    #     func: function
    #         the function
    #     y: float or np.array(,dtype=float)
    #         the desired value of the function
    #     x0: float
    #         the starting point for the search
    #     args: tuple
    #         optional arguments to pass to func
    #
    #     Returns
    #     -------
    #     x: float
    #         the solution. None if no inverse could be found.
    #     """
    #     try:
    #         inverse = [self._inverse(self.expected_signal, yy, x0, args=args) for yy in y]
    #         if len(inverse) == 1:
    #             return inverse[0]
    #         else:
    #             return inverse
    #     except TypeError:
    #         def myfunc(x, *args):
    #             return func(x, *args) - y
    #         solution, infodict, ier, mesg = scipy.optimize.fsolve(
    #                      myfunc,
    #                      x0,
    #                      args=args,
    #                      xtol=1e-6,
    #                      epsfcn=1e-8,
    #                      fprime=self.__getattribute__(func.__name__+'_slope'),
    #                      full_output=True)
    #         if ier == 1:  # means solution was found
    #             return solution[0]
    #         else:
    #             return None
    #
    # def variable(self):
    #     """
    #     Estimates the model variable from the current value of the input.
    #     """
    #     curve = self.sweep_acquire()
    #     act = curve.mean()
    #     set = self.lockbox.setpoint
    #     variable = self.inverse(act, set)
    #     if variable is not None:
    #         return variable
    #     else:
    #         logger.warning("%s could not be estimated. Run a calibration!",
    #                        self._variable)
    #         return None

    def _create_widget(self):
        # create the widget and try to display the expected-signal graph;
        # the graph is best-effort only (e.g. before any calibration)
        widget = super(InputSignal, self)._create_widget()
        try:
            self.update_graph()
        except:
            pass
        return widget
class InputDirect(InputSignal):
    """input signal whose expected value is the setpoint variable itself."""
    def expected_signal(self, x):
        # identity: signal in V equals the variable
        return x
class InputFromOutput(InputDirect):
    """
    Input signal that monitors one of the lockbox's own output signals
    (e.g. for coarse locking on the control signal of another loop).
    """
    def calibrate(self, autosave=False):
        """ no need to calibrate this """
        pass

    input_signal = InputSelectProperty(
        options=(lambda instance:
                 ['lockbox.outputs.'+k for k in instance.lockbox.outputs.keys()]),
        doc="lockbox signal used as input")

    def is_locked(self, loglevel=logging.INFO):
        """ this is mainly used for coarse locking where significant
        effective deviations from the setpoint (in units of setpoint_variable)
        may occur. We therefore issue a warning and return True if is_locked is
        based on this output. """
        # find the input whose dsp signal feeds the monitored output's pid,
        # and forward the is_locked decision to that input if possible
        inputdsp = self.lockbox.signals[self.input_signal.split('.')[-1]].pid.input
        forwarded_input = None
        for inp in self.lockbox.inputs:
            if inp.signal() == inputdsp:
                forwarded_input = inp
                break
        if forwarded_input is not None:
            self._logger.debug("is_locked() for InputFromOutput '%s' is "
                               "forwarded to is_locked() of input signal '%s'.",
                               self.name, forwarded_input.name)
            return forwarded_input.is_locked(loglevel=loglevel)
        else:
            self._logger.warning("is_locked() for InputFromOutput '%s' is not "
                                 "implemented. No input for forwarding found.",
                                 self.name)
            return True

    def expected_signal(self, setpoint):
        """ it is assumed that the output has the linear relationship between
        setpoint change in output_unit per volt from the redpitaya, which
        is configured in the output parameter 'dc_gain'. We only need to
        convert units to get the output voltage bringing about a given
        setpoint difference. """
        # An example:
        # The configured output gain is 'output.dc_gain' nm/V.
        # setpoint_unit is cavity 'linewidth', the latter given by
        # 'lockbox._setpoint_unit_in_unit('nm')' (in nm).
        # Therefore, the output voltage corresponding to a change of
        # one linewidth is given (in V) by:
        # lockbox._setpoint_unit_in_unit('nm')/output.dc_gain
        output = self.lockbox.signals[self.input_signal.split('.')[-1]]
        output_unit = output.unit.split('/')[0]
        setpoint_in_output_unit = \
            setpoint * self.lockbox._setpoint_unit_in_unit(output_unit)
        return setpoint_in_output_unit / output.dc_gain
class IqQuadratureFactorProperty(FloatProperty):
    """
    Proxy property that forwards reads and writes directly to the
    underlying iq module's quadrature_factor, so that FloatProperty's
    validate_and_normalize machinery is reused.
    """
    def set_value(self, instance, value):
        # write through to the hardware iq module
        instance.iq.quadrature_factor = value
        return value

    def get_value(self, instance):
        # read back from the hardware iq module
        return instance.iq.quadrature_factor
class IqFilterProperty(FilterProperty):
    """
    Filter property that forwards the configured bandwidth value(s) to
    the underlying iq module and reads the effective value back.
    """
    def set_value(self, instance, val):
        try:
            val = list(val)
        except TypeError:
            # narrowed from a bare `except:`: only a non-iterable scalar
            # should be duplicated here
            val = [val, val]  # preferentially choose second order filter
        instance.iq.bandwidth = val
        # propagate the effective (hardware-rounded) value upstream
        super(IqFilterProperty, self).set_value(instance,
                                                self.get_value(instance))
        return val

    def get_value(self, instance):
        return instance.iq.bandwidth

    def valid_frequencies(self, module):
        # only allow the low-pass filter options (exclude negative high-pass options)
        return [v for v in module.iq.__class__.bandwidth.valid_frequencies(module.iq) if v >= 0]
class InputIq(InputSignal):
    """ Base class for demodulated signals. A derived class must implement
    the method expected_signal (see InputPdh in fabryperot.py for example)"""
    _gui_attributes = ['mod_freq',
                       'mod_amp',
                       'mod_phase',
                       'mod_output',
                       'bandwidth',
                       'quadrature_factor']
    _setup_attributes = _gui_attributes

    @property
    def acbandwidth(self):
        # ac-coupling cutoff, fixed at mod_freq/128 (well below the
        # modulation frequency)
        return self.mod_freq / 128.0

    # mod_freq = ProxyProperty("iq.frequency")
    # mod_amp = ProxyProperty("iq.amplitude")
    # mod_phase = ProxyProperty("iq.phase")
    # mod_output = ProxyProperty("iq.output_direct")
    # quadrature_factor = ProxyProperty("iq.quadrature_factor")
    # bandwidth = ProxyProperty("iq.bandwidth")

    mod_freq = FrequencyProperty(min=0.0,
                                 max=FrequencyRegister.CLOCK_FREQUENCY / 2.0,
                                 default=0.0,
                                 call_setup=True)
    mod_amp = FloatProperty(min=-1, max=1, default=0.0, call_setup=True)
    mod_phase = PhaseProperty(call_setup=True)
    mod_output = SelectProperty(['out1', 'out2'], call_setup=True)
    quadrature_factor = IqQuadratureFactorProperty(call_setup=True)
    bandwidth = IqFilterProperty(call_setup=True)

    @property
    def iq(self):
        # lazily reserve an iq module from the pool on first access
        if not hasattr(self, '_iq') or self._iq is None:
            self._iq = self.pyrpl.iqs.pop(self.name)
        return self._iq

    def signal(self):
        # the monitored signal is the reserved iq module's output
        return self.iq.name

    def _clear(self):
        # return the reserved iq module to the pool before clearing
        self.pyrpl.iqs.free(self.iq)
        self._iq = None
        super(InputIq, self)._clear()

    def _setup(self):
        """
        setup a PDH error signal using the attribute values
        """
        self.iq.setup(frequency=self.mod_freq,
                      amplitude=self.mod_amp,
                      phase=self.mod_phase,
                      input=self._input_signal_dsp_module(),
                      gain=0,
                      bandwidth=self.bandwidth,
                      acbandwidth=self.acbandwidth,
                      quadrature_factor=self.quadrature_factor,
                      output_signal='quadrature',
                      output_direct=self.mod_output)
| mit | 0a4b49ad32aa13939d946b53b8c95d77 | 39.306818 | 120 | 0.564743 | 4.425058 | false | false | false | false |
lneuhaus/pyrpl | pyrpl/widgets/pyrpl_widget.py | 1 | 13403 | from qtpy import QtCore, QtWidgets
import sys
from traceback import format_exception, format_exception_only
import logging
from .. import APP
class ExceptionLauncher(QtCore.QObject):
    """
    Forwards exceptions and log records to the GUI.

    Used to display exceptions in the status bar of PyrplWidgets. Qt
    signals are used to make sure no thread other than the GUI thread
    is messing with the gui.
    """
    show_exception = QtCore.Signal(list)
    show_log = QtCore.Signal(list)

    def display_exception(self, etype, evalue, tb):
        """Emit the exception to the GUI, then chain to the previous hook."""
        self.show_exception.emit([etype, evalue, tb])
        # old_except_hook is installed externally by patch_excepthook()
        self.old_except_hook(etype, evalue, tb)

    def display_log(self, record):
        """Emit a log record to the GUI."""
        self.show_log.emit([record])
# single shared launcher instance for the whole process
EL = ExceptionLauncher()

# Exceptions raised by the event loop should be displayed in the MainWindow status_bar.
# see http://stackoverflow.com/questions/40608610/exceptions-in-pyqt-event-loop-and-ipython
# when running in ipython, we have to monkeypatch sys.excepthook in the qevent loop.
def patch_excepthook():
    # remember the previous hook so ExceptionLauncher can chain to it
    EL.old_except_hook = sys.excepthook
    sys.excepthook = EL.display_exception

# single-shot zero-delay timer: installs the excepthook once the Qt event
# loop is running (see the stackoverflow link above for why this is needed)
TIMER = QtCore.QTimer()
TIMER.setSingleShot(True)
TIMER.setInterval(0)
TIMER.timeout.connect(patch_excepthook)
TIMER.start()
class LogHandler(QtCore.QObject, logging.Handler):
    """
    A logging handler that sends formatted log strings to the Qt GUI
    via the show_log signal.
    """
    # emitted with a single-element list containing the formatted message
    show_log = QtCore.Signal(list)

    def __init__(self):
        """
        Initialize the handler
        """
        logging.Handler.__init__(self)
        QtCore.QObject.__init__(self)
        # set format of logged messages
        self.setFormatter(logging.Formatter('%(levelname)s (%(name)s): %(message)s'))

    def emit(self, record):
        """
        Emit a record.
        """
        try:
            msg = self.format(record)
            self.show_log.emit([msg])
            #EL.display_log(record)
        except (KeyboardInterrupt, SystemExit):
            # never swallow interpreter-exit exceptions
            raise
        except:
            # standard logging convention: report the failure without
            # raising from inside the logging machinery
            self.handleError(record)
class MyDockWidget(QtWidgets.QDockWidget):
    """
    A DockWidget where the inner widget is only created when needed (To reduce load times).
    """
    scrollable = True  # use scroll bars?

    def __init__(self, create_widget_func, name):
        """
        create_widget_func is a function to create the widget.
        """
        super(MyDockWidget, self).__init__(name)
        self.setObjectName(name)
        self.setFeatures(
            QtWidgets.QDockWidget.DockWidgetFloatable |
            QtWidgets.QDockWidget.DockWidgetMovable |
            QtWidgets.QDockWidget.DockWidgetVerticalTitleBar |
            QtWidgets.QDockWidget.DockWidgetClosable)
        self.create_widget_func = create_widget_func
        # NOTE(review): this attribute shadows QDockWidget.widget();
        # None means "inner widget not created yet"
        self.widget = None

    def showEvent(self, event):
        # lazily create the inner widget the first time the dock is shown
        if self.widget is None:
            self.widget = self.create_widget_func()
            if self.scrollable:
                # wrap the widget in a scroll area
                self.scrollarea = QtWidgets.QScrollArea()
                self.scrollarea.setWidget(self.widget)
                self.scrollarea.setWidgetResizable(True)
                self.setWidget(self.scrollarea)
            else:
                self.setWidget(self.widget)
        super(MyDockWidget, self).showEvent(event)

    def event(self, event):
        event_type = event.type()
        if event.type() == 176:  # QEvent::NonClientAreaMouseButtonDblClick
            # toggle maximized/normal on title-bar double click when floating
            if self.isFloating():
                if self.isMaximized():
                    fn = lambda: self.showNormal()
                else:
                    fn = lambda: self.showMaximized()
                # strange bug: always goes back to normal
                # self.showMaximized()
                # dirty workaround: make a timer
                self.timer = QtCore.QTimer()
                self.timer.timeout.connect(fn)
                self.timer.setSingleShot(True)
                self.timer.setInterval(1.0)
                self.timer.start()
            event.accept()
            return True
        else:
            #return super(MyDockWidget, self).event(event)
            return QtWidgets.QDockWidget.event(self, event)
class PyrplWidget(QtWidgets.QMainWindow):
    """
    Main window of the PyRPL GUI.

    Hosts one lazily-created dock widget per software module, a "Modules"
    menu to toggle them, a status bar that displays exceptions (red) and
    log records (green), and persistence of window/dock positions in the
    pyrpl config file.
    """
    def __init__(self, pyrpl_instance):
        # NOTE(review): `self.parent` shadows QWidget.parent(); it refers
        # to the owning Pyrpl instance here, not a Qt parent widget
        self.parent = pyrpl_instance
        self.logger = self.parent.logger
        # forward log records of the pyrpl logger to the status bar
        self.handler = LogHandler()
        self.logger.addHandler(self.handler)
        super(PyrplWidget, self).__init__()
        self.setDockNestingEnabled(True)  # allow dockwidget nesting
        self.setAnimated(True)  # animate docking of dock widgets
        self.dock_widgets = {}
        self.last_docked = None
        self.menu_modules = self.menuBar().addMenu("Modules")
        self.module_actions = []
        # one dock widget (and menu entry) per software module
        for module in self.parent.software_modules:
            self.add_dock_widget(module._create_widget, module.name)
        # self.showMaximized()  # maximized by default
        # central placeholder shown while no module dock is visible
        self.centralwidget = QtWidgets.QFrame()
        self.setCentralWidget(self.centralwidget)
        self.centrallayout = QtWidgets.QVBoxLayout()
        self.centrallayout.setAlignment(QtCore.Qt.AlignCenter)
        self.centralwidget.setLayout(self.centrallayout)
        self.centralbutton = QtWidgets.QPushButton('Click on "Modules" in the '
                                                   'upper left corner to load a '
                                                   'specific PyRPL module!')
        self.centralbutton.clicked.connect(self.click_menu_modules)
        self.centrallayout.addWidget(self.centralbutton)
        self.set_window_position()
        # periodically persist the window position to the config file
        self.timer_save_pos = QtCore.QTimer()
        self.timer_save_pos.setInterval(1000)
        self.timer_save_pos.timeout.connect(self.save_window_position)
        self.timer_save_pos.start()
        # single-shot timer that fades the status bar style after a message
        self.timer_toolbar = QtCore.QTimer()
        self.timer_toolbar.setInterval(1000)
        self.timer_toolbar.setSingleShot(True)
        self.timer_toolbar.timeout.connect(self.vanish_toolbar)
        self.status_bar = self.statusBar()
        EL.show_exception.connect(self.show_exception)
        self.handler.show_log.connect(self.show_log)
        self.setWindowTitle(self.parent.c.pyrpl.name)
        self.timers = [self.timer_save_pos, self.timer_toolbar]
        #self.set_background_color(self)

    def click_menu_modules(self):
        # open the "Modules" menu near the top-left corner of the window
        self.menu_modules.popup(self.mapToGlobal(QtCore.QPoint(10, 10)))

    def hide_centralbutton(self):
        # hide the central placeholder as soon as any module dock is
        # visible; show it again when none is
        for dock_widget in self.dock_widgets.values():
            if dock_widget.isVisible():
                self.centralwidget.hide()
                return
        # only if no dockwidget is shown, show central button
        self.centralwidget.show()

    def show_exception(self, typ_val_tb):
        """
        show exception in red in toolbar
        """
        typ, val, tb = typ_val_tb
        self.timer_toolbar.stop()
        self.status_bar.showMessage(''.join(format_exception_only(typ, val)))
        self.status_bar.setStyleSheet('color: white;background-color: red;')
        # after the timer fires, keep the message visible but dimmed
        self._next_toolbar_style = 'color: orange;'
        # full traceback available as tooltip
        self.status_bar.setToolTip(''.join(format_exception(typ, val, tb)))
        self.timer_toolbar.start()

    def show_log(self, records):
        # display the latest log record in green in the status bar
        record = records[0]
        self.timer_toolbar.stop()
        self.status_bar.showMessage(record)
        self.status_bar.setStyleSheet('color: white;background-color: green;')
        self._next_toolbar_style = 'color: grey;'
        self.timer_toolbar.start()

    def vanish_toolbar(self):
        """
        Toolbar becomes orange after (called 1s after exception occured)
        """
        self.status_bar.setStyleSheet(self._next_toolbar_style)

    def _clear(self):
        # stop all timers (called when the widget is being torn down)
        for timer in self.timers:
            timer.stop()

    def add_dock_widget(self, create_widget, name):
        """Create a lazily-instantiated dock widget plus its menu action."""
        dock_widget = MyDockWidget(create_widget,
                                   name + ' (%s)' % self.parent.name)
        self.dock_widgets[name] = dock_widget
        self.addDockWidget(QtCore.Qt.TopDockWidgetArea,
                           dock_widget)
        if self.last_docked is not None:
            self.tabifyDockWidget(self.last_docked, dock_widget)
        # put tabs on top
        self.setTabPosition(dock_widget.allowedAreas(),
                            QtWidgets.QTabWidget.North)
        self.last_docked = dock_widget
        self.last_docked.hide()  # by default no widget is created...
        action = QtWidgets.QAction(name, self.menu_modules)
        action.setCheckable(True)
        self.module_actions.append(action)
        self.menu_modules.addAction(action)
        # make sure menu and widget are in sync
        action.changed.connect(lambda: dock_widget.setVisible(action.isChecked()))
        dock_widget.visibilityChanged.connect(lambda: action.setChecked(dock_widget.isVisible()))
        dock_widget.visibilityChanged.connect(self.hide_centralbutton)
        self.set_background_color(dock_widget)

    def remove_dock_widget(self, name):
        """Remove the dock widget and menu action for `name`; returns
        whether the widget was visible before removal."""
        dock_widget = self.dock_widgets.pop(name)
        # return later whether the widget was visible
        wasvisible = dock_widget.isVisible()
        # disconnect signals from widget
        dock_widget.blockSignals(True)  # avoid further signals
        # remove action button from context menu
        # NOTE(review): this loop removes from module_actions while
        # iterating over it; harmless only as long as at most one action
        # matches `name`
        for action in self.module_actions:
            buttontext = action.text()
            if buttontext == name:
                action.blockSignals(True)  # avoid further signals
                self.module_actions.remove(action)
                self.menu_modules.removeAction(action)
                action.deleteLater()
        # remove dock widget
        if self.last_docked == dock_widget:
            self.last_docked = list(self.dock_widgets.values())[-1]
            # dict values are not indexable in python 3, hence the
            # conversion to list before taking the last element
        self.removeDockWidget(dock_widget)
        dock_widget.deleteLater()
        # return whether the widget was visible
        return wasvisible

    def reload_dock_widget(self, name):
        """
        This function destroys the old lockbox widget and loads a new one
        """
        pyrpl = self.parent
        module = getattr(pyrpl, name)
        # save window position
        self.timer_save_pos.stop()
        self.save_window_position()
        pyrpl.c._write_to_file()  # make sure positions are written
        # replace dock widget
        self.remove_dock_widget(name)
        self.add_dock_widget(module._create_widget, name)
        # restore window position and widget visibility
        self.set_window_position()  # reset the same window position as before
        self.timer_save_pos.start()

    def save_window_position(self):
        """Persist dock layout and window geometry to the config file."""
        # Don't try to save position if window is closed (otherwise, random position is saved)
        if self.isVisible():
            # pre-serialize binary data as "latin1" string
            act_state = (bytes(self.saveState())).decode("latin1")
            # only write to the config file when something actually changed
            if (not "dock_positions" in self.parent.c.pyrpl._keys()) or \
                    (self.parent.c.pyrpl["dock_positions"] != act_state):
                self.parent.c.pyrpl["dock_positions"] = act_state
            act_window_pos = self.window_position
            saved_window_pos = self.parent.c.pyrpl._get_or_create("window_position")._data
            if saved_window_pos != act_window_pos:
                self.parent.c.pyrpl.window_position = self.window_position
        #else:
        #    self.logger.debug("Gui is not started. Cannot save position.\n")

    def set_window_position(self):
        """Restore dock layout and window geometry from the config file."""
        if "dock_positions" in self.parent.c.pyrpl._keys():
            try:
                self.restoreState(
                    self.parent.c.pyrpl.dock_positions.encode("latin1"))
            except:
                self.logger.warning("Sorry, there was a problem with the "
                                    "restoration of Dock positions. ")
        try:
            coords = self.parent.c.pyrpl["window_position"]._data
        except KeyError:
            # default geometry when nothing was stored yet
            coords = [0, 0, 800, 600]
        try:
            self.window_position = coords
            if QtWidgets.QApplication.desktop().screenNumber(self) == -1:
                # window doesn't fit inside screen
                # NOTE(review): the setter expects 4 coordinates, so this
                # 2-tuple raises IndexError, which is swallowed by the
                # except clause below — the window is never actually moved
                self.window_position = (0, 0)
        except Exception as e:
            self.logger.warning("Gui is not started. Cannot set window position.\n"
                                + str(e))

    @property
    def window_position(self):
        # current [x, y, width, height] of the main window
        xy = self.pos()
        x = xy.x()
        y = xy.y()
        dxdy = self.size()
        dx = dxdy.width()
        dy = dxdy.height()
        return [x, y, dx, dy]

    @window_position.setter
    def window_position(self, coords):
        # coords = [x, y, width, height]
        self.move(coords[0], coords[1])
        self.resize(coords[2], coords[3])

    def set_background_color(self, widget):
        """Apply the user-configured background color (if any) to widget."""
        try:
            color = str(self.parent.c.pyrpl.background_color)
        except KeyError:
            return
        else:
            if color.strip() == "":
                return
        try:  # hex values must receive a preceeding hashtag
            int(color, 16)
        except ValueError:
            pass
        else:
            color = "#" + color
        widget.setStyleSheet("background-color:%s" % color)
| mit | 314c53db0e5ba97e2d81f547859baf2c | 37.185185 | 96 | 0.605238 | 3.998508 | false | false | false | false |
kbr/fritzconnection | fritzconnection/cli/fritzhomeauto.py | 1 | 2507 | """
fritzhomeauto.py
Module to inspect the FritzBox homeautomation API.
CLI interface.
This module is part of the FritzConnection package.
https://github.com/kbr/fritzconnection
License: MIT (https://opensource.org/licenses/MIT)
Author: Klaus Bremer
"""
from ..lib.fritzhomeauto import FritzHomeAutomation
from . utils import get_cli_arguments, get_instance, print_header
def report_verbose(fh):
information = fh.device_information()
for info in information:
width = len(max(info.keys(), key=lambda x: len(x)))
line = f'{{attribute:{width}}} : {{value}}'
for attribute in sorted(info.keys()):
print(line.format(attribute=attribute, value=info[attribute]))
print() # add blank line between devices
def report_compact(fh):
name = 'Device Name'
ain = 'AIN'
power = 'Power[W]'
temperature = 't[°C]'
switch_state = 'switch'
print(f'{name:24}{ain:18}{power:>10}{temperature:>8} {switch_state}')
for di in fh.device_information():
name = di['NewDeviceName']
ain = di['NewAIN']
ain = f"'{ain}'"
power = di['NewMultimeterPower'] * 0.01
temperature = di['NewTemperatureCelsius'] *0.1
switch_state = di['NewSwitchState'].lower()
print(f'{name:24}{ain:18}{power:>10.3f}{temperature:>8.1f} {switch_state}')
print()
def report_status(fh, arguments):
print('FritzHomeautomation:')
print('Status of registered home-automation devices:\n')
if arguments.verbose:
report_verbose(fh)
else:
report_compact(fh)
def switch_device(fh, arguments):
ain = arguments.switch[0]
state = arguments.switch[1].lower() == 'on'
fh.set_switch(identifier=ain, on=state)
def add_arguments(parser):
parser.add_argument('-v', '--verbose',
nargs='?', default=False, const=True,
help='report in verbose mode')
parser.add_argument('-s', '--switch',
nargs=2,
help='set switch state. requires two parameters: '
'ain and state [on|off]')
def main():
arguments = get_cli_arguments(add_arguments)
if not arguments.password:
print('Exit: password required.')
return
fh = get_instance(FritzHomeAutomation, arguments)
if arguments.switch:
switch_device(fh, arguments)
else:
print_header(fh)
report_status(fh, arguments)
if __name__ == '__main__':
main()
| mit | 1af68d1e7b8556f66035ccfd84be1109 | 28.833333 | 85 | 0.612929 | 3.637155 | false | false | false | false |
pyparsing/pyparsing | examples/simpleWiki.py | 1 | 1108 | from pyparsing import *
wikiInput = """
Here is a simple Wiki input:
*This is in italics.*
**This is in bold!**
***This is in bold italics!***
Here's a URL to {{Pyparsing's Wiki Page->https://site-closed.wikispaces.com}}
"""
def convertToHTML(opening, closing):
def conversionParseAction(s, l, t):
return opening + t[0] + closing
return conversionParseAction
italicized = QuotedString("*").setParseAction(convertToHTML("<I>", "</I>"))
bolded = QuotedString("**").setParseAction(convertToHTML("<B>", "</B>"))
boldItalicized = QuotedString("***").setParseAction(convertToHTML("<B><I>", "</I></B>"))
def convertToHTML_A(s, l, t):
try:
text, url = t[0].split("->")
except ValueError:
raise ParseFatalException(s, l, "invalid URL link reference: " + t[0])
return '<A href="{}">{}</A>'.format(url, text)
urlRef = QuotedString("{{", endQuoteChar="}}").setParseAction(convertToHTML_A)
wikiMarkup = urlRef | boldItalicized | bolded | italicized
print(wikiInput)
print()
print(wikiMarkup.transformString(wikiInput))
| mit | a7026243bcd7d35cdfe7f22c086d60c2 | 27.157895 | 88 | 0.636282 | 3.388379 | false | false | false | false |
pyparsing/pyparsing | examples/number_words.py | 1 | 3884 | # number_words.py
#
# Copyright 2020, Paul McGuire
#
# Parser/evaluator for expressions of numbers as written out in words:
# - one
# - seven
# - twelve
# - twenty six
# - forty-two
# - one hundred and seven
#
#
# BNF:
"""
optional_and ::= ["and" | "-"]
optional_dash ::= ["-"]
units ::= one | two | three | ... | nine
teens ::= ten | teens_only
tens ::= twenty | thirty | ... | ninety
one_to_99 ::= units | teens | (tens [optional_dash units])
teens_only ::= eleven | twelve | ... | nineteen
hundreds ::= (units | teens_only | tens optional_dash units) "hundred"
thousands ::= one_to_99 "thousand"
# number from 1-999,999
number ::= [thousands [optional_and]] [hundreds[optional_and]] one_to_99
| [thousands [optional_and]] hundreds
| thousands
"""
import pyparsing as pp
from operator import mul
def define_numeric_word_range(
names: str, from_: int, to_: int = None, step: int = 1
) -> pp.MatchFirst:
"""
Compose a MatchFirst of CaselessKeywords, given their names and values,
which when parsed, are converted to their value
"""
def define_numeric_word(nm: str, val: int):
return pp.CaselessKeyword(nm).add_parse_action(lambda: val)
names = names.split()
if to_ is None:
to_ = from_
values = range(from_, to_ + 1, step)
ret = pp.MatchFirst(
define_numeric_word(name, value) for name, value in zip(names, values)
)
if len(names) == 1:
ret.setName(names[0])
else:
ret.setName("{}-{}".format(names[0], names[-1]))
return ret
def multiply(t):
"""
Parse action for hundreds and thousands.
"""
return mul(*t)
opt_dash = pp.Optional(pp.Suppress("-")).setName("'-'")
opt_and = pp.Optional((pp.CaselessKeyword("and") | "-").suppress()).setName("'and/-'")
units = define_numeric_word_range("one two three four five six seven eight nine", 1, 9)
teens_only = define_numeric_word_range(
"eleven twelve thirteen fourteen fifteen sixteen seventeen eighteen nineteen",
11,
19,
)
ten = define_numeric_word_range("ten", 10)
teens = ten | teens_only
tens = define_numeric_word_range(
"twenty thirty forty fifty sixty seventy eighty ninety", 20, 90, 10
)
one_to_99 = (units | teens | (tens + pp.Optional(opt_dash + units))).setName("1-99")
one_to_99.addParseAction(sum)
hundred = define_numeric_word_range("hundred", 100)
thousand = define_numeric_word_range("thousand", 1000)
hundreds = (units | teens_only | (tens + opt_dash + units)) + hundred
hundreds.setName("100s")
one_to_999 = (
(pp.Optional(hundreds + opt_and) + one_to_99 | hundreds).addParseAction(sum)
).setName("1-999")
thousands = one_to_999 + thousand
thousands.setName("1000s")
# for hundreds and thousands, must scale up (multiply) accordingly
hundreds.addParseAction(multiply)
thousands.addParseAction(multiply)
numeric_expression = (
pp.Optional(thousands + opt_and) + pp.Optional(hundreds + opt_and) + one_to_99
| pp.Optional(thousands + opt_and) + hundreds
| thousands
).setName("numeric_words")
# sum all sub-results into total
numeric_expression.addParseAction(sum)
if __name__ == "__main__":
numeric_expression.runTests(
"""
one
seven
twelve
twenty six
forty-two
two hundred
twelve hundred
one hundred and eleven
ninety nine thousand nine hundred and ninety nine
nine hundred thousand nine hundred and ninety nine
nine hundred and ninety nine thousand nine hundred and ninety nine
nineteen hundred thousand nineteen hundred and ninety nine
# invalid
twenty hundred
""",
postParse=lambda _, s: "{:,}".format(s[0]),
)
# create railroad diagram
numeric_expression.create_diagram("numeric_words_diagram.html", vertical=5)
| mit | c24fba6f7eee1aad5a9d527e5371df05 | 27.350365 | 87 | 0.635427 | 3.124698 | false | false | false | false |
pyparsing/pyparsing | examples/unicode_denormalizer.py | 1 | 3790 | # unicode_denormalizer.py
#
# Demonstration of the pyparsing's transform_string() method, to
# convert identifiers in Python source code to equivalent Unicode
# characters. Python's compiler automatically normalizes Unicode
# characters back to their ASCII equivalents, so that identifiers may
# be rewritten using other Unicode characters, and normalize back to
# the same identifier. For instance, Python treats "print" and "𝕡𝓻ᵢ𝓃𝘁"
# and "𝖕𝒓𝗂𝑛ᵗ" all as the same identifier.
#
# The converter must take care to *only* transform identifiers -
# Python keywords must always be represented in base ASCII form. To
# skip over keywords, they are added to the parser/transformer, but
# contain no transforming parse action.
#
# The converter also detects identifiers in placeholders within f-strings.
#
# Copyright 2022, by Paul McGuire
#
import keyword
import random
import unicodedata
import pyparsing as pp
ppu = pp.pyparsing_unicode
ident_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789·"
ident_char_map = {}.fromkeys(ident_chars, "")
for ch in ppu.identbodychars:
normal = unicodedata.normalize("NFKC", ch)
if normal in ident_char_map:
ident_char_map[normal] += ch
ligature_map = {
'ffl': 'ffl ffl ffl ffl ffl',
'ffi': 'ffi ffi ffi ffi ffi',
'ff': 'ff ff',
'fi': 'fi fi',
'fl': 'fl fl',
'ij': 'ij ij',
'lj': 'lj lj',
'nj': 'nj nj',
'dz': 'dz dz',
'ii': 'ii ⅱ',
'iv': 'iv ⅳ',
'vi': 'vi ⅵ',
'ix': 'ix ⅸ',
'xi': 'xi ⅺ',
}
ligature_transformer = pp.oneOf(ligature_map).add_parse_action(lambda t: random.choice(ligature_map[t[0]].split()))
def make_mixed_font(t):
t_0 = t[0][0]
ret = ['_' if t_0 == '_' else random.choice(ident_char_map.get(t_0, t_0))]
t_rest = ligature_transformer.transform_string(t[0][1:])
ret.extend(random.choice(ident_char_map.get(c, c)) for c in t_rest)
return ''.join(ret)
identifier = pp.pyparsing_common.identifier
identifier.add_parse_action(make_mixed_font)
python_quoted_string = pp.Opt(pp.Char("fF")("f_string_prefix")) + (
pp.quotedString
| pp.QuotedString('"""', multiline=True, unquoteResults=False)
| pp.QuotedString("'''", multiline=True, unquoteResults=False)
)("quoted_string_body")
def mix_fstring_expressions(t):
if not t.f_string_prefix:
return
fstring_arg = pp.QuotedString("{", end_quote_char="}")
fstring_arg.add_parse_action(lambda tt: "{" + transformer.transform_string(tt[0]) + "}")
ret = t.f_string_prefix + fstring_arg.transform_string(t.quoted_string_body)
return ret
python_quoted_string.add_parse_action(mix_fstring_expressions)
any_keyword = pp.MatchFirst(map(pp.Keyword, list(keyword.kwlist) + getattr(keyword, "softkwlist", [])))
# quoted strings and keywords will be parsed, but left untransformed
transformer = python_quoted_string | any_keyword | identifier
def demo():
import textwrap
hello_source = textwrap.dedent("""
def hello():
try:
hello_ = "Hello"
world_ = "World"
print(f"{hello_}, {world_}!")
except TypeError as exc:
print("failed: {}".format(exc))
if __name__ == "__main__":
hello()
""")
source = hello_source
transformed = transformer.transform_string(source)
print(transformed)
# does it really work?
code = compile(transformed, source, mode="exec")
exec(code)
if 0:
# pick some code from the stdlib
import unittest.util as lib_module
import inspect
source = inspect.getsource(lib_module)
transformed = transformer.transform_string(source)
print()
print(transformed)
if __name__ == '__main__':
demo()
| mit | a2e20164aca9775545c4ac79254140bb | 29.284553 | 115 | 0.654765 | 3.236316 | false | false | false | false |
pyparsing/pyparsing | examples/htmlTableParser.py | 1 | 2096 | #
# htmlTableParser.py
#
# Example of parsing a simple HTML table into a list of rows, and optionally into a little database
#
# Copyright 2019, Paul McGuire
#
import pyparsing as pp
import urllib.request
# define basic HTML tags, and compose into a Table
table, table_end = pp.makeHTMLTags("table")
thead, thead_end = pp.makeHTMLTags("thead")
tbody, tbody_end = pp.makeHTMLTags("tbody")
tr, tr_end = pp.makeHTMLTags("tr")
th, th_end = pp.makeHTMLTags("th")
td, td_end = pp.makeHTMLTags("td")
a, a_end = pp.makeHTMLTags("a")
# method to strip HTML tags from a string - will be used to clean up content of table cells
strip_html = (pp.anyOpenTag | pp.anyCloseTag).suppress().transformString
# expression for parsing <a href="url">text</a> links, returning a (text, url) tuple
link = pp.Group(a + a.tag_body("text") + a_end.suppress())
def extract_text_and_url(t):
return (t[0].text, t[0].href)
link.addParseAction(extract_text_and_url)
# method to create table rows of header and data tags
def table_row(start_tag, end_tag):
body = start_tag.tag_body
body.addParseAction(pp.tokenMap(str.strip), pp.tokenMap(strip_html))
row = pp.Group(
tr.suppress()
+ pp.ZeroOrMore(start_tag.suppress() + body + end_tag.suppress())
+ tr_end.suppress()
)
return row
th_row = table_row(th, th_end)
td_row = table_row(td, td_end)
# define expression for overall table - may vary slightly for different pages
html_table = (
table
+ tbody
+ pp.Optional(th_row("headers"))
+ pp.ZeroOrMore(td_row)("rows")
+ tbody_end
+ table_end
)
# read in a web page containing an interesting HTML table
with urllib.request.urlopen(
"https://en.wikipedia.org/wiki/List_of_tz_database_time_zones"
) as page:
page_html = page.read().decode()
tz_table = html_table.searchString(page_html)[0]
# convert rows to dicts
rows = [dict(zip(tz_table.headers, row)) for row in tz_table.rows]
# make a dict keyed by TZ database name
tz_db = {row["TZ database name"]: row for row in rows}
from pprint import pprint
pprint(tz_db["America/Chicago"])
| mit | abe5e92f8d7e4b707b323f55b25f4a14 | 26.220779 | 99 | 0.691317 | 3.091445 | false | false | false | false |
pyparsing/pyparsing | examples/antlr_grammar.py | 1 | 11143 | """
antlr_grammar.py
Created on 4 sept. 2010
@author: luca
Submitted by Luca DallOlio, September, 2010
(Minor updates by Paul McGuire, June, 2012)
(Code idiom updates by Paul McGuire, April, 2019)
"""
from pyparsing import (
Word,
ZeroOrMore,
printables,
Suppress,
OneOrMore,
Group,
LineEnd,
Optional,
White,
originalTextFor,
hexnums,
nums,
Combine,
Literal,
Keyword,
cStyleComment,
Regex,
Forward,
MatchFirst,
And,
oneOf,
alphas,
alphanums,
delimitedList,
Char,
)
# http://www.antlr.org/grammar/ANTLR/ANTLRv3.g
(
QUOTE,
APOS,
EQ,
LBRACK,
RBRACK,
LBRACE,
RBRACE,
LPAR,
RPAR,
ROOT,
BANG,
AT,
TIL,
SEMI,
COLON,
VERT,
) = map(Suppress, "\"'=[]{}()^!@~;:|")
BSLASH = Literal("\\")
keywords = (
SRC_,
SCOPE_,
OPTIONS_,
TOKENS_,
FRAGMENT,
ID,
LEXER,
PARSER,
GRAMMAR,
TREE,
CATCH,
FINALLY,
THROWS,
PROTECTED,
PUBLIC,
PRIVATE,
) = map(
Keyword,
"""src scope options tokens fragment id lexer parser grammar tree catch finally throws protected
public private """.split(),
)
KEYWORD = MatchFirst(keywords)
# Tokens
EOL = Suppress(LineEnd()) # $
SGL_PRINTABLE = Char(printables)
singleTextString = originalTextFor(
ZeroOrMore(~EOL + (White(" \t") | Word(printables)))
).leaveWhitespace()
XDIGIT = hexnums
INT = Word(nums)
ESC = BSLASH + (
oneOf(list(r"nrtbf\">" + "'")) | ("u" + Word(hexnums, exact=4)) | SGL_PRINTABLE
)
LITERAL_CHAR = ESC | ~(APOS | BSLASH) + SGL_PRINTABLE
CHAR_LITERAL = APOS + LITERAL_CHAR + APOS
STRING_LITERAL = APOS + Combine(OneOrMore(LITERAL_CHAR)) + APOS
DOUBLE_QUOTE_STRING_LITERAL = '"' + ZeroOrMore(LITERAL_CHAR) + '"'
DOUBLE_ANGLE_STRING_LITERAL = "<<" + ZeroOrMore(SGL_PRINTABLE) + ">>"
TOKEN_REF = Word(alphas.upper(), alphanums + "_")
RULE_REF = Word(alphas.lower(), alphanums + "_")
ACTION_ESC = (
BSLASH.suppress() + APOS
| BSLASH.suppress()
| BSLASH.suppress() + (~(APOS | QUOTE) + SGL_PRINTABLE)
)
ACTION_CHAR_LITERAL = APOS + (ACTION_ESC | ~(BSLASH | APOS) + SGL_PRINTABLE) + APOS
ACTION_STRING_LITERAL = (
QUOTE + ZeroOrMore(ACTION_ESC | ~(BSLASH | QUOTE) + SGL_PRINTABLE) + QUOTE
)
SRC = SRC_.suppress() + ACTION_STRING_LITERAL("file") + INT("line")
id = TOKEN_REF | RULE_REF
SL_COMMENT = (
Suppress("//") + Suppress("$ANTLR") + SRC
| ZeroOrMore(~EOL + Word(printables)) + EOL
)
ML_COMMENT = cStyleComment
WS = OneOrMore(
Suppress(" ") | Suppress("\t") | (Optional(Suppress("\r")) + Literal("\n"))
)
WS_LOOP = ZeroOrMore(SL_COMMENT | ML_COMMENT)
NESTED_ARG_ACTION = Forward()
NESTED_ARG_ACTION << (
LBRACK
+ ZeroOrMore(NESTED_ARG_ACTION | ACTION_STRING_LITERAL | ACTION_CHAR_LITERAL)
+ RBRACK
)
ARG_ACTION = NESTED_ARG_ACTION
NESTED_ACTION = Forward()
NESTED_ACTION << (
LBRACE
+ ZeroOrMore(
NESTED_ACTION
| SL_COMMENT
| ML_COMMENT
| ACTION_STRING_LITERAL
| ACTION_CHAR_LITERAL
)
+ RBRACE
)
ACTION = NESTED_ACTION + Optional("?")
SCOPE = SCOPE_.suppress()
OPTIONS = OPTIONS_.suppress() + LBRACE # + WS_LOOP + Suppress('{')
TOKENS = TOKENS_.suppress() + LBRACE # + WS_LOOP + Suppress('{')
TREE_BEGIN = ROOT + LPAR
RANGE = Suppress("..")
REWRITE = Suppress("->")
# General Parser Definitions
# Grammar heading
optionValue = id | STRING_LITERAL | CHAR_LITERAL | INT | Literal("*").setName("s")
option = Group(id("id") + EQ + optionValue("value"))("option")
optionsSpec = OPTIONS + Group(OneOrMore(option + SEMI))("options") + RBRACE
tokenSpec = (
Group(TOKEN_REF("token_ref") + (EQ + (STRING_LITERAL | CHAR_LITERAL)("lit")))(
"token"
)
+ SEMI
)
tokensSpec = TOKENS + Group(OneOrMore(tokenSpec))("tokens") + RBRACE
attrScope = SCOPE_.suppress() + id + ACTION
grammarType = LEXER + PARSER + TREE
actionScopeName = id | LEXER("l") | PARSER("p")
action = AT + Optional(actionScopeName + Suppress("::")) + id + ACTION
grammarHeading = (
Optional(ML_COMMENT("ML_COMMENT"))
+ Optional(grammarType)
+ GRAMMAR
+ id("grammarName")
+ SEMI
+ Optional(optionsSpec)
+ Optional(tokensSpec)
+ ZeroOrMore(attrScope)
+ ZeroOrMore(action)
)
modifier = PROTECTED | PUBLIC | PRIVATE | FRAGMENT
ruleAction = AT + id + ACTION
throwsSpec = THROWS.suppress() + delimitedList(id)
ruleScopeSpec = (
(SCOPE_.suppress() + ACTION)
| (SCOPE_.suppress() + delimitedList(id) + SEMI)
| (SCOPE_.suppress() + ACTION + SCOPE_.suppress() + delimitedList(id) + SEMI)
)
unary_op = oneOf("^ !")
notTerminal = CHAR_LITERAL | TOKEN_REF | STRING_LITERAL
terminal = (
CHAR_LITERAL | TOKEN_REF + Optional(ARG_ACTION) | STRING_LITERAL | "."
) + Optional(unary_op)
block = Forward()
notSet = TIL + (notTerminal | block)
rangeNotPython = CHAR_LITERAL("c1") + RANGE + CHAR_LITERAL("c2")
atom = Group(
(rangeNotPython + Optional(unary_op)("op"))
| terminal
| (notSet + Optional(unary_op)("op"))
| (RULE_REF + Optional(ARG_ACTION("arg")) + Optional(unary_op)("op"))
)
element = Forward()
treeSpec = ROOT + LPAR + element * (2,) + RPAR
ebnfSuffix = oneOf("? * +")
ebnf = block + Optional(ebnfSuffix("op") | "=>")
elementNoOptionSpec = (
(id("result_name") + oneOf("= +=")("labelOp") + atom("atom") + Optional(ebnfSuffix))
| (id("result_name") + oneOf("= +=")("labelOp") + block + Optional(ebnfSuffix))
| atom("atom") + Optional(ebnfSuffix)
| ebnf
| ACTION
| (treeSpec + Optional(ebnfSuffix))
) # | SEMPRED ( '=>' -> GATED_SEMPRED | -> SEMPRED )
element <<= Group(elementNoOptionSpec)("element")
# Do not ask me why group is needed twice... seems like the xml that you see is not always the real structure?
alternative = Group(Group(OneOrMore(element))("elements"))
rewrite = Optional(Literal("TODO REWRITE RULES TODO"))
block <<= (
LPAR
+ Optional(Optional(optionsSpec("opts")) + COLON)
+ Group(
alternative("a1")
+ rewrite
+ Group(ZeroOrMore(VERT + alternative("a2") + rewrite))("alternatives")
)("block")
+ RPAR
)
altList = (
alternative("a1")
+ rewrite
+ Group(ZeroOrMore(VERT + alternative("a2") + rewrite))("alternatives")
)
exceptionHandler = CATCH.suppress() + ARG_ACTION + ACTION
finallyClause = FINALLY.suppress() + ACTION
exceptionGroup = (OneOrMore(exceptionHandler) + Optional(finallyClause)) | finallyClause
ruleHeading = (
Optional(ML_COMMENT)("ruleComment")
+ Optional(modifier)("modifier")
+ id("ruleName")
+ Optional("!")
+ Optional(ARG_ACTION("arg"))
+ Optional(Suppress("returns") + ARG_ACTION("rt"))
+ Optional(throwsSpec)
+ Optional(optionsSpec)
+ Optional(ruleScopeSpec)
+ ZeroOrMore(ruleAction)
)
rule = Group(ruleHeading + COLON + altList + SEMI + Optional(exceptionGroup))("rule")
grammarDef = grammarHeading + Group(OneOrMore(rule))("rules")
def grammar():
return grammarDef
def __antlrAlternativesConverter(pyparsingRules, antlrBlock):
rule = None
if (
hasattr(antlrBlock, "alternatives")
and antlrBlock.alternatives != ""
and len(antlrBlock.alternatives) > 0
):
alternatives = []
alternatives.append(__antlrAlternativeConverter(pyparsingRules, antlrBlock.a1))
for alternative in antlrBlock.alternatives:
alternatives.append(
__antlrAlternativeConverter(pyparsingRules, alternative)
)
rule = MatchFirst(alternatives)("anonymous_or")
elif hasattr(antlrBlock, "a1") and antlrBlock.a1 != "":
rule = __antlrAlternativeConverter(pyparsingRules, antlrBlock.a1)
else:
raise Exception("Not yet implemented")
assert rule != None
return rule
def __antlrAlternativeConverter(pyparsingRules, antlrAlternative):
elementList = []
for element in antlrAlternative.elements:
rule = None
if hasattr(element.atom, "c1") and element.atom.c1 != "":
regex = r"[" + str(element.atom.c1[0]) + "-" + str(element.atom.c2[0] + "]")
rule = Regex(regex)("anonymous_regex")
elif hasattr(element, "block") and element.block != "":
rule = __antlrAlternativesConverter(pyparsingRules, element.block)
else:
ruleRef = element.atom[0]
assert ruleRef in pyparsingRules
rule = pyparsingRules[ruleRef](ruleRef)
if hasattr(element, "op") and element.op != "":
if element.op == "+":
rule = Group(OneOrMore(rule))("anonymous_one_or_more")
elif element.op == "*":
rule = Group(ZeroOrMore(rule))("anonymous_zero_or_more")
elif element.op == "?":
rule = Optional(rule)
else:
raise Exception("rule operator not yet implemented : " + element.op)
rule = rule
elementList.append(rule)
if len(elementList) > 1:
rule = Group(And(elementList))("anonymous_and")
else:
rule = elementList[0]
assert rule is not None
return rule
def __antlrRuleConverter(pyparsingRules, antlrRule):
rule = None
rule = __antlrAlternativesConverter(pyparsingRules, antlrRule)
assert rule != None
rule(antlrRule.ruleName)
return rule
def antlrConverter(antlrGrammarTree):
pyparsingRules = {}
antlrTokens = {}
for antlrToken in antlrGrammarTree.tokens:
antlrTokens[antlrToken.token_ref] = antlrToken.lit
for antlrTokenName, antlrToken in list(antlrTokens.items()):
pyparsingRules[antlrTokenName] = Literal(antlrToken)
antlrRules = {}
for antlrRule in antlrGrammarTree.rules:
antlrRules[antlrRule.ruleName] = antlrRule
pyparsingRules[antlrRule.ruleName] = Forward() # antlr is a top down grammar
for antlrRuleName, antlrRule in list(antlrRules.items()):
pyparsingRule = __antlrRuleConverter(pyparsingRules, antlrRule)
assert pyparsingRule != None
pyparsingRules[antlrRuleName] <<= pyparsingRule
return pyparsingRules
if __name__ == "__main__":
text = """\
grammar SimpleCalc;
options {
language = Python;
}
tokens {
PLUS = '+' ;
MINUS = '-' ;
MULT = '*' ;
DIV = '/' ;
}
/*------------------------------------------------------------------
* PARSER RULES
*------------------------------------------------------------------*/
expr : term ( ( PLUS | MINUS ) term )* ;
term : factor ( ( MULT | DIV ) factor )* ;
factor : NUMBER ;
/*------------------------------------------------------------------
* LEXER RULES
*------------------------------------------------------------------*/
NUMBER : (DIGIT)+ ;
/* WHITESPACE : ( '\t' | ' ' | '\r' | '\n'| '\u000C' )+ { $channel = HIDDEN; } ; */
fragment DIGIT : '0'..'9' ;
"""
grammar().validate()
antlrGrammarTree = grammar().parseString(text)
print(antlrGrammarTree.dump())
pyparsingRules = antlrConverter(antlrGrammarTree)
pyparsingRule = pyparsingRules["expr"]
pyparsingTree = pyparsingRule.parseString("2 - 5 * 42 + 7 / 25")
print(pyparsingTree.dump())
| mit | e3767837dac73989cf70ca6458ba032a | 27.719072 | 110 | 0.609351 | 3.393118 | false | false | false | false |
pyparsing/pyparsing | examples/httpServerLogParser.py | 1 | 3565 | # httpServerLogParser.py
#
# Copyright (c) 2016, Paul McGuire
#
"""
Parser for HTTP server log output, of the form:
195.146.134.15 - - [20/Jan/2003:08:55:36 -0800]
"GET /path/to/page.html HTTP/1.0" 200 4649 "http://www.somedomain.com/020602/page.html"
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
127.0.0.1 - u.surname@domain.com [12/Sep/2006:14:13:53 +0300]
"GET /skins/monobook/external.png HTTP/1.0" 304 - "http://wiki.mysite.com/skins/monobook/main.css"
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.6) Gecko/20060728 Firefox/1.5.0.6"
You can then break it up as follows:
IP ADDRESS - -
Server Date / Time [SPACE]
"GET /path/to/page
HTTP/Type Request"
Success Code
Bytes Sent To Client
Referer
Client Software
"""
from pyparsing import (
alphas,
nums,
dblQuotedString,
Combine,
Word,
Group,
delimitedList,
Suppress,
removeQuotes,
)
import string
def getCmdFields(s, l, t):
t["method"], t["requestURI"], t["protocolVersion"] = t[0].strip('"').split()
logLineBNF = None
def getLogLineBNF():
global logLineBNF
if logLineBNF is None:
integer = Word(nums)
ipAddress = delimitedList(integer, ".", combine=True)
timeZoneOffset = Word("+-", nums)
month = Word(string.ascii_uppercase, string.ascii_lowercase, exact=3)
serverDateTime = Group(
Suppress("[")
+ Combine(
integer
+ "/"
+ month
+ "/"
+ integer
+ ":"
+ integer
+ ":"
+ integer
+ ":"
+ integer
)
+ timeZoneOffset
+ Suppress("]")
)
logLineBNF = (
ipAddress.setResultsName("ipAddr")
+ Suppress("-")
+ ("-" | Word(alphas + nums + "@._")).setResultsName("auth")
+ serverDateTime.setResultsName("timestamp")
+ dblQuotedString.setResultsName("cmd").setParseAction(getCmdFields)
+ (integer | "-").setResultsName("statusCode")
+ (integer | "-").setResultsName("numBytesSent")
+ dblQuotedString.setResultsName("referrer").setParseAction(removeQuotes)
+ dblQuotedString.setResultsName("clientSfw").setParseAction(removeQuotes)
)
return logLineBNF
testdata = """
195.146.134.15 - - [20/Jan/2003:08:55:36 -0800] "GET /path/to/page.html HTTP/1.0" 200 4649 "http://www.somedomain.com/020602/page.html" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
111.111.111.11 - - [16/Feb/2004:04:09:49 -0800] "GET /ads/redirectads/336x280redirect.htm HTTP/1.1" 304 - "http://www.foobarp.org/theme_detail.php?type=vs&cat=0&mid=27512" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
11.111.11.111 - - [16/Feb/2004:10:35:12 -0800] "GET /ads/redirectads/468x60redirect.htm HTTP/1.1" 200 541 "http://11.11.111.11/adframe.php?n=ad1f311a&what=zone:56" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1) Opera 7.20 [ru\"]"
127.0.0.1 - u.surname@domain.com [12/Sep/2006:14:13:53 +0300] "GET /skins/monobook/external.png HTTP/1.0" 304 - "http://wiki.mysite.com/skins/monobook/main.css" "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.6) Gecko/20060728 Firefox/1.5.0.6"
"""
for line in testdata.split("\n"):
if not line:
continue
fields = getLogLineBNF().parseString(line)
print(fields.dump())
# ~ print repr(fields)
# ~ for k in fields.keys():
# ~ print "fields." + k + " =", fields[k]
print()
| mit | 598711f552a4af2677f8e66ff1294ec3 | 33.61165 | 253 | 0.601964 | 3.036627 | false | false | false | false |
pyparsing/pyparsing | examples/partial_gene_match.py | 1 | 2400 | # partial_gene_match.py
#
# Example showing how to use the CloseMatch class, to find strings in a gene with up to 'n' mismatches
#
import pyparsing as pp
from urllib.request import urlopen
# read in a bunch of genomic data
data_url = "http://toxodb.org/common/downloads/release-6.0/Tgondii/TgondiiApicoplastORFsNAs_ToxoDB-6.0.fasta"
with urlopen(data_url) as datafile:
fastasrc = datafile.read().decode()
# define parser to extract gene definitions
"""
Sample header:
>NC_001799-6-2978-2778 | organism=Toxoplasma_gondii_RH | location=NC_001799:2778-2978(-) | length=201
"""
integer = pp.pyparsing_common.integer
genebit = pp.Group(
">"
+ pp.Word(pp.alphanums.upper() + "-_")("gene_id")
+ "|"
+ pp.Word(pp.printables)("organism")
+ "|"
+ pp.Word(pp.printables)("location")
+ "|"
+ "length="
+ integer("gene_len")
+ pp.LineEnd()
+ pp.Word("ACGTN")[1, ...].addParseAction("".join)("gene")
)
# read gene data from .fasta file - takes just a few seconds
# An important aspect of this parsing process is the reassembly of all the separate lines of the
# gene into a single scannable string. Just searching the raw .fasta file could overlook matches
# if the match is broken up across separate lines. The parse action in the genebit parser does
# this reassembly work.
genedata = genebit[1, ...].parseString(fastasrc)
# using the genedata extracted above, look for close matches of a gene sequence
searchseq = pp.CloseMatch("TTAAATCTAGAAGAT", 3)
for g in genedata:
show_header = True
# scan for close matches, list out found strings, and mark mismatch locations
for t, startLoc, endLoc in searchseq.scanString(g.gene, overlap=True):
if show_header:
# only need to show the header once
print("%s/%s/%s (%d)" % (g.gene_id, g.organism, g.location, g.gene_len))
print("-" * 24)
show_header = False
matched = t[0]
mismatches = t["mismatches"]
print("MATCH:", searchseq.match_string)
print("FOUND:", matched)
if mismatches:
print(
" ",
"".join(
"*" if i in mismatches else " "
for i, c in enumerate(searchseq.match_string)
),
)
else:
print("<exact match>")
print("at location", startLoc)
print()
| mit | e07130f75b02f2d98fd0b46062ea5779 | 33.782609 | 109 | 0.62625 | 3.453237 | false | false | false | false |
codeeu/coding-events | web/processors/media.py | 2 | 1247 | import os
import uuid
from django.conf import settings
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.template.defaultfilters import slugify
from PIL import Image as PilImage
import StringIO
class UploadImageError(Exception):
pass
class ImageSizeTooLargeException(Exception):
pass
def process_image(image_file):
"""
resize an uploaded image and convert to png format
"""
size = 256, 512
image_name = image_file.name
image_basename, image_format = os.path.splitext(image_name)
new_image_name = "%s_%s.png" % (slugify(image_basename), uuid.uuid4())
try:
im = PilImage.open(image_file)
if max(im.size) > max(size):
im.thumbnail(size, PilImage.ANTIALIAS)
thumb_io = StringIO.StringIO()
im.save(thumb_io, format='png')
return InMemoryUploadedFile(
thumb_io,
None,
new_image_name,
'image/png',
thumb_io.len,
None)
except IOError as e:
msg = 'Failed while processing image (image_file=%s, image_name=%s, error_number=%s, error=%s).' \
% (image_file, new_image_name, e.errno, e.strerror, )
raise UploadImageError(msg)
| mit | cfaf599f726e9a9747c32d91bfc86300 | 26.108696 | 106 | 0.636728 | 3.778788 | false | false | false | false |
codeeu/coding-events | api/migrations/0009_auto__add_field_userprofile_role__add_field_userprofile_is_main_contac.py | 2 | 8691 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.role'
db.add_column(u'api_userprofile', 'role',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
# Adding field 'UserProfile.is_main_contact'
db.add_column(u'api_userprofile', 'is_main_contact',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
    def backwards(self, orm):
        """Reverse: drop the 'role' and 'is_main_contact' columns again."""
        # Deleting field 'UserProfile.role'
        db.delete_column(u'api_userprofile', 'role')

        # Deleting field 'UserProfile.is_main_contact'
        db.delete_column(u'api_userprofile', 'is_main_contact')
models = {
'api.event': {
'Meta': {'ordering': "['start_date']", 'object_name': 'Event'},
'audience': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'event_audience'", 'symmetrical': 'False', 'to': "orm['api.EventAudience']"}),
'contact_person': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
'event_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'geoposition': ('geoposition.fields.GeopositionField', [], {'default': "'0,0'", 'max_length': '42'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'organizer': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 9, 29, 0, 0)'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '50'}),
'theme': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'event_theme'", 'symmetrical': 'False', 'to': "orm['api.EventTheme']"}),
'title': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'api.eventaudience': {
'Meta': {'object_name': 'EventAudience'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'api.eventtheme': {
'Meta': {'ordering': "['order', 'name']", 'object_name': 'EventTheme'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'api.socialaccountlist': {
'Meta': {'object_name': 'SocialAccountList'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'api.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'bio': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_main_contact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'role': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['api'] | mit | 388b49a12ab53fb1dbb1529b1b08458d | 70.245902 | 195 | 0.55195 | 3.665542 | false | false | false | false |
codeeu/coding-events | web/tests/test_event_views.py | 2 | 17216 | # coding=utf-8
import datetime
import pytest
import StringIO
import os
from py.path import local
from django.test import TestCase
from django.test import Client
from django.core.urlresolvers import reverse
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.contrib.auth.models import User, Group
from api.models.events import Event
from api.models import UserProfile
from avatar.models import Avatar
from avatar.util import get_primary_avatar
from web.processors.event import create_or_update_event
from web.processors.event import count_approved_events_for_country
from web.tests import EventFactory, ApprovedEventFactory
class EventViewsTestCase(TestCase):
    """Integration tests for the public event views (index and search)."""

    def setUp(self):
        # One user with a profile plus a single PENDING event, so that
        # approved-only listings can be checked against hidden content.
        self.u1 = User.objects.create(username='user1')
        self.up1 = UserProfile.objects.create(user=self.u1)

        pending = Event.objects.create(
            organizer="Organizer 1",
            creator=User.objects.filter(pk=1)[0],
            title="Event 1 - Pending",
            description="Some description - Pending",
            location="Near here",
            start_date=datetime.datetime.now() + datetime.timedelta(days=1, hours=3),
            end_date=datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
            event_url="http://eee.com",
            contact_person="ss@ss.com",
            country="SI",
            pub_date=datetime.datetime.now(),
            tags=["tag1", "tag2"])

    # NOTE(review): django.test.TestCase already supplies self.client per
    # test — confirm this class-level Client is still needed.
    client = Client()

    def test_index_view_without_approved_events(self):
        # GeoIP for a Slovenian address should centre the map on Slovenia.
        response = self.client.get(
            reverse('web.index'), {}, REMOTE_ADDR='93.103.53.11')
        self.assertEquals(200, response.status_code)
        self.assertEquals((46.0, 15.0), response.context['lan_lon'])
        self.assertEquals('SI', response.context['country']['country_code'])
        self.assertTemplateUsed(response, 'pages/index.html')

    def test_index_view_changing_remote_in_request(self):
        # setup
        # The X-Forwarded-For proxy chain must be honoured for GeoIP too.
        response = self.client.get(
            reverse('web.index'),
            {},
            HTTP_X_FORWARDED_FOR='93.103.53.11, 93.103.53.11')
        # assert
        self.assertEquals(200, response.status_code)
        self.assertEquals((46.0, 15.0), response.context['lan_lon'])

    def test_search_events_with_search_query(self):
        # Title search matches case-insensitively.
        ApprovedEventFactory.create(title='Event Arglebargle - Approved')
        response = self.client.get(
            reverse('web.search_events'), {'q': 'arglebargle'}, REMOTE_ADDR='93.103.53.11')
        self.assertEquals(1, response.context['events'].count())
        self.assertEquals('SI', response.context['country'])

    def test_search_events_with_unicode_tag_in_search_query(self):
        # Non-ASCII tags must be searchable as typed.
        ApprovedEventFactory.create(tags=["jabolčna čežana", "José", "Django"])
        response = self.client.get(
            reverse('web.search_events'), {'q': 'čežana'}, REMOTE_ADDR='93.103.53.11')
        self.assertEquals(1, response.context['events'].count())
        self.assertEquals('SI', response.context['country'])

    def test_search_events_with_search_query_multiple_events(self):
        # With matching events in two countries, results are filtered to
        # the requester's GeoIP country (SI) only.
        approved1 = ApprovedEventFactory.create(
            title="Event Arglebargle - Approved", country="SI")
        approved2 = ApprovedEventFactory.create(
            title="Event Arglebargle - Approved", country="AT")
        response = self.client.get(
            reverse('web.search_events'), {'q': 'arglebargle'}, REMOTE_ADDR='93.103.53.11')
        self.assertEquals(1, response.context['events'].count())
        self.assertEquals('SI', response.context['country'])
        approved1.delete()
        approved2.delete()

    def test_view_event_without_picture(self):
        # The detail page renders fine for an event without a picture.
        test_event = EventFactory.create()
        response = self.client.get(
            reverse('web.view_event', args=[test_event.pk, test_event.slug]))
        assert response.status_code == 200
        assert test_event.title in response.content
        test_event.delete()
@pytest.mark.django_db
def test_create_event_with_image(admin_user, admin_client, db):
    """Submitting the add-event form with a picture stores and shows it."""
    # Build an in-memory JPEG upload from a bundled team photo.
    with open(local(__file__).dirname + '/../../static/img/team/alja.jpg') as fp:
        io = StringIO.StringIO()
        io.write(fp.read())
        uploaded_picture = InMemoryUploadedFile(
            io, None, "alja.jpg", "jpeg", io.len, None)
        uploaded_picture.seek(0)

    event_data = {
        'audience': [4, 5],
        'theme': [1, 2],
        'contact_person': u'test@example.com',
        'country': u'SI',
        'description': u'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\r\ntempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\r\nquis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\r\nconsequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\r\ncillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non\r\nproident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
        'event_url': u'',
        'location': u'Ljubljana, Slovenia',
        'organizer': u'Mozilla Slovenija',
        'picture': uploaded_picture,
        'start_date': datetime.datetime.now(),
        'end_date': datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
        'tags': [u'css', u'html', u'web'],
        'title': u'Webmaker Ljubljana',
        'user_email': u'test@example.com'
    }
    # A successful submission redirects (302) to the new event page, which
    # must reference the stored picture (renamed, but prefix-preserving).
    response = admin_client.post(reverse('web.add_event'), event_data)
    assert response.status_code == 302
    response = admin_client.get(response.url)
    assert 'event_picture/alja' in response.content
@pytest.mark.django_db
def test_edit_event_with_image(admin_user, admin_client, db):
    """Editing swaps the stored picture (deleting the old file); clearing
    the picture field removes it from the page altogether."""
    # First create event
    with open(local(__file__).dirname + '/../../static/img/team/alja.jpg') as fp:
        io = StringIO.StringIO()
        io.write(fp.read())
        uploaded_picture = InMemoryUploadedFile(
            io, None, "alja17.jpg", "jpeg", io.len, None)
        uploaded_picture.seek(0)

    event_data = {
        'audience': [4, 5],
        'theme': [1, 2],
        'contact_person': u'test@example.com',
        'country': u'SI',
        'description': u'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\r\ntempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\r\nquis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\r\nconsequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\r\ncillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non\r\nproident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
        'event_url': u'',
        'location': u'Ljubljana, Slovenia',
        'organizer': u'Mozilla Slovenija',
        'picture': uploaded_picture,
        'start_date': datetime.datetime.now(),
        'end_date': datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
        'tags': [u'css', u'html', u'web'],
        'title': u'Webmaker Ljubljana',
        'user_email': u'test@example.com'
    }
    response = admin_client.post(reverse('web.add_event'), event_data)
    assert response.status_code == 302
    response = admin_client.get(response.url)
    assert 'event_picture/alja' in response.content

    event = Event.objects.latest('id')

    # Then edit it
    with open(local(__file__).dirname + '/../../static/img/team/ercchy.jpg') as fp:
        io = StringIO.StringIO()
        io.write(fp.read())
        uploaded_picture = InMemoryUploadedFile(
            io, None, "ercchy.jpg", "jpeg", io.len, None)
        uploaded_picture.seek(0)

    event_data = {
        'audience': [6, 7],
        'theme': [3, 4],
        'contact_person': u'another_person@example.com',
        'country': u'SI',
        'description': u'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\r\ntempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\r\nquis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\r\nconsequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\r\ncillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non\r\nproident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
        'event_url': u'',
        'location': u'Ljubljana, Slovenia',
        'organizer': u'Mozilla Slovenija',
        'picture': uploaded_picture,
        'start_date': datetime.datetime.now(),
        'end_date': datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
        'tags': [u'css', u'html', u'web'],
        'title': u'Webmaker Ljubljana',
        'user_email': u'another_person@example.com'
    }
    response_edited = admin_client.post(
        reverse('web.edit_event', args=[event.id]),
        event_data)
    assert response_edited.status_code == 302
    response = admin_client.get(event.get_absolute_url())
    # The new picture replaces the old one on the event page.
    assert 'event_picture/alja17' not in response.content
    assert 'event_picture/ercchy' in response.content

    # Check if the old event picture has been deleted
    old_picture = os.path.isfile(
        local(__file__).dirname +
        '/../../media/event_picture/alja17.jpg')
    assert not old_picture

    # Finally, edit again with an empty picture field to remove it.
    event_data = {
        'audience': [6, 7],
        'theme': [3, 4],
        'contact_person': u'another_person@example.com',
        'country': u'SI',
        'description': u'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\r\ntempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\r\nquis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\r\nconsequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\r\ncillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non\r\nproident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
        'event_url': u'',
        'location': u'Ljubljana, Slovenia',
        'organizer': u'Mozilla Slovenija',
        'picture': '',
        'start_date': datetime.datetime.now(),
        'end_date': datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
        'tags': [u'css', u'html', u'web'],
        'title': u'Webmaker Ljubljana',
        'user_email': u'another_person@example.com'
    }
    response_edited = admin_client.post(
        reverse('web.edit_event', args=[event.id]),
        event_data)
    assert response_edited.status_code == 302
    response = admin_client.get(event.get_absolute_url())
    assert 'event_picture/ercchy' not in response.content
@pytest.mark.django_db
def test_edit_event_without_end_date(db, admin_user, admin_client):
    """A blank end_date must re-render the form (200) with a field error
    rather than saving the event."""
    event = EventFactory.create(creator=admin_user)
    event_data = {
        'audience': [6, 7],
        'theme': [3, 4],
        'contact_person': u'another_person@example.com',
        'country': u'SI',
        'description': u'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\r\ntempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\r\nquis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\r\nconsequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\r\ncillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non\r\nproident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
        'event_url': u'',
        'location': u'Ljubljana, Slovenia',
        'organizer': u'Mozilla Slovenija',
        'picture': '',
        'start_date': datetime.datetime.now(),
        'end_date': '',
        'tags': [u'css', u'html', u'web'],
        'title': u'Webmaker Ljubljana',
        'user_email': u'another_person@example.com'
    }
    response_edited = admin_client.post(
        reverse('web.edit_event', args=[event.id]),
        event_data)
    assert response_edited.status_code == 200
    assert 'end_date' in response_edited.context['form'].errors
    event.delete()
@pytest.mark.django_db
def test_scoreboard_links_and_results(admin_user, db, client):
    """The scoreboard shows each country's approved-event count and links
    to the matching (past-inclusive) search results."""
    test_country_name = "Slovenia"
    test_country_code = "SI"
    search_url = (reverse('web.search_events') +
                  "?country_code=%s&past=yes" % test_country_code)

    event_data = {
        'audience': [3],
        'theme': [1, 2],
        'country': test_country_code,
        'description': u'Lorem ipsum dolor sit amet.',
        'location': test_country_name,
        'organizer': u'testko',
        "creator": admin_user,
        'start_date': datetime.datetime.now(),
        'end_date': datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
        'title': u'Test Approved Event',
        'status': "APPROVED",
    }
    test_approved_event = create_or_update_event(event_id=None, **event_data)

    # NOTE(review): event_count stays unbound (NameError below) if the test
    # country is missing from the aggregate, i.e. if event creation failed.
    for country in count_approved_events_for_country():
        if country['country_code'] == test_country_code:
            event_count = country['events']

    response = client.get(reverse('web.scoreboard'))

    # We're expecting to see this bit of HTML code with the right
    # search URL and the right count for events
    expected_result = '''
<span class="country-name">%s</span><p> is participating with </p>
<a href="%s">
<span class="event-number">%s event
''' % (test_country_name, search_url, event_count)
    # Tabs and newlines are stripped from both sides before comparing, so
    # the literal's indentation is irrelevant to the assertion.
    expected_result = expected_result.replace('\t', '').replace('\n', '')
    scoreboard_content = response.content.replace('\t', '').replace('\n', '')

    # The search URL shown on scoreboard also has to match search results
    search_response = client.get(search_url)
    expected_search_result = '<div class="search-counter">%s event' % event_count

    assert expected_result in scoreboard_content
    assert expected_search_result in search_response.content
    test_approved_event.delete()
@pytest.mark.django_db
def test_ambassadors_list(db, client):
    """An ambassador (user in the 'ambassadors' group, with an avatar)
    should be rendered on the ambassadors page under their country."""
    test_country_name = "Austria"
    test_country_code = "AT"
    test_username = 'test-amb'
    test_email = 'test@example.com'
    test_first_name = 'Testko'
    test_last_name = 'Test'
    test_full_name = test_first_name + " " + test_last_name
    test_ambassador = User.objects.create(username=test_username,
                                          email=test_email,
                                          first_name=test_first_name,
                                          last_name=test_last_name)
    test_ambassador_profile = UserProfile.objects.create(
        user=test_ambassador, country=test_country_code)
    group = Group.objects.get(name="ambassadors")
    group.user_set.add(test_ambassador)

    # Give the ambassador a primary avatar built from a bundled photo.
    with open(local(__file__).dirname + '/../../static/img/team/alja.jpg') as fp:
        io = StringIO.StringIO()
        io.write(fp.read())
        uploaded_picture = InMemoryUploadedFile(
            io, None, "alja17.jpg", "jpeg", io.len, None)
        uploaded_picture.seek(0)

    avatar = Avatar(user=test_ambassador, primary=True)
    avatar.avatar.save(uploaded_picture.name, uploaded_picture)
    avatar.save()
    new_avatar = get_primary_avatar(test_ambassador, size=80)
    test_amb_avatar = new_avatar.avatar_url(80)

    response = client.get(reverse('web.ambassadors'))

    # We're expecting to the Ambassador under the right country,
    # with the right avatar and the right email contact
    expected_result = '''
<h2 class="clearfix">%s</h2>
<div class="ambassador clearfix">
<img src="%s" alt="%s" width="80" height="80" class="img-circle" />
<h4>%s <span> <a alt="Send me an email" href="mailto:%s"><i class="fa fa-envelope"></i></a>
''' % (test_country_name, test_amb_avatar, test_username, test_full_name, test_email)
    expected_result = expected_result.replace('\t', '').replace('\n', '')
    ambassadors_content = response.content.replace('\t', '').replace('\n', '')

    # Check this test and modify it to integrating the Ambassadors page changes
    # NOTE(review): the actual assertion is disabled pending page redesign.
    # assert expected_result in ambassadors_content
    test_ambassador.delete()
    avatar.delete()
@pytest.mark.django_db
def test_nonexistent_event(db, client):
    """An event id/slug pair that was never created yields a 404 page."""
    url = reverse('web.view_event', args=[1234, 'shouldnt-exist'])
    missing_page = client.get(url)
    assert missing_page.status_code == 404
@pytest.mark.django_db
def test_geoip_slovenian_ip(db, client):
    """A Slovenian source IP personalises the landing page to Slovenia."""
    landing = client.get('/', REMOTE_ADDR='93.103.53.1')
    assert 'List all events in <span id="country"> Slovenia' in landing.content
@pytest.mark.django_db
def test_geoip_invalid_ip(db, client):
    """An unresolvable IP falls back to the generic, country-less prompt."""
    landing = client.get('/', REMOTE_ADDR='127.0.0.1')
    assert 'List all events' in landing.content
    assert 'List all events <span' not in landing.content
@pytest.mark.django_db
def test_list_events_for_country_code(db, client):
    """The per-country event listing renders for a valid ISO code."""
    listing = client.get(reverse('web.view_event_by_country', args=['SI']))
    assert listing.status_code == 200
skoczen/will | will/backends/io_adapters/shell.py | 5 | 3879 | import cmd
import random
import sys
import time
import logging
import requests
import threading
import readline
import traceback
import warnings
from will import settings
from will.utils import Bunch, UNSURE_REPLIES, html_to_text
from will.abstractions import Message, Person, Channel
from .base import StdInOutIOBackend
warnings.filterwarnings("ignore", category=UserWarning, module='bs4')
class ShellBackend(StdInOutIOBackend):
    """IO backend for talking to Will in a local terminal session.

    Replies are printed to stdout as plain text; input arrives as
    "message.incoming.stdin" events from the StdInOutIOBackend base.
    """

    friendly_name = "Interactive Shell"
    internal_name = "will.backends.io_adapters.shell"
    # The single human on the other end of the shell session.
    partner = Person(
        id="you",
        handle="shelluser",
        mention_handle="@shelluser",
        source=Bunch(),
        name="Friend",
    )

    def send_direct_message(self, message_body, **kwargs):
        # HTML produced by plugins is flattened to plain text for the terminal.
        print("Will: %s" % html_to_text(message_body))

    def send_room_message(self, room_id, message_body, html=False, color="green", notify=False, **kwargs):
        # The shell has no rooms: room messages are shown like direct ones.
        # html/color/notify are accepted for interface parity and ignored.
        print("Will: %s" % html_to_text(message_body))

    def set_room_topic(self, topic):
        print("Will: Let's talk about %s" % (topic, ))

    def normalize_incoming_event(self, event):
        """Turn a raw stdin event into Will's Message abstraction.

        Returns None for any event type the shell does not understand.
        """
        if event["type"] == "message.incoming.stdin":
            # Everything typed at the prompt counts as a direct, private message.
            m = Message(
                content=event.data.content.strip(),
                type=event.type,
                is_direct=True,
                is_private_chat=True,
                is_group_chat=False,
                backend=self.internal_name,
                sender=self.partner,
                will_is_mentioned=False,
                will_said_it=False,
                backend_supports_acl=False,
                original_incoming_event=event
            )
            return m
        else:
            # An event type the shell has no idea how to handle.
            return None

    def handle_outgoing_event(self, event):
        """Print Will's outgoing events, then re-issue the "You:" prompt."""
        # Print any replies.
        if event.type in ["say", "reply"]:
            self.send_direct_message(event.content)

        if event.type in ["topic_change", ]:
            self.set_room_topic(event.content)
        elif event.type == "message.no_response":
            # Only shrug at non-empty input; empty lines stay silent.
            if event.data and hasattr(event.data, "original_incoming_event") and len(event.data.original_incoming_event.data.content) > 0:
                self.send_direct_message(random.choice(UNSURE_REPLIES))

        # Regardless of whether or not we had something to say,
        # give the user a new prompt.
        sys.stdout.write("You: ")
        sys.stdout.flush()

    def bootstrap(self):
        # Bootstrap must provide a way to to have:
        # a) self.normalize_incoming_event fired, or incoming events put into self.incoming_queue
        # b) any necessary threads running for a)
        # c) self.me (Person) defined, with Will's info
        # d) self.people (dict of People) defined, with everyone in an organization/backend
        # e) self.channels (dict of Channels) defined, with all available channels/rooms.
        #    Note that Channel asks for members, a list of People.
        # f) A way for self.handle, self.me, self.people, and self.channels to be kept accurate,
        #    with a maximum lag of 60 seconds.

        # The shell has no organization: no people, no channels — just Will.
        self.people = {}
        self.channels = {}
        self.me = Person(
            id="will",
            handle="will",
            mention_handle="@will",
            source=Bunch(),
            name="William T. Botterton",
        )

        # Do this to get the first "you" prompt.
        self.pubsub.publish('message.incoming.stdin', (Message(
            content="",
            type="message.incoming",
            is_direct=True,
            is_private_chat=True,
            is_group_chat=False,
            backend=self.internal_name,
            sender=self.partner,
            will_is_mentioned=False,
            will_said_it=False,
            backend_supports_acl=False,
            original_incoming_event={}
        ))
        )
skoczen/will | will/abstractions.py | 5 | 5128 | # -- coding: utf-8 -
import datetime
import hashlib
import logging
from pytz import timezone as pytz_timezone
from will.utils import Bunch
class Message(object):
    """A normalized chat message, independent of the originating backend.

    Construction requires every field in REQUIRED_FIELDS; all keyword
    arguments (required or extra) are attached as instance attributes.
    A timestamp defaults to now, content is normalized via
    _clean_message_content, and an md5 hash of (timestamp, content)
    identifies the message for threading/deduplication.
    """
    will_internal_type = "Message"
    REQUIRED_FIELDS = [
        "is_direct",
        "is_private_chat",
        "is_group_chat",
        "will_is_mentioned",
        "will_said_it",
        "sender",
        "backend_supports_acl",
        "content",
        "backend",
        "original_incoming_event",
    ]

    def __init__(self, *args, **kwargs):
        # Fail fast on a malformed construction from an IO backend.
        for f in self.REQUIRED_FIELDS:
            if f not in kwargs:
                raise Exception("Missing %s in Message construction." % f)

        for f in kwargs:
            self.__dict__[f] = kwargs[f]

        if "timestamp" in kwargs:
            self.timestamp = kwargs["timestamp"]
        else:
            self.timestamp = datetime.datetime.now()

        # Clean content.
        self.content = self._clean_message_content(self.content)

        # NOTE: strftime("%s") (epoch seconds) is a platform-specific
        # extension, not portable C strftime.
        h = hashlib.md5()
        h.update(self.timestamp.strftime("%s").encode("utf-8"))
        h.update(self.content.encode("utf-8"))
        self.hash = h.hexdigest()
        self.metadata = Bunch()

        # Thread back to the originating incoming event when one is known;
        # otherwise this message is its own root.
        if "original_incoming_event_hash" not in kwargs:
            if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
                self.original_incoming_event_hash = self.original_incoming_event.hash
            else:
                self.original_incoming_event_hash = self.hash

    def __unicode__(self, *args, **kwargs):
        # Truncate long content for readable log lines.
        if len(self.content) > 20:
            content_str = "%s..." % self.content[:20]
        else:
            content_str = self.content

        return u"Message: \"%s\"\n  %s (%s) " % (
            content_str,
            self.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
            self.backend,
        )

    def __str__(self, *args, **kwargs):
        return self.__unicode__(*args, **kwargs)

    def _clean_message_content(self, s):
        # Clear out 'smart' quotes and the like.
        s = s.replace("’", "'").replace("‘", "'").replace('“', '"').replace('”', '"')
        s = s.replace(u"\u2018", "'").replace(u"\u2019", "'")
        s = s.replace(u"\u201c", '"').replace(u"\u201d", '"')
        return s
class Event(Bunch):
    """A generic internal event (non-message) flowing through Will's pubsub."""

    will_internal_type = "Event"
    REQUIRED_FIELDS = [
        "type",
        "version",
    ]

    def __init__(self, *args, **kwargs):
        super(Event, self).__init__(*args, **kwargs)
        # NOTE(review): version is forced to 1 *before* the required-field
        # check below, so the "version" entry in REQUIRED_FIELDS can never
        # actually fail — confirm whether that is intentional.
        self.version = 1
        for f in self.REQUIRED_FIELDS:
            if not f in kwargs and not hasattr(self, f):
                raise Exception("Missing %s in Event construction." % f)

        if "timestamp" in kwargs:
            self.timestamp = kwargs["timestamp"]
        else:
            self.timestamp = datetime.datetime.now()

        # md5 of (timestamp, type) identifies this event instance.
        # NOTE: strftime("%s") is a platform-specific extension.
        h = hashlib.md5()
        h.update(self.timestamp.strftime("%s").encode("utf-8"))
        h.update(self.type.encode("utf-8"))
        self.hash = h.hexdigest()

        # Thread back to the originating incoming event when one is known;
        # otherwise this event is its own root.
        if not "original_incoming_event_hash" in kwargs:
            if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
                self.original_incoming_event_hash = self.original_incoming_event.hash
            else:
                self.original_incoming_event_hash = self.hash
class Person(Bunch):
    """A chat user, normalized across backends.

    Requires the fields in REQUIRED_FIELDS; derives first_name from name
    when absent, and converts a tz-database name in `timezone` into a
    pytz timezone (with its UTC offset) when provided.
    """
    will_is_person = True
    will_internal_type = "Person"
    REQUIRED_FIELDS = [
        "id",
        "handle",
        "mention_handle",
        "source",
        "name",
        "first_name"
        # "timezone",
    ]

    def __init__(self, *args, **kwargs):
        super(Person, self).__init__(*args, **kwargs)
        for f in kwargs:
            self.__dict__[f] = kwargs[f]

        # Provide first_name
        if "first_name" not in kwargs:
            self.first_name = self.name.split(" ")[0]

        for f in self.REQUIRED_FIELDS:
            if not hasattr(self, f):
                raise Exception("Missing %s in Person construction." % f)

        # Set TZ offset.
        if hasattr(self, "timezone") and self.timezone:
            self.timezone = pytz_timezone(self.timezone)
            self.utc_offset = self.timezone._utcoffset
        else:
            self.timezone = False
            self.utc_offset = False

    @property
    def nick(self):
        # logging.warn() is a deprecated alias; logging.warning() is the
        # supported spelling.
        logging.warning("sender.nick is deprecated and will be removed eventually. Please use sender.handle instead!")
        return self.handle
class Channel(Bunch):
    """A chat room/channel, normalized across backends."""

    will_internal_type = "Channel"
    REQUIRED_FIELDS = [
        "id",
        "name",
        "source",
        "members",
    ]

    def __init__(self, *args, **kwargs):
        super(Channel, self).__init__(*args, **kwargs)
        for f in self.REQUIRED_FIELDS:
            if not f in kwargs:
                raise Exception("Missing %s in Channel construction." % f)
        for f in kwargs:
            self.__dict__[f] = kwargs[f]

        # members is iterated as id -> member; anything without the
        # will_is_person marker set truthy is rejected.  NOTE(review): an
        # object lacking the attribute entirely raises AttributeError here
        # rather than the friendlier Exception below.
        for id, m in self.members.items():
            if not m.will_is_person:
                raise Exception("Someone in the member list is not a Person instance.\n%s" % m)
skoczen/will | will/backends/storage/couchbase_backend.py | 5 | 2382 | from six.moves.urllib import parse
from couchbase import Couchbase, exceptions as cb_exc
from .base import BaseStorageBackend
class CouchbaseStorage(BaseStorageBackend):
    """
    A storage backend using Couchbase.

    You must supply a COUCHBASE_URL setting that is passed through urlparse.
    All querystring parameters are passed through to the Couchbase client.

    Examples:

    * couchbase:///bucket
    * couchbase://hostname/bucket
    * couchbase://host1,host2/bucket
    * couchbase://hostname/bucket?password=123abc&timeout=5
    """
    required_settings = [
        {
            "name": "COUCHBASE_URL",
            "obtain_at": """You must supply a COUCHBASE_URL setting that is passed through urlparse.
All parameters supplied get passed through to Couchbase
Examples:
* couchbase:///bucket
* couchbase://hostname/bucket
* couchbase://host1,host2/bucket
* couchbase://hostname/bucket?password=123abc&timeout=55""",
        },
    ]

    def __init__(self, settings):
        self.verify_settings(quiet=True)
        url = parse.urlparse(settings.COUCHBASE_URL)
        # BUGFIX: parse_qsl tolerates an empty query string.  The previous
        # manual split('&')/split('=') raised ValueError for URLs with no
        # querystring (e.g. "couchbase://hostname/bucket"), which the
        # docstring above documents as valid.  Values are URL-decoded.
        params = dict(parse.parse_qsl(url.query))
        # Comma-separated hosts become a list for the client.
        self.couchbase = Couchbase(host=url.hostname.split(','),
                                   bucket=url.path.strip('/'),
                                   port=url.port or 8091,
                                   **params)

    def do_save(self, key, value, expire=None):
        """Store value under key, expiring after `expire` seconds if given."""
        res = self.couchbase.set(key, value, ttl=expire)
        return res.success

    def clear(self, key):
        """Delete a single key; returns True on success."""
        res = self.couchbase.delete(key)
        return res.success

    def clear_all_keys(self):
        """
        Couchbase doesn't support clearing all keys (flushing) without the
        Admin username and password. It's not appropriate for Will to have
        this information so we don't support clear_all_keys for CB.
        """
        return "Sorry, you must flush the Couchbase bucket from the Admin UI"

    def do_load(self, key):
        """Return the stored value for key, or None if the key is missing."""
        try:
            res = self.couchbase.get(key)
            return res.value
        except cb_exc.NotFoundError:
            pass

    def size(self):
        """
        Couchbase doesn't support getting the size of the DB
        """
        return "Unknown (See Couchbase Admin UI)"
def bootstrap(settings):
    """Factory entry point used by Will's storage loader."""
    backend = CouchbaseStorage(settings)
    return backend
skoczen/will | will/backends/execution/base.py | 2 | 3800 | import imp
import logging
import signal
import traceback
from will import settings
from will.decorators import require_settings
from will.acl import verify_acl
from will.abstractions import Event
from multiprocessing import Process
class ExecutionBackend(object):
is_will_execution_backend = True
    def handle_execution(self, message, context):
        """Decide whether/how to execute `context` for `message`.

        Concrete execution backends must override this.
        """
        raise NotImplementedError
    def no_response(self, message):
        """Publish that no plugin produced a response for `message`."""
        self.bot.pubsub.publish(
            "message.no_response",
            message.data,
            reference_message=message.data.original_incoming_event
        )
def not_allowed(self, message, explanation):
self.bot.pubsub.publish(
"message.outgoing.%s" % message.data.backend,
Event(
type="reply",
content=explanation,
source_message=message,
),
reference_message=message.data.original_incoming_event
)
def execute(self, message, option):
if "acl" in option.context:
acl = option.context["acl"]
if type(acl) == type("test"):
acl = [acl]
allowed = True
if len(acl) > 0:
allowed = verify_acl(message, acl)
if not allowed:
acl_list = ""
more_than_one_s = ""
if len(acl) > 1:
more_than_one_s = "s"
for i in range(0, len(acl)):
if i == 0:
acl_list = "%s" % acl[i]
elif i == len(acl) - 1:
acl_list = "%s or %s" % (acl_list, acl[i])
else:
acl_list = "%s, %s" % (acl_list, acl[i])
explanation = "Sorry, but I don't have you listed in the %s group%s, which is required to do what you asked." % (acl_list, more_than_one_s)
self.not_allowed(
message,
explanation
)
return
if "say_content" in option.context:
# We're coming from a generation engine like a chatterbot, which doesn't *do* things.
self.bot.pubsub.publish(
"message.outgoing.%s" % message.data.backend,
Event(
type="reply",
content=option.context["say_content"],
source_message=message,
),
reference_message=message.data.original_incoming_event
)
else:
module = imp.load_source(option.context.plugin_info["parent_name"], option.context.plugin_info["parent_path"])
cls = getattr(module, option.context.plugin_info["name"])
instantiated_module = cls(message=message)
method = getattr(instantiated_module, option.context.function_name)
thread_args = [message, ] + option.context["args"]
self.run_execute(
method,
*thread_args,
**option.context.search_matches
)
def run_execute(self, target, *args, **kwargs):
try:
t = Process(
target=target,
args=args,
kwargs=kwargs,
)
self.bot.running_execution_threads.append(t)
t.start()
except (KeyboardInterrupt, SystemExit):
pass
except:
logging.critical("Error running %s: \n%s" % (target, traceback.format_exc()))
def __init__(self, bot=None, *args, **kwargs):
self.bot = bot
if not bot:
raise Exception("Can't proceed without an instance of bot passed to the backend.")
super(ExecutionBackend, self).__init__(*args, **kwargs)
| mit | 55f0af8bd83a6f9da8a1284e36e67f64 | 33.234234 | 155 | 0.519737 | 4.470588 | false | false | false | false |
agdsn/sipa | sipa/blueprints/documents.py | 1 | 2011 | import os
from flask import Blueprint, send_from_directory, current_app
from flask_login import current_user
from flask.views import View
from sipa.base import login_manager
bp_documents = Blueprint('documents', __name__)
class StaticFiles(View):
    """Serve files from ``directory``, optionally gated behind login/membership."""

    def __init__(self, directory, login_required=False, member_required=False):
        self.directory = directory
        self.login_required = login_required
        self.member_required = member_required

    def dispatch_request(self, filename):
        # Check login first, then membership; either failure triggers the
        # login manager's unauthorized handling.
        if self.login_required and not current_user.is_authenticated:
            return current_app.login_manager.unauthorized()
        if self.member_required and not current_user.is_member:
            return current_app.login_manager.unauthorized()
        # Relative directories are resolved against the application package.
        directory = self.directory
        if not os.path.isabs(directory):
            directory = os.path.join(current_app.root_path, directory)
        max_age = current_app.get_send_file_max_age(filename)
        return send_from_directory(directory, filename, max_age=max_age)
# Public event images: freely accessible, exempt from the login redirect.
bp_documents.add_url_rule('/images/<path:filename>',
                          view_func=StaticFiles.as_view('show_image',
                                                        '../content/images'))
login_manager.ignore_endpoint('documents.show_image')
# Public documents: likewise accessible without authentication.
bp_documents.add_url_rule('/documents/<path:filename>',
                          view_func=StaticFiles.as_view('show_document',
                                                        '../content/documents'))
login_manager.ignore_endpoint('documents.show_document')
# Restricted documents: only authenticated members may fetch these.
bp_documents.add_url_rule('/documents_restricted/<path:filename>',
                          view_func=StaticFiles.as_view('show_document_restricted',
                                                        '../content/documents_restricted',
                                                        login_required=True,
                                                        member_required=True))
indico/indico-plugins | payment_sixpay/indico_payment_sixpay/controllers.py | 1 | 16728 | # This file is part of the Indico plugins.
# Copyright (C) 2017 - 2022 Max Fischer, Martin Claus, CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
import json
import time
from urllib.parse import urljoin
import requests
from flask import flash, redirect, request
from requests import RequestException
from werkzeug.exceptions import BadRequest, NotFound
from indico.core.plugins import url_for_plugin
from indico.modules.events.payment.controllers import RHPaymentBase
from indico.modules.events.payment.models.transactions import TransactionAction
from indico.modules.events.payment.notifications import notify_amount_inconsistency
from indico.modules.events.payment.util import TransactionStatus, get_active_payment_plugins, register_transaction
from indico.modules.events.registration.models.registrations import Registration
from indico.web.flask.util import url_for
from indico.web.rh import RH
from indico_payment_sixpay import _
from indico_payment_sixpay.plugin import SixpayPaymentPlugin
from indico_payment_sixpay.util import (PROVIDER_SIXPAY, SIXPAY_JSON_API_SPEC, SIXPAY_PP_ASSERT_URL,
SIXPAY_PP_CANCEL_URL, SIXPAY_PP_CAPTURE_URL, SIXPAY_PP_INIT_URL,
get_request_header, get_terminal_id, to_large_currency, to_small_currency)
class TransactionFailure(Exception):
    """Raised when a transaction step with SIXPay fails.

    :param step: name of the step at which the transaction failed
    :param details: verbose description of what went wrong
    """

    # No super().__init__ call on purpose: BaseException.__new__ already
    # records the constructor arguments in ``args``.
    def __init__(self, step, details=None):
        self.step = step
        self.details = details
class RHSixpayBase(RH):
    """Base request handler for callbacks coming back from SIXPay.

    These endpoints are hit either by the user's browser (redirect back
    from the SIXPay payment page) or by SIXPay itself (server-to-server
    transaction result), so CSRF protection must stay disabled.
    """

    CSRF_ENABLED = False

    def _process_args(self):
        token = request.args['token']
        self.registration = Registration.query.filter_by(uuid=token).first()
        if self.registration is None:
            raise BadRequest
        try:
            self.token = self.registration.transaction.data['Init_PP_response']['Token']
        except KeyError:
            # The token disappears from the local transaction once a
            # background notification already recorded it as successful.
            self.token = None
class RHInitSixpayPayment(RHPaymentBase):
    """Initialize a SIXPay PaymentPage transaction and redirect the user to it."""

    def _get_transaction_parameters(self):
        """Get parameters for creating a transaction request."""
        settings = SixpayPaymentPlugin.event_settings.get_all(self.event)
        format_map = {
            'user_id': self.registration.user_id,
            'user_name': self.registration.full_name,
            'user_firstname': self.registration.first_name,
            'user_lastname': self.registration.last_name,
            'event_id': self.registration.event_id,
            'event_title': self.registration.event.title,
            'registration_id': self.registration.id,
            'regform_title': self.registration.registration_form.title
        }
        order_description = settings['order_description'].format(**format_map)
        order_identifier = settings['order_identifier'].format(**format_map)
        # see the SIXPay Manual
        # https://saferpay.github.io/jsonapi/#Payment_v1_PaymentPage_Initialize
        # on what these things mean
        transaction_parameters = {
            'RequestHeader': get_request_header(SIXPAY_JSON_API_SPEC, settings['account_id']),
            'TerminalId': get_terminal_id(settings['account_id']),
            'Payment': {
                'Amount': {
                    # indico handles price as largest currency, but six expects
                    # smallest. E.g. EUR: indico uses 100.2 Euro, but six
                    # expects 10020 Cent
                    'Value': str(to_small_currency(self.registration.price, self.registration.currency)),
                    'CurrencyCode': self.registration.currency,
                },
                # SIXPay caps the order id at 80 and the description at 1000 chars
                'OrderId': order_identifier[:80],
                # NOTE(review): the Saferpay JSON API spells this field
                # 'Description'; confirm the uppercase key is honored.
                'DESCRIPTION': order_description[:1000],
            },
            # callbacks of the transaction - where to announce success etc., when redircting the user
            'ReturnUrls': {
                'Success': url_for_plugin('payment_sixpay.success', self.registration.locator.uuid, _external=True),
                'Fail': url_for_plugin('payment_sixpay.failure', self.registration.locator.uuid, _external=True),
                'Abort': url_for_plugin('payment_sixpay.cancel', self.registration.locator.uuid, _external=True)
            },
            'Notification': {
                # where to asynchronously call back from SIXPay
                'NotifyUrl': url_for_plugin('payment_sixpay.notify', self.registration.locator.uuid, _external=True)
            }
        }
        if settings['notification_mail']:
            transaction_parameters['Notification']['MerchantEmails'] = [settings['notification_mail']]
        return transaction_parameters

    def _init_payment_page(self, transaction_data):
        """Initialize payment page.

        Returns the parsed JSON response containing the redirect URL and
        the transaction token.
        """
        endpoint = urljoin(SixpayPaymentPlugin.settings.get('url'), SIXPAY_PP_INIT_URL)
        credentials = (SixpayPaymentPlugin.settings.get('username'), SixpayPaymentPlugin.settings.get('password'))
        try:
            # The POST itself may raise (connection errors) in addition to
            # raise_for_status, so both live inside the same try block.
            resp = requests.post(endpoint, json=transaction_data, auth=credentials)
            resp.raise_for_status()
        except RequestException as exc:
            # ``exc.response`` is None for connection-level failures, so it
            # must not be accessed unconditionally when logging.
            details = exc.response.text if exc.response is not None else str(exc)
            SixpayPaymentPlugin.logger.error('Could not initialize payment: %s', details)
            raise Exception('Could not initialize payment') from exc
        return resp.json()

    def _process_args(self):
        RHPaymentBase._process_args(self)
        if 'sixpay' not in get_active_payment_plugins(self.event):
            raise NotFound
        if not SixpayPaymentPlugin.instance.supports_currency(self.registration.currency):
            raise BadRequest

    def _process(self):
        transaction_params = self._get_transaction_parameters()
        init_response = self._init_payment_page(transaction_params)
        payment_url = init_response['RedirectUrl']
        # create pending transaction and store Saferpay transaction token
        new_indico_txn = register_transaction(
            self.registration,
            self.registration.price,
            self.registration.currency,
            TransactionAction.pending,
            PROVIDER_SIXPAY,
            {'Init_PP_response': init_response}
        )
        if not new_indico_txn:
            # set it on the current transaction if we could not create a next one
            # this happens if we already have a pending transaction and it's incredibly
            # ugly...
            self.registration.transaction.data = {'Init_PP_response': init_response}
        return redirect(payment_url)
class SixpayNotificationHandler(RHSixpayBase):
    """Handler for notification from SIXPay service."""
    def _process(self):
        """Process the reply from SIXPay about the transaction."""
        # A missing token means the transaction was already finalized by
        # the other (background/user-facing) request, so nothing to do.
        if self.token is not None:
            self._process_confirmation()
    def _process_confirmation(self):
        """Process the confirmation response inside indico."""
        # assert transaction status from SIXPay
        try:
            assert_response = self._assert_payment()
            if self._is_duplicate_transaction(assert_response):
                # we have already handled the transaction
                return
            elif self._is_captured(assert_response):
                # We already captured the payment. This usually happens because sixpay
                # calls a background notification endpoint but we also try to capture
                # it after being redirected to the user-facing success endpoint
                SixpayPaymentPlugin.logger.info('Not processing already-captured transaction')
                time.sleep(1)  # wait a bit to make sure the other request finished!
                return
            elif self._is_authorized(assert_response):
                self._capture_transaction(assert_response)
            # Amount mismatches are logged/notified but the payment is still
            # registered (see _verify_amount).
            self._verify_amount(assert_response)
            self._register_payment(assert_response)
        except TransactionFailure as exc:
            if exc.step == 'capture':
                try:
                    payload = json.loads(exc.details)
                except (json.JSONDecodeError, TypeError):
                    payload = {}
                if payload.get('ErrorName') == 'TRANSACTION_ALREADY_CAPTURED':
                    # Same as the self._is_captured(assert_response) case above, but a race
                    # between the two requests (user-facing and background) resulted in both
                    # asserts returning an 'authorized' state
                    SixpayPaymentPlugin.logger.info('Not processing already-captured transaction (parallel request)')
                    time.sleep(1)  # wait a bit to make sure the other request finished
                    return
            SixpayPaymentPlugin.logger.warning('SIXPay transaction failed during %s: %s', exc.step, exc.details)
            raise
    def _perform_request(self, task, endpoint, data):
        """Perform a request against SIXPay.

        :param task: description of the request, used for error handling
        :param endpoint: the URL endpoint *relative* to the SIXPay base URL
        :param **data: data passed during the request

        This will automatically raise any HTTP errors encountered during the
        request. If the request itself fails, a :py:exc:`~.TransactionFailure`
        is raised for ``task``.
        """
        request_url = urljoin(SixpayPaymentPlugin.settings.get('url'), endpoint)
        credentials = (SixpayPaymentPlugin.settings.get('username'), SixpayPaymentPlugin.settings.get('password'))
        response = requests.post(request_url, json=data, auth=credentials)
        try:
            response.raise_for_status()
        except requests.HTTPError:
            raise TransactionFailure(step=task, details=response.text)
        return response
    def _assert_payment(self):
        """Check the status of the transaction with SIXPay.

        Returns transaction assert data.
        """
        account_id = SixpayPaymentPlugin.event_settings.get(self.registration.event, 'account_id')
        assert_response = self._perform_request(
            'assert',
            SIXPAY_PP_ASSERT_URL,
            {
                'RequestHeader': get_request_header(SIXPAY_JSON_API_SPEC, account_id),
                'Token': self.token,
            }
        )
        # _perform_request already raised for HTTP errors, so ``ok`` is
        # normally true here; a falsy response yields an implicit None.
        if assert_response.ok:
            return assert_response.json()
    def _is_duplicate_transaction(self, transaction_data):
        """Check if this transaction has already been recorded."""
        prev_transaction = self.registration.transaction
        if (
            not prev_transaction or
            prev_transaction.provider != PROVIDER_SIXPAY or
            'Transaction' not in prev_transaction.data
        ):
            return False
        old = prev_transaction.data['Transaction']
        new = transaction_data['Transaction']
        # Compare the identifying fields of the stored vs. asserted transaction.
        return (
            old['OrderId'] == new['OrderId'] and
            old['Type'] == new['Type'] and
            old['Id'] == new['Id'] and
            old['SixTransactionReference'] == new['SixTransactionReference'] and
            old['Amount']['Value'] == new['Amount']['Value'] and
            old['Amount']['CurrencyCode'] == new['Amount']['CurrencyCode']
        )
    def _is_authorized(self, assert_data):
        """Check if payment is authorized."""
        return assert_data['Transaction']['Status'] == 'AUTHORIZED'
    def _is_captured(self, assert_data):
        """Check if payment is captured, i.e. the cash flow is triggered."""
        return assert_data['Transaction']['Status'] == 'CAPTURED'
    def _verify_amount(self, assert_data):
        """Verify the amount and currency of the payment.

        Sends an email but still registers incorrect payments.
        """
        expected_amount = float(self.registration.price)
        expected_currency = self.registration.currency
        amount = float(assert_data['Transaction']['Amount']['Value'])
        currency = assert_data['Transaction']['Amount']['CurrencyCode']
        # SIXPay reports amounts in the smallest currency unit (e.g. cents).
        if to_small_currency(expected_amount, expected_currency) == amount and expected_currency == currency:
            return True
        SixpayPaymentPlugin.logger.warning("Payment doesn't match event's fee: %s %s != %s %s",
                                           amount, currency, to_small_currency(expected_amount, expected_currency),
                                           expected_currency)
        notify_amount_inconsistency(self.registration, to_large_currency(amount, currency), currency)
        return False
    def _capture_transaction(self, assert_data):
        """Confirm to SIXPay that the transaction is accepted.

        On success returns the response JSON data.
        """
        account_id = SixpayPaymentPlugin.event_settings.get(self.registration.event, 'account_id')
        capture_data = {
            'RequestHeader': get_request_header(SIXPAY_JSON_API_SPEC, account_id),
            'TransactionReference': {'TransactionId': assert_data['Transaction']['Id']}
        }
        capture_response = self._perform_request('capture', SIXPAY_PP_CAPTURE_URL, capture_data)
        return capture_response.json()
    def _cancel_transaction(self, assert_data):
        """Inform Sixpay that the transaction is canceled.

        Cancel the transaction at Sixpay. This method is implemented but
        not used and tested yet.
        """
        account_id = SixpayPaymentPlugin.event_settings.get(self.registration.event, 'account_id')
        cancel_data = {
            'RequestHeader': get_request_header(
                SIXPAY_JSON_API_SPEC, account_id
            ),
            'TransactionReference': {
                'TransactionId': assert_data['Transaction']['Id']
            }
        }
        cancel_response = self._perform_request(
            'cancel', SIXPAY_PP_CANCEL_URL, cancel_data
        )
        return cancel_response.json()
    def _register_payment(self, assert_data):
        """Register the transaction as paid."""
        # Amount/currency come from the pending indico transaction created
        # during initialization, not from the SIXPay response.
        register_transaction(
            self.registration,
            self.registration.transaction.amount,
            self.registration.transaction.currency,
            TransactionAction.complete,
            PROVIDER_SIXPAY,
            data={'Transaction': assert_data['Transaction']}
        )
class UserCancelHandler(RHSixpayBase):
    """User redirect target in case of cancelled payment."""

    def _process(self):
        txn = self.registration.transaction
        register_transaction(
            self.registration,
            txn.amount,
            txn.currency,
            # XXX: this is indeed reject and not cancel (cancel is "mark as unpaid" and
            # only used for manual transactions)
            TransactionAction.reject,
            provider=PROVIDER_SIXPAY,
        )
        flash(_('You cancelled the payment.'), 'info')
        return redirect(url_for('event_registration.display_regform', self.registration.locator.registrant))
class UserFailureHandler(RHSixpayBase):
    """User redirect target in case of failed payment."""

    def _process(self):
        # Mark the pending transaction as rejected and send the user back
        # to their registration page.
        txn = self.registration.transaction
        register_transaction(self.registration, txn.amount, txn.currency,
                             TransactionAction.reject, provider=PROVIDER_SIXPAY)
        flash(_('Your payment has failed.'), 'info')
        return redirect(url_for('event_registration.display_regform', self.registration.locator.registrant))
class UserSuccessHandler(SixpayNotificationHandler):
    """User redirect target in case of successful payment."""

    def _process(self):
        try:
            # A missing token means the background notification already
            # finalized the transaction.
            if self.token is not None:
                self._process_confirmation()
        except TransactionFailure:
            flash(_('Your payment could not be confirmed. Please contact the event organizers.'), 'warning')
        else:
            paid = self.registration.transaction.status == TransactionStatus.successful
            if paid:
                flash(_('Your payment has been confirmed.'), 'success')
        return redirect(url_for('event_registration.display_regform', self.registration.locator.registrant))
| mit | b0f387d6a7dd6bef3034b3d216122d3d | 44.088949 | 117 | 0.635521 | 4.564256 | false | false | false | false |
indico/indico-plugins | citadel/indico_citadel/schemas_test.py | 1 | 17756 | # This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from datetime import datetime, timedelta
from io import BytesIO
import pytest
from pytz import utc
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.core.marshmallow import mm
from indico.modules.attachments.models.attachments import Attachment, AttachmentFile, AttachmentType
from indico.modules.attachments.models.folders import AttachmentFolder
from indico.modules.events.contributions.models.persons import ContributionPersonLink, SubContributionPersonLink
from indico.modules.events.contributions.models.subcontributions import SubContribution
from indico.modules.events.models.persons import EventPerson, EventPersonLink
from indico.modules.events.notes.models.notes import EventNote, RenderMode
pytest_plugins = 'indico.modules.events.timetable.testing.fixtures'
def test_dump_event(db, dummy_user, dummy_event):
    """Dumping an event must yield the full Citadel record (HTML stripped)."""
    from .schemas import EventRecordSchema
    schema = EventRecordSchema(context={'schema': 'test-events'})
    dummy_event.description = 'A dummy <strong>event</strong>'
    dummy_event.keywords = ['foo', 'bar']
    person = EventPerson.create_from_user(dummy_user, dummy_event)
    person2 = EventPerson(event=dummy_event, first_name='Admin', last_name='Saurus', affiliation='Indico')
    dummy_event.person_links.append(EventPersonLink(person=person))
    dummy_event.person_links.append(EventPersonLink(person=person2))
    db.session.flush()
    category_id = dummy_event.category_id
    assert schema.dump(dummy_event) == {
        '$schema': 'test-events',
        '_access': {
            'delete': ['IndicoAdmin'],
            'owner': ['IndicoAdmin'],
            'update': ['IndicoAdmin'],
        },
        '_data': {
            'description': 'A dummy event',
            'keywords': ['foo', 'bar'],
            'location': {'address': '', 'room_name': '', 'venue_name': ''},
            'persons': [{'name': 'Guinea Pig'},
                        {'affiliation': 'Indico', 'name': 'Admin Saurus'}],
            'site': 'http://localhost',
            'title': 'dummy#0'
        },
        'category_id': 1,
        'category_path': [
            {'id': 0, 'title': 'Home', 'url': '/'},
            {'id': category_id, 'title': 'dummy', 'url': f'/category/{category_id}/'},
        ],
        'end_dt': dummy_event.end_dt.isoformat(),
        'event_id': 0,
        'start_dt': dummy_event.start_dt.isoformat(),
        'type': 'event',
        'type_format': 'meeting',
        'url': 'http://localhost/event/0/',
    }
@pytest.mark.parametrize('scheduled', (False, True))
def test_dump_contribution(db, dummy_user, dummy_event, dummy_contribution, create_entry, scheduled):
    """Dumping a contribution includes start/end only when it is scheduled."""
    from .schemas import ContributionRecordSchema
    person = EventPerson.create_from_user(dummy_user, dummy_event)
    dummy_contribution.person_links.append(ContributionPersonLink(person=person))
    dummy_contribution.description = 'A dummy <strong>contribution</strong>'
    extra = {}
    if scheduled:
        create_entry(dummy_contribution, utc.localize(datetime(2020, 4, 20, 4, 20)))
        extra = {
            'start_dt': dummy_contribution.start_dt.isoformat(),
            'end_dt': dummy_contribution.end_dt.isoformat(),
        }
    db.session.flush()
    category_id = dummy_contribution.event.category_id
    schema = ContributionRecordSchema(context={'schema': 'test-contribs'})
    assert schema.dump(dummy_contribution) == {
        '$schema': 'test-contribs',
        '_access': {
            'delete': ['IndicoAdmin'],
            'owner': ['IndicoAdmin'],
            'update': ['IndicoAdmin'],
        },
        '_data': {
            'description': 'A dummy contribution',
            'location': {'address': '', 'room_name': '', 'venue_name': ''},
            'persons': [{'name': 'Guinea Pig'}],
            'site': 'http://localhost',
            'title': 'Dummy Contribution',
        },
        'category_id': category_id,
        'category_path': [
            {'id': 0, 'title': 'Home', 'url': '/'},
            {'id': category_id, 'title': 'dummy', 'url': f'/category/{category_id}/'},
        ],
        'contribution_id': dummy_contribution.id,
        'duration': 20,
        'event_id': 0,
        'type': 'contribution',
        'url': f'http://localhost/event/0/contributions/{dummy_contribution.id}/',
        **extra
    }
@pytest.mark.parametrize('scheduled', (False, True))
def test_dump_subcontribution(db, dummy_user, dummy_event, dummy_contribution, create_entry, scheduled):
    """Dumping a subcontribution inherits scheduling from its contribution."""
    from .schemas import SubContributionRecordSchema
    extra = {}
    if scheduled:
        create_entry(dummy_contribution, utc.localize(datetime(2020, 4, 20, 4, 20)))
        extra = {
            'start_dt': dummy_contribution.start_dt.isoformat(),
            'end_dt': dummy_contribution.end_dt.isoformat(),
        }
    subcontribution = SubContribution(contribution=dummy_contribution, title='Dummy Subcontribution',
                                      description='A dummy <strong>subcontribution</strong>',
                                      duration=timedelta(minutes=10))
    person = EventPerson.create_from_user(dummy_user, dummy_event)
    subcontribution.person_links.append(SubContributionPersonLink(person=person))
    db.session.flush()
    category_id = dummy_contribution.event.category_id
    schema = SubContributionRecordSchema(context={'schema': 'test-subcontribs'})
    assert schema.dump(subcontribution) == {
        '$schema': 'test-subcontribs',
        '_access': {
            'delete': ['IndicoAdmin'],
            'owner': ['IndicoAdmin'],
            'update': ['IndicoAdmin'],
        },
        '_data': {
            'description': 'A dummy subcontribution',
            'location': {'address': '', 'room_name': '', 'venue_name': ''},
            'persons': [{'name': 'Guinea Pig'}],
            'site': 'http://localhost',
            'title': 'Dummy Subcontribution',
        },
        'category_id': category_id,
        'category_path': [
            {'id': 0, 'title': 'Home', 'url': '/'},
            {'id': category_id, 'title': 'dummy', 'url': f'/category/{category_id}/'},
        ],
        'contribution_id': dummy_contribution.id,
        'duration': 10,
        'event_id': 0,
        'subcontribution_id': subcontribution.id,
        'type': 'subcontribution',
        'url': f'http://localhost/event/0/contributions/{dummy_contribution.id}/subcontributions/{subcontribution.id}',
        **extra
    }
def test_dump_attachment(db, dummy_user, dummy_contribution):
    """Dumping a file attachment exposes filename, folder ids and download URL."""
    from .schemas import AttachmentRecordSchema
    folder = AttachmentFolder(title='Dummy Folder', description='a dummy folder')
    file = AttachmentFile(user=dummy_user, filename='dummy_file.txt', content_type='text/plain')
    attachment = Attachment(folder=folder, user=dummy_user, title='Dummy Attachment', type=AttachmentType.file,
                            file=file)
    attachment.folder.object = dummy_contribution
    attachment.file.save(BytesIO(b'hello world'))
    db.session.flush()
    category_id = dummy_contribution.event.category_id
    schema = AttachmentRecordSchema(context={'schema': 'test-attachment'})
    assert schema.dump(attachment) == {
        '$schema': 'test-attachment',
        '_access': {
            'delete': ['IndicoAdmin'],
            'owner': ['IndicoAdmin'],
            'update': ['IndicoAdmin'],
        },
        '_data': {
            'filename': 'dummy_file.txt',
            'site': 'http://localhost',
            'title': 'Dummy Attachment',
            'persons': {'name': 'Guinea Pig'},
        },
        'attachment_id': attachment.id,
        'category_id': category_id,
        'category_path': [
            {'id': 0, 'title': 'Home', 'url': '/'},
            {'id': category_id, 'title': 'dummy', 'url': f'/category/{category_id}/'},
        ],
        'contribution_id': dummy_contribution.id,
        'event_id': 0,
        'folder_id': folder.id,
        'modified_dt': attachment.modified_dt.isoformat(),
        'type': 'attachment',
        'type_format': 'file',
        'url': (
            f'http://localhost/event/0/contributions/'
            f'{dummy_contribution.id}/attachments/{folder.id}/{attachment.id}/dummy_file.txt'
        ),
    }
@pytest.mark.parametrize('link_type', ('event', 'contrib', 'subcontrib'))
def test_dump_event_note(db, dummy_user, dummy_event, dummy_contribution, link_type):
    """Dumping a note works for event/contribution/subcontribution parents."""
    from .schemas import EventNoteRecordSchema
    if link_type == 'event':
        ids = {}
        note = EventNote(object=dummy_event)
        url = '/event/0/note/'
    elif link_type == 'contrib':
        ids = {'contribution_id': dummy_contribution.id}
        note = EventNote(object=dummy_contribution)
        url = f'/event/0/contributions/{dummy_contribution.id}/note/'
    elif link_type == 'subcontrib':
        subcontribution = SubContribution(contribution=dummy_contribution, title='Dummy Subcontribution',
                                          duration=timedelta(minutes=10))
        db.session.flush()
        ids = {
            'contribution_id': subcontribution.contribution_id,
            'subcontribution_id': subcontribution.id,
        }
        note = EventNote(object=subcontribution)
        url = f'/event/0/contributions/{dummy_contribution.id}/subcontributions/{subcontribution.id}/note/'
    note.create_revision(RenderMode.html, 'this is a dummy <strong>note</strong>', dummy_user)
    db.session.flush()
    category_id = dummy_event.category_id
    schema = EventNoteRecordSchema(context={'schema': 'test-notes'})
    assert schema.dump(note) == {
        '$schema': 'test-notes',
        '_access': {
            'delete': ['IndicoAdmin'],
            'owner': ['IndicoAdmin'],
            'update': ['IndicoAdmin'],
        },
        '_data': {
            'content': 'this is a dummy note',
            'site': 'http://localhost',
            'title': note.object.title,
            'persons': {'name': 'Guinea Pig'}
        },
        'category_id': category_id,
        'category_path': [
            {'id': 0, 'title': 'Home', 'url': '/'},
            {'id': category_id, 'title': 'dummy', 'url': f'/category/{category_id}/'},
        ],
        'modified_dt': note.current_revision.created_dt.isoformat(),
        'event_id': 0,
        'note_id': note.id,
        'type': 'event_note',
        'url': f'http://localhost{url}',
        **ids
    }
def test_event_acls(dummy_event, create_user):
    """The dumped read ACL must follow the event's protection-mode changes."""
    from .schemas import ACLSchema
    class TestSchema(ACLSchema, mm.Schema):
        pass
    # Helper: dump the event and compare its read ACL to the expectation.
    def assert_acl(expected_read_acl):
        __tracebackhide__ = True
        data = schema.dump(dummy_event)
        read_acl = data['_access'].pop('read', None)
        assert data == {'_access': {'delete': ['IndicoAdmin'], 'owner': ['IndicoAdmin'], 'update': ['IndicoAdmin']}}
        if read_acl is not None:
            read_acl = set(read_acl)
        assert read_acl == expected_read_acl
    schema = TestSchema()
    u1 = create_user(1, email='user1@example.com')
    u2 = create_user(2, email='user2@example.com')
    u3 = create_user(3, email='user3@example.com')
    # event is inheriting public, so no acl
    assert_acl(None)
    # event is protected and the acl is empty (nobody has regular access)
    dummy_event.protection_mode = ProtectionMode.protected
    assert_acl({'IndicoAdmin'})
    dummy_event.update_principal(u1, read_access=True)
    dummy_event.category.update_principal(u2, read_access=True)
    dummy_event.category.parent.update_principal(u3, read_access=True)
    # self-protected, so no acl inherited
    assert_acl({'IndicoAdmin', 'User:1'})
    # event is inheriting from public categories, so there is no acl
    dummy_event.protection_mode = ProtectionMode.inheriting
    assert_acl(None)
    # event it itself public, so no acl here as well
    dummy_event.protection_mode = ProtectionMode.public
    assert_acl(None)
    # inheriting, so all parent acl entries
    dummy_event.protection_mode = ProtectionMode.inheriting
    dummy_event.category.parent.protection_mode = ProtectionMode.protected
    assert_acl({'IndicoAdmin', 'User:1', 'User:2', 'User:3'})
    # category protected, so no parent category acl inherited
    dummy_event.category.protection_mode = ProtectionMode.protected
    assert_acl({'IndicoAdmin', 'User:1', 'User:2'})
    # parent category acl entry is a manager, that one is inherited
    dummy_event.category.parent.update_principal(u3, full_access=True)
    assert_acl({'IndicoAdmin', 'User:1', 'User:2', 'User:3'})
def test_attachment_acls(dummy_event, dummy_user, create_user):
    """Attachment read ACLs combine event, folder and attachment protection."""
    from .schemas import ACLSchema
    class TestSchema(ACLSchema, mm.Schema):
        pass
    folder = AttachmentFolder(title='Dummy Folder', description='a dummy folder')
    attachment = Attachment(folder=folder, user=dummy_user, title='Dummy Attachment', type=AttachmentType.link,
                            link_url='https://example.com')
    attachment.folder.object = dummy_event
    # Helper: dump the attachment and compare its read ACL to the expectation.
    def assert_acl(expected_read_acl):
        __tracebackhide__ = True
        data = schema.dump(attachment)
        read_acl = data['_access'].pop('read', None)
        assert data == {'_access': {'delete': ['IndicoAdmin'], 'owner': ['IndicoAdmin'], 'update': ['IndicoAdmin']}}
        if read_acl is not None:
            read_acl = set(read_acl)
        assert read_acl == expected_read_acl
    schema = TestSchema()
    u1 = create_user(1, email='user1@example.com')
    u2 = create_user(2, email='user2@example.com')
    u3 = create_user(3, email='user3@example.com')
    # event is inheriting public, so no acl
    assert_acl(None)
    # event is protected and the acl is empty (nobody has regular access)
    dummy_event.protection_mode = ProtectionMode.protected
    assert_acl({'IndicoAdmin'})
    dummy_event.update_principal(u1, read_access=True)
    dummy_event.category.update_principal(u2, read_access=True)
    dummy_event.category.parent.update_principal(u3, read_access=True)
    # self-protected, so no acl inherited
    assert_acl({'IndicoAdmin', 'User:1'})
    # event is inheriting from public categories, so there is no acl
    dummy_event.protection_mode = ProtectionMode.inheriting
    assert_acl(None)
    # event it itself public, so no acl here as well
    dummy_event.protection_mode = ProtectionMode.public
    assert_acl(None)
    # inheriting, so all parent acl entries
    dummy_event.protection_mode = ProtectionMode.inheriting
    dummy_event.category.parent.protection_mode = ProtectionMode.protected
    assert_acl({'IndicoAdmin', 'User:1', 'User:2', 'User:3'})
    # category protected, so no parent category acl inherited
    dummy_event.category.protection_mode = ProtectionMode.protected
    assert_acl({'IndicoAdmin', 'User:1', 'User:2'})
    # parent category acl entry is a manager, that one is inherited
    dummy_event.category.parent.update_principal(u3, full_access=True)
    assert_acl({'IndicoAdmin', 'User:1', 'User:2', 'User:3'})
    # attachment self-protected, only the category/event manager has access
    folder.update_principal(u2, read_access=True)
    attachment.protection_mode = ProtectionMode.protected
    assert_acl({'IndicoAdmin', 'User:3'})
    # the user in the attachment acl has access as well
    attachment.update_principal(u1, read_access=True)
    attachment.protection_mode = ProtectionMode.protected
    assert_acl({'IndicoAdmin', 'User:3', 'User:1'})
    # attachment inheriting from self-protected folder - only the folder acl is used
    attachment.protection_mode = ProtectionMode.inheriting
    folder.protection_mode = ProtectionMode.protected
    assert_acl({'IndicoAdmin', 'User:3', 'User:2'})
@pytest.mark.parametrize('obj_type', ('event', 'contrib', 'subcontrib', 'attachment', 'note'))
def test_acls(dummy_event, dummy_contribution, dummy_user, create_user, obj_type):
    """All record types expose the same read ACL behavior for their event."""
    from .schemas import ACLSchema
    class TestSchema(ACLSchema, mm.Schema):
        pass
    if obj_type == 'event':
        obj = dummy_event
    elif obj_type == 'contrib':
        obj = dummy_contribution
    elif obj_type == 'subcontrib':
        obj = SubContribution(contribution=dummy_contribution, title='Test', duration=timedelta(minutes=10))
    elif obj_type == 'attachment':
        folder = AttachmentFolder(title='Dummy Folder', description='a dummy folder')
        obj = Attachment(folder=folder, user=dummy_user, title='Dummy Attachment', type=AttachmentType.link,
                         link_url='https://example.com')
        obj.folder.object = dummy_event
    elif obj_type == 'note':
        obj = EventNote(object=dummy_event)
        obj.create_revision(RenderMode.html, 'this is a dummy note', dummy_user)
    # Helper: dump ``obj`` and compare its read ACL to the expectation.
    def assert_acl(expected_read_acl):
        __tracebackhide__ = True
        data = schema.dump(obj)
        read_acl = data['_access'].pop('read', None)
        assert data == {'_access': {'delete': ['IndicoAdmin'], 'owner': ['IndicoAdmin'], 'update': ['IndicoAdmin']}}
        if read_acl is not None:
            read_acl = set(read_acl)
        assert read_acl == expected_read_acl
    schema = TestSchema()
    user = create_user(1, email='user1@example.com')
    # everything is public
    assert_acl(None)
    # event is protected and the acl is empty (nobody has regular access)
    dummy_event.protection_mode = ProtectionMode.protected
    assert_acl({'IndicoAdmin'})
    # user on the acl has access
    dummy_event.update_principal(user, read_access=True)
    assert_acl({'IndicoAdmin', 'User:1'})
| mit | 6aee2c36a2f6b30cab3ddd9209bf6d8e | 39.263039 | 119 | 0.626943 | 3.770652 | false | true | false | false |
indico/indico-plugins | citadel/indico_citadel/cli.py | 1 | 2691 | # This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
import os
import sys
import time
import traceback
import click
from indico.cli.core import cli_group
from indico.core.db import db
from indico.util.console import cformat
from indico_citadel.models.id_map import CitadelIdMap
from indico_livesync.models.agents import LiveSyncAgent
# Entry point for the `indico citadel ...` command group; subcommands attach via @cli.command().
@cli_group(name='citadel')
def cli():
    """Manage the Citadel plugin."""
@cli.command()
@click.option('--force', '-f', is_flag=True, help="Upload even if it has already been done once.")
@click.option('--retry', '-r', is_flag=True, help="Restart automatically after a failure")
@click.option('--batch', type=int, default=1000, show_default=True, metavar='N',
              help="The amount of records yielded per upload batch.")
@click.option('--max-size', type=int, metavar='SIZE',
              help="The max size (in MB) of files to upload. Defaults to the size from the plugin settings.")
def upload(batch, force, max_size, retry):
    """Upload file contents for full text search."""
    # The upload piggybacks on the citadel livesync agent's configuration.
    agent = LiveSyncAgent.query.filter(LiveSyncAgent.backend_name == 'citadel').first()
    if agent is None:
        print('No citadel livesync agent found')
        return
    # Refuse to upload files before the metadata has ever been exported.
    if not CitadelIdMap.query.has_rows():
        print('It looks like you did not export any data to Citadel yet.')
        print(cformat('To do so, run %{yellow!}indico livesync initial-export {}%{reset}').format(agent.id))
        return
    backend = agent.create_backend()
    if not backend.is_configured():
        print('Citadel is not properly configured.')
        return
    # First-ever run until the 'file_upload_done' flag has been set.
    initial = not agent.settings.get('file_upload_done')
    try:
        total, errors, aborted = backend.run_export_files(batch, force, max_size=max_size, initial=initial)
    except Exception:
        if not retry:
            raise
        # With --retry, log the failure and re-exec the whole process from scratch
        # (the \a rings the terminal bell so an attended run is noticeable).
        traceback.print_exc()
        print('Restarting in 2 seconds\a')
        time.sleep(2)
        os.execl(sys.argv[0], *sys.argv)
        return  # exec doesn't return but just in case...
    if not errors and not aborted:
        print(f'{total} files uploaded')
        if max_size is None:
            # Only mark the initial upload as done when no size limit skewed the run.
            backend.set_initial_file_upload_state(True)
            db.session.commit()
        else:
            print('Max size was set; not enabling queue runs.')
    else:
        if aborted:
            print('Upload aborted')
        print(f'{total} files processed, {errors} failed')
        print('Please re-run this script; queue runs will remain disabled for now')
| mit | c24b460b0945733fc6f374563bbb7904 | 35.364865 | 109 | 0.663322 | 3.75838 | false | false | false | false |
indico/indico-plugins | piwik/indico_piwik/controllers.py | 1 | 2435 | # This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from flask import jsonify, request
from flask_pluginengine import current_plugin
from werkzeug.exceptions import NotFound
from indico.modules.events.management.controllers import RHManageEventBase
from indico_piwik.reports import ReportCountries, ReportDevices, ReportGeneral, ReportVisitsPerDay
from indico_piwik.views import WPStatistics
class WPPiwikStatistics(WPStatistics):
    """Statistics page that additionally loads this plugin's own JS/CSS bundles."""

    @property
    def additional_bundles(self):
        manifest = current_plugin.manifest
        screen_assets = [manifest['main.js'], manifest['main.css']]
        return {'screen': screen_assets, 'print': ()}
class RHPiwikBase(RHManageEventBase):
    """Base request handler for Piwik pages.

    Responds with 404 unless the plugin has an event site id configured.
    """

    def _process_args(self):
        from indico_piwik.plugin import PiwikPlugin  # local import avoids a circular dependency at module load
        RHManageEventBase._process_args(self)
        if not PiwikPlugin.settings.get('site_id_events'):
            raise NotFound
class RHStatistics(RHPiwikBase):
    """Render the Piwik statistics page for an event."""

    def _process(self):
        report_args = {
            'event_id': self.event.id,
            'contrib_id': request.args.get('contrib_id'),
            'start_date': request.args.get('start_date'),
            'end_date': request.args.get('end_date'),
        }
        report = ReportGeneral.get(**report_args)
        return WPPiwikStatistics.render_template('statistics.html', self.event, report=report)
class RHApiBase(RHPiwikBase):
    # NOTE(review): presumably allows access while the event is locked — confirm
    # against the RH base class semantics of ALLOW_LOCKED.
    ALLOW_LOCKED = True

    def _process_args(self):
        RHPiwikBase._process_args(self)
        # Common report parameters shared by all API endpoints; values may be None
        # when the query string does not provide them.
        self._report_params = {'start_date': request.args.get('start_date'),
                               'end_date': request.args.get('end_date')}
class RHApiEventBase(RHApiBase):
    """API base handler that scopes the report to the current event (and optional contribution)."""

    def _process_args(self):
        RHApiBase._process_args(self)
        self._report_params['event_id'] = self.event.id
        self._report_params['contrib_id'] = request.args.get('contrib_id')
class RHApiEventVisitsPerDay(RHApiEventBase):
    """Return the visits-per-day report for the event as JSON."""

    def _process(self):
        report = ReportVisitsPerDay.get(**self._report_params)
        return jsonify(report)
class RHApiEventGraphCountries(RHApiEventBase):
    """Return the per-country report for the event as JSON."""

    def _process(self):
        report = ReportCountries.get(**self._report_params)
        return jsonify(report)
class RHApiEventGraphDevices(RHApiEventBase):
    """Return the devices report for the event as JSON."""

    def _process(self):
        report = ReportDevices.get(**self._report_params)
        return jsonify(report)
| mit | facda99eb36b8aab30b05ffb924b4c6b | 32.819444 | 98 | 0.674333 | 3.70624 | false | false | false | false |
indico/indico-plugins | previewer_jupyter/indico_previewer_jupyter/controllers.py | 1 | 2235 | # This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
import nbformat
from flask import current_app, render_template, request, session
from flask_pluginengine import current_plugin
from nbconvert.exporters import HTMLExporter
from traitlets.config import Config
from werkzeug.exceptions import Forbidden
from indico.modules.attachments import Attachment
from indico.web.rh import RH
from indico_previewer_jupyter.cpp_highlighter import CppHighlighter
class RHEventPreviewIPyNB(RH):
    """Render a Jupyter notebook attachment as an HTML preview page."""

    def _check_access(self):
        if not self.attachment.can_access(session.user):
            raise Forbidden

    def _process_args(self):
        # `.one()` raises if the attachment does not exist or was deleted
        self.attachment = Attachment.query.filter_by(id=request.view_args['attachment_id'], is_deleted=False).one()

    def _process(self):
        # Configure nbconvert's HTML exporter with the classic template and a
        # custom C++ highlighter preprocessor.
        config = Config()
        config.HTMLExporter.preprocessors = [CppHighlighter]
        config.HTMLExporter.template_name = 'classic'
        # Disable unused extensions
        config.HTMLExporter.mathjax_url = ''
        config.HTMLExporter.jquery_url = ''
        config.HTMLExporter.require_js_url = ''
        with self.attachment.file.open() as f:
            notebook = nbformat.read(f, as_version=4)
        html_exporter = HTMLExporter(config=config)
        body, resources = html_exporter.from_notebook_node(notebook)
        # CSS that nbconvert would normally inline into the exported document
        css_code = '\n'.join(resources['inlining'].get('css', []))
        html = render_template('previewer_jupyter:ipynb_preview.html', attachment=self.attachment,
                               html_code=body, css_code=css_code, plugin=current_plugin)
        response = current_app.response_class(html)
        # Use CSP to restrict access to possibly malicious scripts or inline JS
        csp_header = "script-src 'self';"
        response.headers['Content-Security-Policy'] = csp_header
        response.headers['X-Webkit-CSP'] = csp_header
        # IE10 doesn't have proper CSP support, so we need to be more strict
        response.headers['X-Content-Security-Policy'] = "sandbox allow-same-origin;"
        return response
| mit | 3ab18696838a66ecdc149299412ba91c | 38.910714 | 115 | 0.695749 | 3.788136 | false | true | false | false |
indico/indico-plugins | livesync/indico_livesync/task.py | 1 | 1345 | # This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from celery.schedules import crontab
from indico.core.celery import celery
from indico.core.db import db
from indico_livesync.models.agents import LiveSyncAgent
from indico_livesync.util import clean_old_entries
@celery.periodic_task(run_every=crontab(minute='*/15'), plugin='livesync')
def scheduled_update():
    """Periodically push queued changes through every livesync agent."""
    from indico_livesync.plugin import LiveSyncPlugin
    logger = LiveSyncPlugin.logger
    if LiveSyncPlugin.settings.get('disable_queue_runs'):
        logger.warning('Queue runs are disabled')
        return
    clean_old_entries()
    for agent in LiveSyncAgent.query.all():
        # Skip agents whose backend plugin is missing or whose queue is disabled.
        if agent.backend is None:
            logger.warning('Skipping agent %s; backend not found', agent.name)
            continue
        sync_backend = agent.create_backend()
        allowed, reason = sync_backend.check_queue_status()
        if not allowed:
            logger.warning('Skipping agent %s; queue runs disabled: %s', agent.name, reason)
            continue
        logger.info('Running agent %s', agent.name)
        sync_backend.run()
        db.session.commit()
| mit | c492f418cdefd41e5cf12e341cefce45 | 37.428571 | 107 | 0.705576 | 3.864943 | false | false | false | false |
indico/indico-plugins | citadel/indico_citadel/search.py | 1 | 5022 | # This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
import base64
import zlib
import requests
from flask import current_app
from requests.exceptions import RequestException
from werkzeug.urls import url_join
from indico.modules.search.base import IndicoSearchProvider, SearchOption
from indico.util.decorators import classproperty
from indico_citadel import _
from indico_citadel.result_schemas import CitadelResultSchema
from indico_citadel.util import format_filters, format_query, get_user_access
class CitadelProvider(IndicoSearchProvider):
    """Search provider backed by the Citadel (CERN Search) service."""

    def __init__(self, *args, **kwargs):
        from indico_citadel.plugin import CitadelPlugin  # local import avoids a circular dependency
        super().__init__(*args, **kwargs)
        self.token = CitadelPlugin.settings.get('search_backend_token')
        self.backend_url = CitadelPlugin.settings.get('search_backend_url')
        self.records_url = url_join(self.backend_url, 'api/records/')

    @classproperty
    @classmethod
    def active(cls):
        """Whether this provider should be used (always true while testing)."""
        from indico_citadel.plugin import CitadelPlugin
        if current_app.config['TESTING']:
            return True
        elif CitadelPlugin.settings.get('disable_search'):
            return False
        # Both the URL and the token must be configured for the provider to work
        return bool(CitadelPlugin.settings.get('search_backend_url') and
                    CitadelPlugin.settings.get('search_backend_token'))

    def search(self, query, user=None, page=1, object_types=(), *, admin_override_enabled=False,
               **params):
        """Run a search against Citadel and return the deserialized result page.

        :param query: the raw query string entered by the user
        :param user: the user searching, used to build the access filter
        :param page: 1-based result page number
        :param object_types: record types to restrict the search to
        :param admin_override_enabled: whether admin access override applies
        :param params: extra filters (plus ``default_operator`` and ``sort``)
        :raises Exception: if the search service cannot be contacted
        """
        # https://cern-search.docs.cern.ch/usage/operations/#query-documents
        # this token is used by the backend to authenticate and also to filter
        # the objects that we can actually read
        headers = {
            'Authorization': f'Bearer {self.token}'
        }
        operator = params.pop('default_operator', 'AND')
        sort = params.pop('sort', None)
        filter_query, ranges = format_filters(params, filters, range_filters)
        # Look for objects matching the `query` and schema, make sure the query is properly escaped
        # https://cern-search.docs.cern.ch/usage/operations/#advanced-queries
        parts = [format_query(query, {k: field for k, (field, _) in placeholders.items()})]
        if ranges:
            parts.append(ranges)
        search_params = {
            'page': page, 'size': self.RESULTS_PER_PAGE, 'q': ' '.join(parts), 'highlight': '_data.*',
            'type': [x.name for x in object_types], 'sort': sort, 'default_operator': operator,
            **filter_query
        }
        # Filter by the objects that can be viewed by users/groups in the `access` argument
        if access := get_user_access(user, admin_override_enabled):
            access_string = ','.join(access)
            if len(access_string) > 1024:
                # Long ACLs are zlib-compressed and base64-encoded to keep the
                # query string size manageable (sent as `access_gz`).
                access_string_gz = base64.b64encode(zlib.compress(access_string.encode(), level=9))
                search_params['access_gz'] = access_string_gz
            else:
                search_params['access'] = access_string
        try:
            resp = requests.get(self.records_url, params=search_params, headers=headers)
            resp.raise_for_status()
        except RequestException:
            raise Exception('Failed contacting the search service')
        data = resp.json()
        return CitadelResultSchema(context={'results_per_page': self.RESULTS_PER_PAGE}).load(data)

    def get_placeholders(self):
        # `_` here is the unused field name, not the gettext alias
        return [SearchOption(key, label) for key, (_, label) in placeholders.items()]

    def get_sort_options(self):
        return [SearchOption(key, label) for key, label in sort_options.items()]
# Search placeholders: maps the user-facing placeholder name to a tuple of
# (Citadel/record field to query, translated human-readable description).
# `_` is the plugin's gettext alias (imported from indico_citadel above).
placeholders = {
    'title': ('_data.title', _('The title of an event, contribution, etc.')),
    'person': ('_data.persons_index.name', _("A speaker, author or event chair's name")),
    'affiliation': ('_data.persons_index.affiliation', _("A speaker, author or event chair's affiliation")),
    'type': ('type_any', _('An entry type (such as conference, meeting, file, etc.)')),
    'venue': ('_data.location.venue_name', _("Name of the venue")),
    'room': ('_data.location.room_name', _("Name of the room")),
    'address': ('_data.location.address', _("Address of the venue")),
    'file': ('_data.filename', _("Name of the attached file")),
    'keyword': ('_data.keywords', _('A keyword associated with an event')),
    'category': ('category_path.title', _('The category of an event')),
}

# Range filters: query param name -> indexed field it constrains.
range_filters = {
    'start_range': 'start_dt'
}

# Sort options offered in the UI; a leading '-' reverses the order.
sort_options = {
    'bestmatch': _('Most relevant'),
    'mostrecent': _('Newest first'),
    '-mostrecent': _('Oldest first')
}

# Filters exposed by the search UI: filter key -> translated label.
filters = {
    'person_affiliation': _('Affiliation'),
    'person_name': _('Person'),
    'type_format': _('Type'),
    'venue': _('Location'),
    'start_range': _('Date'),
    'category': _('Category'),
    'category_id': _('Category ID'),
    'event_id': _('Event ID'),
}
| mit | 392913658d26adce34fd53aff28136f1 | 39.829268 | 108 | 0.639188 | 3.83945 | false | false | false | false |
indico/indico-plugins | piwik/indico_piwik/queries/graphs.py | 1 | 2063 | # This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from base64 import b64encode
from flask_pluginengine import current_plugin
from indico_piwik.queries.base import PiwikQueryReportEventBase
class PiwikQueryReportEventGraphBase(PiwikQueryReportEventBase):
    """Base Piwik query for retrieving PNG graphs."""

    def call(self, apiModule, apiAction, height=None, width=None, graphType='verticalBar', **query_params):
        # Delegate to the Piwik ImageGraph.get API; height/width are only sent
        # when explicitly given so the server-side defaults apply otherwise.
        # (camelCase parameter names match the Piwik API, hence no PEP 8 here.)
        if height is not None:
            query_params['height'] = height
        if width is not None:
            query_params['width'] = width
        return super().call(method='ImageGraph.get', apiModule=apiModule, apiAction=apiAction, aliasedGraph='1',
                            graphType=graphType, **query_params)

    def get_result(self):
        """Perform the call and return the graph data.

        :return: Encoded PNG graph data string to be inserted in a `src`
                 attribute of a HTML img tag, or ``None`` if no graph could
                 be retrieved.
        """
        png = self.call()
        if png is None:
            return
        # Piwik answers with this error text (instead of PNG bytes) when the
        # server lacks the PHP GD extension; log it and bail out.
        if png.startswith(b'GD extension must be loaded'):
            current_plugin.logger.warning('Piwik server answered on ImageGraph.get: %s', png)
            return
        return f'data:image/png;base64,{b64encode(png).decode()}'
class PiwikQueryReportEventGraphCountries(PiwikQueryReportEventGraphBase):
    """Fetch the per-country visitors graph for an event."""

    def call(self, **query_params):
        defaults = dict(apiModule='UserCountry', apiAction='getCountry', period='range',
                        width=490, height=260, graphType='horizontalBar')
        return super().call(**defaults, **query_params)
class PiwikQueryReportEventGraphDevices(PiwikQueryReportEventGraphBase):
    """Fetch the OS-version/devices breakdown graph for an event."""

    def call(self, **query_params):
        defaults = dict(apiModule='DevicesDetection', apiAction='getOsVersions', period='range',
                        width=320, height=260, graphType='horizontalBar')
        return super().call(**defaults, **query_params)
| mit | 9cd1ae9339d55205da8fa2cab82d87e5 | 40.26 | 115 | 0.672807 | 4.053045 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.