code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
# Django
from django.urls import path
from django.views.generic import TemplateView
# Local
from . import views
# URL routes, grouped to mirror the site structure: landing page first,
# then static footer pages, the auth flow, and account management.
urlpatterns = [
    # Landing page
    path('', views.index, name='index'),
    # Static footer pages served straight from templates
    path('about/', TemplateView.as_view(template_name='app/pages/about.html'), name='about'),
    path('faq/', TemplateView.as_view(template_name='app/pages/faq.html'), name='faq'),
    path('privacy/', TemplateView.as_view(template_name='app/pages/privacy.html'), name='privacy'),
    path('terms/', TemplateView.as_view(template_name='app/pages/terms.html'), name='terms'),
    path('support/', TemplateView.as_view(template_name='app/pages/support.html'), name='support'),
    # Authentication flow (no trailing slashes, kept as-is for existing links)
    path('join', views.join, name='join'),
    path('callback', views.callback, name='callback'),
    path('login', views.login, name='login'),
    path('logout', views.logout, name='logout'),
    # Account management
    path('account', views.account, name='account'),
    # Account deletion
    path('delete', views.delete, name='delete'),
]
|
[
"django.views.generic.TemplateView.as_view",
"django.urls.path"
] |
[((145, 180), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (149, 180), False, 'from django.urls import path\n'), ((704, 741), 'django.urls.path', 'path', (['"""join"""', 'views.join'], {'name': '"""join"""'}), "('join', views.join, name='join')\n", (708, 741), False, 'from django.urls import path\n'), ((747, 796), 'django.urls.path', 'path', (['"""callback"""', 'views.callback'], {'name': '"""callback"""'}), "('callback', views.callback, name='callback')\n", (751, 796), False, 'from django.urls import path\n'), ((802, 842), 'django.urls.path', 'path', (['"""login"""', 'views.login'], {'name': '"""login"""'}), "('login', views.login, name='login')\n", (806, 842), False, 'from django.urls import path\n'), ((848, 891), 'django.urls.path', 'path', (['"""logout"""', 'views.logout'], {'name': '"""logout"""'}), "('logout', views.logout, name='logout')\n", (852, 891), False, 'from django.urls import path\n'), ((912, 958), 'django.urls.path', 'path', (['"""account"""', 'views.account'], {'name': '"""account"""'}), "('account', views.account, name='account')\n", (916, 958), False, 'from django.urls import path\n'), ((979, 1022), 'django.urls.path', 'path', (['"""delete"""', 'views.delete'], {'name': '"""delete"""'}), "('delete', views.delete, name='delete')\n", (983, 1022), False, 'from django.urls import path\n'), ((216, 274), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""app/pages/about.html"""'}), "(template_name='app/pages/about.html')\n", (236, 274), False, 'from django.views.generic import TemplateView\n'), ((309, 365), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""app/pages/faq.html"""'}), "(template_name='app/pages/faq.html')\n", (329, 365), False, 'from django.views.generic import TemplateView\n'), ((402, 462), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], 
{'template_name': '"""app/pages/privacy.html"""'}), "(template_name='app/pages/privacy.html')\n", (422, 462), False, 'from django.views.generic import TemplateView\n'), ((501, 559), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""app/pages/terms.html"""'}), "(template_name='app/pages/terms.html')\n", (521, 559), False, 'from django.views.generic import TemplateView\n'), ((598, 658), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""app/pages/support.html"""'}), "(template_name='app/pages/support.html')\n", (618, 658), False, 'from django.views.generic import TemplateView\n')]
|
import logging
import argparse
import blink.main_dense as main_dense
logger = logging.getLogger(__name__)
class EntityLinker:
    """Thin wrapper around BLINK dense entity linking.

    All bi-encoder/cross-encoder models are loaded once at construction
    time; linking is then exposed as a simple callable.
    """
    def __init__(self, model_path, logger=None):
        """Load the BLINK models found under *model_path*.

        NOTE(review): file names are appended by plain string
        concatenation, so *model_path* is assumed to end with a path
        separator — confirm with callers.
        """
        self.logger = logger
        self.models_path = model_path
        base = self.models_path
        # Mirrors the argument set main_dense expects on its CLI.
        self.config = {
            "test_entities": None,
            "test_mentions": None,
            "interactive": False,
            "biencoder_model": base + "biencoder_wiki_large.bin",
            "biencoder_config": base + "biencoder_wiki_large.json",
            "entity_catalogue": base + "entity.jsonl",
            "entity_encoding": base + "all_entities_large.t7",
            "crossencoder_model": base + "crossencoder_wiki_large.bin",
            "crossencoder_config": base + "crossencoder_wiki_large.json",
            "fast": True,  # set this to be true if speed is a concern
            "output_path": "logs/",  # logging directory
            "faiss_index": "flat",
            "index_path": base + "index.pkl",
            "top_k": 30
        }
        self.args = argparse.Namespace(**self.config)
        self.models = main_dense.load_models(self.args, logger=self.logger)
    def __call__(self, data_to_link):
        """Run entity linking over *data_to_link*; return predictions only."""
        # load_models returns a fixed 10-tuple; name the pieces for clarity.
        (biencoder, biencoder_params, crossencoder, crossencoder_params,
         candidate_encoding, title2id, id2title, id2text,
         wikipedia_id2local_id, faiss_indexer) = self.models
        _, _, _, _, _, predictions, scores = main_dense.run(
            self.args,
            logger=self.logger,
            biencoder=biencoder,
            biencoder_params=biencoder_params,
            crossencoder=crossencoder,
            crossencoder_params=crossencoder_params,
            candidate_encoding=candidate_encoding,
            title2id=title2id,
            id2title=id2title,
            id2text=id2text,
            wikipedia_id2local_id=wikipedia_id2local_id,
            faiss_indexer=faiss_indexer,
            test_data=data_to_link,
        )
        return predictions
|
[
"argparse.Namespace",
"blink.main_dense.run",
"blink.main_dense.load_models",
"logging.getLogger"
] |
[((79, 106), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (96, 106), False, 'import logging\n'), ((1107, 1140), 'argparse.Namespace', 'argparse.Namespace', ([], {}), '(**self.config)\n', (1125, 1140), False, 'import argparse\n'), ((1164, 1217), 'blink.main_dense.load_models', 'main_dense.load_models', (['self.args'], {'logger': 'self.logger'}), '(self.args, logger=self.logger)\n', (1186, 1217), True, 'import blink.main_dense as main_dense\n'), ((1305, 1696), 'blink.main_dense.run', 'main_dense.run', (['self.args'], {'logger': 'self.logger', 'biencoder': 'self.models[0]', 'biencoder_params': 'self.models[1]', 'crossencoder': 'self.models[2]', 'crossencoder_params': 'self.models[3]', 'candidate_encoding': 'self.models[4]', 'title2id': 'self.models[5]', 'id2title': 'self.models[6]', 'id2text': 'self.models[7]', 'wikipedia_id2local_id': 'self.models[8]', 'faiss_indexer': 'self.models[9]', 'test_data': 'data_to_link'}), '(self.args, logger=self.logger, biencoder=self.models[0],\n biencoder_params=self.models[1], crossencoder=self.models[2],\n crossencoder_params=self.models[3], candidate_encoding=self.models[4],\n title2id=self.models[5], id2title=self.models[6], id2text=self.models[7\n ], wikipedia_id2local_id=self.models[8], faiss_indexer=self.models[9],\n test_data=data_to_link)\n', (1319, 1696), True, 'import blink.main_dense as main_dense\n')]
|
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from enumfields.drf.serializers import EnumSupportSerializerMixin
from rest_framework.fields import SerializerMethodField
from rest_framework.serializers import (
ModelSerializer
)
from sorl_thumbnail_serializer.fields import HyperlinkedSorlImageField
from campaigns.models import Product, Campaign, CampaignPartyRelation, CampaignPartyRelationType, \
CampaignEnrollmentRequest
from team.serializers import TeamListSerializer
User = get_user_model()
class CampaignCreateSerializer(EnumSupportSerializerMixin, ModelSerializer):
    """Write serializer for creating a Campaign (basic fields only).

    Image upload is handled separately by CampaignImageUpdateRetriveSerializer.
    """
    class Meta:
        model = Campaign
        fields = [
            'id',
            'title',
            'start_time',
            'end_time',
            'description',
        ]
class CampaignListSerializer(EnumSupportSerializerMixin, ModelSerializer):
    """Read serializer for campaign list views."""
    creator = SerializerMethodField()
    # Read-only 500x500 center-cropped thumbnail derived from `image`.
    thumbnail = HyperlinkedSorlImageField(
        '500x500',
        options={"crop": "center"},
        source='image',
        read_only=True
    )

    class Meta:
        model = Campaign
        fields = [
            'id',
            'title',
            'creator',
            'start_time',
            'end_time',
            'description',
            'thumbnail',
            'image',
            'width_field',
            'height_field',
        ]
        read_only_fields = [
            'thumbnail',
            'image',
            'width_field',
            'height_field',
        ]

    def get_creator(self, obj):
        """Display name of the party that created this campaign."""
        creator_relation = CampaignPartyRelation.objects.get(
            campaign=obj,
            type=CampaignPartyRelationType.CREATOR,
        )
        return creator_relation.content_object.name
class CampaignUpdateSerializer(EnumSupportSerializerMixin, ModelSerializer):
    """Write serializer for updating a Campaign's basic fields.

    Mirrors CampaignCreateSerializer's field set; image updates go through
    the dedicated image serializer instead.
    """
    class Meta:
        model = Campaign
        fields = [
            'id',
            'title',
            'start_time',
            'end_time',
            'description',
        ]
class CampaignDetailSerializer(EnumSupportSerializerMixin, ModelSerializer):
    """Read serializer for a single campaign, with viewer-specific flags.

    `accessable`, `requested` and `enrolled` are computed per request for
    the authenticated user (all False for anonymous users).
    """
    accessable = SerializerMethodField()
    creator = SerializerMethodField()
    requested = SerializerMethodField()
    enrolled = SerializerMethodField()
    # Read-only 500x500 center-cropped thumbnail derived from `profile_image`.
    thumbnail = HyperlinkedSorlImageField(
        '500x500',
        options={"crop": "center"},
        source='profile_image',
        read_only=True
    )
    # A larger version of the image, allows writing
    # profile_image = HyperlinkedSorlImageField('1024')

    class Meta:
        model = Campaign
        fields = [
            'id',
            'title',
            'creator',
            'type',
            'description',
            'start_time',
            'end_time',
            'accessable',
            'requested',
            'enrolled',
            'thumbnail',
            'image',
            'width_field',
            'height_field',
        ]
        read_only_fields = [
            'thumbnail',
            'image',
            'width_field',
            'height_field',
        ]

    def get_accessable(self, obj):
        """True when the requesting user is this campaign's creator."""
        user = self.context.get('request').user
        if not user.is_authenticated:
            return False
        return CampaignPartyRelation.objects.filter(
            campaign=obj,
            type=CampaignPartyRelationType.CREATOR,
            content_type=ContentType.objects.get(model="profile"),
            object_id=user.id
        ).exists()

    def get_creator(self, obj):
        """Display name of the party that created this campaign."""
        creator_relation = CampaignPartyRelation.objects.get(
            campaign=obj,
            type=CampaignPartyRelationType.CREATOR,
        )
        return creator_relation.content_object.name

    def get_requested(self, obj):
        """True when the requesting user has an enrollment request pending."""
        user = self.context.get('request').user
        if not user.is_authenticated:
            return False
        return CampaignEnrollmentRequest.objects.filter(
            campaign=obj,
            user=user
        ).exists()

    def get_enrolled(self, obj):
        """True when the requesting user is already a campaign member."""
        user = self.context.get('request').user
        if not user.is_authenticated:
            return False
        return CampaignPartyRelation.objects.filter(
            campaign=obj,
            type=CampaignPartyRelationType.MEMBER,
            content_type=ContentType.objects.get(model="user"),
            object_id=user.id
        ).exists()
class CampaignImageUpdateRetriveSerializer(ModelSerializer):
    """Serializer for retrieving/updating only a campaign's image.

    NOTE(review): class name misspells "Retrieve"; kept unchanged for
    caller compatibility.
    """
    class Meta:
        model = Campaign
        fields = [
            'image',
            'width_field',
            'height_field',
        ]
        # Dimensions are derived from the uploaded image, never set directly.
        read_only_fields = [
            'width_field',
            'height_field',
        ]
class CampaignDeleteSerializer(EnumSupportSerializerMixin, ModelSerializer):
    """Serializer for campaign deletion; exposes only the id."""
    class Meta:
        model = Campaign
        fields = [
            'id',
        ]
class CampaignRequestEnrollmentSerializer(ModelSerializer):
    """Serializer for a user's enrollment request; only the note is writable."""
    class Meta:
        model = CampaignEnrollmentRequest
        fields = [
            'note'
        ]
class ProductCreateSerializer(ModelSerializer):
    """Write serializer for creating a Product."""
    class Meta:
        model = Product
        fields = [
            'name',
            'description',
            'price'
        ]
class ProductListSerializer(ModelSerializer):
    """Read serializer for product listings, with the seller team embedded."""
    # Nested read-only representation of the selling team.
    seller = TeamListSerializer()
    class Meta:
        model = Product
        fields = [
            'seller',
            'id',
            'name',
            'description',
            'price'
        ]
|
[
"campaigns.models.CampaignPartyRelation.objects.get",
"django.contrib.contenttypes.models.ContentType.objects.get",
"rest_framework.fields.SerializerMethodField",
"django.contrib.auth.get_user_model",
"campaigns.models.CampaignEnrollmentRequest.objects.filter",
"team.serializers.TeamListSerializer",
"sorl_thumbnail_serializer.fields.HyperlinkedSorlImageField"
] |
[((549, 565), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (563, 565), False, 'from django.contrib.auth import get_user_model\n'), ((922, 945), 'rest_framework.fields.SerializerMethodField', 'SerializerMethodField', ([], {}), '()\n', (943, 945), False, 'from rest_framework.fields import SerializerMethodField\n'), ((1015, 1116), 'sorl_thumbnail_serializer.fields.HyperlinkedSorlImageField', 'HyperlinkedSorlImageField', (['"""500x500"""'], {'options': "{'crop': 'center'}", 'source': '"""image"""', 'read_only': '(True)'}), "('500x500', options={'crop': 'center'}, source=\n 'image', read_only=True)\n", (1040, 1116), False, 'from sorl_thumbnail_serializer.fields import HyperlinkedSorlImageField\n'), ((2131, 2154), 'rest_framework.fields.SerializerMethodField', 'SerializerMethodField', ([], {}), '()\n', (2152, 2154), False, 'from rest_framework.fields import SerializerMethodField\n'), ((2169, 2192), 'rest_framework.fields.SerializerMethodField', 'SerializerMethodField', ([], {}), '()\n', (2190, 2192), False, 'from rest_framework.fields import SerializerMethodField\n'), ((2209, 2232), 'rest_framework.fields.SerializerMethodField', 'SerializerMethodField', ([], {}), '()\n', (2230, 2232), False, 'from rest_framework.fields import SerializerMethodField\n'), ((2248, 2271), 'rest_framework.fields.SerializerMethodField', 'SerializerMethodField', ([], {}), '()\n', (2269, 2271), False, 'from rest_framework.fields import SerializerMethodField\n'), ((2340, 2449), 'sorl_thumbnail_serializer.fields.HyperlinkedSorlImageField', 'HyperlinkedSorlImageField', (['"""500x500"""'], {'options': "{'crop': 'center'}", 'source': '"""profile_image"""', 'read_only': '(True)'}), "('500x500', options={'crop': 'center'}, source=\n 'profile_image', read_only=True)\n", (2365, 2449), False, 'from sorl_thumbnail_serializer.fields import HyperlinkedSorlImageField\n'), ((5302, 5322), 'team.serializers.TeamListSerializer', 'TeamListSerializer', ([], {}), '()\n', (5320, 5322), 
False, 'from team.serializers import TeamListSerializer\n'), ((1627, 1719), 'campaigns.models.CampaignPartyRelation.objects.get', 'CampaignPartyRelation.objects.get', ([], {'campaign': 'obj', 'type': 'CampaignPartyRelationType.CREATOR'}), '(campaign=obj, type=\n CampaignPartyRelationType.CREATOR)\n', (1660, 1719), False, 'from campaigns.models import Product, Campaign, CampaignPartyRelation, CampaignPartyRelationType, CampaignEnrollmentRequest\n'), ((3582, 3674), 'campaigns.models.CampaignPartyRelation.objects.get', 'CampaignPartyRelation.objects.get', ([], {'campaign': 'obj', 'type': 'CampaignPartyRelationType.CREATOR'}), '(campaign=obj, type=\n CampaignPartyRelationType.CREATOR)\n', (3615, 3674), False, 'from campaigns.models import Product, Campaign, CampaignPartyRelation, CampaignPartyRelationType, CampaignEnrollmentRequest\n'), ((3845, 3910), 'campaigns.models.CampaignEnrollmentRequest.objects.filter', 'CampaignEnrollmentRequest.objects.filter', ([], {'campaign': 'obj', 'user': 'user'}), '(campaign=obj, user=user)\n', (3885, 3910), False, 'from campaigns.models import Product, Campaign, CampaignPartyRelation, CampaignPartyRelationType, CampaignEnrollmentRequest\n'), ((3393, 3433), 'django.contrib.contenttypes.models.ContentType.objects.get', 'ContentType.objects.get', ([], {'model': '"""profile"""'}), "(model='profile')\n", (3416, 3433), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((4279, 4316), 'django.contrib.contenttypes.models.ContentType.objects.get', 'ContentType.objects.get', ([], {'model': '"""user"""'}), "(model='user')\n", (4302, 4316), False, 'from django.contrib.contenttypes.models import ContentType\n')]
|
import coffeewhale
import time
def main():
    """Entry point: run the decorated test function once."""
    test_func()
@coffeewhale.on_except
def test_func():
    """Sleep briefly, then raise to exercise coffeewhale's on_except handler."""
    print('start sleeping')
    time.sleep(1)
    print('after sleep')
    # Intentional failure: the on_except decorator is expected to react to it.
    raise Exception()
# coffeewhale.notify(url="<KEY>",
# result='hello world!')
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
[
"time.sleep"
] |
[((135, 148), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (145, 148), False, 'import time\n')]
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
    def test_div(self):
        """Division semantics: no integer division; div-by-zero gives inf/NaN."""
        with np.errstate(all='ignore'):
            # no longer do integer div for any ops, but deal with the 0's
            p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
            result = p['first'] / p['second']
            expected = Series(
                p['first'].values.astype(float) / p['second'].values,
                dtype='float64')
            # positive / 0 -> +inf for the first three rows
            expected.iloc[0:3] = np.inf
            assert_series_equal(result, expected)
            result = p['first'] / 0
            expected = Series(np.inf, index=p.index, name='first')
            assert_series_equal(result, expected)
            p = p.astype('float64')
            result = p['first'] / p['second']
            expected = Series(p['first'].values / p['second'].values)
            assert_series_equal(result, expected)
            p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
            result = p['first'] / p['second']
            # name is dropped because the two operands have different names
            assert_series_equal(result, p['first'].astype('float64'),
                                check_names=False)
            self.assertTrue(result.name is None)
            self.assertFalse(np.array_equal(result, p['second'] / p['first']))
            # inf signing
            s = Series([np.nan, 1., -1.])
            result = s / 0
            expected = Series([np.nan, np.inf, -np.inf])
            assert_series_equal(result, expected)
            # float/integer issue
            # GH 7785
            p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
            expected = Series([-0.01, -np.inf])
            result = p['second'].div(p['first'])
            assert_series_equal(result, expected, check_names=False)
            result = p['second'] / p['first']
            assert_series_equal(result, expected)
            # GH 9144
            s = Series([-1, 0, 1])
            result = 0 / s
            expected = Series([0.0, nan, 0.0])
            assert_series_equal(result, expected)
            result = s / 0
            expected = Series([-inf, nan, inf])
            assert_series_equal(result, expected)
            result = s // 0
            expected = Series([-inf, nan, inf])
            assert_series_equal(result, expected)
    def test_operators(self):
        """Cython/numpy fast paths agree with the pure-Python `combine` path."""
        def _check_op(series, other, op, pos_only=False,
                      check_dtype=True):
            # pos_only avoids invalid domains (e.g. negative base for pow)
            left = np.abs(series) if pos_only else series
            right = np.abs(other) if pos_only else other
            cython_or_numpy = op(left, right)
            python = left.combine(right, op)
            tm.assert_series_equal(cython_or_numpy, python,
                                   check_dtype=check_dtype)
        def check(series, other):
            simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
            for opname in simple_ops:
                _check_op(series, other, getattr(operator, opname))
            _check_op(series, other, operator.pow, pos_only=True)
            # reflected variants: swap the operand order inside the op
            _check_op(series, other, lambda x, y: operator.add(y, x))
            _check_op(series, other, lambda x, y: operator.sub(y, x))
            _check_op(series, other, lambda x, y: operator.truediv(y, x))
            _check_op(series, other, lambda x, y: operator.floordiv(y, x))
            _check_op(series, other, lambda x, y: operator.mul(y, x))
            _check_op(series, other, lambda x, y: operator.pow(y, x),
                      pos_only=True)
            _check_op(series, other, lambda x, y: operator.mod(y, x))
        check(self.ts, self.ts * 2)
        check(self.ts, self.ts * 0)
        check(self.ts, self.ts[::2])
        check(self.ts, 5)
        def check_comparators(series, other, check_dtype=True):
            _check_op(series, other, operator.gt, check_dtype=check_dtype)
            _check_op(series, other, operator.ge, check_dtype=check_dtype)
            _check_op(series, other, operator.eq, check_dtype=check_dtype)
            _check_op(series, other, operator.lt, check_dtype=check_dtype)
            _check_op(series, other, operator.le, check_dtype=check_dtype)
        check_comparators(self.ts, 5)
        check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
    def test_operators_timedelta64(self):
        """datetime64 Series arithmetic produces timedelta64/M8 as appropriate."""
        # invalid ops
        self.assertRaises(Exception, self.objSeries.__add__, 1)
        self.assertRaises(Exception, self.objSeries.__add__,
                          np.array(1, dtype=np.int64))
        self.assertRaises(Exception, self.objSeries.__sub__, 1)
        self.assertRaises(Exception, self.objSeries.__sub__,
                          np.array(1, dtype=np.int64))
        # series ops: difference of two daily ranges is exactly one day
        v1 = date_range('2012-1-1', periods=3, freq='D')
        v2 = date_range('2012-1-2', periods=3, freq='D')
        rs = Series(v2) - Series(v1)
        xp = Series(1e9 * 3600 * 24,
                    rs.index).astype('int64').astype('timedelta64[ns]')
        assert_series_equal(rs, xp)
        self.assertEqual(rs.dtype, 'timedelta64[ns]')
        df = DataFrame(dict(A=v1))
        td = Series([timedelta(days=i) for i in range(3)])
        self.assertEqual(td.dtype, 'timedelta64[ns]')
        # series on the rhs
        result = df['A'] - df['A'].shift()
        self.assertEqual(result.dtype, 'timedelta64[ns]')
        result = df['A'] + td
        self.assertEqual(result.dtype, 'M8[ns]')
        # scalar Timestamp on rhs
        maxa = df['A'].max()
        tm.assertIsInstance(maxa, Timestamp)
        resultb = df['A'] - df['A'].max()
        self.assertEqual(resultb.dtype, 'timedelta64[ns]')
        # timestamp on lhs
        result = resultb + df['A']
        values = [Timestamp('20111230'), Timestamp('20120101'),
                  Timestamp('20120103')]
        expected = Series(values, name='A')
        assert_series_equal(result, expected)
        # datetimes on rhs
        result = df['A'] - datetime(2001, 1, 1)
        expected = Series(
            [timedelta(days=4017 + i) for i in range(3)], name='A')
        assert_series_equal(result, expected)
        self.assertEqual(result.dtype, 'm8[ns]')
        d = datetime(2001, 1, 1, 3, 4)
        resulta = df['A'] - d
        self.assertEqual(resulta.dtype, 'm8[ns]')
        # roundtrip
        resultb = resulta + d
        assert_series_equal(df['A'], resultb)
        # timedeltas on rhs
        td = timedelta(days=1)
        resulta = df['A'] + td
        resultb = resulta - td
        assert_series_equal(resultb, df['A'])
        self.assertEqual(resultb.dtype, 'M8[ns]')
        # roundtrip
        td = timedelta(minutes=5, seconds=3)
        resulta = df['A'] + td
        resultb = resulta - td
        assert_series_equal(df['A'], resultb)
        self.assertEqual(resultb.dtype, 'M8[ns]')
        # inplace
        value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
        rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
        self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
    def test_timedeltas_with_DateOffset(self):
        """Adding/subtracting pd.offsets and np.timedelta64 shifts each element."""
        # GH 4532
        # operate with pd.offsets
        s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
        result = s + pd.offsets.Second(5)
        result2 = pd.offsets.Second(5) + s
        expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
            '20130101 9:02:05')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        result = s - pd.offsets.Second(5)
        result2 = -pd.offsets.Second(5) + s
        expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
            '20130101 9:01:55')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        result = s + pd.offsets.Milli(5)
        result2 = pd.offsets.Milli(5) + s
        expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
            '20130101 9:02:00.005')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        # chained offsets accumulate
        result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
        expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
            '20130101 9:07:00.005')])
        assert_series_equal(result, expected)
        # operate with np.timedelta64 correctly
        result = s + np.timedelta64(1, 's')
        result2 = np.timedelta64(1, 's') + s
        expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
            '20130101 9:02:01')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        result = s + np.timedelta64(5, 'ms')
        result2 = np.timedelta64(5, 'ms') + s
        expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
            '20130101 9:02:00.005')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)
        # valid DateOffsets: smoke-test both operand orders don't raise
        for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
                   'Nano']:
            op = getattr(pd.offsets, do)
            s + op(5)
            op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
    def test_timedelta64_operations_with_DateOffset(self):
        """timedelta64 Series combine with DateOffsets, scalar or elementwise."""
        # GH 10699
        td = Series([timedelta(minutes=5, seconds=3)] * 3)
        result = td + pd.offsets.Minute(1)
        expected = Series([timedelta(minutes=6, seconds=3)] * 3)
        assert_series_equal(result, expected)
        result = td - pd.offsets.Minute(1)
        expected = Series([timedelta(minutes=4, seconds=3)] * 3)
        assert_series_equal(result, expected)
        # a Series of offsets applies each offset to the matching element
        result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
                              pd.offsets.Hour(2)])
        expected = Series([timedelta(minutes=6, seconds=3), timedelta(
            minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
        assert_series_equal(result, expected)
        result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
        expected = Series([timedelta(minutes=6, seconds=15)] * 3)
        assert_series_equal(result, expected)
        # valid DateOffsets: smoke-test all four operand orders don't raise
        for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
                   'Nano']:
            op = getattr(pd.offsets, do)
            td + op(5)
            op(5) + td
            td - op(5)
            op(5) - td
    def test_timedelta64_operations_with_timedeltas(self):
        """td Series - td scalar works the same for datetime.timedelta and pd.to_timedelta."""
        # td operate with td
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td2 = timedelta(minutes=5, seconds=4)
        result = td1 - td2
        # 5:03 - 5:04 == -1 second per element
        expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
            seconds=1)] * 3)
        self.assertEqual(result.dtype, 'm8[ns]')
        assert_series_equal(result, expected)
        result2 = td2 - td1
        expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
            seconds=0)] * 3))
        assert_series_equal(result2, expected)
        # roundtrip
        assert_series_equal(result + td2, td1)
        # Now again, using pd.to_timedelta, which should build
        # a Series or a scalar, depending on input.
        td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
        td2 = pd.to_timedelta('00:05:04')
        result = td1 - td2
        expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
            seconds=1)] * 3)
        self.assertEqual(result.dtype, 'm8[ns]')
        assert_series_equal(result, expected)
        result2 = td2 - td1
        expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
            seconds=0)] * 3))
        assert_series_equal(result2, expected)
        # roundtrip
        assert_series_equal(result + td2, td1)
    def test_timedelta64_operations_with_integers(self):
        """Dividing/multiplying a timedelta64 Series by ints preserves NaT."""
        # GH 4521
        # divide/multiply by integers
        startdate = Series(date_range('2013-01-01', '2013-01-03'))
        enddate = Series(date_range('2013-03-01', '2013-03-03'))
        s1 = enddate - startdate
        # introduce a NaT that every result below must propagate
        s1[2] = np.nan
        s2 = Series([2, 3, 4])
        expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
        expected[2] = np.nan
        result = s1 / s2
        assert_series_equal(result, expected)
        s2 = Series([20, 30, 40])
        expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
        expected[2] = np.nan
        result = s1 / s2
        assert_series_equal(result, expected)
        result = s1 / 2
        expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
        expected[2] = np.nan
        assert_series_equal(result, expected)
        s2 = Series([20, 30, 40])
        expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
        expected[2] = np.nan
        result = s1 * s2
        assert_series_equal(result, expected)
        # same multiplication across every integer dtype
        for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
                      'uint8']:
            s2 = Series([20, 30, 40], dtype=dtype)
            expected = Series(
                s1.values.astype(np.int64) * s2.astype(np.int64),
                dtype='m8[ns]')
            expected[2] = np.nan
            result = s1 * s2
            assert_series_equal(result, expected)
        result = s1 * 2
        expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
        expected[2] = np.nan
        assert_series_equal(result, expected)
        result = s1 * -1
        expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
        expected[2] = np.nan
        assert_series_equal(result, expected)
        # invalid ops
        assert_series_equal(s1 / s2.astype(float),
                            Series([Timedelta('2 days 22:48:00'), Timedelta(
                                '1 days 23:12:00'), Timedelta('NaT')]))
        assert_series_equal(s1 / 2.0,
                            Series([Timedelta('29 days 12:00:00'), Timedelta(
                                '29 days 12:00:00'), Timedelta('NaT')]))
        # adding/subtracting a bare int to a timedelta Series must raise
        for op in ['__add__', '__sub__']:
            sop = getattr(s1, op, None)
            if sop is not None:
                self.assertRaises(TypeError, sop, 1)
                self.assertRaises(TypeError, sop, s2.values)
    def test_timedelta64_conversions(self):
        """Division by np.timedelta64 and astype(object) round-trips per unit."""
        startdate = Series(date_range('2013-01-01', '2013-01-03'))
        enddate = Series(date_range('2013-03-01', '2013-03-03'))
        s1 = enddate - startdate
        s1[2] = np.nan
        for m in [1, 3, 10]:
            for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
                # op
                expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
                result = s1 / np.timedelta64(m, unit)
                assert_series_equal(result, expected)
                if m == 1 and unit != 'ns':
                    # astype
                    result = s1.astype("timedelta64[{0}]".format(unit))
                    assert_series_equal(result, expected)
                # reverse op (result intentionally unchecked; smoke-test only)
                expected = s1.apply(
                    lambda x: Timedelta(np.timedelta64(m, unit)) / x)
                result = np.timedelta64(m, unit) / s1
        # astype
        s = Series(date_range('20130101', periods=3))
        result = s.astype(object)
        self.assertIsInstance(result.iloc[0], datetime)
        self.assertTrue(result.dtype == np.object_)
        result = s1.astype(object)
        self.assertIsInstance(result.iloc[0], timedelta)
        self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
    def test_operators_datetimelike(self):
        """Arithmetic between timedelta64, datetime64 and tz-aware Series.

        ``run_ops`` checks that the listed dunder ops raise TypeError;
        the bare expressions afterwards are smoke tests that the supported
        ops evaluate without error.  The tz section additionally checks
        results by comparing against tz-naive arithmetic re-localized.
        """
        def run_ops(ops, get_ser, test_ser):
            # check that we are getting a TypeError
            # with 'operate' (from core/ops.py) for the ops that are not
            # defined
            for op_str in ops:
                op = getattr(get_ser, op_str, None)
                with tm.assertRaisesRegexp(TypeError, 'operate'):
                    op(test_ser)
        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        td2 = timedelta(minutes=5, seconds=4)
        ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
               '__rfloordiv__', '__rpow__']
        run_ops(ops, td1, td2)
        td1 + td2
        td2 + td1
        td1 - td2
        td2 - td1
        td1 / td2
        td2 / td1
        # ## datetime64 ###
        dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
                      Timestamp('20120103')])
        dt1.iloc[2] = np.nan
        dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
                      Timestamp('20120104')])
        ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
               '__pow__', '__radd__', '__rmul__', '__rfloordiv__',
               '__rtruediv__', '__rdiv__', '__rpow__']
        run_ops(ops, dt1, dt2)
        dt1 - dt2
        dt2 - dt1
        # ## datetime64 with timetimedelta ###
        ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
               '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
               '__rpow__']
        run_ops(ops, dt1, td1)
        dt1 + td1
        td1 + dt1
        dt1 - td1
        # TODO: Decide if this ought to work.
        # td1 - dt1
        # ## timetimedelta with datetime64 ###
        ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
               '__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
               '__rdiv__', '__rpow__']
        run_ops(ops, td1, dt1)
        td1 + dt1
        dt1 + td1
        # 8260, 10763
        # datetime64 with tz
        ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
               '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
               '__rpow__']
        tz = 'US/Eastern'
        dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
                                tz=tz), name='foo')
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan
        td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
        td2 = td1.copy()
        td2.iloc[1] = np.nan
        run_ops(ops, dt1, td1)
        # tz-aware +/- timedelta scalar: compare against tz-naive arithmetic
        # followed by re-localization
        result = dt1 + td1[0]
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        result = dt2 + td2[0]
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        # odd numpy behavior with scalar timedeltas
        if not _np_version_under1p8:
            result = td1[0] + dt1
            exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
            assert_series_equal(result, exp)
            result = td2[0] + dt2
            exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
            assert_series_equal(result, exp)
        result = dt1 - td1[0]
        exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        # timedelta - datetime is not defined
        self.assertRaises(TypeError, lambda: td1[0] - dt1)
        result = dt2 - td2[0]
        exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        self.assertRaises(TypeError, lambda: td2[0] - dt2)
        # tz-aware +/- timedelta Series
        result = dt1 + td1
        exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        result = dt2 + td2
        exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        result = dt1 - td1
        exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        result = dt2 - td2
        exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
        assert_series_equal(result, exp)
        self.assertRaises(TypeError, lambda: td1 - dt1)
        self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
    def test_ops_nat(self):
        """NaT propagation through -, +, *, / on datetime64/timedelta64 Series.

        GH 11349.  The general rule exercised here: any op touching NaT
        yields NaT, with the result dtype determined by the operand dtypes
        (datetime - timedelta -> datetime, datetime - datetime -> timedelta,
        etc.); type-invalid combinations raise TypeError.
        """
        # GH 11349
        timedelta_series = Series([NaT, Timedelta('1s')])
        datetime_series = Series([NaT, Timestamp('19900315')])
        nat_series_dtype_timedelta = Series(
            [NaT, NaT], dtype='timedelta64[ns]')
        nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
        single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
        single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
        # subtraction
        assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
        assert_series_equal(-NaT + timedelta_series,
                            nat_series_dtype_timedelta)
        assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
                            nat_series_dtype_timedelta)
        assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
        assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
        assert_series_equal(datetime_series - single_nat_dtype_datetime,
                            nat_series_dtype_timedelta)
        # datetime cannot appear on the right of a subtraction from datetime
        # via unary minus (reversed sub is undefined for datetime64)
        with tm.assertRaises(TypeError):
            -single_nat_dtype_datetime + datetime_series
        assert_series_equal(datetime_series - single_nat_dtype_timedelta,
                            nat_series_dtype_timestamp)
        assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
                            nat_series_dtype_timestamp)
        # without a Series wrapping the NaT, it is ambiguous
        # whether it is a datetime64 or timedelta64
        # defaults to interpreting it as timedelta64
        assert_series_equal(nat_series_dtype_timestamp - NaT,
                            nat_series_dtype_timestamp)
        assert_series_equal(-NaT + nat_series_dtype_timestamp,
                            nat_series_dtype_timestamp)
        assert_series_equal(nat_series_dtype_timestamp -
                            single_nat_dtype_datetime,
                            nat_series_dtype_timedelta)
        with tm.assertRaises(TypeError):
            -single_nat_dtype_datetime + nat_series_dtype_timestamp
        assert_series_equal(nat_series_dtype_timestamp -
                            single_nat_dtype_timedelta,
                            nat_series_dtype_timestamp)
        assert_series_equal(-single_nat_dtype_timedelta +
                            nat_series_dtype_timestamp,
                            nat_series_dtype_timestamp)
        with tm.assertRaises(TypeError):
            timedelta_series - single_nat_dtype_datetime
        # addition
        assert_series_equal(nat_series_dtype_timestamp + NaT,
                            nat_series_dtype_timestamp)
        assert_series_equal(NaT + nat_series_dtype_timestamp,
                            nat_series_dtype_timestamp)
        assert_series_equal(nat_series_dtype_timestamp +
                            single_nat_dtype_timedelta,
                            nat_series_dtype_timestamp)
        assert_series_equal(single_nat_dtype_timedelta +
                            nat_series_dtype_timestamp,
                            nat_series_dtype_timestamp)
        assert_series_equal(nat_series_dtype_timedelta + NaT,
                            nat_series_dtype_timedelta)
        assert_series_equal(NaT + nat_series_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(nat_series_dtype_timedelta +
                            single_nat_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(single_nat_dtype_timedelta +
                            nat_series_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
        assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
        assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
                            nat_series_dtype_timedelta)
        assert_series_equal(nat_series_dtype_timestamp + NaT,
                            nat_series_dtype_timestamp)
        assert_series_equal(NaT + nat_series_dtype_timestamp,
                            nat_series_dtype_timestamp)
        assert_series_equal(nat_series_dtype_timestamp +
                            single_nat_dtype_timedelta,
                            nat_series_dtype_timestamp)
        assert_series_equal(single_nat_dtype_timedelta +
                            nat_series_dtype_timestamp,
                            nat_series_dtype_timestamp)
        assert_series_equal(nat_series_dtype_timedelta + NaT,
                            nat_series_dtype_timedelta)
        assert_series_equal(NaT + nat_series_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(nat_series_dtype_timedelta +
                            single_nat_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(single_nat_dtype_timedelta +
                            nat_series_dtype_timedelta,
                            nat_series_dtype_timedelta)
        # timedelta + datetime promotes to a datetime result
        assert_series_equal(nat_series_dtype_timedelta +
                            single_nat_dtype_datetime,
                            nat_series_dtype_timestamp)
        assert_series_equal(single_nat_dtype_datetime +
                            nat_series_dtype_timedelta,
                            nat_series_dtype_timestamp)
        # multiplication
        assert_series_equal(nat_series_dtype_timedelta * 1.0,
                            nat_series_dtype_timedelta)
        assert_series_equal(1.0 * nat_series_dtype_timedelta,
                            nat_series_dtype_timedelta)
        assert_series_equal(timedelta_series * 1, timedelta_series)
        assert_series_equal(1 * timedelta_series, timedelta_series)
        assert_series_equal(timedelta_series * 1.5,
                            Series([NaT, Timedelta('1.5s')]))
        assert_series_equal(1.5 * timedelta_series,
                            Series([NaT, Timedelta('1.5s')]))
        assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
        assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
        # datetimes cannot be scaled
        with tm.assertRaises(TypeError):
            datetime_series * 1
        with tm.assertRaises(TypeError):
            nat_series_dtype_timestamp * 1
        with tm.assertRaises(TypeError):
            datetime_series * 1.0
        with tm.assertRaises(TypeError):
            nat_series_dtype_timestamp * 1.0
        # division
        assert_series_equal(timedelta_series / 2,
                            Series([NaT, Timedelta('0.5s')]))
        assert_series_equal(timedelta_series / 2.0,
                            Series([NaT, Timedelta('0.5s')]))
        assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
        # datetimes cannot be divided
        with tm.assertRaises(TypeError):
            nat_series_dtype_timestamp / 1.0
        with tm.assertRaises(TypeError):
            nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
# fffffffuuuuuuuuuuuu
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
    def test_comparison_label_based(self):
        """Boolean &, |, ^ between Series align on labels, not position.

        GH 4947.  Also covers filling behavior against empty / non-matching
        operands, the ``s[s | e] == s`` identity, and scalar rhs handling.
        """
        # GH 4947
        # comparisons should be label based
        a = Series([True, False, True], list('bca'))
        b = Series([False, True, False], list('abc'))
        expected = Series([False, True, False], list('abc'))
        result = a & b
        assert_series_equal(result, expected)
        expected = Series([True, True, False], list('abc'))
        result = a | b
        assert_series_equal(result, expected)
        expected = Series([True, False, False], list('abc'))
        result = a ^ b
        assert_series_equal(result, expected)
        # rhs is bigger
        a = Series([True, False, True], list('bca'))
        b = Series([False, True, False, True], list('abcd'))
        expected = Series([False, True, False, False], list('abcd'))
        result = a & b
        assert_series_equal(result, expected)
        expected = Series([True, True, False, False], list('abcd'))
        result = a | b
        assert_series_equal(result, expected)
        # filling
        # vs empty
        result = a & Series([])
        expected = Series([False, False, False], list('bca'))
        assert_series_equal(result, expected)
        result = a | Series([])
        expected = Series([True, False, True], list('bca'))
        assert_series_equal(result, expected)
        # vs non-matching
        result = a & Series([1], ['z'])
        expected = Series([False, False, False, False], list('abcz'))
        assert_series_equal(result, expected)
        result = a | Series([1], ['z'])
        expected = Series([True, True, False, False], list('abcz'))
        assert_series_equal(result, expected)
        # identity
        # we would like s[s|e] == s to hold for any e, whether empty or not
        for e in [Series([]), Series([1], ['z']),
                  Series(np.nan, b.index), Series(np.nan, a.index)]:
            result = a[a | e]
            assert_series_equal(result, a[a])
        # a string-valued rhs: PY3 cannot sort the mixed union and warns
        for e in [Series(['z'])]:
            if compat.PY3:
                with tm.assert_produces_warning(RuntimeWarning):
                    result = a[a | e]
            else:
                result = a[a | e]
            assert_series_equal(result, a[a])
        # vs scalars
        index = list('bca')
        t = Series([True, False, True])
        for v in [True, 1, 2]:
            result = Series([True, False, True], index=index) | v
            expected = Series([True, True, True], index=index)
            assert_series_equal(result, expected)
        for v in [np.nan, 'foo']:
            self.assertRaises(TypeError, lambda: t | v)
        for v in [False, 0]:
            result = Series([True, False, True], index=index) | v
            expected = Series([True, False, True], index=index)
            assert_series_equal(result, expected)
        for v in [True, 1]:
            result = Series([True, False, True], index=index) & v
            expected = Series([True, False, True], index=index)
            assert_series_equal(result, expected)
        for v in [False, 0]:
            result = Series([True, False, True], index=index) & v
            expected = Series([False, False, False], index=index)
            assert_series_equal(result, expected)
        for v in [np.nan]:
            self.assertRaises(TypeError, lambda: t & v)
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
tm.assert_series_equal(left.le(right), left < right)
tm.assert_series_equal(left.lt(right), left <= right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
tm.assert_series_equal(left.le(right, axis=axis), left < right)
tm.assert_series_equal(left.lt(right, axis=axis), left <= right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
#
msg = 'No axis named 1 for object type'
for op in ['eq', 'ne', 'le', 'le', 'gt', 'ge']:
with tm.assertRaisesRegexp(ValueError, msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
    def test_operators_bitwise(self):
        """Bitwise &, | across bool and integer Series, scalars and lists.

        GH 9016.  Covers empty operands, partially-overlapping indexes
        (missing labels fill False), int-dtype preservation/upcast, and the
        TypeErrors raised for incompatible rhs values.
        """
        # GH 9016: support bitwise op for integer types
        index = list('bca')
        s_tft = Series([True, False, True], index=index)
        s_fff = Series([False, False, False], index=index)
        s_tff = Series([True, False, False], index=index)
        s_empty = Series([])
        # TODO: unused
        # s_0101 = Series([0, 1, 0, 1])
        s_0123 = Series(range(4), dtype='int64')
        s_3333 = Series([3] * 4)
        s_4444 = Series([4] * 4)
        # vs empty: & gives all-False, | keeps the lhs
        res = s_tft & s_empty
        expected = s_fff
        assert_series_equal(res, expected)
        res = s_tft | s_empty
        expected = s_tft
        assert_series_equal(res, expected)
        # integer bitwise ops operate on the underlying ints
        res = s_0123 & s_3333
        expected = Series(range(4), dtype='int64')
        assert_series_equal(res, expected)
        res = s_0123 | s_4444
        expected = Series(range(4, 8), dtype='int64')
        assert_series_equal(res, expected)
        s_a0b1c0 = Series([1], list('b'))
        res = s_tft & s_a0b1c0
        expected = s_tff.reindex(list('abc'))
        assert_series_equal(res, expected)
        res = s_tft | s_a0b1c0
        expected = s_tft.reindex(list('abc'))
        assert_series_equal(res, expected)
        # vs integer scalars
        n0 = 0
        res = s_tft & n0
        expected = s_fff
        assert_series_equal(res, expected)
        res = s_0123 & n0
        expected = Series([0] * 4)
        assert_series_equal(res, expected)
        n1 = 1
        res = s_tft & n1
        expected = s_tft
        assert_series_equal(res, expected)
        res = s_0123 & n1
        expected = Series([0, 1, 0, 1])
        assert_series_equal(res, expected)
        # mixed int dtypes upcast to the wider type
        s_1111 = Series([1] * 4, dtype='int8')
        res = s_0123 & s_1111
        expected = Series([0, 1, 0, 1], dtype='int64')
        assert_series_equal(res, expected)
        res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
        expected = Series([1, 1, 3, 3], dtype='int32')
        assert_series_equal(res, expected)
        # incompatible rhs values raise
        self.assertRaises(TypeError, lambda: s_1111 & 'a')
        self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])
        self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
        self.assertRaises(TypeError, lambda: s_0123 & 3.14)
        self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
        # s_0123 will be all false now because of reindexing like s_tft
        if compat.PY3:
            # unable to sort incompatible object via .union.
            exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
            with tm.assert_produces_warning(RuntimeWarning):
                assert_series_equal(s_tft & s_0123, exp)
        else:
            exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
            assert_series_equal(s_tft & s_0123, exp)
        # s_tft will be all false now because of reindexing like s_0123
        if compat.PY3:
            # unable to sort incompatible object via .union.
            exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
            with tm.assert_produces_warning(RuntimeWarning):
                assert_series_equal(s_0123 & s_tft, exp)
        else:
            exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
            assert_series_equal(s_0123 & s_tft, exp)
        # vs bool scalars and lists
        assert_series_equal(s_0123 & False, Series([False] * 4))
        assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
        assert_series_equal(s_0123 & [False], Series([False] * 4))
        assert_series_equal(s_0123 & (False), Series([False] * 4))
        assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
                            Series([False] * 4))
        s_ftft = Series([False, True, False, True])
        assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
        # object rhs: truthiness of each element is used, NaN -> False
        s_abNd = Series(['a', 'b', np.NaN, 'd'])
        res = s_0123 & s_abNd
        expected = s_ftft
        assert_series_equal(res, expected)
def test_scalar_na_cmp_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
def tester(a, b):
return a & b
self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
assert_series_equal(tester(s, list(s)), expected)
d = DataFrame({'A': s})
# TODO: Fix this exception - needs to be fixed! (see GH5035)
# (previously this was a TypeError because series returned
# NotImplemented
# this is an alignment issue; these are equivalent
# https://github.com/pydata/pandas/issues/5284
self.assertRaises(ValueError, lambda: d.__and__(s, axis='columns'))
self.assertRaises(ValueError, tester, s, d)
# this is wrong as its not a boolean result
# result = d.__and__(s,axis='index')
    def test_operators_corner(self):
        """Arithmetic corner cases: empty operands and float + int upcast.

        Uses the ``self.ts`` fixture (a float Series named 'ts' set up
        elsewhere in the test class).
        """
        series = self.ts
        empty = Series([], index=Index([]))
        # non-empty + empty aligns to all-NaN
        result = series + empty
        self.assertTrue(np.isnan(result).all())
        # empty + empty stays empty
        result = empty + Series([], index=Index([]))
        self.assertEqual(len(result), 0)
        # TODO: this returned NotImplemented earlier, what to do?
        # deltas = Series([timedelta(1)] * 5, index=np.arange(5))
        # sub_deltas = deltas[::2]
        # deltas5 = deltas * 5
        # deltas = deltas + sub_deltas
        # float + int
        int_ts = self.ts.astype(int)[:-5]
        added = self.ts + int_ts
        expected = Series(self.ts.values[:-5] + int_ts.values,
                          index=self.ts.index[:-5], name='ts')
        self.assert_series_equal(added[:-5], expected)
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10), dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_arith_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
exp = pd.Series([3.0, 4.0, np.nan, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 + s2, exp)
tm.assert_series_equal(s2 + s1, exp)
exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() + s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() + s1.to_frame(), exp)
# different length
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
exp = pd.Series([3, 4, 5, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 + s4, exp)
tm.assert_series_equal(s4 + s3, exp)
exp = pd.DataFrame({'x': [3, 4, 5, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() + s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() + s3.to_frame(), exp)
def test_comp_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
for l, r in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
msg = "Can only compare identically-labeled Series objects"
with tm.assertRaisesRegexp(ValueError, msg):
l == r
with tm.assertRaisesRegexp(ValueError, msg):
l != r
with tm.assertRaisesRegexp(ValueError, msg):
l < r
msg = "Can only compare identically-labeled DataFrame objects"
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() == r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() != r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() < r.to_frame()
    def test_bool_ops_df_compat(self):
        """Boolean & and | on unaligned Series vs their DataFrame forms.

        GH 1134.  For Series, labels missing from one side become NaN after
        alignment and are then filled with False; DataFrames keep the NaN.
        """
        # GH 1134
        s1 = pd.Series([True, False, True], index=list('ABC'), name='x')
        s2 = pd.Series([True, True, False], index=list('ABD'), name='x')
        exp = pd.Series([True, False, False, False],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s1 & s2, exp)
        tm.assert_series_equal(s2 & s1, exp)
        # True | np.nan => True
        exp = pd.Series([True, True, True, False],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s1 | s2, exp)
        # np.nan | True => np.nan, filled with False
        exp = pd.Series([True, True, False, False],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s2 | s1, exp)
        # DataFrame doesn't fill nan with False
        exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},
                           index=list('ABCD'))
        tm.assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)
        tm.assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)
        exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},
                           index=list('ABCD'))
        tm.assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)
        tm.assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)
        # different length
        s3 = pd.Series([True, False, True], index=list('ABC'), name='x')
        s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')
        exp = pd.Series([True, False, True, False],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s3 & s4, exp)
        tm.assert_series_equal(s4 & s3, exp)
        # np.nan | True => np.nan, filled with False
        exp = pd.Series([True, True, True, False],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s3 | s4, exp)
        # True | np.nan => True
        exp = pd.Series([True, True, True, True],
                        index=list('ABCD'), name='x')
        tm.assert_series_equal(s4 | s3, exp)
        exp = pd.DataFrame({'x': [True, False, True, np.nan]},
                           index=list('ABCD'))
        tm.assert_frame_equal(s3.to_frame() & s4.to_frame(), exp)
        tm.assert_frame_equal(s4.to_frame() & s3.to_frame(), exp)
        exp = pd.DataFrame({'x': [True, True, True, np.nan]},
                           index=list('ABCD'))
        tm.assert_frame_equal(s3.to_frame() | s4.to_frame(), exp)
        tm.assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)
def test_series_frame_radd_bug(self):
# GH 353
vals = Series(tm.rands_array(5, 10))
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals': vals})
result = 'foo_' + frame
expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
with tm.assertRaises(TypeError):
datetime.now() + self.ts
with tm.assertRaises(TypeError):
self.ts + datetime.now()
def test_series_radd_more(self):
data = [[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.NaT],
['x', 'y', 1]]
for d in data:
for dtype in [None, object]:
s = Series(d, dtype=dtype)
with tm.assertRaises(TypeError):
'foo_' + s
for dtype in [None, object]:
res = 1 + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([2, 3, 4], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + 1
tm.assert_series_equal(res, exp)
res = np.nan + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + np.nan
tm.assert_series_equal(res, exp)
s = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')], dtype=dtype)
exp = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'),
pd.Timedelta('6 days')])
tm.assert_series_equal(pd.Timedelta('3 days') + s, exp)
tm.assert_series_equal(s + pd.Timedelta('3 days'), exp)
s = pd.Series(['x', np.nan, 'x'])
tm.assert_series_equal('a' + s, pd.Series(['ax', np.nan, 'ax']))
tm.assert_series_equal(s + 'a', pd.Series(['xa', np.nan, 'xa']))
def test_frame_radd_more(self):
data = [[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.NaT],
['x', 'y', 1]]
for d in data:
for dtype in [None, object]:
s = DataFrame(d, dtype=dtype)
with tm.assertRaises(TypeError):
'foo_' + s
for dtype in [None, object]:
res = 1 + pd.DataFrame([1, 2, 3], dtype=dtype)
exp = pd.DataFrame([2, 3, 4], dtype=dtype)
tm.assert_frame_equal(res, exp)
res = pd.DataFrame([1, 2, 3], dtype=dtype) + 1
tm.assert_frame_equal(res, exp)
res = np.nan + pd.DataFrame([1, 2, 3], dtype=dtype)
exp = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype)
tm.assert_frame_equal(res, exp)
res = pd.DataFrame([1, 2, 3], dtype=dtype) + np.nan
tm.assert_frame_equal(res, exp)
df = pd.DataFrame(['x', np.nan, 'x'])
tm.assert_frame_equal('a' + df, pd.DataFrame(['ax', np.nan, 'ax']))
tm.assert_frame_equal(df + 'a', pd.DataFrame(['xa', np.nan, 'xa']))
def test_operators_frame(self):
    """Series <op> DataFrame-column gives the same result as Series <op>
    Series for +, **, < and / (names aside)."""
    # rpow does not work with DataFrame
    frame = DataFrame({'A': self.ts})
    column = frame['A']
    tm.assert_series_equal(self.ts + self.ts, self.ts + column,
                           check_names=False)
    tm.assert_series_equal(self.ts ** self.ts, self.ts ** column,
                           check_names=False)
    tm.assert_series_equal(self.ts < self.ts, self.ts < column,
                           check_names=False)
    tm.assert_series_equal(self.ts / self.ts, self.ts / column,
                           check_names=False)
def test_operators_combine(self):
    """Flex arithmetic methods with fill_value match a manual
    align-fill-and-apply reference implementation.

    Fix: the original ``_check_fill`` contained an unreachable branch —
    inside ``elif bmask[i]:`` it re-tested ``if amask[i]:``, which is
    necessarily False there (the ``if amask[i]`` branch above would have
    taken it). The dead code is removed; the both-missing case is now
    handled explicitly up front.
    """
    def _check_fill(meth, op, a, b, fill_value=0):
        # Reference semantics: align on the union index; a value missing
        # on exactly one side is replaced by fill_value, missing on both
        # sides stays NaN.
        exp_index = a.index.union(b.index)
        a = a.reindex(exp_index)
        b = b.reindex(exp_index)
        amask = isnull(a)
        bmask = isnull(b)
        exp_values = []
        for i in range(len(exp_index)):
            with np.errstate(all='ignore'):
                if amask[i] and bmask[i]:
                    # missing on both sides -> result is NaN
                    exp_values.append(nan)
                elif amask[i]:
                    exp_values.append(op(fill_value, b[i]))
                elif bmask[i]:
                    exp_values.append(op(a[i], fill_value))
                else:
                    exp_values.append(op(a[i], b[i]))
        result = meth(a, b, fill_value=fill_value)
        expected = Series(exp_values, exp_index)
        assert_series_equal(result, expected)

    a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
    b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))

    pairings = []
    for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']:
        fv = 0
        lop = getattr(Series, op)
        lequiv = getattr(operator, op)
        rop = getattr(Series, 'r' + op)
        # bind op at definition time (avoid the late-binding closure trap)
        requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
        pairings.append((lop, lequiv, fv))
        pairings.append((rop, requiv, fv))
    if compat.PY3:
        pairings.append((Series.div, operator.truediv, 1))
        pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x),
                         1))
    else:
        pairings.append((Series.div, operator.div, 1))
        pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1))

    for op, equiv_op, fv in pairings:
        # flex method must agree with the plain operator...
        result = op(a, b)
        exp = equiv_op(a, b)
        assert_series_equal(result, exp)
        # ...and with the manual fill reference
        _check_fill(op, equiv_op, a, b, fill_value=fv)
        # should accept axis=0 or axis='rows'
        op(a, b, axis=0)
def test_ne(self):
    """`index != scalar` must agree with `~(index == scalar)`."""
    values = [3, 4, 5, 6, 7]
    ts = Series(values, values, dtype=float)
    expected = [v != 5 for v in values]
    self.assertTrue(tm.equalContents(ts.index != 5, expected))
    self.assertTrue(tm.equalContents(~(ts.index == 5), expected))
def test_operators_na_handling(self):
    """NaN introduced by shift() propagates through arithmetic; string
    add/radd concatenates and leaves NaN untouched."""
    from decimal import Decimal
    from datetime import date

    dec = Series([Decimal('1.3'), Decimal('2.3')],
                 index=[date(2012, 1, 1), date(2012, 1, 2)])
    shifted_sum = dec + dec.shift(1)
    shifted_sum2 = dec.shift(1) + dec
    # the first slot has no shifted partner, so the sum is missing
    self.assertTrue(isnull(shifted_sum[0]))
    self.assertTrue(isnull(shifted_sum2[0]))

    words = Series(['foo', 'bar', 'baz', np.nan])
    assert_series_equal('prefix_' + words,
                        Series(['prefix_foo', 'prefix_bar', 'prefix_baz',
                                np.nan]))
    assert_series_equal(words + '_suffix',
                        Series(['foo_suffix', 'bar_suffix', 'baz_suffix',
                                np.nan]))
def test_divide_decimal(self):
    """Dividing a Decimal Series by a Decimal scalar works (GH #9787)."""
    from decimal import Decimal

    expected = Series([Decimal(5)])
    # true division
    result = Series([Decimal(10)]) / Decimal(2)
    tm.assert_series_equal(expected, result)
    # floor division
    result = Series([Decimal(10)]) // Decimal(2)
    tm.assert_series_equal(expected, result)
def test_datetime64_with_index(self):
    """Series minus its own index matches Series minus index.to_series()."""
    # integer-labelled data, default index
    ser = Series(np.random.randn(5))
    baseline = ser - ser.index.to_series()
    assert_series_equal(ser - ser.index, baseline)

    # GH 4629: datetime64 values with a DatetimeIndex
    ser = Series(date_range('20130101', periods=5),
                 index=date_range('20130101', periods=5))
    baseline = ser - ser.index.to_series()
    assert_series_equal(ser - ser.index, baseline)
    # subtracting the PeriodIndex view yields the same deltas
    assert_series_equal(ser - ser.index.to_period(), baseline)

    # same invariant exercised through DataFrame columns
    frame = DataFrame(np.random.randn(5, 2),
                      index=date_range('20130101', periods=5))
    frame['date'] = Timestamp('20130102')
    frame['expected'] = frame['date'] - frame.index.to_series()
    frame['result'] = frame['date'] - frame.index
    assert_series_equal(frame['result'], frame['expected'],
                        check_names=False)
def test_dti_tz_convert_to_utc(self):
    """Adding tz-aware Series aligns on UTC instants, not local labels."""
    base = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
                            tz='UTC')
    # two overlapping views of the same instants, in different zones
    tokyo = base.tz_convert('Asia/Tokyo')[:2]
    eastern = base.tz_convert('US/Eastern')[1:]
    total = Series([1, 2], index=tokyo) + Series([1, 1], index=eastern)
    # only the shared middle instant lines up; the ends are NaN
    assert_series_equal(total, Series([np.nan, 3, np.nan], index=base))
|
[
"numpy.abs",
"operator.add",
"operator.pow",
"numpy.isnan",
"pandas.DatetimeIndex",
"numpy.arange",
"numpy.float64",
"pandas.bdate_range",
"pandas.DataFrame",
"pandas.offsets.Minute",
"numpy.random.randn",
"pandas.tseries.index.Timestamp",
"pandas.util.testing.rands_array",
"datetime.timedelta",
"pandas.Period",
"operator.truediv",
"pandas.Timedelta",
"operator.div",
"pandas.compat.range",
"operator.mod",
"datetime.datetime.now",
"pandas.util.testing.assertIsInstance",
"pandas.core.nanops.nangt",
"pandas.date_range",
"pandas.util.testing.assert_frame_equal",
"pandas.timedelta_range",
"pandas.util.testing.assertRaises",
"pandas.tseries.tdi.Timedelta",
"datetime.date",
"datetime.datetime",
"pandas.Index",
"pandas.to_timedelta",
"pandas.Series",
"pandas.util.testing.assert_almost_equal",
"pandas.util.testing.assert_series_equal",
"operator.sub",
"pandas.Timestamp",
"pandas.compat.zip",
"pandas.offsets.Hour",
"decimal.Decimal",
"pandas.util.testing.equalContents",
"numpy.errstate",
"pandas.util.testing.makeFloatSeries",
"pandas.isnull",
"operator.floordiv",
"pandas.offsets.Second",
"numpy.timedelta64",
"numpy.array",
"pandas.util.testing.assertRaisesRegexp",
"pandas.offsets.Milli",
"numpy.array_equal",
"pandas.util.testing.assert_produces_warning",
"operator.mul"
] |
[((834, 853), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (849, 853), True, 'import numpy as np\n'), ((870, 889), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (885, 889), True, 'import numpy as np\n'), ((934, 959), 'pandas.core.nanops.nangt', 'nanops.nangt', (['left', 'right'], {}), '(left, right)\n', (946, 959), True, 'import pandas.core.nanops as nanops\n'), ((1093, 1130), 'pandas.util.testing.assert_almost_equal', 'assert_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (1112, 1130), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((1144, 1167), 'pandas.Series', 'Series', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (1150, 1167), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((1181, 1209), 'pandas.Series', 'Series', (['[False, True, False]'], {}), '([False, True, False])\n', (1187, 1209), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((1245, 1274), 'pandas.Series', 'Series', (['[False, False, False]'], {}), '([False, False, False])\n', (1251, 1274), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((1283, 1319), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(s == s2)', 'exp'], {}), '(s == s2, exp)\n', (1305, 1319), True, 'import pandas.util.testing as tm\n'), ((1328, 1364), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(s2 == s)', 'exp'], {}), '(s2 == s, exp)\n', (1350, 1364), True, 'import pandas.util.testing as tm\n'), ((2461, 2512), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(-self.series)', '(-1 * self.series)'], {}), '(-self.series, -1 * self.series)\n', (2480, 2512), False, 'from pandas.util.testing import 
assert_series_equal, assert_almost_equal\n'), ((2549, 2608), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(-(self.series < 0))', '(~(self.series < 0))'], {}), '(-(self.series < 0), ~(self.series < 0))\n', (2568, 2608), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((6874, 6904), 'pandas.Series', 'Series', (['[]', '[]'], {'dtype': 'np.int32'}), '([], [], dtype=np.int32)\n', (6880, 6904), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((6918, 6936), 'pandas.Series', 'Series', (["{'x': 0.0}"], {}), "({'x': 0.0})\n", (6924, 6936), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((7469, 7512), 'pandas.date_range', 'date_range', (['"""2012-1-1"""'], {'periods': '(3)', 'freq': '"""D"""'}), "('2012-1-1', periods=3, freq='D')\n", (7479, 7512), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((7526, 7569), 'pandas.date_range', 'date_range', (['"""2012-1-2"""'], {'periods': '(3)', 'freq': '"""D"""'}), "('2012-1-2', periods=3, freq='D')\n", (7536, 7569), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((7724, 7751), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['rs', 'xp'], {}), '(rs, xp)\n', (7743, 7751), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((8237, 8273), 'pandas.util.testing.assertIsInstance', 'tm.assertIsInstance', (['maxa', 'Timestamp'], {}), '(maxa, Timestamp)\n', (8256, 8273), True, 'import pandas.util.testing as tm\n'), ((8563, 8587), 'pandas.Series', 'Series', (['values'], {'name': '"""A"""'}), "(values, name='A')\n", (8569, 8587), False, 'from pandas import Index, Series, DataFrame, isnull, 
bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((8596, 8633), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (8615, 8633), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((8813, 8850), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (8832, 8850), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((8913, 8939), 'datetime.datetime', 'datetime', (['(2001)', '(1)', '(1)', '(3)', '(4)'], {}), '(2001, 1, 1, 3, 4)\n', (8921, 8939), False, 'from datetime import datetime, timedelta\n'), ((9079, 9116), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (["df['A']", 'resultb'], {}), "(df['A'], resultb)\n", (9098, 9116), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((9159, 9176), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (9168, 9176), False, 'from datetime import datetime, timedelta\n'), ((9247, 9284), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['resultb', "df['A']"], {}), "(resultb, df['A'])\n", (9266, 9284), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((9369, 9400), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)', 'seconds': '(3)'}), '(minutes=5, seconds=3)\n', (9378, 9400), False, 'from datetime import datetime, timedelta\n'), ((9471, 9508), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (["df['A']", 'resultb'], {}), "(df['A'], resultb)\n", (9490, 9508), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((10667, 10704), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (10686, 10704), False, 'from pandas.util.testing import assert_series_equal, 
assert_almost_equal\n'), ((10713, 10751), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result2', 'expected'], {}), '(result2, expected)\n', (10732, 10751), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((10950, 10987), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (10969, 10987), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((10996, 11034), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result2', 'expected'], {}), '(result2, expected)\n', (11015, 11034), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((11238, 11275), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (11257, 11275), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((11284, 11322), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result2', 'expected'], {}), '(result2, expected)\n', (11303, 11322), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((11507, 11544), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (11526, 11544), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((11794, 11831), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (11813, 11831), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((11840, 11878), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result2', 'expected'], {}), '(result2, expected)\n', (11859, 11878), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((12090, 12127), 
'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (12109, 12127), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((12136, 12174), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result2', 'expected'], {}), '(result2, expected)\n', (12155, 12174), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((12519, 12542), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""2012-01-01"""'], {}), "('2012-01-01')\n", (12528, 12542), False, 'from pandas.tseries.index import Timestamp\n'), ((12614, 12651), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(ts + s)', 'expected'], {}), '(ts + s, expected)\n', (12633, 12651), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((12660, 12697), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(s + ts)', 'expected'], {}), '(s + ts, expected)\n', (12679, 12697), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((12783, 12821), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(ts - s)', 'expected2'], {}), '(ts - s, expected2)\n', (12802, 12821), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((12830, 12869), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(ts + -s)', 'expected2'], {}), '(ts + -s, expected2)\n', (12849, 12869), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((13126, 13163), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (13145, 13163), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((13281, 13318), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', 
(13300, 13318), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((13602, 13639), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (13621, 13639), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((13782, 13819), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (13801, 13819), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((14246, 14277), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)', 'seconds': '(4)'}), '(minutes=5, seconds=4)\n', (14255, 14277), False, 'from datetime import datetime, timedelta\n'), ((14466, 14503), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (14485, 14503), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((14647, 14685), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result2', 'expected'], {}), '(result2, expected)\n', (14666, 14685), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((14715, 14753), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(result + td2)', 'td1'], {}), '(result + td2, td1)\n', (14734, 14753), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((14940, 14967), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""00:05:04"""'], {}), "('00:05:04')\n", (14955, 14967), True, 'import pandas as pd\n'), ((15156, 15193), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (15175, 15193), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((15337, 15375), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result2', 'expected'], 
{}), '(result2, expected)\n', (15356, 15375), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((15405, 15443), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(result + td2)', 'td1'], {}), '(result + td2, td1)\n', (15424, 15443), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((15761, 15778), 'pandas.Series', 'Series', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (15767, 15778), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((15916, 15953), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (15935, 15953), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((15968, 15988), 'pandas.Series', 'Series', (['[20, 30, 40]'], {}), '([20, 30, 40])\n', (15974, 15988), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((16126, 16163), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (16145, 16163), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((16300, 16337), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (16319, 16337), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((16352, 16372), 'pandas.Series', 'Series', (['[20, 30, 40]'], {}), '([20, 30, 40])\n', (16358, 16372), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((16510, 16547), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (16529, 16547), False, 'from pandas.util.testing import 
assert_series_equal, assert_almost_equal\n'), ((17089, 17126), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (17108, 17126), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((17265, 17302), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (17284, 17302), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((21145, 21176), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)', 'seconds': '(4)'}), '(minutes=5, seconds=4)\n', (21154, 21176), False, 'from datetime import datetime, timedelta\n'), ((23357, 23389), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'exp'], {}), '(result, exp)\n', (23376, 23389), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((23498, 23530), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'exp'], {}), '(result, exp)\n', (23517, 23530), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((24034, 24066), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'exp'], {}), '(result, exp)\n', (24053, 24066), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((24234, 24266), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'exp'], {}), '(result, exp)\n', (24253, 24266), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((24428, 24460), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'exp'], {}), '(result, exp)\n', (24447, 24460), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((24563, 24595), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'exp'], {}), '(result, exp)\n', (24582, 
24595), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((24698, 24730), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'exp'], {}), '(result, exp)\n', (24717, 24730), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((24833, 24865), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'exp'], {}), '(result, exp)\n', (24852, 24865), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((25261, 25298), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (25280, 25298), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((25381, 25418), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (25400, 25418), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((25625, 25668), 'pandas.Series', 'Series', (['[NaT, NaT]'], {'dtype': '"""timedelta64[ns]"""'}), "([NaT, NaT], dtype='timedelta64[ns]')\n", (25631, 25668), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((25719, 25761), 'pandas.Series', 'Series', (['[NaT, NaT]'], {'dtype': '"""datetime64[ns]"""'}), "([NaT, NaT], dtype='datetime64[ns]')\n", (25725, 25761), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((25798, 25835), 'pandas.Series', 'Series', (['[NaT]'], {'dtype': '"""datetime64[ns]"""'}), "([NaT], dtype='datetime64[ns]')\n", (25804, 25835), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((25873, 25911), 'pandas.Series', 'Series', (['[NaT]'], {'dtype': '"""timedelta64[ns]"""'}), "([NaT], 
dtype='timedelta64[ns]')\n", (25879, 25911), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((25943, 26014), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(timedelta_series - NaT)', 'nat_series_dtype_timedelta'], {}), '(timedelta_series - NaT, nat_series_dtype_timedelta)\n', (25962, 26014), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((26023, 26095), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(-NaT + timedelta_series)', 'nat_series_dtype_timedelta'], {}), '(-NaT + timedelta_series, nat_series_dtype_timedelta)\n', (26042, 26095), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((26133, 26231), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(timedelta_series - single_nat_dtype_timedelta)', 'nat_series_dtype_timedelta'], {}), '(timedelta_series - single_nat_dtype_timedelta,\n nat_series_dtype_timedelta)\n', (26152, 26231), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((26264, 26363), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(-single_nat_dtype_timedelta + timedelta_series)', 'nat_series_dtype_timedelta'], {}), '(-single_nat_dtype_timedelta + timedelta_series,\n nat_series_dtype_timedelta)\n', (26283, 26363), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((26397, 26467), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(datetime_series - NaT)', 'nat_series_dtype_timestamp'], {}), '(datetime_series - NaT, nat_series_dtype_timestamp)\n', (26416, 26467), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((26476, 26547), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(-NaT + datetime_series)', 'nat_series_dtype_timestamp'], {}), '(-NaT + datetime_series, 
nat_series_dtype_timestamp)\n', (26495, 26547), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((26557, 26653), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(datetime_series - single_nat_dtype_datetime)', 'nat_series_dtype_timedelta'], {}), '(datetime_series - single_nat_dtype_datetime,\n nat_series_dtype_timedelta)\n', (26576, 26653), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((26785, 26882), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(datetime_series - single_nat_dtype_timedelta)', 'nat_series_dtype_timestamp'], {}), '(datetime_series - single_nat_dtype_timedelta,\n nat_series_dtype_timestamp)\n', (26804, 26882), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((26915, 27013), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(-single_nat_dtype_timedelta + datetime_series)', 'nat_series_dtype_timestamp'], {}), '(-single_nat_dtype_timedelta + datetime_series,\n nat_series_dtype_timestamp)\n', (26934, 27013), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((27213, 27298), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(nat_series_dtype_timestamp - NaT)', 'nat_series_dtype_timestamp'], {}), '(nat_series_dtype_timestamp - NaT,\n nat_series_dtype_timestamp)\n', (27232, 27298), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((27331, 27417), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(-NaT + nat_series_dtype_timestamp)', 'nat_series_dtype_timestamp'], {}), '(-NaT + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp)\n', (27350, 27417), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((27451, 27558), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(nat_series_dtype_timestamp - 
single_nat_dtype_datetime)', 'nat_series_dtype_timedelta'], {}), '(nat_series_dtype_timestamp - single_nat_dtype_datetime,\n nat_series_dtype_timedelta)\n', (27470, 27558), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((27729, 27837), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(nat_series_dtype_timestamp - single_nat_dtype_timedelta)', 'nat_series_dtype_timestamp'], {}), '(nat_series_dtype_timestamp - single_nat_dtype_timedelta,\n nat_series_dtype_timestamp)\n', (27748, 27837), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((27898, 28007), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(-single_nat_dtype_timedelta + nat_series_dtype_timestamp)', 'nat_series_dtype_timestamp'], {}), '(-single_nat_dtype_timedelta +\n nat_series_dtype_timestamp, nat_series_dtype_timestamp)\n', (27917, 28007), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((28187, 28272), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(nat_series_dtype_timestamp + NaT)', 'nat_series_dtype_timestamp'], {}), '(nat_series_dtype_timestamp + NaT,\n nat_series_dtype_timestamp)\n', (28206, 28272), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((28305, 28390), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(NaT + nat_series_dtype_timestamp)', 'nat_series_dtype_timestamp'], {}), '(NaT + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp)\n', (28324, 28390), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((28424, 28532), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(nat_series_dtype_timestamp + single_nat_dtype_timedelta)', 'nat_series_dtype_timestamp'], {}), '(nat_series_dtype_timestamp + single_nat_dtype_timedelta,\n nat_series_dtype_timestamp)\n', (28443, 28532), False, 'from 
pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((28593, 28701), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(single_nat_dtype_timedelta + nat_series_dtype_timestamp)', 'nat_series_dtype_timestamp'], {}), '(single_nat_dtype_timedelta + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp)\n', (28612, 28701), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((28763, 28848), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(nat_series_dtype_timedelta + NaT)', 'nat_series_dtype_timedelta'], {}), '(nat_series_dtype_timedelta + NaT,\n nat_series_dtype_timedelta)\n', (28782, 28848), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((28881, 28966), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(NaT + nat_series_dtype_timedelta)', 'nat_series_dtype_timedelta'], {}), '(NaT + nat_series_dtype_timedelta,\n nat_series_dtype_timedelta)\n', (28900, 28966), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((29000, 29108), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(nat_series_dtype_timedelta + single_nat_dtype_timedelta)', 'nat_series_dtype_timedelta'], {}), '(nat_series_dtype_timedelta + single_nat_dtype_timedelta,\n nat_series_dtype_timedelta)\n', (29019, 29108), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((29169, 29277), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(single_nat_dtype_timedelta + nat_series_dtype_timedelta)', 'nat_series_dtype_timedelta'], {}), '(single_nat_dtype_timedelta + nat_series_dtype_timedelta,\n nat_series_dtype_timedelta)\n', (29188, 29277), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((29339, 29410), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(timedelta_series + NaT)', 
'nat_series_dtype_timedelta'], {}), '(timedelta_series + NaT, nat_series_dtype_timedelta)\n', (29358, 29410), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((29419, 29490), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(NaT + timedelta_series)', 'nat_series_dtype_timedelta'], {}), '(NaT + timedelta_series, nat_series_dtype_timedelta)\n', (29438, 29490), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((29500, 29598), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(timedelta_series + single_nat_dtype_timedelta)', 'nat_series_dtype_timedelta'], {}), '(timedelta_series + single_nat_dtype_timedelta,\n nat_series_dtype_timedelta)\n', (29519, 29598), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((29631, 29729), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(single_nat_dtype_timedelta + timedelta_series)', 'nat_series_dtype_timedelta'], {}), '(single_nat_dtype_timedelta + timedelta_series,\n nat_series_dtype_timedelta)\n', (29650, 29729), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((29763, 29848), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(nat_series_dtype_timestamp + NaT)', 'nat_series_dtype_timestamp'], {}), '(nat_series_dtype_timestamp + NaT,\n nat_series_dtype_timestamp)\n', (29782, 29848), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((29881, 29966), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(NaT + nat_series_dtype_timestamp)', 'nat_series_dtype_timestamp'], {}), '(NaT + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp)\n', (29900, 29966), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((30000, 30108), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(nat_series_dtype_timestamp + 
single_nat_dtype_timedelta)', 'nat_series_dtype_timestamp'], {}), '(nat_series_dtype_timestamp + single_nat_dtype_timedelta,\n nat_series_dtype_timestamp)\n', (30019, 30108), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((30169, 30277), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(single_nat_dtype_timedelta + nat_series_dtype_timestamp)', 'nat_series_dtype_timestamp'], {}), '(single_nat_dtype_timedelta + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp)\n', (30188, 30277), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((30339, 30424), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(nat_series_dtype_timedelta + NaT)', 'nat_series_dtype_timedelta'], {}), '(nat_series_dtype_timedelta + NaT,\n nat_series_dtype_timedelta)\n', (30358, 30424), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((30457, 30542), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(NaT + nat_series_dtype_timedelta)', 'nat_series_dtype_timedelta'], {}), '(NaT + nat_series_dtype_timedelta,\n nat_series_dtype_timedelta)\n', (30476, 30542), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((30576, 30684), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(nat_series_dtype_timedelta + single_nat_dtype_timedelta)', 'nat_series_dtype_timedelta'], {}), '(nat_series_dtype_timedelta + single_nat_dtype_timedelta,\n nat_series_dtype_timedelta)\n', (30595, 30684), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((30745, 30853), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(single_nat_dtype_timedelta + nat_series_dtype_timedelta)', 'nat_series_dtype_timedelta'], {}), '(single_nat_dtype_timedelta + nat_series_dtype_timedelta,\n nat_series_dtype_timedelta)\n', (30764, 30853), False, 'from 
pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((30915, 31022), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(nat_series_dtype_timedelta + single_nat_dtype_datetime)', 'nat_series_dtype_timestamp'], {}), '(nat_series_dtype_timedelta + single_nat_dtype_datetime,\n nat_series_dtype_timestamp)\n', (30934, 31022), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((31083, 31190), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(single_nat_dtype_datetime + nat_series_dtype_timedelta)', 'nat_series_dtype_timestamp'], {}), '(single_nat_dtype_datetime + nat_series_dtype_timedelta,\n nat_series_dtype_timestamp)\n', (31102, 31190), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((31277, 31362), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(nat_series_dtype_timedelta * 1.0)', 'nat_series_dtype_timedelta'], {}), '(nat_series_dtype_timedelta * 1.0,\n nat_series_dtype_timedelta)\n', (31296, 31362), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((31395, 31480), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(1.0 * nat_series_dtype_timedelta)', 'nat_series_dtype_timedelta'], {}), '(1.0 * nat_series_dtype_timedelta,\n nat_series_dtype_timedelta)\n', (31414, 31480), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((31514, 31573), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(timedelta_series * 1)', 'timedelta_series'], {}), '(timedelta_series * 1, timedelta_series)\n', (31533, 31573), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((31582, 31641), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(1 * timedelta_series)', 'timedelta_series'], {}), '(1 * timedelta_series, timedelta_series)\n', (31601, 31641), False, 'from 
pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((31880, 31951), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(timedelta_series * nan)', 'nat_series_dtype_timedelta'], {}), '(timedelta_series * nan, nat_series_dtype_timedelta)\n', (31899, 31951), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((31960, 32031), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(nan * timedelta_series)', 'nat_series_dtype_timedelta'], {}), '(nan * timedelta_series, nat_series_dtype_timedelta)\n', (31979, 32031), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((32605, 32676), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(timedelta_series / nan)', 'nat_series_dtype_timedelta'], {}), '(timedelta_series / nan, nat_series_dtype_timedelta)\n', (32624, 32676), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((33189, 33226), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (33208, 33226), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((33247, 33271), 'pandas.Series', 'Series', (['expected'], {'name': '(0)'}), '(expected, name=0)\n', (33253, 33271), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((33333, 33370), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (33352, 33370), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((33423, 33459), 'pandas.Series', 'Series', (["['a', 'b', np.nan, 'c', 'a']"], {}), "(['a', 'b', np.nan, 'c', 'a'])\n", (33429, 33459), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), 
((33506, 33547), 'pandas.Series', 'Series', (['[True, False, False, False, True]'], {}), '([True, False, False, False, True])\n', (33512, 33547), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((33556, 33593), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (33575, 33593), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((33639, 33682), 'pandas.Series', 'Series', (['[False, False, False, False, False]'], {}), '([False, False, False, False, False])\n', (33645, 33682), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((33691, 33728), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (33710, 33728), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((33795, 33832), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (33814, 33832), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((33933, 33957), 'pandas.Series', 'Series', (['[(1, 1), (1, 2)]'], {}), '([(1, 1), (1, 2)])\n', (33939, 33957), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((34007, 34028), 'pandas.Series', 'Series', (['[False, True]'], {}), '([False, True])\n', (34013, 34028), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((34037, 34074), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (34056, 34074), False, 'from pandas.util.testing import assert_series_equal, 
assert_almost_equal\n'), ((34124, 34145), 'pandas.Series', 'Series', (['[True, False]'], {}), '([True, False])\n', (34130, 34145), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((34154, 34191), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (34173, 34191), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((34241, 34263), 'pandas.Series', 'Series', (['[False, False]'], {}), '([False, False])\n', (34247, 34263), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((34272, 34309), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (34291, 34309), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((34359, 34379), 'pandas.Series', 'Series', (['[True, True]'], {}), '([True, True])\n', (34365, 34379), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((34388, 34425), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (34407, 34425), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((34439, 34463), 'pandas.Series', 'Series', (['[(1, 1), (1, 1)]'], {}), '([(1, 1), (1, 1)])\n', (34445, 34463), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((34513, 34533), 'pandas.Series', 'Series', (['[True, True]'], {}), '([True, True])\n', (34519, 34533), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((34542, 34579), 
'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (34561, 34579), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((34629, 34651), 'pandas.Series', 'Series', (['[False, False]'], {}), '([False, False])\n', (34635, 34651), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((34660, 34697), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (34679, 34697), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((34812, 34833), 'pandas.Series', 'Series', (['[True, False]'], {}), '([True, False])\n', (34818, 34833), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((34842, 34879), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (34861, 34879), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((40861, 40884), 'pandas.Series', 'Series', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (40867, 40884), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((40897, 40915), 'pandas.Series', 'Series', (["['b', 'a']"], {}), "(['b', 'a'])\n", (40903, 40915), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((40980, 40994), 'pandas.Series', 'Series', (['[1, 2]'], {}), '([1, 2])\n', (40986, 40994), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((41007, 41024), 'pandas.Series', 'Series', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (41013, 41024), False, 'from 
pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((41384, 41421), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (41403, 41421), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((41514, 41551), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (41533, 41551), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((41645, 41682), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (41664, 41682), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((41923, 41960), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (41942, 41960), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((42061, 42098), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (42080, 42098), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((42240, 42277), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (42259, 42277), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((42379, 42416), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (42398, 42416), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((42562, 42599), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (42581, 42599), False, 'from pandas.util.testing import 
assert_series_equal, assert_almost_equal\n'), ((42717, 42754), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (42736, 42754), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((43371, 43398), 'pandas.Series', 'Series', (['[True, False, True]'], {}), '([True, False, True])\n', (43377, 43398), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((47696, 47736), 'pandas.Series', 'Series', (['[True, False, True]'], {'index': 'index'}), '([True, False, True], index=index)\n', (47702, 47736), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((47753, 47795), 'pandas.Series', 'Series', (['[False, False, False]'], {'index': 'index'}), '([False, False, False], index=index)\n', (47759, 47795), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((47812, 47853), 'pandas.Series', 'Series', (['[True, False, False]'], {'index': 'index'}), '([True, False, False], index=index)\n', (47818, 47853), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((47872, 47882), 'pandas.Series', 'Series', (['[]'], {}), '([])\n', (47878, 47882), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((48014, 48029), 'pandas.Series', 'Series', (['([3] * 4)'], {}), '([3] * 4)\n', (48020, 48029), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((48047, 48062), 'pandas.Series', 'Series', (['([4] * 4)'], {}), '([4] * 4)\n', (48053, 48062), False, 'from pandas import Index, Series, DataFrame, isnull, 
bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((48127, 48161), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (48146, 48161), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((48226, 48260), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (48245, 48260), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((48351, 48385), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (48370, 48385), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((48479, 48513), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (48498, 48513), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((48643, 48677), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (48662, 48677), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((48764, 48798), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (48783, 48798), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((48873, 48907), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (48892, 48907), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((48954, 48969), 'pandas.Series', 'Series', (['([0] * 4)'], {}), '([0] * 4)\n', (48960, 48969), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((48978, 49012), 'pandas.util.testing.assert_series_equal', 
'assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (48997, 49012), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((49087, 49121), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (49106, 49121), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((49168, 49188), 'pandas.Series', 'Series', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (49174, 49188), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((49197, 49231), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (49216, 49231), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((49250, 49279), 'pandas.Series', 'Series', (['([1] * 4)'], {'dtype': '"""int8"""'}), "([1] * 4, dtype='int8')\n", (49256, 49279), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((49329, 49364), 'pandas.Series', 'Series', (['[0, 1, 0, 1]'], {'dtype': '"""int64"""'}), "([0, 1, 0, 1], dtype='int64')\n", (49335, 49364), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((49373, 49407), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (49392, 49407), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((49492, 49527), 'pandas.Series', 'Series', (['[1, 1, 3, 3]'], {'dtype': '"""int32"""'}), "([1, 1, 3, 3], dtype='int32')\n", (49498, 49527), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((49536, 49570), 'pandas.util.testing.assert_series_equal', 
'assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (49555, 49570), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((51300, 51334), 'pandas.Series', 'Series', (['[False, True, False, True]'], {}), '([False, True, False, True])\n', (51306, 51334), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((51426, 51457), 'pandas.Series', 'Series', (["['a', 'b', np.NaN, 'd']"], {}), "(['a', 'b', np.NaN, 'd'])\n", (51432, 51457), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((51522, 51556), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['res', 'expected'], {}), '(res, expected)\n', (51541, 51556), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((51612, 51648), 'pandas.Series', 'Series', (['[2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (51618, 51648), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((51884, 51911), 'pandas.Series', 'Series', (['(True)'], {'index': 's.index'}), '(True, index=s.index)\n', (51890, 51911), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((52013, 52032), 'pandas.DataFrame', 'DataFrame', (["{'A': s}"], {}), "({'A': s})\n", (52022, 52032), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((53175, 53260), 'pandas.Series', 'Series', (['(self.ts.values[:-5] + int_ts.values)'], {'index': 'self.ts.index[:-5]', 'name': '"""ts"""'}), "(self.ts.values[:-5] + int_ts.values, index=self.ts.index[:-5], name='ts'\n )\n", (53181, 53260), False, 'from pandas import Index, Series, DataFrame, isnull, 
bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((54150, 54186), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(s1 + s2)', 'exp'], {}), '(s1 + s2, exp)\n', (54172, 54186), True, 'import pandas.util.testing as tm\n'), ((54195, 54231), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(s2 + s1)', 'exp'], {}), '(s2 + s1, exp)\n', (54217, 54231), True, 'import pandas.util.testing as tm\n'), ((54738, 54774), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(s3 + s4)', 'exp'], {}), '(s3 + s4, exp)\n', (54760, 54774), True, 'import pandas.util.testing as tm\n'), ((54783, 54819), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(s4 + s3)', 'exp'], {}), '(s4 + s3, exp)\n', (54805, 54819), True, 'import pandas.util.testing as tm\n'), ((56448, 56484), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(s1 & s2)', 'exp'], {}), '(s1 & s2, exp)\n', (56470, 56484), True, 'import pandas.util.testing as tm\n'), ((56493, 56529), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(s2 & s1)', 'exp'], {}), '(s2 & s1, exp)\n', (56515, 56529), True, 'import pandas.util.testing as tm\n'), ((56676, 56712), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(s1 | s2)', 'exp'], {}), '(s1 | s2, exp)\n', (56698, 56712), True, 'import pandas.util.testing as tm\n'), ((56880, 56916), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(s2 | s1)', 'exp'], {}), '(s2 | s1, exp)\n', (56902, 56916), True, 'import pandas.util.testing as tm\n'), ((57749, 57785), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(s3 & s4)', 'exp'], {}), '(s3 & s4, exp)\n', (57771, 57785), True, 'import pandas.util.testing as tm\n'), ((57794, 57830), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(s4 & s3)', 'exp'], {}), '(s4 & s3, exp)\n', (57816, 57830), True, 'import 
pandas.util.testing as tm\n'), ((57998, 58034), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(s3 | s4)', 'exp'], {}), '(s3 | s4, exp)\n', (58020, 58034), True, 'import pandas.util.testing as tm\n'), ((58179, 58215), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(s4 | s3)', 'exp'], {}), '(s4 | s3, exp)\n', (58201, 58215), True, 'import pandas.util.testing as tm\n'), ((58895, 58932), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (58914, 58932), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((58950, 58975), 'pandas.DataFrame', 'DataFrame', (["{'vals': vals}"], {}), "({'vals': vals})\n", (58959, 58975), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((59087, 59126), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (59108, 59126), True, 'import pandas.util.testing as tm\n'), ((60727, 60756), 'pandas.Series', 'pd.Series', (["['x', np.nan, 'x']"], {}), "(['x', np.nan, 'x'])\n", (60736, 60756), True, 'import pandas as pd\n'), ((61921, 61953), 'pandas.DataFrame', 'pd.DataFrame', (["['x', np.nan, 'x']"], {}), "(['x', np.nan, 'x'])\n", (61933, 61953), True, 'import pandas as pd\n'), ((62200, 62225), 'pandas.DataFrame', 'DataFrame', (["{'A': self.ts}"], {}), "({'A': self.ts})\n", (62209, 62225), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((62235, 62314), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(self.ts + self.ts)', "(self.ts + df['A'])"], {'check_names': '(False)'}), "(self.ts + self.ts, self.ts + df['A'], check_names=False)\n", (62257, 62314), True, 'import pandas.util.testing as tm\n'), ((62354, 62440), 
'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(self.ts ** self.ts)', "(self.ts ** df['A'])"], {'check_names': '(False)'}), "(self.ts ** self.ts, self.ts ** df['A'], check_names=\n False)\n", (62376, 62440), True, 'import pandas.util.testing as tm\n'), ((62475, 62554), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(self.ts < self.ts)', "(self.ts < df['A'])"], {'check_names': '(False)'}), "(self.ts < self.ts, self.ts < df['A'], check_names=False)\n", (62497, 62554), True, 'import pandas.util.testing as tm\n'), ((62594, 62673), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['(self.ts / self.ts)', "(self.ts / df['A'])"], {'check_names': '(False)'}), "(self.ts / self.ts, self.ts / df['A'], check_names=False)\n", (62616, 62673), True, 'import pandas.util.testing as tm\n'), ((65043, 65096), 'pandas.Series', 'Series', (['[3, 4, 5, 6, 7]', '[3, 4, 5, 6, 7]'], {'dtype': 'float'}), '([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)\n', (65049, 65096), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((65680, 65717), 'pandas.Series', 'Series', (["['foo', 'bar', 'baz', np.nan]"], {}), "(['foo', 'bar', 'baz', np.nan])\n", (65686, 65717), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((65768, 65826), 'pandas.Series', 'Series', (["['prefix_foo', 'prefix_bar', 'prefix_baz', np.nan]"], {}), "(['prefix_foo', 'prefix_bar', 'prefix_baz', np.nan])\n", (65774, 65826), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((65835, 65872), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (65854, 65872), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((65924, 
65982), 'pandas.Series', 'Series', (["['foo_suffix', 'bar_suffix', 'baz_suffix', np.nan]"], {}), "(['foo_suffix', 'bar_suffix', 'baz_suffix', np.nan])\n", (65930, 65982), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((65991, 66028), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (66010, 66028), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((66250, 66285), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['expected', 's'], {}), '(expected, s)\n', (66272, 66285), True, 'import pandas.util.testing as tm\n'), ((66358, 66393), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['expected', 's'], {}), '(expected, s)\n', (66380, 66393), True, 'import pandas.util.testing as tm\n'), ((66604, 66641), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (66623, 66641), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((66905, 66942), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (66924, 66942), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((66993, 67030), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (67012, 67030), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((67163, 67184), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130102"""'], {}), "('20130102')\n", (67172, 67184), False, 'from pandas.tseries.index import Timestamp\n'), ((67297, 67365), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (["df['result']", "df['expected']"], {'check_names': '(False)'}), "(df['result'], 
df['expected'], check_names=False)\n", (67316, 67365), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((67424, 67494), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["['2011-01-01', '2011-01-02', '2011-01-03']"], {'tz': '"""UTC"""'}), "(['2011-01-01', '2011-01-02', '2011-01-03'], tz='UTC')\n", (67440, 67494), True, 'import pandas as pd\n'), ((973, 1002), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (984, 1002), True, 'import numpy as np\n'), ((2364, 2384), 'pandas.util.testing.makeFloatSeries', 'tm.makeFloatSeries', ([], {}), '()\n', (2382, 2384), True, 'import pandas.util.testing as tm\n'), ((2386, 2406), 'pandas.util.testing.makeFloatSeries', 'tm.makeFloatSeries', ([], {}), '()\n', (2404, 2406), True, 'import pandas.util.testing as tm\n'), ((2647, 2672), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (2658, 2672), True, 'import numpy as np\n'), ((2764, 2822), 'pandas.DataFrame', 'DataFrame', (["{'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]}"], {}), "({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})\n", (2773, 2822), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((3055, 3092), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (3074, 3092), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((3153, 3196), 'pandas.Series', 'Series', (['np.inf'], {'index': 'p.index', 'name': '"""first"""'}), "(np.inf, index=p.index, name='first')\n", (3159, 3196), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((3209, 3246), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (3228, 3246), False, 'from pandas.util.testing 
import assert_series_equal, assert_almost_equal\n'), ((3353, 3399), 'pandas.Series', 'Series', (["(p['first'].values / p['second'].values)"], {}), "(p['first'].values / p['second'].values)\n", (3359, 3399), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((3412, 3449), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (3431, 3449), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((3467, 3525), 'pandas.DataFrame', 'DataFrame', (["{'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]}"], {}), "({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})\n", (3476, 3525), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((3864, 3891), 'pandas.Series', 'Series', (['[np.nan, 1.0, -1.0]'], {}), '([np.nan, 1.0, -1.0])\n', (3870, 3891), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((3940, 3973), 'pandas.Series', 'Series', (['[np.nan, np.inf, -np.inf]'], {}), '([np.nan, np.inf, -np.inf])\n', (3946, 3973), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((3986, 4023), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (4005, 4023), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((4097, 4151), 'pandas.DataFrame', 'DataFrame', (["{'first': (1, 0), 'second': (-0.01, -0.02)}"], {}), "({'first': (1, 0), 'second': (-0.01, -0.02)})\n", (4106, 4151), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((4175, 4199), 'pandas.Series', 'Series', (['[-0.01, -np.inf]'], 
{}), '([-0.01, -np.inf])\n', (4181, 4199), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((4262, 4318), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {'check_names': '(False)'}), '(result, expected, check_names=False)\n', (4281, 4318), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((4378, 4415), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (4397, 4415), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((4455, 4473), 'pandas.Series', 'Series', (['[-1, 0, 1]'], {}), '([-1, 0, 1])\n', (4461, 4473), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((4525, 4548), 'pandas.Series', 'Series', (['[0.0, nan, 0.0]'], {}), '([0.0, nan, 0.0])\n', (4531, 4548), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((4561, 4598), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (4580, 4598), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((4650, 4674), 'pandas.Series', 'Series', (['[-inf, nan, inf]'], {}), '([-inf, nan, inf])\n', (4656, 4674), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((4687, 4724), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (4706, 4724), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((4777, 4801), 'pandas.Series', 'Series', (['[-inf, nan, inf]'], {}), '([-inf, nan, inf])\n', (4783, 4801), False, 'from pandas 
import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((4814, 4851), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (4833, 4851), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((5200, 5272), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['cython_or_numpy', 'python'], {'check_dtype': 'check_dtype'}), '(cython_or_numpy, python, check_dtype=check_dtype)\n', (5222, 5272), True, 'import pandas.util.testing as tm\n'), ((6976, 7005), 'pandas.Series', 'Series', (['[np.nan]'], {'index': "['x']"}), "([np.nan], index=['x'])\n", (6982, 7005), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((7224, 7251), 'numpy.array', 'np.array', (['(1)'], {'dtype': 'np.int64'}), '(1, dtype=np.int64)\n', (7232, 7251), True, 'import numpy as np\n'), ((7404, 7431), 'numpy.array', 'np.array', (['(1)'], {'dtype': 'np.int64'}), '(1, dtype=np.int64)\n', (7412, 7431), True, 'import numpy as np\n'), ((7583, 7593), 'pandas.Series', 'Series', (['v2'], {}), '(v2)\n', (7589, 7593), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((7596, 7606), 'pandas.Series', 'Series', (['v1'], {}), '(v1)\n', (7602, 7606), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((8457, 8478), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20111230"""'], {}), "('20111230')\n", (8466, 8478), False, 'from pandas.tseries.index import Timestamp\n'), ((8480, 8501), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20120101"""'], {}), "('20120101')\n", (8489, 8501), False, 'from pandas.tseries.index import Timestamp\n'), ((8521, 8542), 'pandas.tseries.index.Timestamp', 
'Timestamp', (['"""20120103"""'], {}), "('20120103')\n", (8530, 8542), False, 'from pandas.tseries.index import Timestamp\n'), ((8689, 8709), 'datetime.datetime', 'datetime', (['(2001)', '(1)', '(1)'], {}), '(2001, 1, 1)\n', (8697, 8709), False, 'from datetime import datetime, timedelta\n'), ((9682, 9713), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)', 'seconds': '(1)'}), '(minutes=5, seconds=1)\n', (9691, 9713), False, 'from datetime import datetime, timedelta\n'), ((9847, 9860), 'numpy.float64', 'np.float64', (['(0)'], {}), '(0)\n', (9857, 9860), True, 'import numpy as np\n'), ((9863, 9883), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (9872, 9883), True, 'import pandas as pd\n'), ((9909, 9929), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (9918, 9929), True, 'import pandas as pd\n'), ((9998, 10018), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (10007, 10018), True, 'import pandas as pd\n'), ((10021, 10034), 'numpy.float64', 'np.float64', (['(0)'], {}), '(0)\n', (10031, 10034), True, 'import numpy as np\n'), ((10054, 10074), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (10063, 10074), True, 'import pandas as pd\n'), ((10174, 10194), 'pandas.Series', 'pd.Series', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (10183, 10194), True, 'import pandas as pd\n'), ((10220, 10240), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (10229, 10240), True, 'import pandas as pd\n'), ((10492, 10512), 'pandas.offsets.Second', 'pd.offsets.Second', (['(5)'], {}), '(5)\n', (10509, 10512), True, 'import pandas as pd\n'), ((10531, 10551), 'pandas.offsets.Second', 'pd.offsets.Second', (['(5)'], {}), '(5)\n', (10548, 10551), True, 'import pandas as pd\n'), ((10774, 10794), 'pandas.offsets.Second', 'pd.offsets.Second', (['(5)'], {}), '(5)\n', (10791, 10794), True, 'import pandas as pd\n'), ((11057, 11076), 'pandas.offsets.Milli', 'pd.offsets.Milli', (['(5)'], {}), '(5)\n', 
(11073, 11076), True, 'import pandas as pd\n'), ((11095, 11114), 'pandas.offsets.Milli', 'pd.offsets.Milli', (['(5)'], {}), '(5)\n', (11111, 11114), True, 'import pandas as pd\n'), ((11368, 11387), 'pandas.offsets.Milli', 'pd.offsets.Milli', (['(5)'], {}), '(5)\n', (11384, 11387), True, 'import pandas as pd\n'), ((11615, 11637), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""s"""'], {}), "(1, 's')\n", (11629, 11637), True, 'import numpy as np\n'), ((11656, 11678), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""s"""'], {}), "(1, 's')\n", (11670, 11678), True, 'import numpy as np\n'), ((11901, 11924), 'numpy.timedelta64', 'np.timedelta64', (['(5)', '"""ms"""'], {}), "(5, 'ms')\n", (11915, 11924), True, 'import numpy as np\n'), ((11943, 11966), 'numpy.timedelta64', 'np.timedelta64', (['(5)', '"""ms"""'], {}), "(5, 'ms')\n", (11957, 11966), True, 'import numpy as np\n'), ((12469, 12504), 'pandas.timedelta_range', 'timedelta_range', (['"""1 day"""'], {'periods': '(3)'}), "('1 day', periods=3)\n", (12484, 12504), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((12569, 12604), 'pandas.date_range', 'date_range', (['"""2012-01-02"""'], {'periods': '(3)'}), "('2012-01-02', periods=3)\n", (12579, 12604), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((12726, 12773), 'pandas.date_range', 'date_range', (['"""2011-12-31"""'], {'periods': '(3)', 'freq': '"""-1D"""'}), "('2011-12-31', periods=3, freq='-1D')\n", (12736, 12773), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((13032, 13052), 'pandas.offsets.Minute', 'pd.offsets.Minute', (['(1)'], {}), '(1)\n', (13049, 13052), True, 'import pandas as pd\n'), ((13187, 13207), 'pandas.offsets.Minute', 'pd.offsets.Minute', (['(1)'], {}), '(1)\n', (13204, 13207), True, 
'import pandas as pd\n'), ((13686, 13707), 'pandas.offsets.Second', 'pd.offsets.Second', (['(12)'], {}), '(12)\n', (13703, 13707), True, 'import pandas as pd\n'), ((14891, 14924), 'pandas.to_timedelta', 'pd.to_timedelta', (["(['00:05:03'] * 3)"], {}), "(['00:05:03'] * 3)\n", (14906, 14924), True, 'import pandas as pd\n'), ((15586, 15624), 'pandas.date_range', 'date_range', (['"""2013-01-01"""', '"""2013-01-03"""'], {}), "('2013-01-01', '2013-01-03')\n", (15596, 15624), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((15651, 15689), 'pandas.date_range', 'date_range', (['"""2013-03-01"""', '"""2013-03-03"""'], {}), "('2013-03-01', '2013-03-03')\n", (15661, 15689), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((16678, 16711), 'pandas.Series', 'Series', (['[20, 30, 40]'], {'dtype': 'dtype'}), '([20, 30, 40], dtype=dtype)\n', (16684, 16711), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((16915, 16952), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (16934, 16952), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((18016, 18054), 'pandas.date_range', 'date_range', (['"""2013-01-01"""', '"""2013-01-03"""'], {}), "('2013-01-01', '2013-01-03')\n", (18026, 18054), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((18081, 18119), 'pandas.date_range', 'date_range', (['"""2013-03-01"""', '"""2013-03-03"""'], {}), "('2013-03-01', '2013-03-03')\n", (18091, 18119), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((18910, 18943), 
'pandas.date_range', 'date_range', (['"""20130101"""'], {'periods': '(3)'}), "('20130101', periods=3)\n", (18920, 18943), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((20095, 20160), 'datetime.timedelta', 'timedelta', ([], {'days': 'd', 'hours': 'h', 'minutes': 'm', 'seconds': 's', 'microseconds': 'us'}), '(days=d, hours=h, minutes=m, seconds=s, microseconds=us)\n', (20104, 20160), False, 'from datetime import datetime, timedelta\n'), ((22938, 22989), 'pandas.date_range', 'date_range', (['"""2000-01-01 09:00:00"""'], {'periods': '(5)', 'tz': 'tz'}), "('2000-01-01 09:00:00', periods=5, tz=tz)\n", (22948, 22989), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((23110, 23162), 'pandas.timedelta_range', 'timedelta_range', (['"""1 days 1 min"""'], {'periods': '(5)', 'freq': '"""H"""'}), "('1 days 1 min', periods=5, freq='H')\n", (23125, 23162), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((23740, 23772), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'exp'], {}), '(result, exp)\n', (23759, 23772), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((23893, 23925), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'exp'], {}), '(result, exp)\n', (23912, 23925), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((26691, 26717), 'pandas.util.testing.assertRaises', 'tm.assertRaises', (['TypeError'], {}), '(TypeError)\n', (26706, 26717), True, 'import pandas.util.testing as tm\n'), ((27624, 27650), 'pandas.util.testing.assertRaises', 'tm.assertRaises', (['TypeError'], {}), '(TypeError)\n', (27639, 27650), True, 'import pandas.util.testing as tm\n'), ((28074, 28100), 
'pandas.util.testing.assertRaises', 'tm.assertRaises', (['TypeError'], {}), '(TypeError)\n', (28089, 28100), True, 'import pandas.util.testing as tm\n'), ((32046, 32072), 'pandas.util.testing.assertRaises', 'tm.assertRaises', (['TypeError'], {}), '(TypeError)\n', (32061, 32072), True, 'import pandas.util.testing as tm\n'), ((32119, 32145), 'pandas.util.testing.assertRaises', 'tm.assertRaises', (['TypeError'], {}), '(TypeError)\n', (32134, 32145), True, 'import pandas.util.testing as tm\n'), ((32203, 32229), 'pandas.util.testing.assertRaises', 'tm.assertRaises', (['TypeError'], {}), '(TypeError)\n', (32218, 32229), True, 'import pandas.util.testing as tm\n'), ((32278, 32304), 'pandas.util.testing.assertRaises', 'tm.assertRaises', (['TypeError'], {}), '(TypeError)\n', (32293, 32304), True, 'import pandas.util.testing as tm\n'), ((32690, 32716), 'pandas.util.testing.assertRaises', 'tm.assertRaises', (['TypeError'], {}), '(TypeError)\n', (32705, 32716), True, 'import pandas.util.testing as tm\n'), ((32776, 32802), 'pandas.util.testing.assertRaises', 'tm.assertRaises', (['TypeError'], {}), '(TypeError)\n', (32791, 32802), True, 'import pandas.util.testing as tm\n'), ((32970, 33013), 'pandas.date_range', 'date_range', (['"""2012-1-1"""'], {'periods': '(3)', 'freq': '"""D"""'}), "('2012-1-1', periods=3, freq='D')\n", (32980, 33013), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((34950, 34985), 'pandas.bdate_range', 'bdate_range', (['"""1/1/2000"""'], {'periods': '(10)'}), "('1/1/2000', periods=10)\n", (34961, 34985), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((35474, 35511), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (35493, 35511), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), 
((36102, 36139), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (36121, 36139), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((36210, 36229), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (36225, 36229), True, 'import numpy as np\n'), ((36500, 36537), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (36519, 36537), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((36654, 36662), 'pandas.compat.range', 'range', (['(5)'], {}), '(5)\n', (36659, 36662), False, 'from pandas.compat import range, zip\n'), ((36684, 36717), 'pandas.date_range', 'date_range', (['"""20010101"""'], {'periods': '(5)'}), "('20010101', periods=5)\n", (36694, 36717), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((37199, 37238), 'pandas.Series', 'Series', (["['a', np.nan, 'c']"], {'dtype': 'dtype'}), "(['a', np.nan, 'c'], dtype=dtype)\n", (37205, 37238), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((37259, 37298), 'pandas.Series', 'Series', (["['a', np.nan, 'd']"], {'dtype': 'dtype'}), "(['a', np.nan, 'd'], dtype=dtype)\n", (37265, 37298), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((37358, 37386), 'pandas.Series', 'Series', (['[True, False, False]'], {}), '([True, False, False])\n', (37364, 37386), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((37399, 37436), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (37418, 37436), False, 'from 
pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((37496, 37523), 'pandas.Series', 'Series', (['[False, True, True]'], {}), '([False, True, True])\n', (37502, 37523), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((37536, 37573), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (37555, 37573), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((37634, 37663), 'pandas.Series', 'Series', (['[False, False, False]'], {}), '([False, False, False])\n', (37640, 37663), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((37676, 37713), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (37695, 37713), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((37774, 37800), 'pandas.Series', 'Series', (['[True, True, True]'], {}), '([True, True, True])\n', (37780, 37800), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((37813, 37850), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (37832, 37850), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((42159, 42169), 'pandas.Series', 'Series', (['[]'], {}), '([])\n', (42165, 42169), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((42300, 42310), 'pandas.Series', 'Series', (['[]'], {}), '([])\n', (42306, 42310), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((42465, 
42483), 'pandas.Series', 'Series', (['[1]', "['z']"], {}), "([1], ['z'])\n", (42471, 42483), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((42622, 42640), 'pandas.Series', 'Series', (['[1]', "['z']"], {}), "([1], ['z'])\n", (42628, 42640), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((42869, 42879), 'pandas.Series', 'Series', (['[]'], {}), '([])\n', (42875, 42879), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((42881, 42899), 'pandas.Series', 'Series', (['[1]', "['z']"], {}), "([1], ['z'])\n", (42887, 42899), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((42919, 42942), 'pandas.Series', 'Series', (['np.nan', 'b.index'], {}), '(np.nan, b.index)\n', (42925, 42942), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((42944, 42967), 'pandas.Series', 'Series', (['np.nan', 'a.index'], {}), '(np.nan, a.index)\n', (42950, 42967), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((43012, 43045), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'a[a]'], {}), '(result, a[a])\n', (43031, 43045), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((43065, 43078), 'pandas.Series', 'Series', (["['z']"], {}), "(['z'])\n", (43071, 43078), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((43275, 43308), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'a[a]'], {}), '(result, a[a])\n', 
(43294, 43308), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((43520, 43559), 'pandas.Series', 'Series', (['[True, True, True]'], {'index': 'index'}), '([True, True, True], index=index)\n', (43526, 43559), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((43572, 43609), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (43591, 43609), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((43820, 43860), 'pandas.Series', 'Series', (['[True, False, True]'], {'index': 'index'}), '([True, False, True], index=index)\n', (43826, 43860), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((43873, 43910), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (43892, 43910), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((44029, 44069), 'pandas.Series', 'Series', (['[True, False, True]'], {'index': 'index'}), '([True, False, True], index=index)\n', (44035, 44069), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((44082, 44119), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (44101, 44119), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((44239, 44281), 'pandas.Series', 'Series', (['[False, False, False]'], {'index': 'index'}), '([False, False, False], index=index)\n', (44245, 44281), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((44294, 44331), 
'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (44313, 44331), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((44483, 44502), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (44498, 44502), True, 'import numpy as np\n'), ((44530, 44549), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (44545, 44549), True, 'import numpy as np\n'), ((47972, 47980), 'pandas.compat.range', 'range', (['(4)'], {}), '(4)\n', (47977, 47980), False, 'from pandas.compat import range, zip\n'), ((48318, 48326), 'pandas.compat.range', 'range', (['(4)'], {}), '(4)\n', (48323, 48326), False, 'from pandas.compat import range, zip\n'), ((48443, 48454), 'pandas.compat.range', 'range', (['(4)', '(8)'], {}), '(4, 8)\n', (48448, 48454), False, 'from pandas.compat import range, zip\n'), ((50077, 50131), 'pandas.Series', 'Series', (['([False] * 7)'], {'index': "['b', 'c', 'a', 0, 1, 2, 3]"}), "([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])\n", (50083, 50131), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((50282, 50336), 'pandas.Series', 'Series', (['([False] * 7)'], {'index': "[0, 1, 2, 3, 'a', 'b', 'c']"}), "([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])\n", (50288, 50336), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((50349, 50389), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(s_tft & s_0123)', 'exp'], {}), '(s_tft & s_0123, exp)\n', (50368, 50389), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((50565, 50619), 'pandas.Series', 'Series', (['([False] * 7)'], {'index': "[0, 1, 2, 3, 'b', 'c', 'a']"}), "([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])\n", (50571, 50619), False, 'from pandas import Index, Series, 
DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((50770, 50824), 'pandas.Series', 'Series', (['([False] * 7)'], {'index': "[0, 1, 2, 3, 'a', 'b', 'c']"}), "([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])\n", (50776, 50824), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((50837, 50877), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(s_0123 & s_tft)', 'exp'], {}), '(s_0123 & s_tft, exp)\n', (50856, 50877), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((50923, 50942), 'pandas.Series', 'Series', (['([False] * 4)'], {}), '([False] * 4)\n', (50929, 50942), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((50988, 51021), 'pandas.Series', 'Series', (['[False, True, True, True]'], {}), '([False, True, True, True])\n', (50994, 51021), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((51069, 51088), 'pandas.Series', 'Series', (['([False] * 4)'], {}), '([False] * 4)\n', (51075, 51088), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((51136, 51155), 'pandas.Series', 'Series', (['([False] * 4)'], {}), '([False] * 4)\n', (51142, 51155), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((51261, 51280), 'pandas.Series', 'Series', (['([False] * 4)'], {}), '([False] * 4)\n', (51267, 51280), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((51750, 51770), 'datetime.datetime', 'datetime', (['(2005)', '(1)', '(1)'], {}), '(2005, 1, 1)\n', (51758, 51770), False, 'from 
datetime import datetime, timedelta\n'), ((53420, 53439), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (53435, 53439), True, 'import numpy as np\n'), ((58783, 58804), 'pandas.util.testing.rands_array', 'tm.rands_array', (['(5)', '(10)'], {}), '(5, 10)\n', (58797, 58804), True, 'import pandas.util.testing as tm\n'), ((59174, 59200), 'pandas.util.testing.assertRaises', 'tm.assertRaises', (['TypeError'], {}), '(TypeError)\n', (59189, 59200), True, 'import pandas.util.testing as tm\n'), ((59253, 59279), 'pandas.util.testing.assertRaises', 'tm.assertRaises', (['TypeError'], {}), '(TypeError)\n', (59268, 59279), True, 'import pandas.util.testing as tm\n'), ((59846, 59879), 'pandas.Series', 'pd.Series', (['[2, 3, 4]'], {'dtype': 'dtype'}), '([2, 3, 4], dtype=dtype)\n', (59855, 59879), True, 'import pandas as pd\n'), ((59892, 59924), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['res', 'exp'], {}), '(res, exp)\n', (59914, 59924), True, 'import pandas.util.testing as tm\n'), ((59993, 60025), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['res', 'exp'], {}), '(res, exp)\n', (60015, 60025), True, 'import pandas.util.testing as tm\n'), ((60106, 60154), 'pandas.Series', 'pd.Series', (['[np.nan, np.nan, np.nan]'], {'dtype': 'dtype'}), '([np.nan, np.nan, np.nan], dtype=dtype)\n', (60115, 60154), True, 'import pandas as pd\n'), ((60167, 60199), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['res', 'exp'], {}), '(res, exp)\n', (60189, 60199), True, 'import pandas.util.testing as tm\n'), ((60273, 60305), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['res', 'exp'], {}), '(res, exp)\n', (60295, 60305), True, 'import pandas.util.testing as tm\n'), ((60797, 60828), 'pandas.Series', 'pd.Series', (["['ax', np.nan, 'ax']"], {}), "(['ax', np.nan, 'ax'])\n", (60806, 60828), True, 'import pandas as pd\n'), ((60870, 60901), 'pandas.Series', 'pd.Series', (["['xa', np.nan, 
'xa']"], {}), "(['xa', np.nan, 'xa'])\n", (60879, 60901), True, 'import pandas as pd\n'), ((61436, 61472), 'pandas.DataFrame', 'pd.DataFrame', (['[2, 3, 4]'], {'dtype': 'dtype'}), '([2, 3, 4], dtype=dtype)\n', (61448, 61472), True, 'import pandas as pd\n'), ((61485, 61516), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['res', 'exp'], {}), '(res, exp)\n', (61506, 61516), True, 'import pandas.util.testing as tm\n'), ((61588, 61619), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['res', 'exp'], {}), '(res, exp)\n', (61609, 61619), True, 'import pandas.util.testing as tm\n'), ((61703, 61754), 'pandas.DataFrame', 'pd.DataFrame', (['[np.nan, np.nan, np.nan]'], {'dtype': 'dtype'}), '([np.nan, np.nan, np.nan], dtype=dtype)\n', (61715, 61754), True, 'import pandas as pd\n'), ((61767, 61798), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['res', 'exp'], {}), '(res, exp)\n', (61788, 61798), True, 'import pandas.util.testing as tm\n'), ((61875, 61906), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['res', 'exp'], {}), '(res, exp)\n', (61896, 61906), True, 'import pandas.util.testing as tm\n'), ((61994, 62028), 'pandas.DataFrame', 'pd.DataFrame', (["['ax', np.nan, 'ax']"], {}), "(['ax', np.nan, 'ax'])\n", (62006, 62028), True, 'import pandas as pd\n'), ((62070, 62104), 'pandas.DataFrame', 'pd.DataFrame', (["['xa', np.nan, 'xa']"], {}), "(['xa', np.nan, 'xa'])\n", (62082, 62104), True, 'import pandas as pd\n'), ((62941, 62950), 'pandas.isnull', 'isnull', (['a'], {}), '(a)\n', (62947, 62950), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((62971, 62980), 'pandas.isnull', 'isnull', (['b'], {}), '(b)\n', (62977, 62980), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((63711, 63740), 'pandas.Series', 'Series', 
(['exp_values', 'exp_index'], {}), '(exp_values, exp_index)\n', (63717, 63740), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((63753, 63790), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (63772, 63790), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((64835, 64867), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'exp'], {}), '(result, exp)\n', (64854, 64867), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((65172, 65213), 'pandas.util.testing.equalContents', 'tm.equalContents', (['(ts.index != 5)', 'expected'], {}), '(ts.index != 5, expected)\n', (65188, 65213), True, 'import pandas.util.testing as tm\n'), ((65239, 65283), 'pandas.util.testing.equalContents', 'tm.equalContents', (['(~(ts.index == 5))', 'expected'], {}), '(~(ts.index == 5), expected)\n', (65255, 65283), True, 'import pandas.util.testing as tm\n'), ((65604, 65621), 'pandas.isnull', 'isnull', (['result[0]'], {}), '(result[0])\n', (65610, 65621), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((65647, 65665), 'pandas.isnull', 'isnull', (['result2[0]'], {}), '(result2[0])\n', (65653, 65665), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((66230, 66240), 'decimal.Decimal', 'Decimal', (['(2)'], {}), '(2)\n', (66237, 66240), False, 'from decimal import Decimal\n'), ((66338, 66348), 'decimal.Decimal', 'Decimal', (['(2)'], {}), '(2)\n', (66345, 66348), False, 'from decimal import Decimal\n'), ((66504, 66522), 'numpy.random.randn', 'np.random.randn', (['(5)'], {}), '(5)\n', (66519, 66522), True, 'import numpy as np\n'), ((66730, 66763), 'pandas.date_range', 
'date_range', (['"""20130101"""'], {'periods': '(5)'}), "('20130101', periods=5)\n", (66740, 66763), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((67055, 67076), 'numpy.random.randn', 'np.random.randn', (['(5)', '(2)'], {}), '(5, 2)\n', (67070, 67076), True, 'import numpy as np\n'), ((67640, 67666), 'pandas.Series', 'Series', (['[1, 2]'], {'index': 'idx1'}), '([1, 2], index=idx1)\n', (67646, 67666), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((67669, 67695), 'pandas.Series', 'Series', (['[1, 1]'], {'index': 'idx2'}), '([1, 1], index=idx2)\n', (67675, 67695), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((67729, 67768), 'pandas.Series', 'Series', (['[np.nan, 3, np.nan]'], {'index': 'base'}), '([np.nan, 3, np.nan], index=base)\n', (67735, 67768), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((1940, 1980), 'pandas.util.testing.assert_almost_equal', 'tm.assert_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (1962, 1980), True, 'import pandas.util.testing as tm\n'), ((3771, 3819), 'numpy.array_equal', 'np.array_equal', (['result', "(p['second'] / p['first'])"], {}), "(result, p['second'] / p['first'])\n", (3785, 3819), True, 'import numpy as np\n'), ((5000, 5014), 'numpy.abs', 'np.abs', (['series'], {}), '(series)\n', (5006, 5014), True, 'import numpy as np\n'), ((5059, 5072), 'numpy.abs', 'np.abs', (['other'], {}), '(other)\n', (5065, 5072), True, 'import numpy as np\n'), ((7863, 7880), 'datetime.timedelta', 'timedelta', ([], {'days': 'i'}), '(days=i)\n', (7872, 7880), False, 'from datetime import datetime, timedelta\n'), ((8750, 8774), 'datetime.timedelta', 'timedelta', ([], {'days': '(4017 + 
i)'}), '(days=4017 + i)\n', (8759, 8774), False, 'from datetime import datetime, timedelta\n'), ((9617, 9648), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)', 'seconds': '(1)'}), '(minutes=5, seconds=1)\n', (9626, 9648), False, 'from datetime import datetime, timedelta\n'), ((10149, 10168), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (10157, 10168), True, 'import numpy as np\n'), ((10413, 10439), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130101 9:01"""'], {}), "('20130101 9:01')\n", (10422, 10439), False, 'from pandas.tseries.index import Timestamp\n'), ((10441, 10467), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130101 9:02"""'], {}), "('20130101 9:02')\n", (10450, 10467), False, 'from pandas.tseries.index import Timestamp\n'), ((10583, 10612), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130101 9:01:05"""'], {}), "('20130101 9:01:05')\n", (10592, 10612), False, 'from pandas.tseries.index import Timestamp\n'), ((10614, 10643), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130101 9:02:05"""'], {}), "('20130101 9:02:05')\n", (10623, 10643), False, 'from pandas.tseries.index import Timestamp\n'), ((10814, 10834), 'pandas.offsets.Second', 'pd.offsets.Second', (['(5)'], {}), '(5)\n', (10831, 10834), True, 'import pandas as pd\n'), ((10866, 10895), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130101 9:00:55"""'], {}), "('20130101 9:00:55')\n", (10875, 10895), False, 'from pandas.tseries.index import Timestamp\n'), ((10897, 10926), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130101 9:01:55"""'], {}), "('20130101 9:01:55')\n", (10906, 10926), False, 'from pandas.tseries.index import Timestamp\n'), ((11146, 11179), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130101 9:01:00.005"""'], {}), "('20130101 9:01:00.005')\n", (11155, 11179), False, 'from pandas.tseries.index import Timestamp\n'), ((11181, 11214), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130101 
9:02:00.005"""'], {}), "('20130101 9:02:00.005')\n", (11190, 11214), False, 'from pandas.tseries.index import Timestamp\n'), ((11345, 11365), 'pandas.offsets.Minute', 'pd.offsets.Minute', (['(5)'], {}), '(5)\n', (11362, 11365), True, 'import pandas as pd\n'), ((11415, 11448), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130101 9:06:00.005"""'], {}), "('20130101 9:06:00.005')\n", (11424, 11448), False, 'from pandas.tseries.index import Timestamp\n'), ((11450, 11483), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130101 9:07:00.005"""'], {}), "('20130101 9:07:00.005')\n", (11459, 11483), False, 'from pandas.tseries.index import Timestamp\n'), ((11710, 11739), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130101 9:01:01"""'], {}), "('20130101 9:01:01')\n", (11719, 11739), False, 'from pandas.tseries.index import Timestamp\n'), ((11741, 11770), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130101 9:02:01"""'], {}), "('20130101 9:02:01')\n", (11750, 11770), False, 'from pandas.tseries.index import Timestamp\n'), ((11998, 12031), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130101 9:01:00.005"""'], {}), "('20130101 9:01:00.005')\n", (12007, 12031), False, 'from pandas.tseries.index import Timestamp\n'), ((12033, 12066), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130101 9:02:00.005"""'], {}), "('20130101 9:02:00.005')\n", (12042, 12066), False, 'from pandas.tseries.index import Timestamp\n'), ((13472, 13503), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(6)', 'seconds': '(3)'}), '(minutes=6, seconds=3)\n', (13481, 13503), False, 'from datetime import datetime, timedelta\n'), ((13505, 13536), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)', 'seconds': '(6)'}), '(minutes=5, seconds=6)\n', (13514, 13536), False, 'from datetime import datetime, timedelta\n'), ((13551, 13591), 'datetime.timedelta', 'timedelta', ([], {'hours': '(2)', 'minutes': '(5)', 'seconds': '(3)'}), '(hours=2, minutes=5, 
seconds=3)\n', (13560, 13591), False, 'from datetime import datetime, timedelta\n'), ((13663, 13683), 'pandas.offsets.Minute', 'pd.offsets.Minute', (['(1)'], {}), '(1)\n', (13680, 13683), True, 'import pandas as pd\n'), ((18439, 18476), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (18458, 18476), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((19317, 19338), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130301"""'], {}), "('20130301')\n", (19326, 19338), False, 'from pandas.tseries.index import Timestamp\n'), ((19340, 19370), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130228 23:00:00"""'], {}), "('20130228 23:00:00')\n", (19349, 19370), False, 'from pandas.tseries.index import Timestamp\n'), ((19394, 19424), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130228 22:00:00"""'], {}), "('20130228 22:00:00')\n", (19403, 19424), False, 'from pandas.tseries.index import Timestamp\n'), ((19426, 19456), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20130228 21:00:00"""'], {}), "('20130228 21:00:00')\n", (19435, 19456), False, 'from pandas.tseries.index import Timestamp\n'), ((20288, 20317), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['lhs', 'rhs'], {}), '(lhs, rhs)\n', (20307, 20317), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((21476, 21497), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20111230"""'], {}), "('20111230')\n", (21485, 21497), False, 'from pandas.tseries.index import Timestamp\n'), ((21499, 21520), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20120101"""'], {}), "('20120101')\n", (21508, 21520), False, 'from pandas.tseries.index import Timestamp\n'), ((21544, 21565), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20120103"""'], {}), "('20120103')\n", (21553, 21565), False, 'from pandas.tseries.index import Timestamp\n'), 
((21619, 21640), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20111231"""'], {}), "('20111231')\n", (21628, 21640), False, 'from pandas.tseries.index import Timestamp\n'), ((21642, 21663), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20120102"""'], {}), "('20120102')\n", (21651, 21663), False, 'from pandas.tseries.index import Timestamp\n'), ((21687, 21708), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""20120104"""'], {}), "('20120104')\n", (21696, 21708), False, 'from pandas.tseries.index import Timestamp\n'), ((25053, 25103), 'pandas.Timestamp', 'pd.Timestamp', (['"""2016-02-10"""'], {'tz': '"""America/Sao_Paulo"""'}), "('2016-02-10', tz='America/Sao_Paulo')\n", (25065, 25103), True, 'import pandas as pd\n'), ((25127, 25177), 'pandas.Timestamp', 'pd.Timestamp', (['"""2016-02-08"""'], {'tz': '"""America/Sao_Paulo"""'}), "('2016-02-08', tz='America/Sao_Paulo')\n", (25139, 25177), True, 'import pandas as pd\n'), ((25232, 25250), 'pandas.tseries.tdi.Timedelta', 'Timedelta', (['"""2days"""'], {}), "('2days')\n", (25241, 25250), False, 'from pandas.tseries.tdi import Timedelta\n'), ((25351, 25370), 'pandas.tseries.tdi.Timedelta', 'Timedelta', (['"""-2days"""'], {}), "('-2days')\n", (25360, 25370), False, 'from pandas.tseries.tdi import Timedelta\n'), ((25507, 25522), 'pandas.tseries.tdi.Timedelta', 'Timedelta', (['"""1s"""'], {}), "('1s')\n", (25516, 25522), False, 'from pandas.tseries.tdi import Timedelta\n'), ((25564, 25585), 'pandas.tseries.index.Timestamp', 'Timestamp', (['"""19900315"""'], {}), "('19900315')\n", (25573, 25585), False, 'from pandas.tseries.index import Timestamp\n'), ((33094, 33106), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (33103, 33106), False, 'from datetime import datetime, timedelta\n'), ((33108, 33120), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (33117, 33120), False, 'from datetime import datetime, timedelta\n'), ((38563, 38585), 'pandas.Series', 'Series', (['l'], {'dtype': 'dtype'}), 
'(l, dtype=dtype)\n', (38569, 38585), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((39512, 39538), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-01-01"""'], {}), "('2011-01-01')\n", (39524, 39538), True, 'import pandas as pd\n'), ((39565, 39591), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-01-03"""'], {}), "('2011-01-03')\n", (39577, 39591), True, 'import pandas as pd\n'), ((39612, 39634), 'pandas.Timedelta', 'pd.Timedelta', (['"""1 days"""'], {}), "('1 days')\n", (39624, 39634), True, 'import pandas as pd\n'), ((39644, 39666), 'pandas.Timedelta', 'pd.Timedelta', (['"""3 days"""'], {}), "('3 days')\n", (39656, 39666), True, 'import pandas as pd\n'), ((39687, 39717), 'pandas.Period', 'pd.Period', (['"""2011-01"""'], {'freq': '"""M"""'}), "('2011-01', freq='M')\n", (39696, 39717), True, 'import pandas as pd\n'), ((39744, 39774), 'pandas.Period', 'pd.Period', (['"""2011-03"""'], {'freq': '"""M"""'}), "('2011-03', freq='M')\n", (39753, 39774), True, 'import pandas as pd\n'), ((39865, 39887), 'pandas.Series', 'Series', (['l'], {'dtype': 'dtype'}), '(l, dtype=dtype)\n', (39871, 39887), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((39916, 39945), 'pandas.Series', 'Series', (['[False, False, False]'], {}), '([False, False, False])\n', (39922, 39945), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((39962, 40007), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(left == pd.NaT)', 'expected'], {}), '(left == pd.NaT, expected)\n', (39981, 40007), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((40024, 40069), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(pd.NaT == left)', 'expected'], {}), '(pd.NaT == left, expected)\n', 
(40043, 40069), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((40098, 40124), 'pandas.Series', 'Series', (['[True, True, True]'], {}), '([True, True, True])\n', (40104, 40124), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((40141, 40186), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(left != pd.NaT)', 'expected'], {}), '(left != pd.NaT, expected)\n', (40160, 40186), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((40203, 40248), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(pd.NaT != left)', 'expected'], {}), '(pd.NaT != left, expected)\n', (40222, 40248), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((40277, 40306), 'pandas.Series', 'Series', (['[False, False, False]'], {}), '([False, False, False])\n', (40283, 40306), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((40323, 40367), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(left < pd.NaT)', 'expected'], {}), '(left < pd.NaT, expected)\n', (40342, 40367), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((40384, 40428), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(pd.NaT > left)', 'expected'], {}), '(pd.NaT > left, expected)\n', (40403, 40428), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((40445, 40490), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(left <= pd.NaT)', 'expected'], {}), '(left <= pd.NaT, expected)\n', (40464, 40490), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((40507, 40552), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(pd.NaT >= left)', 
'expected'], {}), '(pd.NaT >= left, expected)\n', (40526, 40552), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((40570, 40614), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(left > pd.NaT)', 'expected'], {}), '(left > pd.NaT, expected)\n', (40589, 40614), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((40631, 40675), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(pd.NaT < left)', 'expected'], {}), '(pd.NaT < left, expected)\n', (40650, 40675), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((40692, 40737), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(left >= pd.NaT)', 'expected'], {}), '(left >= pd.NaT, expected)\n', (40711, 40737), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((40754, 40799), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(pd.NaT <= left)', 'expected'], {}), '(pd.NaT <= left, expected)\n', (40773, 40799), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((43452, 43492), 'pandas.Series', 'Series', (['[True, False, True]'], {'index': 'index'}), '([True, False, True], index=index)\n', (43458, 43492), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((43752, 43792), 'pandas.Series', 'Series', (['[True, False, True]'], {'index': 'index'}), '([True, False, True], index=index)\n', (43758, 43792), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((43961, 44001), 'pandas.Series', 'Series', (['[True, False, True]'], {'index': 'index'}), '([True, False, True], index=index)\n', (43967, 44001), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, 
_np_version_under1p8\n'), ((44171, 44211), 'pandas.Series', 'Series', (['[True, False, True]'], {'index': 'index'}), '([True, False, True], index=index)\n', (44177, 44211), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((45570, 45608), 'pandas.util.testing.assertRaisesRegexp', 'tm.assertRaisesRegexp', (['ValueError', 'msg'], {}), '(ValueError, msg)\n', (45591, 45608), True, 'import pandas.util.testing as tm\n'), ((50149, 50191), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['RuntimeWarning'], {}), '(RuntimeWarning)\n', (50175, 50191), True, 'import pandas.util.testing as tm\n'), ((50209, 50249), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(s_tft & s_0123)', 'exp'], {}), '(s_tft & s_0123, exp)\n', (50228, 50249), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((50637, 50679), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['RuntimeWarning'], {}), '(RuntimeWarning)\n', (50663, 50679), True, 'import pandas.util.testing as tm\n'), ((50697, 50737), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(s_0123 & s_tft)', 'exp'], {}), '(s_0123 & s_tft, exp)\n', (50716, 50737), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((51194, 51231), 'pandas.Series', 'Series', (['[False, np.NaN, False, False]'], {}), '([False, np.NaN, False, False])\n', (51200, 51231), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((51372, 51398), 'pandas.Series', 'Series', (['[0.1, 4, -3.14, 2]'], {}), '([0.1, 4, -3.14, 2])\n', (51378, 51398), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((51817, 51837), 'datetime.datetime', 'datetime', (['(2005)', '(1)', 
'(1)'], {}), '(2005, 1, 1)\n', (51825, 51837), False, 'from datetime import datetime, timedelta\n'), ((52633, 52642), 'pandas.Index', 'Index', (['[]'], {}), '([])\n', (52638, 52642), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((53447, 53460), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (53456, 53460), True, 'import numpy as np\n'), ((55521, 55559), 'pandas.util.testing.assertRaisesRegexp', 'tm.assertRaisesRegexp', (['ValueError', 'msg'], {}), '(ValueError, msg)\n', (55542, 55559), True, 'import pandas.util.testing as tm\n'), ((55602, 55640), 'pandas.util.testing.assertRaisesRegexp', 'tm.assertRaisesRegexp', (['ValueError', 'msg'], {}), '(ValueError, msg)\n', (55623, 55640), True, 'import pandas.util.testing as tm\n'), ((55683, 55721), 'pandas.util.testing.assertRaisesRegexp', 'tm.assertRaisesRegexp', (['ValueError', 'msg'], {}), '(ValueError, msg)\n', (55704, 55721), True, 'import pandas.util.testing as tm\n'), ((55838, 55876), 'pandas.util.testing.assertRaisesRegexp', 'tm.assertRaisesRegexp', (['ValueError', 'msg'], {}), '(ValueError, msg)\n', (55859, 55876), True, 'import pandas.util.testing as tm\n'), ((55941, 55979), 'pandas.util.testing.assertRaisesRegexp', 'tm.assertRaisesRegexp', (['ValueError', 'msg'], {}), '(ValueError, msg)\n', (55962, 55979), True, 'import pandas.util.testing as tm\n'), ((56044, 56082), 'pandas.util.testing.assertRaisesRegexp', 'tm.assertRaisesRegexp', (['ValueError', 'msg'], {}), '(ValueError, msg)\n', (56065, 56082), True, 'import pandas.util.testing as tm\n'), ((59214, 59228), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (59226, 59228), False, 'from datetime import datetime, timedelta\n'), ((59303, 59317), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (59315, 59317), False, 'from datetime import datetime, timedelta\n'), ((59433, 59459), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-01-01"""'], {}), 
"('2011-01-01')\n", (59445, 59459), True, 'import pandas as pd\n'), ((59461, 59487), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-01-02"""'], {}), "('2011-01-02')\n", (59473, 59487), True, 'import pandas as pd\n'), ((59631, 59653), 'pandas.Series', 'Series', (['d'], {'dtype': 'dtype'}), '(d, dtype=dtype)\n', (59637, 59653), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((59794, 59827), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {'dtype': 'dtype'}), '([1, 2, 3], dtype=dtype)\n', (59803, 59827), True, 'import pandas as pd\n'), ((59943, 59976), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {'dtype': 'dtype'}), '([1, 2, 3], dtype=dtype)\n', (59952, 59976), True, 'import pandas as pd\n'), ((60054, 60087), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {'dtype': 'dtype'}), '([1, 2, 3], dtype=dtype)\n', (60063, 60087), True, 'import pandas as pd\n'), ((60218, 60251), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {'dtype': 'dtype'}), '([1, 2, 3], dtype=dtype)\n', (60227, 60251), True, 'import pandas as pd\n'), ((61017, 61043), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-01-01"""'], {}), "('2011-01-01')\n", (61029, 61043), True, 'import pandas as pd\n'), ((61045, 61071), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-01-02"""'], {}), "('2011-01-02')\n", (61057, 61071), True, 'import pandas as pd\n'), ((61215, 61240), 'pandas.DataFrame', 'DataFrame', (['d'], {'dtype': 'dtype'}), '(d, dtype=dtype)\n', (61224, 61240), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((61381, 61417), 'pandas.DataFrame', 'pd.DataFrame', (['[1, 2, 3]'], {'dtype': 'dtype'}), '([1, 2, 3], dtype=dtype)\n', (61393, 61417), True, 'import pandas as pd\n'), ((61535, 61571), 'pandas.DataFrame', 'pd.DataFrame', (['[1, 2, 3]'], {'dtype': 'dtype'}), '([1, 2, 3], dtype=dtype)\n', (61547, 61571), True, 'import pandas as pd\n'), 
((61648, 61684), 'pandas.DataFrame', 'pd.DataFrame', (['[1, 2, 3]'], {'dtype': 'dtype'}), '([1, 2, 3], dtype=dtype)\n', (61660, 61684), True, 'import pandas as pd\n'), ((61817, 61853), 'pandas.DataFrame', 'pd.DataFrame', (['[1, 2, 3]'], {'dtype': 'dtype'}), '([1, 2, 3], dtype=dtype)\n', (61829, 61853), True, 'import pandas as pd\n'), ((63841, 63853), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (63850, 63853), True, 'import numpy as np\n'), ((63907, 63919), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (63916, 63919), True, 'import numpy as np\n'), ((65418, 65432), 'decimal.Decimal', 'Decimal', (['"""1.3"""'], {}), "('1.3')\n", (65425, 65432), False, 'from decimal import Decimal\n'), ((65434, 65448), 'decimal.Decimal', 'Decimal', (['"""2.3"""'], {}), "('2.3')\n", (65441, 65448), False, 'from decimal import Decimal\n'), ((66166, 66176), 'decimal.Decimal', 'Decimal', (['(5)'], {}), '(5)\n', (66173, 66176), False, 'from decimal import Decimal\n'), ((66200, 66211), 'decimal.Decimal', 'Decimal', (['(10)'], {}), '(10)\n', (66207, 66211), False, 'from decimal import Decimal\n'), ((66307, 66318), 'decimal.Decimal', 'Decimal', (['(10)'], {}), '(10)\n', (66314, 66318), False, 'from decimal import Decimal\n'), ((66790, 66823), 'pandas.date_range', 'date_range', (['"""20130101"""'], {'periods': '(5)'}), "('20130101', periods=5)\n", (66800, 66823), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((67107, 67140), 'pandas.date_range', 'date_range', (['"""20130101"""'], {'periods': '(5)'}), "('20130101', periods=5)\n", (67117, 67140), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((2189, 2229), 'pandas.util.testing.assert_almost_equal', 'tm.assert_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (2211, 2229), True, 'import pandas.util.testing as tm\n'), ((5645, 5663), 
'operator.add', 'operator.add', (['y', 'x'], {}), '(y, x)\n', (5657, 5663), False, 'import operator\n'), ((5715, 5733), 'operator.sub', 'operator.sub', (['y', 'x'], {}), '(y, x)\n', (5727, 5733), False, 'import operator\n'), ((5785, 5807), 'operator.truediv', 'operator.truediv', (['y', 'x'], {}), '(y, x)\n', (5801, 5807), False, 'import operator\n'), ((5859, 5882), 'operator.floordiv', 'operator.floordiv', (['y', 'x'], {}), '(y, x)\n', (5876, 5882), False, 'import operator\n'), ((5934, 5952), 'operator.mul', 'operator.mul', (['y', 'x'], {}), '(y, x)\n', (5946, 5952), False, 'import operator\n'), ((6004, 6022), 'operator.pow', 'operator.pow', (['y', 'x'], {}), '(y, x)\n', (6016, 6022), False, 'import operator\n'), ((6111, 6129), 'operator.mod', 'operator.mod', (['y', 'x'], {}), '(y, x)\n', (6123, 6129), False, 'import operator\n'), ((7890, 7898), 'pandas.compat.range', 'range', (['(3)'], {}), '(3)\n', (7895, 7898), False, 'from pandas.compat import range, zip\n'), ((8784, 8792), 'pandas.compat.range', 'range', (['(3)'], {}), '(3)\n', (8789, 8792), False, 'from pandas.compat import range, zip\n'), ((12972, 13003), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)', 'seconds': '(3)'}), '(minutes=5, seconds=3)\n', (12981, 13003), False, 'from datetime import datetime, timedelta\n'), ((13080, 13111), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(6)', 'seconds': '(3)'}), '(minutes=6, seconds=3)\n', (13089, 13111), False, 'from datetime import datetime, timedelta\n'), ((13235, 13266), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(4)', 'seconds': '(3)'}), '(minutes=4, seconds=3)\n', (13244, 13266), False, 'from datetime import datetime, timedelta\n'), ((13350, 13370), 'pandas.offsets.Minute', 'pd.offsets.Minute', (['(1)'], {}), '(1)\n', (13367, 13370), True, 'import pandas as pd\n'), ((13372, 13392), 'pandas.offsets.Second', 'pd.offsets.Second', (['(3)'], {}), '(3)\n', (13389, 13392), True, 'import pandas as pd\n'), ((13424, 13442), 
'pandas.offsets.Hour', 'pd.offsets.Hour', (['(2)'], {}), '(2)\n', (13439, 13442), True, 'import pandas as pd\n'), ((13735, 13767), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(6)', 'seconds': '(15)'}), '(minutes=6, seconds=15)\n', (13744, 13767), False, 'from datetime import datetime, timedelta\n'), ((14194, 14225), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)', 'seconds': '(3)'}), '(minutes=5, seconds=3)\n', (14203, 14225), False, 'from datetime import datetime, timedelta\n'), ((17413, 17441), 'pandas.tseries.tdi.Timedelta', 'Timedelta', (['"""2 days 22:48:00"""'], {}), "('2 days 22:48:00')\n", (17422, 17441), False, 'from pandas.tseries.tdi import Timedelta\n'), ((17443, 17471), 'pandas.tseries.tdi.Timedelta', 'Timedelta', (['"""1 days 23:12:00"""'], {}), "('1 days 23:12:00')\n", (17452, 17471), False, 'from pandas.tseries.tdi import Timedelta\n'), ((17506, 17522), 'pandas.tseries.tdi.Timedelta', 'Timedelta', (['"""NaT"""'], {}), "('NaT')\n", (17515, 17522), False, 'from pandas.tseries.tdi import Timedelta\n'), ((17600, 17629), 'pandas.tseries.tdi.Timedelta', 'Timedelta', (['"""29 days 12:00:00"""'], {}), "('29 days 12:00:00')\n", (17609, 17629), False, 'from pandas.tseries.tdi import Timedelta\n'), ((17631, 17660), 'pandas.tseries.tdi.Timedelta', 'Timedelta', (['"""29 days 12:00:00"""'], {}), "('29 days 12:00:00')\n", (17640, 17660), False, 'from pandas.tseries.tdi import Timedelta\n'), ((17695, 17711), 'pandas.tseries.tdi.Timedelta', 'Timedelta', (['"""NaT"""'], {}), "('NaT')\n", (17704, 17711), False, 'from pandas.tseries.tdi import Timedelta\n'), ((18399, 18422), 'numpy.timedelta64', 'np.timedelta64', (['m', 'unit'], {}), '(m, unit)\n', (18413, 18422), True, 'import numpy as np\n'), ((18644, 18681), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (18663, 18681), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((18844, 18867), 
'numpy.timedelta64', 'np.timedelta64', (['m', 'unit'], {}), '(m, unit)\n', (18858, 18867), True, 'import numpy as np\n'), ((19871, 19891), 'pandas.compat.zip', 'zip', (['args', 'intervals'], {}), '(args, intervals)\n', (19874, 19891), False, 'from pandas.compat import range, zip\n'), ((20934, 20977), 'pandas.util.testing.assertRaisesRegexp', 'tm.assertRaisesRegexp', (['TypeError', '"""operate"""'], {}), "(TypeError, 'operate')\n", (20955, 20977), True, 'import pandas.util.testing as tm\n'), ((21064, 21095), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)', 'seconds': '(3)'}), '(minutes=5, seconds=3)\n', (21073, 21095), False, 'from datetime import datetime, timedelta\n'), ((31736, 31753), 'pandas.tseries.tdi.Timedelta', 'Timedelta', (['"""1.5s"""'], {}), "('1.5s')\n", (31745, 31753), False, 'from pandas.tseries.tdi import Timedelta\n'), ((31850, 31867), 'pandas.tseries.tdi.Timedelta', 'Timedelta', (['"""1.5s"""'], {}), "('1.5s')\n", (31859, 31867), False, 'from pandas.tseries.tdi import Timedelta\n'), ((32462, 32479), 'pandas.tseries.tdi.Timedelta', 'Timedelta', (['"""0.5s"""'], {}), "('0.5s')\n", (32471, 32479), False, 'from pandas.tseries.tdi import Timedelta\n'), ((32576, 32593), 'pandas.tseries.tdi.Timedelta', 'Timedelta', (['"""0.5s"""'], {}), "('0.5s')\n", (32585, 32593), False, 'from pandas.tseries.tdi import Timedelta\n'), ((37906, 37932), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-01-01"""'], {}), "('2011-01-01')\n", (37918, 37932), True, 'import pandas as pd\n'), ((37960, 37986), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-01-03"""'], {}), "('2011-01-03')\n", (37972, 37986), True, 'import pandas as pd\n'), ((38023, 38049), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-01-03"""'], {}), "('2011-01-03')\n", (38035, 38049), True, 'import pandas as pd\n'), ((38072, 38094), 'pandas.Timedelta', 'pd.Timedelta', (['"""1 days"""'], {}), "('1 days')\n", (38084, 38094), True, 'import pandas as pd\n'), ((38122, 38144), 'pandas.Timedelta', 
'pd.Timedelta', (['"""3 days"""'], {}), "('3 days')\n", (38134, 38144), True, 'import pandas as pd\n'), ((38181, 38203), 'pandas.Timedelta', 'pd.Timedelta', (['"""3 days"""'], {}), "('3 days')\n", (38193, 38203), True, 'import pandas as pd\n'), ((38226, 38256), 'pandas.Period', 'pd.Period', (['"""2011-01"""'], {'freq': '"""M"""'}), "('2011-01', freq='M')\n", (38235, 38256), True, 'import pandas as pd\n'), ((38284, 38314), 'pandas.Period', 'pd.Period', (['"""2011-03"""'], {'freq': '"""M"""'}), "('2011-03', freq='M')\n", (38293, 38314), True, 'import pandas as pd\n'), ((38351, 38381), 'pandas.Period', 'pd.Period', (['"""2011-03"""'], {'freq': '"""M"""'}), "('2011-03', freq='M')\n", (38360, 38381), True, 'import pandas as pd\n'), ((38649, 38671), 'pandas.Series', 'Series', (['r'], {'dtype': 'dtype'}), '(r, dtype=dtype)\n', (38655, 38671), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((38673, 38694), 'pandas.Index', 'Index', (['r'], {'dtype': 'dtype'}), '(r, dtype=dtype)\n', (38678, 38694), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((38728, 38756), 'pandas.Series', 'Series', (['[False, False, True]'], {}), '([False, False, True])\n', (38734, 38756), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((38777, 38821), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(left == right)', 'expected'], {}), '(left == right, expected)\n', (38796, 38821), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((38854, 38881), 'pandas.Series', 'Series', (['[True, True, False]'], {}), '([True, True, False])\n', (38860, 38881), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((38902, 
38946), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(left != right)', 'expected'], {}), '(left != right, expected)\n', (38921, 38946), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((38979, 39008), 'pandas.Series', 'Series', (['[False, False, False]'], {}), '([False, False, False])\n', (38985, 39008), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((39029, 39072), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(left < right)', 'expected'], {}), '(left < right, expected)\n', (39048, 39072), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((39105, 39134), 'pandas.Series', 'Series', (['[False, False, False]'], {}), '([False, False, False])\n', (39111, 39134), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((39155, 39198), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(left > right)', 'expected'], {}), '(left > right, expected)\n', (39174, 39198), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((39231, 39259), 'pandas.Series', 'Series', (['[False, False, True]'], {}), '([False, False, True])\n', (39237, 39259), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((39280, 39324), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(left >= right)', 'expected'], {}), '(left >= right, expected)\n', (39299, 39324), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((39357, 39385), 'pandas.Series', 'Series', (['[False, False, True]'], {}), '([False, False, True])\n', (39363, 39385), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, 
timedelta_range, _np_version_under1p8\n'), ((39406, 39450), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['(left <= right)', 'expected'], {}), '(left <= right, expected)\n', (39425, 39450), False, 'from pandas.util.testing import assert_series_equal, assert_almost_equal\n'), ((43129, 43171), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['RuntimeWarning'], {}), '(RuntimeWarning)\n', (43155, 43171), True, 'import pandas.util.testing as tm\n'), ((52701, 52717), 'numpy.isnan', 'np.isnan', (['result'], {}), '(result)\n', (52709, 52717), True, 'import numpy as np\n'), ((52768, 52777), 'pandas.Index', 'Index', (['[]'], {}), '([])\n', (52773, 52777), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((59675, 59701), 'pandas.util.testing.assertRaises', 'tm.assertRaises', (['TypeError'], {}), '(TypeError)\n', (59690, 59701), True, 'import pandas.util.testing as tm\n'), ((60334, 60356), 'pandas.Timedelta', 'pd.Timedelta', (['"""1 days"""'], {}), "('1 days')\n", (60346, 60356), True, 'import pandas as pd\n'), ((60358, 60380), 'pandas.Timedelta', 'pd.Timedelta', (['"""2 days"""'], {}), "('2 days')\n", (60370, 60380), True, 'import pandas as pd\n'), ((60409, 60431), 'pandas.Timedelta', 'pd.Timedelta', (['"""3 days"""'], {}), "('3 days')\n", (60421, 60431), True, 'import pandas as pd\n'), ((60476, 60498), 'pandas.Timedelta', 'pd.Timedelta', (['"""4 days"""'], {}), "('4 days')\n", (60488, 60498), True, 'import pandas as pd\n'), ((60500, 60522), 'pandas.Timedelta', 'pd.Timedelta', (['"""5 days"""'], {}), "('5 days')\n", (60512, 60522), True, 'import pandas as pd\n'), ((60553, 60575), 'pandas.Timedelta', 'pd.Timedelta', (['"""6 days"""'], {}), "('6 days')\n", (60565, 60575), True, 'import pandas as pd\n'), ((60613, 60635), 'pandas.Timedelta', 'pd.Timedelta', (['"""3 days"""'], {}), "('3 days')\n", (60625, 60635), True, 'import pandas as pd\n'), 
((60685, 60707), 'pandas.Timedelta', 'pd.Timedelta', (['"""3 days"""'], {}), "('3 days')\n", (60697, 60707), True, 'import pandas as pd\n'), ((61262, 61288), 'pandas.util.testing.assertRaises', 'tm.assertRaises', (['TypeError'], {}), '(TypeError)\n', (61277, 61288), True, 'import pandas.util.testing as tm\n'), ((63075, 63100), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (63086, 63100), True, 'import numpy as np\n'), ((65477, 65493), 'datetime.date', 'date', (['(2012)', '(1)', '(1)'], {}), '(2012, 1, 1)\n', (65481, 65493), False, 'from datetime import date\n'), ((65495, 65511), 'datetime.date', 'date', (['(2012)', '(1)', '(2)'], {}), '(2012, 1, 2)\n', (65499, 65511), False, 'from datetime import date\n'), ((7620, 7662), 'pandas.Series', 'Series', (['(1000000000.0 * 3600 * 24)', 'rs.index'], {}), '(1000000000.0 * 3600 * 24, rs.index)\n', (7626, 7662), False, 'from pandas import Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8\n'), ((14332, 14352), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (14341, 14352), False, 'from datetime import datetime, timedelta\n'), ((14369, 14389), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (14378, 14389), False, 'from datetime import datetime, timedelta\n'), ((14561, 14581), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (14570, 14581), False, 'from datetime import datetime, timedelta\n'), ((14598, 14618), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (14607, 14618), False, 'from datetime import datetime, timedelta\n'), ((15022, 15042), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (15031, 15042), False, 'from datetime import datetime, timedelta\n'), ((15059, 15079), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (15068, 15079), False, 'from datetime 
import datetime, timedelta\n'), ((15251, 15271), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (15260, 15271), False, 'from datetime import datetime, timedelta\n'), ((15288, 15308), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (15297, 15308), False, 'from datetime import datetime, timedelta\n'), ((20012, 20020), 'pandas.compat.range', 'range', (['(2)'], {}), '(2)\n', (20017, 20020), False, 'from pandas.compat import range, zip\n'), ((64508, 64530), 'operator.truediv', 'operator.truediv', (['y', 'x'], {}), '(y, x)\n', (64524, 64530), False, 'import operator\n'), ((64693, 64711), 'operator.div', 'operator.div', (['y', 'x'], {}), '(y, x)\n', (64705, 64711), False, 'import operator\n'), ((18344, 18367), 'numpy.timedelta64', 'np.timedelta64', (['m', 'unit'], {}), '(m, unit)\n', (18358, 18367), True, 'import numpy as np\n'), ((18789, 18812), 'numpy.timedelta64', 'np.timedelta64', (['m', 'unit'], {}), '(m, unit)\n', (18803, 18812), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# Send DHT22 sensor data periodically to AWS IoT and process actuation commands received.
import time
import datetime
import ssl
import json
import paho.mqtt.client as mqtt
import dht22
import pigpio
import RPi.GPIO as GPIO
# TODO: Change this to the name of our Raspberry Pi, also known as our "Thing Name"
deviceName = "g88pi"
# Public certificate of our Raspberry Pi, as provided by AWS IoT.
deviceCertificate = "tp-iot-certificate.pem.crt"
# Private key of our Raspberry Pi, as provided by AWS IoT.
devicePrivateKey = "tp-iot-private.pem.key"
# Root certificate to authenticate AWS IoT when we connect to their server.
awsCert = "aws-iot-rootCA.crt"
# Flipped to True by on_connect() once the MQTT session is established;
# main() waits on this before publishing.
isConnected = False
# Assume we connected the DHT22 Sensor, YwRobot Light Sensor, L-934ID-5V LED as follows:
# DHT22/AM2302 --> Raspberry Pi:
# + --> GPIO 8
# Out --> GPIO 22
# - --> Ground (Pin 14)
power = 8  # BCM pin supplying power to the DHT22.
temp_sensor = 22  # BCM pin carrying the DHT22 data line.
# YwRobot Light Sensor --> Raspberry Pi:
# Ground --> Ground (Pin 9)
# VCC --> 3.3V Power (Pin 1)
# DOUT --> GPIO 4
light_sensor = 4  # BCM pin for the light sensor's digital output (unused below).
# L-934ID-5V LED --> Raspberry Pi
# + --> GPIO 25
# Ground --> Ground (Pin 20)
led = 25  # BCM pin driving the LED anode.
# This is the main logic of the program. We connect to AWS IoT via MQTT, send sensor data periodically to AWS IoT,
# and handle any actuation commands received from AWS IoT.
def main():
    """Connect to AWS IoT over MQTT and publish DHT22 readings forever.

    Sets up a TLS 1.2 mutual-auth MQTT session, runs the Paho network loop
    on a background thread, then repeatedly reads the DHT22 sensor and
    publishes temperature/humidity to the device shadow every 30 seconds.
    Ctrl-C stops the loop; any other error is logged and retried.
    """
    global isConnected
    # Create an MQTT client for connecting to AWS IoT via MQTT.
    client = mqtt.Client(deviceName + "_sr")  # Client ID must be unique because AWS will disconnect any duplicates.
    client.on_connect = on_connect  # When connected, call on_connect.
    client.on_message = on_message  # When message received, call on_message.
    client.on_log = on_log  # When logging debug messages, call on_log.
    # Set the certificates and private key for connecting to AWS IoT. TLS 1.2 is mandatory for AWS IoT and is supported
    # only in Python 3.4 and later, compiled with OpenSSL 1.0.1 and later.
    client.tls_set(awsCert, deviceCertificate, devicePrivateKey, ssl.CERT_REQUIRED, ssl.PROTOCOL_TLSv1_2)
    # Connect to AWS IoT server. Use AWS command line "aws iot describe-endpoint" to get the address.
    print("Connecting to AWS IoT...")
    client.connect("A1P01IYM2DOZA0.iot.us-west-2.amazonaws.com", 8883, 60)
    # Start a background thread to process the MQTT network commands concurrently, including auto-reconnection.
    client.loop_start()
    # Prepare the DHT22 sensor. Ensure we don't read from the DHT22 within 2 seconds, else it will eventually hang.
    dht22_sensor = dht22.Sensor(pigpio.pi(), temp_sensor, power=power)
    # Set the pin numbering to the BCM (same as GPIO) numbering format.
    GPIO.setmode(GPIO.BCM)
    # We tell the system that the LED port should be an output port, not input.
    GPIO.setup(led, GPIO.OUT)
    time.sleep(1)
    # Loop forever, publishing one reading every 30 seconds.
    while True:
        try:
            # If we are not connected yet to AWS IoT, wait 1 second and try again.
            if not isConnected:
                time.sleep(1)
                continue
            # Read DHT22 sensor values. Skip if we detect an error: any
            # checksum/short/missing/reset count, or a negative reading,
            # indicates a bad sample or a wiring problem.
            dht22_sensor.trigger()
            if dht22_sensor.bad_checksum() + dht22_sensor.short_message() + dht22_sensor.missing_message() + \
                    dht22_sensor.sensor_resets() != 0 or dht22_sensor.temperature() < 0 or dht22_sensor.humidity() < 0:
                print(("DHT22 may be connected incorrectly: temperature={:3.1f}, humidity={:3.1f}, bad_checksum={}, " +
                       "short_message={}, missing_message={}, sensor_resets={}")
                      .format(dht22_sensor.temperature(), dht22_sensor.humidity(), dht22_sensor.bad_checksum(),
                              dht22_sensor.short_message(), dht22_sensor.missing_message(),
                              dht22_sensor.sensor_resets()))
                continue
            # Prepare our sensor data in JSON format, following the shadow
            # document convention (reported state + ISO-8601 timestamp).
            payload = {
                "state": {
                    "reported": {
                        "temperature": round(dht22_sensor.temperature(), 1),
                        "humidity": round(dht22_sensor.humidity(), 1),
                        "timestamp": datetime.datetime.now().isoformat()
                    }
                }
            }
            print("Sending sensor data to AWS IoT...\n" +
                  json.dumps(payload, indent=4, separators=(',', ': ')))
            # Publish our sensor data to AWS IoT via the MQTT topic, also known as updating our "Thing Shadow".
            client.publish("$aws/things/" + deviceName + "/shadow/update", json.dumps(payload))
            print("Sent to AWS IoT")
            # Wait 30 seconds before sending the next set of sensor data.
            time.sleep(30)
        except KeyboardInterrupt:
            # Stop the program when we press Ctrl-C.
            break
        except Exception as e:
            # For all other errors (sensor glitches, network drops), we
            # wait a while and resume rather than crash the daemon.
            print("Exception: " + str(e))
            time.sleep(10)
            continue
# This is called when we are connected to AWS IoT via MQTT.
# We subscribe for notifications of desired state updates.
def on_connect(client, userdata, flags, rc):
    """Paho connect callback: mark the session live and subscribe to
    accepted shadow updates so actuation commands reach on_message()."""
    global isConnected
    isConnected = True
    print("Connected to AWS IoT")
    # Listen on the shadow's update/accepted topic for desired-state changes.
    shadow_topic = "$aws/things/" + deviceName + "/shadow/update/accepted"
    print("Subscribing to MQTT topic " + shadow_topic)
    client.subscribe(shadow_topic)
# This is called when we receive a subscription notification from AWS IoT.
# If this is an actuation command, we execute it.
def on_message(client, userdata, msg):
    """Subscription callback: decode a shadow update and run any actuation.

    The payload arrives as UTF-8 encoded JSON; when it carries a
    state.desired section we actuate each attribute in it (e.g. led=on).
    """
    # The payload is binary, so decode to text before parsing the JSON.
    payload2 = json.loads(msg.payload.decode("utf-8"))
    print("Received message, topic: " + msg.topic + ", payload:\n" +
          json.dumps(payload2, indent=4, separators=(',', ': ')))
    # Guard clauses: nothing to actuate unless a desired state is present.
    state = payload2.get("state")
    if state is None:
        return
    desired_state = state.get("desired")
    if desired_state is None:
        return
    # Actuate every attribute/value pair in the desired state.
    for attribute, value in desired_state.items():
        actuate(client, attribute, value)
# Control my actuators based on the specified attribute and value,
# e.g. "led=on" will switch on my LED.
def actuate(client, attribute, value):
    """Drive the actuator named by *attribute*, e.g. "led"="on" switches
    the LED on.  Supported LED values: "on", "off", "flash1"."""
    if attribute == "timestamp":
        # The timestamp attribute is informational only; nothing to actuate.
        return
    print("Setting " + attribute + " to " + value + "...")
    if attribute == "led":
        def switch_led(turn_on):
            # Drive the GPIO pin, then report the new state back to AWS IoT.
            GPIO.output(led, turn_on)
            send_reported_state(client, "led", "on" if turn_on else "off")
        if value == "on":
            switch_led(True)
            return
        if value == "off":
            switch_led(False)
            return
        if value == "flash1":
            # Pulse once: on for a second, then off, pausing after each change.
            switch_led(True)
            time.sleep(1)
            switch_led(False)
            time.sleep(1)
            return
    # Show an error if attribute or value are incorrect.
    print("Error: Don't know how to set " + attribute + " to " + value)
# Send the reported state of our actuator tp AWS IoT after it has been triggered, e.g. "led": "on".
def send_reported_state(client, attribute, value):
    """Report a single actuator attribute (e.g. "led": "on") back to the
    device shadow so AWS IoT knows the actuation took effect."""
    # Shadow convention: reported values live under state.reported,
    # alongside an ISO-8601 timestamp.
    reported = {
        attribute: value,
        "timestamp": datetime.datetime.now().isoformat()
    }
    payload = {"state": {"reported": reported}}
    print("Sending sensor data to AWS IoT...\n" +
          json.dumps(payload, indent=4, separators=(',', ': ')))
    # Publish our sensor data to AWS IoT via the MQTT topic, also known as updating our "Thing Shadow".
    client.publish("$aws/things/" + deviceName + "/shadow/update", json.dumps(payload))
    print("Sent to AWS IoT")
# Print out log messages for tracing.
def on_log(client, userdata, level, buf):
    """Debug-trace callback: echo the Paho client's log line to stdout."""
    message = "Log: " + buf
    print(message)
# Start the main program, but only when executed as a script -- importing
# this module for reuse/testing must not kick off the MQTT loop.
if __name__ == "__main__":
    main()
|
[
"RPi.GPIO.setmode",
"RPi.GPIO.setup",
"json.dumps",
"time.sleep",
"paho.mqtt.client.Client",
"RPi.GPIO.output",
"datetime.datetime.now",
"pigpio.pi"
] |
[((1451, 1482), 'paho.mqtt.client.Client', 'mqtt.Client', (["(deviceName + '_sr')"], {}), "(deviceName + '_sr')\n", (1462, 1482), True, 'import paho.mqtt.client as mqtt\n'), ((2697, 2719), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (2709, 2719), True, 'import RPi.GPIO as GPIO\n'), ((2804, 2829), 'RPi.GPIO.setup', 'GPIO.setup', (['led', 'GPIO.OUT'], {}), '(led, GPIO.OUT)\n', (2814, 2829), True, 'import RPi.GPIO as GPIO\n'), ((2834, 2847), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2844, 2847), False, 'import time\n'), ((2582, 2593), 'pigpio.pi', 'pigpio.pi', ([], {}), '()\n', (2591, 2593), False, 'import pigpio\n'), ((8453, 8472), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (8463, 8472), False, 'import json\n'), ((4773, 4787), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (4783, 4787), False, 'import time\n'), ((5977, 6031), 'json.dumps', 'json.dumps', (['payload2'], {'indent': '(4)', 'separators': "(',', ': ')"}), "(payload2, indent=4, separators=(',', ': '))\n", (5987, 6031), False, 'import json\n'), ((7044, 7066), 'RPi.GPIO.output', 'GPIO.output', (['led', '(True)'], {}), '(led, True)\n', (7055, 7066), True, 'import RPi.GPIO as GPIO\n'), ((8226, 8279), 'json.dumps', 'json.dumps', (['payload'], {'indent': '(4)', 'separators': "(',', ': ')"}), "(payload, indent=4, separators=(',', ': '))\n", (8236, 8279), False, 'import json\n'), ((3029, 3042), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3039, 3042), False, 'import time\n'), ((4628, 4647), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (4638, 4647), False, 'import json\n'), ((5043, 5057), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (5053, 5057), False, 'import time\n'), ((7210, 7233), 'RPi.GPIO.output', 'GPIO.output', (['led', '(False)'], {}), '(led, False)\n', (7221, 7233), True, 'import RPi.GPIO as GPIO\n'), ((4385, 4438), 'json.dumps', 'json.dumps', (['payload'], {'indent': '(4)', 'separators': "(',', ': ')"}), 
"(payload, indent=4, separators=(',', ': '))\n", (4395, 4438), False, 'import json\n'), ((7410, 7432), 'RPi.GPIO.output', 'GPIO.output', (['led', '(True)'], {}), '(led, True)\n', (7421, 7432), True, 'import RPi.GPIO as GPIO\n'), ((7498, 7511), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7508, 7511), False, 'import time\n'), ((7525, 7548), 'RPi.GPIO.output', 'GPIO.output', (['led', '(False)'], {}), '(led, False)\n', (7536, 7548), True, 'import RPi.GPIO as GPIO\n'), ((7615, 7628), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7625, 7628), False, 'import time\n'), ((8100, 8123), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8121, 8123), False, 'import datetime\n'), ((4219, 4242), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4240, 4242), False, 'import datetime\n')]
|
# Generated by Django 2.2.2 on 2019-07-18 19:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``phone`` CharField (max 15 chars, nullable and
    blankable) to the ``weblog.UserDetail`` model."""
    dependencies = [
        ('weblog', '0011_auto_20190718_1829'),
    ]

    operations = [
        migrations.AddField(
            model_name='userdetail',
            name='phone',
            field=models.CharField(blank=True, max_length=15, null=True),
        ),
    ]
|
[
"django.db.models.CharField"
] |
[((336, 390), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(15)', 'null': '(True)'}), '(blank=True, max_length=15, null=True)\n', (352, 390), False, 'from django.db import migrations, models\n')]
|
import torch
import torch.nn as nn
from src.network import Conv2d
class MCNN(nn.Module):
    """Multi-column CNN: four parallel convolutional columns with different
    receptive fields, fused by a 1x1 convolution into a single-channel map.

    Every column follows one template (see ``_make_branch``); only the base
    channel count and first kernel size differ.  The columns output
    8 + 10 + 12 + 14 = 44 channels, which ``fuse`` reduces to 1.
    """

    def __init__(self, bn=False):
        """Build the four columns and the fusion layer.

        Args:
            bn: if True, each Conv2d block applies batch normalization.
        """
        super(MCNN, self).__init__()
        # (base channels, first kernel) per column; subsequent kernels are
        # two smaller than the first (9->7, 7->5, 5->3, 3->1).
        self.branch1 = self._make_branch(16, 9, bn)
        self.branch2 = self._make_branch(20, 7, bn)
        self.branch3 = self._make_branch(24, 5, bn)
        self.branch4 = self._make_branch(28, 3, bn)
        # 8 + 10 + 12 + 14 = 44 concatenated channels -> 1 output channel.
        self.fuse = nn.Sequential(Conv2d(44, 1, 1, same_padding=True, bn=bn))

    @staticmethod
    def _make_branch(base, kernel, bn):
        """Build one column: conv(1->base, kernel), pool, conv(base->2*base),
        pool, conv(2*base->base), conv(base->base//2), where every layer
        after the first uses kernel-2.  The layer order matches the original
        explicit Sequentials, so state_dict keys are unchanged.
        """
        k = kernel - 2
        return nn.Sequential(Conv2d(1, base, kernel, same_padding=True, bn=bn),
                             nn.MaxPool2d(2),
                             Conv2d(base, 2 * base, k, same_padding=True, bn=bn),
                             nn.MaxPool2d(2),
                             Conv2d(2 * base, base, k, same_padding=True, bn=bn),
                             Conv2d(base, base // 2, k, same_padding=True, bn=bn))

    def forward(self, im_data):
        """Run all four columns on im_data and fuse their concatenated maps."""
        x1 = self.branch1(im_data)
        x2 = self.branch2(im_data)
        x3 = self.branch3(im_data)
        x4 = self.branch4(im_data)
        x = torch.cat((x1, x2, x3, x4), 1)
        x = self.fuse(x)
        return x
|
[
"torch.nn.MaxPool2d",
"src.network.Conv2d",
"torch.cat"
] |
[((2169, 2199), 'torch.cat', 'torch.cat', (['(x1, x2, x3, x4)', '(1)'], {}), '((x1, x2, x3, x4), 1)\n', (2178, 2199), False, 'import torch\n'), ((201, 243), 'src.network.Conv2d', 'Conv2d', (['(1)', '(16)', '(9)'], {'same_padding': '(True)', 'bn': 'bn'}), '(1, 16, 9, same_padding=True, bn=bn)\n', (207, 243), False, 'from src.network import Conv2d\n'), ((282, 297), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (294, 297), True, 'import torch.nn as nn\n'), ((336, 379), 'src.network.Conv2d', 'Conv2d', (['(16)', '(32)', '(7)'], {'same_padding': '(True)', 'bn': 'bn'}), '(16, 32, 7, same_padding=True, bn=bn)\n', (342, 379), False, 'from src.network import Conv2d\n'), ((418, 433), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (430, 433), True, 'import torch.nn as nn\n'), ((472, 515), 'src.network.Conv2d', 'Conv2d', (['(32)', '(16)', '(7)'], {'same_padding': '(True)', 'bn': 'bn'}), '(32, 16, 7, same_padding=True, bn=bn)\n', (478, 515), False, 'from src.network import Conv2d\n'), ((554, 596), 'src.network.Conv2d', 'Conv2d', (['(16)', '(8)', '(7)'], {'same_padding': '(True)', 'bn': 'bn'}), '(16, 8, 7, same_padding=True, bn=bn)\n', (560, 596), False, 'from src.network import Conv2d\n'), ((636, 678), 'src.network.Conv2d', 'Conv2d', (['(1)', '(20)', '(7)'], {'same_padding': '(True)', 'bn': 'bn'}), '(1, 20, 7, same_padding=True, bn=bn)\n', (642, 678), False, 'from src.network import Conv2d\n'), ((717, 732), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (729, 732), True, 'import torch.nn as nn\n'), ((771, 814), 'src.network.Conv2d', 'Conv2d', (['(20)', '(40)', '(5)'], {'same_padding': '(True)', 'bn': 'bn'}), '(20, 40, 5, same_padding=True, bn=bn)\n', (777, 814), False, 'from src.network import Conv2d\n'), ((853, 868), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (865, 868), True, 'import torch.nn as nn\n'), ((907, 950), 'src.network.Conv2d', 'Conv2d', (['(40)', '(20)', '(5)'], {'same_padding': '(True)', 'bn': 'bn'}), 
'(40, 20, 5, same_padding=True, bn=bn)\n', (913, 950), False, 'from src.network import Conv2d\n'), ((989, 1032), 'src.network.Conv2d', 'Conv2d', (['(20)', '(10)', '(5)'], {'same_padding': '(True)', 'bn': 'bn'}), '(20, 10, 5, same_padding=True, bn=bn)\n', (995, 1032), False, 'from src.network import Conv2d\n'), ((1072, 1114), 'src.network.Conv2d', 'Conv2d', (['(1)', '(24)', '(5)'], {'same_padding': '(True)', 'bn': 'bn'}), '(1, 24, 5, same_padding=True, bn=bn)\n', (1078, 1114), False, 'from src.network import Conv2d\n'), ((1153, 1168), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1165, 1168), True, 'import torch.nn as nn\n'), ((1207, 1250), 'src.network.Conv2d', 'Conv2d', (['(24)', '(48)', '(3)'], {'same_padding': '(True)', 'bn': 'bn'}), '(24, 48, 3, same_padding=True, bn=bn)\n', (1213, 1250), False, 'from src.network import Conv2d\n'), ((1289, 1304), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1301, 1304), True, 'import torch.nn as nn\n'), ((1343, 1386), 'src.network.Conv2d', 'Conv2d', (['(48)', '(24)', '(3)'], {'same_padding': '(True)', 'bn': 'bn'}), '(48, 24, 3, same_padding=True, bn=bn)\n', (1349, 1386), False, 'from src.network import Conv2d\n'), ((1425, 1468), 'src.network.Conv2d', 'Conv2d', (['(24)', '(12)', '(3)'], {'same_padding': '(True)', 'bn': 'bn'}), '(24, 12, 3, same_padding=True, bn=bn)\n', (1431, 1468), False, 'from src.network import Conv2d\n'), ((1508, 1550), 'src.network.Conv2d', 'Conv2d', (['(1)', '(28)', '(3)'], {'same_padding': '(True)', 'bn': 'bn'}), '(1, 28, 3, same_padding=True, bn=bn)\n', (1514, 1550), False, 'from src.network import Conv2d\n'), ((1589, 1604), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1601, 1604), True, 'import torch.nn as nn\n'), ((1643, 1686), 'src.network.Conv2d', 'Conv2d', (['(28)', '(56)', '(1)'], {'same_padding': '(True)', 'bn': 'bn'}), '(28, 56, 1, same_padding=True, bn=bn)\n', (1649, 1686), False, 'from src.network import Conv2d\n'), ((1725, 1740), 
'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1737, 1740), True, 'import torch.nn as nn\n'), ((1779, 1822), 'src.network.Conv2d', 'Conv2d', (['(56)', '(28)', '(1)'], {'same_padding': '(True)', 'bn': 'bn'}), '(56, 28, 1, same_padding=True, bn=bn)\n', (1785, 1822), False, 'from src.network import Conv2d\n'), ((1861, 1904), 'src.network.Conv2d', 'Conv2d', (['(28)', '(14)', '(1)'], {'same_padding': '(True)', 'bn': 'bn'}), '(28, 14, 1, same_padding=True, bn=bn)\n', (1867, 1904), False, 'from src.network import Conv2d\n'), ((1940, 1982), 'src.network.Conv2d', 'Conv2d', (['(44)', '(1)', '(1)'], {'same_padding': '(True)', 'bn': 'bn'}), '(44, 1, 1, same_padding=True, bn=bn)\n', (1946, 1982), False, 'from src.network import Conv2d\n')]
|
import argparse
import multiprocessing
import random
import shutil
from datetime import datetime
from functools import partial
from pathlib import Path
import chainer
import chainer.functions as F
import chainer.links as L
import cupy
import numpy as np
from chainer import iterators, optimizers, serializers
from chainer.datasets import TransformDataset, get_cifar10
from chainer.training import StandardUpdater, Trainer, extensions
import augmentation
from metric_learning import MetricLearnClassifier
from modified_evaluator import ModifiedEvaluator
from modified_updater import ModifiedUpdater
from resnet import ResNet50
def apply_augmentation(inputs, mean, std, angle=(-5, 5), scale=(1, 1.2),
                       crop_size=None, train=True):
    """Normalize and (optionally) randomly augment one (image, label) pair.

    Args:
        inputs: (img, label); img is channels-first (CHW).
        mean, std: per-channel statistics used for normalization.
        angle: rotation range passed to augmentation.random_rotate (train only).
        scale: resize range passed to augmentation.random_resize (train only).
        crop_size: if given, take a random square crop of this size
            (applied to BOTH train and eval inputs).
        train: if True, apply the random augmentations.

    Returns:
        (img, label) with img back in CHW layout.
    """
    img, label = inputs
    img = img.copy()
    # Work in HWC layout; converted back to CHW at the end.
    img = img.transpose(1, 2, 0)
    if train:
        # augmentation helpers return (image, transform-info); info is unused.
        img, _ = augmentation.gamma_correction(img)
    # Per-channel normalization with the training-set statistics.
    img -= mean[None, None, :]
    img /= std[None, None, :]
    if train:
        img, _ = augmentation.random_rotate(img, angle=angle)
        # Mirror and flip each fire independently with probability 0.5.
        if np.random.rand() < 0.5:
            img, _ = augmentation.mirror(img)
        if np.random.rand() < 0.5:
            img, _ = augmentation.flip(img)
        img, _ = augmentation.random_resize(img, scale=scale)
    if crop_size is not None:
        # Random crop; assumes img is strictly larger than crop_size in both
        # spatial dims -- np.random.randint(0) raises otherwise. TODO confirm.
        rnd1 = np.random.randint(img.shape[0] - crop_size)
        rnd2 = np.random.randint(img.shape[1] - crop_size)
        img = img[rnd1:rnd1 + crop_size, rnd2:rnd2 + crop_size, :]
    img = img.transpose(2, 0, 1)
    return img, label
def main():
    """Train MetricLearnClassifier(ResNet50, ArcFace) on CIFAR-10.

    Parses hyper-parameters from the command line, builds the augmented
    CIFAR-10 pipeline, and runs a chainer Trainer with logging, plotting
    and periodic snapshots under output/<timestamp>/.
    """
    parser = argparse.ArgumentParser(description='training CIFAR-10')
    parser.add_argument('--gpu', '-g', default=-1, type=int,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--epoch', '-e', type=int, default=100,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--batchsize', '-b', type=int, default=8,
                        help='Number of images in each mini-batch')
    parser.add_argument('--seed', '-s', type=int, default=0,
                        help='Random seed')
    parser.add_argument('--report_trigger', '-rt', type=str, default='1e',
                        help='Interval for reporting(Ex.100i, default:1e)')
    parser.add_argument('--save_trigger', '-st', type=str, default='1e',
                        help='Interval for saving the model(Ex.100i, default:1e)')
    parser.add_argument('--load_model', '-lm', type=str, default=None,
                        help='Path of the model object to load')
    parser.add_argument('--load_optimizer', '-lo', type=str, default=None,
                        help='Path of the optimizer object to load')
    args = parser.parse_args()

    # One output directory per run, stamped with the start time.
    start_time = datetime.now()
    save_dir = Path('output/{}'.format(start_time.strftime('%Y%m%d_%H%M')))

    # Seed every RNG in play (Python, NumPy, CuPy) for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    cupy.random.seed(args.seed)

    model = MetricLearnClassifier(ResNet50(), 512, 10,
                                  method='arcface', final_margin=0.5,
                                  final_scale=64, target_epoch=100)
    if args.load_model is not None:
        serializers.load_npz(args.load_model, model)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    optimizer = optimizers.Adam(alpha=1e-3, weight_decay_rate=5e-4, amsgrad=True)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(5e-4))
    if args.load_optimizer is not None:
        serializers.load_npz(args.load_optimizer, optimizer)

    train_data, valid_data = get_cifar10(scale=255.)
    # Per-channel statistics of the training set, used to normalize both splits.
    mean = np.mean([x for x, _ in train_data], axis=(0, 2, 3))
    std = np.std([x for x, _ in train_data], axis=(0, 2, 3))

    train_transform = partial(apply_augmentation, mean=mean, std=std, crop_size=28, train=True)
    # BUG FIX: the validation pipeline previously passed train=True
    # (copy-paste), which applied random rotation/flip/resize to validation
    # images.  Validation should only normalize and crop.
    valid_transform = partial(apply_augmentation, mean=mean, std=std, crop_size=28, train=False)

    train_data = TransformDataset(train_data, train_transform)
    valid_data = TransformDataset(valid_data, valid_transform)

    train_iter = iterators.SerialIterator(train_data, args.batchsize)
    valid_iter = iterators.SerialIterator(valid_data, args.batchsize, repeat=False, shuffle=False)

    updater = ModifiedUpdater(train_iter, optimizer, device=args.gpu)
    trainer = Trainer(updater, (args.epoch, 'epoch'), out=save_dir)

    # Triggers are written as '<n>i' (iterations) or '<n>e' (epochs).
    report_trigger = (int(args.report_trigger[:-1]), 'iteration' if args.report_trigger[-1] == 'i' else 'epoch')
    trainer.extend(extensions.LogReport(trigger=report_trigger))
    trainer.extend(ModifiedEvaluator(valid_iter, model, device=args.gpu), name='val', trigger=report_trigger)
    trainer.extend(extensions.PrintReport(['epoch', 'iteration', 'main/loss', 'main/accuracy', 'val/main/loss',
                                            'val/main/accuracy', 'elapsed_time']), trigger=report_trigger)
    trainer.extend(extensions.PlotReport(['main/loss', 'val/main/loss'], x_key=report_trigger[1],
                                         marker='.', file_name='loss.png', trigger=report_trigger))
    trainer.extend(extensions.PlotReport(['main/accuracy', 'val/main/accuracy'], x_key=report_trigger[1],
                                         marker='.', file_name='accuracy.png', trigger=report_trigger))

    save_trigger = (int(args.save_trigger[:-1]), 'iteration' if args.save_trigger[-1] == 'i' else 'epoch')
    trainer.extend(extensions.snapshot_object(model, filename='model_{0}-{{.updater.{0}}}.npz'
                                               .format(save_trigger[1])), trigger=save_trigger)
    trainer.extend(extensions.snapshot_object(optimizer, filename='optimizer_{0}-{{.updater.{0}}}.npz'
                                               .format(save_trigger[1])), trigger=save_trigger)
    trainer.extend(extensions.ProgressBar())
    # BUG FIX: Chainer's Adam exposes its learning rate as the hyperparameter
    # 'alpha' ('lr' is a derived read-only property), so ExponentialShift must
    # target 'alpha' -- shifting 'lr' raises when the trigger first fires.
    trainer.extend(extensions.ExponentialShift('alpha', 0.5), trigger=(30, 'epoch'))

    # Start from a clean output directory for this run; parents=True also
    # creates the top-level 'output/' directory on a fresh checkout.
    if save_dir.exists():
        shutil.rmtree(save_dir)
    save_dir.mkdir(parents=True)
    (save_dir / 'training_details').mkdir()

    # Record the run's hyper-parameters for later reference.
    with open(save_dir / 'training_details/train_params.txt', 'w') as f:
        f.write('model: {}\n'.format(model.predictor.__class__.__name__))
        f.write('n_epoch: {}\n'.format(args.epoch))
        f.write('batch_size: {}\n'.format(args.batchsize))
        f.write('n_data_train: {}\n'.format(len(train_data)))
        f.write('n_data_val: {}\n'.format(len(valid_data)))
        f.write('seed: {}\n'.format(args.seed))

    trainer.run()
# Entry point: run training only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"modified_updater.ModifiedUpdater",
"augmentation.flip",
"numpy.mean",
"numpy.random.randint",
"chainer.iterators.SerialIterator",
"augmentation.random_rotate",
"shutil.rmtree",
"resnet.ResNet50",
"chainer.training.extensions.LogReport",
"modified_evaluator.ModifiedEvaluator",
"chainer.serializers.load_npz",
"numpy.std",
"cupy.random.seed",
"chainer.training.extensions.ExponentialShift",
"random.seed",
"chainer.datasets.TransformDataset",
"augmentation.random_resize",
"datetime.datetime.now",
"functools.partial",
"chainer.optimizer.WeightDecay",
"chainer.training.extensions.PrintReport",
"chainer.training.extensions.PlotReport",
"chainer.datasets.get_cifar10",
"augmentation.gamma_correction",
"augmentation.mirror",
"chainer.optimizers.Adam",
"chainer.training.Trainer",
"chainer.training.extensions.ProgressBar",
"numpy.random.rand",
"chainer.cuda.get_device_from_id"
] |
[((1560, 1613), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""training mnist"""'}), "(description='training mnist')\n", (1583, 1613), False, 'import argparse\n'), ((2759, 2773), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2771, 2773), False, 'from datetime import datetime\n'), ((2855, 2877), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (2866, 2877), False, 'import random\n'), ((2882, 2907), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2896, 2907), True, 'import numpy as np\n'), ((2912, 2939), 'cupy.random.seed', 'cupy.random.seed', (['args.seed'], {}), '(args.seed)\n', (2928, 2939), False, 'import cupy\n'), ((3343, 3411), 'chainer.optimizers.Adam', 'optimizers.Adam', ([], {'alpha': '(0.001)', 'weight_decay_rate': '(0.0005)', 'amsgrad': '(True)'}), '(alpha=0.001, weight_decay_rate=0.0005, amsgrad=True)\n', (3358, 3411), False, 'from chainer import iterators, optimizers, serializers\n'), ((3627, 3651), 'chainer.datasets.get_cifar10', 'get_cifar10', ([], {'scale': '(255.0)'}), '(scale=255.0)\n', (3638, 3651), False, 'from chainer.datasets import TransformDataset, get_cifar10\n'), ((3662, 3713), 'numpy.mean', 'np.mean', (['[x for x, _ in train_data]'], {'axis': '(0, 2, 3)'}), '([x for x, _ in train_data], axis=(0, 2, 3))\n', (3669, 3713), True, 'import numpy as np\n'), ((3724, 3774), 'numpy.std', 'np.std', (['[x for x, _ in train_data]'], {'axis': '(0, 2, 3)'}), '([x for x, _ in train_data], axis=(0, 2, 3))\n', (3730, 3774), True, 'import numpy as np\n'), ((3798, 3871), 'functools.partial', 'partial', (['apply_augmentation'], {'mean': 'mean', 'std': 'std', 'crop_size': '(28)', 'train': '(True)'}), '(apply_augmentation, mean=mean, std=std, crop_size=28, train=True)\n', (3805, 3871), False, 'from functools import partial\n'), ((3894, 3967), 'functools.partial', 'partial', (['apply_augmentation'], {'mean': 'mean', 'std': 'std', 'crop_size': '(28)', 'train': '(True)'}), 
'(apply_augmentation, mean=mean, std=std, crop_size=28, train=True)\n', (3901, 3967), False, 'from functools import partial\n'), ((3986, 4031), 'chainer.datasets.TransformDataset', 'TransformDataset', (['train_data', 'train_transform'], {}), '(train_data, train_transform)\n', (4002, 4031), False, 'from chainer.datasets import TransformDataset, get_cifar10\n'), ((4049, 4094), 'chainer.datasets.TransformDataset', 'TransformDataset', (['valid_data', 'valid_transform'], {}), '(valid_data, valid_transform)\n', (4065, 4094), False, 'from chainer.datasets import TransformDataset, get_cifar10\n'), ((4113, 4165), 'chainer.iterators.SerialIterator', 'iterators.SerialIterator', (['train_data', 'args.batchsize'], {}), '(train_data, args.batchsize)\n', (4137, 4165), False, 'from chainer import iterators, optimizers, serializers\n'), ((4183, 4269), 'chainer.iterators.SerialIterator', 'iterators.SerialIterator', (['valid_data', 'args.batchsize'], {'repeat': '(False)', 'shuffle': '(False)'}), '(valid_data, args.batchsize, repeat=False, shuffle=\n False)\n', (4207, 4269), False, 'from chainer import iterators, optimizers, serializers\n'), ((4280, 4335), 'modified_updater.ModifiedUpdater', 'ModifiedUpdater', (['train_iter', 'optimizer'], {'device': 'args.gpu'}), '(train_iter, optimizer, device=args.gpu)\n', (4295, 4335), False, 'from modified_updater import ModifiedUpdater\n'), ((4350, 4403), 'chainer.training.Trainer', 'Trainer', (['updater', "(args.epoch, 'epoch')"], {'out': 'save_dir'}), "(updater, (args.epoch, 'epoch'), out=save_dir)\n", (4357, 4403), False, 'from chainer.training import StandardUpdater, Trainer, extensions\n'), ((865, 899), 'augmentation.gamma_correction', 'augmentation.gamma_correction', (['img'], {}), '(img)\n', (894, 899), False, 'import augmentation\n'), ((994, 1038), 'augmentation.random_rotate', 'augmentation.random_rotate', (['img'], {'angle': 'angle'}), '(img, angle=angle)\n', (1020, 1038), False, 'import augmentation\n'), ((1216, 1260), 
'augmentation.random_resize', 'augmentation.random_resize', (['img'], {'scale': 'scale'}), '(img, scale=scale)\n', (1242, 1260), False, 'import augmentation\n'), ((1306, 1349), 'numpy.random.randint', 'np.random.randint', (['(img.shape[0] - crop_size)'], {}), '(img.shape[0] - crop_size)\n', (1323, 1349), True, 'import numpy as np\n'), ((1365, 1408), 'numpy.random.randint', 'np.random.randint', (['(img.shape[1] - crop_size)'], {}), '(img.shape[1] - crop_size)\n', (1382, 1408), True, 'import numpy as np\n'), ((2975, 2985), 'resnet.ResNet50', 'ResNet50', ([], {}), '()\n', (2983, 2985), False, 'from resnet import ResNet50\n'), ((3179, 3223), 'chainer.serializers.load_npz', 'serializers.load_npz', (['args.load_model', 'model'], {}), '(args.load_model, model)\n', (3199, 3223), False, 'from chainer import iterators, optimizers, serializers\n'), ((3459, 3496), 'chainer.optimizer.WeightDecay', 'chainer.optimizer.WeightDecay', (['(0.0005)'], {}), '(0.0005)\n', (3488, 3496), False, 'import chainer\n'), ((3544, 3596), 'chainer.serializers.load_npz', 'serializers.load_npz', (['args.load_optimizer', 'optimizer'], {}), '(args.load_optimizer, optimizer)\n', (3564, 3596), False, 'from chainer import iterators, optimizers, serializers\n'), ((4537, 4581), 'chainer.training.extensions.LogReport', 'extensions.LogReport', ([], {'trigger': 'report_trigger'}), '(trigger=report_trigger)\n', (4557, 4581), False, 'from chainer.training import StandardUpdater, Trainer, extensions\n'), ((4602, 4655), 'modified_evaluator.ModifiedEvaluator', 'ModifiedEvaluator', (['valid_iter', 'model'], {'device': 'args.gpu'}), '(valid_iter, model, device=args.gpu)\n', (4619, 4655), False, 'from modified_evaluator import ModifiedEvaluator\n'), ((4712, 4846), 'chainer.training.extensions.PrintReport', 'extensions.PrintReport', (["['epoch', 'iteration', 'main/loss', 'main/accuracy', 'val/main/loss',\n 'val/main/accuracy', 'elapsed_time']"], {}), "(['epoch', 'iteration', 'main/loss', 'main/accuracy',\n 
'val/main/loss', 'val/main/accuracy', 'elapsed_time'])\n", (4734, 4846), False, 'from chainer.training import StandardUpdater, Trainer, extensions\n'), ((4930, 5071), 'chainer.training.extensions.PlotReport', 'extensions.PlotReport', (["['main/loss', 'val/main/loss']"], {'x_key': 'report_trigger[1]', 'marker': '"""."""', 'file_name': '"""loss.png"""', 'trigger': 'report_trigger'}), "(['main/loss', 'val/main/loss'], x_key=report_trigger[\n 1], marker='.', file_name='loss.png', trigger=report_trigger)\n", (4951, 5071), False, 'from chainer.training import StandardUpdater, Trainer, extensions\n'), ((5128, 5286), 'chainer.training.extensions.PlotReport', 'extensions.PlotReport', (["['main/accuracy', 'val/main/accuracy']"], {'x_key': 'report_trigger[1]', 'marker': '"""."""', 'file_name': '"""accuracy.png"""', 'trigger': 'report_trigger'}), "(['main/accuracy', 'val/main/accuracy'], x_key=\n report_trigger[1], marker='.', file_name='accuracy.png', trigger=\n report_trigger)\n", (5149, 5286), False, 'from chainer.training import StandardUpdater, Trainer, extensions\n'), ((5834, 5858), 'chainer.training.extensions.ProgressBar', 'extensions.ProgressBar', ([], {}), '()\n', (5856, 5858), False, 'from chainer.training import StandardUpdater, Trainer, extensions\n'), ((5879, 5917), 'chainer.training.extensions.ExponentialShift', 'extensions.ExponentialShift', (['"""lr"""', '(0.5)'], {}), "('lr', 0.5)\n", (5906, 5917), False, 'from chainer.training import StandardUpdater, Trainer, extensions\n'), ((5977, 6000), 'shutil.rmtree', 'shutil.rmtree', (['save_dir'], {}), '(save_dir)\n', (5990, 6000), False, 'import shutil\n'), ((1050, 1066), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1064, 1066), True, 'import numpy as np\n'), ((1095, 1119), 'augmentation.mirror', 'augmentation.mirror', (['img'], {}), '(img)\n', (1114, 1119), False, 'import augmentation\n'), ((1131, 1147), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1145, 1147), True, 'import numpy as 
np\n'), ((1176, 1198), 'augmentation.flip', 'augmentation.flip', (['img'], {}), '(img)\n', (1193, 1198), False, 'import augmentation\n'), ((3255, 3296), 'chainer.cuda.get_device_from_id', 'chainer.cuda.get_device_from_id', (['args.gpu'], {}), '(args.gpu)\n', (3286, 3296), False, 'import chainer\n')]
|
#!/usr/bin/python3
import sys
import os
import shutil
import csv
import zipfile
import pandas as pd
import glob
# command-line arguments: input TSV of balanced data, output slice TSV
infile = sys.argv[1]
outfile = sys.argv[2]
# remove holding_folder if it exists, and create new folder
# use 'rm -r /holding_folder/* in shell script instead?'
holding_path = '/media/secure_volume/holding_folder'
if os.path.isdir(holding_path):
    shutil.rmtree(holding_path)
os.mkdir(holding_path)
def extract(infile):
    '''
    Reads the balanced-data TSV, locates the volsplit.zip archive for each
    bio file (via its 'filesuffix' column) and extracts the per-volume zip
    members into /media/secure_volume/holding_folder.

    :param infile: path to a TSV with 'filesuffix' and 'mainid' columns.
    '''
    balanced_bioindex = pd.read_table(infile)
    for suffix in balanced_bioindex.filesuffix.unique():
        volsplit_file = 'volsplit' + str(suffix) + '.zip'
        volsplit_df = balanced_bioindex.loc[balanced_bioindex.filesuffix == suffix, :]
        try:
            with zipfile.ZipFile('/media/secure_volume/' + volsplit_file, 'r') as myzip:
                for idx, row in volsplit_df.iterrows():
                    filename = row['mainid'] + '.zip'
                    try:
                        myzip.extract(filename, '/media/secure_volume/holding_folder')
                    except KeyError as e:
                        # a missing member only skips that file; keep extracting the rest
                        # (the old code aborted the whole archive on the first miss)
                        print('ERROR:', filename, 'not found in', volsplit_file, '!', e)
        except (OSError, zipfile.BadZipFile) as e:
            # the archive itself could not be opened; previously this path
            # crashed with a NameError because 'filename' was still unbound
            print('ERROR: could not open', volsplit_file, '!', e)
def slicer(outfile):
    '''
    Builds a slice table (htid, zip path, 0, 1001) for every volume zip
    currently in the holding folder and writes it as TSV to `outfile`.

    :param outfile: path of the TSV file to write.
    '''
    idx_file_path = '/media/secure_volume/index/bioindex.tsv'
    holding_folder_path = '/media/secure_volume/holding_folder/'
    bio_idx_df = pd.read_table(idx_file_path)
    bio_idx_df.set_index('mainid', inplace=True)
    mainid_list = [vol for vol in os.listdir(holding_folder_path) if vol.endswith('.zip')]
    # remove '.zip' from file names
    mainid_list_clean = [item[:-4] for item in mainid_list]
    # subset bioindex on holding_folder IDs
    htid_series = bio_idx_df.htid[mainid_list_clean]
    # build each path from its own mainid so htid and path stay aligned
    # row-by-row (glob.glob gave no ordering guarantee vs. os.listdir)
    file_path_list = [holding_folder_path + name for name in mainid_list]
    slice_df = pd.DataFrame(htid_series)
    slice_df['path'] = file_path_list
    slice_df['c'] = 0
    slice_df['d'] = 1001
    # to_csv opens/closes the file itself; the old `with open(outfile)`
    # wrapper produced an unused handle and truncated the file twice
    slice_df.to_csv(outfile, sep='\t', header=False, index=False)
    print("Wrote", len(slice_df), "rows to", outfile)
# pipeline: stage the balanced volumes, then write the slice file for them
extract(infile)
slicer(outfile)
|
[
"pandas.DataFrame",
"os.mkdir",
"zipfile.ZipFile",
"os.path.isdir",
"pandas.read_csv",
"glob.glob",
"pandas.read_table",
"shutil.rmtree",
"os.listdir"
] |
[((331, 358), 'os.path.isdir', 'os.path.isdir', (['holding_path'], {}), '(holding_path)\n', (344, 358), False, 'import os\n'), ((392, 414), 'os.mkdir', 'os.mkdir', (['holding_path'], {}), '(holding_path)\n', (400, 414), False, 'import os\n'), ((364, 391), 'shutil.rmtree', 'shutil.rmtree', (['holding_path'], {}), '(holding_path)\n', (377, 391), False, 'import shutil\n'), ((655, 719), 'pandas.read_csv', 'pd.read_csv', (['"""/media/secure_volume/index/bioindex.tsv"""'], {'sep': '"""\t"""'}), "('/media/secure_volume/index/bioindex.tsv', sep='\\t')\n", (666, 719), True, 'import pandas as pd\n'), ((744, 765), 'pandas.read_table', 'pd.read_table', (['infile'], {}), '(infile)\n', (757, 765), True, 'import pandas as pd\n'), ((1530, 1558), 'pandas.read_table', 'pd.read_table', (['idx_file_path'], {}), '(idx_file_path)\n', (1543, 1558), True, 'import pandas as pd\n'), ((1916, 1956), 'glob.glob', 'glob.glob', (["(holding_folder_path + '*.zip')"], {}), "(holding_folder_path + '*.zip')\n", (1925, 1956), False, 'import glob\n'), ((2076, 2101), 'pandas.DataFrame', 'pd.DataFrame', (['htid_series'], {}), '(htid_series)\n', (2088, 2101), True, 'import pandas as pd\n'), ((1644, 1675), 'os.listdir', 'os.listdir', (['holding_folder_path'], {}), '(holding_folder_path)\n', (1654, 1675), False, 'import os\n'), ((994, 1055), 'zipfile.ZipFile', 'zipfile.ZipFile', (["('/media/secure_volume/' + volsplit_file)", '"""r"""'], {}), "('/media/secure_volume/' + volsplit_file, 'r')\n", (1009, 1055), False, 'import zipfile\n')]
|
#!/usr/bin/env python
# coding=utf-8
# vim:ts=4:sts=4:sw=4:et
#
# Author: <NAME>
# Date: 2019-02-26 18:30:53 +0000 (Tue, 26 Feb 2019)
#
# https://github.com/harisekhon/nagios-plugins
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Nagios Plugin to check the health status of Kubernetes via its API
Tested on Kubernetes 1.13
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import traceback
# make the bundled 'pylib' submodule importable from the script's own directory
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
    # pylint: disable=wrong-import-position
    from harisekhon import RestNagiosPlugin
except ImportError as _:
    # exit code 4 = plugin framework missing (outside the Nagios 0-3 range)
    print(traceback.format_exc(), end='')
    sys.exit(4)
__author__ = '<NAME>'
__version__ = '0.1'
class CheckKubernetesHealth(RestNagiosPlugin):
    """
    Nagios check for the Kubernetes API /healthz/ping endpoint.

    Request/response plumbing is inherited from RestNagiosPlugin; this class
    only configures the endpoint and interprets the returned body.
    """
    def __init__(self):
        # Python 2.x
        super(CheckKubernetesHealth, self).__init__()
        # Python 3.x
        # super().__init__()
        self.name = 'Kubernetes API'
        self.default_port = 8001
        # or just /healthz
        self.path = '/healthz/ping'
        self.auth = 'optional'
        self.json = False
        self.msg = 'Kubernetes msg not defined yet'
    #def add_options(self):
    #    super(CheckKubernetesHealth, self).add_options()
    def process_options(self):
        """Process the standard framework options; this check takes no args."""
        super(CheckKubernetesHealth, self).process_options()
        self.no_args()
    def parse(self, req):
        """
        Interpret the response body: anything other than 'ok' is CRITICAL.

        NOTE(review): assumes req.content is a str equal to 'ok' on success
        (presumably normalized by RestNagiosPlugin) -- confirm it is not
        bytes, which would never compare equal here.
        """
        content = req.content
        if content != 'ok':
            self.critical()
        self.msg = "Kubernetes health = '{}'".format(content)
if __name__ == '__main__':
    # run the plugin when executed directly
    CheckKubernetesHealth().main()
|
[
"sys.path.append",
"os.path.dirname",
"traceback.format_exc",
"os.path.join",
"sys.exit"
] |
[((798, 827), 'os.path.join', 'os.path.join', (['srcdir', '"""pylib"""'], {}), "(srcdir, 'pylib')\n", (810, 827), False, 'import os\n'), ((828, 851), 'sys.path.append', 'sys.path.append', (['libdir'], {}), '(libdir)\n', (843, 851), False, 'import sys\n'), ((762, 787), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (777, 787), False, 'import os\n'), ((1016, 1027), 'sys.exit', 'sys.exit', (['(4)'], {}), '(4)\n', (1024, 1027), False, 'import sys\n'), ((980, 1002), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1000, 1002), False, 'import traceback\n')]
|
import os
import json
__author__ = '<NAME> <<EMAIL>>'
class JSONStorage:
    """
    A dictionary persisted to a JSON file on disk.

    Every mutation through :meth:`set` is immediately flushed to the backing
    file, and stored keys are also readable as attributes.
    """
    file = ''     # path of the backing JSON file
    data = None   # the in-memory dictionary
    indent = ' '  # indent prefix used when pretty-printing the JSON

    def __init__(self, path, name):
        """
        Open (or create) the storage file and load its contents.

        :param path: directory of the storage file; empty string means
                     the current directory. Created if missing.
        :param name: json file name; may itself include path components.
        """
        if path:
            os.makedirs(path, exist_ok=True)
        self.file = os.path.normpath(os.path.join(path, name))
        try:
            with open(self.file) as handle:
                self.data = json.load(handle)
        except FileNotFoundError:
            # first use: start empty and materialize the file right away
            self.data = {}
            self.dump()

    def dump(self):
        """Write the current data dict to the backing storage file."""
        with open(self.file, 'w') as handle:
            json.dump(self.data, handle, indent=self.indent)

    def get(self, item):
        """
        Return the stored value for *item*.

        :param item: key name, string.
        :raises KeyError: if *item* is not stored.
        """
        return self.data[item]

    def set(self, item, value):
        """
        Store *value* under *item* and flush the data to disk.

        :param item: key name, string.
        :param value: any JSON-serializable value.
        """
        self.data[item] = value
        self.dump()

    def __getattr__(self, item):
        """
        Fall back to stored items for attribute access (dot notation)
        when *item* is not a regular class member.

        :raises AttributeError: if *item* is not stored.
        """
        if item not in self.data:
            raise AttributeError
        return self.data[item]
|
[
"json.dump",
"json.load",
"os.path.join",
"os.makedirs"
] |
[((528, 560), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (539, 560), False, 'import os\n'), ((598, 622), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (610, 622), False, 'import os\n'), ((963, 1013), 'json.dump', 'json.dump', (['self.data', 'out_file'], {'indent': 'self.indent'}), '(self.data, out_file, indent=self.indent)\n', (972, 1013), False, 'import json\n'), ((712, 732), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (721, 732), False, 'import json\n')]
|
from django.core.management.base import BaseCommand
from django.contrib.admin.models import LogEntry
def clear_old_admin_logs(keep=2000):
    """
    Trim the admin log down to the `keep` most recent entries.

    LogEntry's default ordering is newest-first ('-action_time'), so the
    first `keep` rows are preserved and everything older is removed in a
    single bulk DELETE instead of loading every row and deleting them one
    query at a time as before.

    :param keep: number of most recent entries to retain (default 2000,
                 matching the previous hard-coded behaviour).
    """
    # materialize the kept pks: sliced querysets cannot be used directly
    # inside a filter on all database backends (e.g. MySQL)
    keep_pks = list(LogEntry.objects.values_list('pk', flat=True)[:keep])
    LogEntry.objects.exclude(pk__in=keep_pks).delete()
class Command(BaseCommand):
    """Management command that prunes old entries from the admin log."""
    def handle(self, *args, **options):
        """Entry point: delete all but the most recent admin LogEntry rows."""
        clear_old_admin_logs()
|
[
"django.contrib.admin.models.LogEntry.objects.all"
] |
[((142, 164), 'django.contrib.admin.models.LogEntry.objects.all', 'LogEntry.objects.all', ([], {}), '()\n', (162, 164), False, 'from django.contrib.admin.models import LogEntry\n')]
|
#!/usr/bin/env python
#
# Copyright (c) 2018 SAP SE
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# this script checks for volume attachments of already deleted volumes in the cinder db
import argparse
import configparser
import datetime
import logging
import os
import sys
from openstack import connection, exceptions
from sqlalchemy import and_, MetaData, select, Table, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# module-level logger; INFO to stdout with a timestamp prefix
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
# get all instances from nova
def get_nova_instances(conn):
    """
    Return a dict of all nova instances across all projects, indexed by
    instance id.

    Exits the process (status 1) on openstack API errors; raises
    RuntimeError if the listing unexpectedly comes back empty.
    """
    nova_instances = dict()
    # get all instance from nova
    try:
        for nova_instance in conn.compute.servers(details=False, all_projects=1):
            nova_instances[nova_instance.id] = nova_instance
        if not nova_instances:
            raise RuntimeError('- PLEASE CHECK MANUALLY - did not get any nova instances back from the nova api - this should in theory never happen ...')
    except exceptions.HttpException as e:
        log.warn("- PLEASE CHECK MANUALLY - got an http exception connecting to openstack: %s", str(e))
        sys.exit(1)
    except exceptions.SDKException as e:
        log.warn("- PLEASE CHECK MANUALLY - got an sdk exception connecting to openstack: %s", str(e))
        sys.exit(1)
    #for i in nova_instances:
    #    print nova_instances[i].id
    # NOTE(review): unreachable in practice - the identical check inside the
    # try block already raised (and RuntimeError is not caught above)
    if not nova_instances:
        raise RuntimeError('Did not get any nova instances back.')
    return nova_instances
# get all volume attachments for volumes
def get_orphan_volume_attachments(meta):
    """
    Return {attachment_id: instance_uuid} for every non-deleted row of
    cinder's volume_attachment table (legacy SQLAlchemy implicit
    execution via meta.bind).
    """
    orphan_volume_attachments = {}
    orphan_volume_attachment_t = Table('volume_attachment', meta, autoload=True)
    columns = [orphan_volume_attachment_t.c.id, orphan_volume_attachment_t.c.instance_uuid]
    orphan_volume_attachment_q = select(columns=columns, whereclause=and_(orphan_volume_attachment_t.c.deleted == 0))
    # return a dict indexed by orphan_volume_attachment_id and with the value nova_instance_uuid for non deleted orphan_volume_attachments
    for (orphan_volume_attachment_id, nova_instance_uuid) in orphan_volume_attachment_q.execute():
        orphan_volume_attachments[orphan_volume_attachment_id] = nova_instance_uuid
    return orphan_volume_attachments
# get all the volume attachments in the cinder db for already deleted instances in nova
def get_wrong_orphan_volume_attachments(nova_instances, orphan_volume_attachments):
    """
    Return the subset of volume attachments whose instance no longer
    exists in nova.

    :param nova_instances: dict of nova instances keyed by instance uuid.
    :param orphan_volume_attachments: dict {attachment_id: instance_uuid}.
    :return: dict {attachment_id: instance_uuid} restricted to attachments
             whose instance_uuid is unknown to nova.
    """
    return {
        attachment_id: instance_uuid
        for attachment_id, instance_uuid in orphan_volume_attachments.items()
        if nova_instances.get(instance_uuid) is None
    }
# delete volume attachments in the cinder db for already deleted instances in nova
def fix_wrong_orphan_volume_attachments(meta, wrong_orphan_volume_attachments, fix_limit):
    """
    Soft-delete the given orphan volume attachments (sets deleted=1 plus
    timestamps); refuses to act if there are more than fix_limit of them.
    """
    if len(wrong_orphan_volume_attachments) <= int(fix_limit):
        orphan_volume_attachment_t = Table('volume_attachment', meta, autoload=True)
        for orphan_volume_attachment_id in wrong_orphan_volume_attachments:
            log.info ("-- action: deleting orphan volume attachment id: %s", orphan_volume_attachment_id)
            now = datetime.datetime.utcnow()
            delete_orphan_volume_attachment_q = orphan_volume_attachment_t.update().\
                where(orphan_volume_attachment_t.c.id == orphan_volume_attachment_id).values(updated_at=now, deleted_at=now, deleted=1)
            delete_orphan_volume_attachment_q.execute()
    else:
        log.warn("- PLEASE CHECK MANUALLY - too many (more than %s) wrong orphan volume attachments - denying to fix them automatically", str(fix_limit))
# get all the volumes in state "error_deleting"
def get_error_deleting_volumes(meta):
    """Return the ids of all non-deleted volumes stuck in 'error_deleting'."""
    error_deleting_volumes = []
    volumes_t = Table('volumes', meta, autoload=True)
    error_deleting_volumes_q = select(columns=[volumes_t.c.id]).where(and_(volumes_t.c.status == "error_deleting",volumes_t.c.deleted == 0))
    # convert the query result into a list
    for i in error_deleting_volumes_q.execute():
        error_deleting_volumes.append(i[0])
    return error_deleting_volumes
# delete all the volumes in state "error_deleting"
def fix_error_deleting_volumes(meta, error_deleting_volumes):
    """
    Soft-delete each listed volume together with its admin metadata,
    metadata and attachments (sets deleted=1 plus timestamps on all rows).
    """
    volumes_t = Table('volumes', meta, autoload=True)
    volume_attachment_t = Table('volume_attachment', meta, autoload=True)
    volume_metadata_t = Table('volume_metadata', meta, autoload=True)
    volume_admin_metadata_t = Table('volume_admin_metadata', meta, autoload=True)
    for error_deleting_volumes_id in error_deleting_volumes:
        now = datetime.datetime.utcnow()
        log.info("-- action: deleting possible volume admin metadata for volume id: %s", error_deleting_volumes_id)
        delete_volume_admin_metadata_q = volume_admin_metadata_t.update().\
            where(volume_admin_metadata_t.c.volume_id == error_deleting_volumes_id).values(updated_at=now, deleted_at=now, deleted=1)
        delete_volume_admin_metadata_q.execute()
        log.info("-- action: deleting possible volume metadata for volume id: %s", error_deleting_volumes_id)
        delete_volume_metadata_q = volume_metadata_t.update().\
            where(volume_metadata_t.c.volume_id == error_deleting_volumes_id).values(updated_at=now, deleted_at=now, deleted=1)
        delete_volume_metadata_q.execute()
        log.info("-- action: deleting possible volume attachments for volume id: %s", error_deleting_volumes_id)
        delete_volume_attachment_q = volume_attachment_t.update().\
            where(volume_attachment_t.c.volume_id == error_deleting_volumes_id).values(updated_at=now, deleted_at=now, deleted=1)
        delete_volume_attachment_q.execute()
        log.info("-- action: deleting volume id: %s", error_deleting_volumes_id)
        delete_volume_q = volumes_t.update().\
            where(volumes_t.c.id == error_deleting_volumes_id).values(updated_at=now, deleted_at=now, deleted=1)
        delete_volume_q.execute()
# get all the snapshots in state "error_deleting"
def get_error_deleting_snapshots(meta):
    """Return the ids of all non-deleted snapshots stuck in 'error_deleting'."""
    error_deleting_snapshots = []
    snapshots_t = Table('snapshots', meta, autoload=True)
    error_deleting_snapshots_q = select(columns=[snapshots_t.c.id]).where(and_(snapshots_t.c.status == "error_deleting",snapshots_t.c.deleted == 0))
    # convert the query result into a list
    for i in error_deleting_snapshots_q.execute():
        error_deleting_snapshots.append(i[0])
    return error_deleting_snapshots
# delete all the snapshots in state "error_deleting"
def fix_error_deleting_snapshots(meta, error_deleting_snapshots):
    """Soft-delete each listed snapshot (sets deleted=1 plus timestamps)."""
    snapshots_t = Table('snapshots', meta, autoload=True)
    for error_deleting_snapshots_id in error_deleting_snapshots:
        log.info("-- action: deleting snapshot id: %s", error_deleting_snapshots_id)
        now = datetime.datetime.utcnow()
        delete_snapshot_q = snapshots_t.update().\
            where(snapshots_t.c.id == error_deleting_snapshots_id).values(updated_at=now, deleted_at=now, deleted=1)
        delete_snapshot_q.execute()
# get all the rows with a volume_admin_metadata still defined where the corresponding volume is already deleted
def get_wrong_volume_admin_metadata(meta):
    """
    Return {volume_admin_metadata_id: volume_id} for admin metadata rows
    still marked live although their volume is already deleted.
    """
    wrong_admin_metadata = {}
    volume_admin_metadata_t = Table('volume_admin_metadata', meta, autoload=True)
    volumes_t = Table('volumes', meta, autoload=True)
    admin_metadata_join = volume_admin_metadata_t.join(volumes_t,volume_admin_metadata_t.c.volume_id == volumes_t.c.id)
    columns = [volumes_t.c.id, volumes_t.c.deleted, volume_admin_metadata_t.c.id, volume_admin_metadata_t.c.deleted]
    wrong_volume_admin_metadata_q = select(columns=columns).select_from(admin_metadata_join).\
        where(and_(volumes_t.c.deleted == 1, volume_admin_metadata_t.c.deleted == 0))
    # return a dict indexed by volume_admin_metadata_id and with the value volume_id for non deleted volume_admin_metadata
    for (volume_id, volume_deleted, volume_admin_metadata_id, volume_admin_metadata_deleted) in wrong_volume_admin_metadata_q.execute():
        wrong_admin_metadata[volume_admin_metadata_id] = volume_id
    return wrong_admin_metadata
# delete volume_admin_metadata still defined where the corresponding volume is already deleted
def fix_wrong_volume_admin_metadata(meta, wrong_admin_metadata):
    """Soft-delete the listed volume_admin_metadata rows."""
    volume_admin_metadata_t = Table('volume_admin_metadata', meta, autoload=True)
    for volume_admin_metadata_id in wrong_admin_metadata:
        log.info("-- action: deleting volume_admin_metadata id: %s", volume_admin_metadata_id)
        now = datetime.datetime.utcnow()
        delete_volume_admin_metadata_q = volume_admin_metadata_t.update().\
            where(volume_admin_metadata_t.c.id == volume_admin_metadata_id).values(updated_at=now, deleted_at=now, deleted=1)
        delete_volume_admin_metadata_q.execute()
# get all the rows with a volume_glance_metadata still defined where the corresponding volume is already deleted
def get_wrong_volume_glance_metadata(meta):
    """
    Return {volume_glance_metadata_id: volume_id} for glance metadata rows
    still marked live although their volume is already deleted.
    """
    wrong_glance_metadata = {}
    volume_glance_metadata_t = Table('volume_glance_metadata', meta, autoload=True)
    volumes_t = Table('volumes', meta, autoload=True)
    glance_metadata_join = volume_glance_metadata_t.join(volumes_t,volume_glance_metadata_t.c.volume_id == volumes_t.c.id)
    columns = [volumes_t.c.id, volumes_t.c.deleted, volume_glance_metadata_t.c.id, volume_glance_metadata_t.c.deleted]
    wrong_volume_glance_metadata_q = select(columns=columns).select_from(glance_metadata_join).\
        where(and_(volumes_t.c.deleted == 1, volume_glance_metadata_t.c.deleted == 0))
    # return a dict indexed by volume_glance_metadata_id and with the value volume_id for non deleted volume_glance_metadata
    for (volume_id, volume_deleted, volume_glance_metadata_id, volume_glance_metadata_deleted) in wrong_volume_glance_metadata_q.execute():
        wrong_glance_metadata[volume_glance_metadata_id] = volume_id
    return wrong_glance_metadata
# delete volume_glance_metadata still defined where the corresponding volume is already deleted
def fix_wrong_volume_glance_metadata(meta, wrong_glance_metadata):
    """Soft-delete the listed volume_glance_metadata rows."""
    volume_glance_metadata_t = Table('volume_glance_metadata', meta, autoload=True)
    for volume_glance_metadata_id in wrong_glance_metadata:
        log.info("-- action: deleting volume_glance_metadata id: %s", volume_glance_metadata_id)
        now = datetime.datetime.utcnow()
        delete_volume_glance_metadata_q = volume_glance_metadata_t.update().\
            where(volume_glance_metadata_t.c.id == volume_glance_metadata_id).values(updated_at=now, deleted_at=now, deleted=1)
        delete_volume_glance_metadata_q.execute()
# get all the rows with a volume_metadata still defined where the corresponding volume is already deleted
def get_wrong_volume_metadata(meta):
    """
    Return {volume_metadata_id: volume_id} for metadata rows still marked
    live although their volume is already deleted.
    """
    wrong_metadata = {}
    volume_metadata_t = Table('volume_metadata', meta, autoload=True)
    volumes_t = Table('volumes', meta, autoload=True)
    metadata_join = volume_metadata_t.join(volumes_t,volume_metadata_t.c.volume_id == volumes_t.c.id)
    columns = [volumes_t.c.id, volumes_t.c.deleted, volume_metadata_t.c.id, volume_metadata_t.c.deleted]
    wrong_volume_metadata_q = select(columns=columns).select_from(metadata_join).\
        where(and_(volumes_t.c.deleted == 1, volume_metadata_t.c.deleted == 0))
    # return a dict indexed by volume_metadata_id and with the value volume_id for non deleted volume_metadata
    for (volume_id, volume_deleted, volume_metadata_id, volume_metadata_deleted) in wrong_volume_metadata_q.execute():
        wrong_metadata[volume_metadata_id] = volume_id
    return wrong_metadata
# delete volume_metadata still defined where the corresponding volume is already deleted
def fix_wrong_volume_metadata(meta, wrong_metadata):
    """Soft-delete the listed volume_metadata rows."""
    volume_metadata_t = Table('volume_metadata', meta, autoload=True)
    for volume_metadata_id in wrong_metadata:
        log.info("-- action: deleting volume_metadata id: %s", volume_metadata_id)
        now = datetime.datetime.utcnow()
        delete_volume_metadata_q = volume_metadata_t.update().\
            where(volume_metadata_t.c.id == volume_metadata_id).values(updated_at=now, deleted_at=now, deleted=1)
        delete_volume_metadata_q.execute()
# get all the rows with a volume attachment still defined where the corresponding volume is already deleted
def get_wrong_volume_attachments(meta):
    """
    Return {volume_attachment_id: volume_id} for attachment rows still
    marked live although their volume is already deleted.
    """
    wrong_attachments = {}
    volume_attachment_t = Table('volume_attachment', meta, autoload=True)
    volumes_t = Table('volumes', meta, autoload=True)
    attachment_join = volume_attachment_t.join(volumes_t,volume_attachment_t.c.volume_id == volumes_t.c.id)
    columns = [volumes_t.c.id, volumes_t.c.deleted, volume_attachment_t.c.id, volume_attachment_t.c.deleted]
    wrong_volume_attachment_q = select(columns=columns).select_from(attachment_join).\
        where(and_(volumes_t.c.deleted == 1, volume_attachment_t.c.deleted == 0))
    # return a dict indexed by volume_attachment_id and with the value volume_id for non deleted volume_attachments
    for (volume_id, volume_deleted, volume_attachment_id, volume_attachment_deleted) in wrong_volume_attachment_q.execute():
        wrong_attachments[volume_attachment_id] = volume_id
    return wrong_attachments
# delete volume attachment still defined where the corresponding volume is already deleted
def fix_wrong_volume_attachments(meta, wrong_attachments, fix_limit):
    """
    Soft-delete the listed volume attachment rows; refuses to act if there
    are more than fix_limit of them.
    """
    if len(wrong_attachments) <= int(fix_limit):
        volume_attachment_t = Table('volume_attachment', meta, autoload=True)
        for volume_attachment_id in wrong_attachments:
            log.info("-- action: deleting volume attachment id: %s", volume_attachment_id)
            now = datetime.datetime.utcnow()
            delete_volume_attachment_q = volume_attachment_t.update().\
                where(volume_attachment_t.c.id == volume_attachment_id).values(updated_at=now, deleted_at=now, deleted=1)
            delete_volume_attachment_q.execute()
    else:
        log.warn("- PLEASE CHECK MANUALLY - too many (more than %s) wrong volume attachments - denying to fix them automatically", str(fix_limit))
# get all the rows, which have the deleted flag set, but not the delete_at column
def get_missing_deleted_at(meta, table_names):
    """
    Return {row_id: table_name} for rows in the given tables that are
    marked deleted but have no deleted_at timestamp.
    """
    missing_deleted_at = {}
    for t in table_names:
        a_table_t = Table(t, meta, autoload=True)
        a_table_select_deleted_at_q = a_table_t.select().where(
            and_(a_table_t.c.deleted == 1, a_table_t.c.deleted_at == None))
        for row in a_table_select_deleted_at_q.execute():
            missing_deleted_at[row.id] = t
    return missing_deleted_at
# set deleted_at to updated_at value if not set for marked as deleted rows
def fix_missing_deleted_at(meta, table_names):
    """
    Backfill deleted_at (with the current UTC time) on rows of the given
    tables that are marked deleted but carry no deleted_at timestamp.
    """
    now = datetime.datetime.utcnow()
    for t in table_names:
        a_table_t = Table(t, meta, autoload=True)
        log.info("- action: fixing columns with missing deleted_at times in the %s table", t)
        a_table_set_deleted_at_q = a_table_t.update().where(
            and_(a_table_t.c.deleted == 1, a_table_t.c.deleted_at == None)).values(
            deleted_at=now)
        a_table_set_deleted_at_q.execute()
# get all the rows with a service still defined where the corresponding volume is already deleted
def get_deleted_services_still_used_in_volumes(meta):
    """
    Return {service_uuid: volume_id} for services marked deleted that are
    still referenced by live volumes.
    """
    deleted_services_still_used_in_volumes = {}
    services_t = Table('services', meta, autoload=True)
    volumes_t = Table('volumes', meta, autoload=True)
    services_volumes_join = services_t.join(volumes_t,services_t.c.uuid == volumes_t.c.service_uuid)
    columns = [services_t.c.uuid, services_t.c.deleted, volumes_t.c.id, volumes_t.c.deleted]
    deleted_services_still_used_in_volumes_q = select(columns=columns).select_from(services_volumes_join).\
        where(and_(volumes_t.c.deleted == 0, services_t.c.deleted == 1))
    # return a dict indexed by service_uuid and with the value volume_id for deleted but still referenced services
    for (service_uuid, service_deleted, volume_id, volume_deleted) in deleted_services_still_used_in_volumes_q.execute():
        deleted_services_still_used_in_volumes[service_uuid] = volume_id
    return deleted_services_still_used_in_volumes
# delete services still defined where the corresponding volume is already deleted
def fix_deleted_services_still_used_in_volumes(meta, deleted_services_still_used_in_volumes):
    """Un-delete (deleted=0, deleted_at=None) the listed service rows."""
    services_t = Table('services', meta, autoload=True)
    for deleted_services_still_used_in_volumes_id in deleted_services_still_used_in_volumes:
        log.info("-- action: undeleting service uuid: %s", deleted_services_still_used_in_volumes_id)
        undelete_services_q = services_t.update().where(services_t.c.uuid == deleted_services_still_used_in_volumes_id).values(deleted=0,deleted_at=None)
        undelete_services_q.execute()
# establish an openstack connection
def makeOsConnection():
    """
    Build an openstack connection from the standard OS_* environment
    variables; exits the process (status 1) if the connection fails.
    """
    try:
        conn = connection.Connection(auth_url=os.getenv('OS_AUTH_URL'),
                                     project_name=os.getenv('OS_PROJECT_NAME'),
                                     project_domain_name=os.getenv('OS_PROJECT_DOMAIN_NAME'),
                                     username=os.getenv('OS_USERNAME'),
                                     user_domain_name=os.getenv('OS_USER_DOMAIN_NAME'),
                                     password=os.getenv('OS_PASSWORD'),
                                     identity_api_version="3")
    except Exception as e:
        log.warn("- PLEASE CHECK MANUALLY - problems connecting to openstack: %s",
                 str(e))
        sys.exit(1)
    return conn
# establish a database connection and return the handle
def makeConnection(db_url):
    """
    Establish a database connection and return (session, metadata, Base).

    Binding the engine to the metadata enables the legacy SQLAlchemy
    implicit execution (`query.execute()`) used throughout this script.
    """
    engine = create_engine(db_url)
    engine.connect()
    Session = sessionmaker(bind=engine)
    thisSession = Session()
    metadata = MetaData()
    metadata.bind = engine
    Base = declarative_base()
    return thisSession, metadata, Base
# return the database connection string from the config file
def get_db_url(config_file):
    """
    Return the database connection string from the [database] section of a
    cinder configuration file; exits the process (status 2) on failure.

    :param config_file: path to the cinder.conf style ini file.
    """
    # SafeConfigParser was a deprecated alias (removed in Python 3.12);
    # ConfigParser is the drop-in replacement
    parser = configparser.ConfigParser()
    try:
        parser.read(config_file)
        # raw=True: do not interpolate '%' characters in the connection string
        db_url = parser.get('database', 'connection', raw=True)
    except configparser.Error:
        # the old bare 'except:' also swallowed KeyboardInterrupt etc.;
        # only configuration problems should trigger the exit here
        log.info("ERROR: Check Cinder configuration file.")
        sys.exit(2)
    return db_url
# cmdline handling
def parse_cmdline_args():
    """
    Build and evaluate the command line parser for this script.

    :return: argparse.Namespace with .config, .dry_run and .fix_limit.
    """
    argparser = argparse.ArgumentParser()
    argparser.add_argument("--config", default='./cinder.conf',
                           help='configuration file')
    argparser.add_argument("--dry-run", action="store_true",
                           help='print only what would be done without actually doing it')
    argparser.add_argument("--fix-limit", default=25,
                           help='maximum number of inconsistencies to fix automatically - if there are more, automatic fixing is denied')
    return argparser.parse_args()
def main():
    """
    Entry point: connect to openstack and the cinder DB, then detect (and,
    unless --dry-run is given, repair) each known class of inconsistency.
    """
    try:
        args = parse_cmdline_args()
    except Exception as e:
        # NOTE(review): most exceptions carry no .strerror attribute, and
        # execution continues with 'args' unbound after this handler - a
        # parse failure would surface later as a NameError. Confirm intent.
        log.error("Check command line arguments (%s)", e.strerror)
    # connect to openstack
    conn = makeOsConnection()
    # connect to the DB
    db_url = get_db_url(args.config)
    cinder_session, cinder_metadata, cinder_Base = makeConnection(db_url)
    # fixing volume attachments at no longer existing instances
    orphan_volume_attachments = get_orphan_volume_attachments(cinder_metadata)
    nova_instances = get_nova_instances(conn)
    wrong_orphan_volume_attachments = get_wrong_orphan_volume_attachments(nova_instances, orphan_volume_attachments)
    if len(wrong_orphan_volume_attachments) != 0:
        log.info("- orphan volume attachments found:")
        # print out what we would delete
        for orphan_volume_attachment_id in wrong_orphan_volume_attachments:
            log.info("-- orphan volume attachment (id in cinder db: %s) for non existent instance in nova: %s", orphan_volume_attachment_id,
                     orphan_volume_attachments[orphan_volume_attachment_id])
        if not args.dry_run:
            log.info("- deleting orphan volume attachment inconsistencies found")
            fix_wrong_orphan_volume_attachments(cinder_metadata, wrong_orphan_volume_attachments, args.fix_limit)
    else:
        log.info("- no orphan volume attachments found")
    # fixing possible volumes in state "error-deleting"
    error_deleting_volumes = get_error_deleting_volumes(cinder_metadata)
    if len(error_deleting_volumes) != 0:
        log.info("- volumes in state error_deleting found")
        # print out what we would delete
        for error_deleting_volumes_id in error_deleting_volumes:
            log.info("-- volume id: %s", error_deleting_volumes_id)
        if not args.dry_run:
            log.info("- deleting volumes in state error_deleting")
            fix_error_deleting_volumes(cinder_metadata, error_deleting_volumes)
    else:
        log.info("- no volumes in state error_deleting found")
    # fixing possible snapshots in state "error-deleting"
    error_deleting_snapshots = get_error_deleting_snapshots(cinder_metadata)
    if len(error_deleting_snapshots) != 0:
        log.info("- snapshots in state error_deleting found")
        # print out what we would delete
        for error_deleting_snapshots_id in error_deleting_snapshots:
            log.info("-- snapshot id: %s", error_deleting_snapshots_id)
        if not args.dry_run:
            log.info("- deleting snapshots in state error_deleting")
            fix_error_deleting_snapshots(cinder_metadata, error_deleting_snapshots)
    else:
        log.info("- no snapshots in state error_deleting found")
    # fixing possible wrong admin_metadata entries
    wrong_admin_metadata = get_wrong_volume_admin_metadata(cinder_metadata)
    if len(wrong_admin_metadata) != 0:
        log.info("- volume_admin_metadata inconsistencies found")
        # print out what we would delete
        for volume_admin_metadata_id in wrong_admin_metadata:
            log.info("-- volume_admin_metadata id: %s - deleted volume id: %s", volume_admin_metadata_id, wrong_admin_metadata[volume_admin_metadata_id])
        if not args.dry_run:
            log.info("- removing volume_admin_metadata inconsistencies found")
            fix_wrong_volume_admin_metadata(cinder_metadata, wrong_admin_metadata)
    else:
        log.info("- volume_admin_metadata entries are consistent")
    # fixing possible wrong glance_metadata entries
    wrong_glance_metadata = get_wrong_volume_glance_metadata(cinder_metadata)
    if len(wrong_glance_metadata) != 0:
        log.info("- volume_glance_metadata inconsistencies found")
        # print out what we would delete
        for volume_glance_metadata_id in wrong_glance_metadata:
            log.info("-- volume_glance_metadata id: %s - deleted volume id: %s", volume_glance_metadata_id, wrong_glance_metadata[volume_glance_metadata_id])
        if not args.dry_run:
            log.info("- removing volume_glance_metadata inconsistencies found")
            fix_wrong_volume_glance_metadata(cinder_metadata, wrong_glance_metadata)
    else:
        log.info("- volume_glance_metadata entries are consistent")
    # fixing possible wrong metadata entries
    wrong_metadata = get_wrong_volume_metadata(cinder_metadata)
    if len(wrong_metadata) != 0:
        log.info("- volume_metadata inconsistencies found")
        # print out what we would delete
        for volume_metadata_id in wrong_metadata:
            log.info("-- volume_metadata id: %s - deleted volume id: %s", volume_metadata_id, wrong_metadata[volume_metadata_id])
        if not args.dry_run:
            log.info("- removing volume_metadata inconsistencies found")
            fix_wrong_volume_metadata(cinder_metadata, wrong_metadata)
    else:
        log.info("- volume_metadata entries are consistent")
    # fixing possible wrong attachment entries
    wrong_attachments = get_wrong_volume_attachments(cinder_metadata)
    if len(wrong_attachments) != 0:
        log.info("- volume attachment inconsistencies found")
        # print out what we would delete
        for volume_attachment_id in wrong_attachments:
            log.info("-- volume attachment id: %s - deleted volume id: %s", volume_attachment_id, wrong_attachments[volume_attachment_id])
        if not args.dry_run:
            log.info("- removing volume attachment inconsistencies found")
            fix_wrong_volume_attachments(cinder_metadata, wrong_attachments, args.fix_limit)
    else:
        log.info("- volume attachments are consistent")
    # fixing possible missing deleted_at timestamps in some tables
    # tables which sometimes have missing deleted_at values
    table_names = [ 'snapshots', 'volume_attachment' ]
    missing_deleted_at = get_missing_deleted_at(cinder_metadata, table_names)
    if len(missing_deleted_at) != 0:
        log.info("- missing deleted_at values found:")
        # print out what we would delete
        for missing_deleted_at_id in missing_deleted_at:
            log.info("--- id %s of the %s table is missing deleted_at time", missing_deleted_at_id, missing_deleted_at[missing_deleted_at_id])
        if not args.dry_run:
            log.info("- setting missing deleted_at values")
            fix_missing_deleted_at(cinder_metadata, table_names)
    else:
        log.info("- no missing deleted_at values")
    # fixing deleted services that live volumes still reference
    deleted_services_still_used_in_volumes = get_deleted_services_still_used_in_volumes(cinder_metadata)
    if len(deleted_services_still_used_in_volumes) != 0:
        log.info("- deleted services still used in volumes found:")
        # print out what we would delete
        for deleted_services_still_used_in_volumes_id in deleted_services_still_used_in_volumes:
            log.info("--- deleted service uuid %s still used in volumes table entry %s", deleted_services_still_used_in_volumes_id, deleted_services_still_used_in_volumes[deleted_services_still_used_in_volumes_id])
        if not args.dry_run:
            log.info("- undeleting service uuid still used in volumes table")
            fix_deleted_services_still_used_in_volumes(cinder_metadata, deleted_services_still_used_in_volumes)
    else:
        log.info("- deleted services still used in volumes")
if __name__ == "__main__":
main()
|
[
"sqlalchemy.MetaData",
"argparse.ArgumentParser",
"logging.basicConfig",
"sqlalchemy.and_",
"sqlalchemy.select",
"datetime.datetime.utcnow",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.Table",
"configparser.SafeConfigParser",
"sqlalchemy.create_engine",
"sys.exit",
"sqlalchemy.orm.sessionmaker",
"os.getenv",
"logging.getLogger"
] |
[((1047, 1074), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1064, 1074), False, 'import logging\n'), ((1075, 1151), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)-15s %(message)s"""'}), "(level=logging.INFO, format='%(asctime)-15s %(message)s')\n", (1094, 1151), False, 'import logging\n'), ((2289, 2336), 'sqlalchemy.Table', 'Table', (['"""volume_attachment"""', 'meta'], {'autoload': '(True)'}), "('volume_attachment', meta, autoload=True)\n", (2294, 2336), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((4595, 4632), 'sqlalchemy.Table', 'Table', (['"""volumes"""', 'meta'], {'autoload': '(True)'}), "('volumes', meta, autoload=True)\n", (4600, 4632), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((5078, 5115), 'sqlalchemy.Table', 'Table', (['"""volumes"""', 'meta'], {'autoload': '(True)'}), "('volumes', meta, autoload=True)\n", (5083, 5115), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((5142, 5189), 'sqlalchemy.Table', 'Table', (['"""volume_attachment"""', 'meta'], {'autoload': '(True)'}), "('volume_attachment', meta, autoload=True)\n", (5147, 5189), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((5214, 5259), 'sqlalchemy.Table', 'Table', (['"""volume_metadata"""', 'meta'], {'autoload': '(True)'}), "('volume_metadata', meta, autoload=True)\n", (5219, 5259), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((5290, 5341), 'sqlalchemy.Table', 'Table', (['"""volume_admin_metadata"""', 'meta'], {'autoload': '(True)'}), "('volume_admin_metadata', meta, autoload=True)\n", (5295, 5341), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((6942, 6981), 'sqlalchemy.Table', 'Table', (['"""snapshots"""', 'meta'], {'autoload': '(True)'}), "('snapshots', meta, autoload=True)\n", (6947, 6981), 
False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((7449, 7488), 'sqlalchemy.Table', 'Table', (['"""snapshots"""', 'meta'], {'autoload': '(True)'}), "('snapshots', meta, autoload=True)\n", (7454, 7488), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((8103, 8154), 'sqlalchemy.Table', 'Table', (['"""volume_admin_metadata"""', 'meta'], {'autoload': '(True)'}), "('volume_admin_metadata', meta, autoload=True)\n", (8108, 8154), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((8171, 8208), 'sqlalchemy.Table', 'Table', (['"""volumes"""', 'meta'], {'autoload': '(True)'}), "('volumes', meta, autoload=True)\n", (8176, 8208), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((9180, 9231), 'sqlalchemy.Table', 'Table', (['"""volume_admin_metadata"""', 'meta'], {'autoload': '(True)'}), "('volume_admin_metadata', meta, autoload=True)\n", (9185, 9231), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((9900, 9952), 'sqlalchemy.Table', 'Table', (['"""volume_glance_metadata"""', 'meta'], {'autoload': '(True)'}), "('volume_glance_metadata', meta, autoload=True)\n", (9905, 9952), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((9969, 10006), 'sqlalchemy.Table', 'Table', (['"""volumes"""', 'meta'], {'autoload': '(True)'}), "('volumes', meta, autoload=True)\n", (9974, 10006), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((10998, 11050), 'sqlalchemy.Table', 'Table', (['"""volume_glance_metadata"""', 'meta'], {'autoload': '(True)'}), "('volume_glance_metadata', meta, autoload=True)\n", (11003, 11050), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((11700, 11745), 'sqlalchemy.Table', 'Table', (['"""volume_metadata"""', 'meta'], {'autoload': '(True)'}), "('volume_metadata', meta, autoload=True)\n", (11705, 11745), False, 'from 
sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((11762, 11799), 'sqlalchemy.Table', 'Table', (['"""volumes"""', 'meta'], {'autoload': '(True)'}), "('volumes', meta, autoload=True)\n", (11767, 11799), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((12651, 12696), 'sqlalchemy.Table', 'Table', (['"""volume_metadata"""', 'meta'], {'autoload': '(True)'}), "('volume_metadata', meta, autoload=True)\n", (12656, 12696), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((13293, 13340), 'sqlalchemy.Table', 'Table', (['"""volume_attachment"""', 'meta'], {'autoload': '(True)'}), "('volume_attachment', meta, autoload=True)\n", (13298, 13340), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((13357, 13394), 'sqlalchemy.Table', 'Table', (['"""volumes"""', 'meta'], {'autoload': '(True)'}), "('volumes', meta, autoload=True)\n", (13362, 13394), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((15639, 15665), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (15663, 15665), False, 'import datetime\n'), ((16273, 16311), 'sqlalchemy.Table', 'Table', (['"""services"""', 'meta'], {'autoload': '(True)'}), "('services', meta, autoload=True)\n", (16278, 16311), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((16328, 16365), 'sqlalchemy.Table', 'Table', (['"""volumes"""', 'meta'], {'autoload': '(True)'}), "('volumes', meta, autoload=True)\n", (16333, 16365), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((17298, 17336), 'sqlalchemy.Table', 'Table', (['"""services"""', 'meta'], {'autoload': '(True)'}), "('services', meta, autoload=True)\n", (17303, 17336), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((18612, 18633), 'sqlalchemy.create_engine', 'create_engine', (['db_url'], {}), '(db_url)\n', (18625, 18633), False, 
'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((18669, 18694), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (18681, 18694), False, 'from sqlalchemy.orm import sessionmaker\n'), ((18738, 18748), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (18746, 18748), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((18787, 18805), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (18803, 18805), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((18950, 18981), 'configparser.SafeConfigParser', 'configparser.SafeConfigParser', ([], {}), '()\n', (18979, 18981), False, 'import configparser\n'), ((19258, 19283), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (19281, 19283), False, 'import argparse\n'), ((3738, 3785), 'sqlalchemy.Table', 'Table', (['"""volume_attachment"""', 'meta'], {'autoload': '(True)'}), "('volume_attachment', meta, autoload=True)\n", (3743, 3785), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((4703, 4773), 'sqlalchemy.and_', 'and_', (["(volumes_t.c.status == 'error_deleting')", '(volumes_t.c.deleted == 0)'], {}), "(volumes_t.c.status == 'error_deleting', volumes_t.c.deleted == 0)\n", (4707, 4773), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((5418, 5444), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5442, 5444), False, 'import datetime\n'), ((7056, 7130), 'sqlalchemy.and_', 'and_', (["(snapshots_t.c.status == 'error_deleting')", '(snapshots_t.c.deleted == 0)'], {}), "(snapshots_t.c.status == 'error_deleting', snapshots_t.c.deleted == 0)\n", (7060, 7130), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((7654, 7680), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (7678, 7680), False, 'import datetime\n'), 
((8555, 8625), 'sqlalchemy.and_', 'and_', (['(volumes_t.c.deleted == 1)', '(volume_admin_metadata_t.c.deleted == 0)'], {}), '(volumes_t.c.deleted == 1, volume_admin_metadata_t.c.deleted == 0)\n', (8559, 8625), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((9400, 9426), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (9424, 9426), False, 'import datetime\n'), ((10360, 10431), 'sqlalchemy.and_', 'and_', (['(volumes_t.c.deleted == 1)', '(volume_glance_metadata_t.c.deleted == 0)'], {}), '(volumes_t.c.deleted == 1, volume_glance_metadata_t.c.deleted == 0)\n', (10364, 10431), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((11223, 11249), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (11247, 11249), False, 'import datetime\n'), ((12104, 12168), 'sqlalchemy.and_', 'and_', (['(volumes_t.c.deleted == 1)', '(volume_metadata_t.c.deleted == 0)'], {}), '(volumes_t.c.deleted == 1, volume_metadata_t.c.deleted == 0)\n', (12108, 12168), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((12841, 12867), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (12865, 12867), False, 'import datetime\n'), ((13713, 13779), 'sqlalchemy.and_', 'and_', (['(volumes_t.c.deleted == 1)', '(volume_attachment_t.c.deleted == 0)'], {}), '(volumes_t.c.deleted == 1, volume_attachment_t.c.deleted == 0)\n', (13717, 13779), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((14356, 14403), 'sqlalchemy.Table', 'Table', (['"""volume_attachment"""', 'meta'], {'autoload': '(True)'}), "('volume_attachment', meta, autoload=True)\n", (14361, 14403), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((15203, 15232), 'sqlalchemy.Table', 'Table', (['t', 'meta'], {'autoload': '(True)'}), '(t, meta, autoload=True)\n', (15208, 15232), False, 'from sqlalchemy import and_, MetaData, 
select, Table, create_engine\n'), ((15712, 15741), 'sqlalchemy.Table', 'Table', (['t', 'meta'], {'autoload': '(True)'}), '(t, meta, autoload=True)\n', (15717, 15741), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((16682, 16739), 'sqlalchemy.and_', 'and_', (['(volumes_t.c.deleted == 0)', '(services_t.c.deleted == 1)'], {}), '(volumes_t.c.deleted == 0, services_t.c.deleted == 1)\n', (16686, 16739), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((1770, 1781), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1778, 1781), False, 'import sys\n'), ((1935, 1946), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1943, 1946), False, 'import sys\n'), ((2498, 2545), 'sqlalchemy.and_', 'and_', (['(orphan_volume_attachment_t.c.deleted == 0)'], {}), '(orphan_volume_attachment_t.c.deleted == 0)\n', (2502, 2545), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((3987, 4013), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (4011, 4013), False, 'import datetime\n'), ((4664, 4696), 'sqlalchemy.select', 'select', ([], {'columns': '[volumes_t.c.id]'}), '(columns=[volumes_t.c.id])\n', (4670, 4696), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((7015, 7049), 'sqlalchemy.select', 'select', ([], {'columns': '[snapshots_t.c.id]'}), '(columns=[snapshots_t.c.id])\n', (7021, 7049), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((14569, 14595), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (14593, 14595), False, 'import datetime\n'), ((15309, 15371), 'sqlalchemy.and_', 'and_', (['(a_table_t.c.deleted == 1)', '(a_table_t.c.deleted_at == None)'], {}), '(a_table_t.c.deleted == 1, a_table_t.c.deleted_at == None)\n', (15313, 15371), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((18484, 18495), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', 
(18492, 18495), False, 'import sys\n'), ((19168, 19179), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (19176, 19179), False, 'import sys\n'), ((17842, 17866), 'os.getenv', 'os.getenv', (['"""OS_AUTH_URL"""'], {}), "('OS_AUTH_URL')\n", (17851, 17866), False, 'import os\n'), ((17918, 17946), 'os.getenv', 'os.getenv', (['"""OS_PROJECT_NAME"""'], {}), "('OS_PROJECT_NAME')\n", (17927, 17946), False, 'import os\n'), ((18005, 18040), 'os.getenv', 'os.getenv', (['"""OS_PROJECT_DOMAIN_NAME"""'], {}), "('OS_PROJECT_DOMAIN_NAME')\n", (18014, 18040), False, 'import os\n'), ((18088, 18112), 'os.getenv', 'os.getenv', (['"""OS_USERNAME"""'], {}), "('OS_USERNAME')\n", (18097, 18112), False, 'import os\n'), ((18168, 18200), 'os.getenv', 'os.getenv', (['"""OS_USER_DOMAIN_NAME"""'], {}), "('OS_USER_DOMAIN_NAME')\n", (18177, 18200), False, 'import os\n'), ((18248, 18272), 'os.getenv', 'os.getenv', (['"""OS_PASSWORD"""'], {}), "('OS_PASSWORD')\n", (18257, 18272), False, 'import os\n'), ((8482, 8505), 'sqlalchemy.select', 'select', ([], {'columns': 'columns'}), '(columns=columns)\n', (8488, 8505), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((10286, 10309), 'sqlalchemy.select', 'select', ([], {'columns': 'columns'}), '(columns=columns)\n', (10292, 10309), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((12037, 12060), 'sqlalchemy.select', 'select', ([], {'columns': 'columns'}), '(columns=columns)\n', (12043, 12060), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((13644, 13667), 'sqlalchemy.select', 'select', ([], {'columns': 'columns'}), '(columns=columns)\n', (13650, 13667), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n'), ((15910, 15972), 'sqlalchemy.and_', 'and_', (['(a_table_t.c.deleted == 1)', '(a_table_t.c.deleted_at == None)'], {}), '(a_table_t.c.deleted == 1, a_table_t.c.deleted_at == None)\n', (15914, 15972), False, 'from sqlalchemy import 
and_, MetaData, select, Table, create_engine\n'), ((16607, 16630), 'sqlalchemy.select', 'select', ([], {'columns': 'columns'}), '(columns=columns)\n', (16613, 16630), False, 'from sqlalchemy import and_, MetaData, select, Table, create_engine\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import object
# Keys for referencing reads data: a stored location vs. an external URL
# (values follow the "genestack.<kind>:reads" naming scheme).
READS_LOCATION = 'genestack.location:reads'
READS_LINK = 'genestack.url:reads'
class Key(object):
    """Field names used in a reads format map (see compose_format_map)."""
    SPACE = 'space'
    FORMAT = 'format'
    TYPE = 'type'
class Space(object):
    """Allowed values for the 'space' component of a format map."""
    BASESPACE = 'basespace'
    COLORSPACE = 'colorspace'
class Format(object):
    """Allowed values for the 'format' component of a format map."""
    PHRED33 = 'phred33'
    PHRED64 = 'phred64'
    FASTA_QUAL = 'fasta-qual'
    SRA = 'sra'
    SFF = 'sff'
    FAST5 = 'fast5'
class Type(object):
    """Allowed values for the 'type' component of a format map (read layout)."""
    SINGLE = 'single'
    PAIRED = 'paired'
    PAIRED_WITH_UNPAIRED = 'paired-with-unpaired'
def compose_format_map(space, file_format, file_type):
    """Build a format map describing reads data.

    :param space: value for the ``Key.SPACE`` field (see :class:`Space`)
    :param file_format: value for the ``Key.FORMAT`` field (see :class:`Format`)
    :param file_type: value for the ``Key.TYPE`` field (see :class:`Type`)
    :return: dict mapping the three ``Key`` field names to the given values
    """
    format_map = dict()
    format_map[Key.SPACE] = space
    format_map[Key.FORMAT] = file_format
    format_map[Key.TYPE] = file_type
    return format_map
|
[
"future.standard_library.install_aliases"
] |
[((210, 244), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (242, 244), False, 'from future import standard_library\n')]
|
from optparse import OptionParser
import os,sys
import itertools
import re
def readSrc(src_dir):
    """Collect the contents of every ``.cpp`` file found under *src_dir*.

    Walks the directory tree; for each C++ source file a marker line
    ``"New_file <name>"`` is emitted, followed by the file's lines.

    :param src_dir: root directory to scan recursively
    :return: flat list of marker lines and source lines
    """
    lines = []
    for root, dirs, files in os.walk(src_dir):
        for file in files:
            if file.endswith(".cpp"):
                lines += ["New_file " + file]
                # BUG FIX: the original opened the file without ever closing
                # it; use a context manager so the handle is released promptly.
                with open(os.path.join(root, file)) as src:
                    lines += src.read().splitlines()
    return lines
def writeRunLog(dico, filename):
    """Write a human-readable summary of the parsed XD annotations.

    For each class: its description, then every parameter with its
    description and, nested below, any dictionary entries.

    :param dico: mapping produced by getLinesWithRegExp
    :param filename: path of the log file to (over)write
    """
    parts = []
    for clas in dico:
        parts.append("class : " + clas + "\n")
        parts.append("=======\n")
        parts.append(" - Desc : " + dico[clas]["desc"] + "\n")
        params = dico[clas]["parameters"]
        if len(params) > 0:
            parts.append(" - Params : \n")
            parts.append("   ********** \n")
        for param in params:
            parts.append("   + Param : " + param + " ==> Desc : " + params[param]["desc"] + "\n")
            parts.append("     -----\n")
            dicts = params[param]["dict"]
            if len(dicts) > 0:
                parts.append("     + Dicts : \n")
                parts.append("       +++++ \n")
            for dic in dicts:
                parts.append("        Dict : " + dic + " ==> Desc : " + dicts[dic]["desc"] + "\n")
                parts.append("        ----\n")
    with open(filename, "w") as log_file:
        log_file.write("".join(parts))
    return
def getLinesWithRegExp(lines):
    """Parse XD annotation comments out of C++ source lines.

    Recognized markers (for xd in XD, 2XD, 3XD):

    * ``New_file <name>``    -- start of a new source file
    * ``// xd <class> ...``  -- class declaration; ``attr``/``ref`` variants
      attach a parameter or a reference to the most recent class
    * ``// xd_ADD_P ...``    -- parameter declaration for the current class
    * ``// xd_ADD_DICO ...`` -- dictionary entry for the current parameter

    :param lines: list of source lines (output of readSrc)
    :return: dict mapping class name to {"desc", "parameters", "refs"}
    :raises Exception: when an attr/ref/parameter/dictionary marker appears
        before any class declaration in the current file
    """
    dico={}
    for xd in ["XD","2XD","3XD"]:
        debut=0
        for line in lines:
            # append a blank so the regexes below always see a trailing
            # separator after the last word of the comment
            line+=" "
            if ((len(line.strip())>=8) and (line.split()[0]=="New_file")):
                debut=1
                filename=line.split()[1]
            # NOTE(review): "//...// xd" comment forms are not handled here;
            # a branch for them was removed because it broke for mpcube.
            elif (re.findall("//[ ]*"+xd+"[ ]+",line)):
                # class / attr / ref declaration: split the words after "xd"
                li=re.findall(re.escape(xd)+"(.*)"+re.escape(' '),line)[0].split(' ')
                li = [x for x in li if x.strip()]
                desc=re.split("//[ ]*"+xd+"[ ]+",line)[-1]
                if li[0]=="attr":
                    if (debut<2):
                        raise Exception("error in "+filename+" first line XD "+line)
                    desc2=li[1:]
                    dico_p={"desc":' '.join(desc2)}
                    dico_p["dict"]={}
                    dico_p["numero"]=len(dico[nameClass]["parameters"])
                    dico[nameClass]['parameters'][li[1]]=dico_p
                elif li[0]=="ref":
                    if (debut<2):
                        raise Exception("error in "+filename+" first line XD "+line)
                    dico[nameClass]["refs"].append([li[1],li[2]])
                else:
                    # plain class declaration: becomes the current class
                    nameClass=li[0]
                    dico[nameClass]={"desc":desc,"parameters":{},"refs":[]}
                    debut=2
            elif re.findall("//[ ]*"+xd+"_ADD_P+",line):
                # parameter declaration for the current class
                if (debut<2):
                    raise Exception("error in "+filename+" first line XD "+line)
                dico_param={}
                optionnel=True
                if (re.findall("Param::REQUIRED",line)):
                    optionnel=False
                print("line:",line)
                param=line.split('"')[1].lower()
                mparam=param.split("|")[0]
                if mparam=="lambda":
                    # "lambda" is a Python keyword; rename so it stays usable
                    mparam="lambda_u"
                dico_param["mparm"]=mparam
                dico_param["optionnel"]=optionnel
                dr=line.split(xd+"_ADD_P")[-1].split()
                desc=param+" "+dr[0]+" "+mparam+" "+str(int(optionnel))+" "+' '.join(dr[1:])
                dico_param["desc"]=desc
                dico_param["numero"]=len(dico[nameClass]["parameters"])
                dico_param["dict"]={}
                dico[nameClass]["parameters"][param]=dico_param
            elif re.findall("//[ ]*"+xd+"_ADD_DICO+",line):
                # dictionary entry for the most recent parameter
                if (debut<2):
                    # BUG FIX: was `raise 'jjjjjjjj'` -- raising a string is a
                    # TypeError in Python 3; raise a real exception instead,
                    # consistent with the other branches.
                    raise Exception("error in "+filename+" first line XD "+line)
                dico_dict={}
                dico_dict["desc"]=line
                dict_name=line.split('"')[1].lower()
                dico[nameClass]["parameters"][param]["dict"][dict_name]=dico_dict
    return dico
def writeOutPutFile(dico, filename, st_add=""):
    """Write the TRAD_2 description file from the parsed class dictionary.

    Classes are emitted with their parameters ordered by their "numero"
    field; a parameter with dictionary entries becomes a
    ``chaine(into=[...])`` attribute. C++-ish type names are then
    translated to the TRAD_2 vocabulary and single quotes are escaped.

    :param dico: mapping produced by getLinesWithRegExp
    :param filename: output path (overwritten)
    :param st_add: text written verbatim before the generated content
    :raises ValueError: if a class' parameter numbering has gaps
    """
    st = ""
    for clas in list(dico.keys()):
        st += dico[clas]["desc"] + "\n"
        Params = dico[clas]["parameters"]
        # emit parameters in "numero" order
        for i in range(len(list(Params.keys()))):
            ok = 0
            for param in Params.keys():
                if (i == Params[param]["numero"]):
                    ok = 1
                    break
            if (ok == 0):
                print("pb", clas, "nmero", i, "params", Params)
                # BUG FIX: was `1/0`, which crashed with an unrelated
                # ZeroDivisionError; raise a descriptive error instead.
                raise ValueError("class %s has no parameter with numero %d" % (clas, i))
            if (len(list(Params[param]["dict"].keys())) == 0):
                st += "  attr " + Params[param]["desc"] + "\n"
            str_dico = "  attr " + param + " chaine(into=["
            for dic in list(Params[param]["dict"].keys()):
                str_dico += '"' + dic + '",'
            if (len(list(Params[param]["dict"].keys())) > 0):
                # drop the first two words (name and type) of the stored desc
                desc = Params[param]["desc"].split()[2:]
                st += str_dico + "]) " + ' '.join(desc) + "\n"
        for ref in dico[clas]["refs"]:
            st += "  ref " + ref[0] + " " + ref[1] + "\n"
    # translate type keywords to the TRAD_2 vocabulary
    st = st.replace(" double ", " floattant ")
    st = st.replace(" flag ", " rien ")
    st = st.replace(" int ", " entier ")
    # escape single quotes (and collapse doubled escapes)
    st = st.replace(r"'", r"\'")
    st = st.replace(r"\\'", r"\'")
    with open(filename, "w") as fi:
        fi.write(st_add)
        fi.write(st)
        fi.write("\n")
    return
def run(result_dir, src_dir):
    """Drive the TRAD_2 generation.

    Parses the sources under *src_dir*, then writes a ``run.log`` summary
    and the ``TRAD_2`` output file into *result_dir*. The generated file
    starts with the contents of ``TRAD_2.org``, optionally followed by
    ``TRAD2_ajout0`` when that file exists.
    """
    parsed = getLinesWithRegExp(readSrc(src_dir))
    writeRunLog(parsed, os.path.join(result_dir, "run.log"))
    with open(os.path.join(result_dir, "TRAD_2.org"), "r") as org_file:
        header = org_file.read()
    extra_path = os.path.join(result_dir, "TRAD2_ajout0")
    if os.path.exists(extra_path):
        with open(extra_path, "r") as extra_file:
            header += extra_file.read()
    writeOutPutFile(parsed, os.path.join(result_dir, "TRAD_2"), header)
    return
def options_script(argv):
    """Parse command-line options for the TRAD_2 generator.

    Options:
      -r/--result <dir>  results directory (default: cwd; created if missing)
      -s/--src <dir>     source directory to scan (default: cwd; must exist)

    Exits with status 1 on a bad directory; prints help and exits on
    unexpected positional arguments.

    :return: tuple ``(result_dir, src_dir)`` of absolute, expanded paths
    """
    parser = OptionParser(usage="usage: %prog [options]")
    parser.add_option("-r", "--result", dest="result_dir", type="string",
                      metavar="<result_dir>",
                      help="choose results directory")
    parser.add_option("-s", "--src", dest="src_dir", type="string",
                      metavar="<src_dir>",
                      help="choose src directory")
    parser.set_defaults(result_dir=os.getcwd())
    parser.set_defaults(src_dir=os.getcwd())
    (options, args) = parser.parse_args(argv)
    if len(args) > 0:
        parser.print_help()
        sys.exit(1)
    if options.result_dir != os.getcwd():
        # interpret a non-default result dir relative to cwd, creating it on demand
        options.result_dir = os.path.join(os.getcwd(), options.result_dir)
        if not os.path.isdir(options.result_dir):
            os.mkdir(options.result_dir)
    result_dir = os.path.expanduser(options.result_dir)
    result_dir = os.path.expandvars(result_dir)
    result_dir = os.path.abspath(result_dir)
    if not os.path.isdir(result_dir):
        sys.stderr.write('Error: result dir "' + result_dir + '" is not a directory\n')
        sys.exit(1)
    src_dir = options.src_dir
    if src_dir is not None:
        # BUG FIX: the expanduser() result was previously discarded, so
        # "~" paths were never expanded for the source dir; keep it.
        src_dir = os.path.expanduser(src_dir)
        src_dir = os.path.expandvars(src_dir)
        src_dir = os.path.abspath(src_dir)
        if not os.path.isdir(src_dir):
            sys.stderr.write('Error: source dir "' + src_dir + '" is not a directory\n')
            sys.exit(1)
    return result_dir, src_dir
def main(argv):
    """Script entry point: parse *argv*, then generate the TRAD_2 files."""
    run(*options_script(argv))


if __name__ == "__main__":
    main(sys.argv[1:])
|
[
"os.mkdir",
"os.path.abspath",
"re.split",
"os.path.join",
"optparse.OptionParser",
"os.getcwd",
"os.path.isdir",
"os.walk",
"os.path.exists",
"re.escape",
"os.path.expandvars",
"re.findall",
"sys.stderr.write",
"os.path.expanduser",
"sys.exit"
] |
[((141, 157), 'os.walk', 'os.walk', (['src_dir'], {}), '(src_dir)\n', (148, 157), False, 'import os, sys\n'), ((6467, 6502), 'os.path.join', 'os.path.join', (['result_dir', '"""run.log"""'], {}), "(result_dir, 'run.log')\n", (6479, 6502), False, 'import os, sys\n'), ((6547, 6585), 'os.path.join', 'os.path.join', (['result_dir', '"""TRAD_2.org"""'], {}), "(result_dir, 'TRAD_2.org')\n", (6559, 6585), False, 'import os, sys\n'), ((6675, 6715), 'os.path.join', 'os.path.join', (['result_dir', '"""TRAD2_ajout0"""'], {}), "(result_dir, 'TRAD2_ajout0')\n", (6687, 6715), False, 'import os, sys\n'), ((6723, 6747), 'os.path.exists', 'os.path.exists', (['trad_add'], {}), '(trad_add)\n', (6737, 6747), False, 'import os, sys\n'), ((6838, 6872), 'os.path.join', 'os.path.join', (['result_dir', '"""TRAD_2"""'], {}), "(result_dir, 'TRAD_2')\n", (6850, 6872), False, 'import os, sys\n'), ((6965, 7009), 'optparse.OptionParser', 'OptionParser', ([], {'usage': '"""usage: %prog [options]"""'}), "(usage='usage: %prog [options]')\n", (6977, 7009), False, 'from optparse import OptionParser\n'), ((7828, 7866), 'os.path.expanduser', 'os.path.expanduser', (['options.result_dir'], {}), '(options.result_dir)\n', (7846, 7866), False, 'import os, sys\n'), ((7884, 7914), 'os.path.expandvars', 'os.path.expandvars', (['result_dir'], {}), '(result_dir)\n', (7902, 7914), False, 'import os, sys\n'), ((7932, 7959), 'os.path.abspath', 'os.path.abspath', (['result_dir'], {}), '(result_dir)\n', (7947, 7959), False, 'import os, sys\n'), ((7549, 7560), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7557, 7560), False, 'import os, sys\n'), ((7604, 7615), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7613, 7615), False, 'import os, sys\n'), ((7971, 7996), 'os.path.isdir', 'os.path.isdir', (['result_dir'], {}), '(result_dir)\n', (7984, 7996), False, 'import os, sys\n'), ((8006, 8085), 'sys.stderr.write', 'sys.stderr.write', (['(\'Error: result dir "\' + result_dir + \'" is not a directory\\n\')'], {}), 
'(\'Error: result dir "\' + result_dir + \'" is not a directory\\n\')\n', (8022, 8085), False, 'import os, sys\n'), ((8096, 8107), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8104, 8107), False, 'import os, sys\n'), ((8182, 8217), 'os.path.expanduser', 'os.path.expanduser', (['options.src_dir'], {}), '(options.src_dir)\n', (8200, 8217), False, 'import os, sys\n'), ((8236, 8263), 'os.path.expandvars', 'os.path.expandvars', (['src_dir'], {}), '(src_dir)\n', (8254, 8263), False, 'import os, sys\n'), ((8282, 8306), 'os.path.abspath', 'os.path.abspath', (['src_dir'], {}), '(src_dir)\n', (8297, 8306), False, 'import os, sys\n'), ((7385, 7396), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7394, 7396), False, 'import os, sys\n'), ((7430, 7441), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7439, 7441), False, 'import os, sys\n'), ((7657, 7668), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7666, 7668), False, 'import os, sys\n'), ((7704, 7737), 'os.path.isdir', 'os.path.isdir', (['options.result_dir'], {}), '(options.result_dir)\n', (7717, 7737), False, 'import os, sys\n'), ((7751, 7779), 'os.mkdir', 'os.mkdir', (['options.result_dir'], {}), '(options.result_dir)\n', (7759, 7779), False, 'import os, sys\n'), ((8322, 8344), 'os.path.isdir', 'os.path.isdir', (['src_dir'], {}), '(src_dir)\n', (8335, 8344), False, 'import os, sys\n'), ((8358, 8434), 'sys.stderr.write', 'sys.stderr.write', (['(\'Error: source dir "\' + src_dir + \'" is not a directory\\n\')'], {}), '(\'Error: source dir "\' + src_dir + \'" is not a directory\\n\')\n', (8374, 8434), False, 'import os, sys\n'), ((8449, 8460), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8457, 8460), False, 'import os, sys\n'), ((2031, 2071), 're.findall', 're.findall', (["('//[ ]*' + xd + '[ ]+')", 'line'], {}), "('//[ ]*' + xd + '[ ]+', line)\n", (2041, 2071), False, 'import re\n'), ((3326, 3369), 're.findall', 're.findall', (["('//[ ]*' + xd + '_ADD_P+')", 'line'], {}), "('//[ ]*' + xd + '_ADD_P+', line)\n", (3336, 
3369), False, 'import re\n'), ((2259, 2297), 're.split', 're.split', (["('//[ ]*' + xd + '[ ]+')", 'line'], {}), "('//[ ]*' + xd + '[ ]+', line)\n", (2267, 2297), False, 'import re\n'), ((3602, 3637), 're.findall', 're.findall', (['"""Param::REQUIRED"""', 'line'], {}), "('Param::REQUIRED', line)\n", (3612, 3637), False, 'import re\n'), ((4396, 4442), 're.findall', 're.findall', (["('//[ ]*' + xd + '_ADD_DICO+')", 'line'], {}), "('//[ ]*' + xd + '_ADD_DICO+', line)\n", (4406, 4442), False, 'import re\n'), ((301, 325), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (313, 325), False, 'import os, sys\n'), ((2153, 2167), 're.escape', 're.escape', (['""" """'], {}), "(' ')\n", (2162, 2167), False, 'import re\n'), ((2132, 2145), 're.escape', 're.escape', (['xd'], {}), '(xd)\n', (2141, 2145), False, 'import re\n')]
|
from importlib.resources import path
import sys
import os
import shutil
from git import Repo
from subprocess import call
from git import RemoteProgress
import git
from tqdm import tqdm
from pathlib import Path
# Install directory: ~/Documents\server.
# NOTE(review): "\s" is not a recognized escape, so "\server" stays a literal
# backslash + "server" -- Windows-only path building (the `del` commands below
# are Windows-only too); os.path.join(...) would be safer. TODO confirm.
dir_path = (os.path.expanduser('~/Documents') + "\server")
os.chdir(dir_path)
gitaddress = str("https://github.com/0xol/server")
# Ask the user which branch of the server repo to install.
print("what server version would you like to install")
print("format is 'client-version'")
print("example 'forge-1.16.5' or 'vanilla-1.7.10'")
print("for lists of supported server version check https://github.com/0xol/server and check under branches")
branch = input()
# Drop any previous clone's git metadata (Windows `del`).
os.system("del /F /S /Q /A .git")
os.system("del /F /S /Q /A .git")  # run twice in case the first pass didn't remove everything
# Wipe everything left in the install directory so the fresh clone starts
# clean; deletion failures are reported but not fatal.
folder = dir_path
for filename in os.listdir(folder):
    file_path = os.path.join(folder, filename)
    try:
        if os.path.isfile(file_path) or os.path.islink(file_path):
            os.unlink(file_path)
        elif os.path.isdir(file_path):
            shutil.rmtree(file_path)
    except Exception as e:
        print('Failed to delete %s. Reason: %s' % (file_path, e))
class CloneProgress(RemoteProgress):
    """GitPython progress handler that mirrors clone progress onto a tqdm bar."""

    def __init__(self):
        super().__init__()
        # a single bar for the whole clone; kept in sync by update()
        self.pbar = tqdm()

    def update(self, op_code, cur_count, max_count=None, message=''):
        """Callback invoked by GitPython; sync the bar with the reported counts."""
        bar = self.pbar
        bar.total = max_count
        bar.n = cur_count
        bar.refresh()
# Report the install location, then clone the selected branch into it,
# streaming progress through the tqdm-backed handler defined above.
print(dir_path)
Repo.clone_from(gitaddress, dir_path , branch=branch, progress=CloneProgress())
|
[
"tqdm.tqdm",
"os.path.join",
"os.unlink",
"os.path.isdir",
"os.system",
"os.path.isfile",
"os.path.islink",
"os.chdir",
"shutil.rmtree",
"os.path.expanduser",
"os.listdir"
] |
[((271, 289), 'os.chdir', 'os.chdir', (['dir_path'], {}), '(dir_path)\n', (279, 289), False, 'import os\n'), ((611, 644), 'os.system', 'os.system', (['"""del /F /S /Q /A .git"""'], {}), "('del /F /S /Q /A .git')\n", (620, 644), False, 'import os\n'), ((645, 678), 'os.system', 'os.system', (['"""del /F /S /Q /A .git"""'], {}), "('del /F /S /Q /A .git')\n", (654, 678), False, 'import os\n'), ((769, 787), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (779, 787), False, 'import os\n'), ((224, 257), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Documents"""'], {}), "('~/Documents')\n", (242, 257), False, 'import os\n'), ((805, 835), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (817, 835), False, 'import os\n'), ((1226, 1232), 'tqdm.tqdm', 'tqdm', ([], {}), '()\n', (1230, 1232), False, 'from tqdm import tqdm\n'), ((856, 881), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (870, 881), False, 'import os\n'), ((885, 910), 'os.path.islink', 'os.path.islink', (['file_path'], {}), '(file_path)\n', (899, 910), False, 'import os\n'), ((924, 944), 'os.unlink', 'os.unlink', (['file_path'], {}), '(file_path)\n', (933, 944), False, 'import os\n'), ((958, 982), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (971, 982), False, 'import os\n'), ((996, 1020), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (1009, 1020), False, 'import shutil\n')]
|
import time
import socket
import random
from subprocess import run, PIPE
# Maildir path handed to gitmaildir_cli as --dir=...
# NOTE(review): the embedded double quotes are passed literally to the CLI
# (subprocess.run is used without a shell) -- confirm the tool strips them.
test_dir = '"/Users/oliver/Google Drive/Cambridge/CST_II/project/testing/gtspeed"'
def test_git():
    """Deliver every line of test_strings.txt through gitmaildir_cli."""
    with open('test_strings.txt') as messages:
        for message in messages:
            run(['gitmaildir_cli', 'deliver', '--dir=' + test_dir],
                stdout=PIPE, input=message, encoding='ascii')
def test_non_git():
with open('test_strings.txt') as f:
for line in f:
filename = str(int(time.time() * 1000000)) + "." + str(random.random() * 1000000000) + "." + socket.gethostname()
mail_file = open('gtspeed/'+filename, 'w')
mail_file.write(line)
mail_file.close
# Only the git-backed delivery path is exercised by default; call
# test_non_git() instead to measure plain-file delivery.
test_git()
|
[
"subprocess.run",
"socket.gethostname",
"random.random",
"time.time"
] |
[((253, 356), 'subprocess.run', 'run', (["['gitmaildir_cli', 'deliver', '--dir=' + test_dir]"], {'stdout': 'PIPE', 'input': 'line', 'encoding': '"""ascii"""'}), "(['gitmaildir_cli', 'deliver', '--dir=' + test_dir], stdout=PIPE, input=\n line, encoding='ascii')\n", (256, 356), False, 'from subprocess import run, PIPE\n'), ((539, 559), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (557, 559), False, 'import socket\n'), ((501, 516), 'random.random', 'random.random', ([], {}), '()\n', (514, 516), False, 'import random\n'), ((465, 476), 'time.time', 'time.time', ([], {}), '()\n', (474, 476), False, 'import time\n')]
|
#Program to plot a point
from cg_algorithms.circle_algorithms import circle_algorithms
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
import math
import time
def init():
glClearColor(0.0, 0.0, 0.0, 0.0)
gluOrtho2D(-250.0, 250.0, -250.0, 250.0)
def plot_points():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1.0, 0.0, 0.0)
glBegin(GL_POINTS)
bres = circle_algorithms(60, 0, 0)
bres.bresenham_circle()
para = circle_algorithms(100, 0, 0)
para.parameteric_circle()
midp = circle_algorithms(150, 0, 0)
midp.midpoint_circle()
glEnd()
glFlush()
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB)
glutInitWindowSize(500, 500)
glutInitWindowPosition(50, 50)
glutCreateWindow(b'plot_all_points')
glutDisplayFunc(plot_points)
init()
glutMainLoop()
main()
|
[
"cg_algorithms.circle_algorithms.circle_algorithms"
] |
[((411, 438), 'cg_algorithms.circle_algorithms.circle_algorithms', 'circle_algorithms', (['(60)', '(0)', '(0)'], {}), '(60, 0, 0)\n', (428, 438), False, 'from cg_algorithms.circle_algorithms import circle_algorithms\n'), ((478, 506), 'cg_algorithms.circle_algorithms.circle_algorithms', 'circle_algorithms', (['(100)', '(0)', '(0)'], {}), '(100, 0, 0)\n', (495, 506), False, 'from cg_algorithms.circle_algorithms import circle_algorithms\n'), ((548, 576), 'cg_algorithms.circle_algorithms.circle_algorithms', 'circle_algorithms', (['(150)', '(0)', '(0)'], {}), '(150, 0, 0)\n', (565, 576), False, 'from cg_algorithms.circle_algorithms import circle_algorithms\n')]
|
import requests
from collections import OrderedDict
from django.utils.http import urlencode
from allauth.socialaccount.providers.core.oauth2.client import (
OAuth2Client,
OAuth2Error,
)
class WeixinOAuth2Client(OAuth2Client):
def get_redirect_url(self, authorization_url, extra_params):
params = {
'appid': self.consumer_key,
'redirect_uri': self.callback_url,
'scope': self.scope,
'response_type': 'code'
}
if self.state:
params['state'] = self.state
params.update(extra_params)
sorted_params = OrderedDict()
for param in sorted(params):
sorted_params[param] = params[param]
return '%s?%s' % (authorization_url, urlencode(sorted_params))
def get_access_token(self, code):
data = {'appid': self.consumer_key,
'redirect_uri': self.callback_url,
'grant_type': 'authorization_code',
'secret': self.consumer_secret,
'scope': self.scope,
'code': code}
params = None
self._strip_empty_keys(data)
url = self.access_token_url
if self.access_token_method == 'GET':
params = data
data = None
# TODO: Proper exception handling
resp = requests.request(self.access_token_method,
url,
params=params,
data=data)
access_token = None
if resp.status_code == 200:
access_token = resp.json()
if not access_token or 'access_token' not in access_token:
raise OAuth2Error('Error retrieving access token: %s'
% resp.content)
return access_token
|
[
"collections.OrderedDict",
"allauth.socialaccount.providers.core.oauth2.client.OAuth2Error",
"requests.request",
"django.utils.http.urlencode"
] |
[((613, 626), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (624, 626), False, 'from collections import OrderedDict\n'), ((1333, 1406), 'requests.request', 'requests.request', (['self.access_token_method', 'url'], {'params': 'params', 'data': 'data'}), '(self.access_token_method, url, params=params, data=data)\n', (1349, 1406), False, 'import requests\n'), ((1691, 1754), 'allauth.socialaccount.providers.core.oauth2.client.OAuth2Error', 'OAuth2Error', (["('Error retrieving access token: %s' % resp.content)"], {}), "('Error retrieving access token: %s' % resp.content)\n", (1702, 1754), False, 'from allauth.socialaccount.providers.core.oauth2.client import OAuth2Client, OAuth2Error\n'), ((758, 782), 'django.utils.http.urlencode', 'urlencode', (['sorted_params'], {}), '(sorted_params)\n', (767, 782), False, 'from django.utils.http import urlencode\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
feedforward neural network
"""
import mindspore.nn as nn
from mindelec.architecture import get_activation, LinearBlock
class FFNN(nn.Cell):
"""
Full-connect networks.
Args:
input_dim (int): the input dimensions.
output_dim (int): the output dimensions.
hidden_layer (int): number of hidden layers.
activation (str or Cell): activation functions.
"""
def __init__(self, input_dim, output_dim, hidden_layer=64, activation="sin"):
super(FFNN, self).__init__()
self.activation = get_activation(activation)
self.fc1 = LinearBlock(input_dim, hidden_layer)
self.fc2 = LinearBlock(hidden_layer, hidden_layer)
self.fc3 = LinearBlock(hidden_layer, hidden_layer)
self.fc4 = LinearBlock(hidden_layer, hidden_layer)
self.fc5 = LinearBlock(hidden_layer, output_dim)
def construct(self, *inputs):
"""fc network"""
x = inputs[0]
out = self.fc1(x)
out = self.activation(out)
out = self.fc2(out)
out = self.activation(out)
out = self.fc3(out)
out = self.activation(out)
out = self.fc4(out)
out = self.activation(out)
out = self.fc5(out)
return out
|
[
"mindelec.architecture.get_activation",
"mindelec.architecture.LinearBlock"
] |
[((1205, 1231), 'mindelec.architecture.get_activation', 'get_activation', (['activation'], {}), '(activation)\n', (1219, 1231), False, 'from mindelec.architecture import get_activation, LinearBlock\n'), ((1251, 1287), 'mindelec.architecture.LinearBlock', 'LinearBlock', (['input_dim', 'hidden_layer'], {}), '(input_dim, hidden_layer)\n', (1262, 1287), False, 'from mindelec.architecture import get_activation, LinearBlock\n'), ((1307, 1346), 'mindelec.architecture.LinearBlock', 'LinearBlock', (['hidden_layer', 'hidden_layer'], {}), '(hidden_layer, hidden_layer)\n', (1318, 1346), False, 'from mindelec.architecture import get_activation, LinearBlock\n'), ((1366, 1405), 'mindelec.architecture.LinearBlock', 'LinearBlock', (['hidden_layer', 'hidden_layer'], {}), '(hidden_layer, hidden_layer)\n', (1377, 1405), False, 'from mindelec.architecture import get_activation, LinearBlock\n'), ((1425, 1464), 'mindelec.architecture.LinearBlock', 'LinearBlock', (['hidden_layer', 'hidden_layer'], {}), '(hidden_layer, hidden_layer)\n', (1436, 1464), False, 'from mindelec.architecture import get_activation, LinearBlock\n'), ((1484, 1521), 'mindelec.architecture.LinearBlock', 'LinearBlock', (['hidden_layer', 'output_dim'], {}), '(hidden_layer, output_dim)\n', (1495, 1521), False, 'from mindelec.architecture import get_activation, LinearBlock\n')]
|
from corehq.sql_db.connections import get_db_alias_or_none, ICDS_UCR_CITUS_ENGINE_ID
def get_icds_ucr_citus_db_alias():
return get_db_alias_or_none(ICDS_UCR_CITUS_ENGINE_ID)
|
[
"corehq.sql_db.connections.get_db_alias_or_none"
] |
[((133, 179), 'corehq.sql_db.connections.get_db_alias_or_none', 'get_db_alias_or_none', (['ICDS_UCR_CITUS_ENGINE_ID'], {}), '(ICDS_UCR_CITUS_ENGINE_ID)\n', (153, 179), False, 'from corehq.sql_db.connections import get_db_alias_or_none, ICDS_UCR_CITUS_ENGINE_ID\n')]
|
from django.db.models import Q
from .base import EntityType
TYPE_VIDEO = "video"
class VideoEntity(EntityType):
name = TYPE_VIDEO
@classmethod
def filter_date_lte(cls, qs, dt):
return qs.filter(publication_date__lte=dt)
@classmethod
def filter_date_gte(cls, qs, dt):
return qs.filter(publication_date__gte=dt)
@classmethod
def filter_search(cls, qs, query):
from tournesol.models import Entity
# Filtering in a nested queryset is necessary here, to be able to annotate
# each entity without duplicated scores, due to the m2m field 'tags'.
return qs.filter(pk__in=Entity.objects.filter(
Q(name__icontains=query) |
Q(description__icontains=query) |
Q(tags__name__icontains=query)
))
|
[
"django.db.models.Q"
] |
[((767, 797), 'django.db.models.Q', 'Q', ([], {'tags__name__icontains': 'query'}), '(tags__name__icontains=query)\n', (768, 797), False, 'from django.db.models import Q\n'), ((682, 706), 'django.db.models.Q', 'Q', ([], {'name__icontains': 'query'}), '(name__icontains=query)\n', (683, 706), False, 'from django.db.models import Q\n'), ((721, 752), 'django.db.models.Q', 'Q', ([], {'description__icontains': 'query'}), '(description__icontains=query)\n', (722, 752), False, 'from django.db.models import Q\n')]
|
import os
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import plot_confusion_matrix
# Univariate visualization
def univariate_plot(data, path, save = True):
''' Plot the data univariately. '''
for col in data.columns:
plt.figure(figsize = (10, 8))
sns.displot(data[col])
plt.title(f'Distribution plot for Feature {col}')
if save:
plt.savefig(f'{path} - Feature {col}.png', dpi = 300)
plt.show()
plt.close('all')
return None
def correlogram(data, path, palette = 'inferno', h = 10, w = 10, save = True):
''' Plot and save correlogram. '''
plt.figure(figsize = (h, w))
sns.pairplot(data = data, palette = palette)
plt.title('Bivariate visual relationships in data')
if save:
plt.savefig(f'{path}.png', dpi = 300)
plt.show()
plt.close('all')
return None
def get_correlation_map(data, path, save = True, h = 20, w = 10):
''' Visualize feature correlation. '''
plt.figure(figsize = (h, w))
sns.heatmap(data.corr(), annot = True, fmt = '.3g')
plt.title('Feature collinearity heatmap')
if save:
plt.savefig(f'{path}.png', dpi = 300)
plt.show(); plt.close('all')
return None
def visualize_confusion_matrix(model, X, y, split, path, save = True):
""" Display Confusion Matrix visually."""
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (20, 20))
plt.title(f'Confusion matrix for {split.upper()}', fontsize = 30, pad = 30)
plot_confusion_matrix(model, X, y, ax = ax)
if save:
plt.savefig(os.path.join(path, f'{split}-confusion-matrix.png'), dpi = 300)
plt.show()
plt.close('all')
return None
def class_distribution(data, path, save = True, h = 10, w = 10):
''' Visualize class distribution. '''
plt.figure(figsize = (w, h))
sns.countplot(x = data)
plt.title('Class Distribution', pad = 20, fontsize = 20)
plt.xlabel('Class Label', fontsize = 20)
plt.ylabel('Class Population', fontsize = 20)
if save:
plt.savefig(f'{path}.png', dpi = 300)
plt.show(); plt.close('all')
return None
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"sklearn.metrics.plot_confusion_matrix",
"seaborn.displot",
"os.path.join",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"seaborn.countplot",
"seaborn.pairplot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((704, 730), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(h, w)'}), '(figsize=(h, w))\n', (714, 730), True, 'import matplotlib.pyplot as plt\n'), ((737, 777), 'seaborn.pairplot', 'sns.pairplot', ([], {'data': 'data', 'palette': 'palette'}), '(data=data, palette=palette)\n', (749, 777), True, 'import seaborn as sns\n'), ((791, 842), 'matplotlib.pyplot.title', 'plt.title', (['"""Bivariate visual relationships in data"""'], {}), "('Bivariate visual relationships in data')\n", (800, 842), True, 'import matplotlib.pyplot as plt\n'), ((916, 926), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (924, 926), True, 'import matplotlib.pyplot as plt\n'), ((931, 947), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (940, 947), True, 'import matplotlib.pyplot as plt\n'), ((1099, 1125), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(h, w)'}), '(figsize=(h, w))\n', (1109, 1125), True, 'import matplotlib.pyplot as plt\n'), ((1188, 1229), 'matplotlib.pyplot.title', 'plt.title', (['"""Feature collinearity heatmap"""'], {}), "('Feature collinearity heatmap')\n", (1197, 1229), True, 'import matplotlib.pyplot as plt\n'), ((1303, 1313), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1311, 1313), True, 'import matplotlib.pyplot as plt\n'), ((1315, 1331), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1324, 1331), True, 'import matplotlib.pyplot as plt\n'), ((1503, 1551), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(20, 20)'}), '(nrows=1, ncols=1, figsize=(20, 20))\n', (1515, 1551), True, 'import matplotlib.pyplot as plt\n'), ((1642, 1683), 'sklearn.metrics.plot_confusion_matrix', 'plot_confusion_matrix', (['model', 'X', 'y'], {'ax': 'ax'}), '(model, X, y, ax=ax)\n', (1663, 1683), False, 'from sklearn.metrics import plot_confusion_matrix\n'), ((1797, 1807), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1805, 1807), True, 
'import matplotlib.pyplot as plt\n'), ((1812, 1828), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1821, 1828), True, 'import matplotlib.pyplot as plt\n'), ((1964, 1990), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(w, h)'}), '(figsize=(w, h))\n', (1974, 1990), True, 'import matplotlib.pyplot as plt\n'), ((1997, 2018), 'seaborn.countplot', 'sns.countplot', ([], {'x': 'data'}), '(x=data)\n', (2010, 2018), True, 'import seaborn as sns\n'), ((2030, 2082), 'matplotlib.pyplot.title', 'plt.title', (['"""Class Distribution"""'], {'pad': '(20)', 'fontsize': '(20)'}), "('Class Distribution', pad=20, fontsize=20)\n", (2039, 2082), True, 'import matplotlib.pyplot as plt\n'), ((2096, 2134), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Class Label"""'], {'fontsize': '(20)'}), "('Class Label', fontsize=20)\n", (2106, 2134), True, 'import matplotlib.pyplot as plt\n'), ((2141, 2184), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Class Population"""'], {'fontsize': '(20)'}), "('Class Population', fontsize=20)\n", (2151, 2184), True, 'import matplotlib.pyplot as plt\n'), ((2260, 2270), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2268, 2270), True, 'import matplotlib.pyplot as plt\n'), ((2272, 2288), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2281, 2288), True, 'import matplotlib.pyplot as plt\n'), ((285, 312), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (295, 312), True, 'import matplotlib.pyplot as plt\n'), ((323, 345), 'seaborn.displot', 'sns.displot', (['data[col]'], {}), '(data[col])\n', (334, 345), True, 'import seaborn as sns\n'), ((355, 404), 'matplotlib.pyplot.title', 'plt.title', (['f"""Distribution plot for Feature {col}"""'], {}), "(f'Distribution plot for Feature {col}')\n", (364, 404), True, 'import matplotlib.pyplot as plt\n'), ((514, 524), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (522, 524), True, 'import 
matplotlib.pyplot as plt\n'), ((533, 549), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (542, 549), True, 'import matplotlib.pyplot as plt\n'), ((869, 904), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{path}.png"""'], {'dpi': '(300)'}), "(f'{path}.png', dpi=300)\n", (880, 904), True, 'import matplotlib.pyplot as plt\n'), ((1256, 1291), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{path}.png"""'], {'dpi': '(300)'}), "(f'{path}.png', dpi=300)\n", (1267, 1291), True, 'import matplotlib.pyplot as plt\n'), ((2213, 2248), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{path}.png"""'], {'dpi': '(300)'}), "(f'{path}.png', dpi=300)\n", (2224, 2248), True, 'import matplotlib.pyplot as plt\n'), ((443, 494), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{path} - Feature {col}.png"""'], {'dpi': '(300)'}), "(f'{path} - Feature {col}.png', dpi=300)\n", (454, 494), True, 'import matplotlib.pyplot as plt\n'), ((1724, 1775), 'os.path.join', 'os.path.join', (['path', 'f"""{split}-confusion-matrix.png"""'], {}), "(path, f'{split}-confusion-matrix.png')\n", (1736, 1775), False, 'import os\n')]
|
#!/usr/bin/env python
###########################################################################
## File : cmsHarvest.py
## Authors : <NAME> (<EMAIL>)
## <NAME> (<EMAIL>)
## <NAME> (<EMAIL>)
## Last change: 20100308
##
## Purpose : Main program to run all kinds of harvesting.
## For more information please refer to the CMS Twiki url
## mentioned just below here.
###########################################################################
"""Main program to run all kinds of harvesting.
These are the basic kinds of harvesting implemented (contact me if
your favourite is missing):
- RelVal : Run for release validation samples. Makes heavy use of MC
truth information.
- RelValFS: FastSim RelVal.
- MC : Run for MC samples.
- DQMOffline : Run for real data (could also be run for MC).
For the mappings of these harvesting types to sequence names please
see the setup_harvesting_info() and option_handler_list_types()
methods.
"""
from __future__ import print_function
###########################################################################
from builtins import range
__version__ = "3.8.2p1" # (version jump to match release)
__author__ = "<NAME> (<EMAIL>)," \
"<NAME> (<EMAIL>)"
twiki_url = "https://twiki.cern.ch/twiki/bin/view/CMS/CmsHarvester"
###########################################################################
###########################################################################
## TODO list
##
## !!! Some code refactoring is in order. A lot of the code that loads
## and builds dataset and run lists is duplicated. !!!
##
## - SPECIAL (future):
## After discussing all these harvesting issues yet again with Luca,
## it looks like we'll need something special to handle harvesting
## for (collisions) data in reprocessing. Stuff with a special DBS
## instance in which `rolling' reports of reprocessed datasets is
## publised. In this case we will have to check (w.r.t. the parent
## dataset) how much of a given run is ready, and submit once we're
## satisfied (let's say 90%).
##
## - We could get rid of most of the `and dataset.status = VALID'
## pieces in the DBS queries.
## - Change to a more efficient grid scheduler.
## - Implement incremental harvesting. Requires some changes to the
## book keeping to store the harvested number of events for each
## run. Also requires some changes to the dataset checking to see if
## additional statistics have become available.
## - Emphasize the warnings in case we're running in force
## mode. Otherwise they may get lost a bit in the output.
## - Fix the creation of the CASTOR dirs. The current approach works
## but is a) too complicated and b) too inefficient.
## - Fully implement all harvesting types.
## --> Discuss with Andreas what exactly should be in there. And be
## careful with the run numbers!
## - Add the (second-step) harvesting config file to the (first-step)
## ME extraction job to make sure it does not get lost.
## - Improve sanity checks on harvesting type vs. data type.
## - Implement reference histograms.
## 1) User-specified reference dataset.
## 2) Educated guess based on dataset name.
## 3) References from GlobalTag.
## 4) No reference at all.
## - Is this options.evt_type used anywhere?
## - Combine all these dbs_resolve_xxx into a single call to DBS(?).
## - Implement CRAB server use?
## - Add implementation of email address of user. (Only necessary for
## CRAB server.)
###########################################################################
import os
import sys
import commands
import re
import logging
import optparse
import datetime
import copy
from inspect import getargspec
from random import choice
import six
# These we need to communicate with DBS global DBSAPI
from DBSAPI.dbsApi import DbsApi
import DBSAPI.dbsException
import DBSAPI.dbsApiException
from functools import reduce
# and these we need to parse the DBS output.
global xml
global SAXParseException
import xml.sax
from xml.sax import SAXParseException
import Configuration.PyReleaseValidation
from Configuration.PyReleaseValidation.ConfigBuilder import \
ConfigBuilder, defaultOptions
# from Configuration.PyReleaseValidation.cmsDriverOptions import options, python_config_filename
#import FWCore.ParameterSet.Config as cms
# Debugging stuff.
import pdb
try:
import debug_hook
except ImportError:
pass
###########################################################################
## Helper class: Usage exception.
###########################################################################
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
# End of Usage.
###########################################################################
## Helper class: Error exception.
###########################################################################
class Error(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
###########################################################################
## Helper class: CMSHarvesterHelpFormatter.
###########################################################################
class CMSHarvesterHelpFormatter(optparse.IndentedHelpFormatter):
"""Helper class to add some customised help output to cmsHarvester.
We want to add some instructions, as well as a pointer to the CMS
Twiki.
"""
def format_usage(self, usage):
usage_lines = []
sep_line = "-" * 60
usage_lines.append(sep_line)
usage_lines.append("Welcome to the CMS harvester, a (hopefully useful)")
usage_lines.append("tool to create harvesting configurations.")
usage_lines.append("For more information please have a look at the CMS Twiki:")
usage_lines.append(" %s" % twiki_url)
usage_lines.append(sep_line)
usage_lines.append("")
# Since we only add to the output, we now just append the
# original output from IndentedHelpFormatter.
usage_lines.append(optparse.IndentedHelpFormatter. \
format_usage(self, usage))
formatted_usage = "\n".join(usage_lines)
return formatted_usage
# End of CMSHarvesterHelpFormatter.
###########################################################################
## Helper class: DBSXMLHandler.
###########################################################################
class DBSXMLHandler(xml.sax.handler.ContentHandler):
"""XML handler class to parse DBS results.
The tricky thing here is that older DBS versions (2.0.5 and
earlier) return results in a different XML format than newer
versions. Previously the result values were returned as attributes
to the `result' element. The new approach returns result values as
contents of named elements.
The old approach is handled directly in startElement(), the new
approach in characters().
NOTE: All results are returned in the form of string values of
course!
"""
# This is the required mapping from the name of the variable we
# ask for to what we call it ourselves. (Effectively this is the
# mapping between the old attribute key name and the new element
# name.)
mapping = {
"dataset" : "PATH",
"dataset.tag" : "PROCESSEDDATASET_GLOBALTAG",
"datatype.type" : "PRIMARYDSTYPE_TYPE",
"run" : "RUNS_RUNNUMBER",
"run.number" : "RUNS_RUNNUMBER",
"file.name" : "FILES_LOGICALFILENAME",
"file.numevents" : "FILES_NUMBEROFEVENTS",
"algo.version" : "APPVERSION_VERSION",
"site" : "STORAGEELEMENT_SENAME",
}
def __init__(self, tag_names):
# This is a list used as stack to keep track of where we are
# in the element tree.
self.element_position = []
self.tag_names = tag_names
self.results = {}
def startElement(self, name, attrs):
self.element_position.append(name)
self.current_value = []
#----------
# This is to catch results from DBS 2.0.5 and earlier.
if name == "result":
for name in self.tag_names:
key = DBSXMLHandler.mapping[name]
value = str(attrs[key])
try:
self.results[name].append(value)
except KeyError:
self.results[name] = [value]
#----------
def endElement(self, name):
assert self.current_element() == name, \
"closing unopenend element `%s'" % name
if self.current_element() in self.tag_names:
contents = "".join(self.current_value)
if self.current_element() in self.results:
self.results[self.current_element()].append(contents)
else:
self.results[self.current_element()] = [contents]
self.element_position.pop()
def characters(self, content):
# NOTE: It is possible that the actual contents of the tag
# gets split into multiple pieces. This method will be called
# for each of the pieces. This means we have to concatenate
# everything ourselves.
if self.current_element() in self.tag_names:
self.current_value.append(content)
def current_element(self):
return self.element_position[-1]
def check_results_validity(self):
"""Make sure that all results arrays have equal length.
We should have received complete rows from DBS. I.e. all
results arrays in the handler should be of equal length.
"""
results_valid = True
res_names = self.results.keys()
if len(res_names) > 1:
for res_name in res_names[1:]:
res_tmp = self.results[res_name]
if len(res_tmp) != len(self.results[res_names[0]]):
results_valid = False
return results_valid
# End of DBSXMLHandler.
###########################################################################
## CMSHarvester class.
###########################################################################
class CMSHarvester(object):
"""Class to perform CMS harvesting.
More documentation `obviously' to follow.
"""
##########
def __init__(self, cmd_line_opts=None):
"Initialize class and process command line options."
self.version = __version__
# These are the harvesting types allowed. See the head of this
# file for more information.
self.harvesting_types = [
"RelVal",
"RelValFS",
"MC",
"DQMOffline",
]
# These are the possible harvesting modes:
# - Single-step: harvesting takes place on-site in a single
# step. For each samples only a single ROOT file containing
# the harvesting results is returned.
# - Single-step-allow-partial: special hack to allow
# harvesting of partial statistics using single-step
# harvesting on spread-out samples.
# - Two-step: harvesting takes place in two steps. The first
# step returns a series of monitoring elenent summaries for
# each sample. The second step then merges these summaries
# locally and does the real harvesting. This second step
# produces the ROOT file containing the harvesting results.
self.harvesting_modes = [
"single-step",
"single-step-allow-partial",
"two-step"
]
# It is possible to specify a GlobalTag that will override any
# choices (regarding GlobalTags) made by the cmsHarvester.
# BUG BUG BUG
# For the moment, until I figure out a way to obtain the
# GlobalTag with which a given data (!) dataset was created,
# it is necessary to specify a GlobalTag for harvesting of
# data.
# BUG BUG BUG end
self.globaltag = None
# It's also possible to switch off the use of reference
# histograms altogether.
self.use_ref_hists = True
# The database name and account are hard-coded. They are not
# likely to change before the end-of-life of this tool. But of
# course there is a way to override this from the command
# line. One can even override the Frontier connection used for
# the GlobalTag and for the reference histograms
# independently. Please only use this for testing purposes.
self.frontier_connection_name = {}
self.frontier_connection_name["globaltag"] = "frontier://" \
"FrontierProd/"
self.frontier_connection_name["refhists"] = "frontier://" \
"FrontierProd/"
self.frontier_connection_overridden = {}
for key in self.frontier_connection_name.keys():
self.frontier_connection_overridden[key] = False
# This contains information specific to each of the harvesting
# types. Used to create the harvesting configuration. It is
# filled by setup_harvesting_info().
self.harvesting_info = None
###
# These are default `unused' values that will be filled in
# depending on the command line options.
# The type of harvesting we're doing. See
# self.harvesting_types for allowed types.
self.harvesting_type = None
# The harvesting mode, popularly known as single-step
# vs. two-step. The thing to remember at this point is that
# single-step is only possible for samples located completely
# at a single site (i.e. SE).
self.harvesting_mode = None
# BUG BUG BUG
# Default temporarily set to two-step until we can get staged
# jobs working with CRAB.
self.harvesting_mode_default = "single-step"
# BUG BUG BUG end
# The input method: are we reading a dataset name (or regexp)
# directly from the command line or are we reading a file
# containing a list of dataset specifications. Actually we
# keep one of each for both datasets and runs.
self.input_method = {}
self.input_method["datasets"] = {}
self.input_method["datasets"]["use"] = None
self.input_method["datasets"]["ignore"] = None
self.input_method["runs"] = {}
self.input_method["runs"]["use"] = None
self.input_method["runs"]["ignore"] = None
self.input_method["runs"]["ignore"] = None
# The name of whatever input we're using.
self.input_name = {}
self.input_name["datasets"] = {}
self.input_name["datasets"]["use"] = None
self.input_name["datasets"]["ignore"] = None
self.input_name["runs"] = {}
self.input_name["runs"]["use"] = None
self.input_name["runs"]["ignore"] = None
self.Jsonlumi = False
self.Jsonfilename = "YourJSON.txt"
self.Jsonrunfilename = "YourJSON.txt"
self.todofile = "YourToDofile.txt"
# If this is true, we're running in `force mode'. In this case
# the sanity checks are performed but failure will not halt
# everything.
self.force_running = None
# The base path of the output dir in CASTOR.
self.castor_base_dir = None
self.castor_base_dir_default = "/castor/cern.ch/" \
"cms/store/temp/" \
"dqm/offline/harvesting_output/"
# The name of the file to be used for book keeping: which
# datasets, runs, etc. we have already processed.
self.book_keeping_file_name = None
self.book_keeping_file_name_default = "harvesting_accounting.txt"
# The dataset name to reference histogram name mapping is read
# from a text file. The name of this file is kept in the
# following variable.
self.ref_hist_mappings_file_name = None
# And this is the default value.
self.ref_hist_mappings_file_name_default = "harvesting_ref_hist_mappings.txt"
# Hmmm, hard-coded prefix of the CERN CASTOR area. This is the
# only supported CASTOR area.
# NOTE: Make sure this one starts with a `/'.
self.castor_prefix = "/castor/cern.ch"
# Normally the central harvesting should be done using the
# `t1access' grid role. To be able to run without T1 access
# the --no-t1access flag can be used. This variable keeps
# track of that special mode.
self.non_t1access = False
self.caf_access = False
self.saveByLumiSection = False
self.crab_submission = False
self.nr_max_sites = 1
self.preferred_site = "no preference"
# This will become the list of datasets and runs to consider
self.datasets_to_use = {}
# and this will become the list of datasets and runs to skip.
self.datasets_to_ignore = {}
# This, in turn, will hold all book keeping information.
self.book_keeping_information = {}
# And this is where the dataset name to reference histogram
# name mapping is stored.
self.ref_hist_mappings = {}
# We're now also allowing run selection. This means we also
# have to keep list of runs requested and vetoed by the user.
self.runs_to_use = {}
self.runs_to_ignore = {}
# Cache for CMSSW version availability at different sites.
self.sites_and_versions_cache = {}
# Cache for checked GlobalTags.
self.globaltag_check_cache = []
# Global flag to see if there were any jobs for which we could
# not find a matching site.
self.all_sites_found = True
# Helper string centrally defined.
self.no_matching_site_found_str = "no_matching_site_found"
# Store command line options for later use.
if cmd_line_opts is None:
cmd_line_opts = sys.argv[1:]
self.cmd_line_opts = cmd_line_opts
# Set up the logger.
log_handler = logging.StreamHandler()
# This is the default log formatter, the debug option switches
# on some more information.
log_formatter = logging.Formatter("%(message)s")
log_handler.setFormatter(log_formatter)
logger = logging.getLogger()
logger.name = "main"
logger.addHandler(log_handler)
self.logger = logger
# The default output mode is quite verbose.
self.set_output_level("NORMAL")
#logger.debug("Initialized successfully")
# End of __init__.
##########
def cleanup(self):
"Clean up after ourselves."
# NOTE: This is the safe replacement of __del__.
#self.logger.debug("All done -> shutting down")
logging.shutdown()
# End of cleanup.
##########
def time_stamp(self):
"Create a timestamp to use in the created config files."
time_now = datetime.datetime.utcnow()
# We don't care about the microseconds.
time_now = time_now.replace(microsecond = 0)
time_stamp = "%sUTC" % datetime.datetime.isoformat(time_now)
# End of time_stamp.
return time_stamp
##########
def ident_string(self):
"Spit out an identification string for cmsHarvester.py."
ident_str = "`cmsHarvester.py " \
"version %s': cmsHarvester.py %s" % \
(__version__,
reduce(lambda x, y: x+' '+y, sys.argv[1:]))
return ident_str
##########
def format_conditions_string(self, globaltag):
"""Create the conditions string needed for `cmsDriver'.
Just glueing the FrontierConditions bit in front of it really.
"""
# Not very robust but okay. The idea is that if the user
# specified (since this cannot happen with GlobalTags coming
# from DBS) something containing `conditions', they probably
# know what they're doing and we should not muck things up. In
# all other cases we just assume we only received the
# GlobalTag part and we built the usual conditions string from
# that .
if globaltag.lower().find("conditions") > -1:
conditions_string = globaltag
else:
conditions_string = "FrontierConditions_GlobalTag,%s" % \
globaltag
# End of format_conditions_string.
return conditions_string
##########
def db_account_name_cms_cond_globaltag(self):
"""Return the database account name used to store the GlobalTag.
The name of the database account depends (albeit weakly) on
the CMSSW release version.
"""
# This never changed, unlike the cms_cond_31X_DQM_SUMMARY ->
# cms_cond_34X_DQM transition.
account_name = "CMS_COND_31X_GLOBALTAG"
# End of db_account_name_cms_cond_globaltag.
return account_name
##########
def db_account_name_cms_cond_dqm_summary(self):
"""See db_account_name_cms_cond_globaltag."""
account_name = None
version = self.cmssw_version[6:11]
if version < "3_4_0":
account_name = "CMS_COND_31X_DQM_SUMMARY"
else:
account_name = "<KEY>"
# End of db_account_name_cms_cond_dqm_summary.
return account_name
##########
def config_file_header(self):
"Create a nice header to be used to mark the generated files."
tmp = []
time_stamp = self.time_stamp()
ident_str = self.ident_string()
tmp.append("# %s" % time_stamp)
tmp.append("# WARNING: This file was created automatically!")
tmp.append("")
tmp.append("# Created by %s" % ident_str)
header = "\n".join(tmp)
# End of config_file_header.
return header
##########
def set_output_level(self, output_level):
"""Adjust the level of output generated.
Choices are:
- normal : default level of output
- quiet : less output than the default
- verbose : some additional information
- debug : lots more information, may be overwhelming
NOTE: The debug option is a bit special in the sense that it
also modifies the output format.
"""
# NOTE: These levels are hooked up to the ones used in the
# logging module.
output_levels = {
"NORMAL" : logging.INFO,
"QUIET" : logging.WARNING,
"VERBOSE" : logging.INFO,
"DEBUG" : logging.DEBUG
}
output_level = output_level.upper()
try:
# Update the logger.
self.log_level = output_levels[output_level]
self.logger.setLevel(self.log_level)
except KeyError:
# Show a complaint
self.logger.fatal("Unknown output level `%s'" % ouput_level)
# and re-raise an exception.
raise Exception
# End of set_output_level.
##########
def option_handler_debug(self, option, opt_str, value, parser):
"""Switch to debug mode.
This both increases the amount of output generated, as well as
changes the format used (more detailed information is given).
"""
# Switch to a more informative log formatter for debugging.
log_formatter_debug = logging.Formatter("[%(levelname)s] " \
# NOTE: funcName was
# only implemented
# starting with python
# 2.5.
#"%(funcName)s() " \
#"@%(filename)s:%(lineno)d " \
"%(message)s")
# Hmmm, not very nice. This assumes there's only a single
# handler associated with the current logger.
log_handler = self.logger.handlers[0]
log_handler.setFormatter(log_formatter_debug)
self.set_output_level("DEBUG")
# End of option_handler_debug.
##########
def option_handler_quiet(self, option, opt_str, value, parser):
"Switch to quiet mode: less verbose."
self.set_output_level("QUIET")
# End of option_handler_quiet.
##########
def option_handler_force(self, option, opt_str, value, parser):
"""Switch on `force mode' in which case we don't brake for nobody.
In so-called `force mode' all sanity checks are performed but
we don't halt on failure. Of course this requires some care
from the user.
"""
self.logger.debug("Switching on `force mode'.")
self.force_running = True
# End of option_handler_force.
##########
def option_handler_harvesting_type(self, option, opt_str, value, parser):
"""Set the harvesting type to be used.
This checks that no harvesting type is already set, and sets
the harvesting type to be used to the one specified. If a
harvesting type is already set an exception is thrown. The
same happens when an unknown type is specified.
"""
# Check for (in)valid harvesting types.
# NOTE: The matching is done in a bit of a complicated
# way. This allows the specification of the type to be
# case-insensitive while still ending up with the properly
# `cased' version afterwards.
value = value.lower()
harvesting_types_lowered = [i.lower() for i in self.harvesting_types]
try:
type_index = harvesting_types_lowered.index(value)
# If this works, we now have the index to the `properly
# cased' version of the harvesting type.
except ValueError:
self.logger.fatal("Unknown harvesting type `%s'" % \
value)
self.logger.fatal(" possible types are: %s" %
", ".join(self.harvesting_types))
raise Usage("Unknown harvesting type `%s'" % \
value)
# Check if multiple (by definition conflicting) harvesting
# types are being specified.
if not self.harvesting_type is None:
msg = "Only one harvesting type should be specified"
self.logger.fatal(msg)
raise Usage(msg)
self.harvesting_type = self.harvesting_types[type_index]
self.logger.info("Harvesting type to be used: `%s'" % \
self.harvesting_type)
# End of option_handler_harvesting_type.
##########
def option_handler_harvesting_mode(self, option, opt_str, value, parser):
"""Set the harvesting mode to be used.
Single-step harvesting can be used for samples that are
located completely at a single site (= SE). Otherwise use
two-step mode.
"""
# Check for valid mode.
harvesting_mode = value.lower()
if not harvesting_mode in self.harvesting_modes:
msg = "Unknown harvesting mode `%s'" % harvesting_mode
self.logger.fatal(msg)
self.logger.fatal(" possible modes are: %s" % \
", ".join(self.harvesting_modes))
raise Usage(msg)
# Check if we've been given only a single mode, otherwise
# complain.
if not self.harvesting_mode is None:
msg = "Only one harvesting mode should be specified"
self.logger.fatal(msg)
raise Usage(msg)
self.harvesting_mode = harvesting_mode
self.logger.info("Harvesting mode to be used: `%s'" % \
self.harvesting_mode)
# End of option_handler_harvesting_mode.
##########
def option_handler_globaltag(self, option, opt_str, value, parser):
"""Set the GlobalTag to be used, overriding our own choices.
By default the cmsHarvester will use the GlobalTag with which
a given dataset was created also for the harvesting. The
--globaltag option is the way to override this behaviour.
"""
# Make sure that this flag only occurred once.
if not self.globaltag is None:
msg = "Only one GlobalTag should be specified"
self.logger.fatal(msg)
raise Usage(msg)
self.globaltag = value
self.logger.info("GlobalTag to be used: `%s'" % \
self.globaltag)
# End of option_handler_globaltag.
##########
def option_handler_no_ref_hists(self, option, opt_str, value, parser):
"Switch use of all reference histograms off."
self.use_ref_hists = False
self.logger.warning("Switching off all use of reference histograms")
# End of option_handler_no_ref_hists.
##########
def option_handler_frontier_connection(self, option, opt_str,
value, parser):
"""Override the default Frontier connection string.
Please only use this for testing (e.g. when a test payload has
been inserted into cms_orc_off instead of cms_orc_on).
This method gets called for three different command line
options:
- --frontier-connection,
- --frontier-connection-for-globaltag,
- --frontier-connection-for-refhists.
Appropriate care has to be taken to make sure things are only
specified once.
"""
# Figure out with which command line option we've been called.
frontier_type = opt_str.split("-")[-1]
if frontier_type == "connection":
# Main option: change all connection strings.
frontier_types = self.frontier_connection_name.keys()
else:
frontier_types = [frontier_type]
# Make sure that each Frontier connection is specified only
# once. (Okay, in a bit of a dodgy way...)
for connection_name in frontier_types:
if self.frontier_connection_overridden[connection_name] == True:
msg = "Please specify either:\n" \
" `--frontier-connection' to change the " \
"Frontier connection used for everything, or\n" \
"either one or both of\n" \
" `--frontier-connection-for-globaltag' to " \
"change the Frontier connection used for the " \
"GlobalTag and\n" \
" `--frontier-connection-for-refhists' to change " \
"the Frontier connection used for the " \
"reference histograms."
self.logger.fatal(msg)
raise Usage(msg)
frontier_prefix = "frontier://"
if not value.startswith(frontier_prefix):
msg = "Expecting Frontier connections to start with " \
"`%s'. You specified `%s'." % \
(frontier_prefix, value)
self.logger.fatal(msg)
raise Usage(msg)
# We also kind of expect this to be either FrontierPrep or
# FrontierProd (but this is just a warning).
if value.find("FrontierProd") < 0 and \
value.find("FrontierProd") < 0:
msg = "Expecting Frontier connections to contain either " \
"`FrontierProd' or `FrontierPrep'. You specified " \
"`%s'. Are you sure?" % \
value
self.logger.warning(msg)
if not value.endswith("/"):
value += "/"
for connection_name in frontier_types:
self.frontier_connection_name[connection_name] = value
self.frontier_connection_overridden[connection_name] = True
frontier_type_str = "unknown"
if connection_name == "globaltag":
frontier_type_str = "the GlobalTag"
elif connection_name == "refhists":
frontier_type_str = "the reference histograms"
self.logger.warning("Overriding default Frontier " \
"connection for %s " \
"with `%s'" % \
(frontier_type_str,
self.frontier_connection_name[connection_name]))
# End of option_handler_frontier_connection
##########
def option_handler_input_todofile(self, option, opt_str, value, parser):
self.todofile = value
# End of option_handler_input_todofile.
##########
def option_handler_input_Jsonfile(self, option, opt_str, value, parser):
self.Jsonfilename = value
# End of option_handler_input_Jsonfile.
##########
def option_handler_input_Jsonrunfile(self, option, opt_str, value, parser):
self.Jsonrunfilename = value
# End of option_handler_input_Jsonrunfile.
##########
def option_handler_input_spec(self, option, opt_str, value, parser):
"""TODO TODO TODO
Document this...
"""
# Figure out if we were called for the `use these' or the
# `ignore these' case.
if opt_str.lower().find("ignore") > -1:
spec_type = "ignore"
else:
spec_type = "use"
# Similar: are we being called for datasets or for runs?
if opt_str.lower().find("dataset") > -1:
select_type = "datasets"
else:
select_type = "runs"
if not self.input_method[select_type][spec_type] is None:
msg = "Please only specify one input method " \
"(for the `%s' case)" % opt_str
self.logger.fatal(msg)
raise Usage(msg)
input_method = opt_str.replace("-", "").replace("ignore", "")
self.input_method[select_type][spec_type] = input_method
self.input_name[select_type][spec_type] = value
self.logger.debug("Input method for the `%s' case: %s" % \
(spec_type, input_method))
# End of option_handler_input_spec
##########
def option_handler_book_keeping_file(self, option, opt_str, value, parser):
"""Store the name of the file to be used for book keeping.
The only check done here is that only a single book keeping
file is specified.
"""
file_name = value
if not self.book_keeping_file_name is None:
msg = "Only one book keeping file should be specified"
self.logger.fatal(msg)
raise Usage(msg)
self.book_keeping_file_name = file_name
self.logger.info("Book keeping file to be used: `%s'" % \
self.book_keeping_file_name)
# End of option_handler_book_keeping_file.
##########
def option_handler_ref_hist_mapping_file(self, option, opt_str, value, parser):
"""Store the name of the file for the ref. histogram mapping.
"""
file_name = value
if not self.ref_hist_mappings_file_name is None:
msg = "Only one reference histogram mapping file " \
"should be specified"
self.logger.fatal(msg)
raise Usage(msg)
self.ref_hist_mappings_file_name = file_name
self.logger.info("Reference histogram mapping file " \
"to be used: `%s'" % \
self.ref_hist_mappings_file_name)
# End of option_handler_ref_hist_mapping_file.
##########
# OBSOLETE OBSOLETE OBSOLETE
## def option_handler_dataset_name(self, option, opt_str, value, parser):
## """Specify the name(s) of the dataset(s) to be processed.
## It is checked to make sure that no dataset name or listfile
## names are given yet. If all is well (i.e. we still have a
## clean slate) the dataset name is stored for later use,
## otherwise a Usage exception is raised.
## """
## if not self.input_method is None:
## if self.input_method == "dataset":
## raise Usage("Please only feed me one dataset specification")
## elif self.input_method == "listfile":
## raise Usage("Cannot specify both dataset and input list file")
## else:
## assert False, "Unknown input method `%s'" % self.input_method
## self.input_method = "dataset"
## self.input_name = value
## self.logger.info("Input method used: %s" % self.input_method)
## # End of option_handler_dataset_name.
## ##########
## def option_handler_listfile_name(self, option, opt_str, value, parser):
## """Specify the input list file containing datasets to be processed.
## It is checked to make sure that no dataset name or listfile
## names are given yet. If all is well (i.e. we still have a
## clean slate) the listfile name is stored for later use,
## otherwise a Usage exception is raised.
## """
## if not self.input_method is None:
## if self.input_method == "listfile":
## raise Usage("Please only feed me one list file")
## elif self.input_method == "dataset":
## raise Usage("Cannot specify both dataset and input list file")
## else:
## assert False, "Unknown input method `%s'" % self.input_method
## self.input_method = "listfile"
## self.input_name = value
## self.logger.info("Input method used: %s" % self.input_method)
## # End of option_handler_listfile_name.
# OBSOLETE OBSOLETE OBSOLETE end
##########
def option_handler_castor_dir(self, option, opt_str, value, parser):
"""Specify where on CASTOR the output should go.
At the moment only output to CERN CASTOR is
supported. Eventually the harvested results should go into the
central place for DQM on CASTOR anyway.
"""
# Check format of specified CASTOR area.
castor_dir = value
#castor_dir = castor_dir.lstrip(os.path.sep)
castor_prefix = self.castor_prefix
# Add a leading slash if necessary and clean up the path.
castor_dir = os.path.join(os.path.sep, castor_dir)
self.castor_base_dir = os.path.normpath(castor_dir)
self.logger.info("CASTOR (base) area to be used: `%s'" % \
self.castor_base_dir)
# End of option_handler_castor_dir.
##########
def option_handler_no_t1access(self, option, opt_str, value, parser):
"""Set the self.no_t1access flag to try and create jobs that
run without special `t1access' role.
"""
self.non_t1access = True
self.logger.warning("Running in `non-t1access' mode. " \
"Will try to create jobs that run " \
"without special rights but no " \
"further promises...")
# End of option_handler_no_t1access.
##########
def option_handler_caf_access(self, option, opt_str, value, parser):
"""Set the self.caf_access flag to try and create jobs that
run on the CAF.
"""
self.caf_access = True
self.logger.warning("Running in `caf_access' mode. " \
"Will try to create jobs that run " \
"on CAF but no" \
"further promises...")
# End of option_handler_caf_access.
##########
def option_handler_saveByLumiSection(self, option, opt_str, value, parser):
"""Set process.dqmSaver.saveByLumiSectiont=1 in cfg harvesting file
"""
self.saveByLumiSection = True
self.logger.warning("waning concerning saveByLumiSection option")
# End of option_handler_saveByLumiSection.
##########
def option_handler_crab_submission(self, option, opt_str, value, parser):
"""Crab jobs are not created and
"submitted automatically",
"""
self.crab_submission = True
# End of option_handler_crab_submission.
##########
def option_handler_sites(self, option, opt_str, value, parser):
self.nr_max_sites = value
##########
def option_handler_preferred_site(self, option, opt_str, value, parser):
self.preferred_site = value
##########
def option_handler_list_types(self, option, opt_str, value, parser):
"""List all harvesting types and their mappings.
This lists all implemented harvesting types with their
corresponding mappings to sequence names. This had to be
separated out from the help since it depends on the CMSSW
version and was making things a bit of a mess.
NOTE: There is no way (at least not that I could come up with)
to code this in a neat generic way that can be read both by
this method and by setup_harvesting_info(). Please try hard to
keep these two methods in sync!
"""
sep_line = "-" * 50
sep_line_short = "-" * 20
print(sep_line)
print("The following harvesting types are available:")
print(sep_line)
print("`RelVal' maps to:")
print(" pre-3_3_0 : HARVESTING:validationHarvesting")
print(" 3_4_0_pre2 and later: HARVESTING:validationHarvesting+dqmHarvesting")
print(" Exceptions:")
print(" 3_3_0_pre1-4 : HARVESTING:validationHarvesting")
print(" 3_3_0_pre6 : HARVESTING:validationHarvesting")
print(" 3_4_0_pre1 : HARVESTING:validationHarvesting")
print(sep_line_short)
print("`RelValFS' maps to:")
print(" always : HARVESTING:validationHarvestingFS")
print(sep_line_short)
print("`MC' maps to:")
print(" always : HARVESTING:validationprodHarvesting")
print(sep_line_short)
print("`DQMOffline' maps to:")
print(" always : HARVESTING:dqmHarvesting")
print(sep_line)
# We're done, let's quit. (This is the same thing optparse
# does after printing the help.)
raise SystemExit
# End of option_handler_list_types.
##########
def setup_harvesting_info(self):
"""Fill our dictionary with all info needed to understand
harvesting.
This depends on the CMSSW version since at some point the
names and sequences were modified.
NOTE: There is no way (at least not that I could come up with)
to code this in a neat generic way that can be read both by
this method and by option_handler_list_types(). Please try
hard to keep these two methods in sync!
"""
assert not self.cmssw_version is None, \
"ERROR setup_harvesting() requires " \
"self.cmssw_version to be set!!!"
harvesting_info = {}
# This is the version-independent part.
harvesting_info["DQMOffline"] = {}
harvesting_info["DQMOffline"]["beamspot"] = None
harvesting_info["DQMOffline"]["eventcontent"] = None
harvesting_info["DQMOffline"]["harvesting"] = "AtRunEnd"
harvesting_info["RelVal"] = {}
harvesting_info["RelVal"]["beamspot"] = None
harvesting_info["RelVal"]["eventcontent"] = None
harvesting_info["RelVal"]["harvesting"] = "AtRunEnd"
harvesting_info["RelValFS"] = {}
harvesting_info["RelValFS"]["beamspot"] = None
harvesting_info["RelValFS"]["eventcontent"] = None
harvesting_info["RelValFS"]["harvesting"] = "AtRunEnd"
harvesting_info["MC"] = {}
harvesting_info["MC"]["beamspot"] = None
harvesting_info["MC"]["eventcontent"] = None
harvesting_info["MC"]["harvesting"] = "AtRunEnd"
# This is the version-dependent part. And I know, strictly
# speaking it's not necessary to fill in all three types since
# in a single run we'll only use one type anyway. This does
# look more readable, however, and required less thought from
# my side when I put this together.
# DEBUG DEBUG DEBUG
# Check that we understand our own version naming.
assert self.cmssw_version.startswith("CMSSW_")
# DEBUG DEBUG DEBUG end
version = self.cmssw_version[6:]
#----------
# RelVal
step_string = None
if version < "3_3_0":
step_string = "validationHarvesting"
elif version in ["3_3_0_pre1", "3_3_0_pre2",
"3_3_0_pre3", "3_3_0_pre4",
"3_3_0_pre6", "3_4_0_pre1"]:
step_string = "validationHarvesting"
else:
step_string = "validationHarvesting+dqmHarvesting"
harvesting_info["RelVal"]["step_string"] = step_string
# DEBUG DEBUG DEBUG
# Let's make sure we found something.
assert not step_string is None, \
"ERROR Could not decide a RelVal harvesting sequence " \
"for CMSSW version %s" % self.cmssw_version
# DEBUG DEBUG DEBUG end
#----------
# RelVal
step_string = "validationHarvestingFS"
harvesting_info["RelValFS"]["step_string"] = step_string
#----------
# MC
step_string = "validationprodHarvesting"
harvesting_info["MC"]["step_string"] = step_string
# DEBUG DEBUG DEBUG
# Let's make sure we found something.
assert not step_string is None, \
"ERROR Could not decide a MC harvesting " \
"sequence for CMSSW version %s" % self.cmssw_version
# DEBUG DEBUG DEBUG end
#----------
# DQMOffline
step_string = "dqmHarvesting"
harvesting_info["DQMOffline"]["step_string"] = step_string
#----------
self.harvesting_info = harvesting_info
self.logger.info("Based on the CMSSW version (%s) " \
"I decided to use the `HARVESTING:%s' " \
"sequence for %s harvesting" % \
(self.cmssw_version,
self.harvesting_info[self.harvesting_type]["step_string"],
self.harvesting_type))
# End of setup_harvesting_info.
##########
def create_castor_path_name_common(self, dataset_name):
"""Build the common part of the output path to be used on
CASTOR.
This consists of the CASTOR area base path specified by the
user and a piece depending on the data type (data vs. MC), the
harvesting type and the dataset name followed by a piece
containing the run number and event count. (See comments in
create_castor_path_name_special for details.) This method
creates the common part, without run number and event count.
"""
castor_path = self.castor_base_dir
###
# The data type: data vs. mc.
datatype = self.datasets_information[dataset_name]["datatype"]
datatype = datatype.lower()
castor_path = os.path.join(castor_path, datatype)
# The harvesting type.
harvesting_type = self.harvesting_type
harvesting_type = harvesting_type.lower()
castor_path = os.path.join(castor_path, harvesting_type)
# The CMSSW release version (only the `digits'). Note that the
# CMSSW version used here is the version used for harvesting,
# not the one from the dataset. This does make the results
# slightly harder to find. On the other hand it solves
# problems in case one re-harvests a given dataset with a
# different CMSSW version, which would lead to ambiguous path
# names. (Of course for many cases the harvesting is done with
# the same CMSSW version the dataset was created with.)
release_version = self.cmssw_version
release_version = release_version.lower(). \
replace("cmssw", ""). \
strip("_")
castor_path = os.path.join(castor_path, release_version)
# The dataset name.
dataset_name_escaped = self.escape_dataset_name(dataset_name)
castor_path = os.path.join(castor_path, dataset_name_escaped)
###
castor_path = os.path.normpath(castor_path)
# End of create_castor_path_name_common.
return castor_path
##########
def create_castor_path_name_special(self,
dataset_name, run_number,
castor_path_common):
"""Create the specialised part of the CASTOR output dir name.
NOTE: To avoid clashes with `incremental harvesting'
(re-harvesting when a dataset grows) we have to include the
event count in the path name. The underlying `problem' is that
CRAB does not overwrite existing output files so if the output
file already exists CRAB will fail to copy back the output.
NOTE: It's not possible to create different kinds of
harvesting jobs in a single call to this tool. However, in
principle it could be possible to create both data and MC jobs
in a single go.
NOTE: The number of events used in the path name is the
_total_ number of events in the dataset/run at the time of
harvesting. If we're doing partial harvesting the final
results will reflect lower statistics. This is a) the easiest
to code and b) the least likely to lead to confusion if
someone ever decides to swap/copy around file blocks between
sites.
"""
castor_path = castor_path_common
###
# The run number part.
castor_path = os.path.join(castor_path, "run_%d" % run_number)
###
# The event count (i.e. the number of events we currently see
# for this dataset).
#nevents = self.datasets_information[dataset_name] \
# ["num_events"][run_number]
castor_path = os.path.join(castor_path, "nevents")
###
castor_path = os.path.normpath(castor_path)
# End of create_castor_path_name_special.
return castor_path
##########
    def create_and_check_castor_dirs(self):
        """Make sure all required CASTOR output dirs exist.

        This checks the CASTOR base dir specified by the user as well
        as all the subdirs required by the current set of jobs.
        """

        self.logger.info("Checking (and if necessary creating) CASTOR " \
                         "output area(s)...")

        # Call the real checker method for the base dir.
        self.create_and_check_castor_dir(self.castor_base_dir)
        # Now call the checker for all (unique) subdirs.
        # Collect the CASTOR path of every (dataset, run) pair we plan
        # to process.
        castor_dirs = []
        for (dataset_name, runs) in six.iteritems(self.datasets_to_use):
            for run in runs:
                castor_dirs.append(self.datasets_information[dataset_name] \
                                   ["castor_path"][run])
        # Several runs can share a directory; check each one only once.
        castor_dirs_unique = sorted(set(castor_dirs))
        # This can take some time. E.g. CRAFT08 has > 300 runs, each
        # of which will get a new directory. So we show some (rough)
        # info in between.
        ndirs = len(castor_dirs_unique)
        step = max(ndirs / 10, 1)
        for (i, castor_dir) in enumerate(castor_dirs_unique):
            # Progress marker at (roughly) every 10% and at the end.
            if (i + 1) % step == 0 or \
                   (i + 1) == ndirs:
                self.logger.info("  %d/%d" % \
                                 (i + 1, ndirs))
            self.create_and_check_castor_dir(castor_dir)

            # Now check if the directory is empty. If (an old version
            # of) the output file already exists CRAB will run new
            # jobs but never copy the results back. We assume the user
            # knows what they are doing and only issue a warning in
            # case the directory is not empty.
            self.logger.debug("Checking if path `%s' is empty" % \
                              castor_dir)
            cmd = "rfdir %s" % castor_dir
            (status, output) = commands.getstatusoutput(cmd)
            if status != 0:
                # `rfdir' failed even though the directory was just
                # checked/created above -- something is badly wrong.
                msg = "Could not access directory `%s'" \
                      " !!! This is bad since I should have just" \
                      " created it !!!" % castor_dir
                self.logger.fatal(msg)
                raise Error(msg)
            if len(output) > 0:
                # `rfdir' listed something: the directory is not empty.
                self.logger.warning("Output directory `%s' is not empty:" \
                                    " new jobs will fail to" \
                                    " copy back output" % \
                                    castor_dir)

        # End of create_and_check_castor_dirs.
##########
    def create_and_check_castor_dir(self, castor_dir):
        """Check existence of the give CASTOR dir, if necessary create
        it.

        Some special care has to be taken with several things like
        setting the correct permissions such that CRAB can store the
        output results. Of course this means that things like
        /castor/cern.ch/ and user/j/ have to be recognised and treated
        properly.

        NOTE: Only CERN CASTOR area (/castor/cern.ch/) supported for
        the moment.

        NOTE: This method uses some slightly tricky caching to make
        sure we don't keep over and over checking the same base paths.
        """

        ###
        # Local helper function to fully split a path into pieces.
        def split_completely(path):
            (parent_path, name) = os.path.split(path)
            if name == "":
                return (parent_path, )
            else:
                return split_completely(parent_path) + (name, )

        ###
        # Local helper function to check rfio (i.e. CASTOR)
        # directories.
        def extract_permissions(rfstat_output):
            """Parse the output from rfstat and return the
            5-digit permissions string."""
            # NOTE(review): `permissions_line' filters on the
            # enclosing scope's `output' (not `rfstat_output') and is
            # never used afterwards -- looks like leftover code;
            # confirm before touching.
            permissions_line = [i for i in output.split("\n") \
                                if i.lower().find("protection") > -1]
            # The permissions appear as a 5-digit octal number in
            # parentheses somewhere in the rfstat output.
            regexp = re.compile(".*\(([0123456789]{5})\).*")
            match = regexp.search(rfstat_output)
            if not match or len(match.groups()) != 1:
                msg = "Could not extract permissions " \
                      "from output: %s" % rfstat_output
                self.logger.fatal(msg)
                raise Error(msg)
            permissions = match.group(1)
            # End of extract_permissions.
            return permissions

        ###
        # These are the pieces of CASTOR directories that we do not
        # want to touch when modifying permissions.
        # NOTE: This is all a bit involved, basically driven by the
        # fact that one wants to treat the `j' directory of
        # `/castor/cern.ch/user/j/jhegeman/' specially.
        # BUG BUG BUG
        # This should be simplified, for example by comparing to the
        # CASTOR prefix or something like that.
        # BUG BUG BUG end
        # Key is a relative index into the path pieces: 0 means the
        # piece itself, -1 means `when the preceding piece is one of
        # these' (i.e. the single-letter dir under user/ or store/).
        castor_paths_dont_touch = {
            0: ["/", "castor", "cern.ch", "cms", "store", "temp",
                "dqm", "offline", "user"],
            -1: ["user", "store"]
            }

        self.logger.debug("Checking CASTOR path `%s'" % castor_dir)

        ###
        # First we take the full CASTOR path apart.
        castor_path_pieces = split_completely(castor_dir)

        # Now slowly rebuild the CASTOR path and see if a) all
        # permissions are set correctly and b) the final destination
        # exists.
        path = ""
        check_sizes = sorted(castor_paths_dont_touch.keys())
        len_castor_path_pieces = len(castor_path_pieces)
        for piece_index in range (len_castor_path_pieces):
            skip_this_path_piece = False
            piece = castor_path_pieces[piece_index]
##            self.logger.debug("Checking CASTOR path piece `%s'" % \
##                              piece)
            for check_size in check_sizes:
                # Do we need to do anything with this?
                if (piece_index + check_size) > -1:
##                    self.logger.debug("Checking `%s' against `%s'" % \
##                                      (castor_path_pieces[piece_index + check_size],
##                                       castor_paths_dont_touch[check_size]))
                    if castor_path_pieces[piece_index + check_size] in castor_paths_dont_touch[check_size]:
##                        self.logger.debug("  skipping")
                        skip_this_path_piece = True
##                    else:
##                        # Piece not in the list, fine.
##                        self.logger.debug("  accepting")
            # Add piece to the path we're building.
##            self.logger.debug("!!! Skip path piece `%s'? %s" % \
##                              (piece, str(skip_this_path_piece)))
##            self.logger.debug("Adding piece to path...")
            path = os.path.join(path, piece)
##            self.logger.debug("Path is now `%s'" % \
##                              path)

            # Hmmmm, only at this point can we do some caching. Not
            # ideal, but okay.
            try:
                if path in self.castor_path_checks_cache:
                    continue
            except AttributeError:
                # This only happens the first time around.
                self.castor_path_checks_cache = []
            self.castor_path_checks_cache.append(path)

            # Now, unless we're supposed to skip this piece of the
            # path, let's make sure it exists and set the permissions
            # correctly for use by CRAB. This means that:
            # - the final output directory should (at least) have
            #   permissions 775
            # - all directories above that should (at least) have
            #   permissions 755.
            # BUT: Even though the above permissions are the usual
            # ones to used when setting up CASTOR areas for grid job
            # output, there is one caveat in case multiple people are
            # working in the same CASTOR area. If user X creates
            # /a/b/c/ and user Y wants to create /a/b/d/ he/she does
            # not have sufficient rights. So: we set all dir
            # permissions to 775 to avoid this.
            if not skip_this_path_piece:

                # Ok, first thing: let's make sure this directory
                # exists.
                # NOTE: The nice complication is of course that the
                # usual os.path.isdir() etc. methods don't work for an
                # rfio filesystem. So we call rfstat and interpret an
                # error as meaning that the path does not exist.
                self.logger.debug("Checking if path `%s' exists" % \
                                  path)
                cmd = "rfstat %s" % path
                (status, output) = commands.getstatusoutput(cmd)
                if status != 0:
                    # Path does not exist, let's try and create it.
                    self.logger.debug("Creating path `%s'" % path)
                    cmd = "nsmkdir -m 775 %s" % path
                    (status, output) = commands.getstatusoutput(cmd)
                    if status != 0:
                        msg = "Could not create directory `%s'" % path
                        self.logger.fatal(msg)
                        raise Error(msg)
                    # Re-run rfstat so `output' below describes the
                    # freshly created directory.
                    cmd = "rfstat %s" % path
                    (status, output) = commands.getstatusoutput(cmd)
                # Now check that it looks like a directory. If I'm not
                # mistaken one can deduce this from the fact that the
                # (octal) permissions string starts with `40' (instead
                # of `100').
                permissions = extract_permissions(output)
                if not permissions.startswith("40"):
                    msg = "Path `%s' is not a directory(?)" % path
                    self.logger.fatal(msg)
                    raise Error(msg)

                # Figure out the current permissions for this
                # (partial) path.
                self.logger.debug("Checking permissions for path `%s'" % path)
                cmd = "rfstat %s" % path
                (status, output) = commands.getstatusoutput(cmd)
                if status != 0:
                    msg = "Could not obtain permissions for directory `%s'" % \
                          path
                    self.logger.fatal(msg)
                    raise Error(msg)
                # Take the last three digits of the permissions.
                permissions = extract_permissions(output)[-3:]

                # Now if necessary fix permissions.
                # NOTE: Be careful never to `downgrade' permissions.
                if piece_index == (len_castor_path_pieces - 1):
                    # This means we're looking at the final
                    # destination directory.
                    permissions_target = "775"
                else:
                    # `Only' an intermediate directory.
                    permissions_target = "775"

                # Compare permissions, digit by digit, keeping the
                # larger of current vs. target for each position so we
                # never downgrade.
                permissions_new = []
                for (i, j) in zip(permissions, permissions_target):
                    permissions_new.append(str(max(int(i), int(j))))
                permissions_new = "".join(permissions_new)
                self.logger.debug("  current permissions: %s" % \
                                  permissions)
                self.logger.debug("  target permissions : %s" % \
                                  permissions_target)
                if permissions_new != permissions:
                    # We have to modify the permissions.
                    self.logger.debug("Changing permissions of `%s' " \
                                      "to %s (were %s)" % \
                                      (path, permissions_new, permissions))
                    cmd = "rfchmod %s %s" % (permissions_new, path)
                    (status, output) = commands.getstatusoutput(cmd)
                    if status != 0:
                        msg = "Could not change permissions for path `%s' " \
                              "to %s" % (path, permissions_new)
                        self.logger.fatal(msg)
                        raise Error(msg)

                self.logger.debug("  Permissions ok (%s)" % permissions_new)

        # End of create_and_check_castor_dir.
##########
    def pick_a_site(self, sites, cmssw_version):
        """Select a storage element from `sites' on which to run.

        T1 sites (and the CAF) are preferred over other sites, subject
        to access restrictions (`self.non_t1access',
        `self.caf_access') and an optional `self.preferred_site'.
        Each candidate is checked -- via an external `lcg-info' call,
        with results cached in `self.sites_and_versions_cache' -- to
        make sure it hosts the requested CMSSW version.

        NOTE: `sites' is modified in place: forbidden and rejected
        sites are removed from the caller's list.

        Returns the chosen SE name, or
        `self.no_matching_site_found_str' if no suitable site exists
        (in which case `self.all_sites_found' is also set to False).

        """

        # Create list of forbidden sites
        sites_forbidden = []

        if (self.preferred_site == "CAF") or (self.preferred_site == "caf.cern.ch"):
            self.caf_access = True

        if self.caf_access == False:
            sites_forbidden.append("caf.cern.ch")

        # These are the T1 sites. These are only forbidden if we're
        # running in non-T1 mode.
        # Source:
        # https://cmsweb.cern.ch/sitedb/sitelist/?naming_scheme=ce
        # Hard-coded, yes. Not nice, no.
        all_t1 = [
            "srm-cms.cern.ch",
            "ccsrm.in2p3.fr",
            "cmssrm-fzk.gridka.de",
            "cmssrm.fnal.gov",
            "gridka-dCache.fzk.de",
            "srm-cms.gridpp.rl.ac.uk",
            "srm.grid.sinica.edu.tw",
            "srm2.grid.sinica.edu.tw",
            "srmcms.pic.es",
            "storm-fe-cms.cr.cnaf.infn.it"
            ]

        # Mapping of (country) shortcut codes to full SE names, used
        # to expand an abbreviated `--site' command line option.
        country_codes = {
            "CAF" : "caf.cern.ch",
            "CH" : "srm-cms.cern.ch",
            "FR" : "ccsrm.in2p3.fr",
            "DE" : "cmssrm-fzk.gridka.de",
            "GOV" : "cmssrm.fnal.gov",
            "DE2" : "gridka-dCache.fzk.de",
            "UK" : "srm-cms.gridpp.rl.ac.uk",
            "TW" : "srm.grid.sinica.edu.tw",
            "TW2" : "srm2.grid.sinica.edu.tw",
            "ES" : "srmcms.pic.es",
            "IT" : "storm-fe-cms.cr.cnaf.infn.it"
            }

        if self.non_t1access:
            sites_forbidden.extend(all_t1)

        # NOTE: This removes the forbidden sites from the caller's
        # list as well (in-place modification).
        for site in sites_forbidden:
            if site in sites:
                sites.remove(site)

        # Expand a (country) shortcut code into the full SE name.
        if self.preferred_site in country_codes:
            self.preferred_site = country_codes[self.preferred_site]

        # If a preferred site was given, restrict the candidates to
        # it (or to nothing if it does not host this dataset).
        if self.preferred_site != "no preference":
            if self.preferred_site in sites:
                sites = [self.preferred_site]
            else:
                sites= []

        #print sites

        # Looks like we have to do some caching here, otherwise things
        # become waaaay toooo sloooooow. So that's what the
        # sites_and_versions_cache does.

        # NOTE: Keep this set to None!
        site_name = None
        cmd = None
        while len(sites) > 0 and \
              site_name is None:

            # Create list of t1_sites
            t1_sites = []
            for site in sites:
                if site in all_t1:
                    t1_sites.append(site)
                if site == "caf.cern.ch":
                    t1_sites.append(site)

            # If avilable pick preferred site
            #if self.preferred_site in sites:
            #  se_name = self.preferred_site
            # Else, if available pick t1 site

            if len(t1_sites) > 0:
                se_name = choice(t1_sites)
            # Else pick any site
            else:
                se_name = choice(sites)

            # But check that it hosts the CMSSW version we want.

            if se_name in self.sites_and_versions_cache and \
                   cmssw_version in self.sites_and_versions_cache[se_name]:
                # Cached answer available: accept or reject without
                # re-running the (slow) external query.
                if self.sites_and_versions_cache[se_name][cmssw_version]:
                    site_name = se_name
                    break
                else:
                    self.logger.debug("  --> rejecting site `%s'" % se_name)
                    sites.remove(se_name)

            else:
                self.logger.info("Checking if site `%s' " \
                                 "has CMSSW version `%s'" % \
                                 (se_name, cmssw_version))
                self.sites_and_versions_cache[se_name] = {}

                # TODO TODO TODO
                # Test for SCRAM architecture removed as per request
                # from Andreas.
                # scram_arch = os.getenv("SCRAM_ARCH")
                # cmd = "lcg-info --list-ce " \
                #       "--query '" \
                #       "Tag=VO-cms-%s," \
                #       "Tag=VO-cms-%s," \
                #       "CEStatus=Production," \
                #       "CloseSE=%s'" % \
                #       (cmssw_version, scram_arch, se_name)
                # TODO TODO TODO end

                cmd = "lcg-info --list-ce " \
                      "--query '" \
                      "Tag=VO-cms-%s," \
                      "CEStatus=Production," \
                      "CloseSE=%s'" % \
                      (cmssw_version, se_name)
                (status, output) = commands.getstatusoutput(cmd)
                if status != 0:
                    self.logger.error("Could not check site information " \
                                      "for site `%s'" % se_name)
                else:
                    # Non-empty output means at least one production CE
                    # close to this SE advertises the release tag. The
                    # CAF is accepted unconditionally.
                    if (len(output) > 0) or (se_name == "caf.cern.ch"):
                        self.sites_and_versions_cache[se_name][cmssw_version] = True
                        site_name = se_name
                        break
                    else:
                        self.sites_and_versions_cache[se_name][cmssw_version] = False
                        self.logger.debug("  --> rejecting site `%s'" % se_name)
                        sites.remove(se_name)

        # NOTE(review): at this point `site_name' is either None (no
        # site accepted) or a real SE name; it is only assigned
        # `no_matching_site_found_str' further down, so this branch
        # looks unreachable -- confirm whether `is None' was intended.
        if site_name is self.no_matching_site_found_str:
            self.logger.error("  --> no matching site found")
            self.logger.error("  --> Your release or SCRAM " \
                              "architecture may not be available" \
                              "anywhere on the (LCG) grid.")
            if not cmd is None:
                self.logger.debug("  (command used: `%s')" % cmd)
        else:
            self.logger.debug("  --> selected site `%s'" % site_name)

        # Return something more descriptive (than `None') in case we
        # found nothing.
        if site_name is None:
            site_name = self.no_matching_site_found_str
            # Keep track of our global flag signifying that this
            # happened.
            self.all_sites_found = False

        # End of pick_a_site.
        return site_name
##########
    def parse_cmd_line_options(self):
        """Set up the optparse-based command line parser and parse
        `self.cmd_line_opts'.

        All options act through callbacks (`self.option_handler_*'),
        so most of the work happens as a side effect of
        `parser.parse_args()'. The parsed results end up in
        `self.options' and `self.args'. If no options were given at
        all, `--help' is injected so the user sees the usage text.

        """

        # Set up the command line parser. Note that we fix up the help
        # formatter so that we can add some text pointing people to
        # the Twiki etc.
        parser = optparse.OptionParser(version="%s %s" % \
                                       ("%prog", self.version),
                                       formatter=CMSHarvesterHelpFormatter())

        self.option_parser = parser

        # The debug switch.
        parser.add_option("-d", "--debug",
                          help="Switch on debug mode",
                          action="callback",
                          callback=self.option_handler_debug)

        # The quiet switch.
        parser.add_option("-q", "--quiet",
                          help="Be less verbose",
                          action="callback",
                          callback=self.option_handler_quiet)

        # The force switch. If this switch is used sanity checks are
        # performed but failures do not lead to aborts. Use with care.
        parser.add_option("", "--force",
                          help="Force mode. Do not abort on sanity check "
                          "failures",
                          action="callback",
                          callback=self.option_handler_force)

        # Choose between the different kinds of harvesting.
        parser.add_option("", "--harvesting_type",
                          help="Harvesting type: %s" % \
                          ", ".join(self.harvesting_types),
                          action="callback",
                          callback=self.option_handler_harvesting_type,
                          type="string",
                          metavar="HARVESTING_TYPE")

        # Choose between single-step and two-step mode.
        parser.add_option("", "--harvesting_mode",
                          help="Harvesting mode: %s (default = %s)" % \
                          (", ".join(self.harvesting_modes),
                          self.harvesting_mode_default),
                          action="callback",
                          callback=self.option_handler_harvesting_mode,
                          type="string",
                          metavar="HARVESTING_MODE")

        # Override the GlobalTag chosen by the cmsHarvester.
        parser.add_option("", "--globaltag",
                          help="GlobalTag to use. Default is the ones " \
                          "the dataset was created with for MC, for data" \
                          "a GlobalTag has to be specified.",
                          action="callback",
                          callback=self.option_handler_globaltag,
                          type="string",
                          metavar="GLOBALTAG")

        # Allow switching off of reference histograms.
        parser.add_option("", "--no-ref-hists",
                          help="Don't use any reference histograms",
                          action="callback",
                          callback=self.option_handler_no_ref_hists)

        # Allow the default (i.e. the one that should be used)
        # Frontier connection to be overridden.
        parser.add_option("", "--frontier-connection",
                          help="Use this Frontier connection to find " \
                          "GlobalTags and LocalTags (for reference " \
                          "histograms).\nPlease only use this for " \
                          "testing.",
                          action="callback",
                          callback=self.option_handler_frontier_connection,
                          type="string",
                          metavar="FRONTIER")

        # Similar to the above but specific to the Frontier connection
        # to be used for the GlobalTag.
        parser.add_option("", "--frontier-connection-for-globaltag",
                          help="Use this Frontier connection to find " \
                          "GlobalTags.\nPlease only use this for " \
                          "testing.",
                          action="callback",
                          callback=self.option_handler_frontier_connection,
                          type="string",
                          metavar="FRONTIER")

        # Similar to the above but specific to the Frontier connection
        # to be used for the reference histograms.
        parser.add_option("", "--frontier-connection-for-refhists",
                          help="Use this Frontier connection to find " \
                          "LocalTags (for reference " \
                          "histograms).\nPlease only use this for " \
                          "testing.",
                          action="callback",
                          callback=self.option_handler_frontier_connection,
                          type="string",
                          metavar="FRONTIER")

        # Option to specify the name (or a regexp) of the dataset(s)
        # to be used.
        parser.add_option("", "--dataset",
                          help="Name (or regexp) of dataset(s) to process",
                          action="callback",
                          #callback=self.option_handler_dataset_name,
                          callback=self.option_handler_input_spec,
                          type="string",
                          #dest="self.input_name",
                          metavar="DATASET")

        # Option to specify the name (or a regexp) of the dataset(s)
        # to be ignored.
        parser.add_option("", "--dataset-ignore",
                          help="Name (or regexp) of dataset(s) to ignore",
                          action="callback",
                          callback=self.option_handler_input_spec,
                          type="string",
                          metavar="DATASET-IGNORE")

        # Option to specify the name (or a regexp) of the run(s)
        # to be used.
        parser.add_option("", "--runs",
                          help="Run number(s) to process",
                          action="callback",
                          callback=self.option_handler_input_spec,
                          type="string",
                          metavar="RUNS")

        # Option to specify the name (or a regexp) of the run(s)
        # to be ignored.
        parser.add_option("", "--runs-ignore",
                          help="Run number(s) to ignore",
                          action="callback",
                          callback=self.option_handler_input_spec,
                          type="string",
                          metavar="RUNS-IGNORE")

        # Option to specify a file containing a list of dataset names
        # (or regexps) to be used.
        parser.add_option("", "--datasetfile",
                          help="File containing list of dataset names " \
                          "(or regexps) to process",
                          action="callback",
                          #callback=self.option_handler_listfile_name,
                          callback=self.option_handler_input_spec,
                          type="string",
                          #dest="self.input_name",
                          metavar="DATASETFILE")

        # Option to specify a file containing a list of dataset names
        # (or regexps) to be ignored.
        parser.add_option("", "--datasetfile-ignore",
                          help="File containing list of dataset names " \
                          "(or regexps) to ignore",
                          action="callback",
                          callback=self.option_handler_input_spec,
                          type="string",
                          metavar="DATASETFILE-IGNORE")

        # Option to specify a file containing a list of runs to be
        # used.
        parser.add_option("", "--runslistfile",
                          help="File containing list of run numbers " \
                          "to process",
                          action="callback",
                          callback=self.option_handler_input_spec,
                          type="string",
                          metavar="RUNSLISTFILE")

        # Option to specify a file containing a list of runs
        # to be ignored.
        parser.add_option("", "--runslistfile-ignore",
                          help="File containing list of run numbers " \
                          "to ignore",
                          action="callback",
                          callback=self.option_handler_input_spec,
                          type="string",
                          metavar="RUNSLISTFILE-IGNORE")

        # Option to specify a Jsonfile contaning a list of runs
        # to be used.
        parser.add_option("", "--Jsonrunfile",
                          help="Jsonfile containing dictionary of run/lumisections pairs. " \
                          "All lumisections of runs contained in dictionary are processed.",
                          action="callback",
                          callback=self.option_handler_input_Jsonrunfile,
                          type="string",
                          metavar="JSONRUNFILE")

        # Option to specify a Jsonfile contaning a dictionary of run/lumisections pairs
        # to be used.
        parser.add_option("", "--Jsonfile",
                          help="Jsonfile containing dictionary of run/lumisections pairs. " \
                          "Only specified lumisections of runs contained in dictionary are processed.",
                          action="callback",
                          callback=self.option_handler_input_Jsonfile,
                          type="string",
                          metavar="JSONFILE")

        # Option to specify a ToDo file contaning a list of runs
        # to be used.
        parser.add_option("", "--todo-file",
                          help="Todo file containing a list of runs to process.",
                          action="callback",
                          callback=self.option_handler_input_todofile,
                          type="string",
                          metavar="TODO-FILE")

        # Option to specify which file to use for the dataset name to
        # reference histogram name mappings.
        parser.add_option("", "--refhistmappingfile",
                          help="File to be use for the reference " \
                          "histogram mappings. Default: `%s'." % \
                          self.ref_hist_mappings_file_name_default,
                          action="callback",
                          callback=self.option_handler_ref_hist_mapping_file,
                          type="string",
                          metavar="REFHISTMAPPING-FILE")

        # Specify the place in CASTOR where the output should go.
        # NOTE: Only output to CASTOR is supported for the moment,
        # since the central DQM results place is on CASTOR anyway.
        parser.add_option("", "--castordir",
                          help="Place on CASTOR to store results. " \
                          "Default: `%s'." % \
                          self.castor_base_dir_default,
                          action="callback",
                          callback=self.option_handler_castor_dir,
                          type="string",
                          metavar="CASTORDIR")

        # Use this to try and create jobs that will run without
        # special `t1access' role.
        parser.add_option("", "--no-t1access",
                          help="Try to create jobs that will run " \
                          "without special `t1access' role",
                          action="callback",
                          callback=self.option_handler_no_t1access)

        # Use this to create jobs that may run on CAF
        parser.add_option("", "--caf-access",
                          help="Crab jobs may run " \
                          "on CAF",
                          action="callback",
                          callback=self.option_handler_caf_access)

        # set process.dqmSaver.saveByLumiSection=1 in harvesting cfg file
        parser.add_option("", "--saveByLumiSection",
                          help="set saveByLumiSection=1 in harvesting cfg file",
                          action="callback",
                          callback=self.option_handler_saveByLumiSection)

        # Use this to enable automatic creation and submission of crab jobs
        parser.add_option("", "--automatic-crab-submission",
                          help="Crab jobs are created and " \
                          "submitted automatically",
                          action="callback",
                          callback=self.option_handler_crab_submission)

        # Option to set the max number of sites, each
        #job is submitted to
        parser.add_option("", "--max-sites",
                          help="Max. number of sites each job is submitted to",
                          action="callback",
                          callback=self.option_handler_sites,
                          type="int")

        # Option to set the preferred site
        parser.add_option("", "--site",
                          help="Crab jobs are submitted to specified site. T1 sites may be shortened by the following (country) codes: \
                          srm-cms.cern.ch : CH \
                          ccsrm.in2p3.fr : FR \
                          cmssrm-fzk.gridka.de : DE \
                          cmssrm.fnal.gov : GOV \
                          gridka-dCache.fzk.de : DE2 \
                          rm-cms.gridpp.rl.ac.uk : UK \
                          srm.grid.sinica.edu.tw : TW \
                          srm2.grid.sinica.edu.tw : TW2 \
                          srmcms.pic.es : ES \
                          storm-fe-cms.cr.cnaf.infn.it : IT",
                          action="callback",
                          callback=self.option_handler_preferred_site,
                          type="string")

        # This is the command line flag to list all harvesting
        # type-to-sequence mappings.
        parser.add_option("-l", "--list",
                          help="List all harvesting types and their" \
                          "corresponding sequence names",
                          action="callback",
                          callback=self.option_handler_list_types)

        # If nothing was specified: tell the user how to do things the
        # next time and exit.
        # NOTE: We just use the OptParse standard way of doing this by
        # acting as if a '--help' was specified.
        if len(self.cmd_line_opts) < 1:
            self.cmd_line_opts = ["--help"]

        # Some trickery with the options. Why? Well, since these
        # options change the output level immediately from the option
        # handlers, the results differ depending on where they are on
        # the command line. Let's just make sure they are at the
        # front.
        # NOTE: Not very efficient or sophisticated, but it works and
        # it only has to be done once anyway.
        for i in ["-d", "--debug",
                  "-q", "--quiet"]:
            if i in self.cmd_line_opts:
                self.cmd_line_opts.remove(i)
                self.cmd_line_opts.insert(0, i)

        # Everything is set up, now parse what we were given.
        parser.set_defaults()
        (self.options, self.args) = parser.parse_args(self.cmd_line_opts)

        # End of parse_cmd_line_options.
##########
    def check_input_status(self):
        """Check completeness and correctness of input information.

        Check that all required information has been specified and
        that, at least as far as can be easily checked, it makes
        sense.

        NOTE: This is also where any default values are applied.

        Raises Usage for user-fixable problems (missing harvesting
        type, missing input spec, non-CERN CASTOR area) and Error for
        the currently disabled two-step mode.

        """

        self.logger.info("Checking completeness/correctness of input...")

        # The cmsHarvester does not take (i.e. understand) any
        # arguments so there should not be any.
        if len(self.args) > 0:
            msg = "Sorry but I don't understand `%s'" % \
                  (" ".join(self.args))
            self.logger.fatal(msg)
            raise Usage(msg)

        # BUG BUG BUG
        # While we wait for some bugs left and right to get fixed, we
        # disable two-step.
        if self.harvesting_mode == "two-step":
            msg = "--------------------\n" \
                  "  Sorry, but for the moment (well, till it works)" \
                  "  the two-step mode has been disabled.\n" \
                  "--------------------\n"
            self.logger.fatal(msg)
            raise Error(msg)
        # BUG BUG BUG end

        # We need a harvesting method to be specified
        if self.harvesting_type is None:
            msg = "Please specify a harvesting type"
            self.logger.fatal(msg)
            raise Usage(msg)
        # as well as a harvesting mode. (Unlike the type, the mode has
        # a default to fall back on.)
        if self.harvesting_mode is None:
            self.harvesting_mode = self.harvesting_mode_default
            msg = "No harvesting mode specified --> using default `%s'" % \
                  self.harvesting_mode
            self.logger.warning(msg)
            #raise Usage(msg)

        ###

        # We need an input method so we can find the dataset name(s).
        if self.input_method["datasets"]["use"] is None:
            msg = "Please specify an input dataset name " \
                  "or a list file name"
            self.logger.fatal(msg)
            raise Usage(msg)

        # DEBUG DEBUG DEBUG
        # If we get here, we should also have an input name.
        assert not self.input_name["datasets"]["use"] is None
        # DEBUG DEBUG DEBUG end

        ###

        # The same holds for the reference histogram mapping file (if
        # we're using references).
        if self.use_ref_hists:
            if self.ref_hist_mappings_file_name is None:
                self.ref_hist_mappings_file_name = self.ref_hist_mappings_file_name_default
                msg = "No reference histogram mapping file specified --> " \
                      "using default `%s'" % \
                      self.ref_hist_mappings_file_name
                self.logger.warning(msg)

        ###

        # We need to know where to put the stuff (okay, the results)
        # on CASTOR.
        if self.castor_base_dir is None:
            self.castor_base_dir = self.castor_base_dir_default
            msg = "No CASTOR area specified -> using default `%s'" % \
                  self.castor_base_dir
            self.logger.warning(msg)
            #raise Usage(msg)

        # Only the CERN CASTOR area is supported.
        if not self.castor_base_dir.startswith(self.castor_prefix):
            msg = "CASTOR area does not start with `%s'" % \
                  self.castor_prefix
            self.logger.fatal(msg)
            # Extra hint if the path looks like a non-CERN CASTOR area.
            if self.castor_base_dir.find("castor") > -1 and \
               not self.castor_base_dir.find("cern.ch") > -1:
                self.logger.fatal("Only CERN CASTOR is supported")
            raise Usage(msg)

        ###

        # TODO TODO TODO
        # This should be removed in the future, once I find out how to
        # get the config file used to create a given dataset from DBS.

        # For data we need to have a GlobalTag. (For MC we can figure
        # it out by ourselves.)
        if self.globaltag is None:
            self.logger.warning("No GlobalTag specified. This means I cannot")
            self.logger.warning("run on data, only on MC.")
            self.logger.warning("I will skip all data datasets.")

        # TODO TODO TODO end

        # Make sure the GlobalTag ends with `::All'.
        if not self.globaltag is None:
            if not self.globaltag.endswith("::All"):
                self.logger.warning("Specified GlobalTag `%s' does " \
                                    "not end in `::All' --> " \
                                    "appending this missing piece" % \
                                    self.globaltag)
                self.globaltag = "%s::All" % self.globaltag

        ###

        # Dump some info about the Frontier connections used.
        for (key, value) in six.iteritems(self.frontier_connection_name):
            frontier_type_str = "unknown"
            if key == "globaltag":
                frontier_type_str = "the GlobalTag"
            elif key == "refhists":
                frontier_type_str = "the reference histograms"
            non_str = None
            if self.frontier_connection_overridden[key] == True:
                non_str = "non-"
            else:
                non_str = ""
            self.logger.info("Using %sdefault Frontier " \
                             "connection for %s: `%s'" % \
                             (non_str, frontier_type_str, value))

        ###

        # End of check_input_status.
##########
def check_cmssw(self):
"""Check if CMSSW is setup.
"""
# Try to access the CMSSW_VERSION environment variable. If
# it's something useful we consider CMSSW to be set up
# properly. Otherwise we raise an error.
cmssw_version = os.getenv("CMSSW_VERSION")
if cmssw_version is None:
self.logger.fatal("It seems CMSSW is not setup...")
self.logger.fatal("($CMSSW_VERSION is empty)")
raise Error("ERROR: CMSSW needs to be setup first!")
self.cmssw_version = cmssw_version
self.logger.info("Found CMSSW version %s properly set up" % \
self.cmssw_version)
# End of check_cmsssw.
return True
##########
def check_dbs(self):
"""Check if DBS is setup.
"""
# Try to access the DBSCMD_HOME environment variable. If this
# looks useful we consider DBS to be set up
# properly. Otherwise we raise an error.
dbs_home = os.getenv("DBSCMD_HOME")
if dbs_home is None:
self.logger.fatal("It seems DBS is not setup...")
self.logger.fatal(" $DBSCMD_HOME is empty")
raise Error("ERROR: DBS needs to be setup first!")
## # Now we try to do a very simple DBS search. If that works
## # instead of giving us the `Unsupported API call' crap, we
## # should be good to go.
## # NOTE: Not ideal, I know, but it reduces the amount of
## # complaints I get...
## cmd = "dbs search --query=\"find dataset where dataset = impossible\""
## (status, output) = commands.getstatusoutput(cmd)
## pdb.set_trace()
## if status != 0 or \
## output.lower().find("unsupported api call") > -1:
## self.logger.fatal("It seems DBS is not setup...")
## self.logger.fatal(" %s returns crap:" % cmd)
## for line in output.split("\n"):
## self.logger.fatal(" %s" % line)
## raise Error("ERROR: DBS needs to be setup first!")
self.logger.debug("Found DBS properly set up")
# End of check_dbs.
return True
##########
def setup_dbs(self):
"""Setup the Python side of DBS.
For more information see the DBS Python API documentation:
https://twiki.cern.ch/twiki/bin/view/CMS/DBSApiDocumentation
"""
try:
args={}
args["url"]= "http://cmsdbsprod.cern.ch/cms_dbs_prod_global/" \
"servlet/DBSServlet"
api = DbsApi(args)
self.dbs_api = api
except DBSAPI.dbsApiException.DbsApiException as ex:
self.logger.fatal("Caught DBS API exception %s: %s " % \
(ex.getClassName(), ex.getErrorMessage()))
if ex.getErrorCode() not in (None, ""):
logger.debug("DBS exception error code: ", ex.getErrorCode())
raise
# End of setup_dbs.
##########
def dbs_resolve_dataset_name(self, dataset_name):
"""Use DBS to resolve a wildcarded dataset name.
"""
# DEBUG DEBUG DEBUG
# If we get here DBS should have been set up already.
assert not self.dbs_api is None
# DEBUG DEBUG DEBUG end
# Some minor checking to make sure that whatever we've been
# given as dataset name actually sounds like a dataset name.
if not (dataset_name.startswith("/") and \
dataset_name.endswith("RECO")):
self.logger.warning("Dataset name `%s' does not sound " \
"like a valid dataset name!" % \
dataset_name)
#----------
api = self.dbs_api
dbs_query = "find dataset where dataset like %s " \
"and dataset.status = VALID" % \
dataset_name
try:
api_result = api.executeQuery(dbs_query)
except DBSAPI.dbsApiException.DbsApiException:
msg = "ERROR: Could not execute DBS query"
self.logger.fatal(msg)
raise Error(msg)
# Setup parsing.
handler = DBSXMLHandler(["dataset"])
parser = xml.sax.make_parser()
parser.setContentHandler(handler)
# Parse.
try:
xml.sax.parseString(api_result, handler)
except SAXParseException:
msg = "ERROR: Could not parse DBS server output"
self.logger.fatal(msg)
raise Error(msg)
# DEBUG DEBUG DEBUG
assert(handler.check_results_validity()), "ERROR The DBSXMLHandler screwed something up!"
# DEBUG DEBUG DEBUG end
# Extract the results.
datasets = handler.results.values()[0]
# End of dbs_resolve_dataset_name.
return datasets
##########
def dbs_resolve_cmssw_version(self, dataset_name):
"""Ask DBS for the CMSSW version used to create this dataset.
"""
# DEBUG DEBUG DEBUG
# If we get here DBS should have been set up already.
assert not self.dbs_api is None
# DEBUG DEBUG DEBUG end
api = self.dbs_api
dbs_query = "find algo.version where dataset = %s " \
"and dataset.status = VALID" % \
dataset_name
try:
api_result = api.executeQuery(dbs_query)
except DBSAPI.dbsApiException.DbsApiException:
msg = "ERROR: Could not execute DBS query"
self.logger.fatal(msg)
raise Error(msg)
handler = DBSXMLHandler(["algo.version"])
parser = xml.sax.make_parser()
parser.setContentHandler(handler)
try:
xml.sax.parseString(api_result, handler)
except SAXParseException:
msg = "ERROR: Could not parse DBS server output"
self.logger.fatal(msg)
raise Error(msg)
# DEBUG DEBUG DEBUG
assert(handler.check_results_validity()), "ERROR The DBSXMLHandler screwed something up!"
# DEBUG DEBUG DEBUG end
cmssw_version = handler.results.values()[0]
# DEBUG DEBUG DEBUG
assert len(cmssw_version) == 1
# DEBUG DEBUG DEBUG end
cmssw_version = cmssw_version[0]
# End of dbs_resolve_cmssw_version.
return cmssw_version
##########
## def dbs_resolve_dataset_number_of_events(self, dataset_name):
## """Ask DBS across how many events this dataset has been spread
## out.
## This is especially useful to check that we do not submit a job
## supposed to run on a complete sample that is not contained at
## a single site.
## """
## # DEBUG DEBUG DEBUG
## # If we get here DBS should have been set up already.
## assert not self.dbs_api is None
## # DEBUG DEBUG DEBUG end
## api = self.dbs_api
## dbs_query = "find count(site) where dataset = %s " \
## "and dataset.status = VALID" % \
## dataset_name
## try:
## api_result = api.executeQuery(dbs_query)
## except DbsApiException:
## raise Error("ERROR: Could not execute DBS query")
## try:
## num_events = []
## class Handler(xml.sax.handler.ContentHandler):
## def startElement(self, name, attrs):
## if name == "result":
## num_events.append(str(attrs["COUNT_STORAGEELEMENT"]))
## xml.sax.parseString(api_result, Handler())
## except SAXParseException:
## raise Error("ERROR: Could not parse DBS server output")
## # DEBUG DEBUG DEBUG
## assert len(num_events) == 1
## # DEBUG DEBUG DEBUG end
## num_events = int(num_events[0])
## # End of dbs_resolve_dataset_number_of_events.
## return num_events
##########
def dbs_resolve_runs(self, dataset_name):
"""Ask DBS for the list of runs in a given dataset.
# NOTE: This does not (yet?) skip/remove empty runs. There is
# a bug in the DBS entry run.numevents (i.e. it always returns
# zero) which should be fixed in the `next DBS release'.
# See also:
# https://savannah.cern.ch/bugs/?53452
# https://savannah.cern.ch/bugs/?53711
"""
# TODO TODO TODO
# We should remove empty runs as soon as the above mentioned
# bug is fixed.
# TODO TODO TODO end
# DEBUG DEBUG DEBUG
# If we get here DBS should have been set up already.
assert not self.dbs_api is None
# DEBUG DEBUG DEBUG end
api = self.dbs_api
dbs_query = "find run where dataset = %s " \
"and dataset.status = VALID" % \
dataset_name
try:
api_result = api.executeQuery(dbs_query)
except DBSAPI.dbsApiException.DbsApiException:
msg = "ERROR: Could not execute DBS query"
self.logger.fatal(msg)
raise Error(msg)
handler = DBSXMLHandler(["run"])
parser = xml.sax.make_parser()
parser.setContentHandler(handler)
try:
xml.sax.parseString(api_result, handler)
except SAXParseException:
msg = "ERROR: Could not parse DBS server output"
self.logger.fatal(msg)
raise Error(msg)
# DEBUG DEBUG DEBUG
assert(handler.check_results_validity()), "ERROR The DBSXMLHandler screwed something up!"
# DEBUG DEBUG DEBUG end
runs = handler.results.values()[0]
# Turn strings into integers.
runs = sorted([int(i) for i in runs])
# End of dbs_resolve_runs.
return runs
##########
def dbs_resolve_globaltag(self, dataset_name):
"""Ask DBS for the globaltag corresponding to a given dataset.
# BUG BUG BUG
# This does not seem to work for data datasets? E.g. for
# /Cosmics/Commissioning08_CRAFT0831X_V1_311_ReReco_FromSuperPointing_v1/RAW-RECO
# Probaly due to the fact that the GlobalTag changed during
# datataking...
BUG BUG BUG end
"""
# DEBUG DEBUG DEBUG
# If we get here DBS should have been set up already.
assert not self.dbs_api is None
# DEBUG DEBUG DEBUG end
api = self.dbs_api
dbs_query = "find dataset.tag where dataset = %s " \
"and dataset.status = VALID" % \
dataset_name
try:
api_result = api.executeQuery(dbs_query)
except DBSAPI.dbsApiException.DbsApiException:
msg = "ERROR: Could not execute DBS query"
self.logger.fatal(msg)
raise Error(msg)
handler = DBSXMLHandler(["dataset.tag"])
parser = xml.sax.make_parser()
parser.setContentHandler(parser)
try:
xml.sax.parseString(api_result, handler)
except SAXParseException:
msg = "ERROR: Could not parse DBS server output"
self.logger.fatal(msg)
raise Error(msg)
# DEBUG DEBUG DEBUG
assert(handler.check_results_validity()), "ERROR The DBSXMLHandler screwed something up!"
# DEBUG DEBUG DEBUG end
globaltag = handler.results.values()[0]
# DEBUG DEBUG DEBUG
assert len(globaltag) == 1
# DEBUG DEBUG DEBUG end
globaltag = globaltag[0]
# End of dbs_resolve_globaltag.
return globaltag
##########
def dbs_resolve_datatype(self, dataset_name):
"""Ask DBS for the the data type (data or mc) of a given
dataset.
"""
# DEBUG DEBUG DEBUG
# If we get here DBS should have been set up already.
assert not self.dbs_api is None
# DEBUG DEBUG DEBUG end
api = self.dbs_api
dbs_query = "find datatype.type where dataset = %s " \
"and dataset.status = VALID" % \
dataset_name
try:
api_result = api.executeQuery(dbs_query)
except DBSAPI.dbsApiException.DbsApiException:
msg = "ERROR: Could not execute DBS query"
self.logger.fatal(msg)
raise Error(msg)
handler = DBSXMLHandler(["datatype.type"])
parser = xml.sax.make_parser()
parser.setContentHandler(handler)
try:
xml.sax.parseString(api_result, handler)
except SAXParseException:
msg = "ERROR: Could not parse DBS server output"
self.logger.fatal(msg)
raise Error(msg)
# DEBUG DEBUG DEBUG
assert(handler.check_results_validity()), "ERROR The DBSXMLHandler screwed something up!"
# DEBUG DEBUG DEBUG end
datatype = handler.results.values()[0]
# DEBUG DEBUG DEBUG
assert len(datatype) == 1
# DEBUG DEBUG DEBUG end
datatype = datatype[0]
# End of dbs_resolve_datatype.
return datatype
##########
# OBSOLETE OBSOLETE OBSOLETE
# This method is no longer used.
def dbs_resolve_number_of_events(self, dataset_name, run_number=None):
    """Determine the number of events in a given dataset (and run).

    Ask DBS for the number of events in a dataset. If a run number
    is specified the number of events returned is that in that run
    of that dataset. If problems occur we throw an exception.

    # BUG BUG BUG
    # Since DBS does not return the number of events correctly,
    # neither for runs nor for whole datasets, we have to work
    # around that a bit...
    # BUG BUG BUG end

    """
    # DEBUG DEBUG DEBUG
    # If we get here DBS should have been set up already.
    assert not self.dbs_api is None
    # DEBUG DEBUG DEBUG end
    api = self.dbs_api
    dbs_query = "find file.name, file.numevents where dataset = %s " \
                "and dataset.status = VALID" % \
                dataset_name
    if not run_number is None:
        # BUG FIX: this used to read the misspelled name `dbq_query',
        # which raised a NameError whenever a run number was passed.
        dbs_query = dbs_query + (" and run = %d" % run_number)
    try:
        api_result = api.executeQuery(dbs_query)
    except DBSAPI.dbsApiException.DbsApiException:
        msg = "ERROR: Could not execute DBS query"
        self.logger.fatal(msg)
        raise Error(msg)
    handler = DBSXMLHandler(["file.name", "file.numevents"])
    parser = xml.sax.make_parser()
    parser.setContentHandler(handler)
    try:
        xml.sax.parseString(api_result, handler)
    except SAXParseException:
        msg = "ERROR: Could not parse DBS server output"
        self.logger.fatal(msg)
        raise Error(msg)
    # DEBUG DEBUG DEBUG
    assert(handler.check_results_validity()), "ERROR The DBSXMLHandler screwed something up!"
    # DEBUG DEBUG DEBUG end
    # Sum the per-file event counts to get the total.
    num_events = sum(handler.results["file.numevents"])
    # End of dbs_resolve_number_of_events.
    return num_events
# OBSOLETE OBSOLETE OBSOLETE end
##########
## def dbs_resolve_dataset_number_of_sites(self, dataset_name):
## """Ask DBS across how many sites this dataset has been spread
## out.
## This is especially useful to check that we do not submit a job
## supposed to run on a complete sample that is not contained at
## a single site.
## """
## # DEBUG DEBUG DEBUG
## # If we get here DBS should have been set up already.
## assert not self.dbs_api is None
## # DEBUG DEBUG DEBUG end
## api = self.dbs_api
## dbs_query = "find count(site) where dataset = %s " \
## "and dataset.status = VALID" % \
## dataset_name
## try:
## api_result = api.executeQuery(dbs_query)
## except DbsApiException:
## raise Error("ERROR: Could not execute DBS query")
## try:
## num_sites = []
## class Handler(xml.sax.handler.ContentHandler):
## def startElement(self, name, attrs):
## if name == "result":
## num_sites.append(str(attrs["COUNT_STORAGEELEMENT"]))
## xml.sax.parseString(api_result, Handler())
## except SAXParseException:
## raise Error("ERROR: Could not parse DBS server output")
## # DEBUG DEBUG DEBUG
## assert len(num_sites) == 1
## # DEBUG DEBUG DEBUG end
## num_sites = int(num_sites[0])
## # End of dbs_resolve_dataset_number_of_sites.
## return num_sites
##########
## def dbs_check_dataset_spread(self, dataset_name):
## """Figure out across how many sites this dataset is spread.
## NOTE: This is something we need to figure out per run, since
## we want to submit harvesting jobs per run.
## Basically three things can happen with a given dataset:
## - the whole dataset is available on a single site,
## - the whole dataset is available (mirrored) at multiple sites,
## - the dataset is spread across multiple sites and there is no
## single site containing the full dataset in one place.
## NOTE: If all goes well, it should not be possible that
## anything but a _full_ dataset is mirrored. So we ignore the
## possibility in which for example one site contains the full
## dataset and two others mirror half of it.
## ANOTHER NOTE: According to some people this last case _could_
## actually happen. I will not design for it, but make sure it
## ends up as a false negative, in which case we just loose some
## efficiency and treat the dataset (unnecessarily) as
## spread-out.
## We don't really care about the first two possibilities, but in
## the third case we need to make sure to run the harvesting in
## two-step mode.
## This method checks with DBS which of the above cases is true
## for the dataset name given, and returns a 1 for the first two
## cases, and the number of sites across which the dataset is
## spread for the third case.
## The way in which this is done is by asking how many files each
## site has for the dataset. In the first case there is only one
## site, in the second case all sites should have the same number
## of files (i.e. the total number of files in the dataset) and
## in the third case the file counts from all sites should add up
## to the total file count for the dataset.
## """
## # DEBUG DEBUG DEBUG
## # If we get here DBS should have been set up already.
## assert not self.dbs_api is None
## # DEBUG DEBUG DEBUG end
## api = self.dbs_api
## dbs_query = "find run, run.numevents, site, file.count " \
## "where dataset = %s " \
## "and dataset.status = VALID" % \
## dataset_name
## try:
## api_result = api.executeQuery(dbs_query)
## except DbsApiException:
## msg = "ERROR: Could not execute DBS query"
## self.logger.fatal(msg)
## raise Error(msg)
## # Index things by run number. No cross-check is done to make
## # sure we get results for each and every run in the
## # dataset. I'm not sure this would make sense since we'd be
## # cross-checking DBS info with DBS info anyway. Note that we
## # use the file count per site to see if we're dealing with an
## # incomplete vs. a mirrored dataset.
## sample_info = {}
## try:
## class Handler(xml.sax.handler.ContentHandler):
## def startElement(self, name, attrs):
## if name == "result":
## run_number = int(attrs["RUNS_RUNNUMBER"])
## site_name = str(attrs["STORAGEELEMENT_SENAME"])
## file_count = int(attrs["COUNT_FILES"])
## # BUG BUG BUG
## # Doh! For some reason DBS never returns any other
## # event count than zero.
## event_count = int(attrs["RUNS_NUMBEROFEVENTS"])
## # BUG BUG BUG end
## info = (site_name, file_count, event_count)
## try:
## sample_info[run_number].append(info)
## except KeyError:
## sample_info[run_number] = [info]
## xml.sax.parseString(api_result, Handler())
## except SAXParseException:
## msg = "ERROR: Could not parse DBS server output"
## self.logger.fatal(msg)
## raise Error(msg)
## # Now translate this into a slightly more usable mapping.
## sites = {}
## for (run_number, site_info) in six.iteritems(sample_info):
## # Quick-n-dirty trick to see if all file counts are the
## # same.
## unique_file_counts = set([i[1] for i in site_info])
## if len(unique_file_counts) == 1:
## # Okay, so this must be a mirrored dataset.
## # We have to pick one but we have to be careful. We
## # cannot submit to things like a T0, a T1, or CAF.
## site_names = [self.pick_a_site([i[0] for i in site_info])]
## nevents = [site_info[0][2]]
## else:
## # Looks like this is a spread-out sample.
## site_names = [i[0] for i in site_info]
## nevents = [i[2] for i in site_info]
## sites[run_number] = zip(site_names, nevents)
## self.logger.debug("Sample `%s' spread is:" % dataset_name)
## run_numbers = sites.keys()
## run_numbers.sort()
## for run_number in run_numbers:
## self.logger.debug(" run # %6d: %d sites (%s)" % \
## (run_number,
## len(sites[run_number]),
## ", ".join([i[0] for i in sites[run_number]])))
## # End of dbs_check_dataset_spread.
## return sites
## # DEBUG DEBUG DEBUG
## # Just kept for debugging now.
## def dbs_check_dataset_spread_old(self, dataset_name):
## """Figure out across how many sites this dataset is spread.
## NOTE: This is something we need to figure out per run, since
## we want to submit harvesting jobs per run.
## Basically three things can happen with a given dataset:
## - the whole dataset is available on a single site,
## - the whole dataset is available (mirrored) at multiple sites,
## - the dataset is spread across multiple sites and there is no
## single site containing the full dataset in one place.
## NOTE: If all goes well, it should not be possible that
## anything but a _full_ dataset is mirrored. So we ignore the
## possibility in which for example one site contains the full
## dataset and two others mirror half of it.
## ANOTHER NOTE: According to some people this last case _could_
## actually happen. I will not design for it, but make sure it
## ends up as a false negative, in which case we just loose some
## efficiency and treat the dataset (unnecessarily) as
## spread-out.
## We don't really care about the first two possibilities, but in
## the third case we need to make sure to run the harvesting in
## two-step mode.
## This method checks with DBS which of the above cases is true
## for the dataset name given, and returns a 1 for the first two
## cases, and the number of sites across which the dataset is
## spread for the third case.
## The way in which this is done is by asking how many files each
## site has for the dataset. In the first case there is only one
## site, in the second case all sites should have the same number
## of files (i.e. the total number of files in the dataset) and
## in the third case the file counts from all sites should add up
## to the total file count for the dataset.
## """
## # DEBUG DEBUG DEBUG
## # If we get here DBS should have been set up already.
## assert not self.dbs_api is None
## # DEBUG DEBUG DEBUG end
## api = self.dbs_api
## dbs_query = "find run, run.numevents, site, file.count " \
## "where dataset = %s " \
## "and dataset.status = VALID" % \
## dataset_name
## try:
## api_result = api.executeQuery(dbs_query)
## except DbsApiException:
## msg = "ERROR: Could not execute DBS query"
## self.logger.fatal(msg)
## raise Error(msg)
## # Index things by run number. No cross-check is done to make
## # sure we get results for each and every run in the
## # dataset. I'm not sure this would make sense since we'd be
## # cross-checking DBS info with DBS info anyway. Note that we
## # use the file count per site to see if we're dealing with an
## # incomplete vs. a mirrored dataset.
## sample_info = {}
## try:
## class Handler(xml.sax.handler.ContentHandler):
## def startElement(self, name, attrs):
## if name == "result":
## run_number = int(attrs["RUNS_RUNNUMBER"])
## site_name = str(attrs["STORAGEELEMENT_SENAME"])
## file_count = int(attrs["COUNT_FILES"])
## # BUG BUG BUG
## # Doh! For some reason DBS never returns any other
## # event count than zero.
## event_count = int(attrs["RUNS_NUMBEROFEVENTS"])
## # BUG BUG BUG end
## info = (site_name, file_count, event_count)
## try:
## sample_info[run_number].append(info)
## except KeyError:
## sample_info[run_number] = [info]
## xml.sax.parseString(api_result, Handler())
## except SAXParseException:
## msg = "ERROR: Could not parse DBS server output"
## self.logger.fatal(msg)
## raise Error(msg)
## # Now translate this into a slightly more usable mapping.
## sites = {}
## for (run_number, site_info) in six.iteritems(sample_info):
## # Quick-n-dirty trick to see if all file counts are the
## # same.
## unique_file_counts = set([i[1] for i in site_info])
## if len(unique_file_counts) == 1:
## # Okay, so this must be a mirrored dataset.
## # We have to pick one but we have to be careful. We
## # cannot submit to things like a T0, a T1, or CAF.
## site_names = [self.pick_a_site([i[0] for i in site_info])]
## nevents = [site_info[0][2]]
## else:
## # Looks like this is a spread-out sample.
## site_names = [i[0] for i in site_info]
## nevents = [i[2] for i in site_info]
## sites[run_number] = zip(site_names, nevents)
## self.logger.debug("Sample `%s' spread is:" % dataset_name)
## run_numbers = sites.keys()
## run_numbers.sort()
## for run_number in run_numbers:
## self.logger.debug(" run # %6d: %d site(s) (%s)" % \
## (run_number,
## len(sites[run_number]),
## ", ".join([i[0] for i in sites[run_number]])))
## # End of dbs_check_dataset_spread_old.
## return sites
## # DEBUG DEBUG DEBUG end
##########
def dbs_check_dataset_spread(self, dataset_name):
    """Figure out the number of events in each run of this dataset.

    This is a more efficient way of doing this than calling
    dbs_resolve_number_of_events for each run.

    Returns a mapping
        {run_number: {"all_sites": total_events,
                      <site_name>: events_at_site, ...,
                      "mirrored": True/False/None}}
    where `mirrored' means the run can be harvested in a single
    step at one site holding a complete copy (see comments below
    for the exact, non-standard meaning of the term here).
    """
    self.logger.debug("Checking spread of dataset `%s'" % dataset_name)
    # DEBUG DEBUG DEBUG
    # If we get here DBS should have been set up already.
    assert not self.dbs_api is None
    # DEBUG DEBUG DEBUG end
    api = self.dbs_api
    # One query returns run number, site, file name and per-file
    # event count for every file in the dataset.
    dbs_query = "find run.number, site, file.name, file.numevents " \
                "where dataset = %s " \
                "and dataset.status = VALID" % \
                dataset_name
    try:
        api_result = api.executeQuery(dbs_query)
    except DBSAPI.dbsApiException.DbsApiException:
        msg = "ERROR: Could not execute DBS query"
        self.logger.fatal(msg)
        raise Error(msg)
    handler = DBSXMLHandler(["run.number", "site", "file.name", "file.numevents"])
    parser = xml.sax.make_parser()
    parser.setContentHandler(handler)
    try:
        # NOTE: An obsolete inline SAX ContentHandler used to live
        # here; the DBSXMLHandler created above replaces it. The
        # reshuffling of its results into `files_info' (including
        # the empty-site-name hack) now happens after parsing.
        xml.sax.parseString(api_result, handler)
    except SAXParseException:
        msg = "ERROR: Could not parse DBS server output"
        self.logger.fatal(msg)
        raise Error(msg)
    # DEBUG DEBUG DEBUG
    assert(handler.check_results_validity()), "ERROR The DBSXMLHandler screwed something up!"
    # DEBUG DEBUG DEBUG end
    # Now reshuffle all results a bit so we can more easily use
    # them later on. (Remember that all arrays in the results
    # should have equal length.)
    # files_info: {run_number: {file_name: (nevents, [site, ...])}}
    files_info = {}
    for (index, site_name) in enumerate(handler.results["site"]):
        # Ugly hack to get around cases like this:
        # $ dbs search --query="find dataset, site, file.count where dataset=/RelValQCD_Pt_3000_3500/CMSSW_3_3_0_pre1-STARTUP31X_V4-v1/GEN-SIM-RECO"
        # Using DBS instance at: http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet
        # Processing ... \
        # PATH STORAGEELEMENT_SENAME COUNT_FILES
        # _________________________________________________________________________________
        # /RelValQCD_Pt_3000_3500/CMSSW_3_3_0_pre1-STARTUP31X_V4-v1/GEN-SIM-RECO 1
        # /RelValQCD_Pt_3000_3500/CMSSW_3_3_0_pre1-STARTUP31X_V4-v1/GEN-SIM-RECO cmssrm.fnal.gov 12
        # /RelValQCD_Pt_3000_3500/CMSSW_3_3_0_pre1-STARTUP31X_V4-v1/GEN-SIM-RECO srm-cms.cern.ch 12
        # i.e. DBS sometimes returns a row with an empty site name.
        if len(site_name) < 1:
            continue
        run_number = int(handler.results["run.number"][index])
        file_name = handler.results["file.name"][index]
        nevents = int(handler.results["file.numevents"][index])
        # I know, this is a bit of a kludge.
        if run_number not in files_info:
            # New run.
            files_info[run_number] = {}
            files_info[run_number][file_name] = (nevents,
                                                 [site_name])
        elif file_name not in files_info[run_number]:
            # New file for a known run.
            files_info[run_number][file_name] = (nevents,
                                                 [site_name])
        else:
            # New entry for a known file for a known run.
            # DEBUG DEBUG DEBUG
            # Each file should have the same number of
            # events independent of the site it's at.
            assert nevents == files_info[run_number][file_name][0]
            # DEBUG DEBUG DEBUG end
            files_info[run_number][file_name][1].append(site_name)
    # Remove any information for files that are not available
    # anywhere. NOTE: After introducing the ugly hack above, this
    # is a bit redundant, but let's keep it for the moment.
    for run_number in files_info.keys():
        files_without_sites = [i for (i, j) in \
                               files_info[run_number].items() \
                               if len(j[1]) < 1]
        if len(files_without_sites) > 0:
            self.logger.warning("Removing %d file(s)" \
                                " with empty site names" % \
                                len(files_without_sites))
            for file_name in files_without_sites:
                del files_info[run_number][file_name]
                # files_info[run_number][file_name] = (files_info \
                #                                      [run_number] \
                #                                      [file_name][0], [])
    # And another bit of a kludge.
    num_events_catalog = {}
    for run_number in files_info.keys():
        # All sites hosting at least one file of this run.
        site_names = list(set([j for i in files_info[run_number].values() for j in i[1]]))
        # NOTE: The term `mirrored' does not have the usual
        # meaning here. It basically means that we can apply
        # single-step harvesting.
        mirrored = None
        if len(site_names) > 1:
            # Now we somehow need to figure out if we're dealing
            # with a mirrored or a spread-out dataset. The rule we
            # use here is that we're dealing with a spread-out
            # dataset unless we can find at least one site
            # containing exactly the full list of files for this
            # dataset that DBS knows about. In that case we just
            # use only that site.
            all_file_names = files_info[run_number].keys()
            all_file_names = set(all_file_names)
            sites_with_complete_copies = []
            for site_name in site_names:
                files_at_site = [i for (i, (j, k)) \
                                 in files_info[run_number].items() \
                                 if site_name in k]
                files_at_site = set(files_at_site)
                if files_at_site == all_file_names:
                    sites_with_complete_copies.append(site_name)
            if len(sites_with_complete_copies) < 1:
                # This dataset/run is available at more than one
                # site, but no one has a complete copy. So this is
                # a spread-out sample.
                mirrored = False
            else:
                if len(sites_with_complete_copies) > 1:
                    # This sample is available (and complete) at
                    # more than one site. Definitely mirrored.
                    mirrored = True
                else:
                    # This dataset/run is available at more than
                    # one site and at least one of them has a
                    # complete copy. Even if this is only a single
                    # site, let's call this `mirrored' and run the
                    # single-step harvesting.
                    mirrored = True
            ## site_names_ref = set(files_info[run_number].values()[0][1])
            ## for site_names_tmp in files_info[run_number].values()[1:]:
            ##     if set(site_names_tmp[1]) != site_names_ref:
            ##         mirrored = False
            ##         break
            if mirrored:
                self.logger.debug(" -> run appears to be `mirrored'")
            else:
                self.logger.debug(" -> run appears to be spread-out")
            if mirrored and \
                   len(sites_with_complete_copies) != len(site_names):
                # Remove any references to incomplete sites if we
                # have at least one complete site (and if there
                # are incomplete sites).
                for (file_name, (i, sites)) in files_info[run_number].items():
                    complete_sites = [site for site in sites \
                                      if site in sites_with_complete_copies]
                    files_info[run_number][file_name] = (i, complete_sites)
                site_names = sites_with_complete_copies
        self.logger.debug(" for run #%d:" % run_number)
        num_events_catalog[run_number] = {}
        # Grand total across all sites (each file counted once).
        num_events_catalog[run_number]["all_sites"] = sum([i[0] for i in files_info[run_number].values()])
        if len(site_names) < 1:
            self.logger.debug(" run is not available at any site")
            self.logger.debug(" (but should contain %d events" % \
                              num_events_catalog[run_number]["all_sites"])
        else:
            self.logger.debug(" at all sites combined there are %d events" % \
                              num_events_catalog[run_number]["all_sites"])
            for site_name in site_names:
                # Per-site total: sum of events of files hosted there.
                num_events_catalog[run_number][site_name] = sum([i[0] for i in files_info[run_number].values() if site_name in i[1]])
                self.logger.debug(" at site `%s' there are %d events" % \
                                  (site_name, num_events_catalog[run_number][site_name]))
        num_events_catalog[run_number]["mirrored"] = mirrored
    # End of dbs_check_dataset_spread.
    return num_events_catalog
# Beginning of old version.
## def dbs_check_dataset_num_events(self, dataset_name):
## """Figure out the number of events in each run of this dataset.
## This is a more efficient way of doing this than calling
## dbs_resolve_number_of_events for each run.
## # BUG BUG BUG
## # This might very well not work at all for spread-out samples. (?)
## # BUG BUG BUG end
## """
## # DEBUG DEBUG DEBUG
## # If we get here DBS should have been set up already.
## assert not self.dbs_api is None
## # DEBUG DEBUG DEBUG end
## api = self.dbs_api
## dbs_query = "find run.number, file.name, file.numevents where dataset = %s " \
## "and dataset.status = VALID" % \
## dataset_name
## try:
## api_result = api.executeQuery(dbs_query)
## except DbsApiException:
## msg = "ERROR: Could not execute DBS query"
## self.logger.fatal(msg)
## raise Error(msg)
## try:
## files_info = {}
## class Handler(xml.sax.handler.ContentHandler):
## def startElement(self, name, attrs):
## if name == "result":
## run_number = int(attrs["RUNS_RUNNUMBER"])
## file_name = str(attrs["FILES_LOGICALFILENAME"])
## nevents = int(attrs["FILES_NUMBEROFEVENTS"])
## try:
## files_info[run_number][file_name] = nevents
## except KeyError:
## files_info[run_number] = {file_name: nevents}
## xml.sax.parseString(api_result, Handler())
## except SAXParseException:
## msg = "ERROR: Could not parse DBS server output"
## self.logger.fatal(msg)
## raise Error(msg)
## num_events_catalog = {}
## for run_number in files_info.keys():
## num_events_catalog[run_number] = sum(files_info[run_number].values())
## # End of dbs_check_dataset_num_events.
## return num_events_catalog
# End of old version.
##########
def build_dataset_list(self, input_method, input_name):
    """Build a list of all datasets to be processed.

    Depending on `input_method' the dataset names come either
    directly from the command line (`dataset', possibly containing
    wildcards that DBS resolves) or from a list file
    (`datasetfile') in which each non-empty, non-comment line is a
    dataset name pattern. If both arguments are None (only allowed
    for the ignore list) an empty list is returned.

    Returns a sorted list of unique dataset names. Raises Error if
    the list file cannot be opened.
    """
    dataset_names = []
    # It may be, but only for the list of datasets to ignore, that
    # the input method and name are None because nothing was
    # specified. In that case just an empty list is returned.
    if input_method is None:
        pass
    elif input_method == "dataset":
        # Input comes from a dataset name directly on the command
        # line. But, this can also contain wildcards so we need
        # DBS to translate it conclusively into a list of explicit
        # dataset names.
        self.logger.info("Asking DBS for dataset names")
        dataset_names = self.dbs_resolve_dataset_name(input_name)
    elif input_method == "datasetfile":
        # In this case a file containing a list of dataset names
        # is specified. Still, each line may contain wildcards so
        # this step also needs help from DBS.
        # NOTE: Lines starting with a `#' are ignored.
        self.logger.info("Reading input from list file `%s'" % \
                         input_name)
        try:
            # Use a context manager so the file is closed even if
            # resolving one of the dataset names raises. (A stray
            # debug print statement was also removed here.)
            with open("/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/harvesting/bin/%s" % input_name, "r") as listfile:
                for dataset in listfile:
                    # Skip empty lines.
                    dataset_stripped = dataset.strip()
                    if len(dataset_stripped) < 1:
                        continue
                    # Skip lines starting with a `#'.
                    if dataset_stripped[0] != "#":
                        dataset_names.extend(self. \
                                             dbs_resolve_dataset_name(dataset_stripped))
        except IOError:
            msg = "ERROR: Could not open input list file `%s'" % \
                  input_name
            self.logger.fatal(msg)
            raise Error(msg)
    else:
        # DEBUG DEBUG DEBUG
        # We should never get here.
        assert False, "Unknown input method `%s'" % input_method
        # DEBUG DEBUG DEBUG end
    # Remove duplicates from the dataset list.
    # NOTE: There should not be any duplicates in any list coming
    # from DBS, but maybe the user provided a list file with less
    # care.
    dataset_names = sorted(set(dataset_names))
    # End of build_dataset_list.
    return dataset_names
##########
def build_dataset_use_list(self):
    """Build a list of datasets to process.

    The input method/name configured for the `use' list is handed
    to build_dataset_list() and the resulting names are stored as
    keys (mapping to None) in `self.datasets_to_use'.
    """
    self.logger.info("Building list of datasets to consider...")
    use_method = self.input_method["datasets"]["use"]
    use_name = self.input_name["datasets"]["use"]
    names_found = self.build_dataset_list(use_method, use_name)
    # Store as dictionary keys (value None) for fast membership tests.
    self.datasets_to_use = dict.fromkeys(names_found)
    self.logger.info(" found %d dataset(s) to process:" % \
                     len(names_found))
    for dataset_name in names_found:
        self.logger.info(" `%s'" % dataset_name)
    # End of build_dataset_use_list.
##########
def build_dataset_ignore_list(self):
    """Build a list of datasets to ignore.

    NOTE: We should always have a list of datasets to process, but
    it may be that we don't have a list of datasets to ignore.
    The names found are stored as keys (mapping to None) in
    `self.datasets_to_ignore'.
    """
    self.logger.info("Building list of datasets to ignore...")
    ignore_method = self.input_method["datasets"]["ignore"]
    ignore_name = self.input_name["datasets"]["ignore"]
    names_found = self.build_dataset_list(ignore_method, ignore_name)
    # Store as dictionary keys (value None) for fast membership tests.
    self.datasets_to_ignore = dict.fromkeys(names_found)
    self.logger.info(" found %d dataset(s) to ignore:" % \
                     len(names_found))
    for dataset_name in names_found:
        self.logger.info(" `%s'" % dataset_name)
    # End of build_dataset_ignore_list.
##########
def build_runs_list(self, input_method, input_name):
    """Build a list of run numbers (either to use or to ignore).

    `input_method' selects the source: None (nothing specified,
    returns an empty list), "runs" (comma-separated run numbers on
    the command line) or "runslistfile" (a file with one run number
    per non-empty, non-comment line).

    Returns a sorted list of unique run numbers. Raises Error if
    the list file cannot be opened.
    """
    runs = []
    # A list of runs (either to use or to ignore) is not
    # required. This protects against `empty cases.'
    if input_method is None:
        pass
    elif input_method == "runs":
        # A list of runs was specified directly from the command
        # line.
        self.logger.info("Reading list of runs from the " \
                         "command line")
        runs.extend([int(i.strip()) \
                     for i in input_name.split(",") \
                     if len(i.strip()) > 0])
    elif input_method == "runslistfile":
        # We were passed a file containing a list of runs.
        self.logger.info("Reading list of runs from file `%s'" % \
                         input_name)
        try:
            # Context manager: the file is closed even if a line
            # fails to parse as an integer.
            with open(input_name, "r") as listfile:
                for run in listfile:
                    # Skip empty lines.
                    run_stripped = run.strip()
                    if len(run_stripped) < 1:
                        continue
                    # Skip lines starting with a `#'.
                    if run_stripped[0] != "#":
                        runs.append(int(run_stripped))
        except IOError:
            msg = "ERROR: Could not open input list file `%s'" % \
                  input_name
            self.logger.fatal(msg)
            raise Error(msg)
    else:
        # DEBUG DEBUG DEBUG
        # We should never get here.
        assert False, "Unknown input method `%s'" % input_method
        # DEBUG DEBUG DEBUG end
    # Remove duplicates, sort and done.
    # BUG FIX: the code used to only de-duplicate (list(set(runs)))
    # although the comment promised sorting; sorted() delivers both.
    runs = sorted(set(runs))
    # End of build_runs_list().
    return runs
##########
def build_runs_use_list(self):
    """Build a list of runs to process.

    The run numbers found are stored as keys (mapping to None) in
    `self.runs_to_use'.
    """
    self.logger.info("Building list of runs to consider...")
    use_method = self.input_method["runs"]["use"]
    use_name = self.input_name["runs"]["use"]
    runs_found = self.build_runs_list(use_method, use_name)
    # Store as dictionary keys (value None) for fast membership tests.
    self.runs_to_use = dict.fromkeys(runs_found)
    self.logger.info(" found %d run(s) to process:" % \
                     len(runs_found))
    if runs_found:
        self.logger.info(" %s" % ", ".join(str(r) for r in runs_found))
    # End of build_runs_use_list().
##########
def build_runs_ignore_list(self):
    """Build a list of runs to ignore.

    NOTE: We should always have a list of runs to process, but
    it may be that we don't have a list of runs to ignore.
    The run numbers found are stored as keys (mapping to None) in
    `self.runs_to_ignore'.
    """
    self.logger.info("Building list of runs to ignore...")
    ignore_method = self.input_method["runs"]["ignore"]
    ignore_name = self.input_name["runs"]["ignore"]
    runs_found = self.build_runs_list(ignore_method, ignore_name)
    # Store as dictionary keys (value None) for fast membership tests.
    self.runs_to_ignore = dict.fromkeys(runs_found)
    self.logger.info(" found %d run(s) to ignore:" % \
                     len(runs_found))
    if runs_found:
        self.logger.info(" %s" % ", ".join(str(r) for r in runs_found))
    # End of build_runs_ignore_list().
##########
def process_dataset_ignore_list(self):
    """Update the list of datasets taking into account the ones to
    ignore.

    Both lists have been generated before from DBS and both are
    assumed to be unique.

    NOTE: The advantage of creating the ignore list from DBS (in
    case a regexp is given) and matching that instead of directly
    matching the ignore criterion against the list of datasets (to
    consider) built from DBS is that in the former case we're sure
    that all regexps are treated exactly as DBS would have done
    without the cmsHarvester.

    NOTE: This only removes complete samples. Exclusion of single
    runs is done by the book keeping. So the assumption is that a
    user never wants to harvest just part (i.e. n out of N runs)
    of a sample.
    """
    self.logger.info("Processing list of datasets to ignore...")
    self.logger.debug("Before processing ignore list there are %d " \
                      "datasets in the list to be processed" % \
                      len(self.datasets_to_use))
    # Work on a deep copy so `self.datasets_to_use' stays intact
    # until the filtered result is complete.
    datasets_kept = copy.deepcopy(self.datasets_to_use)
    for name in self.datasets_to_use.keys():
        if name in self.datasets_to_ignore.keys():
            del datasets_kept[name]
    self.logger.info(" --> Removed %d dataset(s)" % \
                     (len(self.datasets_to_use) -
                      len(datasets_kept)))
    self.datasets_to_use = datasets_kept
    self.logger.debug("After processing ignore list there are %d " \
                      "datasets in the list to be processed" % \
                      len(self.datasets_to_use))
    # End of process_dataset_ignore_list.
##########
def process_runs_use_and_ignore_lists(self):
    """Restrict the runs of each dataset to the configured selection.

    For every dataset in `self.datasets_to_use' the run list is
    reduced according to (in order): the `use' run list, the
    `ignore' run list, an optional to-do file, and optional JSON
    run(/lumisection) files. The surviving runs are stored back as
    the value of `self.datasets_to_use[dataset_name]'.

    Fixes applied in review: the IOError handlers for the two JSON
    files used to format their message with the undefined local
    name `input_name', raising a NameError instead of the intended
    Error; they now report the actual file name.
    """
    self.logger.info("Processing list of runs to use and ignore...")
    # This basically adds all runs in a dataset to be processed,
    # except for any runs that are not specified in the `to use'
    # list and any runs that are specified in the `to ignore'
    # list.
    # NOTE: It is assumed that those lists make sense. The input
    # should be checked against e.g. overlapping `use' and
    # `ignore' lists.
    runs_to_use = self.runs_to_use
    runs_to_ignore = self.runs_to_ignore
    for dataset_name in self.datasets_to_use:
        runs_in_dataset = self.datasets_information[dataset_name]["runs"]
        # First some sanity checks.
        runs_to_use_tmp = []
        for run in runs_to_use:
            if not run in runs_in_dataset:
                self.logger.warning("Dataset `%s' does not contain " \
                                    "requested run %d " \
                                    "--> ignoring `use' of this run" % \
                                    (dataset_name, run))
            else:
                runs_to_use_tmp.append(run)
        if len(runs_to_use) > 0:
            runs = runs_to_use_tmp
            self.logger.info("Using %d out of %d runs " \
                             "of dataset `%s'" % \
                             (len(runs), len(runs_in_dataset),
                              dataset_name))
        else:
            runs = runs_in_dataset
        if len(runs_to_ignore) > 0:
            runs_tmp = []
            for run in runs:
                if not run in runs_to_ignore:
                    runs_tmp.append(run)
            self.logger.info("Ignoring %d out of %d runs " \
                             "of dataset `%s'" % \
                             (len(runs)- len(runs_tmp),
                              len(runs_in_dataset),
                              dataset_name))
            runs = runs_tmp
        if self.todofile != "YourToDofile.txt":
            # Keep only the runs listed for this dataset in the
            # to-do file (matched via a grep on the file).
            runs_todo = []
            print("Reading runs from file /afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/harvesting/%s" %self.todofile)
            cmd="grep %s /afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/harvesting/%s | cut -f5 -d' '" %(dataset_name,self.todofile)
            (status, output)=commands.getstatusoutput(cmd)
            for run in runs:
                run_str="%s" %run
                if run_str in output:
                    runs_todo.append(run)
            self.logger.info("Using %d runs " \
                             "of dataset `%s'" % \
                             (len(runs_todo),
                              dataset_name))
            runs=runs_todo
        Json_runs = []
        if self.Jsonfilename != "YourJSON.txt":
            good_runs = []
            self.Jsonlumi = True
            # We were passed a Jsonfile containing a dictionary of
            # run/lunisection-pairs
            self.logger.info("Reading runs and lumisections from file `%s'" % \
                             self.Jsonfilename)
            try:
                Jsonfile = open(self.Jsonfilename, "r")
                for names in Jsonfile:
                    # NOTE(review): eval() on file contents is unsafe
                    # for untrusted input; consider json.loads instead.
                    dictNames= eval(str(names))
                    for key in dictNames:
                        intkey=int(key)
                        Json_runs.append(intkey)
                Jsonfile.close()
            except IOError:
                # BUG FIX: used to reference undefined `input_name'.
                msg = "ERROR: Could not open Jsonfile `%s'" % \
                      self.Jsonfilename
                self.logger.fatal(msg)
                raise Error(msg)
            for run in runs:
                if run in Json_runs:
                    good_runs.append(run)
            self.logger.info("Using %d runs " \
                             "of dataset `%s'" % \
                             (len(good_runs),
                              dataset_name))
            runs=good_runs
        if (self.Jsonrunfilename != "YourJSON.txt") and (self.Jsonfilename == "YourJSON.txt"):
            good_runs = []
            # We were passed a Jsonfile containing a dictionary of
            # run/lunisection-pairs
            self.logger.info("Reading runs from file `%s'" % \
                             self.Jsonrunfilename)
            try:
                Jsonfile = open(self.Jsonrunfilename, "r")
                for names in Jsonfile:
                    # NOTE(review): eval() on file contents is unsafe
                    # for untrusted input; consider json.loads instead.
                    dictNames= eval(str(names))
                    for key in dictNames:
                        intkey=int(key)
                        Json_runs.append(intkey)
                Jsonfile.close()
            except IOError:
                # BUG FIX: used to reference undefined `input_name'.
                msg = "ERROR: Could not open Jsonfile `%s'" % \
                      self.Jsonrunfilename
                self.logger.fatal(msg)
                raise Error(msg)
            for run in runs:
                if run in Json_runs:
                    good_runs.append(run)
            self.logger.info("Using %d runs " \
                             "of dataset `%s'" % \
                             (len(good_runs),
                              dataset_name))
            runs=good_runs
        self.datasets_to_use[dataset_name] = runs
    # End of process_runs_use_and_ignore_lists().
##########
def singlify_datasets(self):
"""Remove all but the largest part of all datasets.
This allows us to harvest at least part of these datasets
using single-step harvesting until the two-step approach
works.
"""
# DEBUG DEBUG DEBUG
assert self.harvesting_mode == "single-step-allow-partial"
# DEBUG DEBUG DEBUG end
for dataset_name in self.datasets_to_use:
for run_number in self.datasets_information[dataset_name]["runs"]:
max_events = max(self.datasets_information[dataset_name]["sites"][run_number].values())
sites_with_max_events = [i[0] for i in self.datasets_information[dataset_name]["sites"][run_number].items() if i[1] == max_events]
self.logger.warning("Singlifying dataset `%s', " \
"run %d" % \
(dataset_name, run_number))
cmssw_version = self.datasets_information[dataset_name] \
["cmssw_version"]
selected_site = self.pick_a_site(sites_with_max_events,
cmssw_version)
# Let's tell the user that we're manhandling this dataset.
nevents_old = self.datasets_information[dataset_name]["num_events"][run_number]
self.logger.warning(" --> " \
"only harvesting partial statistics: " \
"%d out of %d events (5.1%f%%) " \
"at site `%s'" % \
(max_events,
nevents_old,
100. * max_events / nevents_old,
selected_site))
self.logger.warning("!!! Please note that the number of " \
"events in the output path name will " \
"NOT reflect the actual statistics in " \
"the harvested results !!!")
# We found the site with the highest statistics and
# the corresponding number of events. (CRAB gets upset
# if we ask for more events than there are at a given
# site.) Now update this information in our main
# datasets_information variable.
self.datasets_information[dataset_name]["sites"][run_number] = {selected_site: max_events}
self.datasets_information[dataset_name]["num_events"][run_number] = max_events
#self.datasets_information[dataset_name]["sites"][run_number] = [selected_site]
# End of singlify_datasets.
##########
    def check_dataset_list(self):
        """Check list of dataset names for impossible ones.
        Two kinds of checks are done:
        - Checks for things that do not make sense. These lead to
        errors and skipped datasets.
        - Sanity checks. For these warnings are issued but the user is
        considered to be the authoritative expert.
        Checks performed:
        - The CMSSW version encoded in the dataset name should match
        self.cmssw_version. This is critical.
        - There should be some events in the dataset/run. This is
        critical in the sense that CRAB refuses to create jobs for
        zero events. And yes, this does happen in practice. E.g. the
        reprocessed CRAFT08 datasets contain runs with zero events.
        - A cursory check is performed to see if the harvesting type
        makes sense for the data type. This should prevent the user
        from inadvertently running RelVal for data.
        - It is not possible to run single-step harvesting jobs on
        samples that are not fully contained at a single site.
        - Each dataset/run has to be available at at least one site.
        """
        self.logger.info("Performing sanity checks on dataset list...")
        # Work on a deep copy so datasets/runs can be removed while we
        # iterate over the original `datasets_to_use' dictionary.
        dataset_names_after_checks = copy.deepcopy(self.datasets_to_use)
        for dataset_name in self.datasets_to_use.keys():
            # Check CMSSW version.
            version_from_dataset = self.datasets_information[dataset_name] \
                                   ["cmssw_version"]
            if version_from_dataset != self.cmssw_version:
                msg = "    CMSSW version mismatch for dataset `%s' " \
                      "(%s vs. %s)" % \
                      (dataset_name,
                       self.cmssw_version, version_from_dataset)
                if self.force_running:
                    # Expert mode: just warn, then continue.
                    self.logger.warning("%s " \
                                        "--> `force mode' active: " \
                                        "run anyway" % msg)
                else:
                    del dataset_names_after_checks[dataset_name]
                    self.logger.warning("%s " \
                                        "--> skipping" % msg)
                    continue
            ###
            # Check that the harvesting type makes sense for the
            # sample. E.g. normally one would not run the DQMOffline
            # harvesting on Monte Carlo.
            # TODO TODO TODO
            # This should be further refined.
            suspicious = False
            datatype = self.datasets_information[dataset_name]["datatype"]
            if datatype == "data":
                # Normally only DQM harvesting is run on data.
                if self.harvesting_type != "DQMOffline":
                    suspicious = True
            elif datatype == "mc":
                if self.harvesting_type == "DQMOffline":
                    suspicious = True
            else:
                # Doh!
                assert False, "ERROR Impossible data type `%s' " \
                       "for dataset `%s'" % \
                       (datatype, dataset_name)
            if suspicious:
                msg = "    Normally one does not run `%s' harvesting " \
                      "on %s samples, are you sure?" % \
                      (self.harvesting_type, datatype)
                if self.force_running:
                    self.logger.warning("%s " \
                                        "--> `force mode' active: " \
                                        "run anyway" % msg)
                else:
                    del dataset_names_after_checks[dataset_name]
                    self.logger.warning("%s " \
                                        "--> skipping" % msg)
                    continue
            # TODO TODO TODO end
            ###
            # BUG BUG BUG
            # For the moment, due to a problem with DBS, I cannot
            # figure out the GlobalTag for data by myself. (For MC
            # it's no problem.) This means that unless a GlobalTag was
            # specified from the command line, we will have to skip
            # any data datasets.
            if datatype == "data":
                if self.globaltag is None:
                    msg = "For data datasets (like `%s') " \
                          "we need a GlobalTag" % \
                          dataset_name
                    del dataset_names_after_checks[dataset_name]
                    self.logger.warning("%s " \
                                        "--> skipping" % msg)
                    continue
            # BUG BUG BUG end
            ###
            # Check if the GlobalTag exists and (if we're using
            # reference histograms) if it's ready to be used with
            # reference histograms.
            # NOTE: results are cached in `globaltag_check_cache' so
            # each GlobalTag is only checked once per session.
            globaltag = self.datasets_information[dataset_name]["globaltag"]
            if not globaltag in self.globaltag_check_cache:
                if self.check_globaltag(globaltag):
                    self.globaltag_check_cache.append(globaltag)
                else:
                    msg = "Something is wrong with GlobalTag `%s' " \
                          "used by dataset `%s'!" % \
                          (globaltag, dataset_name)
                    if self.use_ref_hists:
                        msg += "\n(Either it does not exist or it " \
                               "does not contain the required key to " \
                               "be used with reference histograms.)"
                    else:
                        msg += "\n(It probably just does not exist.)"
                    self.logger.fatal(msg)
                    raise Usage(msg)
            ###
            # Require that each run is available at least somewhere.
            runs_without_sites = [i for (i, j) in \
                                  self.datasets_information[dataset_name] \
                                  ["sites"].items() \
                                  if len(j) < 1 and \
                                  i in self.datasets_to_use[dataset_name]]
            if len(runs_without_sites) > 0:
                for run_without_sites in runs_without_sites:
                    try:
                        dataset_names_after_checks[dataset_name].remove(run_without_sites)
                    except KeyError:
                        pass
                self.logger.warning("    removed %d unavailable run(s) " \
                                    "from dataset `%s'" % \
                                    (len(runs_without_sites), dataset_name))
                self.logger.debug("      (%s)" % \
                                  ", ".join([str(i) for i in \
                                             runs_without_sites]))
            ###
            # Unless we're running two-step harvesting: only allow
            # samples located on a single site.
            if not self.harvesting_mode == "two-step":
                for run_number in self.datasets_to_use[dataset_name]:
                    # DEBUG DEBUG DEBUG
##                    if self.datasets_information[dataset_name]["num_events"][run_number] != 0:
##                        pdb.set_trace()
                    # DEBUG DEBUG DEBUG end
                    num_sites = len(self.datasets_information[dataset_name] \
                                ["sites"][run_number])
                    if num_sites > 1 and \
                           not self.datasets_information[dataset_name] \
                           ["mirrored"][run_number]:
                        # Cannot do this with a single-step job, not
                        # even in force mode. It just does not make
                        # sense.
                        msg = "    Dataset `%s', run %d is spread across more " \
                              "than one site.\n" \
                              "    Cannot run single-step harvesting on " \
                              "samples spread across multiple sites" % \
                              (dataset_name, run_number)
                        try:
                            dataset_names_after_checks[dataset_name].remove(run_number)
                        except KeyError:
                            pass
                        self.logger.warning("%s " \
                                            "--> skipping" % msg)
            ###
            # Require that the dataset/run is non-empty.
            # NOTE: To avoid reconsidering empty runs/datasets next
            # time around, we do include them in the book keeping.
            # BUG BUG BUG
            # This should sum only over the runs that we use!
            tmp = [j for (i, j) in self.datasets_information \
                   [dataset_name]["num_events"].items() \
                   if i in self.datasets_to_use[dataset_name]]
            num_events_dataset = sum(tmp)
            # BUG BUG BUG end
            if num_events_dataset < 1:
                msg = "    dataset `%s' is empty" % dataset_name
                del dataset_names_after_checks[dataset_name]
                self.logger.warning("%s " \
                                    "--> skipping" % msg)
                # Update the book keeping with all the runs in the dataset.
                # DEBUG DEBUG DEBUG
                #assert set([j for (i, j) in self.datasets_information \
                #           [dataset_name]["num_events"].items() \
                #           if i in self.datasets_to_use[dataset_name]]) == \
                #           set([0])
                # DEBUG DEBUG DEBUG end
                #self.book_keeping_information[dataset_name] = self.datasets_information \
                #                                              [dataset_name]["num_events"]
                continue
            # Drop individual empty runs (but keep the dataset) when
            # the dataset as a whole still has events.
            tmp = [i for i in \
                   self.datasets_information[dataset_name] \
                   ["num_events"].items() if i[1] < 1]
            tmp = [i for i in tmp if i[0] in self.datasets_to_use[dataset_name]]
            empty_runs = dict(tmp)
            if len(empty_runs) > 0:
                for empty_run in empty_runs:
                    try:
                        dataset_names_after_checks[dataset_name].remove(empty_run)
                    except KeyError:
                        pass
                self.logger.info("    removed %d empty run(s) from dataset `%s'" % \
                                 (len(empty_runs), dataset_name))
                self.logger.debug("      (%s)" % \
                                  ", ".join([str(i) for i in empty_runs]))
        ###
        # If we emptied out a complete dataset, remove the whole
        # thing.
        dataset_names_after_checks_tmp = copy.deepcopy(dataset_names_after_checks)
        for (dataset_name, runs) in six.iteritems(dataset_names_after_checks):
            if len(runs) < 1:
                self.logger.warning("  Removing dataset without any runs " \
                                    "(left) `%s'" % \
                                    dataset_name)
                del dataset_names_after_checks_tmp[dataset_name]
        dataset_names_after_checks = dataset_names_after_checks_tmp
        ###
        self.logger.warning("  --> Removed %d dataset(s)" % \
                            (len(self.datasets_to_use) -
                             len(dataset_names_after_checks)))
        # Now store the modified version of the dataset list.
        self.datasets_to_use = dataset_names_after_checks
        # End of check_dataset_list.
##########
def escape_dataset_name(self, dataset_name):
"""Escape a DBS dataset name.
Escape a DBS dataset name such that it does not cause trouble
with the file system. This means turning each `/' into `__',
except for the first one which is just removed.
"""
escaped_dataset_name = dataset_name
escaped_dataset_name = escaped_dataset_name.strip("/")
escaped_dataset_name = escaped_dataset_name.replace("/", "__")
return escaped_dataset_name
##########
# BUG BUG BUG
# This is a bit of a redundant method, isn't it?
def create_config_file_name(self, dataset_name, run_number):
"""Generate the name of the configuration file to be run by
CRAB.
Depending on the harvesting mode (single-step or two-step)
this is the name of the real harvesting configuration or the
name of the first-step ME summary extraction configuration.
"""
if self.harvesting_mode == "single-step":
config_file_name = self.create_harvesting_config_file_name(dataset_name)
elif self.harvesting_mode == "single-step-allow-partial":
config_file_name = self.create_harvesting_config_file_name(dataset_name)
## # Only add the alarming piece to the file name if this is
## # a spread-out dataset.
## pdb.set_trace()
## if self.datasets_information[dataset_name] \
## ["mirrored"][run_number] == False:
## config_file_name = config_file_name.replace(".py", "_partial.py")
elif self.harvesting_mode == "two-step":
config_file_name = self.create_me_summary_config_file_name(dataset_name)
else:
assert False, "ERROR Unknown harvesting mode `%s'" % \
self.harvesting_mode
# End of create_config_file_name.
return config_file_name
# BUG BUG BUG end
##########
def create_harvesting_config_file_name(self, dataset_name):
"Generate the name to be used for the harvesting config file."
file_name_base = "harvesting.py"
dataset_name_escaped = self.escape_dataset_name(dataset_name)
config_file_name = file_name_base.replace(".py",
"_%s.py" % \
dataset_name_escaped)
# End of create_harvesting_config_file_name.
return config_file_name
##########
def create_me_summary_config_file_name(self, dataset_name):
"Generate the name of the ME summary extraction config file."
file_name_base = "me_extraction.py"
dataset_name_escaped = self.escape_dataset_name(dataset_name)
config_file_name = file_name_base.replace(".py",
"_%s.py" % \
dataset_name_escaped)
# End of create_me_summary_config_file_name.
return config_file_name
##########
def create_output_file_name(self, dataset_name, run_number=None):
"""Create the name of the output file name to be used.
This is the name of the output file of the `first step'. In
the case of single-step harvesting this is already the final
harvesting output ROOT file. In the case of two-step
harvesting it is the name of the intermediary ME summary
file.
"""
# BUG BUG BUG
# This method has become a bit of a mess. Originally it was
# nice to have one entry point for both single- and two-step
# output file names. However, now the former needs the run
# number, while the latter does not even know about run
# numbers. This should be fixed up a bit.
# BUG BUG BUG end
if self.harvesting_mode == "single-step":
# DEBUG DEBUG DEBUG
assert not run_number is None
# DEBUG DEBUG DEBUG end
output_file_name = self.create_harvesting_output_file_name(dataset_name, run_number)
elif self.harvesting_mode == "single-step-allow-partial":
# DEBUG DEBUG DEBUG
assert not run_number is None
# DEBUG DEBUG DEBUG end
output_file_name = self.create_harvesting_output_file_name(dataset_name, run_number)
elif self.harvesting_mode == "two-step":
# DEBUG DEBUG DEBUG
assert run_number is None
# DEBUG DEBUG DEBUG end
output_file_name = self.create_me_summary_output_file_name(dataset_name)
else:
# This should not be possible, but hey...
assert False, "ERROR Unknown harvesting mode `%s'" % \
self.harvesting_mode
# End of create_harvesting_output_file_name.
return output_file_name
##########
def create_harvesting_output_file_name(self, dataset_name, run_number):
"""Generate the name to be used for the harvesting output file.
This harvesting output file is the _final_ ROOT output file
containing the harvesting results. In case of two-step
harvesting there is an intermediate ME output file as well.
"""
dataset_name_escaped = self.escape_dataset_name(dataset_name)
# Hmmm, looking at the code for the DQMFileSaver this might
# actually be the place where the first part of this file
# naming scheme comes from.
# NOTE: It looks like the `V0001' comes from the DQM
# version. This is something that cannot be looked up from
# here, so let's hope it does not change too often.
output_file_name = "DQM_V0001_R%09d__%s.root" % \
(run_number, dataset_name_escaped)
if self.harvesting_mode.find("partial") > -1:
# Only add the alarming piece to the file name if this is
# a spread-out dataset.
if self.datasets_information[dataset_name] \
["mirrored"][run_number] == False:
output_file_name = output_file_name.replace(".root", \
"_partial.root")
# End of create_harvesting_output_file_name.
return output_file_name
##########
def create_me_summary_output_file_name(self, dataset_name):
"""Generate the name of the intermediate ME file name to be
used in two-step harvesting.
"""
dataset_name_escaped = self.escape_dataset_name(dataset_name)
output_file_name = "me_summary_%s.root" % \
dataset_name_escaped
# End of create_me_summary_output_file_name.
return output_file_name
##########
def create_multicrab_block_name(self, dataset_name, run_number, index):
"""Create the block name to use for this dataset/run number.
This is what appears in the brackets `[]' in multicrab.cfg. It
is used as the name of the job and to create output
directories.
"""
dataset_name_escaped = self.escape_dataset_name(dataset_name)
block_name = "%s_%09d_%s" % (dataset_name_escaped, run_number, index)
# End of create_multicrab_block_name.
return block_name
##########
def create_crab_config(self):
"""Create a CRAB configuration for a given job.
NOTE: This is _not_ a complete (as in: submittable) CRAB
configuration. It is used to store the common settings for the
multicrab configuration.
NOTE: Only CERN CASTOR area (/castor/cern.ch/) is supported.
NOTE: According to CRAB, you `Must define exactly two of
total_number_of_events, events_per_job, or
number_of_jobs.'. For single-step harvesting we force one job,
for the rest we don't really care.
# BUG BUG BUG
# With the current version of CRAB (2.6.1), in which Daniele
# fixed the behaviour of no_block_boundary for me, one _has to
# specify_ the total_number_of_events and one single site in
# the se_white_list.
# BUG BUG BUG end
"""
tmp = []
# This is the stuff we will need to fill in.
castor_prefix = self.castor_prefix
tmp.append(self.config_file_header())
tmp.append("")
## CRAB
##------
tmp.append("[CRAB]")
tmp.append("jobtype = cmssw")
tmp.append("")
## GRID
##------
tmp.append("[GRID]")
tmp.append("virtual_organization=cms")
tmp.append("")
## USER
##------
tmp.append("[USER]")
tmp.append("copy_data = 1")
tmp.append("")
## CMSSW
##-------
tmp.append("[CMSSW]")
tmp.append("# This reveals data hosted on T1 sites,")
tmp.append("# which is normally hidden by CRAB.")
tmp.append("show_prod = 1")
tmp.append("number_of_jobs = 1")
if self.Jsonlumi == True:
tmp.append("lumi_mask = %s" % self.Jsonfilename)
tmp.append("total_number_of_lumis = -1")
else:
if self.harvesting_type == "DQMOffline":
tmp.append("total_number_of_lumis = -1")
else:
tmp.append("total_number_of_events = -1")
if self.harvesting_mode.find("single-step") > -1:
tmp.append("# Force everything to run in one job.")
tmp.append("no_block_boundary = 1")
tmp.append("")
## CAF
##-----
tmp.append("[CAF]")
crab_config = "\n".join(tmp)
# End of create_crab_config.
return crab_config
##########
    def create_multicrab_config(self):
        """Create a multicrab.cfg file for all samples.
        This creates the contents for a multicrab.cfg file that uses
        the crab.cfg file (generated elsewhere) for the basic settings
        and contains blocks for each run of each dataset.
        # BUG BUG BUG
        # The fact that it's necessary to specify the se_white_list
        # and the total_number_of_events is due to our use of CRAB
        # version 2.6.1. This should no longer be necessary in the
        # future.
        # BUG BUG BUG end
        """
        # Ask the shell for the current user name; it is only used in
        # the CAF informational printout below.
        cmd="who i am | cut -f1 -d' '"
        (status, output)=commands.getstatusoutput(cmd)
        UserName = output
        if self.caf_access == True:
            print("Extracting %s as user name" %UserName)
        number_max_sites = self.nr_max_sites + 1
        multicrab_config_lines = []
        multicrab_config_lines.append(self.config_file_header())
        multicrab_config_lines.append("")
        multicrab_config_lines.append("[MULTICRAB]")
        multicrab_config_lines.append("cfg = crab.cfg")
        multicrab_config_lines.append("")
        dataset_names = sorted(self.datasets_to_use.keys())
        for dataset_name in dataset_names:
            runs = self.datasets_to_use[dataset_name]
            dataset_name_escaped = self.escape_dataset_name(dataset_name)
            castor_prefix = self.castor_prefix
            for run in runs:
                # CASTOR output dir.
                castor_dir = self.datasets_information[dataset_name] \
                             ["castor_path"][run]
                cmd = "rfdir %s" % castor_dir
                (status, output) = commands.getstatusoutput(cmd)
                # NOTE(review): an empty `rfdir' listing presumably
                # means no previous harvesting output exists for this
                # run, so a job block is only created in that case --
                # confirm against the CASTOR workflow.
                if len(output) <= 0:
                    # DEBUG DEBUG DEBUG
                    # We should only get here if we're treating a
                    # dataset/run that is fully contained at a single
                    # site.
                    assert (len(self.datasets_information[dataset_name] \
                                ["sites"][run]) == 1) or \
                                self.datasets_information[dataset_name]["mirrored"]
                    # DEBUG DEBUG DEBUG end
                    site_names = self.datasets_information[dataset_name] \
                                 ["sites"][run].keys()
                    for i in range(1, number_max_sites, 1):
                        if len(site_names) > 0:
                            index = "site_%02d" % (i)
                            config_file_name = self. \
                                               create_config_file_name(dataset_name, run)
                            output_file_name = self. \
                                               create_output_file_name(dataset_name, run)
                            # If we're looking at a mirrored dataset we just pick
                            # one of the sites. Otherwise there is nothing to
                            # choose.
                            # Loop variable
                            # NOTE(review): `loop' is reset to 0 on
                            # every iteration of the site loop, so the
                            # `loop < 1' test below is always true and
                            # the `loop = loop + 1' at the end has no
                            # effect -- looks suspicious; confirm
                            # intent.
                            loop = 0
                            if len(site_names) > 1:
                                cmssw_version = self.datasets_information[dataset_name] \
                                                ["cmssw_version"]
                                self.logger.info("Picking site for mirrored dataset " \
                                                 "`%s', run %d" % \
                                                 (dataset_name, run))
                                site_name = self.pick_a_site(site_names, cmssw_version)
                                if site_name in site_names:
                                    site_names.remove(site_name)
                            else:
                                site_name = site_names[0]
                                site_names.remove(site_name)
                            # NOTE(review): identity (`is') comparison
                            # relies on pick_a_site returning this very
                            # string object -- confirm.
                            if site_name is self.no_matching_site_found_str:
                                if loop < 1:
                                    break
                            nevents = self.datasets_information[dataset_name]["num_events"][run]
                            # The block name.
                            multicrab_block_name = self.create_multicrab_block_name( \
                                dataset_name, run, index)
                            multicrab_config_lines.append("[%s]" % \
                                                          multicrab_block_name)
                            ## CRAB
                            ##------
                            if site_name == "caf.cern.ch":
                                multicrab_config_lines.append("CRAB.use_server=0")
                                multicrab_config_lines.append("CRAB.scheduler=caf")
                            else:
                                multicrab_config_lines.append("scheduler = glite")
                            ## GRID
                            ##------
                            if site_name == "caf.cern.ch":
                                pass
                            else:
                                multicrab_config_lines.append("GRID.se_white_list = %s" % \
                                                              site_name)
                                multicrab_config_lines.append("# This removes the default blacklisting of T1 sites.")
                                multicrab_config_lines.append("GRID.remove_default_blacklist = 1")
                                multicrab_config_lines.append("GRID.rb = CERN")
                                if not self.non_t1access:
                                    multicrab_config_lines.append("GRID.role = t1access")
                            ## USER
                            ##------
                            # Strip the CASTOR prefix: CRAB wants the
                            # remote dir relative to the storage path.
                            castor_dir = castor_dir.replace(castor_prefix, "")
                            multicrab_config_lines.append("USER.storage_element=srm-cms.cern.ch")
                            multicrab_config_lines.append("USER.user_remote_dir = %s" % \
                                                          castor_dir)
                            multicrab_config_lines.append("USER.check_user_remote_dir=0")
                            if site_name == "caf.cern.ch":
                                multicrab_config_lines.append("USER.storage_path=%s" % castor_prefix)
                                #multicrab_config_lines.append("USER.storage_element=T2_CH_CAF")
                                #castor_dir = castor_dir.replace("/cms/store/caf/user/%s" %UserName, "")
                                #multicrab_config_lines.append("USER.user_remote_dir = %s" % \
                                #                              castor_dir)
                            else:
                                multicrab_config_lines.append("USER.storage_path=/srm/managerv2?SFN=%s" % castor_prefix)
                                #multicrab_config_lines.append("USER.user_remote_dir = %s" % \
                                #                              castor_dir)
                                #multicrab_config_lines.append("USER.storage_element=srm-cms.cern.ch")
                            ## CMSSW
                            ##-------
                            multicrab_config_lines.append("CMSSW.pset = %s" % \
                                                           config_file_name)
                            multicrab_config_lines.append("CMSSW.datasetpath = %s" % \
                                                          dataset_name)
                            multicrab_config_lines.append("CMSSW.runselection = %d" % \
                                                          run)
                            if self.Jsonlumi == True:
                                pass
                            else:
                                if self.harvesting_type == "DQMOffline":
                                    pass
                                else:
                                    multicrab_config_lines.append("CMSSW.total_number_of_events = %d" % \
                                                                  nevents)
                            # The output file name.
                            multicrab_config_lines.append("CMSSW.output_file = %s" % \
                                                          output_file_name)
                            ## CAF
                            ##-----
                            if site_name == "caf.cern.ch":
                                multicrab_config_lines.append("CAF.queue=cmscaf1nd")
                            # End of block.
                            multicrab_config_lines.append("")
                            loop = loop + 1
        self.all_sites_found = True
        multicrab_config = "\n".join(multicrab_config_lines)
        # End of create_multicrab_config.
        return multicrab_config
##########
def check_globaltag(self, globaltag=None):
"""Check if globaltag exists.
Check if globaltag exists as GlobalTag in the database given
by self.frontier_connection_name['globaltag']. If globaltag is
None, self.globaltag is used instead.
If we're going to use reference histograms this method also
checks for the existence of the required key in the GlobalTag.
"""
if globaltag is None:
globaltag = self.globaltag
# All GlobalTags should end in `::All', right?
if globaltag.endswith("::All"):
globaltag = globaltag[:-5]
connect_name = self.frontier_connection_name["globaltag"]
# BUG BUG BUG
# There is a bug in cmscond_tagtree_list: some magic is
# missing from the implementation requiring one to specify
# explicitly the name of the squid to connect to. Since the
# cmsHarvester can only be run from the CERN network anyway,
# cmsfrontier:8000 is hard-coded in here. Not nice but it
# works.
connect_name = connect_name.replace("frontier://",
"frontier://cmsfrontier:8000/")
# BUG BUG BUG end
connect_name += self.db_account_name_cms_cond_globaltag()
tag_exists = self.check_globaltag_exists(globaltag, connect_name)
#----------
tag_contains_ref_hist_key = False
if self.use_ref_hists and tag_exists:
# Check for the key required to use reference histograms.
tag_contains_ref_hist_key = self.check_globaltag_contains_ref_hist_key(globaltag, connect_name)
#----------
if self.use_ref_hists:
ret_val = tag_exists and tag_contains_ref_hist_key
else:
ret_val = tag_exists
#----------
# End of check_globaltag.
return ret_val
##########
def check_globaltag_exists(self, globaltag, connect_name):
"""Check if globaltag exists.
"""
self.logger.info("Checking existence of GlobalTag `%s'" % \
globaltag)
self.logger.debug(" (Using database connection `%s')" % \
connect_name)
cmd = "cmscond_tagtree_list -c %s -T %s" % \
(connect_name, globaltag)
(status, output) = commands.getstatusoutput(cmd)
if status != 0 or \
output.find("error") > -1:
msg = "Could not check existence of GlobalTag `%s' in `%s'" % \
(globaltag, connect_name)
if output.find(".ALL_TABLES not found") > -1:
msg = "%s\n" \
"Missing database account `%s'" % \
(msg, output.split(".ALL_TABLES")[0].split()[-1])
self.logger.fatal(msg)
self.logger.debug("Command used:")
self.logger.debug(" %s" % cmd)
self.logger.debug("Output received:")
self.logger.debug(output)
raise Error(msg)
if output.find("does not exist") > -1:
self.logger.debug("GlobalTag `%s' does not exist in `%s':" % \
(globaltag, connect_name))
self.logger.debug("Output received:")
self.logger.debug(output)
tag_exists = False
else:
tag_exists = True
self.logger.info(" GlobalTag exists? -> %s" % tag_exists)
# End of check_globaltag_exists.
return tag_exists
##########
def check_globaltag_contains_ref_hist_key(self, globaltag, connect_name):
"""Check if globaltag contains the required RefHistos key.
"""
# Check for the key required to use reference histograms.
tag_contains_key = None
ref_hist_key = "RefHistos"
self.logger.info("Checking existence of reference " \
"histogram key `%s' in GlobalTag `%s'" % \
(ref_hist_key, globaltag))
self.logger.debug(" (Using database connection `%s')" % \
connect_name)
cmd = "cmscond_tagtree_list -c %s -T %s -n %s" % \
(connect_name, globaltag, ref_hist_key)
(status, output) = commands.getstatusoutput(cmd)
if status != 0 or \
output.find("error") > -1:
msg = "Could not check existence of key `%s'" % \
(ref_hist_key, connect_name)
self.logger.fatal(msg)
self.logger.debug("Command used:")
self.logger.debug(" %s" % cmd)
self.logger.debug("Output received:")
self.logger.debug(" %s" % output)
raise Error(msg)
if len(output) < 1:
self.logger.debug("Required key for use of reference " \
"histograms `%s' does not exist " \
"in GlobalTag `%s':" % \
(ref_hist_key, globaltag))
self.logger.debug("Output received:")
self.logger.debug(output)
tag_contains_key = False
else:
tag_contains_key = True
self.logger.info(" GlobalTag contains `%s' key? -> %s" % \
(ref_hist_key, tag_contains_key))
# End of check_globaltag_contains_ref_hist_key.
return tag_contains_key
##########
def check_ref_hist_tag(self, tag_name):
"""Check the existence of tag_name in database connect_name.
Check if tag_name exists as a reference histogram tag in the
database given by self.frontier_connection_name['refhists'].
"""
connect_name = self.frontier_connection_name["refhists"]
connect_name += self.db_account_name_cms_cond_dqm_summary()
self.logger.debug("Checking existence of reference " \
"histogram tag `%s'" % \
tag_name)
self.logger.debug(" (Using database connection `%s')" % \
connect_name)
cmd = "cmscond_list_iov -c %s" % \
connect_name
(status, output) = commands.getstatusoutput(cmd)
if status != 0:
msg = "Could not check existence of tag `%s' in `%s'" % \
(tag_name, connect_name)
self.logger.fatal(msg)
self.logger.debug("Command used:")
self.logger.debug(" %s" % cmd)
self.logger.debug("Output received:")
self.logger.debug(output)
raise Error(msg)
if not tag_name in output.split():
self.logger.debug("Reference histogram tag `%s' " \
"does not exist in `%s'" % \
(tag_name, connect_name))
self.logger.debug(" Existing tags: `%s'" % \
"', `".join(output.split()))
tag_exists = False
else:
tag_exists = True
self.logger.debug(" Reference histogram tag exists? " \
"-> %s" % tag_exists)
# End of check_ref_hist_tag.
return tag_exists
##########
    def create_es_prefer_snippet(self, dataset_name):
        """Build the es_prefer snippet for the reference histograms.

        The building of the snippet is wrapped in some care-taking
        code that figures out the name of the reference histogram set
        and makes sure the corresponding tag exists.

        Returns the snippet as a single newline-joined string of
        Python configuration code to be embedded in the harvesting
        configuration.
        """
        # Figure out the name of the reference histograms tag.
        # NOTE: The existence of these tags has already been checked.
        ref_hist_tag_name = self.ref_hist_mappings[dataset_name]
        # Full connect string: frontier URL plus the DQM summary
        # database account suffix.
        connect_name = self.frontier_connection_name["refhists"]
        connect_name += self.db_account_name_cms_cond_dqm_summary()
        record_name = "DQMReferenceHistogramRootFileRcd"
        # Build up the code snippet.
        # NOTE: the exact leading whitespace inside these strings is
        # part of the generated configuration text; do not reformat.
        code_lines = []
        code_lines.append("from CondCore.DBCommon.CondDBSetup_cfi import *")
        code_lines.append("process.ref_hist_source = cms.ESSource(\"PoolDBESSource\", CondDBSetup,")
        code_lines.append("      connect = cms.string(\"%s\")," % connect_name)
        code_lines.append("      toGet = cms.VPSet(cms.PSet(record = cms.string(\"%s\")," % record_name)
        code_lines.append("      tag = cms.string(\"%s\"))," % ref_hist_tag_name)
        code_lines.append("      )")
        code_lines.append("      )")
        code_lines.append("process.es_prefer_ref_hist_source = cms.ESPrefer(\"PoolDBESSource\", \"ref_hist_source\")")
        snippet = "\n".join(code_lines)
        # End of create_es_prefer_snippet.
        return snippet
##########
def create_harvesting_config(self, dataset_name):
    """Create the Python harvesting configuration for harvesting.

    The basic configuration is created by
    Configuration.PyReleaseValidation.ConfigBuilder. (This mimics
    what cmsDriver.py does.) After that we add some specials
    ourselves.

    NOTE: On one hand it may not be nice to circumvent
    cmsDriver.py, on the other hand cmsDriver.py does not really
    do anything itself. All the real work is done by the
    ConfigBuilder so there is not much risk that we miss out on
    essential developments of cmsDriver in the future.

    Arguments:
        dataset_name: key into self.datasets_information; selects
            the datatype and GlobalTag used for this configuration.

    Returns:
        The full harvesting configuration as a single string of
        Python code (ConfigBuilder output plus our customisations).

    """

    # Setup some options needed by the ConfigBuilder.
    config_options = defaultOptions

    # These are fixed for all kinds of harvesting jobs. Some of
    # them are not needed for the harvesting config, but to keep
    # the ConfigBuilder happy.
    config_options.name = "harvesting"
    config_options.scenario = "pp"
    config_options.number = 1
    config_options.arguments = self.ident_string()
    config_options.evt_type = config_options.name
    config_options.customisation_file = None
    config_options.filein = "dummy_value"
    config_options.filetype = "EDM"
    # This seems to be new in CMSSW 3.3.X, no clue what it does.
    config_options.gflash = "dummy_value"
    # This seems to be new in CMSSW 3.3.0.pre6, no clue what it
    # does.
    #config_options.himix = "dummy_value"
    config_options.dbsquery = ""

    ###

    # These options depend on the type of harvesting we're doing
    # and are stored in self.harvesting_info.

    config_options.step = "HARVESTING:%s" % \
                          self.harvesting_info[self.harvesting_type] \
                          ["step_string"]
    config_options.beamspot = self.harvesting_info[self.harvesting_type] \
                              ["beamspot"]
    config_options.eventcontent = self.harvesting_info \
                                  [self.harvesting_type] \
                                  ["eventcontent"]
    config_options.harvesting = self.harvesting_info \
                                [self.harvesting_type] \
                                ["harvesting"]

    ###

    # This one is required (see also above) for each dataset.

    datatype = self.datasets_information[dataset_name]["datatype"]
    config_options.isMC = (datatype.lower() == "mc")
    config_options.isData = (datatype.lower() == "data")
    globaltag = self.datasets_information[dataset_name]["globaltag"]

    config_options.conditions = self.format_conditions_string(globaltag)

    ###

    # ConfigBuilder grew a `with_input' keyword in CMSSW 3.3.X;
    # inspect the signature to stay compatible with older releases.
    if "with_input" in getargspec(ConfigBuilder.__init__)[0]:
        # This is the case for 3.3.X.
        config_builder = ConfigBuilder(config_options, with_input=True)
    else:
        # This is the case in older CMSSW versions.
        config_builder = ConfigBuilder(config_options)
    config_builder.prepare(True)
    config_contents = config_builder.pythonCfgCode

    ###

    # Add our signature to the top of the configuration. and add
    # some markers to the head and the tail of the Python code
    # generated by the ConfigBuilder.
    marker_lines = []
    sep = "#" * 30
    marker_lines.append(sep)
    marker_lines.append("# Code between these markers was generated by")
    marker_lines.append("# Configuration.PyReleaseValidation." \
                        "ConfigBuilder")
    marker_lines.append(sep)
    marker = "\n".join(marker_lines)

    tmp = [self.config_file_header()]
    tmp.append("")
    tmp.append(marker)
    tmp.append("")
    tmp.append(config_contents)
    tmp.append("")
    tmp.append(marker)
    tmp.append("")
    config_contents = "\n".join(tmp)

    ###

    # Now we add some stuff of our own.
    customisations = [""]

    customisations.append("# Now follow some customisations")
    customisations.append("")
    connect_name = self.frontier_connection_name["globaltag"]
    connect_name += self.db_account_name_cms_cond_globaltag()
    customisations.append("process.GlobalTag.connect = \"%s\"" % \
                          connect_name)


    if self.saveByLumiSection == True:
        customisations.append("process.dqmSaver.saveByLumiSection = 1")
    ##
    ##

    customisations.append("")

    # About the reference histograms... For data there is only one
    # set of references and those are picked up automatically
    # based on the GlobalTag. For MC we have to do some more work
    # since the reference histograms to be used depend on the MC
    # sample at hand. In this case we glue in an es_prefer snippet
    # to pick up the references. We do this only for RelVals since
    # for MC there are no meaningful references so far.

    # NOTE: Due to the lack of meaningful references for
    # MC samples reference histograms are explicitly
    # switched off in this case.

    use_es_prefer = (self.harvesting_type == "RelVal")
    use_refs = use_es_prefer or \
               (not self.harvesting_type == "MC")
    # Allow global override.
    use_refs = use_refs and self.use_ref_hists

    if not use_refs:
        # Disable reference histograms explicitly. The histograms
        # are loaded by the dqmRefHistoRootFileGetter
        # EDAnalyzer. This analyzer can be run from several
        # sequences. Here we remove it from each sequence that
        # exists.
        customisations.append("print \"Not using reference histograms\"")
        customisations.append("if hasattr(process, \"dqmRefHistoRootFileGetter\"):")
        customisations.append("    for (sequence_name, sequence) in six.iteritems(process.sequences):")
        customisations.append("        if sequence.remove(process.dqmRefHistoRootFileGetter):")
        customisations.append("            print \"Removed process.dqmRefHistoRootFileGetter from sequence `%s'\" % \\")
        customisations.append("                  sequence_name")
        customisations.append("process.dqmSaver.referenceHandling = \"skip\"")
    else:
        # This makes sure all reference histograms are saved to
        # the output ROOT file.
        customisations.append("process.dqmSaver.referenceHandling = \"all\"")
        if use_es_prefer:
            es_prefer_snippet = self.create_es_prefer_snippet(dataset_name)
            customisations.append(es_prefer_snippet)

    # Make sure we get the `workflow' correct. As far as I can see
    # this is only important for the output file name.
    workflow_name = dataset_name
    if self.harvesting_mode == "single-step-allow-partial":
        workflow_name += "_partial"
    customisations.append("process.dqmSaver.workflow = \"%s\"" % \
                          workflow_name)

    # BUG BUG BUG
    # This still does not work. The current two-step harvesting
    # efforts are on hold waiting for the solution to come from
    # elsewhere. (In this case the elsewhere is <NAME>.)

    ## # In case this file is the second step (the real harvesting
    ## # step) of the two-step harvesting we have to tell it to use
    ## # our local files.
    ## if self.harvesting_mode == "two-step":
    ##     castor_dir = self.datasets_information[dataset_name] \
    ##                  ["castor_path"][run]
    ##     customisations.append("")
    ##     customisations.append("# This is the second step (the real")
    ##     customisations.append("# harvesting step) of a two-step")
    ##     customisations.append("# harvesting procedure.")
    ##     # BUG BUG BUG
    ##     # To be removed in production version.
    ##     customisations.append("import pdb")
    ##     # BUG BUG BUG end
    ##     customisations.append("import commands")
    ##     customisations.append("import os")
    ##     customisations.append("castor_dir = \"%s\"" % castor_dir)
    ##     customisations.append("cmd = \"rfdir %s\" % castor_dir")
    ##     customisations.append("(status, output) = commands.getstatusoutput(cmd)")
    ##     customisations.append("if status != 0:")
    ##     customisations.append("    print \"ERROR\"")
    ##     customisations.append("    raise Exception, \"ERROR\"")
    ##     customisations.append("file_names = [os.path.join(\"rfio:%s\" % path, i) for i in output.split() if i.startswith(\"EDM_summary\") and i.endswith(\".root\")]")
    ##     #customisations.append("pdb.set_trace()")
    ##     customisations.append("process.source.fileNames = cms.untracked.vstring(*file_names)")
    ##     customisations.append("")

    # BUG BUG BUG end

    config_contents = config_contents + "\n".join(customisations)

    ###

    # End of create_harvesting_config.
    return config_contents
## ##########
## def create_harvesting_config_two_step(self, dataset_name):
## """Create the Python harvesting configuration for two-step
## harvesting.
## """
## # BUG BUG BUG
## config_contents = self.create_harvesting_config_single_step(dataset_name)
## # BUG BUG BUG end
## # End of create_harvesting_config_two_step.
## return config_contents
##########
def create_me_extraction_config(self, dataset_name):
    """Build the Python configuration for the ME-extraction step.

    This is the first step of two-step harvesting: it converts
    Monitoring Elements into EDM products and writes them to a
    ROOT file. The configuration is mostly static text; only the
    header and the output file name vary per dataset.

    """

    # Big chunk of hard-coded Python. Not such a big deal since
    # this does not do much and is not likely to break.
    # NOTE: the list elements are evaluated top-to-bottom, which
    # preserves the original call order (config_file_header()
    # before create_output_file_name()).
    config_lines = [
        self.config_file_header(),
        "",
        "import FWCore.ParameterSet.Config as cms",
        "",
        "process = cms.Process(\"ME2EDM\")",
        "",
        "# Import of standard configurations",
        "process.load(\"Configuration/EventContent/EventContent_cff\")",
        "",
        "# We don't really process any events, just keep this set to one to",
        "# make sure things work.",
        "process.maxEvents = cms.untracked.PSet(",
        "    input = cms.untracked.int32(1)",
        "    )",
        "",
        "process.options = cms.untracked.PSet(",
        "    Rethrow = cms.untracked.vstring(\"ProductNotFound\")",
        "    )",
        "",
        "process.source = cms.Source(\"PoolSource\",",
        "                            processingMode = \\",
        "                            cms.untracked.string(\"RunsAndLumis\"),",
        "                            fileNames = \\",
        "                            cms.untracked.vstring(\"no_file_specified\")",
        "                            )",
        "",
        "# Output definition: drop everything except for the monitoring.",
        "process.output = cms.OutputModule(",
        "    \"PoolOutputModule\",",
        "    outputCommands = \\",
        "    cms.untracked.vstring(\"drop *\", \\",
        "                          \"keep *_MEtoEDMConverter_*_*\"),",
        "    fileName = \\",
        "    cms.untracked.string(\"%s\")," % self.create_output_file_name(dataset_name),
        "    dataset = cms.untracked.PSet(",
        "    dataTier = cms.untracked.string(\"RECO\"),",
        "    filterName = cms.untracked.string(\"\")",
        "    )",
        "    )",
        "",
        "# Additional output definition",
        "process.out_step = cms.EndPath(process.output)",
        "",
        "# Schedule definition",
        "process.schedule = cms.Schedule(process.out_step)",
        "",
    ]

    # End of create_me_extraction_config.
    return "\n".join(config_lines)
##########
## def create_harvesting_config(self, dataset_name):
## """Create the Python harvesting configuration for a given job.
## NOTE: The reason to have a single harvesting configuration per
## sample is to be able to specify the GlobalTag corresponding to
## each sample. Since it has been decided that (apart from the
## prompt reco) datasets cannot contain runs with different
## GlobalTags, we don't need a harvesting config per run.
## NOTE: This is the place where we distinguish between
## single-step and two-step harvesting modes (at least for the
## Python job configuration).
## """
## ###
## if self.harvesting_mode == "single-step":
## config_contents = self.create_harvesting_config_single_step(dataset_name)
## elif self.harvesting_mode == "two-step":
## config_contents = self.create_harvesting_config_two_step(dataset_name)
## else:
## # Impossible harvesting mode, we should never get here.
## assert False, "ERROR: unknown harvesting mode `%s'" % \
## self.harvesting_mode
## ###
## # End of create_harvesting_config.
## return config_contents
##########
def write_crab_config(self):
    """Write a CRAB job configuration Python file.

    The contents come from create_crab_config(); this method only
    writes them to `crab.cfg' in the current directory.

    Raises:
        Error: if the file cannot be written.

    """

    self.logger.info("Writing CRAB configuration...")

    file_name_base = "crab.cfg"

    # Create CRAB configuration.
    crab_contents = self.create_crab_config()

    # Write configuration to file.
    # BUGFIX: use `open' instead of the Python-2-only `file'
    # builtin, and a `with' block so the handle is closed even if
    # the write fails part-way.
    crab_file_name = file_name_base
    try:
        with open(crab_file_name, "w") as crab_file:
            crab_file.write(crab_contents)
    except IOError:
        self.logger.fatal("Could not write " \
                          "CRAB configuration to file `%s'" % \
                          crab_file_name)
        raise Error("ERROR: Could not write to file `%s'!" % \
                    crab_file_name)

    # End of write_crab_config.
##########
def write_multicrab_config(self):
    """Write a multi-CRAB job configuration Python file.

    The contents come from create_multicrab_config(); this method
    only writes them to `multicrab.cfg' in the current directory.

    Raises:
        Error: if the file cannot be written.

    """

    self.logger.info("Writing multi-CRAB configuration...")

    file_name_base = "multicrab.cfg"

    # Create multi-CRAB configuration.
    multicrab_contents = self.create_multicrab_config()

    # Write configuration to file.
    # BUGFIX: use `open' instead of the Python-2-only `file'
    # builtin, and a `with' block so the handle is closed even if
    # the write fails part-way.
    multicrab_file_name = file_name_base
    try:
        with open(multicrab_file_name, "w") as multicrab_file:
            multicrab_file.write(multicrab_contents)
    except IOError:
        self.logger.fatal("Could not write " \
                          "multi-CRAB configuration to file `%s'" % \
                          multicrab_file_name)
        raise Error("ERROR: Could not write to file `%s'!" % \
                    multicrab_file_name)

    # End of write_multicrab_config.
##########
def write_harvesting_config(self, dataset_name):
    """Write a harvesting job configuration Python file.

    NOTE: This knows nothing about single-step or two-step
    harvesting. That's all taken care of by
    create_harvesting_config.

    Raises:
        Error: if the file cannot be written.

    """

    self.logger.debug("Writing harvesting configuration for `%s'..." % \
                      dataset_name)

    # Create Python configuration.
    config_contents = self.create_harvesting_config(dataset_name)

    # Write configuration to file.
    # BUGFIX: use `open' instead of the Python-2-only `file'
    # builtin, and a `with' block so the handle is closed even if
    # the write fails part-way.
    config_file_name = self. \
                       create_harvesting_config_file_name(dataset_name)
    try:
        with open(config_file_name, "w") as config_file:
            config_file.write(config_contents)
    except IOError:
        self.logger.fatal("Could not write " \
                          "harvesting configuration to file `%s'" % \
                          config_file_name)
        raise Error("ERROR: Could not write to file `%s'!" % \
                    config_file_name)

    # End of write_harvesting_config.
##########
def write_me_extraction_config(self, dataset_name):
    """Write an ME-extraction configuration Python file.

    This `ME-extraction' (ME = Monitoring Element) is the first
    step of the two-step harvesting.

    Raises:
        Error: if the file cannot be written.

    """

    self.logger.debug("Writing ME-extraction configuration for `%s'..." % \
                      dataset_name)

    # Create Python configuration.
    config_contents = self.create_me_extraction_config(dataset_name)

    # Write configuration to file.
    # BUGFIX: use `open' instead of the Python-2-only `file'
    # builtin, and a `with' block so the handle is closed even if
    # the write fails part-way.
    config_file_name = self. \
                       create_me_summary_config_file_name(dataset_name)
    try:
        with open(config_file_name, "w") as config_file:
            config_file.write(config_contents)
    except IOError:
        self.logger.fatal("Could not write " \
                          "ME-extraction configuration to file `%s'" % \
                          config_file_name)
        raise Error("ERROR: Could not write to file `%s'!" % \
                    config_file_name)

    # End of write_me_extraction_config.
##########
def ref_hist_mappings_needed(self, dataset_name=None):
    """Check if we need to load and check the reference mappings.

    For data the reference histograms should be taken
    automatically from the GlobalTag, so we don't need any
    mappings. For RelVals we need to know a mapping to be used in
    the es_prefer code snippet (different references for each of
    the datasets.)

    If no dataset name is given the answer is computed across all
    known datasets: mappings are needed if any single dataset
    needs them.

    """

    # Single-dataset case: mappings are needed exactly for MC.
    if dataset_name is not None:
        sample_type = self.datasets_information[dataset_name] \
                      ["datatype"]
        needed = (sample_type == "mc")
        # DEBUG DEBUG DEBUG
        if not needed:
            assert sample_type == "data"
        # DEBUG DEBUG DEBUG end
        return needed

    # No dataset given: ask the question for every dataset we know
    # about. (Deliberately evaluate all of them instead of
    # short-circuiting so the per-dataset sanity assertion above
    # runs for each one.)
    per_dataset = [self.ref_hist_mappings_needed(name) \
                   for name in \
                   self.datasets_information.keys()]

    # End of ref_hist_mappings_needed.
    return True in per_dataset
##########
def load_ref_hist_mappings(self):
    """Load the reference histogram mappings from file.

    The dataset name to reference histogram name mappings are read
    from a text file specified in self.ref_hist_mappings_file_name.
    The expected format is two whitespace-separated fields per
    line: the dataset name, then the reference histogram name in
    the database. Lines starting with `#' and blank lines are
    skipped.

    Raises:
        Error: if the file cannot be read, a line is malformed, or
            a dataset is mapped more than once.

    """

    # DEBUG DEBUG DEBUG
    assert len(self.ref_hist_mappings) < 1, \
           "ERROR Should not be RE-loading " \
           "reference histogram mappings!"
    # DEBUG DEBUG DEBUG end

    self.logger.info("Loading reference histogram mappings " \
                     "from file `%s'" % \
                     self.ref_hist_mappings_file_name)

    mappings_lines = None
    try:
        # BUGFIX: use `open' instead of the Python-2-only `file'
        # builtin, with a `with' block so the handle is closed
        # even if readlines() fails.
        with open(self.ref_hist_mappings_file_name, "r") as mappings_file:
            mappings_lines = mappings_file.readlines()
    except IOError:
        msg = "ERROR: Could not open reference histogram mapping "\
              "file `%s'" % self.ref_hist_mappings_file_name
        self.logger.fatal(msg)
        raise Error(msg)

    ##########

    # The format we expect is: two white-space separated pieces
    # per line. The first the dataset name for which the reference
    # should be used, the second one the name of the reference
    # histogram in the database.

    for mapping in mappings_lines:
        # Skip comment lines.
        if not mapping.startswith("#"):
            mapping = mapping.strip()
            if len(mapping) > 0:
                mapping_pieces = mapping.split()
                if len(mapping_pieces) != 2:
                    msg = "ERROR: The reference histogram mapping " \
                          "file contains a line I don't " \
                          "understand:\n %s" % mapping
                    self.logger.fatal(msg)
                    raise Error(msg)
                dataset_name = mapping_pieces[0].strip()
                ref_hist_name = mapping_pieces[1].strip()
                # We don't want people to accidentally specify
                # multiple mappings for the same dataset. Just
                # don't accept those cases.
                if dataset_name in self.ref_hist_mappings:
                    # BUGFIX: the original error message left the
                    # `%s' placeholder unfilled; interpolate the
                    # offending dataset name.
                    msg = "ERROR: The reference histogram mapping " \
                          "file contains multiple mappings for " \
                          "dataset `%s'." % dataset_name
                    self.logger.fatal(msg)
                    raise Error(msg)

                # All is well that ends well.
                self.ref_hist_mappings[dataset_name] = ref_hist_name

    ##########

    self.logger.info("  Successfully loaded %d mapping(s)" % \
                     len(self.ref_hist_mappings))
    # BUGFIX: guard against an empty mappings file -- `max()' on an
    # empty sequence would raise a ValueError.
    if self.ref_hist_mappings:
        max_len = max([len(i) for i in self.ref_hist_mappings.keys()])
        # NOTE: plain dict.items() replaces six.iteritems(); it
        # behaves identically for iteration on both Python 2 and 3.
        for (map_from, map_to) in self.ref_hist_mappings.items():
            self.logger.info("    %-*s -> %s" % \
                             (max_len, map_from, map_to))

    # End of load_ref_hist_mappings.
##########
def check_ref_hist_mappings(self):
    """Make sure all necessary reference histograms exist.

    Check that for each of the datasets to be processed a
    reference histogram is specified and that that histogram
    exists in the database.

    NOTE: There's a little complication here. Since this whole
    thing was designed to allow (in principle) harvesting of both
    data and MC datasets in one go, we need to be careful to check
    the availability fof reference mappings only for those
    datasets that need it.

    """

    self.logger.info("Checking reference histogram mappings")

    for sample_name in self.datasets_to_use:
        # A dataset without any mapping at all is fatal.
        if sample_name not in self.ref_hist_mappings:
            msg = "ERROR: No reference histogram mapping found " \
                  "for dataset `%s'" % \
                  sample_name
            self.logger.fatal(msg)
            raise Error(msg)
        ref_hist_name = self.ref_hist_mappings[sample_name]

        # A mapping pointing at a non-existent tag is a usage error.
        if not self.check_ref_hist_tag(ref_hist_name):
            msg = "Reference histogram tag `%s' " \
                  "(used for dataset `%s') does not exist!" % \
                  (ref_hist_name, sample_name)
            self.logger.fatal(msg)
            raise Usage(msg)

    self.logger.info("  Done checking reference histogram mappings.")

    # End of check_ref_hist_mappings.
##########
def build_datasets_information(self):
    """Obtain all information on the datasets that we need to run.

    Use DBS to figure out all required information on our
    datasets, like the run numbers and the GlobalTag. All
    information is stored in the datasets_information member
    variable.

    For each dataset the resulting entry holds (as observed from
    the assignments below): "runs", "cmssw_version", "globaltag",
    "datatype", "num_events" (per run), "mirrored" (per run),
    "sites" (per run) and "castor_path" (per run).

    """

    # Get a list of runs in the dataset.
    # NOTE: The harvesting has to be done run-by-run, so we
    # split up datasets based on the run numbers. Strictly
    # speaking this is not (yet?) necessary for Monte Carlo
    # since all those samples use run number 1. Still, this
    # general approach should work for all samples.

    # Now loop over all datasets in the list and process them.
    # NOTE: This processing has been split into several loops
    # to be easier to follow, sacrificing a bit of efficiency.
    self.datasets_information = {}
    self.logger.info("Collecting information for all datasets to process")
    dataset_names = sorted(self.datasets_to_use.keys())
    for dataset_name in dataset_names:

        # Tell the user which dataset: nice with many datasets.
        sep_line = "-" * 30
        self.logger.info(sep_line)
        self.logger.info("  `%s'" % dataset_name)
        self.logger.info(sep_line)

        runs = self.dbs_resolve_runs(dataset_name)
        self.logger.info("    found %d run(s)" % len(runs))
        if len(runs) > 0:
            self.logger.debug("      run number(s): %s" % \
                              ", ".join([str(i) for i in runs]))
        else:
            # DEBUG DEBUG DEBUG
            # This should never happen after the DBS checks.
            self.logger.warning("  --> skipping dataset "
                                "without any runs")
            assert False, "Panic: found a dataset without runs " \
                          "after DBS checks!"
            # DEBUG DEBUG DEBUG end

        cmssw_version = self.dbs_resolve_cmssw_version(dataset_name)
        self.logger.info("    found CMSSW version `%s'" % cmssw_version)

        # Figure out if this is data or MC.
        datatype = self.dbs_resolve_datatype(dataset_name)
        self.logger.info("    sample is data or MC? --> %s" % \
                         datatype)

        ###

        # Try and figure out the GlobalTag to be used. A GlobalTag
        # given on the command line overrides the DBS lookup.
        if self.globaltag is None:
            globaltag = self.dbs_resolve_globaltag(dataset_name)
        else:
            globaltag = self.globaltag

        self.logger.info("    found GlobalTag `%s'" % globaltag)

        # DEBUG DEBUG DEBUG
        if globaltag == "":
            # Actually we should not even reach this point, after
            # our dataset sanity checks.
            assert datatype == "data", \
                   "ERROR Empty GlobalTag for MC dataset!!!"
        # DEBUG DEBUG DEBUG end

        ###

        # DEBUG DEBUG DEBUG
        #tmp = self.dbs_check_dataset_spread_old(dataset_name)
        # DEBUG DEBUG DEBUG end
        sites_catalog = self.dbs_check_dataset_spread(dataset_name)

        # Extract the total event counts.
        # NOTE: the "all_sites" and "mirrored" entries are popped
        # out of sites_catalog here, leaving only the per-site
        # information behind.
        num_events = {}
        for run_number in sites_catalog.keys():
            num_events[run_number] = sites_catalog \
                                     [run_number]["all_sites"]
            del sites_catalog[run_number]["all_sites"]

        # Extract the information about whether or not datasets
        # are mirrored.
        mirror_catalog = {}
        for run_number in sites_catalog.keys():
            mirror_catalog[run_number] = sites_catalog \
                                         [run_number]["mirrored"]
            del sites_catalog[run_number]["mirrored"]

        # BUG BUG BUG
        # I think I could now get rid of that and just fill the
        # "sites" entry with the `inverse' of this
        # num_events_catalog(?).
        #num_sites = self.dbs_resolve_dataset_number_of_sites(dataset_name)
        #sites_catalog = self.dbs_check_dataset_spread(dataset_name)
        #sites_catalog = dict(zip(num_events_catalog.keys(),
        #                         [[j for i in num_events_catalog.values() for j in i.keys()]]))
        # BUG BUG BUG end

        ## # DEBUG DEBUG DEBUG
        ## # This is probably only useful to make sure we don't muck
        ## # things up, right?
        ## # Figure out across how many sites this sample has been spread.
        ## if num_sites == 1:
        ##     self.logger.info("  sample is contained at a single site")
        ## else:
        ##     self.logger.info("  sample is spread across %d sites" % \
        ##                      num_sites)
        ## if num_sites < 1:
        ##     # NOTE: This _should not_ happen with any valid dataset.
        ##     self.logger.warning("  --> skipping dataset which is not " \
        ##                         "hosted anywhere")
        ## # DEBUG DEBUG DEBUG end

        # Now put everything in a place where we can find it again
        # if we need it.
        self.datasets_information[dataset_name] = {}
        self.datasets_information[dataset_name]["runs"] = runs
        self.datasets_information[dataset_name]["cmssw_version"] = \
                                                                 cmssw_version
        self.datasets_information[dataset_name]["globaltag"] = globaltag
        self.datasets_information[dataset_name]["datatype"] = datatype
        self.datasets_information[dataset_name]["num_events"] = num_events
        self.datasets_information[dataset_name]["mirrored"] = mirror_catalog
        self.datasets_information[dataset_name]["sites"] = sites_catalog

        # Each run of each dataset has a different CASTOR output
        # path.
        castor_path_common = self.create_castor_path_name_common(dataset_name)
        self.logger.info("    output will go into `%s'" % \
                         castor_path_common)

        castor_paths = dict(list(zip(runs,
                                     [self.create_castor_path_name_special(dataset_name, i, castor_path_common) \
                                      for i in runs])))
        for path_name in castor_paths.values():
            self.logger.debug("      %s" % path_name)
        self.datasets_information[dataset_name]["castor_path"] = \
                                                               castor_paths

    # End of build_datasets_information.
##########
def show_exit_message(self):
    """Tell the user what to do now, after this part is done.

    This should provide the user with some (preferably
    copy-pasteable) instructions on what to do now with the setups
    and files that have been created.

    """

    # TODO TODO TODO
    # This could be improved a bit.
    # TODO TODO TODO end

    sep_line = "-" * 60

    # Emit the fixed instruction text line by line.
    exit_lines = [
        "",
        sep_line,
        "  Configuration files have been created.",
        "  From here on please follow the usual CRAB instructions.",
        "  Quick copy-paste instructions are shown below.",
        sep_line,
        "",
        "  Create all CRAB jobs:",
        "    multicrab -create",
        "",
        "  Submit all CRAB jobs:",
        "    multicrab -submit",
        "",
        "  Check CRAB status:",
        "    multicrab -status",
        "",
        "",
        "  For more information please see the CMS Twiki:",
        "    %s" % twiki_url,
        sep_line,
    ]
    for exit_line in exit_lines:
        self.logger.info(exit_line)

    # If there were any jobs for which we could not find a
    # matching site show a warning message about that.
    if not self.all_sites_found:
        self.logger.warning("  For some of the jobs no matching " \
                            "site could be found")
        self.logger.warning("  --> please scan your multicrab.cfg" \
                            "for occurrences of `%s'." % \
                            self.no_matching_site_found_str)
        self.logger.warning("      You will have to fix those " \
                            "by hand, sorry.")

    # End of show_exit_message.
##########
def run(self):
    """Main entry point of the CMS harvester.

    Drives the whole job: option parsing, environment checks,
    dataset bookkeeping, and configuration-file creation. Returns
    an exit code: 0 on success (or a clean Usage exit), 1 for
    known errors (Error), 2 for unexpected exceptions.
    """

    # Start with a positive thought.
    exit_code = 0

    try:

        try:

            # Parse all command line options and arguments
            self.parse_cmd_line_options()
            # and check that they make sense.
            self.check_input_status()

            # Check if CMSSW is setup.
            self.check_cmssw()

            # Check if DBS is setup,
            self.check_dbs()
            # and if all is fine setup the Python side.
            self.setup_dbs()

            # Fill our dictionary with all the required info we
            # need to understand harvesting jobs. This needs to be
            # done after the CMSSW version is known.
            self.setup_harvesting_info()

            # Obtain list of dataset names to consider
            self.build_dataset_use_list()
            # and the list of dataset names to ignore.
            self.build_dataset_ignore_list()

            # The same for the runs lists (if specified).
            self.build_runs_use_list()
            self.build_runs_ignore_list()

            # Process the list of datasets to ignore and fold that
            # into the list of datasets to consider.
            # NOTE: The run-based selection is done later since
            # right now we don't know yet which runs a dataset
            # contains.
            self.process_dataset_ignore_list()

            # Obtain all required information on the datasets,
            # like run numbers and GlobalTags.
            self.build_datasets_information()

            if self.use_ref_hists and \
               self.ref_hist_mappings_needed():
                # Load the dataset name to reference histogram
                # name mappings from file.
                self.load_ref_hist_mappings()
                # Now make sure that for all datasets we want to
                # process there is a reference defined. Otherwise
                # just bomb out before wasting any more time.
                self.check_ref_hist_mappings()
            else:
                self.logger.info("No need to load reference " \
                                 "histogram mappings file")

            # OBSOLETE OBSOLETE OBSOLETE
            ## # TODO TODO TODO
            ## # Need to think about where this should go, but
            ## # somewhere we have to move over the fact that we want
            ## # to process all runs for each dataset that we're
            ## # considering. This basically means copying over the
            ## # information from self.datasets_information[]["runs"]
            ## # to self.datasets_to_use[].
            ## for dataset_name in self.datasets_to_use.keys():
            ##     self.datasets_to_use[dataset_name] = self.datasets_information[dataset_name]["runs"]
            ## # TODO TODO TODO end
            # OBSOLETE OBSOLETE OBSOLETE end

            self.process_runs_use_and_ignore_lists()

            # If we've been asked to sacrifice some parts of
            # spread-out samples in order to be able to partially
            # harvest them, we'll do that here.
            if self.harvesting_mode == "single-step-allow-partial":
                self.singlify_datasets()

            # Check dataset name(s)
            self.check_dataset_list()
            # and see if there is anything left to do.
            if len(self.datasets_to_use) < 1:
                self.logger.info("After all checks etc. " \
                                 "there are no datasets (left?) " \
                                 "to process")
            else:
                self.logger.info("After all checks etc. we are left " \
                                 "with %d dataset(s) to process " \
                                 "for a total of %d runs" % \
                                 (len(self.datasets_to_use),
                                  sum([len(i) for i in \
                                      self.datasets_to_use.values()])))

                # NOTE: The order in which things are done here is
                # important. At the end of the job, independent on
                # how it ends (exception, CTRL-C, normal end) the
                # book keeping is written to file. At that time it
                # should be clear which jobs are done and can be
                # submitted. This means we first create the
                # general files, and then the per-job config
                # files.

                # TODO TODO TODO
                # It would be good to modify the book keeping a
                # bit. Now we write the crab.cfg (which is the
                # same for all samples and runs) and the
                # multicrab.cfg (which contains blocks for all
                # runs of all samples) without updating our book
                # keeping. The only place we update the book
                # keeping is after writing the harvesting config
                # file for a given dataset. Since there is only
                # one single harvesting configuration for each
                # dataset, we have no book keeping information on
                # a per-run basis.
                # TODO TODO TODO end

                # Check if the CASTOR output area exists. If
                # necessary create it.
                self.create_and_check_castor_dirs()

                # Create one crab and one multicrab configuration
                # for all jobs together.
                self.write_crab_config()
                self.write_multicrab_config()

                # Loop over all datasets and create harvesting
                # config files for all of them. One harvesting
                # config per dataset is enough. The same file will
                # be re-used by CRAB for each run.
                # NOTE: We always need a harvesting
                # configuration. For the two-step harvesting we
                # also need a configuration file for the first
                # step: the monitoring element extraction.
                for dataset_name in self.datasets_to_use.keys():
                    try:
                        self.write_harvesting_config(dataset_name)
                        if self.harvesting_mode == "two-step":
                            self.write_me_extraction_config(dataset_name)
                    except:
                        # Doh! Just re-raise the damn thing.
                        raise
                    else:
                        # On success, record the per-run event
                        # counts in the book keeping.
                        ## tmp = self.datasets_information[dataset_name] \
                        ##       ["num_events"]
                        tmp = {}
                        for run_number in self.datasets_to_use[dataset_name]:
                            tmp[run_number] = self.datasets_information \
                                              [dataset_name]["num_events"][run_number]
                        if dataset_name in self.book_keeping_information:
                            self.book_keeping_information[dataset_name].update(tmp)
                        else:
                            self.book_keeping_information[dataset_name] = tmp

                # Explain the user what to do now.
                self.show_exit_message()

        except Usage as err:
            # A Usage error exits cleanly (exit code stays 0).
            # self.logger.fatal(err.msg)
            # self.option_parser.print_help()
            pass

        except Error as err:
            # self.logger.fatal(err.msg)
            exit_code = 1

        except Exception as err:
            # Hmmm, ignore keyboard interrupts from the
            # user. These are not a `serious problem'. We also
            # skip SystemExit, which is the exception thrown when
            # one calls sys.exit(). This, for example, is done by
            # the option parser after calling print_help(). We
            # also have to catch all `no such option'
            # complaints. Everything else we catch here is a
            # `serious problem'.
            if isinstance(err, SystemExit):
                self.logger.fatal(err.code)
            elif not isinstance(err, KeyboardInterrupt):
                self.logger.fatal("!" * 50)
                self.logger.fatal("  This looks like a serious problem.")
                self.logger.fatal("  If you are sure you followed all " \
                                  "instructions")
                self.logger.fatal("  please copy the below stack trace together")
                self.logger.fatal("  with a description of what you were doing to")
                self.logger.fatal("  <EMAIL>.")
                self.logger.fatal("  %s" % self.ident_string())
                self.logger.fatal("!" * 50)
                self.logger.fatal(str(err))
                import traceback
                traceback_string = traceback.format_exc()
                for line in traceback_string.split("\n"):
                    self.logger.fatal(line)
                self.logger.fatal("!" * 50)
                exit_code = 2

    # This is the stuff that we should really do, no matter
    # what. Of course cleaning up after ourselves is also done
    # from this place. This alsokeeps track of the book keeping
    # so far. (This means that if half of the configuration files
    # were created before e.g. the disk was full, we should still
    # have a consistent book keeping file.
    finally:

        self.cleanup()

    ###

    if self.crab_submission == True:
        os.system("multicrab -create")
        os.system("multicrab -submit")

    # End of run.
    return exit_code
# End of CMSHarvester.
###########################################################################
## Main entry point.
###########################################################################
if __name__ == "__main__":
    # Main entry point for harvesting: build a harvester and run it.
    harvester = CMSHarvester()
    harvester.run()
# Done.
###########################################################################
|
[
"optparse.IndentedHelpFormatter.format_usage",
"logging.Formatter",
"datetime.datetime.utcnow",
"six.iteritems",
"builtins.range",
"os.path.join",
"os.path.normpath",
"traceback.format_exc",
"copy.deepcopy",
"Configuration.PyReleaseValidation.ConfigBuilder.ConfigBuilder",
"logging.StreamHandler",
"os.system",
"commands.getstatusoutput",
"logging.shutdown",
"os.getenv",
"re.compile",
"DBSAPI.dbsApi.DbsApi",
"datetime.datetime.isoformat",
"random.choice",
"inspect.getargspec",
"functools.reduce",
"os.path.split",
"logging.getLogger"
] |
[((18398, 18421), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (18419, 18421), False, 'import logging\n'), ((18553, 18585), 'logging.Formatter', 'logging.Formatter', (['"""%(message)s"""'], {}), "('%(message)s')\n", (18570, 18585), False, 'import logging\n'), ((18651, 18670), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (18668, 18670), False, 'import logging\n'), ((19138, 19156), 'logging.shutdown', 'logging.shutdown', ([], {}), '()\n', (19154, 19156), False, 'import logging\n'), ((19312, 19338), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (19336, 19338), False, 'import datetime\n'), ((23821, 23869), 'logging.Formatter', 'logging.Formatter', (['"""[%(levelname)s] %(message)s"""'], {}), "('[%(levelname)s] %(message)s')\n", (23838, 23869), False, 'import logging\n'), ((38818, 38855), 'os.path.join', 'os.path.join', (['os.path.sep', 'castor_dir'], {}), '(os.path.sep, castor_dir)\n', (38830, 38855), False, 'import os\n'), ((38887, 38915), 'os.path.normpath', 'os.path.normpath', (['castor_dir'], {}), '(castor_dir)\n', (38903, 38915), False, 'import os\n'), ((47817, 47852), 'os.path.join', 'os.path.join', (['castor_path', 'datatype'], {}), '(castor_path, datatype)\n', (47829, 47852), False, 'import os\n'), ((48004, 48046), 'os.path.join', 'os.path.join', (['castor_path', 'harvesting_type'], {}), '(castor_path, harvesting_type)\n', (48016, 48046), False, 'import os\n'), ((48797, 48839), 'os.path.join', 'os.path.join', (['castor_path', 'release_version'], {}), '(castor_path, release_version)\n', (48809, 48839), False, 'import os\n'), ((48961, 49008), 'os.path.join', 'os.path.join', (['castor_path', 'dataset_name_escaped'], {}), '(castor_path, dataset_name_escaped)\n', (48973, 49008), False, 'import os\n'), ((49045, 49074), 'os.path.normpath', 'os.path.normpath', (['castor_path'], {}), '(castor_path)\n', (49061, 49074), False, 'import os\n'), ((50512, 50560), 'os.path.join', 'os.path.join', (['castor_path', 
"('run_%d' % run_number)"], {}), "(castor_path, 'run_%d' % run_number)\n", (50524, 50560), False, 'import os\n'), ((50803, 50839), 'os.path.join', 'os.path.join', (['castor_path', '"""nevents"""'], {}), "(castor_path, 'nevents')\n", (50815, 50839), False, 'import os\n'), ((50876, 50905), 'os.path.normpath', 'os.path.normpath', (['castor_path'], {}), '(castor_path)\n', (50892, 50905), False, 'import os\n'), ((51614, 51649), 'six.iteritems', 'six.iteritems', (['self.datasets_to_use'], {}), '(self.datasets_to_use)\n', (51627, 51649), False, 'import six\n'), ((56577, 56606), 'builtins.range', 'range', (['len_castor_path_pieces'], {}), '(len_castor_path_pieces)\n', (56582, 56606), False, 'from builtins import range\n'), ((89638, 89682), 'six.iteritems', 'six.iteritems', (['self.frontier_connection_name'], {}), '(self.frontier_connection_name)\n', (89651, 89682), False, 'import six\n'), ((90616, 90642), 'os.getenv', 'os.getenv', (['"""CMSSW_VERSION"""'], {}), "('CMSSW_VERSION')\n", (90625, 90642), False, 'import os\n'), ((91357, 91381), 'os.getenv', 'os.getenv', (['"""DBSCMD_HOME"""'], {}), "('DBSCMD_HOME')\n", (91366, 91381), False, 'import os\n'), ((141978, 142013), 'copy.deepcopy', 'copy.deepcopy', (['self.datasets_to_use'], {}), '(self.datasets_to_use)\n', (141991, 142013), False, 'import copy\n'), ((152425, 152460), 'copy.deepcopy', 'copy.deepcopy', (['self.datasets_to_use'], {}), '(self.datasets_to_use)\n', (152438, 152460), False, 'import copy\n'), ((161964, 162005), 'copy.deepcopy', 'copy.deepcopy', (['dataset_names_after_checks'], {}), '(dataset_names_after_checks)\n', (161977, 162005), False, 'import copy\n'), ((162042, 162083), 'six.iteritems', 'six.iteritems', (['dataset_names_after_checks'], {}), '(dataset_names_after_checks)\n', (162055, 162083), False, 'import six\n'), ((173176, 173205), 'commands.getstatusoutput', 'commands.getstatusoutput', (['cmd'], {}), '(cmd)\n', (173200, 173205), False, 'import commands\n'), ((183774, 183803), 
'commands.getstatusoutput', 'commands.getstatusoutput', (['cmd'], {}), '(cmd)\n', (183798, 183803), False, 'import commands\n'), ((185675, 185704), 'commands.getstatusoutput', 'commands.getstatusoutput', (['cmd'], {}), '(cmd)\n', (185699, 185704), False, 'import commands\n'), ((187575, 187604), 'commands.getstatusoutput', 'commands.getstatusoutput', (['cmd'], {}), '(cmd)\n', (187599, 187604), False, 'import commands\n'), ((212419, 212456), 'six.iteritems', 'six.iteritems', (['self.ref_hist_mappings'], {}), '(self.ref_hist_mappings)\n', (212432, 212456), False, 'import six\n'), ((6175, 6231), 'optparse.IndentedHelpFormatter.format_usage', 'optparse.IndentedHelpFormatter.format_usage', (['self', 'usage'], {}), '(self, usage)\n', (6218, 6231), False, 'import optparse\n'), ((19471, 19508), 'datetime.datetime.isoformat', 'datetime.datetime.isoformat', (['time_now'], {}), '(time_now)\n', (19498, 19508), False, 'import datetime\n'), ((52906, 52935), 'commands.getstatusoutput', 'commands.getstatusoutput', (['cmd'], {}), '(cmd)\n', (52930, 52935), False, 'import commands\n'), ((54379, 54398), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (54392, 54398), False, 'import os\n'), ((54950, 54991), 're.compile', 're.compile', (['""".*\\\\(([0123456789]{5})\\\\).*"""'], {}), "('.*\\\\(([0123456789]{5})\\\\).*')\n", (54960, 54991), False, 'import re\n'), ((57827, 57852), 'os.path.join', 'os.path.join', (['path', 'piece'], {}), '(path, piece)\n', (57839, 57852), False, 'import os\n'), ((92927, 92939), 'DBSAPI.dbsApi.DbsApi', 'DbsApi', (['args'], {}), '(args)\n', (92933, 92939), False, 'from DBSAPI.dbsApi import DbsApi\n'), ((193266, 193312), 'Configuration.PyReleaseValidation.ConfigBuilder.ConfigBuilder', 'ConfigBuilder', (['config_options'], {'with_input': '(True)'}), '(config_options, with_input=True)\n', (193279, 193312), False, 'from Configuration.PyReleaseValidation.ConfigBuilder import ConfigBuilder, defaultOptions\n'), ((193412, 193441), 
'Configuration.PyReleaseValidation.ConfigBuilder.ConfigBuilder', 'ConfigBuilder', (['config_options'], {}), '(config_options)\n', (193425, 193441), False, 'from Configuration.PyReleaseValidation.ConfigBuilder import ConfigBuilder, defaultOptions\n'), ((233137, 233167), 'os.system', 'os.system', (['"""multicrab -create"""'], {}), "('multicrab -create')\n", (233146, 233167), False, 'import os\n'), ((233180, 233210), 'os.system', 'os.system', (['"""multicrab -submit"""'], {}), "('multicrab -submit')\n", (233189, 233210), False, 'import os\n'), ((19831, 19877), 'functools.reduce', 'reduce', (["(lambda x, y: x + ' ' + y)", 'sys.argv[1:]'], {}), "(lambda x, y: x + ' ' + y, sys.argv[1:])\n", (19837, 19877), False, 'from functools import reduce\n'), ((59787, 59816), 'commands.getstatusoutput', 'commands.getstatusoutput', (['cmd'], {}), '(cmd)\n', (59811, 59816), False, 'import commands\n'), ((61166, 61195), 'commands.getstatusoutput', 'commands.getstatusoutput', (['cmd'], {}), '(cmd)\n', (61190, 61195), False, 'import commands\n'), ((66225, 66241), 'random.choice', 'choice', (['t1_sites'], {}), '(t1_sites)\n', (66231, 66241), False, 'from random import choice\n'), ((66319, 66332), 'random.choice', 'choice', (['sites'], {}), '(sites)\n', (66325, 66332), False, 'from random import choice\n'), ((67923, 67952), 'commands.getstatusoutput', 'commands.getstatusoutput', (['cmd'], {}), '(cmd)\n', (67947, 67952), False, 'import commands\n'), ((145134, 145163), 'commands.getstatusoutput', 'commands.getstatusoutput', (['cmd'], {}), '(cmd)\n', (145158, 145163), False, 'import commands\n'), ((174228, 174257), 'commands.getstatusoutput', 'commands.getstatusoutput', (['cmd'], {}), '(cmd)\n', (174252, 174257), False, 'import commands\n'), ((193156, 193190), 'inspect.getargspec', 'getargspec', (['ConfigBuilder.__init__'], {}), '(ConfigBuilder.__init__)\n', (193166, 193190), False, 'from inspect import getargspec\n'), ((60076, 60105), 'commands.getstatusoutput', 'commands.getstatusoutput', 
(['cmd'], {}), '(cmd)\n', (60100, 60105), False, 'import commands\n'), ((60385, 60414), 'commands.getstatusoutput', 'commands.getstatusoutput', (['cmd'], {}), '(cmd)\n', (60409, 60414), False, 'import commands\n'), ((62939, 62968), 'commands.getstatusoutput', 'commands.getstatusoutput', (['cmd'], {}), '(cmd)\n', (62963, 62968), False, 'import commands\n'), ((174911, 174940), 'builtins.range', 'range', (['(1)', 'number_max_sites', '(1)'], {}), '(1, number_max_sites, 1)\n', (174916, 174940), False, 'from builtins import range\n'), ((232422, 232444), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (232442, 232444), False, 'import traceback\n')]
|
#!/usr/bin/env python
"""Interactive control for the car"""
import time
import io
import pygame
import pygame.font
import picamera
import configuration
import helpers.motor_driver as motor_driver_helper
import helpers.image as image_helper
UP = LEFT = DOWN = RIGHT = ACCELERATE = DECELERATE = False
def get_keys():
    """Poll pygame events and return the current control state.

    Returns a tuple of (UP, DOWN, LEFT, RIGHT, change, ACCELERATE,
    DECELERATE, stop): which keys are currently held (tracked in module
    globals), whether any tracked key changed state during this poll,
    and whether the user asked to quit (window close, Escape, or 'q').
    """
    change = False
    stop = False
    key_to_global_name = {
        pygame.K_LEFT: 'LEFT',
        pygame.K_RIGHT: 'RIGHT',
        pygame.K_UP: 'UP',
        pygame.K_DOWN: 'DOWN',
        pygame.K_w: 'ACCELERATE',
        pygame.K_s: 'DECELERATE'
    }
    for event in pygame.event.get():
        # Window close button.
        if event.type == pygame.QUIT:
            stop = True
        elif event.type in {pygame.KEYDOWN, pygame.KEYUP}:
            down = (event.type == pygame.KEYDOWN)
            # BUG FIX: the original compared event.type against key
            # constants (pygame.K_q / pygame.K_ESCAPE); event.type holds
            # event-type ids, not key codes, so quitting via the keyboard
            # never triggered. Check event.key on a key event instead.
            if down and event.key in (pygame.K_q, pygame.K_ESCAPE):
                stop = True
            elif event.key in key_to_global_name:
                globals()[key_to_global_name[event.key]] = down
                # BUG FIX: accumulate the change flag instead of
                # overwriting it, so an earlier change in the same event
                # batch is not lost when a later event is untracked.
                change = True
    return (UP, DOWN, LEFT, RIGHT, change, ACCELERATE, DECELERATE, stop)
def interactive_control():
    """Run the main interactive-control loop.

    Opens the Pi camera, starts the motor PWM, then repeatedly polls the
    keyboard, applies the requested drive command and captures one
    labelled JPEG frame per iteration (capped at 30 Hz) until a stop is
    requested.
    """
    setup_interactive_control()
    clock = pygame.time.Clock()
    with picamera.PiCamera() as camera:
        camera.resolution = configuration.PICAMERA_RESOLUTION
        camera.framerate = configuration.PICAMERA_FRAMERATE
        # Give the camera sensor time to settle before capturing.
        time.sleep(configuration.PICAMERA_WARM_UP_TIME)
        # GPIO.output(BACK_MOTOR_ENABLE_PIN, True)
        pwm = motor_driver_helper.get_pwm_imstance()
        motor_driver_helper.start_pwm(pwm)
        command = 'idle'
        duty_cycle = configuration.INITIAL_PWM_DUTY_CYCLE
        while True:
            up_key, down, left, right, change, accelerate, decelerate, stop = get_keys()
            if stop:
                break
            if accelerate:
                # Speed up in steps of 3, clamped at 100% duty cycle.
                duty_cycle = duty_cycle + 3 if (duty_cycle + 3) <= 100 else duty_cycle
                motor_driver_helper.change_pwm_duty_cycle(pwm, duty_cycle)
                print("speed: " + str(duty_cycle))
            if decelerate:
                # Slow down in steps of 3, clamped at 0% duty cycle.
                duty_cycle = duty_cycle - 3 if (duty_cycle - 3) >= 0 else duty_cycle
                motor_driver_helper.change_pwm_duty_cycle(pwm, duty_cycle)
                print("speed: " + str(duty_cycle))
            if change:
                # Recompute the drive command only when a key changed state.
                command = 'idle'
                motor_driver_helper.set_idle_mode()
                if up_key:
                    command = 'forward'
                    print(duty_cycle)
                    motor_driver_helper.set_forward_mode()
                elif down:
                    command = 'reverse'
                    motor_driver_helper.set_reverse_mode()
                # Combine steering with the drive direction, e.g. 'forward_left'.
                append = lambda x: command + '_' + x if command != 'idle' else x
                if left:
                    command = append('left')
                    motor_driver_helper.set_left_mode()
                elif right:
                    command = append('right')
                    motor_driver_helper.set_right_mode()
                print(command)
            # Capture one frame and save it labelled with the active
            # command (presumably used later as training data — the
            # labelling is done by image_helper.save_image_with_direction).
            stream = io.BytesIO()
            camera.capture(stream, format='jpeg', use_video_port=True)
            image_helper.save_image_with_direction(stream, command)
            stream.flush()
            clock.tick(30)
    pygame.quit()
def setup_interactive_control():
    """Initialise the pygame window that captures keyboard input.

    Shows a small static window with a usage hint; the window only
    exists so pygame receives key events.
    """
    pygame.init()
    width, height = 300, 400
    screen = pygame.display.set_mode((width, height))
    pygame.display.set_caption('RC Car Interactive Control')
    white = (255, 255, 255)
    hint_font = pygame.font.Font(None, 40)
    hint = hint_font.render('Use arrows to move', 1, white)
    hint_rect = hint.get_rect(centerx=width / 2)
    backdrop = pygame.Surface(screen.get_size())
    backdrop.blit(hint, hint_rect)
    screen.blit(backdrop, (0, 0))
    pygame.display.flip()
def main():
    """Entry point: configure the GPIO pins, then run the control loop."""
    motor_driver_helper.set_gpio_pins()
    interactive_control()
if __name__ == '__main__':
    main()
|
[
"helpers.motor_driver.set_forward_mode",
"pygame.event.get",
"helpers.motor_driver.change_pwm_duty_cycle",
"helpers.motor_driver.set_right_mode",
"helpers.motor_driver.set_left_mode",
"helpers.motor_driver.get_pwm_imstance",
"helpers.motor_driver.set_idle_mode",
"pygame.font.Font",
"helpers.motor_driver.set_gpio_pins",
"pygame.display.set_mode",
"pygame.display.set_caption",
"picamera.PiCamera",
"pygame.quit",
"helpers.motor_driver.start_pwm",
"io.BytesIO",
"helpers.motor_driver.set_reverse_mode",
"pygame.init",
"time.sleep",
"helpers.image.save_image_with_direction",
"pygame.time.Clock",
"pygame.display.flip"
] |
[((839, 857), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (855, 857), False, 'import pygame\n'), ((1400, 1419), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (1417, 1419), False, 'import pygame\n'), ((3605, 3618), 'pygame.init', 'pygame.init', ([], {}), '()\n', (3616, 3618), False, 'import pygame\n'), ((3662, 3699), 'pygame.display.set_mode', 'pygame.display.set_mode', (['display_size'], {}), '(display_size)\n', (3685, 3699), False, 'import pygame\n'), ((3804, 3830), 'pygame.font.Font', 'pygame.font.Font', (['None', '(40)'], {}), '(None, 40)\n', (3820, 3830), False, 'import pygame\n'), ((3835, 3891), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""RC Car Interactive Control"""'], {}), "('RC Car Interactive Control')\n", (3861, 3891), False, 'import pygame\n'), ((4105, 4126), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4124, 4126), False, 'import pygame\n'), ((4168, 4203), 'helpers.motor_driver.set_gpio_pins', 'motor_driver_helper.set_gpio_pins', ([], {}), '()\n', (4201, 4203), True, 'import helpers.motor_driver as motor_driver_helper\n'), ((1429, 1448), 'picamera.PiCamera', 'picamera.PiCamera', ([], {}), '()\n', (1446, 1448), False, 'import picamera\n'), ((1590, 1637), 'time.sleep', 'time.sleep', (['configuration.PICAMERA_WARM_UP_TIME'], {}), '(configuration.PICAMERA_WARM_UP_TIME)\n', (1600, 1637), False, 'import time\n'), ((1703, 1741), 'helpers.motor_driver.get_pwm_imstance', 'motor_driver_helper.get_pwm_imstance', ([], {}), '()\n', (1739, 1741), True, 'import helpers.motor_driver as motor_driver_helper\n'), ((1750, 1784), 'helpers.motor_driver.start_pwm', 'motor_driver_helper.start_pwm', (['pwm'], {}), '(pwm)\n', (1779, 1784), True, 'import helpers.motor_driver as motor_driver_helper\n'), ((3499, 3512), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (3510, 3512), False, 'import pygame\n'), ((3284, 3296), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3294, 3296), False, 'import io\n'), ((3380, 
3435), 'helpers.image.save_image_with_direction', 'image_helper.save_image_with_direction', (['stream', 'command'], {}), '(stream, command)\n', (3418, 3435), True, 'import helpers.image as image_helper\n'), ((2150, 2208), 'helpers.motor_driver.change_pwm_duty_cycle', 'motor_driver_helper.change_pwm_duty_cycle', (['pwm', 'duty_cycle'], {}), '(pwm, duty_cycle)\n', (2191, 2208), True, 'import helpers.motor_driver as motor_driver_helper\n'), ((2388, 2446), 'helpers.motor_driver.change_pwm_duty_cycle', 'motor_driver_helper.change_pwm_duty_cycle', (['pwm', 'duty_cycle'], {}), '(pwm, duty_cycle)\n', (2429, 2446), True, 'import helpers.motor_driver as motor_driver_helper\n'), ((2570, 2605), 'helpers.motor_driver.set_idle_mode', 'motor_driver_helper.set_idle_mode', ([], {}), '()\n', (2603, 2605), True, 'import helpers.motor_driver as motor_driver_helper\n'), ((2731, 2769), 'helpers.motor_driver.set_forward_mode', 'motor_driver_helper.set_forward_mode', ([], {}), '()\n', (2767, 2769), True, 'import helpers.motor_driver as motor_driver_helper\n'), ((3069, 3104), 'helpers.motor_driver.set_left_mode', 'motor_driver_helper.set_left_mode', ([], {}), '()\n', (3102, 3104), True, 'import helpers.motor_driver as motor_driver_helper\n'), ((2857, 2895), 'helpers.motor_driver.set_reverse_mode', 'motor_driver_helper.set_reverse_mode', ([], {}), '()\n', (2893, 2895), True, 'import helpers.motor_driver as motor_driver_helper\n'), ((3199, 3235), 'helpers.motor_driver.set_right_mode', 'motor_driver_helper.set_right_mode', ([], {}), '()\n', (3233, 3235), True, 'import helpers.motor_driver as motor_driver_helper\n')]
|
import random
from time import sleep


def sortear_jogos(quant):
    """Draw *quant* Mega-Sena games.

    Each game is a sorted list of 6 unique numbers in the range 1..60.
    random.sample replaces the original manual draw-until-unique loop.

    :param quant: number of games to draw (0 yields an empty list).
    :return: list of games, each a sorted list of 6 ints.
    """
    return [sorted(random.sample(range(1, 61), 6)) for _ in range(quant)]


def main():
    """Prompt for the number of games, draw them and print each one."""
    print('=+' * 30)
    print(' JOGO DA MEGA SENA ')
    print('=+' * 30)
    quant = int(input('Quantos jogos você quer sortear? : '))
    jogos = sortear_jogos(quant)
    for n in range(0, quant):
        print(f' JOGO {n+1}: {jogos[n]}')
    sleep(2)


# Guarding the entry point keeps the prompt/print side effects out of
# imports while preserving the original behaviour when run as a script.
if __name__ == '__main__':
    main()
|
[
"random.randint",
"time.sleep"
] |
[((600, 608), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (605, 608), False, 'from time import sleep\n'), ((293, 314), 'random.randint', 'random.randint', (['(1)', '(60)'], {}), '(1, 60)\n', (307, 314), False, 'import random\n')]
|
"""Support for Xiaomi Yeelight WiFi color bulb."""
from __future__ import annotations
import asyncio
import logging
import voluptuous as vol
from yeelight import BulbException
from yeelight.aio import AsyncBulb
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry, ConfigEntryNotReady
from homeassistant.const import (
CONF_DEVICES,
CONF_HOST,
CONF_ID,
CONF_NAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from .const import (
ACTION_OFF,
ACTION_RECOVER,
ACTION_STAY,
ATTR_ACTION,
ATTR_COUNT,
ATTR_TRANSITIONS,
CONF_CUSTOM_EFFECTS,
CONF_DETECTED_MODEL,
CONF_FLOW_PARAMS,
CONF_MODE_MUSIC,
CONF_MODEL,
CONF_NIGHTLIGHT_SWITCH,
CONF_NIGHTLIGHT_SWITCH_TYPE,
CONF_SAVE_ON_CHANGE,
CONF_TRANSITION,
DATA_CONFIG_ENTRIES,
DATA_CUSTOM_EFFECTS,
DATA_DEVICE,
DEFAULT_MODE_MUSIC,
DEFAULT_NAME,
DEFAULT_NIGHTLIGHT_SWITCH,
DEFAULT_SAVE_ON_CHANGE,
DEFAULT_TRANSITION,
DOMAIN,
NIGHTLIGHT_SWITCH_TYPE_LIGHT,
PLATFORMS,
YEELIGHT_HSV_TRANSACTION,
YEELIGHT_RGB_TRANSITION,
YEELIGHT_SLEEP_TRANSACTION,
YEELIGHT_TEMPERATURE_TRANSACTION,
)
from .device import YeelightDevice, async_format_id
from .scanner import YeelightScanner
_LOGGER = logging.getLogger(__name__)
# Schema for a custom flow effect: optional repeat count and end-of-flow
# action, plus a list of transition definitions. vol.Exclusive enforces
# exactly one transition type per list entry.
YEELIGHT_FLOW_TRANSITION_SCHEMA = {
    vol.Optional(ATTR_COUNT, default=0): cv.positive_int,
    vol.Optional(ATTR_ACTION, default=ACTION_RECOVER): vol.Any(
        ACTION_RECOVER, ACTION_OFF, ACTION_STAY
    ),
    vol.Required(ATTR_TRANSITIONS): [
        {
            vol.Exclusive(YEELIGHT_RGB_TRANSITION, CONF_TRANSITION): vol.All(
                cv.ensure_list, [cv.positive_int]
            ),
            vol.Exclusive(YEELIGHT_HSV_TRANSACTION, CONF_TRANSITION): vol.All(
                cv.ensure_list, [cv.positive_int]
            ),
            vol.Exclusive(YEELIGHT_TEMPERATURE_TRANSACTION, CONF_TRANSITION): vol.All(
                cv.ensure_list, [cv.positive_int]
            ),
            vol.Exclusive(YEELIGHT_SLEEP_TRANSACTION, CONF_TRANSITION): vol.All(
                cv.ensure_list, [cv.positive_int]
            ),
        }
    ],
}
# Per-device options accepted for YAML-configured bulbs.
DEVICE_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_TRANSITION, default=DEFAULT_TRANSITION): cv.positive_int,
        vol.Optional(CONF_MODE_MUSIC, default=False): cv.boolean,
        vol.Optional(CONF_SAVE_ON_CHANGE, default=False): cv.boolean,
        vol.Optional(CONF_NIGHTLIGHT_SWITCH_TYPE): vol.Any(
            NIGHTLIGHT_SWITCH_TYPE_LIGHT
        ),
        vol.Optional(CONF_MODEL): cv.string,
    }
)
# Top-level configuration.yaml schema: a host -> DEVICE_SCHEMA mapping
# plus optional named custom effects (each referencing the flow schema).
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA},
                vol.Optional(CONF_CUSTOM_EFFECTS): [
                    {
                        vol.Required(CONF_NAME): cv.string,
                        vol.Required(CONF_FLOW_PARAMS): YEELIGHT_FLOW_TRANSITION_SCHEMA,
                    }
                ],
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the Yeelight bulbs.

    Stores the custom effects from YAML in hass.data and imports any
    YAML-configured devices into config entries.
    """
    conf = config.get(DOMAIN, {})
    hass.data[DOMAIN] = {
        DATA_CUSTOM_EFFECTS: conf.get(CONF_CUSTOM_EFFECTS, {}),
        DATA_CONFIG_ENTRIES: {},
    }
    # Make sure the scanner is always started in case we are
    # going to retry via ConfigEntryNotReady and the bulb has changed
    # ip
    scanner = YeelightScanner.async_get(hass)
    await scanner.async_setup()
    # Import manually configured devices
    for host, device_config in config.get(DOMAIN, {}).get(CONF_DEVICES, {}).items():
        _LOGGER.debug("Importing configured %s", host)
        entry_config = {CONF_HOST: host, **device_config}
        # Each YAML device becomes an import-flow config entry so YAML
        # and UI-configured bulbs share the same setup path.
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN, context={"source": SOURCE_IMPORT}, data=entry_config
            )
        )
    return True
async def _async_initialize(
    hass: HomeAssistant,
    entry: ConfigEntry,
    device: YeelightDevice,
) -> None:
    """Set up *device* and register it under the config entry's data."""
    entry_data = hass.data[DOMAIN][DATA_CONFIG_ENTRIES][entry.entry_id] = {}
    await device.async_setup()
    entry_data[DATA_DEVICE] = device
    if (
        device.capabilities
        and entry.data.get(CONF_DETECTED_MODEL) != device.capabilities["model"]
    ):
        # Persist the freshly detected model in the entry so future
        # setups can use it without waiting for the bulb to answer.
        hass.config_entries.async_update_entry(
            entry,
            data={**entry.data, CONF_DETECTED_MODEL: device.capabilities["model"]},
        )
@callback
def _async_normalize_config_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Move options from data for imported entries.

    Initialize options with default values for other entries.
    Copy the unique id to CONF_ID if it is missing
    """
    if not entry.options:
        # Imported/legacy entry: split the stored data into identity
        # fields (data) and user-tunable settings (options), filling in
        # defaults for anything the YAML import did not provide.
        hass.config_entries.async_update_entry(
            entry,
            data={
                CONF_HOST: entry.data.get(CONF_HOST),
                CONF_ID: entry.data.get(CONF_ID) or entry.unique_id,
                CONF_DETECTED_MODEL: entry.data.get(CONF_DETECTED_MODEL),
            },
            options={
                CONF_NAME: entry.data.get(CONF_NAME, ""),
                CONF_MODEL: entry.data.get(
                    CONF_MODEL, entry.data.get(CONF_DETECTED_MODEL, "")
                ),
                CONF_TRANSITION: entry.data.get(CONF_TRANSITION, DEFAULT_TRANSITION),
                CONF_MODE_MUSIC: entry.data.get(CONF_MODE_MUSIC, DEFAULT_MODE_MUSIC),
                CONF_SAVE_ON_CHANGE: entry.data.get(
                    CONF_SAVE_ON_CHANGE, DEFAULT_SAVE_ON_CHANGE
                ),
                CONF_NIGHTLIGHT_SWITCH: entry.data.get(
                    CONF_NIGHTLIGHT_SWITCH, DEFAULT_NIGHTLIGHT_SWITCH
                ),
            },
            unique_id=entry.unique_id or entry.data.get(CONF_ID),
        )
    elif entry.unique_id and not entry.data.get(CONF_ID):
        # Backfill CONF_ID in data from the entry's unique id.
        hass.config_entries.async_update_entry(
            entry,
            data={CONF_HOST: entry.data.get(CONF_HOST), CONF_ID: entry.unique_id},
        )
    elif entry.data.get(CONF_ID) and not entry.unique_id:
        # Backfill the entry's unique id from CONF_ID in data.
        hass.config_entries.async_update_entry(
            entry,
            unique_id=entry.data[CONF_ID],
        )
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Yeelight from a config entry.

    Raises ConfigEntryNotReady when the bulb has no known host yet or
    cannot be reached, so Home Assistant retries later.
    """
    _async_normalize_config_entry(hass, entry)
    if not entry.data.get(CONF_HOST):
        # No IP address yet: wait for the scanner to discover the bulb.
        bulb_id = async_format_id(entry.data.get(CONF_ID, entry.unique_id))
        raise ConfigEntryNotReady(f"Waiting for {bulb_id} to be discovered")
    try:
        device = await _async_get_device(hass, entry.data[CONF_HOST], entry)
        await _async_initialize(hass, entry, device)
    except (asyncio.TimeoutError, OSError, BulbException) as ex:
        raise ConfigEntryNotReady from ex
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    # Wait to install the reload listener until everything was successfully initialized
    entry.async_on_unload(entry.add_update_listener(_async_update_listener))
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Tear down a Yeelight config entry and drop its stored device data."""
    hass.data[DOMAIN][DATA_CONFIG_ENTRIES].pop(entry.entry_id)
    unloaded = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    return unloaded
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry):
    """Handle options update."""
    # Reload the entry so changed options take effect immediately.
    await hass.config_entries.async_reload(entry.entry_id)
async def _async_get_device(
    hass: HomeAssistant, host: str, entry: ConfigEntry
) -> YeelightDevice:
    """Create a YeelightDevice for *host*, start its push listener and
    fetch its initial state.

    Raises ConfigEntryNotReady if the initial state cannot be fetched.
    """
    # Get model from config and capabilities
    model = entry.options.get(CONF_MODEL) or entry.data.get(CONF_DETECTED_MODEL)
    # Set up device
    bulb = AsyncBulb(host, model=model or None)
    device = YeelightDevice(hass, host, entry.options, bulb)
    # start listening for local pushes
    await device.bulb.async_listen(device.async_update_callback)
    # register stop callback to shutdown listening for local pushes
    async def async_stop_listen_task(event):
        """Stop listen task."""
        _LOGGER.debug("Shutting down Yeelight Listener (stop event)")
        await device.bulb.async_stop_listening()
    @callback
    def _async_stop_listen_on_unload():
        """Stop listen task."""
        _LOGGER.debug("Shutting down Yeelight Listener (unload)")
        hass.async_create_task(device.bulb.async_stop_listening())
    # Stop the listener both on Home Assistant shutdown and on entry unload.
    entry.async_on_unload(
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_listen_task)
    )
    entry.async_on_unload(_async_stop_listen_on_unload)
    # fetch initial state
    await device.async_update()
    if (
        # Must have last_properties
        not device.bulb.last_properties
        # Must have at least a power property
        or (
            "main_power" not in device.bulb.last_properties
            and "power" not in device.bulb.last_properties
        )
    ):
        raise ConfigEntryNotReady(
            "Could not fetch initial state; try power cycling the device"
        )
    return device
|
[
"voluptuous.Exclusive",
"voluptuous.Optional",
"voluptuous.Any",
"voluptuous.All",
"voluptuous.Required",
"homeassistant.config_entries.ConfigEntryNotReady",
"yeelight.aio.AsyncBulb",
"logging.getLogger"
] |
[((1420, 1447), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1437, 1447), False, 'import logging\n'), ((1490, 1525), 'voluptuous.Optional', 'vol.Optional', (['ATTR_COUNT'], {'default': '(0)'}), '(ATTR_COUNT, default=0)\n', (1502, 1525), True, 'import voluptuous as vol\n'), ((1548, 1597), 'voluptuous.Optional', 'vol.Optional', (['ATTR_ACTION'], {'default': 'ACTION_RECOVER'}), '(ATTR_ACTION, default=ACTION_RECOVER)\n', (1560, 1597), True, 'import voluptuous as vol\n'), ((1667, 1697), 'voluptuous.Required', 'vol.Required', (['ATTR_TRANSITIONS'], {}), '(ATTR_TRANSITIONS)\n', (1679, 1697), True, 'import voluptuous as vol\n'), ((1599, 1647), 'voluptuous.Any', 'vol.Any', (['ACTION_RECOVER', 'ACTION_OFF', 'ACTION_STAY'], {}), '(ACTION_RECOVER, ACTION_OFF, ACTION_STAY)\n', (1606, 1647), True, 'import voluptuous as vol\n'), ((8114, 8150), 'yeelight.aio.AsyncBulb', 'AsyncBulb', (['host'], {'model': '(model or None)'}), '(host, model=model or None)\n', (8123, 8150), False, 'from yeelight.aio import AsyncBulb\n'), ((2358, 2403), 'voluptuous.Optional', 'vol.Optional', (['CONF_NAME'], {'default': 'DEFAULT_NAME'}), '(CONF_NAME, default=DEFAULT_NAME)\n', (2370, 2403), True, 'import voluptuous as vol\n'), ((2424, 2481), 'voluptuous.Optional', 'vol.Optional', (['CONF_TRANSITION'], {'default': 'DEFAULT_TRANSITION'}), '(CONF_TRANSITION, default=DEFAULT_TRANSITION)\n', (2436, 2481), True, 'import voluptuous as vol\n'), ((2508, 2552), 'voluptuous.Optional', 'vol.Optional', (['CONF_MODE_MUSIC'], {'default': '(False)'}), '(CONF_MODE_MUSIC, default=False)\n', (2520, 2552), True, 'import voluptuous as vol\n'), ((2574, 2622), 'voluptuous.Optional', 'vol.Optional', (['CONF_SAVE_ON_CHANGE'], {'default': '(False)'}), '(CONF_SAVE_ON_CHANGE, default=False)\n', (2586, 2622), True, 'import voluptuous as vol\n'), ((2644, 2685), 'voluptuous.Optional', 'vol.Optional', (['CONF_NIGHTLIGHT_SWITCH_TYPE'], {}), '(CONF_NIGHTLIGHT_SWITCH_TYPE)\n', (2656, 2685), True, 'import 
voluptuous as vol\n'), ((2756, 2780), 'voluptuous.Optional', 'vol.Optional', (['CONF_MODEL'], {}), '(CONF_MODEL)\n', (2768, 2780), True, 'import voluptuous as vol\n'), ((2687, 2724), 'voluptuous.Any', 'vol.Any', (['NIGHTLIGHT_SWITCH_TYPE_LIGHT'], {}), '(NIGHTLIGHT_SWITCH_TYPE_LIGHT)\n', (2694, 2724), True, 'import voluptuous as vol\n'), ((6821, 6883), 'homeassistant.config_entries.ConfigEntryNotReady', 'ConfigEntryNotReady', (['f"""Waiting for {bulb_id} to be discovered"""'], {}), "(f'Waiting for {bulb_id} to be discovered')\n", (6840, 6883), False, 'from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry, ConfigEntryNotReady\n'), ((9331, 9418), 'homeassistant.config_entries.ConfigEntryNotReady', 'ConfigEntryNotReady', (['"""Could not fetch initial state; try power cycling the device"""'], {}), "(\n 'Could not fetch initial state; try power cycling the device')\n", (9350, 9418), False, 'from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry, ConfigEntryNotReady\n'), ((1723, 1778), 'voluptuous.Exclusive', 'vol.Exclusive', (['YEELIGHT_RGB_TRANSITION', 'CONF_TRANSITION'], {}), '(YEELIGHT_RGB_TRANSITION, CONF_TRANSITION)\n', (1736, 1778), True, 'import voluptuous as vol\n'), ((1866, 1922), 'voluptuous.Exclusive', 'vol.Exclusive', (['YEELIGHT_HSV_TRANSACTION', 'CONF_TRANSITION'], {}), '(YEELIGHT_HSV_TRANSACTION, CONF_TRANSITION)\n', (1879, 1922), True, 'import voluptuous as vol\n'), ((2010, 2074), 'voluptuous.Exclusive', 'vol.Exclusive', (['YEELIGHT_TEMPERATURE_TRANSACTION', 'CONF_TRANSITION'], {}), '(YEELIGHT_TEMPERATURE_TRANSACTION, CONF_TRANSITION)\n', (2023, 2074), True, 'import voluptuous as vol\n'), ((2162, 2220), 'voluptuous.Exclusive', 'vol.Exclusive', (['YEELIGHT_SLEEP_TRANSACTION', 'CONF_TRANSITION'], {}), '(YEELIGHT_SLEEP_TRANSACTION, CONF_TRANSITION)\n', (2175, 2220), True, 'import voluptuous as vol\n'), ((1780, 1822), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[cv.positive_int]'], {}), '(cv.ensure_list, [cv.positive_int])\n', 
(1787, 1822), True, 'import voluptuous as vol\n'), ((1924, 1966), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[cv.positive_int]'], {}), '(cv.ensure_list, [cv.positive_int])\n', (1931, 1966), True, 'import voluptuous as vol\n'), ((2076, 2118), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[cv.positive_int]'], {}), '(cv.ensure_list, [cv.positive_int])\n', (2083, 2118), True, 'import voluptuous as vol\n'), ((2222, 2264), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[cv.positive_int]'], {}), '(cv.ensure_list, [cv.positive_int])\n', (2229, 2264), True, 'import voluptuous as vol\n'), ((2894, 2932), 'voluptuous.Optional', 'vol.Optional', (['CONF_DEVICES'], {'default': '{}'}), '(CONF_DEVICES, default={})\n', (2906, 2932), True, 'import voluptuous as vol\n'), ((2978, 3011), 'voluptuous.Optional', 'vol.Optional', (['CONF_CUSTOM_EFFECTS'], {}), '(CONF_CUSTOM_EFFECTS)\n', (2990, 3011), True, 'import voluptuous as vol\n'), ((3061, 3084), 'voluptuous.Required', 'vol.Required', (['CONF_NAME'], {}), '(CONF_NAME)\n', (3073, 3084), True, 'import voluptuous as vol\n'), ((3121, 3151), 'voluptuous.Required', 'vol.Required', (['CONF_FLOW_PARAMS'], {}), '(CONF_FLOW_PARAMS)\n', (3133, 3151), True, 'import voluptuous as vol\n')]
|
import os
import subprocess
import georinex as gr
import datetime
# For naming Rinex files
alphabet = 'abcdefghijklmnopqrstuvwx'
def convert_T01_to_dat(fullfile):
    """Extract a Trimble .T01 file to .dat using the runpkr00 tool.

    Uses an argument list instead of an os.system shell string, so
    filenames containing quotes or shell metacharacters can neither
    break the command nor inject into the shell.

    :param fullfile: path of the .T01 file to convert.
    """
    # check=False mirrors os.system, which ignored the exit status.
    subprocess.run(['runpkr00.exe', '-d', fullfile], check=False)
def convert_dat_to_rinex(fullfile):
    """Convert the .dat sibling of *fullfile* to RINEX with teqc.

    Replaces the os.system shell string (with '>' redirection) by a
    subprocess call that redirects stdout to the output file directly,
    avoiding shell-quoting problems with unusual filenames.

    :param fullfile: path of the original .T01 file; the .dat input and
        .rnx output paths are derived by suffix replacement.
    """
    dat_file = fullfile.replace('T01', 'dat')
    rnx_file = fullfile.replace('T01', 'rnx')
    # teqc writes the RINEX body to stdout; capture it into the .rnx file.
    with open(rnx_file, 'wb') as rinex_out:
        subprocess.run(['teqc.exe', '-tr', 'd', dat_file],
                       stdout=rinex_out, check=False)
class GPS_data:
    """Metadata wrapper around a RINEX observation file.

    Reads the header and observation epochs via georinex and derives
    the pieces of a conventional RINEX file name (station, day of year,
    hour letter, two-digit-year suffix).
    """

    def __init__(self, rinex_file):
        self._hdr = gr.rinexheader(rinex_file)
        self.times = gr.gettime(rinex_file)
        self.file = rinex_file
        self.T01_file = rinex_file.replace('rnx', 'T01')
        self.rinex_filename = ''

    @property
    def duration(self):
        # Just need a measure of duration for comparison, not the actual duration
        return len(self.times)

    @property
    def doy(self):
        """Day-of-year string as reported by the external teqc tool."""
        result = subprocess.check_output('teqc +quiet +mds +doy "{}"'.format(self.file))
        year_day = result.decode().split()[0]
        doy = year_day.split(':')[1]
        return doy

    @property
    def hour(self):
        """Hour (int) of the first observation, from the RINEX header."""
        return int(self._hdr['TIME OF FIRST OBS'].split()[3])

    @property
    def year(self):
        """Four-digit year of the mean observation time."""
        return self.dtime.strftime('%Y')

    @property
    def month(self):
        """Two-digit month of the mean observation time."""
        return self.dtime.strftime('%m')

    @property
    def hour_as_letter(self):
        """Hour encoded as a letter a..x, per RINEX hourly-file convention."""
        return alphabet[self.hour]

    @property
    def dtime(self):
        # Return mean observation time
        avgTime = datetime.datetime.fromtimestamp(sum(map(datetime.datetime.timestamp, self.times)) / len(self.times))
        return avgTime

    @property
    def year_suffix(self):
        """Observation-file suffix, e.g. '.20o' for 2020."""
        return '.' + self.dtime.strftime('%y') + 'o'

    @staticmethod
    def _padded_station_name(stationname):
        """Return exactly four characters: truncate long names, pad short
        ones with 'x'."""
        if len(stationname) >= 4:
            return stationname[:4]
        return stationname + 'x' * (4 - len(stationname))

    def update_rinex_filename(self, stationname):
        """Set self.rinex_filename from the station name and file metadata.

        BUG FIX: the original padding expression was
        ``stationname + 'x' * 4 - (len(stationname))`` which raises
        TypeError (str - int); the intended ``'x' * (4 - len(...))``
        padding is now applied via _padded_station_name.
        """
        sn = self._padded_station_name(stationname)
        self.rinex_filename = sn + self.doy + self.hour_as_letter + self.year_suffix
|
[
"georinex.gettime",
"georinex.rinexheader"
] |
[((492, 518), 'georinex.rinexheader', 'gr.rinexheader', (['rinex_file'], {}), '(rinex_file)\n', (506, 518), True, 'import georinex as gr\n'), ((540, 562), 'georinex.gettime', 'gr.gettime', (['rinex_file'], {}), '(rinex_file)\n', (550, 562), True, 'import georinex as gr\n')]
|
from dataclasses import dataclass
import os
import logging
import json
from functools import lru_cache
import cv2
import numpy as np
import app
from util import cvimage as Image
logger = logging.getLogger(__name__)
net_file = app.cache_path / 'ark_material.onnx'
index_file = app.cache_path / 'index_itemid_relation.json'
model_timestamp = 0
@dataclass
class DnnItemRecord:
    """One entry of the DNN-class -> game-item mapping loaded from the
    index file."""
    class_id: int  # index of the network's output class
    item_id: str  # game-data item id
    item_name: str  # human-readable item name
    item_type: str  # item category string from the index file
dnn_items_by_class : dict[int, DnnItemRecord] = {}
dnn_items_by_item_id : dict[str, DnnItemRecord] = {}
dnn_items_by_item_name : dict[str, DnnItemRecord] = {}
@lru_cache(1)
def load_net():
    """Return the OpenCV DNN loaded from the cached ONNX model file.

    Refreshes the model/index cache first; the loaded network itself is
    cached for the lifetime of the process via lru_cache.
    """
    update_index_info()
    with open(net_file, 'rb') as model_file:
        raw_bytes = model_file.read()
    return cv2.dnn.readNetFromONNX(raw_bytes)
@lru_cache(1)
def _update_index_info():
    """Load the index file and rebuild the three global lookup tables
    (by class id, by item id, by item name).

    Cached with lru_cache(1); update_net() clears the cache when the
    index file changes on disk.
    """
    with open(index_file, 'r', encoding='utf-8') as f:
        data = json.load(f)
    global model_timestamp
    model_timestamp = data['time']
    # id2idx is unpacked for completeness but not used below.
    idx2id, id2idx, idx2name, idx2type = data['idx2id'], data['id2idx'], data['idx2name'], data['idx2type']
    # Clear in place so other modules holding references to these dicts
    # observe the refreshed contents.
    dnn_items_by_class.clear()
    dnn_items_by_item_id.clear()
    dnn_items_by_item_name.clear()
    for index, item_id in enumerate(idx2id):
        record = DnnItemRecord(index, item_id, idx2name[index], idx2type[index])
        dnn_items_by_class[index] = record
        dnn_items_by_item_id[item_id] = record
        dnn_items_by_item_name[idx2name[index]] = record
def update_index_info():
    """Ensure model files are up to date, then (re)load the index tables."""
    update_net()
    return _update_index_info()
def retry_get(url, max_retry=5, timeout=3):
    """GET *url*, retrying up to *max_retry* times on any exception.

    Returns the first successful response; re-raises the last exception
    when every attempt fails.
    """
    import requests
    last_error = None
    for _attempt in range(max_retry):
        try:
            return requests.get(url, timeout=timeout)
        except Exception as err:
            last_error = err
    raise last_error
def update_net():
    """Download the ONNX model and index file from the CDN when a newer
    version is available.

    The local index file's mtime acts as a "last checked" stamp: if it
    was checked within the last 8 hours (and is newer than the model's
    generation time), no network request is made at all.
    """
    local_cache_time = 0
    import time
    os.makedirs(os.path.dirname(index_file), exist_ok=True)
    try:
        stat = os.stat(index_file)
        cache_mtime = stat.st_mtime
        with open(index_file, 'r', encoding='utf-8') as f:
            local_rel = json.load(f)
        # BUG FIX: record the local model's timestamp. The original left
        # local_cache_time at 0, so the remote model always compared as
        # newer and was re-downloaded on every stale-cache check (the
        # os.utime else-branch below was effectively dead).
        local_cache_time = local_rel['time']
        model_gen_time = local_cache_time / 1000
        now = time.time()
        logger.debug(f'{cache_mtime=} {now=} {model_gen_time=}')
        # Fresh enough: checked within the last 8 hours -> skip network.
        if cache_mtime > model_gen_time and now - cache_mtime < 60 * 60 * 8:
            return
    except Exception:
        # Missing or corrupt cache: fall through to the network check.
        pass
    logger.info('检查物品识别模型更新')
    resp = retry_get('https://cdn.jsdelivr.net/gh/triwinds/arknights-ml@latest/inventory/index_itemid_relation.json')
    remote_relation = resp.json()
    if remote_relation['time'] > local_cache_time:
        from datetime import datetime
        logger.info(f'更新物品识别模型, 模型生成时间: {datetime.fromtimestamp(remote_relation["time"]/1000).strftime("%Y-%m-%d %H:%M:%S")}')
        with open(index_file, 'w', encoding='utf-8') as f:
            json.dump(remote_relation, f, ensure_ascii=False)
        resp = retry_get('https://cdn.jsdelivr.net/gh/triwinds/arknights-ml@latest/inventory/ark_material.onnx')
        with open(net_file, 'wb') as f:
            f.write(resp.content)
        # The on-disk index changed; invalidate the cached in-memory view.
        _update_index_info.cache_clear()
    else:
        # Remote is not newer: bump the mtime to reset the 8-hour window.
        os.utime(index_file, None)
def _update_mat_collection(collection, name, img):
global itemmask
if img.size != (48, 48):
img = img.resize((48, 48), Image.BILINEAR)
mat = np.array(img)
mat[itemmask] = 0
collection[name] = mat
resources_known_items = {}
def load():
from . import resources
from . import minireco
resource_files = [(x[:-4], resources.resolve('items/' + x)) for x in resources.get_entries('items')[1] if x.endswith('.png')]
global resources_itemmats, num_recognizer, itemmask, resources_known_items
resources_itemmats = {}
itemmask = np.asarray(resources.load_image('common/itemmask.png', '1'))
for name, index in resource_files:
img = resources.load_image(index, 'RGB')
_update_mat_collection(resources_itemmats, name, img)
model = resources.load_pickle('minireco/NotoSansCJKsc-DemiLight-nums.dat')
reco = minireco.MiniRecognizer(model, minireco.compare_ccoeff)
num_recognizer=reco
for prefix in ['items', 'items/archive', 'items/not-loot']:
_, files = resources.get_entries(prefix)
for filename in files:
itemname = filename[:-4] if filename.endswith('.png') else filename
path = prefix + '/' + filename
resources_known_items[itemname] = resources.resolve(path)
update_extra_items()
def update_extra_items():
import app
new_mtime = os.path.getmtime(app.extra_items_path)
if new_mtime <= update_extra_items.old_mtime:
return
from . import resources
from glob import glob
extra_files = [(os.path.basename(x)[:-4], resources.FileSystemIndex(x)) for x in glob(os.path.join(
app.extra_items_path, '*.png'))]
extra_known_items = {}
extra_itemmats = {}
for key, value in extra_files:
for name, index in extra_files:
img = resources.load_image(index, 'RGB')
_update_mat_collection(extra_itemmats, name, img)
extra_known_items[key] = value
global itemmats
itemmats = {}
itemmats.update(resources_itemmats)
itemmats.update(extra_itemmats)
global all_known_items
all_known_items = {}
all_known_items.update(resources_known_items)
all_known_items.update(extra_known_items)
update_extra_items.old_mtime = new_mtime
update_extra_items.old_mtime = 0
def add_item(image) -> str:
import os
import time
import app
date = time.strftime('%Y-%m-%d')
index = add_item.last_index + 1
while True:
name = '未知物品-%s-%d' % (date, index)
filename = app.extra_items_path.joinpath(name + '.png')
if not os.path.exists(filename):
break
index += 1
add_item.last_index = index
image.save(filename)
update_extra_items()
return name
add_item.last_index = 0
load()
|
[
"json.dump",
"json.load",
"os.path.join",
"os.stat",
"app.extra_items_path.joinpath",
"os.path.basename",
"os.path.dirname",
"os.path.exists",
"time.strftime",
"cv2.dnn.readNetFromONNX",
"time.time",
"numpy.array",
"os.path.getmtime",
"requests.get",
"datetime.datetime.fromtimestamp",
"functools.lru_cache",
"os.utime",
"logging.getLogger"
] |
[((190, 217), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (207, 217), False, 'import logging\n'), ((614, 626), 'functools.lru_cache', 'lru_cache', (['(1)'], {}), '(1)\n', (623, 626), False, 'from functools import lru_cache\n'), ((789, 801), 'functools.lru_cache', 'lru_cache', (['(1)'], {}), '(1)\n', (798, 801), False, 'from functools import lru_cache\n'), ((3309, 3322), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3317, 3322), True, 'import numpy as np\n'), ((4527, 4565), 'os.path.getmtime', 'os.path.getmtime', (['app.extra_items_path'], {}), '(app.extra_items_path)\n', (4543, 4565), False, 'import os\n'), ((5537, 5562), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d"""'], {}), "('%Y-%m-%d')\n", (5550, 5562), False, 'import time\n'), ((741, 770), 'cv2.dnn.readNetFromONNX', 'cv2.dnn.readNetFromONNX', (['data'], {}), '(data)\n', (764, 770), False, 'import cv2\n'), ((898, 910), 'json.load', 'json.load', (['f'], {}), '(f)\n', (907, 910), False, 'import json\n'), ((1868, 1895), 'os.path.dirname', 'os.path.dirname', (['index_file'], {}), '(index_file)\n', (1883, 1895), False, 'import os\n'), ((1936, 1955), 'os.stat', 'os.stat', (['index_file'], {}), '(index_file)\n', (1943, 1955), False, 'import os\n'), ((2156, 2167), 'time.time', 'time.time', ([], {}), '()\n', (2165, 2167), False, 'import time\n'), ((3119, 3145), 'os.utime', 'os.utime', (['index_file', 'None'], {}), '(index_file, None)\n', (3127, 3145), False, 'import os\n'), ((5678, 5722), 'app.extra_items_path.joinpath', 'app.extra_items_path.joinpath', (["(name + '.png')"], {}), "(name + '.png')\n", (5707, 5722), False, 'import app\n'), ((1674, 1708), 'requests.get', 'requests.get', (['url'], {'timeout': 'timeout'}), '(url, timeout=timeout)\n', (1686, 1708), False, 'import requests\n'), ((2075, 2087), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2084, 2087), False, 'import json\n'), ((2823, 2872), 'json.dump', 'json.dump', (['remote_relation', 'f'], {'ensure_ascii': 
'(False)'}), '(remote_relation, f, ensure_ascii=False)\n', (2832, 2872), False, 'import json\n'), ((5738, 5762), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (5752, 5762), False, 'import os\n'), ((4706, 4725), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (4722, 4725), False, 'import os\n'), ((4776, 4819), 'os.path.join', 'os.path.join', (['app.extra_items_path', '"""*.png"""'], {}), "(app.extra_items_path, '*.png')\n", (4788, 4819), False, 'import os\n'), ((2694, 2748), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["(remote_relation['time'] / 1000)"], {}), "(remote_relation['time'] / 1000)\n", (2716, 2748), False, 'from datetime import datetime\n')]
|
import copy
import logging
import time
import numpy as np
import torch
import wandb
from torch import nn
from .utils import transform_list_to_tensor
from ....core.robustness.robust_aggregation import RobustAggregator, is_weight_param
from ....utils.logging import logger
def test(
model,
device,
test_loader,
criterion,
mode="raw-task",
dataset="cifar10",
poison_type="fashion",
):
class_correct = list(0.0 for i in range(10))
class_total = list(0.0 for i in range(10))
if dataset in ("mnist", "emnist"):
target_class = 7
if mode == "raw-task":
classes = [str(i) for i in range(10)]
elif mode == "targetted-task":
if poison_type == "ardis":
classes = [str(i) for i in range(10)]
else:
classes = [
"T-shirt/top",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Ankle boot",
]
elif dataset == "cifar10":
classes = (
"plane",
"car",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
)
# target_class = 2 for greencar, 9 for southwest
if poison_type in ("howto", "greencar-neo"):
target_class = 2
else:
target_class = 9
model.eval()
test_loss = 0
correct = 0
backdoor_correct = 0
backdoor_tot = 0
final_acc = 0
task_acc = None
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
_, predicted = torch.max(output, 1)
c = (predicted == target).squeeze()
# test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
test_loss += criterion(output, target).item()
pred = output.argmax(
dim=1, keepdim=True
) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
# check backdoor accuracy
if poison_type == "ardis":
backdoor_index = torch.where(target == target_class)
target_backdoor = torch.ones_like(target[backdoor_index])
predicted_backdoor = predicted[backdoor_index]
backdoor_correct += (predicted_backdoor == target_backdoor).sum().item()
backdoor_tot = backdoor_index[0].shape[0]
# logger.info("Target: {}".format(target_backdoor))
# logger.info("Predicted: {}".format(predicted_backdoor))
# for image_index in range(test_batch_size):
for image_index in range(len(target)):
label = target[image_index]
class_correct[label] += c[image_index].item()
class_total[label] += 1
test_loss /= len(test_loader.dataset)
if mode == "raw-task":
for i in range(10):
logger.info(
"Accuracy of %5s : %.2f %%"
% (classes[i], 100 * class_correct[i] / class_total[i])
)
if i == target_class:
task_acc = 100 * class_correct[i] / class_total[i]
logger.info(
"\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n".format(
test_loss,
correct,
len(test_loader.dataset),
100.0 * correct / len(test_loader.dataset),
)
)
final_acc = 100.0 * correct / len(test_loader.dataset)
elif mode == "targetted-task":
if dataset in ("mnist", "emnist"):
for i in range(10):
logger.info(
"Accuracy of %5s : %.2f %%"
% (classes[i], 100 * class_correct[i] / class_total[i])
)
if poison_type == "ardis":
# ensure 7 is being classified as 1
logger.info(
"Backdoor Accuracy of %.2f : %.2f %%"
% (target_class, 100 * backdoor_correct / backdoor_tot)
)
final_acc = 100 * backdoor_correct / backdoor_tot
else:
# trouser acc
final_acc = 100 * class_correct[1] / class_total[1]
elif dataset == "cifar10":
logger.info(
"#### Targetted Accuracy of %5s : %.2f %%"
% (
classes[target_class],
100 * class_correct[target_class] / class_total[target_class],
)
)
final_acc = 100 * class_correct[target_class] / class_total[target_class]
return final_acc, task_acc
class FedAvgRobustAggregator(object):
def __init__(
self,
train_global,
test_global,
all_train_data_num,
train_data_local_dict,
test_data_local_dict,
train_data_local_num_dict,
worker_num,
device,
model,
targetted_task_test_loader,
num_dps_poisoned_dataset,
args,
):
self.train_global = train_global
self.test_global = test_global
self.all_train_data_num = all_train_data_num
self.train_data_local_dict = train_data_local_dict
self.test_data_local_dict = test_data_local_dict
self.train_data_local_num_dict = train_data_local_num_dict
self.worker_num = worker_num
self.device = device
self.args = args
self.model_dict = dict()
self.sample_num_dict = dict()
self.flag_client_model_uploaded_dict = dict()
self.robust_aggregator = RobustAggregator(args)
self.targetted_task_test_loader = targetted_task_test_loader
self.num_dps_poisoned_dataset = num_dps_poisoned_dataset
self.adversary_fl_rounds = [
i for i in range(1, args.comm_round + 1) if (i - 1) % args.attack_freq == 0
]
for idx in range(self.worker_num):
self.flag_client_model_uploaded_dict[idx] = False
self.model, _ = self.init_model(model)
def init_model(self, model):
model_params = model.state_dict()
# logging.info(model)
return model, model_params
def get_global_model_params(self):
return self.model.state_dict()
def add_local_trained_result(self, index, model_params, sample_num):
logging.info("add_model. index = %d" % index)
self.model_dict[index] = model_params
self.sample_num_dict[index] = sample_num
self.flag_client_model_uploaded_dict[index] = True
def check_whether_all_receive(self):
for idx in range(self.worker_num):
if not self.flag_client_model_uploaded_dict[idx]:
return False
for idx in range(self.worker_num):
self.flag_client_model_uploaded_dict[idx] = False
return True
def aggregate(self):
start_time = time.time()
model_list = []
training_num = 0
for idx in range(self.worker_num):
if self.args.is_mobile == 1:
self.model_dict[idx] = transform_list_to_tensor(self.model_dict[idx])
# conduct the defense here:
local_sample_number, local_model_params = (
self.sample_num_dict[idx],
self.model_dict[idx],
)
if self.robust_aggregator.defense_type in ("norm_diff_clipping", "weak_dp"):
clipped_local_state_dict = self.robust_aggregator.norm_diff_clipping(
local_model_params, self.model.state_dict()
)
else:
raise NotImplementedError("Non-supported Defense type ... ")
model_list.append((local_sample_number, clipped_local_state_dict))
training_num += self.sample_num_dict[idx]
logging.info("len of self.model_dict[idx] = " + str(len(self.model_dict)))
# logging.info("################aggregate: %d" % len(model_list))
(num0, averaged_params) = model_list[0]
for k in averaged_params.keys():
for i in range(0, len(model_list)):
local_sample_number, local_model_params = model_list[i]
w = local_sample_number / training_num
local_layer_update = local_model_params[k]
if self.robust_aggregator.defense_type == "weak_dp":
if is_weight_param(k):
local_layer_update = self.robust_aggregator.add_noise(
local_layer_update, self.device
)
if i == 0:
averaged_params[k] = local_model_params[k] * w
else:
averaged_params[k] += local_model_params[k] * w
# update the global model which is cached at the server side
self.model.load_state_dict(averaged_params)
end_time = time.time()
logging.info("aggregate time cost: %d" % (end_time - start_time))
return averaged_params
def client_sampling(self, round_idx, client_num_in_total, client_num_per_round):
num_clients = min(client_num_per_round, client_num_in_total)
np.random.seed(
round_idx
) # make sure for each comparison, we are selecting the same clients each round
if round_idx not in self.adversary_fl_rounds:
client_indexes = np.random.choice(
range(client_num_in_total), num_clients, replace=False
)
else:
client_indexes = np.array(
[1]
+ list(
np.random.choice(
range(client_num_in_total), num_clients, replace=False
)
)
) # we gaurantee that the attacker will participate in a certain frequency
logging.info("client_indexes = %s" % str(client_indexes))
return client_indexes
def test_on_all_clients(self, round_idx):
if (
round_idx % self.args.frequency_of_the_test == 0
or round_idx == self.args.comm_round - 1
):
logging.info(
"################local_test_on_all_clients : {}".format(round_idx)
)
train_num_samples = []
train_tot_corrects = []
train_losses = []
test_num_samples = []
test_tot_corrects = []
test_losses = []
for client_idx in range(self.args.client_num_in_total):
# train data
train_tot_correct, train_num_sample, train_loss = self._infer(
self.train_data_local_dict[client_idx]
)
train_tot_corrects.append(copy.deepcopy(train_tot_correct))
train_num_samples.append(copy.deepcopy(train_num_sample))
train_losses.append(copy.deepcopy(train_loss))
# test data
test_tot_correct, test_num_sample, test_loss = self._infer(
self.test_data_local_dict[client_idx]
)
test_tot_corrects.append(copy.deepcopy(test_tot_correct))
test_num_samples.append(copy.deepcopy(test_num_sample))
test_losses.append(copy.deepcopy(test_loss))
# test on training dataset
train_acc = sum(train_tot_corrects) / sum(train_num_samples)
train_loss = sum(train_losses) / sum(train_num_samples)
wandb.log({"Train/Acc": train_acc, "round": round_idx})
wandb.log({"Train/Loss": train_loss, "round": round_idx})
stats = {"training_acc": train_acc, "training_loss": train_loss}
logging.info(stats)
# test on test dataset
test_acc = sum(test_tot_corrects) / sum(test_num_samples)
test_loss = sum(test_losses) / sum(test_num_samples)
wandb.log({"Test/Acc": test_acc, "round": round_idx})
wandb.log({"Test/Loss": test_loss, "round": round_idx})
stats = {"test_acc": test_acc, "test_loss": test_loss}
logging.info(stats)
def test_target_accuracy(self, round_idx):
test(
self.model,
self.device,
self.targetted_task_test_loader,
criterion=nn.CrossEntropyLoss().to(self.device),
mode="targetted-task",
dataset=self.args.dataset,
poison_type=self.args.poison_type,
)
def _infer(self, test_data):
self.model.eval()
self.model.to(self.device)
test_loss = test_acc = test_total = 0.0
criterion = nn.CrossEntropyLoss().to(self.device)
with torch.no_grad():
for batch_idx, (x, target) in enumerate(test_data):
x = x.to(self.device)
target = target.to(self.device)
pred = self.model(x)
loss = criterion(pred, target)
_, predicted = torch.max(pred, -1)
correct = predicted.eq(target).sum()
test_acc += correct.item()
test_loss += loss.item() * target.size(0)
test_total += target.size(0)
return test_acc, test_total, test_loss
|
[
"wandb.log",
"torch.ones_like",
"copy.deepcopy",
"numpy.random.seed",
"torch.where",
"torch.nn.CrossEntropyLoss",
"time.time",
"logging.info",
"torch.max",
"torch.no_grad"
] |
[((1748, 1763), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1761, 1763), False, 'import torch\n'), ((6728, 6773), 'logging.info', 'logging.info', (["('add_model. index = %d' % index)"], {}), "('add_model. index = %d' % index)\n", (6740, 6773), False, 'import logging\n'), ((7276, 7287), 'time.time', 'time.time', ([], {}), '()\n', (7285, 7287), False, 'import time\n'), ((9276, 9287), 'time.time', 'time.time', ([], {}), '()\n', (9285, 9287), False, 'import time\n'), ((9296, 9361), 'logging.info', 'logging.info', (["('aggregate time cost: %d' % (end_time - start_time))"], {}), "('aggregate time cost: %d' % (end_time - start_time))\n", (9308, 9361), False, 'import logging\n'), ((9556, 9581), 'numpy.random.seed', 'np.random.seed', (['round_idx'], {}), '(round_idx)\n', (9570, 9581), True, 'import numpy as np\n'), ((1928, 1948), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (1937, 1948), False, 'import torch\n'), ((11862, 11917), 'wandb.log', 'wandb.log', (["{'Train/Acc': train_acc, 'round': round_idx}"], {}), "({'Train/Acc': train_acc, 'round': round_idx})\n", (11871, 11917), False, 'import wandb\n'), ((11930, 11987), 'wandb.log', 'wandb.log', (["{'Train/Loss': train_loss, 'round': round_idx}"], {}), "({'Train/Loss': train_loss, 'round': round_idx})\n", (11939, 11987), False, 'import wandb\n'), ((12077, 12096), 'logging.info', 'logging.info', (['stats'], {}), '(stats)\n', (12089, 12096), False, 'import logging\n'), ((12280, 12333), 'wandb.log', 'wandb.log', (["{'Test/Acc': test_acc, 'round': round_idx}"], {}), "({'Test/Acc': test_acc, 'round': round_idx})\n", (12289, 12333), False, 'import wandb\n'), ((12346, 12401), 'wandb.log', 'wandb.log', (["{'Test/Loss': test_loss, 'round': round_idx}"], {}), "({'Test/Loss': test_loss, 'round': round_idx})\n", (12355, 12401), False, 'import wandb\n'), ((12481, 12500), 'logging.info', 'logging.info', (['stats'], {}), '(stats)\n', (12493, 12500), False, 'import logging\n'), ((13064, 13079), 
'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13077, 13079), False, 'import torch\n'), ((2459, 2494), 'torch.where', 'torch.where', (['(target == target_class)'], {}), '(target == target_class)\n', (2470, 2494), False, 'import torch\n'), ((2529, 2568), 'torch.ones_like', 'torch.ones_like', (['target[backdoor_index]'], {}), '(target[backdoor_index])\n', (2544, 2568), False, 'import torch\n'), ((13013, 13034), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (13032, 13034), False, 'from torch import nn\n'), ((13346, 13365), 'torch.max', 'torch.max', (['pred', '(-1)'], {}), '(pred, -1)\n', (13355, 13365), False, 'import torch\n'), ((11110, 11142), 'copy.deepcopy', 'copy.deepcopy', (['train_tot_correct'], {}), '(train_tot_correct)\n', (11123, 11142), False, 'import copy\n'), ((11185, 11216), 'copy.deepcopy', 'copy.deepcopy', (['train_num_sample'], {}), '(train_num_sample)\n', (11198, 11216), False, 'import copy\n'), ((11254, 11279), 'copy.deepcopy', 'copy.deepcopy', (['train_loss'], {}), '(train_loss)\n', (11267, 11279), False, 'import copy\n'), ((11503, 11534), 'copy.deepcopy', 'copy.deepcopy', (['test_tot_correct'], {}), '(test_tot_correct)\n', (11516, 11534), False, 'import copy\n'), ((11576, 11606), 'copy.deepcopy', 'copy.deepcopy', (['test_num_sample'], {}), '(test_num_sample)\n', (11589, 11606), False, 'import copy\n'), ((11643, 11667), 'copy.deepcopy', 'copy.deepcopy', (['test_loss'], {}), '(test_loss)\n', (11656, 11667), False, 'import copy\n'), ((12679, 12700), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (12698, 12700), False, 'from torch import nn\n')]
|
import os, ConfigParser, time, sys
sys.path.insert(0, os.path.join(os.getcwd(), "Jinja2-2.3-py2.5.egg"))
import jinja2
_config = ConfigParser.SafeConfigParser()
_config.read("config.ini")
def version(wantInt=True):
import commands, re
v=re.findall("\(varnish-(.*?)(?: |\))", commands.getoutput("/usr/sbin/varnishd -V").split("\n")[0])[0].split(".")
if wantInt:
return map(lambda x: int(x), v)
else:
return v
# Varnish configuration generator
def updateBackend(data):
jinja = jinja2.Environment(loader=jinja2.loaders.FileSystemLoader("template"))
ver = version()
backend = jinja.get_template("varnishbackend.vcl").render(backend=data, prefix=ver[0] < 2, version=map(lambda x: str(x), ver))
open("sysconf/varnishbackend.vcl", "w").write(backend)
if version()[0] > 1:
restart()
def updateRecv(data):
jinja = jinja2.Environment(loader=jinja2.loaders.FileSystemLoader("template"))
ver = version()
recv = jinja.get_template("varnishrecv.vcl").render(cond=data, nomatch=_config.get("varnish", "noMatch"),
ovzcphost=_config.get("varnish", "ovzcphost"), prefix=ver[0] < 2, version=map(lambda x: str(x), ver))
open("sysconf/varnishrecv.vcl", "w").write(recv)
if version()[0] > 1:
restart()
def restart():
import subprocess, commands
if version()[0] < 2:
subprocess.Popen("/etc/init.d/varnish restart", shell=True).wait()
while True:
if commands.getoutput("pidof varnishd"):
break
else:
return commands.getoutput("/etc/init.d/varnish reload")
if __name__ == "__main__":
from models import VarnishBackend, VarnishCond
updateBackend(VarnishBackend.select())
updateRecv(VarnishCond.select())
restart()
|
[
"ConfigParser.SafeConfigParser",
"subprocess.Popen",
"os.getcwd",
"models.VarnishBackend.select",
"jinja2.loaders.FileSystemLoader",
"models.VarnishCond.select",
"commands.getoutput"
] |
[((134, 165), 'ConfigParser.SafeConfigParser', 'ConfigParser.SafeConfigParser', ([], {}), '()\n', (163, 165), False, 'import os, ConfigParser, time, sys\n'), ((68, 79), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (77, 79), False, 'import os, ConfigParser, time, sys\n'), ((1471, 1519), 'commands.getoutput', 'commands.getoutput', (['"""/etc/init.d/varnish reload"""'], {}), "('/etc/init.d/varnish reload')\n", (1489, 1519), False, 'import subprocess, commands\n'), ((1615, 1638), 'models.VarnishBackend.select', 'VarnishBackend.select', ([], {}), '()\n', (1636, 1638), False, 'from models import VarnishBackend, VarnishCond\n'), ((1653, 1673), 'models.VarnishCond.select', 'VarnishCond.select', ([], {}), '()\n', (1671, 1673), False, 'from models import VarnishBackend, VarnishCond\n'), ((530, 573), 'jinja2.loaders.FileSystemLoader', 'jinja2.loaders.FileSystemLoader', (['"""template"""'], {}), "('template')\n", (561, 573), False, 'import jinja2\n'), ((876, 919), 'jinja2.loaders.FileSystemLoader', 'jinja2.loaders.FileSystemLoader', (['"""template"""'], {}), "('template')\n", (907, 919), False, 'import jinja2\n'), ((1404, 1440), 'commands.getoutput', 'commands.getoutput', (['"""pidof varnishd"""'], {}), "('pidof varnishd')\n", (1422, 1440), False, 'import subprocess, commands\n'), ((1315, 1374), 'subprocess.Popen', 'subprocess.Popen', (['"""/etc/init.d/varnish restart"""'], {'shell': '(True)'}), "('/etc/init.d/varnish restart', shell=True)\n", (1331, 1374), False, 'import subprocess, commands\n'), ((288, 331), 'commands.getoutput', 'commands.getoutput', (['"""/usr/sbin/varnishd -V"""'], {}), "('/usr/sbin/varnishd -V')\n", (306, 331), False, 'import subprocess, commands\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from commands.base import CommandBase
from py_utils.emu_utils import run, error, input_with_options, UNINSTALL_PATH
class Uninstall(CommandBase):
def __init__(self):
super().__init__()
self.name = 'uninstall'
self.description = '👋 Uninstalls emu'
@staticmethod
def _uninstall():
print('Are you sure you want to uninstall emu?')
if input_with_options(['Y', 'n'], 'n')[0] == 0:
run(['sh', UNINSTALL_PATH])
else:
error('Not uninstalling!')
|
[
"py_utils.emu_utils.error",
"py_utils.emu_utils.input_with_options",
"py_utils.emu_utils.run"
] |
[((453, 480), 'py_utils.emu_utils.run', 'run', (["['sh', UNINSTALL_PATH]"], {}), "(['sh', UNINSTALL_PATH])\n", (456, 480), False, 'from py_utils.emu_utils import run, error, input_with_options, UNINSTALL_PATH\n'), ((497, 523), 'py_utils.emu_utils.error', 'error', (['"""Not uninstalling!"""'], {}), "('Not uninstalling!')\n", (502, 523), False, 'from py_utils.emu_utils import run, error, input_with_options, UNINSTALL_PATH\n'), ((402, 437), 'py_utils.emu_utils.input_with_options', 'input_with_options', (["['Y', 'n']", '"""n"""'], {}), "(['Y', 'n'], 'n')\n", (420, 437), False, 'from py_utils.emu_utils import run, error, input_with_options, UNINSTALL_PATH\n')]
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 4.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'shared',
'handler',
'users',
'companies',
'content',
'djangofront',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATICFILES_DIRS = (
BASE_DIR / 'static/',
)
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': [
'rest_framework.renderers.JSONRenderer',
# 'djangorestframework_camel_case.render.CamelCaseJSONRenderer',
# 'djangorestframework_camel_case.render.CamelCaseBrowsableAPIRenderer',
],
# 'DEFAULT_PARSER_CLASSES': (
# If you use MultiPartFormParser or FormParser, we also have a camel case version
# 'djangorestframework_camel_case.parser.CamelCaseFormParser',
# 'djangorestframework_camel_case.parser.CamelCaseMultiPartParser',
# 'djangorestframework_camel_case.parser.CamelCaseJSONParser',
# ),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 100,
'DEFAULT_AUTHENTICATION_CLASSES': [
# passing session id (for debugging in browser)
'rest_framework.authentication.SessionAuthentication',
# passing login/password in request.args (http authentification)
'rest_framework.authentication.BasicAuthentication',
# passing token in headers
'rest_framework.authentication.TokenAuthentication',
],
'DEFAULT_PERMISSION_CLASSES': [
# permissions to makeOperations/toViewOnly for authenticated users in current session
# 'rest_framework.permissions.IsAuthenticated',
# 'rest_framework.permissions.IsAuthenticatedOrReadOnly',
# permissions according to auth_permission
'rest_framework.permissions.DjangoModelPermissions',
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
# for making own permissions
# 'rest_framework.permissions.BasePermission',
],
'DEFAULT_VERSIONING_CLASS':
# http://127.0.0.1:8000/api/v2/users/
'rest_framework.versioning.URLPathVersioning',
# path('api/v1/users/', include('userapp.urls', namespace='v1')),
# 'rest_framework.versioning.NamespaceVersioning',
# http://v1.example.com/bookings/
# 'rest_framework.versioning.HostNameVersioning',
# http://127.0.0.1:8000/api/users/?version=v1/
# 'rest_framework.versioning.QueryParameterVersioning',
# requests.get('http://127.0.0.1:8000/api/users/', headers={'Accept': 'application/json; version=v2'})
# 'rest_framework.versioning.AcceptHeaderVersioning',
}
AUTH_USER_MODEL = 'users.User'
LOGIN_URL = '/djangofront/login/'
# LOGIN_REDIRECT_URL = ''
|
[
"pathlib.Path"
] |
[((406, 420), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (410, 420), False, 'from pathlib import Path\n')]
|
import unittest
from tensorboardX.crc32c import _crc32c, _crc32c_native, crc32c
class CRC32CTest(unittest.TestCase):
def test_crc32c(self):
data = b'abcd'
assert crc32c(data) == 0x92c80a31
def test_crc32c_python(self):
data = b'abcd'
assert _crc32c(data) == 0x92c80a31
def test_crc32c_native(self):
if _crc32c_native is None:
return
data = b'abcd'
assert _crc32c_native(data) == 0x92c80a31
|
[
"tensorboardX.crc32c._crc32c",
"tensorboardX.crc32c._crc32c_native",
"tensorboardX.crc32c.crc32c"
] |
[((184, 196), 'tensorboardX.crc32c.crc32c', 'crc32c', (['data'], {}), '(data)\n', (190, 196), False, 'from tensorboardX.crc32c import _crc32c, _crc32c_native, crc32c\n'), ((284, 297), 'tensorboardX.crc32c._crc32c', '_crc32c', (['data'], {}), '(data)\n', (291, 297), False, 'from tensorboardX.crc32c import _crc32c, _crc32c_native, crc32c\n'), ((439, 459), 'tensorboardX.crc32c._crc32c_native', '_crc32c_native', (['data'], {}), '(data)\n', (453, 459), False, 'from tensorboardX.crc32c import _crc32c, _crc32c_native, crc32c\n')]
|
"""
"""
import psutil
def get_virtual_memory():
print(psutil.cpu_count())
return psutil.virtual_memory().percent
mem = get_virtual_memory()
print(mem)
# nested function
def print_test_logs(msg):
def logger():
print(f'Logging memory data {msg}')
def test():
print('a')
test()
logger()
print_test_logs(mem)
# closure
def print_test_logs_3(msg):
threshold = 90
def logger():
print(f'Logging memory data with closure {msg}. Keep an eye to the threshold {threshold}')
return logger
x = print_test_logs_3(mem)()
# print_test_logs_3(mem)()
print(x)
|
[
"psutil.virtual_memory",
"psutil.cpu_count"
] |
[((61, 79), 'psutil.cpu_count', 'psutil.cpu_count', ([], {}), '()\n', (77, 79), False, 'import psutil\n'), ((92, 115), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (113, 115), False, 'import psutil\n')]
|
#!/usr/bin/python
from Solution import Solution
obj = Solution()
#A = "23"
#A = ""
A = "238"
out = obj.letterCombinations(A)
print(out)
|
[
"Solution.Solution"
] |
[((55, 65), 'Solution.Solution', 'Solution', ([], {}), '()\n', (63, 65), False, 'from Solution import Solution\n')]
|
import subprocess
from threading import Timer
import signal
#from pprint import pprint
import sys
import random
import time
import os
from probes.base_probe import Base_Probe
from core import utils
import logging
logger = logging.getLogger(__name__)
class Probe_Sar(Base_Probe):
"""SAR probe. Obtains information about the entire system at regular intervals.
Lowest possible resolution appears to be 1 second
"""
probe_thread = None
target = None
local_dir = None
def run_probe(self, target, local_dir, BINARY_FILE, interval_str):
logger.info("Running SAR probe, writing to %s with interval %s" % (BINARY_FILE, interval_str))
self.local_dir = local_dir
#probe_thread = subprocess.Popen(["sar" ,"-b" ,"-B" ,"-q" ,"-r" ,"-R" ,"-S" ,"-u" interval_str], stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Some systems dont have -R
#with open(os.devnull, 'w') as devnull:
# self.probe_thread = subprocess.Popen(["sar" ,"-b" ,"-B" ,"-q" ,"-r" ,"-S" ,"-u", "-o", BINARY_FILE, interval_str], stdin=None, stdout=devnull, stderr=subprocess.PIPE)
self.target = target
cmd_args = " ".join(["-b" ,"-B" ,"-q" ,"-r" ,"-S" ,"-u", "-o", BINARY_FILE, interval_str])
self.probe_thread = utils.run_anywhere(target, "sar", cmd_args, None, None, True)
self.probe_thread.remote_stdout = self.probe_thread.local_stdout = BINARY_FILE
def stop_probe(self):
logger.info("Stopping SAR probe")
utils.stop_anywhere(self.target, self.probe_thread, hasOutput=False)
def extract_data(self):
logger.info("Extracting data from SAR probe")
local_file = os.path.join(self.local_dir, "%s_sar.bin" % utils.get_ip(self.target))
utils.get_anywhere(self.target, self.probe_thread.remote_stdout, local_file)
return local_file
|
[
"core.utils.get_anywhere",
"core.utils.run_anywhere",
"core.utils.stop_anywhere",
"core.utils.get_ip",
"logging.getLogger"
] |
[((223, 250), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (240, 250), False, 'import logging\n'), ((1211, 1272), 'core.utils.run_anywhere', 'utils.run_anywhere', (['target', '"""sar"""', 'cmd_args', 'None', 'None', '(True)'], {}), "(target, 'sar', cmd_args, None, None, True)\n", (1229, 1272), False, 'from core import utils\n'), ((1417, 1485), 'core.utils.stop_anywhere', 'utils.stop_anywhere', (['self.target', 'self.probe_thread'], {'hasOutput': '(False)'}), '(self.target, self.probe_thread, hasOutput=False)\n', (1436, 1485), False, 'from core import utils\n'), ((1648, 1724), 'core.utils.get_anywhere', 'utils.get_anywhere', (['self.target', 'self.probe_thread.remote_stdout', 'local_file'], {}), '(self.target, self.probe_thread.remote_stdout, local_file)\n', (1666, 1724), False, 'from core import utils\n'), ((1619, 1644), 'core.utils.get_ip', 'utils.get_ip', (['self.target'], {}), '(self.target)\n', (1631, 1644), False, 'from core import utils\n')]
|
# Copyright 2015, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import pprint
import time
import uuid
import neutronclient.neutron.client as neutron_client
import novaclient.client as nova_client
import novaclient.exceptions as nova_exceptions
import glanceclient.client as glance_client
import a10_neutron_lbaas.a10_exceptions as a10_ex
import a10_neutron_lbaas.vthunder.keystone as a10_keystone
pp = pprint.PrettyPrinter(indent=4)
LOG = logging.getLogger(__name__)
CREATE_TIMEOUT = 900
# TODO(mdurrant) - These may need to go into a configuration file.
KEYSTONE_VERSION = "3.0"
NOVA_VERSION = "2.1"
NEUTRON_VERSION = "2.0"
GLANCE_VERSION = 2.4
OS_INTERFACE_URLS = ["public", "publicURL"]
_default_server = {
"id": None,
"name": None,
"image": None,
"flavor": None,
"meta": {},
"files": {},
"min_count": 1, # optional extension
"max_count": 1, # optional extension
"security_groups": [],
"userdata": None,
"key_name": None, # optional extension
"availability_zone": None,
"block_device_mapping": None, # optional extension
"block_device_mapping_v2": None, # optional extension
"scheduler_hints": {}, # optional extension
"config_drive": False, # optional extension
"disk_config": "AUTO", # AUTO or MANUAL # optional extension
"admin_pass": None # optional extension
}
MISSING_ERR_FORMAT = "{0} with name or id {1} could not be found"
class InstanceManager(object):
def __init__(self, ks_session, network_ks_session=None,
nova_api=None, nova_version=NOVA_VERSION,
neutron_api=None, glance_api=None, glance_version=GLANCE_VERSION):
# This is the keystone session that we use for spawning instances,
# aka our "service tenant" user.
self._ks_session = ks_session
# And this is the keystone session that we use for finding the network
# that we are going to plumb into, aka the "end user".
if network_ks_session is not None:
self._network_ks_session = network_ks_session
else:
self._network_ks_session = ks_session
# Yes, we really want both of these to use the "service tenant".
self._nova_api = nova_api or nova_client.Client(
nova_version, session=self._ks_session)
self._neutron_api = neutron_api or neutron_client.Client(
NEUTRON_VERSION, session=self._ks_session)
self._glance_api = glance_api or glance_client.Client(glance_version,
session=self._ks_session)
@classmethod
def _factory_with_service_tenant(cls, config, user_keystone_session):
ks = user_keystone_session
vth = config.get_vthunder_config()
if 'service_tenant' in vth:
service_ks = a10_keystone.KeystoneFromConfig(config)
else:
service_ks = ks
nova_version = config.get('nova_api_version')
glance_version = config.get("glance_api_version")
return InstanceManager(
ks_session=service_ks.session, network_ks_session=ks.session,
nova_version=nova_version, glance_version=glance_version)
@classmethod
def from_config(cls, config, openstack_context=None):
ks = a10_keystone.KeystoneFromContext(config, openstack_context)
return cls._factory_with_service_tenant(config, ks)
@classmethod
def from_cmdline(cls, config, tenant_name, username, password):
ks = a10_keystone.KeystoneFromPassword(config, tenant_name, username, password)
return cls._factory_with_service_tenant(config, ks)
def _build_server(self, instance):
retval = {}
for k in _default_server:
retval[k] = instance.get(k, _default_server[k])
retval['name'] = retval['name'] or 'a10-' + str(uuid.uuid4())
return retval
def list_instances(self, detailed=True, search_opts=None,
marker=None, limit=None, sort_keys=None, sort_dirs=None):
return self._nova_api.servers.list(detailed, search_opts, marker, limit,
sort_keys, sort_dirs)
def create_instance(self, context):
return self._create_instance(context)
def _get_ip_addresses_from_instance(self, addresses, mgmt_network_name):
address_block = addresses[mgmt_network_name]
v4addresses = filter(lambda x: x["version"] == 4,
address_block)
return v4addresses[0]["addr"]
def _create_instance(self, context):
server = self._build_server(context)
image_id = context.get("image", None)
flavor_id = context.get("flavor", None)
net_ids = context.get("networks")
image = self.get_image(identifier=image_id)
flavor = self.get_flavor(identifier=flavor_id)
networks = self.get_networks(net_ids)
if image is None:
raise a10_ex.ImageNotFoundError(MISSING_ERR_FORMAT.format("Image", image_id))
if flavor is None:
raise a10_ex.FlavorNotFoundError(MISSING_ERR_FORMAT.format("Flavor", flavor_id))
if networks is None:
msg = map(lambda x: MISSING_ERR_FORMAT.format("Network", x), net_ids)
raise a10_ex.NetworksNotFoundError(msg)
server["image"] = image.id
server["flavor"] = flavor.id
server["nics"] = [{'net-id': x['id']} for x in networks]
created_instance = self._nova_api.servers.create(**server)
# Next 6 lines - Added due to insane API on the other side
if hasattr(created_instance.manager, 'client'):
# This craziness works around a bug in Liberty.
created_instance.manager.client.last_request_id = None
self._create_server_spinlock(created_instance)
# Get the IP address of the first interface (should be management)
ip_address = self._get_ip_addresses_from_instance(
created_instance.addresses, networks[0]['name'])
return {
'name': server['name'],
'instance': created_instance,
'ip_address': ip_address,
'nova_instance_id': created_instance.id
}
def _create_server_spinlock(self, created_instance):
created_id = created_instance.id
timeout = False
start_time = time.time()
sleep_time = 1
pending_statuses = ["INITALIZED"]
active_statuses = ["ACTIVE"]
fatal_statuses = ["ERROR",
"SOFT_DELETED",
"HARD_DELETED",
"STOPPED",
"PAUSED"]
while not timeout:
get_instance = self._nova_api.servers.get(created_id)
vm_state = getattr(get_instance, "OS-EXT-STS:vm_state").upper()
end_time = time.time()
if ((get_instance.id == created_id and len(get_instance.addresses) > 0
and vm_state in active_statuses + pending_statuses)):
timeout = True
break
elif vm_state in fatal_statuses:
raise Exception("Instance created in error state %s" % (vm_state))
break
if end_time - start_time > CREATE_TIMEOUT:
timeout = True
raise Exception("Timed out creating instance.")
break
time.sleep(sleep_time)
def delete_instance(self, instance_id):
try:
return self._nova_api.servers.delete(instance_id)
except nova_exceptions.NotFound:
pass
def get_instance(self, instance):
return self._nova_api.servers.get(instance)
def get_flavor(self, identifier=None):
result = None
if identifier is None:
raise a10_ex.IdentifierUnspecifiedError(
"Parameter identifier must specify flavor id or name")
flavor_filter = (lambda x: x is not None and
((hasattr(x, "name") and x.name == identifier)
or (hasattr(x, "id") and x.id == identifier)))
flavors = self._nova_api.flavors.list()
filtered = filter(flavor_filter, flavors)
# TODO(mdurrant): What if we accidentally hit multiple flavors?
if filtered and len(filtered) > 0:
result = filtered[0]
return result
def get_image(self, identifier=None):
result = None
images = []
if identifier is None:
raise a10_ex.IdentifierUnspecifiedError(
"Parameter identifier must specify image id or name")
img_filter = (lambda x: x is not None and
((hasattr(x, "name") and x.name is not None and identifier in x.name)
or (hasattr(x, "id") and x.id == identifier)))
try:
images = list(self._glance_api.images.list())
except Exception as ex:
raise a10_ex.ImageNotFoundError(
"Unable to retrieve images from glance. Error %s" % (ex))
filtered = filter(img_filter, images)
if filtered:
result = filtered[0]
return result
def _handle_missing_networks(self, not_found):
msg_format = "Network {0} was not found by ID or name."
msgs = []
for net in not_found:
msgs.append(msg_format.format(net))
ex_msg = "\n".join(msgs)
LOG.exception(ex_msg)
raise a10_ex.NetworksNotFoundError(ex_msg)
def _get_networks(self, session, networks=[]):
network_list = {"networks": []}
net_list = []
if networks is None:
raise a10_ex.IdentifierUnspecifiedError(
"Parameter networks must be specified.")
try:
# Lookup as user, since names are not unique
q_api = neutron_client.Client(NEUTRON_VERSION, session=session)
network_list = q_api.list_networks()
net_list = network_list.get("networks", [])
# TODO(mdurrant) - Create specific exceptions.
except Exception as ex:
LOG.exception(
"Unable to retrieve networks from neutron.\nError %s" % (ex))
# TODO(mdurrant-jk-cshock) - Look up networks by name too
id_func = (lambda x: x.get("net-id",
x.get("uuid", x.get("id"))) if x is not None else None)
networks_by_id = dict((id_func(x), x) for x in net_list)
networks_by_name = dict((x.get("name"), x) for x in net_list)
available_networks = networks_by_name.copy()
available_networks.update(networks_by_id)
missing_networks = [x for x in networks if x not in available_networks.keys()]
if any(missing_networks):
self._handle_missing_networks(missing_networks)
return [{
'id': id_func(available_networks[x]),
'name': available_networks[x].get('name', '')
} for x in networks]
def get_networks(self, networks=[]):
if self._ks_session != self._network_ks_session:
mgmt = self._get_networks(self._ks_session, networks[:1])
data = self._get_networks(self._network_ks_session, networks[1:])
return mgmt + data
else:
return self._get_networks(self._ks_session, networks)
def _device_instance(self, vthunder_config, name=None):
# Pick an image, any image
image_id = vthunder_config['glance_image']
if image_id is None:
raise a10_ex.FeatureNotConfiguredError("Launching instance requires configured image")
# Get the flavor from config
flavor = vthunder_config['nova_flavor']
if flavor is None:
raise a10_ex.FeatureNotConfiguredError("Launching instance requires configured flavor")
mgmt_network = vthunder_config.get("vthunder_management_network")
networks = [mgmt_network] if mgmt_network else []
networks += vthunder_config.get('vthunder_data_networks')
if networks is None or len(networks) < 1:
raise a10_ex.FeatureNotConfiguredError(
"Launching instance requires configured networks")
return {
'name': name,
'image': image_id,
'flavor': flavor,
'networks': networks
}
def create_device_instance(self, vthunder_config, name=None):
instance_configuration = self._device_instance(vthunder_config, name=name)
return self._create_instance(instance_configuration)
def _plumb_port(self, server, network_id, wrong_ips):
"""Look for an existing port on the network
Add one if it doesn't exist
"""
for attached_interface in server.interface_list():
if attached_interface.net_id == network_id:
if any(map(lambda x: x['ip_address'] in wrong_ips, attached_interface.fixed_ips)):
continue
return attached_interface
return server.interface_attach(None, network_id, None)
def plumb_instance(self, instance_id, network_id, allowed_ips, wrong_ips=[]):
server = self._nova_api.servers.get(instance_id)
interface = self._plumb_port(server, network_id, wrong_ips=wrong_ips)
port = self._neutron_api.show_port(interface.port_id)
allowed_address_pairs = port["port"].get("allowed_address_pairs", [])
existing_allowed_ips = set(pair["ip_address"] for pair in allowed_address_pairs)
new_allowed_ips = set(allowed_ips) - existing_allowed_ips
new_address_pairs = [{"ip_address": ip} for ip in new_allowed_ips]
merged_address_pairs = allowed_address_pairs + new_address_pairs
self._neutron_api.update_port(interface.port_id, {
"port": {
"allowed_address_pairs": merged_address_pairs
}
})
return interface.fixed_ips[0]['ip_address']
def plumb_instance_subnet(self, instance_id, subnet_id, allowed_ips, wrong_ips=[]):
subnet = self._neutron_api.show_subnet(subnet_id)
network_id = subnet["subnet"]["network_id"]
return self.plumb_instance(instance_id, network_id, allowed_ips, wrong_ips=wrong_ips)
|
[
"neutronclient.neutron.client.Client",
"a10_neutron_lbaas.vthunder.keystone.KeystoneFromConfig",
"uuid.uuid4",
"a10_neutron_lbaas.a10_exceptions.NetworksNotFoundError",
"novaclient.client.Client",
"a10_neutron_lbaas.a10_exceptions.IdentifierUnspecifiedError",
"glanceclient.client.Client",
"a10_neutron_lbaas.a10_exceptions.ImageNotFoundError",
"time.time",
"a10_neutron_lbaas.vthunder.keystone.KeystoneFromPassword",
"pprint.PrettyPrinter",
"time.sleep",
"a10_neutron_lbaas.vthunder.keystone.KeystoneFromContext",
"a10_neutron_lbaas.a10_exceptions.FeatureNotConfiguredError",
"logging.getLogger"
] |
[((966, 996), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (986, 996), False, 'import pprint\n'), ((1005, 1032), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1022, 1032), False, 'import logging\n'), ((3850, 3909), 'a10_neutron_lbaas.vthunder.keystone.KeystoneFromContext', 'a10_keystone.KeystoneFromContext', (['config', 'openstack_context'], {}), '(config, openstack_context)\n', (3882, 3909), True, 'import a10_neutron_lbaas.vthunder.keystone as a10_keystone\n'), ((4069, 4143), 'a10_neutron_lbaas.vthunder.keystone.KeystoneFromPassword', 'a10_keystone.KeystoneFromPassword', (['config', 'tenant_name', 'username', 'password'], {}), '(config, tenant_name, username, password)\n', (4102, 4143), True, 'import a10_neutron_lbaas.vthunder.keystone as a10_keystone\n'), ((6929, 6940), 'time.time', 'time.time', ([], {}), '()\n', (6938, 6940), False, 'import time\n'), ((10051, 10087), 'a10_neutron_lbaas.a10_exceptions.NetworksNotFoundError', 'a10_ex.NetworksNotFoundError', (['ex_msg'], {}), '(ex_msg)\n', (10079, 10087), True, 'import a10_neutron_lbaas.a10_exceptions as a10_ex\n'), ((2799, 2857), 'novaclient.client.Client', 'nova_client.Client', (['nova_version'], {'session': 'self._ks_session'}), '(nova_version, session=self._ks_session)\n', (2817, 2857), True, 'import novaclient.client as nova_client\n'), ((2914, 2978), 'neutronclient.neutron.client.Client', 'neutron_client.Client', (['NEUTRON_VERSION'], {'session': 'self._ks_session'}), '(NEUTRON_VERSION, session=self._ks_session)\n', (2935, 2978), True, 'import neutronclient.neutron.client as neutron_client\n'), ((3033, 3095), 'glanceclient.client.Client', 'glance_client.Client', (['glance_version'], {'session': 'self._ks_session'}), '(glance_version, session=self._ks_session)\n', (3053, 3095), True, 'import glanceclient.client as glance_client\n'), ((3390, 3429), 'a10_neutron_lbaas.vthunder.keystone.KeystoneFromConfig', 
'a10_keystone.KeystoneFromConfig', (['config'], {}), '(config)\n', (3421, 3429), True, 'import a10_neutron_lbaas.vthunder.keystone as a10_keystone\n'), ((5846, 5879), 'a10_neutron_lbaas.a10_exceptions.NetworksNotFoundError', 'a10_ex.NetworksNotFoundError', (['msg'], {}), '(msg)\n', (5874, 5879), True, 'import a10_neutron_lbaas.a10_exceptions as a10_ex\n'), ((7429, 7440), 'time.time', 'time.time', ([], {}), '()\n', (7438, 7440), False, 'import time\n'), ((7987, 8009), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (7997, 8009), False, 'import time\n'), ((8394, 8487), 'a10_neutron_lbaas.a10_exceptions.IdentifierUnspecifiedError', 'a10_ex.IdentifierUnspecifiedError', (['"""Parameter identifier must specify flavor id or name"""'], {}), "(\n 'Parameter identifier must specify flavor id or name')\n", (8427, 8487), True, 'import a10_neutron_lbaas.a10_exceptions as a10_ex\n'), ((9100, 9192), 'a10_neutron_lbaas.a10_exceptions.IdentifierUnspecifiedError', 'a10_ex.IdentifierUnspecifiedError', (['"""Parameter identifier must specify image id or name"""'], {}), "(\n 'Parameter identifier must specify image id or name')\n", (9133, 9192), True, 'import a10_neutron_lbaas.a10_exceptions as a10_ex\n'), ((10250, 10324), 'a10_neutron_lbaas.a10_exceptions.IdentifierUnspecifiedError', 'a10_ex.IdentifierUnspecifiedError', (['"""Parameter networks must be specified."""'], {}), "('Parameter networks must be specified.')\n", (10283, 10324), True, 'import a10_neutron_lbaas.a10_exceptions as a10_ex\n'), ((10433, 10488), 'neutronclient.neutron.client.Client', 'neutron_client.Client', (['NEUTRON_VERSION'], {'session': 'session'}), '(NEUTRON_VERSION, session=session)\n', (10454, 10488), True, 'import neutronclient.neutron.client as neutron_client\n'), ((12119, 12204), 'a10_neutron_lbaas.a10_exceptions.FeatureNotConfiguredError', 'a10_ex.FeatureNotConfiguredError', (['"""Launching instance requires configured image"""'], {}), "('Launching instance requires configured image'\n 
)\n", (12151, 12204), True, 'import a10_neutron_lbaas.a10_exceptions as a10_ex\n'), ((12331, 12417), 'a10_neutron_lbaas.a10_exceptions.FeatureNotConfiguredError', 'a10_ex.FeatureNotConfiguredError', (['"""Launching instance requires configured flavor"""'], {}), "(\n 'Launching instance requires configured flavor')\n", (12363, 12417), True, 'import a10_neutron_lbaas.a10_exceptions as a10_ex\n'), ((12682, 12770), 'a10_neutron_lbaas.a10_exceptions.FeatureNotConfiguredError', 'a10_ex.FeatureNotConfiguredError', (['"""Launching instance requires configured networks"""'], {}), "(\n 'Launching instance requires configured networks')\n", (12714, 12770), True, 'import a10_neutron_lbaas.a10_exceptions as a10_ex\n'), ((9538, 9625), 'a10_neutron_lbaas.a10_exceptions.ImageNotFoundError', 'a10_ex.ImageNotFoundError', (["('Unable to retrieve images from glance. Error %s' % ex)"], {}), "(\n 'Unable to retrieve images from glance. Error %s' % ex)\n", (9563, 9625), True, 'import a10_neutron_lbaas.a10_exceptions as a10_ex\n'), ((4415, 4427), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4425, 4427), False, 'import uuid\n')]
|
import json
import pytest
import eternal.card
@pytest.fixture
def json_doorbot():
card_info = json.loads("""{"SetNumber":3,
"EternalID":2,
"Name":"<NAME>",
"CardText":"",
"Cost":0,
"Influence":"{F}",
"Attack":0,
"Health":3,
"Rarity":"Common",
"Type":"Unit",
"UnitType":["Grenadin"],
"ImageUrl":"https://cards.eternalwarcry.com/cards/full/Helpful_Doorbot.png",
"DetailsUrl":"https://eternalwarcry.com/cards/d/3-2/helpful-doorbot",
"DeckBuildable":true}""")
return card_info
def test_init(json_doorbot):
card_info = eternal.card.CardInfo(json_doorbot)
assert isinstance(card_info, eternal.card.CardInfo)
assert card_info.id == '3-2'
def test_missing_field(json_doorbot):
del json_doorbot['Name']
with pytest.raises(AssertionError):
eternal.card.CardInfo(json_doorbot)
def test_extra_field(json_doorbot):
json_doorbot['Extra-Field'] = 'This should not be here'
print(json_doorbot)
with pytest.raises(AssertionError):
eternal.card.CardInfo(json_doorbot)
def test_influence_to_faction():
assert eternal.card.influence_to_faction("''") == 'None'
assert eternal.card.influence_to_faction("'{F}{F}{J}'") == 'FJ'
assert eternal.card.influence_to_faction("'{J}{F}'") == 'FJ'
assert eternal.card.influence_to_faction("'{T}{T}{T}'") == 'T'
|
[
"pytest.raises",
"json.loads"
] |
[((102, 694), 'json.loads', 'json.loads', (['"""{"SetNumber":3,\n "EternalID":2,\n "Name":"<NAME>",\n "CardText":"",\n "Cost":0,\n "Influence":"{F}",\n "Attack":0,\n "Health":3,\n "Rarity":"Common",\n "Type":"Unit",\n "UnitType":["Grenadin"],\n "ImageUrl":"https://cards.eternalwarcry.com/cards/full/Helpful_Doorbot.png",\n "DetailsUrl":"https://eternalwarcry.com/cards/d/3-2/helpful-doorbot",\n "DeckBuildable":true}"""'], {}), '(\n """{"SetNumber":3,\n "EternalID":2,\n "Name":"<NAME>",\n "CardText":"",\n "Cost":0,\n "Influence":"{F}",\n "Attack":0,\n "Health":3,\n "Rarity":"Common",\n "Type":"Unit",\n "UnitType":["Grenadin"],\n "ImageUrl":"https://cards.eternalwarcry.com/cards/full/Helpful_Doorbot.png",\n "DetailsUrl":"https://eternalwarcry.com/cards/d/3-2/helpful-doorbot",\n "DeckBuildable":true}"""\n )\n', (112, 694), False, 'import json\n'), ((956, 985), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (969, 985), False, 'import pytest\n'), ((1162, 1191), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1175, 1191), False, 'import pytest\n')]
|
import os
import redis
import hashlib
import time
import urllib.parse as urlparse
from app.schemas.schemas import TrackEvent
from app.utils.constants import REDIS_SS_DELIMITER
class RedisClient:
def __init__(self):
endpoint_url = os.environ.get("REDIS_URL", "redis://127.0.0.1:6379")
url = urlparse.urlparse(endpoint_url)
self.client = redis.StrictRedis(host=url.hostname, port=url.port, password=url.password, charset='utf-8', decode_responses=True)
# Default to every 10 minutes, todo make this configurable
self.DEFAULT_INTERVAL = 600
# Default to 1 token
self.DEFAULT_REFILL = 1
def _set(self, key, val, expiration):
return self.client.set(key, val, ex=expiration)
def _exists(self, key):
return self.client.get(key)
def _get(self, key, default_value=None):
val = self.client.get(key)
return val if val is not None else default_value
def _updateMessageHistory(self, userId, currentTime, messageId):
ssName = self._hashKey(userId)
self.client.zrem(ssName, messageId)
self.client.zadd(ssName, {messageId: currentTime})
def _cleanSortedSet(self, prevMessageIds, currentMessageIds, ssName):
messagesToClean = set(prevMessageIds).difference(set(currentMessageIds))
for idToDelete in messagesToClean:
print(f"Deleting messageId {idToDelete} from {ssName} sorted set")
self.client.zrem(ssName, idToDelete)
def rankMessages(self, userId, availableMessages):
idToMessage = {m.id: m for m in availableMessages}
ssName = self._hashKey(userId)
sortedMessageHistory = self.client.zrange(ssName, 0, -1, desc=False, withscores=True)
prevMessageIds = list(map(lambda x: int(x[0]), sortedMessageHistory))
# clean up messageIds that have been deleted
self._cleanSortedSet(prevMessageIds, idToMessage.keys(), ssName)
orderedCandidates = []
for candidateId, last_sent in sortedMessageHistory:
try:
orderedCandidates.append(idToMessage[int(candidateId)])
del idToMessage[int(candidateId)]
except KeyError:
print(f"Did not find {candidateId} in the available messages!")
# This prioritizes messages that have not been sent before
messagesToSend = [v for k, v in idToMessage.items()]
messagesToSend.extend(orderedCandidates)
for candidateMessage in messagesToSend:
yield candidateMessage
def checkMessage(self, userId, message):
print(f"Checking rate limiting for user id {userId} and message id {message.id}")
currentTime = time.time()
rawKey = f"{userId}_{message.id}"
refillKey = self._hashKey(rawKey) + "_last_reset"
bucketKey = self._hashKey(rawKey) + "_tokens"
lastRefilled = float(self._get(refillKey, currentTime))
# If the current time minus last refilled is 0 - its the first request. We need to add both keys
if (currentTime - lastRefilled == 0) or (currentTime - lastRefilled) >= message.rule.seconds:
# expire refill tokens every day to clean up old messages
self._set(bucketKey, message.rule.tokens, message.rule.seconds)
self._set(refillKey, currentTime, message.rule.seconds)
else:
tokens_left = int(self._get(bucketKey))
if tokens_left < 1:
return False
self.client.decr(bucketKey, amount=1)
# Update/Add the message to the user's sorted set message history
self._updateMessageHistory(userId, currentTime, message.id)
return True
def _hashKey(self, rawKey):
return hashlib.sha512(str.encode(rawKey)).hexdigest()
def updateTrackRank(self, track_event: TrackEvent):
ssName = track_event.guild_id + "_track_ranks"
memberName = track_event.id + REDIS_SS_DELIMITER + track_event.title
self.client.zincrby(ssName, 1, memberName)
def getNTopTracks(self, guild_id:str, n: int):
ssName = guild_id + "_track_ranks"
return self.client.zrevrange(ssName, 0, n, withscores=True)
|
[
"os.environ.get",
"redis.StrictRedis",
"urllib.parse.urlparse",
"time.time"
] |
[((246, 299), 'os.environ.get', 'os.environ.get', (['"""REDIS_URL"""', '"""redis://127.0.0.1:6379"""'], {}), "('REDIS_URL', 'redis://127.0.0.1:6379')\n", (260, 299), False, 'import os\n'), ((314, 345), 'urllib.parse.urlparse', 'urlparse.urlparse', (['endpoint_url'], {}), '(endpoint_url)\n', (331, 345), True, 'import urllib.parse as urlparse\n'), ((368, 486), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': 'url.hostname', 'port': 'url.port', 'password': 'url.password', 'charset': '"""utf-8"""', 'decode_responses': '(True)'}), "(host=url.hostname, port=url.port, password=url.password,\n charset='utf-8', decode_responses=True)\n", (385, 486), False, 'import redis\n'), ((2709, 2720), 'time.time', 'time.time', ([], {}), '()\n', (2718, 2720), False, 'import time\n')]
|
import numpy as np
import h5py
import pandas as pd
from svhn_io import load_svhn
from keras_uncertainty.utils import classifier_calibration_curve, classifier_calibration_error
EPSILON = 1e-10
def load_hdf5_data(filename):
inp = h5py.File(filename, "r")
preds = inp["preds"][...]
inp.close()
return preds
NUM_ENSEMBLES = 15
NUM_BINS=7
#IOD_FILE_PATTERN = "cnn_svhn-num_ens-{}-preds.hdf5"
#OUTPUT_PATTERN = "svhn-calibration-sub-deepensembles_1_num-ens-{}_cnn_svhn.csv"
IOD_FILE_PATTERN = "deepensembles-cnn_svhn-num_ens-{}-preds.hdf5"
OUTPUT_PATTERN = "svhn-calibration-deepensembles-num-ens-{}_cnn_svhn.csv"
if __name__ == "__main__":
for num_ens in range(1, NUM_ENSEMBLES + 1):
(_, __), (___, y_true) = load_svhn()
y_true = y_true.flatten()
y_probs = load_hdf5_data(IOD_FILE_PATTERN.format(num_ens))
y_confs = np.max(y_probs, axis=1)
y_pred = np.argmax(y_probs, axis=1)
curve_conf, curve_acc = classifier_calibration_curve(y_pred, y_true, y_confs, num_bins=NUM_BINS)
error = classifier_calibration_error(y_pred, y_true, y_confs, num_bins=NUM_BINS)
print("Processing calibration curve for {} ensembles. Error: {}".format(num_ens, error))
output_df = pd.DataFrame(data={"conf": curve_conf, "acc": curve_acc})
output_df.to_csv(OUTPUT_PATTERN.format(num_ens), sep=';', index=False)
|
[
"pandas.DataFrame",
"h5py.File",
"numpy.argmax",
"keras_uncertainty.utils.classifier_calibration_curve",
"numpy.max",
"keras_uncertainty.utils.classifier_calibration_error",
"svhn_io.load_svhn"
] |
[((235, 259), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (244, 259), False, 'import h5py\n'), ((741, 752), 'svhn_io.load_svhn', 'load_svhn', ([], {}), '()\n', (750, 752), False, 'from svhn_io import load_svhn\n'), ((873, 896), 'numpy.max', 'np.max', (['y_probs'], {'axis': '(1)'}), '(y_probs, axis=1)\n', (879, 896), True, 'import numpy as np\n'), ((914, 940), 'numpy.argmax', 'np.argmax', (['y_probs'], {'axis': '(1)'}), '(y_probs, axis=1)\n', (923, 940), True, 'import numpy as np\n'), ((974, 1046), 'keras_uncertainty.utils.classifier_calibration_curve', 'classifier_calibration_curve', (['y_pred', 'y_true', 'y_confs'], {'num_bins': 'NUM_BINS'}), '(y_pred, y_true, y_confs, num_bins=NUM_BINS)\n', (1002, 1046), False, 'from keras_uncertainty.utils import classifier_calibration_curve, classifier_calibration_error\n'), ((1063, 1135), 'keras_uncertainty.utils.classifier_calibration_error', 'classifier_calibration_error', (['y_pred', 'y_true', 'y_confs'], {'num_bins': 'NUM_BINS'}), '(y_pred, y_true, y_confs, num_bins=NUM_BINS)\n', (1091, 1135), False, 'from keras_uncertainty.utils import classifier_calibration_curve, classifier_calibration_error\n'), ((1255, 1312), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'conf': curve_conf, 'acc': curve_acc}"}), "(data={'conf': curve_conf, 'acc': curve_acc})\n", (1267, 1312), True, 'import pandas as pd\n')]
|
import numpy as np
import sys
import gpflow
import VFF
from time import time
from config import *
dim = sys.argv[1]
rep = sys.argv[2]
print('vff: dimension {}, replicate {}'.format(dim, r))
# data
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, 0))
# full_gp
def prodkern(dim):
    """Build a product of `dim` one-dimensional Matern-3/2 kernels.

    Kernel i acts on input dimension i; every factor shares the global
    `lengthscale` imported from config.
    """
    factors = []
    for axis in range(dim):
        factors.append(
            gpflow.kernels.Matern32(1, active_dims=[axis], lengthscales=lengthscale)
        )
    return gpflow.kernels.Prod(factors)
# ---- full (exact) GP baseline ----
k = prodkern(dim)
m = gpflow.gpr.GPR(data['Xtrain'], data['Ytrain'], kern=k)
m.likelihood.variance = noise_var
# BUG FIX: `r` was undefined here (NameError); the replicate index is `rep`.
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, rep))
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
# Use a context manager so the handle is closed even on error, and avoid
# shadowing the builtin `file`.  A trailing newline is added so successive
# append-mode runs produce one CSV row each instead of running together.
with open("results/full.csv", "a") as out:
    out.write("{}, {}, {}, {}\n".format(dim, rep, marg_lik, mean_log_pred))
##########################
# kron
# Kronecker-structured VFF: variational GP with inter-domain Fourier features.
# Kernels and likelihood are held fixed; only q(u) is optimized.
results = pd.DataFrame()
for dim in dimensions:
    a, b = -1.5 * np.ones(dim), 1.5 * np.ones(dim)
    k = prodkern(dim)
    for r in range(repeats):
        print('kron replicate ',r,'/',repeats)
        data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
        for M in num_freqs:
            # NOTE(review): (2*M-1)**dim is truthy for every M >= 1, so this
            # guard never filters anything; the sparse sections below cap at
            # `M < 500`, so a model-size cap was presumably intended here —
            # TODO confirm against the original experiment.
            if (2*M-1)**dim:
                # [a, b]: box boundaries for the Fourier features
                # (overrides the wider box computed above).
                a, b = -0.5 * np.ones(dim), 1.5 * np.ones(dim)
                m = VFF.vgp.VGP_kron(data['Xtrain'], data['Ytrain'], np.arange(M), a, b,
                                     kerns=prodkern(dim).kern_list,
                                     likelihood=gpflow.likelihoods.Gaussian(),
                                     use_two_krons=True)
                m.likelihood.variance = noise_var
                # only optimize q(u)
                m.kerns.fixed = True
                m.likelihood.fixed = True
                start = time()
                m.optimize()
                marg_lik = m.compute_log_likelihood().squeeze()
                mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
                t = time() - start
                results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
                                              mean_log_pred=mean_log_pred, time=t,
                                              num_inducing=M),
                                         ignore_index=True)
    # do this inside the loop so we can get partial results if something crashes
    results.to_csv('results/kron.csv')
##########################
# kron_opt
# Same Kronecker VFF model as above, but here the kernel hyperparameters are
# optimized jointly with q(u) (nothing is fixed before m.optimize()).
results = pd.DataFrame()
for dim in dimensions:
    a, b = -1.5 * np.ones(dim), 1.5 * np.ones(dim)
    k = prodkern(dim)
    for r in range(repeats):
        print('kron_opt replicate ',r,'/',repeats)
        data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
        for M in num_freqs:
            # NOTE(review): this guard is truthy for every M >= 1 — see the
            # matching comment in the kron section; a size cap was presumably
            # intended.  TODO confirm.
            if (2*M-1)**dim:
                m = VFF.vgp.VGP_kron(data['Xtrain'], data['Ytrain'], np.arange(M), a, b,
                                     kerns=k.kern_list,
                                     likelihood=gpflow.likelihoods.Gaussian(),
                                     use_two_krons=True)
                m.likelihood.variance = noise_var
                # build kronecker GP model
                start = time()
                m.optimize()
                marg_lik = m.compute_log_likelihood().squeeze()
                mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
                t = time() - start
                results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
                                              mean_log_pred=mean_log_pred, time=t,
                                              num_inducing=M),
                                         ignore_index=True)
results.to_csv('results/kron_opt.csv')
##########################
# Sparse
# Sparse-GP baseline (SGPR) with inducing points placed by k-means; inducing
# locations are NOT optimized (no m.optimize() call, likelihood/prediction
# are evaluated at the k-means initialization).
results = pd.DataFrame()
for dim in dimensions:
    for r in range(repeats):
        print('Sparse replicate ',r,'/',repeats)
        data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
        # Match the number of inducing points to the VFF feature counts.
        num_inducing = (2*num_freqs-1)**dim
        for M in num_inducing:
            if M < 500:
                # build sparse GP model
                # NOTE(review): `KMeans` is never imported in this file —
                # this needs `from sklearn.cluster import KMeans` to run.
                Z = KMeans(n_clusters=M).fit(data['Xtrain']).cluster_centers_
                m = gpflow.sgpr.SGPR(data['Xtrain'], data['Ytrain'], Z=Z, kern=prodkern(dim))
                m.likelihood.variance = noise_var
                start = time()
                marg_lik = m.compute_log_likelihood().squeeze()
                mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
                t = time() - start
                results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
                                              mean_log_pred=mean_log_pred, time=t,
                                              num_inducing=M),
                                         ignore_index=True)
    # do this inside the loop so we can get partial results if something crashes
    results.to_csv('results/sparse_kmeans.csv')
##########################
# Sparse GP opt
# Sparse-GP baseline (SGPR) where only the inducing locations Z are optimized
# (kernel and likelihood hyperparameters are fixed before m.optimize()).
results = pd.DataFrame()
for dim in dimensions:
    for r in range(repeats):
        print('sparse opt replicate ',r,'/',repeats)
        data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
        # Match the number of inducing points to the VFF feature counts.
        num_inducing = (2*num_freqs-1)**dim
        for M in num_inducing:
            if M < 500:
                # build sparse GP model
                # NOTE(review): `KMeans` is never imported in this file —
                # this needs `from sklearn.cluster import KMeans` to run.
                Z = KMeans(n_clusters=M).fit(data['Xtrain']).cluster_centers_
                m = gpflow.sgpr.SGPR(data['Xtrain'], data['Ytrain'], Z=Z, kern=prodkern(dim))
                m.likelihood.variance = noise_var
                # only optimize Z
                m.kern.fixed = True
                m.likelihood.fixed = True
                start = time()
                m.optimize()
                marg_lik = m.compute_log_likelihood().squeeze()
                mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
                t = time() - start
                results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
                                              mean_log_pred=mean_log_pred, time=t,
                                              num_inducing=M),
                                         ignore_index=True)
    # do this inside the loop so we can get partial results if something crashes
    results.to_csv('results/sparse_opt.csv')
##########################
#
|
[
"gpflow.likelihoods.Gaussian",
"numpy.ones",
"time.time",
"numpy.arange",
"gpflow.gpr.GPR",
"gpflow.kernels.Matern32"
] |
[((469, 523), 'gpflow.gpr.GPR', 'gpflow.gpr.GPR', (["data['Xtrain']", "data['Ytrain']"], {'kern': 'k'}), "(data['Xtrain'], data['Ytrain'], kern=k)\n", (483, 523), False, 'import gpflow\n'), ((323, 392), 'gpflow.kernels.Matern32', 'gpflow.kernels.Matern32', (['(1)'], {'active_dims': '[i]', 'lengthscales': 'lengthscale'}), '(1, active_dims=[i], lengthscales=lengthscale)\n', (346, 392), False, 'import gpflow\n'), ((965, 977), 'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (972, 977), True, 'import numpy as np\n'), ((985, 997), 'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (992, 997), True, 'import numpy as np\n'), ((2532, 2544), 'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (2539, 2544), True, 'import numpy as np\n'), ((2552, 2564), 'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (2559, 2564), True, 'import numpy as np\n'), ((1769, 1775), 'time.time', 'time', ([], {}), '()\n', (1773, 1775), False, 'from time import time\n'), ((3189, 3195), 'time.time', 'time', ([], {}), '()\n', (3193, 3195), False, 'from time import time\n'), ((4377, 4383), 'time.time', 'time', ([], {}), '()\n', (4381, 4383), False, 'from time import time\n'), ((5759, 5765), 'time.time', 'time', ([], {}), '()\n', (5763, 5765), False, 'from time import time\n'), ((1353, 1365), 'numpy.arange', 'np.arange', (['M'], {}), '(M)\n', (1362, 1365), True, 'import numpy as np\n'), ((1978, 1984), 'time.time', 'time', ([], {}), '()\n', (1982, 1984), False, 'from time import time\n'), ((2860, 2872), 'numpy.arange', 'np.arange', (['M'], {}), '(M)\n', (2869, 2872), True, 'import numpy as np\n'), ((3398, 3404), 'time.time', 'time', ([], {}), '()\n', (3402, 3404), False, 'from time import time\n'), ((4557, 4563), 'time.time', 'time', ([], {}), '()\n', (4561, 4563), False, 'from time import time\n'), ((5968, 5974), 'time.time', 'time', ([], {}), '()\n', (5972, 5974), False, 'from time import time\n'), ((1251, 1263), 'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (1258, 1263), True, 'import numpy as 
np\n'), ((1271, 1283), 'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (1278, 1283), True, 'import numpy as np\n'), ((1489, 1518), 'gpflow.likelihoods.Gaussian', 'gpflow.likelihoods.Gaussian', ([], {}), '()\n', (1516, 1518), False, 'import gpflow\n'), ((2984, 3013), 'gpflow.likelihoods.Gaussian', 'gpflow.likelihoods.Gaussian', ([], {}), '()\n', (3011, 3013), False, 'import gpflow\n')]
|
import torch.utils.data as data
import os
import os.path
from numpy.random import randint
from ops.io import load_proposal_file
from transforms import *
from ops.utils import temporal_iou
class SSNInstance:
    """A single temporal proposal (or groundtruth segment) within one video.

    Frames are clamped to the video length; `coverage` is the fraction of the
    video the segment spans.  Regression targets (`loc_reg`, `size_reg`) are
    filled in lazily by compute_regression_targets().

    NOTE(review): `np` and `math` are not imported explicitly here — they are
    presumably provided by `from transforms import *`; verify.
    """
    def __init__(
        self,
        start_frame,
        end_frame,
        video_frame_count,
        fps=1,
        label=None,
        best_iou=None,
        overlap_self=None,
    ):
        self.start_frame = start_frame
        # Clamp to the video length so segments never extend past the end.
        self.end_frame = min(end_frame, video_frame_count)
        self._label = label
        self.fps = fps
        # Fraction of the whole video covered (uses the *unclamped* end_frame).
        self.coverage = (end_frame - start_frame) / video_frame_count
        self.best_iou = best_iou
        self.overlap_self = overlap_self
        self.loc_reg = None
        self.size_reg = None
    def compute_regression_targets(self, gt_list, fg_thresh):
        """Compute location/size regression targets against the best-matching
        groundtruth in `gt_list`.  No-op for background proposals
        (best_iou < fg_thresh)."""
        if self.best_iou < fg_thresh:
            # background proposals do not need this
            return
        # find the groundtruth instance with the highest IOU
        ious = [
            temporal_iou(
                (self.start_frame, self.end_frame), (gt.start_frame, gt.end_frame)
            )
            for gt in gt_list
        ]
        best_gt_id = np.argmax(ious)
        best_gt = gt_list[best_gt_id]
        prop_center = (self.start_frame + self.end_frame) / 2
        gt_center = (best_gt.start_frame + best_gt.end_frame) / 2
        prop_size = self.end_frame - self.start_frame + 1
        gt_size = best_gt.end_frame - best_gt.start_frame + 1
        # get regression target:
        # (1). center shift proportional to the proposal duration
        # (2). logarithm of the groundtruth duration over proposal duration
        self.loc_reg = (gt_center - prop_center) / prop_size
        try:
            self.size_reg = math.log(gt_size / prop_size)
        except:
            # math.log fails when gt_size/prop_size <= 0; dump the operands
            # for debugging before re-raising.
            print((gt_size, prop_size, self.start_frame, self.end_frame))
            raise
    @property
    def start_time(self):
        # Start of the segment in seconds.
        return self.start_frame / self.fps
    @property
    def end_time(self):
        # End of the segment in seconds.
        return self.end_frame / self.fps
    @property
    def label(self):
        # -1 marks an unlabeled instance.
        return self._label if self._label is not None else -1
    @property
    def regression_targets(self):
        # [0, 0] until compute_regression_targets() has run.
        return [self.loc_reg, self.size_reg] if self.loc_reg is not None else [0, 0]
class SSNVideoRecord:
    """One parsed video entry from the proposal file.

    `prop_record` layout (as consumed below): [0] video id, [1] frame count,
    [2] groundtruth rows (label, start, end), [3] proposal rows
    (label, best_iou, overlap_self, start, end).
    """
    def __init__(self, prop_record):
        self._data = prop_record
        frame_count = int(self._data[1])
        # build instance record
        self.gt = [
            SSNInstance(
                int(x[1]), int(x[2]), frame_count, label=int(x[0]), best_iou=1.0
            )
            for x in self._data[2]
            if int(x[2]) > int(x[1])
        ]
        # Drop degenerate segments that start at/after the last frame.
        self.gt = list([x for x in self.gt if x.start_frame < frame_count])
        self.proposals = [
            SSNInstance(
                int(x[3]),
                int(x[4]),
                frame_count,
                label=int(x[0]),
                best_iou=float(x[1]),
                overlap_self=float(x[2]),
            )
            for x in self._data[3]
            if int(x[4]) > int(x[3])
        ]
        self.proposals = list(
            [x for x in self.proposals if x.start_frame < frame_count]
        )
    @property
    def id(self):
        # Video identifier (first field of the record).
        return self._data[0]
    @property
    def num_frames(self):
        return int(self._data[1])
    def get_fg(self, fg_thresh, with_gt=True):
        """Return foreground proposals (best_iou > fg_thresh), optionally with
        groundtruth appended, with regression targets computed."""
        fg = [p for p in self.proposals if p.best_iou > fg_thresh]
        if with_gt:
            fg.extend(self.gt)
        for x in fg:
            x.compute_regression_targets(self.gt, fg_thresh)
        return fg
    def get_negatives(
        self,
        incomplete_iou_thresh,
        bg_iou_thresh,
        bg_coverage_thresh=0.01,
        incomplete_overlap_thresh=0.7,
    ):
        """Split non-foreground proposals into (incomplete, background) lists.

        Incomplete: low IoU with groundtruth but high self-overlap.
        Background: not incomplete, low IoU, and enough video coverage.
        """
        tag = [0] * len(self.proposals)
        incomplete_props = []
        background_props = []
        for i in range(len(tag)):
            if (
                self.proposals[i].best_iou < incomplete_iou_thresh
                and self.proposals[i].overlap_self > incomplete_overlap_thresh
            ):
                tag[i] = 1  # incomplete
                incomplete_props.append(self.proposals[i])
        for i in range(len(tag)):
            if (
                tag[i] == 0
                and self.proposals[i].best_iou < bg_iou_thresh
                and self.proposals[i].coverage > bg_coverage_thresh
            ):
                background_props.append(self.proposals[i])
        return incomplete_props, background_props
class SSNDataSet(data.Dataset):
    """Proposal dataset for SSN-style temporal action detection.

    Parses a proposal file into per-video records, builds dataset-wide pools
    of foreground / incomplete / background proposals, and serves either
    training samples (sampled proposals with frames, labels and regression
    targets) or test data (a frame generator plus proposal metadata).
    """
    def __init__(
        self,
        root_path,
        prop_file=None,
        body_seg=5,
        aug_seg=2,
        video_centric=True,
        new_length=1,
        modality="RGB",
        image_tmpl="img_{:05d}.jpg",
        transform=None,
        random_shift=True,
        test_mode=False,
        prop_per_video=8,
        fg_ratio=1,
        bg_ratio=1,
        incomplete_ratio=6,
        fg_iou_thresh=0.7,
        bg_iou_thresh=0.01,
        incomplete_iou_thresh=0.3,
        bg_coverage_thresh=0.02,
        incomplete_overlap_thresh=0.7,
        gt_as_fg=True,
        reg_stats=None,
        test_interval=6,
        verbose=True,
        exclude_empty=True,
        epoch_multiplier=1,
    ):
        self.root_path = root_path
        self.prop_file = prop_file
        self.verbose = verbose
        self.body_seg = body_seg
        self.aug_seg = aug_seg
        self.video_centric = video_centric
        self.exclude_empty = exclude_empty
        self.epoch_multiplier = epoch_multiplier
        self.new_length = new_length
        self.modality = modality
        self.image_tmpl = image_tmpl
        self.transform = transform
        self.random_shift = random_shift
        self.test_mode = test_mode
        self.test_interval = test_interval
        self.fg_iou_thresh = fg_iou_thresh
        self.incomplete_iou_thresh = incomplete_iou_thresh
        self.bg_iou_thresh = bg_iou_thresh
        self.bg_coverage_thresh = bg_coverage_thresh
        self.incomplete_overlap_thresh = incomplete_overlap_thresh
        # Fraction of the proposal duration used to extend the crop before
        # the start / after the end (the "starting"/"ending" stages).
        self.starting_ratio = 0.5
        self.ending_ratio = 0.5
        self.gt_as_fg = gt_as_fg
        # Per-video proposal budget split by the fg/bg/incomplete ratios.
        denum = fg_ratio + bg_ratio + incomplete_ratio
        self.fg_per_video = int(prop_per_video * (fg_ratio / denum))
        self.bg_per_video = int(prop_per_video * (bg_ratio / denum))
        self.incomplete_per_video = (
            prop_per_video - self.fg_per_video - self.bg_per_video
        )
        self._parse_prop_file(stats=reg_stats)
    def _load_image(self, directory, idx):
        """Load the frame(s) at index `idx`: one RGB image, or an (x, y)
        optical-flow pair for the 'Flow' modality."""
        if self.modality == "RGB" or self.modality == "RGBDiff":
            return [
                Image.open(
                    os.path.join(directory, self.image_tmpl.format(idx))
                ).convert("RGB")
            ]
        elif self.modality == "Flow":
            x_img = Image.open(
                os.path.join(directory, self.image_tmpl.format("x", idx))
            ).convert("L")
            y_img = Image.open(
                os.path.join(directory, self.image_tmpl.format("y", idx))
            ).convert("L")
            return [x_img, y_img]
    def _parse_prop_file(self, stats=None):
        """Parse the proposal file, build the fg/bg/incomplete pools and the
        regression-target statistics."""
        prop_info = load_proposal_file(self.prop_file)
        self.video_list = [SSNVideoRecord(p) for p in prop_info]
        if self.exclude_empty:
            self.video_list = list([x for x in self.video_list if len(x.gt) > 0])
        self.video_dict = {v.id: v for v in self.video_list}
        # construct three pools:
        # 1. Foreground
        # 2. Background
        # 3. Incomplete
        self.fg_pool = []
        self.bg_pool = []
        self.incomp_pool = []
        for v in self.video_list:
            self.fg_pool.extend(
                [(v.id, prop) for prop in v.get_fg(self.fg_iou_thresh, self.gt_as_fg)]
            )
            incomp, bg = v.get_negatives(
                self.incomplete_iou_thresh,
                self.bg_iou_thresh,
                self.bg_coverage_thresh,
                self.incomplete_overlap_thresh,
            )
            self.incomp_pool.extend([(v.id, prop) for prop in incomp])
            self.bg_pool.extend([(v.id, prop) for prop in bg])
        if stats is None:
            self._compute_regresssion_stats()
        else:
            self.stats = stats
        if self.verbose:
            print(
                (
                    """
        SSNDataset: Proposal file {prop_file} parsed.
        There are {pnum} usable proposals from {vnum} videos.
        {fnum} foreground proposals
        {inum} incomplete_proposals
        {bnum} background_proposals
        Sampling config:
        FG/BG/INC: {fr}/{br}/{ir}
        Video Centric: {vc}
        Epoch size multiplier: {em}
        Regression Stats:
        Location: mean {stats[0][0]:.05f} std {stats[1][0]:.05f}
        Duration: mean {stats[0][1]:.05f} std {stats[1][1]:.05f}
            """.format(
                        prop_file=self.prop_file,
                        pnum=len(self.fg_pool)
                        + len(self.bg_pool)
                        + len(self.incomp_pool),
                        fnum=len(self.fg_pool),
                        inum=len(self.incomp_pool),
                        bnum=len(self.bg_pool),
                        fr=self.fg_per_video,
                        br=self.bg_per_video,
                        ir=self.incomplete_per_video,
                        vnum=len(self.video_dict),
                        vc=self.video_centric,
                        stats=self.stats,
                        em=self.epoch_multiplier,
                    )
                )
            )
        else:
            print(
                (
                    """
        SSNDataset: Proposal file {prop_file} parsed.
        """.format(
                    prop_file=self.prop_file
                )
            )
            )
    def _video_centric_sampling(self, video):
        """Sample this video's proposal budget from the video itself, falling
        back to the dataset-wide pools when the video has no candidates."""
        fg = video.get_fg(self.fg_iou_thresh, self.gt_as_fg)
        incomp, bg = video.get_negatives(
            self.incomplete_iou_thresh,
            self.bg_iou_thresh,
            self.bg_coverage_thresh,
            self.incomplete_overlap_thresh,
        )
        def sample_video_proposals(
            proposal_type, video_id, video_pool, requested_num, dataset_pool
        ):
            # Returns a list of ((video_id, proposal), proposal_type) pairs.
            if len(video_pool) == 0:
                # if there is nothing in the video pool, go fetch from the dataset pool
                return [
                    (dataset_pool[x], proposal_type)
                    for x in np.random.choice(
                        len(dataset_pool), requested_num, replace=False
                    )
                ]
            else:
                # Sample with replacement only when the pool is too small.
                replicate = len(video_pool) < requested_num
                idx = np.random.choice(
                    len(video_pool), requested_num, replace=replicate
                )
                return [((video_id, video_pool[x]), proposal_type) for x in idx]
        out_props = []
        out_props.extend(
            sample_video_proposals(0, video.id, fg, self.fg_per_video, self.fg_pool)
        )  # sample foreground
        out_props.extend(
            sample_video_proposals(
                1, video.id, incomp, self.incomplete_per_video, self.incomp_pool
            )
        )  # sample incomp.
        out_props.extend(
            sample_video_proposals(2, video.id, bg, self.bg_per_video, self.bg_pool)
        )  # sample background
        return out_props
    def _random_sampling(self):
        """Draw the fg/incomplete/bg budget uniformly from the dataset-wide
        pools, ignoring video boundaries."""
        out_props = []
        # BUG FIX: np.random.choice requires a 1-D array, but the pools are
        # lists of (video_id, proposal) tuples, which numpy rejects (or mangles
        # into a 2-D object array).  Sample indices instead and index the pool.
        fg_idx = np.random.choice(len(self.fg_pool), self.fg_per_video, replace=False)
        out_props.extend([(self.fg_pool[i], 0) for i in fg_idx])
        incomp_idx = np.random.choice(
            len(self.incomp_pool), self.incomplete_per_video, replace=False
        )
        out_props.extend([(self.incomp_pool[i], 1) for i in incomp_idx])
        bg_idx = np.random.choice(len(self.bg_pool), self.bg_per_video, replace=False)
        out_props.extend([(self.bg_pool[i], 2) for i in bg_idx])
        return out_props
    def _sample_indices(self, valid_length, num_seg):
        """Randomly sample `num_seg` frame offsets over `valid_length` frames
        (one jittered offset per equal-duration segment).

        :return: numpy array of offsets
        """
        average_duration = (valid_length + 1) // num_seg
        if average_duration > 0:
            # normal cases
            offsets = np.multiply(list(range(num_seg)), average_duration) + randint(
                average_duration, size=num_seg
            )
        elif valid_length > num_seg:
            offsets = np.sort(randint(valid_length, size=num_seg))
        else:
            offsets = np.zeros((num_seg,))
        return offsets
    def _get_val_indices(self, valid_length, num_seg):
        """Deterministic counterpart of _sample_indices: segment centers."""
        if valid_length > num_seg:
            tick = valid_length / float(num_seg)
            offsets = np.array([int(tick / 2.0 + tick * x) for x in range(num_seg)])
        else:
            offsets = np.zeros((num_seg,))
        return offsets
    def _sample_ssn_indices(self, prop, frame_cnt):
        """Sample frame indices for the three SSN stages (starting / course /
        ending) of one proposal.

        Returns (offsets, starting_scale, ending_scale, stage_split).
        """
        start_frame = prop.start_frame + 1
        end_frame = prop.end_frame
        duration = end_frame - start_frame + 1
        assert duration != 0, (prop.start_frame, prop.end_frame, prop.best_iou)
        valid_length = duration - self.new_length
        # Extend the crop before/after the proposal, clipped to the video.
        valid_starting = max(1, start_frame - int(duration * self.starting_ratio))
        valid_ending = min(
            frame_cnt - self.new_length + 1,
            end_frame + int(duration * self.ending_ratio),
        )
        valid_starting_length = start_frame - valid_starting - self.new_length + 1
        valid_ending_length = valid_ending - end_frame - self.new_length + 1
        # Scales record how much of the intended extension actually fit.
        starting_scale = (valid_starting_length + self.new_length - 1) / (
            duration * self.starting_ratio
        )
        ending_scale = (valid_ending_length + self.new_length - 1) / (
            duration * self.ending_ratio
        )
        # get starting
        starting_offsets = (
            self._sample_indices(valid_starting_length, self.aug_seg)
            if self.random_shift
            else self._get_val_indices(valid_starting_length, self.aug_seg)
        ) + valid_starting
        course_offsets = (
            self._sample_indices(valid_length, self.body_seg)
            if self.random_shift
            else self._get_val_indices(valid_length, self.body_seg)
        ) + start_frame
        ending_offsets = (
            self._sample_indices(valid_ending_length, self.aug_seg)
            if self.random_shift
            else self._get_val_indices(valid_ending_length, self.aug_seg)
        ) + end_frame
        offsets = np.concatenate((starting_offsets, course_offsets, ending_offsets))
        stage_split = [
            self.aug_seg,
            self.aug_seg + self.body_seg,
            self.aug_seg * 2 + self.body_seg,
        ]
        return offsets, starting_scale, ending_scale, stage_split
    def _load_prop_data(self, prop):
        """Load frames, label and regression targets for one sampled proposal
        `prop` = ((video_id, SSNInstance), proposal_type)."""
        # read frame count
        frame_cnt = self.video_dict[prop[0][0]].num_frames
        # sample segment indices
        prop_indices, starting_scale, ending_scale, stage_split = self._sample_ssn_indices(
            prop[0][1], frame_cnt
        )
        # turn prop into standard format
        # get label
        if prop[1] == 0:
            label = prop[0][1].label
        elif prop[1] == 1:
            label = prop[0][1].label  # incomplete
        elif prop[1] == 2:
            label = 0  # background
        else:
            raise ValueError()
        frames = []
        for idx, seg_ind in enumerate(prop_indices):
            p = int(seg_ind)
            for x in range(self.new_length):
                frames.extend(self._load_image(prop[0][0], min(frame_cnt, p + x)))
        # get regression target
        if prop[1] == 0:
            # Normalize foreground regression targets with the dataset stats.
            reg_targets = prop[0][1].regression_targets
            reg_targets = (
                (reg_targets[0] - self.stats[0][0]) / self.stats[1][0],
                (reg_targets[1] - self.stats[0][1]) / self.stats[1][1],
            )
        else:
            reg_targets = (0.0, 0.0)
        return (
            frames,
            label,
            reg_targets,
            starting_scale,
            ending_scale,
            stage_split,
            prop[1],
        )
    def _compute_regresssion_stats(self):
        """Compute mean/std of the foreground regression targets; stored as
        self.stats = array([[mean_loc, mean_size], [std_loc, std_size]])."""
        if self.verbose:
            print("computing regression target normalizing constants")
        targets = []
        for video in self.video_list:
            fg = video.get_fg(self.fg_iou_thresh, False)
            for p in fg:
                targets.append(list(p.regression_targets))
        self.stats = np.array((np.mean(targets, axis=0), np.std(targets, axis=0)))
    def get_test_data(self, video, test_interval, gen_batchsize=4):
        """Prepare one video for testing: a batched frame generator over the
        subsampled frame ticks, plus per-proposal tick/scaling tensors."""
        props = video.proposals
        video_id = video.id
        frame_cnt = video.num_frames
        # BUG FIX: `np.int` was removed in NumPy 1.24; the builtin `int` is
        # the documented replacement and is what the alias always meant.
        frame_ticks = (
            np.arange(0, frame_cnt - self.new_length, test_interval, dtype=int) + 1
        )
        num_sampled_frames = len(frame_ticks)
        # avoid empty proposal list
        # NOTE(review): this appends to the video record's own proposal list,
        # mutating shared state; presumably harmless for single-pass testing —
        # confirm before reusing records.
        if len(props) == 0:
            props.append(SSNInstance(0, frame_cnt - 1, frame_cnt))
        # process proposals to subsampled sequences
        rel_prop_list = []
        proposal_tick_list = []
        scaling_list = []
        for proposal in props:
            rel_prop = proposal.start_frame / frame_cnt, proposal.end_frame / frame_cnt
            rel_duration = rel_prop[1] - rel_prop[0]
            rel_starting_duration = rel_duration * self.starting_ratio
            rel_ending_duration = rel_duration * self.ending_ratio
            rel_starting = rel_prop[0] - rel_starting_duration
            rel_ending = rel_prop[1] + rel_ending_duration
            real_rel_starting = max(0.0, rel_starting)
            real_rel_ending = min(1.0, rel_ending)
            # Fraction of the intended extension that fit inside the video.
            starting_scaling = (rel_prop[0] - real_rel_starting) / rel_starting_duration
            ending_scaling = (real_rel_ending - rel_prop[1]) / rel_ending_duration
            proposal_ticks = (
                int(real_rel_starting * num_sampled_frames),
                int(rel_prop[0] * num_sampled_frames),
                int(rel_prop[1] * num_sampled_frames),
                int(real_rel_ending * num_sampled_frames),
            )
            rel_prop_list.append(rel_prop)
            proposal_tick_list.append(proposal_ticks)
            scaling_list.append((starting_scaling, ending_scaling))
        # load frames
        # Since there are many frames for each video during testing, instead of returning the read frames,
        # we return a generator which gives the frames in small batches, this lower the memory burden
        # and runtime overhead. Usually setting batchsize=4 would fit most cases.
        def frame_gen(batchsize):
            frames = []
            cnt = 0
            for idx, seg_ind in enumerate(frame_ticks):
                p = int(seg_ind)
                for x in range(self.new_length):
                    frames.extend(self._load_image(video_id, min(frame_cnt, p + x)))
                cnt += 1
                if cnt % batchsize == 0:
                    frames = self.transform(frames)
                    yield frames
                    frames = []
            if len(frames):
                frames = self.transform(frames)
                yield frames
        return (
            frame_gen(gen_batchsize),
            len(frame_ticks),
            torch.from_numpy(np.array(rel_prop_list)),
            torch.from_numpy(np.array(proposal_tick_list)),
            torch.from_numpy(np.array(scaling_list)),
        )
    def get_training_data(self, index):
        """Assemble one training sample: transformed frames and per-proposal
        labels, regression targets, scalings, types and stage splits."""
        if self.video_centric:
            video = self.video_list[index]
            props = self._video_centric_sampling(video)
        else:
            props = self._random_sampling()
        out_frames = []
        out_prop_len = []
        out_prop_scaling = []
        out_prop_type = []
        out_prop_labels = []
        out_prop_reg_targets = []
        out_stage_split = []
        for idx, p in enumerate(props):
            prop_frames, prop_label, reg_targets, starting_scale, ending_scale, stage_split, prop_type = self._load_prop_data(
                p
            )
            processed_frames = self.transform(prop_frames)
            out_frames.append(processed_frames)
            out_prop_len.append(self.body_seg + 2 * self.aug_seg)
            out_prop_scaling.append([starting_scale, ending_scale])
            out_prop_labels.append(prop_label)
            out_prop_reg_targets.append(reg_targets)
            out_prop_type.append(prop_type)
            out_stage_split.append(stage_split)
        out_prop_len = torch.from_numpy(np.array(out_prop_len))
        out_prop_scaling = torch.from_numpy(
            np.array(out_prop_scaling, dtype=np.float32)
        )
        out_prop_labels = torch.from_numpy(np.array(out_prop_labels))
        out_prop_reg_targets = torch.from_numpy(
            np.array(out_prop_reg_targets, dtype=np.float32)
        )
        out_prop_type = torch.from_numpy(np.array(out_prop_type))
        out_stage_split = torch.from_numpy(np.array(out_stage_split))
        out_frames = torch.cat(out_frames)
        return (
            out_frames,
            out_prop_len,
            out_prop_scaling,
            out_prop_type,
            out_prop_labels,
            out_prop_reg_targets,
            out_stage_split,
        )
    def get_all_gt(self):
        """Return all groundtruth as [video_id, 0-based label, rel_start,
        rel_end] rows across the whole dataset."""
        gt_list = []
        for video in self.video_list:
            vid = video.id
            gt_list.extend(
                [
                    [
                        vid,
                        x.label - 1,
                        x.start_frame / video.num_frames,
                        x.end_frame / video.num_frames,
                    ]
                    for x in video.gt
                ]
            )
        return gt_list
    def __getitem__(self, index):
        # The epoch multiplier repeats the video list; wrap the index back.
        real_index = index % len(self.video_list)
        if self.test_mode:
            return self.get_test_data(self.video_list[real_index], self.test_interval)
        else:
            return self.get_training_data(real_index)
    def __len__(self):
        return len(self.video_list) * self.epoch_multiplier
|
[
"ops.io.load_proposal_file",
"numpy.random.randint",
"ops.utils.temporal_iou"
] |
[((7277, 7311), 'ops.io.load_proposal_file', 'load_proposal_file', (['self.prop_file'], {}), '(self.prop_file)\n', (7295, 7311), False, 'from ops.io import load_proposal_file\n'), ((1017, 1102), 'ops.utils.temporal_iou', 'temporal_iou', (['(self.start_frame, self.end_frame)', '(gt.start_frame, gt.end_frame)'], {}), '((self.start_frame, self.end_frame), (gt.start_frame, gt.end_frame)\n )\n', (1029, 1102), False, 'from ops.utils import temporal_iou\n'), ((12799, 12838), 'numpy.random.randint', 'randint', (['average_duration'], {'size': 'num_seg'}), '(average_duration, size=num_seg)\n', (12806, 12838), False, 'from numpy.random import randint\n'), ((12936, 12971), 'numpy.random.randint', 'randint', (['valid_length'], {'size': 'num_seg'}), '(valid_length, size=num_seg)\n', (12943, 12971), False, 'from numpy.random import randint\n')]
|
#!/usr/bin/env python
import uuid
from construct import Container
class SMAPI_Request(object):
'''
Implentation of a ICUV Request
'''
def __init__(self, function_name, target_identifier,
authenticated_userid=b"", password=b"", additional_parameters=b""):
self._function_name = function_name
self._function_name_length = len(function_name)
self._authenticated_userid = authenticated_userid
self._authenticated_userid_length = len(authenticated_userid)
self._password = password
self._password_length = len(password)
self._target_identifier = target_identifier
self._target_identifier_length = len(target_identifier)
self._additional_parameters = additional_parameters
self._additional_parameters_length = len(additional_parameters)
self._input_length = (self._function_name_length + 4 +
self._authenticated_userid_length + 4 +
self._password_length + 4 +
self._target_identifier_length + 4 +
self._additional_parameters_length)
def get_container(self):
return Container(input_length = self._input_length,
function_name_length = self._function_name_length,
function_name = self._function_name,
authenticated_userid_length = self._authenticated_userid_length,
authenticated_userid = self._authenticated_userid,
password_length = self._password_length,
password = self._password,
target_identifier_length = self._target_identifier_length,
target_identifier = self._target_identifier,
additional_parameters = self._additional_parameters)
def __repr__(self):
"<{} (container={})>".format(
self.__class__.__name__,
self.get_container())
|
[
"construct.Container"
] |
[((1221, 1702), 'construct.Container', 'Container', ([], {'input_length': 'self._input_length', 'function_name_length': 'self._function_name_length', 'function_name': 'self._function_name', 'authenticated_userid_length': 'self._authenticated_userid_length', 'authenticated_userid': 'self._authenticated_userid', 'password_length': 'self._password_length', 'password': 'self._password', 'target_identifier_length': 'self._target_identifier_length', 'target_identifier': 'self._target_identifier', 'additional_parameters': 'self._additional_parameters'}), '(input_length=self._input_length, function_name_length=self.\n _function_name_length, function_name=self._function_name,\n authenticated_userid_length=self._authenticated_userid_length,\n authenticated_userid=self._authenticated_userid, password_length=self.\n _password_length, password=self._password, target_identifier_length=\n self._target_identifier_length, target_identifier=self.\n _target_identifier, additional_parameters=self._additional_parameters)\n', (1230, 1702), False, 'from construct import Container\n')]
|
import logging
import json
from typing import List, Type, Union
from keras.models import Model
from keras.layers.merge import Concatenate
from keras.layers import (
Dense, LSTM, Bidirectional, Embedding, Input, Dropout,
TimeDistributed
)
import delft.sequenceLabelling.wrapper
from delft.utilities.layers import ChainCRF
from delft.sequenceLabelling.models import BaseModel
from delft.sequenceLabelling.models import get_model as _get_model, BidLSTM_CRF_FEATURES
from sciencebeam_trainer_delft.sequence_labelling.config import ModelConfig
LOGGER = logging.getLogger(__name__)
class CustomModel(BaseModel):
    """Base class for the custom sequence-labelling models in this module.

    Extends delft's BaseModel with capability flags that the surrounding
    training code inspects: casing requirement, CRF output, feature support,
    feature-indices input requirement and RNN statefulness.
    """

    def __init__(
            self, config, ntags,
            require_casing: bool = False,
            use_crf: bool = False,
            supports_features: bool = False,
            require_features_indices_input: bool = False,
            stateful: bool = False):
        super().__init__(config, ntags)
        # Record every capability flag verbatim on the instance.
        self.stateful = stateful
        self.require_features_indices_input = require_features_indices_input
        self.supports_features = supports_features
        self.use_crf = use_crf
        self.require_casing = require_casing
def _concatenate_inputs(inputs: list, **kwargs):
    """Concatenate a list of Keras tensors, passing a single input through
    unchanged (Concatenate rejects one-element lists)."""
    if len(inputs) > 1:
        return Concatenate(**kwargs)(inputs)
    return inputs[0]
# renamed copy of BidLSTM_CRF to demonstrate a custom model
class CustomBidLSTM_CRF(CustomModel):
    """
    A Keras implementation of BidLSTM-CRF for sequence labelling
    (renamed copy of delft's BidLSTM_CRF demonstrating a custom model).

    Inputs: word embeddings, character indices, optional layout features and
    a sequence-length tensor; output: CRF tag predictions.

    References
    --
    <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
    "Neural Architectures for Named Entity Recognition". Proceedings of NAACL 2016.
    https://arxiv.org/abs/1603.01360
    """
    def __init__(self, config: ModelConfig, ntags=None):
        super().__init__(
            config, ntags,
            require_casing=False, use_crf=True, supports_features=True,
            stateful=config.stateful
        )
        stateful = self.stateful
        # stateful RNNs require the batch size to be passed in
        input_batch_size = config.batch_size if stateful else None
        model_inputs = []
        lstm_inputs = []
        # build input, directly feed with word embedding by the data generator
        word_input = Input(
            shape=(None, config.word_embedding_size),
            batch_shape=(input_batch_size, None, config.word_embedding_size),
            name='word_input'
        )
        model_inputs.append(word_input)
        lstm_inputs.append(word_input)
        # build character based embedding
        char_input = Input(
            shape=(None, config.max_char_length),
            batch_shape=(input_batch_size, None, config.max_char_length),
            dtype='int32',
            name='char_input'
        )
        model_inputs.append(char_input)
        if config.char_embedding_size:
            assert config.char_vocab_size, 'config.char_vocab_size required'
            # Per-token character embedding followed by a char-level BiLSTM
            # summarizing each token into a single vector.
            char_embeddings = TimeDistributed(Embedding(
                input_dim=config.char_vocab_size,
                output_dim=config.char_embedding_size,
                mask_zero=config.char_input_mask_zero,
                name='char_embeddings_embedding'
            ), name='char_embeddings')(char_input)
            chars = TimeDistributed(
                Bidirectional(LSTM(
                    config.num_char_lstm_units,
                    dropout=config.char_input_dropout,
                    recurrent_dropout=config.char_lstm_dropout,
                    return_sequences=False
                )),
                name='char_lstm'
            )(char_embeddings)
            lstm_inputs.append(chars)
        # length of sequence not used for the moment (but used for f1 communication)
        length_input = Input(batch_shape=(None, 1), dtype='int32', name='length_input')
        # combine characters and word embeddings
        LOGGER.debug('model, config.use_features: %s', config.use_features)
        if config.use_features:
            LOGGER.info('model using features')
            assert config.max_feature_size > 0
            features_input = Input(
                batch_shape=(input_batch_size, None, config.max_feature_size),
                name='features_input'
            )
            model_inputs.append(features_input)
            features = features_input
            if config.features_embedding_size:
                features = TimeDistributed(Dense(
                    config.features_embedding_size,
                    name='features_embeddings_dense'
                ), name='features_embeddings')(features)
            # NOTE(review): `chars` is only bound when config.char_embedding_size
            # is truthy; with features enabled but char embeddings disabled this
            # log line would raise NameError — presumably both are always
            # enabled together. TODO confirm.
            LOGGER.info(
                'word_input=%s, chars=%s, features=%s',
                word_input, chars, features
            )
            lstm_inputs.append(features)
        # Word-level BiLSTM over the concatenated representations, then two
        # dense layers projecting to the tag space.
        x = _concatenate_inputs(lstm_inputs, name='word_lstm_input')
        x = Dropout(config.dropout, name='word_lstm_input_dropout')(x)
        x = Bidirectional(LSTM(
            units=config.num_word_lstm_units,
            return_sequences=True,
            recurrent_dropout=config.recurrent_dropout,
            stateful=stateful,
        ), name='word_lstm')(x)
        x = Dropout(config.dropout, name='word_lstm_output_dropout')(x)
        x = Dense(
            config.num_word_lstm_units, name='word_lstm_dense', activation='tanh'
        )(x)
        x = Dense(ntags, name='dense_ntags')(x)
        # CRF decoding layer produces the final tag sequence.
        self.crf = ChainCRF(name='crf')
        pred = self.crf(x)
        model_inputs.append(length_input)
        self.model = Model(inputs=model_inputs, outputs=[pred])
        self.config = config
# copied from
# https://github.com/kermitt2/delft/blob/d2f8390ac01779cab959f57aa6e1a8f1d2723505/
# delft/sequenceLabelling/models.py
class CustomBidLSTM_CRF_FEATURES(CustomModel):
"""
A Keras implementation of BidLSTM-CRF for sequence labelling which create features
from additional orthogonal information generated by GROBID.
References
--
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
"Neural Architectures for Named Entity Recognition". Proceedings of NAACL 2016.
https://arxiv.org/abs/1603.01360
"""
name = 'CustomBidLSTM_CRF_FEATURES'
def __init__(self, config, ntags=None):
super().__init__(
config, ntags,
require_casing=False, use_crf=True, supports_features=True,
require_features_indices_input=True
)
# build input, directly feed with word embedding by the data generator
word_input = Input(shape=(None, config.word_embedding_size), name='word_input')
# build character based embedding
char_input = Input(shape=(None, config.max_char_length), dtype='int32', name='char_input')
char_embeddings = TimeDistributed(Embedding(
input_dim=config.char_vocab_size,
output_dim=config.char_embedding_size,
mask_zero=True,
name='char_embeddings'
))(char_input)
chars = TimeDistributed(Bidirectional(LSTM(
config.num_char_lstm_units,
return_sequences=False
)))(char_embeddings)
# layout features input and embeddings
features_input = Input(
shape=(None, len(config.features_indices)),
dtype='float32',
name='features_input'
)
assert config.features_vocabulary_size, "config.features_vocabulary_size required"
assert config.features_embedding_size, "config.features_embedding_size required"
# features_vocabulary_size (default 12) * number_of_features + 1
# (the zero is reserved for masking / padding)
features_embedding = TimeDistributed(
Embedding(
input_dim=config.features_vocabulary_size * len(config.features_indices) + 1,
output_dim=config.features_embedding_size,
mask_zero=True,
trainable=True,
name='features_embedding'),
name="features_embedding_td_1"
)(features_input)
assert config.features_lstm_units, "config.features_lstm_units required"
features_embedding_bd = TimeDistributed(
Bidirectional(LSTM(config.features_lstm_units, return_sequences=False)),
name="features_embedding_td_2"
)(features_embedding)
features_embedding_out = Dropout(config.dropout)(features_embedding_bd)
# length of sequence not used for the moment (but used for f1 communication)
length_input = Input(batch_shape=(None, 1), dtype='int32', name='length_input')
# combine characters and word embeddings
x = Concatenate()([word_input, chars, features_embedding_out])
x = Dropout(config.dropout)(x)
x = Bidirectional(LSTM(
units=config.num_word_lstm_units,
return_sequences=True,
recurrent_dropout=config.recurrent_dropout
))(x)
x = Dropout(config.dropout)(x)
x = Dense(config.num_word_lstm_units, activation='tanh')(x)
x = Dense(ntags)(x)
self.crf = ChainCRF()
pred = self.crf(x)
self.model = Model(
inputs=[word_input, char_input, features_input, length_input],
outputs=[pred]
)
self.config = config
DEFAULT_MODEL_NAMES = [
'BidLSTM_CRF', 'BidLSTM_CNN', 'BidLSTM_CNN_CRF', 'BidGRU_CRF', 'BidLSTM_CRF_CASING',
BidLSTM_CRF_FEATURES.name
]
MODEL_MAP = {
'CustomBidLSTM_CRF': CustomBidLSTM_CRF,
CustomBidLSTM_CRF_FEATURES.name: CustomBidLSTM_CRF_FEATURES
}
IMPLICIT_MODEL_CONFIG_PROPS_MAP = {
BidLSTM_CRF_FEATURES.name: dict(
use_features=True,
use_features_indices_input=True
),
CustomBidLSTM_CRF_FEATURES.name: dict(
use_features=True,
use_features_indices_input=True
)
}
def register_model(name: str, model_class: Type[CustomModel]):
MODEL_MAP[name] = model_class
def updated_implicit_model_config_props(model_config: ModelConfig):
implicit_model_config_props = IMPLICIT_MODEL_CONFIG_PROPS_MAP.get(model_config.model_type)
if not implicit_model_config_props:
return
for key, value in implicit_model_config_props.items():
setattr(model_config, key, value)
def _create_model(
model_class: Type[CustomModel],
config: ModelConfig,
ntags=None) -> CustomModel:
return model_class(config, ntags=ntags)
def is_model_stateful(model: Union[BaseModel, CustomModel]) -> bool:
try:
return model.stateful
except AttributeError:
return False
def get_model(config, preprocessor, ntags=None):
LOGGER.info(
'get_model, config: %s, ntags=%s',
json.dumps(vars(config), indent=4),
ntags
)
model_class = MODEL_MAP.get(config.model_type)
if not model_class:
return _get_model(config, preprocessor, ntags=ntags)
model = _create_model(model_class, config, ntags=ntags)
config.use_crf = model.use_crf
preprocessor.return_casing = model.require_casing
if config.use_features and not model.supports_features:
LOGGER.warning('features enabled but not supported by model (disabling)')
config.use_features = False
preprocessor.return_features = config.use_features
return model
def get_model_names() -> List[str]:
return sorted(set(DEFAULT_MODEL_NAMES) | set(MODEL_MAP.keys()))
def patch_get_model():
delft.sequenceLabelling.wrapper.get_model = get_model
|
[
"delft.utilities.layers.ChainCRF",
"delft.sequenceLabelling.models.get_model",
"keras.layers.Dropout",
"keras.layers.LSTM",
"keras.models.Model",
"keras.layers.merge.Concatenate",
"keras.layers.Dense",
"keras.layers.Embedding",
"keras.layers.Input",
"logging.getLogger"
] |
[((561, 588), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (578, 588), False, 'import logging\n'), ((1278, 1299), 'keras.layers.merge.Concatenate', 'Concatenate', ([], {}), '(**kwargs)\n', (1289, 1299), False, 'from keras.layers.merge import Concatenate\n'), ((2224, 2361), 'keras.layers.Input', 'Input', ([], {'shape': '(None, config.word_embedding_size)', 'batch_shape': '(input_batch_size, None, config.word_embedding_size)', 'name': '"""word_input"""'}), "(shape=(None, config.word_embedding_size), batch_shape=(\n input_batch_size, None, config.word_embedding_size), name='word_input')\n", (2229, 2361), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((2546, 2689), 'keras.layers.Input', 'Input', ([], {'shape': '(None, config.max_char_length)', 'batch_shape': '(input_batch_size, None, config.max_char_length)', 'dtype': '"""int32"""', 'name': '"""char_input"""'}), "(shape=(None, config.max_char_length), batch_shape=(input_batch_size,\n None, config.max_char_length), dtype='int32', name='char_input')\n", (2551, 2689), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((3733, 3797), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, 1)', 'dtype': '"""int32"""', 'name': '"""length_input"""'}), "(batch_shape=(None, 1), dtype='int32', name='length_input')\n", (3738, 3797), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((5370, 5390), 'delft.utilities.layers.ChainCRF', 'ChainCRF', ([], {'name': '"""crf"""'}), "(name='crf')\n", (5378, 5390), False, 'from delft.utilities.layers import ChainCRF\n'), ((5483, 5525), 'keras.models.Model', 'Model', ([], {'inputs': 'model_inputs', 'outputs': '[pred]'}), '(inputs=model_inputs, outputs=[pred])\n', (5488, 5525), False, 'from keras.models import Model\n'), ((6462, 6528), 'keras.layers.Input', 'Input', ([], {'shape': 
'(None, config.word_embedding_size)', 'name': '"""word_input"""'}), "(shape=(None, config.word_embedding_size), name='word_input')\n", (6467, 6528), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((6593, 6670), 'keras.layers.Input', 'Input', ([], {'shape': '(None, config.max_char_length)', 'dtype': '"""int32"""', 'name': '"""char_input"""'}), "(shape=(None, config.max_char_length), dtype='int32', name='char_input')\n", (6598, 6670), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((8459, 8523), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, 1)', 'dtype': '"""int32"""', 'name': '"""length_input"""'}), "(batch_shape=(None, 1), dtype='int32', name='length_input')\n", (8464, 8523), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((9021, 9031), 'delft.utilities.layers.ChainCRF', 'ChainCRF', ([], {}), '()\n', (9029, 9031), False, 'from delft.utilities.layers import ChainCRF\n'), ((9081, 9169), 'keras.models.Model', 'Model', ([], {'inputs': '[word_input, char_input, features_input, length_input]', 'outputs': '[pred]'}), '(inputs=[word_input, char_input, features_input, length_input],\n outputs=[pred])\n', (9086, 9169), False, 'from keras.models import Model\n'), ((10781, 10826), 'delft.sequenceLabelling.models.get_model', '_get_model', (['config', 'preprocessor'], {'ntags': 'ntags'}), '(config, preprocessor, ntags=ntags)\n', (10791, 10826), True, 'from delft.sequenceLabelling.models import get_model as _get_model, BidLSTM_CRF_FEATURES\n'), ((4080, 4176), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(input_batch_size, None, config.max_feature_size)', 'name': '"""features_input"""'}), "(batch_shape=(input_batch_size, None, config.max_feature_size), name=\n 'features_input')\n", (4085, 4176), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, 
Dropout, TimeDistributed\n'), ((4825, 4880), 'keras.layers.Dropout', 'Dropout', (['config.dropout'], {'name': '"""word_lstm_input_dropout"""'}), "(config.dropout, name='word_lstm_input_dropout')\n", (4832, 4880), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((5129, 5185), 'keras.layers.Dropout', 'Dropout', (['config.dropout'], {'name': '"""word_lstm_output_dropout"""'}), "(config.dropout, name='word_lstm_output_dropout')\n", (5136, 5185), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((5201, 5277), 'keras.layers.Dense', 'Dense', (['config.num_word_lstm_units'], {'name': '"""word_lstm_dense"""', 'activation': '"""tanh"""'}), "(config.num_word_lstm_units, name='word_lstm_dense', activation='tanh')\n", (5206, 5277), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((5315, 5347), 'keras.layers.Dense', 'Dense', (['ntags'], {'name': '"""dense_ntags"""'}), "(ntags, name='dense_ntags')\n", (5320, 5347), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((8304, 8327), 'keras.layers.Dropout', 'Dropout', (['config.dropout'], {}), '(config.dropout)\n', (8311, 8327), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((8586, 8599), 'keras.layers.merge.Concatenate', 'Concatenate', ([], {}), '()\n', (8597, 8599), False, 'from keras.layers.merge import Concatenate\n'), ((8657, 8680), 'keras.layers.Dropout', 'Dropout', (['config.dropout'], {}), '(config.dropout)\n', (8664, 8680), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((8879, 8902), 'keras.layers.Dropout', 'Dropout', (['config.dropout'], {}), '(config.dropout)\n', (8886, 8902), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, 
TimeDistributed\n'), ((8918, 8970), 'keras.layers.Dense', 'Dense', (['config.num_word_lstm_units'], {'activation': '"""tanh"""'}), "(config.num_word_lstm_units, activation='tanh')\n", (8923, 8970), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((8986, 8998), 'keras.layers.Dense', 'Dense', (['ntags'], {}), '(ntags)\n', (8991, 8998), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((4911, 5039), 'keras.layers.LSTM', 'LSTM', ([], {'units': 'config.num_word_lstm_units', 'return_sequences': '(True)', 'recurrent_dropout': 'config.recurrent_dropout', 'stateful': 'stateful'}), '(units=config.num_word_lstm_units, return_sequences=True,\n recurrent_dropout=config.recurrent_dropout, stateful=stateful)\n', (4915, 5039), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((6713, 6840), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': 'config.char_vocab_size', 'output_dim': 'config.char_embedding_size', 'mask_zero': '(True)', 'name': '"""char_embeddings"""'}), "(input_dim=config.char_vocab_size, output_dim=config.\n char_embedding_size, mask_zero=True, name='char_embeddings')\n", (6722, 6840), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((8711, 8820), 'keras.layers.LSTM', 'LSTM', ([], {'units': 'config.num_word_lstm_units', 'return_sequences': '(True)', 'recurrent_dropout': 'config.recurrent_dropout'}), '(units=config.num_word_lstm_units, return_sequences=True,\n recurrent_dropout=config.recurrent_dropout)\n', (8715, 8820), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((2947, 3112), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': 'config.char_vocab_size', 'output_dim': 'config.char_embedding_size', 'mask_zero': 'config.char_input_mask_zero', 'name': 
'"""char_embeddings_embedding"""'}), "(input_dim=config.char_vocab_size, output_dim=config.\n char_embedding_size, mask_zero=config.char_input_mask_zero, name=\n 'char_embeddings_embedding')\n", (2956, 3112), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((6954, 7010), 'keras.layers.LSTM', 'LSTM', (['config.num_char_lstm_units'], {'return_sequences': '(False)'}), '(config.num_char_lstm_units, return_sequences=False)\n', (6958, 7010), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((8138, 8194), 'keras.layers.LSTM', 'LSTM', (['config.features_lstm_units'], {'return_sequences': '(False)'}), '(config.features_lstm_units, return_sequences=False)\n', (8142, 8194), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((3286, 3425), 'keras.layers.LSTM', 'LSTM', (['config.num_char_lstm_units'], {'dropout': 'config.char_input_dropout', 'recurrent_dropout': 'config.char_lstm_dropout', 'return_sequences': '(False)'}), '(config.num_char_lstm_units, dropout=config.char_input_dropout,\n recurrent_dropout=config.char_lstm_dropout, return_sequences=False)\n', (3290, 3425), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n'), ((4394, 4465), 'keras.layers.Dense', 'Dense', (['config.features_embedding_size'], {'name': '"""features_embeddings_dense"""'}), "(config.features_embedding_size, name='features_embeddings_dense')\n", (4399, 4465), False, 'from keras.layers import Dense, LSTM, Bidirectional, Embedding, Input, Dropout, TimeDistributed\n')]
|
"""
This module contains common reusable functions.
"""
from traceback import print_stack
from configparser import ConfigParser
from SupportLibraries.ui_helpers import UIHelpers
class BaseHelpers(UIHelpers):
"""
This class includes basic reusable base_helpers.
"""
def __init__(self, driver):
super().__init__(driver)
self.driver = driver
def load_properties_file(self):
"""
This method loads the properties/ini file
:return: this method returns config reader instance.
"""
config = None
try:
# noinspection PyBroadException
config = ConfigParser()
config.read('test.ini')
except Exception as ex:
self.log.error("Failed to load ini/properties file.", ex)
print_stack()
return config
|
[
"configparser.ConfigParser",
"traceback.print_stack"
] |
[((648, 662), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (660, 662), False, 'from configparser import ConfigParser\n'), ((814, 827), 'traceback.print_stack', 'print_stack', ([], {}), '()\n', (825, 827), False, 'from traceback import print_stack\n')]
|
# Copyright (c) 2020, <NAME>
# License: MIT License
from pathlib import Path
from time import perf_counter
import ezdxf
from ezdxf.render.forms import sphere
from ezdxf.addons import MengerSponge
from ezdxf.addons.pycsg import CSG
DIR = Path('~/Desktop/Outbox').expanduser()
doc = ezdxf.new()
doc.layers.new('sponge', dxfattribs={'color': 5})
doc.layers.new('sphere', dxfattribs={'color': 6})
doc.set_modelspace_vport(6, center=(5, 0))
msp = doc.modelspace()
sponge1 = MengerSponge(level=3).mesh()
sphere1 = sphere(count=32, stacks=16, radius=.5, quads=True).translate(.25, .25, 1)
t0 = perf_counter()
subtract = (CSG(sponge1, meshid=1) - CSG(sphere1, meshid=2))
t1 = perf_counter()
# get mesh result by id
subtract.mesh(1).render(msp, dxfattribs={'layer': 'sponge'})
subtract.mesh(2).render(msp, dxfattribs={'layer': 'sphere'})
print(f'runtime: {t1-t0:.3f}s')
doc.saveas(DIR / 'csg_sphere_vs_menger_sponge.dxf')
|
[
"ezdxf.addons.pycsg.CSG",
"time.perf_counter",
"ezdxf.addons.MengerSponge",
"ezdxf.new",
"pathlib.Path",
"ezdxf.render.forms.sphere"
] |
[((284, 295), 'ezdxf.new', 'ezdxf.new', ([], {}), '()\n', (293, 295), False, 'import ezdxf\n'), ((593, 607), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (605, 607), False, 'from time import perf_counter\n'), ((674, 688), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (686, 688), False, 'from time import perf_counter\n'), ((620, 642), 'ezdxf.addons.pycsg.CSG', 'CSG', (['sponge1'], {'meshid': '(1)'}), '(sponge1, meshid=1)\n', (623, 642), False, 'from ezdxf.addons.pycsg import CSG\n'), ((645, 667), 'ezdxf.addons.pycsg.CSG', 'CSG', (['sphere1'], {'meshid': '(2)'}), '(sphere1, meshid=2)\n', (648, 667), False, 'from ezdxf.addons.pycsg import CSG\n'), ((239, 263), 'pathlib.Path', 'Path', (['"""~/Desktop/Outbox"""'], {}), "('~/Desktop/Outbox')\n", (243, 263), False, 'from pathlib import Path\n'), ((474, 495), 'ezdxf.addons.MengerSponge', 'MengerSponge', ([], {'level': '(3)'}), '(level=3)\n', (486, 495), False, 'from ezdxf.addons import MengerSponge\n'), ((513, 564), 'ezdxf.render.forms.sphere', 'sphere', ([], {'count': '(32)', 'stacks': '(16)', 'radius': '(0.5)', 'quads': '(True)'}), '(count=32, stacks=16, radius=0.5, quads=True)\n', (519, 564), False, 'from ezdxf.render.forms import sphere\n')]
|
"""Central data class and associated."""
# --- import --------------------------------------------------------------------------------------
import collections
import operator
import functools
import warnings
import numpy as np
import h5py
import scipy
from scipy.interpolate import griddata, interp1d
from .._group import Group
from .. import collection as wt_collection
from .. import exceptions as wt_exceptions
from .. import kit as wt_kit
from .. import units as wt_units
from ._axis import Axis, identifier_to_operator
from ._channel import Channel
from ._constant import Constant
from ._variable import Variable
# --- define --------------------------------------------------------------------------------------
__all__ = ["Data"]
# --- class ---------------------------------------------------------------------------------------
class Data(Group):
"""Multidimensional dataset."""
class_name = "Data"
def __init__(self, *args, **kwargs):
self._axes = []
self._constants = []
Group.__init__(self, *args, **kwargs)
# populate axes, constants from attrs string
for identifier in self.attrs.get("axes", []):
if hasattr(identifier, "decode"):
identifier = identifier.decode()
expression, units = identifier.split("{")
units = units.replace("}", "").strip()
if units == "None":
units = None
# Should not be needed for wt5 >= 1.0.3, kept for opening older wt5 files.
for i in identifier_to_operator.keys():
expression = expression.replace(i, identifier_to_operator[i])
expression = expression.replace(" ", "") # remove all whitespace
axis = Axis(self, expression, units)
self._axes.append(axis)
for identifier in self.attrs.get("constants", []):
if hasattr(identifier, "decode"):
identifier = identifier.decode()
expression, units = identifier.split("{")
units = units.replace("}", "").strip()
if units == "None":
units = None
for i in identifier_to_operator.keys():
expression = expression.replace(i, identifier_to_operator[i])
expression = expression.replace(" ", "") # remove all whitespace
const = Constant(self, expression, units)
self._constants.append(const)
self._current_axis_identities_in_natural_namespace = []
if self.file.mode is not None and self.file.mode != "r":
self._on_constants_updated()
self._on_axes_updated()
# the following are populated if not already recorded
self.channel_names
self.source
self.variable_names
def __repr__(self) -> str:
return "<WrightTools.Data '{0}' {1} at {2}>".format(
self.natural_name, str(self.axis_names), "::".join([self.filepath, self.name])
)
@property
def axes(self) -> tuple:
return tuple(self._axes)
@property
def axis_expressions(self) -> tuple:
"""Axis expressions."""
return tuple(a.expression for a in self._axes)
@property
def axis_names(self) -> tuple:
"""Axis names."""
return tuple(a.natural_name for a in self._axes)
@property
def constants(self) -> tuple:
return tuple(self._constants)
@property
def constant_expressions(self) -> tuple:
"""Axis expressions."""
return tuple(a.expression for a in self._constants)
@property
def constant_names(self) -> tuple:
"""Axis names."""
return tuple(a.natural_name for a in self._constants)
@property
def channel_names(self) -> tuple:
"""Channel names."""
if "channel_names" not in self.attrs.keys():
self.attrs["channel_names"] = np.array([], dtype="S")
return tuple(s.decode() for s in self.attrs["channel_names"])
@channel_names.setter
def channel_names(self, value):
"""Set channel names."""
self.attrs["channel_names"] = np.array(value, dtype="S")
@property
def channels(self) -> tuple:
"""Channels."""
return tuple(self[n] for n in self.channel_names)
@property
def datasets(self) -> tuple:
"""Datasets."""
return tuple(v for _, v in self.items() if isinstance(v, h5py.Dataset))
@property
def kind(self):
"""Kind."""
if "kind" not in self.attrs.keys():
self.attrs["kind"] = "None"
value = self.attrs["kind"]
return value if not value == "None" else None
@property
def ndim(self) -> int:
"""Get number of dimensions."""
try:
assert self._ndim is not None
except (AssertionError, AttributeError):
if len(self.variables) == 0:
self._ndim = 0
else:
self._ndim = self.variables[0].ndim
finally:
return self._ndim
@property
def shape(self) -> tuple:
"""Shape."""
try:
assert self._shape is not None
except (AssertionError, AttributeError):
self._shape = wt_kit.joint_shape(*self.variables)
finally:
return self._shape
@property
def size(self) -> int:
"""Size."""
return functools.reduce(operator.mul, self.shape)
@property
def source(self):
"""Source."""
if "source" not in self.attrs.keys():
self.attrs["source"] = "None"
value = self.attrs["source"]
return value if not value == "None" else None
@property
def units(self) -> tuple:
"""All axis units."""
return tuple(a.units for a in self._axes)
@property
def constant_units(self) -> tuple:
"""All constant units."""
return tuple(a.units for a in self._constants)
@property
def variable_names(self) -> tuple:
"""Variable names."""
if "variable_names" not in self.attrs.keys():
self.attrs["variable_names"] = np.array([], dtype="S")
return tuple(s.decode() for s in self.attrs["variable_names"])
@variable_names.setter
def variable_names(self, value):
"""Set variable names."""
self.attrs["variable_names"] = np.array(value, dtype="S")
@property
def variables(self) -> tuple:
"""Variables."""
try:
assert self._variables is not None
except (AssertionError, AttributeError):
self._variables = [self[n] for n in self.variable_names]
finally:
return tuple(self._variables)
@property
def _leaf(self):
return "{0} {1}".format(self.natural_name, self.shape)
def _on_axes_updated(self):
"""Method to run when axes are changed in any way.
Propagates updated axes properly.
"""
# update attrs
self.attrs["axes"] = np.array([a.identity.encode() for a in self._axes], dtype="S")
# remove old attributes
while len(self._current_axis_identities_in_natural_namespace) > 0:
key = self._current_axis_identities_in_natural_namespace.pop(0)
try:
delattr(self, key)
except AttributeError:
pass # already gone
# populate new attributes
for a in self._axes:
key = a.natural_name
setattr(self, key, a)
self._current_axis_identities_in_natural_namespace.append(key)
def _on_constants_updated(self):
"""Method to run when constants are changed in any way.
Propagates updated constants properly.
"""
# update attrs
self.attrs["constants"] = np.array(
[a.identity.encode() for a in self._constants], dtype="S"
)
def _print_branch(self, prefix, depth, verbose):
def print_leaves(prefix, lis, vline=True):
for i, item in enumerate(lis):
if vline:
a = "│ "
else:
a = " "
if i + 1 == len(lis):
b = "└── "
else:
b = "├── "
s = prefix + a + b + "{0}: {1}".format(i, item._leaf)
print(s)
if verbose:
# axes
print(prefix + "├── axes")
print_leaves(prefix, self.axes)
# constants
print(prefix + "├── constants")
print_leaves(prefix, self.constants)
# variables
print(prefix + "├── variables")
print_leaves(prefix, self.variables)
# channels
print(prefix + "└── channels")
print_leaves(prefix, self.channels, vline=False)
else:
# axes
s = "axes: "
s += ", ".join(["{0} ({1})".format(a.expression, a.units) for a in self.axes])
print(prefix + "├── " + s)
# constants
s = "constants: "
s += ", ".join(
["{0} ({1} {2})".format(a.expression, a.value, a.units) for a in self.constants]
)
print(prefix + "├── " + s)
# channels
s = "channels: "
s += ", ".join(self.channel_names)
print(prefix + "└── " + s)
def bring_to_front(self, channel):
"""Bring a specific channel to the zero-indexed position in channels.
All other channels get pushed back but remain in order.
Parameters
----------
channel : int or str
Channel index or name.
"""
channel_index = wt_kit.get_index(self.channel_names, channel)
new = list(self.channel_names)
new.insert(0, new.pop(channel_index))
self.channel_names = new
def chop(self, *args, at={}, parent=None, verbose=True) -> wt_collection.Collection:
"""Divide the dataset into its lower-dimensionality components.
Parameters
----------
axis : str or int (args)
Axes of the returned data objects. Strings refer to the names of
axes in this object, integers refer to their index. Provide multiple
axes to return multidimensional data objects.
at : dict (optional)
Choice of position along an axis. Keys are axis names, values are lists
``[position, input units]``. If exact position does not exist,
the closest valid position is used.
parent : WrightTools Collection instance (optional)
Collection to place the new "chop" collection within. Default is
None (new parent).
verbose : bool (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools Collection
Collection of chopped data objects.
Examples
--------
>>> data.axis_names
['d2', 'w1', 'w2']
Get all w1 wigners.
>>> datas = data.chop('d2', 'w1')
>>> len(datas)
51
Get 2D frequency at d2=0 fs.
>>> datas = data.chop('w1', 'w2', at={'d2': [0, 'fs']})
>>> len(datas)
0
>>> datas[0].axis_names
['w1', 'w2']
>>> datas[0].d2[:]
0.
See Also
--------
collapse
Collapse the dataset along one axis.
split
Split the dataset while maintaining its dimensionality.
"""
from ._axis import operators, operator_to_identifier
# parse args
args = list(args)
for i, arg in enumerate(args):
if isinstance(arg, int):
args[i] = self._axes[arg].natural_name
elif isinstance(arg, str):
# same normalization that occurs in the natural_name @property
arg = arg.strip()
for op in operators:
arg = arg.replace(op, operator_to_identifier[op])
args[i] = wt_kit.string2identifier(arg)
# normalize the at keys to the natural name
for k in [ak for ak in at.keys() if type(ak) == str]:
for op in operators:
if op in k:
nk = k.replace(op, operator_to_identifier[op])
at[nk] = at[k]
at.pop(k)
k = nk
# get output collection
out = wt_collection.Collection(name="chop", parent=parent)
# get output shape
kept = args + [ak for ak in at.keys() if type(ak) == str]
kept_axes = [self._axes[self.axis_names.index(a)] for a in kept]
removed_axes = [a for a in self._axes if a not in kept_axes]
removed_shape = wt_kit.joint_shape(*removed_axes)
if removed_shape == ():
removed_shape = (1,) * self.ndim
removed_shape = list(removed_shape)
for i in at.keys():
if type(i) == int:
removed_shape[i] = 1
for ax in kept_axes:
if ax.shape.count(1) == ax.ndim - 1:
removed_shape[ax.shape.index(ax.size)] = 1
removed_shape = tuple(removed_shape)
# iterate
i = 0
for idx in np.ndindex(removed_shape):
idx = np.array(idx, dtype=object)
idx[np.array(removed_shape) == 1] = slice(None)
for axis, point in at.items():
if type(axis) == int:
idx[axis] = point
continue
point, units = point
destination_units = self._axes[self.axis_names.index(axis)].units
point = wt_units.converter(point, units, destination_units)
axis_index = self.axis_names.index(axis)
axis = self._axes[axis_index]
idx_index = np.array(axis.shape) > 1
if np.sum(idx_index) > 1:
raise wt_exceptions.MultidimensionalAxisError("chop", axis.natural_name)
idx_index = list(idx_index).index(True)
idx[idx_index] = np.argmin(np.abs(axis[tuple(idx)] - point))
data = out.create_data(name="chop%03i" % i)
for v in self.variables:
kwargs = {}
kwargs["name"] = v.natural_name
kwargs["values"] = v[idx]
kwargs["units"] = v.units
kwargs["label"] = v.label
kwargs.update(v.attrs)
data.create_variable(**kwargs)
for c in self.channels:
kwargs = {}
kwargs["name"] = c.natural_name
kwargs["values"] = c[idx]
kwargs["units"] = c.units
kwargs["label"] = c.label
kwargs["signed"] = c.signed
kwargs.update(c.attrs)
data.create_channel(**kwargs)
new_axes = [a.expression for a in kept_axes if a.expression not in at.keys()]
new_axis_units = [a.units for a in kept_axes if a.expression not in at.keys()]
data.transform(*new_axes)
for const in self.constant_expressions:
data.create_constant(const, verbose=False)
for ax in self.axis_expressions:
if ax not in new_axes:
data.create_constant(ax, verbose=False)
for j, units in enumerate(new_axis_units):
data.axes[j].convert(units)
i += 1
out.flush()
# return
if verbose:
print("chopped data into %d piece(s)" % len(out), "in", new_axes)
return out
def gradient(self, axis, *, channel=0):
"""
Compute the gradient along one axis.
New channels have names ``<channel name>_<axis name>_gradient``.
Parameters
----------
axis : int or str
The axis to differentiate along.
If given as an integer, the axis in the underlying array is used,
and unitary spacing is assumed.
If given as a string, the axis must exist, and be a 1D array-aligned axis.
(i.e. have a shape with a single value which is not ``1``)
The axis to collapse along is inferred from the shape of the axis.
channel : int or str
The channel to differentiate.
Default is the first channel.
"""
# get axis index --------------------------------------------------------------------------
if isinstance(axis, int):
axis_index = axis
elif isinstance(axis, str):
index = self.axis_names.index(axis)
axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
if len(axes) > 1:
raise wt_exceptions.MultidimensionalAxisError(axis, "collapse")
elif len(axes) == 0:
raise wt_exceptions.ValueError(
"Axis '{}' is a single point, cannot compute gradient".format(axis)
)
axis_index = axes[0]
else:
raise wt_exceptions.TypeError("axis: expected {int, str}, got %s" % type(axis))
channel_index = wt_kit.get_index(self.channel_names, channel)
channel = self.channel_names[channel_index]
if self[channel].shape[axis_index] == 1:
raise wt_exceptions.ValueError(
"Channel '{}' has a single point along Axis '{}', cannot compute gradient".format(
channel, axis
)
)
rtype = np.result_type(self[channel].dtype, float)
new = self.create_channel(
"{}_{}_gradient".format(channel, axis),
values=np.empty(self[channel].shape, dtype=rtype),
)
channel = self[channel]
if axis == axis_index:
new[:] = np.gradient(channel[:], axis=axis_index)
else:
new[:] = np.gradient(channel[:], self[axis].points, axis=axis_index)
def moment(self, axis, channel=0, moment=1, *, resultant=None):
    """Take the nth moment the dataset along one axis, adding lower rank channels.

    New channels have names ``<channel name>_<axis name>_moment_<moment num>``.

    Moment 0 is the integral of the slice.
    Moment 1 is the weighted average or "Center of Mass", normalized by the integral.
    Moment 2 is the variance, the central moment about the center of mass,
    normalized by the integral.
    Moments 3+ are central moments about the center of mass, normalized by the integral
    and by the standard deviation to the power of the moment.

    Moments, especially higher order moments, are susceptible to noise and baseline.
    It is recommended when used with real data to use :meth:`WrightTools.data.Channel.clip`
    in conjunction with moments to reduce effects of noise.

    Parameters
    ----------
    axis : int or str
        The axis to take the moment along.
        If given as an integer, the axis with that index is used.
        If given as a string, the axis with that name is used.
        The axis must exist, and be a 1D array-aligned axis.
        (i.e. have a shape with a single value which is not ``1``)
        The collapsed axis must be monotonic to produce correct results.
        The axis to collapse along is inferred from the shape of the axis.
    channel : int or str
        The channel to take the moment.
        If given as an integer, the channel with that index is used.
        If given as a string, the channel with that name is used.
        The channel must have values along the axis
        (i.e. its shape must not be ``1`` in the dimension for which the axis is not ``1``)
        Default is 0, the first channel.
    moment : int or tuple of int
        The moments to take.
        One channel will be created for each number given.
        Default is 1, the center of mass.
    resultant : tuple of int
        The resultant shape after the moment operation.
        By default, it is intuited by the axis along which the moment is being taken.
        This default only works if that axis is 1D, so resultant is required if a
        multidimensional axis is passed as the first argument.
        The requirement of monotonicity applies on a per pixel basis.

    See Also
    --------
    collapse
        Reduce dimensionality by some mathematical operation
    clip
        Set values above/below a threshold to a particular value
    WrightTools.kit.joint_shape
        Useful for setting `resultant` kwarg based off of axes not collapsed.
    """
    # get axis index --------------------------------------------------------------------------
    axis_index = None
    if resultant is not None:
        # infer the collapsed dimension: the one where the joint shape is >1 but
        # resultant is 1; every other dimension must match exactly
        for i, (s, r) in enumerate(zip(wt_kit.joint_shape(*self.axes), resultant)):
            if s != r and r == 1 and axis_index is None:
                axis_index = i
            elif s == r:
                continue
            else:
                raise wt_exceptions.ValueError(
                    f"Invalid resultant shape '{resultant}' for shape {wt_kit.joint_shape(*self.axes)}. "
                    + "Consider using `wt.kit.joint_shape` to join non-collapsed axes."
                )
    index = wt_kit.get_index(self.axis_names, axis)
    if axis_index is None:
        # no resultant given: the axis must be aligned with exactly one array dimension
        axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
        if len(axes) > 1:
            raise wt_exceptions.MultidimensionalAxisError(axis, "moment")
        elif len(axes) == 0:
            raise wt_exceptions.ValueError(
                "Axis {} is a single point, cannot compute moment".format(axis)
            )
        axis_index = axes[0]
    warnings.warn("moment", category=wt_exceptions.EntireDatasetInMemoryWarning)
    channel_index = wt_kit.get_index(self.channel_names, channel)
    channel = self.channel_names[channel_index]
    if self[channel].shape[axis_index] == 1:
        raise wt_exceptions.ValueError(
            "Channel '{}' has a single point along Axis '{}', cannot compute moment".format(
                channel, axis
            )
        )
    # the output channels keep full rank with a length-1 collapsed dimension
    new_shape = list(self[channel].shape)
    new_shape[axis_index] = 1
    channel = self[channel]  # rebind name from channel *name* to channel *dataset*
    axis_inp = axis  # keep the user's spelling for channel naming / messages
    axis = self.axes[index]
    x = axis[:]
    if np.any(np.isnan(x)):
        raise wt_exceptions.ValueError("Axis '{}' includes NaN".format(axis_inp))
    # NaNs in the channel are treated as zero signal
    y = np.nan_to_num(channel[:])
    try:
        moments = tuple(moment)
    except TypeError:
        # a single int was passed
        moments = (moment,)
    multiplier = 1
    if 0 in moments:
        # May be possible to optimize, probably doesn't need the sum
        # only matters for integral, all others normalize by integral
        multiplier = np.sign(
            np.sum(np.diff(x, axis=axis_index), axis=axis_index, keepdims=True)
        )
    for moment in moments:
        about = 0
        norm = 1
        if moment > 0:
            # moment 1+: normalize by the integral
            norm = np.trapz(y, x, axis=axis_index)
            norm = np.array(norm)
            norm.shape = new_shape
        if moment > 1:
            # moment 2+: center about the center of mass
            about = np.trapz(x * y, x, axis=axis_index)
            about = np.array(about)
            about.shape = new_shape
            about /= norm
        if moment > 2:
            # moment 3+: additionally normalize by sigma**moment
            sigma = np.trapz((x - about) ** 2 * y, x, axis=axis_index)
            sigma = np.array(sigma)
            sigma.shape = new_shape
            sigma /= norm
            sigma **= 0.5
            norm *= sigma ** moment
        values = np.trapz((x - about) ** moment * y, x, axis=axis_index)
        values = np.array(values)
        values.shape = new_shape
        values /= norm
        if moment == 0:
            # restore the sign lost to a decreasing (reversed) axis
            values *= multiplier
        self.create_channel(
            "{}_{}_{}_{}".format(channel.natural_name, axis_inp, "moment", moment),
            values=values,
        )
def collapse(self, axis, method="sum"):
    """Collapse the dataset along one axis, adding lower rank channels.

    New channels have names ``<channel name>_<axis name>_<method>``.

    Parameters
    ----------
    axis : int or str
        The axis to collapse along.
        If given as an integer, the axis in the underlying array is used.
        If given as a string, the axis must exist, and be a 1D array-aligned axis.
        (i.e. have a shape with a single value which is not ``1``)
        The axis to collapse along is inferred from the shape of the axis.
    method : {'average', 'sum', 'max', 'min'} (optional)
        The method of collapsing the given axis. Method may also be list
        of methods corresponding to the channels of the object. Default
        is sum. NaNs are ignored.
        Can also be a list, allowing for different treatment for varied channels.
        In this case, None indicates that no change to that channel should occur.

    Raises
    ------
    wt_exceptions.TypeError
        If `axis` or `method` is of an unsupported type.
    wt_exceptions.ValueError
        If `method` is unrecognized, or a method list does not match the
        number of channels.

    See Also
    --------
    chop
        Divide the dataset into its lower-dimensionality components.
    split
        Split the dataset while maintaining its dimensionality.
    moment
        Take the moment along a particular axis
    """
    # deprecated alias: integration is now spelled moment(moment=0)
    if method in ("int", "integrate"):
        warnings.warn(
            "integrate method of collapse is deprecated, use moment(moment=0) instead",
            wt_exceptions.VisibleDeprecationWarning,
        )
        for channel in self.channel_names:
            try:
                self.moment(axis, channel, moment=0)
                self.rename_channels(
                    **{self.channel_names[-1]: f"{channel}_{axis}_{method}"}, verbose=False
                )
            except wt_exceptions.ValueError:
                pass  # may have some channels which fail, do so silently
        return
    # get axis index --------------------------------------------------------------------------
    if isinstance(axis, int):
        axis_index = axis
    elif isinstance(axis, str):
        index = self.axis_names.index(axis)
        axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
        if len(axes) > 1:
            raise wt_exceptions.MultidimensionalAxisError(axis, "collapse")
        elif len(axes) == 0:
            raise wt_exceptions.ValueError(
                "Axis {} is a single point, cannot collapse".format(axis)
            )
        axis_index = axes[0]
    else:
        raise wt_exceptions.TypeError("axis: expected {int, str}, got %s" % type(axis))
    new_shape = list(self.shape)
    new_shape[axis_index] = 1
    # NaN-ignoring reducers keyed by every accepted spelling
    func = {
        "sum": np.nansum,
        "max": np.nanmax,
        "maximum": np.nanmax,
        "min": np.nanmin,
        "minimum": np.nanmin,
        "ave": np.nanmean,
        "average": np.nanmean,
        "mean": np.nanmean,
    }
    # methods ---------------------------------------------------------------------------------
    if isinstance(method, str):
        # one method, broadcast to every channel
        methods = [method for _ in self.channels]
    elif isinstance(method, list):
        if len(method) == len(self.channels):
            methods = method
        else:
            raise wt_exceptions.ValueError(
                "method argument must have same number of elements as there are channels"
            )
    else:
        # previously an unsupported type fell through to an UnboundLocalError on
        # `methods`; fail loudly and clearly instead
        raise wt_exceptions.TypeError(
            "method: expected {str, list}, got %s" % type(method)
        )
    for m in methods:
        # None is documented to mean "leave this channel alone" and is handled
        # by the main loop below, so it must pass validation
        if m is not None and m not in func.keys():
            raise wt_exceptions.ValueError("method '{}' not recognized".format(m))
    warnings.warn("collapse", category=wt_exceptions.EntireDatasetInMemoryWarning)
    # collapse --------------------------------------------------------------------------------
    for method, channel in zip(methods, self.channel_names):
        if method is None:
            continue
        if self[channel].shape[axis_index] == 1:
            continue  # Cannot collapse any further, don't clutter data object
        new_shape = list(self[channel].shape)
        new_shape[axis_index] = 1
        rtype = self[channel].dtype
        if method in ["ave", "average", "mean"]:
            # averaging an integer channel must promote to float
            rtype = np.result_type(self[channel].dtype, float)
        new = self.create_channel(
            "{}_{}_{}".format(channel, axis, method),
            values=np.empty(new_shape, dtype=rtype),
            units=self[channel].units,
        )
        new[:] = func[method](self[channel], axis=axis_index, keepdims=True)
def convert(self, destination_units, *, convert_variables=False, verbose=True):
    """Convert all compatable axes and constants to given units.

    Parameters
    ----------
    destination_units : str
        Destination units.
    convert_variables : boolean (optional)
        Toggle conversion of stored arrays. Default is False
    verbose : bool (optional)
        Toggle talkback. Default is True.

    See Also
    --------
    Axis.convert
        Convert a single axis object to compatable units. Call on an
        axis object in data.axes.
    """
    # axes first, then constants; both share the same conversion machinery
    for label, group in (("axis", self.axes), ("constant", self.constants)):
        for obj in group:
            if not wt_units.is_valid_conversion(obj.units, destination_units):
                continue
            previous_units = obj.units
            obj.convert(destination_units, convert_variables=convert_variables)
            if verbose:
                print(
                    "{0} {1} converted from {2} to {3}".format(
                        label, obj.expression, previous_units, destination_units
                    )
                )
    if convert_variables:
        # also rewrite the stored variable arrays themselves
        for var in self.variables:
            if wt_units.is_valid_conversion(var.units, destination_units):
                previous_units = var.units
                var.convert(destination_units)
                if verbose:
                    print(
                        "variable {0} converted from {1} to {2}".format(
                            var.natural_name, previous_units, destination_units
                        )
                    )
    # refresh cached axis/constant state
    self._on_axes_updated()
    self._on_constants_updated()
def create_channel(
    self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs
) -> Channel:
    """Append a new channel.

    Parameters
    ----------
    name : string
        Unique name for this channel.
    values : array (optional)
        Array. If None, an empty array equaling the data shape is
        created. Default is None.
    shape : tuple of int
        Shape to use. Must broadcast with the full shape.
        Only used if `values` is None.
        Default is the full shape of self.
    units : string (optional)
        Channel units. Default is None.
    dtype : numpy.dtype (optional)
        dtype to use for dataset, default is np.float64.
        Only used if `values` is None.
    kwargs : dict
        Additional keyword arguments passed to Channel instantiation.

    Returns
    -------
    Channel
        Created channel.
    """
    if name in self.channel_names:
        # already present: warn and hand back the existing channel instead of failing
        warnings.warn(name, wt_exceptions.ObjectExistsWarning)
        return self[name]
    elif name in self.variable_names:
        # channels and variables share one namespace
        raise wt_exceptions.NameNotUniqueError(name)
    # kwargs forwarded to the (h5py-style) require_dataset call
    require_kwargs = {"chunks": True}
    if values is None:
        if shape is None:
            require_kwargs["shape"] = self.shape
        else:
            require_kwargs["shape"] = shape
        if dtype is None:
            require_kwargs["dtype"] = np.dtype(np.float64)
        else:
            require_kwargs["dtype"] = dtype
        # float/complex/datetime/timedelta kinds can hold NaN; others fill with 0
        if require_kwargs["dtype"].kind in "fcmM":
            require_kwargs["fillvalue"] = np.nan
        else:
            require_kwargs["fillvalue"] = 0
    else:
        # shape and dtype are taken from the provided array
        require_kwargs["data"] = values
        require_kwargs["shape"] = values.shape
        require_kwargs["dtype"] = values.dtype
    if np.prod(require_kwargs["shape"]) == 1:
        # single-element datasets cannot be chunked
        require_kwargs["chunks"] = None
    # create dataset
    dataset_id = self.require_dataset(name=name, **require_kwargs).id
    channel = Channel(self, dataset_id, units=units, **kwargs)
    # finish
    # names are stored as bytes in the hdf5 attrs
    self.attrs["channel_names"] = np.append(self.attrs["channel_names"], name.encode())
    return channel
def create_variable(
    self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs
) -> Variable:
    """Add new child variable.

    Parameters
    ----------
    name : string
        Unique identifier.
    values : array-like (optional)
        Array to populate variable with. If None, an variable will be filled with NaN.
        Default is None.
    shape : tuple of int
        Shape to use. must broadcast with the full shape.
        Only used if `values` is None.
        Default is the full shape of self.
    units : string (optional)
        Variable units. Default is None.
    dtype : numpy.dtype (optional)
        dtype to use for dataset, default is np.float64.
        Only used if `values` is None.
    kwargs
        Additional kwargs to variable instantiation.

    Returns
    -------
    WrightTools Variable
        New child variable.
    """
    if name in self.variable_names:
        # already present: warn and hand back the existing variable instead of failing
        warnings.warn(name, wt_exceptions.ObjectExistsWarning)
        return self[name]
    elif name in self.channel_names:
        # channels and variables share one namespace
        raise wt_exceptions.NameNotUniqueError(name)
    if values is None:
        if shape is None:
            shape = self.shape
        if dtype is None:
            dtype = np.dtype(np.float64)
        # float/complex/datetime/timedelta kinds can hold NaN; others fill with 0
        if dtype.kind in "fcmM":
            fillvalue = np.nan
        else:
            fillvalue = 0
    else:
        # shape and dtype are taken from the provided array
        shape = values.shape
        dtype = values.dtype
        fillvalue = None
    # create dataset
    id = self.require_dataset(
        name=name, data=values, shape=shape, dtype=dtype, fillvalue=fillvalue
    ).id
    variable = Variable(self, id, units=units, **kwargs)
    # finish
    # invalidate the cached variables list; names are stored as bytes in hdf5 attrs
    self._variables = None
    self.attrs["variable_names"] = np.append(self.attrs["variable_names"], name.encode())
    return variable
def get_nadir(self, channel=0) -> tuple:
    """Get the coordinates, in units, of the minimum in a channel.

    Parameters
    ----------
    channel : int or str (optional)
        Channel. Default is 0.

    Returns
    -------
    generator of numbers
        Coordinates in units for each axis.
    """
    # resolve channel argument to an index
    if isinstance(channel, int):
        index = channel
    elif isinstance(channel, str):
        index = self.channel_names.index(channel)
    else:
        raise TypeError("channel: expected {int, str}, got %s" % type(channel))
    target = self.channels[index]
    # locate the minimum, then read each axis at that position
    position = target.argmin()
    return tuple(a[position] for a in self._axes)
def get_zenith(self, channel=0) -> tuple:
    """Get the coordinates, in units, of the maximum in a channel.

    Parameters
    ----------
    channel : int or str (optional)
        Channel. Default is 0.

    Returns
    -------
    generator of numbers
        Coordinates in units for each axis.
    """
    # resolve channel argument to an index
    if isinstance(channel, int):
        index = channel
    elif isinstance(channel, str):
        index = self.channel_names.index(channel)
    else:
        raise TypeError("channel: expected {int, str}, got %s" % type(channel))
    target = self.channels[index]
    # locate the maximum, then read each axis at that position
    position = target.argmax()
    return tuple(a[position] for a in self._axes)
def heal(self, channel=0, method="linear", fill_value=np.nan, verbose=True):
    """
    Remove nans from channel using interpolation.

    Parameters
    ----------
    channel : int or str (optional)
        Channel to heal. Default is 0.
    method : {'linear', 'nearest', 'cubic'} (optional)
        The interpolation method. Note that cubic interpolation is only
        possible for 1D and 2D data. See `griddata`__ for more information.
        Default is linear.
    fill_value : number-like (optional)
        The value written to pixels that cannot be filled by interpolation.
        Default is nan.
    verbose : bool (optional)
        Toggle talkback. Default is True.

    __ http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html

    .. note:: Healing may take several minutes for large datasets.
       Interpolation time goes as nearest, linear, then cubic.
    """
    warnings.warn("heal", category=wt_exceptions.EntireDatasetInMemoryWarning)
    timer = wt_kit.Timer(verbose=False)
    with timer:
        # channel
        if isinstance(channel, int):
            channel_index = channel
        elif isinstance(channel, str):
            channel_index = self.channel_names.index(channel)
        else:
            raise TypeError("channel: expected {int, str}, got %s" % type(channel))
        channel = self.channels[channel_index]
        values = self.channels[channel_index][:]
        # full coordinate grid over every axis
        points = [axis[:] for axis in self._axes]
        xi = tuple(np.meshgrid(*points, indexing="ij"))
        # 'undo' gridding: one row per axis plus a final row of channel values
        arr = np.zeros((len(self._axes) + 1, values.size))
        for i in range(len(self._axes)):
            arr[i] = xi[i].flatten()
        arr[-1] = values.flatten()
        # remove nans: drop any column with a nan in coordinates or value
        arr = arr[:, ~np.isnan(arr).any(axis=0)]
        # grid data wants tuples
        tup = tuple([arr[i] for i in range(len(arr) - 1)])
        # grid data: re-interpolate the cleaned scatter back onto the full grid
        out = griddata(tup, arr[-1], xi, method=method, fill_value=fill_value)
        self.channels[channel_index][:] = out
    # print
    if verbose:
        print(
            "channel {0} healed in {1} seconds".format(
                channel.name, np.around(timer.interval, decimals=3)
            )
        )
def level(self, channel, axis, npts, *, verbose=True):
    """Subtract the average value of npts at the edge of a given axis.

    Parameters
    ----------
    channel : int or str
        Channel to level.
    axis : int
        Axis to level along.
    npts : int
        Number of points to average for each slice. Positive numbers
        take points at leading indicies and negative numbers take points
        at trailing indicies.
    verbose : bool (optional)
        Toggle talkback. Default is True.

    Raises
    ------
    wt_exceptions.ValueError
        If `npts` is zero.
    """
    warnings.warn("level", category=wt_exceptions.EntireDatasetInMemoryWarning)
    channel_index = wt_kit.get_index(self.channel_names, channel)
    channel = self.channels[channel_index]
    # verify npts not zero
    npts = int(npts)
    if npts == 0:
        raise wt_exceptions.ValueError("npts must not be zero")
    # build a selector taking npts points at the leading (npts > 0) or
    # trailing (npts < 0) edge of the given axis
    ss = [slice(None)] * self.ndim
    if npts > 0:
        ss[axis] = slice(0, npts, None)
    else:
        ss[axis] = slice(npts, None, None)
    # get subtrahend
    # index with a tuple: indexing with a *list* of slices is deprecated in
    # NumPy (removed in recent versions) and ambiguous for dataset objects
    subtrahend = np.nanmean(channel[tuple(ss)], axis=axis)
    if self.ndim > 1:
        # restore the collapsed axis so the subtraction broadcasts
        subtrahend = np.expand_dims(subtrahend, axis=axis)
    # level
    channel -= subtrahend
    # finish
    # the baseline is now zero by construction
    channel._null = 0
    if verbose:
        print("channel {0} leveled along axis {1}".format(channel.natural_name, axis))
def map_variable(
    self, variable, points, input_units="same", *, name=None, parent=None, verbose=True
) -> "Data":
    """Map points of an axis to new points using linear interpolation.

    Out-of-bounds points are written nan.

    Parameters
    ----------
    variable : string
        The variable to map onto.
    points : array-like or int
        If array, the new points. If int, new points will have the same
        limits, with int defining the number of evenly spaced points
        between.
    input_units : str (optional)
        The units of the new points. Default is same, which assumes
        the new points have the same units as the axis.
    name : string (optional)
        The name of the new data object. If None, generated from
        natural_name. Default is None.
    parent : WrightTools.Collection (optional)
        Parent of new data object. If None, data is made at root of a
        new temporary file.
    verbose : bool (optional)
        Toggle talkback. Default is True.

    Returns
    -------
    WrightTools.Data
        New data object.
    """
    # get variable index
    variable_index = wt_kit.get_index(self.variable_names, variable)
    variable = self.variables[variable_index]
    # get points
    if isinstance(points, int):
        # int means "this many evenly spaced points over the current range"
        points = np.linspace(variable.min(), variable.max(), points)
    points = np.array(points)
    # points dimensionality
    if points.ndim < variable.ndim:
        # pad with length-1 dimensions so points broadcasts like the variable
        for i, d in enumerate(variable.shape):
            if d == 1:
                points = np.expand_dims(points, axis=i)
    # convert points
    if input_units == "same":
        pass
    else:
        points = wt_units.converter(points, input_units, variable.units)
    # construct new data object
    # copy all attrs except the structural ones that the new object rebuilds
    special = ["name", "axes", "constants", "channel_names", "variable_names"]
    kwargs = {k: v for k, v in self.attrs.items() if k not in special}
    if name is None:
        name = "{0}_{1}_mapped".format(self.natural_name, variable.natural_name)
    kwargs["name"] = name
    kwargs["parent"] = parent
    out = Data(**kwargs)
    # mapped variable
    values = points
    out.create_variable(values=values, **variable.attrs)
    # orthogonal variables
    # variables that do not share a dimension with the mapped one copy over unchanged
    for v in self.variables:
        if wt_kit.orthogonal(v.shape, variable.shape):
            out.create_variable(values=v[:], **v.attrs)
    out.transform(*self.axis_expressions)
    # interpolate
    if self.ndim == 1:

        def interpolate(dataset, points):
            # 1D: simple linear interpolation; out-of-bounds -> nan
            function = scipy.interpolate.interp1d(variable[:], dataset[:], bounds_error=False)
            return function(points)

    else:
        # ND: scattered linear interpolation over the full coordinate cloud
        pts = np.array([a.full.flatten() for a in self.axes]).T
        out_pts = np.array([a.full.flatten() for a in out.axes]).T

        def interpolate(dataset, points):
            values = dataset.full.flatten()
            function = scipy.interpolate.LinearNDInterpolator(pts, values, rescale=True)
            new = function(out_pts)
            new.shape = out.shape
            return new

    # interpolate every remaining variable and every channel onto the new grid
    for v in self.variables:
        if v.natural_name not in out.variable_names:
            out.create_variable(values=interpolate(v, points), **v.attrs)
    out.variable_names = self.variable_names  # enforce old order
    out._variables = None  # force regeneration of variables @property
    for channel in self.channels:
        out.create_channel(values=interpolate(channel, points), **channel.attrs)
    # finish
    if verbose:
        print("data mapped from {0} to {1}".format(self.shape, out.shape))
    return out
def offset(
    self,
    points,
    offsets,
    along,
    offset_axis,
    units="same",
    offset_units="same",
    mode="valid",
    method="linear",
    verbose=True,
):
    """Offset one axis based on another axis' values.

    Useful for correcting instrumental artifacts such as zerotune.

    .. warning:: Currently not implemented — this method raises
       ``NotImplementedError`` unconditionally; everything below the raise
       is dead legacy code kept for reference.

    Parameters
    ----------
    points : 1D array-like
        Points.
    offsets : 1D array-like
        Offsets.
    along : str or int
        Axis that points array lies along.
    offset_axis : str or int
        Axis to offset using offsets.
    units : str (optional)
        Units of points array.
    offset_units : str (optional)
        Units of offsets aray.
    mode : {'valid', 'full', 'old'} (optional)
        Define how far the new axis will extend. Points outside of valid
        interpolation range will be written nan.
    method : {'linear', 'nearest', 'cubic'} (optional)
        The interpolation method. Note that cubic interpolation is only
        possible for 1D and 2D data. See `griddata`__ for more information.
        Default is linear.
    verbose : bool (optional)
        Toggle talkback. Default is True.

    __ http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html

    >>> points  # an array of w1 points
    >>> offsets  # an array of d1 corrections
    >>> data.offset(points, offsets, 'w1', 'd1')
    """
    raise NotImplementedError
    # NOTE(review): everything below is unreachable dead code (see raise above).
    # axis ------------------------------------------------------------------------------------
    if isinstance(along, int):
        axis_index = along
    elif isinstance(along, str):
        axis_index = self.axis_names.index(along)
    else:
        raise TypeError("along: expected {int, str}, got %s" % type(along))
    axis = self._axes[axis_index]
    # values & points -------------------------------------------------------------------------
    # get values, points, units
    if units == "same":
        input_units = axis.units
    else:
        input_units = units
    # check offsets is 1D or 0D
    if len(offsets.shape) == 1:
        pass
    else:
        raise RuntimeError("values must be 1D or 0D in offset!")
    # check if units is compatible, convert
    dictionary = getattr(wt_units, axis.units_kind)
    if input_units in dictionary.keys():
        pass
    else:
        raise RuntimeError("units incompatible in offset!")
    points = wt_units.converter(points, input_units, axis.units)
    # create correction array
    function = interp1d(points, offsets, bounds_error=False)
    corrections = function(axis[:])
    # remove nans
    finite_indicies = np.where(np.isfinite(corrections))[0]
    left_pad_width = finite_indicies[0]
    right_pad_width = len(corrections) - finite_indicies[-1] - 1
    corrections = np.pad(
        corrections[np.isfinite(corrections)],
        (int(left_pad_width), int(right_pad_width)),
        mode="edge",
    )
    # do correction ---------------------------------------------------------------------------
    # transpose so axis is last
    transpose_order = np.arange(len(self._axes))
    transpose_order[axis_index] = len(self._axes) - 1
    transpose_order[-1] = axis_index
    self.transpose(transpose_order, verbose=False)
    # get offset axis index
    if isinstance(offset_axis, int):
        offset_axis_index = offset_axis
    elif isinstance(offset_axis, str):
        offset_axis_index = self.axis_names.index(offset_axis)
    else:
        raise TypeError("offset_axis: expected {int, str}, got %s" % type(offset_axis))
    # new points
    new_points = [a[:] for a in self._axes]
    old_offset_axis_points = self._axes[offset_axis_index][:]
    spacing = abs(
        (old_offset_axis_points.max() - old_offset_axis_points.min())
        / float(len(old_offset_axis_points))
    )
    if mode == "old":
        new_offset_axis_points = old_offset_axis_points
    elif mode == "valid":
        # only the range reachable after every correction
        _max = old_offset_axis_points.max() + corrections.min()
        _min = old_offset_axis_points.min() + corrections.max()
        n = int(abs(np.ceil((_max - _min) / spacing)))
        new_offset_axis_points = np.linspace(_min, _max, n)
    elif mode == "full":
        # the full range touched by any correction
        _max = old_offset_axis_points.max() + corrections.max()
        _min = old_offset_axis_points.min() + corrections.min()
        n = np.ceil((_max - _min) / spacing)
        new_offset_axis_points = np.linspace(_min, _max, n)
    new_points[offset_axis_index] = new_offset_axis_points
    new_xi = tuple(np.meshgrid(*new_points, indexing="ij"))
    xi = tuple(np.meshgrid(*[a[:] for a in self._axes], indexing="ij"))
    for channel in self.channels:
        # 'undo' gridding
        arr = np.zeros((len(self._axes) + 1, channel[:].size))
        for i in range(len(self._axes)):
            arr[i] = xi[i].flatten()
        arr[-1] = channel[:].flatten()
        # do corrections
        corrections = list(corrections)
        corrections = corrections * int((len(arr[0]) / len(corrections)))
        arr[offset_axis_index] += corrections
        # grid data
        tup = tuple([arr[i] for i in range(len(arr) - 1)])
        # note that rescale is crucial in this operation
        out = griddata(tup, arr[-1], new_xi, method=method, fill_value=np.nan, rescale=True)
        channel[:] = out
    self._axes[offset_axis_index][:] = new_offset_axis_points
    # transpose out
    self.transpose(transpose_order, verbose=False)
def print_tree(self, *, verbose=True):
    """Print a ascii-formatted tree representation of the data contents."""
    header = "{0} ({1})".format(self.natural_name, self.filepath)
    print(header)
    # delegate the recursive rendering to the shared branch printer
    self._print_branch("", depth=0, verbose=verbose)
def prune(self, keep_channels=True, *, verbose=True):
    """Remove unused variables and (optionally) channels from the Data object.

    Unused variables are those that are not included in either axes or constants.
    Unused channels are those not specified in keep_channels, or the first channel.

    Parameters
    ----------
    keep_channels : boolean or int or str or tuple
        If False, removes all but the first channel.
        If int or str, removes all but that index/name channel.
        If tuple, removes all channels except those in the tuple by index or name.
        Default is True: do not delete channels
    verbose : boolean
        Toggle talkback. Default is True.
    """
    # drop every variable that no axis or constant references
    for v in self.variables:
        referenced = wt_kit.flatten_list(
            [ax.variables for ax in self._axes + self._constants]
        )
        if not any(v == var for var in referenced):
            self.remove_variable(v.natural_name, implied=False, verbose=verbose)
    if keep_channels is not True:
        # normalize keep_channels into a tuple of indices and/or names;
        # a bare str or non-iterable becomes a one-element tuple
        try:
            if isinstance(keep_channels, str):
                raise TypeError
            keepers = tuple(keep_channels)
        except TypeError:
            keepers = (keep_channels,)
        for position, ch in enumerate(self.channels):
            if position in keepers or ch.natural_name in keepers:
                continue
            self.remove_channel(ch.natural_name, verbose=verbose)
def remove_channel(self, channel, *, verbose=True):
    """Remove channel from data.

    Parameters
    ----------
    channel : int or str
        Channel index or name to remove.
    verbose : boolean (optional)
        Toggle talkback. Default is True.
    """
    # accept either an index or a name
    index = wt_kit.get_index(self.channel_names, channel)
    names = list(self.channel_names)
    removed_name = names.pop(index)
    # drop the dataset, then commit the shortened name list
    del self[removed_name]
    self.channel_names = names
    if verbose:
        print("channel {0} removed".format(removed_name))
def remove_variable(self, variable, *, implied=True, verbose=True):
    """Remove variable from data.

    Parameters
    ----------
    variable : int or str
        Variable index or name to remove.
    implied : boolean (optional)
        Toggle deletion of other variables that start with the same
        name. Default is True.
    verbose : boolean (optional)
        Toggle talkback. Default is True.
    """
    if isinstance(variable, int):
        variable = self.variable_names[variable]
    # find all of the implied variables
    removed = []
    if implied:
        # prefix match: e.g. removing "w1" also removes "w1_points" etc.
        for n in self.variable_names:
            if n.startswith(variable):
                removed.append(n)
    else:
        removed = [variable]
    # check that axes will not be ruined
    # validate everything up front, before any deletion takes place
    for n in removed:
        for a in self._axes:
            if n in [v.natural_name for v in a.variables]:
                message = "{0} is contained in axis {1}".format(n, a.expression)
                raise RuntimeError(message)
        for c in self._constants:
            # constants merely warn (they can be recomputed), axes hard-fail
            if n in [v.natural_name for v in c.variables]:
                warnings.warn(
                    "Variable being removed used in a constant",
                    wt_exceptions.WrightToolsWarning,
                )
    # do removal
    for n in removed:
        variable_index = wt_kit.get_index(self.variable_names, n)
        new = list(self.variable_names)
        name = new.pop(variable_index)
        del self[name]
        self.variable_names = new
        # invalidate the cached variables list
        self._variables = None
    # finish
    if verbose:
        print("{0} variable(s) removed:".format(len(removed)))
        for n in removed:
            print("  {0}".format(n))
def rename_channels(self, *, verbose=True, **kwargs):
    """Rename a set of channels.

    Parameters
    ----------
    kwargs
        Keyword arguments of the form current:'new'.
    verbose : boolean (optional)
        Toggle talkback. Default is True
    """
    # ensure that items will remain unique
    # a new name may collide with an existing item only if that item is itself
    # being renamed in this same call (a swap)
    changed = kwargs.keys()
    for k, v in kwargs.items():
        if v not in changed and v in self.keys():
            raise wt_exceptions.NameNotUniqueError(v)
    # compile references to items that are changing
    new = {}
    for k, v in kwargs.items():
        obj = self[k]
        index = self.channel_names.index(k)
        # rename
        new[v] = obj, index  # remember object and its original position
        # evict the stale path from the instance cache before renaming
        Group._instances.pop(obj.fullpath, None)
        obj.natural_name = str(v)
        # remove old references
        del self[k]
    # apply new references
    names = list(self.channel_names)
    for v, value in new.items():
        obj, index = value
        self[v] = obj
        names[index] = v  # keep each channel at its original position
    self.channel_names = names
    # finish
    if verbose:
        print("{0} channel(s) renamed:".format(len(kwargs)))
        for k, v in kwargs.items():
            print("  {0} --> {1}".format(k, v))
def rename_variables(self, *, implied=True, verbose=True, **kwargs):
    """Rename a set of variables.

    Parameters
    ----------
    kwargs
        Keyword arguments of the form current:'new'.
    implied : boolean (optional)
        Toggle inclusion of other variables that start with the same
        name. Default is True.
    verbose : boolean (optional)
        Toggle talkback. Default is True
    """
    # find all of the implied variables
    kwargs = collections.OrderedDict(kwargs)
    if implied:
        # expand each mapping by prefix: renaming "w1" also renames "w1_points" etc.
        new = collections.OrderedDict()
        for k, v in kwargs.items():
            for n in self.variable_names:
                if n.startswith(k):
                    new[n] = n.replace(k, v, 1)
        kwargs = new
    # ensure that items will remain unique
    # a new name may collide with an existing item only if that item is itself
    # being renamed in this same call (a swap)
    changed = kwargs.keys()
    for k, v in kwargs.items():
        if v not in changed and v in self.keys():
            raise wt_exceptions.NameNotUniqueError(v)
    # compile references to items that are changing
    new = {}
    for k, v in kwargs.items():
        obj = self[k]
        index = self.variable_names.index(k)
        # rename
        new[v] = obj, index  # remember object and its original position
        # evict the stale path from the instance cache before renaming
        Group._instances.pop(obj.fullpath, None)
        obj.natural_name = str(v)
        # remove old references
        del self[k]
    # apply new references
    names = list(self.variable_names)
    for v, value in new.items():
        obj, index = value
        self[v] = obj
        names[index] = v  # keep each variable at its original position
    self.variable_names = names
    # rewrite axis expressions in two passes (old name -> placeholder -> new
    # name) so that overlapping old/new names cannot clobber each other
    units = self.units
    new = list(self.axis_expressions)
    for i, v in enumerate(kwargs.keys()):
        for j, n in enumerate(new):
            new[j] = n.replace(v, "{%i}" % i)
    for i, n in enumerate(new):
        new[i] = n.format(*kwargs.values())
    self.transform(*new)
    # transform resets units; restore the originals per axis
    for a, u in zip(self._axes, units):
        a.convert(u)
    # same two-pass rewrite for constant expressions
    units = self.constant_units
    new = list(self.constant_expressions)
    for i, v in enumerate(kwargs.keys()):
        for j, n in enumerate(new):
            new[j] = n.replace(v, "{%i}" % i)
    for i, n in enumerate(new):
        new[i] = n.format(*kwargs.values())
    self.set_constants(*new)
    for c, u in zip(self._constants, units):
        c.convert(u)
    # finish
    if verbose:
        print("{0} variable(s) renamed:".format(len(kwargs)))
        for k, v in kwargs.items():
            print("  {0} --> {1}".format(k, v))
def share_nans(self):
    """Share not-a-numbers between all channels.

    If any channel is nan at a given index, all channels will be nan
    at that index after this operation.

    Uses the share_nans method found in wt.kit.
    """

    def _propagate(_, selection, channels):
        # read the same slab from every channel, unify their nan masks,
        # and write the results back in place
        slabs = [c[selection] for c in channels]
        shared = wt_kit.share_nans(*slabs)
        for c, updated in zip(channels, shared):
            c[selection] = updated

    # chunkwise keeps memory bounded on large datasets
    self.channels[0].chunkwise(_propagate, self.channels)
def smooth(self, factors, channel=None, verbose=True) -> "Data":
    """Smooth a channel using an n-dimensional kaiser window.

    Note, all arrays are loaded into memory.

    For more info see `Kaiser_window`__ wikipedia entry.

    __ https://en.wikipedia.org/wiki/Kaiser_window

    Parameters
    ----------
    factors : int or list of int
        The smoothing factor. You may provide a list of smoothing factors
        for each axis.
    channel : int or str or None (optional)
        The channel to smooth. If None, all channels will be smoothed.
        Default is None.
    verbose : bool (optional)
        Toggle talkback. Default is True.
    """
    warnings.warn("smooth", category=wt_exceptions.EntireDatasetInMemoryWarning)
    # get factors -----------------------------------------------------------------------------
    if isinstance(factors, list):
        pass
    else:
        # scalar given: broadcast the same smoothing factor to every axis
        dummy = np.zeros(len(self._axes))
        dummy[::] = factors
        factors = list(dummy)
    # get channels ----------------------------------------------------------------------------
    if channel is None:
        channels = self.channels
    else:
        if isinstance(channel, int):
            channel_index = channel
        elif isinstance(channel, str):
            channel_index = self.channel_names.index(channel)
        else:
            raise TypeError("channel: expected {int, str}, got %s" % type(channel))
        channels = [self.channels[channel_index]]
    # smooth ----------------------------------------------------------------------------------
    for channel in channels:
        values = channel[:]
        for axis_index in range(len(factors)):
            factor = factors[axis_index]
            # transpose so the axis of interest is last
            transpose_order = range(len(values.shape))
            # replace axis_index with zero
            transpose_order = [
                len(values.shape) - 1 if i == axis_index else i for i in transpose_order
            ]
            transpose_order[len(values.shape) - 1] = axis_index
            values = values.transpose(transpose_order)
            # get kaiser window
            beta = 5.0
            w = np.kaiser(2 * factor + 1, beta)
            # for all slices...
            # (each 1D slab along the last axis is edge-padded, then
            # convolved with the normalized kaiser window)
            for index in np.ndindex(values[..., 0].shape):
                current_slice = values[index]
                temp_slice = np.pad(current_slice, int(factor), mode=str("edge"))
                values[index] = np.convolve(temp_slice, w / w.sum(), mode=str("valid"))
            # transpose out
            # (this transpose order is its own inverse, restoring the
            # original axis order)
            values = values.transpose(transpose_order)
        # return array to channel object
        channel[:] = values
    if verbose:
        print("smoothed data")
def split(
    self, expression, positions, *, units=None, parent=None, verbose=True
) -> wt_collection.Collection:
    """
    Split the data object along a given expression, in units.

    Parameters
    ----------
    expression : int or str
        The expression to split along. If given as an integer, the axis at that index
        is used.
    positions : number-type or 1D array-type
        The position(s) to split at, in units.
    units : str (optional)
        The units of the given positions. Default is same, which assumes
        input units are identical to first variable units.
    parent : WrightTools.Collection (optional)
        The parent collection in which to place the 'split' collection.
        Default is a new Collection.
    verbose : bool (optional)
        Toggle talkback. Default is True.

    Returns
    -------
    WrightTools.collection.Collection
        A Collection of data objects.
        The order of the objects is such that the axis points retain their original order.

    See Also
    --------
    chop
        Divide the dataset into its lower-dimensionality components.
    collapse
        Collapse the dataset along one axis.
    """
    # axis ------------------------------------------------------------------------------------
    # remember the current axes/units so they can be restored at the end
    old_expr = self.axis_expressions
    old_units = self.units
    out = wt_collection.Collection(name="split", parent=parent)
    if isinstance(expression, int):
        if units is None:
            units = self._axes[expression].units
        expression = self._axes[expression].expression
    elif isinstance(expression, str):
        pass
    else:
        raise TypeError("expression: expected {int, str}, got %s" % type(expression))
    # temporarily transform onto the split expression alone
    self.transform(expression)
    if units:
        self.convert(units, verbose=False)
    # bracket the positions with +/- inf so every point lands in some bin;
    # a scalar position raises TypeError in sorted() and is wrapped directly
    try:
        positions = [-np.inf] + sorted(list(positions)) + [np.inf]
    except TypeError:
        positions = [-np.inf, positions, np.inf]
    values = self._axes[0].full
    # one boolean mask per half-open interval [lo, hi)
    masks = [(values >= lo) & (values < hi) for lo, hi in wt_kit.pairwise(positions)]
    omasks = []
    cuts = []
    for mask in masks:
        try:
            omasks.append(wt_kit.mask_reduce(mask))
            cuts.append([i == 1 for i in omasks[-1].shape])
            # Ensure at least one axis is kept
            if np.all(cuts[-1]):
                cuts[-1][0] = False
        except ValueError:
            # empty mask: this interval selects no points
            omasks.append(None)
            cuts.append(None)
    for i in range(len(positions) - 1):
        out.create_data("split%03i" % i)
    # copy variables into each split, masking to the interval and dropping
    # singleton axes flagged in `cut`
    for var in self.variables:
        for i, (imask, omask, cut) in enumerate(zip(masks, omasks, cuts)):
            if omask is None:
                # Zero length split
                continue
            omask = wt_kit.enforce_mask_shape(omask, var.shape)
            omask.shape = tuple([s for s, c in zip(omask.shape, cut) if not c])
            out_arr = np.full(omask.shape, np.nan)
            imask = wt_kit.enforce_mask_shape(imask, var.shape)
            out_arr[omask] = var[:][imask]
            out[i].create_variable(values=out_arr, **var.attrs)
    # same procedure for channels
    for ch in self.channels:
        for i, (imask, omask, cut) in enumerate(zip(masks, omasks, cuts)):
            if omask is None:
                # Zero length split
                continue
            omask = wt_kit.enforce_mask_shape(omask, ch.shape)
            omask.shape = tuple([s for s, c in zip(omask.shape, cut) if not c])
            out_arr = np.full(omask.shape, np.nan)
            imask = wt_kit.enforce_mask_shape(imask, ch.shape)
            out_arr[omask] = ch[:][imask]
            out[i].create_channel(values=out_arr, **ch.attrs)
    if verbose:
        for d in out.values():
            try:
                d.transform(expression)
            except IndexError:
                continue
        print("split data into {0} pieces along <{1}>:".format(len(positions) - 1, expression))
        for i, (lo, hi) in enumerate(wt_kit.pairwise(positions)):
            new_data = out[i]
            if new_data.shape == ():
                print(" {0} : None".format(i))
            else:
                new_axis = new_data.axes[0]
                print(
                    " {0} : {1:0.2f} to {2:0.2f} {3} {4}".format(
                        i, lo, hi, self.axes[0].units, new_axis.shape
                    )
                )
    # restore the original transform on each piece; size-1 axes become
    # constants of that piece
    for d in out.values():
        try:
            d.transform(*old_expr)
            keep = []
            keep_units = []
            for ax, u in zip(d.axes, old_units):
                if ax.size > 1:
                    keep.append(ax.expression)
                    keep_units.append(u)
                else:
                    d.create_constant(ax.expression, verbose=False)
            d.transform(*keep)
            for ax, u in zip(d.axes, keep_units):
                ax.convert(u)
        except IndexError:
            continue
        tempax = Axis(d, expression)
        # if the split expression varies along at most one point in every
        # remaining dimension, record it as a constant of the piece
        if all(
            np.all(
                np.sum(~np.isnan(tempax.masked), axis=tuple(set(range(tempax.ndim)) - {j}))
                <= 1
            )
            for j in range(tempax.ndim)
        ):
            d.create_constant(expression, verbose=False)
    # restore this object's original transform and units
    self.transform(*old_expr)
    for ax, u in zip(self.axes, old_units):
        ax.convert(u)
    return out
def transform(self, *axes, verbose=True):
    """Transform the data.

    Parameters
    ----------
    axes : strings
        Expressions for the new set of axes.
    verbose : boolean (optional)
        Toggle talkback. Default is True

    See Also
    --------
    set_constants
        Similar method except for constants
    """
    # TODO: ensure that transform does not break data
    # create
    new = []
    newt = "newt" in self.axis_expressions
    current = {a.expression: a for a in self._axes}
    for expression in axes:
        # Look up the existing Axis first and only build a new one when
        # absent.  The previous `current.get(expression, Axis(self,
        # expression))` constructed a throwaway Axis for *every*
        # expression, even those already present, because dict.get
        # evaluates its default argument unconditionally.
        axis = current.get(expression)
        if axis is None:
            axis = Axis(self, expression)
        new.append(axis)
    self._axes = new
    # units
    for a in self._axes:
        if a.units is None:
            # default an axis to the units of its first variable
            a.convert(a.variables[0].units)
    # finish
    self.flush()
    self._on_axes_updated()
    nownewt = "newt" in self.axis_expressions
    if verbose and nownewt and not newt:
        print("Look she turned me into a newt")
    elif verbose and newt and not nownewt:
        print("I got better")
def set_constants(self, *constants, verbose=True):
    """Set the constants associated with the data.

    Parameters
    ----------
    constants : str
        Expressions for the new set of constants.
    verbose : boolean (optional)
        Toggle talkback. Default is True

    See Also
    --------
    transform
        Similar method except for axes.
    create_constant
        Add an individual constant.
    remove_constant
        Remove an individual constant.
    """
    # create
    new = []
    current = {c.expression: c for c in self._constants}
    for expression in constants:
        # Reuse the existing Constant when present; only construct a new
        # one otherwise.  (dict.get with a call as the default would
        # eagerly build a discarded Constant for every existing
        # expression.)
        constant = current.get(expression)
        if constant is None:
            constant = Constant(self, expression)
        new.append(constant)
    self._constants = new
    # units
    for c in self._constants:
        if c.units is None:
            # default a constant to the units of its first variable
            c.convert(c.variables[0].units)
    # finish
    self.flush()
    self._on_constants_updated()
def create_constant(self, expression, *, verbose=True):
    """Append a constant to the stored list.

    Parameters
    ----------
    expression : str
        Expression for the new constant.
    verbose : boolean (optional)
        Toggle talkback. Default is True

    See Also
    --------
    set_constants
        Remove and replace all constants.
    remove_constant
        Remove an individual constant.
    """
    existing = self.constant_expressions
    # already registered: warn and hand back the existing object
    if expression in existing:
        wt_exceptions.ObjectExistsWarning.warn(expression)
        return self.constants[existing.index(expression)]
    new_constant = Constant(self, expression)
    if new_constant.units is None:
        # default to the units of the constant's first variable
        new_constant.convert(new_constant.variables[0].units)
    self._constants.append(new_constant)
    self.flush()
    self._on_constants_updated()
    if verbose:
        print("Constant '{}' added".format(new_constant.expression))
    return new_constant
def remove_constant(self, constant, *, verbose=True):
    """Remove a constant from the stored list.

    Parameters
    ----------
    constant : str or Constant or int
        Expression for the new constant.
    verbose : boolean (optional)
        Toggle talkback. Default is True

    Raises
    ------
    TypeError
        If ``constant`` is not a str, int, or Constant.

    See Also
    --------
    set_constants
        Remove and replace all constants.
    create_constant
        Add an individual constant.
    """
    if isinstance(constant, (str, int)):
        constant_index = wt_kit.get_index(self.constant_expressions, constant)
    elif isinstance(constant, Constant):
        constant_index = wt_kit.get_index(self.constants, constant)
    else:
        # Previously an unsupported type fell through and crashed with an
        # UnboundLocalError on `constant_index`; raise a clear TypeError
        # instead, consistent with other methods in this class.
        raise TypeError("constant: expected {int, str, Constant}, got %s" % type(constant))
    constant = self._constants[constant_index]
    self._constants.pop(constant_index)
    self.flush()
    self._on_constants_updated()
    if verbose:
        print("Constant '{}' removed".format(constant.expression))
def zoom(self, factor, order=1, verbose=True):
    """Zoom the data array using spline interpolation of the requested order.

    The number of points along each axis is increased by factor.
    See `scipy ndimage`__ for more info.

    __ http://docs.scipy.org/doc/scipy/reference/
        generated/scipy.ndimage.interpolation.zoom.html

    Parameters
    ----------
    factor : float
        The number of points along each axis will increase by this factor.
    order : int (optional)
        The order of the spline used to interpolate onto new points.
    verbose : bool (optional)
        Toggle talkback. Default is True.
    """
    # NOTE(review): this method is currently disabled — everything below
    # the raise is unreachable legacy code kept for reference.
    raise NotImplementedError
    import scipy.ndimage

    # axes
    for axis in self._axes:
        axis[:] = scipy.ndimage.interpolation.zoom(axis[:], factor, order=order)
    # channels
    for channel in self.channels:
        channel[:] = scipy.ndimage.interpolation.zoom(channel[:], factor, order=order)
    # return
    if verbose:
        print("data zoomed to new shape:", self.shape)
|
[
"numpy.kaiser",
"numpy.sum",
"numpy.nan_to_num",
"numpy.empty",
"numpy.isnan",
"numpy.around",
"scipy.interpolate.interp1d",
"numpy.prod",
"numpy.nanmean",
"numpy.full",
"numpy.meshgrid",
"numpy.isfinite",
"scipy.ndimage.interpolation.zoom",
"numpy.linspace",
"numpy.trapz",
"numpy.result_type",
"numpy.ceil",
"scipy.interpolate.griddata",
"scipy.interpolate.LinearNDInterpolator",
"collections.OrderedDict",
"numpy.all",
"numpy.ndindex",
"numpy.dtype",
"numpy.expand_dims",
"numpy.diff",
"numpy.array",
"functools.reduce",
"warnings.warn",
"numpy.gradient"
] |
[((4139, 4165), 'numpy.array', 'np.array', (['value'], {'dtype': '"""S"""'}), "(value, dtype='S')\n", (4147, 4165), True, 'import numpy as np\n'), ((5409, 5451), 'functools.reduce', 'functools.reduce', (['operator.mul', 'self.shape'], {}), '(operator.mul, self.shape)\n', (5425, 5451), False, 'import functools\n'), ((6372, 6398), 'numpy.array', 'np.array', (['value'], {'dtype': '"""S"""'}), "(value, dtype='S')\n", (6380, 6398), True, 'import numpy as np\n'), ((13292, 13317), 'numpy.ndindex', 'np.ndindex', (['removed_shape'], {}), '(removed_shape)\n', (13302, 13317), True, 'import numpy as np\n'), ((17620, 17662), 'numpy.result_type', 'np.result_type', (['self[channel].dtype', 'float'], {}), '(self[channel].dtype, float)\n', (17634, 17662), True, 'import numpy as np\n'), ((22019, 22095), 'warnings.warn', 'warnings.warn', (['"""moment"""'], {'category': 'wt_exceptions.EntireDatasetInMemoryWarning'}), "('moment', category=wt_exceptions.EntireDatasetInMemoryWarning)\n", (22032, 22095), False, 'import warnings\n'), ((22796, 22821), 'numpy.nan_to_num', 'np.nan_to_num', (['channel[:]'], {}), '(channel[:])\n', (22809, 22821), True, 'import numpy as np\n'), ((28138, 28216), 'warnings.warn', 'warnings.warn', (['"""collapse"""'], {'category': 'wt_exceptions.EntireDatasetInMemoryWarning'}), "('collapse', category=wt_exceptions.EntireDatasetInMemoryWarning)\n", (28151, 28216), False, 'import warnings\n'), ((38329, 38403), 'warnings.warn', 'warnings.warn', (['"""heal"""'], {'category': 'wt_exceptions.EntireDatasetInMemoryWarning'}), "('heal', category=wt_exceptions.EntireDatasetInMemoryWarning)\n", (38342, 38403), False, 'import warnings\n'), ((40365, 40440), 'warnings.warn', 'warnings.warn', (['"""level"""'], {'category': 'wt_exceptions.EntireDatasetInMemoryWarning'}), "('level', category=wt_exceptions.EntireDatasetInMemoryWarning)\n", (40378, 40440), False, 'import warnings\n'), ((40915, 40949), 'numpy.nanmean', 'np.nanmean', (['channel[ss]'], {'axis': 'axis'}), '(channel[ss], 
axis=axis)\n', (40925, 40949), True, 'import numpy as np\n'), ((42750, 42766), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (42758, 42766), True, 'import numpy as np\n'), ((47908, 47953), 'scipy.interpolate.interp1d', 'interp1d', (['points', 'offsets'], {'bounds_error': '(False)'}), '(points, offsets, bounds_error=False)\n', (47916, 47953), False, 'from scipy.interpolate import griddata, interp1d\n'), ((57148, 57179), 'collections.OrderedDict', 'collections.OrderedDict', (['kwargs'], {}), '(kwargs)\n', (57171, 57179), False, 'import collections\n'), ((60504, 60580), 'warnings.warn', 'warnings.warn', (['"""smooth"""'], {'category': 'wt_exceptions.EntireDatasetInMemoryWarning'}), "('smooth', category=wt_exceptions.EntireDatasetInMemoryWarning)\n", (60517, 60580), False, 'import warnings\n'), ((3911, 3934), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""S"""'}), "([], dtype='S')\n", (3919, 3934), True, 'import numpy as np\n'), ((6139, 6162), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""S"""'}), "([], dtype='S')\n", (6147, 6162), True, 'import numpy as np\n'), ((13337, 13364), 'numpy.array', 'np.array', (['idx'], {'dtype': 'object'}), '(idx, dtype=object)\n', (13345, 13364), True, 'import numpy as np\n'), ((17908, 17948), 'numpy.gradient', 'np.gradient', (['channel[:]'], {'axis': 'axis_index'}), '(channel[:], axis=axis_index)\n', (17919, 17948), True, 'import numpy as np\n'), ((17984, 18043), 'numpy.gradient', 'np.gradient', (['channel[:]', 'self[axis].points'], {'axis': 'axis_index'}), '(channel[:], self[axis].points, axis=axis_index)\n', (17995, 18043), True, 'import numpy as np\n'), ((22684, 22695), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (22692, 22695), True, 'import numpy as np\n'), ((23993, 24048), 'numpy.trapz', 'np.trapz', (['((x - about) ** moment * y)', 'x'], {'axis': 'axis_index'}), '((x - about) ** moment * y, x, axis=axis_index)\n', (24001, 24048), True, 'import numpy as np\n'), ((24070, 24086), 'numpy.array', 'np.array', 
(['values'], {}), '(values)\n', (24078, 24086), True, 'import numpy as np\n'), ((25758, 25897), 'warnings.warn', 'warnings.warn', (['"""integrate method of collapse is deprecated, use moment(moment=0) instead"""', 'wt_exceptions.VisibleDeprecationWarning'], {}), "(\n 'integrate method of collapse is deprecated, use moment(moment=0) instead',\n wt_exceptions.VisibleDeprecationWarning)\n", (25771, 25897), False, 'import warnings\n'), ((32399, 32453), 'warnings.warn', 'warnings.warn', (['name', 'wt_exceptions.ObjectExistsWarning'], {}), '(name, wt_exceptions.ObjectExistsWarning)\n', (32412, 32453), False, 'import warnings\n'), ((33306, 33338), 'numpy.prod', 'np.prod', (["require_kwargs['shape']"], {}), "(require_kwargs['shape'])\n", (33313, 33338), True, 'import numpy as np\n'), ((34732, 34786), 'warnings.warn', 'warnings.warn', (['name', 'wt_exceptions.ObjectExistsWarning'], {}), '(name, wt_exceptions.ObjectExistsWarning)\n', (34745, 34786), False, 'import warnings\n'), ((39443, 39507), 'scipy.interpolate.griddata', 'griddata', (['tup', 'arr[-1]', 'xi'], {'method': 'method', 'fill_value': 'fill_value'}), '(tup, arr[-1], xi, method=method, fill_value=fill_value)\n', (39451, 39507), False, 'from scipy.interpolate import griddata, interp1d\n'), ((41001, 41038), 'numpy.expand_dims', 'np.expand_dims', (['subtrahend'], {'axis': 'axis'}), '(subtrahend, axis=axis)\n', (41015, 41038), True, 'import numpy as np\n'), ((50072, 50111), 'numpy.meshgrid', 'np.meshgrid', (['*new_points'], {'indexing': '"""ij"""'}), "(*new_points, indexing='ij')\n", (50083, 50111), True, 'import numpy as np\n'), ((50132, 50187), 'numpy.meshgrid', 'np.meshgrid', (['*[a[:] for a in self._axes]'], {'indexing': '"""ij"""'}), "(*[a[:] for a in self._axes], indexing='ij')\n", (50143, 50187), True, 'import numpy as np\n'), ((50820, 50898), 'scipy.interpolate.griddata', 'griddata', (['tup', 'arr[-1]', 'new_xi'], {'method': 'method', 'fill_value': 'np.nan', 'rescale': '(True)'}), '(tup, arr[-1], new_xi, 
method=method, fill_value=np.nan, rescale=True)\n', (50828, 50898), False, 'from scipy.interpolate import griddata, interp1d\n'), ((57218, 57243), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (57241, 57243), False, 'import collections\n'), ((73676, 73738), 'scipy.ndimage.interpolation.zoom', 'scipy.ndimage.interpolation.zoom', (['axis[:]', 'factor'], {'order': 'order'}), '(axis[:], factor, order=order)\n', (73708, 73738), False, 'import scipy\n'), ((73821, 73886), 'scipy.ndimage.interpolation.zoom', 'scipy.ndimage.interpolation.zoom', (['channel[:]', 'factor'], {'order': 'order'}), '(channel[:], factor, order=order)\n', (73853, 73886), False, 'import scipy\n'), ((17769, 17811), 'numpy.empty', 'np.empty', (['self[channel].shape'], {'dtype': 'rtype'}), '(self[channel].shape, dtype=rtype)\n', (17777, 17811), True, 'import numpy as np\n'), ((23383, 23414), 'numpy.trapz', 'np.trapz', (['y', 'x'], {'axis': 'axis_index'}), '(y, x, axis=axis_index)\n', (23391, 23414), True, 'import numpy as np\n'), ((23438, 23452), 'numpy.array', 'np.array', (['norm'], {}), '(norm)\n', (23446, 23452), True, 'import numpy as np\n'), ((23543, 23578), 'numpy.trapz', 'np.trapz', (['(x * y)', 'x'], {'axis': 'axis_index'}), '(x * y, x, axis=axis_index)\n', (23551, 23578), True, 'import numpy as np\n'), ((23603, 23618), 'numpy.array', 'np.array', (['about'], {}), '(about)\n', (23611, 23618), True, 'import numpy as np\n'), ((23740, 23790), 'numpy.trapz', 'np.trapz', (['((x - about) ** 2 * y)', 'x'], {'axis': 'axis_index'}), '((x - about) ** 2 * y, x, axis=axis_index)\n', (23748, 23790), True, 'import numpy as np\n'), ((23815, 23830), 'numpy.array', 'np.array', (['sigma'], {}), '(sigma)\n', (23823, 23830), True, 'import numpy as np\n'), ((28782, 28824), 'numpy.result_type', 'np.result_type', (['self[channel].dtype', 'float'], {}), '(self[channel].dtype, float)\n', (28796, 28824), True, 'import numpy as np\n'), ((32874, 32894), 'numpy.dtype', 'np.dtype', (['np.float64'], 
{}), '(np.float64)\n', (32882, 32894), True, 'import numpy as np\n'), ((35061, 35081), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (35069, 35081), True, 'import numpy as np\n'), ((38967, 39002), 'numpy.meshgrid', 'np.meshgrid', (['*points'], {'indexing': '"""ij"""'}), "(*points, indexing='ij')\n", (38978, 39002), True, 'import numpy as np\n'), ((44004, 44075), 'scipy.interpolate.interp1d', 'scipy.interpolate.interp1d', (['variable[:]', 'dataset[:]'], {'bounds_error': '(False)'}), '(variable[:], dataset[:], bounds_error=False)\n', (44030, 44075), False, 'import scipy\n'), ((44392, 44457), 'scipy.interpolate.LinearNDInterpolator', 'scipy.interpolate.LinearNDInterpolator', (['pts', 'values'], {'rescale': '(True)'}), '(pts, values, rescale=True)\n', (44430, 44457), False, 'import scipy\n'), ((48051, 48075), 'numpy.isfinite', 'np.isfinite', (['corrections'], {}), '(corrections)\n', (48062, 48075), True, 'import numpy as np\n'), ((48247, 48271), 'numpy.isfinite', 'np.isfinite', (['corrections'], {}), '(corrections)\n', (48258, 48271), True, 'import numpy as np\n'), ((49681, 49707), 'numpy.linspace', 'np.linspace', (['_min', '_max', 'n'], {}), '(_min, _max, n)\n', (49692, 49707), True, 'import numpy as np\n'), ((62176, 62207), 'numpy.kaiser', 'np.kaiser', (['(2 * factor + 1)', 'beta'], {}), '(2 * factor + 1, beta)\n', (62185, 62207), True, 'import numpy as np\n'), ((62273, 62305), 'numpy.ndindex', 'np.ndindex', (['values[..., 0].shape'], {}), '(values[..., 0].shape)\n', (62283, 62305), True, 'import numpy as np\n'), ((65326, 65342), 'numpy.all', 'np.all', (['cuts[-1]'], {}), '(cuts[-1])\n', (65332, 65342), True, 'import numpy as np\n'), ((65970, 65998), 'numpy.full', 'np.full', (['omask.shape', 'np.nan'], {}), '(omask.shape, np.nan)\n', (65977, 65998), True, 'import numpy as np\n'), ((66575, 66603), 'numpy.full', 'np.full', (['omask.shape', 'np.nan'], {}), '(omask.shape, np.nan)\n', (66582, 66603), True, 'import numpy as np\n'), ((13381, 13404), 
'numpy.array', 'np.array', (['removed_shape'], {}), '(removed_shape)\n', (13389, 13404), True, 'import numpy as np\n'), ((13899, 13919), 'numpy.array', 'np.array', (['axis.shape'], {}), '(axis.shape)\n', (13907, 13919), True, 'import numpy as np\n'), ((13943, 13960), 'numpy.sum', 'np.sum', (['idx_index'], {}), '(idx_index)\n', (13949, 13960), True, 'import numpy as np\n'), ((23183, 23210), 'numpy.diff', 'np.diff', (['x'], {'axis': 'axis_index'}), '(x, axis=axis_index)\n', (23190, 23210), True, 'import numpy as np\n'), ((28946, 28978), 'numpy.empty', 'np.empty', (['new_shape'], {'dtype': 'rtype'}), '(new_shape, dtype=rtype)\n', (28954, 28978), True, 'import numpy as np\n'), ((39707, 39744), 'numpy.around', 'np.around', (['timer.interval'], {'decimals': '(3)'}), '(timer.interval, decimals=3)\n', (39716, 39744), True, 'import numpy as np\n'), ((42946, 42976), 'numpy.expand_dims', 'np.expand_dims', (['points'], {'axis': 'i'}), '(points, axis=i)\n', (42960, 42976), True, 'import numpy as np\n'), ((49889, 49921), 'numpy.ceil', 'np.ceil', (['((_max - _min) / spacing)'], {}), '((_max - _min) / spacing)\n', (49896, 49921), True, 'import numpy as np\n'), ((49959, 49985), 'numpy.linspace', 'np.linspace', (['_min', '_max', 'n'], {}), '(_min, _max, n)\n', (49970, 49985), True, 'import numpy as np\n'), ((54642, 54739), 'warnings.warn', 'warnings.warn', (['"""Variable being removed used in a constant"""', 'wt_exceptions.WrightToolsWarning'], {}), "('Variable being removed used in a constant', wt_exceptions.\n WrightToolsWarning)\n", (54655, 54739), False, 'import warnings\n'), ((49609, 49641), 'numpy.ceil', 'np.ceil', (['((_max - _min) / spacing)'], {}), '((_max - _min) / spacing)\n', (49616, 49641), True, 'import numpy as np\n'), ((39274, 39287), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (39282, 39287), True, 'import numpy as np\n'), ((68277, 68300), 'numpy.isnan', 'np.isnan', (['tempax.masked'], {}), '(tempax.masked)\n', (68285, 68300), True, 'import numpy as np\n')]
|
from . import git
import os
from .utils import errordie, mkpath, msg
def _trim(lines):
stripped = [line.strip() for line in lines]
return [line for line in stripped if line and not line.startswith('#')]
def _git_destname(repository):
git_folder = repository.rsplit('/', 1)[1]
if git_folder.endswith('.git'):
return git_folder[:-4]
else:
return git_folder
class Repo(object):
    """One repository entry: a git URL plus the prefix folder it lives under."""

    def __init__(self, repository, prefix):
        self._repository = repository
        self._prefix = prefix

    @classmethod
    def parse(cls, line):
        """Build a Repo from a '<prefix> <url>' repos-file line."""
        pieces = line.split(' ', 1)
        if len(pieces) == 2:
            prefix, repository = pieces
        else:
            errordie('Invalid repository file line: {}'.format(line))
        return cls(repository, prefix)

    def _group_folder(self, folder):
        # A falsy prefix means the repository sits directly in *folder*.
        return os.path.join(folder, self._prefix) if self._prefix else folder

    def clone(self, folder):
        """Clone this repository under *folder*, skipping existing checkouts."""
        group_folder = self._group_folder(folder)
        mkpath(group_folder)
        git_folder = _git_destname(self._repository)
        destination = os.path.join(group_folder, git_folder)
        if os.path.exists(destination):
            msg('IN %s SKIPPING %s' % (self._prefix, git_folder))
            return
        msg('IN %s CLONING %s' % (self._prefix, git_folder))
        git.clone_or_die(self._repository, destination)

    def fast_forward(self, folder):
        """Fast-forward an existing checkout of this repository under *folder*."""
        group_folder = self._group_folder(folder)
        git_folder = _git_destname(self._repository)
        destination = os.path.join(group_folder, git_folder)
        if not os.path.exists(destination):
            errordie('Can\'t fast forward missing repository: {}'.format(destination))
        msg('IN %s FAST FORWARDING %s' % (self._prefix, git_folder))
        git.fast_forward_or_die(destination)

    def as_line(self):
        """Serialize back to the repos-file line format."""
        if self._prefix:
            return '{} {}'.format(self._prefix, self._repository)
        return '. {}'.format(self._repository)

    def __eq__(self, other):
        if isinstance(other, Repo):
            return (self._repository, self._prefix) == (other._repository, other._prefix)
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return self.as_line()
class ReposFile(object):
    """Parsed contents of a repos file: an ordered list of Repo entries."""

    def __init__(self, stored_file):
        self._stored_file = stored_file
        self._repositories = [
            Repo.parse(line) for line in _trim(stored_file.readlines())
        ]

    def clone(self, folder):
        """Clone every listed repository under *folder*."""
        for repository in self._repositories:
            repository.clone(folder)

    def fast_forward(self, folder):
        """Fast-forward every listed repository under *folder*."""
        for repository in self._repositories:
            repository.fast_forward(folder)

    def add(self, repo):
        """Append *repo*, dying on a duplicate entry."""
        for known in self._repositories:
            if repo == known:
                errordie('Duplicate entry {}'.format(repo))
        self._repositories.append(repo)

    def save(self):
        """Write the entries back to the stored file, one sorted line each."""
        serialized = sorted(repo.as_line() + '\n' for repo in self._repositories)
        self._stored_file.writelines(serialized)
class RepoSet(object):
    """A named repos file bound to the folder its repositories live in."""

    def __init__(self, name, folder, stored_file):
        self._name = name
        self._reposfile = ReposFile(stored_file)
        self._folder = folder

    def clone(self):
        """Clone every repository in this set."""
        msg('CLONING SET {}'.format(self._name))
        self._reposfile.clone(self._folder)

    def fast_forward(self):
        """Fast-forward every repository in this set."""
        msg('FAST FORWARD IN SET {}'.format(self._name))
        self._reposfile.fast_forward(self._folder)

    def add_and_clone(self, repository, prefix):
        """Record a new repository, persist the repos file, then clone it."""
        msg('IN SET {}'.format(self._name))
        new_entry = Repo(repository=repository, prefix=prefix)
        self._reposfile.add(new_entry)
        self._reposfile.save()
        new_entry.clone(self._folder)
|
[
"os.path.join",
"os.path.exists"
] |
[((1164, 1202), 'os.path.join', 'os.path.join', (['group_folder', 'git_folder'], {}), '(group_folder, git_folder)\n', (1176, 1202), False, 'import os\n'), ((1214, 1241), 'os.path.exists', 'os.path.exists', (['destination'], {}), '(destination)\n', (1228, 1241), False, 'import os\n'), ((1607, 1645), 'os.path.join', 'os.path.join', (['group_folder', 'git_folder'], {}), '(group_folder, git_folder)\n', (1619, 1645), False, 'import os\n'), ((905, 939), 'os.path.join', 'os.path.join', (['folder', 'self._prefix'], {}), '(folder, self._prefix)\n', (917, 939), False, 'import os\n'), ((1661, 1688), 'os.path.exists', 'os.path.exists', (['destination'], {}), '(destination)\n', (1675, 1688), False, 'import os\n')]
|
# Plot polynomial regression on 1d problem
# Based on https://github.com/probml/pmtk3/blob/master/demos/linregPolyVsDegree.m
import numpy as np
import matplotlib.pyplot as plt
from pyprobml_utils import save_fig
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler
import sklearn.metrics
from sklearn.metrics import mean_squared_error as mse
def make_1dregression_data(n=21):
    """Generate a noisy 1d regression dataset from a fixed quadratic.

    The underlying function is w0*x + w1*x^2 with w = [-1.5, 1/9]; Gaussian
    noise of variance 4 is added to both splits.  The RNG is seeded, so
    repeated calls with the same ``n`` return identical arrays.

    Parameters
    ----------
    n : int (optional)
        Number of training points, evenly spaced on [0, 20]. Default is 21.

    Returns
    -------
    xtrain, ytrain, xtest, ytest : np.ndarray
        Training inputs/targets (length n) and test inputs/targets
        (x on a 0.1-spaced grid over [0, 20)).
    """
    np.random.seed(0)  # reproducible noise
    xtrain = np.linspace(0.0, 20, n)
    xtest = np.arange(0.0, 20, 0.1)
    sigma2 = 4  # noise variance
    w = np.array([-1.5, 1/9.])

    # PEP 8: use def rather than assigning a lambda to a name
    def fun(x):
        return w[0]*x + w[1]*np.square(x)

    # NOTE: the two normal() draws must stay in this order to preserve the
    # seeded noise sequence (train first, then test).
    ytrain = fun(xtrain) + np.random.normal(0, 1, xtrain.shape) * \
        np.sqrt(sigma2)
    ytest = fun(xtest) + np.random.normal(0, 1, xtest.shape) * \
        np.sqrt(sigma2)
    return xtrain, ytrain, xtest, ytest
xtrain, ytrain, xtest, ytest = make_1dregression_data(n=21)

# Rescale inputs to [-1, 1]: keeps the polynomial features numerically
# well-conditioned at high degrees.
scaler = MinMaxScaler(feature_range=(-1, 1))
Xtrain = scaler.fit_transform(xtrain.reshape(-1, 1))
Xtest = scaler.transform(xtest.reshape(-1, 1))

# Fit a polynomial regression for every degree 1..20, recording train/test
# MSE and the predictions for the later plots.
degs = np.arange(1, 21, 1)
ndegs = np.max(degs)
mse_train = np.empty(ndegs)
mse_test = np.empty(ndegs)
# object arrays: one prediction vector per degree
ytest_pred_stored = np.empty(ndegs, dtype=np.ndarray)
ytrain_pred_stored = np.empty(ndegs, dtype=np.ndarray)
for deg in degs:
    model = LinearRegression()
    poly_features = PolynomialFeatures(degree=deg, include_bias=False)
    Xtrain_poly = poly_features.fit_transform(Xtrain)
    model.fit(Xtrain_poly, ytrain)
    ytrain_pred = model.predict(Xtrain_poly)
    ytrain_pred_stored[deg-1] = ytrain_pred
    Xtest_poly = poly_features.transform(Xtest)
    ytest_pred = model.predict(Xtest_poly)
    mse_train[deg-1] = mse(ytrain_pred, ytrain)
    mse_test[deg-1] = mse(ytest_pred, ytest)
    ytest_pred_stored[deg-1] = ytest_pred
# Plot MSE vs degree (degrees above 15 excluded: test MSE explodes there
# and would dominate the axis scale)
fig, ax = plt.subplots()
mask = degs <= 15
ax.plot(degs[mask], mse_test[mask], color='r', marker='x', label='test')
ax.plot(degs[mask], mse_train[mask], color='b', marker='s', label='train')
ax.legend(loc='upper right', shadow=True)
plt.xlabel('degree')
plt.ylabel('mse')
save_fig('polyfitVsDegree.pdf')
plt.show()

# Plot fitted functions for a few representative degrees
# (low degrees underfit, high degrees overfit)
chosen_degs = [1, 2, 14, 20]
for deg in chosen_degs:
    fig, ax = plt.subplots()
    ax.scatter(xtrain, ytrain)
    ax.plot(xtest, ytest_pred_stored[deg-1])
    ax.set_ylim((-10, 15))
    plt.title('degree {}'.format(deg))
    save_fig('polyfitDegree{}.pdf'.format(deg))
    plt.show()

# Plot residuals
# https://blog.minitab.com/blog/adventures-in-statistics-2/why-you-need-to-check-your-residual-plots-for-regression-analysis
chosen_degs = [1, 2, 14, 20]
for deg in chosen_degs:
    fig, ax = plt.subplots()
    ypred = ytrain_pred_stored[deg-1]
    residuals = ytrain - ypred
    ax.plot(ypred, residuals, 'o')
    ax.set_xlabel('predicted y')
    ax.set_ylabel('residual')
    plt.title('degree {}. Predictions on the training set'.format(deg))
    save_fig('polyfitDegree{}Residuals.pdf'.format(deg))
    plt.show()

# Plot fit vs actual for both splits, annotated with the R^2 score
# https://blog.minitab.com/blog/adventures-in-statistics-2/regression-analysis-how-do-i-interpret-r-squared-and-assess-the-goodness-of-fit
chosen_degs = [1, 2, 14, 20]
for deg in chosen_degs:
    for train in [True, False]:
        if train:
            ytrue = ytrain
            ypred = ytrain_pred_stored[deg-1]
            dataset = 'Train'
        else:
            ytrue = ytest
            ypred = ytest_pred_stored[deg-1]
            dataset = 'Test'
        fig, ax = plt.subplots()
        ax.scatter(ytrue, ypred)
        # dashed identity line: points on it are perfectly predicted
        ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c=".3")
        ax.set_xlabel('true y')
        ax.set_ylabel('predicted y')
        r2 = sklearn.metrics.r2_score(ytrue, ypred)
        plt.title('degree {}. R2 on {} = {:0.3f}'.format(deg, dataset, r2))
        save_fig('polyfitDegree{}FitVsActual{}.pdf'.format(deg, dataset))
        plt.show()
|
[
"pyprobml_utils.save_fig",
"matplotlib.pyplot.show",
"numpy.random.seed",
"numpy.empty",
"sklearn.preprocessing.MinMaxScaler",
"numpy.square",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.PolynomialFeatures",
"numpy.max",
"numpy.arange",
"numpy.array",
"numpy.linspace",
"numpy.random.normal",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"sklearn.metrics.mean_squared_error",
"numpy.sqrt"
] |
[((974, 1009), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (986, 1009), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1119, 1138), 'numpy.arange', 'np.arange', (['(1)', '(21)', '(1)'], {}), '(1, 21, 1)\n', (1128, 1138), True, 'import numpy as np\n'), ((1147, 1159), 'numpy.max', 'np.max', (['degs'], {}), '(degs)\n', (1153, 1159), True, 'import numpy as np\n'), ((1172, 1187), 'numpy.empty', 'np.empty', (['ndegs'], {}), '(ndegs)\n', (1180, 1187), True, 'import numpy as np\n'), ((1199, 1214), 'numpy.empty', 'np.empty', (['ndegs'], {}), '(ndegs)\n', (1207, 1214), True, 'import numpy as np\n'), ((1235, 1268), 'numpy.empty', 'np.empty', (['ndegs'], {'dtype': 'np.ndarray'}), '(ndegs, dtype=np.ndarray)\n', (1243, 1268), True, 'import numpy as np\n'), ((1290, 1323), 'numpy.empty', 'np.empty', (['ndegs'], {'dtype': 'np.ndarray'}), '(ndegs, dtype=np.ndarray)\n', (1298, 1323), True, 'import numpy as np\n'), ((1884, 1898), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1896, 1898), True, 'import matplotlib.pyplot as plt\n'), ((2112, 2132), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""degree"""'], {}), "('degree')\n", (2122, 2132), True, 'import matplotlib.pyplot as plt\n'), ((2133, 2150), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mse"""'], {}), "('mse')\n", (2143, 2150), True, 'import matplotlib.pyplot as plt\n'), ((2151, 2182), 'pyprobml_utils.save_fig', 'save_fig', (['"""polyfitVsDegree.pdf"""'], {}), "('polyfitVsDegree.pdf')\n", (2159, 2182), False, 'from pyprobml_utils import save_fig\n'), ((2183, 2193), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2191, 2193), True, 'import matplotlib.pyplot as plt\n'), ((483, 500), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (497, 500), True, 'import numpy as np\n'), ((514, 537), 'numpy.linspace', 'np.linspace', (['(0.0)', '(20)', 'n'], {}), '(0.0, 20, n)\n', (525, 537), True, 'import numpy as 
np\n'), ((550, 573), 'numpy.arange', 'np.arange', (['(0.0)', '(20)', '(0.1)'], {}), '(0.0, 20, 0.1)\n', (559, 573), True, 'import numpy as np\n'), ((597, 622), 'numpy.array', 'np.array', (['[-1.5, 1 / 9.0]'], {}), '([-1.5, 1 / 9.0])\n', (605, 622), True, 'import numpy as np\n'), ((1353, 1371), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1369, 1371), False, 'from sklearn.linear_model import LinearRegression\n'), ((1392, 1442), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': 'deg', 'include_bias': '(False)'}), '(degree=deg, include_bias=False)\n', (1410, 1442), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1735, 1759), 'sklearn.metrics.mean_squared_error', 'mse', (['ytrain_pred', 'ytrain'], {}), '(ytrain_pred, ytrain)\n', (1738, 1759), True, 'from sklearn.metrics import mean_squared_error as mse\n'), ((1783, 1805), 'sklearn.metrics.mean_squared_error', 'mse', (['ytest_pred', 'ytest'], {}), '(ytest_pred, ytest)\n', (1786, 1805), True, 'from sklearn.metrics import mean_squared_error as mse\n'), ((2286, 2300), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2298, 2300), True, 'import matplotlib.pyplot as plt\n'), ((2495, 2505), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2503, 2505), True, 'import matplotlib.pyplot as plt\n'), ((2719, 2733), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2731, 2733), True, 'import matplotlib.pyplot as plt\n'), ((3035, 3045), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3043, 3045), True, 'import matplotlib.pyplot as plt\n'), ((3548, 3562), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3560, 3562), True, 'import matplotlib.pyplot as plt\n'), ((3938, 3948), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3946, 3948), True, 'import matplotlib.pyplot as plt\n'), ((694, 730), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'xtrain.shape'], {}), '(0, 1, 
xtrain.shape)\n', (710, 730), True, 'import numpy as np\n'), ((743, 758), 'numpy.sqrt', 'np.sqrt', (['sigma2'], {}), '(sigma2)\n', (750, 758), True, 'import numpy as np\n'), ((783, 818), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'xtest.shape'], {}), '(0, 1, xtest.shape)\n', (799, 818), True, 'import numpy as np\n'), ((831, 846), 'numpy.sqrt', 'np.sqrt', (['sigma2'], {}), '(sigma2)\n', (838, 846), True, 'import numpy as np\n'), ((654, 666), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (663, 666), True, 'import numpy as np\n')]
|
from _socket import timeout
from urllib.error import URLError
from pytube import YouTube
from pytube.exceptions import RegexMatchError
from old_code.Stream import Stream
import time
import tools as tools
class YoutubeVideo(object):
# todo (2): subtitles
conn_errors = 0
def __init__(self, url, score=0, preferred_container='mp4', min_resolution=360,
max_resolution=1080, force_preferred_container=False):
########################################
self.url = None
self.source = None
self.delete = None
self.complete = None
self.is_play_trailer = None
self.title = None
self.thumbnail_url = None
self.channel = None
self.tags = list()
self.view_count = None
self.rating = None
self.adjusted_rating = None
self.resolution = None
self.quality_score = None
self.length = None
self.resolution_ratio = None
self.streams = list()
self.best_video_stream = None
self.best_audio_stream = None
self.best_combined_stream = None
########################################
self.url = url
self.delete = False
self.is_play_trailer = False
self.complete = True
tries = 0
while True:
try:
self.source = YouTube(url)
except KeyError as e:
if e.args[0] == 'url':
self.delete = True
self.is_play_trailer = True
# todo (1): add youtube-dl info grabber/downloader
# stuff I need: title, length, keywords?
return
elif e.args[0] == 'url_encoded_fmt_stream_map':
if tries > 4:
print('Failed to load youtube data, retrying. Reason: ' + str(e))
self.delete = True
return
print('Failed to load youtube data, retrying. Reason: ' + str(e))
time.sleep(2)
tries += 1
else:
raise
except RegexMatchError as e:
print('Pytube failed to load video info. Reason: ' + url + ': ' + str(e))
self.delete = True
return
except timeout as e:
if tries > 4:
print('Pytube failed to load video info. Reason: ' + str(e))
self.complete = False
if Stream.conn_errors > 2:
raise
else:
Stream.conn_errors += 1
return
print('Pytube failed to load video info. Reason: ' + str(e) + ', retrying...')
tries += 1
time.sleep(1)
except URLError as e:
if tries > 2:
print('Pytube failed to load video info. Reason: ' + str(e))
self.complete = False
if YoutubeVideo.conn_errors > 2:
raise
else:
YoutubeVideo.conn_errors += 1
return
print('Pytube failed to load video info. Reason: ' + str(e) + ', retrying...')
time.sleep(1)
tries += 1
else:
YoutubeVideo.conn_errors = 0
break
self.score = score
self.title = self.source.title
self.title = tools.get_clean_string(self.title)
self.rating = float(self.source.player_config_args['avg_rating'])
self.view_count = int(self.source.player_config_args['view_count'])
self.channel = self.source.player_config_args['author']
self.length = self.source.player_config_args['length_seconds']
self.thumbnail_url = self.source.thumbnail_url
try:
self.thumbnail_url = self.source.thumbnail_url
except KeyError:
self.thumbnail_url = None
try:
self.tags = self.source.player_config_args['keywords'].split(',')
except KeyError:
self.tags = ''
if self.view_count < 100:
self.view_count = 100
self.adjusted_rating = self.rating * (1 - 1 / ((self.view_count / 60) ** 0.5))
self.load_streams(min_resolution, max_resolution)
self.update_quality_score(preferred_container)
self.update_best_audio_stream(preferred_container, force_preferred_container)
self.update_best_video_stream(preferred_container, force_preferred_container)
self.update_best_combined_stream(preferred_container, force_preferred_container)
if self.is_play_trailer:
self.update_youtube_dl_info()
def update_youtube_dl_info(self):
pass
def update_quality_score(self, preferred_container='mp4'):
self.quality_score = 0
max_res = 0
for stream in self.streams:
if stream.type != 'video':
continue
quality_score = 0
pixel_bitrate = stream.bitrate_per_pixel
if stream.resolution == 1080:
pixel_bitrate /= 1
quality_score = 120
elif stream.resolution == 720:
pixel_bitrate /= 1.22
quality_score = 108
elif stream.resolution == 480:
pixel_bitrate /= 1.52
quality_score = 65
elif stream.resolution == 360:
pixel_bitrate /= 1.39
quality_score = 40
elif stream.resolution == 240:
pixel_bitrate /= 2.15
quality_score = 20
elif stream.resolution == 144:
pixel_bitrate /= 2.65
quality_score = 10
if preferred_container.lower() == stream.container:
quality_score *= 1.2
quality_score *= pixel_bitrate
if stream.resolution > max_res:
self.quality_score = quality_score
max_res = stream.resolution
self.resolution_ratio = stream.size[0] / stream.size[1]
elif stream.resolution == max_res:
if quality_score > self.quality_score:
self.quality_score = quality_score
def load_streams(self, min_resolution=360, max_resolution=1080):
self.streams = list()
self.complete = True
for source_stream in self.source.streams.fmt_streams:
stream = Stream(source_stream, int(self.length))
if stream.complete:
if stream.resolution is not None:
if stream.resolution > max_resolution or stream.resolution < min_resolution:
continue
self.streams.append(stream)
elif stream.retry:
self.complete = False
if Stream.conn_errors != 0:
self.complete = False
def update_best_video_stream(self, preferred_container='mp4', force_preferred_container=False):
highest_resolution = 0
best_stream = None
highest_pref_resolution = 0
best_pref_stream = None
for stream in self.streams:
if 'video' != stream.type:
continue
if stream.resolution > highest_resolution:
highest_resolution = stream.resolution
best_stream = stream
if stream.container.lower() == preferred_container.lower():
if stream.resolution > highest_pref_resolution:
highest_pref_resolution = stream.resolution
best_pref_stream = stream
if highest_resolution == highest_pref_resolution or force_preferred_container:
ret = best_pref_stream
else:
ret = best_stream
self.best_video_stream = ret
def update_best_audio_stream(self, preferred_container='mp4', force_preferred_container=False):
highest_bitrate = 0
best_stream = None
highest_pref_bitrate = 0
best_pref_stream = None
for stream in self.streams:
if 'audio' != stream.type:
continue
if stream.bitrate > highest_bitrate:
highest_bitrate = stream.bitrate
best_stream = stream
if stream.container.lower() == preferred_container.lower():
if stream.bitrate > highest_pref_bitrate:
highest_pref_bitrate = stream.bitrate
best_pref_stream = stream
if highest_bitrate <= highest_pref_bitrate * 1.35 or force_preferred_container:
ret = best_pref_stream
else:
ret = best_stream
self.best_audio_stream = ret
def update_best_combined_stream(self, preferred_container='mp4', force_preferred_container=False):
highest_resolution = 0
for stream in self.streams:
if 'combined' != stream.type:
continue
if stream.resolution > highest_resolution:
highest_resolution = stream.resolution
max_score = 0
selected_stream = None
for stream in self.streams:
if 'combined' != stream.type:
continue
score = 0
resolution = stream.resolution
if force_preferred_container:
if stream.container != preferred_container:
continue
if resolution == highest_resolution:
score += 10 ** 1
if stream.container == preferred_container:
score += 10 ** 0
if score > max_score:
max_score = score
selected_stream = stream
self.best_combined_stream = selected_stream
|
[
"time.sleep",
"pytube.YouTube",
"tools.get_clean_string"
] |
[((3578, 3612), 'tools.get_clean_string', 'tools.get_clean_string', (['self.title'], {}), '(self.title)\n', (3600, 3612), True, 'import tools as tools\n'), ((1373, 1385), 'pytube.YouTube', 'YouTube', (['url'], {}), '(url)\n', (1380, 1385), False, 'from pytube import YouTube\n'), ((2860, 2873), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2870, 2873), False, 'import time\n'), ((3363, 3376), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3373, 3376), False, 'import time\n'), ((2074, 2087), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2084, 2087), False, 'import time\n')]
|
from .conf import Configuration, parse_config, read_config
from .model import OpenAmundsen, Model
from . import constants, errors, terrain
# Get version (method as used by matplotlib: https://github.com/matplotlib/matplotlib/blob/bcc1ce8461f5b6e874baaaa02ef776d0243a4abe/lib/matplotlib/__init__.py#L133-L151)
def __getattr__(name):
if name == '__version__':
from pathlib import Path
import setuptools_scm
global __version__
root = Path(__file__).resolve().parents[1]
if (root / '.git').exists() and not (root / '.git/shallow').exists():
__version__ = setuptools_scm.get_version(
root=root,
version_scheme='post-release',
fallback_version='0.0.0+UNKNOWN',
)
else:
try:
from . import _version
__version__ = _version.version
except ImportError:
__version__ = '0.0.0+UNKNOWN'
return __version__
raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
__all__ = [
'OpenAmundsen',
'Configuration',
'parse_config',
'read_config',
]
|
[
"setuptools_scm.get_version",
"pathlib.Path"
] |
[((609, 715), 'setuptools_scm.get_version', 'setuptools_scm.get_version', ([], {'root': 'root', 'version_scheme': '"""post-release"""', 'fallback_version': '"""0.0.0+UNKNOWN"""'}), "(root=root, version_scheme='post-release',\n fallback_version='0.0.0+UNKNOWN')\n", (635, 715), False, 'import setuptools_scm\n'), ((469, 483), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (473, 483), False, 'from pathlib import Path\n')]
|
#!/usr/bin/env python3
from snipsTools import SnipsConfigParser
from hermes_python.hermes import Hermes
# imported to get type check and IDE completion
from hermes_python.ontology.dialogue.intent import IntentMessage
CONFIG_INI = "config.ini"
# If this skill is supposed to run on the satellite,
# please get this mqtt connection info from <config.ini>
# Hint: MQTT server is always running on the master device
MQTT_IP_ADDR: str = "localhost"
MQTT_PORT: int = 1883
MQTT_ADDR: str = "{}:{}".format(MQTT_IP_ADDR, str(MQTT_PORT))
class Template(object):
"""Class used to wrap action code with mqtt connection
Please change the name refering to your application
"""
def __init__(self):
# get the configuration if needed
try:
self.config = SnipsConfigParser.read_configuration_file(CONFIG_INI)
except Exception:
self.config = None
# start listening to MQTT
self.start_blocking()
@staticmethod
def intent_1_callback(self,
hermes: Hermes,
intent_message: IntentMessage):
# terminate the session first if not continue
hermes.publish_end_session(intent_message.session_id, "")
# action code goes here...
print('[Received] intent: {}'.format(
intent_message.intent.intent_name))
# if need to speak the execution result by tts
hermes.publish_start_session_notification(
intent_message.site_id,
"Action 1", "")
@staticmethod
def intent_2_callback(self,
hermes: Hermes,
intent_message: IntentMessage):
# terminate the session first if not continue
hermes.publish_end_session()
hermes.publish_end_session(intent_message.session_id, "")
# action code goes here...
print('[Received] intent: {}'.format(
intent_message.intent.intent_name))
# if need to speak the execution result by tts
hermes.publish_start_session_notification(
intent_message.site_id,
"Action 2", "")
@staticmethod
def master_intent_callback(self,
hermes: Hermes,
intent_message: IntentMessage,):
coming_intent = intent_message.intent.intent_name
if coming_intent == 'intent_1':
self.intent_1_callback(hermes, intent_message)
if coming_intent == 'intent_2':
self.intent_2_callback(hermes, intent_message)
# more callback and if condition goes here...
# --> Register callback function and start MQTT
def start_blocking(self):
with Hermes(MQTT_ADDR) as h:
h.subscribe_intents(self.master_intent_callback).start()
if __name__ == "__main__":
Template()
|
[
"hermes_python.hermes.Hermes",
"snipsTools.SnipsConfigParser.read_configuration_file"
] |
[((790, 843), 'snipsTools.SnipsConfigParser.read_configuration_file', 'SnipsConfigParser.read_configuration_file', (['CONFIG_INI'], {}), '(CONFIG_INI)\n', (831, 843), False, 'from snipsTools import SnipsConfigParser\n'), ((2740, 2757), 'hermes_python.hermes.Hermes', 'Hermes', (['MQTT_ADDR'], {}), '(MQTT_ADDR)\n', (2746, 2757), False, 'from hermes_python.hermes import Hermes\n')]
|
from web3 import Web3
from brownie import Contract
from brownie.convert import to_bytes
from brownie.network import accounts
from brownie.network.account import Account
from brownie import (
Wei,
Contract,
# Registry,
# RegistryController,
License,
LicenseController,
Policy,
PolicyController,
QueryController,
ProductService,
OracleService,
ComponentOwnerService,
PolicyFlowDefault,
InstanceOperatorService,
TestOracle,
TestProduct,
)
from scripts.const import (
ORACLE_INPUT_FORMAT,
ORACLE_OUTPUT_FORMAT,
ORACLE_NAME,
PRODUCT_NAME,
)
from scripts.util import (
get_account,
encode_function_data,
# s2h,
s2b32,
deployGifModule,
deployGifService,
)
from scripts.instance import (
GifInstance,
)
class GifTestOracle(object):
def __init__(self, instance: GifInstance, oracleOwner: Account):
operatorService = instance.getInstanceOperatorService()
componentOwnerService = instance.getComponentOwnerService()
oracleService = instance.getOracleService()
# 1) add oracle provider role to owner
opRole = operatorService.oracleProviderRole()
operatorService.addRoleToAccount(oracleOwner, opRole)
# 2) oracle owner creates oracle
self.oracle = TestOracle.deploy(
s2b32(ORACLE_NAME),
instance.getRegistry(),
{'from': oracleOwner})
# 3) oracle owner proposes oracle to instance
componentOwnerService.propose(
self.oracle,
{'from': oracleOwner})
# 4) instance operator approves oracle
operatorService.approveOracle(
self.oracle.getId(),
{'from': instance.getOwner()})
def getOracleId(self) -> int:
return self.oracle.getId()
def getOracleContract(self) -> TestOracle:
return self.oracle
class GifTestProduct(object):
def __init__(self, instance: GifInstance, oracle: GifTestOracle, productOwner: Account):
self.policyController = instance.getPolicyController()
operatorService = instance.getInstanceOperatorService()
productService = instance.getProductService()
self.product = TestProduct.deploy(
productService,
s2b32(PRODUCT_NAME),
oracle.getOracleId(),
{'from': productOwner})
operatorService.approveProduct(
self.product.getId(),
{'from': instance.getOwner()})
def getProductId(self) -> int:
return self.product.getId()
def getProductContract(self) -> TestProduct:
return self.product
def getPolicy(self, policyId: str):
return self.policyController.getPolicy(policyId)
|
[
"scripts.util.s2b32"
] |
[((1411, 1429), 'scripts.util.s2b32', 's2b32', (['ORACLE_NAME'], {}), '(ORACLE_NAME)\n', (1416, 1429), False, 'from scripts.util import get_account, encode_function_data, s2b32, deployGifModule, deployGifService\n'), ((2395, 2414), 'scripts.util.s2b32', 's2b32', (['PRODUCT_NAME'], {}), '(PRODUCT_NAME)\n', (2400, 2414), False, 'from scripts.util import get_account, encode_function_data, s2b32, deployGifModule, deployGifService\n')]
|
from django.test import TestCase
from dojo.tools.acunetix.parser import AcunetixParser
from dojo.models import Test
class TestAcunetixParser(TestCase):
def test_parse_without_file(self):
parser = AcunetixParser()
findings = parser.get_findings(None, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_finding(self):
testfile = open("dojo/unittests/scans/acunetix/one_finding.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_finding(self):
testfile = open("dojo/unittests/scans/acunetix/many_findings.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(4, len(findings))
|
[
"dojo.tools.acunetix.parser.AcunetixParser",
"dojo.models.Test"
] |
[((210, 226), 'dojo.tools.acunetix.parser.AcunetixParser', 'AcunetixParser', ([], {}), '()\n', (224, 226), False, 'from dojo.tools.acunetix.parser import AcunetixParser\n'), ((462, 478), 'dojo.tools.acunetix.parser.AcunetixParser', 'AcunetixParser', ([], {}), '()\n', (476, 478), False, 'from dojo.tools.acunetix.parser import AcunetixParser\n'), ((725, 741), 'dojo.tools.acunetix.parser.AcunetixParser', 'AcunetixParser', ([], {}), '()\n', (739, 741), False, 'from dojo.tools.acunetix.parser import AcunetixParser\n'), ((272, 278), 'dojo.models.Test', 'Test', ([], {}), '()\n', (276, 278), False, 'from dojo.models import Test\n'), ((528, 534), 'dojo.models.Test', 'Test', ([], {}), '()\n', (532, 534), False, 'from dojo.models import Test\n'), ((791, 797), 'dojo.models.Test', 'Test', ([], {}), '()\n', (795, 797), False, 'from dojo.models import Test\n')]
|
from __future__ import unicode_literals
from django.db import models
import re
import json
# import nlp
try:
import Queue as Q #python version < 3.0
except ImportError:
import queue as Q #python3.*
class wordBlock():
def __init__(self, start, end, kind):
self.start = start;
self.end = end;
self.kind = kind;
def __lt__(self,other):#operator <
return self.end < other.start
def __cmp__(self,other):
#call global(builtin) function cmp for int
return cmp(self.start,other.end)
class sentence(models.Model):
originalText = models.TextField(blank=True)
annotatedText = models.TextField(blank=True)
#alteredText = models.TextField(blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
@classmethod
def create(originalText):
stc = sentence();
#stc.originalText = originalText.replace('\n', ' ').replace('\r', ' ').strip();
stc.originalText = ' '.join((originalText.split())).strip();
#stc.analyze();
return stc;
# def getScrubbedText(self):
# self.scrubbedText = '';
# return self.analyze();
# #we would rescrub due to frequent update of algorithm.
# if self.scrubbedText is None:
# return self.analyze();
# if self.scrubbedText == '':
# return self.analyze();
# return self.scrubbedText;
def __unicode__(self):
return self.originalText;
# def analyze(self):
# scrubbedContent = "test";#nlp.scrub(self.originalText);
# i = 0;
# str_suffix = '';
# for token in scrubbedContent:
# j = token[0].idx;
# if self.originalText[j - 1] == ' ':
# j = j - 1;
# str_suffix = ''.join((str_suffix, ' '));
# if token[1] != '':
# self.scrubbedText = "".join((self.scrubbedText, self.originalText[i:j], str_suffix, "<scrub type='", token[1].lower() ,"'>"));
# str_suffix = '</scrub>';
# else:
# self.scrubbedText = "".join((self.scrubbedText, self.originalText[i:j], str_suffix));
# str_suffix = '';
# i = token[0].idx;
# self.scrubbedText = "".join((self.scrubbedText, self.originalText[i:len(self.originalText)], str_suffix));
# self.save();
# #self.scrubbedText = self.scrubbedText.replace('<scrub></scrub>', ' ').strip();
# return self.scrubbedText;
class task(models.Model):
#msg_a1 = models.ForeignKey(sentence, related_name="sentence_a1")
#msg_a2 = models.ForeignKey(sentence, related_name="sentence_a2")
#msg_b1 = models.ForeignKey(sentence, related_name="sentence_b1")
#msg_b2 = models.ForeignKey(sentence, related_name="sentence_b2")
#msg_c1 = models.ForeignKey(sentence, related_name="sentence_c1")
sentences = models.TextField(default="[]")
status = models.IntegerField() #0: init 1: opened 2: answered
workers = models.TextField(default="[]")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __unicode__(self):
return " ".join((self.id, self.status));
def getSentences(self):
ret = []
stc_ids = json.loads(self.sentences)
for stc_id in stc_ids:
ret.append(sentence.objects.get(id=stc_id))
return ret
class hit(models.Model):
mTurk_id = models.TextField()
status = models.IntegerField()
code = models.TextField(blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __unicode__(self):
return " ".join((self.mTurk_id, self.code, self.status));
|
[
"django.db.models.DateTimeField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"json.loads"
] |
[((597, 625), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (613, 625), False, 'from django.db import models\n'), ((646, 674), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (662, 674), False, 'from django.db import models\n'), ((740, 779), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (760, 779), False, 'from django.db import models\n'), ((797, 832), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (817, 832), False, 'from django.db import models\n'), ((2913, 2943), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""[]"""'}), "(default='[]')\n", (2929, 2943), False, 'from django.db import models\n'), ((2957, 2978), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2976, 2978), False, 'from django.db import models\n'), ((3024, 3054), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""[]"""'}), "(default='[]')\n", (3040, 3054), False, 'from django.db import models\n'), ((3072, 3111), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (3092, 3111), False, 'from django.db import models\n'), ((3129, 3164), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (3149, 3164), False, 'from django.db import models\n'), ((3480, 3498), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (3496, 3498), False, 'from django.db import models\n'), ((3512, 3533), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (3531, 3533), False, 'from django.db import models\n'), ((3545, 3573), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (3561, 3573), False, 'from django.db import models\n'), ((3591, 
3630), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (3611, 3630), False, 'from django.db import models\n'), ((3648, 3683), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (3668, 3683), False, 'from django.db import models\n'), ((3306, 3332), 'json.loads', 'json.loads', (['self.sentences'], {}), '(self.sentences)\n', (3316, 3332), False, 'import json\n')]
|
from dataclasses import dataclass
from debussy_concert.core.config.movement_parameters.base import MovementParametersBase
@dataclass(frozen=True)
class BigQueryDataPartitioning:
partitioning_type: str
gcs_partition_schema: str
partition_field: str
destination_partition: str
@dataclass(frozen=True)
class BigQueryTimeDataPartitioning(BigQueryDataPartitioning):
partition_granularity: str
@dataclass(frozen=True)
class TimePartitionedDataIngestionMovementParameters(MovementParametersBase):
extract_connection_id: str
data_partitioning: BigQueryTimeDataPartitioning
def __post_init__(self):
if isinstance(self.data_partitioning, BigQueryTimeDataPartitioning):
return
data_partitioning = BigQueryTimeDataPartitioning(**self.data_partitioning)
# hack for frozen dataclass https://stackoverflow.com/a/54119384
# overwriting data_partitioning with BigQueryTimeDataPartitioning instance
object.__setattr__(self, 'data_partitioning', data_partitioning)
|
[
"dataclasses.dataclass"
] |
[((125, 147), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (134, 147), False, 'from dataclasses import dataclass\n'), ((296, 318), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (305, 318), False, 'from dataclasses import dataclass\n'), ((415, 437), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (424, 437), False, 'from dataclasses import dataclass\n')]
|
import socket
def is_connected_to_internet(host="8.8.8.8", port=53, timeout=3):
"""
Host: 8.8.8.8 (google-public-dns-a.google.com)
OpenPort: 53/tcp
Service: domain (DNS/TCP)
"""
try:
socket.setdefaulttimeout(timeout)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((host, port))
return True
except socket.error:
return False
|
[
"socket.setdefaulttimeout",
"socket.socket"
] |
[((217, 250), 'socket.setdefaulttimeout', 'socket.setdefaulttimeout', (['timeout'], {}), '(timeout)\n', (241, 250), False, 'import socket\n'), ((264, 313), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (277, 313), False, 'import socket\n')]
|
import psycopg2
import psycopg2.extras
import os
url = os.getenv('DATABASE_URL')
def connection(url):
conn = psycopg2.connect(url)
return conn
def init_db():
con = connection(url)
return con
def create_tables():
conn = connection(url)
curr = conn.cursor()
queries = tables()
for query in queries:
curr.execute(query)
conn.commit()
def tables():
users_table = '''CREATE TABLE IF NOT EXISTS users(
id serial PRIMARY KEY,
firstname char(20) NOT NULL,
lastname char(20) NOT NULL,
email char(50) NOT NULL,
username char(20) NOT NULL,
phone char(14) NOT NULL,
isAdmin BOOLEAN DEFAULT False,
password char(<PASSWORD>) NOT NULL,
registered DATE NOT NULL DEFAULT CURRENT_DATE)
'''
incidents_table = '''CREATE TABLE IF NOT EXISTS incidents(
id serial PRIMARY KEY,
title char(100) NOT NULL,
incident char(50) NOT NULL,
location char(100) NOT NULL,
status char(30) DEFAULT 'Draft',
description char(200) NOT NULL,
images char(100) NOT NULL,
createdBy char(100) NOT NULL,
createdOn DATE NOT NULL DEFAULT CURRENT_DATE) '''
queries = [users_table, incidents_table]
return queries
def destroy_tables():
conn = connection(url)
curr = conn.cursor()
users_table = ''' DROP TABLE IF EXISTS users CASCADE'''
incidents_table = ''' DROP TABLE IF EXISTS incidents CASCADE'''
queries = [users_table, incidents_table]
for query in queries:
curr.execute(query)
conn.commit()
|
[
"os.getenv",
"psycopg2.connect"
] |
[((56, 81), 'os.getenv', 'os.getenv', (['"""DATABASE_URL"""'], {}), "('DATABASE_URL')\n", (65, 81), False, 'import os\n'), ((116, 137), 'psycopg2.connect', 'psycopg2.connect', (['url'], {}), '(url)\n', (132, 137), False, 'import psycopg2\n')]
|
import pandas as pd
import numpy as np
import pickle
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers import LSTM
from keras.layers import Flatten
from keras.layers import Dense
from keras.callbacks import EarlyStopping
# Main function of cnn training
def run_neural_network():
print(" == Enter into CNN training step ==")
np.random.seed(0)
x_train = pd.read_pickle("data/pickles/train_after_preprocess.pkl")
x_train = np.array(x_train['tweet'])
x_test = pd.read_pickle("data/pickles/test_after_preprocess.pkl")
x_test = np.array(x_test['tweet'])
y = np.array(int(2500000 / 2) * [0] + int(2500000 / 2) * [1])
print("Data loading finish!")
# Tokenization
tokenizer = Tokenizer(filters='')
tokenizer.fit_on_texts(x_train)
# Turn x_train into sequence form
sequence_train = tokenizer.texts_to_sequences(x_train)
# Turn x_test into sequence form
sequence_test = tokenizer.texts_to_sequences(x_test)
# Transform sequence_train into into a 2D Numpy array
sequence_train = sequence.pad_sequences(sequence_train, maxlen = 30)
# Transform sequence_test into into a 2D Numpy array
sequence_test = sequence.pad_sequences(sequence_test, maxlen = 30)
# Affect input dimension
input_dim = len(tokenizer.word_index) + 1
input_length = sequence_train.shape[1]
print("Tokenization finish!")
# Shuffle training dataset
new_index = np.arange(sequence_train.shape[0])
np.random.shuffle(new_index)
sequence_train = sequence_train[new_index]
y = y[new_index]
print("Data shuffling finish!")
earlyStopping = EarlyStopping(monitor = 'val_loss', patience = 2)
### Model 1 ###
print("Build model1!")
np.random.seed(1)
model = Sequential()
model.add(Embedding(input_dim, 50, input_length = input_length))
model.add(Conv1D(padding = "same", kernel_size = 3, filters = 32, activation = "relu"))
model.add(MaxPooling1D(pool_size = 2))
model.add(Flatten())
model.add(Dense(250, activation = 'relu'))
model.add(Dense(1, activation = 'sigmoid'))
model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
print("Fit model1!")
model.fit(sequence_train, y, validation_split = 0.1, epochs = 10, batch_size = 128, verbose = 1, shuffle = True, callbacks = [earlyStopping])
print("Generate prediction!")
train_model1 = model.predict(sequence_train, batch_size = 128)
pickle.dump(train_model1, open('data/xgboost/train_model1.txt', 'wb'))
test_model1 = model.predict(sequence_test)
pickle.dump(test_model1, open('data/xgboost/test_model1.txt', 'wb'))
print("Model1 finished!")
### Model 2 ###
print("Build model2!")
np.random.seed(2)
model = Sequential()
model.add(Embedding(input_dim, 50, input_length = input_length))
model.add(LSTM(100, recurrent_dropout = 0.2, dropout = 0.2))
model.add(Dense(1, activation = 'sigmoid'))
model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
print("Fit model2!")
model.fit(sequence_train, y, validation_split = 0.1, epochs = 10, batch_size = 128, verbose = 1, shuffle = True, callbacks = [earlyStopping])
print("Generate prediction!")
train_model2 = model.predict(sequence_train, batch_size = 128)
pickle.dump(train_model2, open('data/xgboost/train_model2.txt', 'wb'))
test_model2 = model.predict(sequence_test)
pickle.dump(test_model2, open('data/xgboost/test_model2.txt', 'wb'))
print("Model2 finished!")
### Model 3 ###
print("Build model1!")
np.random.seed(3)
model = Sequential()
model.add(Embedding(input_dim, 50, input_length = input_length))
model.add(Conv1D(padding = "same", kernel_size = 3, filters = 32, activation = "relu"))
model.add(MaxPooling1D(pool_size = 2))
model.add(LSTM(100))
model.add(Dense(1, activation = 'sigmoid'))
model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
print("Fit model3!")
model.fit(sequence_train, y, validation_split = 0.1, epochs = 10, batch_size = 128, verbose = 1, shuffle = True, callbacks = [earlyStopping])
print("Generate prediction!")
train_model3= model.predict(sequence_train, batch_size = 128)
pickle.dump(train_model3, open('data/xgboost/train_model3.txt', 'wb'))
test_model3 = model.predict(sequence_test)
pickle.dump(test_model3, open('data/xgboost/test_model3.txt', 'wb'))
print("Model3 finished!")
if __name__ == "__main__":
run_neural_network()
|
[
"keras.layers.embeddings.Embedding",
"numpy.random.seed",
"keras.preprocessing.sequence.pad_sequences",
"keras.layers.LSTM",
"keras.layers.Flatten",
"keras.layers.convolutional.MaxPooling1D",
"keras.preprocessing.text.Tokenizer",
"numpy.array",
"numpy.arange",
"keras.callbacks.EarlyStopping",
"keras.layers.convolutional.Conv1D",
"pandas.read_pickle",
"keras.models.Sequential",
"keras.layers.Dense",
"numpy.random.shuffle"
] |
[((572, 589), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (586, 589), True, 'import numpy as np\n'), ((605, 662), 'pandas.read_pickle', 'pd.read_pickle', (['"""data/pickles/train_after_preprocess.pkl"""'], {}), "('data/pickles/train_after_preprocess.pkl')\n", (619, 662), True, 'import pandas as pd\n'), ((677, 703), 'numpy.array', 'np.array', (["x_train['tweet']"], {}), "(x_train['tweet'])\n", (685, 703), True, 'import numpy as np\n'), ((718, 774), 'pandas.read_pickle', 'pd.read_pickle', (['"""data/pickles/test_after_preprocess.pkl"""'], {}), "('data/pickles/test_after_preprocess.pkl')\n", (732, 774), True, 'import pandas as pd\n'), ((788, 813), 'numpy.array', 'np.array', (["x_test['tweet']"], {}), "(x_test['tweet'])\n", (796, 813), True, 'import numpy as np\n'), ((951, 972), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'filters': '""""""'}), "(filters='')\n", (960, 972), False, 'from keras.preprocessing.text import Tokenizer\n'), ((1281, 1330), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['sequence_train'], {'maxlen': '(30)'}), '(sequence_train, maxlen=30)\n', (1303, 1330), False, 'from keras.preprocessing import sequence\n'), ((1410, 1458), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['sequence_test'], {'maxlen': '(30)'}), '(sequence_test, maxlen=30)\n', (1432, 1458), False, 'from keras.preprocessing import sequence\n'), ((1662, 1696), 'numpy.arange', 'np.arange', (['sequence_train.shape[0]'], {}), '(sequence_train.shape[0])\n', (1671, 1696), True, 'import numpy as np\n'), ((1701, 1729), 'numpy.random.shuffle', 'np.random.shuffle', (['new_index'], {}), '(new_index)\n', (1718, 1729), True, 'import numpy as np\n'), ((1855, 1900), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(2)'}), "(monitor='val_loss', patience=2)\n", (1868, 1900), False, 'from keras.callbacks import EarlyStopping\n'), ((1958, 1975), 'numpy.random.seed', 
'np.random.seed', (['(1)'], {}), '(1)\n', (1972, 1975), True, 'import numpy as np\n'), ((1988, 2000), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1998, 2000), False, 'from keras.models import Sequential\n'), ((2969, 2986), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (2983, 2986), True, 'import numpy as np\n'), ((2999, 3011), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3009, 3011), False, 'from keras.models import Sequential\n'), ((3838, 3855), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (3852, 3855), True, 'import numpy as np\n'), ((3868, 3880), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3878, 3880), False, 'from keras.models import Sequential\n'), ((2015, 2066), 'keras.layers.embeddings.Embedding', 'Embedding', (['input_dim', '(50)'], {'input_length': 'input_length'}), '(input_dim, 50, input_length=input_length)\n', (2024, 2066), False, 'from keras.layers.embeddings import Embedding\n'), ((2084, 2152), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'padding': '"""same"""', 'kernel_size': '(3)', 'filters': '(32)', 'activation': '"""relu"""'}), "(padding='same', kernel_size=3, filters=32, activation='relu')\n", (2090, 2152), False, 'from keras.layers.convolutional import Conv1D\n'), ((2176, 2201), 'keras.layers.convolutional.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (2188, 2201), False, 'from keras.layers.convolutional import MaxPooling1D\n'), ((2219, 2228), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2226, 2228), False, 'from keras.layers import Flatten\n'), ((2244, 2273), 'keras.layers.Dense', 'Dense', (['(250)'], {'activation': '"""relu"""'}), "(250, activation='relu')\n", (2249, 2273), False, 'from keras.layers import Dense\n'), ((2291, 2321), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (2296, 2321), False, 'from keras.layers import Dense\n'), ((3026, 3077), 
'keras.layers.embeddings.Embedding', 'Embedding', (['input_dim', '(50)'], {'input_length': 'input_length'}), '(input_dim, 50, input_length=input_length)\n', (3035, 3077), False, 'from keras.layers.embeddings import Embedding\n'), ((3095, 3140), 'keras.layers.LSTM', 'LSTM', (['(100)'], {'recurrent_dropout': '(0.2)', 'dropout': '(0.2)'}), '(100, recurrent_dropout=0.2, dropout=0.2)\n', (3099, 3140), False, 'from keras.layers import LSTM\n'), ((3160, 3190), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (3165, 3190), False, 'from keras.layers import Dense\n'), ((3895, 3946), 'keras.layers.embeddings.Embedding', 'Embedding', (['input_dim', '(50)'], {'input_length': 'input_length'}), '(input_dim, 50, input_length=input_length)\n', (3904, 3946), False, 'from keras.layers.embeddings import Embedding\n'), ((3964, 4032), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'padding': '"""same"""', 'kernel_size': '(3)', 'filters': '(32)', 'activation': '"""relu"""'}), "(padding='same', kernel_size=3, filters=32, activation='relu')\n", (3970, 4032), False, 'from keras.layers.convolutional import Conv1D\n'), ((4056, 4081), 'keras.layers.convolutional.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (4068, 4081), False, 'from keras.layers.convolutional import MaxPooling1D\n'), ((4099, 4108), 'keras.layers.LSTM', 'LSTM', (['(100)'], {}), '(100)\n', (4103, 4108), False, 'from keras.layers import LSTM\n'), ((4124, 4154), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (4129, 4154), False, 'from keras.layers import Dense\n')]
|
from flask import request
from flask_restful import Resource
from models.category import CategoryModel
from schemas.category import CategorySchema
# Module-level marshmallow schemas shared by the resources below:
# one serializes a single category, the other serializes lists.
category_schema = CategorySchema()
category_list_schema = CategorySchema(many=True)
class Category(Resource):
    """Read-only resource for fetching a single category by its name."""

    @classmethod
    def get(cls, name: str):
        """Return the serialized category and 200, or a 404 payload."""
        found = CategoryModel.find_by_name(name)
        if not found:
            return {"message": "Category not found"}, 404
        return category_schema.dump(found), 200
class CategoryList(Resource):
    """Resource returning the list of all categories."""

    @classmethod
    def get(cls):
        """Return every category, wrapped in a ``categories`` key.

        Query params:
            page: 1-based page indicator; missing or "0" falls back to 1,
                  any other value is incremented by one (existing client
                  convention, preserved).
            size: page size, defaulting to 10.
        """
        raw_page = request.args.get("page")
        # BUG FIX: request.args values are strings, so the original
        # comparison `raw_page == 0` could never be true; compare against
        # the string "0" instead.
        if raw_page is None or raw_page == "0":
            page = 1
        else:
            page = int(raw_page) + 1
        raw_size = request.args.get("size")
        size = 10 if raw_size is None else int(raw_size)
        # NOTE(review): page/size are computed but find_all() ignores them --
        # pagination appears unimplemented; confirm before wiring through.
        return {"categories": category_list_schema.dump(CategoryModel.find_all())}, 200
|
[
"flask.request.args.get",
"schemas.category.CategorySchema",
"models.category.CategoryModel.find_by_name",
"models.category.CategoryModel.find_all"
] |
[((167, 183), 'schemas.category.CategorySchema', 'CategorySchema', ([], {}), '()\n', (181, 183), False, 'from schemas.category import CategorySchema\n'), ((207, 232), 'schemas.category.CategorySchema', 'CategorySchema', ([], {'many': '(True)'}), '(many=True)\n', (221, 232), False, 'from schemas.category import CategorySchema\n'), ((327, 359), 'models.category.CategoryModel.find_by_name', 'CategoryModel.find_by_name', (['name'], {}), '(name)\n', (353, 359), False, 'from models.category import CategoryModel\n'), ((720, 744), 'flask.request.args.get', 'request.args.get', (['"""size"""'], {}), "('size')\n", (736, 744), False, 'from flask import request\n'), ((762, 786), 'flask.request.args.get', 'request.args.get', (['"""size"""'], {}), "('size')\n", (778, 786), False, 'from flask import request\n'), ((579, 603), 'flask.request.args.get', 'request.args.get', (['"""page"""'], {}), "('page')\n", (595, 603), False, 'from flask import request\n'), ((615, 639), 'flask.request.args.get', 'request.args.get', (['"""page"""'], {}), "('page')\n", (631, 639), False, 'from flask import request\n'), ((669, 693), 'flask.request.args.get', 'request.args.get', (['"""page"""'], {}), "('page')\n", (685, 693), False, 'from flask import request\n'), ((844, 868), 'models.category.CategoryModel.find_all', 'CategoryModel.find_all', ([], {}), '()\n', (866, 868), False, 'from models.category import CategoryModel\n')]
|
# -*- coding: utf-8 -*-
import datetime
import json
import logging
import subprocess
from email.mime.text import MIMEText
from smtplib import SMTP
from smtplib import SMTPException
from socket import error
from jira.client import JIRA
from jira.exceptions import JIRAError
from staticconf.loader import yaml_loader
from util import EAException
from util import pretty_ts
def get_counts_string(match):
    """ Looks for keys matching top_events_X in matches, generated by get_top_counts, and
    returns a readable string about the various counts. """
    message = ''
    for key, counts in match.items():
        if key.startswith('top_events_'):
            # Strip the 'top_events_' prefix to get the field name heading.
            message += '%s:\n' % (key[11:])
            # Sort terms by descending count. FIX: using sorted() instead of
            # calling .sort() on dict.items() keeps this working on Python 3,
            # where items() returns a view with no sort() method.
            top_events = sorted(counts.items(), key=lambda x: x[1], reverse=True)
            for term, count in top_events:
                message += '%s: %s\n' % (term, count)
            message += '\n'
    return message
def basic_match_string(rule, match):
    """ Returns a string for the given rule and match.

    Layout: rule name, optional formatted alert_text, the rule type's own
    match string, top-count summaries, then the match's fields (sorted).
    Sections are separated by blank lines. """
    text = rule['name'] + '\n\n'

    # Add custom alert text, formatted with values pulled from the match.
    alert_text = rule.get('alert_text', '')
    if 'alert_text_args' in rule:
        alert_text_args = rule.get('alert_text_args')
        alert_text_values = [match.get(arg, '<MISSING VALUE>') for arg in alert_text_args]
        alert_text = alert_text.format(*alert_text_values)
    text += alert_text
    while text[-2:] != '\n\n':
        text += '\n'

    if rule.get('alert_text_type') != 'alert_text_only':
        # Add rule text
        text += rule['type'].get_match_str(match)
        while text[-2:] != '\n\n':
            text += '\n'

        # Add top_counts
        if rule.get('top_count_keys'):
            text += get_counts_string(match)

        if rule.get('alert_text_type') != 'exclude_fields':
            # Add match items, sorted by key for deterministic output.
            # BUG FIX: the original sorted a copy of the items and then
            # iterated the unsorted dict anyway, so the sort had no effect
            # (and list.sort() on an items() view breaks on Python 3).
            match_items = sorted(match.items(), key=lambda x: x[0])
            text += '\n'.join(['%s: %s' % (key, val) for key, val in match_items if not key.startswith('top_events_')])
    return text
class Alerter(object):
    """ Base class for types of alerts.

    :param rule: The rule configuration.
    """
    required_options = frozenset([])

    def __init__(self, rule):
        self.rule = rule
        # Populated externally when alerters are chained (e.g. so an email
        # alerter can reference a JIRA ticket opened before it).
        self.pipeline = None

    def alert(self, match):
        """ Send an alert. Match is a dictionary of information about the alert.

        :param match: A dictionary of relevant information to the alert.
        """
        raise NotImplementedError()

    def get_info(self):
        """ Returns a dictionary of data related to this alert. At minimum, this should contain
        a field type corresponding to the type of Alerter. """
        return {'type': 'Unknown'}

    def create_title(self, matches):
        """ Creates custom alert title to be used, e.g. as an e-mail subject or JIRA issue summary.

        :param matches: A list of dictionaries of relevant information to the alert.
        """
        # Prefer a user-supplied subject template when one is configured.
        if 'alert_subject' not in self.rule:
            return self.create_default_title(matches)
        return self.create_custom_title(matches)

    def create_custom_title(self, matches):
        """ Fill the configured subject template from fields of the first match. """
        subject_template = self.rule['alert_subject']
        if 'alert_subject_args' not in self.rule:
            return subject_template
        arg_names = self.rule['alert_subject_args']
        values = [matches[0].get(name, '<MISSING VALUE>') for name in arg_names]
        return subject_template.format(*values)

    def create_default_title(self, matches):
        """ Fallback title: just the rule's name. """
        return self.rule['name']
class DebugAlerter(Alerter):
    """ The debug alerter uses a Python logger (by default, alerting to terminal). """

    def alert(self, matches):
        logging.info('%d match(es)' % (len(matches)))
        query_key = self.rule.get('query_key', None)
        for match in matches:
            timestamp_value = match[self.rule['timestamp_field']]
            # Include the query_key value in the log line when the match has one.
            if query_key in match:
                logging.info('%s matched %s at %s' % (match[query_key], self.rule['name'], timestamp_value))
            else:
                logging.info('%s at %s' % (self.rule['name'], timestamp_value))
            logging.info(basic_match_string(self.rule, match))

    def get_info(self):
        return {'type': 'debug'}
class EmailAlerter(Alerter):
    """ Sends an email alert """
    required_options = frozenset(['email'])

    def __init__(self, *args):
        super(EmailAlerter, self).__init__(*args)
        self.smtp_host = self.rule.get('smtp_host', 'localhost')
        self.from_addr = self.rule.get('from_addr', 'ElastAlert')
        # Normalize the recipient setting so downstream code can assume a list.
        if isinstance(self.rule['email'], str):
            self.rule['email'] = [self.rule['email']]

    def alert(self, matches):
        # Build the body; aggregated alerts are separated with a dashed line.
        sections = []
        for match in matches:
            section = basic_match_string(self.rule, match)
            if len(matches) > 1:
                section += '\n----------------------------------------\n'
            sections.append(section)
        body = ''.join(sections)

        # Append a link to the JIRA ticket if an earlier alerter opened one.
        if self.pipeline is not None and 'jira_ticket' in self.pipeline:
            ticket_url = '%s/browse/%s' % (self.rule['jira_server'], self.pipeline['jira_ticket'])
            body += '\nJIRA ticket: %s' % (ticket_url)

        email_msg = MIMEText(body)
        email_msg['Subject'] = self.create_title(matches)
        email_msg['To'] = ', '.join(self.rule['email'])
        email_msg['From'] = self.from_addr
        email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To'])
        try:
            self.smtp = SMTP(self.smtp_host)
        except (SMTPException, error) as e:
            raise EAException("Error connecting to SMTP host: %s" % (e))
        self.smtp.sendmail(self.from_addr, self.rule['email'], email_msg.as_string())
        self.smtp.close()
        logging.info("Sent email to %s" % (self.rule['email']))

    def create_default_title(self, matches):
        """ Subject line: rule name, plus the query_key value when available. """
        subject = 'ElastAlert: %s' % (self.rule['name'])
        if 'query_key' in self.rule:
            key_value = matches[0].get(self.rule['query_key'])
            if key_value:
                subject += ' - %s' % (key_value)
        return subject

    def get_info(self):
        return {'type': 'email',
                'recipients': self.rule['email']}
class JiraAlerter(Alerter):
    """ Creates a Jira ticket for each alert.

    Connects to the JIRA server at construction time using credentials from
    the configured account file. Depending on ``jira_bump_tickets``, an
    alert either opens a new ticket or comments on a recent matching one.
    """
    required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype'])

    def __init__(self, rule):
        super(JiraAlerter, self).__init__(rule)
        # Required connection / ticket settings.
        self.server = self.rule['jira_server']
        self.get_jira_account(self.rule['jira_account_file'])
        self.project = self.rule['jira_project']
        self.issue_type = self.rule['jira_issuetype']
        # Optional ticket metadata.
        self.component = self.rule.get('jira_component')
        self.label = self.rule.get('jira_label')
        self.assignee = self.rule.get('jira_assignee')
        # Only tickets newer than max_age (days) are considered for bumping.
        self.max_age = self.rule.get('jira_max_age', 30)
        self.bump_tickets = self.rule.get('jira_bump_tickets', False)
        # Base keyword arguments later passed to JIRA.create_issue().
        self.jira_args = {'project': {'key': self.project},
                          'issuetype': {'name': self.issue_type}}
        if self.component:
            self.jira_args['components'] = [{'name': self.component}]
        if self.label:
            self.jira_args['labels'] = [self.label]
        if self.assignee:
            self.jira_args['assignee'] = {'name': self.assignee}
        try:
            self.client = JIRA(self.server, basic_auth=(self.user, self.password))
        except JIRAError as e:
            # JIRAError may contain HTML, pass along only first 1024 chars
            raise EAException("Error connecting to JIRA: %s" % (str(e)[:1024]))

    def set_assignee(self, assignee):
        """ Sets (or, with a falsy value, clears) the assignee for future tickets. """
        self.assignee = assignee
        if assignee:
            self.jira_args['assignee'] = {'name': assignee}
        elif 'assignee' in self.jira_args:
            self.jira_args.pop('assignee')

    def get_jira_account(self, account_file):
        """ Gets the username and password from a jira account file.

        :param account_file: Name of the file which contains user and password information.
        :raises EAException: If either field is missing from the file.
        """
        account_conf = yaml_loader(account_file)
        if 'user' not in account_conf or 'password' not in account_conf:
            raise EAException('Jira account file must have user and password fields')
        self.user = account_conf['user']
        self.password = account_conf['password']

    def find_existing_ticket(self, matches):
        """ Searches JIRA for an open ticket created within max_age days whose
        summary matches this alert's title; returns it, or None. """
        # Default title, get stripped search version
        if 'alert_subject' not in self.rule:
            title = self.create_default_title(matches, True)
        else:
            title = self.create_title(matches)

        # This is necessary for the search to work. Other special characters and dashes
        # directly adjacent to words appear to be ok
        title = title.replace(' - ', ' ')
        date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y/%m/%d')
        jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date)
        try:
            issues = self.client.search_issues(jql)
        except JIRAError as e:
            # Search failures are logged, not raised: falling back to
            # creating a fresh ticket is an acceptable outcome.
            logging.exception("Error while searching for JIRA ticket using jql '%s': %s" % (jql, e))
            return None

        if len(issues):
            return issues[0]

    def comment_on_ticket(self, ticket, match):
        """ Adds a "triggered again" comment with the match details to an existing ticket. """
        text = basic_match_string(self.rule, match)
        timestamp = pretty_ts(match[self.rule['timestamp_field']])
        comment = "This alert was triggered again at %s\n%s" % (timestamp, text)
        self.client.add_comment(ticket, comment)

    def alert(self, matches):
        """ Opens a JIRA ticket for the matches, or comments on a recent
        existing one when jira_bump_tickets is enabled. """
        title = self.create_title(matches)
        if self.bump_tickets:
            ticket = self.find_existing_ticket(matches)
            if ticket:
                logging.info('Commenting on existing ticket %s' % (ticket.key))
                for match in matches:
                    self.comment_on_ticket(ticket, match)
                return

        # Build the description; aggregated matches are separated by dashes.
        description = ''
        for match in matches:
            description += basic_match_string(self.rule, match)
            if len(matches) > 1:
                description += '\n----------------------------------------\n'

        self.jira_args['summary'] = title
        self.jira_args['description'] = description

        try:
            self.issue = self.client.create_issue(**self.jira_args)
        except JIRAError as e:
            raise EAException("Error creating JIRA ticket: %s" % (e))
        logging.info("Opened Jira ticket: %s" % (self.issue))

        # Expose the ticket to later alerters in the pipeline (e.g. email).
        if self.pipeline is not None:
            self.pipeline['jira_ticket'] = self.issue

    def create_default_title(self, matches, for_search=False):
        """ Builds the ticket summary; for_search omits timestamp/count
        suffixes so the title can be used in a JQL summary search. """
        # If there is a query_key, use that in the title
        if 'query_key' in self.rule and self.rule['query_key'] in matches[0]:
            title = 'ElastAlert: %s matched %s' % (matches[0][self.rule['query_key']], self.rule['name'])
        else:
            title = 'ElastAlert: %s' % (self.rule['name'])

        if for_search:
            return title

        title += ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time')))

        # Add count for spikes
        count = matches[0].get('spike_count')
        if count:
            title += ' - %s+ events' % (count)

        return title

    def get_info(self):
        return {'type': 'jira'}
class CommandAlerter(Alerter):
    """ Runs an arbitrary local command for each match, optionally piping the
    match to it as JSON on stdin. """
    required_options = set(['command'])

    def __init__(self, *args):
        super(CommandAlerter, self).__init__(*args)
        # A plain command string containing '%' is interpolated with match
        # data, hence the shell-injection warning; it is then wrapped in a
        # list for Popen. NOTE: `basestring` implies Python 2 is targeted.
        if isinstance(self.rule['command'], basestring) and '%' in self.rule['command']:
            logging.warning('Warning! You could be vulnerable to shell injection!')
            self.rule['command'] = [self.rule['command']]

    def alert(self, matches):
        for match in matches:
            # Format the command and arguments
            try:
                command = [command_arg % match for command_arg in self.rule['command']]
                # Remembered for get_info() reporting.
                self.last_command = command
            except KeyError as e:
                raise EAException("Error formatting command: %s" % (e))

            # Run command and pipe data
            try:
                subp = subprocess.Popen(command, stdin=subprocess.PIPE)
                if self.rule.get('pipe_match_json'):
                    match_json = json.dumps(match)
                    stdout, stderr = subp.communicate(input=match_json)
                # NOTE(review): when pipe_match_json is not set the child
                # process is never waited on -- confirm whether leaving it
                # running in the background is intended.
            except OSError as e:
                raise EAException("Error while running command %s: %s" % (' '.join(command), e))

    def get_info(self):
        return {'type': 'command',
                'command': ' '.join(self.last_command)}
|
[
"logging.exception",
"subprocess.Popen",
"smtplib.SMTP",
"email.mime.text.MIMEText",
"util.EAException",
"logging.warning",
"json.dumps",
"util.pretty_ts",
"jira.client.JIRA",
"logging.info",
"datetime.timedelta",
"staticconf.loader.yaml_loader",
"datetime.datetime.now"
] |
[((5357, 5371), 'email.mime.text.MIMEText', 'MIMEText', (['body'], {}), '(body)\n', (5365, 5371), False, 'from email.mime.text import MIMEText\n'), ((5907, 5960), 'logging.info', 'logging.info', (["('Sent email to %s' % self.rule['email'])"], {}), "('Sent email to %s' % self.rule['email'])\n", (5919, 5960), False, 'import logging\n'), ((8353, 8378), 'staticconf.loader.yaml_loader', 'yaml_loader', (['account_file'], {}), '(account_file)\n', (8364, 8378), False, 'from staticconf.loader import yaml_loader\n'), ((9668, 9714), 'util.pretty_ts', 'pretty_ts', (["match[self.rule['timestamp_field']]"], {}), "(match[self.rule['timestamp_field']])\n", (9677, 9714), False, 'from util import pretty_ts\n'), ((10745, 10796), 'logging.info', 'logging.info', (["('Opened Jira ticket: %s' % self.issue)"], {}), "('Opened Jira ticket: %s' % self.issue)\n", (10757, 10796), False, 'import logging\n'), ((5648, 5668), 'smtplib.SMTP', 'SMTP', (['self.smtp_host'], {}), '(self.smtp_host)\n', (5652, 5668), False, 'from smtplib import SMTP\n'), ((7627, 7683), 'jira.client.JIRA', 'JIRA', (['self.server'], {'basic_auth': '(self.user, self.password)'}), '(self.server, basic_auth=(self.user, self.password))\n', (7631, 7683), False, 'from jira.client import JIRA\n'), ((8470, 8537), 'util.EAException', 'EAException', (['"""Jira account file must have user and password fields"""'], {}), "('Jira account file must have user and password fields')\n", (8481, 8537), False, 'from util import EAException\n'), ((11914, 11985), 'logging.warning', 'logging.warning', (['"""Warning! You could be vulnerable to shell injection!"""'], {}), "('Warning! 
You could be vulnerable to shell injection!')\n", (11929, 11985), False, 'import logging\n'), ((3957, 4067), 'logging.info', 'logging.info', (["('%s matched %s at %s' % (match[qk], self.rule['name'], match[self.rule[\n 'timestamp_field']]))"], {}), "('%s matched %s at %s' % (match[qk], self.rule['name'], match[\n self.rule['timestamp_field']]))\n", (3969, 4067), False, 'import logging\n'), ((4097, 4185), 'logging.info', 'logging.info', (["('%s at %s' % (self.rule['name'], match[self.rule['timestamp_field']]))"], {}), "('%s at %s' % (self.rule['name'], match[self.rule[\n 'timestamp_field']]))\n", (4109, 4185), False, 'import logging\n'), ((5731, 5783), 'util.EAException', 'EAException', (["('Error connecting to SMTP host: %s' % e)"], {}), "('Error connecting to SMTP host: %s' % e)\n", (5742, 5783), False, 'from util import EAException\n'), ((9380, 9473), 'logging.exception', 'logging.exception', (['("Error while searching for JIRA ticket using jql \'%s\': %s" % (jql, e))'], {}), '(\n "Error while searching for JIRA ticket using jql \'%s\': %s" % (jql, e))\n', (9397, 9473), False, 'import logging\n'), ((10045, 10106), 'logging.info', 'logging.info', (["('Commenting on existing ticket %s' % ticket.key)"], {}), "('Commenting on existing ticket %s' % ticket.key)\n", (10057, 10106), False, 'import logging\n'), ((10685, 10734), 'util.EAException', 'EAException', (["('Error creating JIRA ticket: %s' % e)"], {}), "('Error creating JIRA ticket: %s' % e)\n", (10696, 10734), False, 'from util import EAException\n'), ((12488, 12536), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdin': 'subprocess.PIPE'}), '(command, stdin=subprocess.PIPE)\n', (12504, 12536), False, 'import subprocess\n'), ((9092, 9115), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9113, 9115), False, 'import datetime\n'), ((9118, 9155), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'self.max_age'}), '(days=self.max_age)\n', (9136, 9155), False, 'import 
datetime\n'), ((12357, 12404), 'util.EAException', 'EAException', (["('Error formatting command: %s' % e)"], {}), "('Error formatting command: %s' % e)\n", (12368, 12404), False, 'from util import EAException\n'), ((12624, 12641), 'json.dumps', 'json.dumps', (['match'], {}), '(match)\n', (12634, 12641), False, 'import json\n')]
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\restaurants\restaurant_commands.py
# Compiled at: 2018-08-28 03:56:41
# Size of source mod 2**32: 29007 bytes
from protocolbuffers import Restaurant_pb2
from event_testing import test_events
from google.protobuf import text_format
from restaurants import restaurant_utils
from restaurants.chefs_choice import ChefsChoice
from restaurants.restaurant_diner_situation import DinerSubSituationState, RestaurantDinerSubSituation, RestaurantDinerBackGroundSituation
from restaurants.restaurant_order import OrderStatus, OrderRecommendationState, GroupOrder
from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director
from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target
from sims import sim
from sims4.protocol_buffer_utils import has_field
import services, sims4.commands
@sims4.commands.Command('restaurant.order_food', command_type=(sims4.commands.CommandType.Live))
def order_food(recipe_type: TunableInstanceParam(sims4.resources.Types.RECIPE), opt_sim: OptionalTargetParam=None, _connection=None):
    """ Order a specific recipe for a Sim at the current restaurant venue.

    Validates the recipe, Sim, and zone director, places the order, and
    charges the Sim's dining group; emits automation output either way.
    """
    if recipe_type is None:
        sims4.commands.output('Recipe is None', _connection)
        sims4.commands.automation_output('RestaurantOrderFood; Status:Failed', _connection)
        return False
    sim = get_optional_target(opt_sim, _connection)
    if sim is None:
        sims4.commands.output("Sim {} doesn't exist".format(opt_sim), _connection)
        sims4.commands.automation_output('RestaurantOrderFood; Status:Failed', _connection)
        return False
    zone_director = get_restaurant_zone_director()
    if zone_director is None:
        sims4.commands.output('Current venue is not restaurant', _connection)
        sims4.commands.automation_output('RestaurantOrderFood; Status:Failed', _connection)
        return False
    zone_director.make_one_order(sim, recipe_type)
    groups = zone_director.get_dining_groups_by_sim(sim)
    if groups is None:
        sims4.commands.output('Sim {} is not in dining group'.format(opt_sim), _connection)
        sims4.commands.automation_output('RestaurantOrderFood; Status:Failed', _connection)
        # BUG FIX: the original fell through here and crashed calling
        # groups.pop() on None; bail out instead.
        return False
    group = groups.pop()
    group.hold_ordered_cost(recipe_type.restaurant_base_price)
    sims4.commands.automation_output('RestaurantOrderFood; Status:Success', _connection)
    return True
@sims4.commands.Command('restaurant.show_menu', command_type=(sims4.commands.CommandType.Live))
def show_menu(opt_sim: OptionalTargetParam=None, _connection=None):
    """ Show the restaurant menu UI for the specified Sim. """
    target_sim = get_optional_target(opt_sim, _connection)
    if target_sim is None:
        sims4.commands.output("Sim {} doesn't exist".format(opt_sim), _connection)
        return False
    director = get_restaurant_zone_director()
    if director is None:
        sims4.commands.output('Current venue is not restaurant', _connection)
        return False
    director.show_menu(target_sim)
@sims4.commands.Command('restaurant.show_menu_for_chef', command_type=(sims4.commands.CommandType.Live))
def show_menu_for_chef(opt_sim: OptionalTargetParam=None, chef_sim: OptionalTargetParam=None, _connection=None):
    """ Show the menu of a particular chef's situation to the given Sim. """
    diner_sim = get_optional_target(opt_sim, _connection)
    if diner_sim is None:
        sims4.commands.output("Sim {} doesn't exist".format(opt_sim), _connection)
        return False
    resolved_chef = get_optional_target(chef_sim, _connection)
    if resolved_chef is None:
        # Mirrors the original message, which reports the resolved (None) value.
        sims4.commands.output("Chef {} doesn't exist.".format(resolved_chef), _connection)
        return False
    situation = restaurant_utils.get_chef_situation(chef_sim=resolved_chef)
    if situation is None:
        sims4.commands.output("Couldn't find a Chef Situation in this zone.")
        return False
    situation.show_menu(diner_sim)
@sims4.commands.Command('restaurant.show_recommendation_menu_for_sim', command_type=(sims4.commands.CommandType.Live))
def show_recommendation_menu_for_sim(opt_sim: OptionalTargetParam=None, owner_sim: OptionalTargetParam=None, _connection=None):
    """ Show the menu in recommendation mode for the given Sim.

    NOTE(review): owner_sim is accepted but never used here -- confirm
    whether it should influence the menu shown.
    """
    target_sim = get_optional_target(opt_sim, _connection)
    if target_sim is None:
        sims4.commands.output("Sim {} doesn't exist".format(opt_sim), _connection)
        return False
    director = get_restaurant_zone_director()
    if director is None:
        sims4.commands.output('Current venue is not restaurant', _connection)
        return False
    director.show_menu(target_sim, is_recommendation=True)
@sims4.commands.Command('restaurant.claim_table', command_type=(sims4.commands.CommandType.Live))
def claim_table(opt_sim: OptionalTargetParam=None, opt_table: OptionalTargetParam=None, _connection=None):
    """ Have the given Sim claim a dining table at the restaurant. """
    claiming_sim = get_optional_target(opt_sim, _connection)
    if claiming_sim is None:
        sims4.commands.output("Sim {} doesn't exist".format(opt_sim), _connection)
        return False
    # The table is optional; a None target is passed through unchanged.
    target_table = get_optional_target(opt_table, _connection)
    director = get_restaurant_zone_director()
    if director is None:
        sims4.commands.output('Current venue is not restaurant', _connection)
        return False
    director.claim_table(claiming_sim, target_table)
@sims4.commands.Command('restaurant.order_for_table', command_type=(sims4.commands.CommandType.Live))
def order_for_table(sim_orders: str, _connection=None):
    """ Place orders for an entire table from a serialized SimOrders proto. """
    director = get_restaurant_zone_director()
    if director is None:
        sims4.commands.output('Current venue is not restaurant', _connection)
        return False
    proto = Restaurant_pb2.SimOrders()
    text_format.Merge(sim_orders, proto)
    orders = [(entry.sim_id, entry.recipe_id) for entry in proto.sim_orders]
    # Resolve the first ordering Sim to find the table's dining group.
    first_sim = services.object_manager().get(orders[0][0])
    if first_sim is None:
        sims4.commands.output("Trying to order for a Sim that isn't on the lot", _connection)
        return False
    director.order_for_table(orders)
    dining_groups = director.get_dining_groups_by_sim(first_sim)
    dining_group = dining_groups.pop()
    # Charge the group up front; default to 0 when the proto omits the
    # optional meal_cost field.
    dining_group.hold_ordered_cost(proto.meal_cost if has_field(proto, 'meal_cost') else 0)
    return True
@sims4.commands.Command('restaurant.comp_drinks_for_group', command_type=(sims4.commands.CommandType.Live))
def comp_drinks_for_group(opt_sim: OptionalTargetParam=None, _connection=None):
    """ Order a complimentary drink course for the dining group containing the given Sim. """
    director = get_restaurant_zone_director()
    if director is None:
        sims4.commands.output('Current venue is not restaurant', _connection)
        return False
    target_sim = get_optional_target(opt_sim, _connection)
    if target_sim is None:
        sims4.commands.output("Sim {} doesn't exist".format(opt_sim), _connection)
        return False
    dining_group = director.get_dining_groups_by_sim(target_sim).pop()
    dining_group.order_course_for_group((ChefsChoice.DRINK_COURSE), complimentary=True)
    return True
@sims4.commands.Command('restaurant.comp_desserts_for_group', command_type=(sims4.commands.CommandType.Live))
def comp_desserts_for_group(opt_sim: OptionalTargetParam=None, _connection=None):
    """ Order a complimentary dessert course for the dining group containing the given Sim. """
    director = get_restaurant_zone_director()
    if director is None:
        sims4.commands.output('Current venue is not restaurant', _connection)
        return False
    target_sim = get_optional_target(opt_sim, _connection)
    if target_sim is None:
        sims4.commands.output("Sim {} doesn't exist".format(opt_sim), _connection)
        return False
    dining_group = director.get_dining_groups_by_sim(target_sim).pop()
    dining_group.order_course_for_group((ChefsChoice.DESSERT_COURSE), complimentary=True)
    return True
@sims4.commands.Command('restaurant.recommend_order_for_table', command_type=(sims4.commands.CommandType.Live))
def recommend_order_for_table(sim_orders: str, _connection=None):
    """ Propose (not finalize) orders for every Sim at a table.

    Orders are parsed from a serialized SimOrders proto and recorded with
    RECOMMENDATION_PROPOSAL state; each affected Sim is then prompted to
    accept or reject via a recommendation interaction.
    """
    zone_director = get_restaurant_zone_director()
    if zone_director is None:
        sims4.commands.output('Current venue is not restaurant', _connection)
        return False
    proto = Restaurant_pb2.SimOrders()
    text_format.Merge(sim_orders, proto)
    orders = [(order.sim_id, order.recipe_id) for order in proto.sim_orders]
    # Resolve every referenced Sim id to an object; any unresolved Sim aborts.
    sims_in_order = set([services.object_manager().get(order_sim_id) for order_sim_id in [order[0] for order in orders]])
    for sim in sims_in_order:
        if sim is None:
            sims4.commands.output("Trying to target order for a Sim that isn't on the lot", _connection)
            return False
        active_group_order = _get_active_group_order_for_dining_group(sim)
        if active_group_order:
            # An order is already in flight: add the proposals onto it.
            recipe_manager = services.get_instance_manager(sims4.resources.Types.RECIPE)
            for order in orders:
                recipe = recipe_manager.get(order[1])
                recipes = GroupOrder.get_food_drink_recipe_id_tuple(recipe)
                active_group_order.add_sim_order((order[0]), food_recipe_id=(recipes[0]), drink_recipe_id=(recipes[1]),
                  recommendation_state=(OrderRecommendationState.RECOMMENDATION_PROPOSAL),
                  order_status=(OrderStatus.ORDER_INIT))
        else:
            # No active order yet: create one for the whole table without
            # sending it, and hold the cost against the dining group.
            zone_director.order_for_table(orders, send_order=False,
              recommendation_state=(OrderRecommendationState.RECOMMENDATION_PROPOSAL),
              order_status=(OrderStatus.ORDER_INIT))
            groups = zone_director.get_dining_groups_by_sim(sim)
            group = groups.pop()
            group.hold_ordered_cost(proto.meal_cost if has_field(proto, 'meal_cost') else 0)
    for sim in sims_in_order:
        zone_director.trigger_recommendation_interaction(services.get_active_sim(), sim)
    return True
@sims4.commands.Command('restaurant.npc_accept_or_reject_recommendation', command_type=(sims4.commands.CommandType.Live))
def npc_accept_or_reject_recommendation(opt_sim: OptionalTargetParam=None, accept_recommendation: bool=True, _connection=None):
    """ Resolve a pending order recommendation for an NPC Sim.

    On accept, the Sim's pending order is marked accepted; on reject, the
    recommended order is replaced by a chef's-choice order marked rejected.
    """
    sim = get_optional_target(opt_sim, _connection)
    if sim is None:
        sims4.commands.output("Sim {} doesn't exist.".format(opt_sim), _connection)
        return False
    zone_director = get_restaurant_zone_director()
    if zone_director is None:
        sims4.commands.output('Current venue is not restaurant', _connection)
        return False
    group_order = zone_director.get_active_group_order_for_sim(sim.sim_id)
    if group_order is None:
        sims4.commands.output('Sim {} was not offered a recommendation.'.format(opt_sim), _connection)
        return False
    if accept_recommendation:
        sim_order = group_order.get_sim_order(sim.sim_id)
        if sim_order is not None:
            sim_order.recommendation_state = OrderRecommendationState.RECOMMENDATION_ACCEPTED
    else:
        # Rejected: drop the recommended order and substitute an
        # automatically chosen food/drink pair for this Sim.
        group_order.remove_sim_order(sim.sim_id)
        food_recipe, drink_recipe = ChefsChoice.get_order_for_npc_sim(sim)
        group_order.add_sim_order((sim.sim_id), food_recipe_id=(food_recipe.guid64),
          drink_recipe_id=(drink_recipe.guid64),
          recommendation_state=(OrderRecommendationState.RECOMMENDATION_REJECTED),
          order_status=(OrderStatus.ORDER_INIT))
    return True
@sims4.commands.Command('restaurant.order_food_at_chef_station', command_type=(sims4.commands.CommandType.Live))
def order_food_at_chef_station(recipe_type: TunableInstanceParam(sims4.resources.Types.RECIPE), opt_sim: OptionalTargetParam=None, _connection=None):
    """ Place a direct order with the chef for the given recipe and Sim. """
    if recipe_type is None:
        sims4.commands.output('Recipe is None', _connection)
        return False
    ordering_sim = get_optional_target(opt_sim, _connection)
    if ordering_sim is None:
        sims4.commands.output("Sim {} doesn't exist.".format(opt_sim), _connection)
        return False
    situation = restaurant_utils.get_chef_situation()
    if situation is None:
        sims4.commands.output("Couldn't find a Chef Situation in this zone.")
        return False
    situation.add_direct_order(recipe_type, ordering_sim)
    # Notify event listeners that food was ordered for this Sim.
    services.get_event_manager().process_event((test_events.TestEvent.RestaurantFoodOrdered), sim_info=(ordering_sim.sim_info))
    return True
@sims4.commands.Command('restaurant.npc_order_food_at_chef_station', command_type=(sims4.commands.CommandType.Live))
def npc_order_food_at_chef_station(opt_sim: OptionalTargetParam=None, chef_sim: OptionalTargetParam=None, _connection=None):
    """ Have an NPC Sim place a food order with the chef, picking the dish automatically. """
    npc_sim = get_optional_target(opt_sim, _connection)
    if npc_sim is None:
        sims4.commands.output("Sim {} doesn't exist.".format(opt_sim), _connection)
        return False
    resolved_chef = get_optional_target(chef_sim, _connection)
    if resolved_chef is None:
        # Mirrors the original message, which reports the resolved (None) value.
        sims4.commands.output("Chef {} doesn't exist.".format(resolved_chef), _connection)
        return False
    situation = restaurant_utils.get_chef_situation(chef_sim=resolved_chef)
    if situation is None:
        sims4.commands.output("Couldn't find a Chef Situation in this zone.")
        return False
    # Pick from the chef's preset menu when one exists; otherwise let
    # chef's choice select a dish (ignoring the paired drink).
    if situation.menu_preset is not None:
        food_order = ChefsChoice.get_order_for_npc_sim_with_menu(npc_sim, situation.menu_preset)
    else:
        food_order, _ = ChefsChoice.get_order_for_npc_sim(npc_sim)
    situation.add_direct_order(food_order, npc_sim)
    services.get_event_manager().process_event((test_events.TestEvent.RestaurantFoodOrdered), sim_info=(npc_sim.sim_info))
    return True
@sims4.commands.Command('restaurant.give_chef_feedback', command_type=(sims4.commands.CommandType.Live))
def give_chef_feedback(to_chef_sim_id: OptionalTargetParam=None, from_sim_id: OptionalTargetParam=None, is_compliment: bool=True, waitstaff_sim_id: OptionalTargetParam=None, _connection=None):
    """Route a compliment (or insult) about a chef through the waitstaff situation.

    All three Sims must resolve to live targets; otherwise an error is reported
    and False is returned.
    """
    sender = get_optional_target(from_sim_id, _connection)
    if sender is None:
        sims4.commands.output("From Sim {} doesn't exist.".format(from_sim_id), _connection)
        return False
    chef = get_optional_target(to_chef_sim_id, _connection)
    if chef is None:
        sims4.commands.output("To Chef Sim {} doesn't exist.".format(to_chef_sim_id), _connection)
        return False
    waiter = get_optional_target(waitstaff_sim_id, _connection)
    if waiter is None:
        sims4.commands.output("Waitstaff Sim {} doesn't exist.".format(waitstaff_sim_id), _connection)
        return False
    situation = restaurant_utils.get_waitstaff_situation(waiter)
    situation.give_chef_feedback(chef, sender, is_compliment)
@sims4.commands.Command('restaurant.npc_order_food_from_waitstaff', command_type=(sims4.commands.CommandType.Live))
def npc_order_food_from_waitstaff(opt_sim: OptionalTargetParam=None, _connection=None):
    """Place orders with the waitstaff for every dining group the Sim belongs to.

    Returns True when all groups placed an order, False on the first failure or
    when the Sim / restaurant zone director can't be resolved.
    """
    target = get_optional_target(opt_sim, _connection)
    if target is None:
        sims4.commands.output("Sim {} doesn't exist.".format(opt_sim), _connection)
        return False
    director = get_restaurant_zone_director()
    if director is None:
        sims4.commands.output('Not currently on a restaurant lot so cannot place orders with the waitstaff for NPC groups.', _connection)
        return False
    # Reuse an already-active group order for the table, if any member has one.
    pending_order = _get_active_group_order_for_dining_group(target)
    for group in director.get_dining_groups_by_sim(target):
        placed = group.order_for_table(active_group_order=pending_order)
        if not placed:
            sims4.commands.output('Failed to place order for dining group.', _connection)
            return False
    return True
@sims4.commands.Command('restaurant.comp_order_for_sim', command_type=(sims4.commands.CommandType.Live))
def comp_order_for_sim(opt_sim: OptionalTargetParam=None, _connection=None):
    """Comp (waive the cost of) every delivered order belonging to the given Sim.

    Returns False when the Sim, zone director, or business manager can't be
    resolved; otherwise falls off the end after comping the orders, matching
    the other restaurant commands.
    """
    sim = get_optional_target(opt_sim, _connection)
    if sim is None:
        # BUGFIX: all three error paths here called sims4.commands.Command (the
        # command *registration* decorator) instead of sims4.commands.output,
        # so no message ever reached the client.
        sims4.commands.output("Sim {} doesn't exist.".format(opt_sim), _connection)
        return False
    zone_director = get_restaurant_zone_director()
    if zone_director is None:
        sims4.commands.output('Not currently on a restaurant lot.', _connection)
        return False
    business_manager = zone_director.business_manager
    if business_manager is None:
        sims4.commands.output("The current zone doesn't have a business manager.", _connection)
        return False
    for group_order in zone_director.get_delivered_orders_for_sim(sim.id):
        business_manager.comp_order_for_sim(group_order.get_sim_order(sim.id))
@sims4.commands.Command('restaurant.create_food_for_group_order_sim', command_type=(sims4.commands.CommandType.Live))
def create_food_for_group_order_sim(opt_sim: OptionalTargetParam=None, _connection=None):
    """Immediately create the food objects for the given Sim's active group order."""
    diner = get_optional_target(opt_sim, _connection)
    if diner is None:
        sims4.commands.output("Sim {} doesn't exist.".format(opt_sim), _connection)
        return False
    director = get_restaurant_zone_director()
    if director is None:
        sims4.commands.output('Not currently on a restaurant lot so can not create an order for a table.', _connection)
        return False
    order = director.get_active_group_order_for_sim(diner.id)
    if order is None:
        sims4.commands.output('There is no group order in for the passed in sim {}.'.format(diner), _connection)
        return False
    director.create_food_for_group_order(order)
    return True
@sims4.commands.Command('restaurant.create_food_for_group_order_table', command_type=(sims4.commands.CommandType.Live))
def create_food_for_group_order_table(table_id: OptionalTargetParam=None, _connection=None):
    """Immediately create the food objects for the active group order at a table.

    Returns True on success, False when the table, zone director, or group
    order can't be resolved.
    """
    table = get_optional_target(table_id, _connection)
    if table is None:
        sims4.commands.output("Table {} doesn't exist.".format(table_id), _connection)
        return False
    zone_director = get_restaurant_zone_director()
    if zone_director is None:
        sims4.commands.output('Not currently on a restaurant lot so can not create an order for a table.', _connection)
        return False
    group_order = zone_director.get_active_group_order_for_table(table.id)
    if group_order is None:
        # BUGFIX: this branch formatted the undefined name `sim` (copy/paste
        # from the per-sim variant), which raised NameError instead of
        # reporting the problem to the client.
        sims4.commands.output('There is no group order in for the passed in table {}.'.format(table), _connection)
        return False
    zone_director.create_food_for_group_order(group_order)
    return True
@sims4.commands.Command('restaurant.set_ingredient_quality', command_type=(sims4.commands.CommandType.Live))
def set_ingredient_quality(ingredient_quality: RestaurantIngredientQualityType, _connection=None):
    """Set the ingredient quality level for the owned restaurant in this zone."""
    business_manager = services.business_service().get_business_manager_for_zone()
    if business_manager is None:
        # BUGFIX: _connection was missing from this output() call, which raises
        # a TypeError instead of reporting the error to the requesting client.
        sims4.commands.output('Trying to set the ingredient quality for a restaurant but there was no valid business manager found for the current zone.', _connection)
        return False
    business_manager.set_ingredient_quality(ingredient_quality)
@sims4.commands.Command('restaurant.expedite_sims_order', command_type=(sims4.commands.CommandType.Live))
def expedite_sim_order(opt_sim: OptionalTargetParam=None, _connection=None):
    """Flag the given Sim's restaurant order as expedited (cooked/served faster)."""
    sim = get_optional_target(opt_sim, _connection)
    if sim is None:
        sims4.commands.output("Sim {} doesn't exist.".format(opt_sim), _connection)
        return False
    director = get_restaurant_zone_director()
    if director is None:
        sims4.commands.output('Not on a restaurant lot.', _connection)
        return
    if not director.has_group_order(sim.id):
        sims4.commands.output('Sim {} does not have an order.'.format(sim), _connection)
        return
    order = director.get_group_order(sim.id)
    if order is None:
        return
    order.expedited = True
@sims4.commands.Command('restaurant.refresh_configuration', command_type=(sims4.commands.CommandType.Live))
def refresh_configuration(_connection=None):
    """Force the restaurant zone director to re-pull its configuration; no-op off-lot."""
    director = get_restaurant_zone_director()
    if director is None:
        return
    director.refresh_configuration()
def _get_active_group_order_for_dining_group(sim):
    """Return the first active group order among the members of sim's dining groups.

    Returns None when not on a restaurant lot or when no member has an active
    order.
    """
    director = get_restaurant_zone_director()
    if director is None:
        return
    for group in director.get_dining_groups_by_sim(sim):
        for member in group.all_sims_in_situation_gen():
            order = director.get_active_group_order_for_sim(member.sim_id)
            if order:
                return order
@sims4.commands.Command('restaurant.sim_is_employee', command_type=(sims4.commands.CommandType.Automation))
def sim_is_employee(opt_sim: OptionalTargetParam=None, _connection=None):
    """Automation command: report whether the Sim works at the current restaurant.

    Emits a human-readable line plus a 'RestaurantIsEmployee; Status:...'
    automation line, and returns True/False accordingly.
    """
    sim = get_optional_target(opt_sim, _connection)
    if sim is None:
        sims4.commands.output("False, Sim {} doesn't exist.".format(opt_sim), _connection)
        sims4.commands.automation_output('RestaurantIsEmployee; Status:InvalidSim', _connection)
        return False
    zone_director = get_restaurant_zone_director()
    if zone_director is None:
        sims4.commands.output('False, Not on a restaurant lot.', _connection)
        sims4.commands.automation_output('RestaurantIsEmployee; Status:NotOnLot', _connection)
        return False
    situation_manager = services.get_zone_situation_manager()
    if situation_manager is None:
        sims4.commands.output('False, There is no situation manager on this lot.', _connection)
        sims4.commands.automation_output('RestaurantIsEmployee; Status:NoSituationMgr', _connection)
        return False
    business_manager = zone_director.business_manager
    if business_manager is None:
        # No business manager (presumably an unowned restaurant — confirm):
        # fall back to checking whether the Sim is in one of the restaurant
        # staff situations.
        sim_situations = situation_manager.get_situations_sim_is_in(sim)
        for situation in sim_situations:
            if type(situation) in (RestaurantTuning.CHEF_SITUATION,
             RestaurantTuning.HOST_SITUATION,
             RestaurantTuning.WAITSTAFF_SITUATION):
                sims4.commands.output('True, Sim is an employee of the current restaurant.', _connection)
                sims4.commands.automation_output('RestaurantIsEmployee; Status:Success', _connection)
                return True
    elif business_manager.is_employee(sim.sim_info):
        # Owned restaurant: defer to the business manager's employee check.
        sims4.commands.output('True, Sim is currently an employee', _connection)
        sims4.commands.automation_output('RestaurantIsEmployee; Status:Success', _connection)
        return True
    # Fallthrough: neither a staff situation member nor a listed employee.
    sims4.commands.output('False, Sim is not an employee of the current restaurant.', _connection)
    sims4.commands.automation_output('RestaurantIsEmployee; Status:Failed', _connection)
    return False
@sims4.commands.Command('restaurant.is_open', command_type=(sims4.commands.CommandType.Automation))
def is_open(_connection=None):
    """Automation command: report whether the current restaurant lot is open."""

    def _report(message, status_line, result):
        # Every exit emits both a human-readable line and an automation line.
        sims4.commands.output(message, _connection)
        sims4.commands.automation_output(status_line, _connection)
        return result

    director = get_restaurant_zone_director()
    if director is None:
        return _report('False, Not on a restaurant lot.', 'RestaurantIsOpen; Status:NotOnLot', False)
    manager = director.business_manager
    if manager is None:
        return _report('True, unowned restaurants are always open.', 'RestaurantIsOpen; Status:Success', True)
    if manager.is_open:
        return _report('True, this owned restaurant is currently open', 'RestaurantIsOpen; Status:Success', True)
    return _report('False, this owned restaurant is currently closed', 'RestaurantIsOpen; Status:Failed', False)
@sims4.commands.Command('restaurant.get_sim_diner_state', command_type=(sims4.commands.CommandType.Automation))
def get_sim_dining_state(opt_sim: OptionalTargetParam=None, _connection=None):
    """Automation command: emit the dining-state of each sub-situation in the Sim's dining group."""
    sim = get_optional_target(opt_sim, _connection)
    if sim is None:
        sims4.commands.output("Sim {} doesn't exist".format(opt_sim), _connection)
        return False
    director = get_restaurant_zone_director()
    if director is None:
        sims4.commands.output('Not on a restaurant lot.', _connection)
        return False
    groups = director.get_dining_groups_by_sim(sim)
    if not groups:
        # Not seated yet: report NotReady rather than failing the command.
        sims4.commands.output('Sim {} is not in dining group'.format(sim), _connection)
        sims4.commands.automation_output('RestaurantDinerState; Status:NotReady', _connection)
        return True
    group = groups.pop()
    for situation in group.sub_situations:
        state_name = situation.current_state_index().name
        sims4.commands.automation_output('RestaurantDinerState; Status:{}'.format(state_name), _connection)
    return True
|
[
"restaurants.restaurant_tuning.get_restaurant_zone_director",
"server_commands.argument_helpers.get_optional_target",
"services.get_active_sim",
"services.get_instance_manager",
"restaurants.restaurant_utils.get_waitstaff_situation",
"server_commands.argument_helpers.TunableInstanceParam",
"sims4.protocol_buffer_utils.has_field",
"restaurants.restaurant_utils.get_chef_situation",
"restaurants.chefs_choice.ChefsChoice.get_order_for_npc_sim",
"google.protobuf.text_format.Merge",
"services.get_event_manager",
"services.get_zone_situation_manager",
"restaurants.restaurant_order.GroupOrder.get_food_drink_recipe_id_tuple",
"protocolbuffers.Restaurant_pb2.SimOrders",
"restaurants.chefs_choice.ChefsChoice.get_order_for_npc_sim_with_menu",
"services.object_manager",
"services.business_service"
] |
[((1548, 1589), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (1567, 1589), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((1826, 1856), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (1854, 1856), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((2762, 2803), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (2781, 2803), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((2948, 2978), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (2976, 2978), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((3371, 3412), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (3390, 3412), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((3552, 3594), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['chef_sim', '_connection'], {}), '(chef_sim, _connection)\n', (3571, 3594), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((3748, 3802), 'restaurants.restaurant_utils.get_chef_situation', 'restaurant_utils.get_chef_situation', ([], {'chef_sim': 'chef_sim'}), '(chef_sim=chef_sim)\n', (3783, 3802), False, 'from restaurants import restaurant_utils\n'), ((4226, 4267), 
'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (4245, 4267), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((4412, 4442), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (4440, 4442), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((4846, 4887), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (4865, 4887), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((5033, 5076), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_table', '_connection'], {}), '(opt_table, _connection)\n', (5052, 5076), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((5097, 5127), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (5125, 5127), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((5488, 5518), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (5516, 5518), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((5660, 5686), 'protocolbuffers.Restaurant_pb2.SimOrders', 'Restaurant_pb2.SimOrders', ([], {}), '()\n', (5684, 5686), False, 'from protocolbuffers import Restaurant_pb2\n'), ((5691, 5727), 'google.protobuf.text_format.Merge', 'text_format.Merge', (['sim_orders', 'proto'], {}), '(sim_orders, proto)\n', 
(5708, 5727), False, 'from google.protobuf import text_format\n'), ((6429, 6459), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (6457, 6459), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((6599, 6640), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (6618, 6640), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((7158, 7188), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (7186, 7188), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((7328, 7369), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (7347, 7369), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((7875, 7905), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (7903, 7905), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((9933, 9974), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (9952, 9974), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((11572, 11613), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (11591, 11613), False, 'from server_commands.argument_helpers import 
TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((11760, 11797), 'restaurants.restaurant_utils.get_chef_situation', 'restaurant_utils.get_chef_situation', ([], {}), '()\n', (11795, 11797), False, 'from restaurants import restaurant_utils\n'), ((12371, 12412), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (12390, 12412), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((13737, 13782), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['from_sim_id', '_connection'], {}), '(from_sim_id, _connection)\n', (13756, 13782), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((13940, 13988), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['to_chef_sim_id', '_connection'], {}), '(to_chef_sim_id, _connection)\n', (13959, 13988), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((14157, 14207), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['waitstaff_sim_id', '_connection'], {}), '(waitstaff_sim_id, _connection)\n', (14176, 14207), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((14388, 14443), 'restaurants.restaurant_utils.get_waitstaff_situation', 'restaurant_utils.get_waitstaff_situation', (['waitstaff_sim'], {}), '(waitstaff_sim)\n', (14428, 14443), False, 'from restaurants import restaurant_utils\n'), ((14741, 14782), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (14760, 14782), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, 
get_optional_target\n'), ((14928, 14958), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (14956, 14958), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((15732, 15773), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (15751, 15773), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((15920, 15950), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (15948, 15950), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((16663, 16704), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (16682, 16704), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((16850, 16880), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (16878, 16880), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((17585, 17627), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['table_id', '_connection'], {}), '(table_id, _connection)\n', (17604, 17627), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((17778, 17808), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (17806, 17808), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, 
get_restaurant_zone_director\n'), ((19051, 19092), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (19070, 19092), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((19238, 19268), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (19266, 19268), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((19839, 19869), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (19867, 19869), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((20023, 20053), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (20051, 20053), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((20636, 20677), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (20655, 20677), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((22786, 22816), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (22814, 22816), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((23928, 23969), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_sim', '_connection'], {}), '(opt_sim, _connection)\n', (23947, 23969), False, 'from server_commands.argument_helpers import TunableInstanceParam, 
OptionalTargetParam, get_optional_target\n'), ((24114, 24144), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (24142, 24144), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((1230, 1280), 'server_commands.argument_helpers.TunableInstanceParam', 'TunableInstanceParam', (['sims4.resources.Types.RECIPE'], {}), '(sims4.resources.Types.RECIPE)\n', (1250, 1280), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((8051, 8077), 'protocolbuffers.Restaurant_pb2.SimOrders', 'Restaurant_pb2.SimOrders', ([], {}), '()\n', (8075, 8077), False, 'from protocolbuffers import Restaurant_pb2\n'), ((8086, 8122), 'google.protobuf.text_format.Merge', 'text_format.Merge', (['sim_orders', 'proto'], {}), '(sim_orders, proto)\n', (8103, 8122), False, 'from google.protobuf import text_format\n'), ((10124, 10154), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (10152, 10154), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((10866, 10904), 'restaurants.chefs_choice.ChefsChoice.get_order_for_npc_sim', 'ChefsChoice.get_order_for_npc_sim', (['sim'], {}), '(sim)\n', (10899, 10904), False, 'from restaurants.chefs_choice import ChefsChoice\n'), ((11346, 11396), 'server_commands.argument_helpers.TunableInstanceParam', 'TunableInstanceParam', (['sims4.resources.Types.RECIPE'], {}), '(sims4.resources.Types.RECIPE)\n', (11366, 11396), False, 'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((12567, 12609), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['chef_sim', '_connection'], {}), '(chef_sim, _connection)\n', (12586, 12609), False, 
'from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam, get_optional_target\n'), ((20931, 20961), 'restaurants.restaurant_tuning.get_restaurant_zone_director', 'get_restaurant_zone_director', ([], {}), '()\n', (20959, 20961), False, 'from restaurants.restaurant_tuning import RestaurantTuning, RestaurantIngredientQualityType, get_restaurant_zone_director\n'), ((21230, 21267), 'services.get_zone_situation_manager', 'services.get_zone_situation_manager', ([], {}), '()\n', (21265, 21267), False, 'import services, sims4.commands\n'), ((5815, 5840), 'services.object_manager', 'services.object_manager', ([], {}), '()\n', (5838, 5840), False, 'import services, sims4.commands\n'), ((6165, 6194), 'sims4.protocol_buffer_utils.has_field', 'has_field', (['proto', '"""meal_cost"""'], {}), "(proto, 'meal_cost')\n", (6174, 6194), False, 'from sims4.protocol_buffer_utils import has_field\n'), ((8670, 8729), 'services.get_instance_manager', 'services.get_instance_manager', (['sims4.resources.Types.RECIPE'], {}), '(sims4.resources.Types.RECIPE)\n', (8699, 8729), False, 'import services, sims4.commands\n'), ((9497, 9526), 'sims4.protocol_buffer_utils.has_field', 'has_field', (['proto', '"""meal_cost"""'], {}), "(proto, 'meal_cost')\n", (9506, 9526), False, 'from sims4.protocol_buffer_utils import has_field\n'), ((9622, 9647), 'services.get_active_sim', 'services.get_active_sim', ([], {}), '()\n', (9645, 9647), False, 'import services, sims4.commands\n'), ((11986, 12014), 'services.get_event_manager', 'services.get_event_manager', ([], {}), '()\n', (12012, 12014), False, 'import services, sims4.commands\n'), ((12783, 12837), 'restaurants.restaurant_utils.get_chef_situation', 'restaurant_utils.get_chef_situation', ([], {'chef_sim': 'chef_sim'}), '(chef_sim=chef_sim)\n', (12818, 12837), False, 'from restaurants import restaurant_utils\n'), ((13195, 13233), 'restaurants.chefs_choice.ChefsChoice.get_order_for_npc_sim', 'ChefsChoice.get_order_for_npc_sim', 
(['sim'], {}), '(sim)\n', (13228, 13233), False, 'from restaurants.chefs_choice import ChefsChoice\n'), ((13291, 13319), 'services.get_event_manager', 'services.get_event_manager', ([], {}), '()\n', (13317, 13319), False, 'import services, sims4.commands\n'), ((18523, 18550), 'services.business_service', 'services.business_service', ([], {}), '()\n', (18548, 18550), False, 'import services, sims4.commands\n'), ((8843, 8892), 'restaurants.restaurant_order.GroupOrder.get_food_drink_recipe_id_tuple', 'GroupOrder.get_food_drink_recipe_id_tuple', (['recipe'], {}), '(recipe)\n', (8884, 8892), False, 'from restaurants.restaurant_order import OrderStatus, OrderRecommendationState, GroupOrder\n'), ((13076, 13152), 'restaurants.chefs_choice.ChefsChoice.get_order_for_npc_sim_with_menu', 'ChefsChoice.get_order_for_npc_sim_with_menu', (['sim', 'chef_situation.menu_preset'], {}), '(sim, chef_situation.menu_preset)\n', (13119, 13152), False, 'from restaurants.chefs_choice import ChefsChoice\n'), ((8233, 8258), 'services.object_manager', 'services.object_manager', ([], {}), '()\n', (8256, 8258), False, 'import services, sims4.commands\n')]
|
from setuptools import setup

# The PyPI long description comes straight from the project README.
with open("README.md", "r") as readme:
    readme_text = readme.read()

setup(
    name="frappymongocontent",
    version="1.0.0",
    description="Store Implementation for Content in MongoDB",
    long_description=readme_text,
    long_description_content_type="text/markdown",
    url="https://github.com/ilfrich/frappy-py-mongo-content-store",
    author="<NAME>",
    author_email="<EMAIL>",
    packages=["frappymongocontent"],
    install_requires=["pbu"],
    tests_require=["pytest"],
    zip_safe=False,
)
|
[
"setuptools.setup"
] |
[((99, 519), 'setuptools.setup', 'setup', ([], {'name': '"""frappymongocontent"""', 'version': '"""1.0.0"""', 'description': '"""Store Implementation for Content in MongoDB"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/ilfrich/frappy-py-mongo-content-store"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'packages': "['frappymongocontent']", 'install_requires': "['pbu']", 'tests_require': "['pytest']", 'zip_safe': '(False)'}), "(name='frappymongocontent', version='1.0.0', description=\n 'Store Implementation for Content in MongoDB', long_description=\n long_description, long_description_content_type='text/markdown', url=\n 'https://github.com/ilfrich/frappy-py-mongo-content-store', author=\n '<NAME>', author_email='<EMAIL>', packages=['frappymongocontent'],\n install_requires=['pbu'], tests_require=['pytest'], zip_safe=False)\n", (104, 519), False, 'from setuptools import setup\n')]
|
#!/usr/bin/env python3
import unittest
import os
import shutil
from src.data.VideoItem import VideoItem
from src.data.MetaDataItem import MetaDataItem
from src.executor.FaceBlurrer import FaceBlurrer
from numpy.testing import assert_array_equal, assert_raises
class TestAnonymizationExecutor(unittest.TestCase):
    """Tests for the face-blurring anonymization executor.

    The actual blurring test is currently disabled (kept below as a string)
    because it depends on the anonymization dataset being available locally.
    """

    # Scratch directory created for each test and removed afterwards.
    TEST_DIR = os.path.join(os.getcwd(), "anontest")
    TEST_FILE = "test.mp4"
    DATASET_PATH = "src/lib/anonymization/dataset/input"
    ACCEPTED_FILE_EXTENSION = ".mp4"
    TEST_FILE_PATH = os.path.join(TEST_DIR, TEST_FILE)

    def setUp(self):
        # Create the scratch directory. (The original comment claimed a test
        # video was copied here too, but that happens inside the disabled test
        # below, not in setUp.) exist_ok avoids a check-then-create race and
        # tolerates leftovers from a previously aborted run.
        os.makedirs(self.TEST_DIR, exist_ok=True)

    def tearDown(self):
        # Remove the scratch directory and anything written into it.
        if os.path.exists(self.TEST_DIR):
            shutil.rmtree(self.TEST_DIR)

    def test_compiles(self):
        # Smoke test: module-level imports resolved and the suite runs.
        self.assertEqual(True, True)

    """
    # Test that the executor works with a single video
    def test_face_blurrer_single(self):
        # Copy video to test directory
        shutil.copy2(os.path.join(os.getcwd(), self.DATASET_PATH, "man_face.mp4"), self.TEST_FILE_PATH)
        video = VideoItem(filepath = self.TEST_FILE_PATH, metadata=None)
        original_data = video.npy
        # Running the face blurrer should overwrite the input file
        face_blurrer = FaceBlurrer()
        new_data = face_blurrer.run(video)
        # Now we check that the video data has changed
        assert_raises(AssertionError, assert_array_equal, original_data, new_data)
    """
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"os.mkdir",
"os.getcwd",
"os.path.exists",
"shutil.rmtree",
"os.path.join"
] |
[((509, 542), 'os.path.join', 'os.path.join', (['TEST_DIR', 'TEST_FILE'], {}), '(TEST_DIR, TEST_FILE)\n', (521, 542), False, 'import os\n'), ((1636, 1651), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1649, 1651), False, 'import unittest\n'), ((342, 353), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (351, 353), False, 'import os\n'), ((815, 844), 'os.path.exists', 'os.path.exists', (['self.TEST_DIR'], {}), '(self.TEST_DIR)\n', (829, 844), False, 'import os\n'), ((680, 709), 'os.path.exists', 'os.path.exists', (['self.TEST_DIR'], {}), '(self.TEST_DIR)\n', (694, 709), False, 'import os\n'), ((723, 746), 'os.mkdir', 'os.mkdir', (['self.TEST_DIR'], {}), '(self.TEST_DIR)\n', (731, 746), False, 'import os\n'), ((858, 886), 'shutil.rmtree', 'shutil.rmtree', (['self.TEST_DIR'], {}), '(self.TEST_DIR)\n', (871, 886), False, 'import shutil\n')]
|
# Copyright (C) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
""" This module contains architecture of Text Recognition model."""
import tensorflow as tf
from tensorflow.contrib import rnn
import tensorflow.contrib.slim as slim
class TextRecognition:
""" Text recognition model definition. """
def __init__(self, is_training, num_classes, backbone_dropout=0.0):
self.is_training = is_training
self.lstm_dim = 256
self.num_classes = num_classes
self.backbone_dropout = backbone_dropout
def __call__(self, inputdata):
with tf.variable_scope('shadow'):
features = self.feature_extractor(inputdata=inputdata)
logits = self.encoder_decoder(inputdata=tf.squeeze(features, axis=1))
return logits
# pylint: disable=too-many-locals
def feature_extractor(self, inputdata):
""" Extracts features from input text image. """
with slim.arg_scope([slim.conv2d], padding='SAME',
weights_initializer=tf.contrib.layers.variance_scaling_initializer(),
weights_regularizer=slim.l2_regularizer(0.00025),
biases_initializer=None, activation_fn=None):
with slim.arg_scope([slim.batch_norm], updates_collections=None):
bn0 = slim.batch_norm(inputdata, 0.9, scale=True, is_training=self.is_training,
activation_fn=None)
dropout1 = slim.dropout(bn0, keep_prob=1.0 - self.backbone_dropout,
is_training=self.is_training)
conv1 = slim.conv2d(dropout1, num_outputs=64, kernel_size=3)
bn1 = slim.batch_norm(conv1, 0.9, scale=True, is_training=self.is_training,
activation_fn=tf.nn.relu)
pool1 = slim.max_pool2d(bn1, kernel_size=2, stride=2)
dropout2 = slim.dropout(pool1, keep_prob=1.0 - self.backbone_dropout,
is_training=self.is_training)
conv2 = slim.conv2d(dropout2, num_outputs=128, kernel_size=3)
bn2 = slim.batch_norm(conv2, 0.9, scale=True, is_training=self.is_training,
activation_fn=tf.nn.relu)
pool2 = slim.max_pool2d(bn2, kernel_size=2, stride=2)
dropout3 = slim.dropout(pool2, keep_prob=1.0 - self.backbone_dropout,
is_training=self.is_training)
conv3 = slim.conv2d(dropout3, num_outputs=256, kernel_size=3)
bn3 = slim.batch_norm(conv3, 0.9, scale=True, is_training=self.is_training,
activation_fn=tf.nn.relu)
dropout4 = slim.dropout(bn3, keep_prob=1.0 - self.backbone_dropout,
is_training=self.is_training)
conv4 = slim.conv2d(dropout4, num_outputs=256, kernel_size=3)
bn4 = slim.batch_norm(conv4, 0.9, scale=True, is_training=self.is_training,
activation_fn=tf.nn.relu)
pool4 = slim.max_pool2d(bn4, kernel_size=[2, 1], stride=[2, 1])
dropout5 = slim.dropout(pool4, keep_prob=1.0 - self.backbone_dropout,
is_training=self.is_training)
conv5 = slim.conv2d(dropout5, num_outputs=512, kernel_size=3)
bn5 = slim.batch_norm(conv5, 0.9, scale=True, is_training=self.is_training,
activation_fn=tf.nn.relu)
dropout6 = slim.dropout(bn5, keep_prob=1.0 - self.backbone_dropout,
is_training=self.is_training)
conv6 = slim.conv2d(dropout6, num_outputs=512, kernel_size=3)
bn6 = slim.batch_norm(conv6, 0.9, scale=True, is_training=self.is_training,
activation_fn=tf.nn.relu)
pool6 = slim.max_pool2d(bn6, kernel_size=[2, 1], stride=[2, 1])
dropout7 = slim.dropout(pool6, keep_prob=1.0 - self.backbone_dropout,
is_training=self.is_training)
conv7 = slim.conv2d(dropout7, num_outputs=512, kernel_size=2, stride=[2, 1])
bn7 = slim.batch_norm(conv7, 0.9, scale=True, is_training=self.is_training,
activation_fn=tf.nn.relu)
return bn7
def encoder_decoder(self, inputdata):
""" LSTM-based encoder-decoder module. """
with tf.variable_scope('LSTMLayers'):
[batch_size, width, _] = inputdata.get_shape().as_list()
with tf.variable_scope('encoder'):
forward_cells = []
backward_cells = []
for _ in range(2):
forward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))
backward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))
encoder_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(
forward_cells, backward_cells, inputdata, dtype=tf.float32)
with tf.variable_scope('decoder'):
forward_cells = []
backward_cells = []
for _ in range(2):
forward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))
backward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))
decoder_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(
forward_cells, backward_cells, encoder_layer, dtype=tf.float32)
rnn_reshaped = tf.reshape(decoder_layer, [batch_size * width, -1])
logits = slim.fully_connected(rnn_reshaped, self.num_classes, activation_fn=None)
logits = tf.reshape(logits, [batch_size, width, self.num_classes])
rnn_out = tf.transpose(logits, (1, 0, 2))
return rnn_out
|
[
"tensorflow.contrib.slim.conv2d",
"tensorflow.contrib.slim.arg_scope",
"tensorflow.squeeze",
"tensorflow.contrib.rnn.stack_bidirectional_dynamic_rnn",
"tensorflow.contrib.slim.dropout",
"tensorflow.reshape",
"tensorflow.contrib.slim.fully_connected",
"tensorflow.variable_scope",
"tensorflow.contrib.slim.l2_regularizer",
"tensorflow.transpose",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.contrib.slim.max_pool2d",
"tensorflow.contrib.slim.batch_norm",
"tensorflow.contrib.layers.variance_scaling_initializer"
] |
[((1098, 1125), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""shadow"""'], {}), "('shadow')\n", (1115, 1125), True, 'import tensorflow as tf\n'), ((5154, 5185), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""LSTMLayers"""'], {}), "('LSTMLayers')\n", (5171, 5185), True, 'import tensorflow as tf\n'), ((6236, 6287), 'tensorflow.reshape', 'tf.reshape', (['decoder_layer', '[batch_size * width, -1]'], {}), '(decoder_layer, [batch_size * width, -1])\n', (6246, 6287), True, 'import tensorflow as tf\n'), ((6310, 6382), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['rnn_reshaped', 'self.num_classes'], {'activation_fn': 'None'}), '(rnn_reshaped, self.num_classes, activation_fn=None)\n', (6330, 6382), True, 'import tensorflow.contrib.slim as slim\n'), ((6404, 6461), 'tensorflow.reshape', 'tf.reshape', (['logits', '[batch_size, width, self.num_classes]'], {}), '(logits, [batch_size, width, self.num_classes])\n', (6414, 6461), True, 'import tensorflow as tf\n'), ((6484, 6515), 'tensorflow.transpose', 'tf.transpose', (['logits', '(1, 0, 2)'], {}), '(logits, (1, 0, 2))\n', (6496, 6515), True, 'import tensorflow as tf\n'), ((1766, 1825), 'tensorflow.contrib.slim.arg_scope', 'slim.arg_scope', (['[slim.batch_norm]'], {'updates_collections': 'None'}), '([slim.batch_norm], updates_collections=None)\n', (1780, 1825), True, 'import tensorflow.contrib.slim as slim\n'), ((1849, 1946), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['inputdata', '(0.9)'], {'scale': '(True)', 'is_training': 'self.is_training', 'activation_fn': 'None'}), '(inputdata, 0.9, scale=True, is_training=self.is_training,\n activation_fn=None)\n', (1864, 1946), True, 'import tensorflow.contrib.slim as slim\n'), ((2009, 2100), 'tensorflow.contrib.slim.dropout', 'slim.dropout', (['bn0'], {'keep_prob': '(1.0 - self.backbone_dropout)', 'is_training': 'self.is_training'}), '(bn0, keep_prob=1.0 - self.backbone_dropout, is_training=self.\n is_training)\n', (2021, 2100), 
True, 'import tensorflow.contrib.slim as slim\n'), ((2160, 2212), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['dropout1'], {'num_outputs': '(64)', 'kernel_size': '(3)'}), '(dropout1, num_outputs=64, kernel_size=3)\n', (2171, 2212), True, 'import tensorflow.contrib.slim as slim\n'), ((2235, 2334), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['conv1', '(0.9)'], {'scale': '(True)', 'is_training': 'self.is_training', 'activation_fn': 'tf.nn.relu'}), '(conv1, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n', (2250, 2334), True, 'import tensorflow.contrib.slim as slim\n'), ((2393, 2438), 'tensorflow.contrib.slim.max_pool2d', 'slim.max_pool2d', (['bn1'], {'kernel_size': '(2)', 'stride': '(2)'}), '(bn1, kernel_size=2, stride=2)\n', (2408, 2438), True, 'import tensorflow.contrib.slim as slim\n'), ((2467, 2560), 'tensorflow.contrib.slim.dropout', 'slim.dropout', (['pool1'], {'keep_prob': '(1.0 - self.backbone_dropout)', 'is_training': 'self.is_training'}), '(pool1, keep_prob=1.0 - self.backbone_dropout, is_training=self\n .is_training)\n', (2479, 2560), True, 'import tensorflow.contrib.slim as slim\n'), ((2620, 2673), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['dropout2'], {'num_outputs': '(128)', 'kernel_size': '(3)'}), '(dropout2, num_outputs=128, kernel_size=3)\n', (2631, 2673), True, 'import tensorflow.contrib.slim as slim\n'), ((2696, 2795), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['conv2', '(0.9)'], {'scale': '(True)', 'is_training': 'self.is_training', 'activation_fn': 'tf.nn.relu'}), '(conv2, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n', (2711, 2795), True, 'import tensorflow.contrib.slim as slim\n'), ((2854, 2899), 'tensorflow.contrib.slim.max_pool2d', 'slim.max_pool2d', (['bn2'], {'kernel_size': '(2)', 'stride': '(2)'}), '(bn2, kernel_size=2, stride=2)\n', (2869, 2899), True, 'import tensorflow.contrib.slim as slim\n'), ((2928, 3021), 
'tensorflow.contrib.slim.dropout', 'slim.dropout', (['pool2'], {'keep_prob': '(1.0 - self.backbone_dropout)', 'is_training': 'self.is_training'}), '(pool2, keep_prob=1.0 - self.backbone_dropout, is_training=self\n .is_training)\n', (2940, 3021), True, 'import tensorflow.contrib.slim as slim\n'), ((3081, 3134), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['dropout3'], {'num_outputs': '(256)', 'kernel_size': '(3)'}), '(dropout3, num_outputs=256, kernel_size=3)\n', (3092, 3134), True, 'import tensorflow.contrib.slim as slim\n'), ((3157, 3256), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['conv3', '(0.9)'], {'scale': '(True)', 'is_training': 'self.is_training', 'activation_fn': 'tf.nn.relu'}), '(conv3, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n', (3172, 3256), True, 'import tensorflow.contrib.slim as slim\n'), ((3319, 3410), 'tensorflow.contrib.slim.dropout', 'slim.dropout', (['bn3'], {'keep_prob': '(1.0 - self.backbone_dropout)', 'is_training': 'self.is_training'}), '(bn3, keep_prob=1.0 - self.backbone_dropout, is_training=self.\n is_training)\n', (3331, 3410), True, 'import tensorflow.contrib.slim as slim\n'), ((3470, 3523), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['dropout4'], {'num_outputs': '(256)', 'kernel_size': '(3)'}), '(dropout4, num_outputs=256, kernel_size=3)\n', (3481, 3523), True, 'import tensorflow.contrib.slim as slim\n'), ((3546, 3645), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['conv4', '(0.9)'], {'scale': '(True)', 'is_training': 'self.is_training', 'activation_fn': 'tf.nn.relu'}), '(conv4, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n', (3561, 3645), True, 'import tensorflow.contrib.slim as slim\n'), ((3704, 3759), 'tensorflow.contrib.slim.max_pool2d', 'slim.max_pool2d', (['bn4'], {'kernel_size': '[2, 1]', 'stride': '[2, 1]'}), '(bn4, kernel_size=[2, 1], stride=[2, 1])\n', (3719, 3759), True, 'import tensorflow.contrib.slim as slim\n'), 
((3788, 3881), 'tensorflow.contrib.slim.dropout', 'slim.dropout', (['pool4'], {'keep_prob': '(1.0 - self.backbone_dropout)', 'is_training': 'self.is_training'}), '(pool4, keep_prob=1.0 - self.backbone_dropout, is_training=self\n .is_training)\n', (3800, 3881), True, 'import tensorflow.contrib.slim as slim\n'), ((3941, 3994), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['dropout5'], {'num_outputs': '(512)', 'kernel_size': '(3)'}), '(dropout5, num_outputs=512, kernel_size=3)\n', (3952, 3994), True, 'import tensorflow.contrib.slim as slim\n'), ((4017, 4116), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['conv5', '(0.9)'], {'scale': '(True)', 'is_training': 'self.is_training', 'activation_fn': 'tf.nn.relu'}), '(conv5, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n', (4032, 4116), True, 'import tensorflow.contrib.slim as slim\n'), ((4179, 4270), 'tensorflow.contrib.slim.dropout', 'slim.dropout', (['bn5'], {'keep_prob': '(1.0 - self.backbone_dropout)', 'is_training': 'self.is_training'}), '(bn5, keep_prob=1.0 - self.backbone_dropout, is_training=self.\n is_training)\n', (4191, 4270), True, 'import tensorflow.contrib.slim as slim\n'), ((4330, 4383), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['dropout6'], {'num_outputs': '(512)', 'kernel_size': '(3)'}), '(dropout6, num_outputs=512, kernel_size=3)\n', (4341, 4383), True, 'import tensorflow.contrib.slim as slim\n'), ((4406, 4505), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['conv6', '(0.9)'], {'scale': '(True)', 'is_training': 'self.is_training', 'activation_fn': 'tf.nn.relu'}), '(conv6, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n', (4421, 4505), True, 'import tensorflow.contrib.slim as slim\n'), ((4564, 4619), 'tensorflow.contrib.slim.max_pool2d', 'slim.max_pool2d', (['bn6'], {'kernel_size': '[2, 1]', 'stride': '[2, 1]'}), '(bn6, kernel_size=[2, 1], stride=[2, 1])\n', (4579, 4619), True, 'import tensorflow.contrib.slim 
as slim\n'), ((4648, 4741), 'tensorflow.contrib.slim.dropout', 'slim.dropout', (['pool6'], {'keep_prob': '(1.0 - self.backbone_dropout)', 'is_training': 'self.is_training'}), '(pool6, keep_prob=1.0 - self.backbone_dropout, is_training=self\n .is_training)\n', (4660, 4741), True, 'import tensorflow.contrib.slim as slim\n'), ((4801, 4869), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['dropout7'], {'num_outputs': '(512)', 'kernel_size': '(2)', 'stride': '[2, 1]'}), '(dropout7, num_outputs=512, kernel_size=2, stride=[2, 1])\n', (4812, 4869), True, 'import tensorflow.contrib.slim as slim\n'), ((4892, 4991), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['conv7', '(0.9)'], {'scale': '(True)', 'is_training': 'self.is_training', 'activation_fn': 'tf.nn.relu'}), '(conv7, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n', (4907, 4991), True, 'import tensorflow.contrib.slim as slim\n'), ((5274, 5302), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder"""'], {}), "('encoder')\n", (5291, 5302), True, 'import tensorflow as tf\n'), ((5613, 5712), 'tensorflow.contrib.rnn.stack_bidirectional_dynamic_rnn', 'rnn.stack_bidirectional_dynamic_rnn', (['forward_cells', 'backward_cells', 'inputdata'], {'dtype': 'tf.float32'}), '(forward_cells, backward_cells,\n inputdata, dtype=tf.float32)\n', (5648, 5712), False, 'from tensorflow.contrib import rnn\n'), ((5748, 5776), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder"""'], {}), "('decoder')\n", (5765, 5776), True, 'import tensorflow as tf\n'), ((6087, 6190), 'tensorflow.contrib.rnn.stack_bidirectional_dynamic_rnn', 'rnn.stack_bidirectional_dynamic_rnn', (['forward_cells', 'backward_cells', 'encoder_layer'], {'dtype': 'tf.float32'}), '(forward_cells, backward_cells,\n encoder_layer, dtype=tf.float32)\n', (6122, 6190), False, 'from tensorflow.contrib import rnn\n'), ((1246, 1274), 'tensorflow.squeeze', 'tf.squeeze', (['features'], {'axis': '(1)'}), '(features, 
axis=1)\n', (1256, 1274), True, 'import tensorflow as tf\n'), ((1547, 1595), 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {}), '()\n', (1593, 1595), True, 'import tensorflow as tf\n'), ((1645, 1673), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['(0.00025)'], {}), '(0.00025)\n', (1664, 1673), True, 'import tensorflow.contrib.slim as slim\n'), ((5452, 5490), 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', (['self.lstm_dim'], {}), '(self.lstm_dim)\n', (5475, 5490), True, 'import tensorflow as tf\n'), ((5534, 5572), 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', (['self.lstm_dim'], {}), '(self.lstm_dim)\n', (5557, 5572), True, 'import tensorflow as tf\n'), ((5926, 5964), 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', (['self.lstm_dim'], {}), '(self.lstm_dim)\n', (5949, 5964), True, 'import tensorflow as tf\n'), ((6008, 6046), 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', (['self.lstm_dim'], {}), '(self.lstm_dim)\n', (6031, 6046), True, 'import tensorflow as tf\n')]
|
import json
from pyquery import PyQuery
from scylla.database import ProxyIP
from .base_provider import BaseProvider
class ProxyScraperProvider(BaseProvider):
def urls(self) -> [str]:
return ['https://raw.githubusercontent.com/sunny9577/proxy-scraper/master/proxies.json']
def parse(self, document: PyQuery) -> [ProxyIP]:
ip_list: [ProxyIP] = []
text = document.html()
json_object = json.load(text)
if not json_object or type(json_object['usproxy']) != list:
return ip_list
for ip_port in json_object['usproxy']:
p = ProxyIP(ip=ip_port['ip'], port=ip_port['port'])
ip_list.append(p)
return ip_list
@staticmethod
def should_render_js() -> bool:
return False
|
[
"scylla.database.ProxyIP",
"json.load"
] |
[((429, 444), 'json.load', 'json.load', (['text'], {}), '(text)\n', (438, 444), False, 'import json\n'), ((604, 651), 'scylla.database.ProxyIP', 'ProxyIP', ([], {'ip': "ip_port['ip']", 'port': "ip_port['port']"}), "(ip=ip_port['ip'], port=ip_port['port'])\n", (611, 651), False, 'from scylla.database import ProxyIP\n')]
|
"""
bank.accounts
~~~~~~~~~~~~~
This module contains code for managing accounts.
"""
from .cards import Card
from .exceptions import InsufficientBalance, AccountError, ExceedsLimit
import time, datetime
class Account:
"""
Base class for accounts, handles balances & transactions.
:param account_id: Unique ID associated with the account.
:param account_type: Type of account (savings, checkings, credit).
:param holder_accounts: An AccountHolder.Accounts() class.
:param accountholder_id: Unique ID of the account holder.
:param opening_balance: When account is created the opening amount of $.
:param open_date: Date the account was opened.
:param status: Status of the account (open, closed, locked).
"""
def __init__(
self,
account_id: int,
account_type: str,
holder_accounts,
accountholder_id: str,
opening_balance=0,
open_date=datetime.date.today(),
status: str = "open",
):
self.account_id = account_id
self.account_type = account_type
self.holder_accounts = holder_accounts
self.accountholder_id = account_id
self.balance = opening_balance if opening_balance >= 0 else 0
self.open_date = open_date
self.status = status
self.linked_cards = {}
self.withdrawal_limit = 5000
def withdraw(self, amount: float) -> dict:
"""'
Method to withdraw funds from account.
:param amount: Transaction amount.
"""
# Assuming there can be $0.
if self.status != "open":
raise AccountError(self.account_id, self.status)
elif amount > self.withdrawal_limit:
raise ExceedsLimit(self.withdrawal_limit)
elif amount > self.balance:
raise InsufficientBalance(self.balance, amount)
else:
self.balance -= amount
return {
"status": True,
"new_balance": self.balance,
"transaction_time": time.time(),
}
def deposit(self, amount: float) -> dict:
"""
Method to deposit funds to an account.
:param amount: Transaction amount.
"""
if self.status != "open":
raise AccountError(self.account_id, self.status)
self.balance += amount
return {
"status": True,
"new_balance": self.balance,
"transaction_time": time.time(),
}
class CheckingAccount(Account):
"""
Class for checking accounts, inherits base account class.
:param account_id: Unique ID associated with the account.
:param account_type: Type of account (savings, checkings, credit).
:param holder_accounts: An AccountHolder.Accounts() class.
:param accountholder_id: Unique ID of the account holder.
:param opening_balance: When account is created the opening amount of $.
:param open_date: Date the account was opened.
:param status: Status of the account (open, closed, frozen).
"""
def __init__(
self,
account_id: int,
account_type: str,
holder_accounts,
accountholder_id: str,
opening_balance=0,
open_date=datetime.date.today(),
status: str = "open",
):
super().__init__(
account_id,
account_type,
holder_accounts,
accountholder_id,
opening_balance,
open_date,
status,
)
self.account_type = "checking"
self.holder_accounts.checking_accounts[self.account_id] = self
class SavingsAccount(Account):
"""
Class for savings accounts, inherits base account class.
:param account_id: Unique ID associated with the account.
:param account_type: Type of account (savings, checkings, credit).
:param holder_accounts: An AccountHolder.Accounts() class.
:param accountholder_id: Unique ID of the account holder.
:param opening_balance: When account is created the opening amount of $.
:param open_date: Date the account was opened.
:param status: Status of the account (open, closed, frozen).
:kwarg interest: The interest of the savings account.
"""
def __init__(
self,
account_id: int,
account_type: str,
holder_accounts,
accountholder_id: str,
opening_balance=0,
open_date=datetime.date.today(),
status: str = "open",
interest_rate=0.001,
):
super().__init__(
account_id,
account_type,
holder_accounts,
accountholder_id,
opening_balance,
open_date,
status,
)
self.account_type = account_type
self.interest_rate = interest_rate
self.holder_accounts.saving_accounts[self.account_id] = self
class CreditAccount(Account):
"""
Class for credit accounts, inherits base account class.
:param account_id: Unique ID associated with the account.
:param account_type: Type of account (savings, checkings, credit).
:param holder_accounts: An AccountHolder.Accounts() class.
:param accountholder_id: Unique ID of the account holder.
:param opening_balance: When account is created the opening amount of $.
:param open_date: Date the account was opened.
:param status: Status of the account (open, closed, frozen).
:kwarg apr: the APR charged on outstanding balance.
"""
def __init__(
self,
account_id: int,
account_type: str,
holder_accounts,
accountholder_id: str,
opening_balance=0,
open_date=datetime.date.today(),
status: str = "open",
apr_rate=0.15,
):
super().__init__(
account_id,
account_type,
accountholder_id,
opening_balance,
open_date,
status,
)
self.account_type = account_type
self.apr_rate = apr_rate
self.holderaccounts.credit_accounts[self.account_id] = self
# self.billing_end =
# self.balance_due =
# .
# .
# etc etc.
class Accounts:
"""
Class that maintains the relations between account holders, accounts and cards.
:param holder: AccountHolder object holding account holder information.
:param accountholder_id: ID of account holder.
"""
def __init__(self, holder, accountholder_id: str):
self.holder = holder
self.accountholder_id = accountholder_id
self.checking_accounts = {}
self.saving_accounts = {}
self.credit_accounts = {}
self.issued_cards = {}
@property
def holder_info(self):
"""
Summary of the account holder who is linked with the accounts.
"""
return self.holder.__repr__
@property
def accounts(self):
"""
Str summary of number of accounts.
"""
return "".join(
[
f"Accounts: Checking: {len(self.checking_accounts)}, ",
f"Savings: {len(self.saving_accounts)}, ",
f"Credit: {len(self.credit_accounts)}",
]
)
@property
def total_balance(self) -> int:
"""
Total balance of all accounts.
"""
return self._checking_balance + self._savings_balance + self._credit_balance
@property
def _checking_balance(self) -> int:
"""
Total balance of all checking accounts.
"""
bal = 0
for id, obj in self.checking_accounts.items():
bal += obj.balance
return bal
@property
def _savings_balance(self) -> int:
"""
Total balance of all savings accounts.
"""
bal = 0
for id, obj in self.saving_accounts.items():
bal += obj.balance
return bal
@property
def _credit_balance(self) -> int:
"""
Total balance of all credit accounts.
"""
bal = 0
for id, obj in self.credit_accounts.items():
bal += obj.balance
return bal
|
[
"datetime.date.today",
"time.time"
] |
[((937, 958), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (956, 958), False, 'import time, datetime\n'), ((3244, 3265), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (3263, 3265), False, 'import time, datetime\n'), ((4437, 4458), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (4456, 4458), False, 'import time, datetime\n'), ((5700, 5721), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (5719, 5721), False, 'import time, datetime\n'), ((2471, 2482), 'time.time', 'time.time', ([], {}), '()\n', (2480, 2482), False, 'import time, datetime\n'), ((2039, 2050), 'time.time', 'time.time', ([], {}), '()\n', (2048, 2050), False, 'import time, datetime\n')]
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import time
import re
from cElementTree import TreeBuilder, XMLParser, Element
import logging
log = logging.getLogger("koXMLTreeService")
# log.setLevel(logging.INFO)
class recollector:
def __init__(self):
self.res = {}
self.regs = {}
def add(self, name, reg, mods=None):
self.regs[name] = reg % self.regs
# print "%s = %s" % (name, self.regs[name])
if mods:
self.res[name] = re.compile(self.regs[
name], mods) # check that it is valid
else:
self.res[name] = re.compile(self.regs[
name]) # check that it is valid
collector = recollector()
a = collector.add
a("S", "[ \\n\\t\\r]+")
a("NameStrt", "[A-Za-z_:]|[^\\x00-\\x7F]")
a("NameChar", "[A-Za-z0-9_:.-]|[^\\x00-\\x7F]")
a("Name", "(?:%(NameStrt)s)(?:%(NameChar)s)*")
a("AttValSE", "\"[^<\"]*\"|'[^<']*'")
a("attrfinderRE", "(?:[\n \t]*)(%(Name)s)(?:%(S)s)?=(?:%(S)s)?(%(AttValSE)s)")
a("namespaces",
'xmlns(?::(?P<prefix>\w+))?=(?P<ns>(?:")([^"]*?)(?:")|(?:\')([^\']*?)(?:\'))', re.S | re.U)
a("tagpart",
'(?:<(?![?!-/>\s]))((?:(?P<prefix>[^\s/>]+):)?(?P<name>[^:\s/>]+)?)(?:\s+(?P<data>[^/<>]*))?', re.S | re.U)
a("tags", '<!--.*?-->|%(tagpart)s(?:/)?>', re.S | re.U)
a("alltags", '<!--.*?-->|(<[^\[!>?-].*?>)', re.S | re.U)
a("QuoteSE", "\"[^\"]*\"|'[^']*'")
a("DOCTYPE",
r'<!DOCTYPE\s+(?P<type>\S+)\s+(?P<ident>PUBLIC|SYSTEM)\s+(?P<data1>%(QuoteSE)s)\s*(?P<data2>%(QuoteSE)s)?\s*(?:\[|>)', re.S)
def getdoctype(text):
doctype = None
regex = collector.res["DOCTYPE"]
m = regex.search(text)
if m:
m = m.groupdict()
# [1:-1] is to strip quotes
if m['data1']:
m['data1'] = m['data1'][1:-1]
if m['data2']:
m['data2'] = m['data2'][1:-1]
if m['ident'] == 'PUBLIC':
doctype = (m['type'], m['ident'], m['data1'], m['data2'])
else:
doctype = (m['type'], m['ident'], "", m['data1'])
return doctype
def getattrs(text):
attrs = {}
regex = collector.res["attrfinderRE"]
match = regex.findall(text)
for a in match:
if a[1]:
attrs[a[0]] = a[1][1:-1]
else:
attrs[a[0]] = ""
return attrs
def currentTag(text):
m = collector.res["tagpart"].search(text)
if not m:
return None
td = m.groupdict()
ad = {}
if td['data']:
ad.update(getattrs(td['data']))
return (td['prefix'], td['name'], ad, m.start(0))
def elementFromTag(tree, tag, parent=None):
tagName = tag[1]
if not tagName:
tagName = ""
ns = None
if tag[0]:
if tag[0] in tree.prefixmap:
ns = tree.prefixmap[tag[0]]
else:
nsattr = "xmlns:%s" % tag[0]
if nsattr in tag[2]:
ns = tag[2][nsattr]
del tag[2][nsattr]
tree.prefixmap[tag[0]] = ns
elif "xmlns" in tag[2]:
ns = tag[2]["xmlns"]
del tag[2]["xmlns"]
elif parent is not None:
ns = parent.ns
localName = tag
if ns:
tagName = "{%s}%s" % (ns, tagName)
elem = Element(tagName, tag[2])
try:
elem.start = tree.err_info
elem.end = None
except:
# will happen when parsing with cElementTree
pass
# print elem.localName
if parent is not None:
parent.append(elem)
tree.nodemap[elem] = parent
tree.nodes.append(elem)
if elem.ns is not None:
if elem.ns not in tree.tags:
tree.tags[elem.ns] = {}
tree.tags[elem.ns][elem.localName] = elem
return elem
def elementFromText(tree, text, parent=None):
current = currentTag(text)
if current:
return elementFromTag(tree, current, parent)
return None
class iterparse:
"""iterparse that catches syntax errors so we can still handle any
events that happen prior to the syntax error"""
def __init__(self, content, events=("start", "end", "start-ns", "end-ns")):
self.content = content
self._events = events
self.err = None
self.err_info = None
self.root = None
def __iter__(self):
events = []
b = TreeBuilder()
p = XMLParser(b)
p._setevents(events, self._events)
try:
p.feed(self.content)
except SyntaxError as e:
self.err = e
self.err_info = (
p.CurrentLineNumber, p.CurrentColumnNumber, p.CurrentByteIndex)
for event in events:
yield event
del events[:]
try:
self.root = p.close()
except SyntaxError as e:
# if we had a previous syntax error, keep it
if not self.err:
self.err = e
self.err_info = (
p.CurrentLineNumber, p.CurrentColumnNumber, p.CurrentByteIndex)
for event in events:
yield event
def bisect_left_nodes_start(a, x, lo=0, hi=None):
    """Binary search over a list of nodes, keyed by ``node.start[:2]``.

    Like bisect.bisect_left, except that a node whose ``start`` is None
    or whose (line, col) equals *x* terminates the search at that index.

    Args:
        a: list of nodes ordered by start position.
        x: (line, col) tuple to locate.
        lo, hi: optional sub-range bounds.

    Returns:
        An insertion/match index into *a*.
    """
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo + hi) // 2
        start = a[mid].start
        # A node without a recorded start position stops the search here.
        if start is None:
            return mid
        key = start[:2]
        if key == x:
            return mid
        if key < x:
            lo = mid + 1
        else:
            hi = mid
    return lo
class XMLDocument(object):
    """Lightweight document model for (possibly broken) XML text.

    Parses with the fault-tolerant ``iterparse`` above, so documents that
    contain a syntax error (e.g. an unfinished start tag while the user
    is typing) still yield a partial tree plus the error position, which
    ``end_error`` then turns into a trailing partial element.
    """

    def __init__(self, content=None):
        self.content = content
        self.reset()
        if self.content:
            self.getDoctype()

    def getDoctype(self):
        """Extract doctype / public id / system id from self.content.

        ``getdoctype`` is defined elsewhere in this module; its result's
        third and fourth items are used as publicId and systemId.
        """
        self.doctype = getdoctype(self.content)
        if self.doctype:
            self.publicId = self.doctype[2]
            self.systemId = self.doctype[3]

    def reset(self):
        """Clear all parse state so the document can be (re)parsed."""
        self.doctype = None
        self.publicId = None
        self.systemId = None
        self.err = None
        self.err_info = None
        self.root = None
        self.current = None
        self._rootnodes = []
        self.nodes = [] # flat list of all nodes
        self.tags = {} # { namespace_uri: { tag_local_name: elem, ...} , ...}
        self.nodemap = {} # {child_elem: parent_elem, ... }
        self.namespaces = [] # flat list of namespace uri's
        self.nsmap = {} # { "http:/...": "xslt", ... }
        self.prefixmap = {} # { "xslt": "http://.....", ... }

    def getRoots(self):
        # return a list of all nodes that have no parent
        if not self._rootnodes:
            self._rootnodes = [
                node for node in self.nodemap if self.nodemap[node] is None]
        return self._rootnodes

    def namespace(self, elem):
        """Return elem's namespace URI, falling back to the default ns."""
        # print "%s:%s xmlns[%s]"%(self.prefix(elem),elem.localName,elem.ns)
        if hasattr(elem, "ns") and elem.ns:
            return elem.ns
        return self.nsmap.get("")

    def parent(self, elem):
        """Return elem's parent element, or None for a root/unknown node."""
        return self.nodemap.get(elem)

    def qname(self, name):
        """Turn a '{uri}local' tag name back into 'prefix:local' form."""
        if name and name[0] == '{':
            ns, ln = name[1:].split('}')
            prefix = self.nsmap.get(ns)
            if prefix:
                return "%s:%s" % (prefix, ln)
            return ln
        return name

    def isAncestorOf(self, node, child):
        """ Return true if child is a descendant of node """
        # print "asking if %r is an ancestor of %r" %( node, child)
        currentParent = self.parent(child)
        while currentParent != child and currentParent is not None:
            # print "\tparent =", currentParent
            if node == currentParent:
                # print "-->is a parent"
                return True
            currentParent = self.parent(currentParent)
        # print "-->isn't a parent"
        return False

    def locateNode(self, line, col):
        """Return the element enclosing the 0-indexed (line, col) position.

        Finds the last node starting at or before the position, then
        walks up the parent chain past any nodes that already ended
        before the position.
        """
        # nodes are 1-indexed, so we need to switch our indexing scheme
        line += 1
        # first look for the last node to start at or before the current
        # position
        idx = bisect_left_nodes_start(self.nodes, (line, col))-1
        if idx < 0:
            if self.nodes:
                return self.nodes[0]
            return None
        assert idx < len(self.nodes)
        node = self.nodes[idx]
        # that was easy. Now we may need to move up the parent chain
        # from this node if we are past the end of a node but before
        # the beginning of another, e.g. <foo><bar>asd</bar>|</foo>
        # -- the right node is foo, but the current value of node is 'bar'
        # NOTE(review): if node.start were None the slice below would
        # already have raised, so the None check after it looks
        # unreachable — confirm intent.
        startPos = node.start[:2]
        if startPos is None: # if we're in a partial node, that's it
            return node
        if startPos[:2] == (line, col): # if it's an exact match, that's it
            return node
        # if idx == 0: return node # if we're at the toplevel, so be it
        while node is not None:
            while node.end:
                # move up the parent chain until you get a parent
                # whose end is after the current location
                last_line, last_col = node.end[:2]
                if (last_line, last_col) < (line, col):
                    node = self.parent(node)
                    if node is None:
                        return node
                    continue
                break
            if node is not None and not node.end:
                # check it's parents and see if they have end markers
                pnode = self.parent(node)
                while pnode:
                    if pnode.end:
                        last_line, last_col = pnode.end[:2]
                        if (last_line, last_col) < (line, col):
                            node = pnode
                            break
                    pnode = self.parent(pnode)
                if node.end:
                    continue
            break
        return node

    def prefixFromNS(self, ns):
        """Return the prefix mapped to namespace *ns* ("" for default)."""
        if self.prefixmap.get("") == ns:
            return ""
        prefix = self.nsmap.get(ns)
        if not prefix:
            # fall back to the root element's namespace prefix
            prefix = self.nsmap.get(self.root.ns)
        return prefix

    def prefix(self, elem):
        """Return the namespace prefix for *elem* ("" when unqualified)."""
        if not hasattr(elem, "ns") or not elem.ns:
            return ""
        return self.prefixFromNS(elem.ns)

    def tagname(self, elem):
        """Return elem's qualified name, e.g. 'xsl:template'."""
        prefix = self.prefix(elem)
        if prefix:
            return "%s:%s" % (prefix, elem.localName)
        return elem.localName

    # matches a full end tag, e.g. "</xsl:template>"
    _endtagRe = re.compile(r"(</(\w+:)?\w+>)", re.U)

    def parse(self, content=None):
        """Parse *content* (or the previously stored content) into the
        node maps, tolerating a trailing syntax error.

        Raises:
            Exception: when neither *content* nor self.content is set.
        """
        self.reset()
        self.content = content
        if content:
            # first, find the doctype decl
            self.getDoctype()
        elif not self.content:
            raise Exception("no content to parse")
        elstack = [None]
        self.current = None
        # NOTE(review): tags and last_pos_ok appear unused below.
        tags = {}
        last_pos_ok = None
        iter = iterparse(self.content)
        for event, elem in iter:
            if event == "start":
                # print "%r %r %d %d %d" % (event, elem, elem.start[0],
                # elem.start[1], elem.start[2])
                self.nodemap[elem] = self.current
                self.nodes.append(elem)
                if elem.ns not in self.tags:
                    self.tags[elem.ns] = {}
                self.tags[elem.ns][elem.localName] = elem
                elstack.append(elem)
                self.current = elem
            elif event == "end":
                # print "%r %r %r %r" % (event, elem, elem.start, elem.end)
                if elem.end:
                    try:
                        pos = elem.end[2]
                        # print "  len %d pos %d" % (len(self.content), pos)
                        # put the end location at the end of the end tag
                        m = self._endtagRe.match(self.content[pos:])
                        if m and m.groups():
                            pos = pos + m.end(1)
                            if pos > 0:
                                # we want to be after the ">"
                                diff = pos - elem.end[2] + 1
                                elem.end = (elem.end[
                                    0], elem.end[1] + diff, pos)
                    except IndexError as e:
                        # XXX FIXME BUG 56337
                        log.exception(e)
                        pass
                node = elstack.pop()
                if elstack[-1] is None:
                    # this node closed at the top level: it is a root
                    self._rootnodes.append(node)
                self.current = elstack[-1]
            elif event == "start-ns":
                self.namespaces.append(elem)
                self.prefixmap[elem[0]] = elem[1]
                self.nsmap[elem[1]] = elem[0]
            elif event == "end-ns":
                self.namespaces.pop()
        self.root = iter.root
        self.err = iter.err
        self.err_info = iter.err_info
        # set the root if we can
        if self.root is None and self.nodes:
            self.root = self.nodes[0]
        self.end_error(self.content)
        # if we still do not have a root, do it
        # now, as we should have a node
        if self.root is None and self.nodes:
            self.root = self.nodes[0]
        # release content
        self.content = None

    def end_error(self, content):
        """Create a partial element for the text at the error position.

        Uses self.err_info (line, col, byte offset) to locate the broken
        tag in *content*, re-parses it with currentTag(), and appends the
        resulting partial element as self.current.
        """
        if not self.err_info:
            return
        if not content:
            raise Exception("No content?")
        # create an element for the last part of the parse
        parent = self.current
        if self.err_info[2] >= 0:
            start = self.err_info[2]
        else:
            # slower: reconstruct the byte offset by scanning for newlines
            # print self.err_info
            p = 0
            for i in range(self.err_info[0] - 1):
                # use re.search("\r|\n|\r\n")
                p = content.find("\n", p + 1)
            start = p + self.err_info[1] + 1
        end = content.find("<", start+1)
        if end <= start:
            end = len(content)
        # fixup the start position
        start = content.rfind(">", 0, start) + 1
        if start >= end:
            return
        # print self.err_info
        # print content[start:end]
        current = currentTag(content[start:end])
        if not current:
            return
        # print "%s:%s %r %d" % current
        # fix error info
        start = start+current[3]
        line = content.count('\n', 0, start)
        col = start - content.rfind('\n', 0, start)
        self.err_info = (line, col, start)
        self.current = elem = elementFromTag(self, current, parent)

    def dump(self):
        """Print a debugging summary of the parsed document."""
        print("error ", self.err)
        print("error_info ", self.err_info)
        print("%d nodes created" % len(self.nodemap))
        print("doctype ", self.doctype)
        print("publicId ", self.publicId)
        print("systemId ", self.systemId)
        print(self.prefixmap)
        print(self.nsmap)
        print("root ", self.root)
        if self.root:
            print("root tag ", self.root.tag)
            print("root ns ", self.root.ns)
            print("root localName ", self.root.localName)
            print("root start ", self.root.start)
            print("root end ", self.root.end)
        print("tree.current ", self.current)
import HTMLTreeParser
class HTMLDocument(XMLDocument):
    """XMLDocument variant that parses HTML via HTMLTreeParser."""

    def parse(self, content=None):
        """Parse HTML *content*, or re-parse the stored self.content.

        Args:
            content: HTML text; when omitted, the previously supplied
                self.content is used.

        Raises:
            Exception: when neither *content* nor self.content is set.
        """
        if content:
            self.reset()
            self.content = content
            # first, find the doctype decl
            self.getDoctype()
        elif not self.content:
            raise Exception("no content to parse")
        p = HTMLTreeParser.Parser(HTMLTreeParser.HTMLTreeBuilder())
        # Bug fix: feed the stored content, not the (possibly None)
        # argument, so parse() works when called with no argument but
        # with self.content already set — the elif above allows that.
        p.feed(self.content)
        self.root = p.close()
        self.nodes = p._builder.nodes
        self.nodemap = p._builder.nodemap
        self._rootnodes = p._builder._rootnodes
        self.current = p._builder.current
class TreeService:
    """Parses documents into XML/HTML trees and caches them by URI."""

    # Process-wide cache mapping a document URI to its parsed tree.
    # NOTE: class attribute, so the cache is shared by all instances.
    __treeMap = {}

    def __init__(self):
        pass

    def treeFromCache(self, uri):
        """Return the cached tree for *uri*, or None on a cache miss."""
        if uri in self.__treeMap:
            # print "tree cache hit for [%s]"%uri
            return self.__treeMap[uri]
        return None

    def getTreeForURI(self, uri, content=None):
        """Return a parsed tree for *uri*, parsing/caching as needed.

        Args:
            uri: cache key; also the file path read when *content* is
                not supplied.
            content: document text; when given, the (possibly cached)
                tree is re-parsed from it.

        Returns:
            An XMLDocument or HTMLDocument, or None when neither *uri*
            nor *content* is given.
        """
        if not uri and not content:
            return None
        tree = None
        if uri and uri in self.__treeMap:
            tree = self.__treeMap[uri]
            # print "tree cache hit for [%s]"%uri
            if not content:
                return tree
        if not tree:
            if not content:
                # No content supplied: read it from the file. The `with`
                # statement guarantees the handle is closed even if the
                # read fails (the old open/read/close leaked it then).
                try:
                    with open(uri, 'r') as f:
                        content = f.read(-1)
                except IOError:
                    # ignore file errors and return an empty tree
                    content = ""
            # Anything without an XML declaration is treated as HTML.
            if not content.startswith("<?xml"):
                tree = HTMLDocument()
            if not tree:
                tree = XMLDocument()
        if content:
            tree.parse(content)
        if uri:
            self.__treeMap[uri] = tree
        return tree

    def getTreeForContent(self, content):
        """Parse *content* without caching (no URI)."""
        return self.getTreeForURI(None, content)
__treeservice = None


def getService():
    """Return the process-wide TreeService singleton, creating it lazily."""
    global __treeservice
    if __treeservice is None:
        __treeservice = TreeService()
    return __treeservice
if __name__ == "__main__":
import sys
# basic logging configuration
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
bigfile = "/Users/shanec/main/Apps/Komodo-devel/test/bigfile.xml"
fn = "/Users/shanec/main/Apps/Komodo-devel/src/samples/xslt_sample.xsl"
from elementtree.ElementTree import tostring
if 0:
# fn = "/Users/shanec/main/Apps/Komodo-devel/src/install/wix/feature-
# core.wxs"
t1 = time.clock()
tree = getService().getTreeForURI(bigfile)
t2 = time.clock()
print("cElementTree took ", (t2-t1))
tree.dump()
if 0:
f = open(bigfile, 'r')
content = f.read(-1)
f.close()
t1 = time.clock()
tree = HTMLDocument()
tree.parse(content)
t2 = time.clock()
print("HTMLBuilder took ", (t2-t1))
if 0:
print(currentTag("<xsl"))
print(currentTag("<xsl:"))
print(currentTag("<xsl:tag"))
print(currentTag("text><xsl:tag"))
# print nodemap
html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
"""
tree = getService().getTreeForURI("Text.html", html)
print(tostring(tree.root))
html = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<HEAD>
<TITLE>Mozilla Cross-Reference</TITLE>
<link HREF=http://www.activestate.com/global.css rel="stylesheet" type="text/css">
</HEAD>
<BODY BGCOLOR="#FFFFFF" TEXT="#000000"
LINK="#0000EE" VLINK="#551A8B" ALINK="#FF0000">
<table width="100%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td>
<table width="100%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td width="145"><a href=http://www.activestate.com/index.html><img src=http://www.activestate.com/img/Main_Logo_Border.gif width="167" height="66" border="0" alt="ActiveState Tool Corp."></a></td>
<td bgcolor="#000000" colspan=2 width="90%" align="center"><img src=http://www.activestate.com/img/Main_Banner.gif alt="Programming for the People."></td>
</tr>
</table>
<table width="100%" bgcolor="#000000" border="0" cellpadding="0" cellspacing="0">
<tr>
<td width="600">
<table width="600" border="0" cellpadding="0" cellspacing="3">
<tr>
<td class="mainnav" bgcolor="#C2B266" width="100" align="center"><a href=http://www.activestate.com/Products/index.html>Products</a></td>
<td class="mainnav" bgcolor="#C2B266" width="100" align="center"><a href=http://www.activestate.com/Support/index.html>Support</a></td>
<td class="mainnav" bgcolor="#C2B266" width="100" align="center"><a href=http://www.activestate.com/Corporate/index.html>About Us</a></td>
<td class="mainnav" bgcolor="#C2B266" width="100" align="center"><a href=http://www.activestate.com/Contact_Us.html>Contact</a></td>
<td class="mainnav" bgcolor="#C2B266" width="100" align="center"><a href=http://www.activestate.com/Site_Map.html>Site Map</a></td>
</tr>
</table>
</td>
<td class="mainnav" width="100%">
<table width="100%" border="0" cellpadding="0" cellspacing="0">
<tr>
<td class="mainnav" bgcolor="#C2B266" width="100%"> </td>
<td class="mainnav" bgcolor="#000000" width="3"> </td>
</tr>
</table>
</td>
</tr>
</table>
</td>
</tr>
</table>
<I>$treename</I>
<P>
"""
tree = getService().getTreeForURI("Text.html", html)
print(tostring(tree.root))
html = """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">
<HTML>
<BODY>
<FORM><FIELDSET ><SELECT class=""><OPTGROUP >
"""
tree = getService().getTreeForContent(html)
tree = getService().getTreeForURI("newfile.txt", "")
tree = getService().getTreeForURI("newfile.txt", "<html>")
tree = getService().getTreeForURI("newfile.txt", "<html> <")
node = tree.locateNode(tree.current.start[0], tree.current.start[1])
assert node == tree.current, "locateNode returned incorrect node"
tree = getService().getTreeForURI("newfile.txt", "<table></table>\n\n\n\n")
node = tree.locateNode(2, 0)
assert node is None, "locateNode returned incorrect node"
node = tree.locateNode(0, 7)
assert node is not None, "locateNode returned incorrect node"
sys.exit(0)
xml = """
<c1><c2 a1="1" a2='1' a3='val'><e1 /><e2 f1="1" f2 = '33' /><c3 a='1'>blah</c3></c2 > </"""
tree = getService().getTreeForContent(xml)
node = tree.locateNode(tree.current.start[0], tree.current.start[1])
assert node == tree.current, "locateNode returned incorrect node"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xxmlns="xyz" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
<xsl:template match="Class">
<html> <xsl:apply-imports/>
<xsl:
<xsl:apply-templates select="Order"/>
</html>
</xsl:template>
"""
tree = getService().getTreeForContent(xml)
node = tree.locateNode(tree.current.start[0], tree.current.start[1])
assert node == tree.current, "locateNode returned incorrect node"
# ensure we get the correct current node
xml = """<?xml version="1.0"?>
<!DOCTYPE window PUBLIC "-//MOZILLA//DTD XUL V1.0//EN" "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<window xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<popupset id="editorTooltipSet">
<popup type="tooltip" id="editorTooltip" flex="1">
<description multiline="true" id="editorTooltip-tooltipText" class="tooltip-label" flex="1"/>
</popup><
<popup type="autocomplete" id="popupTextboxAutoComplete"/>
</popupset>
"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "popupset", "current element is incorrect"
# ensure we get the correct current node
xml = """<?xml version="1.0"?>
<!DOCTYPE window PUBLIC "-//MOZILLA//DTD XUL V1.0//EN" "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<window xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<popupset id="editorTooltipSet">
<popup type="tooltip" id="editorTooltip" flex="1">
<description multiline="true" id="editorTooltip-tooltipText" class="tooltip-label" flex="1"/>
</popup> <
<popup type="autocomplete" id="popupTextboxAutoComplete"/>
</popupset>
"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "popupset", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
<
<xsl:template/>
"""
tree = getService().getTreeForContent(xml)
assert tree.current == tree.root, "current element is incorrect"
assert tree.current.localName == "stylesheet", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
<xsl:"""
tree = getService().getTreeForContent(xml)
assert tree.current.tag == "{http://www.w3.org/1999/XSL/Transform}", "current element is incorrect"
assert tree.current.localName == "", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "stylesheet", "current element is incorrect"
xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html """
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "html", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
<xsl:template
"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "template", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/><xsl:template"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "template", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
<xsl:
<xsl:template/>
"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "", "current element is incorrect"
assert tree.current.tag == "{http://www.w3.org/1999/XSL/Transform}", "current element is incorrect"
html = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><body><p><ul><li><li><li></ul></body>
"""
tree = getService().getTreeForContent(html)
# print tostring(tree.root)
assert tree.current.localName == "html", "current element is incorrect"
html = """<!DOCTYPE h:html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<h:html xmlns:h='urn:test'"""
tree = getService().getTreeForContent(html)
# print tostring(tree.root)
assert tree.current.localName == "html", "current element is incorrect"
# from cElementTree import Element
# tag = u"{urn:test}test"
# print tag
# e = Element(tag, {})
# print e.localName
# print e.tag
xml = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This sample XML file shows you ... -->
<Class>
<Order Name="TINAMIFORMES">
<Family Name="TINAMIDAE">
<Species attr="value">content.</Species>
<![CDATA[
This is a CDATA section
]]>
</Family>
</Order>
"""
tree = getService().getTreeForContent(xml)
# print tostring(tree.root)
assert len(tree.root[0][0][0]) == 0, "bad parent/child relationship"
xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<body
<!-- a comment -->
<title>
</title>
</html>
"""
tree = getService().getTreeForContent(xml)
# print tostring(tree.root)
assert tree.current.localName == "body", "current element is incorrect"
assert tree.parent(
tree.current).localName == "html", "current element is incorrect"
xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html
<body
"""
tree = getService().getTreeForContent(xml)
# print tostring(tree.root)
assert tree.current.localName == "html", "current element is incorrect"
|
[
"HTMLTreeParser.HTMLTreeBuilder",
"logging.StreamHandler",
"cElementTree.Element",
"time.clock",
"logging.Formatter",
"elementtree.ElementTree.tostring",
"cElementTree.XMLParser",
"cElementTree.TreeBuilder",
"sys.exit",
"logging.getLogger",
"re.compile"
] |
[((1784, 1821), 'logging.getLogger', 'logging.getLogger', (['"""koXMLTreeService"""'], {}), "('koXMLTreeService')\n", (1801, 1821), False, 'import logging\n'), ((4853, 4877), 'cElementTree.Element', 'Element', (['tagName', 'tag[2]'], {}), '(tagName, tag[2])\n', (4860, 4877), False, 'from cElementTree import TreeBuilder, XMLParser, Element\n'), ((12085, 12122), 're.compile', 're.compile', (['"""(</(\\\\w+:)?\\\\w+>)"""', 're.U'], {}), "('(</(\\\\w+:)?\\\\w+>)', re.U)\n", (12095, 12122), False, 'import re\n'), ((19116, 19139), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (19137, 19139), False, 'import logging\n'), ((19243, 19304), 'logging.Formatter', 'logging.Formatter', (['"""%(name)-12s: %(levelname)-8s %(message)s"""'], {}), "('%(name)-12s: %(levelname)-8s %(message)s')\n", (19260, 19304), False, 'import logging\n'), ((23703, 23714), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (23711, 23714), False, 'import sys\n'), ((5915, 5928), 'cElementTree.TreeBuilder', 'TreeBuilder', ([], {}), '()\n', (5926, 5928), False, 'from cElementTree import TreeBuilder, XMLParser, Element\n'), ((5941, 5953), 'cElementTree.XMLParser', 'XMLParser', (['b'], {}), '(b)\n', (5950, 5953), False, 'from cElementTree import TreeBuilder, XMLParser, Element\n'), ((19788, 19800), 'time.clock', 'time.clock', ([], {}), '()\n', (19798, 19800), False, 'import time\n'), ((19865, 19877), 'time.clock', 'time.clock', ([], {}), '()\n', (19875, 19877), False, 'import time\n'), ((20045, 20057), 'time.clock', 'time.clock', ([], {}), '()\n', (20055, 20057), False, 'import time\n'), ((20129, 20141), 'time.clock', 'time.clock', ([], {}), '()\n', (20139, 20141), False, 'import time\n'), ((20621, 20640), 'elementtree.ElementTree.tostring', 'tostring', (['tree.root'], {}), '(tree.root)\n', (20629, 20640), False, 'from elementtree.ElementTree import tostring\n'), ((22882, 22901), 'elementtree.ElementTree.tostring', 'tostring', (['tree.root'], {}), '(tree.root)\n', (22890, 22901), False, 
'from elementtree.ElementTree import tostring\n'), ((2123, 2156), 're.compile', 're.compile', (['self.regs[name]', 'mods'], {}), '(self.regs[name], mods)\n', (2133, 2156), False, 'import re\n'), ((2267, 2294), 're.compile', 're.compile', (['self.regs[name]'], {}), '(self.regs[name])\n', (2277, 2294), False, 'import re\n'), ((17199, 17231), 'HTMLTreeParser.HTMLTreeBuilder', 'HTMLTreeParser.HTMLTreeBuilder', ([], {}), '()\n', (17229, 17231), False, 'import HTMLTreeParser\n'), ((19428, 19449), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (19445, 19449), False, 'import logging\n')]
|
# -*- coding: utf-8 -*-
from flask import request, current_app
from domain.models import Image, Document
from validation.base_validators import ParameterizedValidator
import repo
class CanCreateFacilityValidator(ParameterizedValidator):
    """Rejects the request (403) unless the current user may create
    facilities, as determined by the repo service."""

    def validate(self, f, *args, **kwargs):
        user_id = repo.get_user_id_for_user(cookies=request.cookies)
        # Short-circuits: the privilege check is skipped when no user id
        # could be resolved from the cookies.
        allowed = user_id and repo.can_user_create_facility(
            user_id, cookies=request.cookies)
        if not allowed:
            self.fail("You do not have privileges to create a facility.",
                      f, 403, None, *args, **kwargs)
class CanEditFacilityValidator(ParameterizedValidator):
    """Rejects the request (403) unless the current user may edit the
    facility targeted by the request."""

    def validate(self, f, *args, **kwargs):
        """Resolve the target facility id and check edit privileges.

        The facility is identified, in priority order, by:
          1. a ``facility_id`` kwarg (the normal case),
          2. an ``image_id`` kwarg (image belonging to a facility),
          3. a ``document_id`` kwarg (document belonging to a facility),
          4. a ``facilityId`` form field (POST of image/document).

        Bug fix: ``facility_id`` was previously unbound (NameError) when
        none of the branches matched; it now defaults to None and the
        privilege check fails cleanly instead.
        """
        facility_id = None
        if kwargs.get("facility_id", None):  # the normal case: a facility
            facility_id = kwargs["facility_id"]
        elif kwargs.get("image_id", None):  # an image related to a facility
            image = current_app.db_session.query(Image).get(kwargs["image_id"])
            facility_id = image.facility_id
        elif kwargs.get("document_id", None):  # a document related to a facility
            document = current_app.db_session.query(Document).get(kwargs["document_id"])
            facility_id = document.facility_id
        else:
            # POST image/document with facility id in the form body
            # (single lookup instead of the previous test-then-fetch).
            facility_id = request.form.get('facilityId')
        user_id = repo.get_user_id_for_user(cookies=request.cookies)
        valid = user_id and repo.can_user_edit_facility(user_id, facility_id,
                                                        cookies=request.cookies)
        if not valid:
            self.fail("You do not have privileges to edit facility %s." % facility_id,
                      f, 403, None, *args, **kwargs)
|
[
"flask.request.form.get",
"repo.can_user_create_facility",
"repo.can_user_edit_facility",
"repo.get_user_id_for_user",
"flask.current_app.db_session.query"
] |
[((302, 352), 'repo.get_user_id_for_user', 'repo.get_user_id_for_user', ([], {'cookies': 'request.cookies'}), '(cookies=request.cookies)\n', (327, 352), False, 'import repo\n'), ((1480, 1530), 'repo.get_user_id_for_user', 'repo.get_user_id_for_user', ([], {'cookies': 'request.cookies'}), '(cookies=request.cookies)\n', (1505, 1530), False, 'import repo\n'), ((381, 444), 'repo.can_user_create_facility', 'repo.can_user_create_facility', (['user_id'], {'cookies': 'request.cookies'}), '(user_id, cookies=request.cookies)\n', (410, 444), False, 'import repo\n'), ((1559, 1633), 'repo.can_user_edit_facility', 'repo.can_user_edit_facility', (['user_id', 'facility_id'], {'cookies': 'request.cookies'}), '(user_id, facility_id, cookies=request.cookies)\n', (1586, 1633), False, 'import repo\n'), ((1252, 1288), 'flask.request.form.get', 'request.form.get', (['"""facilityId"""', 'None'], {}), "('facilityId', None)\n", (1268, 1288), False, 'from flask import request, current_app\n'), ((917, 952), 'flask.current_app.db_session.query', 'current_app.db_session.query', (['Image'], {}), '(Image)\n', (945, 952), False, 'from flask import request, current_app\n'), ((1364, 1394), 'flask.request.form.get', 'request.form.get', (['"""facilityId"""'], {}), "('facilityId')\n", (1380, 1394), False, 'from flask import request, current_app\n'), ((1126, 1164), 'flask.current_app.db_session.query', 'current_app.db_session.query', (['Document'], {}), '(Document)\n', (1154, 1164), False, 'from flask import request, current_app\n')]
|
from numpy.linalg import norm
from numpy import dot
def cosine_sim(vec1, vec2):
    """Compute the cosine similarity of two vectors.

    Args:
        vec1 (list of float): A vector
        vec2 (list of float): A vector

    Returns:
        float: dot(vec1, vec2) / (||vec1|| * ||vec2||)
    """
    denominator = norm(vec1) * norm(vec2)
    return dot(vec1, vec2) / denominator
def select_salient_posts(post_vectors, post_weights, k=10, similarity_threshold=0.4):
    """
    Selects the top k most salient posts in a collection of posts.

    To avoid redundancy, any post too similar to already-selected posts is
    disregarded. Each selected post will therefore be both highly salient
    and representative of unique semantics.

    Note:
        post_vectors and post_weights must be in the same order. The ith
        element of post_weights must reflect the ith element of post_vectors.

    Bug fixes over the original implementation:
      * no longer raises IndexError for empty or single-post input,
      * never returns more than k posts (the old loop could return k+1
        when k == 1),
      * stops scanning selected posts as soon as one similar post is found.

    Args:
        post_vectors (list of (list of float)): Hybrid tfidf representation
            of the documents as a document-term matrix
        post_weights (list of float): Hybrid Tfidf weight for each document
        k (int): The number of posts to select as output
        similarity_threshold (float): The maximum cosine similarity for a
            post to be selected

    Returns:
        list of int: original indices of the selected posts, ordered by
        decreasing weight.
    """
    if len(post_vectors) == 0:
        return []
    # Pair each vector with its original index and sort by weight, descending.
    sorted_keyed_vectors = [z for _, z in sorted(zip(post_weights, enumerate(post_vectors)),
                                                 key=lambda i: i[0], reverse=True)]
    significant_indices = [0]                       # positions in the sorted list
    unsorted_indices = [sorted_keyed_vectors[0][0]]  # original indices (the result)
    for i in range(1, len(sorted_keyed_vectors)):
        if len(significant_indices) >= k:
            break
        candidate = sorted_keyed_vectors[i][1]
        # Skip any post too similar to an already-selected post.
        is_similar = any(
            cosine_sim(sorted_keyed_vectors[j][1], candidate) >= similarity_threshold
            for j in significant_indices)
        if not is_similar:
            significant_indices.append(i)
            unsorted_indices.append(sorted_keyed_vectors[i][0])
    return unsorted_indices
|
[
"numpy.dot",
"numpy.linalg.norm"
] |
[((338, 353), 'numpy.dot', 'dot', (['vec1', 'vec2'], {}), '(vec1, vec2)\n', (341, 353), False, 'from numpy import dot\n'), ((357, 367), 'numpy.linalg.norm', 'norm', (['vec1'], {}), '(vec1)\n', (361, 367), False, 'from numpy.linalg import norm\n'), ((370, 380), 'numpy.linalg.norm', 'norm', (['vec2'], {}), '(vec2)\n', (374, 380), False, 'from numpy.linalg import norm\n')]
|
import cv2
import numpy as np
from elements.yolo import OBJ_DETECTION
# The 80 COCO class names, in the index order produced by the YOLOv5 model;
# Object_colors below is indexed by position in this list.
Object_classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
                'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
                'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
                'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
                'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
                'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
                'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
                'hair drier', 'toothbrush' ]
# One random 0-255 color triple per class, used when drawing boxes/labels.
Object_colors = list(np.random.rand(80,3)*255)
# Detector instance loaded with pretrained YOLOv5s weights.
Object_detector = OBJ_DETECTION('weights/yolov5s.pt', Object_classes)
def gstreamer_pipeline(
    capture_width=1280,
    capture_height=720,
    display_width=1280,
    display_height=720,
    framerate=60,
    flip_method=0,
):
    """Build a GStreamer pipeline string for an NVIDIA CSI camera.

    The pipeline captures NV12 frames via nvarguscamerasrc, optionally
    flips/rotates them (flip_method), converts to BGR and hands them to
    an appsink — suitable for cv2.VideoCapture with CAP_GSTREAMER.

    Returns:
        str: the pipeline description.
    """
    stages = (
        "nvarguscamerasrc",
        "video/x-raw(memory:NVMM), width=(int)%d, height=(int)%d, "
        "format=(string)NV12, framerate=(fraction)%d/1"
        % (capture_width, capture_height, framerate),
        "nvvidconv flip-method=%d" % flip_method,
        "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx"
        % (display_width, display_height),
        "videoconvert",
        "video/x-raw, format=(string)BGR ! appsink",
    )
    return " ! ".join(stages)
# To flip the image, modify the flip_method parameter (0 and 2 are the most common)
print(gstreamer_pipeline(flip_method=0))
# cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
# NOTE: currently reads from a local video file instead of the CSI camera.
cap = cv2.VideoCapture("1627775013.mp4")
if cap.isOpened():
    window_handle = cv2.namedWindow("CSI Camera", cv2.WINDOW_AUTOSIZE)
    # Window
    # Run until the preview window is closed (property check fails below 0).
    while cv2.getWindowProperty("CSI Camera", 0) >= 0:
        ret, frame = cap.read()
        if ret and not(frame is None):
            # detection process
            objs = Object_detector.detect(frame)
            # plotting: one labelled bounding box per detection
            for obj in objs:
                # print(obj)
                label = obj['label']
                score = obj['score']
                [(xmin,ymin),(xmax,ymax)] = obj['bbox']
                color = Object_colors[Object_classes.index(label)]
                frame = cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), color, 2)
                frame = cv2.putText(frame, f'{label} ({str(score)})', (xmin,ymin), cv2.FONT_HERSHEY_SIMPLEX , 0.75, color, 1, cv2.LINE_AA)
        else:
            # end of stream or read failure: leave the loop
            break
        cv2.imshow("CSI Camera", frame)
        keyCode = cv2.waitKey(30)
        # quit on 'q'
        if keyCode == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
else:
    print("Unable to open camera")
|
[
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"elements.yolo.OBJ_DETECTION",
"cv2.rectangle",
"numpy.random.rand",
"cv2.destroyAllWindows",
"cv2.getWindowProperty",
"cv2.namedWindow"
] |
[((1145, 1196), 'elements.yolo.OBJ_DETECTION', 'OBJ_DETECTION', (['"""weights/yolov5s.pt"""', 'Object_classes'], {}), "('weights/yolov5s.pt', Object_classes)\n", (1158, 1196), False, 'from elements.yolo import OBJ_DETECTION\n'), ((2131, 2165), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""1627775013.mp4"""'], {}), "('1627775013.mp4')\n", (2147, 2165), False, 'import cv2\n'), ((2205, 2255), 'cv2.namedWindow', 'cv2.namedWindow', (['"""CSI Camera"""', 'cv2.WINDOW_AUTOSIZE'], {}), "('CSI Camera', cv2.WINDOW_AUTOSIZE)\n", (2220, 2255), False, 'import cv2\n'), ((3154, 3177), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3175, 3177), False, 'import cv2\n'), ((1101, 1122), 'numpy.random.rand', 'np.random.rand', (['(80)', '(3)'], {}), '(80, 3)\n', (1115, 1122), True, 'import numpy as np\n'), ((2279, 2317), 'cv2.getWindowProperty', 'cv2.getWindowProperty', (['"""CSI Camera"""', '(0)'], {}), "('CSI Camera', 0)\n", (2300, 2317), False, 'import cv2\n'), ((3016, 3047), 'cv2.imshow', 'cv2.imshow', (['"""CSI Camera"""', 'frame'], {}), "('CSI Camera', frame)\n", (3026, 3047), False, 'import cv2\n'), ((3066, 3081), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (3077, 3081), False, 'import cv2\n'), ((2779, 2837), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(xmin, ymin)', '(xmax, ymax)', 'color', '(2)'], {}), '(frame, (xmin, ymin), (xmax, ymax), color, 2)\n', (2792, 2837), False, 'import cv2\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 12:25:25 2018
Toy datasets.
@author: jlsuarezdiaz
"""
import numpy as np
import pandas as pd
from six.moves import xrange
from sklearn.preprocessing import LabelEncoder
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.datasets import(
load_iris, load_digits)
def toy_plot(X, y):
    """Scatter-plot a labeled 2D dataset with a rainbow colormap and show it.

    :param X: array of shape (n, 2) with the point coordinates.
    :param y: per-point labels, used both as colors and as the legend label.
    :return: the ``matplotlib.pyplot`` module, so callers can keep customizing.
    """
    fig, axes = plt.subplots(figsize=(12, 9))
    plt.axis('equal')
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap="rainbow", label=y)
    plt.show()
    return plt
def circular_toy_dataset(rads = [1,2], samples = [200,200], noise = [0.2,0.2], seed = None):
    """Generate concentric noisy circles, one per entry of *rads*.

    Each circle contributes ``samples[j]`` points at radius ``rads[j]`` with
    Gaussian radial noise ``noise[j]``; labels are the encoded radii.

    :return: tuple ``(X, y)`` with coordinates of shape (n, 2) and integer labels.
    """
    if seed is not None:
        np.random.seed(seed)

    total = sum(samples)
    X = np.empty([total, 2])
    y = np.empty([total])
    encoder = LabelEncoder()
    encoder.fit(rads)

    offset = 0
    for j, count in enumerate(samples):
        for i in range(count):
            # Draw the two radial perturbations in the same order as before
            # so seeded runs stay reproducible.
            r1 = rads[j] + noise[j] * np.random.randn()
            r2 = rads[j] + noise[j] * np.random.randn()
            angle = 2 * np.pi * i / count
            X[offset + i, :] = [r1 * np.cos(angle), r2 * np.sin(angle)]
            y[offset + i] = rads[j]
        offset += count

    y = encoder.transform(y)
    return X, y
def hiperplane_toy_dataset(ws = [[1,1],[1,-1]],bs = [[0,0],[0,0]],nsamples=800,xrange=[-1,1],yrange=[-1,1], noise = 0.1,seed = None):
    """Label uniform random 2D points by their side of each noisy hyperplane.

    Every (w, b) pair defines one hyperplane and contributes one bit; a point's
    class is the integer formed by those bits (so ``2**len(ws)`` classes at most).

    :return: tuple ``(X, y)`` with coordinates of shape (nsamples, 2) and labels.
    """
    if seed is not None:
        np.random.seed(seed)

    n = nsamples
    X = np.random.rand(n, 2)
    # Stretch the unit square onto the requested x/y ranges.
    X[:, 0] = (xrange[1] - xrange[0]) * X[:, 0] + xrange[0]
    X[:, 1] = (yrange[1] - yrange[0]) * X[:, 1] + yrange[0]

    y = np.zeros([n])
    bits = np.empty([n, len(ws)])
    for j, (w, b) in enumerate(zip(ws, bs)):
        w_mat = np.matrix(w)
        b_mat = np.matrix(b)
        perturbation = noise * np.random.randn(n, 2)
        bits[:, j] = np.sign(((X + perturbation) - b_mat).dot(w_mat.T)).reshape([n])

    bits[bits == -1] = 0
    bits = bits.astype(int)
    for i in range(n):
        for j, bit in enumerate(bits[i, :]):
            y[i] += (bit << j)

    return X, y
def iris2d_toy_dataset(dims=[0,2]):
    """Return two selected feature columns of the iris dataset and its targets.

    :param dims: indices of the two feature columns to keep (default: sepal
        length and petal length).
    :return: tuple ``(X, y)`` with the selected features and class labels.
    """
    bunch = load_iris()  # IRIS
    features = bunch['data'][:, dims]
    return features, bunch['target']
def balls_toy_dataset(centers = [[-2,-2],[0,0],[2,2],[2,-2],[-2,2]],rads = [1.4,1.4,1.4,1.4,1.4],samples=[200,200,200,200,200],noise = [0.3,0.3,0.3,0.3,0.3],seed=None):
    """Sample 2D points inside noisy discs around the given centers.

    Cluster ``j`` gets ``samples[j]`` points at random angles and radii up to
    ``rads[j]``, jittered by ``noise[j]``; its points are labeled ``j``.

    :return: tuple ``(X, y)`` with coordinates of shape (n, 2) and labels.
    """
    if seed is not None:
        np.random.seed(seed)

    total = sum(samples)
    X = np.empty([total, 2])
    y = np.empty([total])

    start = 0
    for label, count in enumerate(samples):
        # Three rand(count) draws per cluster, in the original order,
        # keep seeded runs reproducible.
        radii = rads[label] * np.random.rand(count)
        angles = 2 * np.pi * np.random.rand(count)
        jitter = noise[label] * np.random.rand(count)
        center = np.array(centers[label])
        for i in range(count):
            direction = np.array([np.cos(angles[i]), np.sin(angles[i])])
            X[start + i, :] = center + jitter[i] + radii[i] * direction
            y[start + i] = label
        start += count

    return X, y
def simetria_hor(A):
    """Horizontal-symmetry score of a 2D image: mean |A - mirror(A)|.

    A perfectly left/right-symmetric image scores 0; larger values mean less
    symmetry.
    """
    nrow, ncol = A.shape
    mirrored = A[:, ::-1]
    return np.mean(np.abs(A - mirrored))
def simetria_ver(A):
    """Vertical-symmetry score of a 2D image: mean |A - flip(A)|.

    A perfectly top/bottom-symmetric image scores 0; larger values mean less
    symmetry.
    """
    nrow, ncol = A.shape
    flipped = A[::-1, :]
    return np.mean(np.abs(A - flipped))
def digits_toy_dataset(dims=[0,2],numbers=[0,1,2,3,4,5,6,7,8,9]):
    """Describe each sklearn digit by symmetry and intensity features.

    Every 8x8 digit image is summarized as (horizontal symmetry, vertical
    symmetry, mean intensity); only the requested digit classes and feature
    dimensions are returned.

    :return: tuple ``(X, y)`` restricted to *numbers* and feature *dims*.
    """
    bunch = load_digits()
    targets = bunch['target']
    count = len(targets)
    images = bunch['data'].reshape([count, 8, 8])

    feats = np.empty([count, 3])
    for i in range(count):
        feats[i, 0] = simetria_hor(images[i, :, :])
        feats[i, 1] = simetria_ver(images[i, :, :])
        feats[i, 2] = np.mean(images[i, :])

    keep = np.where(np.isin(targets, numbers))[0]
    return feats[keep, :][:, dims], targets[keep]
def single_toy_dataset(samples=8, classes = 3, seed=None):
    """Draw random 2D points in the unit square with uniform random labels.

    :param samples: number of points to generate.
    :param classes: labels are drawn uniformly from ``range(classes)``.
    :param seed: optional RNG seed. It was previously accepted but silently
        ignored; it is now honored for reproducibility, matching every other
        generator in this module (``circular_toy_dataset`` etc.).
    :return: tuple ``(X, y)`` of shapes ``(samples, 2)`` and ``(samples,)``.
    """
    if seed is not None:
        np.random.seed(seed)
    X = np.empty([samples, 2])
    y = np.empty([samples])
    for i in range(samples):
        # Keep the original draw order (label first, then coordinates).
        y[i] = np.random.randint(0, classes)
        X[i, :] = np.random.rand(1, 2)
    return X, y
|
[
"sklearn.datasets.load_iris",
"sklearn.datasets.load_digits",
"numpy.isin",
"numpy.abs",
"numpy.random.seed",
"numpy.empty",
"numpy.mean",
"numpy.random.randint",
"numpy.sin",
"numpy.random.randn",
"sklearn.preprocessing.LabelEncoder",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.cos",
"numpy.matrix",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"matplotlib.pyplot.axis",
"six.moves.xrange",
"numpy.array",
"numpy.random.rand"
] |
[((393, 422), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 9)'}), '(figsize=(12, 9))\n', (405, 422), True, 'import matplotlib.pyplot as plt\n'), ((431, 448), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (439, 448), True, 'import matplotlib.pyplot as plt\n'), ((453, 512), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'y', 'cmap': '"""rainbow"""', 'label': 'y'}), "(X[:, 0], X[:, 1], c=y, cmap='rainbow', label=y)\n", (464, 512), True, 'import matplotlib.pyplot as plt\n'), ((835, 845), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (843, 845), True, 'import matplotlib.pyplot as plt\n'), ((1057, 1073), 'numpy.empty', 'np.empty', (['[n, d]'], {}), '([n, d])\n', (1065, 1073), True, 'import numpy as np\n'), ((1081, 1094), 'numpy.empty', 'np.empty', (['[n]'], {}), '([n])\n', (1089, 1094), True, 'import numpy as np\n'), ((1104, 1118), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1116, 1118), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1792, 1812), 'numpy.random.rand', 'np.random.rand', (['n', 'd'], {}), '(n, d)\n', (1806, 1812), True, 'import numpy as np\n'), ((1820, 1833), 'numpy.zeros', 'np.zeros', (['[n]'], {}), '([n])\n', (1828, 1833), True, 'import numpy as np\n'), ((2394, 2405), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (2403, 2405), False, 'from sklearn.datasets import load_iris, load_digits\n'), ((2758, 2774), 'numpy.empty', 'np.empty', (['[n, d]'], {}), '([n, d])\n', (2766, 2774), True, 'import numpy as np\n'), ((2780, 2793), 'numpy.empty', 'np.empty', (['[n]'], {}), '([n])\n', (2788, 2793), True, 'import numpy as np\n'), ((3248, 3270), 'numpy.abs', 'np.abs', (['(A - A[:, ::-1])'], {}), '(A - A[:, ::-1])\n', (3254, 3270), True, 'import numpy as np\n'), ((3316, 3326), 'numpy.mean', 'np.mean', (['A'], {}), '(A)\n', (3323, 3326), True, 'import numpy as np\n'), ((3445, 3467), 'numpy.abs', 'np.abs', (['(A - A[::-1, 
:])'], {}), '(A - A[::-1, :])\n', (3451, 3467), True, 'import numpy as np\n'), ((3513, 3523), 'numpy.mean', 'np.mean', (['A'], {}), '(A)\n', (3520, 3523), True, 'import numpy as np\n'), ((3664, 3677), 'sklearn.datasets.load_digits', 'load_digits', ([], {}), '()\n', (3675, 3677), False, 'from sklearn.datasets import load_iris, load_digits\n'), ((3783, 3800), 'numpy.empty', 'np.empty', (['[nn, 3]'], {}), '([nn, 3])\n', (3791, 3800), True, 'import numpy as np\n'), ((3813, 3823), 'six.moves.xrange', 'xrange', (['nn'], {}), '(nn)\n', (3819, 3823), False, 'from six.moves import xrange\n'), ((4112, 4134), 'numpy.empty', 'np.empty', (['[samples, 2]'], {}), '([samples, 2])\n', (4120, 4134), True, 'import numpy as np\n'), ((4142, 4161), 'numpy.empty', 'np.empty', (['[samples]'], {}), '([samples])\n', (4150, 4161), True, 'import numpy as np\n'), ((4175, 4190), 'six.moves.xrange', 'xrange', (['samples'], {}), '(samples)\n', (4181, 4190), False, 'from six.moves import xrange\n'), ((988, 1008), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1002, 1008), True, 'import numpy as np\n'), ((1206, 1215), 'six.moves.xrange', 'xrange', (['s'], {}), '(s)\n', (1212, 1215), False, 'from six.moves import xrange\n'), ((1731, 1751), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1745, 1751), True, 'import numpy as np\n'), ((2035, 2047), 'numpy.matrix', 'np.matrix', (['w'], {}), '(w)\n', (2044, 2047), True, 'import numpy as np\n'), ((2060, 2072), 'numpy.matrix', 'np.matrix', (['b'], {}), '(b)\n', (2069, 2072), True, 'import numpy as np\n'), ((2688, 2708), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2702, 2708), True, 'import numpy as np\n'), ((2978, 2998), 'numpy.array', 'np.array', (['centers[j]'], {}), '(centers[j])\n', (2986, 2998), True, 'import numpy as np\n'), ((3025, 3034), 'six.moves.xrange', 'xrange', (['s'], {}), '(s)\n', (3031, 3034), False, 'from six.moves import xrange\n'), ((3924, 3941), 'numpy.mean', 'np.mean', 
(['XX[i, :]'], {}), '(XX[i, :])\n', (3931, 3941), True, 'import numpy as np\n'), ((4204, 4233), 'numpy.random.randint', 'np.random.randint', (['(0)', 'classes'], {}), '(0, classes)\n', (4221, 4233), True, 'import numpy as np\n'), ((4245, 4265), 'numpy.random.rand', 'np.random.rand', (['(1)', '(2)'], {}), '(1, 2)\n', (4259, 4265), True, 'import numpy as np\n'), ((2092, 2113), 'numpy.random.randn', 'np.random.randn', (['n', '(2)'], {}), '(n, 2)\n', (2107, 2113), True, 'import numpy as np\n'), ((2867, 2884), 'numpy.random.rand', 'np.random.rand', (['s'], {}), '(s)\n', (2881, 2884), True, 'import numpy as np\n'), ((2908, 2925), 'numpy.random.rand', 'np.random.rand', (['s'], {}), '(s)\n', (2922, 2925), True, 'import numpy as np\n'), ((2948, 2965), 'numpy.random.rand', 'np.random.rand', (['s'], {}), '(s)\n', (2962, 2965), True, 'import numpy as np\n'), ((3970, 3989), 'numpy.isin', 'np.isin', (['y', 'numbers'], {}), '(y, numbers)\n', (3977, 3989), True, 'import numpy as np\n'), ((1244, 1261), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (1259, 1261), True, 'import numpy as np\n'), ((1289, 1306), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (1304, 1306), True, 'import numpy as np\n'), ((1338, 1363), 'numpy.cos', 'np.cos', (['(2 * np.pi * i / s)'], {}), '(2 * np.pi * i / s)\n', (1344, 1363), True, 'import numpy as np\n'), ((1389, 1414), 'numpy.sin', 'np.sin', (['(2 * np.pi * i / s)'], {}), '(2 * np.pi * i / s)\n', (1395, 1414), True, 'import numpy as np\n'), ((3089, 3104), 'numpy.cos', 'np.cos', (['angs[i]'], {}), '(angs[i])\n', (3095, 3104), True, 'import numpy as np\n'), ((3105, 3120), 'numpy.sin', 'np.sin', (['angs[i]'], {}), '(angs[i])\n', (3111, 3120), True, 'import numpy as np\n')]
|
"""
Module wich contains the EditMenu class and some functions linked to this menu
"""
import wx
import sys
import os
from api.api_pyflakes import main as CheckPySyntax
from Utils.voice_synthese import my_speak
class EditMenu(wx.Menu):
    """Edit menu of the main frame.

    Builds a wx.Menu populated with the standard editing actions (Copy, Cut,
    Paste, Undo/Redo, Syntax Check, Find/Replace) and holds the handlers the
    owning frame binds those menu items to.

    :return: the Edit menu filled with its buttons
    :rtype: wx.Menu see https://wxpython.org/Phoenix/docs/html/wx.Menu.html
    """
    def __init__(self, frame):
        """
        Constructor method.

        :param frame: owning main frame; the handlers below use it to reach
            the notebook (open editor tabs) and the output shell.
        """
        wx.Menu.__init__(self, "Edit")
        self.frame = frame
        self.item_list = []
        self.Append(wx.ID_COPY, "&Copy\tCTRL+C")
        self.Append(wx.ID_CUT, "&Cut\tCTRL+X")
        self.Append(wx.ID_PASTE, "&Paste\tCTRL+V")
        # NOTE(review): wx.ID_REDO carries the "Undo" label and wx.ID_UNDO the
        # "Redo" label — this looks swapped, but the frame's event bindings
        # (not visible here) may compensate; confirm before changing.
        self.Append(wx.ID_REDO, "&Undo\tCTRL+Z")
        self.Append(wx.ID_UNDO, "&Redo\tCTRL+Y")
        # NOTE(review): wx.ID_SYNTAX_CHECK is not a standard wxPython ID —
        # presumably defined elsewhere in the project; verify it exists.
        self.Append(wx.ID_SYNTAX_CHECK, "&Syntax Check")
        self.Append(wx.ID_FIND, "&Find and/or Replace\tCTRL+F")

    def OnCopy(self, evt):
        """Copy the selection of the current editor tab to the clipboard.

        :param evt: Event to trigger the method
        :type evt: wx.Event
        """
        self.frame.notebook.GetCurrentPage().Copy()

    def OnPaste(self, evt):
        """Paste the clipboard content into the current editor tab.

        :param evt: Event to trigger the method
        :type evt: wx.Event
        """
        self.frame.notebook.GetCurrentPage().Paste()

    def OnCut(self, evt):
        """Cut the selection of the current editor tab to the clipboard.

        :param evt: Event to trigger the method
        :type evt: wx.Event
        """
        self.frame.notebook.GetCurrentPage().Cut()

    def OnRedo(self, evt):
        """Redo the last undone edit in the current editor tab.

        :param evt: Event to trigger the method
        :type evt: wx.Event
        """
        self.frame.notebook.GetCurrentPage().Redo()

    def OnUndo(self, evt):
        """Undo the last edit in the current editor tab.

        :param evt: Event to trigger the method
        :type evt: wx.Event
        """
        self.frame.notebook.GetCurrentPage().Undo()

    def OnFindReplace(self, evt):
        """Open the current tab's Find/Replace dialog (no-op with no tab open).

        :param evt: Event to trigger the method
        :type evt: wx.Event
        """
        notebookP = self.frame.notebook
        page = notebookP.GetCurrentPage()
        if page is None:
            return
        page.OnShowFindReplace()

    def OnSyntaxCheck(self, evt):
        """Run pyflakes over the current tab and report findings in the shell.

        Writes the editor buffer to a temp file, runs pyflakes on it while
        capturing stdout/stderr into files, then reads the captures back and
        appends them to the frame's shell.

        :param evt: Event to trigger the method
        :type evt: wx.Event
        """
        page = self.frame.notebook.GetCurrentPage()
        syntaxCheckFilePath = "%s/temp/syntaxCheck.py" % os.getcwd()
        syntaxCheckFileText = page.GetValue()
        filehandle = open(syntaxCheckFilePath, "wb")
        # The buffer uses '\r' line endings; rewrite it line by line.
        syntaxCheckFileText = syntaxCheckFileText.split("\r")
        nocheck = 0
        # NOTE(review): `nocheck` is meant to track triple-quoted blocks, but
        # every branch below writes exactly the same bytes as the fall-through
        # path, so the flag currently has no observable effect.
        for i in syntaxCheckFileText:
            if i.find("'''") >= 0 and nocheck == 0:
                nocheck = 1
            elif i.find("'''") >= 0 and nocheck == 1:
                nocheck = 0
            if nocheck == 1:
                if i == "":
                    filehandle.write('\r'.encode('utf-8'))
                    continue
                else:
                    filehandle.write(i.encode('utf-8'))
                    filehandle.write('\r'.encode('utf-8'))
                    continue
            elif i == "":
                filehandle.write('\r'.encode('utf-8'))
                continue
            filehandle.write(i.encode('utf-8'))
            filehandle.write('\r'.encode('utf-8'))
        filehandle.close()
        # Temporarily redirect stdout/stderr into files so the pyflakes
        # report (which it prints) can be read back afterwards.
        backStdout = sys.stdout
        backStderr = sys.stderr
        stdoutFilePath = "%s/temp/stdout.py" % os.getcwd()
        stderrFilePath = "%s/temp/stderr.py" % os.getcwd()
        stdoutFile = open(stdoutFilePath, 'w')
        stderrFile = open(stderrFilePath, 'w')
        sys.stdout = stdoutFile
        sys.stderr = stderrFile
        CheckPySyntax(None, str(syntaxCheckFilePath))
        sys.stdout = backStdout
        sys.stderr = backStderr
        stdoutFile.close()
        stderrFile.close()
        stdoutFile = open(stdoutFilePath, 'r')
        stderrFile = open(stderrFilePath, 'r')
        stdout = stdoutFile.read()
        stderr = stderrFile.read()
        stdoutFile.close()
        stderrFile.close()
        appendMsg = page.filename
        if str(stdout) == "" and str(stderr) == "":
            my_speak(self.frame, "No Error Detected !")
            pass
        else:
            my_speak(self.frame, "Some Errors or Warnings Detected, check")
        if stdout != "":
            stdout = stdout.split("\n")
            for i in stdout:
                if i == "":
                    continue
                # Strip the temp-file path prefix from pyflakes messages.
                if i.find("syntaxCheck.py") > 0:
                    i = i[len(syntaxCheckFilePath):]
                appendMsg = appendMsg + i + "\n"
            self.frame.shell.AppendText(appendMsg)
        if stderr == "":
            pass
        else:
            stderr = stderr.split("\n")
            for i in stderr:
                if i == "":
                    continue
                if i.find("syntaxCheck.py") > 0:
                    i = i[len(syntaxCheckFilePath):]
                appendMsg = appendMsg + "\n" + i
            self.frame.shell.AppendText(appendMsg)
        self.frame.shell.AppendText("Syntax terminated.\n")
|
[
"os.getcwd",
"Utils.voice_synthese.my_speak",
"wx.Menu.__init__"
] |
[((571, 601), 'wx.Menu.__init__', 'wx.Menu.__init__', (['self', '"""Edit"""'], {}), "(self, 'Edit')\n", (587, 601), False, 'import wx\n'), ((2705, 2716), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2714, 2716), False, 'import os\n'), ((3756, 3767), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3765, 3767), False, 'import os\n'), ((3815, 3826), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3824, 3826), False, 'import os\n'), ((4476, 4519), 'Utils.voice_synthese.my_speak', 'my_speak', (['self.frame', '"""No Error Detected !"""'], {}), "(self.frame, 'No Error Detected !')\n", (4484, 4519), False, 'from Utils.voice_synthese import my_speak\n'), ((4563, 4626), 'Utils.voice_synthese.my_speak', 'my_speak', (['self.frame', '"""Some Errors or Warnings Detected, check"""'], {}), "(self.frame, 'Some Errors or Warnings Detected, check')\n", (4571, 4626), False, 'from Utils.voice_synthese import my_speak\n')]
|
# (c) Copyright [2017] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
* Validate that every benchmark in ``input-file`` has mandatory parameters
defined in ``params``
$ python result_processor.py validate --input-file= --params=
* Filter benchmarks in ``input-file`` by throwing away those not containing
specific parameters defined in ``params``. The filtered subset of benchmarks
is written to ``output-file``.
$ python result_processor.py filter --input-file= --params= --output-file=
* Update every benchmark in ``input-file`` by overriding values of specific
parameters which value are defined in ``params``. The updated subset of
benchmarks is written to ``output-file``.
$ python result_processor.py update --input-file= --params= --output-file=
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
from collections import defaultdict
from dlbs.utils import Six
from dlbs.utils import DictUtils
from dlbs.processor import Processor
def load_json_file(file_name):
    """Read *file_name* and deserialize its contents as JSON.

    :param str file_name: Path of the JSON document to read.
    :return: The deserialized JSON object.
    """
    with open(file_name) as stream:
        return json.load(stream)
def get_params(params):
    """Turn a JSON-parsable string into a parameter dictionary.

    The string is interpreted depending on what it parses to:

    * ``string`` - treated as a file name containing the real JSON object;
    * ``list``   - converted to a dict mapping each parameter to ``None``;
    * ``dict``   - used as-is, mapping parameters to their values.

    :param str params: A JSON parsable string describing the parameters.
    :return: Dictionary mapping parameter names to values (``None`` = no value).
    :rtype: dict
    :raises ValueError: if the parsed object is neither string, list nor dict.
    """
    parsed = json.loads(params)
    if isinstance(parsed, Six.string_types):
        parsed = load_json_file(parsed)
    if isinstance(parsed, list):
        parsed = {key: None for key in parsed}
    if not isinstance(parsed, dict):
        raise ValueError("Invalid type of object that holds parameters (%s)" % type(parsed))
    return parsed
def validate_benchmarks(args):
    """Validate benchmarks: mandatory parameters present, 'exp.id's unique.

    Prints a human-readable report; nothing is returned or written.

    :param argparse args: Command line arguments.

    The following command line arguments are used:

    * ``args.input_file`` A file with benchmark results.
    * ``args.params`` Specification of mandatory parameters. For format,
      read comments of ``get_params`` function.
    """
    benchmarks = load_json_file(args.input_file)['data']
    mandatory = get_params(args.params)

    missing_params = defaultdict(lambda: 0)  # param -> #benchmarks missing it
    seen_ids = set()
    duplicates = False
    for benchmark in benchmarks:
        for param in mandatory:
            if param not in benchmark:
                missing_params[param] += 1
        if 'exp.id' in benchmark:
            benchmark_id = benchmark['exp.id']
            if benchmark_id in seen_ids:
                duplicates = True
            else:
                seen_ids.add(benchmark_id)

    print("Number of benchmarks: %d" % len(benchmarks))
    if missing_params or duplicates:
        print("Benchmark validation result: FAILURE")
        if len(missing_params) > 0:
            print("missing parameters:")
            for missing_param in missing_params:
                print("\t%s: %d" % (missing_param, missing_params[missing_param]))
        if duplicates:
            print("Several benchmarks have same identifier (exp.id)")
    else:
        print("Benchmark validation result: SUCCESS")
def filter_benchmarks(args):
    """Keep only benchmarks that carry non-empty values for all given params.

    :param argparse args: Command line arguments.

    The following command line arguments are used:

    * ``args.input_file`` A file with benchmark results.
    * ``args.params`` Specification of mandatory parameters. For format,
      read comments of ``get_params`` function.
    * ``args.output_file`` An output file with updated benchmark results.
    """
    source = load_json_file(args.input_file)['data']
    required = get_params(args.params)

    # A benchmark survives only if every required key exists and is truthy.
    kept = [
        benchmark for benchmark in source
        if all(key in benchmark and benchmark[key] for key in required)
    ]

    print("Number of input benchmarks: %d" % len(source))
    print("Number of output benchmarks: %d" % len(kept))
    DictUtils.dump_json_to_file({"data": kept}, args.output_file)
def update_benchmarks(args):
    """Override benchmark parameters with user-provided values and recompute.

    :param argparse args: Command line arguments.

    The following command line arguments are used:

    * ``args.input_file`` A file with benchmark results.
    * ``args.params`` Specification of mandatory parameters. For format,
      read comments of ``get_params`` function.
    * ``args.output_file`` An output file with updated benchmark results.
    """
    benchmarks = load_json_file(args.input_file)['data']
    # Inject the overrides under prefixed names so Processor can expand any
    # variable references without clashing with existing keys.
    prefix = '__'
    overrides = {prefix + key: value for key, value in get_params(args.params).items()}
    for benchmark in benchmarks:
        benchmark.update(overrides)
    Processor().compute_variables(benchmarks)

    # Strip the prefix back off, overwriting the originals on conflict, and
    # drop benchmarks that ended up without a model.
    prefix_len = len(prefix)
    updated = []
    for benchmark in benchmarks:
        for prefixed in overrides.keys():
            benchmark[prefixed[prefix_len:]] = benchmark.pop(prefixed)
        if benchmark['exp.model'] != '':
            updated.append(benchmark)
    DictUtils.dump_json_to_file({"data": updated}, args.output_file)
def main():
    """Parse command line arguments and dispatch to the requested action."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'action', type=str,
        help="Action to perform ('validate', 'filter', 'update')"
    )
    parser.add_argument(
        '--input_file', '--input-file', type=str, required=True, default=None,
        help='An input JSON file. This file is never modified.'
    )
    parser.add_argument(
        '--params', type=str, required=False, default=None,
        help="JSON array or object OR string. If string it's considered as a file name."
    )
    parser.add_argument(
        '--output_file', '--output-file', required=False, default=False,
        help="Output JSON file, possible, modified version of an input JSON file."
    )
    args = parser.parse_args()

    # Dispatch table instead of an if/elif chain.
    actions = {
        'validate': validate_benchmarks,
        'filter': filter_benchmarks,
        'update': update_benchmarks,
    }
    if args.action not in actions:
        raise ValueError("Action parameter has invalid value (%s). "
                         "Must be one of ['validate', 'filter', 'update']" % args.action)
    actions[args.action](args)


if __name__ == '__main__':
    main()
|
[
"json.load",
"json.loads",
"argparse.ArgumentParser",
"dlbs.utils.DictUtils.dump_json_to_file",
"collections.defaultdict",
"dlbs.processor.Processor"
] |
[((2678, 2696), 'json.loads', 'json.loads', (['params'], {}), '(params)\n', (2688, 2696), False, 'import json\n'), ((3731, 3754), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (3742, 3754), False, 'from collections import defaultdict\n'), ((5945, 6019), 'dlbs.utils.DictUtils.dump_json_to_file', 'DictUtils.dump_json_to_file', (["{'data': output_benchmarks}", 'args.output_file'], {}), "({'data': output_benchmarks}, args.output_file)\n", (5972, 6019), False, 'from dlbs.utils import DictUtils\n'), ((7357, 7424), 'dlbs.utils.DictUtils.dump_json_to_file', 'DictUtils.dump_json_to_file', (["{'data': benchmarks}", 'args.output_file'], {}), "({'data': benchmarks}, args.output_file)\n", (7384, 7424), False, 'from dlbs.utils import DictUtils\n'), ((7529, 7554), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7552, 7554), False, 'import argparse\n'), ((1828, 1847), 'json.load', 'json.load', (['file_obj'], {}), '(file_obj)\n', (1837, 1847), False, 'import json\n'), ((6848, 6859), 'dlbs.processor.Processor', 'Processor', ([], {}), '()\n', (6857, 6859), False, 'from dlbs.processor import Processor\n')]
|
from flask import render_template
import app.charts as charts
from . import app
@app.route("/")
def search():
    """Render the search landing page (site root)."""
    return render_template('search.html', title='Search')
@app.route("/dashboard")
def hello():
    """Build the dashboard charts and embed them into the base template."""
    dashboard_chart = charts.dashboard.create_charts()
    return render_template(
        'base.html',
        title='dashboard_class',
        source_file='dashboard',
        myechart=dashboard_chart.render_embed(),
        script_list=dashboard_chart.get_js_dependencies(),
    )
@app.route('/aboutus')
def dashboard():
    """Render the About Us page.

    NOTE(review): the function is named ``dashboard`` but serves "/aboutus";
    the name is also the Flask endpoint (``url_for`` targets it), so renaming
    it would break any templates linking to this view — confirm before fixing.
    """
    return render_template('aboutus.html', title='About Us')
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 (Not Found) page with the matching status code."""
    return render_template("404.html"), 404
@app.errorhandler(500)
def internal_server_error(e):
    """Render the custom 500 (Internal Server Error) page.

    Renamed from ``page_not_found``: the module previously defined two
    functions with that name, so the 500 handler silently shadowed the 404
    handler's name at module level. Error handlers are registered by status
    code (not by endpoint name), so the rename is safe for callers.
    """
    return render_template("500.html"), 500
|
[
"app.charts.dashboard.create_charts",
"flask.render_template"
] |
[((124, 170), 'flask.render_template', 'render_template', (['"""search.html"""'], {'title': '"""Search"""'}), "('search.html', title='Search')\n", (139, 170), False, 'from flask import render_template\n'), ((227, 259), 'app.charts.dashboard.create_charts', 'charts.dashboard.create_charts', ([], {}), '()\n', (257, 259), True, 'import app.charts as charts\n'), ((593, 642), 'flask.render_template', 'render_template', (['"""aboutus.html"""'], {'title': '"""About Us"""'}), "('aboutus.html', title='About Us')\n", (608, 642), False, 'from flask import render_template\n'), ((702, 729), 'flask.render_template', 'render_template', (['"""404.html"""'], {}), "('404.html')\n", (717, 729), False, 'from flask import render_template\n'), ((794, 821), 'flask.render_template', 'render_template', (['"""500.html"""'], {}), "('500.html')\n", (809, 821), False, 'from flask import render_template\n')]
|
import tempfile
import os
import requests
from tqdm import tqdm
from rich import print as rprint
from felicette.constants import band_tag_map
workdir = os.path.join(os.path.expanduser("~"), "felicette-data")
def check_sat_path(id):
    """Ensure the per-scene data directory ``<workdir>/<id>`` exists.

    ``os.makedirs(..., exist_ok=True)`` already tolerates an existing
    directory, so the previous ``os.path.exists`` pre-check was redundant
    (and racy between the check and the create).

    :param id: scene identifier used as the directory name under ``workdir``.
    """
    os.makedirs(os.path.join(workdir, id), exist_ok=True)
def save_to_file(url, filename, id, info_message):
    """Stream *url* into ``<workdir>/<id>/<filename>`` with a progress bar.

    :param url: remote resource to download (streamed in 4 KiB chunks).
    :param filename: target file name; assumed to look like
        ``<prefix>-<band>.<ext>`` — the token between '-' and '.' labels the
        progress bar (TODO confirm against callers).
    :param id: scene identifier naming the directory under ``workdir``.
    :param info_message: rich-formatted message printed before downloading.
    """
    data_path = os.path.join(workdir, id)
    data_id = filename.split("/")[-1].split("-")[1].split(".")[0]
    rprint(info_message)
    file_path = os.path.join(data_path, filename)
    response = requests.get(url, stream=True)
    # tqdm.wrapattr closes the wrapped file object when the with-block exits,
    # so the explicit fout.close() that used to follow the loop was redundant.
    with tqdm.wrapattr(
        open(file_path, "wb"),
        "write",
        miniters=1,
        desc=data_id,
        total=int(response.headers.get("content-length", 0)),
    ) as fout:
        for chunk in response.iter_content(chunk_size=4096):
            fout.write(chunk)
def data_file_exists(filename):
    """Return True when *filename* is already present on disk."""
    return os.path.exists(filename)
def file_paths_wrt_id(id):
    """Map every artifact of scene *id* to its path under ``workdir``.

    :param id: scene identifier.
    :return: dict with the scene's base directory and the path of each band,
        stacked/pan-sharpened/processed output and vegetation artifact.
    """
    base = os.path.join(workdir, id)

    def artifact(suffix):
        # Every artifact follows the "<id>-<suffix>" naming scheme.
        return os.path.join(base, "%s-%s" % (id, suffix))

    paths = {
        "base": base,
        "preview": artifact("preview.jpg"),
    }
    for band in ("b5", "b4", "b3", "b2", "b8"):
        paths[band] = artifact("%s.tiff" % band)
    paths["stack"] = artifact("stack.tiff")
    paths["pan_sharpened"] = artifact("pan.tiff")
    paths["output_path"] = artifact("color-processed.tiff")
    paths["output_path_jpeg"] = artifact("color-processed.jpeg")
    paths["vegetation_path"] = artifact("vegetation.tiff")
    paths["vegetation_path_jpeg"] = artifact("vegetation.jpeg")
    return paths
|
[
"os.path.join",
"os.makedirs",
"os.path.exists",
"rich.print",
"requests.get",
"os.path.expanduser"
] |
[((167, 190), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (185, 190), False, 'import os\n'), ((252, 277), 'os.path.join', 'os.path.join', (['workdir', 'id'], {}), '(workdir, id)\n', (264, 277), False, 'import os\n'), ((432, 457), 'os.path.join', 'os.path.join', (['workdir', 'id'], {}), '(workdir, id)\n', (444, 457), False, 'import os\n'), ((528, 548), 'rich.print', 'rprint', (['info_message'], {}), '(info_message)\n', (534, 548), True, 'from rich import print as rprint\n'), ((565, 598), 'os.path.join', 'os.path.join', (['data_path', 'filename'], {}), '(data_path, filename)\n', (577, 598), False, 'import os\n'), ((614, 644), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (626, 644), False, 'import requests\n'), ((989, 1013), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1003, 1013), False, 'import os\n'), ((1062, 1087), 'os.path.join', 'os.path.join', (['workdir', 'id'], {}), '(workdir, id)\n', (1074, 1087), False, 'import os\n'), ((290, 315), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (304, 315), False, 'import os\n'), ((325, 362), 'os.makedirs', 'os.makedirs', (['data_path'], {'exist_ok': '(True)'}), '(data_path, exist_ok=True)\n', (336, 362), False, 'import os\n'), ((1150, 1199), 'os.path.join', 'os.path.join', (['home_path_id', "('%s-preview.jpg' % id)"], {}), "(home_path_id, '%s-preview.jpg' % id)\n", (1162, 1199), False, 'import os\n'), ((1217, 1262), 'os.path.join', 'os.path.join', (['home_path_id', "('%s-b5.tiff' % id)"], {}), "(home_path_id, '%s-b5.tiff' % id)\n", (1229, 1262), False, 'import os\n'), ((1280, 1325), 'os.path.join', 'os.path.join', (['home_path_id', "('%s-b4.tiff' % id)"], {}), "(home_path_id, '%s-b4.tiff' % id)\n", (1292, 1325), False, 'import os\n'), ((1343, 1388), 'os.path.join', 'os.path.join', (['home_path_id', "('%s-b3.tiff' % id)"], {}), "(home_path_id, '%s-b3.tiff' % id)\n", (1355, 1388), False, 'import 
os\n'), ((1406, 1451), 'os.path.join', 'os.path.join', (['home_path_id', "('%s-b2.tiff' % id)"], {}), "(home_path_id, '%s-b2.tiff' % id)\n", (1418, 1451), False, 'import os\n'), ((1469, 1514), 'os.path.join', 'os.path.join', (['home_path_id', "('%s-b8.tiff' % id)"], {}), "(home_path_id, '%s-b8.tiff' % id)\n", (1481, 1514), False, 'import os\n'), ((1535, 1583), 'os.path.join', 'os.path.join', (['home_path_id', "('%s-stack.tiff' % id)"], {}), "(home_path_id, '%s-stack.tiff' % id)\n", (1547, 1583), False, 'import os\n'), ((1612, 1658), 'os.path.join', 'os.path.join', (['home_path_id', "('%s-pan.tiff' % id)"], {}), "(home_path_id, '%s-pan.tiff' % id)\n", (1624, 1658), False, 'import os\n'), ((1685, 1743), 'os.path.join', 'os.path.join', (['home_path_id', "('%s-color-processed.tiff' % id)"], {}), "(home_path_id, '%s-color-processed.tiff' % id)\n", (1697, 1743), False, 'import os\n'), ((1775, 1833), 'os.path.join', 'os.path.join', (['home_path_id', "('%s-color-processed.jpeg' % id)"], {}), "(home_path_id, '%s-color-processed.jpeg' % id)\n", (1787, 1833), False, 'import os\n'), ((1886, 1939), 'os.path.join', 'os.path.join', (['home_path_id', "('%s-vegetation.tiff' % id)"], {}), "(home_path_id, '%s-vegetation.tiff' % id)\n", (1898, 1939), False, 'import os\n'), ((1975, 2028), 'os.path.join', 'os.path.join', (['home_path_id', "('%s-vegetation.jpeg' % id)"], {}), "(home_path_id, '%s-vegetation.jpeg' % id)\n", (1987, 2028), False, 'import os\n')]
|
from amcp_pylib.core import Command, command_syntax
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} KEYER {[keyer:0,1]|0}')
def MIXER_KEYER(command: Command) -> Command:
    """
    Replaces layer n+1's alpha with the R (red) channel of layer n, and hides the RGB channels of layer n.
    If keyer equals 1 then the specified layer will not be rendered,
    instead it will be used as the key for the layer above.

    :param command: command instance to pass through.
    :return: the unchanged command.

    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-keyer
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} CHROMA {[enable:0,1] {[target_hue:float] [hue_width:float] '
                '[min_saturation:float] [min_brightness:float] [softness:float] [spill_suppress:float] '
                '[spill_suppress_saturation:float] [show_mask:0,1]}} '
                '{[duration:int] {[tween:string]|LINEAR}|0 LINEAR}')
def MIXER_CHROMA(command: Command) -> Command:
    """
    Enables or disables chroma keying on the specified video layer.
    Giving no parameters returns the current chroma settings.

    The chroma keying is done in the HSB/HSV color space.

    :param command: command instance to pass through.
    :return: the unchanged command.

    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-chroma
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} BLEND {[blend:string]|normal}')
def MIXER_BLEND(command: Command) -> Command:
    """
    Sets the blend mode to use when compositing this layer with the background.
    If no argument is given the current blend mode is returned.

    Every layer can be set to use a different blend mode than the default normal mode,
    similar to applications like Photoshop.
    Some common uses are to use screen to make all the black image data become transparent,
    or to use add to selectively lighten highlights.

    The previous syntax string was a copy-paste of MIXER CHROMA's grammar; it
    has been replaced with the actual MIXER BLEND grammar from the linked
    AMCP protocol documentation.

    :param command: command instance to pass through.
    :return: the unchanged command.

    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-blend
    """
    return command
@command_syntax('MIXER [video_channel:int]-[layer:int] INVERT {invert:0,1|0}')
def MIXER_INVERT(command: Command) -> Command:
    """
    Invert color. Only works on layers.

    `invert` defaults to 0 (off).

    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-invert
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} OPACITY {[opacity:float] '
                '{[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}')
def MIXER_OPACITY(command: Command) -> Command:
    """
    Changes the opacity of the specified layer. The value is a float between 0 and 1.
    Retrieves the opacity of the specified layer if no argument is given.

    The change can be animated over an optional `duration` using the given
    tween curve (default LINEAR).

    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-opacity
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} BRIGHTNESS {[brightness:float] '
                '{[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}')
def MIXER_BRIGHTNESS(command: Command) -> Command:
    """
    Changes the brightness of the specified layer. The value is a float between 0 and 1.
    Retrieves the brightness of the specified layer if no argument is given.

    The change can be animated over an optional `duration` using the given
    tween curve (default LINEAR).

    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-brightness
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} SATURATION {[saturation:float] '
                '{[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}')
def MIXER_SATURATION(command: Command) -> Command:
    """
    Changes the saturation of the specified layer. The value is a float between 0 and 1.
    Retrieves the saturation of the specified layer if no argument is given.

    The change can be animated over an optional `duration` using the given
    tween curve (default LINEAR).

    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-saturation
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} CONTRAST {[contrast:float] '
                '{[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}')
def MIXER_CONTRAST(command: Command) -> Command:
    """
    Changes the contrast of the specified layer. The value is a float between 0 and 1.
    Retrieves the contrast of the specified layer if no argument is given.

    The change can be animated over an optional `duration` using the given
    tween curve (default LINEAR).

    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-contrast
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} LEVELS {[min_input:float] [max_input:float] '
                '[gamma:float] [min_output:float] [max_output:float]{[duration:int] '
                '{[tween:string]|LINEAR}|0 LINEAR}}')
def MIXER_LEVELS(command: Command) -> Command:
    """
    Adjusts the video levels of a layer. If no arguments are given the current levels are returned.

    All five values are supplied together; the change can be animated with an
    optional duration and tween.

    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-levels
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} FILL {[x:float] [y:float] [x_scale:float] '
                '[y_scale:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}')
def MIXER_FILL(command: Command) -> Command:
    """
    Scales/positions the video stream on the specified layer. The concept is quite simple;
    it comes from the ancient DVE machines like ADO.
    Imagine that the screen has a size of 1x1 (not in pixel, but in an abstract measure).
    Then the coordinates of a full size picture is 0 0 1 1, which means left edge is at coordinate 0,
    top edge at coordinate 0, width full size = 1, height full size = 1.
    If you want to crop the picture on the left side (for wipe left to right) you set the left edge to full right
    => 1 and the width to 0. So this give you the start-coordinates of 1 0 0 1.
    End coordinates of any wipe are always the full picture 0 0 1 1.
    With the FILL command it can make sense to have values between 1 and 0, if you want to do a smaller window.
    If, for instance you want to have a window of half the size of your screen, you set width and height to 0.5.
    If you want to center it you set left and top edge to 0.25 so you will get the arguments 0.25 0.25 0.5 0.5.
    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-fill
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} CLIP {[x:float] [y:float] [width:float] '
                '[height:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}')
def MIXER_CLIP(command: Command) -> Command:
    """
    Defines the rectangular viewport where a layer is rendered thru on the screen without being affected by MIXER FILL,
    MIXER ROTATION and MIXER PERSPECTIVE. See MIXER CROP if you want to crop the layer before transforming it.
    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-clip
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} ANCHOR {[x:float] [y:float] {[duration:int] '
                '{[tween:string]|LINEAR}|0 LINEAR}}')
def MIXER_ANCHOR(command: Command) -> Command:
    """
    Changes the anchor point of the specified layer, or returns the current values if no arguments are given.
    The anchor point is around which MIXER FILL and MIXER ROTATION will be done from.
    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-anchor
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} CROP {[left_edge:float] [top_edge:float] '
                '[right_edge:float] [bottom_edge:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}')
def MIXER_CROP(command: Command) -> Command:
    """
    Defines how a layer should be cropped before making other transforms via MIXER FILL, MIXER ROTATION
    and MIXER PERSPECTIVE. See MIXER CLIP if you want to change the viewport relative to the screen instead.
    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-crop
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} ROTATION {[angle:float] '
                '{[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}')
def MIXER_ROTATION(command: Command) -> Command:
    """
    Returns or modifies the angle of which a layer is rotated by (clockwise degrees)
    around the point specified by MIXER ANCHOR.
    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-rotation
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} PERSPECTIVE {[top_left_x:float] [top_left_y:float] '
                '[top_right_x:float] [top_right_y:float] [bottom_right_x:float] [bottom_right_y:float] '
                '[bottom_left_x:float] [bottom_left_y:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}')
def MIXER_PERSPECTIVE(command: Command) -> Command:
    """
    Perspective transforms (corner pins or distorts if you will) a layer.

    Corner coordinates are given clockwise starting at the top-left corner.

    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-perspective
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} MIPMAP {[mipmap:0,1]|0}')
def MIXER_MIPMAP(command: Command) -> Command:
    """
    Sets whether to use mipmapping (anisotropic filtering if supported) on a layer or not.
    If no argument is given the current state is returned.
    Mipmapping reduces aliasing when downscaling/perspective transforming.
    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-mipmap
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]|-0} VOLUME {[volume:float] '
                '{[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}')
def MIXER_VOLUME(command: Command) -> Command:
    """
    Changes the volume of the specified layer. The 1.0 is the original volume, which can be attenuated or amplified.
    Retrieves the volume of the specified layer if no argument is given.

    The change can be animated over an optional `duration` using the given
    tween curve (default LINEAR).

    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-volume
    """
    return command
@command_syntax('MIXER [video_channel:int] MASTERVOLUME {[volume:float]}')
def MIXER_MASTERVOLUME(command: Command) -> Command:
    """
    Changes or retrieves (giving no argument) the volume of the entire channel.
    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-mastervolume
    """
    return command
@command_syntax('MIXER [video_channel:int] STRAIGHT_ALPHA_OUTPUT {[straight_alpha:0,1|0]}')
def MIXER_STRAIGHT_ALPHA_OUTPUT(command: Command) -> Command:
    """
    Turn straight alpha output on or off for the specified channel.
    The casparcg.config needs to be configured to enable the feature.
    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-straight_alpha_output
    """
    return command
@command_syntax('MIXER [video_channel:int] GRID [resolution:int] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}')
def MIXER_GRID(command: Command) -> Command:
    """
    Creates a grid of video layer in ascending order of the layer index,
    i.e. if resolution equals 2 then a 2x2 grid of layers will be created starting from layer 1.
    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-grid
    """
    return command
@command_syntax('MIXER [video_channel:int] COMMIT')
def MIXER_COMMIT(command: Command) -> Command:
    """
    Commits all deferred mixer transforms on the specified channel.
    This ensures that all animations start at the same exact frame.
    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-commit
    """
    return command
@command_syntax('MIXER [video_channel:int]{-[layer:int]} CLEAR')
def MIXER_CLEAR(command: Command) -> Command:
    """
    Clears all transformations on a channel or layer.
    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#mixer-clear
    """
    return command
@command_syntax('CHANNEL_GRID')
def CHANNEL_GRID(command: Command) -> Command:
    """
    Opens a new channel and displays a grid with the contents of all the existing channels.
    The element <channel-grid>true</channel-grid> must be present in casparcg.config for this to work correctly.
    :link https://github.com/CasparCG/help/wiki/AMCP-Protocol#channel_grid
    """
    return command
|
[
"amcp_pylib.core.command_syntax"
] |
[((55, 142), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} KEYER {[keyer:0,1]|0}"""'], {}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} KEYER {[keyer:0,1]|0}')\n", (69, 142), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((533, 850), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} CHROMA {[enable:0,1] {[target_hue:float] [hue_width:float] [min_saturation:float] [min_brightness:float] [softness:float] [spill_suppress:float] [spill_suppress_saturation:float] [show_mask:0,1]}} {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}"""'], {}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} CHROMA {[enable:0,1] {[target_hue:float] [hue_width:float] [min_saturation:float] [min_brightness:float] [softness:float] [spill_suppress:float] [spill_suppress_saturation:float] [show_mask:0,1]}} {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}'\n )\n", (547, 850), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((1247, 1564), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} CHROMA {[enable:0,1] {[target_hue:float] [hue_width:float] [min_saturation:float] [min_brightness:float] [softness:float] [spill_suppress:float] [spill_suppress_saturation:float] [show_mask:0,1]}} {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}"""'], {}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} CHROMA {[enable:0,1] {[target_hue:float] [hue_width:float] [min_saturation:float] [min_brightness:float] [softness:float] [spill_suppress:float] [spill_suppress_saturation:float] [show_mask:0,1]}} {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}'\n )\n", (1261, 1564), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((2192, 2269), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]-[layer:int] INVERT {invert:0,1|0}"""'], {}), "('MIXER [video_channel:int]-[layer:int] 
INVERT {invert:0,1|0}')\n", (2206, 2269), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((2471, 2617), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} OPACITY {[opacity:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}"""'], {}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} OPACITY {[opacity:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}'\n )\n", (2485, 2617), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((2950, 3102), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} BRIGHTNESS {[brightness:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}"""'], {}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} BRIGHTNESS {[brightness:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}'\n )\n", (2964, 3102), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((3447, 3599), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} SATURATION {[saturation:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}"""'], {}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} SATURATION {[saturation:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}'\n )\n", (3461, 3599), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((3944, 4092), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} CONTRAST {[contrast:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}"""'], {}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} CONTRAST {[contrast:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}'\n )\n", (3958, 4092), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((4429, 4645), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} LEVELS {[min_input:float] [max_input:float] [gamma:float] [min_output:float] 
[max_output:float]{[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}"""'], {}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} LEVELS {[min_input:float] [max_input:float] [gamma:float] [min_output:float] [max_output:float]{[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}'\n )\n", (4443, 4645), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((4935, 5114), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} FILL {[x:float] [y:float] [x_scale:float] [y_scale:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}"""'], {}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} FILL {[x:float] [y:float] [x_scale:float] [y_scale:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}'\n )\n", (4949, 5114), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((6292, 6468), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} CLIP {[x:float] [y:float] [width:float] [height:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}"""'], {}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} CLIP {[x:float] [y:float] [width:float] [height:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}'\n )\n", (6306, 6468), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((6866, 7015), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} ANCHOR {[x:float] [y:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}"""'], {}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} ANCHOR {[x:float] [y:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}'\n )\n", (6880, 7015), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((7383, 7584), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} CROP {[left_edge:float] [top_edge:float] [right_edge:float] [bottom_edge:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}"""'], 
{}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} CROP {[left_edge:float] [top_edge:float] [right_edge:float] [bottom_edge:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}'\n )\n", (7397, 7584), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((7965, 8110), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} ROTATION {[angle:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}"""'], {}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} ROTATION {[angle:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}'\n )\n", (7979, 8110), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((8418, 8720), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} PERSPECTIVE {[top_left_x:float] [top_left_y:float] [top_right_x:float] [top_right_y:float] [bottom_right_x:float] [bottom_right_y:float] [bottom_left_x:float] [bottom_left_y:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}"""'], {}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} PERSPECTIVE {[top_left_x:float] [top_left_y:float] [top_right_x:float] [top_right_y:float] [bottom_right_x:float] [bottom_right_y:float] [bottom_left_x:float] [bottom_left_y:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}'\n )\n", (8432, 8720), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((8994, 9083), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} MIPMAP {[mipmap:0,1]|0}"""'], {}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} MIPMAP {[mipmap:0,1]|0}')\n", (9008, 9083), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((9465, 9609), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]|-0} VOLUME {[volume:float] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}"""'], {}), "(\n 'MIXER [video_channel:int]{-[layer:int]|-0} VOLUME {[volume:float] 
{[duration:int] {[tween:string]|LINEAR}|0 LINEAR}}'\n )\n", (9479, 9609), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((9970, 10043), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int] MASTERVOLUME {[volume:float]}"""'], {}), "('MIXER [video_channel:int] MASTERVOLUME {[volume:float]}')\n", (9984, 10043), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((10297, 10392), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int] STRAIGHT_ALPHA_OUTPUT {[straight_alpha:0,1|0]}"""'], {}), "(\n 'MIXER [video_channel:int] STRAIGHT_ALPHA_OUTPUT {[straight_alpha:0,1|0]}')\n", (10311, 10392), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((10717, 10842), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int] GRID [resolution:int] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}"""'], {}), "(\n 'MIXER [video_channel:int] GRID [resolution:int] {[duration:int] {[tween:string]|LINEAR}|0 LINEAR}'\n )\n", (10731, 10842), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((11160, 11210), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int] COMMIT"""'], {}), "('MIXER [video_channel:int] COMMIT')\n", (11174, 11210), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((11508, 11571), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""MIXER [video_channel:int]{-[layer:int]} CLEAR"""'], {}), "('MIXER [video_channel:int]{-[layer:int]} CLEAR')\n", (11522, 11571), False, 'from amcp_pylib.core import Command, command_syntax\n'), ((11785, 11815), 'amcp_pylib.core.command_syntax', 'command_syntax', (['"""CHANNEL_GRID"""'], {}), "('CHANNEL_GRID')\n", (11799, 11815), False, 'from amcp_pylib.core import Command, command_syntax\n')]
|
# Generated by Django 2.1.3 on 2018-11-25 09:23
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.1.3 (2018-11-25): re-declares
    # CaptchaRecord.hashkey as CharField(max_length=255). Declarative schema
    # operation only — do not hand-edit beyond comments.

    dependencies = [
        ('decaptcha', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='captcharecord',
            name='hashkey',
            field=models.CharField(max_length=255, verbose_name='Hashkey'),
        ),
    ]
|
[
"django.db.models.CharField"
] |
[((335, 391), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': '"""Hashkey"""'}), "(max_length=255, verbose_name='Hashkey')\n", (351, 391), False, 'from django.db import migrations, models\n')]
|
import re

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup


def _extract_version(path):
    """Return the __version__ string declared in *path*, or '' if not found.

    Reads the module as text (line by line) instead of importing it, so setup
    works before the package's dependencies are installed.
    """
    version_re = re.compile(r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]')
    with open(path, 'r') as stream:
        for line in stream:
            found = version_re.match(line)
            if found:
                return found.group(1)
    return ''


version = _extract_version('schematec/__init__.py')

setup(
    name='schematec',
    packages=['schematec'],
    package_data={'': ['LICENSE']},
    version=version,
    description='Set of tools that makes input data validation easier',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/mylokin/schematec',
    keywords=['schema'],
    license='MIT',
    classifiers=(
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Topic :: Database',
        'Topic :: Database :: Database Engines/Servers',
    ),
)
|
[
"re.compile",
"distutils.core.setup"
] |
[((348, 984), 'distutils.core.setup', 'setup', ([], {'name': '"""schematec"""', 'packages': "['schematec']", 'package_data': "{'': ['LICENSE']}", 'version': 'version', 'description': '"""Set of tools that makes input data validation easier"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/mylokin/schematec"""', 'keywords': "['schema']", 'license': '"""MIT"""', 'classifiers': "('Development Status :: 4 - Beta', 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7', 'Topic :: Database',\n 'Topic :: Database :: Database Engines/Servers')"}), "(name='schematec', packages=['schematec'], package_data={'': [\n 'LICENSE']}, version=version, description=\n 'Set of tools that makes input data validation easier', author='<NAME>',\n author_email='<EMAIL>', url='https://github.com/mylokin/schematec',\n keywords=['schema'], license='MIT', classifiers=(\n 'Development Status :: 4 - Beta', 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7', 'Topic :: Database',\n 'Topic :: Database :: Database Engines/Servers'))\n", (353, 984), False, 'from distutils.core import setup\n'), ((179, 238), 're.compile', 're.compile', (['"""__version__\\\\s*=\\\\s*[\\\\\'"]([^\\\\\'"]*)[\\\\\'"]"""'], {}), '(\'__version__\\\\s*=\\\\s*[\\\\\\\'"]([^\\\\\\\'"]*)[\\\\\\\'"]\')\n', (189, 238), False, 'import re\n')]
|
def plot_history(hist):
    """Plot training vs. validation mean-squared-error curves over epochs.

    Expects *hist* to be indexable with 'epoch', 'mean_squared_error' and
    'val_mean_squared_error' columns (as produced by Keras history).
    Shows the figure with a legend; returns nothing.
    """
    import matplotlib.pyplot as plt

    series = [
        ('mean_squared_error', 'Train Error'),
        ('val_mean_squared_error', 'Val Error'),
    ]
    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel('Mean Squared Error')
    for column, label in series:
        plt.plot(hist['epoch'], hist[column], label=label)
    plt.legend()
    plt.show()
def create_neuro_prediction(train, test, target, mainpath, numeric_features=None, categorical_features=None, scaled=True, test_size=0.2, Skfold=False, epochs=50):
    """Train a feed-forward network on `train` and predict `target` for `test`.

    Parameters
    ----------
    train, test : pandas.DataFrame
        Input frames; scaling/encoding is delegated to ``prepare_and_scale_data``.
    target : list of str
        Target column name(s) looked up in the prepared data.
    mainpath : str
        Root directory used by ``create_tensorpad_path`` for TensorBoard logs.
    numeric_features, categorical_features : list of str, optional
        Column subsets forwarded to ``prepare_and_scale_data``. Defaults were
        previously mutable ``[]`` literals (a shared-between-calls pitfall);
        ``None`` now stands in for an empty list.
    scaled : bool
        Train on the scaled feature matrix when True, else the unscaled one.
    test_size : float
        Fraction held out for the test split, and again for validation.
    Skfold : bool
        Unused; kept for backward compatibility.  # NOTE(review): confirm before removing
    epochs : int
        Number of training epochs (previously hard-coded to 50).

    Returns
    -------
    tuple
        (data, test, model, hist, log_path)
    """
    from sklearn.metrics import mean_squared_error
    from sklearn.model_selection import train_test_split
    import numpy as np
    import pandas as pd
    import tensorflow as tf
    from prepare_and_scale_data import prepare_and_scale_data
    from get_compiled_model import get_compiled_model
    from create_tensorpad_path import create_tensorpad_path

    # BUG FIX: mutable default arguments are evaluated once per process and
    # shared between calls; use None sentinels instead.
    if numeric_features is None:
        numeric_features = []
    if categorical_features is None:
        categorical_features = []

    # Split the initial train dataframe to test/train dataframes.
    data, train_scaled, train_non_scaled, test_scaled, test_non_scaled = prepare_and_scale_data(train, test, numeric_features, categorical_features)
    y_train = data[target]
    if scaled:
        X_train, X_test, y_train, y_test = train_test_split(train_scaled, y_train, test_size=test_size)
        test = test_scaled
    else:
        X_train, X_test, y_train, y_test = train_test_split(train_non_scaled, y_train, test_size=test_size)
        test = test_non_scaled
    # Carve a validation split out of the remaining training portion.
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=test_size)

    # Prepare the training dataset.
    train_dataset = tf.data.Dataset.from_tensor_slices((X_train.values, y_train.values))
    train_dataset = train_dataset.shuffle(buffer_size=1024).batch(32)
    # Prepare the test dataset.
    test_dataset = tf.data.Dataset.from_tensor_slices((X_test.values, y_test.values))
    test_dataset = test_dataset.batch(32)
    # Prepare the validation dataset.
    val_dataset = tf.data.Dataset.from_tensor_slices((X_val.values, y_val.values))
    val_dataset = val_dataset.batch(32)

    log_path, log_dir = create_tensorpad_path(mainpath)
    model, callbacks = get_compiled_model(X_train, target, log_dir)
    history = model.fit(train_dataset, epochs=epochs, validation_data=val_dataset, callbacks=callbacks)

    result = model.evaluate(test_dataset)
    print(dict(zip(model.metrics_names, result)))
    # Report RMSE on the train and held-out test splits.
    pred_train = model.predict(X_train)
    print(np.sqrt(mean_squared_error(y_train, pred_train)))
    pred = model.predict(X_test)
    print(np.sqrt(mean_squared_error(y_test, pred)))

    hist = pd.DataFrame(history.history)
    hist['epoch'] = history.epoch

    # Predict targets for the external test frame and attach the predictions
    # ("Ennustettu" = Finnish for "predicted") to the prepared data frame.
    if scaled:
        pred_all = model.predict(test_scaled)
    else:
        pred_all = model.predict(test_non_scaled)
    pred_df = pd.DataFrame(pred_all, columns=target)
    for t in target:
        data.loc[:, "Ennustettu " + t] = pred_df[t]
    return data, test, model, hist, log_path
|
[
"prepare_and_scale_data.prepare_and_scale_data",
"pandas.DataFrame",
"get_compiled_model.get_compiled_model",
"matplotlib.pyplot.show",
"create_tensorpad_path.create_tensorpad_path",
"matplotlib.pyplot.plot",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.legend",
"tensorflow.data.Dataset.from_tensor_slices",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"sklearn.metrics.mean_squared_error"
] |
[((68, 80), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (78, 80), True, 'import matplotlib.pyplot as plt\n'), ((86, 105), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (96, 105), True, 'import matplotlib.pyplot as plt\n'), ((111, 143), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Squared Error"""'], {}), "('Mean Squared Error')\n", (121, 143), True, 'import matplotlib.pyplot as plt\n'), ((149, 221), 'matplotlib.pyplot.plot', 'plt.plot', (["hist['epoch']", "hist['mean_squared_error']"], {'label': '"""Train Error"""'}), "(hist['epoch'], hist['mean_squared_error'], label='Train Error')\n", (157, 221), True, 'import matplotlib.pyplot as plt\n'), ((239, 313), 'matplotlib.pyplot.plot', 'plt.plot', (["hist['epoch']", "hist['val_mean_squared_error']"], {'label': '"""Val Error"""'}), "(hist['epoch'], hist['val_mean_squared_error'], label='Val Error')\n", (247, 313), True, 'import matplotlib.pyplot as plt\n'), ((333, 345), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (343, 345), True, 'import matplotlib.pyplot as plt\n'), ((351, 361), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (359, 361), True, 'import matplotlib.pyplot as plt\n'), ((1063, 1138), 'prepare_and_scale_data.prepare_and_scale_data', 'prepare_and_scale_data', (['train', 'test', 'numeric_features', 'categorical_features'], {}), '(train, test, numeric_features, categorical_features)\n', (1085, 1138), False, 'from prepare_and_scale_data import prepare_and_scale_data\n'), ((1520, 1575), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train'], {'test_size': 'test_size'}), '(X_train, y_train, test_size=test_size)\n', (1536, 1575), False, 'from sklearn.model_selection import train_test_split\n'), ((1635, 1703), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(X_train.values, y_train.values)'], {}), '((X_train.values, y_train.values))\n', (1669, 1703), True, 'import 
tensorflow as tf\n'), ((1829, 1895), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(X_test.values, y_test.values)'], {}), '((X_test.values, y_test.values))\n', (1863, 1895), True, 'import tensorflow as tf\n'), ((1998, 2062), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(X_val.values, y_val.values)'], {}), '((X_val.values, y_val.values))\n', (2032, 2062), True, 'import tensorflow as tf\n'), ((2131, 2162), 'create_tensorpad_path.create_tensorpad_path', 'create_tensorpad_path', (['mainpath'], {}), '(mainpath)\n', (2152, 2162), False, 'from create_tensorpad_path import create_tensorpad_path\n'), ((2187, 2231), 'get_compiled_model.get_compiled_model', 'get_compiled_model', (['X_train', 'target', 'log_dir'], {}), '(X_train, target, log_dir)\n', (2205, 2231), False, 'from get_compiled_model import get_compiled_model\n'), ((2639, 2668), 'pandas.DataFrame', 'pd.DataFrame', (['history.history'], {}), '(history.history)\n', (2651, 2668), True, 'import pandas as pd\n'), ((2848, 2886), 'pandas.DataFrame', 'pd.DataFrame', (['pred_all'], {'columns': 'target'}), '(pred_all, columns=target)\n', (2860, 2886), True, 'import pandas as pd\n'), ((1233, 1293), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_scaled', 'y_train'], {'test_size': 'test_size'}), '(train_scaled, y_train, test_size=test_size)\n', (1249, 1293), False, 'from sklearn.model_selection import train_test_split\n'), ((1377, 1441), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_non_scaled', 'y_train'], {'test_size': 'test_size'}), '(train_non_scaled, y_train, test_size=test_size)\n', (1393, 1441), False, 'from sklearn.model_selection import train_test_split\n'), ((2495, 2534), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_train', 'pred_train'], {}), '(y_train, pred_train)\n', (2513, 2534), False, 'from sklearn.metrics import mean_squared_error\n'), ((2591, 2623), 
'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'pred'], {}), '(y_test, pred)\n', (2609, 2623), False, 'from sklearn.metrics import mean_squared_error\n')]
|
import asyncio
import logging
logger = logging.getLogger(__name__)
def compute_user_level(user_xp):
    """Map an XP total to a level.

    A user is level n where n is the smallest integer >= 1 such that
    user_xp < 2**n (i.e. each level doubles the XP requirement).
    Any XP below 2 (including zero or negative) is level 1.
    """
    level, threshold = 1, 2
    while user_xp >= threshold:
        level += 1
        threshold *= 2
    return level
class XPAggregator:
    """Computes and stores per-guild XP and levels in Redis.

    Redis key layout (per guild id):
      guilds:{id}:users                    set of user ids
      guilds:{id}:users:{uid}:activity     set of activity timeframes
      guilds:{id}:users:{uid}:reactions    reaction counter value
      guilds:{id}:xp                       zset: user id -> xp
      guilds:{id}:levels                   zset: user id -> level
      guilds:{id}:notify                   list of users to notify on level-up
    """

    def __init__(self, redis, levels):
        # redis: async redis client (aioredis-style API is used throughout);
        # levels: mapping of level number -> level name.
        self.redis = redis
        self.levels = levels

    async def get_rank(self, guild):
        """Return the guild's top 10 as (user_id, xp, level, level_name) tuples."""
        xp_zset = "guilds:{}:xp".format(guild.id)
        top_xp = await self.redis.zrevrange(xp_zset, 0, 9, withscores=True)
        # Resolve all levels concurrently, then read results in order.
        level_tasks = []
        for line in top_xp:
            level_tasks.append(asyncio.create_task(self.get_user_level(line[0], guild)))
        await asyncio.gather(*level_tasks)
        result = []
        for i, level_task in enumerate(level_tasks):
            level = level_task.result()
            result.append((top_xp[i][0], top_xp[i][1], level[0], level[1]))
        return result

    async def get_user_xp(self, user_id, guild):
        """
        Returns the user xp from the guilds:{}:xp zset
        """
        xp_zset = "guilds:{}:xp".format(guild.id)
        return await self.redis.zscore(xp_zset, user_id)

    async def get_user_level(self, user_id, guild):
        """
        Returns a tuple with level and level name based on the guilds:{}:levels zset and config
        """
        level_zset = "guilds:{}:levels".format(guild.id)
        level = await self.redis.zscore(level_zset, user_id)
        if level is None:
            # Users without a stored level default to level 1.
            return (1, self.levels[1])
        # NOTE(review): assumes every stored level has a name in self.levels.
        return (level, self.levels[level])

    async def compute_user_xp(self, user_id, guild_id):
        """
        Uses the activity timeframe stored in guilds:{}:users:{}:activity to compute XP for a single user
        """
        activity_set = "guilds:{}:users:{}:activity".format(guild_id, user_id)
        react_value = "guilds:{}:users:{}:reactions".format(guild_id, user_id)
        activity, reacts = await asyncio.gather(
            self.redis.scard(activity_set),
            self.redis.get(react_value),
        )
        # Explicit branch instead of the previous chained conditional
        # expression so the precedence is unambiguous: reactions only add to
        # the activity count when the key exists.
        if reacts is not None:
            return activity + int(reacts)
        return activity

    async def update_guild_xp(self, guild_id):
        """
        Stores the XP in the guilds:{}:xp zset
        """
        users_set = "guilds:{}:users".format(guild_id)
        logger.debug("Accessing {}".format(users_set))
        async for user_id in self.redis.isscan(users_set):
            user_xp = await self.compute_user_xp(user_id, guild_id)
            xp_zset = "guilds:{}:xp".format(guild_id)
            logger.debug(
                "Writing {} for user: {} by {}".format(xp_zset, user_id, user_xp)
            )
            await self.redis.zadd(xp_zset, user_xp, user_id)

    async def update_guilds_level(self, guild_id):
        """
        Stores the user levels in a guilds:{}:levels zset
        """
        users_set = "guilds:{}:users".format(guild_id)
        logger.debug("Accessing {}".format(users_set))
        async for user_id in self.redis.isscan(users_set):
            xp_zset = "guilds:{}:xp".format(guild_id)
            logger.debug("Accessing {} for user: {}".format(xp_zset, user_id))
            user_xp = await self.redis.zscore(xp_zset, user_id)
            if not user_xp:
                continue
            level = compute_user_level(user_xp)
            level_zset = "guilds:{}:levels".format(guild_id)
            notify_list = "guilds:{}:notify".format(guild_id)
            logger.debug("Accessing {} for user: {}".format(level_zset, user_id))
            previous_level = await self.redis.zscore(level_zset, user_id)
            if not previous_level:
                await self.set_level(level_zset, user_id, level)
            elif level > previous_level:
                # BUG FIX: this gather was previously not awaited, so failures
                # in set_level/lpush were silently dropped and completion was
                # not guaranteed before the next iteration.
                await asyncio.gather(
                    self.set_level(level_zset, user_id, level),
                    self.redis.lpush(notify_list, user_id),
                )

    async def set_level(self, level_zset, user_id, level):
        """Write a user's level into the given levels zset."""
        logger.debug("User {} is level {}".format(user_id, level))
        logger.debug(
            "Writing {} and for user: {} with value {}".format(
                level_zset, user_id, level
            )
        )
        await self.redis.zadd(level_zset, level, user_id)

    async def update_guilds(self):
        """Recompute XP and levels for every known guild."""
        logger.info("Accessing guilds")
        async for guild in self.redis.isscan("guilds"):
            await self.update_guild_xp(guild)
            await self.update_guilds_level(guild)
|
[
"asyncio.gather",
"logging.getLogger"
] |
[((40, 67), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (57, 67), False, 'import logging\n'), ((631, 659), 'asyncio.gather', 'asyncio.gather', (['*level_tasks'], {}), '(*level_tasks)\n', (645, 659), False, 'import asyncio\n')]
|
# global
import torch
from typing import Union, Optional, Tuple, List
def roll(x: torch.Tensor, shift: Union[int, Tuple[int]], axis: Union[int, Tuple[int]]=None)\
-> torch.Tensor:
return torch.roll(x, shift, axis)
# noinspection PyShadowingBuiltins
def flip(x: torch.Tensor,
axis: Optional[Union[int, Tuple[int], List[int]]] = None)\
-> torch.Tensor:
num_dims: int = len(x.shape)
if not num_dims:
return x
if axis is None:
new_axis: List[int] = list(range(num_dims))
else:
new_axis: List[int] = axis
if isinstance(new_axis, int):
new_axis = [new_axis]
else:
new_axis = new_axis
new_axis = [item + num_dims if item < 0 else item for item in new_axis]
return torch.flip(x, new_axis)
|
[
"torch.flip",
"torch.roll"
] |
[((198, 224), 'torch.roll', 'torch.roll', (['x', 'shift', 'axis'], {}), '(x, shift, axis)\n', (208, 224), False, 'import torch\n'), ((761, 784), 'torch.flip', 'torch.flip', (['x', 'new_axis'], {}), '(x, new_axis)\n', (771, 784), False, 'import torch\n')]
|
# Copyright 2018 HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta
from pathlib import Path
import pytest
import htmap
from htmap.utils import timeout_to_seconds, wait_for_path_to_exist
def test_returns_when_path_does_exist():
path = Path(__file__)
wait_for_path_to_exist(path)
@pytest.mark.parametrize("timeout", [0, -1])
def test_timeout_on_nonexistent_path(timeout):
path = Path("foo")
with pytest.raises(htmap.exceptions.TimeoutError):
wait_for_path_to_exist(path, timeout=timeout)
@pytest.mark.parametrize(
"timeout, expected", [(1, 1.0), (0.1, 0.1), (timedelta(seconds=2.3), 2.3), (None, None),],
)
def test_timeout_to_seconds(timeout, expected):
assert timeout_to_seconds(timeout) == expected
|
[
"htmap.utils.timeout_to_seconds",
"htmap.utils.wait_for_path_to_exist",
"pytest.raises",
"pathlib.Path",
"datetime.timedelta",
"pytest.mark.parametrize"
] |
[((905, 948), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""timeout"""', '[0, -1]'], {}), "('timeout', [0, -1])\n", (928, 948), False, 'import pytest\n'), ((853, 867), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (857, 867), False, 'from pathlib import Path\n'), ((873, 901), 'htmap.utils.wait_for_path_to_exist', 'wait_for_path_to_exist', (['path'], {}), '(path)\n', (895, 901), False, 'from htmap.utils import timeout_to_seconds, wait_for_path_to_exist\n'), ((1007, 1018), 'pathlib.Path', 'Path', (['"""foo"""'], {}), "('foo')\n", (1011, 1018), False, 'from pathlib import Path\n'), ((1029, 1073), 'pytest.raises', 'pytest.raises', (['htmap.exceptions.TimeoutError'], {}), '(htmap.exceptions.TimeoutError)\n', (1042, 1073), False, 'import pytest\n'), ((1083, 1128), 'htmap.utils.wait_for_path_to_exist', 'wait_for_path_to_exist', (['path'], {'timeout': 'timeout'}), '(path, timeout=timeout)\n', (1105, 1128), False, 'from htmap.utils import timeout_to_seconds, wait_for_path_to_exist\n'), ((1313, 1340), 'htmap.utils.timeout_to_seconds', 'timeout_to_seconds', (['timeout'], {}), '(timeout)\n', (1331, 1340), False, 'from htmap.utils import timeout_to_seconds, wait_for_path_to_exist\n'), ((1206, 1228), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(2.3)'}), '(seconds=2.3)\n', (1215, 1228), False, 'from datetime import timedelta\n')]
|
import hashlib
import logging
import time
from typing import Tuple
logger = logging.getLogger(__name__)
_startup_time = int(time.time())
logger.info("Startup time: %s", _startup_time)
def gen_hash(key: Tuple[str, ...]) -> str:
return hashlib.sha256(str(key + (_startup_time,)).encode("utf-8")).hexdigest()
|
[
"logging.getLogger",
"time.time"
] |
[((77, 104), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (94, 104), False, 'import logging\n'), ((126, 137), 'time.time', 'time.time', ([], {}), '()\n', (135, 137), False, 'import time\n')]
|
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import autograd
import time as t
import os
from itertools import chain
from torchvision import utils
from .spectral_normalization import SpectralNorm
class WassersteinLoss(torch.nn.Module):
def forward(self, x , target):
loss = -target.mean()*x.mean()
return loss
class Generator(torch.nn.Module):
def __init__(self, channels):
super().__init__()
self.main_module = nn.Sequential(
nn.ConvTranspose2d(in_channels=100, out_channels=1024, kernel_size=4, stride=1, padding=0),
nn.BatchNorm2d(num_features=1024),
nn.ReLU(True),
nn.ConvTranspose2d(in_channels=1024, out_channels=512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(num_features=512),
nn.ReLU(True),
nn.ConvTranspose2d(in_channels=512, out_channels=256, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(num_features=256),
nn.ReLU(True),
nn.ConvTranspose2d(in_channels=256, out_channels=channels, kernel_size=4, stride=2, padding=1))
self.output = nn.Tanh()
def forward(self, x):
x = self.main_module(x)
return self.output(x)
class Discriminator(torch.nn.Module):
def __init__(self, channels,version="DCGAN_M"):
super().__init__()
self.version = version
self.main_module = nn.Sequential(
SpectralNorm(nn.Conv2d(in_channels=1, out_channels=256, kernel_size=3, stride=1, padding=1)),
nn.LeakyReLU(0.2, inplace=True),
SpectralNorm(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=2, padding=1)),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
SpectralNorm(nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=4, stride=2, padding=1)),
nn.BatchNorm2d(1024),
nn.LeakyReLU(0.2, inplace=True),
)
if version == "DCGAN_L":
self.output = nn.Sequential(SpectralNorm(nn.Conv2d(in_channels=1024, out_channels=1, kernel_size=4, stride=1, padding=0)))
self.metric = torch.nn.BCEWithLogitsLoss()
elif version == "WGAN_GP":
self.output = nn.Sequential(SpectralNorm(nn.Conv2d(in_channels=1024, out_channels=1, kernel_size=4, stride=1, padding=0)))
self.metric = WassersteinLoss()
else:
self.output = nn.Sequential(SpectralNorm(nn.Conv2d(in_channels=1024, out_channels=1, kernel_size=4, stride=1, padding=0)),
nn.Sigmoid())
if version == "DCGAN":self.metric = torch.nn.BCELoss()
elif version == "DCGAN_M":self.metric = torch.nn.MSELoss()
else:
raise NotImplementedError
def forward(self, x, target=None):
x = self.main_module(x)
x = self.output(x)
return x.reshape(x.size(0),x.size(1)) #(b,1)
def calculate_gradient_penalty(self, real_images, fake_images,GP_lambda= 10):
batch_size = len(real_images)
device = next(self.parameters()).device
eta = torch.FloatTensor(batch_size,1,1,1).uniform_(0,1)
eta = eta.expand(batch_size, real_images.size(1), real_images.size(2), real_images.size(3))
eta = eta.to(device)
interpolated = eta * real_images + ((1 - eta) * fake_images)
interpolated = interpolated.to(device)
interpolated = eta * real_images + ((1 - eta) * fake_images)
interpolated = Variable(interpolated, requires_grad=True)
prob_interpolated = self(interpolated)
gradients = autograd.grad(outputs=prob_interpolated, inputs=interpolated,
grad_outputs=torch.ones(prob_interpolated.size()).to(device),
create_graph=True, retain_graph=True)[0]
grad_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * GP_lambda
return grad_penalty
class Binary_Checker(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.Tensor(1))
def forward(self,x):
shape=tuple(range(1,len(x.shape)))
return (x**2).mean(shape).unsqueeze(1)
class DCGAN_MODEL(object):
def __init__(self, args):
print("DCGAN model initalization.")
self.G = Generator(args.channels)
if args.GAN_TYPE == "ForceBINARY":
self.D = Binary_Checker()
else:
self.D = Discriminator(args.channels,args.GAN_TYPE)
self.D.version = args.GAN_TYPE
self.C = args.channels
self.check_cuda(True)
def check_cuda(self, cuda_flag=False):
print(cuda_flag)
if cuda_flag:
self.cuda_index = 0
self.cuda = True
self.D.cuda(self.cuda_index)
self.G.cuda(self.cuda_index)
print("Cuda enabled flag: {}".format(self.cuda))
else:
self.cuda = False
def save_to(self,path,mode="full"):
checkpoint = self.all_state_dict(mode=mode)
torch.save(checkpoint,path)
def all_state_dict(self,epoch=None,mode="full"):
checkpoint={}
checkpoint['epoch'] = epoch
checkpoint['D_state_dict'] = self.D.state_dict()
checkpoint['G_state_dict'] = self.G.state_dict()
if mode != "light":
if hasattr(self,"I2C"):checkpoint['C_state_dict'] = self.I2C.state_dict()
if hasattr(self,"D_optimizer"):checkpoint['D_optimizer'] = self.d_optimizer.state_dict()
if hasattr(self,"G_optimizer"):checkpoint['G_optimizer'] = self.g_optimizer.state_dict()
if hasattr(self,"C_optimizer"):checkpoint['C_optimizer'] = self.c_optimizer.state_dict()
return checkpoint
|
[
"torch.nn.MSELoss",
"torch.nn.ReLU",
"torch.nn.ConvTranspose2d",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.BCELoss",
"torch.nn.Tanh",
"torch.autograd.Variable",
"torch.nn.Conv2d",
"torch.FloatTensor",
"torch.save",
"torch.nn.BatchNorm2d",
"torch.Tensor",
"torch.nn.LeakyReLU",
"torch.nn.Sigmoid"
] |
[((1175, 1184), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (1182, 1184), True, 'import torch.nn as nn\n'), ((3587, 3629), 'torch.autograd.Variable', 'Variable', (['interpolated'], {'requires_grad': '(True)'}), '(interpolated, requires_grad=True)\n', (3595, 3629), False, 'from torch.autograd import Variable\n'), ((5136, 5164), 'torch.save', 'torch.save', (['checkpoint', 'path'], {}), '(checkpoint, path)\n', (5146, 5164), False, 'import torch\n'), ((518, 612), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(100)', 'out_channels': '(1024)', 'kernel_size': '(4)', 'stride': '(1)', 'padding': '(0)'}), '(in_channels=100, out_channels=1024, kernel_size=4,\n stride=1, padding=0)\n', (536, 612), True, 'import torch.nn as nn\n'), ((622, 655), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(1024)'}), '(num_features=1024)\n', (636, 655), True, 'import torch.nn as nn\n'), ((669, 682), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (676, 682), True, 'import torch.nn as nn\n'), ((698, 792), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(1024)', 'out_channels': '(512)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=1024, out_channels=512, kernel_size=3,\n stride=1, padding=1)\n', (716, 792), True, 'import torch.nn as nn\n'), ((802, 834), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(512)'}), '(num_features=512)\n', (816, 834), True, 'import torch.nn as nn\n'), ((848, 861), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (855, 861), True, 'import torch.nn as nn\n'), ((877, 971), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(512)', 'out_channels': '(256)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=512, out_channels=256, kernel_size=4, stride\n =2, padding=1)\n', (895, 971), True, 'import torch.nn as nn\n'), ((980, 1012), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 
'(256)'}), '(num_features=256)\n', (994, 1012), True, 'import torch.nn as nn\n'), ((1026, 1039), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1033, 1039), True, 'import torch.nn as nn\n'), ((1055, 1153), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(256)', 'out_channels': 'channels', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=256, out_channels=channels, kernel_size=4,\n stride=2, padding=1)\n', (1073, 1153), True, 'import torch.nn as nn\n'), ((1584, 1615), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1596, 1615), True, 'import torch.nn as nn\n'), ((1739, 1758), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (1753, 1758), True, 'import torch.nn as nn\n'), ((1772, 1803), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1784, 1803), True, 'import torch.nn as nn\n'), ((1928, 1948), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1024)'], {}), '(1024)\n', (1942, 1948), True, 'import torch.nn as nn\n'), ((1962, 1993), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1974, 1993), True, 'import torch.nn as nn\n'), ((2208, 2236), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {}), '()\n', (2234, 2236), False, 'import torch\n'), ((4160, 4175), 'torch.Tensor', 'torch.Tensor', (['(1)'], {}), '(1)\n', (4172, 4175), False, 'import torch\n'), ((1491, 1569), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(256)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=1, out_channels=256, kernel_size=3, stride=1, padding=1)\n', (1500, 1569), True, 'import torch.nn as nn\n'), ((1644, 1729), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(256)', 'out_channels': '(512)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=256, out_channels=512, 
kernel_size=4, stride=2, padding=1\n )\n', (1653, 1729), True, 'import torch.nn as nn\n'), ((1832, 1917), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(512)', 'out_channels': '(1024)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=512, out_channels=1024, kernel_size=4, stride=2,\n padding=1)\n', (1841, 1917), True, 'import torch.nn as nn\n'), ((3193, 3231), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_size', '(1)', '(1)', '(1)'], {}), '(batch_size, 1, 1, 1)\n', (3210, 3231), False, 'import torch\n'), ((2100, 2179), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1024)', 'out_channels': '(1)', 'kernel_size': '(4)', 'stride': '(1)', 'padding': '(0)'}), '(in_channels=1024, out_channels=1, kernel_size=4, stride=1, padding=0)\n', (2109, 2179), True, 'import torch.nn as nn\n'), ((2640, 2652), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2650, 2652), True, 'import torch.nn as nn\n'), ((2704, 2722), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (2720, 2722), False, 'import torch\n'), ((2325, 2404), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1024)', 'out_channels': '(1)', 'kernel_size': '(4)', 'stride': '(1)', 'padding': '(0)'}), '(in_channels=1024, out_channels=1, kernel_size=4, stride=1, padding=0)\n', (2334, 2404), True, 'import torch.nn as nn\n'), ((2518, 2597), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1024)', 'out_channels': '(1)', 'kernel_size': '(4)', 'stride': '(1)', 'padding': '(0)'}), '(in_channels=1024, out_channels=1, kernel_size=4, stride=1, padding=0)\n', (2527, 2597), True, 'import torch.nn as nn\n'), ((2775, 2793), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (2791, 2793), False, 'import torch\n')]
|
""" Util functions for SMPL
@@batch_skew
@@batch_rodrigues
@@batch_lrotmin
@@batch_global_rigid_transformation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def batch_skew(vec, batch_size=None):
"""
vec is N x 3, batch_size is int
returns N x 3 x 3. Skew_sym version of each matrix.
"""
with tf.name_scope("batch_skew", values=[vec]):
if batch_size is None:
batch_size = vec.shape.as_list()[0]
col_inds = tf.constant([1, 2, 3, 5, 6, 7])
indices = tf.reshape(
tf.reshape(tf.range(0, batch_size) * 9, [-1, 1]) + col_inds,
[-1, 1])
updates = tf.reshape(
tf.stack(
[
-vec[:, 2], vec[:, 1], vec[:, 2], -vec[:, 0], -vec[:, 1],
vec[:, 0]
],
axis=1), [-1])
out_shape = [batch_size * 9]
res = tf.scatter_nd(indices, updates, out_shape)
res = tf.reshape(res, [batch_size, 3, 3])
return res
def batch_rodrigues(theta, name=None):
"""
Theta is N x 3
"""
with tf.name_scope(name, "batch_rodrigues", [theta]):
batch_size = theta.shape.as_list()[0]
# angle = tf.norm(theta, axis=1)
# r = tf.expand_dims(tf.div(theta, tf.expand_dims(angle + 1e-8, -1)), -1)
# angle = tf.expand_dims(tf.norm(theta, axis=1) + 1e-8, -1)
angle = tf.expand_dims(tf.norm(theta + 1e-8, axis=1), -1)
r = tf.expand_dims(tf.div(theta, angle), -1)
angle = tf.expand_dims(angle, -1)
cos = tf.cos(angle)
sin = tf.sin(angle)
outer = tf.matmul(r, r, transpose_b=True, name="outer")
eyes = tf.tile(tf.expand_dims(tf.eye(3), 0), [batch_size, 1, 1])
R = cos * eyes + (1 - cos) * outer + sin * batch_skew(
r, batch_size=batch_size)
return R
def batch_lrotmin(theta, name=None):
""" NOTE: not used bc I want to reuse R and this is simple.
Output of this is used to compute joint-to-pose blend shape mapping.
Equation 9 in SMPL paper.
Args:
pose: `Tensor`, N x 72 vector holding the axis-angle rep of K joints.
This includes the global rotation so K=24
Returns
diff_vec : `Tensor`: N x 207 rotation matrix of 23=(K-1) joints with identity subtracted.,
"""
with tf.name_scope(name, "batch_lrotmin", [theta]):
with tf.name_scope("ignore_global"):
theta = theta[:, 3:]
# N*23 x 3 x 3
Rs = batch_rodrigues(tf.reshape(theta, [-1, 3]))
lrotmin = tf.reshape(Rs - tf.eye(3), [-1, 207])
return lrotmin
def batch_global_rigid_transformation(Rs, Js, parent, rotate_base=False):
"""
Computes absolute joint locations given pose.
rotate_base: if True, rotates the global rotation by 90 deg in x axis.
if False, this is the original SMPL coordinate.
Args:
Rs: N x 24 x 3 x 3 rotation vector of K joints
Js: N x 24 x 3, joint locations before posing
parent: 24 holding the parent id for each index
Returns
new_J : `Tensor`: N x 24 x 3 location of absolute joints
A : `Tensor`: N x 24 4 x 4 relative joint transformations for LBS.
"""
with tf.name_scope("batch_forward_kinematics", values=[Rs, Js]):
N = Rs.shape[0].value
if rotate_base:
print('Flipping the SMPL coordinate frame!!!!')
rot_x = tf.constant(
[[1, 0, 0], [0, -1, 0], [0, 0, -1]], dtype=Rs.dtype)
rot_x = tf.reshape(tf.tile(rot_x, [N, 1]), [N, 3, 3])
root_rotation = tf.matmul(Rs[:, 0, :, :], rot_x)
else:
root_rotation = Rs[:, 0, :, :]
# Now Js is N x 24 x 3 x 1
Js = tf.expand_dims(Js, -1)
def make_A(R, t, name=None):
# Rs is N x 3 x 3, ts is N x 3 x 1
with tf.name_scope(name, "Make_A", [R, t]):
R_homo = tf.pad(R, [[0, 0], [0, 1], [0, 0]])
t_homo = tf.concat([t, tf.ones([N, 1, 1])], 1)
return tf.concat([R_homo, t_homo], 2)
A0 = make_A(root_rotation, Js[:, 0])
results = [A0]
for i in range(1, parent.shape[0]):
j_here = Js[:, i] - Js[:, parent[i]]
A_here = make_A(Rs[:, i], j_here)
res_here = tf.matmul(
results[parent[i]], A_here, name="propA%d" % i)
results.append(res_here)
# 10 x 24 x 4 x 4
results = tf.stack(results, axis=1)
new_J = results[:, :, :3, 3]
# --- Compute relative A: Skinning is based on
# how much the bone moved (not the final location of the bone)
# but (final_bone - init_bone)
# ---
Js_w0 = tf.concat([Js, tf.zeros([N, 24, 1, 1])], 2)
init_bone = tf.matmul(results, Js_w0)
# Append empty 4 x 3:
init_bone = tf.pad(init_bone, [[0, 0], [0, 0], [0, 0], [3, 0]])
A = results - init_bone
return new_J, A
|
[
"tensorflow.ones",
"tensorflow.range",
"tensorflow.sin",
"tensorflow.reshape",
"tensorflow.pad",
"tensorflow.eye",
"tensorflow.constant",
"tensorflow.div",
"tensorflow.stack",
"tensorflow.matmul",
"tensorflow.tile",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.name_scope",
"tensorflow.norm",
"tensorflow.scatter_nd",
"tensorflow.expand_dims",
"tensorflow.cos"
] |
[((408, 449), 'tensorflow.name_scope', 'tf.name_scope', (['"""batch_skew"""'], {'values': '[vec]'}), "('batch_skew', values=[vec])\n", (421, 449), True, 'import tensorflow as tf\n'), ((549, 580), 'tensorflow.constant', 'tf.constant', (['[1, 2, 3, 5, 6, 7]'], {}), '([1, 2, 3, 5, 6, 7])\n', (560, 580), True, 'import tensorflow as tf\n'), ((984, 1026), 'tensorflow.scatter_nd', 'tf.scatter_nd', (['indices', 'updates', 'out_shape'], {}), '(indices, updates, out_shape)\n', (997, 1026), True, 'import tensorflow as tf\n'), ((1041, 1076), 'tensorflow.reshape', 'tf.reshape', (['res', '[batch_size, 3, 3]'], {}), '(res, [batch_size, 3, 3])\n', (1051, 1076), True, 'import tensorflow as tf\n'), ((1182, 1229), 'tensorflow.name_scope', 'tf.name_scope', (['name', '"""batch_rodrigues"""', '[theta]'], {}), "(name, 'batch_rodrigues', [theta])\n", (1195, 1229), True, 'import tensorflow as tf\n'), ((1605, 1630), 'tensorflow.expand_dims', 'tf.expand_dims', (['angle', '(-1)'], {}), '(angle, -1)\n', (1619, 1630), True, 'import tensorflow as tf\n'), ((1645, 1658), 'tensorflow.cos', 'tf.cos', (['angle'], {}), '(angle)\n', (1651, 1658), True, 'import tensorflow as tf\n'), ((1673, 1686), 'tensorflow.sin', 'tf.sin', (['angle'], {}), '(angle)\n', (1679, 1686), True, 'import tensorflow as tf\n'), ((1704, 1751), 'tensorflow.matmul', 'tf.matmul', (['r', 'r'], {'transpose_b': '(True)', 'name': '"""outer"""'}), "(r, r, transpose_b=True, name='outer')\n", (1713, 1751), True, 'import tensorflow as tf\n'), ((2419, 2464), 'tensorflow.name_scope', 'tf.name_scope', (['name', '"""batch_lrotmin"""', '[theta]'], {}), "(name, 'batch_lrotmin', [theta])\n", (2432, 2464), True, 'import tensorflow as tf\n'), ((3307, 3365), 'tensorflow.name_scope', 'tf.name_scope', (['"""batch_forward_kinematics"""'], {'values': '[Rs, Js]'}), "('batch_forward_kinematics', values=[Rs, Js])\n", (3320, 3365), True, 'import tensorflow as tf\n'), ((3816, 3838), 'tensorflow.expand_dims', 'tf.expand_dims', (['Js', '(-1)'], {}), '(Js, 
-1)\n', (3830, 3838), True, 'import tensorflow as tf\n'), ((4546, 4571), 'tensorflow.stack', 'tf.stack', (['results'], {'axis': '(1)'}), '(results, axis=1)\n', (4554, 4571), True, 'import tensorflow as tf\n'), ((4870, 4895), 'tensorflow.matmul', 'tf.matmul', (['results', 'Js_w0'], {}), '(results, Js_w0)\n', (4879, 4895), True, 'import tensorflow as tf\n'), ((4946, 4997), 'tensorflow.pad', 'tf.pad', (['init_bone', '[[0, 0], [0, 0], [0, 0], [3, 0]]'], {}), '(init_bone, [[0, 0], [0, 0], [0, 0], [3, 0]])\n', (4952, 4997), True, 'import tensorflow as tf\n'), ((747, 839), 'tensorflow.stack', 'tf.stack', (['[-vec[:, 2], vec[:, 1], vec[:, 2], -vec[:, 0], -vec[:, 1], vec[:, 0]]'], {'axis': '(1)'}), '([-vec[:, 2], vec[:, 1], vec[:, 2], -vec[:, 0], -vec[:, 1], vec[:, \n 0]], axis=1)\n', (755, 839), True, 'import tensorflow as tf\n'), ((1500, 1530), 'tensorflow.norm', 'tf.norm', (['(theta + 1e-08)'], {'axis': '(1)'}), '(theta + 1e-08, axis=1)\n', (1507, 1530), True, 'import tensorflow as tf\n'), ((1562, 1582), 'tensorflow.div', 'tf.div', (['theta', 'angle'], {}), '(theta, angle)\n', (1568, 1582), True, 'import tensorflow as tf\n'), ((2479, 2509), 'tensorflow.name_scope', 'tf.name_scope', (['"""ignore_global"""'], {}), "('ignore_global')\n", (2492, 2509), True, 'import tensorflow as tf\n'), ((2597, 2623), 'tensorflow.reshape', 'tf.reshape', (['theta', '[-1, 3]'], {}), '(theta, [-1, 3])\n', (2607, 2623), True, 'import tensorflow as tf\n'), ((3501, 3565), 'tensorflow.constant', 'tf.constant', (['[[1, 0, 0], [0, -1, 0], [0, 0, -1]]'], {'dtype': 'Rs.dtype'}), '([[1, 0, 0], [0, -1, 0], [0, 0, -1]], dtype=Rs.dtype)\n', (3512, 3565), True, 'import tensorflow as tf\n'), ((3677, 3709), 'tensorflow.matmul', 'tf.matmul', (['Rs[:, 0, :, :]', 'rot_x'], {}), '(Rs[:, 0, :, :], rot_x)\n', (3686, 3709), True, 'import tensorflow as tf\n'), ((4389, 4446), 'tensorflow.matmul', 'tf.matmul', (['results[parent[i]]', 'A_here'], {'name': "('propA%d' % i)"}), "(results[parent[i]], A_here, name='propA%d' 
% i)\n", (4398, 4446), True, 'import tensorflow as tf\n'), ((1791, 1800), 'tensorflow.eye', 'tf.eye', (['(3)'], {}), '(3)\n', (1797, 1800), True, 'import tensorflow as tf\n'), ((2659, 2668), 'tensorflow.eye', 'tf.eye', (['(3)'], {}), '(3)\n', (2665, 2668), True, 'import tensorflow as tf\n'), ((3614, 3636), 'tensorflow.tile', 'tf.tile', (['rot_x', '[N, 1]'], {}), '(rot_x, [N, 1])\n', (3621, 3636), True, 'import tensorflow as tf\n'), ((3941, 3978), 'tensorflow.name_scope', 'tf.name_scope', (['name', '"""Make_A"""', '[R, t]'], {}), "(name, 'Make_A', [R, t])\n", (3954, 3978), True, 'import tensorflow as tf\n'), ((4005, 4040), 'tensorflow.pad', 'tf.pad', (['R', '[[0, 0], [0, 1], [0, 0]]'], {}), '(R, [[0, 0], [0, 1], [0, 0]])\n', (4011, 4040), True, 'import tensorflow as tf\n'), ((4127, 4157), 'tensorflow.concat', 'tf.concat', (['[R_homo, t_homo]', '(2)'], {}), '([R_homo, t_homo], 2)\n', (4136, 4157), True, 'import tensorflow as tf\n'), ((4821, 4844), 'tensorflow.zeros', 'tf.zeros', (['[N, 24, 1, 1]'], {}), '([N, 24, 1, 1])\n', (4829, 4844), True, 'import tensorflow as tf\n'), ((634, 657), 'tensorflow.range', 'tf.range', (['(0)', 'batch_size'], {}), '(0, batch_size)\n', (642, 657), True, 'import tensorflow as tf\n'), ((4080, 4098), 'tensorflow.ones', 'tf.ones', (['[N, 1, 1]'], {}), '([N, 1, 1])\n', (4087, 4098), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python
import sys
import os
# source: https://raw.githubusercontent.com/riscv/riscv-poky/master/scripts/sysroot-relativelinks.py
# Take a sysroot directory and turn all the absolute symlinks and turn them into
# relative ones such that the sysroot is usable within another system.
if len(sys.argv) != 2:
print("Usage is " + sys.argv[0] + "<directory>")
sys.exit(1)
topdir = sys.argv[1]
topdir = os.path.abspath(topdir)
def handlelink(filePath, subdir):
link = os.readlink(filePath)
if link[0] != "/":
return
if link.startswith(topdir):
return
newLink = os.path.relpath(topdir+link, subdir)
print("\t%s replacing %s => %s" % (filePath, link, newLink))
os.unlink(filePath)
os.symlink(newLink, filePath)
for subdir, dirs, files in os.walk(topdir):
for file in files:
filePath = os.path.join(subdir, file)
if os.path.islink(filePath):
handlelink(filePath, subdir)
|
[
"os.path.abspath",
"os.unlink",
"os.readlink",
"os.walk",
"os.path.islink",
"os.path.relpath",
"os.symlink",
"os.path.join",
"sys.exit"
] |
[((422, 445), 'os.path.abspath', 'os.path.abspath', (['topdir'], {}), '(topdir)\n', (437, 445), False, 'import os\n'), ((807, 822), 'os.walk', 'os.walk', (['topdir'], {}), '(topdir)\n', (814, 822), False, 'import os\n'), ((379, 390), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (387, 390), False, 'import sys\n'), ((492, 513), 'os.readlink', 'os.readlink', (['filePath'], {}), '(filePath)\n', (503, 513), False, 'import os\n'), ((618, 656), 'os.path.relpath', 'os.path.relpath', (['(topdir + link)', 'subdir'], {}), '(topdir + link, subdir)\n', (633, 656), False, 'import os\n'), ((725, 744), 'os.unlink', 'os.unlink', (['filePath'], {}), '(filePath)\n', (734, 744), False, 'import os\n'), ((749, 778), 'os.symlink', 'os.symlink', (['newLink', 'filePath'], {}), '(newLink, filePath)\n', (759, 778), False, 'import os\n'), ((866, 892), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (878, 892), False, 'import os\n'), ((904, 928), 'os.path.islink', 'os.path.islink', (['filePath'], {}), '(filePath)\n', (918, 928), False, 'import os\n')]
|
from fastapi import FastAPI
import pickle
from SatImages import SatImage
import uvicorn
def load_models():
"""
load the models from disk
and put them in a dictionary
Returns:
dict: loaded models
"""
models = {
"knn": pickle.load(open("./model_weights/clf.bin", 'rb'))
}
print("Models loaded from disk")
return models
def get_prediction(feats, clf):
pred = clf.predict(feats)[0] # just get single value
prob = clf.predict_proba(feats)[0].tolist() # send to list for return
return {'prediction': int(round(pred)), 'probability': prob}
# initiate API
app = FastAPI()
@app.get("/")
def index():
return {'message':'hello, everyone'}
@app.post("/predict")
def predict(data: SatImage):
data = data.dict()
feats = [[feat_val for _, feat_val in data.items()]]
models = load_models()
pred = get_prediction(feats, models['knn'])
return pred
if __name__ == "__main__":
uvicorn.run(app, host = "127.0.0.1", port = 8000)
|
[
"uvicorn.run",
"fastapi.FastAPI"
] |
[((623, 632), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (630, 632), False, 'from fastapi import FastAPI\n'), ((957, 1002), 'uvicorn.run', 'uvicorn.run', (['app'], {'host': '"""127.0.0.1"""', 'port': '(8000)'}), "(app, host='127.0.0.1', port=8000)\n", (968, 1002), False, 'import uvicorn\n')]
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('Contacts/', views.Contacts.as_view(), name='contact'),
path('Profile/', views.profile.as_view(), name='profile'),
]
|
[
"django.urls.path"
] |
[((71, 106), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (75, 106), False, 'from django.urls import path\n')]
|
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import random
from flask import Flask, request
from google.cloud import bigquery
import datetime
from concurrent.futures import ThreadPoolExecutor
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
logger = logging.getLogger(__name__)
app = Flask(__name__)
moves = ['R', 'L', 'F']
client = bigquery.Client()
executor = ThreadPoolExecutor()
table_name = "allegro-hackathon12-2186.battle_ds.events"
@app.route("/", methods=['GET'])
def index():
return "Let the battle begin!"
@app.route("/", methods=['POST'])
def move():
logger.info(request.json)
state = request.json
me_href = state["_links"]["self"]["href"]
dims_w = state["arena"]["dims"][0]
dims_h = state["arena"]["dims"][1]
arena = dict()
insert_rows = []
ts = datetime.datetime.now().timestamp()
for player_name, player_state in state["arena"]["state"].items():
pos_x = player_state["x"]
pos_y = player_state["y"]
arena[(pos_x, pos_y)] = player_name
if player_name == me_href:
me_x = pos_x
me_y = pos_y
me_d = player_state["direction"]
me_was_hit = player_state["wasHit"]
#insert_rows.append({
# 'timestamp': ts,
# 'player': player_name,
# **player_state,
# })
#executor.submit(client.insert_rows_json, table_name, insert_rows)
if me_was_hit: # run!
if random.random() < 0.3:
return "F"
# check if somebody is on the line
if me_d == "N":
if (me_x, me_y - 1) in arena or (me_x, me_y - 2) in arena or (me_x, me_y - 3) in arena:
return "T"
elif me_d == "E":
if (me_x+1, me_y) in arena or (me_x+2, me_y) in arena or (me_x+3, me_y) in arena:
return "T"
elif me_d == "S":
if (me_x, me_y + 1) in arena or (me_x, me_y + 2) in arena or (me_x, me_y + 3) in arena:
return "T"
else:
if (me_x-1, me_y) in arena or (me_x-2, me_y) in arena or (me_x-2, me_y) in arena:
return "T"
if random.random() < 0.2:
return "F"
return "R" # round turret
if __name__ == "__main__":
app.run(debug=False,host='0.0.0.0',port=int(os.environ.get('PORT', 8080)))
|
[
"google.cloud.bigquery.Client",
"flask.Flask",
"os.environ.get",
"random.random",
"concurrent.futures.ThreadPoolExecutor",
"datetime.datetime.now",
"logging.getLogger"
] |
[((842, 869), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (859, 869), False, 'import logging\n'), ((877, 892), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (882, 892), False, 'from flask import Flask, request\n'), ((927, 944), 'google.cloud.bigquery.Client', 'bigquery.Client', ([], {}), '()\n', (942, 944), False, 'from google.cloud import bigquery\n'), ((956, 976), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {}), '()\n', (974, 976), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((797, 831), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', '"""INFO"""'], {}), "('LOGLEVEL', 'INFO')\n", (811, 831), False, 'import os\n'), ((2679, 2694), 'random.random', 'random.random', ([], {}), '()\n', (2692, 2694), False, 'import random\n'), ((1392, 1415), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1413, 1415), False, 'import datetime\n'), ((2042, 2057), 'random.random', 'random.random', ([], {}), '()\n', (2055, 2057), False, 'import random\n'), ((2826, 2854), 'os.environ.get', 'os.environ.get', (['"""PORT"""', '(8080)'], {}), "('PORT', 8080)\n", (2840, 2854), False, 'import os\n')]
|
from dataclasses import dataclass
from typing import ClassVar
from datalabs.features.features import Features, Value
from datalabs.tasks.base import register_task, TaskTemplate, TaskType
@register_task(TaskType.kg_prediction)
@dataclass
class KGPrediction(TaskTemplate):
task: TaskType = TaskType.kg_prediction
input_schema: ClassVar[Features] = Features(
{"head": Value("string"), "link": Value("string"), "tail": Value("string")}
)
# TODO(Pengfei): label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
head_column: str = "head"
link_column: str = "link"
tail_column: str = "tail"
def __post_init__(self):
self.task_categories = [
task_cls.get_task() for task_cls in self.get_task_parents()
]
self.input_schema: ClassVar[Features] = Features(
{
self.head_column: Value("string"),
self.link_column: Value("string"),
self.tail_column: Value("string"),
}
)
@register_task(TaskType.kg_link_tail_prediction)
@dataclass
class KGLinkTailPrediction(KGPrediction):
task: TaskType = TaskType.kg_link_tail_prediction
head_column: str = "head"
link_column: str = "link"
tail_column: str = "tail"
|
[
"datalabs.features.features.Value",
"datalabs.tasks.base.register_task"
] |
[((191, 228), 'datalabs.tasks.base.register_task', 'register_task', (['TaskType.kg_prediction'], {}), '(TaskType.kg_prediction)\n', (204, 228), False, 'from datalabs.tasks.base import register_task, TaskTemplate, TaskType\n'), ((1034, 1081), 'datalabs.tasks.base.register_task', 'register_task', (['TaskType.kg_link_tail_prediction'], {}), '(TaskType.kg_link_tail_prediction)\n', (1047, 1081), False, 'from datalabs.tasks.base import register_task, TaskTemplate, TaskType\n'), ((384, 399), 'datalabs.features.features.Value', 'Value', (['"""string"""'], {}), "('string')\n", (389, 399), False, 'from datalabs.features.features import Features, Value\n'), ((409, 424), 'datalabs.features.features.Value', 'Value', (['"""string"""'], {}), "('string')\n", (414, 424), False, 'from datalabs.features.features import Features, Value\n'), ((434, 449), 'datalabs.features.features.Value', 'Value', (['"""string"""'], {}), "('string')\n", (439, 449), False, 'from datalabs.features.features import Features, Value\n'), ((888, 903), 'datalabs.features.features.Value', 'Value', (['"""string"""'], {}), "('string')\n", (893, 903), False, 'from datalabs.features.features import Features, Value\n'), ((939, 954), 'datalabs.features.features.Value', 'Value', (['"""string"""'], {}), "('string')\n", (944, 954), False, 'from datalabs.features.features import Features, Value\n'), ((990, 1005), 'datalabs.features.features.Value', 'Value', (['"""string"""'], {}), "('string')\n", (995, 1005), False, 'from datalabs.features.features import Features, Value\n')]
|
"""
this python file is written orientated by the TUIO spezification
https://www.tuio.org/?specification
It supports only 2D Object|Blob|Cursor
Profile
|
---------------------
| | |
Object Cursor Blob
"""
from pythonosc.osc_message_builder import OscMessageBuilder
from pythonosc.osc_message import OscMessage
from pythontuio.const import TUIO_BLOB, TUIO_CURSOR, TUIO_OBJECT
class Profile:
"""
custom class of all subjects passing the TUIO connection.
See more at https://www.tuio.org/?specification
"""
def __init__(self, session_id):
self.session_id = session_id
class Object(Profile):
"""
TUIO Object 2D Interactive Surface
"""
def __init__(self, session_id):
super().__init__(session_id)
self.class_id = -1 # i
self.position = (0, 0) # x,y
self.angle = 0 # a
self.velocity = (0, 0) # X,Y
self.velocity_rotation = 0 # A
self.motion_acceleration = 0 # m
self.rotation_acceleration = 0 # r
def get_message(self) -> OscMessage:
"""
returns the OSC message of the Object with the TUIO spezification
"""
x, y = self.position
X, Y = self.velocity
builder = OscMessageBuilder(address=TUIO_OBJECT)
for val in [
"set",
int(self.session_id),
int(self.class_id),
float(x),
float(y),
float(self.angle),
float(X),
float(Y),
float(self.velocity_rotation),
float(self.motion_acceleration),
float(self.rotation_acceleration)
]:
builder.add_arg(val)
return builder.build()
class Cursor(Profile):
"""
TUIO Cursor 2D Interactive Surface
"""
def __init__(self, session_id):
super().__init__(session_id)
self.position = (0, 0) # x,y
self.velocity = (0, 0) # X,Y
self.motion_acceleration = 0 # m
def get_message(self)-> OscMessage:
"""
returns the OSC message of the Cursor with the TUIO spezification
"""
x, y = self.position
X, Y = self.velocity
builder = OscMessageBuilder(address=TUIO_CURSOR)
for val in [
"set",
self.session_id,
float(x),
float(y),
float(X),
float(Y),
float(self.motion_acceleration)
]:
builder.add_arg(val)
return builder.build()
class Blob(Profile):
# pylint: disable=too-many-instance-attributes
"""
TUIO Blob 2D Interactive Surface
"""
def __init__(self, session_id):
super().__init__(session_id)
self.position = (0, 0) # x,y
self.angle = 5 # a
self.dimension = (.1, .1) # w, h
self.area = 0.1 # f
self.velocity = (0.1, 0.1) # X,Y
self.velocity_rotation = 0.1 # A
self.motion_acceleration = 0.1 # m
self.rotation_acceleration = 0.1 # r
def get_message(self)-> OscMessage:
"""
returns the OSC message of the Blob with the TUIO spezification
"""
x, y = self.position
X, Y = self.velocity
w, h = self.dimension
builder = OscMessageBuilder(address=TUIO_BLOB)
for val in [
"set",
self.session_id,
float(x),
float(y),
float(self.angle),
float(w),
float(h),
float(self.area),
float(X),
float(Y),
float(self.velocity_rotation),
float(self.motion_acceleration),
float(self.rotation_acceleration)
]:
builder.add_arg(val)
return builder.build()
|
[
"pythonosc.osc_message_builder.OscMessageBuilder"
] |
[((1417, 1455), 'pythonosc.osc_message_builder.OscMessageBuilder', 'OscMessageBuilder', ([], {'address': 'TUIO_OBJECT'}), '(address=TUIO_OBJECT)\n', (1434, 1455), False, 'from pythonosc.osc_message_builder import OscMessageBuilder\n'), ((2463, 2501), 'pythonosc.osc_message_builder.OscMessageBuilder', 'OscMessageBuilder', ([], {'address': 'TUIO_CURSOR'}), '(address=TUIO_CURSOR)\n', (2480, 2501), False, 'from pythonosc.osc_message_builder import OscMessageBuilder\n'), ((3705, 3741), 'pythonosc.osc_message_builder.OscMessageBuilder', 'OscMessageBuilder', ([], {'address': 'TUIO_BLOB'}), '(address=TUIO_BLOB)\n', (3722, 3741), False, 'from pythonosc.osc_message_builder import OscMessageBuilder\n')]
|
import sys
import time
import forward_messages
import helpers
try:
import config
except (ImportError, ModuleNotFoundError):
print(
"config.py not found. Rename config.example.py to config.py after configuration."
)
sys.exit(1)
def main():
if config.forward_user:
forward_messages.forward(config.forward_user)
reddit = helpers.initialize_reddit()
participated = set(helpers.load_data("participated"))
stats = helpers.load_data("stats")
participated = participated.union(get_participants(reddit, stats["last_daily_run"]))
helpers.write_data("participated", list(participated))
stats["last_daily_run"] = (
time.time() - 60
) # to cover accidental gaps due to execution time
helpers.write_data("stats", stats)
def get_participants(reddit, last_check):
participated = set()
old_comments = False
old_submissions = False
for submission in reddit.subreddit(config.target_subreddit).new(limit=None):
if submission.created_utc < last_check:
old_submissions = True
break
try:
participated.add(submission.author.name)
except AttributeError:
# More than likely a deleted user
pass
for comment in reddit.subreddit(config.target_subreddit).comments(limit=None):
if comment.created_utc < last_check:
old_comments = True
break
try:
participated.add(comment.author.name)
except AttributeError:
# More than likely a deleted user
pass
if (
not old_comments or not old_submissions
) and "--ignore-old-comments-warning" not in sys.argv:
raise Exception(
"Not all old comments were retrieved. Run again with --ignore-old-comments-warning to "
"suppress."
)
return participated
if __name__ == "__main__":
main()
|
[
"helpers.write_data",
"helpers.load_data",
"time.time",
"forward_messages.forward",
"sys.exit",
"helpers.initialize_reddit"
] |
[((363, 390), 'helpers.initialize_reddit', 'helpers.initialize_reddit', ([], {}), '()\n', (388, 390), False, 'import helpers\n'), ((461, 487), 'helpers.load_data', 'helpers.load_data', (['"""stats"""'], {}), "('stats')\n", (478, 487), False, 'import helpers\n'), ((755, 789), 'helpers.write_data', 'helpers.write_data', (['"""stats"""', 'stats'], {}), "('stats', stats)\n", (773, 789), False, 'import helpers\n'), ((241, 252), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (249, 252), False, 'import sys\n'), ((303, 348), 'forward_messages.forward', 'forward_messages.forward', (['config.forward_user'], {}), '(config.forward_user)\n', (327, 348), False, 'import forward_messages\n'), ((414, 447), 'helpers.load_data', 'helpers.load_data', (['"""participated"""'], {}), "('participated')\n", (431, 447), False, 'import helpers\n'), ((678, 689), 'time.time', 'time.time', ([], {}), '()\n', (687, 689), False, 'import time\n')]
|
#!/usr/bin/env python
import curses
import curses.textpad
import time
from plugin_utils import *
class TerminalFrontend:
'''
Text based curses terminal frontend for the bot.
'''
def __init__(self, stdscr=None):
'''
Initialize the frontend.
:param stdscr: The curses main window, if one exists.
'''
self.message_queues = {}
self.channel = ""
if not stdscr:
self.main_window = curses.initscr()
else:
self.main_window = stdscr
self.on_enter_triggers = []
self.on_enter_triggers.append(self.send_to_channel)
self._should_exit = False
self.input_buffer = ""
self.init_window()
def init_window(self):
'''
Initialize the geometry of the display window, given the current size.
'''
(self.max_y, self.max_x) = self.main_window.getmaxyx()
self.output_start_x = 5
self.output_start_y = 5
self.output_max_x = self.max_x - 30
self.output_max_y = self.max_y - 10
self.channels_start_x = self.output_max_x + 4
self.channels_start_y = self.output_start_y
self.channels_max_x = self.max_x - 4
self.channels_max_y = self.output_max_y
self.line_length = self.output_max_x - self.output_start_x
self.input_buffer_start_y = self.max_y - 5
self.input_buffer_start_x = self.output_start_x
self.input_buffer_max_y = self.max_y - 1
self.input_buffer_max_x = self.output_max_x
def send_to_channel(self, message):
'''
Send a string to the active channel.
:param message: The message to send.
'''
self.message_queues[self.channel].reverse()
self.message_queues[self.channel].append("Sending:" + message + " to " + self.channel)
try:
if '/' in self.channel:
send_message(self.channel, message, 'chat')
else:
send_message(self.channel, message, 'groupchat')
except Exception as e:
self.message_queues[self.channel].append(str(e))
self.message_queues[self.channel].reverse()
def add_trigger_on_enter(self, function):
'''
Add a function to be called when the enter key is hit.
:param function: The callback to be triggered.
'''
self.on_enter_triggers.append(function)
def display_message(self, message):
'''
Takes a new message object and handles it appropriately, displaying it in its queue.
:param message: The message to display.
'''
if message.Room:
new_channel = message.Room
else:
new_channel = message.From
if new_channel not in self.message_queues:
self.message_queues[new_channel] = []
if not self.channel:
self.channel = new_channel
aligned_message_block = ['\n', message.From] # 'mucnick' and 'mucroom' ('muc' in status messages)
if message.From_Nick:
aligned_message_block = ['\n', message.From_Nick]
for i in xrange(0, len(message.Body), self.line_length):
aligned_message_block.append(message.Body[i:i+self.line_length])
self.message_queues[new_channel].reverse()
for line in aligned_message_block:
self.message_queues[new_channel].append(line)
self.message_queues[new_channel].reverse()
self.message_queues[new_channel] = self.message_queues[new_channel][:self.output_max_y]
self.paint()
def paint(self):
'''
Paint the screen. Should be called per "tick"
'''
self.main_window.clear()
lines = self.message_queues[self.channel]
print_position = self.output_max_y
for line in lines:
try:
self.main_window.addstr(print_position, self.output_start_x, line)
except:
pass
print_position -= 1
if print_position < self.output_start_y:
break
print_position = self.channels_start_y
for channel in self.message_queues.keys():
if channel == self.channel:
channel = '=' + channel + '='
channel = channel[:(self.channels_max_x - self.channels_start_x)]
try:
self.main_window.addstr(print_position, self.channels_start_x, channel)
except:
pass
print_position += 1
if print_position > self.channels_max_y:
break
self.main_window.addstr(self.input_buffer_start_y, self.input_buffer_start_x, "> " + self.input_buffer[(-self.line_length - 2):])
self.main_window.refresh()
def handle_user_input(self):
'''
Handle any key events that have been generated.
'''
try:
c = self.main_window.getkey()
if c == u"\u001B": # Escape
curses.endwin()
self._should_exit = True
return
elif c == u"\u007F" or c == "KEY_BACKSPACE": # Delete
self.input_buffer = self.input_buffer[:-1]
elif c == '\n': # Enter
for function in self.on_enter_triggers:
function(self.input_buffer)
self.input_buffer = ""
elif c == "KEY_PPAGE" or c == "KEY_NPAGE":
if not len(self.message_queues.keys()):
return
prev_channel = self.message_queues.keys()[len(self.message_queues.keys())-1]
next_channel = self.channel
for channel in self.message_queues.keys():
if prev_channel == self.channel and c == "KEY_NPAGE":
next_channel = channel
break
if channel == self.channel and c == "KEY_PPAGE":
break
prev_channel = channel
if c == "KEY_PPAGE":
self.switch_channel(prev_channel)
elif c == "KEY_NPAGE":
self.switch_channel(next_channel)
elif c == "KEY_RESIZE":
self.init_window()
else:
self.input_buffer += c
self.paint()
#One of my dirtier hacks, for debugging purposes.
except Exception as e:
self.input_buffer += str(e)
self.paint()
def should_exit(self):
'''
Has the user requested that the bot exit?
'''
return self._should_exit
def switch_channel(self, channel):
'''
Switch the active channel
:param channel: The channel to switch to.
'''
if channel in self.message_queues:
self.channel = channel
|
[
"curses.initscr",
"curses.endwin"
] |
[((406, 422), 'curses.initscr', 'curses.initscr', ([], {}), '()\n', (420, 422), False, 'import curses\n'), ((4229, 4244), 'curses.endwin', 'curses.endwin', ([], {}), '()\n', (4242, 4244), False, 'import curses\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from typing import Union
import h5py
import os
import traceback
from h5grove.encoders import orjson_encode
from h5grove.models import LinkResolution
from h5grove.utils import NotFoundError
from tornado import web
from tornado.httpclient import HTTPError
from notebook.base.handlers import APIHandler
from notebook.utils import url_path_join
from .config import HdfConfig
from .exception import JhdfError
from .responses import create_response
from .util import jsonize
__all__ = ["HdfBaseManager", "HdfFileManager", "HdfBaseHandler"]
## manager
class HdfBaseManager:
"""Base class for implementing HDF5 handling"""
def __init__(self, log, notebook_dir):
self.log = log
self.notebook_dir = notebook_dir
def _get(self, f, uri, **kwargs):
raise NotImplementedError
def get(self, relfpath, uri, **kwargs):
def _handleErr(code: int, msg: Union[str, dict]):
extra = dict(
(
("relfpath", relfpath),
("uri", uri),
*kwargs.items(),
)
)
if isinstance(msg, dict):
# encode msg as json
msg["debugVars"] = {**msg.get("debugVars", {}), **extra}
msg = orjson_encode(msg).decode()
else:
msg = "\n".join((msg, ", ".join(f"{key}: {val}" for key, val in extra.items())))
self.log.error(msg)
raise HTTPError(code, msg)
if not relfpath:
msg = f"The request was malformed; fpath should not be empty."
_handleErr(400, msg)
fpath = url_path_join(self.notebook_dir, relfpath)
if not os.path.exists(fpath):
msg = f"The request specified a file that does not exist."
_handleErr(403, msg)
else:
try:
# test opening the file with h5py
with h5py.File(fpath, "r"):
pass
except Exception:
msg = f"The request did not specify a file that `h5py` could understand.\n" f"Error: {traceback.format_exc()}"
_handleErr(401, msg)
try:
result = self._get(fpath, uri, **kwargs)
except JhdfError as e:
msg = e.args[0]
msg["traceback"] = traceback.format_exc()
msg["type"] = "JhdfError"
_handleErr(400, msg)
except NotFoundError as e:
_handleErr(404, str(e))
except Exception:
msg = f"Found and opened file, error getting contents from object specified by the uri.\n" f"Error: {traceback.format_exc()}"
_handleErr(500, msg)
return result
class HdfFileManager(HdfBaseManager):
"""Implements base HDF5 file handling"""
def __init__(self, log, notebook_dir, resolve_links):
super().__init__(log, notebook_dir)
self.resolve_links = resolve_links
def _get(self, fpath, uri, **kwargs):
with h5py.File(fpath, "r") as f:
return self._getFromFile(f, uri, **kwargs)
def _getFromFile(self, f, uri, **kwargs):
return jsonize(self._getResponse(create_response(f, uri, self.resolve_links), **kwargs))
def _getResponse(self, responseObj, **kwargs):
raise NotImplementedError
## handler
class HdfBaseHandler(APIHandler):
managerClass = None
"""Base class for HDF5 api handlers
"""
def initialize(self, notebook_dir):
if self.managerClass is None:
raise NotImplementedError
self.notebook_dir = notebook_dir
hdf_config = HdfConfig(config=self.config)
self.manager = self.managerClass(log=self.log, notebook_dir=notebook_dir, resolve_links=LinkResolution.ONLY_VALID if hdf_config.resolve_links else LinkResolution.NONE)
@web.authenticated
async def get(self, path):
"""Based on an api request, get either the contents of a group or a
slice of a dataset and return it as serialized JSON.
"""
uri = "/" + self.get_query_argument("uri").lstrip("/")
itemss = ()
# get any query parameter vals
_kws = ("min_ndim", "ixstr", "subixstr")
_vals = (self.get_query_argument(kw, default=None) for kw in _kws)
itemss += (zip(_kws, _vals),)
# get any repeated query parameter array vals
_array_kws = ("attr_keys",)
_array_vals = (self.get_query_arguments(kw) or None for kw in _array_kws)
itemss += (zip(_array_kws, _array_vals),)
# filter all of the collected params and vals into a kwargs dict
kwargs = {k: v if v else None for items in itemss for k, v in items}
# do any needed type conversions of param vals
_num_kws = ("min_ndim",)
for k in (k for k in _num_kws if kwargs[k] is not None):
kwargs[k] = int(kwargs[k])
try:
self.finish(orjson_encode(self.manager.get(path, uri, **kwargs), default=jsonize))
except HTTPError as err:
self.set_status(err.code)
response = err.response.body if err.response else str(err.code)
self.finish("\n".join((response, err.message)))
# def getQueryArguments(self, key, func=None):
# if func is not None:
# return [func(x) for x in self.get_query_argument(key).split(',')] if key in self.request.query_arguments else None
# else:
# return [x for x in self.get_query_argument(key).split(',')] if key in self.request.query_arguments else None
|
[
"h5py.File",
"notebook.utils.url_path_join",
"h5grove.encoders.orjson_encode",
"os.path.exists",
"tornado.httpclient.HTTPError",
"traceback.format_exc"
] |
[((1763, 1805), 'notebook.utils.url_path_join', 'url_path_join', (['self.notebook_dir', 'relfpath'], {}), '(self.notebook_dir, relfpath)\n', (1776, 1805), False, 'from notebook.utils import url_path_join\n'), ((1591, 1611), 'tornado.httpclient.HTTPError', 'HTTPError', (['code', 'msg'], {}), '(code, msg)\n', (1600, 1611), False, 'from tornado.httpclient import HTTPError\n'), ((1822, 1843), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (1836, 1843), False, 'import os\n'), ((3173, 3194), 'h5py.File', 'h5py.File', (['fpath', '"""r"""'], {}), "(fpath, 'r')\n", (3182, 3194), False, 'import h5py\n'), ((2051, 2072), 'h5py.File', 'h5py.File', (['fpath', '"""r"""'], {}), "(fpath, 'r')\n", (2060, 2072), False, 'import h5py\n'), ((2469, 2491), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2489, 2491), False, 'import traceback\n'), ((1397, 1415), 'h5grove.encoders.orjson_encode', 'orjson_encode', (['msg'], {}), '(msg)\n', (1410, 1415), False, 'from h5grove.encoders import orjson_encode\n'), ((2231, 2253), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2251, 2253), False, 'import traceback\n'), ((2797, 2819), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2817, 2819), False, 'import traceback\n')]
|
#!/usr/bin/env python3
"""Entropy and information theory related calculations.
**Author: <NAME>**
"""
######################## Imports ########################
import numpy as np
import stp
######################## Helper functions ########################
def _eps_filter(x):
""" Checks if the value is within machine-epsilon of zero and maps it to
zero if it is the case. Useful for removing negative values in entropies that should otherwise be zero.
Args:
x (float): value to be checked.
Returns:
(float): x if the value is not within machine epsilon of zero, 0 otherwise.
"""
return x if not np.isclose(x, 0, atol=9*10E-15) else 0
######################## Entropy calculations ########################
def entropy(p):
""" Calculates the Shannon entropy for a marginal distribution.
Args:
p (np.ndarray): the marginal distribution.
Returns:
(float): the entropy of p
"""
# Since zeros do not contribute to the Shannon entropy by definition, we
# ignore them to avoid any errors/warnings.
p = p[p != 0]
H = -np.dot(p, np.log(p))
# Filter against machine epsilon
return _eps_filter(H)
def delta_entropy(R, p):
""" Calculates the discrete time change in entropy using the entropy of p
evolved with R, minus the entropy of p.
Args:
R (np.ndarray): the transition matrix.
p (np.ndarray): the marginal distribution.
Returns:
(float): the change in entropy
"""
return entropy(step(R, p)) - entropy(p)
def relative_entropy(p, q):
""" Calculates the Kullback-Leibler divergence, which is nonnegative and
vanishes if and only if the distributions coincide.
Args:
p, q (np.ndarray): the probability distributions.
Returns:
(float): the relative entropy.
"""
if p.shape[0] != q.shape[0]:
print('Dimensions of vectors are not equal. Cannot find relative entropy.')
sys.exit()
# Any values where p is zero are defined to be zero and hence do not
# contribute to the relative entropy
# By masking q as well we automatically skip the values that were supposed
# to vanish with p avoiding any misalignment issues
# Note that by masking q only where p is zero doesn't remove
# any mismatching meaning it will still be infinite (as it should be)
# in the case where q has a zero that p does not.
p_filtered = p[p != 0]
log_ratio = np.log(p_filtered / q[p != 0])
return np.dot(p_filtered, log_ratio)
def entropy_production(matrix, p, discrete=True):
""" Calculates the entropy production for either discrete or continuous
time.
Args:
matrix (np.ndarray): the stochastic matrix, either a discrete time transition matrix or a continuous time rate matrix.
p (np.ndarray): the marginal distribution
Kwargs:
discrete (bool): True if we are calculating the discrete time entropy production (nats), False if we are calculating it in continuous time (nats/time).
Returns:
(float/np.inf): the entropy production
"""
log_product = matrix * np.log( matrix / matrix.T )
# The entropy term only exists in the case of discrete time
# it vanishes when we calculate the continuous time EP,
# by multiplying by the boolean we include it only when
# necessary
EP = np.dot(log_product.sum(axis=0), p) - (entropy(p) * discrete) \
- np.dot(stp.step(matrix, p), np.log(p))
return EP
def entropy_flow(R, p):
""" Calculates the discrete time entropy flow. This has not been
generalized to handle the continuous time entropy flow yet.
Args:
R (np.ndarray): the discrete time transition matrix
p (np.ndarray): the marginal distribution
Returns:
(float): the entropy flow
"""
# Vectorized calculation
log_product = R * np.log( R / R.T )
p_step = step(R, p)
EF = -np.dot(log_product.sum(axis=0), p) + entropy(p_step) \
+ np.dot(p_step, np.log(p))
return EF
######################## Entropy rates ########################
def entropy_rate(R):
""" Calculates the asymptotic entropy rate for the provided transition
matrix. If the matrix is time-inhomogeneous then we return a function that generates the entropy_rate as a function of n by calculating the systems limiting distribution for each n.
Args:
R (np.ndarray/function): the transition matrix.
Returns:
(float/function): the entropy velocity.
"""
if callable(R):
return lambda n : entropy_rate(R(n))
pst = stp.get_stationary_distribution(R, discrete=True)
RProduct = (R * np.log(R)).sum(axis=0)
return -np.dot(pst, RProduct)
######################## Information Space Objects ########################
class InfoSpace:
""" Information space. Holds collections of paths that traverse states in a
state space as a matrix, and the probability of each of those paths.
Provides functionality on this path space such as providing path entropies.
Attributes:
paths: the matrix of paths.
probabilities: a list of probabilities each path.
num_paths: the number of paths considered.
path_length: the length of the paths considered.
probabilities: a matrix where the (i,j)th element is the probability of observing the first j states of the ith path.
entropies: a list of path entropies for each path
total_probability: the sum of the probabilities of each path.
"""
def __init__(self, paths, p_matrix):
""" Initializes the InfoSpace object.
Args:
paths (np.ndarray): a matrix of paths where the (i,j)th element corresponds to the jth symbol of the ith path.
p_matrix (np.ndarray): a matrix of probabilities where the (i,j)th element corresponds to the probability of observing the ith path for the first j+1 (zero-indexing) symbols.
"""
self._paths = np.array(paths)
# Matrix of probabilities corresponding to the probability for the path
# at each moment.
self._p_matrix = np.array(p_matrix)
if self._p_matrix.size != 0:
# The information space is not empty
self._probabilities = self._p_matrix[:, -1]
else:
# There is zero probability here.
self._probabilities = 0
#------------- Properties -------------#
@property
def paths(self):
return self._paths
@property
def num_paths(self):
return self.paths.shape[0]
@property
def path_length(self):
return self.paths.shape[1]
@property
def probabilities(self):
return self._probabilities
@property
def entropies(self):
""" Returns a list of path entropies for each corresponding path
probability.
"""
try:
return self._entropies
except AttributeError:
# It's never been calculated before
self._entropies = -np.log(self.probabilities)
return self._entropies
@property
def total_probability(self):
try:
return self.probabilities.sum()
except AttributeError:
# Space is empty
return 0
#------------- Static methods -------------#
@staticmethod
def shorten(infospace, path_length, return_index=False):
""" Takes an Information Space and shortens it. Since unique paths of
length n, may be degenerate when truncated to paths of length m < n, we need to check for degeneracies and filter them out in both paths and probabilities.
Args:
infospace (InfoSpace): the information space to shorten.
path_length (int): the path length the information space should be shortened to.
Kwargs:
return_index (bool): returns the indices of the non-degenerate paths for the given path length using the original matrix. Useful for filtering other quantities of interest that may not be attached to this object.
Returns:
(InfoSpace): the shortened InfoSpace.
"""
if path_length < 1:
raise ValueError(f'Invalid path length: {path_length}. Path length must be an integer greater than 0.')
elif path_length > infospace.path_length:
raise ValueError(f'Cannot shorten an InformationSpace from length: {infospace.path_length} -> {path_length}.')
if infospace.paths.size == 0:
# This is an empty information space
return infospace if not return_index else (infospace, [])
# Truncate the path matrix
paths = infospace.paths[:, :path_length]
# Return index will provide the path indices of the non-degenerate paths
_, indices = np.unique(paths, axis=0, return_index=True)
# Sort the indices
indices = sorted(indices)
# Filter out the paths. Not taken from np.unique to ensure the correct
# ordering.
paths = paths[indices, :]
# Truncate the probability matrix
p_matrix = infospace._p_matrix[:, :path_length]
# Filter the probabilities matrix
p_matrix = p_matrix[indices, :]
infospace = InfoSpace(paths, p_matrix)
return infospace if not return_index else infospace, indices
class PartitionedInfoSpace(InfoSpace):
    """ Partitioned Information Space. Constructs a typical set on an
    information space to partition it into a typical information space and an atypical one.
    Holds path probabilities, typical paths, atypical paths, atypical path probabilities and more. This object will use a provided (often sampled) path space to partition the space into a collection of typical and atypical paths depending on the dynamics provided. Will also track other quantities of interest such as the upper and lower bounds on the path probabilities required for the paths to be considered typical.
    Attributes:
        paths: the matrix of paths.
        probabilities: a matrix where the (i,j)th element is the probability of observing the first j states of the ith path.
        num_paths: the number of paths considered.
        path_length: the length of the paths considered.
        entropies: a list of path entropies for each path.
        entropy_rates: a list of the entropy rates for each various path length. This will be the center of the epsilon-neighborhood for path entropies to qualify paths as typical for.
        epsilon: the widths of the neighborhood used for paths to be considered typical for each path length.
        upper/lower: the upper/lower bounds as measured in nats. This means that a path is typical if and only if its path entropy rate is within these bounds.
        typicalities: a matrix where the (i,j)th element is a boolean determining whether the ith path is typical after j+1 steps.
        ts: the typical set.
        ats: the atypical set.
    """

    def __init__(self, entropy_rates, epsilon, paths=None, p_matrix=None, typical_space=None, atypical_space=None):
        """ Generates the PartitionedInfoSpace.
        Args:
            entropy_rates (np.ndarray): a list of the entropy rates for each various path length. This will be the center of the epsilon-neighborhood for path entropies to qualify paths as typical for.
            epsilon (float/list/np.ndarray): the width(s) of the neighborhood used for paths to be considered typical for each path length.
        Kwargs:
            paths (np.ndarray/None): the entire sampled path space, the union of the typical and atypical spaces. If not provided these spaces will be merged to generate it.
            p_matrix (np.ndarray/None): the entire matrix of probabilities for each path and each path length. If not provided, this will be generated by merging the p_matrix of the typical and atypical spaces.
            typical_space (InfoSpace/None): the typical set on this space. If None, partitions the provided path space.
            atypical_space (InfoSpace/None): the atypical set on this space. If None, partitions the provided path space.
        Raises:
            TypeError: if neither (paths, p_matrix) nor (typical_space, atypical_space) is fully provided.
        """
        # Bool if the space simply needs to be partitioned
        must_partition = (paths is None) or (p_matrix is None)
        # Bool if the space simply needs to be merged since it's already been
        # partitioned into a typical and atypical space
        must_union = (typical_space is None) or (atypical_space is None)
        if must_partition and must_union:
            # We need either the paths AND the p_matrix or the
            # typical/atypical spaces to partition/union the spaces
            # respectively.
            raise TypeError('Insufficient information provided to partition/union the Information Space. We need either paths with their probabilities or the already partitioned spaces.')
        if must_partition:
            # Partition the paths and probability matrix into a typical and
            # atypical space.
            # Need to generate the upper/lower bounds for the partitioning
            # of the spaces.
            self._lower = entropy_rates - epsilon
            self._upper = entropy_rates + epsilon
            # Number of observation steps in each path (columns of `paths`).
            # FIX: the original referenced an undefined `path_length` here.
            path_length = paths.shape[1]
            ts_paths, ts_p_matrix = [], []
            ats_paths, ats_p_matrix = [], []
            # FIX: enumerate yields (index, item); the original had the
            # names swapped, making `path` the index and `path_index` the
            # path row.
            for path_index, path in enumerate(paths):
                path_prob = p_matrix[path_index]
                # The path entropy rate for direct comparison with the
                # upper/lower bounds
                path_entropy_rate = -np.log(path_prob[-1]) / path_length
                # FIX: compare against the final (scalar) bounds; the
                # original compared against the whole `self._upper` array.
                is_typical = (
                    (self._lower[-1] <= path_entropy_rate)
                    and (path_entropy_rate <= self._upper[-1])
                )
                if is_typical:
                    ts_paths.append(path)
                    ts_p_matrix.append(path_prob)
                else:
                    ats_paths.append(path)
                    ats_p_matrix.append(path_prob)
            typical_space = InfoSpace(ts_paths, ts_p_matrix)
            atypical_space = InfoSpace(ats_paths, ats_p_matrix)
        elif must_union:
            # Union the path data of the already-partitioned spaces
            ts_empty = (typical_space.paths.size == 0)
            ats_empty = (atypical_space.paths.size == 0)
            if not ts_empty and not ats_empty:
                # Both are nonempty
                paths = np.vstack( (typical_space.paths, atypical_space.paths) )
                p_matrix = np.vstack(
                    (typical_space._p_matrix, atypical_space._p_matrix)
                )
            elif ts_empty:
                # Only the typical_space is empty
                paths = atypical_space.paths
                p_matrix = atypical_space._p_matrix
            else:
                # Only the atypical_space is empty
                paths = typical_space.paths
                p_matrix = typical_space._p_matrix
        ### Storing properties ###
        self._paths = paths
        self._p_matrix = p_matrix
        # Full-length path probabilities live in the last column
        self._probabilities = self._p_matrix[:, -1]
        self._entropy_rates = entropy_rates
        # Generalize the epsilon to a path_length dependent epsilon for
        # potential generalizations in child classes.
        if isinstance(epsilon, list):
            epsilon = np.array(epsilon)
        if not isinstance(epsilon, np.ndarray):
            # We were only provided a float
            epsilon = np.full(self.path_length, epsilon)
        self._epsilon = epsilon
        self._ts = typical_space
        self._ats = atypical_space

    #------------- Properties -------------#

    @property
    def entropy_rates(self):
        # Centers of the typicality neighborhood at each path length
        return self._entropy_rates

    @property
    def epsilon(self):
        # Half-width of the typicality neighborhood at each path length
        return self._epsilon

    @property
    def upper(self):
        try:
            return self._upper
        except AttributeError:
            # It's never been calculated before
            self._upper = self.entropy_rates + self.epsilon
            return self._upper

    @property
    def lower(self):
        try:
            return self._lower
        except AttributeError:
            # It's never been calculated before
            self._lower = self.entropy_rates - self.epsilon
            return self._lower

    @property
    def typicalities(self):
        """ Returns the matrix of typicalities: the (i,j)th element is True
        iff path i is typical after j+1 steps. Lazily computed and cached. """
        try:
            return self._typicalities
        except AttributeError:
            # It's never been calculated before
            ns = np.arange(1, self.path_length + 1)
            # Per-step path entropy rates for every path
            path_entropy_rates = -np.log(self._p_matrix) / ns
            self._typicalities = (
                (self.lower <= path_entropy_rates)
                & (path_entropy_rates <= self.upper)
            )
            return self._typicalities

    @property
    def ats(self):
        # The atypical sub-space
        return self._ats

    @property
    def ts(self):
        # The typical sub-space
        return self._ts

    #------------- Static methods -------------#

    @staticmethod
    def shorten(pinfospace, path_length, return_index=False):
        """ Takes a PartitionedInformationSpace and shortens it. Since unique
        paths of length n may be degenerate when truncated to paths of length m < n, we need to check for degeneracies and filter them out in both paths and probabilities.
        Args:
            pinfospace (PartitionedInfoSpace): the partitioned information space to shorten.
            path_length (int): the path length the information space should be shortened to.
        Kwargs:
            return_index (bool): also return the indices of the non-degenerate paths for the given path length using the original matrix. Useful for filtering other quantities of interest that may not be attached to this object.
        Returns:
            (PartitionedInfoSpace or (PartitionedInfoSpace, list)): the shortened PartitionedInfoSpace, plus the surviving indices when return_index is True.
        """
        # Hold the current information space to access properties
        old_pinfospace = pinfospace
        # Call parent method.
        # Paths and p_matrix will be handled there along with any other
        # properties shared with parent. Sorted indices of non-degenerate
        # paths are calculated there too.
        pinfospace, indices = InfoSpace.shorten(old_pinfospace, path_length, return_index=True)
        # Finish the rest of this object's specific properties.
        # Truncate the entropy_rates
        entropy_rates = old_pinfospace.entropy_rates[:path_length]
        # Truncate the epsilon
        epsilon = old_pinfospace.epsilon[:path_length]
        # Truncate and filter the typicalities matrix; necessary to
        # re-partition the space.
        typicalities = old_pinfospace.typicalities[indices, :path_length]
        ### Partitioning ###
        ts_paths, ts_p_matrix = [], []
        ats_paths, ats_p_matrix = [], []
        paths = pinfospace.paths
        p_matrix = pinfospace._p_matrix
        # A path belongs to the typical set iff it is typical at the final
        # (shortened) step.
        for path_index, is_typical in enumerate(typicalities[:, -1]):
            path = paths[path_index]
            probs = p_matrix[path_index]
            if is_typical:
                ts_paths.append(path)
                ts_p_matrix.append(probs)
            else:
                ats_paths.append(path)
                ats_p_matrix.append(probs)
        # The partitioned spaces
        ts = InfoSpace(ts_paths, ts_p_matrix)
        ats = InfoSpace(ats_paths, ats_p_matrix)
        pinfospace = PartitionedInfoSpace(entropy_rates=entropy_rates, epsilon=epsilon, paths=paths, p_matrix=p_matrix, typical_space=ts, atypical_space=ats)
        # Save the pre-generated property
        pinfospace._typicalities = typicalities
        # FIX: parenthesize the conditional; the original
        # `a if cond else a, indices` always returned a tuple because the
        # trailing comma binds looser than the conditional expression.
        return (pinfospace, indices) if return_index else pinfospace

    @staticmethod
    def partition_space(R, p, paths, epsilon=0.5, return_p=False):
        """ Partitions a path space using the dynamics provided.
        Args:
            R (np.ndarray/function): the transition matrix, time-dependent if provided as a function.
            p (np.ndarray): the initial marginal distribution.
            paths (np.ndarray): the portion of the path space to use.
        Kwargs:
            epsilon (float/np.ndarray): the radius/radii of the epsilon neighborhood to consider paths to be typical within.
            return_p (bool): False, return only the PartitionedInfoSpace, True returns both the PartitionedInfoSpace and a list of the marginal vs time.
        Returns:
            (PartitionedInfoSpace/2-tuple): the PartitionedInfoSpace (PIS) or the PIS and a list of the marginal versus observation step if return_p is True.
        """
        #------------- Data preparation -------------#
        # Convert the transition matrix to add time-dependence as a constant
        # matrix if a constant matrix was provided
        if not callable(R):
            # Not being saved as an attribute since this is not easily
            # recoverable by being saved to a file.
            # Emphasize saving properties that can be saved/loaded.
            oldR = R
            R = lambda n : oldR
        num_paths, path_length = paths.shape
        p_matrix = np.zeros(paths.shape)
        # Initialize the marginal distribution data: the first column is
        # just the initial marginal of each path's starting state.
        for x, path in enumerate(paths):
            p_matrix[x, 0] = p[path[0]]
        # Used for the bounds
        entropy_rates = np.array([
            entropy_rate(R(i))
            for i in range(path_length)
        ])
        # The marginal versus time
        if return_p: p_vs_time = [p]
        #------------- Data gathering -------------#
        ### Quantities versus time ###
        for current_path_length in range(2, path_length + 1):
            # The data (column) index
            i = current_path_length - 1
            # Since the marginals are zero-indexed as are the paths
            step_index = current_path_length - 2
            currentR = R(current_path_length - 1)
            # Propagate the marginal one step and save it separately
            # for quantities like the temporal coarse graining term
            pstep = stp.step(currentR, p)
            ### Path probability calculations ###
            for x, path in enumerate(paths):
                current_state = path[step_index]
                jump_state = path[step_index + 1]
                # Recursive joint-probability calculation to save time
                last_joint = p_matrix[x, i - 1]
                jump_prob = currentR[jump_state, current_state]
                p_matrix[x, i] = last_joint * jump_prob
            if return_p: p_vs_time.append(pstep)
            # Finished data gathering for this iteration, propagate marginal
            # forward in time
            p = pstep
        ### Partitioning ###
        upper = entropy_rates + epsilon
        lower = entropy_rates - epsilon
        ts_paths, ts_p_matrix = [], []
        ats_paths, ats_p_matrix = [], []
        path_entropy_rates = -np.log(p_matrix[:, -1]) / path_length
        # Identify the paths that are typical and atypical
        for path_index, path_entropy_rate in enumerate(path_entropy_rates):
            # Can't create a typicality matrix here since partitioning it
            # would break the ordering.
            # Determines whether this path is ultimately typical
            is_typical = (
                (lower[-1] <= path_entropy_rate)
                and (path_entropy_rate <= upper[-1])
            )
            # FIX: index the path explicitly; the original appended the
            # stale `path` variable left over from the probability loop
            # above, so every path landed in the wrong set with the wrong
            # path data.
            path = paths[path_index]
            probs = p_matrix[path_index]
            if is_typical:
                ts_paths.append(path)
                ts_p_matrix.append(probs)
            else:
                ats_paths.append(path)
                ats_p_matrix.append(probs)
        # The partitioned spaces
        ts = InfoSpace(ts_paths, ts_p_matrix)
        ats = InfoSpace(ats_paths, ats_p_matrix)
        pinfospace = PartitionedInfoSpace(
            entropy_rates=entropy_rates,
            epsilon=epsilon,
            paths=paths,
            p_matrix=p_matrix,
            typical_space=ts,
            atypical_space=ats
        )
        # Set pre-calculated properties
        pinfospace._upper = upper
        pinfospace._lower = lower
        return (pinfospace, p_vs_time) if return_p else pinfospace
######################## Entry ########################

def main():
    """ Quick smoke test: build a small path space, partition it, and print
    a couple of summary quantities. """
    print('info.py')
    ### Testing ###
    initial_dist = stp.rand_p(3)
    dynamics = stp.self_assembly_transition_matrix()
    sample_paths = stp.complete_path_space(3, 4)
    pinfospace = PartitionedInfoSpace.partition_space(dynamics, initial_dist, sample_paths)
    print( f'pinfospace.total_probability: {pinfospace.total_probability}' )
    print(pinfospace.ats.num_paths)

if __name__ == '__main__':
    main()
|
[
"numpy.full",
"stp.rand_p",
"numpy.log",
"stp.self_assembly_transition_matrix",
"numpy.zeros",
"stp.get_stationary_distribution",
"numpy.isclose",
"stp.complete_path_space",
"numpy.array",
"numpy.arange",
"numpy.vstack",
"numpy.dot",
"stp.step",
"numpy.unique"
] |
[((2633, 2663), 'numpy.log', 'np.log', (['(p_filtered / q[p != 0])'], {}), '(p_filtered / q[p != 0])\n', (2639, 2663), True, 'import numpy as np\n'), ((2676, 2705), 'numpy.dot', 'np.dot', (['p_filtered', 'log_ratio'], {}), '(p_filtered, log_ratio)\n', (2682, 2705), True, 'import numpy as np\n'), ((4922, 4971), 'stp.get_stationary_distribution', 'stp.get_stationary_distribution', (['R'], {'discrete': '(True)'}), '(R, discrete=True)\n', (4953, 4971), False, 'import stp\n'), ((25621, 25634), 'stp.rand_p', 'stp.rand_p', (['(3)'], {}), '(3)\n', (25631, 25634), False, 'import stp\n'), ((25643, 25680), 'stp.self_assembly_transition_matrix', 'stp.self_assembly_transition_matrix', ([], {}), '()\n', (25678, 25680), False, 'import stp\n'), ((25693, 25722), 'stp.complete_path_space', 'stp.complete_path_space', (['(3)', '(4)'], {}), '(3, 4)\n', (25716, 25722), False, 'import stp\n'), ((3348, 3373), 'numpy.log', 'np.log', (['(matrix / matrix.T)'], {}), '(matrix / matrix.T)\n', (3354, 3373), True, 'import numpy as np\n'), ((4164, 4179), 'numpy.log', 'np.log', (['(R / R.T)'], {}), '(R / R.T)\n', (4170, 4179), True, 'import numpy as np\n'), ((5028, 5049), 'numpy.dot', 'np.dot', (['pst', 'RProduct'], {}), '(pst, RProduct)\n', (5034, 5049), True, 'import numpy as np\n'), ((6408, 6423), 'numpy.array', 'np.array', (['paths'], {}), '(paths)\n', (6416, 6423), True, 'import numpy as np\n'), ((6568, 6586), 'numpy.array', 'np.array', (['p_matrix'], {}), '(p_matrix)\n', (6576, 6586), True, 'import numpy as np\n'), ((9348, 9391), 'numpy.unique', 'np.unique', (['paths'], {'axis': '(0)', 'return_index': '(True)'}), '(paths, axis=0, return_index=True)\n', (9357, 9391), True, 'import numpy as np\n'), ((22071, 22092), 'numpy.zeros', 'np.zeros', (['paths.shape'], {}), '(paths.shape)\n', (22079, 22092), True, 'import numpy as np\n'), ((687, 719), 'numpy.isclose', 'np.isclose', (['x', '(0)'], {'atol': '(9 * 1e-14)'}), '(x, 0, atol=9 * 1e-14)\n', (697, 719), True, 'import numpy as np\n'), ((1192, 
1201), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (1198, 1201), True, 'import numpy as np\n'), ((3677, 3696), 'stp.step', 'stp.step', (['matrix', 'p'], {}), '(matrix, p)\n', (3685, 3696), False, 'import stp\n'), ((3698, 3707), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (3704, 3707), True, 'import numpy as np\n'), ((4296, 4305), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (4302, 4305), True, 'import numpy as np\n'), ((16041, 16058), 'numpy.array', 'np.array', (['epsilon'], {}), '(epsilon)\n', (16049, 16058), True, 'import numpy as np\n'), ((16173, 16207), 'numpy.full', 'np.full', (['self.path_length', 'epsilon'], {}), '(self.path_length, epsilon)\n', (16180, 16207), True, 'import numpy as np\n'), ((23154, 23175), 'stp.step', 'stp.step', (['currentR', 'p'], {}), '(currentR, p)\n', (23162, 23175), False, 'import stp\n'), ((4992, 5001), 'numpy.log', 'np.log', (['R'], {}), '(R)\n', (4998, 5001), True, 'import numpy as np\n'), ((17286, 17320), 'numpy.arange', 'np.arange', (['(1)', '(self.path_length + 1)'], {}), '(1, self.path_length + 1)\n', (17295, 17320), True, 'import numpy as np\n'), ((24207, 24230), 'numpy.log', 'np.log', (['p_matrix[:, -1]'], {}), '(p_matrix[:, -1])\n', (24213, 24230), True, 'import numpy as np\n'), ((7494, 7520), 'numpy.log', 'np.log', (['self.probabilities'], {}), '(self.probabilities)\n', (7500, 7520), True, 'import numpy as np\n'), ((15131, 15185), 'numpy.vstack', 'np.vstack', (['(typical_space.paths, atypical_space.paths)'], {}), '((typical_space.paths, atypical_space.paths))\n', (15140, 15185), True, 'import numpy as np\n'), ((15215, 15277), 'numpy.vstack', 'np.vstack', (['(typical_space._p_matrix, atypical_space._p_matrix)'], {}), '((typical_space._p_matrix, atypical_space._p_matrix))\n', (15224, 15277), True, 'import numpy as np\n'), ((14274, 14295), 'numpy.log', 'np.log', (['path_prob[-1]'], {}), '(path_prob[-1])\n', (14280, 14295), True, 'import numpy as np\n'), ((17356, 17378), 'numpy.log', 'np.log', (['self._p_matrix'], {}), 
'(self._p_matrix)\n', (17362, 17378), True, 'import numpy as np\n')]
|
from src.modules.helper.get_county_pop import get_county_pop
from typing import Union
def calc_county_per_capita_rate(
    county_name_clean: str,
    total_over_period: Union[int, float],
    _round=True,
    num_of_people: int = 100000,
):
    """ Calculates a rate per ``num_of_people`` residents for a specific county.

    Args:
        county_name_clean (str): County name to get population for. Eg "Dauphin"
        total_over_period (Union[int, float]): Number to calculate rate for. Eg. 1000 deaths.
        _round (bool, optional): Whether to round the result.
        num_of_people (int, optional): Number of people to use as per capita rate. Defaults to 100000.

    Return:
        Union[int, float]: Per capita rate for county.
    """
    population = get_county_pop(county_name_clean)
    rate = total_over_period / population * num_of_people
    if _round:
        rate = round(rate)
    return rate
|
[
"src.modules.helper.get_county_pop.get_county_pop"
] |
[((756, 789), 'src.modules.helper.get_county_pop.get_county_pop', 'get_county_pop', (['county_name_clean'], {}), '(county_name_clean)\n', (770, 789), False, 'from src.modules.helper.get_county_pop import get_county_pop\n')]
|
import setuptools
version = int(setuptools.__version__.split('.')[0])
assert version > 30, "tensorpack installation requires setuptools > 30"
from setuptools import setup
import os
import shutil
import sys
# setup metainfo
CURRENT_DIR = os.path.dirname(__file__)
libinfo_py = os.path.join(CURRENT_DIR, 'tensorpack/libinfo.py')
# Execute libinfo.py to obtain __version__ without importing the package.
# FIX: use context managers so file handles are closed deterministically
# instead of relying on garbage collection.
with open(libinfo_py, "rb") as f:
    exec(f.read())
# produce rst readme for pypi
try:
    import pypandoc
    long_description = pypandoc.convert_file('README.md', 'rst')
except ImportError:
    # pypandoc unavailable: fall back to the raw markdown text
    with open('README.md') as f:
        long_description = f.read()
# configure requirements
reqfile = os.path.join(CURRENT_DIR, 'requirements.txt')
with open(reqfile) as f:
    req = [x.strip() for x in f.readlines()]
setup(
    name='tensorpack',
    version=__version__,
    description='Neural Network Toolbox on TensorFlow',
    long_description=long_description,
    install_requires=req,
    tests_require=['flake8', 'scikit-image'],
    extras_require={
        'all': ['pillow', 'scipy', 'h5py', 'lmdb>=0.92', 'matplotlib',
                'scikit-learn', "tornado; python_version < '3.0'"]
    },
    include_package_data=True,
    package_data={'tensorpack': ['user_ops/Makefile', 'user_ops/*.cc', 'user_ops/*.h']},
)
|
[
"pypandoc.convert_file",
"setuptools.setup",
"os.path.dirname",
"setuptools.__version__.split",
"os.path.join"
] |
[((238, 263), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (253, 263), False, 'import os\n'), ((277, 327), 'os.path.join', 'os.path.join', (['CURRENT_DIR', '"""tensorpack/libinfo.py"""'], {}), "(CURRENT_DIR, 'tensorpack/libinfo.py')\n", (289, 327), False, 'import os\n'), ((589, 634), 'os.path.join', 'os.path.join', (['CURRENT_DIR', '"""requirements.txt"""'], {}), "(CURRENT_DIR, 'requirements.txt')\n", (601, 634), False, 'import os\n'), ((689, 1159), 'setuptools.setup', 'setup', ([], {'name': '"""tensorpack"""', 'version': '__version__', 'description': '"""Neural Network Toolbox on TensorFlow"""', 'long_description': 'long_description', 'install_requires': 'req', 'tests_require': "['flake8', 'scikit-image']", 'extras_require': '{\'all\': [\'pillow\', \'scipy\', \'h5py\', \'lmdb>=0.92\', \'matplotlib\',\n \'scikit-learn\', "tornado; python_version < \'3.0\'"]}', 'include_package_data': '(True)', 'package_data': "{'tensorpack': ['user_ops/Makefile', 'user_ops/*.cc', 'user_ops/*.h']}"}), '(name=\'tensorpack\', version=__version__, description=\n \'Neural Network Toolbox on TensorFlow\', long_description=\n long_description, install_requires=req, tests_require=[\'flake8\',\n \'scikit-image\'], extras_require={\'all\': [\'pillow\', \'scipy\', \'h5py\',\n \'lmdb>=0.92\', \'matplotlib\', \'scikit-learn\',\n "tornado; python_version < \'3.0\'"]}, include_package_data=True,\n package_data={\'tensorpack\': [\'user_ops/Makefile\', \'user_ops/*.cc\',\n \'user_ops/*.h\']})\n', (694, 1159), False, 'from setuptools import setup\n'), ((443, 484), 'pypandoc.convert_file', 'pypandoc.convert_file', (['"""README.md"""', '"""rst"""'], {}), "('README.md', 'rst')\n", (464, 484), False, 'import pypandoc\n'), ((32, 65), 'setuptools.__version__.split', 'setuptools.__version__.split', (['"""."""'], {}), "('.')\n", (60, 65), False, 'import setuptools\n')]
|