content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from datetime import datetime
from num2words import num2words
import gettext as gettext_module, locale, os
__all__ = ('spoken_time', 'absolute_spoken_date', 'relative_spoken_date')
# Reset locale for day & month names
locale.setlocale( locale.LC_ALL, '')
# Load translations for current locale
_language, _encoding = locale.getlocale()
_translation = gettext_module.translation( 'spoken_time',
languages = [_language], fallback=True,
localedir = os.path.join( os.path.dirname(__file__), 'locale'))
_ = _translation.gettext
ngettext = _translation.ngettext
del _encoding, _translation
def spoken_time( t=None, hours=None, am_pm=True, colloquial=True):
""" Localize the time of day.
:param t: timestamp (datetime.time or datetime.datetime), defaults to current time.
:param hours: 12 or 24, defaults to localization specific value.
:param am_pm: Include time of day? (Default: yes)
:param colloquial: Try to sound more natural e.g. 'quarter past noon'.
"""
if not t: t = datetime.now()
try:
hours = hours or int( _('{hours_in_clock}'))
except ValueError:
hours = 12
assert hours == 12 or hours == 24
am_pm = time_of_day( t) if am_pm and hours == 12 else ''
num_hour = (t.hour + hours - 1) % hours + 1
minutes = spoken_minute( t.minute)
next_num_hour = (t.hour + hours) % hours + 1
to_minutes = spoken_minute( 60 - t.minute)
hour = ngettext( "one o'clock",
"{hour} o'clock", num_hour).format( hour=num_hour)
next_hour = ngettext( "one o'clock",
"{hour} o'clock", next_num_hour).format( hour=next_num_hour)
# Ensure correct pronounciation after stripping blanks
num_hour = num2words( num_hour, lang=_language)
next_num_hour = num2words( next_num_hour, lang=_language)
if colloquial:
if t.hour == 11:
next_hour = _('noon')
am_pm = ''
elif t.hour == 12:
hour = _('noon')
am_pm = ''
elif t.hour == 23:
next_hour = _('midnight')
am_pm = ''
elif t.hour == 0:
hour = _('midnight')
am_pm = ''
if t.minute == 0:
text = _("{hour} {am_pm}")
elif t.minute == 15:
text = _("quarter past {num_hour} {am_pm}")
elif t.minute == 30:
text = _("half past {num_hour} {am_pm}")
elif t.minute == 45:
text = _("quarter to {next_num_hour} {am_pm}")
elif t.minute in (40, 50) or t.minute >= 50:
text = _("{to_minutes} to {next_num_hour} {am_pm}")
else: text = _("{minutes} past {hour} {am_pm}")
return text.format( **locals()).strip()
def time_of_day( t=None):
"Localize the part of the day like 'afternoon', 'evening' etc."
if not t: t = datetime.now()
if 0 <= t.hour <= 4: return _( 'at night')
if 4 < t.hour <= 9: return _( 'in the morning')
if 9 < t.hour <= 11: return _( 'before noon')
if 11 < t.hour <= 12: return _( 'around noon')
if 12 < t.hour <= 17: return _( 'in the afternoon')
if 17 < t.hour <= 21: return _( 'in the evening')
if 21 < t.hour <= 23: return _( 'at night')
return '' # Noon or midnight
def absolute_spoken_date( dt=None, format=None, cardinal_day=False):
""" Describe a date in human-understandable words.
:param dt: date (or datetime), defaults to current day.
:param format: Format string with variables {weekday}, {day}, {month} and {year}.
:param cardinal_day: Use an ordinal or cardinal day number.
:return: locaalised formatted string.
"""
if dt is None: dt = datetime.now()
if type( dt) is datetime: dt = dt.date()
format = format or _("{weekday}, the {day} of {month} {year}")
weekday = locale.nl_langinfo( locale.DAY_1 + (dt.weekday() + 1) % 7)
month = locale.nl_langinfo( locale.MON_1 + dt.month - 1)
day = num2words( dt.day, lang=_language,
to='cardinal' if cardinal_day else 'ordinal')
year = dt.year
return format.format( **locals())
def relative_spoken_date( dt=None, preposition=''):
"Describe the time span to a date in human-understandable words"
if dt is None: dt = datetime.now()
if type( dt) is datetime: dt = dt.date()
delta = dt - datetime.now().date()
if delta.days == -2:
return _("day before yesterday").format( days=-delta.days)
if delta.days == -1: return _("yesterday")
if delta.days == 0: return _("today")
if delta.days == 1: return _("tomorrow")
if delta.days == 2: return _("the day after tomorrow")
weekday = locale.nl_langinfo( locale.DAY_1 + (dt.weekday() + 1) % 7)
if -7 <= delta.days < 0:
return _("{preposition} last {weekday}").format(
preposition=preposition, weekday=weekday).strip()
if 0 < delta.days <= 7:
return _("{preposition} next {weekday}").format(
preposition=preposition, weekday=weekday).strip()
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
997,
17,
10879,
1330,
997,
17,
10879,
198,
11748,
651,
5239,
355,
651,
5239,
62,
21412,
11,
36693,
11,
28686,
628,
198,
834,
439,
834,
796,
19203,
19842,
62,
2435,
3256,
705,
48546,
62,
... | 2.402315 | 2,073 |
from redis import Redis
import time
from functools import update_wrapper
from flask import request, g
from flask import Flask, jsonify
from models import Base, Item
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy import create_engine
import json
engine = create_engine('sqlite:///bargainMart.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
app = Flask(__name__)
app = Flask(__name__)
#ADD RATE LIMITING CODE HERE
@app.route('/catalog')
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host = '0.0.0.0', port = 5000)
| [
6738,
2266,
271,
1330,
2297,
271,
198,
11748,
640,
198,
6738,
1257,
310,
10141,
1330,
4296,
62,
48553,
198,
6738,
42903,
1330,
2581,
11,
308,
198,
6738,
42903,
1330,
46947,
11,
33918,
1958,
220,
198,
6738,
4981,
1330,
7308,
11,
9097,
... | 2.910569 | 246 |
from datetime import datetime, timedelta
from flask import current_app
from notifications_utils.timezones import convert_utc_to_bst
from sqlalchemy import asc, desc, func
from app import db
from app.dao.dao_utils import autocommit
from app.models import (
SMS_TYPE,
FactBilling,
ProviderDetails,
ProviderDetailsHistory,
User,
)
def _get_sms_providers_for_update(time_threshold):
"""
Returns a list of providers, while holding a for_update lock on the provider details table, guaranteeing that those
providers won't change (but can still be read) until you've committed/rolled back your current transaction.
if any of the providers have been changed recently, it returns an empty list - it's still your responsiblity to
release the transaction in that case
"""
# get current priority of both providers
q = ProviderDetails.query.filter(
ProviderDetails.notification_type == 'sms',
ProviderDetails.active
).with_for_update().all()
# if something updated recently, don't update again. If the updated_at is null, treat it as min time
if any((provider.updated_at or datetime.min) > datetime.utcnow() - time_threshold for provider in q):
current_app.logger.info(f"Not adjusting providers, providers updated less than {time_threshold} ago.")
return []
return q
@autocommit
def dao_reduce_sms_provider_priority(identifier, *, time_threshold):
"""
Will reduce a chosen sms provider's priority, and increase the other provider's priority by 10 points each.
If either provider has been updated in the last `time_threshold`, then it won't take any action.
"""
amount_to_reduce_by = 10
providers_list = _get_sms_providers_for_update(time_threshold)
if not providers_list:
return
providers = {provider.identifier: provider for provider in providers_list}
other_identifier = get_alternative_sms_provider(identifier)
reduced_provider = providers[identifier]
increased_provider = providers[other_identifier]
# always keep values between 0 and 100
reduced_provider_priority = max(0, reduced_provider.priority - amount_to_reduce_by)
increased_provider_priority = min(100, increased_provider.priority + amount_to_reduce_by)
_adjust_provider_priority(reduced_provider, reduced_provider_priority)
_adjust_provider_priority(increased_provider, increased_provider_priority)
@autocommit
def dao_adjust_provider_priority_back_to_resting_points():
"""
Provided that neither SMS provider has been modified in the last hour, move both providers by 10 percentage points
each towards their defined resting points (set in SMS_PROVIDER_RESTING_POINTS in config.py).
"""
amount_to_reduce_by = 10
time_threshold = timedelta(hours=1)
providers = _get_sms_providers_for_update(time_threshold)
for provider in providers:
target = current_app.config['SMS_PROVIDER_RESTING_POINTS'][provider.identifier]
current = provider.priority
if current != target:
if current > target:
new_priority = max(target, provider.priority - amount_to_reduce_by)
else:
new_priority = min(target, provider.priority + amount_to_reduce_by)
_adjust_provider_priority(provider, new_priority)
@autocommit
def _update_provider_details_without_commit(provider_details):
"""
Doesn't commit, for when you need to control the database transaction manually
"""
provider_details.version += 1
provider_details.updated_at = datetime.utcnow()
history = ProviderDetailsHistory.from_original(provider_details)
db.session.add(provider_details)
db.session.add(history)
| [
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
198,
6738,
42903,
1330,
1459,
62,
1324,
198,
6738,
19605,
62,
26791,
13,
2435,
89,
1952,
1330,
10385,
62,
315,
66,
62,
1462,
62,
65,
301,
198,
6738,
44161,
282,
26599,
1330,
... | 2.973016 | 1,260 |
#! /usr/bin/python3
from default_settings import default_settings
from ultron_cli import UltronCLI
if __name__ == '__main__':
default_settings()
try:
UltronCLI().cmdloop()
except KeyboardInterrupt:
print("\nInterrupted by user.")
print("Goodbye")
exit(0)
| [
2,
0,
220,
1220,
14629,
14,
8800,
14,
29412,
18,
198,
6738,
4277,
62,
33692,
1330,
4277,
62,
33692,
198,
6738,
3789,
1313,
62,
44506,
1330,
46315,
5097,
40,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
22... | 2.467213 | 122 |
import subprocess
import schavott.gfatofasta
import os
import pyfasta
| [
11748,
850,
14681,
198,
11748,
5513,
615,
1252,
13,
70,
17359,
1659,
40197,
198,
11748,
28686,
198,
11748,
12972,
7217,
64,
198
] | 3.181818 | 22 |
from alien_functions import *
| [
6738,
8756,
62,
12543,
2733,
1330,
1635,
628,
628,
198
] | 3.4 | 10 |
from flask import Flask
from flask.ext.login import current_user
from flask.ext.security import Security, SQLAlchemyUserDatastore
from flask_mail import Mail
from flask_debugtoolbar import DebugToolbarExtension
from werkzeug.contrib.fixers import ProxyFix
from flask.ext.admin import Admin, AdminIndexView
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext.principal import Principal, identity_loaded
from flask.ext.assets import Environment
from .utils import wtf
from .utils.assets import bundles
from .utils.errors import add_errorhandlers
from .database import db
from .frontend import frontend_blueprint, on_identity_loaded
mail = Mail()
# Admin interface
| [
6738,
42903,
1330,
46947,
198,
6738,
42903,
13,
2302,
13,
38235,
1330,
1459,
62,
7220,
198,
6738,
42903,
13,
2302,
13,
12961,
1330,
4765,
11,
16363,
2348,
26599,
12982,
27354,
459,
382,
198,
6738,
42903,
62,
4529,
1330,
11099,
198,
6738... | 3.701087 | 184 |
known = set()
unknown = set()
for _ in range(int(input())):
known.add(input().strip().lower())
for _ in range(int(input())):
for word in input().strip().lower().split():
if word not in known:
unknown.add(word)
for word in unknown:
print(word) | [
4002,
796,
900,
3419,
198,
34680,
796,
900,
3419,
198,
198,
1640,
4808,
287,
2837,
7,
600,
7,
15414,
28955,
2599,
198,
220,
220,
220,
1900,
13,
2860,
7,
15414,
22446,
36311,
22446,
21037,
28955,
198,
198,
1640,
4808,
287,
2837,
7,
6... | 2.541284 | 109 |
#
# (c) Copyright 2016 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# return kernel boot arguments for given fcoe interface
from ansible.errors import AnsibleFilterError
| [
2,
198,
2,
357,
66,
8,
15069,
1584,
30446,
15503,
6400,
446,
14973,
7712,
18470,
198,
2,
357,
66,
8,
15069,
2177,
311,
19108,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
153... | 3.931217 | 189 |
from cProfile import label
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import pickle
import random
from saxspy import debyeWaller as dwf
from scipy.interpolate import CubicSpline
from tqdm import tqdm
import saxspy
import umap
if __name__ == '__main__':
Phase = 'lamellar'
d1, d3, exp_data, q = load_data('lamellar')
plot_saxs_umap(d1,exp_data)
plot_saxs_tsne(d1,exp_data)
plot_saxs_pca(d1,exp_data)
plot_saxs(d1[0],q)
plot_saxs_featuremap(d3[0],q)
| [
6738,
269,
37046,
1330,
6167,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
12501,
296,
9150,
1330,
4217,
32,
198,
6738,
1341,
35720,
13,
805,
361,
727,
1... | 2.224806 | 258 |
"""
FactSet Ownership API
FactSet’s Fund Ownership API gives access to both **Holdings** and **Holders** data.<p> Factset's Holdings endpoints gives access to all the underlying securities and their position detils held within a given fund. Fund Types supported include Open-End Mutual Funds, Closed-end Mutual Funds, and Exchange Traded Funds. Security Holders information retrieves all \"holder types\" and their positions across institutions, funds, insiders, and stakeholders.</p><p>The FactSet Ownership and Mutual Funds database collects global equity ownership data for approximately 50,000 institutions, 60,000 unique Mutual Fund portfolios, and 400,000 Insider/Stake holders from around 110 countries. For more details review our [Data Collection](https://my.apps.factset.com/oa/cms/oaAttachment/87e162be-f2d1-4f40-a85b-bfb1b020d270/20079) methodology. </p> # noqa: E501
The version of the OpenAPI document: 1.1.0
Contact: api@factset.com
Generated by: https://openapi-generator.tech
"""
from setuptools import setup, find_packages # noqa: H301
import os
NAME = "fds.sdk.FactSetOwnership"
VERSION = "0.20.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
"urllib3 >= 1.25.3",
"python-dateutil",
"fds.sdk.utils >= 1.0.0",
]
setup(
name=NAME,
version=VERSION,
description="FactSet Ownership client library for Python",
author="FactSet Research Systems",
url="https://github.com/FactSet/enterprise-sdk/tree/main/code/python/FactSetOwnership/v1",
keywords=["FactSet", "API", "SDK"],
python_requires=">=3.6",
install_requires=REQUIRES,
packages=find_packages(exclude=["test", "tests"]),
include_package_data=True,
license="Apache-2.0",
long_description_content_type="text/markdown",
long_description=read("README.md")
)
| [
37811,
198,
220,
220,
220,
19020,
7248,
33147,
1056,
7824,
628,
220,
220,
220,
19020,
7248,
447,
247,
82,
7557,
33147,
1056,
7824,
3607,
1895,
284,
1111,
12429,
26807,
654,
1174,
290,
12429,
26807,
364,
1174,
1366,
29847,
79,
29,
19020,... | 3.063796 | 627 |
# -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test common module."""
from __future__ import print_function
from chromite.cros_bisect import common
from chromite.lib import cros_test_lib
class TestCommitInfo(cros_test_lib.TestCase):
"""Tests CommitInfo class."""
def testEmpty(self):
"""Tests that empty CommitInfo's data members are initialized correctly."""
info = common.CommitInfo()
self.assertEqual(
"CommitInfo(sha1='', title='', score=Score(values=[]), label='', "
"timestamp=0)",
repr(info))
def testAssigned(self):
"""Tests that CommitInfo constrcutor sets up data members correctly."""
info = common.CommitInfo(
sha1='abcdef', title='test', score=common.Score(values=[1]),
label='GOOD', timestamp=100)
self.assertEqual(
"CommitInfo(sha1='abcdef', title='test', score=Score(values=[1.0]), "
"label='GOOD', timestamp=100)",
repr(info))
def testEqual(self):
"""Tests equality of two CommitInfo objects."""
info1 = common.CommitInfo(
sha1='abcdef', title='test', score=common.Score(values=[1, 2, 3]),
label='GOOD', timestamp=100)
info2 = common.CommitInfo(
sha1='abcdef', title='test', score=common.Score(values=[1, 2, 3]),
label='GOOD', timestamp=100)
self.assertEqual(info1, info2)
# In Python 2.7, __ne__() doesn't delegates to "not __eq__()" so that the
# sanity check is necessary.
self.assertFalse(info1 != info2)
def testNotEqual(self):
"""Tests inequality of two CommitInfo objects."""
info1 = common.CommitInfo(
sha1='abcdef', title='test', score=common.Score(values=[1, 2, 3]),
label='GOOD', timestamp=100)
info2 = common.CommitInfo(
sha1='abcdef', title='test', score=common.Score(values=[1, 2]),
label='GOOD', timestamp=100)
self.assertNotEqual(info1, info2)
self.assertFalse(info1 == info2)
def testBool(self):
"""Tests CommitInfo's boolean value conversion.
Only default(empty) CommitInfo's boolean value is False.
"""
info1 = common.CommitInfo()
self.assertTrue(not info1)
self.assertFalse(bool(info1))
info2 = common.CommitInfo(title='something')
self.assertTrue(bool(info2))
self.assertFalse(not info2)
class TestScore(cros_test_lib.TestCase):
"""Tests Score class."""
@staticmethod
def IsEmpty(score):
"""Checks if a score object is empty.
Args:
score: Score object.
Returns:
True if score is empty (default / un-assigned).
"""
return (
'Score(values=[])' == repr(score) and
'Score(values=[], mean=0.000, var=0.000, std=0.000)' == str(score) and
0 == len(score))
def testEmpty(self):
"""Tests that default Score object is empty."""
score = common.Score()
self.assertTrue(self.IsEmpty(score))
def testScoreInit(self):
"""Tests that Score() sets up data member correctly."""
score = common.Score([1, 2, 3])
self.assertEqual('Score(values=[1.0, 2.0, 3.0])', repr(score))
self.assertEqual(
'Score(values=[1.0, 2.0, 3.0], mean=2.000, var=1.000, std=1.000)',
str(score))
self.assertEqual(3, len(score))
def testScoreInitWrongType(self):
"""Tests that Init() can handles wrong input type by resetting itself."""
self.assertTrue(self.IsEmpty(common.Score(['a', 'b'])))
self.assertTrue(self.IsEmpty(common.Score([])))
self.assertTrue(self.IsEmpty(common.Score(1)))
def testScoreUpdate(self):
"""Tests that Update() sets up data member correctly."""
score = common.Score([1, 2, 3])
score.Update([2, 4, 6, 8])
self.assertEqual('Score(values=[2.0, 4.0, 6.0, 8.0])', repr(score))
self.assertEqual(
'Score(values=[2.0, 4.0, 6.0, 8.0], mean=5.000, var=6.667, std=2.582)',
str(score))
self.assertEqual(4, len(score))
def testScoreUpdateWrongType(self):
"""Tests that Update() can handles wrong input type by resetting itself."""
score = common.Score([1, 2, 3])
score.Update(['a', 'b'])
self.assertTrue(self.IsEmpty(score))
def testScoreUpdateEmpty(self):
"""Tests that Update() can handle empty input."""
score = common.Score([1, 2, 3])
score.Update([])
self.assertTrue(self.IsEmpty(score))
def testScoreUpdateNotAList(self):
"""Tests that Update() can handle wrong input type by resetting itself."""
score = common.Score([1, 2, 3])
score.Update(5)
self.assertTrue(self.IsEmpty(score))
def testEqual(self):
"""Tests equality of two Score objects."""
score1 = common.Score([1, 2, 3])
score2 = common.Score([1, 2, 3])
self.assertEqual(score1, score2)
self.assertTrue(score1 == score2)
self.assertFalse(score1 != score2)
score3 = common.Score([3, 2, 1])
self.assertEqual(score1, score3)
self.assertTrue(score1 == score3)
self.assertFalse(score1 != score3)
score4 = common.Score()
score5 = common.Score([])
self.assertEqual(score4, score5)
self.assertTrue(score4 == score5)
self.assertFalse(score4 != score5)
def testNotEqual(self):
"""Tests inequality of two Score objects."""
score1 = common.Score([1, 2])
score2 = common.Score([1, 2, 3])
self.assertNotEqual(score1, score2)
self.assertTrue(score1 != score2)
self.assertFalse(score1 == score2)
score3 = common.Score([1, 3])
self.assertNotEqual(score1, score3)
self.assertTrue(score1 != score3)
self.assertFalse(score1 == score3)
score4 = common.Score()
score5 = common.Score([0])
self.assertNotEqual(score4, score5)
self.assertTrue(score4 != score5)
self.assertFalse(score4 == score5)
def testBool(self):
"""Tests Score's boolean conversion.
Only Score without value is treated as False.
"""
score1 = common.Score()
self.assertTrue(not score1)
self.assertFalse(bool(score1))
score2 = common.Score([0])
self.assertTrue(bool(score2))
self.assertFalse(not score2)
class ClassAOptionsChecker(common.OptionsChecker):
"""Used to test common.OptionsChecker."""
REQUIRED_ARGS = ('a', )
class ClassBOptionsChecker(ClassAOptionsChecker):
"""Used to test common.OptionsChecker."""
REQUIRED_ARGS = ClassAOptionsChecker.REQUIRED_ARGS + ('b', )
class TestOptionsChecker(cros_test_lib.TestCase):
"""Tests OptionsChecker class."""
def testInit(self):
"""Tests constructor with OptionChecker."""
options_e = cros_test_lib.EasyAttr()
options_a = cros_test_lib.EasyAttr(a='a')
options_b = cros_test_lib.EasyAttr(b='b')
options_ab = cros_test_lib.EasyAttr(a='a', b='b')
options_abc = cros_test_lib.EasyAttr(a='a', b='b', c='c')
# Expect no exceptions.
common.OptionsChecker(options_e)
common.OptionsChecker(options_abc)
ClassAOptionsChecker(options_a)
ClassBOptionsChecker(options_ab)
ClassBOptionsChecker(options_abc)
# Missing 'a' argument.
with self.assertRaises(common.MissingRequiredOptionsException) as cm:
ClassAOptionsChecker(options_b)
exception_message = cm.exception.message
self.assertTrue('Missing command line' in exception_message)
self.assertTrue('ClassAOptionsChecker' in exception_message)
self.assertTrue("['a']" in exception_message)
# Missing derived 'a' argument.
with self.assertRaises(common.MissingRequiredOptionsException) as cm:
ClassBOptionsChecker(options_b)
exception_message = cm.exception.message
self.assertTrue('Missing command line' in exception_message)
self.assertTrue('ClassBOptionsChecker' in exception_message)
self.assertTrue("['a']" in exception_message)
def testSanityCheckOptions(self):
"""Like testInit, but just call SanityCheckOptions()."""
options_e = cros_test_lib.EasyAttr()
options_a = cros_test_lib.EasyAttr(a='a')
options_b = cros_test_lib.EasyAttr(b='b')
options_ab = cros_test_lib.EasyAttr(a='a', b='b')
options_abc = cros_test_lib.EasyAttr(a='a', b='b', c='c')
self.assertTrue(common.OptionsChecker.SanityCheckOptions(options_e))
self.assertTrue(common.OptionsChecker.SanityCheckOptions(options_abc))
self.assertTrue(ClassAOptionsChecker.SanityCheckOptions(options_a))
self.assertTrue(ClassBOptionsChecker.SanityCheckOptions(options_ab))
self.assertTrue(ClassBOptionsChecker.SanityCheckOptions(options_abc))
# Missing 'a' argument.
with self.assertRaises(common.MissingRequiredOptionsException) as cm:
ClassAOptionsChecker.SanityCheckOptions(options_b)
exception_message = cm.exception.message
self.assertTrue('Missing command line' in exception_message)
self.assertTrue('ClassAOptionsChecker' in exception_message)
self.assertTrue("['a']" in exception_message)
# Missing derived 'a' argument.
with self.assertRaises(common.MissingRequiredOptionsException) as cm:
ClassBOptionsChecker.SanityCheckOptions(options_b)
exception_message = cm.exception.message
self.assertTrue('Missing command line' in exception_message)
self.assertTrue('ClassBOptionsChecker' in exception_message)
self.assertTrue("['a']" in exception_message)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
2177,
383,
18255,
1505,
7294,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
46... | 2.648672 | 3,501 |
import yaml
| [
11748,
331,
43695,
628
] | 3.25 | 4 |
from .__funcs__ import tortuosity_geometric_2d | [
6738,
764,
834,
12543,
6359,
834,
1330,
7619,
84,
16579,
62,
469,
16996,
62,
17,
67
] | 2.875 | 16 |
from datetime import date, datetime
import pytz
from tests.fixtures.simple import Schema
from tests.fixtures.simple.schema import Item, Size
| [
6738,
4818,
8079,
1330,
3128,
11,
4818,
8079,
198,
198,
11748,
12972,
22877,
198,
198,
6738,
5254,
13,
69,
25506,
13,
36439,
1330,
10011,
2611,
198,
6738,
5254,
13,
69,
25506,
13,
36439,
13,
15952,
2611,
1330,
9097,
11,
12849,
628
] | 3.512195 | 41 |
#!/usr/bin/env python
import logging
import collections
import traceback
import Queue
import threading
# Inspired by: https://stackoverflow.com/questions/2829329
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
18931,
198,
11748,
17268,
198,
11748,
12854,
1891,
198,
11748,
4670,
518,
198,
11748,
4704,
278,
628,
198,
2,
45827,
416,
25,
3740,
1378,
25558,
2502,
11125,
13,
785,
14,
6... | 3.458333 | 48 |
from contextlib import contextmanager
import pytest
from mando import Program
program = Program('example.py', '1.0.10')
program.option(
"-f", "--foo", dest='foo', default='bar', completer=NoopCompleter,
help="Real programmers don't comment their code. \
If it was hard to write, it should be hard to read."
)
program.add_subprog('sub')
program.sub.option(
"-i", "--inc", dest='inc', type=int, default=0,
help="Some help text."
)
@program.command
def getopt(name):
'''
:param name: Name of option to return.
'''
# also allows for: Script.foo
return getattr(program, name)
@program.sub.command
def powOfSub(b, e):
'''
:param b: Base.
:param e: Exponent.
'''
return int(b) ** int(e) + program.inc
@program.sub.command('powOfSub2')
def powOfSub2_impl(b, e):
'''
:param b: Base.
:param e: Exponent.
'''
return int(b) ** int(e) - program.inc
@program.command
@program.command
def vara(pos, foo, spam=24, *vars):
'''
:param vars: Yeah, you got it right, the variable arguments.
'''
pass
@program.command
def another(baw, owl=42, json=False, tomawk=None):
'''This yet another example showcasing the power of Mando!
:param baw: That's the positional argument, obviously.
:param -o, --owl: Yeah, I know, this is too much.
:param -j, --json: In case you want to pipe it through something.
:param -t, --tomawk: Well, in this case -t isn't for time.'''
pass
@program.command('alias')
@program.command
@program.command('more-power')
def more_power(x, y=2):
'''This one really shows off complete power.
:param x <int>: Well, the base.
:param -y <int>: You got it, the exponent.'''
return x ** y
@program.command
def repeat(what, times=10):
'''Getting types from annotations.
:param what: what to repeat.
:param -t, --times: how many times to repeat.'''
return what * times
# version-agnostic way of setting annotations.
# Equivalent to 'repeat(what: str, times: int=10)'
repeat.__annotations__ = {'what': str, 'times': int}
@program.command('more-powerful')
@program.arg('x', type=int, completer=NoopCompleter)
@program.arg('y', '-y', '--epsilon', type=int)
@program.command
@program.arg('x', type=int)
@program.arg('y', type=int)
def overriding(x, y=4):
'''Yoo an override test.
:param x <str>: This is so wroong!!! Let's hope it gets overridden by @arg.
:param -y <metavar>: This too!!'''
return x - y
@program.command
def dashes(a, b=5):
'''Usual command help.
:param a <int>: A help obviously.
:param b <int>: Yooo.'''
return a ** b
@program.command
GENERIC_COMMANDS_CASES = [
('goo 2', [['2', False, None]]),
('goo 2 --verbose', [['2', True, None]]),
('goo 2 --bar 9', [['2', False, '9']]),
('goo 2 --verbose --bar 8', [['2', True, '8']]),
('vara 2 3', [['2', '3', 24]]),
('vara 2 3 --spam 8', [['2', '3', 8]]),
# Unfortunately this is an argparse "bug". See:
# http://bugs.python.org/issue15112
# You cannot intermix positional and optional arguments for now.
#('vara 1 2 --spam 8 9 8', ['1', '2', 8, '9', '8']),
('vara 1 2 4 5 --spam 8', [['1', '2', 8, '4', '5']]),
('vara --spam 8 1 2 4 5', [['1', '2', 8, '4', '5']]),
('vara 9 8 1 2 3 4', [['9', '8', 24, '1', '2', '3', '4']]),
('another 2', [['2', 42, False, None]]),
('another 2 -j', [['2', 42, True, None]]),
('another 2 -t 1 -o 3', [['2', 3, False, '1']]),
('another 2 --owl 89 --tomawk 98', [['2', 89, False, '98']]),
('another 2 --json -o 1', [['2', 1, True, None]]),
('another 3 --owl 8 --json --tomawk 8', [['3', 8, True, '8']]),
('alias 5 -b 9', [['5', 9], 'analiased']),
('more-power 9 -y 2', [[9, 2], 'more_power']),
('more-powerful 9 -y 3', [[9, 3], 'more_power_2']),
('more-powerful 9 --epsilon 3', [[9, 3], 'more_power_2']),
('overriding 2', [[2, 4]]),
('overriding 2 -y 7', [[2, 7]]),
('dashes 2', [[2, 5]]),
('dashes 8 -b 7', [[8, 7]]),
('append', [[[]]]),
('append --acc 2', [[['2']]]),
('append --acc 2 --acc 3', [[['2', '3']]]),
]
@pytest.mark.parametrize('args,rest', GENERIC_COMMANDS_CASES)
PROGRAM_EXECUTE_CASES = [
('power 2', 4),
('power 2 -y 4', 16),
('more-power 3', 9),
('more-power 3 -y 4', 81),
('more-powerful 4 -y 2', 16),
('more-powerful 4 --epsilon 2', 16),
('overriding 2', -2),
('overriding 2 -y 7', -5),
('dashes 2', 32),
('dashes 7 -b 3', 343),
('repeat a', 'aaaaaaaaaa'),
('repeat a -t 5', 'aaaaa'),
]
@pytest.mark.parametrize('args,result', PROGRAM_EXECUTE_CASES)
@contextmanager
PROGRAM_EXCEPT_CASES = [
('repeat a', does_not_raise()),
('repeat a -t blah', pytest.raises(SystemExit)),
]
@pytest.mark.parametrize('args,expectation', PROGRAM_EXCEPT_CASES)
PROGRAM_OPTIONS_CASES = [
(' getopt foo', 'bar'),
(' -f xyz getopt foo', 'xyz'),
('--foo xyz getopt foo', 'xyz'),
(' sub powOfSub 2 3', 8),
(' -f xyz sub -i 1 powOfSub 2 3', 9),
('--foo xyz sub --inc 2 powOfSub 2 3', 10),
(' sub powOfSub2 2 3', 8),
(' -f xyz sub -i 1 powOfSub2 2 3', 7),
('--foo xyz sub --inc 2 powOfSub2 2 3', 6),
]
@pytest.mark.parametrize('args,result', PROGRAM_OPTIONS_CASES)
| [
6738,
4732,
8019,
1330,
4732,
37153,
198,
11748,
12972,
9288,
198,
6738,
6855,
78,
1330,
6118,
628,
198,
23065,
796,
6118,
10786,
20688,
13,
9078,
3256,
705,
16,
13,
15,
13,
940,
11537,
628,
198,
198,
23065,
13,
18076,
7,
198,
220,
... | 2.324114 | 2,314 |
import collections
# using the queue | [
11748,
17268,
198,
220,
220,
220,
220,
198,
220,
220,
220,
1303,
1262,
262,
16834
] | 3 | 15 |
import contextlib
import logging
import os
from urllib.error import URLError
from urllib.parse import urlencode, urlparse
# from django.contrib.auth import get_user_model
from django.contrib.gis.db import models
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
from girder_utils.files import field_file_to_local_path
from model_utils.managers import InheritanceManager
from s3_file_field import S3FileField
from rgd.utility import (
_link_url,
compute_checksum_file,
compute_checksum_url,
patch_internal_presign,
precheck_fuse,
safe_urlopen,
url_file_to_fuse_path,
url_file_to_local_path,
uuid_prefix_filename,
)
# from .. import tasks
from .collection import Collection
from .constants import DB_SRID
from .mixins import TaskEventMixin
logger = logging.getLogger(__name__)
class ModifiableEntry(models.Model):
"""A base class for models that need to track modified datetimes and users."""
modified = models.DateTimeField(editable=False, help_text='The last time this entry was saved.')
created = models.DateTimeField(editable=False, help_text='When this was added to the database.')
# creator = models.ForeignKey(
# get_user_model(), on_delete=models.DO_NOTHING, related_name='creator'
# )
# modifier = models.ForeignKey(
# get_user_model(), on_delete=models.DO_NOTHING, related_name='modifier'
# )
class SpatialEntry(models.Model):
    """Common model to all geospatial data entries.
    This is intended to be used in a mixin manner.
    """

    spatial_id = models.AutoField(primary_key=True)
    # Datetime of creation for the dataset
    acquisition_date = models.DateTimeField(null=True, default=None, blank=True)
    # This can be used with GeoDjango's geographic database functions for spatial indexing
    footprint = models.GeometryField(srid=DB_SRID)
    outline = models.GeometryField(srid=DB_SRID)
    instrumentation = models.CharField(
        max_length=100,
        null=True,
        blank=True,
        help_text='The instrumentation used to acquire these data.',
    )
    # InheritanceManager lets queries on SpatialEntry return subclass instances.
    objects = InheritanceManager()

    # BUG FIX: ``@property`` was stacked twice here; the outer property wrapped
    # the inner property object as its fget, so attribute access raised
    # TypeError ("'property' object is not callable"). A third, dangling
    # ``@property`` followed this method and would have decorated the next
    # class definition, turning that class into a property object. Both
    # spurious decorators are removed.
    @property
    def subentry_name(self):
        """Return the name from the subentry model.

        Assumes a ``subentry`` attribute/descriptor is provided elsewhere
        (not visible in this excerpt) — TODO confirm.
        """
        return self.subentry.name
class ChecksumFile(ModifiableEntry, TaskEventMixin):
    """The main class for user-uploaded files.

    This has support for manually uploading files or specifying a URL to a file
    (for example in an existing S3 bucket).

    NOTE(review): ``FileSourceType`` used below is not defined in this
    excerpt — presumably an integer-choices enum defined or imported
    elsewhere in the module (members FILE_FIELD and URL); confirm.
    """
    name = models.CharField(max_length=1000, blank=True)
    checksum = models.CharField(max_length=128)  # sha512
    validate_checksum = models.BooleanField(
        default=False
    )  # a flag to validate the checksum against the saved checksum
    # Result of the most recent checksum validation.
    last_validation = models.BooleanField(default=True)
    # Optional owning collection; SET_NULL keeps the file if the collection is deleted.
    collection = models.ForeignKey(
        Collection,
        on_delete=models.SET_NULL,
        related_name='%(class)ss',
        related_query_name='%(class)ss',
        null=True,
        blank=True,
    )
    # Discriminates between an uploaded file (``file``) and a remote ``url``.
    type = models.IntegerField(choices=FileSourceType.choices, default=FileSourceType.FILE_FIELD)
    file = S3FileField(null=True, blank=True, upload_to=uuid_prefix_filename)
    url = models.TextField(null=True, blank=True)
    # Celery-style post-save hooks consumed by TaskEventMixin (currently disabled).
    task_funcs = (
        # tasks.task_checksum_file_post_save,
    )
    def get_checksum(self):
        """Compute a new checksum without saving it.

        Raises NotImplementedError for any ``type`` other than FILE_FIELD/URL.
        """
        if self.type == FileSourceType.FILE_FIELD:
            return compute_checksum_file(self.file)
        elif self.type == FileSourceType.URL:
            return compute_checksum_url(self.url)
        else:
            raise NotImplementedError(f'Type ({self.type}) not supported.')
    def yield_local_path(self, vsi=False):
        """Create a local path for the file to be accessed.
        This will first attempt to use httpfs to FUSE mount the file's URL.
        If FUSE is unavailable, this will fallback to a Virtual File Systems URL (``vsicurl``) if the ``vsi`` option is set. Otherwise, this will
        download the entire file to local storage.
        Parameters
        ----------
        vsi : bool
            If FUSE fails, fallback to a Virtual File Systems URL. See
            ``get_vsi_path``. This is especially useful if the file
            is being utilized by GDAL and FUSE is not set up.
        """
        if self.type == FileSourceType.URL and precheck_fuse(self.get_url()):
            return url_file_to_fuse_path(self.get_url(internal=True))
        elif vsi and self.type != FileSourceType.FILE_FIELD:
            logger.info('`yield_local_path` falling back to Virtual File System URL.')
            return self.yield_vsi_path(internal=True)
        # Fallback to loading entire file locally
        logger.info('`yield_local_path` falling back to downloading entire file to local storage.')
        if self.type == FileSourceType.FILE_FIELD:
            return field_file_to_local_path(self.file)
        elif self.type == FileSourceType.URL:
            return url_file_to_local_path(self.url)
    def get_url(self, internal=False):
        """Get the URL of the stored resource.

        Implicitly returns None for any ``type`` other than FILE_FIELD/URL.

        Parameters
        ----------
        internal : bool
            In most cases this URL will be accessible from anywhere. In some
            cases, this URL will only be accessible from within the container.
            This flag is for use with internal processes to make sure the host
            is correctly set to ``minio`` when needed. See
            ``patch_internal_presign`` for more details.
        """
        if self.type == FileSourceType.FILE_FIELD:
            if internal:
                with patch_internal_presign(self.file):
                    return self.file.url
            else:
                return self.file.url
        elif self.type == FileSourceType.URL:
            return self.url
    # NOTE(review): ``data_link`` is not defined in this excerpt; this line
    # presumably belongs to a ``data_link`` admin-display method defined
    # elsewhere (legacy Django admin ``allow_tags`` convention) — confirm,
    # otherwise this raises NameError at class-definition time.
    data_link.allow_tags = True
    def get_vsi_path(self, internal=False) -> str:
        """Return the GDAL Virtual File Systems [0] URL.
        This currently formulates the `/vsicurl/...` URL [1] for internal and
        external files. This is assuming that both are read-only. External
        files can still be from private S3 buckets as long as `self.url`
        redirects to a presigned S3 URL [1]:
            > Starting with GDAL 2.1, `/vsicurl/` will try to query directly
            redirected URLs to Amazon S3 signed URLs during their validity
            period, so as to minimize round-trips.
        This URL can be used for both GDAL and Rasterio [2]:
            > To help developers switch [from GDAL], Rasterio will accept
            [vsi] identifiers and other format-specific connection
            strings, too, and dispatch them to the proper format drivers
            and protocols.
        `/vsis3/` could be used for...
            * read/write access
            * directory listing (for sibling files)
        ...but is a bit more of a challenge to setup. [2]
        [0] https://gdal.org/user/virtual_file_systems.html
        [1] https://gdal.org/user/virtual_file_systems.html#vsicurl-http-https-ftp-files-random-access
        [2] https://gdal.org/user/virtual_file_systems.html#vsis3-aws-s3-files
        [3] https://rasterio.readthedocs.io/en/latest/topics/switch.html?highlight=vsis3#dataset-identifiers
        """
        url = self.get_url(internal=internal)
        if url.startswith('s3://'):
            s3_path = url.replace('s3://', '')
            vsi = f'/vsis3/{s3_path}'
        else:
            # Skip HEAD requests and directory listings to minimize round-trips.
            gdal_options = {
                'url': url,
                'use_head': 'no',
                'list_dir': 'no',
            }
            vsi = f'/vsicurl?{urlencode(gdal_options)}'
        logger.info(f'vsi URL: {vsi}')
        return vsi
    @contextlib.contextmanager
    def yield_vsi_path(self, internal=False):
        """Wrap ``get_vsi_path`` in a context manager."""
        yield self.get_vsi_path(internal=internal)
class SpatialAsset(SpatialEntry):
    """Any spatially referenced asset set.
    This can be any collection of files that have a spatial reference and are
    not explictly handled by the other SpatialEntry subtypes. For example, this
    model can be used to hold a collection of PDF documents or slide decks that
    have a georeference.
    """
    # Human-readable label for this asset set (optional).
    name = models.CharField(max_length=1000, blank=True)
    # Free-form description (optional).
    description = models.TextField(null=True, blank=True)
    # The underlying files; many-to-many so a file may belong to several assets.
    files = models.ManyToManyField(ChecksumFile)
| [
11748,
4732,
8019,
198,
11748,
18931,
198,
11748,
28686,
198,
6738,
2956,
297,
571,
13,
18224,
1330,
37902,
2538,
81,
1472,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
2956,
11925,
8189,
11,
19016,
29572,
198,
198,
2,
422,
42625,
14208,... | 2.534877 | 3,369 |
"""Users forms."""
# Django
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import authenticate
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from .models import *
| [
37811,
14490,
5107,
526,
15931,
198,
2,
37770,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
23914,
1330,
4819... | 3.679012 | 81 |
"""
猫眼电影影评,以复联4为例,先把时间节点都爬下来,再组合成url用线程池爬,这样效率更高
"""
import arrow
import requests
import looter as lt
from pprint import pprint
from pathlib import Path
from concurrent import futures
domain = 'http://m.maoyan.com'
movie_id = '248172' # 复仇者联盟4
total_timestamps = []
total_items = []
# Entry point: collect the comment timestamps first, then crawl all comment
# pages concurrently and dump the results to CSV.
if __name__ == '__main__':
    # NOTE(review): get_timestamps() and crawl() are not defined in this
    # excerpt — presumably defined earlier in the file; confirm.
    get_timestamps()
    start_times = Path('maoyan_comment_timestamps.txt').read_text().split('\n')
    # One URL per timestamp; each fetches a page of comments for the movie.
    tasklist = [f'{domain}/mmdb/comments/movie/{movie_id}.json?_v_=yes&offset=0&startTime={t}' for t in start_times]
    # 50 worker threads for I/O-bound HTTP fetching.
    with futures.ThreadPoolExecutor(50) as executor:
        executor.map(crawl, tasklist)
    lt.save(total_items, name='maoyan_comments.csv', no_duplicate=True)
| [
37811,
198,
163,
234,
104,
40367,
120,
18796,
113,
37605,
109,
37605,
109,
46237,
226,
171,
120,
234,
20015,
98,
13783,
235,
164,
223,
242,
19,
10310,
118,
160,
122,
233,
171,
120,
234,
17739,
230,
162,
232,
232,
33768,
114,
29785,
... | 1.936111 | 360 |
from ..app.security import create_access_token
from ...utilities.utils import disable_logging
from cobald.daemon.core.config import load
from pathlib import Path
from typing import List
import logging
import typer
| [
6738,
11485,
1324,
13,
12961,
1330,
2251,
62,
15526,
62,
30001,
198,
6738,
2644,
315,
2410,
13,
26791,
1330,
15560,
62,
6404,
2667,
198,
6738,
22843,
1940,
13,
6814,
7966,
13,
7295,
13,
11250,
1330,
3440,
198,
198,
6738,
3108,
8019,
1... | 3.857143 | 56 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtMultimediaWidgets import QVideoWidget
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
12417,
27703,
13,
9019,
6,
201,
198,
2,
201,
198,
2,
15622,
416,
25,
9485,
48,
83,
20,
12454,
24... | 2.804196 | 143 |
import numpy as np
from sklearn.metrics.pairwise import manhattan_distances as dist
| [
11748,
299,
32152,
355,
45941,
201,
198,
6738,
1341,
35720,
13,
4164,
10466,
13,
24874,
3083,
1330,
582,
12904,
62,
17080,
1817,
355,
1233,
201,
198,
201,
198,
201,
198,
201,
198
] | 2.875 | 32 |
# Copyright 2011 David W. Hogg.
# All rights reserved.
# BUGS:
# - Brittle code; must be run from directory client/examples; dies if APOD reformats urls or html.
# - Runs client using os.system() instead of importing client and executing it; see if False block at end.
from __future__ import print_function
import re
import os
import sys
import urllib as url
from astrometry.net.client import Client
# Entry point: walk every APOD date from 1996 through 2012 and submit each
# day's image URL to the Astrometry.net web service.
if __name__ == '__main__':
    import optparse
    parser = optparse.OptionParser()
    parser.add_option('--server', dest='server', default='http://supernova.astrometry.net/api/',
                      help='Set server base URL (eg, http://nova.astrometry.net/api/)')
    parser.add_option('--apikey', '-k', dest='apikey',
                      help='API key for Astrometry.net web service; if not given will check AN_API_KEY environment variable')
    opt,args = parser.parse_args()
    if opt.apikey is None:
        # try the environment
        opt.apikey = os.environ.get('AN_API_KEY', None)
    if opt.apikey is None:
        parser.print_help()
        print()
        print('You must either specify --apikey or set AN_API_KEY')
        sys.exit(-1)
    # When True, use the imported Client; the else-branch shells out to
    # ../client.py via os.system (see the BUGS note at the top of the file).
    useclient = True
    if useclient:
        client = Client(apiurl=opt.server)
        client.login(opt.apikey)
    for year in range(1996, 2013):
        for month in range(1, 13):
            print("apod.py __main__: working on month %d-%02d" % (year, month))
            # Day 1..31 for every month; invalid dates presumably yield None
            # from get_apod_image_url — TODO confirm.
            for day in range(1, 32):
                # NOTE(review): get_apod_image_url() and apod_url() are not
                # defined in this excerpt — presumably earlier in the file.
                iurl = get_apod_image_url(apod_url(month, day, year))
                if iurl is None:
                    continue
                if useclient:
                    client.url_upload(iurl)
                    print(client.submission_images(1))
                else:
                    cmd = "python ../client.py --server %s --apikey %s --urlupload \"%s\"" % (opt.server, opt.apikey, iurl)
                    print(cmd)
                    os.system(cmd)
| [
2,
15069,
2813,
3271,
370,
13,
367,
10332,
13,
198,
2,
1439,
2489,
10395,
13,
198,
198,
2,
347,
7340,
50,
25,
198,
2,
532,
1709,
1206,
2438,
26,
1276,
307,
1057,
422,
8619,
5456,
14,
1069,
12629,
26,
10564,
611,
3486,
3727,
4975,
... | 2.216 | 875 |
# Map key codes to the characters they produce: letters, digits, and a
# handful of punctuation/control keys. The original hand-written literal
# contained duplicate entries (13 -> '\n' appeared twice); comprehensions
# over the contiguous ASCII ranges are equivalent and duplicate-free.
nameDict = {code: chr(code) for code in range(65, 91)}           # 'A'-'Z'
nameDict.update((code, chr(code)) for code in range(97, 123))    # 'a'-'z'
nameDict.update((code, chr(code)) for code in range(48, 58))     # '0'-'9'
nameDict.update({
    33: '!', 32: ' ', 190: '.', 13: '\n', 8: '@',
    222: "'", 189: '-', 191: '?', 188: ',',
})

# Per-character state, all initialized to -32768. The key set is exactly the
# set of characters nameDict can produce (the original literal listed the
# same 71 characters, with a redundant duplicate '\n' entry).
stateDict = dict.fromkeys(nameDict.values(), -32768)
| [
2,
58,
634,
11,
1149,
60,
198,
3672,
35,
713,
796,
8633,
26933,
7,
2996,
11,
705,
32,
33809,
7,
2791,
11,
705,
33,
33809,
7,
3134,
11,
705,
34,
33809,
7,
3104,
11,
705,
35,
33809,
357,
3388,
11,
705,
36,
33809,
357,
2154,
11,
... | 1.88627 | 976 |
"""
See README.md for help configuring and running this script.
"""
import os
import sys
import datetime
from urllib.parse import urlparse
import http.client
import json
import jwt
from dotenv import load_dotenv
########
# KEY CONFIGURATION - Put your API Key info here
# added on 2021-12-08 by shawn.becker@angel.com
# Load key configuration from the environment (.env file via python-dotenv).
load_dotenv()
ISSUER_ID = os.environ.get('ISSUER_ID')
KEY_ID = os.environ.get('KEY_ID')
PRIVATE_KEY_PATH = os.environ.get('PRIVATE_KEY_PATH')
# NOTE(review): os.environ.get returns None for unset variables, and the
# string concatenations below then raise TypeError — consider failing fast
# with a clear error message instead.
print("ISSUER_ID " + ISSUER_ID)
print("KEY_ID " + KEY_ID)
print("PRIVATE_KEY_PATH " + PRIVATE_KEY_PATH)
########
# GET METRICS INSIGHTS - This is where the actual API interaction happens
def get_metrics_insights(app_id):
    """
    This function does all the real work. It:
    1. Creates an Authorization header value with bearer token (JWT)
    2. Gets power & performance metrics for the app by app ID
    3. Parse insights and relevant metrics
    4. Pretty-print aggregate metrics datasets
    If anything goes wrong during this process the error is reported and the script
    exists with a non-zero status.

    NOTE(review): relies on helpers not visible in this excerpt:
    ``make_http_request``, ``die``, and the color helpers ``red``/``green``/
    ``blue`` — presumably defined elsewhere in this file; confirm.
    """
    # 1. Create an Authorization header value with bearer token (JWT)
    # The token is set to expire in 5 minutes, and is used for all App Store
    # Connect API calls.
    auth_header = f"Bearer {create_token()}"
    print("Find aggregate metrics datasets.")
    # 2. Gets power & performance metrics for the app by app ID
    # If the app or insights are not found, report an error and exit.
    metrics_response = make_http_request(
        "GET",
        f"https://api.appstoreconnect.apple.com/v1/apps/{app_id}/perfPowerMetrics",
        headers={
            "Authorization": auth_header,
            "Accept": "application/vnd.apple.xcode-metrics+json"
        }
    )
    # NOTE(review): the response body is parsed twice; json.loads could be
    # called once and the result reused.
    product_data = json.loads(metrics_response)['productData']
    insights = json.loads(metrics_response)['insights']
    if insights:
        regressions = insights["regressions"]
    else:
        die(1, f"no regression insight found with app ID {app_id}")
    for regression in regressions:
        print(red("\ninsight regression:\n" + blue(regression["summaryString"])))
    # 3. Parse insights and relevant metrics and datasets
    # If no metrics datasets are found, report an error and exit.
    # NOTE(review): the lines from here on sit OUTSIDE the loop above, so only
    # the LAST regression is parsed and printed in detail — confirm whether
    # this block was meant to be inside the loop.
    metric_name = regression["metric"]
    target_datasets = regression["populations"]
    parsed_metric = None
    for report in product_data:
        for category in report["metricCategories"]:
            for metric in category["metrics"]:
                if metric["identifier"] == metric_name:
                    parsed_metric = metric
    parsed_datasets = list()
    if parsed_metric:
        unit = parsed_metric["unit"]["displayName"]
        for target_dataset in target_datasets:
            device = target_dataset["device"]
            percentile = target_dataset["percentile"]
            for dataset in parsed_metric["datasets"]:
                criteria = dataset["filterCriteria"]
                if criteria["device"] == device and criteria["percentile"] == percentile:
                    parsed_datasets.append(dataset)
    else:
        # NOTE(review): ``unit`` is only bound in the if-branch above; if
        # ``die`` ever returns instead of exiting, the print below would
        # raise NameError — confirm die() terminates the process.
        die(1, "no metrics datasets matching the regression insight")
    # 4. Pretty-print aggregate metrics datasets
    #
    print(red("============================================================================="))
    for dataset in parsed_datasets:
        criteria = dataset["filterCriteria"]
        points = dataset["points"]
        print(green("\n %s (%s), %s, %s"%(
            metric_name,
            unit,
            criteria["deviceMarketingName"],
            criteria["percentile"])))
        version_row = "version | "
        value_row = "value | "
        margin_row = "error margin | "
        # Pad each column so version/value/margin line up vertically.
        for point in points:
            version_pad = " " * max(len(str(point["value"])) - len(point["version"]), 0)
            value_pad = " " * max(len(point["version"]) - len(str(point["value"])), 0)
            margin_pad = " " * max(len(str(point["value"])), len(point["version"]))
            version_row += point["version"] + version_pad + " | "
            value_row += str(point["value"]) + value_pad + " | "
            if "errorMargin" in point:
                margin_row += str(point["errorMargin"]) + margin_pad[:-len(str(point["errorMargin"]))] + " | "
            else:
                margin_row += margin_pad + " | "
        print(version_row + "\n" + value_row + "\n" + margin_row)
########
# API SUPPORT - Code to support HTTP API calls and logging
def create_token():
    """
    Creates a token that lives for 5 minutes, which should be long enough
    to download metrics & diagnostics reports. In a long-running script you should adjust
    the code to issue a new token periodically.

    Returns the value produced by jwt.encode (str with PyJWT >= 2.0;
    bytes with older 1.x versions — TODO confirm which is installed).
    """
    # NOTE(review): ``die`` is not visible in this excerpt — presumably a
    # print-and-exit helper defined elsewhere in the file.
    if PRIVATE_KEY_PATH == "XXXXXXXXXX":
        die(-2, "You need to configure your key information at the top of the file first.")
    # Read the .p8 private key used to sign the App Store Connect JWT.
    with open(PRIVATE_KEY_PATH) as f:
        key = f.read()
    expiry = datetime.datetime.utcnow() + datetime.timedelta(minutes=5)
    token_data = jwt.encode(
        {
            'iss': ISSUER_ID,
            'aud': 'appstoreconnect-v1',
            'exp': expiry
        },
        key,
        # App Store Connect requires ES256-signed tokens with the key ID
        # in the JWT header.
        algorithm='ES256',
        headers={
            'kid': KEY_ID
        }
    )
    return token_data
########
# TEXT COLORS - Functions to color text for pretty output
########
# ENTRY POINT
# Entry point: use a hard-coded default app ID unless one is passed on the
# command line.
if __name__ == "__main__":
    app_id = '1583111882'
    if len(sys.argv) > 1:
        # BUG FIX: this previously read ``app_id = sys,argv[1]`` — a tuple
        # expression that would raise NameError on the bare ``argv`` (and,
        # even if ``argv`` existed, would bind a tuple, not the argument).
        app_id = sys.argv[1]
    get_metrics_insights(app_id)
| [
37811,
198,
6214,
20832,
11682,
13,
9132,
329,
1037,
4566,
870,
290,
2491,
428,
4226,
13,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
4818,
8079,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
29572,
198,
1174... | 2.361936 | 2,459 |
from graphql import GraphQLError
from shared.messages import UNKNOW_ERROR, TOO_MANY_REQUESTS, UNFOLLOW_SUCCESS, UNFOLLOW_ERROR, FOLLOW_SUCCESS, FOLLOW_ERROR
from resolvers.types.user_info import UserInfo
from resolvers.types.picture import Picture
from resolvers.types.feed import Feed
from storage.session import get_session
MESSAGE_ERROR = 'Please wait a few minutes before you try again.'
| [
6738,
4823,
13976,
1330,
29681,
48,
2538,
81,
1472,
198,
6738,
4888,
13,
37348,
1095,
1330,
4725,
29132,
3913,
62,
24908,
11,
5390,
46,
62,
10725,
56,
62,
2200,
10917,
1546,
4694,
11,
4725,
6080,
44765,
62,
12564,
4093,
7597,
11,
4725... | 3.140625 | 128 |
import importlib
import traceback
# Slash-command metadata for the "reload" command: one required string
# option (the command to reload) and one optional boolean flag (whether to
# push the updated command JSON).
info = dict(
    name="reload",
    type=1,
    description="Reloads a command",
    id="reload",
    options=[
        dict(name="command", description="Command name", type=3, required=True),
        dict(name="send", description="Update command JSON?", type=5, required=False),
    ],
    default_permission=False,
)
| [
11748,
1330,
8019,
198,
11748,
12854,
1891,
198,
198,
10951,
796,
1391,
198,
220,
220,
220,
366,
3672,
1298,
366,
260,
2220,
1600,
198,
220,
220,
220,
366,
4906,
1298,
352,
11,
198,
220,
220,
220,
366,
11213,
1298,
366,
6892,
1170,
... | 1.911877 | 261 |
import time
import pytest
from tinkoff_voicekit_client import user_utils
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
| [
11748,
640,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
44569,
2364,
62,
38888,
15813,
62,
16366,
1330,
2836,
62,
26791,
628,
198,
31,
9078,
9288,
13,
4102,
13,
292,
13361,
952,
628,
198,
31,
9078,
9288,
13,
4102,
13,
292,
13361,
... | 2.530303 | 66 |
#!/usr/bin/python2
"""
Statement:
Let d(n) be defined as the sum of proper divisors of n
(numbers less than n which divide evenly into n).
If d(a) = b and d(b) = a, where a != b, then a and b are an amicable
pair and each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20,
22, 44, 55 and 110; therefore d(220) = 284.
The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
"""
from unittest import TestCase, main
from utils import factors
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
17,
198,
198,
37811,
198,
48682,
25,
198,
198,
5756,
288,
7,
77,
8,
307,
5447,
355,
262,
2160,
286,
1774,
2659,
271,
669,
286,
299,
198,
7,
77,
17024,
1342,
621,
299,
543,
14083,
21894,
656,
... | 2.851852 | 216 |
from django.core.management.base import BaseCommand, CommandError
from environments.dynamodb import DynamoIdentityWrapper
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
11,
9455,
12331,
198,
198,
6738,
12493,
13,
67,
4989,
375,
65,
1330,
41542,
7390,
26858,
36918,
2848,
628
] | 4 | 31 |
from extensions import db
from app import app
from models import User
# One-off balance adjustment script, run inside the Flask app context so the
# SQLAlchemy session is bound. Amounts appear to be stored as integer minor
# units (cents), e.g. 1800_00 == $1800.00 — TODO confirm against the User model.
# NOTE(review): .first() returns None when the id is missing, which would
# raise AttributeError on the += below.
with app.app_context():
    jon = User.query.filter_by(id=7).first()
    jon.balance += 1800_00
    storage = User.query.filter_by(id=11).first()
    storage.balance -= 10000_00
    alasdair = User.query.filter_by(id=10).first()
    alasdair.balance -= 2400_00
    # Persist all three updates in a single commit.
    db.session.commit()
| [
6738,
18366,
1330,
20613,
198,
6738,
598,
1330,
598,
198,
6738,
4981,
1330,
11787,
198,
198,
4480,
598,
13,
1324,
62,
22866,
33529,
198,
220,
220,
220,
474,
261,
796,
11787,
13,
22766,
13,
24455,
62,
1525,
7,
312,
28,
22,
737,
11085... | 2.691729 | 133 |
from .imports import *
from .utils.core import *
from .utils.extras import *
# why is this different?
| [
6738,
764,
320,
3742,
1330,
1635,
198,
6738,
764,
26791,
13,
7295,
1330,
1635,
198,
6738,
764,
26791,
13,
2302,
8847,
1330,
1635,
628,
198,
198,
2,
1521,
318,
428,
1180,
30,
628
] | 3.212121 | 33 |
# -*- coding: utf-8 -*-
"""
@FileName: __init__.py
@Time: 2020/7/18 11:10
@Author: zhaojm
Module Description
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
8979,
5376,
25,
11593,
15003,
834,
13,
9078,
198,
31,
7575,
25,
12131,
14,
22,
14,
1507,
1367,
25,
940,
198,
31,
13838,
25,
1976,
3099,
13210,
76,
19... | 2.166667 | 54 |
import os
import sys
from datetime import timedelta
from typing import Callable, Optional
from click import (
Choice,
IntRange,
argument,
command,
confirm,
echo,
group,
option,
)
from firebolt.common.exception import FireboltError
from firebolt.model.engine import Engine
from firebolt.service.manager import ResourceManager
from firebolt.service.types import (
EngineStatusSummary,
EngineType,
WarmupMethod,
)
from firebolt_cli.common_options import (
common_options,
default_from_config_file,
json_option,
)
from firebolt_cli.utils import (
construct_resource_manager,
construct_shortcuts,
convert_bytes,
exit_on_firebolt_exception,
get_default_database_engine,
prepare_execution_result_line,
prepare_execution_result_table,
)
@group(
    cls=construct_shortcuts(
        # NOTE(review): the keyword is spelled "shortages"; construct_shortcuts
        # comes from firebolt_cli.utils (not visible here) — confirm the
        # parameter name is intentional and not a typo for "shortcuts".
        shortages={
            "list": "list (ls)",
            "ls": "list (ls)",
        }
    )
)
def engine() -> None:
    """
    Manage engines.
    """
def get_engine_from_name_or_default(
    rm: ResourceManager, engine_name: Optional[str], database_name: Optional[str]
) -> Engine:
    """
    Resolve an engine either by its own name or, failing that, by looking up
    the default engine of ``database_name``.

    At least one of ``engine_name``/``database_name`` must be provided;
    otherwise a FireboltError is raised.
    """
    if engine_name is None and database_name is None:
        raise FireboltError("Either engine name or database name has to be specified")
    if engine_name is not None:
        return rm.engines.get_by_name(name=engine_name)
    return get_default_database_engine(rm, database_name)
@command()
@common_options
@option(
    "--database-name",
    envvar="FIREBOLT_DATABASE_NAME",
    help="Alternatively to engine name, database name could be specified, "
    "its default engine will be used",
    hidden=True,
    callback=default_from_config_file(required=False),
)
@option(
    "--wait/--no-wait",
    help="Wait until the engine is started.",
    is_flag=True,
    default=False,
)
@argument("engine_name", type=str, required=False)
@exit_on_firebolt_exception
def start(**raw_config_options: str) -> None:
    """
    Start an existing ENGINE_NAME. If ENGINE_NAME is not set,
    uses default engine instead.
    """
    rm = construct_resource_manager(**raw_config_options)
    engine = get_engine_from_name_or_default(
        rm, raw_config_options["engine_name"], raw_config_options["database_name"]
    )
    # A failed engine cannot be started directly; tell the user to restart.
    if (
        engine.current_status_summary
        == EngineStatusSummary.ENGINE_STATUS_SUMMARY_FAILED
    ):
        raise FireboltError(
            f"Engine {engine.name} is in a failed state.\n"
            f"You need to restart an engine first:\n"
            f"$ firebolt engine restart {engine.name}"
        )
    # Delegate the state transition to the shared start/stop helper
    # (``start_stop_generic`` is defined elsewhere in this module,
    # not visible in this excerpt).
    start_stop_generic(
        engine=engine,
        action="start",
        accepted_initial_states={
            EngineStatusSummary.ENGINE_STATUS_SUMMARY_STOPPED,
            EngineStatusSummary.ENGINE_STATUS_SUMMARY_STOPPING,
        },
        accepted_final_states={EngineStatusSummary.ENGINE_STATUS_SUMMARY_RUNNING},
        accepted_final_nowait_states={
            EngineStatusSummary.ENGINE_STATUS_SUMMARY_STARTING
        },
        wrong_initial_state_error="Engine {name} is not in a stopped state."
        "The current engine state is {state}.",
        success_message="Engine {name} is successfully started.",
        success_message_nowait="Start request for engine {name} is successfully sent.",
        failure_message="Engine {name} failed to start. Engine status: {status}.",
        **raw_config_options,
    )
@command()
@common_options
@option(
    "--database-name",
    envvar="FIREBOLT_DATABASE_NAME",
    help="Alternatively to engine name, database name could be specified, "
    "its default engine will be used",
    hidden=True,
    callback=default_from_config_file(required=False),
)
@option(
    "--wait/--no-wait",
    help="Wait until the engine is stopped.",
    is_flag=True,
    default=False,
)
@argument("engine_name", type=str, required=False)
@exit_on_firebolt_exception
def stop(**raw_config_options: str) -> None:
    """
    Stop an existing ENGINE_NAME. If ENGINE_NAME is not set,
    uses default engine instead.
    """
    rm = construct_resource_manager(**raw_config_options)
    engine = get_engine_from_name_or_default(
        rm, raw_config_options["engine_name"], raw_config_options["database_name"]
    )
    # Delegate the state transition to the shared start/stop helper
    # (``start_stop_generic`` is defined elsewhere in this module,
    # not visible in this excerpt).
    start_stop_generic(
        engine=engine,
        action="stop",
        accepted_initial_states={
            EngineStatusSummary.ENGINE_STATUS_SUMMARY_RUNNING,
            EngineStatusSummary.ENGINE_STATUS_SUMMARY_STARTING_INITIALIZING,
        },
        accepted_final_states={EngineStatusSummary.ENGINE_STATUS_SUMMARY_STOPPED},
        accepted_final_nowait_states={
            EngineStatusSummary.ENGINE_STATUS_SUMMARY_STOPPING,
            EngineStatusSummary.ENGINE_STATUS_SUMMARY_STOPPED,
        },
        wrong_initial_state_error="Engine {name} is not in a "
        "running or initializing state. The current engine state is {state}.",
        success_message="Engine {name} is successfully stopped.",
        success_message_nowait="Stop request for engine {name} is successfully sent.",
        failure_message="Engine {name} failed to stop. Engine status: {status}.",
        **raw_config_options,
    )
def engine_properties_options(create_mode: bool = True) -> Callable:
    """
    decorator for engine create/update common options
    :param create_mode: True for create, will make some options required
    """
    # NOTE(review): ``_ENGINE_OPTIONS`` is built but never referenced in this
    # excerpt, and ``_engine_properties_options_inner`` (returned below) is
    # not defined here — the inner wrapper that applies these options to the
    # decorated command presumably exists elsewhere or was lost; as written
    # this raises NameError when called. Confirm before relying on it.
    _ENGINE_OPTIONS = [
        option(
            "--name",
            help="Name of the engine.",
            type=str,
            required=True,
        ),
        option(
            "--spec",
            help="Engine spec. Run 'firebolt engine get-instance-types' "
            "to get a list of available spec",
            type=str,
            required=create_mode,
        ),
        option(
            "--description",
            help="Engine description (max: 64 characters).",
            type=str,
            default="" if create_mode else None,
            required=False,
        ),
        option(
            "--type",
            help='Engine type: "rw" for general purpose '
            'and "ro" for data analytics.',
            type=Choice(list(ENGINE_TYPES.keys()), case_sensitive=False),
            default="ro" if create_mode else None,
            required=False,
        ),
        option(
            "--scale",
            help="The number of engine nodes. Value entered must be between 1 and 128.",
            type=IntRange(1, 128, clamp=False),
            default=1 if create_mode else None,
            required=False,
            show_default=True,
            metavar="INTEGER",
        ),
        option(
            "--use-spot/--no-use-spot",
            help="Use spot instances",
            is_flag=True,
            default=None,
            required=False,
        ),
        option(
            "--auto-stop",
            help="Stop engine automatically after specified time in minutes."
            "Value entered must be between 1 and 43200"
            "(max value is equal to 30 days).",
            type=IntRange(1, 30 * 24 * 60, clamp=False),
            default=20 if create_mode else None,
            required=False,
            show_default=True,
            metavar="INTEGER",
        ),
        option(
            "--warmup",
            help="Engine warmup method. "
            "Minimal(min), Preload indexes(ind), Preload all data(all)",
            type=Choice(list(WARMUP_METHODS.keys())),
            default="ind" if create_mode else None,
            required=False,
            show_default=True,
        ),
    ]
    return _engine_properties_options_inner
def echo_engine_information(
    rm: ResourceManager, engine: Engine, use_json: bool
) -> None:
    """
    Echo a one-line summary of *engine* to stdout.

    :param rm: resource manager used to look up the engine revision and
        its instance type
    :param engine: engine to describe
    :param use_json: emit JSON instead of a formatted table
    :return: None (output goes to stdout via ``echo``)
    """
    # Revision/instance type are only available once the engine has a
    # latest_revision_key; otherwise the corresponding columns stay blank.
    revision = None
    instance_type = None
    if engine.latest_revision_key:
        revision = rm.engine_revisions.get_by_key(engine.latest_revision_key)
        instance_type = rm.instance_types.instance_types_by_key[
            revision.specification.db_compute_instances_type_key
        ]
    def _format_auto_stop(auto_stop: str) -> str:
        """
        Render the auto-stop delay for display.

        ``auto_stop`` is expected to be a number suffixed with "m" (minutes)
        or "s" (seconds); a zero value means the engine never auto-stops and
        is shown as "ALWAYS ON". Any other suffix is returned unchanged.
        """
        val = int(auto_stop[:-1])
        if val == 0:
            return "ALWAYS ON"
        if auto_stop[-1] == "m":
            return str(timedelta(minutes=val))
        elif auto_stop[-1] == "s":
            return str(timedelta(seconds=val))
        else:
            return auto_stop
    echo(
        prepare_execution_result_line(
            data=[
                engine.name,
                engine.description,
                engine.current_status_summary.name
                if engine.current_status_summary
                else None,
                _format_auto_stop(engine.settings.auto_stop_delay_duration),
                revision.specification.db_compute_instances_use_spot
                if revision
                else "",
                engine.settings.preset,
                engine.settings.warm_up,
                str(engine.create_time),
                engine.database.name if engine.database else None,
                instance_type.name if instance_type else "",
                revision.specification.db_compute_instances_count if revision else "",
            ],
            header=[
                "name",
                "description",
                "status",
                "auto_stop",
                "is_spot_instance",
                "preset",
                "warm_up",
                "create_time",
                "attached_to_database",
                "instance_type",
                "scale",
            ],
            use_json=bool(use_json),
        )
    )
# CLI flag value -> SDK enum mappings used by the --type and --warmup options.
ENGINE_TYPES = {"rw": EngineType.GENERAL_PURPOSE, "ro": EngineType.DATA_ANALYTICS}
WARMUP_METHODS = {
    "min": WarmupMethod.MINIMAL,
    "ind": WarmupMethod.PRELOAD_INDEXES,
    "all": WarmupMethod.PRELOAD_ALL_DATA,
}
@command()
@common_options
@option(
    "--database-name",
    envvar="FIREBOLT_DATABASE_NAME",
    help="Alternatively to engine name, database name could be specified, "
    "its default engine will be used",
    hidden=True,
    callback=default_from_config_file(required=False),
)
@option(
    "--wait/--no-wait",
    help="Wait until the engine is restarted.",
    is_flag=True,
    default=False,
)
@argument("engine_name", type=str, required=False)
@exit_on_firebolt_exception
def restart(**raw_config_options: str) -> None:
    """
    Restart an existing ENGINE_NAME. If ENGINE_NAME is not set,
    uses default engine instead.
    """
    rm = construct_resource_manager(**raw_config_options)
    engine = get_engine_from_name_or_default(
        rm, raw_config_options["engine_name"], raw_config_options["database_name"]
    )
    # Delegate the state transition to the shared start/stop helper
    # (``start_stop_generic`` is defined elsewhere in this module,
    # not visible in this excerpt). Restart is also the recovery path
    # for engines in a FAILED state.
    start_stop_generic(
        engine=engine,
        action="restart",
        accepted_initial_states={
            EngineStatusSummary.ENGINE_STATUS_SUMMARY_RUNNING,
            EngineStatusSummary.ENGINE_STATUS_SUMMARY_FAILED,
        },
        accepted_final_states={EngineStatusSummary.ENGINE_STATUS_SUMMARY_RUNNING},
        accepted_final_nowait_states={
            EngineStatusSummary.ENGINE_STATUS_SUMMARY_STOPPING,
            EngineStatusSummary.ENGINE_STATUS_SUMMARY_STARTING,
        },
        wrong_initial_state_error="Engine {name} is not in a running or failed state."
        " The current engine state is {state}.",
        success_message="Engine {name} is successfully restarted.",
        success_message_nowait="Restart request for engine {name} "
        "is successfully sent.",
        failure_message="Engine {name} failed to restart. Engine status: {status}.",
        **raw_config_options,
    )
@command()
@common_options
@engine_properties_options(create_mode=True)
@option(
    "--database-name",
    help="Name of the database the engine should be attached to.",
    type=str,
    required=True,
)
@json_option
@exit_on_firebolt_exception
def create(**raw_config_options: str) -> None:
    """
    Creates engine with the requested parameters.

    The engine is created in the same region as the target database and is
    attached to it as the default engine.
    """
    rm = construct_resource_manager(**raw_config_options)
    database = rm.databases.get_by_name(name=raw_config_options["database_name"])
    region = rm.regions.get_by_key(database.compute_region_key)
    engine = rm.engines.create(
        name=raw_config_options["name"],
        spec=raw_config_options["spec"],
        region=region.name,
        engine_type=ENGINE_TYPES[raw_config_options["type"]],
        scale=int(raw_config_options["scale"]),
        auto_stop=int(raw_config_options["auto_stop"]),
        warmup=WARMUP_METHODS[raw_config_options["warmup"]],
        description=raw_config_options["description"],
        revision_spec_kwargs={
            "db_compute_instances_use_spot": True
            if raw_config_options["use_spot"]
            else False
        },
    )
    try:
        database.attach_to_engine(engine=engine, is_default_engine=True)
    except (FireboltError, RuntimeError) as err:
        # Roll back the just-created engine if attachment fails so we
        # don't leak an orphaned engine.
        engine.delete()
        raise err
    if not raw_config_options["json"]:
        echo(
            f"Engine {engine.name} is successfully created "
            f"and attached to the {database.name}."
        )
    echo_engine_information(rm, engine, bool(raw_config_options["json"]))
# CLI: `engine update` — modify parameters of an existing (stopped) engine.
@command()
@common_options
@engine_properties_options(create_mode=False)
@option(
    "--new-engine-name",
    help="Set this parameter for renaming the engine.",
    default=None,
    required=False,
)
@json_option
@exit_on_firebolt_exception
def update(
    use_spot: Optional[bool], auto_stop: int, scale: int, **raw_config_options: str
) -> None:
    """
    Update engine parameters. Engine should be stopped before updating.
    """
    # At least one updatable option must be provided, otherwise the SDK call
    # would be a no-op; exit with a usage error in that case.
    something_to_update = (
        any(
            raw_config_options[param] is not None
            for param in [
                "spec",
                "type",
                "warmup",
                "description",
            ]
        )
        or scale is not None
        or use_spot is not None
        or auto_stop is not None
    )
    if not something_to_update:
        echo("Nothing to update. At least one parameter should be provided.", err=True)
        sys.exit(os.EX_USAGE)
    rm = construct_resource_manager(**raw_config_options)
    engine = rm.engines.get_by_name(name=raw_config_options["name"])
    # Options the user did not supply are None here; the SDK leaves those
    # engine attributes unchanged.
    engine = engine.update(
        name=raw_config_options["new_engine_name"],
        spec=raw_config_options["spec"],
        engine_type=ENGINE_TYPES.get(raw_config_options["type"], None),
        scale=scale,
        auto_stop=auto_stop,
        warmup=WARMUP_METHODS.get(raw_config_options["warmup"], None),
        description=raw_config_options["description"],
        use_spot=use_spot,
    )
    if not raw_config_options["json"]:
        echo(f"Engine {engine.name} is successfully updated.")
    echo_engine_information(rm, engine, bool(raw_config_options["json"]))
@command()
@common_options
@option(
    "--database-name",
    envvar="FIREBOLT_DATABASE_NAME",
    help="Alternatively to engine name, database name could be specified, "
    "its default engine will be used",
    hidden=True,
    callback=default_from_config_file(required=False),
)
@argument("engine_name", type=str, required=False)
@exit_on_firebolt_exception
def status(**raw_config_options: str) -> None:
    """
    Check the ENGINE_NAME status. If ENGINE_NAME is not set,
    uses default engine instead.
    """
    # Resolve either the explicitly named engine or the database's default one.
    rm = construct_resource_manager(**raw_config_options)
    engine = get_engine_from_name_or_default(
        rm, raw_config_options["engine_name"], raw_config_options["database_name"]
    )
    # The status summary may be absent; report an empty name in that case.
    summary = engine.current_status_summary
    current_status_name = summary.name if summary else ""
    echo(f"Engine {engine.name} current status is: {current_status_name}")
# CLI: `engine list` — print all engines, optionally filtered by substring.
@command(name="list", short_help="List existing engines (alias: ls)")
@common_options
@option(
    "--name-contains",
    help="A string used to filter the list of returned engines. "
    "Partial matches will be returned.",
    default=None,
    type=str,
)
@json_option
@exit_on_firebolt_exception
def list(**raw_config_options: str) -> None:
    """
    List existing engines
    """
    # NOTE: deliberately shadows the builtin `list`; the function object is
    # registered as the `list` subcommand at the bottom of this module.
    rm = construct_resource_manager(**raw_config_options)
    engines = rm.engines.get_many(
        name_contains=raw_config_options["name_contains"],
        order_by="ENGINE_ORDER_NAME_ASC",
    )
    if not raw_config_options["json"]:
        echo("Found {num_engines} engines".format(num_engines=len(engines)))
    # In JSON mode always emit the table (possibly empty); in plain mode
    # only when there is at least one engine to show.
    if raw_config_options["json"] or engines:
        echo(
            prepare_execution_result_table(
                data=[
                    [
                        engine.name,
                        engine.current_status_summary.name
                        if engine.current_status_summary
                        else EngineStatusSummary.ENGINE_STATUS_SUMMARY_UNSPECIFIED,
                        rm.regions.get_by_key(engine.compute_region_key).name,
                    ]
                    for engine in engines
                ],
                header=["name", "status", "region"],
                use_json=bool(raw_config_options["json"]),
            )
        )
@command()
@common_options
@option(
    "--yes",
    help="Automatic yes on confirmation prompt",
    is_flag=True,
)
@argument(
    "engine_name",
    type=str,
)
@exit_on_firebolt_exception
def drop(**raw_config_options: str) -> None:
    """
    Drop an existing engine
    """
    rm = construct_resource_manager(**raw_config_options)
    engine = rm.engines.get_by_name(name=raw_config_options["engine_name"])
    # `or` short-circuits, so the interactive prompt only appears when
    # --yes was not supplied.
    confirmed = raw_config_options["yes"] or confirm(
        f"Do you really want to drop the engine {engine.name}?"
    )
    if not confirmed:
        echo("Drop request is aborted")
        return
    engine.delete()
    echo(f"Drop request for engine {engine.name} is successfully sent")
@command()
@common_options
@argument(
    "engine_name",
    type=str,
)
@json_option
@exit_on_firebolt_exception
def describe(**raw_config_options: str) -> None:
    """
    Describe specified engine
    """
    # Look the engine up by name and print its details (table or JSON).
    rm = construct_resource_manager(**raw_config_options)
    engine_name = raw_config_options["engine_name"]
    engine = rm.engines.get_by_name(name=engine_name)
    use_json = bool(raw_config_options["json"])
    echo_engine_information(rm, engine, use_json)
# CLI: `engine get-instance-types` — list available instance specs per region.
@command()
@common_options
@option(
    "--region",
    help="Instances information relevant to this region.",
    required=True,
    type=str,
)
@json_option
@exit_on_firebolt_exception
def get_instance_types(**raw_config_options: str) -> None:
    """
    Get instance types (spec) available for your account
    """
    rm = construct_resource_manager(**raw_config_options)
    # Validate the region up front so the user gets an actionable error
    # listing the valid choices. (`not in` replaces `not x in y`.)
    if raw_config_options["region"] not in rm.regions.regions_by_name:
        raise FireboltError(
            f"Unknown region: {raw_config_options['region']}. "
            f"Available regions: {', '.join(rm.regions.regions_by_name.keys())}"
        )
    region = rm.regions.get_by_name(name=raw_config_options["region"])
    echo(
        prepare_execution_result_table(
            data=[
                [
                    spec.name,
                    spec.cpu_virtual_cores_count,
                    convert_bytes(spec.memory_size_bytes),
                    convert_bytes(spec.storage_size_bytes),
                ]
                # Sort by instance family (first character of the name),
                # then by CPU count, for a stable, readable listing.
                for spec in sorted(
                    rm.instance_types.get_instance_types_per_region(region),
                    key=lambda x: (x.name[0], x.cpu_virtual_cores_count),
                )
            ],
            header=["name", "cpu", "memory", "storage"],
            use_json=bool(raw_config_options["json"]),
        )
    )
# Register all subcommands on the `engine` command group.
engine.add_command(get_instance_types)
engine.add_command(create)
engine.add_command(describe)
engine.add_command(drop)
engine.add_command(start)
engine.add_command(restart)
engine.add_command(stop)
engine.add_command(status)
engine.add_command(update)
engine.add_command(list)
| [
11748,
28686,
198,
11748,
25064,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
19720,
1330,
4889,
540,
11,
32233,
198,
198,
6738,
3904,
1330,
357,
198,
220,
220,
220,
18502,
11,
198,
220,
220,
220,
2558,
17257,
11,
198,
220,
2... | 2.301568 | 8,867 |
import os
from .remote import * # NOQA
from .base import MEDIA_ROOT, BASE_DIR # NOQA
# Staging settings: production-like, but gated behind HTTP Basic auth.
DEBUG = False
WAGTAIL_CACHE = False
BASE_URL = "https://web.staging.nhsx-website.dalmatian.dxw.net"
# IP-whitelisted Basic auth in front of the whole site; credentials come
# from the environment (empty string if unset).
MIDDLEWARE += ["baipw.middleware.BasicAuthIPWhitelistMiddleware"]
BASIC_AUTH_LOGIN = os.environ.get("BASIC_AUTH_LOGIN", "")
BASIC_AUTH_PASSWORD = os.environ.get("BASIC_AUTH_PASSWORD", "")
BASIC_AUTH_DISABLE_CONSUMING_AUTHORIZATION_HEADER = True
####################################################################################################
# Static assets served by Whitenoise
####################################################################################################
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATIC_URL = "/static/"
####################################################################################################
# Media assets served from a CDN / bucket
####################################################################################################
AWS_S3_CUSTOM_DOMAIN = os.environ.get("AWS_CDN_URI", "")
AWS_STORAGE_BUCKET_NAME = os.environ.get("AWS_BUCKET_NAME", "")
MEDIA_URL = "{}{}/".format(AWS_S3_CUSTOM_DOMAIN, MEDIA_ROOT)
| [
11748,
28686,
198,
6738,
764,
47960,
1330,
1635,
220,
1303,
8005,
48,
32,
198,
6738,
764,
8692,
1330,
26112,
3539,
62,
13252,
2394,
11,
49688,
62,
34720,
220,
1303,
8005,
48,
32,
198,
198,
30531,
796,
10352,
198,
54,
4760,
5603,
4146,... | 3.263158 | 361 |
from __future__ import annotations
import time
import typing
from concurrent.futures import Future
from functools import partial
from uuid import uuid4
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import KolibriDaemonDBus
from kolibri_app.config import DAEMON_APPLICATION_ID
from kolibri_app.config import DAEMON_MAIN_OBJECT_PATH
from kolibri_app.config import DAEMON_PRIVATE_OBJECT_PATH
from .dbus_helpers import DBusManagerProxy
from .desktop_users import AccountsServiceManager
from .desktop_users import UserInfo
from .futures import future_chain
from .glib_helpers import dict_to_vardict
from .kolibri_search_handler import LocalSearchHandler
from .kolibri_service_manager import KolibriServiceManager
INACTIVITY_TIMEOUT_MS = 30 * 1000 # 30 seconds in milliseconds
DEFAULT_STOP_KOLIBRI_TIMEOUT_SECONDS = 60 # 1 minute in seconds
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
640,
198,
11748,
19720,
198,
6738,
24580,
13,
69,
315,
942,
1330,
10898,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
334,
27112,
1330,
334,
27112,
19,
198,
198,
6738,
308,
... | 3.12766 | 282 |
# Copyright (c) 2014 Brocade Communications Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""NOS NETCONF XML Configuration Command Templates.
Interface Configuration Commands
"""
# Get NOS Version
SHOW_FIRMWARE_VERSION = (
"show-firmware-version xmlns:nc="
"'urn:brocade.com:mgmt:brocade-firmware-ext'"
)
GET_VCS_DETAILS = (
'get-vcs-details xmlns:nc="urn:brocade.com:mgmt:brocade-vcs"'
)
SHOW_VIRTUAL_FABRIC = (
'show-virtual-fabric xmlns:nc="urn:brocade.com:mgmt:brocade-vcs"'
)
GET_VIRTUAL_FABRIC_INFO = (
'interface xmlns:nc="urn:brocade.com:mgmt:brocade-firmware-ext"'
)
NOS_VERSION = "./*/{urn:brocade.com:mgmt:brocade-firmware-ext}os-version"
VFAB_ENABLE = "./*/*/*/{urn:brocade.com:mgmt:brocade-vcs}vfab-enable"
# Create VLAN (vlan_id)
CREATE_VLAN_INTERFACE = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<interface-vlan xmlns="urn:brocade.com:mgmt:brocade-interface">
<interface>
<vlan>
<name>{vlan_id}</name>
</vlan>
</interface>
</interface-vlan>
</config>
"""
# Delete VLAN (vlan_id)
DELETE_VLAN_INTERFACE = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<interface-vlan xmlns="urn:brocade.com:mgmt:brocade-interface">
<interface>
<vlan operation="delete">
<name>{vlan_id}</name>
</vlan>
</interface>
</interface-vlan>
</config>
"""
#
# AMPP Life-cycle Management Configuration Commands
#
# Create AMPP port-profile (port_profile_name)
CREATE_PORT_PROFILE = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<port-profile xmlns="urn:brocade.com:mgmt:brocade-port-profile">
<name>{name}</name>
</port-profile>
</config>
"""
# Create VLAN sub-profile for port-profile (port_profile_name)
CREATE_VLAN_PROFILE_FOR_PORT_PROFILE = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<port-profile xmlns="urn:brocade.com:mgmt:brocade-port-profile">
<name>{name}</name>
<vlan-profile/>
</port-profile>
</config>
"""
# Configure L2 mode for VLAN sub-profile (port_profile_name)
CONFIGURE_L2_MODE_FOR_VLAN_PROFILE_IN_DOMAIN = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<port-profile xmlns="urn:brocade.com:mgmt:brocade-port-profile">
<name>{name}</name>
<vlan-profile>
<switchport-basic>
<basic/>
</switchport-basic>
</vlan-profile>
</port-profile>
</config>
"""
# Configure L2 mode for VLAN sub-profile (port_profile_name)
CONFIGURE_L2_MODE_FOR_VLAN_PROFILE = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<port-profile xmlns="urn:brocade.com:mgmt:brocade-port-profile">
<name>{name}</name>
<vlan-profile>
<switchport/>
</vlan-profile>
</port-profile>
</config>
"""
# Configure trunk mode for VLAN sub-profile (port_profile_name)
CONFIGURE_TRUNK_MODE_FOR_VLAN_PROFILE = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<port-profile xmlns="urn:brocade.com:mgmt:brocade-port-profile">
<name>{name}</name>
<vlan-profile>
<switchport>
<mode>
<vlan-mode>trunk</vlan-mode>
</mode>
</switchport>
</vlan-profile>
</port-profile>
</config>
"""
# Configure allowed VLANs for VLAN sub-profile
# (port_profile_name, allowed_vlan, native_vlan)
CONFIGURE_ALLOWED_VLANS_FOR_VLAN_PROFILE = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<port-profile xmlns="urn:brocade.com:mgmt:brocade-port-profile">
<name>{name}</name>
<vlan-profile>
<switchport>
<trunk>
<allowed>
<vlan>
<add>{vlan_id}</add>
</vlan>
</allowed>
</trunk>
</switchport>
</vlan-profile>
</port-profile>
</config>
"""
# Delete port-profile (port_profile_name)
DELETE_PORT_PROFILE = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<port-profile
xmlns="urn:brocade.com:mgmt:brocade-port-profile" operation="delete">
<name>{name}</name>
</port-profile>
</config>
"""
# Activate port-profile (port_profile_name)
ACTIVATE_PORT_PROFILE = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<port-profile-global xmlns="urn:brocade.com:mgmt:brocade-port-profile">
<port-profile>
<name>{name}</name>
<activate/>
</port-profile>
</port-profile-global>
</config>
"""
# Deactivate port-profile (port_profile_name)
DEACTIVATE_PORT_PROFILE = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<port-profile-global xmlns="urn:brocade.com:mgmt:brocade-port-profile">
<port-profile>
<name>{name}</name>
<activate
xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="delete" />
</port-profile>
</port-profile-global>
</config>
"""
# Associate MAC address to port-profile (port_profile_name, mac_address)
ASSOCIATE_MAC_TO_PORT_PROFILE = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<port-profile-global xmlns="urn:brocade.com:mgmt:brocade-port-profile">
<port-profile>
<name>{name}</name>
<static>
<mac-address>{mac_address}</mac-address>
</static>
</port-profile>
</port-profile-global>
</config>
"""
# Dissociate MAC address from port-profile (port_profile_name, mac_address)
DISSOCIATE_MAC_FROM_PORT_PROFILE = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<port-profile-global xmlns="urn:brocade.com:mgmt:brocade-port-profile">
<port-profile>
<name>{name}</name>
<static
xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="delete">
<mac-address>{mac_address}</mac-address>
</static>
</port-profile>
</port-profile-global>
</config>
"""
# port-profile domain management commands
REMOVE_PORTPROFILE_FROM_DOMAIN = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<port-profile-domain xmlns="urn:brocade.com:mgmt:brocade-port-profile">
<port-profile-domain-name>{domain_name}</port-profile-domain-name>
<profile operation="delete">
<profile-name>{name}</profile-name>
</profile>
</port-profile-domain>
</config>
"""
# put port profile in default domain
CONFIGURE_PORTPROFILE_IN_DOMAIN = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<port-profile-domain xmlns="urn:brocade.com:mgmt:brocade-port-profile">
<port-profile-domain-name>{domain_name}</port-profile-domain-name>
<profile>
<profile-name>{name}</profile-name>
</profile>
</port-profile-domain>
</config>
"""
#
# L3 Life-cycle Management Configuration Commands
#
# Create SVI and assign ippaddres (rbridge_id,vlan_id,ip_address)
CONFIGURE_SVI_WITH_IP_ADDRESS = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
<ve>
<name>{vlan_id}</name>
<ip xmlns="urn:brocade.com:mgmt:brocade-ip-config">
<ip-config>
<address>
<address>{ip_address}</address>
</address>
</ip-config>
</ip>
</ve>
</interface>
</rbridge-id>
</config>
"""
# Add ipaddress to SVI (rbridge_id,vlan_id,ip_address)
ADD_IP_ADDRESS_TO_SVI = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
<ve>
<name>{vlan_id}</name>
<ip xmlns="urn:brocade.com:mgmt:brocade-ip-config">
<ip-config>
<address>
<address>{ip_address}</address>
</address>
</ip-config>
</ip>
</ve>
</interface>
</rbridge-id>
</config>
"""
# Add anycast ipaddress to SVI (rbridge_id,vlan_id,ip_address)
ADD_ANYCAST_IP_ADDRESS_TO_SVI = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
<ve>
<name>{vlan_id}</name>
<ip xmlns="urn:brocade.com:mgmt:brocade-ip-config">
<ip-anycast-address xmlns="urn:brocade.com:mgmt:brocade-vrrp">
<ip-address>{ip_address}</ip-address>
</ip-anycast-address>
</ip>
</ve>
</interface>
</rbridge-id>
</config>
"""
# set learn-any for SVI (rbridge_id,vlan_id)
ADD_ARP_LEARN_ANY_TO_SVI = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
<ve>
<name>{vlan_id}</name>
<ip xmlns="urn:brocade.com:mgmt:brocade-ip-config">
<ip-config xmlns="urn:brocade.com:mgmt:brocade-ip-config">
<arp>
<learn-any></learn-any>
</arp>
</ip-config>
</ip>
</ve>
</interface>
</rbridge-id>
</config>
"""
# set arp aging timeout for SVI (rbridge_id,vlan_id,arp_aging_timeout)
SET_ARP_AGING_TIMEOUT_FOR_SVI = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
<ve>
<name>{vlan_id}</name>
<ip xmlns="urn:brocade.com:mgmt:brocade-ip-config">
<ip-config>
<arp-aging-timeout>{arp_aging_timeout}</arp-aging-timeout>
</ip-config>
</ip>
</ve>
</interface>
</rbridge-id>
</config>
"""
# add vrf to bgp (rbridge_id, vrf_name)
# router bgp
# address-family ipv4 unicast vrf {vrf_name}
ADD_VRF_TO_BGP = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<router>
<router-bgp xmlns="urn:brocade.com:mgmt:brocade-bgp">
<address-family>
<ipv4>
<ipv4-unicast>
<af-vrf>
<af-vrf-name>{vrf_name}</af-vrf-name>
</af-vrf>
</ipv4-unicast>
</ipv4>
</address-family>
</router-bgp>
</router>
</rbridge-id>
</config>
"""
# router bgp
# address-family ipv4 unicast vrf {vrf_name}
# multipath ebgp
ADD_MULTIPATH_EBGP_TO_BGP_FOR_VRF = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<router>
<router-bgp xmlns="urn:brocade.com:mgmt:brocade-bgp">
<address-family>
<ipv4>
<ipv4-unicast>
<af-vrf>
<af-vrf-name>{vrf_name}</af-vrf-name>
<af-common-cmds-holder>
<multipath>
<ebgp></ebgp>
</multipath>
</af-common-cmds-holder>
</af-vrf>
</ipv4-unicast>
</ipv4>
</address-family>
</router-bgp>
</router>
</rbridge-id>
</config>
"""
# delete SVI (rbridge_id,vlan_id)
DELETE_SVI = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
<ve operation="delete">
<name>{vlan_id}</name>
</ve>
</interface>
</rbridge-id>
</config>
"""
# Activate SVI (rbridge_id,vlan_id)
ACTIVATE_SVI = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
<ve>
<name>{vlan_id}</name>
<shutdown xmlns="urn:brocade.com:mgmt:brocade-ip-config"
xc:operation="delete"></shutdown>
</ve>
</interface>
</rbridge-id>
</config>
"""
# Remove ipaddress from SVI (rbridge_id,vlan_id)
DECONFIGURE_IP_FROM_SVI = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
<ve>
<name>{vlan_id}</name>
<ip xmlns="urn:brocade.com:mgmt:brocade-ip-config">
<ip-config>
<address xc:operation="delete">
<address>{gw_ip}</address>
</address>
</ip-config>
</ip>
</ve>
</interface>
</rbridge-id>
</config>
"""
# create vrf (rbridge_id,vrf_name)
CREATE_VRF = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<vrf xmlns="urn:brocade.com:mgmt:brocade-vrf">
<vrf-name>{vrf_name}</vrf-name>
</vrf>
</rbridge-id>
</config>
"""
# delete vrf (rbridge_id,vrf_name)
DELETE_VRF = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<vrf xmlns="urn:brocade.com:mgmt:brocade-vrf"
xc:operation="delete">
<vrf-name>{vrf_name}</vrf-name>
</vrf>
</rbridge-id>
</config>
"""
# configure route distinguisher for vrf (rbridge_id,vrf_name, rd)
CONFIGURE_RD_FOR_VRF = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<vrf xmlns="urn:brocade.com:mgmt:brocade-vrf">
<vrf-name>{vrf_name}</vrf-name>
<route-distiniguisher>{rd}</route-distiniguisher>
</vrf>
</rbridge-id>
</config>
"""
# configure vni for vrf (rbridge_id, vrf_name, vni)
CONFIGURE_NVI_FOR_VRF = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<vrf xmlns="urn:brocade.com:mgmt:brocade-vrf">
<vrf-name>{vrf_name}</vrf-name>
<vni>{vni}</vni>
</vrf>
</rbridge-id>
</config>
"""
# configure address-family for vrf (rbridge_id,vrf_name)
ADD_ADDRESS_FAMILY_FOR_VRF_V1 = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<vrf xmlns="urn:brocade.com:mgmt:brocade-vrf">
<vrf-name>{vrf_name}</vrf-name>
<address-family xmlns="urn:brocade.com:mgmt:brocade-vrf">
<ipv4>
<max-route>1200</max-route>
</ipv4>
</address-family>
</vrf>
</rbridge-id>
</config>
"""
# configure address-family for vrf (rbridge_id,vrf_name)
ADD_ADDRESS_FAMILY_FOR_VRF = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<vrf xmlns="urn:brocade.com:mgmt:brocade-vrf">
<vrf-name>{vrf_name}</vrf-name>
<address-family xmlns="urn:brocade.com:mgmt:brocade-vrf">
<ip>
<unicast/>
</ip>
</address-family>
</vrf>
</rbridge-id>
</config>
"""
# configure address-family for vrf with targets (rbridge_id,vrf_name, vni, import_vni)
ADD_ADDRESS_FAMILY_IMPORT_TARGET_FOR_VRF = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<vrf xmlns="urn:brocade.com:mgmt:brocade-vrf">
<vrf-name>{vrf_name}</vrf-name>
<address-family xmlns="urn:brocade.com:mgmt:brocade-vrf">
<ip>
<unicast>
<route-target>
<action>import</action>
<target-community>{vni}:{vni}</target-community>
</route-target>
</unicast>
</ip>
</address-family>
</vrf>
</rbridge-id>
</config>
"""
ADD_ADDRESS_FAMILY_EXPORT_TARGET_FOR_VRF = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<vrf xmlns="urn:brocade.com:mgmt:brocade-vrf">
<vrf-name>{vrf_name}</vrf-name>
<address-family xmlns="urn:brocade.com:mgmt:brocade-vrf">
<ip>
<unicast>
<route-target>
<action>export</action>
<target-community>{vni}:{vni}</target-community>
</route-target>
</unicast>
</ip>
</address-family>
</vrf>
</rbridge-id>
</config>
"""
# Bind vrf to SVI (rbridge_id,vlan_idi, vrf)
ADD_VRF_TO_SVI = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
<ve>
<name>{vlan_id}</name>
<vrf xmlns="urn:brocade.com:mgmt:brocade-ip-config">
<forwarding>{vrf_name}</forwarding>
</vrf>
</ve>
</interface>
</rbridge-id>
</config>
"""
# unbind vrf from SVI (rbridge_id,vlan_idi, vrf)
DELETE_VRF_FROM_SVI = """
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
<ve>
<name>{vlan_id}</name>
<vrf xmlns="urn:brocade.com:mgmt:brocade-ip-config"
operation="delete">
<forwarding>{vrf_name}</forwarding>
</vrf>
</ve>
</interface>
</rbridge-id>
</config>
"""
#
# Constants
#
# Port profile naming convention for Neutron networks; `{id}` is filled in
# with the Neutron network/VRF identifier by the caller.
OS_PORT_PROFILE_NAME = "openstack-profile-{id}"
OS_VRF_NAME = "osv-{id}"
# Port profile filter expressions (XPath) used when querying the switch.
PORT_PROFILE_XPATH_FILTER = "/port-profile"
PORT_PROFILE_NAME_XPATH_FILTER = "/port-profile[name='{name}']"
| [
2,
15069,
357,
66,
8,
1946,
2806,
46395,
14620,
11998,
11,
3457,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,... | 1.768778 | 12,754 |
import prody
import numpy

# Load the full trajectory, then iteratively superpose only the CA atoms
# and dump both PDB and plain-text coordinate files.
pdb_data = prody.parsePDB("../Models/prot_stretching/stretching_trajectory_offset_ligand.pdb")
pdb_trajectory = prody.PDBEnsemble("iterposed_CA")

# Write the initial coordsets (CA atoms only).
prot = pdb_data.select("name CA")
prody.writePDB("stretching_trajectory_offset_ligand.iterposed_all.pdb", prot)
# `open` replaces the Python-2-only `file` builtin (removed in Python 3).
with open("stretching_trajectory_offset_ligand.initial_CA.coords", 'w') as outfile:
    # Header line: number of coordsets, atoms, and dimensions.
    outfile.write("%d %d %d\n" % prot.getCoordsets().shape)
    for coordset in prot.getCoordsets():
        numpy.savetxt(outfile, coordset)

# We only want to work with CAs. If we use the 'all coordinates+atom selection" trick
# Prody will still use all coordinates for iterative superposition
pdb_trajectory.setCoords(prot.getCoordsets()[0])
pdb_trajectory.addCoordset(prot.getCoordsets())
pdb_trajectory.setAtoms(prot)
pdb_trajectory.iterpose()

prody.writePDB("stretching_trajectory_offset_ligand.iterposed_CA.pdb", pdb_trajectory)
with open("stretching_trajectory_offset_ligand.iterposed_CA.coords", 'w') as outfile:
    outfile.write("%d %d %d\n" % pdb_trajectory.getCoordsets().shape)
    for coordset in pdb_trajectory.getCoordsets():
        numpy.savetxt(outfile, coordset)
| [
11748,
386,
9892,
198,
11748,
299,
32152,
198,
198,
79,
9945,
62,
7890,
796,
386,
9892,
13,
29572,
5760,
33,
7203,
40720,
5841,
1424,
14,
11235,
62,
301,
1186,
10813,
14,
301,
1186,
10813,
62,
9535,
752,
652,
62,
28968,
62,
4604,
39... | 2.552916 | 463 |
import torch
| [
11748,
28034,
628
] | 4.666667 | 3 |
from . import fnoUtils as utils | [
6738,
764,
1330,
277,
3919,
18274,
4487,
355,
3384,
4487
] | 3.1 | 10 |
# -*- coding: utf-8 -*-
import datetime
import django
import os
import sys
# env
sys.path.append('/usr/lib/python2.7/dist-packages/')
sys.path.append('/usr/lib/python2.7/')
sys.path.append('/usr/local/lib/python2.7/dist-packages/')
sys.path.append('/data2/django_1.9/')
sys.path.append('/data2/django_projects/')
sys.path.append('/data2/django_third/')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djpsilobus.settings")
django.setup()
from djsani.core.sql import STUDENTS_ALPHA
from directory.core import STUDENTS_ALL
from djzbar.utils.informix import do_sql as do_esql
from djsani.core.utils import get_term
from djauth.LDAPManager import LDAPManager
from django.conf import settings
# set up command-line options
# NOTE: this module is Python 2 code (print statements, python2.7 sys.path).
def main():
    """
    Find all students who have staff attribute in LDAP
    """
    NOW = datetime.datetime.now()
    term = get_term()
    # Restrict the student query to the current term and the listed class
    # codes; kept for reference — STUDENTS_ALL is what is actually used below.
    sql = ''' {}
      AND stu_serv_rec.yr = "{}"
      AND stu_serv_rec.sess = "{}"
      AND prog_enr_rec.cl IN {}
    '''.format(
        STUDENTS_ALPHA, term["yr"], term["sess"],
        ('FN','FF','FR','SO','JR','SR','GD','UT')
    )
    #print "djsani sql = {}".format(sql)
    #print "djkotter sql = {}".format(STUDENTS_ALL)
    #objs = do_esql(sql)
    objs = do_esql(STUDENTS_ALL)
    # initialize the LDAP manager
    l = LDAPManager()
    print NOW
    for o in objs:
        print "{}, {} ({})".format(o.lastname, o.firstname, o[2])
        # Look the student up in LDAP by the configured ID attribute.
        result = l.search(o.id,field=settings.LDAP_ID_ATTR)
        staff = result[0][1].get('carthageStaffStatus')
        if staff:
            # LDAP attribute values arrive as lists; take the first entry.
            staff = staff[0]
            username = result[0][1]['cn'][0]
            email = result[0][1].get('mail')
            if email:
                email = email[0]
            print "username = {} | id {} | email = {} | staff = {}".format(
                username, o.id, email, staff
            )
    print NOW
######################
# shell command line
######################
if __name__ == "__main__":
sys.exit(main())
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
4818,
8079,
198,
11748,
42625,
14208,
198,
11748,
28686,
198,
11748,
25064,
198,
2,
17365,
198,
17597,
13,
6978,
13,
33295,
10786,
14,
14629,
14,
8019,
14,
29412,
... | 2.260771 | 882 |
from unittest import TestCase
from metabase.exceptions import AuthenticationError
from metabase.metabase import Metabase
from tests.helpers import IntegrationTestCase
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
1138,
5754,
13,
1069,
11755,
1330,
48191,
12331,
198,
6738,
1138,
5754,
13,
4164,
5754,
1330,
3395,
5754,
198,
6738,
5254,
13,
16794,
364,
1330,
38410,
14402,
20448,
628,
198
] | 4.25 | 40 |
"""
Given two strings s and t which consist of only lowercase letters.
String t is generated by randomly shuffling string s and then adding one more letter at a random position.
Find the letter that was added in t.
Example:
Input:
s = "abcd"
t = "abcde"
Output:
e
Explanation:
'e' is the letter that was added.
Your runtime beats 40.52 % of python submissions
"""
| [
37811,
198,
15056,
734,
13042,
264,
290,
256,
543,
3473,
286,
691,
2793,
7442,
7475,
13,
198,
198,
10100,
256,
318,
7560,
416,
4738,
32299,
1359,
4731,
264,
290,
788,
751,
530,
517,
3850,
379,
257,
4738,
2292,
13,
198,
198,
16742,
2... | 3.315315 | 111 |
#######################################################################
# Name: test_parser_params
# Purpose: Test for parser parameters.
# Author: Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# Copyright: (c) 2014 Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# License: MIT License
#######################################################################
import pytest # type: ignore
import sys
# proj
from arpeggio import *
def test_autokwd():
    """
    autokwd will match keywords on word boundaries.
    """
    # `grammar` is a module-level grammar defined elsewhere in this file.
    parser = ParserPython(grammar, autokwd=True)
    # If autokwd is enabled this should parse without error.
    parser.parse("one two three")
    # But this will not parse because each word to match
    # will be, by default, tried to match as a whole word
    with pytest.raises(NoMatch):
        parser.parse("onetwothree")
    parser = ParserPython(grammar, autokwd=False)
    # If we turn off the autokwd then this will match.
    parser.parse("one two three")
    parser.parse("onetwothree")
def test_skipws():
    """
    skipws will skip whitespaces.
    """
    parser = ParserPython(grammar)
    # If skipws is on (the default) this should parse without error.
    parser.parse("one two three")
    # If not the same input will raise exception.
    parser = ParserPython(grammar, skipws=False)
    with pytest.raises(NoMatch):
        parser.parse("one two three")
def test_ws():
"""
ws consists of chars that will be skipped if skipws is enables.
By default it consists of space, tab and newline.
"""
parser = ParserPython(grammar)
# With default ws this should parse without error
parser.parse("""one
two three""")
# If we make only a space char to be ws than the
# same input will raise exception.
parser = ParserPython(grammar, ws=" ")
with pytest.raises(NoMatch):
parser.parse("""one
two three""")
# But if only spaces are between words than it will
# parse.
parser.parse("one two three")
def test_file(capsys):
"""
'file' specifies an output file for the DebugPrinter mixin.
"""
# First use stdout
parser = ParserPython(grammar, debug=True, file=sys.stdout)
out, err = capsys.readouterr()
parser.dprint('this is stdout')
out, err = capsys.readouterr()
assert out == 'this is stdout\n'
assert err == ''
# Now use stderr
parser = ParserPython(grammar, debug=False, file=sys.stderr)
out, err = capsys.readouterr()
parser.dprint('this is stderr')
out, err = capsys.readouterr()
assert out == ''
assert err == 'this is stderr\n'
| [
29113,
29113,
4242,
21017,
198,
2,
6530,
25,
1332,
62,
48610,
62,
37266,
198,
2,
32039,
25,
6208,
329,
30751,
10007,
13,
198,
2,
6434,
25,
46157,
371,
13,
1024,
13881,
47297,
38325,
1279,
36274,
42743,
390,
13881,
17215,
5161,
308,
45... | 2.833866 | 939 |
from dataclasses import dataclass
from typing import Optional
@dataclass
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
32233,
628,
198,
31,
19608,
330,
31172,
198
] | 3.75 | 20 |
import pytest
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient
@pytest.fixture(autouse=True)
@pytest.fixture
@pytest.fixture
| [
11748,
12972,
9288,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
1334,
62,
30604,
13,
9288,
1330,
3486,
2149,
75,
1153,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
2306,
1076,
28,
1782... | 2.803279 | 61 |
from __future__ import absolute_import
import pytz
from datetime import datetime
from django.utils import timezone
from mock import patch
from sentry.testutils import AcceptanceTestCase, SnubaTestCase
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
12972,
22877,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
6738,
15290,
1330,
8529,
198,
198,
6738,
1908,
563,
... | 3.796296 | 54 |
#1로 만들기
x=26
d = [0] * 30001
for i in range(2, x+1):
d[i]=d[i-1]+1
if i%2 == 0:
d[i]=min(d[i],d[i//2]+1)
if i%3 == 0:
d[i]=min(d[i],d[i//3]+1)
if i%5 ==0:
d[i]=min(d[i],d[i//5]+1)
print(d[x])
| [
2,
16,
167,
94,
250,
31619,
100,
234,
167,
241,
97,
166,
116,
108,
198,
87,
28,
2075,
198,
198,
67,
796,
685,
15,
60,
1635,
5867,
486,
198,
198,
1640,
1312,
287,
2837,
7,
17,
11,
2124,
10,
16,
2599,
198,
220,
220,
220,
288,
... | 1.335227 | 176 |
from . import server
from . import client
from . import model
def serve(kelner_model, host="127.0.0.1", port=server.KELNER_PORT):
"""
Serves the loaded kelner_model
"""
k_server = server.KelnerServer(kelner_model)
return k_server.serve_http(host, port)
| [
6738,
764,
1330,
4382,
198,
6738,
764,
1330,
5456,
198,
6738,
764,
1330,
2746,
628,
198,
4299,
4691,
7,
7750,
1008,
62,
19849,
11,
2583,
2625,
16799,
13,
15,
13,
15,
13,
16,
1600,
2493,
28,
15388,
13,
42,
3698,
21479,
62,
15490,
2... | 2.59434 | 106 |
from setuptools import setup, find_packages
setup(name='calltrak',
version='1.0',
py_modules = ['calltrak'],
) | [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
40406,
7,
3672,
11639,
13345,
44195,
3256,
198,
220,
220,
220,
220,
220,
2196,
11639,
16,
13,
15,
3256,
198,
220,
220,
220,
220,
220,
12972,
62,
18170,
796,
37250,
... | 2.5625 | 48 |
import pandas as pd
import torch
from torch import nn
from torch.utils.data import DataLoader
from app.data_loading.bow_data_loading import BowMovieSentimentDataset
from app.embeddings.bag_of_words import BagOfWords
from app.models.bow_classifier import BowClassifier
from app.preprocessing.preprocessor import Preprocessor
from app.trainers.bow_classifier_trainer import test
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
198,
6738,
598,
13,
7890,
62,
25138,
13,
8176,
62,
7890,
62,
25138,
1330,
9740,
25097,... | 3.53271 | 107 |
# encoding: utf-8
import argparse
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib import animation
plt.rcParams['font.family'] = 'IPAPGothic'
plt.rcParams['font.size'] = 11
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['xtick.top'] = True
plt.rcParams['ytick.right'] = True
plt.rcParams['xtick.major.width'] = 1.0
plt.rcParams['ytick.major.width'] = 1.0
plt.rcParams['axes.linewidth'] = 1.0
plt.rcParams['figure.figsize'] = (8, 7)
plt.rcParams['figure.dpi'] = 100
plt.rcParams['figure.subplot.hspace'] = 0.3
plt.rcParams['figure.subplot.wspace'] = 0.3
if __name__ == '__main__':
main()
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
11748,
1822,
29572,
198,
11748,
3108,
8019,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
2603,
29487,
8019,
1330,
11034,
... | 2.318493 | 292 |
"""
repository.py
Author: Joseph Maclean Arhin
"""
import os
import click
from jinja2 import Template
from .utils import convert_to_camelcase, remove_suffix, add_to_init
def create_repository(path, name, is_sql=True):
"""
This method creates a repository in the rootdir/repositories
directory with the name specified.
"""
name = name.lower()
file_dir = os.path.join(path, "repositories")
if not os.path.exists(file_dir):
click.echo(click.style(f"cannot find models in {path}", fg="red"))
file_name = f"{name}.py"
repo_name = convert_to_camelcase(name)
model_name = remove_suffix(name, "repository")
template_string = get_template_string(is_sql)
template = Template(template_string)
data = template.render(repo_name=repo_name, model_name=model_name.capitalize())
file_path = os.path.join(file_dir, file_name)
if not os.path.exists(file_path):
with open(file_path, "w", encoding="UTF-8") as file:
file.write(data)
add_to_init(file_dir, f"{name}", f"{repo_name}")
else:
click.echo(f"{name}.py exists")
def get_template_string(is_sql):
"""
Generate template string
:param sql:
:return:
"""
if is_sql:
template_string = """from flask_easy.repository import SqlRepository
from app.models import {{model_name}}
class {{repo_name}}(SqlRepository):
model = {{model_name}}
"""
else:
template_string = """from flask_easy.repository import MongoRepository
from app.models import {{model_name}}
class {{repo_name}}(MongoRepository):
model = {{model_name}}
"""
return template_string
| [
37811,
198,
260,
1930,
37765,
13,
9078,
198,
198,
13838,
25,
7212,
4100,
13087,
943,
20079,
198,
37811,
198,
11748,
28686,
198,
11748,
3904,
198,
6738,
474,
259,
6592,
17,
1330,
37350,
198,
198,
6738,
764,
26791,
1330,
10385,
62,
1462,
... | 2.527607 | 652 |
import math
from oh import ConfigDict
| [
11748,
10688,
198,
6738,
11752,
1330,
17056,
35,
713,
628,
628
] | 3.727273 | 11 |
"""
Module with helpful functions for general use.
"""
# Core imports
from datetime import date
from re import sub as regex_sub
from sys import stdout
from sys import stderr
# Project imports
from . import constants
def parse_date(date_string):
"""
Parses a date from a string in DD/MM/YYYY format.
:param str date_string: A string containing a date in DD/MM/YYYY format.
:return: A datetime.date parsed with the specified date.
:raises ValueError: if the input is not in the proper date format.
"""
try:
day, month, year = date_string.strip().split('/')
return date(int(year), int(month), int(day))
except (ValueError, AttributeError):
raise ValueError("Input '{}' is not a properly formatted date (must be in DD/MM/YYY format)"
"".format(date_string))
def format_date(dt):
"""
Formats date as a brazilian date (DD/MM/YYYY).
:param datetime.date dt: The date to be formatted.
:return str: The formatted date, as a string.
"""
return dt.strftime('%d/%m/%Y')
def format_for_terminal(string, effect):
"""
Formats text to be output to a terminal.
:param str string: The string to the formatted.
:param str effect: The effect to be used. Some effects are already predefiend present in the
constant package as TERMINAL_*.
"""
return effect + string + constants.TERMINAL_END
def remove_terminal_formatting(string):
"""Removes terminal-formatting characters from a string."""
return regex_sub('\\033\[[0-9]+m', '', string)
def write_plain(string):
"""
Writes to the standard output stream with a newline at the end.
:param str string: The string to the printed to the stdout.
"""
stdout.write(string + '\n')
def write_pretty(string, effect):
"""
Writes pretty (colored, bold, underlined) text to the standard output stream.
:param str string: The string to the printed to the stdout.
:param str effect: The effect to be used. Some effects are already predefiend present in the
constant package as TERMINAL_*.
"""
stdout.write(effect + string + constants.TERMINAL_END)
def write_error(string):
"""
Writes to the standard error stream with a newline at the end.
:param str string: The string to the printed to the stderr.
"""
stderr.write(string + '\n')
| [
37811,
198,
26796,
351,
7613,
5499,
329,
2276,
779,
13,
198,
37811,
198,
198,
2,
7231,
17944,
198,
6738,
4818,
8079,
1330,
3128,
198,
6738,
302,
1330,
850,
355,
40364,
62,
7266,
198,
6738,
25064,
1330,
14367,
448,
198,
6738,
25064,
13... | 2.825527 | 854 |
from fastapi import APIRouter
from application.controller import appointment as AppointmentController
from application.models.schema import appointment as AppointmentSchema
from application.models.schema.utils import SuccessResponse
router = APIRouter(prefix='/appointments', tags=['appointments'])
@router.post("/create/{patient_id}", response_model=AppointmentSchema.Appointment)
@router.get("/delete/{id}", response_model=SuccessResponse)
| [
6738,
3049,
15042,
1330,
3486,
4663,
39605,
198,
198,
6738,
3586,
13,
36500,
1330,
12557,
355,
2034,
49805,
22130,
198,
6738,
3586,
13,
27530,
13,
15952,
2611,
1330,
12557,
355,
2034,
49805,
27054,
2611,
198,
6738,
3586,
13,
27530,
13,
... | 3.642276 | 123 |
"""changing module to not nullable
Revision ID: 6079f2fae734
Revises: 6de7e8a83d66, 1db87855f77c
Create Date: 2021-06-10 17:32:53.546706
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from sqlalchemy.dialects import mysql
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '6079f2fae734'
down_revision = ('6de7e8a83d66', '1db87855f77c')
branch_labels = None
depends_on = None
| [
37811,
22954,
8265,
284,
407,
9242,
540,
198,
198,
18009,
1166,
4522,
25,
718,
2998,
24,
69,
17,
69,
3609,
22,
2682,
198,
18009,
2696,
25,
718,
2934,
22,
68,
23,
64,
5999,
67,
2791,
11,
352,
9945,
23,
3695,
2816,
69,
3324,
66,
1... | 3.148936 | 376 |
import webbrowser
from mplus.moduls.response import Selector
from mplus.utils.data import search
import re as _re
def re(response, regex, replace_entities=False, flags=_re.S):
"""
@summary: 正则匹配
注意:网页源码<a class='page-numbers'... 会被处理成<a class="page-numbers" ; 写正则时要写<a class="(.*?)"。 但不会改非html的文本引号格式
为了使用方便,正则单双引号自动处理为不敏感
---------
@param regex: 正则或者re.compile
@param flags: re.S ...
@param replace_entities: 为True时 去掉 等字符, 转义"为 " 等, 会使网页结构发生变化。如在网页源码中提取json, 建议设置成False
---------
@result: 列表
"""
# 将单双引号设置为不敏感
if isinstance(regex, str):
regex = _re.sub("['\"]", "['\"]", regex)
return Selector(response.text).re(regex, replace_entities, flags=flags)
def re_first(response, regex, default=None, replace_entities=False, flags=_re.S):
"""
@summary: 正则匹配
注意:网页源码<a class='page-numbers'... 会被处理成<a class="page-numbers" ; 写正则时要写<a class="(.*?)"。 但不会改非html的文本引号格式
为了使用方便,正则单双引号自动处理为不敏感
---------
@param regex: 正则或者re.compile
@param default: 未匹配到, 默认值
@param flags: re.S ...
@param replace_entities: 为True时 去掉 等字符, 转义"为 " 等, 会使网页结构发生变化。如在网页源码中提取json, 建议设置成False
---------
@result: 第一个值或默认值
"""
# 将单双引号设置为不敏感
if isinstance(regex, str):
regex = _re.sub("['\"]", "['\"]", regex)
return Selector(response.text).re_first(regex, default, replace_entities, flags=flags)
| [
11748,
3992,
40259,
198,
6738,
285,
9541,
13,
4666,
5753,
13,
26209,
1330,
9683,
273,
198,
6738,
285,
9541,
13,
26791,
13,
7890,
1330,
2989,
198,
11748,
302,
355,
4808,
260,
628,
628,
628,
628,
628,
628,
628,
198,
4299,
302,
7,
2620... | 1.381226 | 1,044 |
# factotum.py - Plan 9 factotum integration for Mercurial
#
# Copyright (C) 2012 Steven Stallion <sstallion@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''http authentication with factotum
This extension allows the factotum(4) facility on Plan 9 from Bell Labs
platforms to provide authentication information for HTTP access. Configuration
entries specified in the auth section as well as authentication information
provided in the repository URL are fully supported. If no prefix is specified,
a value of "*" will be assumed.
By default, keys are specified as::
proto=pass service=hg prefix=<prefix> user=<username> !password=<password>
If the factotum extension is unable to read the required key, one will be
requested interactively.
A configuration section is available to customize runtime behavior. By
default, these entries are::
[factotum]
executable = /bin/auth/factotum
mountpoint = /mnt/factotum
service = hg
The executable entry defines the full path to the factotum binary. The
mountpoint entry defines the path to the factotum file service. Lastly, the
service entry controls the service name used when reading keys.
'''
from __future__ import absolute_import
import os
from mercurial.i18n import _
from mercurial import (
error,
httpconnection,
registrar,
url,
util,
)
urlreq = util.urlreq
passwordmgr = url.passwordmgr
ERRMAX = 128
_executable = _mountpoint = _service = None
configtable = {}
configitem = registrar.configitem(configtable)
configitem('factotum', 'executable',
default='/bin/auth/factotum',
)
configitem('factotum', 'mountpoint',
default='/mnt/factotum',
)
configitem('factotum', 'service',
default='hg',
)
@monkeypatch_method(passwordmgr)
| [
2,
1109,
313,
388,
13,
9078,
532,
5224,
860,
1109,
313,
388,
11812,
329,
12185,
333,
498,
198,
2,
198,
2,
15069,
357,
34,
8,
2321,
8239,
35719,
295,
1279,
82,
32989,
295,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
770,
1430,
318,
... | 3.524927 | 682 |
#!/usr/bin/env python3
import os
import cv2
import pickle
import os.path as osp
import utils
aruco = cv2.aruco
def pose_esitmation(
frame,
dictionary,
marker_length,
camera_matrix,
dist_coeffs):
"""Estimates poses of detected markers in the frame."""
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
parameters = aruco.DetectorParameters_create()
corners, ids, _ = aruco.detectMarkers(
gray,
dictionary,
parameters=parameters,
cameraMatrix=camera_matrix,
distCoeff=dist_coeffs)
if ids is None:
print("Not detect any markers.")
return None
# if markers are detected
if len(corners) > 0:
for i in range(0, len(ids)):
# estimate pose of each marker and return the values rvec and tvec
rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
corners[i], marker_length, camera_matrix, dist_coeffs)
# draw a square around the markers
aruco.drawDetectedMarkers(frame, corners)
# draw Axis
aruco.drawAxis(frame, camera_matrix, dist_coeffs, rvec, tvec, 0.01)
return frame
def estimate_marker_pose_video(
dictionary,
marker_length,
video_path,
camera_matrix,
dist_coeffs,
isShow=True,
isSave=True,
savename=None,
savedirpath=None):
"""Reads a video and saves and/or shows the result images."""
cap = cv2.VideoCapture(video_path)
cnt = 0
while(cap.isOpened()):
cnt += 1
ret, frame = cap.read()
if not ret:
break
frame = pose_esitmation(
frame, dictionary, marker_length, camera_matrix, dist_coeffs)
if frame is None:
continue
if isSave:
if savename is None or savedirpath is None:
print("Error: Please specify save marker path.")
return -1
saveimg_path = osp.join(
savedirpath, str(savename)+'_'+str(cnt)+'.png')
cv2.imwrite(saveimg_path, frame)
if isShow:
utils.imshow(img=frame, wsec=10, width=1000)
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
args = utils.get_options()
videos_dirpath = args.in_dir
videos_dirpath = osp.join(
osp.dirname(__file__),
videos_dirpath)
if not osp.exists(videos_dirpath):
print("Not found directory for video files...")
exit()
cam_param_path = osp.join(
osp.dirname(__file__), args.camera_param_path)
with open(cam_param_path, 'rb') as f:
camera_params = pickle.load(f)
cameramat, distcoeff, rvecs, tvecs, stdIn, stdEx = camera_params
# delete files under save dir and make save dir
resimg_dirpath = osp.join(
osp.dirname(__file__), args.out_dir)
if osp.exists(resimg_dirpath):
# recognize any extentions
resimg_paths, resimg_names = utils.get_file_paths(
resimg_dirpath, '*')
[os.remove(mpath) for mpath in resimg_paths]
os.makedirs(resimg_dirpath, exist_ok=True)
marker_length = 0.02 # [m]
video_paths, video_names = utils.get_file_paths(videos_dirpath, '*')
for i, (v_path, v_name) in enumerate(zip(video_paths, video_names)):
if not (osp.splitext(v_name)[1] in ['.mp4', '.avi']):
print("Check file extention: "+v_path)
continue
estimate_marker_pose_video(
utils.get_aruco_dict(args.aruco_dict),
marker_length,
v_path,
cameramat,
distcoeff,
savename=i,
savedirpath=resimg_dirpath)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28686,
198,
11748,
269,
85,
17,
198,
11748,
2298,
293,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
198,
198,
11748,
3384,
4487,
198,
198,
11493,
1073,
796,
269,
85,
... | 2.058758 | 1,804 |
import frappe
import os
import json
import sys
# bench execute mfi_customization.mfi.patch.migrate_patch.get_custom_role_permission
# bench execute mfi_customization.mfi.patch.migrate_patch.set_custom_role_permission
| [
11748,
5306,
27768,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
25064,
628,
198,
2,
220,
220,
7624,
12260,
285,
12463,
62,
23144,
1634,
13,
76,
12463,
13,
17147,
13,
76,
42175,
62,
17147,
13,
1136,
62,
23144,
62,
18090,
62,
525,... | 3 | 75 |
# Copyright maintained by EleutherAI. Originally from https://github.com/EleutherAI/github-downloader
import chardet
import magic
import lm_dataformat as lmd
import os
import random
import sys
import traceback
import shutil
import csv
import json
from multiprocessing import cpu_count, Pool
from tqdm import tqdm
import argparse
import subprocess
from itertools import repeat
bad_extensions = [
'app',
'bin',
'bmp',
'bz2',
'class',
'csv',
'dat',
'db',
'dll',
'dylib',
'egg',
'eot',
'exe',
'gif',
'gitignore',
'glif',
'gradle',
'gz',
'ico',
'jar',
'jpeg',
'jpg',
'lo',
'lock',
'log',
'mp3',
'mp4',
'nar',
'o',
'ogg',
'otf',
'p',
'pdf',
'png',
'pickle',
'pkl',
'pyc',
'pyd',
'pyo',
'rkt',
'so',
'ss',
'svg',
'tar',
'tsv',
'ttf',
'war',
'webm',
'woff',
'woff2',
'xz',
'zip',
'zst'
]
# load programming language extensions from json file
with open("./Programming_Languages_Extensions.json", "r") as f:
data = json.load(f)
lang_exts = []
for i in data:
if "extensions" not in i:
continue
lang_exts.extend(i["extensions"])
mime = magic.Magic(mime=True)
if __name__ == '__main__':
args = process_args() # parse args
verbose = args.verbose
# make output dirs
if '.tmp' not in os.listdir():
os.makedirs('.tmp')
if 'github_data' not in os.listdir():
os.makedirs('github_data')
# read repo data to a tuple (reponame, n_stars, language)
with open('github_repositories.csv', 'r') as f:
csv_reader = csv.reader(f)
repo_data = list(map(tuple, csv_reader))
# filter by number of stars
if args.n_stars != -1:
repo_data = filter_by_stars(repo_data, args.n_stars)
repo_data.sort()
random.seed(420)
random.shuffle(repo_data)
n_threads = cpu_count() * 3 if args.n_threads == -1 else args.n_threads
chunk_size = n_threads * 3 if args.chunk_size == -1 else args.chunk_size
assert n_threads != 0
# do work
repo_chunks = split_into_chunks(repo_data, chunk_size)
archive_name = 'github_data'
ar = lmd.Archive(archive_name)
pool = Pool(n_threads)
pbar = tqdm(repo_chunks, total=len(repo_chunks))
success_hist = []
for count, chunk in enumerate(pbar):
repos_out = pool.starmap(process_repo_list,
zip(chunk, repeat(args.clone_timeout), repeat(args.processing_timeout)))
not_none = 0
none = 0
for repo in repos_out:
if repo is not None:
not_none += 1
for f in repo:
ar.add_data(f[0], meta=f[1])
else:
none += 1
# remove any leftover files
subprocess.Popen("rm -rfv .tmp && mkdir .tmp", shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
if count % args.commit_freq == 0:
ar.commit()
success_hist.append((not_none / len(repos_out)) * 100)
success_rate = sum(success_hist) / len(success_hist)
pbar.set_postfix({"Success Rate": success_rate})
ar.commit() # final commit
| [
2,
15069,
9456,
416,
15987,
12866,
20185,
13,
19486,
422,
3740,
1378,
12567,
13,
785,
14,
28827,
12866,
20185,
14,
12567,
12,
15002,
263,
198,
198,
11748,
442,
446,
316,
198,
11748,
5536,
198,
11748,
300,
76,
62,
7890,
18982,
355,
300... | 2.105875 | 1,549 |
import unittest
from selenium import webdriver
from time import sleep
if __name__ == '__main__':
unittest.main(verbosity = 2) | [
11748,
555,
715,
395,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
640,
1330,
3993,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
7,
19011,
... | 2.934783 | 46 |
from adaptivefiltering.pdal import *
from adaptivefiltering.paths import get_temporary_filename
from . import dataset, minimal_dataset
import jsonschema
import os
import pyrsistent
import pytest
_pdal_filter_list = [
"filters.csf",
"filters.elm",
"filters.outlier",
"filters.pmf",
"filters.skewnessbalancing",
"filters.smrf",
]
@pytest.mark.parametrize("f", _pdal_filter_list)
@pytest.mark.slow
@pytest.mark.parametrize("f", _pdal_filter_list)
| [
6738,
29605,
10379,
20212,
13,
30094,
282,
1330,
1635,
198,
6738,
29605,
10379,
20212,
13,
6978,
82,
1330,
651,
62,
11498,
5551,
62,
34345,
198,
198,
6738,
764,
1330,
27039,
11,
10926,
62,
19608,
292,
316,
198,
198,
11748,
44804,
684,
... | 2.469072 | 194 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-07 14:31
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
20,
319,
2177,
12,
940,
12,
2998,
1478,
25,
3132,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.888889 | 81 |
from urlparse import urljoin
from django.conf import settings
from django.contrib import admin
from django import forms
from django.db import models
try:
from funfactory.urlresolvers import reverse
except ImportError:
from django.core.urlresolvers import reverse
from .models import (Badge, Award, Nomination, Progress, DeferredAward)
UPLOADS_URL = getattr(settings, 'BADGER_MEDIA_URL',
urljoin(getattr(settings, 'MEDIA_URL', '/media/'), 'uploads/'))
show_unicode.short_description = "Display"
show_image.allow_tags = True
show_image.short_description = "Image"
related_deferredawards_link.allow_tags = True
related_deferredawards_link.short_description = "Deferred Awards"
related_awards_link.allow_tags = True
related_awards_link.short_description = "Awards"
badge_link.allow_tags = True
badge_link.short_description = 'Badge'
claim_code_link.allow_tags = True
claim_code_link.short_description = "Claim Code"
award_link.allow_tags = True
award_link.short_description = 'award'
for x in ((Badge, BadgeAdmin),
(Award, AwardAdmin),
(Nomination, NominationAdmin),
(Progress, ProgressAdmin),
(DeferredAward, DeferredAwardAdmin),):
admin.site.register(*x)
| [
6738,
19016,
29572,
1330,
19016,
22179,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198... | 2.821918 | 438 |
import pytest
import grappa
from grappa.config import Config
| [
11748,
12972,
9288,
198,
11748,
21338,
64,
198,
6738,
21338,
64,
13,
11250,
1330,
17056,
628
] | 3.875 | 16 |
# Generated by Django 3.0.5 on 2020-05-20 13:05
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
20,
319,
12131,
12,
2713,
12,
1238,
1511,
25,
2713,
198,
198,
11748,
334,
27112,
198,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
198,
6738,
42625,
14208,
13,
10414,... | 2.982456 | 57 |
a = input('Digite Algo: ')
print(f'O tipo primitivo desse valor é: ', type(a))
print(f'Só tem espaços? {a.isspace()} ')
print(f'Isso é um numero? {a.isnumeric()} ')
print(f'Isso é Alfabético? {a.isalpha()} ')
print(f'Isso é Alfanumerico? {a.isnumeric()} ')
print(f'Está em letras maiúsculas? {a.isupper()} ')
print(f'Está em letras minúsculas? {a.islower()} ')
print(f'Está capitalizada? {a.istitle()} ')
| [
64,
796,
5128,
10786,
19511,
578,
978,
2188,
25,
705,
8,
198,
4798,
7,
69,
6,
46,
8171,
78,
2684,
270,
23593,
748,
325,
1188,
273,
38251,
25,
46083,
2099,
7,
64,
4008,
198,
4798,
7,
69,
6,
50,
10205,
2169,
1658,
8957,
16175,
418... | 2.097938 | 194 |
import tensorflow as tf
from preppy import BibPreppy
def expand(x):
'''
Hack. Because padded_batch doesn't play nice with scalars, so we expand the scalar to a vector of length 1
:param x:
:return:
'''
x['length'] = tf.expand_dims(tf.convert_to_tensor(x['length']), 0)
x['book_id'] = tf.expand_dims(tf.convert_to_tensor(x['book_id']), 0)
return x
def deflate(x):
'''
Undo Hack. We undo the expansion we did in expand
'''
x['length'] = tf.squeeze(x['length'])
x['book_id'] = tf.squeeze(x['book_id'])
return x
def make_dataset(path, batch_size=128):
'''
Makes a Tensorflow dataset that is shuffled, batched and parsed according to BibPreppy.
You can chain all the lines here, I split them into separate calls so I could comment easily
:param path: The path to a tf record file
:param path: The size of our batch
:return: a Dataset that shuffles and is padded
'''
# Read a tf record file. This makes a dataset of raw TFRecords
dataset = tf.data.TFRecordDataset([path])
# Apply/map the parse function to every record. Now the dataset is a bunch of dictionaries of Tensors
dataset = dataset.map(BibPreppy.parse, num_parallel_calls=5)
# Shuffle the dataset
dataset = dataset.shuffle(buffer_size=10000)
# In order the pad the dataset, I had to use this hack to expand scalars to vectors.
dataset = dataset.map(expand)
# Batch the dataset so that we get batch_size examples in each batch.
# Remember each item in the dataset is a dict of tensors, we need to specify padding for each tensor seperatly
dataset = dataset.padded_batch(batch_size, padded_shapes={
"book_id": 1, # book_id is a scalar it doesn't need any padding, its always length one
"length": 1, # Likewise for the length of the sequence
"seq": tf.TensorShape([None]) # but the seqeunce is variable length, we pass that information to TF
})
# Finally, we need to undo that hack from the expand function
dataset = dataset.map(deflate)
return dataset
| [
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
662,
14097,
1330,
43278,
6719,
14097,
628,
198,
4299,
4292,
7,
87,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
18281,
13,
4362,
44582,
62,
43501,
1595,
470,
711,
3621,... | 2.846995 | 732 |
from .base_datatype_translator import BaseDatatypeTranslator
class BooleanTranslator(BaseDatatypeTranslator):
"""A Translator class for converting fields into DynamoDB booleans
For example::
translator = BooleanTranslator(Boolean())
translator.to_dynamodb(True)
{'BOOL': True}
translator.to_cerami({'BOOL': True})
True
"""
| [
6738,
764,
8692,
62,
19608,
265,
2981,
62,
7645,
41880,
1330,
7308,
27354,
265,
2981,
8291,
41880,
198,
198,
4871,
41146,
8291,
41880,
7,
14881,
27354,
265,
2981,
8291,
41880,
2599,
198,
220,
220,
220,
37227,
32,
3602,
41880,
1398,
329,... | 2.587838 | 148 |
import numpy as np
import random
| [
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198
] | 3.666667 | 9 |
import numpy as np
import librosa
import os
import scipy
import json
with open('train-test.json') as fopen:
wavs = json.load(fopen)['train']
if not os.path.exists('augment'):
os.makedirs('augment')
for no, wav in enumerate(wavs):
try:
root, ext = os.path.splitext(wav)
if (no + 1) % 100 == 0:
print(no + 1, root, ext)
root = root.replace('/', '<>')
root = '%s/%s' % ('augment', root)
sample_rate, samples = scipy.io.wavfile.read(wav)
aug = change_pitch_speech(samples)
librosa.output.write_wav(
'%s-1%s' % (root, ext),
aug.astype('float32'),
sample_rate,
norm = True,
)
aug = change_amplitude(samples)
librosa.output.write_wav(
'%s-2%s' % (root, ext),
aug.astype('float32'),
sample_rate,
norm = True,
)
aug = add_noise(samples)
librosa.output.write_wav(
'%s-3%s' % (root, ext),
aug.astype('float32'),
sample_rate,
norm = True,
)
aug = add_hpss(samples)
librosa.output.write_wav(
'%s-4%s' % (root, ext),
aug.astype('float32'),
sample_rate,
norm = True,
)
aug = strech(samples)
librosa.output.write_wav(
'%s-5%s' % (root, ext),
aug.astype('float32'),
sample_rate,
norm = True,
)
aug = random_augmentation(samples)
librosa.output.write_wav(
'%s-6%s' % (root, ext),
aug.astype('float32'),
sample_rate,
norm = True,
)
except Exception as e:
print(e)
pass
| [
11748,
299,
32152,
355,
45941,
198,
11748,
9195,
4951,
64,
198,
11748,
28686,
198,
11748,
629,
541,
88,
198,
11748,
33918,
628,
628,
628,
628,
198,
4480,
1280,
10786,
27432,
12,
9288,
13,
17752,
11537,
355,
277,
9654,
25,
198,
220,
22... | 1.763366 | 1,010 |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from knack.util import CLIError
| [
2,
16529,
35937,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321,
13,
198,
2,
198,
2,
6127,
7560,
... | 5.590909 | 88 |
# import necessary libraries
import numpy as np
import pandas as pd
from scipy.signal import find_peaks
import matplotlib.pyplot as plt
import csv
# global variable definition
n=100 # number of sample points
deg1 = 30 # polynomial order for fit
xs=np.linspace(0,1,n) # spacing for data
x= np.arange(0, n, 1)
# input data generation
def data_f (f, size=None):
    """
    Generate a noisy sine-wave signal.

    Parameters
    ----------
    f: frequency of the signal (the sine argument spans [0, f*pi])
    size: number of sample points; defaults to the module-level ``n``.
        Kept as a keyword argument so existing ``data_f(f)`` calls behave
        exactly as before.

    Returns
    -------
    numpy array of length ``size``: Gaussian noise (sigma = 0.5) added to
    the sine wave
    """
    if size is None:
        size = n  # fall back to the module-wide sample count
    return np.random.normal(0, 0.5, size)+np.sin (np.linspace(0, f*np.pi, size))
# Sample signal (5 half-periods of sine plus noise) used by the rest of the script.
data = data_f(5)
# curve fitting
def curve_fit(d, deg=20, grid=None):
    """
    Make curve fitting with a Legendre-polynomial series (least squares).

    Parameters
    ----------
    d: data to be fitted
    deg: polynomial order for fit
        default value = 20
    grid: sample positions of ``d``; defaults to the module-level ``xs``.
        Kept as a keyword argument so existing ``curve_fit(d)`` calls behave
        exactly as before.

    Returns
    -------
    g: fitted curve data
    error: pointwise residual ``d - g`` of the fit results
    """
    if grid is None:
        grid = xs  # module-wide sample grid
    # Vandermonde matrix in the Legendre basis; least-squares solve for the
    # series coefficients, then evaluate the series on the grid.
    V = np.polynomial.legendre.legvander(grid, deg)
    coeffs = np.linalg.lstsq(V, d, rcond=None)[0]
    g = np.polynomial.legendre.legval(grid, coeffs)
    # (dropped unused locals error2 and c_error from the original)
    return g, d - g
# Fit the noisy signal and keep the pointwise residuals.
curve_data, er = curve_fit(data)
# NOTE(review): err_max is computed but never used below — confirm intended.
err_max = max(er)
#peak finder
peaks, _ = find_peaks(curve_data)
print(peaks,curve_data[peaks])
# NOTE(review): only the two header rows are ever written to peaks.csv —
# the per-peak values (x, value, error) are never appended. Confirm whether
# a data-row loop was lost here.
with open('peaks.csv', 'w', newline='') as csvfile:
    spamwriter = csv.writer(csvfile, delimiter=' ',quotechar='|', quoting=csv.QUOTE_MINIMAL)
    spamwriter.writerow(['peaks'] + ['data'])
    spamwriter.writerow(['peaks(x)', 'peaks value', 'error'])
# First figure: raw data, fitted curve, detected peaks and error bars.
fig, axes = plt.subplots()
axes.plot(data,label='original data',linestyle='solid',linewidth=1,color="k")
axes.plot(peaks, curve_data[peaks], color='r', marker='x', linestyle='',linewidth=5, markersize=14,label='identified peaks')
axes.plot(curve_data,linestyle='--',label='fitting')
# put error bars on the points, but put no lines between the errorbars
# plotting the different output values
axes.errorbar(x,data, yerr=er, ecolor='y', elinewidth=1, linestyle='',label='errorbar')
axes.set_xlabel('wavelength [$nm$]', size=15)
axes.set_ylabel('r', size=15)
axes.set_title('data and fitting', size=20)
axes.legend(loc=0)
fig.savefig('data and fitting-random.png')
# Second figure: the residuals alone.
fig, axes = plt.subplots()
axes.plot(er, label="error")
axes.legend(loc=0)
axes.set_xlabel('wavelength [$nm$]', size=15)
axes.set_ylabel('error', size=15)
axes.set_title('error', size=20)
#axes.legend('error')
fig.savefig('error_random.png')
2,
1330,
3306,
12782,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
629,
541,
88,
13,
12683,
282,
1330,
1064,
62,
431,
4730,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
1... | 2.458706 | 1,005 |
import json
import requests
from lxml import html
import redis
from pymongo import MongoClient
import datetime
# 1. Download the coursebook directly and store it in the database
# 2. Download the coursebook reportmonkey directly and store it in the database
# 3. Download the coursebook, obtain the reportmonkey key, then download the
#    coursebook reportmonkey and store it in the database
# TODO three parsers
# TODO two downloaders
# Script entry point: build the spider and run the crawl.
# NOTE(review): CouseBookSpider is neither defined nor imported in this chunk
# (the name also looks like a typo for "CourseBookSpider") — confirm it is
# provided elsewhere in the module.
if __name__ == "__main__":
    coursebook_spider = CouseBookSpider()
    coursebook_spider.run()
| [
11748,
33918,
198,
11748,
7007,
198,
6738,
300,
19875,
1330,
27711,
198,
11748,
2266,
271,
198,
6738,
279,
4948,
25162,
1330,
42591,
11792,
198,
11748,
4818,
8079,
628,
198,
2,
352,
13,
33566,
112,
162,
236,
98,
10310,
233,
164,
121,
... | 1.972527 | 182 |
"""Top-level objects and functions offered by the Skyfield library.
Importing this library is not always the fastest way to use a Skyfield
feature, since importing this module involves importing almost the
entirety of Skyfield and its dependencies, but is the most convenient
way for most users to use Skyfield's main features.
"""
from datetime import datetime
from .constants import B1950, T0, pi, tau
from .constellationlib import load_constellation_map, load_constellation_names
from .errors import DeprecationError
from .iokit import Loader, load_file
from .planetarylib import PlanetaryConstants
from .positionlib import position_from_radec, position_of_radec
from .starlib import Star
from .sgp4lib import EarthSatellite
from .timelib import (
GREGORIAN_START, GREGORIAN_START_ENGLAND, Time, Timescale, utc
)
from .toposlib import Topos, iers2010, wgs84
from .units import Angle, Distance, Velocity, wms
# Ready-made Loader that stores downloaded data files in the current directory.
load = Loader('.')
# Sign conventions for coordinates: north/east positive, south/west negative.
N = E = +1.0
S = W = -1.0
# Explicit public API of this module.
__all__ = [
    'Angle', 'B1950', 'Distance', 'E', 'EarthSatellite',
    'GREGORIAN_START', 'GREGORIAN_START_ENGLAND',
    'Loader', 'PlanetaryConstants', 'N', 'S', 'Star', 'W',
    'T0', 'Time', 'Timescale', 'Topos', 'Velocity',
    'datetime', 'iers2010', 'load', 'load_constellation_map',
    'load_constellation_names', 'load_file',
    'position_from_radec', 'position_of_radec',
    'utc', 'pi', 'tau', 'wgs84', 'wms',
]
# An attempt at friendliest-possible deprecations:
| [
37811,
9126,
12,
5715,
5563,
290,
5499,
4438,
416,
262,
5274,
3245,
5888,
13,
198,
198,
20939,
278,
428,
5888,
318,
407,
1464,
262,
14162,
835,
284,
779,
257,
5274,
3245,
198,
30053,
11,
1201,
33332,
428,
8265,
9018,
33332,
2048,
262,... | 2.9389 | 491 |
# Static HTML error page returned to the client.
# NOTE(review): the message body is hard-coded to "Connection refused" even
# though the variable name suggests a general-purpose page — confirm intended.
general_error = """
    <html>
        <head>
        <style>
            body {
            height: 100%;
            width: 100%;
            background-color: #9E9E9E;
            }
            .box {
            border: 1px solid green ;
            background-color: white;
            text-align: center;
            }
        </style>
        </head>
        <body>
            <div class="box">
                <h1>An error has occurred!</h1>
                <p>Connection refused</p>
            </div>
        </body>
    </html>
"""
24622,
62,
18224,
796,
37227,
198,
220,
220,
220,
1279,
6494,
29,
198,
220,
220,
220,
220,
220,
220,
220,
1279,
2256,
29,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1279,
7635,
29,
198,
220,
220,
220,
220,
220,
... | 1.49635 | 411 |
#!/usr/bin/env python2.7
# coding=utf-8
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
# Check that the ./pants.pex was built using the passed abi specification.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import json
import os.path
import zipfile
# ANSI terminal escape sequences for colored output.
RED = "\033[31m"  # foreground red
BLUE = "\033[34m"  # foreground blue
RESET = "\033[0m"  # reset all attributes
def parse_abi_from_filename(filename):
    """This parses out the abi from a wheel filename.

    For example, `configparser-3.5.0-py2-abi3-any.whl` would return `abi3`.
    See https://www.python.org/dev/peps/pep-0425/#use for how wheel filenames are defined."""
    # PEP 425 wheel names are dash-separated; the abi tag is the
    # second-to-last component.
    dash_separated_fields = filename.split("-")
    return dash_separated_fields[-2]
# Script entry point.
# NOTE(review): main() is not defined in this chunk — confirm it is defined
# earlier in this file.
if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
13,
22,
198,
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
13130,
41689,
1628,
20420,
357,
3826,
27342,
9865,
3843,
20673,
13,
9132,
737,
198,
2,
49962,
739,
262,
24843,
13789,
11,
1062... | 2.954545 | 264 |
from solvers.ea_solver import EASolver
from solvers.math import interpolate_signal, ls_fit
from solvers.tests.base_test_case import BaseTestCase
from solvers.tests.correction_models import linear_correction
| [
6738,
1540,
690,
13,
18213,
62,
82,
14375,
1330,
412,
1921,
14375,
198,
6738,
1540,
690,
13,
11018,
1330,
39555,
378,
62,
12683,
282,
11,
43979,
62,
11147,
198,
6738,
1540,
690,
13,
41989,
13,
8692,
62,
9288,
62,
7442,
1330,
7308,
1... | 3.409836 | 61 |
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from .models import Purchase
from django.test import TestCase
from django.urls import reverse
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
764,
27530,
1330,
27637,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
... | 3.528302 | 53 |
#####################################################################################
# MIT License #
# #
# Copyright (C) 2018 Charly Lamothe #
# #
# This file is part of copyright-updater. #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#####################################################################################
from .comment_type import CommentType
from .comment_parameters import CommentParameters
from .copyright import Copyright | [
1303,
29113,
29113,
14468,
4242,
198,
1303,
17168,
13789,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 2.006993 | 1,144 |
# -*- coding: utf-8 -*-
from sqlalchemy import Boolean, Column, Date, func, Integer, select, String, Text, Unicode
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.orm import column_property, relationship
from .api_model import ApiModel
from .base import Base
from .bill_keyword import bill_keyword
from .bill_status import BillStatus
# Selectable that flattens each Bill's status_ids array into one row per
# status, numbering the rows so the original array order is preserved.
# NOTE(review): `Bill` is not imported in this chunk — confirm it is brought
# into scope elsewhere (e.g. `from .bill import Bill`).
bill_and_status = select([func.row_number().over().label('status_order'),
                          func.unnest(Bill.status_ids).label('bill_status_id'),
                          Bill.id.label('bill_id')]).alias()
# Read-only relationship Bill.statuses -> ordered BillStatus rows, using the
# flattened selectable above as the association table; exposes the reverse
# side as BillStatus.bills.
Bill.statuses = relationship("BillStatus",
                             secondary=bill_and_status,
                             primaryjoin=Bill.id == bill_and_status.c.bill_id,
                             secondaryjoin=bill_and_status.c.bill_status_id == BillStatus.id,
                             order_by=bill_and_status.c.status_order,
                             viewonly=True,
                             backref='bills')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
44161,
282,
26599,
1330,
41146,
11,
29201,
11,
7536,
11,
25439,
11,
34142,
11,
2922,
11,
10903,
11,
8255,
11,
34371,
198,
6738,
44161,
282,
26599,
13,
38969,
... | 2.432432 | 370 |
from django.urls import path
from .views import (
StudentFeePaymentCreateView,
StudentFeePaymentListView,
StudentFeePaymentDetailView,
StudentFeePaymentUpdateView,
)
# URL routes for the StudentFeePayment CRUD views: add, edit, and list.
# The detail route is currently disabled (commented out), which is why
# StudentFeePaymentDetailView is imported but unused here.
urlpatterns = [
    path(
        'add/',
        StudentFeePaymentCreateView.as_view(),
        name='StudentFeePayment_add'),
    path(
        '<int:pk>/edit/',
        StudentFeePaymentUpdateView.as_view(),
        name='StudentFeePayment_edit'),
    # path(
    #     '<int:pk>/',
    #     StudentFeePaymentDetailView.as_view(),
    #     name='StudentFeePayment_detail'),
    path(
        '', StudentFeePaymentListView.as_view(),
        name='StudentFeePayment_list'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
33571,
1330,
357,
198,
220,
220,
220,
13613,
37,
1453,
19197,
434,
16447,
7680,
11,
198,
220,
220,
220,
13613,
37,
1453,
19197,
434,
8053,
7680,
11,
198,
220,
220,
2... | 2.162866 | 307 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modeling classes for ALBERT model."""
import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Layer
from .. import PretrainedModel, register_base_model
# Public model classes exported by this module.
__all__ = [
    "AlbertPretrainedModel",
    "AlbertModel",
    "AlbertForPretraining",
    "AlbertForMaskedLM",
    "AlbertForSequenceClassification",
    "AlbertForTokenClassification",
    "AlbertForQuestionAnswering",
    "AlbertForMultipleChoice",
]
# Process-wide default float dtype (e.g. "float32"), used when casting masks.
dtype_float = paddle.get_default_dtype()
def gelu_new(x):
    """
    Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
    the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
    """
    # tanh approximation: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    inner = math.sqrt(2.0 / math.pi) * (x + 0.044715 * paddle.pow(x, 3.0))
    return 0.5 * x * (paddle.tanh(inner) + 1.0)
# Map activation-name strings (the config's "hidden_act" field) to callables.
ACT2FN = {
    "relu": F.relu,
    "gelu": F.gelu,
    "gelu_new": gelu_new,
    "tanh": F.tanh,
    "sigmoid": F.sigmoid,
    "mish": mish,  # NOTE(review): `mish` is not defined/imported in this chunk — confirm
    "linear": linear_act,  # NOTE(review): `linear_act` is not defined/imported in this chunk — confirm
    "swish": swish,  # NOTE(review): `swish` is not defined/imported in this chunk — confirm
}
class AlbertEmbeddings(Layer):
    """
    Constructs the embeddings from word, position and token_type embeddings.
    """
    # NOTE(review): only the docstring is visible here — no sublayers or
    # forward() appear in this chunk; confirm the implementation was not lost.
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores
class AlbertPretrainedModel(PretrainedModel):
    """
    An abstract class for pretrained ALBERT models. It provides ALBERT related
    `model_config_file`, `pretrained_init_configuration`, `resource_files_names`,
    `pretrained_resource_files_map`, `base_model_prefix` for downloading and
    loading pretrained models. See `PretrainedModel` for more details.
    """
    # File name of the config stored inside a saved model directory.
    model_config_file = "model_config.json"
    # Constructor kwargs for each officially released checkpoint. v1/v2 are
    # the English models (vocab 30000); the -chinese- variants use vocab 21128.
    pretrained_init_configuration = {
        "albert-base-v1": {
            "attention_probs_dropout_prob": 0.1,
            "bos_token_id": 2,
            "embedding_size": 128,
            "eos_token_id": 3,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.1,
            "hidden_size": 768,
            "initializer_range": 0.02,
            "inner_group_num": 1,
            "intermediate_size": 3072,
            "layer_norm_eps": 1e-12,
            "max_position_embeddings": 512,
            "num_attention_heads": 12,
            "num_hidden_groups": 1,
            "num_hidden_layers": 12,
            "pad_token_id": 0,
            "type_vocab_size": 2,
            "vocab_size": 30000
        },
        "albert-large-v1": {
            "attention_probs_dropout_prob": 0.1,
            "bos_token_id": 2,
            "embedding_size": 128,
            "eos_token_id": 3,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.1,
            "hidden_size": 1024,
            "initializer_range": 0.02,
            "inner_group_num": 1,
            "intermediate_size": 4096,
            "layer_norm_eps": 1e-12,
            "max_position_embeddings": 512,
            "num_attention_heads": 16,
            "num_hidden_groups": 1,
            "num_hidden_layers": 24,
            "pad_token_id": 0,
            "type_vocab_size": 2,
            "vocab_size": 30000
        },
        "albert-xlarge-v1": {
            "attention_probs_dropout_prob": 0.1,
            "bos_token_id": 2,
            "embedding_size": 128,
            "eos_token_id": 3,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.1,
            "hidden_size": 2048,
            "initializer_range": 0.02,
            "inner_group_num": 1,
            "intermediate_size": 8192,
            "layer_norm_eps": 1e-12,
            "max_position_embeddings": 512,
            "num_attention_heads": 16,
            "num_hidden_groups": 1,
            "num_hidden_layers": 24,
            "pad_token_id": 0,
            "type_vocab_size": 2,
            "vocab_size": 30000
        },
        "albert-xxlarge-v1": {
            "attention_probs_dropout_prob": 0,
            "bos_token_id": 2,
            "embedding_size": 128,
            "eos_token_id": 3,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0,
            "hidden_size": 4096,
            "initializer_range": 0.02,
            "inner_group_num": 1,
            "intermediate_size": 16384,
            "layer_norm_eps": 1e-12,
            "max_position_embeddings": 512,
            "num_attention_heads": 64,
            "num_hidden_groups": 1,
            "num_hidden_layers": 12,
            "pad_token_id": 0,
            "type_vocab_size": 2,
            "vocab_size": 30000
        },
        "albert-base-v2": {
            "attention_probs_dropout_prob": 0,
            "bos_token_id": 2,
            "embedding_size": 128,
            "eos_token_id": 3,
            "hidden_act": "gelu_new",
            "hidden_dropout_prob": 0,
            "hidden_size": 768,
            "initializer_range": 0.02,
            "inner_group_num": 1,
            "intermediate_size": 3072,
            "layer_norm_eps": 1e-12,
            "max_position_embeddings": 512,
            "num_attention_heads": 12,
            "num_hidden_groups": 1,
            "num_hidden_layers": 12,
            "pad_token_id": 0,
            "type_vocab_size": 2,
            "vocab_size": 30000
        },
        "albert-large-v2": {
            "attention_probs_dropout_prob": 0,
            "bos_token_id": 2,
            "embedding_size": 128,
            "eos_token_id": 3,
            "hidden_act": "gelu_new",
            "hidden_dropout_prob": 0,
            "hidden_size": 1024,
            "initializer_range": 0.02,
            "inner_group_num": 1,
            "intermediate_size": 4096,
            "layer_norm_eps": 1e-12,
            "max_position_embeddings": 512,
            "num_attention_heads": 16,
            "num_hidden_groups": 1,
            "num_hidden_layers": 24,
            "pad_token_id": 0,
            "type_vocab_size": 2,
            "vocab_size": 30000
        },
        "albert-xlarge-v2": {
            "attention_probs_dropout_prob": 0,
            "bos_token_id": 2,
            "embedding_size": 128,
            "eos_token_id": 3,
            "hidden_act": "gelu_new",
            "hidden_dropout_prob": 0,
            "hidden_size": 2048,
            "initializer_range": 0.02,
            "inner_group_num": 1,
            "intermediate_size": 8192,
            "layer_norm_eps": 1e-12,
            "max_position_embeddings": 512,
            "num_attention_heads": 16,
            "num_hidden_groups": 1,
            "num_hidden_layers": 24,
            "pad_token_id": 0,
            "type_vocab_size": 2,
            "vocab_size": 30000
        },
        "albert-xxlarge-v2": {
            "attention_probs_dropout_prob": 0,
            "bos_token_id": 2,
            "embedding_size": 128,
            "eos_token_id": 3,
            "hidden_act": "gelu_new",
            "hidden_dropout_prob": 0,
            "hidden_size": 4096,
            "initializer_range": 0.02,
            "inner_group_num": 1,
            "intermediate_size": 16384,
            "layer_norm_eps": 1e-12,
            "max_position_embeddings": 512,
            "num_attention_heads": 64,
            "num_hidden_groups": 1,
            "num_hidden_layers": 12,
            "pad_token_id": 0,
            "type_vocab_size": 2,
            "vocab_size": 30000
        },
        "albert-chinese-tiny": {
            "attention_probs_dropout_prob": 0.0,
            "bos_token_id": 2,
            "embedding_size": 128,
            "eos_token_id": 3,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.0,
            "hidden_size": 312,
            "initializer_range": 0.02,
            "inner_group_num": 1,
            "intermediate_size": 1248,
            "layer_norm_eps": 1e-12,
            "max_position_embeddings": 512,
            "num_attention_heads": 12,
            "num_hidden_groups": 1,
            "num_hidden_layers": 4,
            "pad_token_id": 0,
            "type_vocab_size": 2,
            "vocab_size": 21128
        },
        "albert-chinese-small": {
            "attention_probs_dropout_prob": 0.0,
            "bos_token_id": 2,
            "embedding_size": 128,
            "eos_token_id": 3,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.0,
            "hidden_size": 384,
            "initializer_range": 0.02,
            "inner_group_num": 1,
            "intermediate_size": 1536,
            "layer_norm_eps": 1e-12,
            "max_position_embeddings": 512,
            "num_attention_heads": 12,
            "num_hidden_groups": 1,
            "num_hidden_layers": 6,
            "pad_token_id": 0,
            "type_vocab_size": 2,
            "vocab_size": 21128
        },
        "albert-chinese-base": {
            "attention_probs_dropout_prob": 0,
            "bos_token_id": 2,
            "embedding_size": 128,
            "eos_token_id": 3,
            "hidden_act": "relu",
            "hidden_dropout_prob": 0,
            "hidden_size": 768,
            "initializer_range": 0.02,
            "inner_group_num": 1,
            "intermediate_size": 3072,
            "layer_norm_eps": 1e-12,
            "max_position_embeddings": 512,
            "num_attention_heads": 12,
            "num_hidden_groups": 1,
            "num_hidden_layers": 12,
            "pad_token_id": 0,
            "type_vocab_size": 2,
            "vocab_size": 21128
        },
        "albert-chinese-large": {
            "attention_probs_dropout_prob": 0,
            "bos_token_id": 2,
            "embedding_size": 128,
            "eos_token_id": 3,
            "hidden_act": "relu",
            "hidden_dropout_prob": 0,
            "hidden_size": 1024,
            "initializer_range": 0.02,
            "inner_group_num": 1,
            "intermediate_size": 4096,
            "layer_norm_eps": 1e-12,
            "max_position_embeddings": 512,
            "num_attention_heads": 16,
            "num_hidden_groups": 1,
            "num_hidden_layers": 24,
            "pad_token_id": 0,
            "type_vocab_size": 2,
            "vocab_size": 21128
        },
        "albert-chinese-xlarge": {
            "attention_probs_dropout_prob": 0,
            "bos_token_id": 2,
            "embedding_size": 128,
            "eos_token_id": 3,
            "hidden_act": "relu",
            "hidden_dropout_prob": 0,
            "hidden_size": 2048,
            "initializer_range": 0.014,
            "inner_group_num": 1,
            "intermediate_size": 8192,
            "layer_norm_eps": 1e-12,
            "max_position_embeddings": 512,
            "num_attention_heads": 16,
            "num_hidden_groups": 1,
            "num_hidden_layers": 24,
            "pad_token_id": 0,
            "type_vocab_size": 2,
            "vocab_size": 21128
        },
        "albert-chinese-xxlarge": {
            "attention_probs_dropout_prob": 0,
            "bos_token_id": 2,
            "embedding_size": 128,
            "eos_token_id": 3,
            "hidden_act": "relu",
            "hidden_dropout_prob": 0,
            "hidden_size": 4096,
            "initializer_range": 0.01,
            "inner_group_num": 1,
            "intermediate_size": 16384,
            "layer_norm_eps": 1e-12,
            "max_position_embeddings": 512,
            "num_attention_heads": 16,
            "num_hidden_groups": 1,
            "num_hidden_layers": 12,
            "pad_token_id": 0,
            "type_vocab_size": 2,
            "vocab_size": 21128
        },
    }
    # File name of the weights file inside a saved model directory.
    resource_files_names = {"model_state": "model_state.pdparams"}
    # Download URLs of the weights for each checkpoint name above.
    pretrained_resource_files_map = {
        "model_state": {
            "albert-base-v1":
            "https://bj.bcebos.com/paddlenlp/models/transformers/albert/albert-base-v1.pdparams",
            "albert-large-v1":
            "https://bj.bcebos.com/paddlenlp/models/transformers/albert/albert-large-v1.pdparams",
            "albert-xlarge-v1":
            "https://bj.bcebos.com/paddlenlp/models/transformers/albert/albert-xlarge-v1.pdparams",
            "albert-xxlarge-v1":
            "https://bj.bcebos.com/paddlenlp/models/transformers/albert/albert-xxlarge-v1.pdparams",
            "albert-base-v2":
            "https://bj.bcebos.com/paddlenlp/models/transformers/albert/albert-base-v2.pdparams",
            "albert-large-v2":
            "https://bj.bcebos.com/paddlenlp/models/transformers/albert/albert-large-v2.pdparams",
            "albert-xlarge-v2":
            "https://bj.bcebos.com/paddlenlp/models/transformers/albert/albert-xlarge-v2.pdparams",
            "albert-xxlarge-v2":
            "https://bj.bcebos.com/paddlenlp/models/transformers/albert/albert-xxlarge-v2.pdparams",
            "albert-chinese-tiny":
            "https://bj.bcebos.com/paddlenlp/models/transformers/albert/albert-chinese-tiny.pdparams",
            "albert-chinese-small":
            "https://bj.bcebos.com/paddlenlp/models/transformers/albert/albert-chinese-small.pdparams",
            "albert-chinese-base":
            "https://bj.bcebos.com/paddlenlp/models/transformers/albert/albert-chinese-base.pdparams",
            "albert-chinese-large":
            "https://bj.bcebos.com/paddlenlp/models/transformers/albert/albert-chinese-large.pdparams",
            "albert-chinese-xlarge":
            "https://bj.bcebos.com/paddlenlp/models/transformers/albert/albert-chinese-xlarge.pdparams",
            "albert-chinese-xxlarge":
            "https://bj.bcebos.com/paddlenlp/models/transformers/albert/albert-chinese-xxlarge.pdparams",
        }
    }
    # Attribute name under which the backbone AlbertModel lives on task heads.
    base_model_prefix = "transformer"
@register_base_model
class AlbertModel(AlbertPretrainedModel):
    """
    The bare Albert Model transformer outputting raw hidden-states.
    This model inherits from :class:`~paddlenlp.transformers.model_utils.PretrainedModel`.
    Refer to the superclass documentation for the generic methods.
    This model is also a Paddle `paddle.nn.Layer <https://www.paddlepaddle.org.cn/documentation
    /docs/en/api/paddle/fluid/dygraph/layers/Layer_en.html>`__ subclass. Use it as a regular Paddle Layer
    and refer to the Paddle documentation for all matter related to general usage and behavior.
    Args:
        vocab_size (int, optional):
            Vocabulary size of `inputs_ids` in `AlbertModel`. Also is the vocab size of token embedding matrix.
            Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling `AlbertModel`.
            Defaults to `30000`.
        embedding_size (int, optional):
            Dimensionality of the embedding layer. Defaults to `128`.
        hidden_size (int, optional):
            Dimensionality of the encoder layer and pooler layer. Defaults to `768`.
        num_hidden_layers (int, optional):
            Number of hidden layers in the Transformer encoder. Defaults to `12`.
        num_hidden_groups (int, optional):
            Number of hidden groups in the Transformer encoder. Defaults to `1`.
        num_attention_heads (int, optional):
            Number of attention heads for each attention layer in the Transformer encoder.
            Defaults to `12`.
        intermediate_size (int, optional):
            Dimensionality of the feed-forward (ff) layer in the encoder. Input tensors
            to ff layers are firstly projected from `hidden_size` to `intermediate_size`,
            and then projected back to `hidden_size`. Typically `intermediate_size` is larger than `hidden_size`.
        inner_group_num (int, optional):
            Number of inner groups in a hidden group. Default to `1`.
        hidden_act (str, optional):
            The non-linear activation function in the feed-forward layer.
            ``"gelu"``, ``"relu"`` and any other paddle supported activation functions
            are supported.
        hidden_dropout_prob (float, optional):
            The dropout probability for all fully connected layers in the embeddings and encoder.
            Defaults to `0`.
        attention_probs_dropout_prob (float, optional):
            The dropout probability used in MultiHeadAttention in all encoder layers to drop some attention target.
            Defaults to `0`.
        max_position_embeddings (int, optional):
            The maximum value of the dimensionality of position encoding, which dictates the maximum supported length of an input
            sequence. Defaults to `512`.
        type_vocab_size (int, optional):
            The vocabulary size of `token_type_ids`. Defaults to `2`.
        initializer_range (float, optional):
            The standard deviation of the normal initializer. Defaults to `0.02`.
            .. note::
                A normal_initializer initializes weight matrices as normal distributions.
                See :meth:`AlbertPretrainedModel.init_weights()` for how weights are initialized in `AlbertModel`.
        layer_norm_eps(float, optional):
            The `epsilon` parameter used in :class:`paddle.nn.LayerNorm` for initializing layer normalization layers.
            A small value to the variance added to the normalization layer to prevent division by zero.
            Default to `1e-12`.
        pad_token_id (int, optional):
            The index of padding token in the token vocabulary. Defaults to `0`.
        add_pooling_layer(bool, optional):
            Whether or not to add the pooling layer. Default to `False`.
    """
    def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
        """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
        # 1-D mask (one value per head): broadcast across layers, batch,
        # and both sequence dimensions.
        if head_mask.dim() == 1:
            head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(
                -1).unsqueeze(-1)
            head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
        # 2-D mask (layers x heads): broadcast across batch and sequence dims.
        elif head_mask.dim() == 2:
            head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(
                -1)  # We can specify head_mask for each layer
        assert head_mask.dim(
        ) == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
        head_mask = paddle.cast(head_mask, dtype=dtype_float)
        return head_mask
    def forward(
            self,
            input_ids,
            attention_mask=None,
            token_type_ids=None,
            position_ids=None,
            head_mask=None,
            inputs_embeds=None,
            return_dict=False,
    ):
        r'''
        The AlbertModel forward method, overrides the `__call__()` special method.
        Args:
            input_ids (Tensor):
                Indices of input sequence tokens in the vocabulary. They are
                numerical representations of tokens that build the input sequence.
                Its data type should be `int64` and it has a shape of [batch_size, sequence_length].
            attention_mask (Tensor, optional):
                Mask used in multi-head attention to avoid performing attention on to some unwanted positions,
                usually the paddings or the subsequent positions.
                Its data type can be int, float and bool.
                When the data type is bool, the `masked` tokens have `False` values and the others have `True` values.
                When the data type is int, the `masked` tokens have `0` values and the others have `1` values.
                When the data type is float, the `masked` tokens have `-INF` values and the others have `0` values.
                It is a tensor with shape broadcasted to `[batch_size, num_attention_heads, sequence_length, sequence_length]`.
                Defaults to `None`, which means nothing needed to be prevented attention to.
            token_type_ids (Tensor, optional):
                Segment token indices to indicate different portions of the inputs.
                Selected in the range ``[0, type_vocab_size - 1]``.
                If `type_vocab_size` is 2, which means the inputs have two portions.
                Indices can either be 0 or 1:
                - 0 corresponds to a *sentence A* token,
                - 1 corresponds to a *sentence B* token.
                Its data type should be `int64` and it has a shape of [batch_size, sequence_length].
                Defaults to `None`, which means we don't add segment embeddings.
            position_ids(Tensor, optional):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
                max_position_embeddings - 1]``.
                Shape as `(batch_size, num_tokens)` and dtype as int64. Defaults to `None`.
            head_mask (Tensor, optional):
                Mask to nullify selected heads of the self-attention modules. Masks values can either be 0 or 1:
                - 1 indicates the head is **not masked**,
                - 0 indicated the head is **masked**.
            inputs_embeds (Tensor, optional):
                If you want to control how to convert `inputs_ids` indices into associated vectors, you can
                pass an embedded representation directly instead of passing `inputs_ids`.
            return_dict (bool, optional):
                Whether or not to return a dict instead of a plain tuple. Default to `False`.
        Returns:
            tuple or Dict: Returns tuple (`sequence_output`, `pooled_output`) or a dict with
            `last_hidden_state`, `pooled_output`, `all_hidden_states`, `all_attentions` fields.
            With the fields:
            - `sequence_output` (Tensor):
                Sequence of hidden-states at the last layer of the model.
                It's data type should be float32 and has a shape of [`batch_size, sequence_length, hidden_size`].
            - `pooled_output` (Tensor):
                The output of first token (`[CLS]`) in sequence.
                We "pool" the model by simply taking the hidden state corresponding to the first token.
                Its data type should be float32 and
                has a shape of [batch_size, hidden_size].
            - `last_hidden_state` (Tensor):
                The output of the last encoder layer, it is also the `sequence_output`.
                It's data type should be float32 and has a shape of [batch_size, sequence_length, hidden_size].
            - `all_hidden_states` (Tensor):
                Hidden_states of all layers in the Transformer encoder. The length of `all_hidden_states` is `num_hidden_layers + 1`.
                For all element in the tuple, its data type should be float32 and its shape is [`batch_size, sequence_length, hidden_size`].
            - `all_attentions` (Tensor):
                Attentions of all layers of in the Transformer encoder. The length of `all_attentions` is `num_hidden_layers`.
                For all element in the tuple, its data type should be float32 and its shape is
                [`batch_size, num_attention_heads, sequence_length, sequence_length`].
        Example:
            .. code-block::
                import paddle
                from paddlenlp.transformers import AlbertModel, AlbertTokenizer
                tokenizer = AlbertTokenizer.from_pretrained('albert-base-v1')
                model = AlbertModel.from_pretrained('albert-base-v1')
                inputs = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
                inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
                output = model(**inputs)
        '''
        # Exactly one of input_ids / inputs_embeds must be supplied.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time"
            )
        elif input_ids is not None:
            input_shape = input_ids.shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.shape[:-1]
        else:
            raise ValueError(
                "You have to specify either input_ids or inputs_embeds")
        # Default masks: attend everywhere, single segment.
        if attention_mask is None:
            attention_mask = paddle.ones(shape=input_shape)
        if token_type_ids is None:
            token_type_ids = paddle.zeros(shape=input_shape, dtype="int64")
        # Additive attention mask: 0.0 for kept positions, -10000.0 for
        # masked positions, broadcastable over heads and query length.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = paddle.cast(extended_attention_mask,
                                              dtype=dtype_float)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        head_mask = self.get_head_mask(head_mask, self.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            extended_attention_mask,
            head_mask=head_mask,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs if not return_dict \
            else encoder_outputs["last_hidden_state"]
        # Pool the first ([CLS]) token when a pooler was configured.
        pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) \
            if self.pooler is not None else None
        if return_dict:
            return {
                "last_hidden_state": sequence_output,
                "pooler_output": pooled_output,
                "all_hidden_states": encoder_outputs["all_hidden_states"],
                "all_attentions": encoder_outputs["all_attentions"],
            }
        return sequence_output, pooled_output
class AlbertForPretraining(AlbertPretrainedModel):
    """
    Albert Model with a `masked language modeling` head and a `sentence order
    prediction` head on top.

    Args:
        albert (:class:`AlbertModel`):
            An instance of :class:`AlbertModel`.
        lm_head (:class:`AlbertMLMHead`):
            An instance of :class:`AlbertMLMHead`.
        sop_head (:class:`AlbertSOPHead`):
            An instance of :class:`AlbertSOPHead`.
        vocab_size (int):
            See :class:`AlbertModel`.
    """

    def forward(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        sentence_order_label=None,
        return_dict=False,
    ):
        r"""
        The AlbertForPretraining forward method, overrides the __call__() special method.

        Args:
            input_ids (Tensor):
                See :class:`AlbertModel`.
            attention_mask (list, optional):
                See :class:`AlbertModel`.
            token_type_ids (Tensor, optional):
                See :class:`AlbertModel`.
            position_ids (Tensor, optional):
                See :class:`AlbertModel`.
            head_mask (Tensor, optional):
                See :class:`AlbertModel`.
            inputs_embeds (Tensor, optional):
                See :class:`AlbertModel`.
            sentence_order_label (Tensor, optional):
                Labels of the next sequence prediction. Input should be a sequence pair.
                Indices should be 0 or 1. ``0`` indicates original order (sequence A, then
                sequence B), and ``1`` indicates switched order (sequence B, then sequence A).
                Not referenced inside this method body; accepted for API compatibility.
                Defaults to `None`.
            return_dict (bool, optional):
                See :class:`AlbertModel`.

        Returns:
            tuple or Dict: A tuple (`prediction_scores`, `sop_scores`), or -- when
            `return_dict` is True -- a dict with `prediction_logits`, `sop_logits`,
            `hidden_states` and `attentions` fields.

            - `prediction_scores` / `prediction_logits` (Tensor): masked-token scores,
              float32, shape [batch_size, sequence_length, vocab_size].
            - `sop_scores` / `sop_logits` (Tensor): sentence-order scores,
              float32, shape [batch_size, 2].
        """
        # Run the ALBERT backbone; the output container type follows `return_dict`.
        backbone_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            return_dict=return_dict,
        )
        if return_dict:
            sequence_output = backbone_outputs["last_hidden_state"]
            pooled_output = backbone_outputs["pooler_output"]
        else:
            sequence_output = backbone_outputs[0]
            pooled_output = backbone_outputs[1]
        # MLM head scores every position over the vocabulary; SOP head scores
        # the pooled representation over the two order classes.
        prediction_scores = self.predictions(sequence_output)
        sop_scores = self.sop_classifier(pooled_output)
        if not return_dict:
            return prediction_scores, sop_scores
        return {
            "prediction_logits": prediction_scores,
            "sop_logits": sop_scores,
            "hidden_states": backbone_outputs["all_hidden_states"],
            "attentions": backbone_outputs["all_attentions"],
        }
class AlbertForMaskedLM(AlbertPretrainedModel):
    """
    Albert Model with a `masked language modeling` head on top.

    Args:
        albert (:class:`AlbertModel`):
            An instance of :class:`AlbertModel`.
    """

    def forward(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        return_dict=False,
    ):
        r"""
        The AlbertForMaskedLM forward method, overrides the __call__() special method.

        Args:
            input_ids (Tensor):
                See :class:`AlbertModel`.
            attention_mask (list, optional):
                See :class:`AlbertModel`.
            token_type_ids (Tensor, optional):
                See :class:`AlbertModel`.
            position_ids (Tensor, optional):
                See :class:`AlbertModel`.
            head_mask (Tensor, optional):
                See :class:`AlbertModel`.
            inputs_embeds (Tensor, optional):
                See :class:`AlbertModel`.
            labels (Tensor, optional):
                Not referenced inside this method body; accepted for API
                compatibility. Defaults to `None`.
            return_dict (bool, optional):
                See :class:`AlbertModel`.

        Returns:
            Tensor or Dict: The masked-token `prediction_scores` tensor of shape
            [batch_size, sequence_length, vocab_size] (float32), or -- when
            `return_dict` is True -- a dict with `logits`, `hidden_states` and
            `attentions` fields.
        """
        backbone_outputs = self.transformer(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            return_dict=return_dict,
        )
        # The backbone returns a tuple unless a dict was requested.
        if return_dict:
            hidden_states = backbone_outputs["last_hidden_state"]
        else:
            hidden_states = backbone_outputs[0]
        prediction_scores = self.predictions(hidden_states)
        if not return_dict:
            return prediction_scores
        return {
            "logits": prediction_scores,
            "hidden_states": backbone_outputs["all_hidden_states"],
            "attentions": backbone_outputs["all_attentions"],
        }
class AlbertForSequenceClassification(AlbertPretrainedModel):
    """
    Albert Model with a linear layer on top of the output layer, designed for
    sequence classification/regression tasks like GLUE tasks.

    Args:
        albert (:class:`AlbertModel`):
            An instance of AlbertModel.
        classifier_dropput_prob (float, optional):
            The dropout probability for the classifier. Defaults to `0`.
        num_classes (int, optional):
            The number of classes. Defaults to `2`.
    """

    def forward(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        return_dict=False,
    ):
        r"""
        The AlbertForSequenceClassification forward method, overrides the
        __call__() special method.

        Args:
            input_ids (Tensor):
                See :class:`AlbertModel`.
            attention_mask (list, optional):
                See :class:`AlbertModel`.
            token_type_ids (Tensor, optional):
                See :class:`AlbertModel`.
            position_ids (Tensor, optional):
                See :class:`AlbertModel`.
            head_mask (Tensor, optional):
                See :class:`AlbertModel`.
            inputs_embeds (Tensor, optional):
                See :class:`AlbertModel`.
            return_dict (bool, optional):
                See :class:`AlbertModel`.

        Returns:
            Tensor or Dict: The classification `logits` of shape
            [batch_size, num_classes] (float32), or -- when `return_dict` is
            True -- a dict with `logits`, `hidden_states` and `attentions`
            fields.

        Example:
            .. code-block::

                import paddle
                from paddlenlp.transformers import AlbertForSequenceClassification, AlbertTokenizer

                tokenizer = AlbertTokenizer.from_pretrained('albert-base-v1')
                model = AlbertForSequenceClassification.from_pretrained('albert-base-v1')

                inputs = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
                inputs = {k: paddle.to_tensor([v]) for (k, v) in inputs.items()}
                outputs = model(**inputs)
                logits = outputs[0]
        """
        backbone_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            return_dict=return_dict,
        )
        # Classify from the pooled representation, with dropout applied first.
        if return_dict:
            pooled = backbone_outputs["pooler_output"]
        else:
            pooled = backbone_outputs[1]
        logits = self.classifier(self.dropout(pooled))
        if not return_dict:
            return logits
        return {
            "logits": logits,
            "hidden_states": backbone_outputs["all_hidden_states"],
            "attentions": backbone_outputs["all_attentions"],
        }
class AlbertForTokenClassification(AlbertPretrainedModel):
    """
    Albert Model with a linear layer on top of the hidden-states output layer,
    designed for token classification tasks like NER tasks.

    Args:
        albert (:class:`AlbertModel`):
            An instance of AlbertModel.
        num_classes (int, optional):
            The number of classes. Defaults to `2`.
    """

    def forward(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        return_dict=False,
    ):
        r"""
        The AlbertForTokenClassification forward method, overrides the __call__() special method.

        Args:
            input_ids (Tensor):
                See :class:`AlbertModel`.
            attention_mask (list, optional):
                See :class:`AlbertModel`.
            token_type_ids (Tensor, optional):
                See :class:`AlbertModel`.
            position_ids (Tensor, optional):
                See :class:`AlbertModel`.
            head_mask (Tensor, optional):
                See :class:`AlbertModel`.
            inputs_embeds (Tensor, optional):
                See :class:`AlbertModel`.
            return_dict (bool, optional):
                See :class:`AlbertModel`.

        Returns:
            Tensor or Dict: Per-token classification `logits` of shape
            [batch_size, sequence_length, num_classes] (float32), or -- when
            `return_dict` is True -- a dict with `logits`, `hidden_states` and
            `attentions` fields.

        Example:
            .. code-block::

                import paddle
                from paddlenlp.transformers import AlbertForTokenClassification, AlbertTokenizer

                tokenizer = AlbertTokenizer.from_pretrained('albert-base-v1')
                model = AlbertForTokenClassification.from_pretrained('albert-base-v1')

                inputs = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
                inputs = {k: paddle.to_tensor([v]) for (k, v) in inputs.items()}
                outputs = model(**inputs)
                logits = outputs[0]
        """
        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            return_dict=return_dict,
        )
        # BUGFIX: the backbone's dict output keys the token-level states as
        # "last_hidden_state" (see AlbertModel.forward's return dict); the
        # previous "sequence_output" key does not exist and raised KeyError
        # whenever return_dict=True.
        sequence_output = transformer_outputs[0] if not return_dict \
            else transformer_outputs["last_hidden_state"]
        logits = self.classifier(sequence_output)
        if return_dict:
            return {
                "logits": logits,
                "hidden_states": transformer_outputs["all_hidden_states"],
                "attentions": transformer_outputs["all_attentions"]
            }
        return logits
class AlbertForQuestionAnswering(AlbertPretrainedModel):
    """
    Albert Model with a linear layer on top of the hidden-states output to compute
    `span_start_logits` and `span_end_logits`, designed for question-answering
    tasks like SQuAD.

    Args:
        albert (:class:`AlbertModel`):
            An instance of AlbertModel.
        num_classes (int):
            The number of classes.
    """

    def forward(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        return_dict=False,
    ):
        r"""
        The AlbertForQuestionAnswering forward method, overrides the __call__() special method.

        Args:
            input_ids (Tensor):
                See :class:`AlbertModel`.
            attention_mask (list, optional):
                See :class:`AlbertModel`.
            token_type_ids (Tensor, optional):
                See :class:`AlbertModel`.
            position_ids (Tensor, optional):
                See :class:`AlbertModel`.
            head_mask (Tensor, optional):
                See :class:`AlbertModel`.
            inputs_embeds (Tensor, optional):
                See :class:`AlbertModel`.
            start_positions (Tensor, optional):
                Start positions of the text. Not referenced inside this method
                body; accepted for API compatibility. Defaults to `None`.
            end_positions (Tensor, optional):
                End positions of the text. Not referenced inside this method
                body; accepted for API compatibility. Defaults to `None`.
            return_dict (bool, optional):
                See :class:`AlbertModel`.

        Returns:
            tuple or Dict: A tuple (`start_logits`, `end_logits`), or -- when
            `return_dict` is True -- a dict with `start_logits`, `end_logits`,
            `hidden_states` and `attentions` fields. Both logits tensors are
            float32 with shape [batch_size, sequence_length] and indicate the
            start/end position of the labelled span.

        Example:
            .. code-block::

                import paddle
                from paddlenlp.transformers import AlbertForQuestionAnswering, AlbertTokenizer

                tokenizer = AlbertTokenizer.from_pretrained('albert-base-v1')
                model = AlbertForQuestionAnswering.from_pretrained('albert-base-v1')

                inputs = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
                inputs = {k: paddle.to_tensor([v]) for (k, v) in inputs.items()}
                outputs = model(**inputs)
                logits = outputs[0]
        """
        transformer_outputs = self.transformer(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            return_dict=return_dict,
        )
        # BUGFIX: the backbone's dict output keys the token-level states as
        # "last_hidden_state"; "sequence_output" does not exist.
        sequence_output = transformer_outputs[0] if not return_dict \
            else transformer_outputs["last_hidden_state"]
        logits = self.qa_outputs(sequence_output)
        # BUGFIX: split into *two* sections (start and end); the previous
        # num_or_sections=1 produced a single tensor and the 2-way unpacking
        # raised at runtime.
        start_logits, end_logits = paddle.split(logits,
                                                num_or_sections=2,
                                                axis=-1)
        start_logits = start_logits.squeeze(axis=-1)
        # BUGFIX: squeeze the end logits from end_logits -- the previous code
        # squeezed start_logits twice, so end_logits duplicated start_logits.
        end_logits = end_logits.squeeze(axis=-1)
        if return_dict:
            return {
                "start_logits": start_logits,
                "end_logits": end_logits,
                "hidden_states": transformer_outputs["all_hidden_states"],
                "attentions": transformer_outputs["all_attentions"]
            }
        return start_logits, end_logits
class AlbertForMultipleChoice(AlbertPretrainedModel):
    """
    Albert Model with a linear layer on top of the hidden-states output layer,
    designed for multiple choice tasks like SWAG tasks .

    Args:
        albert (:class:`AlbertModel`):
            An instance of AlbertModel.
    """

    def forward(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        return_dict=False,
    ):
        r"""
        The AlbertForMultipleChoice forward method, overrides the __call__() special method.

        Args:
            input_ids (Tensor):
                See :class:`AlbertModel`. Shaped [batch_size, num_choices, sequence_length];
                flattened to [batch_size * num_choices, sequence_length] before the backbone runs.
            attention_mask (list, optional):
                See :class:`AlbertModel`. Flattened the same way as `input_ids`.
            token_type_ids (Tensor, optional):
                See :class:`AlbertModel`. Flattened the same way as `input_ids`.
            position_ids (Tensor, optional):
                See :class:`AlbertModel`. Flattened the same way as `input_ids`.
            head_mask (Tensor, optional):
                See :class:`AlbertModel`.
            inputs_embeds (Tensor, optional):
                See :class:`AlbertModel`. Used for the choice count when `input_ids` is None.
            labels (Tensor, optional):
                Not referenced inside this method body; accepted for API
                compatibility. Defaults to `None`.
            return_dict (bool, optional):
                See :class:`AlbertModel`.

        Returns:
            Tensor or Dict: The `reshaped_logits` tensor of shape
            [batch_size, num_choices] (one score per choice), or -- when
            `return_dict` is True -- a dict with `logits` (the reshaped
            logits), `hidden_states` and `attentions` fields.
        """
        # Choice count comes from axis 1 of whichever input was provided.
        num_choices = input_ids.shape[
            1] if input_ids is not None else inputs_embeds.shape[1]
        # Flatten (batch, choices, ...) -> (batch * choices, ...) so the
        # backbone sees one sequence per row.
        input_ids = input_ids.reshape([-1, input_ids.shape[-1]]) \
            if input_ids is not None else None
        attention_mask = attention_mask.reshape([-1, attention_mask.shape[-1]]) \
            if attention_mask is not None else None
        token_type_ids = token_type_ids.reshape([-1, token_type_ids.shape[-1]]) \
            if token_type_ids is not None else None
        position_ids = position_ids.reshape([-1, position_ids.shape[-1]]) \
            if position_ids is not None else None
        inputs_embeds = (inputs_embeds.reshape([
            -1, inputs_embeds.shape[-2], inputs_embeds.shape[-1]
        ]) if inputs_embeds is not None else None)
        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            return_dict=return_dict,
        )
        pooled_output = transformer_outputs[1] if not return_dict \
            else transformer_outputs["pooler_output"]
        pooled_output = self.dropout(pooled_output)
        # One score per flattened row, then fold back to (batch, choices).
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.reshape([-1, num_choices])
        if return_dict:
            return {
                "logits": reshaped_logits,
                "hidden_states": transformer_outputs["all_hidden_states"],
                "attentions": transformer_outputs["all_attentions"]
            }
        return reshaped_logits
| [
2,
15069,
357,
66,
8,
33448,
350,
37382,
47,
37382,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
15069,
2864,
3012,
9552,
11,
3012,
14842,
290,
262,
12905,
2667,
32388,
3457,
13,
1074,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,... | 2.096551 | 24,008 |
#!/usr/bin/env python3
import math
from math import sin, cos, pi
import rospy
import tf
from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3
from am_driver.msg import WheelEncoder
if __name__ == '__main__':
    # Entry point: start the RK4 encoder-based pose-check node and hand
    # control to ROS until shutdown.
    try:
        # NOTE(review): PoseEncCheckRK4 is presumably defined earlier in the
        # original file -- confirm it is in scope before running standalone.
        poseEncCheckRK4 = PoseEncCheckRK4()
        rospy.spin()
        #odomCheck.run()
    except rospy.ROSInterruptException:
        # Raised by rospy when the node is shut down (e.g. Ctrl-C): exit quietly.
        pass
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
10688,
198,
6738,
10688,
1330,
7813,
11,
8615,
11,
31028,
198,
198,
11748,
686,
2777,
88,
198,
11748,
48700,
198,
6738,
22939,
62,
907,
14542,
13,
19662,
1330,
6252,
11... | 2.425 | 160 |
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
import os
import importlib
import argparse
from utils import logger
from utils.common_utils import check_frozen_norm_layer
from utils.ddp_utils import is_master, is_start_rank_node
from utils.download_utils import get_local_path
from common import SUPPORTED_VIDEO_CLIP_VOTING_FN
from .base_cls import BaseVideoEncoder
from ...misc.common import load_pretrained_model
# Name -> model-class registry. It starts empty here; presumably each imported
# module registers itself into it via a decorator defined elsewhere -- confirm.
CLS_MODEL_REGISTRY = {}

# automatically import the models
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
    path = os.path.join(models_dir, file)
    # Import every sibling .py module and every subpackage, skipping private
    # ("_"-prefixed) and hidden ("."-prefixed) entries.
    if (
        not file.startswith("_")
        and not file.startswith(".")
        and (file.endswith(".py") or os.path.isdir(path))
    ):
        # Strip the ".py" suffix for modules; directory names are used as-is.
        model_name = file[: file.find(".py")] if file.endswith(".py") else file
        # Importing for the side effect of module-level registration.
        module = importlib.import_module(
            "cvnets.models.video_classification." + model_name
        )
| [
2,
198,
2,
1114,
15665,
766,
19249,
38559,
24290,
2393,
13,
198,
2,
15069,
357,
34,
8,
33160,
4196,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
198,
11748,
28686,
198,
11748,
1330,
8019,
198,
11748,
1822,
29572,
198,
198,
6738,
33... | 2.66756 | 373 |
import cv2
import numpy as np
if __name__ == '__main__':
    # Run the exercises in order. NOTE(review): ex_1/ex_2/ex_3 are presumably
    # defined earlier in the original file -- confirm before running standalone.
    ex_1()
    ex_2()
    ex_3()
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
409,
62,
16,
3419,
198,
220,
220,
220,
409,
62,
17,
3419,
198,
220,
220,
220... | 1.938776 | 49 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@Author : 王超逸
@File : graphgl_api.py
@Time : 2021/2/25 14:15
@Desc : graphgl风格的api
"""
from model import *
from graphene_model_base import BaseCURD, GraphQLViewSet, GrapheneModelObject
from graphene import Int, String, Field, List, Schema
# Build the GraphQL schema with DemoViewSet as the root query type
# (DemoViewSet is presumably provided by one of the star/model imports above
# -- confirm), then print the schema definition as a quick sanity check.
schema = Schema(query=DemoViewSet)
print(schema)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
13838,
220,
1058,
220,
220,
13328,
23329,
164,
41678,
34460,
116,
198,
31,
8979,
220,
220,
220,
1058,
... | 2.343949 | 157 |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import numpy as np
from qiskit_metal import draw, Dict
from .base import QComponent
from numpy.linalg import norm
from typing import List, Tuple, Union, AnyStr
from collections.abc import Mapping
from qiskit_metal.toolbox_metal import math_and_overrides as mao
import math
import re
class QRoutePoint:
    """A convenience wrapper pairing a 2D position with an optional 2D
    direction (XY plane).

    Values are stored exactly as given (typically np.ndarray of parsed
    floats).
    """

    def __init__(self, position: np.array, direction: np.array = None):
        """
        Args:
            position (np.ndarray of 2 points): Center point of the pin. A list
                of points is also accepted; when its last entry has length 2,
                that entry is taken as the position.
            direction (np.ndarray of 2 points): *Normal vector* -- the normal
                to the surface on which the pin mates. Defines which way it
                points outward. Has unit norm. Defaults to None.
        """
        # For a list of points, collapse to the final (x, y) entry.
        if isinstance(position, list) and len(position[-1]) == 2:
            position = position[-1]
        self.position = position
        self.direction = direction
class QRoute(QComponent):
"""Super-class implementing routing methods that are valid irrespective of
the number of pins (>=1). The route is stored in a n array of planar points
(x,y coordinates) and one direction, which is that of the last point in the
array. Values are stored as np.ndarray of parsed floats or np.array float
pair.
Inherits `QComponent` class
Default Options:
* pin_inputs: Dict
* start_pin: Dict -- Component and pin string pair. Define which pin to start from
* component: '' -- Name of component to start from, which has a pin
* pin: '' -- Name of pin used for pin_start
* end_pin=Dict -- Component and pin string pair. Define which pin to start from
* component: '' -- Name of component to end on, which has a pin
* pin: '' -- Name of pin used for pin_end
* lead: Dict
* start_straight: '0mm' -- Lead-in, defined as the straight segment extension from start_pin. Defaults to 0.1um.
* end_straight: '0mm' -- Lead-out, defined as the straight segment extension from end_pin. Defaults to 0.1um.
* start_jogged_extension: '' -- Lead-in, jogged extension of lead-in. Described as list of tuples
* end_jogged_extension: '' -- Lead-out, jogged extension of lead-out. Described as list of tuples
* fillet: '0'
* total_length: '7mm'
* trace_width: 'cpw_width' -- Defines the width of the line. Defaults to 'cpw_width'.
How to specify \*_jogged_extensions for the QRouteLeads:
\*_jogged_extensions have to be specified in an OrderedDict with incremental keys.
the value of each key specifies the direction of the jog and the extension past the jog.
For example:
.. code-block:: python
:linenos:
jogs = OrderedDict()
jogs[0] = ["R", '200um']
jogs[1] = ["R", '200um']
jogs[2] = ["L", '200um']
jogs[3] = ["L", '500um']
jogs[4] = ["R", '200um']
jogs_other = ....
options = {'lead': {
'start_straight': '0.3mm',
'end_straight': '0.3mm',
'start_jogged_extension': jogs,
'end_jogged_extension': jogs_other
}}
The jog direction can be specified in several ways. Feel free to pick the one more
convenient for your coding style:
>> "L", "L#", "R", "R#", #, "#", "A,#", "left", "left#", "right", "right#"
where # is any signed or unsigned integer or floating point value.
For example the following will all lead to the same turn:
>> "L", "L90", "R-90", 90, "90", "A,90", "left", "left90", "right-90"
"""
component_metadata = Dict(short_name='route', _qgeometry_table_path='True')
"""Component metadata"""
default_options = Dict(
pin_inputs=Dict(
start_pin=Dict( # QRoute also supports single pin routes
component='', # Name of component to start from, which has a pin
pin=''), # Name of pin used for pin_start
end_pin=Dict(
component='', # Name of component to end on, which has a pin
pin='') # Name of pin used for pin_end
),
fillet='0',
lead=Dict(start_straight='0mm',
end_straight='0mm',
start_jogged_extension='',
end_jogged_extension=''),
total_length='7mm',
trace_width='cpw_width')
"""Default options"""
TOOLTIP = """QRoute"""
def __init__(self,
             design,
             name: str = None,
             options: Dict = None,
             type: str = "CPW",
             **kwargs):
    """Initializes all Routes.

    Calls the QComponent __init__() to create a new Metal component.
    Before that, it adds the variables that are needed to support routing.

    Args:
        design (QDesign): The parent design.
        name (str): Name of the component. Auto-named if possible.
        options (dict): User options that will override the defaults.
            Defaults to None.
        type (string): Supports Route (single layer trace) and CPW (adds
            the gap around it). Defaults to "CPW".
    """
    # Class key Attributes:
    #     * head (QRouteLead()): Stores sequential points to start the route.
    #     * tail (QRouteLead()): (optional) Stores sequential points to terminate the route.
    #     * intermediate_pts: (list or numpy Nx2 or dict) Sequence of points between and other
    #       than head and tail. Defaults to None. Type could be either list or numpy Nx2,
    #       or dict/OrderedDict nesting lists or numpy Nx2.
    #     * start_pin_name (string): Head pin name. Defaults to "start".
    #     * end_pin_name (string): Tail pin name. Defaults to "end".
    self.head = QRouteLead()
    self.tail = QRouteLead()

    # keep track of all points so far in the route from both ends
    self.intermediate_pts = np.empty((0, 2), float)  # will be numpy Nx2

    # supported pin names (constants)
    self.start_pin_name = "start"
    self.end_pin_name = "end"

    # normalize the route type once so later comparisons against the
    # uppercase constants "ROUTE"/"CPW" are exact
    self.type = type.upper().strip()

    # # add default_options that are QRoute type specific:
    options = self._add_route_specific_options(options)

    # regular QComponent boot, including the run of make()
    super().__init__(design, name, options, **kwargs)
def _add_route_specific_options(self, options):
    """Enriches the default_options to support different types of route
    styles.

    Args:
        options (dict): User options that will override the defaults.
            May be None or empty.

    Returns:
        dict: A modified options dictionary.

    Raises:
        Exception: Unsupported route type.
    """
    if self.type == "ROUTE":
        # all the defaults are fine as-is (was a bare `None` expression
        # statement; `pass` states the intent)
        pass
    elif self.type == "CPW":
        # add the variable to define the space between the route and the ground plane
        cpw_options = Dict(trace_gap='cpw_gap')
        if options:
            if "trace_gap" not in options:
                # user did not pass the trace_gap, so add it
                options.update(cpw_options)
        else:
            # user did not pass custom options, so start from the CPW defaults.
            # BUGFIX: the previous `options["options"] = cpw_options` indexed
            # into a None/empty `options` (TypeError) and contradicted the
            # stated intent of creating the options with trace_gap.
            options = cpw_options
    else:
        raise Exception("Unsupported Route type: " + self.type +
                        " The only supported types are CPW and route")
    return options
def _get_connected_pin(self, pin_data: Dict):
    """Recovers a pin from the dictionary.

    Args:
        pin_data: dict {component: string, pin: string}

    Returns:
        The actual pin object.
    """
    # Resolve the owning component first, then look up its named pin.
    owner = self.design.components[pin_data.component]
    return owner.pins[pin_data.pin]
def set_pin(self, name: str) -> QRoutePoint:
    """Defines the CPW pins and returns the pin coordinates and normal
    direction vector.

    Args:
        name: String (supported pin names are: start, end)

    Returns:
        QRoutePoint: Last point (for now the single point) in the QRouteLead

    Raises:
        Exception: Pin name is not supported
    """
    # First define which pin/lead you intend to initialize
    if name == self.start_pin_name:
        options_pin = self.options.pin_inputs.start_pin
        lead = self.head
    elif name == self.end_pin_name:
        options_pin = self.options.pin_inputs.end_pin
        lead = self.tail
    else:
        raise Exception("Pin name \"" + name +
                        "\" is not supported for this CPW." +
                        " The only supported pins are: start, end.")

    # grab the reference component pin
    reference_pin = self._get_connected_pin(options_pin)

    # create the cpw pin and document the connections to the reference_pin in the netlist
    # NOTE(review): the reference pin's points are reversed ([::-1]) --
    # presumably so the new route pin faces the pin it mates with; confirm
    # against QComponent.add_pin's orientation convention.
    self.add_pin(name, reference_pin.points[::-1], self.p.trace_width)
    self.design.connect_pins(
        self.design.components[options_pin.component].id, options_pin.pin,
        self.id, name)

    # anchor the correct lead to the pin and return its position and direction
    return lead.seed_from_pin(reference_pin)
def set_lead(self, name: str) -> QRoutePoint:
    """Defines the lead_extension by adding a point to the self.head/tail.

    Args:
        name: String (supported pin names are: start, end)

    Returns:
        QRoutePoint: Last point in the QRouteLead (self.head/tail)

    Raises:
        Exception: Pin name is not supported
    """
    p = self.parse_options()

    # First define which lead you intend to modify
    if name == self.start_pin_name:
        options_lead = p.lead.start_straight
        lead = self.head
        jogged_lead = self.p.lead.start_jogged_extension
    elif name == self.end_pin_name:
        options_lead = p.lead.end_straight
        lead = self.tail
        jogged_lead = self.p.lead.end_jogged_extension
    else:
        raise Exception("Pin name \"" + name +
                        "\" is not supported for this CPW." +
                        " The only supported pins are: start, end.")

    # then change the lead by adding a point in the same direction of the seed pin
    # minimum lead, to be able to jog correctly: never shorter than half the
    # trace width even if the user asked for less
    lead_length = max(options_lead, self.p.trace_width / 2.0)
    lead.go_straight(lead_length)

    # then add all the jogged lead information
    if jogged_lead:
        self.set_lead_extension(name)  # consider merging with set_lead

    # return the last QRoutePoint of the lead
    return lead.get_tip()
def set_lead_extension(self, name: str) -> QRoutePoint:
    """Defines the jogged lead_extension by adding a series of turns to the
    self.head/tail.

    Args:
        name: String (supported pin names are: start, end)

    Returns:
        QRoutePoint: Last point in the QRouteLead (self.head/tail)

    Raises:
        Exception: Pin name is not supported
        Exception: Dictionary error
    """
    p = self.parse_options()

    # First define which lead you intend to modify
    if name == self.start_pin_name:
        options_lead = p.lead.start_jogged_extension
        lead = self.head
    elif name == self.end_pin_name:
        options_lead = p.lead.end_jogged_extension
        lead = self.tail
    else:
        raise Exception("Pin name \"" + name +
                        "\" is not supported for this CPW." +
                        " The only supported pins are: start, end.")

    # then change the lead by adding points, one (turn, length) pair at a time
    for turn, length in options_lead.values():
        if isinstance(turn, (float, int)):
            # turn is a number indicating the angle
            lead.go_angle(length, turn)
        elif re.search(r'^[-+]?(\d+\.\d+|\d+)$', turn):
            # turn is a string of a number indicating the angle
            lead.go_angle(length, float(turn))
        elif turn in ("left", "L"):
            # implicit turn -90 degrees
            lead.go_left(length)
        elif turn in ("right", "R"):
            # implicit turn 90 degrees
            lead.go_right(length)
        elif turn in ("straight", "D", "S"):
            # implicit 0 degrees movement
            lead.go_straight(length)
        elif re.search(r'^(left|L)[-+]?(\d+\.\d+|\d+)$', turn):
            # left turn by the specified int/float degrees. can be signed
            angle = re.sub(r'^(left|L)', "", turn)
            lead.go_angle(length, float(angle))
        elif re.search(r'^(right|R)[-+]?(\d+\.\d+|\d+)$', turn):
            # right turn by the specified int/float degrees. can be signed
            angle = re.sub(r'^(right|R)', "", turn)
            lead.go_angle(length, -1 * float(angle))
        elif 'A' in turn or 'angle' in turn:
            # turn by the specified int/float degrees. Positive numbers turn left.
            # BUGFIX: the original test `('A' or 'angle') in turn` constant-
            # folds to `'A' in turn`, so the "angle,#" spelling was never
            # recognized; test both substrings explicitly.
            turn, angle = turn.split(',')
            lead.go_angle(length, float(angle))
        else:
            raise Exception(
                f"\nThe input string {turn} is not supported. Please specify the jog turn "
                "using one of the supported formats:\n\"L\", \"L#\", \"R\", \"R#\", #, "
                "\"#\", \"A,#\", \"left\", \"left#\", \"right\", \"right#\""
                "\nwhere # is any signed or unsigned integer or floating point value.\n"
                "For example the following will all lead to the same turn:\n"
                "\"L\", \"L90\", \"R-90\", 90, "
                "\"90\", \"A,90\", \"left\", \"left90\", \"right-90\"")

    # return the last QRoutePoint of the lead
    return lead.get_tip()
def _get_lead2pts_array(self, arr) -> Tuple:
"""Return the last "diff pts" of the array. If the array is one
dimensional or has only identical points, return -1 for tip_pt_minus_1.
Return:
Tuple: Of two np.ndarray. the arrays could be -1 instead, if point not found
"""
pt = pt_minus_1 = None
if len(arr) == 1:
pt = arr[0]
elif len(arr) > 1:
if not isinstance(arr, np.ndarray) and len(arr) == 2 and len(
arr[0]) == 1:
# array 2,1
pt = arr
else:
# array N,2
pt = arr[-1]
prev_id = -2
pt_minus_1 = arr[prev_id]
while (pt_minus_1 == pt).all() and prev_id > -len(arr):
prev_id -= 1
pt_minus_1 = arr[prev_id]
if (pt_minus_1 == pt).all():
pt_minus_1 = None
return pt, pt_minus_1
    def get_tip(self) -> QRoutePoint:
        """Access the last element in the QRouteLead.

        Return:
            QRoutePoint: Last point in the QRouteLead
            The values are numpy arrays with two float points each.
        """
        if self.intermediate_pts is None:
            # no points in between, so just grab the last point from the lead-in
            return self.head.get_tip()
        tip_pt = tip_pt_minus_1 = None
        if isinstance(self.intermediate_pts, list) or isinstance(
                self.intermediate_pts, np.ndarray):
            # flat sequence of points: delegate the back-scan for the
            # last two distinct points
            tip_pt, tip_pt_minus_1 = self._get_lead2pts_array(
                self.intermediate_pts)
        elif isinstance(self.intermediate_pts, Mapping):
            # then it is either a dict or a OrderedDict
            # this method relies on the keys to be numerical integer. Will use the last points
            # assumes that the "value" associated with each key is some "not empty" list/array
            sorted_keys = sorted(self.intermediate_pts.keys(), reverse=True)
            # scan segments from the last key backwards; the first segment
            # with a usable point supplies the tip, later iterations can
            # supply the point before it
            for key in sorted_keys:
                pt0, pt_minus1 = self._get_lead2pts_array(
                    self.intermediate_pts[key])
                if pt0 is None:
                    # empty/unusable segment -- keep scanning
                    continue
                if tip_pt_minus_1 is None:
                    tip_pt_minus_1 = pt0
                if tip_pt is None:
                    # first usable segment: promote its point to the tip,
                    # then take that segment's predecessor (may be None,
                    # in which case a later key's point fills it above)
                    tip_pt, tip_pt_minus_1 = tip_pt_minus_1, tip_pt
                    tip_pt_minus_1 = pt_minus1
        else:
            # NOTE(review): silently returns None here instead of raising;
            # callers appear to expect a QRoutePoint -- confirm intent
            print("unsupported type for self.intermediate_pts",
                  type(self.intermediate_pts))
            return
        if tip_pt is None:
            # no point in the intermediate array
            return self.head.get_tip()
        if tip_pt_minus_1 is None:
            # no "previous" point in the intermediate array; use the
            # lead-in tip so a direction can still be derived
            tip_pt_minus_1 = self.head.get_tip().position
        # direction is the vector from the previous point to the tip
        return QRoutePoint(tip_pt, tip_pt - tip_pt_minus_1)
    def del_colinear_points(self, inarray):
        """Delete colinear points from the given array.

        Uses a sliding window of three points; whenever the three are
        aligned (per mao.aligned_pts) the middle one is dropped.
        Consecutive identical points are also removed.

        Args:
            inarray (list): List of points

        Returns:
            np.ndarray: Points without colinear points.
            NOTE(review): returns None (not an empty array) when
            inarray has fewer than two points -- confirm callers handle it.
        """
        if len(inarray) <= 1:
            return
        else:
            outarray = list() #outarray = np.empty(shape=[0, 2])
            # pts is a 3-slot sliding window [prev2, prev1, current];
            # None marks a slot not yet filled (or invalidated below)
            pts = [None, None, inarray[0]]
            for idxnext in range(1, len(inarray)):
                pts = pts[1:] + [inarray[idxnext]]
                # delete identical points
                if np.allclose(*pts[1:]):
                    pts = [None] + pts[0:2]
                    continue
                # compare points once you have 3 unique points in pts
                if pts[0] is not None:
                    # if all(mao.round(i[1]) == mao.round(pts[0][1]) for i in pts) \
                    # or all(mao.round(i[0]) == mao.round(pts[0][0]) for i in pts):
                    if mao.aligned_pts(pts):
                        # drop the middle (colinear) point of the window
                        pts = [None] + [pts[0]] + [pts[2]]
                # save a point once you successfully establish the three are not aligned,
                # and before it gets dropped in the next loop cycle
                if pts[0] is not None:
                    outarray.append(pts[0])
            # save the remainder non-aligned points
            if pts[1] is not None:
                outarray.extend(pts[1:])
            else:
                outarray.append(pts[2])
            return np.array(outarray)
def get_points(self) -> np.ndarray:
"""Assembles the list of points for the route by concatenating:
head_pts + intermediate_pts, tail_pts.
Returns:
np.ndarray: ((H+N+T)x2) all points (x,y) of the CPW
"""
# cover case where there is no intermediate points (straight connection between lead ends)
if self.intermediate_pts is None:
beginning = self.head.pts
else:
beginning = np.concatenate([self.head.pts, self.intermediate_pts],
axis=0)
# cover case where there is no tail defined (floating end)
if self.tail is None:
polished = beginning
else:
polished = np.concatenate([beginning, self.tail.pts[::-1]], axis=0)
polished = self.del_colinear_points(polished)
return polished
def get_unit_vectors(self,
start: QRoutePoint,
end: QRoutePoint,
snap: bool = False) -> Tuple:
"""Return the unit and target vector in which the CPW should process as
its coordinate sys.
Args:
start (QRoutePoint): Reference start point (direction from here)
end (QRoutePoint): Reference end point (direction to here)
snap (bool): True to snap to grid. Defaults to False.
Returns:
array: straight and 90 deg CCW rotated vecs 2D
(array([1., 0.]), array([0., 1.]))
"""
# handle chase when start and end are same?
v = end.position - start.position
direction = v / norm(v)
if snap:
direction = draw.Vector.snap_unit_vector(direction, flip=False)
normal = draw.Vector.rotate(direction, np.pi / 2)
return direction, normal
@property
def length(self) -> float:
"""Sum of all segments length, including the head.
Return:
length (float): Full point_array length
"""
# get the final points (get_point also eliminate co-linear and short edges)
points = self.get_points()
# get the length without the corner rounding radius adjustment
length_estimate = sum(
norm(points[i + 1] - points[i]) for i in range(len(points) - 1))
# compensate for corner rounding
length_estimate -= self.length_excess_corner_rounding(points)
return length_estimate
def length_excess_corner_rounding(self, points) -> float:
"""Computes how much length to deduce for compensating the fillet
settings.
Args:
points (list or array): List of vertices that will be receiving the corner rounding radius
Return:
length_excess (float): Corner rounding radius excess multiplied by the number of points
"""
# deduct the corner rounding (WARNING: assumes fixed fillet for all corners)
length_arch = 0.5 * self.p.fillet * math.pi
length_corner = 2 * self.p.fillet
length_excess = length_corner - length_arch
# the start and and point are the pins, so no corner rounding
return (len(points) - 2) * length_excess
def assign_direction_to_anchor(self, ref_pt: QRoutePoint,
anchor_pt: QRoutePoint):
"""Method to assign a direction to a point. Currently assigned as the
max(x,y projection) of the direct path between the reference point and
the anchor. Method directly modifies the anchor_pt.direction, thus
there is no return value.
Args:
ref_pt (QRoutePoint): Reference point
anchor_pt (QRoutePoint): Anchor point. if it already has a direction, the method will not overwrite it
"""
if anchor_pt.direction is not None:
# anchor_pt already has a direction (not an anchor?), so do nothing
return
# Current rule: stop_direction aligned with longer edge of the rectangle connecting ref_pt and anchor_pt
ref = ref_pt.position
anchor = anchor_pt.position
# Absolute value of displacement between ref and anchor in x direction
offsetx = abs(anchor[0] - ref[0])
# Absolute value of displacement between ref and anchor in y direction
offsety = abs(anchor[1] - ref[1])
if offsetx >= offsety: # "Wide" rectangle -> anchor_arrow points along x
assigned_direction = np.array([ref[0] - anchor[0], 0])
else: # "Tall" rectangle -> anchor_arrow points along y
assigned_direction = np.array([0, ref[1] - anchor[1]])
anchor_pt.direction = assigned_direction / norm(assigned_direction)
    def make_elements(self, pts: np.ndarray):
        """Turns the CPW points into design elements, and add them to the
        design object.

        Side effects: stores the fillet-corrected length (with units) in
        ``self.options._actual_length`` and registers one or two 'path'
        qgeometries on the design.

        Args:
            pts (np.ndarray): Array of points
        """
        # prepare the routing track
        line = draw.LineString(pts)
        # compute actual final length: raw polyline length minus the
        # fillet (corner rounding) excess, stored as "<value> <units>"
        p = self.p
        self.options._actual_length = str(
            line.length - self.length_excess_corner_rounding(line.coords)
        ) + ' ' + self.design.get_units()
        # expand the routing track to form the substrate core of the cpw
        self.add_qgeometry('path', {'trace': line},
                           width=p.trace_width,
                           fillet=p.fillet,
                           layer=p.layer)
        if self.type == "CPW":
            # expand the routing track to form the two gaps in the substrate;
            # the final gap is formed by this (subtracted) shape minus the
            # trace above
            self.add_qgeometry('path', {'cut': line},
                               width=p.trace_width + 2 * p.trace_gap,
                               fillet=p.fillet,
                               layer=p.layer,
                               subtract=True)
class QRouteLead:
    """A simple class to define an array of points with some properties,
    defines 2D positions and some of the 2D directions (XY plane).

    All values stored as np.ndarray of parsed floats.
    """

    def __init__(self, *args, **kwargs):
        """QRouteLead is a simple sequence of points.

        Used to accurately control one of the QRoute termination points.
        Before that, it adds the variables that are needed to support routing.

        Attributes:
            pts (numpy Nx2): Sequence of points. Defaults to None.
            direction (numpy 2x1): Normal from the last point of the array.
                Defaults to None.
        """
        # keep track of all points so far in the route from both ends
        self.pts = None  # will be numpy Nx2
        # keep track of the direction of the tip of the lead (last point)
        self.direction = None  # will be numpy 2x1

    def seed_from_pin(self, pin: Dict) -> "QRoutePoint":
        """Initialize the QRouteLead by giving it a starting point and a
        direction.

        Args:
            pin: object describing the "reference_pin" (not cpw_pin) this
                is attached to; currently (8/4/2020) a dictionary with at
                least the 'middle' and 'normal' keys.

        Return:
            QRoutePoint: Last point (for now the single point) in the QRouteLead
            The values are numpy arrays with two float points each.
        """
        position = pin['middle']
        direction = pin['normal']

        self.direction = direction
        self.pts = np.array([position])

        return QRoutePoint(position, direction)

    def _advance(self, length: float, rotation: float = None):
        """Shared step for all go_* methods: optionally rotate the tip
        direction, then append a point 'length' away along it.

        Args:
            length (float): How much to move by
            rotation (float): Counter-clockwise rotation in radians applied
                to the tip direction before moving; None keeps the current
                direction (straight move)
        """
        if rotation is not None:
            self.direction = draw.Vector.rotate(self.direction, rotation)
        self.pts = np.append(self.pts, [self.pts[-1] + self.direction * length],
                             axis=0)

    def go_straight(self, length: float):
        """Add a point at 'length' distance in the same direction.

        Args:
            length (float) : How much to move by
        """
        self._advance(length)

    def go_left(self, length: float):
        """Straight line 90deg counter-clock-wise direction w.r.t. lead tip
        direction.

        Args:
            length (float): How much to move by
        """
        self._advance(length, np.pi / 2)

    def go_right(self, length: float):
        """Straight line 90deg clock-wise direction w.r.t. lead tip direction.

        Args:
            length (float): How much to move by
        """
        self._advance(length, -1 * np.pi / 2)

    def go_right45(self, length: float):
        """Straight line at 45 angle clockwise w.r.t lead tip direction.

        Args:
            length(float): How much to move by
        """
        self._advance(length, -1 * np.pi / 4)

    def go_left45(self, length: float):
        """Straight line at 45 angle counter-clockwise w.r.t lead tip direction.

        Args:
            length(float): How much to move by
        """
        self._advance(length, np.pi / 4)

    def go_angle(self, length: float, angle: float):
        """Straight line at any angle w.r.t lead tip direction.

        Args:
            length(float): How much to move by
            angle(float): rotation angle in degrees w.r.t lead tip direction
                (positive turns left/counter-clockwise)
        """
        self._advance(length, np.pi / 180 * angle)

    @property
    def length(self) -> float:
        """Sum of all segments length, including the head.

        Return:
            length (float): Full point_array length
        """
        return sum(
            norm(self.pts[i + 1] - self.pts[i])
            for i in range(len(self.pts) - 1))

    def get_tip(self) -> "QRoutePoint":
        """Access the last element in the QRouteLead.

        Return:
            QRoutePoint: Last point in the QRouteLead
            The values are numpy arrays with two float points each.
        """
        # a 1-D pts array holds a single point rather than a list of points
        if self.pts.ndim == 1:
            return QRoutePoint(self.pts, self.direction)
        return QRoutePoint(self.pts[-1], self.direction)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
770,
2438,
318,
636,
286,
1195,
1984,
270,
13,
198,
2,
198,
2,
357,
34,
8,
15069,
19764,
2177,
11,
33448,
13,
198,
2,
198,
2,
770,
2438,
318,
11971,
739,
... | 2.209342 | 13,552 |
from . import YESTERDAY, PST_TIMEZONE
from bs4 import BeautifulSoup
from datetime import datetime
import re
import requests
| [
6738,
764,
1330,
21560,
5781,
26442,
11,
28220,
62,
34694,
57,
11651,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
302,
198,
11748,
7007,
628,
628
] | 3.527778 | 36 |
from typing import Optional
from .typing import EstimatorType
from .typing import RandomStateType
| [
6738,
19720,
1330,
32233,
198,
198,
6738,
764,
774,
13886,
1330,
10062,
320,
1352,
6030,
198,
6738,
764,
774,
13886,
1330,
14534,
9012,
6030,
628,
628
] | 3.923077 | 26 |
import torch
from rph import RandomProjectionHashModule


def _bigram_counts(seq, slot):
    """Count overlapping 2-mers of *seq* into a vector indexed by *slot*."""
    counts = [0] * len(slot)
    for a, b in zip(seq, seq[1:]):
        counts[slot[a + b]] += 1
    return counts


if __name__ == '__main__':
    s1 = "AAATGCGGATGT"
    s2 = "TAATGCGGATGT"
    # map each 2-mer to a vector slot, enumerating over the A, C, T, G
    # alphabet (AA=0, AC=1, ..., GG=15)
    alphabet = "ACTG"
    d = {
        a + b: i * 4 + j
        for i, a in enumerate(alphabet)
        for j, b in enumerate(alphabet)
    }
    hash = RandomProjectionHashModule(16, 4)
    inp = torch.stack([
        torch.tensor(_bigram_counts(s1, d)),
        torch.tensor(_bigram_counts(s2, d))
    ])
    print(inp.size(), inp)
    print(hash(inp))
| [
11748,
28034,
198,
6738,
374,
746,
1330,
14534,
16775,
295,
26257,
26796,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
264,
16,
796,
366,
3838,
1404,
15916,
11190,
1404,
19555,
1,
198,
220,
2... | 1.637555 | 458 |
"""
kombu.transport
===============
Built-in transports.
:copyright: (c) 2009 - 2010 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
import sys
from kombu.utils import rpartition
DEFAULT_TRANSPORT = "kombu.transport.pyamqplib.Transport"
MISSING_LIB = """
The %(feature)s requires the %(lib)s module to be
installed; http://pypi.python.org/pypi/%(lib)s
Use pip to install this module::
$ pip install %(lib)s
or using easy_install::
$ easy_install %(lib)s
"""
# Map of short transport names to fully qualified transport class paths.
# NOTE(review): _django_transport and _sqlalchemy_transport are expected to
# be defined elsewhere in this module -- they resolve lazily here.
TRANSPORT_ALIASES = {
    "amqplib": "kombu.transport.pyamqplib.Transport",
    "pika": "kombu.transport.pypika.AsyncoreTransport",
    "syncpika": "kombu.transport.pypika.SyncTransport",
    "memory": "kombu.transport.memory.Transport",
    "redis": "kombu.transport.pyredis.Transport",
    "beanstalk": "kombu.transport.beanstalk.Transport",
    "mongodb": "kombu.transport.mongodb.Transport",
    "couchdb": "kombu.transport.pycouchdb.Transport",
    "django": _django_transport,
    "sqlalchemy": _sqlalchemy_transport,
}

# Cache of resolved transport classes, keyed by transport name/path.
_transport_cache = {}
def get_transport_cls(transport=None):
    """Get transport class by name.

    The transport string is the full path to a transport class, e.g.::

        "kombu.transport.pyamqplib.Transport"

    If the name does not include `"."` (is not fully qualified),
    the alias table will be consulted.
    """
    transport = transport or DEFAULT_TRANSPORT
    try:
        # fast path: the class was already resolved earlier
        return _transport_cache[transport]
    except KeyError:
        # resolve once and memoize for subsequent lookups
        cls = _transport_cache[transport] = _get_transport_cls(transport)
        return cls
| [
37811,
198,
74,
2381,
84,
13,
7645,
634,
198,
25609,
18604,
198,
198,
39582,
12,
259,
45245,
13,
198,
198,
25,
22163,
4766,
25,
357,
66,
8,
3717,
532,
3050,
416,
16981,
1406,
10671,
13,
198,
25,
43085,
25,
347,
10305,
11,
766,
385... | 2.467919 | 639 |
# Code written by Rakesh C Jakati for the Motor Imagery tutorial

from oscpy.server import OSCThreadServer
from time import sleep
from oscpy.client import OSCClient

osc = OSCThreadServer()
sock = osc.listen(address='127.0.0.1', port=9002, default=True)


# BUG FIX: a decorator must be immediately followed by a function
# definition; the original placed `sleep(100)` directly under
# `@osc.address(...)`, which is a SyntaxError. Register an explicit
# callback for the /neuropype address instead.
@osc.address(b'/neuropype')
def on_neuropype(*values):
    """Print any OSC message received on the /neuropype address."""
    print('/neuropype:', values)


# keep the server thread alive long enough to receive messages
sleep(100)
| [
2,
6127,
3194,
416,
371,
1124,
71,
327,
25845,
7246,
329,
262,
12533,
39440,
1924,
11808,
198,
198,
6738,
267,
1416,
9078,
13,
15388,
1330,
440,
6173,
16818,
10697,
198,
6738,
640,
1330,
3993,
198,
6738,
267,
1416,
9078,
13,
16366,
13... | 2.882353 | 102 |
# Copyright (c) 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
try:
from unittest import mock
except ImportError:
import mock
from pylxd import exceptions, models
from pylxd.exceptions import LXDAPIExtensionNotAvailable
from pylxd.tests import testing
class TestNetwork(testing.PyLXDTestCase):
    """Tests for pylxd.models.Network."""

    def _mock_eth0(self, text):
        """Register a mocked GET response for the eth0 network endpoint."""
        self.add_rule({
            "text": text,
            "method": "GET",
            "url": r"^http://pylxd.test/1.0/networks/eth0$",
        })

    def test_get(self):
        """A network is fetched."""
        network = models.Network.get(self.client, "eth0")
        self.assertEqual("eth0", network.name)

    def test_get_not_found(self):
        """LXDAPIException is raised on unknown network."""
        self._mock_eth0(not_found)
        self.assertRaises(
            exceptions.LXDAPIException, models.Network.get, self.client, "eth0"
        )

    def test_get_error(self):
        """LXDAPIException is raised on error."""
        self._mock_eth0(error)
        self.assertRaises(
            exceptions.LXDAPIException, models.Network.get, self.client, "eth0"
        )

    def test_exists(self):
        """True is returned if network exists."""
        self.assertTrue(models.Network.exists(self.client, "eth0"))

    def test_not_exists(self):
        """False is returned when network does not exist."""
        self._mock_eth0(not_found)
        self.assertFalse(models.Network.exists(self.client, "eth0"))

    def test_update(self):
        """A network is updated."""
        with mock.patch.object(self.client, "assert_has_api_extension"):
            network = models.Network.get(self.client, "eth0")
            network.config = {}
            network.save()
            self.assertEqual({}, network.config)

    def test_fetch(self):
        """A partial network is synced."""
        network = self.client.networks.all()[1]
        network.sync()
        self.assertEqual("Network description", network.description)

    def test_fetch_not_found(self):
        """LXDAPIException is raised on bogus network fetch."""
        self._mock_eth0(not_found)
        network = models.Network(self.client, name="eth0")
        self.assertRaises(exceptions.LXDAPIException, network.sync)

    def test_fetch_error(self):
        """LXDAPIException is raised on fetch error."""
        self._mock_eth0(error)
        network = models.Network(self.client, name="eth0")
        self.assertRaises(exceptions.LXDAPIException, network.sync)

    def test_delete(self):
        """A network is deleted."""
        models.Network(self.client, name="eth0").delete()

    def test_str(self):
        """Network is printed in JSON format."""
        network = models.Network.get(self.client, "eth0")
        expected = {
            "name": "eth0",
            "description": "Network description",
            "type": "bridge",
            "config": {
                "ipv4.address": "10.80.100.1/24",
                "ipv4.nat": "true",
                "ipv6.address": "none",
                "ipv6.nat": "false",
            },
            "managed": True,
            "used_by": [],
        }
        self.assertEqual(expected, json.loads(str(network)))
| [
2,
15069,
357,
66,
8,
1584,
19507,
605,
12052,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
... | 2.072365 | 2,211 |
import traceback

# Best-effort CSV-ish dump of ranked query results (Python 2 script:
# `print` is a statement here). Any failure is printed as a traceback
# instead of crashing.
try:
    import signals.sim_server as sim
    import signals.sentiment as sentiment
    import sys

    # query the similarity server with the term given on the command line
    server = sim.SimServer('/home/ibraaaa/servers/1mon_preprocess/')
    result = server.query(sys.argv[1])
    result = server.rank(result[0], result[1])
    for r in result:
        # sentiment scoring disabled; hard-coded to 0 for now
        sentiment_score = 0#sentiment.Sentiment().calc_headline_sentiment_by_arabic(r[2]['url'])
        print "'",r[2]['url'], "','", r[0], "',", r[2]['news_site'], ',', r[2]['date'], ',' , r[1], ',', sentiment_score
# NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt --
# confirm this catch-all is intentional
except:
    print traceback.format_exc()
11748,
12854,
1891,
198,
28311,
25,
198,
220,
220,
220,
1330,
10425,
13,
14323,
62,
15388,
355,
985,
198,
220,
220,
220,
1330,
10425,
13,
34086,
3681,
355,
15598,
198,
220,
220,
220,
1330,
25064,
198,
220,
220,
220,
220,
198,
220,
2... | 2.361345 | 238 |
__version_info__ = (0, 9, 34)
__version__ = '.'.join(str(v) for v in __version_info__)
| [
834,
9641,
62,
10951,
834,
796,
357,
15,
11,
860,
11,
4974,
8,
198,
834,
9641,
834,
796,
705,
2637,
13,
22179,
7,
2536,
7,
85,
8,
329,
410,
287,
11593,
9641,
62,
10951,
834,
8,
198
] | 2.351351 | 37 |