hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0721455cc0a7bbad4ef3dc27a41e21d464b9bd
| 241
|
py
|
Python
|
example/foo/generators.py
|
pydanny/django-oauth-toolkit
|
bf9ce2837f9ae2be939ac00a23f348266b91325d
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2019-07-25T23:15:39.000Z
|
2019-07-25T23:15:39.000Z
|
example/foo/generators.py
|
pydanny/django-oauth-toolkit
|
bf9ce2837f9ae2be939ac00a23f348266b91325d
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
example/foo/generators.py
|
pydanny/django-oauth-toolkit
|
bf9ce2837f9ae2be939ac00a23f348266b91325d
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
import random
import string
from oauth2_provider.generators import BaseHashGenerator
class ClientIdGenerator(BaseHashGenerator):
    """Generator producing random alphanumeric OAuth client ids."""

    def hash(self):
        """Return a random 100-character string of ASCII letters and digits.

        Fix: the original used ``string.letters``, which only exists on
        Python 2; ``string.ascii_letters`` is the Python 3 equivalent.
        """
        charset = string.ascii_letters + string.digits
        return ''.join(random.choice(charset) for _ in range(100))
| 26.777778
| 89
| 0.775934
|
4a07215ae21ec3e4f595ce1be8fc4dcdc6452607
| 6,310
|
py
|
Python
|
app/main/views.py
|
dandud/dabl
|
4e5e3c5b0e8ec582b2c2b17fe25e0bbe83c26c2f
|
[
"MIT"
] | 2
|
2022-01-10T00:16:39.000Z
|
2022-01-23T20:55:59.000Z
|
app/main/views.py
|
dandud/dabl
|
4e5e3c5b0e8ec582b2c2b17fe25e0bbe83c26c2f
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
dandud/dabl
|
4e5e3c5b0e8ec582b2c2b17fe25e0bbe83c26c2f
|
[
"MIT"
] | null | null | null |
from flask import render_template, session, redirect, url_for, current_app, flash
from .. import db
from ..models import Measurement, User, Batch, Action, Brewtype, Brewstyle, Status, Container, Containertype, Vessel
from ..email import send_email
from . import main, batches
from .forms import NameForm, BatchAddForm, BatchEditForm, BatchMoveForm
from datetime import datetime
@main.route('/', methods=['GET', 'POST'])
def index():
    """Landing page: greets returning users and registers unseen names."""
    form = NameForm()
    if form.validate_on_submit():
        username = form.name.data
        user = User.query.filter_by(username=username).first()
        if user is not None:
            session['known'] = True
        else:
            # First time this name is seen: persist it and, when an admin
            # address is configured, send a notification email.
            user = User(username=username)
            db.session.add(user)
            db.session.commit()
            session['known'] = False
            admin = current_app.config['FLASKY_ADMIN']
            if admin:
                send_email(admin, 'New User', 'mail/new_user', user=user)
        session['name'] = username
        return redirect(url_for('.index'))
    # GET (or invalid POST): render with whatever state the session holds.
    return render_template('index.html',
                           form=form, name=session.get('name'),
                           known=session.get('known', False))
@batches.route('/batch_overview', methods=['GET', 'POST'])
def all_batches():
    """Render an overview table of every batch in the database."""
    return render_template('batch_overview.html',
                           all_batches=Batch.query.all())
@batches.route('/batch_add', methods=['GET', 'POST'])
def batch_add():
    """Create a new batch; on valid submit, stamp timestamps and persist."""
    form = BatchAddForm()
    new_batch = Batch()
    now = datetime.now()
    form.time_start.data = now
    # Populate the select widgets from the lookup tables.
    form.type_id.choices = [(t.id, t.name) for t in Brewtype.query.all()]
    form.style_id.choices = [(s.id, s.name) for s in Brewstyle.query.all()]
    if not form.validate_on_submit():
        return render_template('batch_add.html',
                               form=form,
                               batch=new_batch)
    new_batch.time_updated = now
    new_batch.status_id = 1000  # initial status code for a fresh batch
    form.populate_obj(new_batch)
    db.session.add(new_batch)
    db.session.commit()
    db.session.refresh(new_batch)
    flash('Batch added.', 'success')
    return redirect(url_for('batches.all_batches'))
@batches.route('/batch_move/<batch_name>', methods=['GET', 'POST'])
def batch_move(batch_name):
    """Move/update a batch identified by its name.

    Fixes relative to the original:
    * ``.all`` was missing its call parentheses, so ``_vessels`` held a
      bound method instead of a result list.
    * ``form.status_id.choices`` was assigned twice with identical values;
      the duplicate assignment was removed.
    """
    _batch = Batch.query.filter_by(name=batch_name).first()
    # NOTE(review): these two query results are never handed to the
    # template; confirm whether batch_move.html should receive them or
    # whether the queries can be dropped.
    _vessels = Container.query.join(Container.containertype_rel).filter(
        Containertype.is_vessel.is_(True),
        Container.batch_id == _batch.id).all()
    _all_vessels = Container.query.join(Container.containertype_rel).filter(
        Containertype.is_vessel.is_(True)).all()
    form = BatchMoveForm(obj=_batch)
    time_now = datetime.now()
    # Populate the select widgets from the lookup tables.
    form.type_id.choices = [(row.id, row.name) for row in Brewtype.query.all()]
    form.style_id.choices = [(row.id, row.name) for row in Brewstyle.query.all()]
    form.status_id.choices = [(row.id, row.name)
                              for row in Status.query.filter_by(type='Batch').all()]
    if form.validate_on_submit():
        _batch.time_updated = time_now
        form.populate_obj(_batch)
        db.session.add(_batch)
        db.session.commit()
        db.session.refresh(_batch)
        flash('Batch updated.', 'success')
        return redirect(url_for("batches.all_batches"))
    return render_template('batch_move.html',
                           form=form,
                           batch=_batch)
@batches.route('/batch_edit/<batch_name>', methods=['GET', 'POST'])
def batch_edit(batch_name):
    """Edit an existing batch looked up by name."""
    target = Batch.query.filter_by(name=batch_name).first()
    form = BatchEditForm(obj=target)
    now = datetime.now()
    # Populate the select widgets from the lookup tables.
    form.type_id.choices = [(r.id, r.name) for r in Brewtype.query.all()]
    form.style_id.choices = [(r.id, r.name) for r in Brewstyle.query.all()]
    form.status_id.choices = [(r.id, r.name)
                              for r in Status.query.filter_by(type='Batch').all()]
    if not form.validate_on_submit():
        return render_template('batch_edit.html',
                               form=form,
                               batch=target)
    target.time_updated = now
    form.populate_obj(target)
    db.session.add(target)
    db.session.commit()
    db.session.refresh(target)
    flash('Batch updated.', 'success')
    return redirect(url_for('batches.all_batches'))
@batches.route('/batch_view/<name>', methods=['GET', 'POST'])
def batch_view(name):
    """Show one batch with its measurements, actions, vessel and containers.

    Fix: the original dereferenced ``_batch.id`` before checking whether
    the lookup returned ``None``, so an unknown batch name raised
    AttributeError instead of flashing the error and redirecting.
    """
    _batch = Batch.query.filter_by(name=name).first()
    if not _batch:
        flash('Oops! Something went wrong!.', 'danger')
        return redirect(url_for('batches.all_batches'))
    _measurements = Measurement.query.filter_by(batch_id=_batch.id).all()
    _actions = Action.query.filter_by(batch_id=_batch.id).all()
    _vessels = Vessel.query.join(Vessel.vesseltype_rel).filter(
        Vessel.batch_id == _batch.id).first()
    _containers = Container.query.join(Container.containertype_rel).filter(
        Container.batch_id == _batch.id).all()
    return render_template('batch_view.html',
                           batch=_batch,
                           measurements=_measurements,
                           actions=_actions,
                           vessels=_vessels,
                           containers=_containers)
@batches.route('/batch_view_enhance/<name>', methods=['GET', 'POST'])
def batch_view_enhance(name):
    """Enhanced batch detail view (same data as batch_view).

    Fix: the original dereferenced ``_batch.id`` before checking whether
    the lookup returned ``None``, so an unknown batch name raised
    AttributeError instead of flashing the error and redirecting.
    """
    _batch = Batch.query.filter_by(name=name).first()
    if not _batch:
        flash('Oops! Something went wrong!.', 'danger')
        return redirect(url_for('batches.all_batches'))
    _measurements = Measurement.query.filter_by(batch_id=_batch.id).all()
    _actions = Action.query.filter_by(batch_id=_batch.id).all()
    _vessels = Vessel.query.join(Vessel.vesseltype_rel).filter(
        Vessel.batch_id == _batch.id).first()
    _containers = Container.query.join(Container.containertype_rel).filter(
        Container.batch_id == _batch.id).all()
    return render_template('batch_view_enhance.html',
                           batch=_batch,
                           measurements=_measurements,
                           actions=_actions,
                           vessels=_vessels,
                           containers=_containers)
| 38.242424
| 141
| 0.633281
|
4a07222a3e200db07f24baaead11589968c670fe
| 683
|
py
|
Python
|
Leetcode/1000-2000/1057. Campus Bikes/1057.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
Leetcode/1000-2000/1057. Campus Bikes/1057.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
Leetcode/1000-2000/1057. Campus Bikes/1057.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
class Solution:
    def assignBikes(self, workers: List[List[int]], bikes: List[List[int]]) -> List[int]:
        """Pair each worker with the nearest free bike (Manhattan distance).

        Ties break on smaller worker index, then smaller bike index —
        preserved automatically because pairs are appended in (worker,
        bike) order and buckets are scanned in increasing distance.
        """
        # Coordinates lie in [0, 1000), so distances fall within [0, 2000].
        pairs_by_dist = [[] for _ in range(2001)]
        for w, worker in enumerate(workers):
            for b, bike in enumerate(bikes):
                d = abs(worker[0] - bike[0]) + abs(worker[1] - bike[1])
                pairs_by_dist[d].append((w, b))
        assignment = [-1] * len(workers)
        taken = [False] * len(bikes)
        for pairs in pairs_by_dist:
            for w, b in pairs:
                if assignment[w] == -1 and not taken[b]:
                    assignment[w] = b
                    taken[b] = True
        return assignment
| 31.045455
| 87
| 0.553441
|
4a0722b57621463399fea877dfa44d0f58ee557f
| 968
|
py
|
Python
|
leetcode/704_binary_search.py
|
coocos/leetcode
|
007bbeb46fa4b32e1c92fc894edeb2100eb6ba21
|
[
"MIT"
] | null | null | null |
leetcode/704_binary_search.py
|
coocos/leetcode
|
007bbeb46fa4b32e1c92fc894edeb2100eb6ba21
|
[
"MIT"
] | null | null | null |
leetcode/704_binary_search.py
|
coocos/leetcode
|
007bbeb46fa4b32e1c92fc894edeb2100eb6ba21
|
[
"MIT"
] | null | null | null |
import unittest
from typing import List
class Solution:
    """Iterative binary search over a sorted integer list."""

    def search(self, nums: List[int], target: int) -> int:
        """Return the index of *target* in sorted *nums*, or -1 if absent."""
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            value = nums[mid]
            if value < target:
                lo = mid + 1
            elif value > target:
                hi = mid - 1
            else:
                return mid
        return -1
class TestSolution(unittest.TestCase):
    """Unit tests for Solution.search."""

    NUMBERS = [-1, 0, 3, 5, 9, 12]

    def test_successful_search(self):
        # Target present: its index is returned.
        self.assertEqual(Solution().search(list(self.NUMBERS), 9), 4)

    def test_missing_search(self):
        # Target absent: sentinel -1 is returned.
        self.assertEqual(Solution().search(list(self.NUMBERS), 2), -1)

    def test_searching_single_value_list(self):
        # Degenerate one-element list.
        self.assertEqual(Solution().search([5], 5), 0)
| 21.511111
| 71
| 0.544421
|
4a072357514d9d9051d41b5bbe52837743f3dd4d
| 1,455
|
py
|
Python
|
S4/S4 Library/simulation/tag.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | 1
|
2021-05-20T19:33:37.000Z
|
2021-05-20T19:33:37.000Z
|
S4/S4 Library/simulation/tag.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
S4/S4 Library/simulation/tag.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
import functools
from sims4.tuning.dynamic_enum import DynamicEnumLocked
from sims4.tuning.tunable import TunableSet, TunableEnumEntry, TunableEnumWithFilter
from sims4.tuning.tunable_base import ExportModes
import singletons
# Prefix tuples used to restrict which part of the Tag namespace a
# tunable may reference (passed as filter_prefixes to filtered tunables).
PORTAL_DISALLOWANCE_PREFIX = ('PortalDisallowance',)
INTERACTION_PREFIX = ('interaction',)
SPAWN_PREFIX = ('Spawn',)
class Tag(DynamicEnumLocked, export_modes=(ExportModes.ClientBinary, ExportModes.ServerXML), display_sorted=True, partitioned=True):
    # Dynamic enum of gameplay tags; entries are populated from tuning at
    # load time, so only the INVALID sentinel is declared in code.
    INVALID = 0
class TagCategory(DynamicEnumLocked, export_modes=(ExportModes.ClientBinary, ExportModes.ServerXML)):
    # Dynamic enum of tag categories, populated from tuning; INVALID is the
    # only in-code sentinel value.
    INVALID = 0
class TunableTag(TunableEnumWithFilter):
    """Tunable holding a single Tag value, with Tag.INVALID disallowed."""

    def __init__(self, description='A tag.', filter_prefixes=singletons.EMPTY_SET, pack_safe=True, **kwargs):
        # Bind the Tag-specific defaults and forward everything else.
        super().__init__(
            tunable_type=Tag,
            default=Tag.INVALID,
            invalid_enums=(Tag.INVALID,),
            pack_safe=pack_safe,
            filter_prefixes=filter_prefixes,
            description=description,
            **kwargs)
class TunableTags(TunableSet):
    """Tunable set of Tag values, optionally restricted by name prefixes."""

    def __init__(self, filter_prefixes=None, pack_safe=True, minlength=None, maxlength=None, **kwargs):
        # Without a prefix filter a plain enum entry suffices; otherwise
        # bind the prefixes into a filtered enum tunable.
        if filter_prefixes is None:
            entry_factory = TunableEnumEntry
        else:
            entry_factory = functools.partial(
                TunableEnumWithFilter, filter_prefixes=filter_prefixes)
        entry = entry_factory(
            tunable_type=Tag, default=Tag.INVALID,
            invalid_enums=(Tag.INVALID,), pack_safe=pack_safe, **kwargs)
        super().__init__(entry, minlength=minlength, maxlength=maxlength)
| 50.172414
| 182
| 0.785567
|
4a0723c93479a8bbc5ca71dc3546fad9ba304db8
| 68
|
py
|
Python
|
astprint/__init__.py
|
fjarri-attic/astprint
|
b6f5113ed0dd2af0a7ba8b0445fc25ba6d970321
|
[
"MIT"
] | 2
|
2016-12-25T08:13:36.000Z
|
2020-05-11T01:40:00.000Z
|
astprint/__init__.py
|
fjarri-attic/astprint
|
b6f5113ed0dd2af0a7ba8b0445fc25ba6d970321
|
[
"MIT"
] | null | null | null |
astprint/__init__.py
|
fjarri-attic/astprint
|
b6f5113ed0dd2af0a7ba8b0445fc25ba6d970321
|
[
"MIT"
] | 3
|
2016-12-27T07:06:47.000Z
|
2021-01-20T03:22:35.000Z
|
from astprint.code import as_code
from astprint.tree import as_tree
| 22.666667
| 33
| 0.852941
|
4a0723f059a07099f0c493d4e32cf531d29075ff
| 6,551
|
py
|
Python
|
telethon/client/messageparse.py
|
rotem443/Telethon
|
35ba9848d9126462b6d51a35d3e16762b18660a9
|
[
"MIT"
] | 1
|
2019-05-31T10:20:06.000Z
|
2019-05-31T10:20:06.000Z
|
telethon/client/messageparse.py
|
exceloo/Telethon
|
30a0e390603072d3ec57a2f0eef0a297a9b0321b
|
[
"MIT"
] | 5
|
2021-04-30T21:14:18.000Z
|
2022-03-12T00:21:58.000Z
|
telethon/client/messageparse.py
|
exceloo/Telethon
|
30a0e390603072d3ec57a2f0eef0a297a9b0321b
|
[
"MIT"
] | null | null | null |
import itertools
import re
import typing
from .users import UserMethods
from .. import utils
from ..tl import types
if typing.TYPE_CHECKING:
from .telegramclient import TelegramClient
class MessageParseMethods(UserMethods):
    """Message parsing helpers: default parse-mode handling and mapping
    RPC update results back to the Message objects they produced."""

    # region Public properties

    @property
    def parse_mode(self: 'TelegramClient'):
        """
        This property is the default parse mode used when sending messages.
        Defaults to `telethon.extensions.markdown`. It will always
        be either ``None`` or an object with ``parse`` and ``unparse``
        methods.

        When setting a different value it should be one of:

        * Object with ``parse`` and ``unparse`` methods.
        * A ``callable`` to act as the parse method.
        * A ``str`` indicating the ``parse_mode``. For Markdown ``'md'``
          or ``'markdown'`` may be used. For HTML, ``'htm'`` or ``'html'``
          may be used.

        The ``parse`` method should be a function accepting a single
        parameter, the text to parse, and returning a tuple consisting
        of ``(parsed message str, [MessageEntity instances])``.

        The ``unparse`` method should be the inverse of ``parse`` such
        that ``assert text == unparse(*parse(text))``.

        See :tl:`MessageEntity` for allowed message entities.

        Example
            .. code-block:: python

                # Disabling default formatting
                client.parse_mode = None
                # Enabling HTML as the default format
                client.parse_mode = 'html'
        """
        return self._parse_mode

    @parse_mode.setter
    def parse_mode(self: 'TelegramClient', mode: str):
        self._parse_mode = utils.sanitize_parse_mode(mode)

    # endregion

    # region Private methods

    async def _replace_with_mention(self: 'TelegramClient', entities, i, user):
        """
        Helper method to replace ``entities[i]`` to mention ``user``,
        or do nothing if it can't be found.
        """
        try:
            entities[i] = types.InputMessageEntityMentionName(
                entities[i].offset, entities[i].length,
                await self.get_input_entity(user)
            )
            return True
        except (ValueError, TypeError):
            return False

    async def _parse_message_text(self: 'TelegramClient', message, parse_mode):
        """
        Returns a (parsed message, entities) tuple depending on ``parse_mode``.
        """
        if parse_mode == ():
            # ``()`` is the "use the client default" sentinel. Fix: the
            # original compared with ``parse_mode is ()``, which relies on
            # CPython interning the empty tuple and raises a SyntaxWarning
            # on Python 3.8+; equality is the portable check.
            parse_mode = self._parse_mode
        else:
            parse_mode = utils.sanitize_parse_mode(parse_mode)

        if not parse_mode:
            return message, []

        message, msg_entities = parse_mode.parse(message)
        # Walk backwards so deletions don't shift the remaining indices.
        for i in reversed(range(len(msg_entities))):
            e = msg_entities[i]
            if isinstance(e, types.MessageEntityTextUrl):
                m = re.match(r'^@|\+|tg://user\?id=(\d+)', e.url)
                if m:
                    # group(1) is only set for tg://user?id= links; for the
                    # '@'/'+' alternatives fall back to the raw URL.
                    user = int(m.group(1)) if m.group(1) else e.url
                    is_mention = await self._replace_with_mention(msg_entities, i, user)
                    if not is_mention:
                        del msg_entities[i]
            elif isinstance(e, (types.MessageEntityMentionName,
                                types.InputMessageEntityMentionName)):
                is_mention = await self._replace_with_mention(msg_entities, i, e.user_id)
                if not is_mention:
                    del msg_entities[i]

        return message, msg_entities

    def _get_response_message(self: 'TelegramClient', request, result, input_chat):
        """
        Extracts the response message known a request and Update result.
        The request may also be the ID of the message to match.

        If ``request is None`` this method returns ``{id: message}``.

        If ``request.random_id`` is a list, this method returns a list too.
        """
        if isinstance(result, types.UpdateShort):
            updates = [result.update]
            entities = {}
        elif isinstance(result, (types.Updates, types.UpdatesCombined)):
            updates = result.updates
            entities = {utils.get_peer_id(x): x
                        for x in
                        itertools.chain(result.users, result.chats)}
        else:
            return None

        random_to_id = {}
        id_to_message = {}
        for update in updates:
            if isinstance(update, types.UpdateMessageID):
                random_to_id[update.random_id] = update.id
            elif isinstance(update, (
                    types.UpdateNewChannelMessage, types.UpdateNewMessage)):
                update.message._finish_init(self, entities, input_chat)
                id_to_message[update.message.id] = update.message
            elif (isinstance(update, types.UpdateEditMessage)
                    and not isinstance(request.peer, types.InputPeerChannel)):
                if request.id == update.message.id:
                    update.message._finish_init(self, entities, input_chat)
                    return update.message
            elif (isinstance(update, types.UpdateEditChannelMessage)
                    and utils.get_peer_id(request.peer) ==
                    utils.get_peer_id(update.message.to_id)):
                if request.id == update.message.id:
                    update.message._finish_init(self, entities, input_chat)
                    return update.message

        if request is None:
            return id_to_message

        random_id = request if isinstance(request, int) else request.random_id
        if not utils.is_list_like(random_id):
            msg = id_to_message.get(random_to_id.get(random_id))
            if not msg:
                self._log[__name__].warning(
                    'Request %s had missing message mapping %s', request, result)
            return msg

        try:
            return [id_to_message[random_to_id[rnd]] for rnd in random_id]
        except KeyError:
            # Sometimes forwards fail (`MESSAGE_ID_INVALID` if a message gets
            # deleted or `WORKER_BUSY_TOO_LONG_RETRY` if there are issues at
            # Telegram), in which case we get some "missing" message mappings.
            # Log them with the hope that we can better work around them.
            self._log[__name__].warning(
                'Request %s had missing message mappings %s', request, result)
            return [id_to_message.get(random_to_id.get(rnd)) for rnd in random_to_id]

    # endregion
| 37.649425
| 89
| 0.591665
|
4a07240eaac8910e8e0da98651bde283eab96bf4
| 1,083
|
py
|
Python
|
tests/test_orbiter.py
|
michaelaye/pytelescope
|
8e02fd790b1265c91faa786d9bd16131a869dcca
|
[
"ISC"
] | null | null | null |
tests/test_orbiter.py
|
michaelaye/pytelescope
|
8e02fd790b1265c91faa786d9bd16131a869dcca
|
[
"ISC"
] | null | null | null |
tests/test_orbiter.py
|
michaelaye/pytelescope
|
8e02fd790b1265c91faa786d9bd16131a869dcca
|
[
"ISC"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pytelescope` package."""
import pytest
from pytelescope import orbiter
@pytest.fixture
def response():
    """Sample pytest fixture.

    See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    # Placeholder from the cookiecutter template: currently yields None.
    # import requests
    # return requests.get('https://github.com/michaelaye/cookiecutter-pypackage-conda')
@pytest.fixture()
def mars_orbiter():
    """MarsOrbiter fixture at an altitude of 350 (km, presumably)."""
    return orbiter.MarsOrbiter(350)
def test_mars_orbiter_orbital_period(mars_orbiter):
    # NOTE(review): `u` (presumably astropy.units) is never imported in this
    # module, so this test fails with NameError before the assertion runs —
    # add `from astropy import units as u` at the top of the file.
    assert mars_orbiter.T.to(u.hour).value == pytest.approx(1.933, abs=1e-2)
def test_mars_orbiter_slew_rate(mars_orbiter):
    # Slew rate magnitude checked to an absolute tolerance of 1e-2.
    assert mars_orbiter.slew_rate.value == pytest.approx(0.5019, abs=1e-2)
def test_mars_orbiter_altitude(mars_orbiter):
    # The altitude passed to the constructor should round-trip unchanged.
    assert mars_orbiter.alt.value == pytest.approx(350)
def test_mars_orbiter_orbital_velocity(mars_orbiter):
    # Regression value for the orbital velocity magnitude.
    assert mars_orbiter.v.value == pytest.approx(3384.208966304714)
def test_mars_orbiter_ground_track_speed(mars_orbiter):
    # Regression value for the ground-track (surface) speed magnitude.
    assert mars_orbiter.v_surf.value == pytest.approx(3067.464831608527)
| 24.066667
| 87
| 0.746076
|
4a0726eab2fc58c775bad85922fdf8c04bcd3559
| 805
|
py
|
Python
|
PythonExercicios/ex038.py
|
VitorFRodrigues/Python-curso
|
af75ff4a7ca14bc7e67b4f3362af837d355b1746
|
[
"MIT"
] | null | null | null |
PythonExercicios/ex038.py
|
VitorFRodrigues/Python-curso
|
af75ff4a7ca14bc7e67b4f3362af837d355b1746
|
[
"MIT"
] | null | null | null |
PythonExercicios/ex038.py
|
VitorFRodrigues/Python-curso
|
af75ff4a7ca14bc7e67b4f3362af837d355b1746
|
[
"MIT"
] | null | null | null |
# ANSI escape codes keyed by (Portuguese) colour names; 'limpa' ("clear")
# resets terminal formatting.
# NOTE(review): 'branco' (white) maps to \033[30m, which is the black
# foreground code — verify the intended colour.
cores = {'limpa': '\033[m',
         'branco': '\033[30m',
         'vermelho': '\033[31m',
         'verde': '\033[32m',
         'amarelo': '\033[33m',
         'azul': '\033[34m',
         'roxo': '\033[35m',
         'azulclaro': '\033[36m',
         'cinza': '\033[37m',
         'pretoebranco': '\033[7;30m'}
# Read two integers and report which one is larger (or that they are
# equal), colouring parts of the message with the codes above.
a = int(input('Digite um número inteiro: '))
b = int(input('Digite outro número inteiro: '))
if a > b:
    print('O {}primeiro valor{} é o {}maior{}.'.format(cores['amarelo'], cores['limpa'], cores['azul'], cores['limpa']))
elif a < b:
    print('O {}segundo valor{} é o {}maior{}.'.format(cores['amarelo'], cores['limpa'], cores['azul'], cores['limpa']))
else:
    print('{}Não existe{} valor maior, os dois são {}iguais{}.'.format(cores['amarelo'], cores['limpa'], cores['azul'], cores['limpa']))
| 40.25
| 136
| 0.542857
|
4a0727482a67cd242892bf70bf888536887db905
| 385
|
py
|
Python
|
testing/issspace.py
|
BicycleWalrus/slop
|
300f994cb7d7a58a330fdac1afcd36ffd1da80ff
|
[
"MIT"
] | null | null | null |
testing/issspace.py
|
BicycleWalrus/slop
|
300f994cb7d7a58a330fdac1afcd36ffd1da80ff
|
[
"MIT"
] | null | null | null |
testing/issspace.py
|
BicycleWalrus/slop
|
300f994cb7d7a58a330fdac1afcd36ffd1da80ff
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""TPatrick | Alta3 Research
Script to interact with Astros API"""
import requests
def astros():
    """Return the parsed astros payload, or None on a non-200 response."""
    response = requests.get("http://api.open-notify.org/astros.json")
    return response.json() if response.status_code == 200 else None
def main():
    """Print the people currently in space, per the Open Notify API."""
    print("Right now in space we have...")
    print(astros())


if __name__ == "__main__":
    main()
| 20.263158
| 62
| 0.620779
|
4a0728388715df412a03e56b9fb75aa1759671ea
| 470
|
py
|
Python
|
applitools/__init__.py
|
applitools/eyes.selenium.python
|
3a09a3372a3a8915b3c97ee54fc223580c45c0a3
|
[
"Apache-2.0"
] | 11
|
2016-04-20T21:21:37.000Z
|
2020-04-27T19:46:56.000Z
|
applitools/__init__.py
|
applitools/eyes.selenium.python
|
3a09a3372a3a8915b3c97ee54fc223580c45c0a3
|
[
"Apache-2.0"
] | 15
|
2017-01-11T04:58:31.000Z
|
2019-09-13T18:00:35.000Z
|
applitools/__init__.py
|
applitools/eyes.selenium.python
|
3a09a3372a3a8915b3c97ee54fc223580c45c0a3
|
[
"Apache-2.0"
] | 15
|
2016-03-23T22:06:39.000Z
|
2020-06-14T09:11:58.000Z
|
from .__version__ import __version__
from .core import * # noqa
from .selenium import * # noqa
from .utils import * # noqa
# for backward compatibility
from .core import errors, geometry # noqa
from .selenium import eyes, target # noqa
# Public API: everything the sub-packages export, plus legacy names kept
# importable for backward compatibility.
__all__ = (
    core.__all__ + # noqa
    selenium.__all__ + # noqa
    utils.__all__ + # noqa
    ('errors', 'geometry', 'target', 'StitchMode', 'eyes')
)
# for backward compatibility
VERSION = __version__
| 24.736842
| 62
| 0.66383
|
4a07294d7861727ce8121fda41f9b9abda97394b
| 21,094
|
py
|
Python
|
frites/simulations/sim_local_mi.py
|
adam2392/frites
|
4a6afa6dd0a2d559f5ae81d455c77210f018450c
|
[
"BSD-3-Clause"
] | null | null | null |
frites/simulations/sim_local_mi.py
|
adam2392/frites
|
4a6afa6dd0a2d559f5ae81d455c77210f018450c
|
[
"BSD-3-Clause"
] | null | null | null |
frites/simulations/sim_local_mi.py
|
adam2392/frites
|
4a6afa6dd0a2d559f5ae81d455c77210f018450c
|
[
"BSD-3-Clause"
] | null | null | null |
"""Simulate local representation of mutual information."""
import numpy as np
import xarray as xr
from frites.io import logger, set_log_level
"""
###############################################################################
I(C; C)
###############################################################################
- MI between two continuous variables
- Single / Multi subjects simulations
"""
def sim_local_cc_ms(n_subjects, random_state=None, **kwargs):
    """Multi-subjects simulations for computing local MI (CC).

    Simulate, for each subject, local representations of mutual
    information between two continuous variables (see
    :func:`sim_local_cc_ss`).

    Parameters
    ----------
    n_subjects : int
        Number of subjects
    random_state : int | None
        Random state (use it for reproducibility)
    kwargs : dict | {}
        Additional arguments are sent to the function
        :func:`sim_local_cc_ss`

    Returns
    -------
    x : list
        Per-subject data arrays of shape (n_epochs, n_channels, n_times)
    y : list
        Per-subject regressor arrays of shape (n_epochs,)
    roi : list
        Per-subject roi arrays of shape (n_roi,)
    times : array_like
        Time vector
    """
    cl_index = kwargs.get('cl_index', [40, 60])
    cl_cov = kwargs.get('cl_cov', [.8])
    cl_sgn = kwargs.get('cl_sgn', [1])
    # Broadcast a single cluster spec to every subject.
    if len(cl_index) != n_subjects:
        cl_index = [cl_index] * n_subjects
    if len(cl_cov) != n_subjects:
        cl_cov = [cl_cov] * n_subjects
    if len(cl_sgn) != n_subjects:
        cl_sgn = cl_sgn * n_subjects
    if not isinstance(random_state, int):
        random_state = np.random.randint(1000)
    # Generate one dataset per subject with an offset random state.
    x, y, roi, times = [], [], [], None
    for s in range(n_subjects):
        kwargs.update(cl_index=cl_index[s], cl_cov=cl_cov[s],
                      cl_sgn=cl_sgn[s])
        sx, sy, sroi, times = sim_local_cc_ss(
            random_state=random_state + s, **kwargs)
        x.append(sx)
        y.append(sy)
        roi.append(sroi)
    return x, y, roi, times
def sim_local_cc_ss(n_epochs=10, n_times=100, n_roi=1, cl_index=[40, 60],
cl_cov=[.8], cl_sgn=1, random_state=None):
"""Single-subject simulations for computing local MI (CC).
This function can be used for simulating local representations of mutual
information between two continuous variables (CC) for a single subject.
Parameters
----------
n_epochs : int | 30
Number of trials
n_times : int | 100
Number of time points
n_roi : int | 1
Number of ROI
cl_index : array_like | [40, 60]
Sample indices where the clusters are located. Should be an array of
shape (n_clusters, 2)
cl_cov : array_like | [.8]
Covariance level between the data and the regressor variable. Should be
an array of shape (n_clusters,)
cl_sgn : {-1, 1}
Sign of the correlation. Use -1 for anti-correlated variables and 1
for correlated variables
random_state : int | None
Random state (use it for reproducibility)
Returns
-------
x : array_like
Data array of shape (n_epochs, n_channels, n_times)
y : array_like
Regressor array of shape (n_epochs,)
roi : array_like
Array of ROI names of shape (n_roi,)
times : array_like
Time vector of shape (n_times,)
"""
random_state = np.random.randint(100) if not isinstance(
random_state, int) else random_state
rnd = np.random.RandomState(random_state)
# -------------------------------------------------------------------------
# Pick random n_roi
roi = np.array([f"roi_{k}" for k in range(n_roi)])
# -------------------------------------------------------------------------
# check cluster types
cl_index, cl_cov = np.atleast_2d(cl_index), np.asarray(cl_cov)
assert (cl_index.shape[-1] == 2) and (cl_cov.ndim == 1)
assert cl_sgn in [-1, 1]
if cl_index.shape[0] == 1:
cl_index = np.tile(cl_index, (n_roi, 1))
if cl_cov.shape[0] == 1:
cl_cov = np.repeat(cl_cov, n_roi)
assert (cl_index.shape == (n_roi, 2)) and (cl_cov.shape == (n_roi,))
# -------------------------------------------------------------------------
# Built a random dataset
x = rnd.randn(n_epochs, n_roi, n_times)
y = rnd.randn(n_epochs).reshape(-1, 1)
# -------------------------------------------------------------------------
# Introduce a correlation between the data and the regressor
for num, (idx, cov) in enumerate(zip(cl_index, cl_cov)):
if not np.isfinite(cov): continue # noqa
# define correlation strength
t_len = idx[1] - idx[0]
epsilon = np.sqrt((1. - cov ** 2) / cov ** 2)
# Generate noise
rnd_noise = np.random.RandomState(random_state + num + 1)
noise = epsilon * rnd_noise.randn(n_epochs, t_len)
x[:, num, idx[0]:idx[1]] = cl_sgn * y + noise
times = np.arange(n_times)
return x, y.ravel(), roi, times
"""
###############################################################################
I(C; D)
###############################################################################
- MI between a continuous and a discret variable
- Single / Multi subjects simulations
"""
def sim_local_cd_ms(n_subjects, **kwargs):
    """Multi-subjects simulations for computing local MI (CD).

    Simulate, for each subject, local representations of mutual
    information between a continuous and a discret variable (see
    :func:`sim_local_cd_ss`).

    Parameters
    ----------
    n_subjects : int
        Number of subjects
    kwargs : dict | {}
        Additional arguments are sent to the function
        :func:`sim_local_cd_ss`

    Returns
    -------
    x : list
        Per-subject data arrays of shape (n_epochs, n_channels, n_times)
    y : list
        Per-subject condition arrays of shape (n_epochs,)
    roi : list
        Per-subject roi arrays of shape (n_roi,)
    times : array_like
        Time vector of shape (n_times,)
    """
    cl_index = kwargs.get('cl_index', [40, 60])
    cl_cov = kwargs.get('cl_cov', [.8])
    # Broadcast a single cluster spec to every subject.
    if len(cl_index) != n_subjects:
        cl_index = [cl_index] * n_subjects
    if len(cl_cov) != n_subjects:
        cl_cov = [cl_cov] * n_subjects
    # Generate one dataset per subject (random_state follows the subject
    # index, matching the original behavior).
    x, y, roi, times = [], [], [], None
    for s in range(n_subjects):
        kwargs.update(cl_index=cl_index[s], cl_cov=cl_cov[s])
        sx, sy, sroi, times = sim_local_cd_ss(random_state=s, **kwargs)
        x.append(sx)
        y.append(sy)
        roi.append(sroi)
    return x, y, roi, times
def sim_local_cd_ss(n_conditions=2, n_epochs=10, n_times=100, n_roi=1,
cl_index=[40, 60], cl_cov=[.8], random_state=None):
"""Single-subject simulations for computing local MI (CD).
This function can be used for simulating local representations of mutual
information between a continuous and a discret variable (CD) for a single
subject.
Parameters
----------
n_conditions : int | 2
Number of conditions
n_epochs : int | 30
Number of trials
n_times : int | 100
Number of time points
n_roi : int | 1
Number of ROI
cl_index : array_like | [40, 60]
Sample indices where the clusters are located. Should be an array of
shape (n_clusters, 2)
cl_cov : array_like | [.8]
Covariance level between the data and the regressor variable. Should be
an array of shape (n_clusters,)
random_state : int | None
Random state (use it for reproducibility)
Returns
-------
x : array_like
Data array of shape (n_epochs, n_channels, n_times)
y : array_like
Condition array of shape (n_epochs,)
roi : array_like
Array of ROI names of shape (n_roi,)
times : array_like
Time vector of shape (n_times,)
"""
random_state = np.random.randint(100) if not isinstance(
random_state, int) else random_state
rnd = np.random.RandomState(random_state)
# -------------------------------------------------------------------------
# Pick random n_roi
roi = np.array([f"roi_{k}" for k in range(n_roi)])
# -------------------------------------------------------------------------
# check cluster types
cl_index, cl_cov = np.atleast_2d(cl_index), np.asarray(cl_cov)
assert (cl_index.shape[-1] == 2) and (cl_cov.ndim == 1)
if cl_index.shape[0] == 1:
cl_index = np.tile(cl_index, (n_roi, 1))
if cl_cov.shape[0] == 1:
cl_cov = np.repeat(cl_cov, n_roi)
assert (cl_index.shape == (n_roi, 2)) and (cl_cov.shape == (n_roi,))
# -------------------------------------------------------------------------
# linearly spaced values taken from a gaussian distribution
res = 100
pick_up = np.linspace(0, res - 1, n_conditions).astype(int)
values = np.sort(rnd.randn(res))[pick_up]
# regressor variable
y_regr = np.repeat(values, np.round(n_epochs, n_conditions))
y_regr = rnd.permutation(y_regr)[0:n_epochs]
# condition variable
_, y = np.unique(y_regr, return_inverse=True)
y_regr = y_regr.reshape(-1, 1)
x = rnd.randn(n_epochs, n_roi, n_times)
# -------------------------------------------------------------------------
# Introduce a correlation between the data and the regressor
for num, (idx, cov) in enumerate(zip(cl_index, cl_cov)):
# define correlation strength
t_len = idx[1] - idx[0]
epsilon = np.sqrt((1. - cov ** 2) / cov ** 2)
# Generate noise
rnd_noise = np.random.RandomState(random_state + num + 1)
noise = epsilon * rnd_noise.randn(n_epochs, t_len)
x[:, num, idx[0]:idx[1]] = y_regr + noise
times = np.arange(n_times)
return x, y.astype(int), roi, times
"""
###############################################################################
I(C; C | D)
###############################################################################
- MI between two continuous variables conditioned by a discret variable
- Single / Multi subjects simulations
"""
def sim_local_ccd_ms(n_subjects, **kwargs):
    """Multi-subjects simulations for computing local MI (CCD).

    This function can be used for simulating local representations of mutual
    information between two continuous variables conditioned by a third
    discrete one (CCD) across multiple subjects.

    Parameters
    ----------
    n_subjects : int
        Number of subjects
    kwargs : dict | {}
        Additional arguments are sent to the function :func:`sim_local_cc_ms`

    Returns
    -------
    x : list
        List length n_subjects composed of data arrays each one with a shape
        of (n_epochs, n_channels, n_times)
    y : list
        List of length n_subjects composed of regressor arrays each one with
        a shape of (n_epochs)
    z : list
        List of length n_subjects composed of condition arrays each one with
        a shape of (n_epochs,)
    roi : list
        List of length n_subjects composed of roi arrays each one with a
        shape of (n_roi)
    times : array_like
        Time vector
    """
    # pop `n_conditions` so that it is not forwarded to sim_local_cc_ms
    n_c = kwargs.pop('n_conditions', 2)
    x, y, roi, times = sim_local_cc_ms(n_subjects, **kwargs)
    n_e = len(y[0])
    # seed the condition labels when a random_state is provided so that the
    # whole simulation is reproducible (previously always unseeded)
    rng = np.random.RandomState(kwargs.get('random_state', None))
    z = [rng.randint(0, n_c, (n_e,)) for _ in range(n_subjects)]
    return x, y, z, roi, times
def sim_local_ccd_ss(n_epochs=10, n_times=100, n_roi=1, n_conditions=2,
                     cl_index=[40, 60], cl_cov=[.8], random_state=None):
    """Single-subject simulations for computing local MI (CCD).

    This function can be used for simulating local representations of mutual
    information between two continuous variables conditioned by a third
    discrete one (CCD) for a single subject.

    Parameters
    ----------
    n_epochs : int | 10
        Number of trials
    n_times : int | 100
        Number of time points
    n_roi : int | 1
        Number of ROI
    n_conditions : int | 2
        Number of conditions
    cl_index : array_like | [40, 60]
        Sample indices where the clusters are located. Should be an array of
        shape (n_clusters, 2)
    cl_cov : array_like | [.8]
        Covariance level between the data and the regressor variable. Should
        be an array of shape (n_clusters,)
    random_state : int | None
        Random state (use it for reproducibility)

    Returns
    -------
    x : array_like
        Data array of shape (n_epochs, n_channels, n_times)
    y : array_like
        Regressor array of shape (n_epochs,)
    z : array_like
        Condition array of shape (n_epochs,)
    roi : array_like
        Array of ROI names of shape (n_roi,)
    times : array_like
        Time vector of shape (n_times,)
    """
    x, y, roi, times = sim_local_cc_ss(
        n_epochs=n_epochs, n_times=n_times, n_roi=n_roi, cl_index=cl_index,
        cl_cov=cl_cov, random_state=random_state)
    # draw the condition labels from a seeded generator so that random_state
    # makes the full output reproducible (previously z ignored it)
    rng = np.random.RandomState(random_state)
    z = rng.randint(0, n_conditions, (n_epochs,))
    return x, y, z, roi, times
def sim_ground_truth(n_subjects, n_epochs, gtype='tri', perc=100, p_pos=1.,
                     gt_as_cov=False, gt_only=False, random_state=None,
                     verbose=None):
    """Spatio-temporal ground truth simulation.

    This function can be used to simulate the data coming from multiple
    subjects with specific ground-truth arrays with clusters distributed over
    time and space. This function typically returns two outputs :

        * The simulated data (i.e. the brain data and a continuous external
          variable)
        * The ground-truth array which specifies where the effects are and
          the strength of the effects

    The outputs of this function can be used to compare different statistical
    models and methods for correcting for multiple comparisons.

    Parameters
    ----------
    n_subjects, n_epochs : int
        Number of subjects and trials
    gtype : {'tri', 'tri_r', 'diffuse', 'focal'}
        Ground-truth type. Choose either :

            * 'tri' or 'tri_r' : spatially diffuse effects
            * 'diffuse' : weak effect spatially diffuse
            * 'focal' : strong and spatially focalized effect
    perc : int | 100
        Percentage of subjects having the effect (equivalent to inter-subject
        consistency). For example, if n_subjects=10 and perc is 80%, 8
        subjects out of the 10 will have the effect. This parameter can be
        useful to investigate how many subjects are necessary to retrieve the
        effect.
    p_pos : float | 1.
        Proportion of positive correlations between the simulated brain data
        and the regressor across subjects (1. = all positive, 0. = all
        negative, 0.5 = half positive / half negative).
    gt_as_cov : bool | False
        If False (default), the returned ground-truth contains booleans
        indicating whether there is an effect at a specific time-space bin.
        If True, it contains the covariance values used.
    gt_only : bool | False
        Specify whether only the ground-truth should be returned
    random_state : int | None
        Fix the random state of the machine for reproducible results. If
        None, the seed is randomly picked.

    Returns
    -------
    da : list
        List containing the simulated data coming from multiple subjects.
        Each element of the list represents a single subject of shape
        (n_epochs, n_roi, n_times). Returned only if gt_only is False.
    gt : array_like
        Ground-truth array of shape (n_times, n_roi)
    """
    set_log_level(verbose)
    assert gtype in ['tri', 'tri_r', 'diffuse', 'focal']
    assert 0 <= perc <= 100
    if not isinstance(random_state, int):
        random_state = np.random.randint(1000)
    rnd = np.random.RandomState(random_state)
    logger.info(f"-> GT={gtype}; #suj={n_subjects}; #trials={n_epochs}; "
                f"perc={perc}%; p_pos={p_pos}; random_state={random_state}")
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # settings according to ground-truth type
    if gtype in ['tri', 'tri_r']:
        # ---------------------------------------------------------------------
        # triangular layout : cluster width grows with the roi index
        n_roi = 15
        n_times = 50
        cl_width = np.linspace(3, 15, n_roi, endpoint=True)
        # BUGFIX : compare strings with `==` instead of the identity operator
        # `is` (implementation-dependent; SyntaxWarning on modern Python)
        if gtype == 'tri':
            cl_cov = np.linspace(.01, .3, n_roi, endpoint=True)
        elif gtype == 'tri_r':
            cl_cov = np.linspace(.3, .01, n_roi, endpoint=True)
        # ---------------------------------------------------------------------
        # build cluster indices centered on linearly spaced time points
        cl_middles = np.linspace(5, 25, n_roi, endpoint=True)
        cl_index = np.c_[cl_middles - cl_width / 2, cl_middles + cl_width / 2]
        cl_index = cl_index.astype(int)
    elif gtype == 'diffuse':
        # ---------------------------------------------------------------------
        # weak effect present in every roi
        n_roi = 10
        n_times = 10
        cl_width = np.full((n_roi,), 3)
        cl_cov = np.full((n_roi,), .1)
        # ---------------------------------------------------------------------
        # build cluster indices
        cl_left = np.full((n_roi,), 1)
        cl_index = np.c_[cl_left, cl_left + cl_width].astype(int)
    elif gtype == 'focal':
        # ---------------------------------------------------------------------
        # strong effect in a single roi (nan = no effect in the others)
        n_roi = 10
        n_times = 10
        cl_width = np.full((n_roi,), 3)
        cl_cov = np.full((n_roi,), np.nan)
        cl_cov[4] = .3
        # ---------------------------------------------------------------------
        # build cluster indices
        cl_left = np.full((n_roi,), 1)
        cl_index = np.c_[cl_left, cl_left + cl_width].astype(int)
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # build the (n_times, n_roi) ground-truth array
    if gt_as_cov:
        gt = np.full((n_times, n_roi), np.nan, dtype=float)
        for n_cl, cl in enumerate(cl_index):
            gt[cl[0]:cl[1], n_cl] = cl_cov[n_cl]
    else:
        gt = np.zeros((n_times, n_roi), dtype=bool)
        for n_cl, cl in enumerate(cl_index):
            gt[cl[0]:cl[1], n_cl] = np.isfinite(cl_cov[n_cl])
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # get the number of subjects that are going to have the effect
    n_suj_perc = int(np.round(n_subjects * perc / 100.))
    use_suj = np.arange(n_subjects)
    rnd.shuffle(use_suj)
    use_suj = use_suj[:n_suj_perc]
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # proportion of positive / negative correlation across subjects
    cl_sgn = rnd.choice([-1, 1], size=(n_subjects,), p=[1 - p_pos, p_pos])
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # subjects without the effect get nan covariances
    cl_cov_null = np.full((n_roi,), np.nan)
    cl_index_perc = [cl_index] * n_subjects
    cl_cov_perc = [cl_cov_null] * n_subjects
    for _u in use_suj:
        cl_cov_perc[_u] = cl_cov
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # build the data
    x, y, roi, times = sim_local_cc_ms(
        n_subjects, n_epochs=n_epochs, n_times=n_times, n_roi=n_roi,
        cl_index=cl_index_perc, cl_cov=cl_cov_perc, cl_sgn=cl_sgn,
        random_state=random_state)
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # xarray conversion
    da = []
    for k in range(len(x)):
        _da = xr.DataArray(x[k], dims=('y', 'roi', 'times'),
                           coords=(y[k], roi[k], times), name=f"subject{k}")
        da += [_da]
    gt = xr.DataArray(gt, dims=('times', 'roi'), coords=(times, roi[0]),
                      name=gtype)
    if gt_only:
        return gt
    else:
        return da, gt
| 37.938849
| 79
| 0.563762
|
4a0729a272f747903f93e3d962dd1aee9c958c07
| 613
|
py
|
Python
|
emlo-edit-php-helper/exporter/export_sophiahedwig.py
|
culturesofknowledge/site-editor
|
9a74580d2567755ab068a2d8761df8f81718910e
|
[
"MIT"
] | null | null | null |
emlo-edit-php-helper/exporter/export_sophiahedwig.py
|
culturesofknowledge/site-editor
|
9a74580d2567755ab068a2d8761df8f81718910e
|
[
"MIT"
] | null | null | null |
emlo-edit-php-helper/exporter/export_sophiahedwig.py
|
culturesofknowledge/site-editor
|
9a74580d2567755ab068a2d8761df8f81718910e
|
[
"MIT"
] | 1
|
2021-11-15T13:19:28.000Z
|
2021-11-15T13:19:28.000Z
|
__author__ = 'matthew'
from exporter.exporter import Exporter
from config import config

debug_on = True

# Build the libpq-style connection string from the config mapping.
postgres_connection = (
    "dbname='%(dbname)s' host='%(host)s' port='%(port)s'"
    " user='%(user)s' password='%(password)s'" % config
)

e = Exporter( postgres_connection, False, debug_on )

# Collect every work id belonging to the SOPHIAHEDWIG catalogue...
command = "select work_id from cofk_union_work where original_catalogue='SOPHIAHEDWIG'"
work_ids = [row['work_id'] for row in e.select_all( command )]

# ...and export them under the "sophiahedwig" label.
e.export( work_ids, "sophiahedwig" )
| 32.263158
| 94
| 0.62969
|
4a072abc44dd785a50e77db3cc409167467578ba
| 5,685
|
py
|
Python
|
Port 11211 - MemcacheD (bonus seeding scripts)/memcached-seeder.py
|
racompton/AMP-Research
|
aabc1bb3f08ed960d8466bd1e53408d2977db1fe
|
[
"MIT"
] | 183
|
2019-09-30T09:22:44.000Z
|
2022-03-30T20:39:30.000Z
|
Port 11211 - MemcacheD (bonus seeding scripts)/memcached-seeder.py
|
racompton/AMP-Research
|
aabc1bb3f08ed960d8466bd1e53408d2977db1fe
|
[
"MIT"
] | 5
|
2020-03-25T11:21:52.000Z
|
2022-03-09T01:43:07.000Z
|
Port 11211 - MemcacheD (bonus seeding scripts)/memcached-seeder.py
|
racompton/AMP-Research
|
aabc1bb3f08ed960d8466bd1e53408d2977db1fe
|
[
"MIT"
] | 72
|
2019-09-28T19:12:39.000Z
|
2022-03-27T20:08:07.000Z
|
# Meme-cacheD seeder by Phenomite 2020
import os
import sys
import socket
import re
import time
import requests
import memcache
import concurrent.futures
# Set by workers on KeyboardInterrupt to request an early stop.
# NOTE(review): shadows the `exit` builtin.
exit = False
# Standard memcached service port (not referenced below -- confirm it is used)
memcachePort = 11211
def getseed(url):
    """Download the page body at *url* to use as a cache-seeding payload.

    Returns the response text, or None (implicit) when the body is empty --
    callers should check the result before using it.
    """
    body = requests.get(url).text
    if body:
        print("Grabbing new seed...")
        return body
    print("Seeder failed...")
def seed_items(ip, seed_content, seed_content2, seed_content3):
    """Push the three payloads into the memcached instance at *ip*.

    Returns a marker string on success, '' on any failure path (used by the
    caller to detect non-responders).
    """
    global exit
    marker = ""
    try:
        print("Seeding items now (because UDP is unreliable by design you might need to run this again)")
        client = memcache.Client([ip], debug=False)
        client.set("p", seed_content3)
        client.set("h", seed_content2)
        client.set("e", seed_content)
        marker = "phenomite"  # junk for credit
    except UnicodeDecodeError:
        # Ignore non-utf8 responses / decoder-killing payloads (rare pots)
        pass
    except Exception as err:
        # Unreachable hosts end up here; report and return the empty marker
        print("\nError occurred while seeding: " + str(err))
    except KeyboardInterrupt:
        # Flag a user-requested shutdown for the surrounding machinery
        exit = True
    return marker
def get_best_items(ip, string_items, min_size):
    """Query *ip* for each space-separated key in *string_items*.

    Returns a space-joined string of the keys whose stored value is at least
    *min_size* bytes (min_size may be an int or a numeric string).
    """
    global exit
    overall_best_item = ""
    # hoist the conversion out of the loop (was int(min_size) per iteration)
    threshold = int(min_size)
    for item in string_items.split(" "):
        try:
            print("\nItem: " + item)
            mc = memcache.Client([ip], debug=False)
            memcache_item_value = mc.get(item)
            # BUGFIX: mc.get returns None for a missing/expired key, which
            # previously crashed len(None) into the generic except handler
            if memcache_item_value is None:
                continue
            item_size_returned = len(memcache_item_value)
            print("Reported size of item: " + str(item_size_returned))
            # Check the returned size is over our min threshold
            if item_size_returned >= threshold:
                overall_best_item += item + " "
                print("Added this item to key string for this IP")
        except UnicodeDecodeError:
            # Ignore non-utf8 responses and attempts at killing decoders
            pass
        except Exception as e:
            # Network errors etc. -- report and continue with the next key
            print("\nError occurred while querying: " + str(e))
        except KeyboardInterrupt:
            exit = True
    return overall_best_item.strip()
def executors(ip, content_to_seed, content_to_seed2, content_to_seed3):
    """Worker: seed one IP with the payloads, then verify which keys stuck.

    Returns the verified key string, or None for non-responders.
    """
    print("\n---------New-IP---------")
    target = ip.rstrip()  # strip stray CR/LF from DOS-style input lines
    try:
        # Seed first; an empty marker means the host never answered
        marker = seed_items(target, content_to_seed, content_to_seed2,
                            content_to_seed3)
        if not marker:
            return
        # Then confirm which of our keys hold a payload of at least 1000 B
        found = get_best_items(target, marker, "1000")
        if not found:
            return
        return found
    except Exception as e:
        print("FATAL: " + str(e))  # debug
def main(fileInput, fileOutput,
         url="https://www.netflix.com/watch/70177848",
         url2="https://yahoo.com",
         url3="https://stackoverflow.com/questions/24017316/pragma-mark-in-swift"):
    """Seed every IP listed in *fileInput* and append results to *fileOutput*.

    Three web pages are fetched once and used as payloads; a process pool
    fans the seeding out over the IP list.
    """
    global exit
    # TODO: Randomise the seed to not hit flowspec size matching
    content_to_seed = getseed(url)
    content_to_seed2 = getseed(url2)  # eh
    content_to_seed3 = getseed(url3)  # ehhhh
    # Open the output file once instead of reopening it for every result
    with open(fileInput, 'r') as ips, open(fileOutput, "a+") as out:
        with concurrent.futures.ProcessPoolExecutor(max_workers=50) as executor:
            future_seeder = {executor.submit(executors, ip, content_to_seed,
                             content_to_seed2, content_to_seed3): ip for ip in ips}
            for future in concurrent.futures.as_completed(future_seeder):
                ip_seeded = future_seeder[future]
                try:
                    return_ip = future.result()
                except Exception as ex:
                    print('%r generated an exception: %s' % (ip_seeded, ex))
                else:
                    # NOTE(review): the IP is written even when return_ip is
                    # None (non-responder) -- kept as-is, confirm if intended
                    print('%s cache contains %s item!' % (str(ip_seeded).strip(), return_ip))
                    print("\nWill write this to file: " + str(ip_seeded).strip())
                    out.write(str(ip_seeded).strip() + "\n")
                    print()  # Spacer
    print("\n---------Done---------")
    print("\nFinished seeding memcacheD IP list\n")
if __name__ == '__main__':
    print("-- MemcacheD Seeder -- Phenomite 2020 --")
    if len(sys.argv) < 3:
        print("Argument 1 (mandatory): Input filename containing one UDP memcache responder IP per line!\n\tAND DONT USE CRLF YOU WINDOWS FOOL. `dos2unix inputfile` if you are unsure.")
        print("Argument 2 (mandatory): Output filename.")
        print("Argument 3: Optional 1st domain to seed from (defaults if not entered).")
        print("Argument 4: Optional 2nd domain to seed from (defaults if not entered).")
        print("Argument 5: Optional 3rd domain to seed from (defaults if not entered).")
        quit()
    else:
        # BUGFIX: forward only the arguments actually supplied. The previous
        # code indexed sys.argv[3], [4] and [5] unconditionally, raising
        # IndexError when only one or two optional URLs were given.
        main(*sys.argv[1:6])
| 39.206897
| 185
| 0.612665
|
4a072b429d9fcbf5fcaf9a0458bc80936d48b556
| 69,847
|
py
|
Python
|
resources/lib/KodiHelper.py
|
db10bo/plugin.video.netflix
|
f9e0ac4512836807435cc992ddf90b3cd6c52c17
|
[
"MIT"
] | 1
|
2018-03-08T22:44:59.000Z
|
2018-03-08T22:44:59.000Z
|
resources/lib/KodiHelper.py
|
db10bo/plugin.video.netflix
|
f9e0ac4512836807435cc992ddf90b3cd6c52c17
|
[
"MIT"
] | null | null | null |
resources/lib/KodiHelper.py
|
db10bo/plugin.video.netflix
|
f9e0ac4512836807435cc992ddf90b3cd6c52c17
|
[
"MIT"
] | null | null | null |
# pylint: skip-file
# -*- coding: utf-8 -*-
# Module: KodiHelper
# Created on: 13.01.2017
import re
import json
import base64
import hashlib
from os import remove
from uuid import uuid4
from urllib import urlencode
import copy
from Cryptodome import Random
from os.path import join, isfile
from Cryptodome.Cipher import AES
from Cryptodome.Util import Padding
import xbmc
import xbmcgui
import xbmcplugin
import inputstreamhelper
from xbmcaddon import Addon
from resources.lib.MSL import MSL
from resources.lib.kodi.Dialogs import Dialogs
from utils import dd, get_user_agent, uniq_id
from UniversalAnalytics import Tracker
from collections import OrderedDict
from Search import SearchParams, SearchResults
try:
import cPickle as pickle
except:
import pickle
try:
# Python 2.6-2.7
from HTMLParser import HTMLParser
except ImportError:
# Python 3
from html.parser import HTMLParser
# Content-type identifiers; used to look up the per-type 'viewmode<type>'
# addon settings when applying a custom container view.
VIEW_FOLDER = 'folder'
VIEW_MOVIE = 'movie'
VIEW_SHOW = 'show'
VIEW_SEASON = 'season'
VIEW_EPISODE = 'episode'
class KodiListItem(object):
    """Pickle-able wrapper for the xbmcgui.ListItem class.

    xbmcgui.ListItem itself cannot be pickled, so this class records the
    constructor kwargs plus every method call made on it, and replays them
    against a real ListItem in :meth:`build`.
    """
    # Template for one recorded method call
    _method_proto = {
        'method': '',
        'argv': [],
        'kwargs': {}
    }
    # Template mirroring the xbmcgui.ListItem constructor kwargs
    _list_item_proto = {
        'label': '',
        'label2': '',
        'iconImage': '',
        'thumbnailImage': '',
        'path': '',
        'offscreen': False
    }
    def __init__(self, *argv, **kwargs):
        """Accepts another KodiListItem (copy), a dict of ctor kwargs, or
        plain keyword arguments matching `_list_item_proto` / attributes."""
        self.list_item = copy.deepcopy(self._list_item_proto)
        self.calls = []
        if argv:
            if isinstance(argv[0],self.__class__):
                # copy-construct: carry over every public attribute
                for attr in dir(self):
                    if attr[:1] != '_':
                        if hasattr(argv[0],attr):
                            setattr(self,attr,getattr(argv[0],attr))
            elif isinstance(argv[0],dict):
                # dict form: only keys known to the ListItem constructor
                self.list_item.update({k:v for k,v in argv[0].iteritems() if k in self.list_item})
        elif kwargs:
            for attr,value in kwargs.iteritems():
                if attr in self.list_item:
                    self.list_item[attr] = value
                elif attr[:1] != '_' and hasattr(self,attr):
                    setattr(self,attr,value)
        return
    def __getattribute__(self, attr):
        """Trap: any unknown public attribute access returns `_dummy`, which
        records the call for later replay instead of raising."""
        ret = None
        try:
            ret = object.__getattribute__(self,attr)
        except:
            if attr[:1] != '_':
                # remember which method name was requested for _dummy
                self._last_get_attr = attr
                return self._dummy
            raise
        return ret
    def _dummy(self, *argv, **kwargs):
        """Record the most recently requested method name with its args."""
        method = copy.deepcopy(self._method_proto)
        method['method'] = copy.deepcopy(self._last_get_attr)
        method['argv'] = copy.deepcopy(argv)
        method['kwargs'] = copy.deepcopy(kwargs)
        self.calls.append(method)
        return
    def build(self):
        """Create a real xbmcgui.ListItem and replay all recorded calls.
        Calls whose name does not exist on ListItem are silently dropped."""
        li = xbmcgui.ListItem(**self.list_item)
        for call in self.calls:
            if hasattr(li,call['method']):
                getattr(li,call['method'])(*call['argv'],**call['kwargs'])
        return li
class KodiDirectoryBuilder(object):
    """Pickle-able class for building a Kodi directory.

    Collects listings (url + KodiListItem + folder flag) and materialises
    them into a real Kodi directory in :meth:`build`.
    """
    # Template for a single directory entry
    _listing_proto = {
        'url': None, # string
        'listitem': None, # KodiListItem (xbmcgui.ListItem)
        'isFolder': None # bool
    }
    def __init__(self, *argv, **kwargs):
        """Accepts another builder (copy-construct) or keyword arguments
        matching public attributes (listings, view_mode, sort_methods)."""
        self.listings = []
        self.view_mode = VIEW_FOLDER # default
        self.sort_methods = [xbmcplugin.SORT_METHOD_NONE] # default
        if argv and isinstance(argv[0], self.__class__):
            # copy-construct from an existing builder
            for attr in dir(self):
                if attr[:1] != '_':
                    if hasattr(argv[0],attr):
                        setattr(self,attr,getattr(argv[0],attr))
        elif kwargs:
            for attr,value in kwargs.iteritems():
                if attr[:1] != '_' and hasattr(self,attr):
                    setattr(self,attr,value)
        return
    def __len__(self):
        # number of collected listings
        return len(self.listings)
    @classmethod
    def new_listing(cls,**kwargs):
        """Return a fresh listing dict, optionally pre-filled from kwargs
        (unknown keys are ignored)."""
        ret = copy.deepcopy(cls._listing_proto)
        if kwargs:
            ret.update({k:v for k,v in kwargs.iteritems() if k in ret})
        return ret
    @staticmethod
    def new_list_item(*argv, **kwargs):
        """Factory for a pickle-able KodiListItem."""
        return KodiListItem(*argv, **kwargs)
    def add_listing(self, listing):
        """Append one listing dict; keys outside `_listing_proto` are
        silently dropped."""
        new_listing = self.new_listing()
        new_listing.update(
            {k:v for k,v in listing.iteritems() if k in new_listing}
        )
        self.listings.append(new_listing)
        return
    def build(self, plugin_handle):
        """Replay all listings against the Kodi plugin API, apply the sort
        methods and view mode, then close the directory. Returns True."""
        for item in self.listings:
            # only entries carrying a KodiListItem are emitted
            if isinstance(item['listitem'], KodiListItem):
                _item = copy.deepcopy(item)
                _item['listitem'] = _item['listitem'].build()
                xbmcplugin.addDirectoryItem(handle=plugin_handle, **_item)
        for sort_method in self.sort_methods:
            xbmcplugin.addSortMethod(handle=plugin_handle, sortMethod=sort_method)
        if self.view_mode:
            xbmc.executebuiltin('Container.SetViewMode({})'.format(self.view_mode))
        xbmcplugin.endOfDirectory(plugin_handle)
        return True
class KodiHelper(object):
"""
Consumes all the configuration data from Kodi as well as
turns data into lists of folders and videos"""
    def __init__(self, plugin_handle=None, base_url=None):
        """
        Fetches all needed info from Kodi &
        configures the baseline of the plugin
        Parameters
        ----------
        plugin_handle : :obj:`int`
            Plugin handle
        base_url : :obj:`str`
            Plugin base url
        """
        addon = self.get_addon()
        # MSL service stores its data in its own addon_data folder
        raw_data_path = 'special://profile/addon_data/service.msl'
        data_path = xbmc.translatePath(raw_data_path)
        self.plugin_handle = plugin_handle
        self.base_url = base_url
        self.plugin = addon.getAddonInfo('name')
        self.version = addon.getAddonInfo('version')
        self.base_data_path = xbmc.translatePath(addon.getAddonInfo('profile'))
        self.home_path = xbmc.translatePath('special://home')
        self.plugin_path = addon.getAddonInfo('path')
        self.cookie_path = self.base_data_path + 'COOKIE'
        self.data_path = self.base_data_path + 'DATA'
        self.config_path = join(self.base_data_path, 'config')
        self.msl_data_path = data_path.decode('utf-8') + '/'
        self.verb_log = addon.getSetting('logging') == 'true'
        self.custom_export_name = addon.getSetting('customexportname')
        self.show_update_db = addon.getSetting('show_update_db')
        self.default_fanart = addon.getAddonInfo('fanart')
        # padding block size used by encode()/decode()
        self.bs = 32
        # device-bound key for the AES credential encryption
        self.crypt_key = uniq_id()
        self.library = None
        self.setup_memcache()
        self.dialogs = Dialogs(
            get_local_string=self.get_local_string,
            custom_export_name=self.custom_export_name)
        # max number of entries kept per persistent cache (see add_pcached_item)
        self.GLOBAL_PCACHE_LIMIT = 40 # item limit of 40
    def get_addon(self):
        """Return a freshly constructed Addon instance (never cached)."""
        return Addon()
    def check_folder_path(self, path):
        """
        Check if folderpath ends with path delimator
        If not correct it (makes sure xbmcvfs.exists is working correct)
        """
        # Python 2 only: unicode paths are ascii-encoded before inspection so
        # startswith/endswith work on byte strings
        if isinstance(path, unicode):
            check = path.encode('ascii', 'ignore')
            if '/' in check and not str(check).endswith('/'):
                end = u'/'
                path = path + end
                return path
            if '\\' in check and not str(check).endswith('\\'):
                end = u'\\'
                path = path + end
                return path
        # byte-string paths: append whichever separator style the path uses
        if '/' in path and not str(path).endswith('/'):
            path = path + '/'
            return path
        if '\\' in path and not str(path).endswith('\\'):
            path = path + '\\'
            return path
    def refresh(self):
        """Ask Kodi to reload the currently displayed container."""
        return xbmc.executebuiltin('Container.Refresh')
    def set_setting(self, key, value):
        """Public interface for the addons setSetting method
        Parameters
        ----------
        key : :obj:`str`
            Setting name
        value : :obj:`str`
            Setting value
        Returns
        -------
        bool
            Setting could be set or not
        """
        return self.get_addon().setSetting(key, value)
    def get_setting(self, key):
        """Public interface to the addons getSetting method
        Parameters
        ----------
        key : :obj:`str`
            Setting name
        Returns
        -------
        :obj:`str`
            The stored value for the given setting key
        """
        return self.get_addon().getSetting(key)
def toggle_adult_pin(self):
"""Toggles the adult pin setting"""
addon = self.get_addon()
adultpin_enabled = False
raw_adultpin_enabled = addon.getSetting('adultpin_enable')
if raw_adultpin_enabled == 'true' or raw_adultpin_enabled == 'True':
adultpin_enabled = True
if adultpin_enabled is False:
return addon.setSetting('adultpin_enable', 'True')
return addon.setSetting('adultpin_enable', 'False')
    def get_credentials(self):
        """Returns the users stored credentials
        Returns
        -------
        :obj:`dict` of :obj:`str`
            The users stored account data
        """
        addon = self.get_addon()
        email = addon.getSetting('email')
        password = addon.getSetting('password')
        # soft migration for existing credentials
        # base64 can't contain `@` chars, so a raw e-mail means the values
        # are still stored in plain text -> encrypt them in place
        if '@' in email:
            addon.setSetting('email', self.encode(raw=email))
            addon.setSetting('password', self.encode(raw=password))
            return {
                'email': self.get_addon().getSetting('email'),
                'password': self.get_addon().getSetting('password')
            }
        # if everything is fine, we decode the values
        # NOTE(review): `or` means decode() also runs when only one of the
        # two is non-empty -- confirm decode('') behaves as intended
        if '' != email or '' != password:
            return {
                'email': self.decode(enc=email),
                'password': self.decode(enc=password)
            }
        # if email is empty, we return an empty map
        return {
            'email': '',
            'password': ''
        }
    def encode(self, raw):
        """
        Encodes data
        :param raw: Data to be encoded
        :type raw: str
        :returns: string -- Encoded data
        """
        # pad to the block size expected by CBC mode
        raw = Padding.pad(data_to_pad=raw, block_size=self.bs)
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(self.crypt_key, AES.MODE_CBC, iv)
        # the IV is prepended so decode() can recover it
        return base64.b64encode(iv + cipher.encrypt(raw))
    def decode(self, enc):
        """
        Decodes data
        :param enc: Data to be decoded
        :type enc: str
        :returns: string -- Decoded data
        """
        enc = base64.b64decode(enc)
        # the IV was prepended by encode()
        iv = enc[:AES.block_size]
        cipher = AES.new(self.crypt_key, AES.MODE_CBC, iv)
        decoded = Padding.unpad(
            padded_data=cipher.decrypt(enc[AES.block_size:]),
            block_size=self.bs).decode('utf-8')
        return decoded
    def get_esn(self):
        """
        Returns the esn from settings
        """
        return self.get_addon().getSetting('esn')
    def set_esn(self, esn):
        """
        Stores the given esn if none is stored yet, wiping the MSL manifest
        data in that case; returns the esn that is effectively in use.
        """
        stored_esn = self.get_esn()
        # only a first-time esn is accepted; an existing one is kept
        if not stored_esn and esn:
            self.set_setting('esn', esn)
            # a new esn invalidates the cached MSL handshake/manifest
            self.delete_manifest_data()
            return esn
        return stored_esn
    def delete_manifest_data(self):
        """Remove cached MSL data/manifest files and redo the key handshake."""
        if isfile(self.msl_data_path + 'msl_data.json'):
            remove(self.msl_data_path + 'msl_data.json')
        if isfile(self.msl_data_path + 'manifest.json'):
            remove(self.msl_data_path + 'manifest.json')
        msl = MSL(kodi_helper=self)
        # re-establish the crypto session and persist the fresh MSL state
        msl.perform_key_handshake()
        msl.save_msl_data()
def get_dolby_setting(self):
"""
Returns if the dolby sound is enabled
:return: bool - Dolby Sourrind profile setting is enabled
"""
use_dolby = False
setting = self.get_addon().getSetting('enable_dolby_sound')
if setting == 'true' or setting == 'True':
use_dolby = True
return use_dolby
def use_hevc(self):
"""
Checks if HEVC profiles should be used
:return: bool - HEVC profile setting is enabled
"""
use_hevc = False
setting = self.get_addon().getSetting('enable_hevc_profiles')
if setting == 'true' or setting == 'True':
use_hevc = True
return use_hevc
def get_custom_library_settings(self):
"""Returns the settings in regards to the custom library folder(s)
Returns
-------
:obj:`dict` of :obj:`str`
The users library settings
"""
addon = self.get_addon()
return {
'enablelibraryfolder': addon.getSetting('enablelibraryfolder'),
'customlibraryfolder': addon.getSetting('customlibraryfolder')
}
def get_ssl_verification_setting(self):
"""
Returns the setting that describes if we should
verify the ssl transport when loading data
Returns
-------
bool
Verify or not
"""
return self.get_addon().getSetting('ssl_verification') == 'true'
def set_main_menu_selection(self, type):
"""Persist the chosen main menu entry in memory
Parameters
----------
type : :obj:`str`
Selected menu item
"""
current_window = xbmcgui.getCurrentWindowId()
xbmcgui.Window(current_window).setProperty('main_menu_selection', type)
def get_main_menu_selection(self):
"""Gets the persisted chosen main menu entry from memory
Returns
-------
:obj:`str`
The last chosen main menu entry
"""
current_window = xbmcgui.getCurrentWindowId()
window = xbmcgui.Window(current_window)
return window.getProperty('main_menu_selection')
    def setup_memcache(self):
        """Sets up the memory cache if not existant"""
        current_window = xbmcgui.getCurrentWindowId()
        window = xbmcgui.Window(current_window)
        try:
            cached_items = window.getProperty('memcache')
            # no cache setup yet, create one (an empty pickled dict)
            if len(cached_items) < 1:
                window.setProperty('memcache', pickle.dumps({}))
        except EOFError:
            # unreadable pickle blob -- leave the property untouched
            pass
    def setup_persistentcache(self,type=None):
        """Sets up the persistent memcache if not existant

        Returns the window-property name of the cache for the given type.
        An OrderedDict is used so add_pcached_item can evict oldest-first.
        """
        cache = 'persistentcache_{}'.format(type)
        try:
            cached_items = xbmcgui.Window(xbmcgui.getCurrentWindowId()).getProperty(cache)
            # no cache setup yet, create one
            if len(cached_items) < 1:
                xbmcgui.Window(xbmcgui.getCurrentWindowId()).setProperty(cache, pickle.dumps(OrderedDict()))
        except EOFError:
            pass
        return cache
def invalidate_memcache(self):
"""Invalidates the memory cache"""
current_window = xbmcgui.getCurrentWindowId()
window = xbmcgui.Window(current_window)
try:
window.setProperty('memcache', pickle.dumps({}))
except EOFError:
pass
def invalidate_persistentcache(self,type=None):
"""Invalidates the persistent memory cache"""
try:
xbmcgui.Window(xbmcgui.getCurrentWindowId()).setProperty('persistentcache_{}'.format(type), pickle.dumps(OrderedDict()))
except EOFError:
pass
def get_cached_item(self, cache_id):
"""Returns an item from the in memory cache
Parameters
----------
cache_id : :obj:`str`
ID of the cache entry
Returns
-------
mixed
Contents of the requested cache item or none
"""
ret = None
current_window = xbmcgui.getCurrentWindowId()
window = xbmcgui.Window(current_window)
try:
cached_items = pickle.loads(window.getProperty('memcache'))
ret = cached_items.get(cache_id)
except EOFError:
ret = None
return ret
def get_pcached_item(self, cache_id, type=None):
"""Returns an item from the in memory persistent cache
Parameters
----------
cache_id : :obj:`str`
ID of the cache entry
type : :obj:`str`
Cache type
Returns
-------
mixed
Contents of the requested cache item or none
"""
ret = None
cache = self.setup_persistentcache(type=type)
try:
cached_items = pickle.loads(xbmcgui.Window(xbmcgui.getCurrentWindowId()).getProperty(cache))
ret = cached_items.get(cache_id)
except EOFError:
ret = None
return ret
def add_cached_item(self, cache_id, contents):
"""Adds an item to the in memory cache
Parameters
----------
cache_id : :obj:`str`
ID of the cache entry
contents : mixed
Cache entry contents
"""
current_window = xbmcgui.getCurrentWindowId()
window = xbmcgui.Window(current_window)
try:
cached_items = pickle.loads(window.getProperty('memcache'))
cached_items.update({cache_id: contents})
window.setProperty('memcache', pickle.dumps(cached_items))
except EOFError:
pass
    def add_pcached_item(self, cache_id, contents, type=None):
        """Adds an item to the in memory persistent cache and manages cache limit
        Parameters
        ----------
        cache_id : :obj:`str`
            ID of the cache entry
        contents : mixed
            Cache entry contents
        type : :obj:`str`
            Cache type
        """
        cache = self.setup_persistentcache(type=type)
        try:
            cached_items = pickle.loads(xbmcgui.Window(xbmcgui.getCurrentWindowId()).getProperty(cache))
            cached_items.update({cache_id: contents})
            # ensure limit: pop the oldest entries (OrderedDict FIFO order)
            # until at most GLOBAL_PCACHE_LIMIT items remain
            for item in range(self.GLOBAL_PCACHE_LIMIT, len(cached_items)):
                cached_items.popitem(last=False)
            xbmcgui.Window(xbmcgui.getCurrentWindowId()).setProperty(cache, pickle.dumps(cached_items))
        except EOFError:
            pass
def create_pcached_id(self, cache_item, type=None):
"""Save item to cache and return the ID"""
cache_id = str(uuid4())
self.add_pcached_item(cache_id, cache_item, type=type)
return cache_id
def get_custom_view(self, content):
"""Get the view mode
Returns
----------
view
Type of content in container
(folder, movie, show, season, episode, login)
"""
custom_view = self.get_addon().getSetting('customview')
if custom_view == 'true':
view = int(self.get_addon().getSetting('viewmode' + content))
if view != -1:
return view
return None
def set_custom_view(self, content):
"""Set the view mode
Parameters
----------
content : :obj:`str`
Type of content in container
(folder, movie, show, season, episode, login)
"""
custom_view = self.get_custom_view(content)
if custom_view is not None:
xbmc.executebuiltin('Container.SetViewMode(%s)' % custom_view)
    def save_autologin_data(self, autologin_user, autologin_id):
        """Write autologin data to settings
        Parameters
        ----------
        autologin_user : :obj:`str`
            Profile name from netflix
        autologin_id : :obj:`str`
            Profile id from netflix
        """
        self.set_setting('autologin_user', autologin_user)
        self.set_setting('autologin_id', autologin_id)
        self.set_setting('autologin_enable', 'True')
        self.dialogs.show_autologin_enabled_notify()
        # drop cached listings and redraw so the change is visible at once
        self.invalidate_memcache()
        self.refresh()
    def get_new_kodidirectorybuilder(self, *argv, **kwargs):
        """Factory: return a new pickle-able KodiDirectoryBuilder."""
        return KodiDirectoryBuilder(*argv,**kwargs)
def build_profiles_listing(self, profiles, action, build_url):
"""
Builds the profiles list Kodi screen
Parameters
----------
profiles : :obj:`list` of :obj:`dict` of :obj:`str`
List of user profiles
action : :obj:`str`
Action paramter to build the subsequent routes
build_url : :obj:`fn`
Function to build the subsequent routes
:param profiles: list of user profiles
:type profiles: list
:param action: action paramter to build the subsequent routes
:type action: str
:param build_url: function to build the subsequent routes
:type build_url: fn
:returns: bool -- List could be build
"""
# init html parser for entity decoding
html_parser = HTMLParser()
# build menu items for every profile
for profile in profiles:
# load & encode profile data
enc_profile_name = profile.get('profileName', '').encode('utf-8')
unescaped_profile_name = html_parser.unescape(enc_profile_name)
profile_guid = profile.get('guid')
# build urls
url = build_url({'action': action, 'profile_id': profile_guid})
autologin_url = build_url({
'action': 'save_autologin',
'autologin_id': profile_guid,
'autologin_user': enc_profile_name})
# add list item
list_item = xbmcgui.ListItem(
label=unescaped_profile_name,
iconImage=profile.get('avatar'))
list_item.setProperty(
key='fanart_image',
value=self.default_fanart)
# add context menu options
auto_login = (
self.get_local_string(30053),
'RunPlugin(' + autologin_url + ')')
list_item.addContextMenuItems(items=[auto_login])
# add directory & sorting options
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url,
listitem=list_item,
isFolder=True)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_LABEL)
return xbmcplugin.endOfDirectory(handle=self.plugin_handle)
def build_main_menu_listing(self, video_list_ids, user_list_order, actions, build_url):
"""
Builds the video lists (my list, continue watching, etc.) Kodi screen
Parameters
----------
video_list_ids : :obj:`dict` of :obj:`str`
List of video lists
user_list_order : :obj:`list` of :obj:`str`
Ordered user lists
to determine what should be displayed in the main menue
actions : :obj:`dict` of :obj:`str`
Dictionary of actions to build subsequent routes
build_url : :obj:`fn`
Function to build the subsequent routes
Returns
-------
bool
List could be build
"""
preselect_items = []
for category in user_list_order:
for video_list_id in video_list_ids['user']:
if video_list_ids['user'][video_list_id]['name'] == category:
label = video_list_ids['user'][video_list_id]['displayName']
if category == 'netflixOriginals':
label = label.capitalize()
li = xbmcgui.ListItem(label=label, iconImage=self.default_fanart)
li.setProperty('fanart_image', self.default_fanart)
# determine action route
action = actions['default']
if category in actions.keys():
action = actions[category]
# determine if the item should be selected
preselect_items.append((False, True)[category == self.get_main_menu_selection()])
url = build_url({'action': action, 'video_list_id': video_list_id, 'type': category})
xbmcplugin.addDirectoryItem(handle=self.plugin_handle, url=url, listitem=li, isFolder=True)
# add recommendations/genres as subfolders
# (save us some space on the home page)
i18n_ids = {
'recommendations': self.get_local_string(30001),
'genres': self.get_local_string(30010)
}
for type in i18n_ids.keys():
# determine if the lists have contents
if len(video_list_ids[type]) > 0:
# determine action route
action = actions['default']
if type in actions.keys():
action = actions[type]
# determine if the item should be selected
preselect_items.append((False, True)[type == self.get_main_menu_selection()])
li_rec = xbmcgui.ListItem(
label=i18n_ids[type],
iconImage=self.default_fanart)
li_rec.setProperty('fanart_image', self.default_fanart)
url_rec = build_url({'action': action, 'type': type})
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url_rec,
listitem=li_rec,
isFolder=True)
# add search as subfolder
action = actions['default']
if 'search' in actions.keys():
action = actions[type]
li_rec = xbmcgui.ListItem(
label=self.get_local_string(30011),
iconImage=self.default_fanart)
li_rec.setProperty('fanart_image', self.default_fanart)
url_rec = build_url({'action': action, 'type': 'search'})
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url_rec,
listitem=li_rec,
isFolder=True)
# add exported as subfolder
action = actions['default']
if 'exported' in actions.keys():
action = actions[type]
li_rec = xbmcgui.ListItem(
label=self.get_local_string(30048),
iconImage=self.default_fanart)
li_rec.setProperty('fanart_image', self.default_fanart)
url_rec = build_url({'action': action, 'type': 'exported'})
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url_rec,
listitem=li_rec,
isFolder=True)
if self.show_update_db == 'true':
# add updatedb as subfolder
li_rec = xbmcgui.ListItem(
label=self.get_local_string(30049),
iconImage=self.default_fanart)
li_rec.setProperty('fanart_image', self.default_fanart)
url_rec = build_url({'action': 'updatedb'})
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url_rec,
listitem=li_rec,
isFolder=True)
# no sorting & close
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.endOfDirectory(self.plugin_handle)
# (re)select the previously selected main menu entry
idx = 1
for item in preselect_items:
idx += 1
preselected_list_item = idx if item else None
preselected_list_item = idx + 1 if self.get_main_menu_selection() == 'search' else preselected_list_item
if preselected_list_item is not None:
xbmc.executebuiltin('ActivateWindowAndFocus(%s, %s)' % (str(xbmcgui.Window(xbmcgui.getCurrentWindowId()).getFocusId()), str(preselected_list_item)))
self.set_custom_view(VIEW_FOLDER)
return True
def generate_video_listings(self, video_list, actions, build_url):
listings = []
for video_list_id in video_list:
video = video_list[video_list_id]
li = KodiDirectoryBuilder.new_list_item(label=video['title'], iconImage=self.default_fanart)
# add some art to the item
li = self._generate_art_info(entry=video, li=li)
# add list item info
li, infos = self._generate_entry_info(entry=video, li=li)
li = self._generate_context_menu_items(entry=video, li=li)
# lists can be mixed with shows & movies, therefor we need to check if its a movie, so play it right away
if video_list[video_list_id]['type'] == 'movie':
# it´s a movie, so we need no subfolder & a route to play it
isFolder = False
maturity = video.get('maturity', {}).get('level', 999)
needs_pin = (True, False)[int() >= 100]
url = build_url({
'action': 'play_video',
'video_id': video_list_id,
'infoLabels': infos,
'pin': needs_pin})
view = VIEW_MOVIE
else:
# it´s a show, so we need a subfolder & route (for seasons)
isFolder = True
params = {
'action': actions[video['type']],
'show_id': video_list_id
}
params['pin'] = (True, False)[int(video.get('maturity', {}).get('level', 1001)) >= 1000]
if 'tvshowtitle' in infos:
title = infos.get('tvshowtitle', '').encode('utf-8')
params['tvshowtitle'] = base64.urlsafe_b64encode(title)
url = build_url(params)
listings.append(KodiDirectoryBuilder.new_listing(url=url, listitem=li, isFolder=isFolder))
return listings
def build_video_listing(self, video_list, actions, build_url):
"""
Builds the video lists (my list, continue watching, etc.)
contents Kodi screen
Parameters
----------
video_list_ids : :obj:`dict` of :obj:`str`
List of video lists
actions : :obj:`dict` of :obj:`str`
Dictionary of actions to build subsequent routes
type : :obj:`str`
None or 'queue' f.e. when it´s a special video lists
build_url : :obj:`fn`
Function to build the subsequent routes
Returns
-------
bool
List could be build
"""
video_list_directory = KodiDirectoryBuilder(
view_mode = self.get_custom_view(VIEW_MOVIE),
sort_methods = [
xbmcplugin.SORT_METHOD_UNSORTED,
xbmcplugin.SORT_METHOD_LABEL,
xbmcplugin.SORT_METHOD_TITLE,
xbmcplugin.SORT_METHOD_VIDEO_YEAR,
xbmcplugin.SORT_METHOD_GENRE,
xbmcplugin.SORT_METHOD_LASTPLAYED
]
)
for video_listing in self.generate_video_listings(video_list=video_list, actions=actions, build_url=build_url):
video_list_directory.add_listing(video_listing)
if video_list_directory:
video_list_directory.build(plugin_handle=self.plugin_handle)
return True
def build_video_listing_exported(self, content, build_url):
"""Build list of exported movies / shows
Parameters
----------
content : :obj:`dict` of :obj:`str`
List of video lists
Returns
-------
bool
List could be build
"""
action = ['remove_from_library', self.get_local_string(30030), 'remove']
listing = content
for video in listing[0]:
year = self.library.get_exported_movie_year(title=video)
li = xbmcgui.ListItem(
label=str(video)+' ('+str(year)+')',
iconImage=self.default_fanart)
li.setProperty('fanart_image', self.default_fanart)
isFolder = False
url = build_url({
'action': 'removeexported',
'title': str(video),
'year': str(year),
'type': 'movie'})
art = {}
image = self.library.get_previewimage(video)
art.update({
'landscape': image,
'thumb': image
})
li.setArt(art)
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url,
listitem=li,
isFolder=isFolder)
for video in listing[2]:
li = xbmcgui.ListItem(
label=str(video),
iconImage=self.default_fanart)
li.setProperty('fanart_image', self.default_fanart)
isFolder = False
year = '0000'
url = build_url({
'action': 'removeexported',
'title': str(str(video)),
'year': str(year),
'type': 'show'})
art = {}
image = self.library.get_previewimage(video)
art.update({
'landscape': image,
'thumb': image
})
li.setArt(art)
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url,
listitem=li,
isFolder=isFolder)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_TITLE)
xbmcplugin.endOfDirectory(self.plugin_handle)
self.set_custom_view(VIEW_FOLDER)
return True
def build_search_result_folder(self, build_url, term, search_id):
"""Add search result folder
Parameters
----------
build_url : :obj:`fn`
Function to build the subsequent routes
term : :obj:`str`
Search term
Returns
-------
:obj:`str`
Search result folder URL
"""
# add search result as subfolder
li_rec = xbmcgui.ListItem(
label='({})'.format(term),
iconImage=self.default_fanart)
li_rec.setProperty('fanart_image', self.default_fanart)
url_rec = build_url({'action': 'search_result', 'search_id': search_id})
xbmcplugin.addDirectoryItem(handle=self.plugin_handle, url=url_rec, listitem=li_rec, isFolder=True)
xbmcplugin.addSortMethod(handle=self.plugin_handle, sortMethod=xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.endOfDirectory(self.plugin_handle)
self.set_custom_view(VIEW_FOLDER)
return url_rec
def set_location(self, url, replace=False):
"""Set URL location
Parameters
----------
url : :obj:`str`
Window URL
ret : bool
Return to location prior to activation
Returns
-------
bool
Window was activated
"""
cmd = 'Container.Update({},{})'.format(url, str(replace))
return xbmc.executebuiltin(cmd)
    def build_search_result_listing(self, search_params, search_results, video_list, actions, build_url):
        """Builds the search results list Kodi screen

        Parameters
        ----------
        search_params : :obj:`SearchParams`
            Parameters of the search that produced these results
        search_results : :obj:`dict`
            Raw search result payloads, keyed by result id
        video_list : :obj:`dict` of :obj:`str`
            List of videos or shows
        actions : :obj:`dict` of :obj:`str`
            Dictionary of actions to build subsequent routes
        build_url : :obj:`fn`
            Function to build the subsequent routes

        Returns
        -------
        bool
            List could be build
        """
        entity_list = []
        search_directory = KodiDirectoryBuilder(
            view_mode = self.get_custom_view(VIEW_MOVIE),
            sort_methods = [
                xbmcplugin.SORT_METHOD_UNSORTED,
                xbmcplugin.SORT_METHOD_LABEL,
                xbmcplugin.SORT_METHOD_TITLE,
                xbmcplugin.SORT_METHOD_VIDEO_YEAR,
                xbmcplugin.SORT_METHOD_GENRE,
                xbmcplugin.SORT_METHOD_LASTPLAYED
            ]
        )
        # collect entities
        # (suggested people/genres delivered alongside the search results)
        for result,result_values in search_results.iteritems():
            entity_list += result_values['data'].get('entities',[])
        if entity_list:
            # build suggestions directory
            suggestions_directory = KodiDirectoryBuilder(
                view_mode = self.get_custom_view(VIEW_FOLDER),
                sort_methods = [
                    xbmcplugin.SORT_METHOD_UNSORTED,
                    xbmcplugin.SORT_METHOD_LABEL,
                    xbmcplugin.SORT_METHOD_TITLE,
                    xbmcplugin.SORT_METHOD_VIDEO_YEAR,
                    xbmcplugin.SORT_METHOD_GENRE,
                    xbmcplugin.SORT_METHOD_LASTPLAYED
                ]
            )
            for entity in entity_list:
                # each suggestion becomes its own cached search that can be
                # executed later through the 'search_result' route
                entity_search = SearchParams()
                entity_search.add_entity(type_id=entity['type_id'])
                entity_search_id = self.create_pcached_id(entity_search, type='SEARCH')
                entity_li = KodiDirectoryBuilder.new_list_item(
                    label='{} ({})'.format(entity['name'].encode('ascii','ignore'),entity['type'].encode('ascii','ignore')),
                    iconImage=self.default_fanart,
                    thumbnailImage=self.default_fanart
                )
                entity_li.setProperty('fanart_image', self.default_fanart)
                suggestions_directory.add_listing(
                    KodiDirectoryBuilder.new_listing(
                        url=build_url({'action': 'search_result', 'search_id': entity_search_id}),
                        listitem=entity_li,
                        isFolder=True
                    )
                )
            # the whole suggestions directory is cached & rendered on demand
            # via the 'cached_directory' route
            suggestions_cache_id = self.create_pcached_id(suggestions_directory, type='DIRECTORY')
            # add suggestions dir to search dir
            suggestions_li = KodiDirectoryBuilder.new_list_item(
                label=self.get_local_string(30063), # suggestions
                iconImage=self.default_fanart,
                thumbnailImage=self.default_fanart
            )
            suggestions_li.setProperty('fanart_image', self.default_fanart)
            search_directory.add_listing(
                KodiDirectoryBuilder.new_listing(
                    url=build_url({'action': 'cached_directory', 'cache_id': suggestions_cache_id}),
                    listitem=suggestions_li,
                    isFolder=True
                )
            )
        # add video listings
        for listing in self.generate_video_listings(video_list=video_list, actions=actions, build_url=build_url):
            search_directory.add_listing(listing)
        # add next search (page) listing
        next_search = search_params.build_next_search(search_results=search_results)
        if next_search:
            # pagination entry pointing at the cached follow-up search
            next_search_id = self.create_pcached_id(next_search, type='SEARCH')
            next_search_li = KodiDirectoryBuilder.new_list_item(
                label=self.get_local_string(30045),
                iconImage=self.default_fanart,
                thumbnailImage=self.default_fanart
            )
            next_search_li.setProperty('fanart_image', self.default_fanart)
            search_directory.add_listing(
                KodiDirectoryBuilder.new_listing(
                    url=build_url({'action': 'search_result', 'search_id': next_search_id}),
                    listitem=next_search_li,
                    isFolder=True
                )
            )
        return search_directory.build(plugin_handle=self.plugin_handle)
def build_no_seasons_available(self):
"""Builds the season list screen if no seasons could be found
Returns
-------
bool
List could be build
"""
self.dialogs.show_no_seasons_notify()
xbmcplugin.endOfDirectory(self.plugin_handle)
return True
def build_no_search_results_available(self, build_url, action):
"""Builds the search results screen if no matches could be found
Parameters
----------
action : :obj:`str`
Action paramter to build the subsequent routes
build_url : :obj:`fn`
Function to build the subsequent routes
Returns
-------
bool
List could be build
"""
self.dialogs.show_no_search_results_notify()
return xbmcplugin.endOfDirectory(self.plugin_handle)
def build_user_sub_listing(self, video_list_ids, type, action, build_url):
"""
Builds the video lists screen for user subfolders
(genres & recommendations)
Parameters
----------
video_list_ids : :obj:`dict` of :obj:`str`
List of video lists
type : :obj:`str`
List type (genre or recommendation)
action : :obj:`str`
Action paramter to build the subsequent routes
build_url : :obj:`fn`
Function to build the subsequent routes
Returns
-------
bool
List could be build
"""
for video_list_id in video_list_ids:
li = xbmcgui.ListItem(
label=video_list_ids[video_list_id]['displayName'],
iconImage=self.default_fanart)
li.setProperty('fanart_image', self.default_fanart)
url = build_url({'action': action, 'video_list_id': video_list_id})
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.endOfDirectory(self.plugin_handle)
self.set_custom_view(VIEW_FOLDER)
return True
def build_season_listing(self, seasons_sorted, build_url):
"""Builds the season list screen for a show
Parameters
----------
seasons_sorted : :obj:`list` of :obj:`dict` of :obj:`str`
Sorted list of season entries
build_url : :obj:`fn`
Function to build the subsequent routes
Returns
-------
bool
List could be build
"""
for season in seasons_sorted:
li = xbmcgui.ListItem(label=season['text'])
# add some art to the item
li = self._generate_art_info(entry=season, li=li)
# add list item info
li, infos = self._generate_entry_info(
entry=season,
li=li,
base_info={'mediatype': 'season'})
li = self._generate_context_menu_items(entry=season, li=li)
params = {'action': 'episode_list', 'season_id': season['id']}
if 'tvshowtitle' in infos:
title = infos.get('tvshowtitle', '').encode('utf-8')
params['tvshowtitle'] = base64.urlsafe_b64encode(title)
url = build_url(params)
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_NONE)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_VIDEO_YEAR)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_LASTPLAYED)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_TITLE)
xbmcplugin.endOfDirectory(self.plugin_handle)
self.set_custom_view(VIEW_SEASON)
return True
def build_episode_listing(self, episodes_sorted, build_url):
"""Builds the episode list screen for a season of a show
Parameters
----------
episodes_sorted : :obj:`list` of :obj:`dict` of :obj:`str`
Sorted list of episode entries
build_url : :obj:`fn`
Function to build the subsequent routes
Returns
-------
bool
List could be build
"""
for episode in episodes_sorted:
li = xbmcgui.ListItem(label=episode['title'])
# add some art to the item
li = self._generate_art_info(entry=episode, li=li)
# add list item info
li, infos = self._generate_entry_info(
entry=episode,
li=li,
base_info={'mediatype': 'episode'})
li = self._generate_context_menu_items(entry=episode, li=li)
maturity = episode.get('maturity', {}).get('maturityLevel', 999)
needs_pin = (True, False)[int(maturity) >= 100]
url = build_url({
'action': 'play_video',
'video_id': episode['id'],
'start_offset': episode['bookmark'],
'infoLabels': infos,
'pin': needs_pin})
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url,
listitem=li,
isFolder=False)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_EPISODE)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_NONE)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_VIDEO_YEAR)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_LASTPLAYED)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_TITLE)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_DURATION)
xbmcplugin.endOfDirectory(self.plugin_handle)
self.set_custom_view(VIEW_EPISODE)
return True
    def play_item(self, esn, video_id, start_offset=-1, infoLabels={}):
        """Plays a video via the local MSL service & inputstream.adaptive

        Parameters
        ----------
        esn : :obj:`str`
            ESN needed for Widevine/Inputstream
        video_id : :obj:`str`
            ID of the video that should be played
        start_offset : :obj:`str`
            Offset to resume playback from (in seconds)
        infoLabels : :obj:`dict`
            the listitem's infoLabels
            NOTE(review): mutable default is only ever rebound (never
            mutated) in this method, so it is not shared across calls

        Returns
        -------
        bool
            List could be build
        """
        self.set_esn(esn)
        addon = self.get_addon()
        # bail out if inputstream.adaptive/Widevine is not usable
        is_helper = inputstreamhelper.Helper('mpd', drm='widevine')
        if not is_helper.check_inputstream():
            return False
        # track play event
        self.track_event('playVideo')
        # check esn in settings
        settings_esn = str(addon.getSetting('esn'))
        if len(settings_esn) == 0:
            addon.setSetting('esn', str(esn))
        # inputstream addon properties:
        # manifest & license are served by the local MSL proxy service
        port = str(addon.getSetting('msl_service_port'))
        msl_service_url = 'http://localhost:' + port
        play_item = xbmcgui.ListItem(
            path=msl_service_url + '/manifest?id=' + video_id)
        play_item.setContentLookup(False)
        play_item.setMimeType('application/dash+xml')
        play_item.setProperty(
            key=is_helper.inputstream_addon + '.stream_headers',
            value='user-agent=' + get_user_agent())
        play_item.setProperty(
            key=is_helper.inputstream_addon + '.license_type',
            value='com.widevine.alpha')
        play_item.setProperty(
            key=is_helper.inputstream_addon + '.manifest_type',
            value='mpd')
        play_item.setProperty(
            key=is_helper.inputstream_addon + '.license_key',
            value=msl_service_url + '/license?id=' + video_id + '||b{SSM}!b{SID}|')
        # static Widevine server certificate (base64 encoded)
        play_item.setProperty(
            key=is_helper.inputstream_addon + '.server_certificate',
            value='Cr0CCAMSEOVEukALwQ8307Y2+LVP+0MYh/HPkwUijgIwggEKAoIBAQDm875btoWUbGqQD8eAGuBlGY+Pxo8YF1LQR+Ex0pDONMet8EHslcZRBKNQ/09RZFTP0vrYimyYiBmk9GG+S0wB3CRITgweNE15cD33MQYyS3zpBd4z+sCJam2+jj1ZA4uijE2dxGC+gRBRnw9WoPyw7D8RuhGSJ95OEtzg3Ho+mEsxuE5xg9LM4+Zuro/9msz2bFgJUjQUVHo5j+k4qLWu4ObugFmc9DLIAohL58UR5k0XnvizulOHbMMxdzna9lwTw/4SALadEV/CZXBmswUtBgATDKNqjXwokohncpdsWSauH6vfS6FXwizQoZJ9TdjSGC60rUB2t+aYDm74cIuxAgMBAAE6EHRlc3QubmV0ZmxpeC5jb20SgAOE0y8yWw2Win6M2/bw7+aqVuQPwzS/YG5ySYvwCGQd0Dltr3hpik98WijUODUr6PxMn1ZYXOLo3eED6xYGM7Riza8XskRdCfF8xjj7L7/THPbixyn4mULsttSmWFhexzXnSeKqQHuoKmerqu0nu39iW3pcxDV/K7E6aaSr5ID0SCi7KRcL9BCUCz1g9c43sNj46BhMCWJSm0mx1XFDcoKZWhpj5FAgU4Q4e6f+S8eX39nf6D6SJRb4ap7Znzn7preIvmS93xWjm75I6UBVQGo6pn4qWNCgLYlGGCQCUm5tg566j+/g5jvYZkTJvbiZFwtjMW5njbSRwB3W4CrKoyxw4qsJNSaZRTKAvSjTKdqVDXV/U5HK7SaBA6iJ981/aforXbd2vZlRXO/2S+Maa2mHULzsD+S5l4/YGpSt7PnkCe25F+nAovtl/ogZgjMeEdFyd/9YMYjOS4krYmwp3yJ7m9ZzYCQ6I8RQN4x/yLlHG5RH/+WNLNUs6JAZ0fFdCmw=')
        play_item.setProperty(
            key='inputstreamaddon',
            value=is_helper.inputstream_addon)
        # check if we have a bookmark e.g. start offset position
        if int(start_offset) > 0:
            play_item.setProperty('StartOffset', str(start_offset) + '.0')
        # set infoLabels
        if len(infoLabels) > 0:
            play_item.setInfo('video', infoLabels)
        # fall back to the locally exported metadata/art files
        if len(infoLabels) == 0:
            infoLabels = self.library.read_metadata_file(video_id=video_id)
            art = self.library.read_artdata_file(video_id=video_id)
            play_item.setArt(art)
            play_item.setInfo('video', infoLabels)
        # check for content in kodi db
        # NOTE(review): read_metadata_file apparently can return None,
        # hence the str(...) != 'None' guard — confirm against library impl
        if str(infoLabels) != 'None':
            if infoLabels['mediatype'] == 'episode':
                id = self.showtitle_to_id(title=infoLabels['tvshowtitle'])
                details = self.get_show_content_by_id(
                    showid=id,
                    showseason=infoLabels['season'],
                    showepisode=infoLabels['episode'])
                if details is not False:
                    play_item.setInfo('video', details[0])
                    play_item.setArt(details[1])
            if infoLabels['mediatype'] != 'episode':
                id = self.movietitle_to_id(title=infoLabels['title'])
                details = self.get_movie_content_by_id(movieid=id)
                if details is not False:
                    play_item.setInfo('video', details[0])
                    play_item.setArt(details[1])
        resolved = xbmcplugin.setResolvedUrl(
            handle=self.plugin_handle,
            succeeded=True,
            listitem=play_item)
        return resolved
def _generate_art_info(self, entry, li):
"""Adds the art info from an entry to a Kodi list item
Parameters
----------
entry : :obj:`dict` of :obj:`str`
Entry that should be turned into a list item
li : :obj:`XMBC.ListItem`
Kodi list item instance
Returns
-------
:obj:`XMBC.ListItem`
Kodi list item instance
"""
art = {'fanart': self.default_fanart}
# Cleanup art
art.update({
'landscape': '',
'thumb': '',
'fanart': '',
'poster': ''
})
self.log(entry)
if 'boxarts' in dict(entry).keys() and not isinstance(entry.get('boxarts'), dict):
big = entry.get('boxarts', '')
small = big
if 'boxarts' in dict(entry).keys() and isinstance(entry.get('boxarts'), dict):
big = entry.get('boxarts', {}).get('big')
small = entry.get('boxarts', {}).get('small')
art.update({
'poster': big or small,
'landscape': big or small,
'thumb': big or small,
'fanart': big or small
})
# Download image for exported listing
if 'title' in entry:
self.library.download_image_file(
title=entry['title'].encode('utf-8'),
url=str(big))
if 'interesting_moment' in dict(entry).keys():
art.update({
'poster': entry['interesting_moment'],
'fanart': entry['interesting_moment']
})
if 'thumb' in dict(entry).keys():
art.update({'thumb': entry['thumb']})
if 'fanart' in dict(entry).keys():
art.update({'fanart': entry['fanart']})
if 'poster' in dict(entry).keys():
art.update({'poster': entry['poster']})
li.setArt(art)
vid_id = entry.get('id', entry.get('summary', {}).get('id'))
self.library.write_artdata_file(video_id=str(vid_id), content=art)
return li
def _generate_entry_info(self, entry, li, base_info={}):
"""Adds the item info from an entry to a Kodi list item
Parameters
----------
entry : :obj:`dict` of :obj:`str`
Entry that should be turned into a list item
li : :obj:`XMBC.ListItem`
Kodi list item instance
base_info : :obj:`dict` of :obj:`str`
Additional info that overrules the entry info
Returns
-------
:obj:`XMBC.ListItem`
Kodi list item instance
"""
infos = base_info
entry_keys = entry.keys()
# Cleanup item info
infos.update({
'writer': '',
'director': '',
'genre': '',
'mpaa': '',
'rating': '',
'plot': '',
'duration': '',
'season': '',
'title': '',
'tvshowtitle': '',
'mediatype': '',
'playcount': '',
'episode': '',
'year': '',
'tvshowtitle': ''
})
if 'cast' in entry_keys and len(entry['cast']) > 0:
infos.update({'cast': entry['cast']})
if 'creators' in entry_keys and len(entry['creators']) > 0:
infos.update({'writer': entry['creators'][0]})
if 'directors' in entry_keys and len(entry['directors']) > 0:
infos.update({'director': entry['directors'][0]})
if 'genres' in entry_keys and len(entry['genres']) > 0:
infos.update({'genre': entry['genres'][0]})
if 'maturity' in entry_keys:
if 'mpaa' in entry_keys:
infos.update({'mpaa': entry['mpaa']})
else:
if entry.get('maturity', None) is not None:
if entry.get('maturity', {}).get('board') is not None and entry.get('maturity', {}).get('value') is not None:
infos.update({'mpaa': str(entry['maturity']['board'].encode('utf-8')) + '-' + str(entry['maturity']['value'].encode('utf-8'))})
if 'rating' in entry_keys:
infos.update({'rating': int(entry['rating']) * 2})
if 'synopsis' in entry_keys:
infos.update({'plot': entry['synopsis']})
if 'plot' in entry_keys:
infos.update({'plot': entry['plot']})
if 'runtime' in entry_keys:
infos.update({'duration': entry['runtime']})
if 'duration' in entry_keys:
infos.update({'duration': entry['duration']})
if 'seasons_label' in entry_keys:
infos.update({'season': entry['seasons_label']})
if 'season' in entry_keys:
infos.update({'season': entry['season']})
if 'title' in entry_keys:
infos.update({'title': entry['title']})
if 'type' in entry_keys:
if entry['type'] == 'movie' or entry['type'] == 'episode':
li.setProperty('IsPlayable', 'true')
elif entry['type'] == 'show':
infos.update({'tvshowtitle': entry['title']})
if 'mediatype' in entry_keys:
if entry['mediatype'] == 'movie' or entry['mediatype'] == 'episode':
li.setProperty('IsPlayable', 'true')
infos.update({'mediatype': entry['mediatype']})
if 'watched' in entry_keys and entry.get('watched') is True:
infos.update({'playcount': 1})
else:
del infos['playcount']
if 'index' in entry_keys:
infos.update({'episode': entry['index']})
if 'episode' in entry_keys:
infos.update({'episode': entry['episode']})
if 'year' in entry_keys:
infos.update({'year': entry['year']})
if 'quality' in entry_keys:
quality = {'width': '960', 'height': '540'}
if entry['quality'] == '720':
quality = {'width': '1280', 'height': '720'}
if entry['quality'] == '1080':
quality = {'width': '1920', 'height': '1080'}
li.addStreamInfo('video', quality)
if 'tvshowtitle' in entry_keys:
title = base64.urlsafe_b64decode(entry.get('tvshowtitle', ''))
infos.update({'tvshowtitle': title.decode('utf-8')})
li.setInfo('video', infos)
self.library.write_metadata_file(video_id=str(entry['id']), content=infos)
return li, infos
def _generate_context_menu_items(self, entry, li):
"""Adds context menue items to a Kodi list item
Parameters
----------
entry : :obj:`dict` of :obj:`str`
Entry that should be turned into a list item
li : :obj:`XMBC.ListItem`
Kodi list item instance
Returns
-------
:obj:`XMBC.ListItem`
Kodi list item instance
"""
items = []
action = {}
entry_keys = entry.keys()
# action item templates
encoded_title = urlencode({'title': entry['title'].encode('utf-8')}) if 'title' in entry else ''
url_tmpl = 'XBMC.RunPlugin(' + self.base_url + '?action=%action%&id=' + str(entry['id']) + '&' + encoded_title + ')'
actions = [
['export_to_library', self.get_local_string(30018), 'export'],
['remove_from_library', self.get_local_string(30030), 'remove'],
['update_the_library', self.get_local_string(30061), 'update'],
['rate_on_netflix', self.get_local_string(30019), 'rating'],
['remove_from_my_list', self.get_local_string(30020), 'remove_from_list'],
['add_to_my_list', self.get_local_string(30021), 'add_to_list']
]
# build concrete action items
for action_item in actions:
action.update({action_item[0]: [action_item[1], url_tmpl.replace('%action%', action_item[2])]})
# add or remove the movie/show/season/episode from & to the users "My List"
if 'in_my_list' in entry_keys:
items.append(action['remove_from_my_list']) if entry['in_my_list'] else items.append(action['add_to_my_list'])
elif 'queue' in entry_keys:
items.append(action['remove_from_my_list']) if entry['queue'] else items.append(action['add_to_my_list'])
elif 'my_list' in entry_keys:
items.append(action['remove_from_my_list']) if entry['my_list'] else items.append(action['add_to_my_list'])
# rate the movie/show/season/episode on Netflix
items.append(action['rate_on_netflix'])
# add possibility to export this movie/show/season/episode to a static/local library (and to remove it)
if 'type' in entry_keys:
# add/remove movie
if entry['type'] == 'movie':
action_type = 'remove_from_library' if self.library.movie_exists(title=entry['title'], year=entry.get('year', 0000)) else 'export_to_library'
items.append(action[action_type])
# Add update option
if action_type == 'remove_from_library':
action_type = 'update_the_library'
items.append(action[action_type])
if entry['type'] == 'show' and 'title' in entry_keys:
action_type = 'remove_from_library' if self.library.show_exists(title=entry['title']) else 'export_to_library'
items.append(action[action_type])
# Add update option
if action_type == 'remove_from_library':
action_type = 'update_the_library'
items.append(action[action_type])
# add it to the item
li.addContextMenuItems(items)
return li
def log(self, msg, level=xbmc.LOGDEBUG):
"""Adds a log entry to the Kodi log
Parameters
----------
msg : :obj:`str`
Entry that should be turned into a list item
level : :obj:`int`
Kodi log level
"""
if isinstance(msg, unicode):
msg = msg.encode('utf-8')
xbmc.log('[%s] %s' % (self.plugin, msg.__str__()), level)
def get_local_string(self, string_id):
"""Returns the localized version of a string
Parameters
----------
string_id : :obj:`int`
ID of the string that shoudl be fetched
Returns
-------
:obj:`str`
Requested string or empty string
"""
src = xbmc if string_id < 30000 else self.get_addon()
locString = src.getLocalizedString(string_id)
if isinstance(locString, unicode):
locString = locString.encode('utf-8')
return locString
def movietitle_to_id(self, title):
query = {
"jsonrpc": "2.0",
"method": "VideoLibrary.GetMovies",
"params": {
"properties": ["title"]
},
"id": "libMovies"
}
try:
rpc_result = xbmc.executeJSONRPC(
jsonrpccommand=json.dumps(query, encoding='utf-8'))
json_result = json.loads(rpc_result)
if 'result' in json_result and 'movies' in json_result['result']:
json_result = json_result['result']['movies']
for movie in json_result:
# Switch to ascii/lowercase and remove special chars and spaces
# to make sure best possible compare is possible
titledb = movie['title'].encode('ascii', 'ignore')
titledb = re.sub(r'[?|$|!|:|#|\.|\,|\'| ]', r'', titledb).lower().replace('-', '')
if '(' in titledb:
titledb = titledb.split('(')[0]
titlegiven = title.encode('ascii','ignore')
titlegiven = re.sub(r'[?|$|!|:|#|\.|\,|\'| ]', r'', titlegiven).lower().replace('-', '')
if '(' in titlegiven:
titlegiven = titlegiven.split('(')[0]
if titledb == titlegiven:
return movie['movieid']
return '-1'
except Exception:
return '-1'
def showtitle_to_id(self, title):
query = {
"jsonrpc": "2.0",
"method": "VideoLibrary.GetTVShows",
"params": {
"properties": ["title", "genre"]
},
"id": "libTvShows"
}
try:
rpc_result = xbmc.executeJSONRPC(
jsonrpccommand=json.dumps(query, encoding='utf-8'))
json_result = json.loads(rpc_result)
if 'result' in json_result and 'tvshows' in json_result['result']:
json_result = json_result['result']['tvshows']
for tvshow in json_result:
# Switch to ascii/lowercase and
# remove special chars and spaces
# to make sure best possible compare is possible
titledb = tvshow['label'].encode('ascii', 'ignore')
titledb = re.sub(
pattern=r'[?|$|!|:|#|\.|\,|\'| ]',
repl=r'',
string=titledb).lower().replace('-', '')
if '(' in titledb:
titledb = titledb.split('(')[0]
titlegiven = title.encode('ascii', 'ignore')
titlegiven = re.sub(
pattern=r'[?|$|!|:|#|\.|\,|\'| ]',
repl=r'',
string=titlegiven).lower().replace('-', '')
if '(' in titlegiven:
titlegiven = titlegiven.split('(')[0]
if titledb == titlegiven:
return tvshow['tvshowid'], tvshow['genre']
return '-1', ''
except Exception:
return '-1', ''
def get_show_content_by_id(self, showid, showseason, showepisode):
showseason = int(showseason)
showepisode = int(showepisode)
props = ["season", "episode", "plot", "fanart", "art"]
query = {
"jsonrpc": "2.0",
"method": "VideoLibrary.GetEpisodes",
"params": {
"properties": props,
"tvshowid": int(showid[0])
},
"id": "1"
}
try:
rpc_result = xbmc.executeJSONRPC(
jsonrpccommand=json.dumps(query, encoding='utf-8'))
json_result = json.loads(rpc_result)
result = json_result.get('result', None)
if result is not None and 'episodes' in result:
result = result['episodes']
for episode in result:
in_season = episode['season'] == showseason
in_episode = episode['episode'] == showepisode
if in_season and in_episode:
infos = {}
if 'plot' in episode and len(episode['plot']) > 0:
infos.update({
'plot': episode['plot'],
'genre': showid[1]})
art = {}
if 'fanart' in episode and len(episode['fanart']) > 0:
art.update({'fanart': episode['fanart']})
if 'art' in episode and len(episode['art']['season.poster']) > 0:
art.update({
'thumb': episode['art']['season.poster']})
return infos, art
return False
except Exception:
return False
def get_movie_content_by_id(self, movieid):
query = {
"jsonrpc": "2.0",
"method": "VideoLibrary.GetMovieDetails",
"params": {
"movieid": movieid,
"properties": [
"genre",
"plot",
"fanart",
"thumbnail",
"art"]
},
"id": "libMovies"
}
try:
rpc_result = xbmc.executeJSONRPC(
jsonrpccommand=json.dumps(query, encoding='utf-8'))
json_result = json.loads(rpc_result)
result = json_result.get('result', None)
if result is not None and 'moviedetails' in result:
result = result.get('moviedetails', {})
infos = {}
if 'genre' in result and len(result['genre']) > 0:
infos.update({'genre': json_result['genre']})
if 'plot' in result and len(result['plot']) > 0:
infos.update({'plot': result['plot']})
art = {}
if 'fanart' in result and len(result['fanart']) > 0:
art.update({'fanart': result['fanart']})
if 'thumbnail' in result and len(result['thumbnail']) > 0:
art.update({'thumb': result['thumbnail']})
if 'art' in json_result and len(result['art']['poster']) > 0:
art.update({'poster': result['art']['poster']})
return infos, art
return False
except Exception:
return False
def set_library(self, library):
"""Adds an instance of the Library class
Parameters
----------
library : :obj:`Library`
instance of the Library class
"""
self.library = library
def track_event(self, event):
"""
Send a tracking event if tracking is enabled
:param event: the string idetifier of the event
:return: None
"""
addon = self.get_addon()
# Check if tracking is enabled
enable_tracking = (addon.getSetting('enable_tracking') == 'true')
if enable_tracking:
# Get or Create Tracking id
tracking_id = addon.getSetting('tracking_id')
if tracking_id is '':
tracking_id = str(uuid4())
addon.setSetting('tracking_id', tracking_id)
# Send the tracking event
tracker = Tracker.create('UA-46081640-5', client_id=tracking_id)
tracker.send('event', event)
| 37.91911
| 965
| 0.560296
|
4a072ca7487f0f685df28cbbb1047de2dccadd6e
| 365
|
py
|
Python
|
findram/users/urls.py
|
RamParameswaran/findram.dev
|
ed44c0ef9287906117ba9ee48e447cd8d6a62a7f
|
[
"MIT"
] | 1
|
2020-11-12T00:41:16.000Z
|
2020-11-12T00:41:16.000Z
|
findram/users/urls.py
|
RamParameswaran/findram.dev
|
ed44c0ef9287906117ba9ee48e447cd8d6a62a7f
|
[
"MIT"
] | null | null | null |
findram/users/urls.py
|
RamParameswaran/findram.dev
|
ed44c0ef9287906117ba9ee48e447cd8d6a62a7f
|
[
"MIT"
] | null | null | null |
from django.urls import path
from findram.users.views import (
user_redirect_view,
user_update_view,
user_detail_view,
)
# URL namespace for this app's routes (used when reversing, e.g. "users:detail").
app_name = "users"
# Literal routes carry a "~" prefix so they cannot collide with the
# catch-all <str:username> detail route below.
urlpatterns = [
    path("~redirect/", view=user_redirect_view, name="redirect"),
    path("~update/", view=user_update_view, name="update"),
    path("<str:username>/", view=user_detail_view, name="detail"),
]
| 24.333333
| 66
| 0.70137
|
4a072d2484be413220438cc91080d8ef22edf2e6
| 9,884
|
py
|
Python
|
openclean/function/eval/datatype.py
|
remram44/openclean-core
|
8c09c8302cadbb3bb02c959907f91a3ae343f939
|
[
"BSD-3-Clause"
] | 4
|
2021-04-20T09:06:26.000Z
|
2021-11-20T20:31:28.000Z
|
openclean/function/eval/datatype.py
|
remram44/openclean-core
|
8c09c8302cadbb3bb02c959907f91a3ae343f939
|
[
"BSD-3-Clause"
] | 14
|
2021-01-19T19:23:16.000Z
|
2021-04-28T14:31:03.000Z
|
openclean/function/eval/datatype.py
|
remram44/openclean-core
|
8c09c8302cadbb3bb02c959907f91a3ae343f939
|
[
"BSD-3-Clause"
] | 5
|
2021-08-24T11:57:21.000Z
|
2022-03-17T04:39:04.000Z
|
# This file is part of the Data Cleaning Library (openclean).
#
# Copyright (C) 2018-2021 New York University.
#
# openclean is released under the Revised BSD License. See file LICENSE for
# full license details.
"""Predicates that test whether a given value (or list of values) matches a
given data type constraint.
"""
from openclean.function.eval.base import Eval
from openclean.function.value.datatype import (
is_datetime, is_float, is_int, is_nan, to_datetime, to_int, to_float
)
# -- Type checker predicates --------------------------------------------------
class IsDatetime(Eval):
    """Evaluation function that checks whether values from a data frame row
    are dates or (optionally) can be parsed as dates.
    """
    def __init__(self, columns, formats=None, typecast=True):
        """Initialize the date-checking predicate.

        Parameters
        ----------
        columns: int, string, EvalFunction, or list
            Single column or list of column index positions or column names.
            This can also be a single evaluation function or a list of
            functions.
        formats: string or list(string)
            Date format string using Python strptime() format directives.
            This can be a list of date formats.
        typecast: bool, default=True
            Attempt to parse string values as dates if True.
        """
        # Bind formats/typecast into a unary callable for the Eval base class.
        def predicate(value):
            return is_datetime(value, formats=formats, typecast=typecast)

        super(IsDatetime, self).__init__(func=predicate, columns=columns, is_unary=True)
class IsInt(Eval):
    """Evaluation function that checks whether values from a data frame row
    are integers or (optionally) can be cast to integers.
    """
    def __init__(self, columns, typecast=True):
        """Initialize the integer-checking predicate.

        Parameters
        ----------
        columns: int, string, EvalFunction, or list
            Single column or list of column index positions or column names.
            This can also be a single evaluation function or a list of
            functions.
        typecast: bool, default=True
            Cast string values to integer if True.
        """
        # Bind typecast into a unary callable for the Eval base class.
        def predicate(value):
            return is_int(value, typecast=typecast)

        super(IsInt, self).__init__(func=predicate, columns=columns, is_unary=True)
class IsFloat(Eval):
    """Evaluation function that checks whether values from a data frame row
    are floats or (optionally) can be cast to floats.
    """
    def __init__(self, columns, typecast=True):
        """Initialize the float-checking predicate.

        Parameters
        ----------
        columns: int, string, EvalFunction, or list
            Single column or list of column index positions or column names.
            This can also be a single evaluation function or a list of
            functions.
        typecast: bool, default=True
            Cast string values to float if True.
        """
        # Bind typecast into a unary callable for the Eval base class.
        def predicate(value):
            return is_float(value, typecast=typecast)

        super(IsFloat, self).__init__(func=predicate, columns=columns, is_unary=True)
class IsNaN(Eval):
    """Evaluation function that checks whether values from a data frame row
    are the special NaN (not a number) value.
    """
    def __init__(self, columns):
        """Initialize the NaN-checking predicate.

        Parameters
        ----------
        columns: int, string, EvalFunction, or list
            Single column or list of column index positions or column names.
            This can also be a single evaluation function or a list of
            functions.
        """
        # is_nan is already a unary callable; pass it straight through.
        super(IsNaN, self).__init__(func=is_nan, columns=columns, is_unary=True)
# -- Type converters ----------------------------------------------------------
class Bool(Eval):
    """Convert a given value to bool using Python truthiness."""
    def __init__(self, columns, default_value=None, raise_error=False):
        """Create an instance of a Boolean type cast function.

        Parameters
        ----------
        columns: int, string, EvalFunction, or list
            Single column or list of column index positions or column names.
            This can also be a single evaluation function or a list of
            functions.
        default_value: scalar, default=None
            Ignored. Present only for signature consistency with the other
            type cast functions -- casting to bool cannot fail.
        raise_error: bool, default=False
            Ignored. Present only for signature consistency with the other
            type cast functions -- casting to bool cannot fail.
        """
        # Docstring previously claimed default/raise behavior that the code
        # never implemented; the cast below never raises, so the two extra
        # parameters are intentionally unused.
        def cast(value):
            return bool(value)
        super(Bool, self).__init__(func=cast, columns=columns, is_unary=True)
class Datetime(Eval):
    """Cast values from a data frame row to datetime. Values that cannot be
    converted either raise a ValueError (when the raise error flag is True)
    or are replaced by the given default value.
    """
    def __init__(self, columns, default_value=None, raise_error=False):
        """Create an instance of a datetime type cast function.

        Parameters
        ----------
        columns: int, string, EvalFunction, or list
            Single column or list of column index positions or column names.
            This can also be a single evaluation function or a list of
            functions.
        default_value: scalar, default=None
            Returned for values that cannot be cast to datetime when the
            raise_error flag is False.
        raise_error: bool, default=False
            Raise ValueError if the value cannot be cast to datetime.
        """
        # Bind the error-handling options into a unary cast function.
        def convert(value):
            return to_datetime(
                value,
                default_value=default_value,
                raise_error=raise_error
            )

        super(Datetime, self).__init__(func=convert, columns=columns, is_unary=True)
class Float(Eval):
    """Cast values from a data frame row to float. Values that cannot be
    converted either raise a ValueError (when the raise error flag is True)
    or are replaced by the given default value.
    """
    def __init__(self, columns, default_value=None, raise_error=False):
        """Create an instance of a float type cast function.

        Parameters
        ----------
        columns: int, string, EvalFunction, or list
            Single column or list of column index positions or column names.
            This can also be a single evaluation function or a list of
            functions.
        default_value: scalar, default=None
            Returned for values that cannot be cast to float when the
            raise_error flag is False.
        raise_error: bool, default=False
            Raise ValueError if the value cannot be cast to float.
        """
        # Bind the error-handling options into a unary cast function.
        def convert(value):
            return to_float(
                value,
                default_value=default_value,
                raise_error=raise_error
            )

        super(Float, self).__init__(func=convert, columns=columns, is_unary=True)
class Int(Eval):
    """Cast values from a data frame row to integer. Values that cannot be
    converted either raise a ValueError (when the raise error flag is True)
    or are replaced by the given default value.
    """
    def __init__(self, columns, default_value=None, raise_error=False):
        """Create an instance of an integer type cast function.

        Parameters
        ----------
        columns: int, string, EvalFunction, or list
            Single column or list of column index positions or column names.
            This can also be a single evaluation function or a list of
            functions.
        default_value: scalar, default=None
            Returned for values that cannot be cast to integer when the
            raise_error flag is False.
        raise_error: bool, default=False
            Raise ValueError if the value cannot be cast to integer.
        """
        # Bind the error-handling options into a unary cast function.
        def convert(value):
            return to_int(
                value,
                default_value=default_value,
                raise_error=raise_error
            )

        super(Int, self).__init__(func=convert, columns=columns, is_unary=True)
class Str(Eval):
    """Convert a given value to string."""
    def __init__(self, columns):
        """Create an instance of a string type cast function.

        Parameters
        ----------
        columns: int, string, EvalFunction, or list
            Single column or list of column index positions or column names.
            This can also be a single evaluation function or a list of
            functions.
        """
        # The builtin str is already a unary callable; pass it straight through.
        super(Str, self).__init__(func=str, columns=columns, is_unary=True)
| 39.222222
| 83
| 0.640631
|
4a072d801321f871b5d04dc607b3a5e8752b60d3
| 454
|
py
|
Python
|
src/training_data/createCSVFile.py
|
Airthee/faciale_recognition
|
a045d93747543696e3908604febd87c80a67136a
|
[
"Unlicense"
] | 1
|
2021-08-08T14:17:44.000Z
|
2021-08-08T14:17:44.000Z
|
src/training_data/createCSVFile.py
|
Airthee/faciale_recognition
|
a045d93747543696e3908604febd87c80a67136a
|
[
"Unlicense"
] | null | null | null |
src/training_data/createCSVFile.py
|
Airthee/faciale_recognition
|
a045d93747543696e3908604febd87c80a67136a
|
[
"Unlicense"
] | null | null | null |
#!/bin/env python3
import os, re, sys
# Validate CLI usage: a single root-directory argument is required.
if len(sys.argv) < 2:
    raise Exception("Wrong parameters : 1 expected")
root = sys.argv[1]

# Walk the tree and print one "path;subject_id" CSV line per file whose
# path contains an "/s<number>/" subject directory.
for (dirpath, dirnames, filenames) in os.walk(root):
    for filename in filenames:
        full_path = os.path.join(dirpath, filename)
        subject = re.search(r'\/s([0-9]+)\/', full_path)
        if subject:
            relative = full_path.replace(root + '/', '')
            print("training_data/{};{}".format(relative, subject.group(1)))
| 26.705882
| 87
| 0.634361
|
4a072f8aefb0353da2d979aa7f84bcec8a1c0819
| 21,277
|
py
|
Python
|
event.py
|
jdthorpe/MCPH
|
c431bd5d9320d1165cabcba9e201a29f016fa362
|
[
"MIT"
] | null | null | null |
event.py
|
jdthorpe/MCPH
|
c431bd5d9320d1165cabcba9e201a29f016fa362
|
[
"MIT"
] | null | null | null |
event.py
|
jdthorpe/MCPH
|
c431bd5d9320d1165cabcba9e201a29f016fa362
|
[
"MIT"
] | null | null | null |
"""
Because timelines have this nice property of keeping track of nested
timelines, we need to prevent timeline instances from duplicating this
calculation. Hence, we need to freeze the timeline when it has been
accessed as a child timeline.
==============================================
Example: potential double counting situation
==============================================
tumor = Event(type='tumor',id='ovarianCancer',time = 55) # the origin of the tumor relative to it's reference
deathFromCancer = Event(time = 12)
tumor._addEvent(deathFromCancer)
someone._addEvent(tumor)
# then later:
thisTumor = someone.tumor
...
...
someone.tumor = thisTumor
==============================================
End Example
==============================================
In the above example, if we didn't keep track
of the reference timeline, we would end up shifting
the origin of thisTumor twice, and our ages at
events would get all screwed up.
==============================================
time v. reftime
==============================================
Because all timelines are considered to be
within the context of a person's life, the
'global' coordinates of a timeline refer to
that person's time at the event.
The local coordinates, however, can be measured
with respect to some event that defines a reftime.
The Event class has an implicit reftime,
which is the time at the 'event'.
To get the global coordinates, use the 'getAge'
methods, and to get local coordinates, use the
'getTime' methods.
"""
from math import isinf
from operator import attrgetter
from types import NoneType
import pdb
inf = float('inf')
class Event(object):
    """A point in time that may be anchored to another event.

    An event has three central public properties:

        'reference': another event that serves as the reference time for
            this event.  References can be followed from child to parent
            until a global (person-level) object is reached; that global
            object canonically has no reference event.
        'reftime': the time between this event and its reference event
            (negative if this event occurred first).
        'time': computed property -- the time between this event and the
            global event at the top of the reference chain.

    Parent/child links can be created three ways:

        (1) in the constructor:          child = Event(reference=parent)
        (2) via the reference property:  child.reference = parent
        (3) via attribute assignment:    parent.foo = child

    and removed via 'del child.reference' (or 'del parent.foo' for (3)).
    Child events are looked up with getEvent() / getEvents().
    """

    # Default so events created without a reftime read None instead of
    # raising AttributeError (the original had no fallback).
    reftime = None

    def __init__(self,
                 # the time between the reference event and this event
                 reftime=None,
                 # the reference object
                 reference=None,
                 # a string, tuple of strings, or list of strings to aid
                 # in searching for events.
                 type=()):
        # store the reference event (implicitly uses the setter property)
        if reference is not None:
            self.reference = reference
        # the time of the event relative to the reference frame
        # (BUGFIX: stray backtick lines removed -- they were syntax errors)
        if reftime is not None:
            self.__dict__['reftime'] = reftime
        # normalize the 'type' tag(s) to a tuple
        if isinstance(type, str):
            type = (type,)
        elif isinstance(type, list):
            type = tuple(type)
        self.type = type  # a tuple that names the event type
        # initialize the children and prevented-by lists
        self._children = []
        self._preventedBy = []

    # --------------------------------------------------
    # prevented properties
    # --------------------------------------------------
    def unpreventALL(self):
        """Drop every prevention link on this event."""
        self._preventedBy = []

    def unprevent(self, by):
        """Remove every occurrence of 'by' (identity match) from the
        prevention list."""
        # BUGFIX: the original used C-style 'for(i in ...)' syntax and
        # compared the whole list instead of its elements.  Iterate
        # backwards so deletions do not shift unvisited indexes.
        for i in range(len(self._preventedBy) - 1, -1, -1):
            if self._preventedBy[i] is by:
                del self._preventedBy[i]

    def prevent(self, by):
        """Register 'by' as an event whose occurrence prevents this one."""
        # NOTE(review): 'origin' is expected to be provided elsewhere in the
        # project (it is not defined in this module) -- guard the lookup so
        # the module stays importable on its own.  The original called an
        # undefined 'inherits' function here.
        origin_cls = globals().get('origin')
        if origin_cls is not None and isinstance(by, origin_cls):
            raise RuntimeError('An event cannot be prevented by an orign')
        if self is by:
            raise RuntimeError('An event cannot be prevented by itself')
        if by not in self._preventedBy:
            self._preventedBy.append(by)

    def _getTimePrevented(self):
        """Earliest time at which a prevention event occurs (inf if none)."""
        if len(self._preventedBy):
            # BUGFIX: comprehension variable was 'time' while 'x' was read
            return min([x.time for x in self._preventedBy])
        else:
            return float('inf')

    TimePrevented = property(_getTimePrevented)

    def _prevented(self):
        """An event is prevented if any of the prevention events occurs
        prior to the event in the absence of prevention events.
        """
        # BUGFIX: the original compared float(self) (Event defines no
        # __float__) against a min() with a stray bracket that failed on an
        # empty list.  Compare the event's own time with the earliest
        # prevention time, which is inf when nothing prevents it.
        return self.time > self.TimePrevented

    prevented = property(_prevented)

    # --------------------------------------------------
    # time property
    # --------------------------------------------------
    def _getTime(self):
        """Time of this event in global (person) coordinates, or None when
        the chain of reftimes is incomplete."""
        if 'reference' not in self.__dict__:
            raise RuntimeError("Attempt to GET the time of an event before setting the event's reference attribute, OR no global reference found.")
        refTime = self.reference.time
        if self.reftime is None or refTime is None:
            return None
        else:
            return float(self.reftime) + refTime

    time = property(_getTime)

    # --------------------------------------------------
    # redraw method
    # --------------------------------------------------
    def redraw(self):
        """Call the redraw method on self.reference.time or
        self.reference.reftime, silently skipping whichever is absent."""
        try:
            self.reference.time.redraw()
        except AttributeError:
            pass
        try:
            self.reference.reftime.redraw()
        except AttributeError:
            pass

    # --------------------------------------------------
    # Attribute Setter
    # --------------------------------------------------
    def __setattr__(self, name, value):
        """Set an attribute; assigning an Event value (parent.foo = child)
        also establishes the child's 'reference' link back to this event.
        """
        if name in ('reference',):
            # bypass __setattr__ so the property getter/setter defined
            # below handles the assignment; see:
            # http://stackoverflow.com/questions/15750522/class-properties-and-setattr
            object.__setattr__(self, name, value)
            return
        if isinstance(value, Event):
            if ('reference' in value.__dict__
                    and value.reference is not self):
                raise AttributeError('Attempt to add two reference to a single event')
            # PREVENT CIRCULAR PARENT/CHILD REFERENCES.  Walking upward ends
            # naturally at the top of the chain (the global object has no
            # reference), so no special origin check is needed here.
            tmp = value
            while 'reference' in tmp.__dict__:
                if tmp is self:
                    raise ValueError("Circular Reference Error: attempt to add a Event as a child of an ancestor.")
                tmp = tmp.reference
            # ADD SELF AS THE EVENT'S NEW 'REFERENCE' ATTRIBUTE
            value.reference = self
        self.__dict__[name] = value

    # --------------------------------------------------
    # Attribute Deleter
    # --------------------------------------------------
    def __delattr__(self, name):
        """Delete an attribute; deleting a named Event also severs its
        'reference' link and removes it from the children list."""
        if name not in self.__dict__:
            raise AttributeError(name)
        if name == 'reference':
            # bypass to the property deleter defined below
            object.__delattr__(self, name)
            return
        value = self.__dict__[name]
        # BUGFIX: the original tested isinstance(..., event) -- a NameError.
        if isinstance(value, Event):
            # Unlink by hand (not via the 'reference' deleter) so we do not
            # mutate self.__dict__ while the named entry still exists.
            value.__dict__.pop('reference', None)
            if value in self.__dict__['_children']:
                self.__dict__['_children'].remove(value)
        del self.__dict__[name]

    # --------------------------------------------------
    # origin lookup
    # --------------------------------------------------
    def _origin(self):
        """Return the origin event that is an ancestor of self, or None if
        the reference chain ends without one."""
        origin_cls = globals().get('origin')
        node = self
        while True:
            # BUGFIX: walk 'node', not 'self' -- the original never advanced
            if 'reference' not in node.__dict__:
                return None
            reference = node.__dict__['reference']
            if origin_cls is not None and isinstance(reference, origin_cls):
                return reference
            else:
                node = reference

    origin = property(_origin)

    # --------------------------------------------------
    # stubs for pre and post processing
    # --------------------------------------------------
    def preprocess(self):
        """ preprocess() is called when the event is initialized. it is
        responsible for initializing any values required for processing
        and/or eligibility testing, before the person event is tested for
        enrollment eligibility, and before the personEventProcessors in the
        decisionRule are called.
        """
        pass  # to be over-written by sub-classes

    def process(self):
        """ process() is called in order to handle the conditional events.
        For example it's not possible for a tubal ligation (TL) to occur
        after the tubes are removed (BS), so the TL should set its time to
        None in its "process()" method when a BSO occurs before the TL.

        The process() method should be used to modify the event that it
        is called on, and not other events. The cascade of
        event.process() calls proceeds chronologically from the minimum
        of [a] the time of the event before calling event.process(), [b]
        the time of the event after calling event.process() and [c] the
        return value from process (optional).
        """
        pass  # to be over-written by sub-classes

    def postprocess(self):
        """ postprocess() is called after all the event generators have been
        called, and after the person event is qualified for enrollment
        eligibility. It is also called each time that the time of the event
        is reset.

        postprocess() is therefore good for things like assigning marker
        levels, which are expensive to generate and not needed to
        determine eligibility.  The timing of this or any other event that
        existed during processing should *NOT* be modified here.

        postprocess() is called *after* eligibility testing, so it may
        not be called on events from ineligible individuals.
        """
        pass  # optionally, to be over-written by sub-classes

    # --------------------------------------------------
    # Reference event property
    # --------------------------------------------------
    def __setReferenceEvent(self, reference):
        # PREVENT CIRCULAR PARENT/CHILD REFERENCES
        if reference is self:
            raise ValueError("Attempt to add a Event as it's own reference point. Circular references are forbidden")
        ancestor = reference
        while True:
            if ancestor is self:
                raise ValueError("Attempt to add a Event as a child of an ancestor. Circular references are forbidden")
            if not 'reference' in ancestor.__dict__:
                break
            ancestor = ancestor.reference
        if 'reference' in self.__dict__:
            print('deleting child.reference')
            del self.reference
        self.__dict__['reference'] = reference
        # Complete the parent -> child side of the link.  BUGFIX: check the
        # children list directly; getEvents() filters by time/prevention and
        # so could re-append an already-registered child.
        if self not in reference.__dict__['_children']:
            reference.__dict__['_children'].append(self)

    def __getReferenceEvent(self):
        return self.__dict__['reference']

    def __delReferenceEvent(self):
        # This event may be linked from its REFERENCE as a named attribute
        # and/or as an entry in the reference's '_children' list; scrub both.
        # BUGFIX: the original scanned self.__dict__ instead of the
        # reference's, so the parent kept a stale link.
        reference = self.__dict__['reference']
        siblings = reference.__dict__['_children']
        if self in siblings:
            siblings.remove(self)
        staleKeys = []
        for key, value in reference.__dict__.items():
            if value is self:
                staleKeys.append(key)
        for key in staleKeys:
            del reference.__dict__[key]
        # now delete the reference from __dict__
        del self.__dict__['reference']

    reference = property(__getReferenceEvent,
                         __setReferenceEvent,
                         __delReferenceEvent)

    # --------------------------------------------------
    # event query methods
    # --------------------------------------------------
    def getEvent(self, name):
        """Return the child event stored under 'name'; raise KeyError if the
        attribute is missing or not an Event."""
        out = self.__dict__[name]
        if not isinstance(out, Event):
            # BUGFIX: error message previously showed only name[0]
            raise KeyError("Event instance has no event named '" + name + "'.")
        # BUGFIX: the original never returned the event it looked up
        return out

    def getEvents(self,
                  type=None,
                  deepQuery=False,
                  includePrevented=False,
                  includeNoneTimes=False,
                  ordered=False,
                  first=False):
        """ returns a list of events with type 'type' """
        # Collect anonymous children plus named Event attributes without
        # double counting.  BUGFIX: the original iterated self.__dict__
        # (its keys) so named events were never found.
        out = list(self.__dict__['_children'])
        for value in self.__dict__.values():
            if isinstance(value, Event) and value not in out:
                out.append(value)
        if not includeNoneTimes:
            out = [e for e in out if e.time is not None]
        if not includePrevented:
            out = [e for e in out if not e.prevented]
        if deepQuery:
            # Looping trick: extending 'out' while iterating it walks the
            # whole tree; depends on there being no circular references.
            # BUGFIX: do not forward first/ordered into the recursion --
            # first=True returns a single event (or None), not a list.
            for e in out:
                out.extend(e.getEvents(type=type,
                                       deepQuery=True,
                                       includePrevented=includePrevented,
                                       includeNoneTimes=includeNoneTimes))
        if type:
            if hasattr(type, '__call__'):
                out = [e for e in out if type(e)]
            else:
                out = [e for e in out if type in e.type]
        if ordered or first:
            out = sorted(out, key=lambda x: x.time if x.time is not None else inf)
        if first:
            return out[0] if len(out) else None
        return out
# module test code
if __name__ == '__main__':
    # Ad-hoc smoke tests for the Event class.  Each case prints the test
    # name followed by ...Passed / ...Failed based on whether the expected
    # exception was raised.
    import sys
    b = Event()
    # ----------------------------------------
    # NOTE(review): 'time' is a read-only property (no setter is defined on
    # Event), so these assignment tests will see AttributeError rather than
    # the RuntimeError they expect -- verify intended behavior.
    msg = 'bad time assignment (no reference event)'
    sys.stdout.write(msg+"\r" )
    try:
        b.time = 5
    except RuntimeError:
        sys.stdout.write(msg + "...Passed\n" )
    else:
        sys.stdout.write(msg + "...Failed\n" )
    # ----------------------------------------
    msg = 'bad time query (no reference event)'
    try:
        _ = b.time
    except RuntimeError:
        sys.stdout.write(msg + "...Passed\n" )
    else:
        sys.stdout.write(msg + "...Failed\n" )
    # ----------------------------------------
    msg = 'self reference assignment'
    sys.stdout.write(msg+"\r" )
    try:
        b.reference = b
    except ValueError as e:
        sys.stdout.write(msg + "...Passed\n" )
    else:
        sys.stdout.write(msg + "...Failed\n" )
    # ----------------------------------------
    msg = 'valid reference assignment'
    sys.stdout.write(msg+"\r" )
    a = Event()
    try:
        b.reference = a
    except ValueError as e:
        sys.stdout.write(msg + "...Failed\n" )
    else:
        sys.stdout.write(msg + "...Passed\n" )
    # ----------------------------------------
    msg = 'circular reference assignment'
    sys.stdout.write(msg+"\r" )
    try:
        a.reference = b
    except ValueError as e:
        sys.stdout.write(msg + "...Passed\n" )
    else:
        sys.stdout.write(msg + "...Failed\n" )
    # ----------------------------------------
    msg = 'no origin'
    sys.stdout.write(msg+"\r" )
    try:
        b.time = 5
    except RuntimeError:
        sys.stdout.write(msg + "...Passed\n" )
    else:
        sys.stdout.write(msg + "...Failed\n" )
    # ----------------------------------------
    msg = 'no origin'
    try:
        _ = b.time
    except AttributeError:
        sys.stdout.write(msg + "...Passed\n" )
    else:
        sys.stdout.write(msg + "...Failed\n" )
    # ----------------------------------------
    # NOTE(review): 'isGlobalReference' is never read by the Event class as
    # written -- confirm whether an 'origin' class elsewhere consumes it.
    a.isGlobalReference = True
    msg = 'good time assignment '
    sys.stdout.write(msg+"\r" )
    try:
        b.time = 5
    except AttributeError:
        sys.stdout.write(msg+"...Failed\n" )
    else:
        sys.stdout.write(msg+"...Passed\n" )
    # NOTE(review): Event.__init__ has no 'time' parameter -- this call
    # raises TypeError as written; verify the intended constructor.
    c = Event(reference=b,time = 11)
    assert c.reftime == 6
    # ----------------------------------------
    a.isGlobalReference = True
    msg = 'deleting global event '
    sys.stdout.write(msg+"\r" )
    try:
        del b.reference
    except AttributeError:
        sys.stdout.write(msg+"...Failed\n" )
    else:
        sys.stdout.write(msg+"...Passed\n" )
    # ----------------------------------------
    msg = 'getting time of secondary event after deleting the global event '
    sys.stdout.write(msg+"\r" )
    try:
        b.time
    except RuntimeError:
        sys.stdout.write(msg+"...Passed\n" )
    else:
        sys.stdout.write(msg+"...Failed\n" )
    # ----------------------------------------
    msg = 'getting time of tertiary event after deleting the global event '
    sys.stdout.write(msg+"\r" )
    try:
        c.time
    except RuntimeError:
        sys.stdout.write(msg+"...Passed\n" )
    else:
        sys.stdout.write(msg+"...Failed\n" )
    # ----------------------------------------
    msg = 'adding the global by named assignment'
    sys.stdout.write(msg+"\r" )
    try:
        a.tumor = b
    except :
        sys.stdout.write(msg+"...Failed\n" )
    else:
        sys.stdout.write(msg+"...Passed\n" )
    # ----------------------------------------
    msg = 'getting time of secondary event after attribute assignment the global event '
    sys.stdout.write(msg+"\r" )
    try:
        b.time
    except :
        sys.stdout.write(msg+"...Failed\n" )
    else:
        sys.stdout.write(msg+"...Passed\n" )
    # ----------------------------------------
    #a.isGlobalReference = True
    msg = 'circular reference (as a named attribute)'
    sys.stdout.write(msg+"\r" )
    try:
        c.person = a
    except RuntimeError:
        sys.stdout.write(msg+"...Passed\n" )
    else:
        sys.stdout.write(msg+"...Failed\n" )
    # ----------------------------------------
    #a.isGlobalReference = True
    msg = 'deleting global event (as a named attribute)'
    sys.stdout.write(msg+"\r" )
    try:
        del b.reference
    except AttributeError:
        sys.stdout.write(msg+"...Failed\n" )
    else:
        sys.stdout.write(msg+"...Passed\n" )
    # ----------------------------------------
    msg = 'getting time of secondary event after deleting the global event '
    sys.stdout.write(msg+"\r" )
    try:
        b.time
    except RuntimeError:
        sys.stdout.write(msg+"...Passed\n" )
    else:
        sys.stdout.write(msg+"...Failed\n" )
    # ----------------------------------------
    msg = 'getting time of tertiary event after deleting the global event '
    sys.stdout.write(msg+"\r" )
    try:
        c.time
    except RuntimeError:
        sys.stdout.write(msg+"...Passed\n" )
    else:
        sys.stdout.write(msg+"...Failed\n" )
    # ValueError
| 33.507087
| 147
| 0.536025
|
4a0730ebeb6cc7e7c4e03ddc2142c918c1281a20
| 4,449
|
py
|
Python
|
pyclient/zeroos/orchestrator/client/ImageImport.py
|
5l1v3r1/0-orchestrator
|
9373a4acb1517ff001df526925c224a7a93b3274
|
[
"Apache-2.0"
] | 3
|
2017-07-04T14:02:02.000Z
|
2019-07-06T23:34:08.000Z
|
pyclient/zeroos/orchestrator/client/ImageImport.py
|
5l1v3r1/0-orchestrator
|
9373a4acb1517ff001df526925c224a7a93b3274
|
[
"Apache-2.0"
] | 497
|
2017-05-31T07:55:40.000Z
|
2018-01-03T12:10:43.000Z
|
pyclient/zeroos/orchestrator/client/ImageImport.py
|
zero-os/0-orchestrator
|
9373a4acb1517ff001df526925c224a7a93b3274
|
[
"Apache-2.0"
] | 8
|
2017-06-14T09:45:56.000Z
|
2021-02-01T18:12:55.000Z
|
"""
Auto-generated class for ImageImport
"""
from . import client_support
class ImageImport(object):
    """
    auto-generated. don't touch.

    Value object for an image-import request. NOTE(review): the seven
    copy-pasted per-property blocks of the generated __init__ were collapsed
    into one data-driven loop; property names, accepted types, required-ness
    and the exact error messages are unchanged.
    """

    # (property name, accepted datatypes, required) — drives __init__.
    _PROPERTY_SPECS = [
        ('diskBlockSize', [int], True),
        ('encryptionKey', [str], False),
        ('exportName', [str], True),
        ('exportSnapshot', [str], False),
        ('imageName', [str], True),
        ('overwrite', [bool], False),
        ('url', [str], True),
    ]

    @staticmethod
    def create(diskBlockSize, exportName, imageName, url, encryptionKey=None, exportSnapshot=None, overwrite=None):
        """
        :type diskBlockSize: int
        :type encryptionKey: str
        :type exportName: str
        :type exportSnapshot: str
        :type imageName: str
        :type overwrite: bool
        :type url: str
        :rtype: ImageImport
        """
        return ImageImport(
            diskBlockSize=diskBlockSize,
            encryptionKey=encryptionKey,
            exportName=exportName,
            exportSnapshot=exportSnapshot,
            imageName=imageName,
            overwrite=overwrite,
            url=url,
        )

    def __init__(self, json=None, **kwargs):
        """Populate the instance from a dict (`json`) or keyword arguments.

        Missing required properties raise ValueError; every present value is
        validated/coerced through client_support.val_factory, and conversion
        failures are re-raised as ValueError with the original message format.
        """
        if json is None and not kwargs:
            raise ValueError('No data or kwargs present')
        class_name = 'ImageImport'
        create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
        required_error = '{cls}: missing required property {prop}'
        data = json or kwargs
        for property_name, datatypes, required in self._PROPERTY_SPECS:
            val = data.get(property_name)
            if val is None:
                # Same semantics as before: absent and explicit-None are equal.
                if required:
                    raise ValueError(required_error.format(cls=class_name, prop=property_name))
                continue
            try:
                setattr(self, property_name, client_support.val_factory(val, datatypes))
            except ValueError as err:
                raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))

    def __str__(self):
        return self.as_json(indent=4)

    def as_json(self, indent=0):
        """Serialize to a JSON string via client_support."""
        return client_support.to_json(self, indent=indent)

    def as_dict(self):
        """Serialize to a plain dict via client_support."""
        return client_support.to_dict(self)
| 35.592
| 115
| 0.606428
|
4a07311883152753983d4b62af0e117bb776345e
| 4,680
|
py
|
Python
|
exps/example/yolox_pdfall_s.py
|
johnson7788/YOLOX
|
0c22a08951f5564122959390f959996517c9a15d
|
[
"Apache-2.0"
] | null | null | null |
exps/example/yolox_pdfall_s.py
|
johnson7788/YOLOX
|
0c22a08951f5564122959390f959996517c9a15d
|
[
"Apache-2.0"
] | null | null | null |
exps/example/yolox_pdfall_s.py
|
johnson7788/YOLOX
|
0c22a08951f5564122959390f959996517c9a15d
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
import os
import random
import torch
import torch.nn as nn
import torch.distributed as dist
from yolox.exp import Exp as MyExp
from yolox.data import get_yolox_datadir
class Exp(MyExp):
    """YOLOX experiment for 7-class PDF layout detection (s-size defaults)."""
    def __init__(self):
        super(Exp, self).__init__()
        # 7 object classes
        self.num_classes = 7
        self.depth = 0.33
        self.width = 0.50
        # Experiment name = this file's basename without extension.
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
    def get_data_loader(self, batch_size, is_distributed, no_aug=False, args=None):
        """Build the training DataLoader (PDF dataset + mosaic augmentation)."""
        from yolox.data import (
            PDFDetection,
            TrainTransform,
            YoloBatchSampler,
            DataLoader,
            InfiniteSampler,
            MosaicDetection,
        )
        # Choose the depth/width multipliers from the named model-size preset.
        # NOTE(review): args is dereferenced here before the `if args` check
        # below — passing args=None raises AttributeError. Confirm callers
        # always pass args.
        if args.depth_width == "small":
            self.depth = 0.33
            self.width = 0.50
        elif args.depth_width == "middle":
            self.depth = 0.67
            self.width = 0.75
        elif args.depth_width == "large":
            self.depth = 1.0
            self.width = 1.0
        elif args.depth_width == "xlarge":
            self.depth = 1.33
            self.width = 1.25
        if args and args.data_dir:
            data_dir = args.data_dir
        else:
            data_dir = get_yolox_datadir()
        # Load the dataset.
        dataset = PDFDetection(
            # data directory, e.g. data_dir:
            data_dir = data_dir,
            # dataset split directory name
            image_sets=['labeled_all_train'],
            # image_sets=[('2007', 'trainval'), ('2012', 'trainval')],
            # input image size, e.g. (640, 640)
            img_size=self.input_size,
            # training-time image transforms / data augmentation
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=self.num_classes,
            ),
        )
        # Mosaic data-augmentation wrapper.
        dataset = MosaicDetection(
            dataset,
            mosaic=not no_aug,
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=120,
            ),
            degrees=self.degrees,
            translate=self.translate,
            scale=self.scale,
            shear=self.shear,
            perspective=self.perspective,
            enable_mixup=self.enable_mixup,
        )
        self.dataset = dataset
        if is_distributed:
            batch_size = batch_size // dist.get_world_size()
        sampler = InfiniteSampler(
            len(self.dataset), seed=self.seed if self.seed else 0
        )
        # Batch sampler; keeps mosaic/no-aug state in sync with the loader.
        batch_sampler = YoloBatchSampler(
            sampler=sampler,
            batch_size=batch_size,
            drop_last=False,
            input_dimension=self.input_size,
            mosaic=not no_aug,
        )
        # DataLoader keyword arguments.
        dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
        dataloader_kwargs["batch_sampler"] = batch_sampler
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)
        return train_loader
    def get_eval_loader(self, batch_size, is_distributed, testdev=False):
        """Build the VOC2007-test evaluation DataLoader."""
        from yolox.data import VOCDetection, ValTransform
        valdataset = VOCDetection(
            data_dir=os.path.join(get_yolox_datadir(), "VOCdevkit"),
            image_sets=[('2007', 'test')],
            img_size=self.test_size,
            preproc=ValTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
            ),
        )
        if is_distributed:
            batch_size = batch_size // dist.get_world_size()
            sampler = torch.utils.data.distributed.DistributedSampler(
                valdataset, shuffle=False
            )
        else:
            sampler = torch.utils.data.SequentialSampler(valdataset)
        dataloader_kwargs = {
            "num_workers": self.data_num_workers,
            "pin_memory": True,
            "sampler": sampler,
        }
        dataloader_kwargs["batch_size"] = batch_size
        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)
        return val_loader
    def get_evaluator(self, batch_size, is_distributed, testdev=False):
        """Create a VOC-style evaluator over the eval loader."""
        from yolox.evaluators import VOCEvaluator
        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
        evaluator = VOCEvaluator(
            dataloader=val_loader,
            img_size=self.test_size,
            confthre=self.test_conf,
            nmsthre=self.nmsthre,
            num_classes=self.num_classes,
        )
        return evaluator
| 32.054795
| 86
| 0.561752
|
4a07314b46fdf68fb9e2706137e2523cbd35448b
| 13,548
|
py
|
Python
|
taste_fn.py
|
ekremozturk/collabus
|
a9aee9414dd1216b560fdb59613fe5b1253d3b98
|
[
"Apache-2.0"
] | 2
|
2018-04-29T07:53:06.000Z
|
2019-04-05T07:29:23.000Z
|
taste_fn.py
|
ekremozturk/collabus
|
a9aee9414dd1216b560fdb59613fe5b1253d3b98
|
[
"Apache-2.0"
] | 10
|
2018-03-14T11:18:35.000Z
|
2018-04-25T10:48:58.000Z
|
taste_fn.py
|
ekremozturk/collabus
|
a9aee9414dd1216b560fdb59613fe5b1253d3b98
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 1 17:12:17 2018
@author: ekrem
"""
import numpy as np
import pandas as pd
from collections import defaultdict
from math import log, exp
n_list = [20, 50, 200]
agg_fns = ['avg', 'normalized_avg', 'least_misery', 'median', 'weighted_avg']
#=============================================================================
def load_files():
  """Read the training triplets plus user/song play statistics from ./subset."""
  stat_cols = ['ID', 'totalPlay', 'occurence', 'mean']
  triplets = pd.read_table('subset/train_triplets.txt', sep=' ', header=None,
                           names=['userID', 'itemID', 'playCount'])
  users = pd.read_table('subset/user_play_mean.txt', sep=' ', header=None,
                        names=stat_cols)
  songs = pd.read_table('subset/song_play_mean.txt', sep=' ', header=None,
                        names=stat_cols)
  return triplets, users, songs
#=============================================================================
def load_files_by_no(subset_no):
  """Like load_files, but reads from the directory 'subset<subset_no>'."""
  path = 'subset' + subset_no
  stat_cols = ['ID', 'totalPlay', 'occurence', 'mean']
  triplets = pd.read_table(path + '/train_triplets.txt', sep=' ', header=None,
                           names=['userID', 'itemID', 'playCount'])
  users = pd.read_table(path + '/user_play_mean.txt', sep=' ', header=None,
                        names=stat_cols)
  songs = pd.read_table(path + '/song_play_mean.txt', sep=' ', header=None,
                        names=stat_cols)
  return triplets, users, songs
#=============================================================================
def ids(users, songs):
  """Return the user and song ID columns as numpy arrays."""
  return np.asarray(users['ID']), np.asarray(songs['ID'])
#=============================================================================
def form_dictionaries(userIDs, songIDs):
  """Map each user/song ID to its positional index.

  Args:
    userIDs: iterable of user identifiers.
    songIDs: iterable of song identifiers.
  Returns:
    (user_dict, song_dict): id -> 0-based index mappings.
  """
  # Idiomatic replacement of the two manual counter loops.
  user_dict = {uid: idx for idx, uid in enumerate(userIDs)}
  song_dict = {sid: idx for idx, sid in enumerate(songIDs)}
  return user_dict, song_dict
#=============================================================================
def split_into_train_test(df, frac=0.5):
  """Per-user split: `frac` of each user's rows (seed 42) go to train, rest to test."""
  def take_sample(user_rows):
    return user_rows.sample(frac=frac, random_state=42)
  train = df.groupby("userID", group_keys=False).apply(take_sample)
  test = df.drop(train.index)
  return train, test
#=============================================================================
def split_into_train_test_cv(df, cv=5):
  """Partition df into `cv` disjoint per-user folds (unseeded random sampling)."""
  folds = []
  remaining = df
  for fold_no in range(cv):
    # Take an equal share of what is left: 1/cv, 1/(cv-1), ..., 1/1.
    share = 1 / (cv - fold_no)
    fold = remaining.groupby("userID", group_keys=False).apply(
        lambda g, f=share: g.sample(frac=f))
    remaining = remaining.drop(fold.index)
    folds.append(fold)
  return folds
#=============================================================================
def form_records(triplets, user_dict, song_dict, normalization = False, virtual=False):
  """Build the user x song play-count matrix R.

  With normalization=False: returns R filled with raw play counts; triplets
  rows are expected to be (user_idx, song_idx, playCount) integer triples.
  With normalization=True: fills R with log-scaled counts and also returns
  the item-item cosine-similarity matrix M. With virtual=True the rows of
  `triplets` are assumed to be dense virtual-user rating rows (one per
  group) — presumably the output of form_virtual_users; verify.
  """
  R = np.zeros((len(user_dict), len(song_dict)))
  if normalization:
    if virtual:
      # One row per virtual user (group) instead of per real user.
      R = np.zeros((triplets.index.size, len(song_dict)))
      for group_idx, row in triplets.iterrows():
        for song_idx, count in row.iteritems():
          R[group_idx, song_idx] = log(count+1)
      from sklearn.metrics.pairwise import cosine_similarity
      M = cosine_similarity(R.transpose())
      return R, M
    #Log(playCount)+1
    counts_logged = triplets['playCount'].apply(log)+1
    for t, logged_count in zip(np.asmatrix(triplets), counts_logged):
      user_idx = t[0,0]
      song_idx = t[0,1]
      R[user_idx, song_idx] = logged_count
    #Form item-item similarity matrix
    from sklearn.metrics.pairwise import cosine_similarity
    M = cosine_similarity(R.transpose())
    return R, M
  else:
    for t in triplets.values:
      user_idx = t[0]
      song_idx = t[1]
      R[user_idx, song_idx] = t[2]
    return R
#=============================================================================
def replace_DF(DF, user_dict, song_dict):
  """Map raw user/song IDs in DF to their integer indices (unknown values kept)."""
  for mapping in (user_dict, song_dict):
    DF = DF.applymap(lambda v, m=mapping: m.get(v, v))
  return DF
#=============================================================================
def form_tuples(train_DF, test_DF, virtual=False, knn=False):
  """Convert train/test tables into rating tuples.

  Returns (train_rdd, test_set): train tuples are (user, item, rating);
  test tuples are (user, item). With virtual=True the inputs are dense
  group-rating frames and zero entries (no rating) are skipped.
  NOTE(review): the non-virtual path indexes rows positionally (row[0],
  row[1], row[2]), relying on positional fallback of Series indexing —
  verify on the pandas version in use. `knn` is accepted but unused here.
  """
  print("Creating rating tuples...")
  if(virtual==True):
    train_rdd = []
    for group_idx, row in train_DF.iterrows():
      for song_idx, count in row.iteritems():
        if(count!=0):
          rating = (group_idx, song_idx, count)
          train_rdd.append(rating)
    test_set = []
    for group_idx, row in test_DF.iterrows():
      for song_idx, count in row.iteritems():
        if(count!=0):
          rating = (group_idx, song_idx)
          test_set.append(rating)
    return train_rdd, test_set
  train_rdd = []
  for idx, row in train_DF.iterrows():
    train_rdd.append((int(row[0]), int(row[1]), float(row[2])))
  test_set = []
  for idx, row in test_DF.iterrows():
    test_set.append((int(row[0]), int(row[1])))
  return train_rdd, test_set
#=============================================================================
def load_subsets():
  """Read the pre-split train/test triplet files from ./subset."""
  cols = ['userID', 'itemID', 'playCount']
  train_triplets = pd.read_table('subset/train_triplets.txt', sep=' ',
                                 header=None, names=cols)
  test_triplets = pd.read_table('subset/test_triplets.txt', sep=' ',
                                header=None, names=cols)
  return train_triplets, test_triplets
#=============================================================================
def extract_recommendations(recommendations, knn=False):
  """Gather the recommended song ids for every user into a defaultdict(list)."""
  rec = defaultdict(list)
  if knn:
    # kNN output: a DataFrame whose rows are per-user recommendation lists;
    # users are re-keyed by their row position.
    for position, (_, user_row) in enumerate(recommendations.iterrows()):
      rec[position].extend(list(user_row))
    return rec
  # MF output: iterable of (user_no, [Rating(product=song_id), ...]) rows.
  for row in recommendations:
    for recommend in row[1]:
      rec[row[0]].append(recommend.product)
  return rec
def extract_evaluations(ratings_eval):
  """Group ground-truth item ids by user id into a defaultdict(list)."""
  eval_dict = defaultdict(list)
  for entry in ratings_eval:
    user_no, item = entry[0], entry[1]
    eval_dict[user_no].append(item)
  return eval_dict
def prepare_prediction_label(recommendations, ratings, knn=False):
  """Pair each user's recommended items with their ground-truth items.

  Returns a list of (recommended_list, true_list) tuples — the input format
  consumed by f1_precision_recall and mpr. With knn=True both arguments are
  already dicts keyed by user. NOTE(review): the loop variable is named
  `song` but it iterates the dict keys, which appear to be user ids; verify.
  """
  if knn:
    tuples = []
    for song, recommend in recommendations.items():
      tuples.append((recommend,ratings[song]))
    return tuples
  recommend_ext = extract_recommendations(recommendations)
  rating_ext = extract_evaluations(ratings)
  tuples = []
  for song, recommend in recommend_ext.items():
    tuples.append((recommend,rating_ext[song]))
  return tuples
#=============================================================================
def group_users(userIDs, g_size):
  """Shuffle userIDs in place and partition them into groups of g_size.

  Any leftover users (len(userIDs) % g_size) form one final, smaller group;
  no empty group is emitted when the split is exact.

  Args:
    userIDs: 1-D numpy array of user ids (shuffled in place, as before).
    g_size: target group size.
  Returns:
    List of lists of user ids covering every id exactly once.
  """
  np.random.shuffle(userIDs)
  total = userIDs.size
  leftover = total % g_size
  # Full groups of exactly g_size members.
  groups = [list(userIDs[i:i + g_size])
            for i in range(0, total - leftover, g_size)]
  if leftover:
    # BUGFIX: the original appended userIDs[-i] starting at i=0, which
    # duplicated userIDs[0] and dropped one genuine leftover user; it also
    # unconditionally appended a (possibly empty) trailing group.
    groups.append(list(userIDs[total - leftover:]))
  return groups
def agg_fn(agg, item, weights = None):
  """Aggregate one item's per-member ratings (a pandas Series) into one value.

  Zero entries mean "member has not rated" and are excluded by every strategy.
  Returns None for an unrecognized `agg` name (as before).
  """
  positive = item[item > 0]
  if agg == 'avg':
    return positive.mean()
  if agg == 'normalized_avg':
    # Average in log space (log(count) + 1), then map back with exp.
    logged = positive.apply(log) + 1
    return exp(logged.mean())
  if agg == 'least_misery':
    return positive.min()
  if agg == 'median':
    return positive.median()
  if agg == 'weighted_avg':
    weighted = np.multiply(item, weights)
    return weighted[weighted > 0.0].mean()
def form_groups(userGroups, train_data, test_data):
  """For each user group, build dense per-group rating tables.

  Returns two lists of DataFrames (train, test): each frame has one column
  per group member and one row per item any member rated; missing ratings
  are filled with 0. Progress is printed every 10 groups.
  """
  merged_train_ratings = []
  merged_test_ratings = []
  count = 0
  for group in userGroups:
    train_group_Series = []
    test_group_Series = []
    for user in group:
      # One Series per member: playCount values indexed by itemID.
      single_train_data = train_data[train_data['userID']==user]
      single_train_Series = pd.Series(list(single_train_data['playCount']), index = single_train_data['itemID'], name=user)
      train_group_Series.append(single_train_Series)
      single_test_data = test_data[test_data['userID']==user]
      single_test_Series = pd.Series(list(single_test_data['playCount']), index = single_test_data['itemID'], name=user)
      test_group_Series.append(single_test_Series)
    merged_train_ratings.append(pd.concat(train_group_Series, axis=1).fillna(0).astype(int))
    merged_test_ratings.append(pd.concat(test_group_Series, axis=1).fillna(0).astype(int))
    count += 1
    if count%10 == 0:
      print('Group ' + str(count)+ ' formed-> ' + '%'+str(count/len(userGroups)*100)+' complete! ')
  return merged_train_ratings, merged_test_ratings
def load_groups(size=4):
  """Unpickle the precomputed train/test group rating tables for groups of `size`."""
  import pickle
  base = "subset/groups/"
  with open(base + "train" + str(size) + ".txt", "rb") as fp:
    train = pickle.load(fp)
  with open(base + "test" + str(size) + ".txt", "rb") as fp:
    test = pickle.load(fp)
  return train, test
def form_group_weights(groups, user_dict, users):
  """For every group, weight each member by the inverse of their mean play count."""
  groups_weights = []
  for group in groups:
    member_weights = [
        1.0 / float(users.loc[user_dict[userID]]['mean'])
        for userID in group.columns.values
    ]
    groups_weights.append(member_weights)
  return groups_weights
def form_virtual_users(groups, song_dict, agg = 'avg', groups_weights=None):
  """Collapse each group's rating table into one 'virtual user' row.

  Every item's member ratings are reduced with agg_fn using the chosen
  aggregation strategy; the result is a DataFrame with one row per group,
  NaNs filled with 0, and columns renamed to song indices via song_dict.
  Progress is printed every 10 groups.
  """
  virtual_users = []
  count = 0
  for group_idx, group in enumerate(groups):
    virtual_user = []
    for idx, item in group.iterrows():
      if(agg=='weighted_avg'):
        # weighted_avg additionally needs this group's per-member weights.
        weights = groups_weights[group_idx]
        virtual_user.append(agg_fn(agg, item, weights))
      else:
        virtual_user.append(agg_fn(agg, item))
    virtual_users.append(pd.Series(virtual_user, index = group.index.values).fillna(0))
    count += 1
    if count%10 == 0:
      print('Group ' + str(count)+ ' formed-> ' + '%'+str(count/len(groups)*100)+' complete! ')
  virtual_users = pd.DataFrame(virtual_users).fillna(0)
  # Rename the item columns to their integer song indices.
  song_idx_cols = pd.Series([song_dict[x] for x in virtual_users.columns.values], index = virtual_users.columns.values)
  return virtual_users.rename(columns=song_idx_cols)
def form_and_save_groups(userIDs, train_data, test_data):
  """Build groups of size 4 and 12 and pickle their rating tables to disk."""
  import pickle
  for g_size in (4, 12):
    user_groups = group_users(userIDs, g_size)
    train_groups, test_groups = form_groups(user_groups, train_data, test_data)
    with open("train" + str(g_size) + ".txt", "wb") as fp:
      pickle.dump(train_groups, fp)
    with open("test" + str(g_size) + ".txt", "wb") as fp:
      pickle.dump(test_groups, fp)
#=============================================================================
def extract_most_pop(songs, n):
  """Indices of the n most popular songs by total plays, occurrence and mean."""
  def top_n(column):
    return songs.sort_values(column, ascending=False).iloc[:n].index.values
  return top_n('totalPlay'), top_n('occurence'), top_n('mean')
def rec_most_pop(users, songs, by = 'occ', n=20):
  """Recommend the same top-n popular songs (per criterion `by`) to every user."""
  totPlay, occ, mean = extract_most_pop(songs, n)
  ranking = {'tot': totPlay, 'occ': occ, 'mean': mean}.get(by)
  if ranking is None:
    return None  # original implicitly returned None for an unknown `by`
  return pd.DataFrame(np.full((len(users), n), ranking, dtype=int),
                      index=np.arange(len(users)))
def rec_random(users, songs, n=20):
  """Recommend n random songs (the first n of a 20% sample) to every user."""
  sampled = songs.sample(frac=0.2).iloc[:n].index.values
  grid = np.full((len(users), n), sampled, dtype=int)
  return pd.DataFrame(grid, index=np.arange(len(users)))
#=============================================================================
def f1_precision_recall(predictions_and_labels):
  """Mean recall and precision over (recommended, relevant) pairs, plus F1.

  Returns (f1, precision, recall).
  """
  pal = np.asarray(predictions_and_labels)
  precisions, recalls = [], []
  for recommended, relevant in pal:
    # Recall: fraction of relevant items that were recommended.
    label_hits = np.isin(relevant, recommended, assume_unique=False)
    if label_hits.size > 0:
      recalls.append(float(label_hits.sum()) / float(label_hits.size))
    # Precision: fraction of recommended items that were relevant.
    rec_hits = np.isin(recommended, relevant, assume_unique=False)
    if rec_hits.size > 0:
      precisions.append(float(rec_hits.sum()) / float(rec_hits.size))
  r = np.asarray(recalls).mean()
  p = np.asarray(precisions).mean()
  return 2*r*p/(r+p), p, r
def mpr(predictions_and_labels):
  """Mean percentile rank of the true labels within the recommendation lists."""
  pal = np.asarray(predictions_and_labels)
  rank = 0
  for recommended, relevant in pal:
    # Average percentile rank of this user's true items.
    rank += sum(rank_ui(label, recommended) / len(relevant)
                for label in relevant)
  return rank / len(pal)
def rank_ui(label, recommendations):
  """Percentile position (0-100) of `label` within `recommendations`; 100 if absent."""
  if label not in recommendations:
    return 100.0
  position = recommendations.index(label)
  return float(position) / len(recommendations) * 100
| 31.433875
| 123
| 0.585105
|
4a073195b8097ce8952ade9e75696d376835a761
| 14,259
|
py
|
Python
|
protoattend/main_protoattend.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-02-25T05:34:44.000Z
|
2022-02-25T05:34:44.000Z
|
protoattend/main_protoattend.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
protoattend/main_protoattend.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demo code to run training, testing and analysis of attention-based prototypical learning for Fashion-MNIST dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import time
import input_data
import model
import numpy as np
from options import FLAGS
import tensorflow.compat.v1 as tf
import utils
# GPU options
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # enumerate GPUs by PCI bus id
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # restrict TF to the first GPU
# File names
model_name = os.path.basename(__file__).split(".")[0]  # this script's basename
checkpoint_name = "./checkpoints/" + model_name + ".ckpt"
export_name = os.path.join("exports", time.strftime("%Y%m%d-%H%M%S"))
# Set random seed
tf.set_random_seed(FLAGS.random_seed)
np.random.seed(FLAGS.random_seed)
def inference(input_image, m_encoded_test_cand_keys, m_encoded_test_cand_values,
              m_label_test_cand):
  """Constructs inference graph.

  Encodes the (preprocessed) input image as an attention query, attends over
  the precomputed candidate keys/values, and classifies the attention-weighted
  encoding.

  Args:
    input_image: raw input image; preprocessed via input_data.parse_function_test.
    m_encoded_test_cand_keys: precomputed candidate key encodings (in-memory
      array, wrapped with tf.constant).
    m_encoded_test_cand_values: precomputed candidate value encodings.
    m_label_test_cand: labels of the candidate examples.

  Returns:
    (predicted_class, confidence, weight_coefs_test) tensors, where
    confidence is the maximum per-class explainability mass.
  """
  processed_input_image = input_data.parse_function_test(input_image)
  _, encoded_query, _ = model.cnn_encoder(
      processed_input_image, reuse=False, is_training=False)
  weighted_encoded_test, weight_coefs_test = model.relational_attention(
      encoded_query,
      tf.constant(m_encoded_test_cand_keys),
      tf.constant(m_encoded_test_cand_values),
      reuse=False)
  _, prediction_weighted_test = model.classify(
      weighted_encoded_test, reuse=False)
  predicted_class = tf.argmax(prediction_weighted_test, axis=1)
  # Per-class explainability mass from the attention coefficients (py_func
  # calls back into numpy-land).
  expl_per_class = tf.py_func(
      utils.class_explainability,
      (tf.constant(m_label_test_cand), weight_coefs_test), tf.float32)
  confidence = tf.reduce_max(expl_per_class, axis=1)
  return predicted_class, confidence, weight_coefs_test
def main(unused_argv):
"""Main function."""
# Load training and eval data - this portion can be modified if the data is
# imported from other sources.
(m_train_data, m_train_labels), (m_eval_data, m_eval_labels) = \
tf.keras.datasets.fashion_mnist.load_data()
train_dataset = tf.data.Dataset.from_tensor_slices(
(m_train_data, m_train_labels))
eval_dataset = tf.data.Dataset.from_tensor_slices(
(m_eval_data, m_eval_labels))
train_dataset = train_dataset.map(input_data.parse_function_train)
eval_dataset = eval_dataset.map(input_data.parse_function_eval)
eval_batch_size = int(
math.floor(len(m_eval_data) / FLAGS.batch_size) * FLAGS.batch_size)
train_batch = train_dataset.repeat().batch(FLAGS.batch_size)
train_cand = train_dataset.repeat().batch(FLAGS.example_cand_size)
eval_cand = train_dataset.repeat().batch(FLAGS.eval_cand_size)
eval_batch = eval_dataset.repeat().batch(eval_batch_size)
iter_train = train_batch.make_initializable_iterator()
iter_train_cand = train_cand.make_initializable_iterator()
iter_eval_cand = eval_cand.make_initializable_iterator()
iter_eval = eval_batch.make_initializable_iterator()
image_batch, _, label_batch = iter_train.get_next()
image_train_cand, _, _ = iter_train_cand.get_next()
image_eval_cand, orig_image_eval_cand, label_eval_cand = iter_eval_cand.get_next(
)
eval_batch, orig_eval_batch, eval_labels = iter_eval.get_next()
# Model and loss definitions
_, encoded_batch_queries, encoded_batch_values = model.cnn_encoder(
image_batch, reuse=False, is_training=True)
encoded_cand_keys, _, encoded_cand_values = model.cnn_encoder(
image_train_cand, reuse=True, is_training=True)
weighted_encoded_batch, weight_coefs_batch = model.relational_attention(
encoded_batch_queries,
encoded_cand_keys,
encoded_cand_values,
normalization=FLAGS.normalization)
tf.summary.scalar("Average max. coef. train",
tf.reduce_mean(tf.reduce_max(weight_coefs_batch, axis=1)))
# Sparsity regularization
entropy_weights = tf.reduce_sum(
-weight_coefs_batch * tf.log(FLAGS.epsilon_sparsity + weight_coefs_batch),
axis=1)
sparsity_loss = tf.reduce_mean(entropy_weights) - tf.log(
FLAGS.epsilon_sparsity +
tf.constant(FLAGS.example_cand_size, dtype=tf.float32))
tf.summary.scalar("Sparsity entropy loss", sparsity_loss)
# Intermediate loss
joint_encoded_batch = (1 - FLAGS.alpha_intermediate) * encoded_batch_values \
+ FLAGS.alpha_intermediate * weighted_encoded_batch
logits_joint_batch, _ = model.classify(joint_encoded_batch, reuse=False)
softmax_joint_op = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits_joint_batch, labels=label_batch))
# Self loss
logits_orig_batch, _ = model.classify(encoded_batch_values, reuse=True)
softmax_orig_key_op = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits_orig_batch, labels=label_batch))
# Prototype combination loss
logits_weighted_batch, _ = model.classify(weighted_encoded_batch, reuse=True)
softmax_weighted_op = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits_weighted_batch, labels=label_batch))
train_loss_op = softmax_orig_key_op + softmax_weighted_op + \
softmax_joint_op + FLAGS.sparsity_weight * sparsity_loss
tf.summary.scalar("Total loss", train_loss_op)
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
FLAGS.init_learning_rate,
global_step=global_step,
decay_steps=FLAGS.decay_every,
decay_rate=FLAGS.decay_rate)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
tf.summary.scalar("Learning rate", learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
gvs = optimizer.compute_gradients(train_loss_op)
capped_gvs = [(tf.clip_by_value(grad, -FLAGS.gradient_thresh,
FLAGS.gradient_thresh), var)
for grad, var in gvs]
train_op = optimizer.apply_gradients(capped_gvs, global_step=global_step)
# Evaluate model
# Process sequentially to avoid out-of-memory.
i = tf.constant(0)
encoded_cand_keys_val = tf.zeros([0, FLAGS.attention_dim])
encoded_cand_queries_val = tf.zeros([0, FLAGS.attention_dim])
encoded_cand_values_val = tf.zeros([0, FLAGS.val_dim])
def cond(i, unused_l1, unused_l2, unused_l3):
return i < int(math.ceil(FLAGS.eval_cand_size / FLAGS.example_cand_size))
def body(i, encoded_cand_keys_val, encoded_cand_queries_val,
encoded_cand_values_val):
"""Loop body."""
temp = image_eval_cand[i * FLAGS.example_cand_size:(i + 1) *
FLAGS.example_cand_size, :, :, :]
temp_keys, temp_queries, temp_values = model.cnn_encoder(
temp, reuse=True, is_training=False)
encoded_cand_keys_val = tf.concat([encoded_cand_keys_val, temp_keys], 0)
encoded_cand_queries_val = tf.concat(
[encoded_cand_queries_val, temp_queries], 0)
encoded_cand_values_val = tf.concat([encoded_cand_values_val, temp_values],
0)
return i+1, encoded_cand_keys_val, encoded_cand_queries_val, \
encoded_cand_values_val
_, encoded_cand_keys_val, encoded_cand_queries_val, \
encoded_cand_values_val, = tf.while_loop(
cond, body, [i, encoded_cand_keys_val, encoded_cand_queries_val,
encoded_cand_values_val],
shape_invariants=[
i.get_shape(), tf.TensorShape([None, FLAGS.attention_dim]),
tf.TensorShape([None, FLAGS.attention_dim]),
tf.TensorShape([None, FLAGS.val_dim])])
j = tf.constant(0)
encoded_val_keys = tf.zeros([0, FLAGS.attention_dim])
encoded_val_queries = tf.zeros([0, FLAGS.attention_dim])
encoded_val_values = tf.zeros([0, FLAGS.val_dim])
def cond2(j, unused_j1, unused_j2, unused_j3):
return j < int(math.ceil(eval_batch_size / FLAGS.batch_size))
def body2(j, encoded_val_keys, encoded_val_queries, encoded_val_values):
"""Loop body."""
temp = eval_batch[j * FLAGS.batch_size:(j + 1) * FLAGS.batch_size, :, :, :]
temp_keys, temp_queries, temp_values = model.cnn_encoder(
temp, reuse=True, is_training=False)
encoded_val_keys = tf.concat([encoded_val_keys, temp_keys], 0)
encoded_val_queries = tf.concat([encoded_val_queries, temp_queries], 0)
encoded_val_values = tf.concat([encoded_val_values, temp_values], 0)
return j + 1, encoded_val_keys, encoded_val_queries, encoded_val_values
_, encoded_val_keys, encoded_val_queries, \
encoded_val_values = tf.while_loop(
cond2, body2, [
j, encoded_val_keys, encoded_val_queries, encoded_val_values],
shape_invariants=[
j.get_shape(), tf.TensorShape([None, FLAGS.attention_dim]),
tf.TensorShape([None, FLAGS.attention_dim]),
tf.TensorShape([None, FLAGS.val_dim])])
weighted_encoded_val, weight_coefs_val = model.relational_attention(
encoded_val_queries,
encoded_cand_keys_val,
encoded_cand_values_val,
normalization=FLAGS.normalization)
# Coefficient distribution
tf.summary.scalar("Average max. coefficient val",
tf.reduce_mean(tf.reduce_max(weight_coefs_val, axis=1)))
# Analysis of median number of prototypes above a certain
# confidence threshold.
sorted_weights = tf.contrib.framework.sort(
weight_coefs_val, direction="DESCENDING")
cum_sorted_weights = tf.cumsum(sorted_weights, axis=1)
for threshold in [0.5, 0.9, 0.95]:
num_examples_thresh = tf.shape(sorted_weights)[1] + 1 - tf.reduce_sum(
tf.cast(cum_sorted_weights > threshold, tf.int32), axis=1)
tf.summary.histogram(
"Number of samples for explainability above " + str(threshold),
num_examples_thresh)
tf.summary.scalar(
"Median number of samples for explainability above " + str(threshold),
tf.contrib.distributions.percentile(num_examples_thresh, q=50))
expl_per_class = tf.py_func(utils.class_explainability,
(label_eval_cand, weight_coefs_val), tf.float32)
max_expl = tf.reduce_max(expl_per_class, axis=1)
tf.summary.histogram("Maximum per-class explainability", max_expl)
_, prediction_val = model.classify(encoded_val_values, reuse=True)
_, prediction_weighted_val = model.classify(weighted_encoded_val, reuse=True)
val_eq_op = tf.equal(
tf.cast(tf.argmax(prediction_val, 1), dtype=tf.int32), eval_labels)
val_acc_op = tf.reduce_mean(tf.cast(val_eq_op, dtype=tf.float32))
tf.summary.scalar("Val accuracy input query", val_acc_op)
val_weighted_eq_op = tf.equal(
tf.cast(tf.argmax(prediction_weighted_val, 1), dtype=tf.int32),
eval_labels)
val_weighted_acc_op = tf.reduce_mean(
tf.cast(val_weighted_eq_op, dtype=tf.float32))
tf.summary.scalar("Val accuracy weighted prototypes", val_weighted_acc_op)
conf_wrong = tf.reduce_mean(
(1 - tf.cast(val_weighted_eq_op, tf.float32)) * max_expl)
tf.summary.scalar("Val average confidence of wrong decisions", conf_wrong)
conf_right = tf.reduce_mean(
tf.cast(val_weighted_eq_op, tf.float32) * max_expl)
tf.summary.scalar("Val average confidence of right decisions", conf_right)
# Confidence-controlled prediction
for ti in [0.5, 0.8, 0.9, 0.95, 0.99, 0.999]:
mask = tf.cast(tf.greater(max_expl, ti), tf.float32)
acc_tot = tf.reduce_sum(tf.cast(val_weighted_eq_op, tf.float32) * mask)
conf_tot = tf.reduce_sum(mask)
tf.summary.scalar("Val accurate ratio for confidence above " + str(ti),
acc_tot / conf_tot)
tf.summary.scalar("Val total ratio for confidence above " + str(ti),
conf_tot / eval_batch_size)
# Visualization of example images and corresponding prototypes
for image_ind in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
tf.summary.image("Input image " + str(image_ind),
tf.expand_dims(orig_eval_batch[image_ind, :, :, :], 0))
mask = tf.greater(weight_coefs_val[image_ind, :], 0.05)
mask = tf.squeeze(mask)
mask.set_shape([None])
relational_attention_images = tf.boolean_mask(
orig_image_eval_cand, mask, axis=0)
relational_attention_weight_coefs = tf.boolean_mask(
tf.squeeze(weight_coefs_val[image_ind, :]), mask, axis=0)
annotated_images = utils.tf_put_text(relational_attention_images,
relational_attention_weight_coefs)
tf.summary.image("Prototype images for image " + str(image_ind),
annotated_images)
# Training setup
init = (tf.global_variables_initializer(), tf.local_variables_initializer())
saver_all = tf.train.Saver()
summaries = tf.summary.merge_all()
with tf.Session() as sess:
summary_writer = tf.summary.FileWriter("./tflog/" + model_name, sess.graph)
sess.run(init)
sess.run(iter_train.initializer)
sess.run(iter_train_cand.initializer)
sess.run(iter_eval_cand.initializer)
sess.run(iter_eval.initializer)
for step in range(1, FLAGS.num_steps):
if step % FLAGS.display_step == 0:
_, train_loss = sess.run([train_op, train_loss_op])
print("Step " + str(step) + " , Training loss = " +
"{:.4f}".format(train_loss))
else:
sess.run(train_op)
if step % FLAGS.val_step == 0:
val_acc, merged_summary = sess.run([val_weighted_acc_op, summaries])
print("Step " + str(step) + " , Val Accuracy = " +
"{:.4f}".format(val_acc))
summary_writer.add_summary(merged_summary, step)
if step % FLAGS.save_step == 0:
saver_all.save(sess, checkpoint_name)
# Script entry point: tf.app.run() parses FLAGS and dispatches to main().
if __name__ == "__main__":
  tf.app.run()
| 41.330435
| 121
| 0.72193
|
4a0731d52f20749535f839bab1b163a61ac0653f
| 3,485
|
py
|
Python
|
organize/filters/created.py
|
awesome-archive/organize
|
911eeda7aa0c116a042deee2dadee04353459ed2
|
[
"MIT"
] | 1
|
2020-10-01T05:51:33.000Z
|
2020-10-01T05:51:33.000Z
|
organize/filters/created.py
|
davidolrik/organize
|
b6581531bcdbdb6c0833472b683cbbeaa513f595
|
[
"MIT"
] | null | null | null |
organize/filters/created.py
|
davidolrik/organize
|
b6581531bcdbdb6c0833472b683cbbeaa513f595
|
[
"MIT"
] | null | null | null |
import sys
from datetime import datetime, timedelta
from .filter import Filter
class Created(Filter):
    """
    Matches files by created date

    :param int days:
        specify number of days

    :param int hours:
        specify number of hours

    :param int minutes:
        specify number of minutes

    :param int seconds:
        specify number of seconds

    :param str mode:
        either 'older' or 'newer'. 'older' matches all files created before the given
        time, 'newer' matches all files created within the given time.
        (default = 'older')

    :returns:
        - ``{created.year}`` -- the year the file was created
        - ``{created.month}`` -- the month the file was created
        - ``{created.day}`` -- the day the file was created
        - ``{created.hour}`` -- the hour the file was created
        - ``{created.minute}`` -- the minute the file was created
        - ``{created.second}`` -- the second the file was created

    Examples:
        - Show all files on your desktop created at least 10 days ago:

          .. code-block:: yaml
            :caption: config.yaml

            rules:
              - folders: '~/Desktop'
                filters:
                  - created:
                      days: 10
                actions:
                  - echo: 'Was created at least 10 days ago'

        - Show all files on your desktop which were created within the last 5 hours:

          .. code-block:: yaml
            :caption: config.yaml

            rules:
              - folders: '~/Desktop'
                filters:
                  - created:
                      hours: 5
                      mode: newer
                actions:
                  - echo: 'Was created within the last 5 hours'

        - Sort pdfs by year of creation:

          .. code-block:: yaml
            :caption: config.yaml

            rules:
              - folders: '~/Documents'
                filters:
                  - extension: pdf
                  - created
                actions:
                  - move: '~/Documents/PDF/{created.year}/'
    """

    def __init__(self, days=0, hours=0, minutes=0, seconds=0, mode="older"):
        self._mode = mode.strip().lower()
        if self._mode == "older":
            self.is_older = True
        elif self._mode == "newer":
            self.is_older = False
        else:
            raise ValueError("Unknown option for 'mode': must be 'older' or 'newer'.")
        self.timedelta = timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)

    def pipeline(self, args):
        # Compare the file's creation time against "now minus the delta".
        created_date = self._created(args.path)
        reference_date = datetime.now() - self.timedelta
        if self.is_older:
            match = created_date <= reference_date
        else:
            match = created_date >= reference_date
        if match:
            return {"created": created_date}

    def _created(self, path):
        # see https://stackoverflow.com/a/39501288/300783
        stat = path.stat()
        if sys.platform.startswith("win"):
            return datetime.fromtimestamp(stat.st_ctime)
        try:
            # macOS / some BSDs expose the true creation time here.
            return datetime.fromtimestamp(stat.st_birthtime)
        except AttributeError:
            # We're probably on Linux. No easy way to get creation dates here,
            # so we'll settle for when its content was last modified.
            return datetime.fromtimestamp(stat.st_mtime)

    def __str__(self):
        return "Created(delta={}, select_mode={})".format(self.timedelta, self._mode)
| 31.681818
| 86
| 0.537446
|
4a07328c29c609059c1378df9753d571f86a9c50
| 111
|
py
|
Python
|
09_slurm/python_example/matrix_inverse.py
|
quantumiracle/hpc_beginning_workshop
|
1bd456611611beafe7bd360306a92467b0397737
|
[
"MIT"
] | 96
|
2017-04-12T18:09:23.000Z
|
2022-02-15T17:57:54.000Z
|
09_slurm/python_example/matrix_inverse.py
|
quantumiracle/hpc_beginning_workshop
|
1bd456611611beafe7bd360306a92467b0397737
|
[
"MIT"
] | 1
|
2019-11-07T19:42:52.000Z
|
2019-11-07T22:01:14.000Z
|
09_slurm/python_example/matrix_inverse.py
|
quantumiracle/hpc_beginning_workshop
|
1bd456611611beafe7bd360306a92467b0397737
|
[
"MIT"
] | 39
|
2019-09-16T21:10:59.000Z
|
2022-02-15T18:00:29.000Z
|
# Tiny Slurm demo: build a random matrix and print its inverse.
import numpy as np
N = 3  # matrix dimension
X = np.random.randn(N, N)  # N x N matrix with standard-normal entries
print("X =\n", X)
# NOTE(review): a random Gaussian matrix is invertible with probability 1,
# but a near-singular draw can make the printed inverse ill-conditioned.
print("Inverse(X) =\n", np.linalg.inv(X))
| 18.5
| 41
| 0.603604
|
4a0733018049df7ff6e1a029a7730ac995321a01
| 12,143
|
py
|
Python
|
test/forward_backward_compatibility/check_forward_backward_compatibility.py
|
iyuanyin/pytorch
|
e21c0ac9a5d318a62371a6348c7fecc5e905129c
|
[
"Intel"
] | null | null | null |
test/forward_backward_compatibility/check_forward_backward_compatibility.py
|
iyuanyin/pytorch
|
e21c0ac9a5d318a62371a6348c7fecc5e905129c
|
[
"Intel"
] | null | null | null |
test/forward_backward_compatibility/check_forward_backward_compatibility.py
|
iyuanyin/pytorch
|
e21c0ac9a5d318a62371a6348c7fecc5e905129c
|
[
"Intel"
] | null | null | null |
import argparse
import datetime
import re
import sys
import warnings
from collections import defaultdict
import torch
from torch._C import parse_schema
# The date specifies how long the allowlist exclusion should apply to.
#
# - If we NEVER give BC guarantee for an operator, you can put the
# date arbitrarily far in the future.
# - Otherwise, pick a date that is far enough in the future that you
# believe you can land your diff before then.
#
# Allowlist entries can be removed after the date listed on them passes.
#
# Allowlist item format:
# [
# 0: function name regex
# 1: date until which the allowlist entry is valid
# 2: (optional) function argument regex
# ]
#
# NB: function name DOES NOT include overload name!
ALLOW_LIST = [
    ("c10_experimental", datetime.date(2222, 1, 1)),
    # Internal
    ("static", datetime.date(9999, 1, 1)),
    ("prim::ModuleDictIndex", datetime.date(9999, 1, 1)),
    ("prim::MKLDNNRelu6", datetime.date(9999, 1, 1)),
    ("prim::MKLDNNRelu6_", datetime.date(9999, 1, 1)),
    ("prim::Concat", datetime.date(9999, 1, 1)),
    # Internal, profiler-specific ops
    ("profiler::_call_end_callbacks_on_jit_fut*", datetime.date(9999, 1, 1)),
    ("profiler::_record_function_enter", datetime.date(9999, 1, 1)),
    ("aten::_sparse_addmm", datetime.date(2022, 6, 30)),
    ("aten::_cholesky_helper", datetime.date(9999, 1, 1)),
    ("aten::_lstsq_helper", datetime.date(9999, 1, 1)),
    ("aten::_syevd_helper", datetime.date(9999, 1, 1)),
    ("aten::_linalg_solve_out_helper_", datetime.date(9999, 1, 1)),
    ("aten::select_backward", datetime.date(9999, 1, 1)),
    ("aten::slice_backward", datetime.date(9999, 1, 1)),
    ("aten::diagonal_backward", datetime.date(9999, 1, 1)),
    ("aten::rowwise_prune", datetime.date(9999, 1, 1)),
    ("aten::adaptive_avg_pool3d_backward", datetime.date(9999, 1, 1)),
    ("aten::_embedding_bag_dense_backward", datetime.date(9999, 1, 1)),
    ("aten::randperm", datetime.date(9999, 1, 1)),
    ("aten::linalg_solve", datetime.date(2022, 8, 31)),
    ("aten::linalg_solve.out", datetime.date(2022, 8, 31)),
    ("aten::l1_loss_backward.grad_input", datetime.date(2022, 7, 1)),
    ("aten::l1_loss_backward", datetime.date(2022, 7, 1)),
    ("aten::l1_loss.out", datetime.date(2022, 7, 1)),
    ("aten::_linalg_qr_helper", datetime.date(2022, 8, 1)),
    ("aten::linalg_lu_solve", datetime.date(2022, 8, 1)),
    ("aten::linalg_lu_solve.out", datetime.date(2022, 8, 1)),
    ("aten::solve", datetime.date(9999, 1, 1)),
    ("aten::solve.solution", datetime.date(9999, 1, 1)),
    ("aten::_solve_helper", datetime.date(9999, 1, 1)),
    ("aten::_convolution_nogroup", datetime.date(9999, 1, 1)),
    ("aten::miopen_convolution_backward", datetime.date(9999, 1, 1)),
    ("aten::miopen_convolution_backward_bias", datetime.date(9999, 1, 1)),
    ("aten::miopen_convolution_backward_input", datetime.date(9999, 1, 1)),
    ("aten::miopen_convolution_backward_weight", datetime.date(9999, 1, 1)),
    ("aten::miopen_convolution_transpose_backward", datetime.date(9999, 1, 1)),
    ("aten::miopen_convolution_transpose_backward_input", datetime.date(9999, 1, 1)),
    ("aten::miopen_convolution_transpose_backward_weight", datetime.date(9999, 1, 1)),
    ("aten::miopen_depthwise_convolution_backward", datetime.date(9999, 1, 1)),
    ("aten::miopen_depthwise_convolution_backward_input", datetime.date(9999, 1, 1)),
    ("aten::miopen_depthwise_convolution_backward_weight", datetime.date(9999, 1, 1)),
    ("aten::_nested_tensor", datetime.date(9999, 1, 1)),
    ("prepacked::unpack_prepacked_sizes_conv2d", datetime.date(9999, 1, 1)),
    ("prepacked::unpack_prepacked_sizes_linear", datetime.date(9999, 1, 1)),
    # NOTE(review): the next two entries duplicate "aten::linalg_solve" and
    # "aten::linalg_solve.out" above with identical dates; harmless but one
    # pair can be deleted.
    ("aten::linalg_solve", datetime.date(2022, 8, 31)),
    ("aten::linalg_solve.out", datetime.date(2022, 8, 31)),
    ("aten::quantile", datetime.date(2022, 9, 30)),
    ("aten::nanquantile", datetime.date(2022, 9, 30)),
    ("aten::native_multi_head_self_attention", datetime.date(9999, 1, 1)),
    ("aten::_native_multi_head_self_attention", datetime.date(9999, 1, 1)),
    ("aten::grid_sampler_3d_backward", datetime.date(9999, 1, 1)),
    ("aten::_transform_bias_rescale_qkv", datetime.date(9999, 1, 1)),
    ("aten::_s_where", datetime.date(2022, 9, 30)),
    ("prim::infer_squeeze_size.dim", datetime.date(9999, 1, 1)),
    ("prim::infer_squeeze_size", datetime.date(9999, 1, 1)),
    ("aten::_weight_norm_cuda_interface", datetime.date(9999, 1, 1)),
    ("aten::_weight_norm_cuda_interface_backward", datetime.date(9999, 1, 1)),
    ("aten::segment_reduce", datetime.date(9999, 1, 1)),
    ("aten::_segment_reduce_backward", datetime.date(9999, 1, 1)),
    ("aten::empty.SymInt", datetime.date(9999, 1, 1)),
    # TODO: FIXME: prims shouldn't be checked
    ("prims::.*", datetime.date(9999, 1, 1)),
]
# Precompile the allowlist, dropping entries whose expiry date has passed.
# Each compiled entry is a 3-tuple: (name regex, expiry date, args regex or None).
ALLOW_LIST_COMPILED = [
    (
        re.compile(entry[0]),
        entry[1],
        re.compile(entry[2]) if len(entry) > 2 else None,
    )
    for entry in ALLOW_LIST
    if entry[1] >= datetime.date.today()
]
def allow_listed(schema):
    """Return True if *schema* is excluded from compatibility checking."""
    schema_text = str(schema)
    for name_regex, _expiry, args_regex in ALLOW_LIST_COMPILED:
        if not name_regex.search(schema_text):
            continue
        if args_regex is not None:
            # An argument regex, when present, decides the outcome of the
            # first name match (same early-return behavior as before).
            return bool(args_regex.search(schema_text))
        return True
    return False
# The nightly will fail to parse newly added syntax to schema declarations
# Add new schemas that will fail the nightly here
# Each entry: (name regex, date until which matching schema lines are skipped).
dont_parse_list = [
    ("_TorchScriptTesting.*", datetime.date(2099, 9, 17)),
    ("test_backend", datetime.date(2099, 9, 17)),
    ("dist_c10d", datetime.date(2099, 9, 17)),
]
def has_valid_upgraders(schema, version_map):
    """Return True if *schema* has a registered upgrader in *version_map*.

    *version_map* is keyed by operator name (no overload); each value maps a
    full "name.overload" key to the list of old schemas that have upgraders.
    The schema is covered iff it equals one of those old schemas.
    """
    entries = version_map.get(schema.name)
    if entries is None:
        return False
    # Flatten all overload entries and look for an exact schema match.
    # (The original also built an unused list of overload keys; dropped.)
    return any(
        old_schema == schema
        for upgrader_schemas in entries.values()
        for old_schema in upgrader_schemas
    )
def dont_parse(schema_line):
    """Return True if this schema line should be skipped instead of parsed."""
    today = datetime.date.today()
    for pattern, expiry in dont_parse_list:
        # Expired skip-entries no longer apply.
        if expiry < today:
            continue
        if re.search(pattern, schema_line):
            return True
    return False
def load_schemas_to_dict():
    """Collect all currently registered op and custom-class schemas, grouped by name."""
    schema_dict = defaultdict(list)
    all_schemas = (torch._C._jit_get_all_schemas()
                   + torch._C._jit_get_custom_class_schemas())
    for schema in all_schemas:
        schema_dict[schema.name].append(schema)
    return schema_dict
def process_version_map(version_map):
    """Re-key the operator version map for easier lookup.

    The input maps full "name.overload" keys to upgrader entries; the output
    is Dict[operator_name, Dict[full_key, List[parsed old schema]]].
    """
    processed = defaultdict(dict)
    for full_key, upgraders in version_map.items():
        op_name = full_key.split(".")[0]
        processed[op_name][full_key] = [
            parse_schema(upgrader.old_schema) for upgrader in upgraders
        ]
    return processed
def check_bc(existing_schemas):
    """Check that every existing schema is still backward compatible.

    For each schema (unless allowlisted or covered by an upgrader), look for
    at least one currently-registered schema with the same name that is
    backward compatible with it. Returns True iff all schemas pass.
    """
    new_schema_dict = load_schemas_to_dict()
    version_map = process_version_map(torch._C._get_operator_version_map())
    is_bc = True
    # Collect every broken op so the failure summary can list them all at once.
    broken_ops = []
    for existing_schema in existing_schemas:
        if allow_listed(existing_schema):
            print("schema: ", str(existing_schema), " found on allowlist, skipping")
            continue
        if has_valid_upgraders(existing_schema, version_map):
            print("schema: ", str(existing_schema), " has valid upgrader, skipping")
            continue
        print("processing existing schema: ", str(existing_schema))
        matching_new_schemas = new_schema_dict.get(existing_schema.name, [])
        found = False
        # One compatible candidate among the same-name overloads is enough.
        for matching_new_schema in matching_new_schemas:
            if matching_new_schema.is_backward_compatible_with(existing_schema):
                found = True
                break
        if not found:
            print(
                "Can NOT find backward compatible schemas after changes "
                "for schema {} from the following candidates:\n[\n{}\n]".format(
                    str(existing_schema),
                    "\n\t".join(str(s) for s in matching_new_schemas),
                )
            )
            # TODO Print out more details about why candidates don't match.
            broken_ops.append(str(existing_schema))
            is_bc = False
    if is_bc:
        print("Found backward compatible schemas for all existing schemas")
    else:
        print(
            "The PR is introducing backward incompatible changes to the "
            "operator library. Please contact PyTorch team to confirm "
            "whether this change is wanted or not. \n\nBroken ops: "
            "[\n\t{}\n]".format("\n\t".join(broken_ops))
        )
    return is_bc
def check_fc(existing_schemas):
    """Check forward compatibility of every existing schema.

    Unlike check_bc this only reports (via warnings.warn) and returns None:
    FC breaks do not fail the run until a policy is decided (see the TODO at
    the call site).
    """
    new_schema_dict = load_schemas_to_dict()
    is_fc = True
    broken_ops = []
    for existing_schema in existing_schemas:
        if allow_listed(existing_schema):
            print("schema: ", str(existing_schema), " found on allowlist, skipping")
            continue
        print("processing existing schema: ", str(existing_schema))
        matching_new_schemas = new_schema_dict.get(existing_schema.name, [])
        found = False
        # Keep the non-empty failure reasons so they can be printed below.
        possible_failure_reasons = []
        for matching_new_schema in matching_new_schemas:
            is_compatible, reason = matching_new_schema.check_forward_compatible_with(existing_schema)
            if is_compatible:
                found = True
                break
            if reason != "":
                possible_failure_reasons.append(reason)
        if not found:
            print(
                "Can NOT find forward compatible schemas after changes "
                "for schema {} from the following candidates:\n[\n{}\n]".format(
                    str(existing_schema),
                    "\n\t".join(str(s) for s in matching_new_schemas),
                )
            )
            print(
                "Refer to following reasons for failure "
                "to find FC schema:\n[\n{}\n]".format(
                    "\n\t".join(str(r) for r in possible_failure_reasons)
                )
            )
            broken_ops.append(str(existing_schema))
            is_fc = False
    if is_fc:
        print("Found forward compatible schemas for all existing schemas")
    else:
        warnings.warn(
            "The PR is introducing a potentially forward incompatible changes to the "
            "operator library. Please contact PyTorch team to confirm "
            "whether this change is wanted or not. \n\nBroken ops: "
            "[\n\t{}\n]".format("\n\t".join(broken_ops))
        )
if __name__ == "__main__":
    # Fix: the description was a copy-paste from the argparse docs
    # ("Process some integers.") and did not describe this tool.
    parser = argparse.ArgumentParser(
        description="Check operator schemas for forward/backward compatibility."
    )
    parser.add_argument(
        "--existing-schemas",
        help="filename to load existing schemas",
        type=str,
        default="schemas.txt",
    )
    args = parser.parse_args()
    slist = []
    with open(args.existing_schemas, "r") as f:
        # Iterate lines directly instead of a readline()-until-empty loop.
        for line in f:
            line = line.strip()
            if not line:
                # Skip blank lines; previously a blank line mid-file would be
                # handed to parse_schema("").
                continue
            if dont_parse(line):
                print("Not parsing schema line: ", line)
                continue
            slist.append(parse_schema(line))
    # TODO in case there is FC breaking changes,
    # we just warn for now until there is a policy.
    check_fc(slist)
    if not check_bc(slist):
        sys.exit(1)
| 40.61204
| 102
| 0.641851
|
4a073366ecf8c575bdd0d2d9fd1b7c93121fc876
| 455
|
py
|
Python
|
TimeWrapper_JE/venv/Lib/site-packages/requests/__version__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | 227
|
2021-01-20T05:34:32.000Z
|
2022-03-29T12:43:05.000Z
|
TimeWrapper_JE/venv/Lib/site-packages/requests/__version__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | 55
|
2020-09-07T02:12:51.000Z
|
2022-03-23T02:37:42.000Z
|
TimeWrapper_JE/venv/Lib/site-packages/requests/__version__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | 239
|
2021-01-28T02:59:53.000Z
|
2022-03-29T08:02:17.000Z
|
# .-. .-. .-. . . .-. .-. .-. .-.
# |(  |-  |.| | | |-  `-.  |  `-.
# ' ' `-' `-`.`-' `-' `-'  '  `-'
# Package metadata for the `requests` distribution; these dunder constants
# are read by setup tooling and by `requests.__version__` at runtime.
__title__ = 'requests'
__description__ = 'Python HTTP for Humans.'
__url__ = 'https://requests.readthedocs.io'
__version__ = '2.25.1'
# Hex-encoded version: 0xMMmmpp for 2.25.01 — keep in sync with __version__.
__build__ = 0x022501
__author__ = 'Kenneth Reitz'
__author_email__ = 'me@kennethreitz.org'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2020 Kenneth Reitz'
__cake__ = u'\u2728 \U0001f370 \u2728'
| 30.333333
| 47
| 0.562637
|
4a0733a07d206794eb8eca2795fe6638b4cacf06
| 1,429
|
py
|
Python
|
sa/migrations/0003_task_schedule.py
|
xUndero/noc
|
9fb34627721149fcf7064860bd63887e38849131
|
[
"BSD-3-Clause"
] | 1
|
2019-09-20T09:36:48.000Z
|
2019-09-20T09:36:48.000Z
|
sa/migrations/0003_task_schedule.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
sa/migrations/0003_task_schedule.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# task schedule
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from django.db import models
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
    """Create the sa_taskschedule table for the TaskSchedule model.

    NOTE: migration files are intentionally left byte-stable; only
    documentation is added here.
    """

    def migrate(self):
        # Model 'TaskSchedule'
        # Column defaults: run daily (86400 s), one retry with a 60 s delay,
        # 300 s execution timeout; next_run is stamped at row creation.
        self.db.create_table(
            "sa_taskschedule",
            (
                ("id", models.AutoField(verbose_name="ID", primary_key=True, auto_created=True)),
                ("periodic_name", models.CharField("Periodic Task", max_length=64)),
                ("is_enabled", models.BooleanField("Enabled?", default=False)),
                ("run_every", models.PositiveIntegerField("Run Every (secs)", default=86400)),
                ("retries", models.PositiveIntegerField("Retries", default=1)),
                ("retry_delay", models.PositiveIntegerField("Retry Delay (secs)", default=60)),
                ("timeout", models.PositiveIntegerField("Timeout (secs)", default=300)),
                ("next_run", models.DateTimeField("Next Run", auto_now_add=True)),
                ("retries_left", models.PositiveIntegerField("Retries Left", default=1)),
            ),
        )
| 42.029412
| 97
| 0.522743
|
4a0733bd9f95c6858b8cf018a52a31505b0363f7
| 1,181
|
py
|
Python
|
py_cord_components/ext/filters.py
|
spacedev-official/discord.py-components
|
14a3ccc727a332ea37147f5278f28209c699e3d2
|
[
"MIT"
] | 3
|
2021-12-05T08:39:26.000Z
|
2022-02-17T23:54:53.000Z
|
py_cord_components/ext/filters.py
|
spacedev-official/discord.py-components
|
14a3ccc727a332ea37147f5278f28209c699e3d2
|
[
"MIT"
] | null | null | null |
py_cord_components/ext/filters.py
|
spacedev-official/discord.py-components
|
14a3ccc727a332ea37147f5278f28209c699e3d2
|
[
"MIT"
] | 2
|
2022-01-11T23:27:26.000Z
|
2022-02-22T22:29:08.000Z
|
from discord import Message, Guild, TextChannel, User
from py_cord_components.interaction import Interaction
from py_cord_components.component import Component, Button, SelectOption
__all__ = (
"message_filter",
"component_filter",
"guild_filter",
"channel_filter",
"user_filter",
)
def message_filter(message: Message, ephemeral: bool = False):
    """Build a predicate that accepts only interactions on *message*."""

    def check(interaction: Interaction):
        # Ephemeral responses arrive as raw dicts and can never match.
        if ephemeral or isinstance(interaction.message, dict):
            return False
        return interaction.message.id == message.id

    return check
def component_filter(component: Component):
    """Build a predicate that accepts only interactions from *component*."""

    def check(interaction: Interaction):
        return interaction.custom_id == component.id

    return check
def guild_filter(guild: Guild):
    """Build a predicate that accepts only interactions from *guild*."""

    def check(interaction: Interaction):
        return interaction.guild_id == guild.id

    return check
def channel_filter(channel: TextChannel):
    """Build a predicate that accepts only interactions from *channel*."""

    def check(interaction: Interaction):
        return interaction.channel_id == channel.id

    return check
def user_filter(user: User):
    """Build a predicate that accepts only interactions from *user*."""

    def check(interaction: Interaction):
        return interaction.user.id == user.id

    return check
| 22.711538
| 72
| 0.719729
|
4a0735c1199fb4345ee2686ed963b1e61f4c3bff
| 2,627
|
py
|
Python
|
mastercardmoneysend/cardeligibility.py
|
candeias/mastercard_moneysend_py3
|
c7b52b406b02a1d4fd98ce2dbd6ce44a9e668492
|
[
"MIT"
] | 1
|
2019-02-06T22:06:16.000Z
|
2019-02-06T22:06:16.000Z
|
mastercardmoneysend/cardeligibility.py
|
candeias/mastercard_moneysend_py3
|
c7b52b406b02a1d4fd98ce2dbd6ce44a9e668492
|
[
"MIT"
] | null | null | null |
mastercardmoneysend/cardeligibility.py
|
candeias/mastercard_moneysend_py3
|
c7b52b406b02a1d4fd98ce2dbd6ce44a9e668492
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2016 MasterCard International Incorporated
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
# Neither the name of the MasterCard International Incorporated nor the names of its
# contributors may be used to endorse or promote products derived from this software
# without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
from mastercardapicore import BaseObject
from mastercardapicore import RequestMap
from mastercardapicore import OperationConfig
from mastercardapicore import OperationMetadata
from .resourceconfig import ResourceConfig
class CardEligibility(BaseObject):
    """MoneySend card-eligibility API resource.

    Wraps the /moneysend/v3/eligibility/pan endpoint of the MasterCard
    MoneySend API.
    """

    # Maps operation UUIDs to their REST endpoint configuration.
    __config = {
        "02f80c60-20ae-44fe-bed8-fa6c0a3375fe" : OperationConfig("/moneysend/v3/eligibility/pan", "update", [], []),
    }

    def getOperationConfig(self,operationUUID):
        """Return the OperationConfig for *operationUUID*; raise if unknown."""
        if operationUUID not in self.__config:
            # Fix: the original referenced the undefined name `operationUUI`,
            # so this error path raised NameError instead of the intended
            # Exception with the offending UUID in its message.
            raise Exception("Invalid operationUUID: "+operationUUID)
        return self.__config[operationUUID]

    def getOperationMetadata(self):
        """Return OperationMetadata built from the shared ResourceConfig."""
        return OperationMetadata(ResourceConfig.getInstance().getVersion(), ResourceConfig.getInstance().getHost(), ResourceConfig.getInstance().getContext())

    def read(self):
        """
        Updates an object of type CardEligibility

        @return CardEligibility object representing the response.
        @raise ApiException: raised an exception from the response status
        """
        return BaseObject.execute("02f80c60-20ae-44fe-bed8-fa6c0a3375fe", self)
| 37
| 152
| 0.790636
|
4a0738613bb73203a19360408dc8af7525e7c8cf
| 2,132
|
py
|
Python
|
Secao 10 - Redes neurais artificiais/keras_census.py
|
flaviofontes29/Machine-Learning-e-Data-Science-com-Python
|
7b8188b6e7003426ae3a6d46d91d61494135a2b7
|
[
"MIT"
] | null | null | null |
Secao 10 - Redes neurais artificiais/keras_census.py
|
flaviofontes29/Machine-Learning-e-Data-Science-com-Python
|
7b8188b6e7003426ae3a6d46d91d61494135a2b7
|
[
"MIT"
] | null | null | null |
Secao 10 - Redes neurais artificiais/keras_census.py
|
flaviofontes29/Machine-Learning-e-Data-Science-com-Python
|
7b8188b6e7003426ae3a6d46d91d61494135a2b7
|
[
"MIT"
] | null | null | null |
import pandas as pd

# Load the census dataset: first 14 columns are predictors, column 14 is the
# binary income target.
base = pd.read_csv('census.csv')
previsores = base.iloc[:, 0:14].values
classe = base.iloc[:, 14].values

from sklearn.preprocessing import LabelEncoder, OneHotEncoder

# Label-encode every categorical predictor column with a single shared
# encoder, re-fit per column (same effect as the original repeated calls).
labelencoder_previsores = LabelEncoder()
categorical_columns = [1, 3, 5, 6, 7, 8, 9, 13]
for col in categorical_columns:
    previsores[:, col] = labelencoder_previsores.fit_transform(previsores[:, col])
# NOTE(review): `categorical_features` was removed from OneHotEncoder in
# newer scikit-learn; this script targets the old API.
onehotencoder = OneHotEncoder(categorical_features = categorical_columns)
previsores = onehotencoder.fit_transform(previsores).toarray()

labelencoder_classe = LabelEncoder()
classe = labelencoder_classe.fit_transform(classe)

from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
previsores = scaler.fit_transform(previsores)

from sklearn.model_selection import train_test_split
previsores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(previsores, classe, test_size=0.15, random_state=0)

import keras
from keras.models import Sequential
from keras.layers import Dense

# Fully connected classifier: input -> 8 -> 8 -> 1 sigmoid.
# Fix: input_dim was hard-coded to 14, but one-hot encoding expands the
# feature matrix well beyond 14 columns, so fit() would fail on a shape
# mismatch. Derive the input width from the data instead.
classificador = Sequential()
classificador.add(Dense(units = 8, activation = 'relu', input_dim = previsores.shape[1]))
classificador.add(Dense(units = 8, activation = 'relu'))
classificador.add(Dense(units = 1, activation = 'sigmoid'))
classificador.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
classificador.fit(previsores_treinamento, classe_treinamento, batch_size = 10, epochs = 100)

# Threshold the sigmoid outputs at 0.5 to get hard class predictions.
previsoes = classificador.predict(previsores_teste)
previsoes = (previsoes > 0.5)

from sklearn.metrics import confusion_matrix, accuracy_score
precisao = accuracy_score(classe_teste, previsoes)
matriz = confusion_matrix(classe_teste, previsoes)
| 45.361702
| 145
| 0.789869
|
4a07388dba409f6b03e711cb1f7e33d18c3a65b1
| 8,612
|
py
|
Python
|
test/test_images.py
|
g--o/Lavinder
|
a31092c452be769b7aeb6a1a80a6e57cc4625829
|
[
"MIT"
] | 1
|
2019-06-18T07:44:04.000Z
|
2019-06-18T07:44:04.000Z
|
test/test_images.py
|
g--o/Lavinder
|
a31092c452be769b7aeb6a1a80a6e57cc4625829
|
[
"MIT"
] | 22
|
2019-02-23T23:56:05.000Z
|
2019-09-04T21:35:24.000Z
|
test/test_images.py
|
g--o/Lavinder
|
a31092c452be769b7aeb6a1a80a6e57cc4625829
|
[
"MIT"
] | 4
|
2019-02-22T23:26:00.000Z
|
2022-01-03T17:46:54.000Z
|
"""
test_images.py contains unittests for liblavinder.images.Img
and its supporting code.
"""
import pytest
import liblavinder.images as images
import cairocffi
import os
from os import path
from glob import glob
from collections import OrderedDict
# Locations of the image fixtures checked in next to this test module.
TEST_DIR = path.dirname(os.path.abspath(__file__))
DATA_DIR = path.join(TEST_DIR, 'data')
# Image assets live one directory level below DATA_DIR.
PNGS = glob(path.join(DATA_DIR, '*', '*.png'))
SVGS = glob(path.join(DATA_DIR, '*', '*.svg'))
ALL_IMAGES = glob(path.join(DATA_DIR, '*', '*'))
@pytest.fixture(
    scope='function',
    params=ALL_IMAGES,
)
def path_n_bytes_image(request):
    """Parametrized fixture: (path, raw bytes) for every test image."""
    fpath = request.param
    with open(fpath, 'rb') as fobj:
        raw = fobj.read()
    return fpath, raw
@pytest.fixture(
    scope='function',
    params=PNGS,
)
def path_n_bytes_image_pngs(request):
    """Parametrized fixture: (path, raw bytes) for every PNG test image."""
    fpath = request.param
    with open(fpath, 'rb') as fobj:
        raw = fobj.read()
    return fpath, raw
@pytest.fixture(scope='function')
def png_img():
    # An Img loaded from the first PNG asset; fresh instance per test.
    return images.Img.from_path(PNGS[0])
def test_get_cairo_surface(path_n_bytes_image):
    """get_cairo_surface builds an ImageSurface and reports the file type."""
    fpath, raw = path_n_bytes_image
    surf_info = images.get_cairo_surface(raw)
    assert isinstance(surf_info.surface, cairocffi.ImageSurface)
    # Detected type must match the file extension.
    extension = fpath.rsplit('.', 1)[-1].lower()
    assert extension == surf_info.file_type
def test_get_cairo_surface_bad_input():
    """Garbage bytes must raise LoadingError rather than crash in cairo."""
    with pytest.raises(images.LoadingError):
        images.get_cairo_surface(b'asdfasfdi3')
def assert_approx_equal(vec0, vec1):
    """Assert elementwise approximate equality of two numeric vectors."""
    for left, right in zip(vec0, vec1):
        assert left == pytest.approx(right)
class TestImg:
    """Behavioral tests for images.Img: surface caching, equality, and pattern transforms."""
    def test_init(self, path_n_bytes_image):
        """The cairo surface is rebuilt after its cached value is deleted."""
        path, bytes_image = path_n_bytes_image
        img = images.Img(bytes_image)
        assert isinstance(img.surface, cairocffi.ImageSurface)
        # Deleting the cached surface must trigger a fresh rebuild on access.
        del img.surface
        assert isinstance(img.surface, cairocffi.ImageSurface)
    def test_from_path(self, path_n_bytes_image):
        """Img.from_path yields an equal image; equality tracks theta."""
        path, bytes_image = path_n_bytes_image
        img = images.Img(bytes_image)
        assert isinstance(img.surface, cairocffi.ImageSurface)
        img2 = img.from_path(path)
        assert img == img2
        # Changing theta breaks equality; restoring it restores equality.
        img2.theta = 90.0
        assert img != img2
        img2.theta = 0.0
        assert img == img2
    def test_setting(self, png_img):
        """Setting width/height/theta invalidates the cached pattern each time."""
        img = png_img
        width0, height0 = img.default_size
        pat0 = img.pattern
        img.width = width0 + 3
        assert pat0 != img.pattern
        assert img.width == (width0 + 3)
        pat1 = img.pattern
        img.height = height0 + 7
        assert img.height == (height0 + 7)
        assert img.pattern != pat0
        assert img.pattern != pat1
        pat2 = img.pattern
        img.theta = -35.0
        # Rotation must produce a pattern distinct from all earlier ones.
        assert img.pattern != pat0
        assert img.pattern != pat1
        assert img.pattern != pat2
        assert img.theta == pytest.approx(-35.0)
    def test_equality(self, png_img):
        """The same numeric change on different dimensions compares unequal."""
        width0, height0 = png_img.default_size
        png_img2 = images.Img.from_path(png_img.path)
        assert png_img == png_img2
        png_img.width = width0 * 2
        png_img2.height = width0 * 2
        assert png_img != png_img2
    def test_setting_negative_size(self, png_img):
        """Non-positive sizes are clamped to a minimum of 1."""
        png_img.width = -90
        assert png_img.width == 1
        png_img.height = 0
        assert png_img.height == 1
    def test_pattern(self, path_n_bytes_image):
        """Every supported image exposes a cairo SurfacePattern."""
        path, bytes_image = path_n_bytes_image
        img = images.Img(bytes_image)
        assert isinstance(img.pattern, cairocffi.SurfacePattern)
    def test_pattern_resize(self, path_n_bytes_image_pngs):
        """Resizing scales the pattern matrix by the inverse growth factor."""
        path, bytes_image = path_n_bytes_image_pngs
        img = images.Img.from_path(path)
        assert isinstance(img.pattern, cairocffi.SurfacePattern)
        t_matrix = img.pattern.get_matrix().as_tuple()
        assert_approx_equal(t_matrix, (1.0, 0.0, 0.0, 1.0))
        # Doubling the width halves the x scale entry.
        img.width = 2.0 * img.default_size.width
        t_matrix = img.pattern.get_matrix().as_tuple()
        assert_approx_equal(t_matrix, (0.5, 0.0, 0.0, 1.0))
        # Tripling the height scales the y entry by one third.
        img.height = 3.0 * img.default_size.height
        t_matrix = img.pattern.get_matrix().as_tuple()
        assert_approx_equal(t_matrix, (0.5, 0.0, 0.0, 1.0/3.0))
    def test_pattern_rotate(self, path_n_bytes_image):
        """theta rotates the pattern matrix; deleting theta resets to 0."""
        path, bytes_image = path_n_bytes_image
        img = images.Img(bytes_image)
        img.theta = 90.0
        assert img.theta == 90.0
        t_matrix = img.pattern.get_matrix().as_tuple()
        assert_approx_equal(t_matrix, (0.0, 1.0, -1.0, 0.0))
        img.theta = 45.0
        t_matrix = img.pattern.get_matrix().as_tuple()
        from math import sqrt
        s2o2 = sqrt(2) / 2.0
        # At 45 degrees all rotation-matrix entries are +/- sqrt(2)/2.
        assert_approx_equal(t_matrix, (s2o2, s2o2, -s2o2, s2o2))
        del img.theta
        assert img.theta == pytest.approx(0.0)
class TestImgScale:
    """Exercise Img.scale() factor handling and aspect-ratio locking."""
    def test_scale(self, png_img):
        base = png_img.default_size
        png_img.scale(2, 3)
        assert png_img.width == base.width * 2
        assert png_img.height == base.height * 3
    def test_scale_rounding(self, png_img):
        base = png_img.default_size
        # Factors just shy of an integer should round up to it.
        png_img.scale(1.99999, 2.99999)
        assert png_img.width == base.width * 2
        assert png_img.height == base.height * 3
    def test_scale_width_lock(self, png_img):
        base = png_img.default_size
        png_img.scale(width_factor=10, lock_aspect_ratio=True)
        assert png_img.width == base.width * 10
        assert png_img.height == base.height * 10
    def test_scale_height_lock(self, png_img):
        base = png_img.default_size
        png_img.scale(height_factor=11, lock_aspect_ratio=True)
        assert png_img.height == base.height * 11
        assert png_img.width == base.width * 11
    def test_scale_fail_lock(self, png_img):
        # Conflicting factors with a locked aspect ratio must be rejected.
        with pytest.raises(ValueError):
            png_img.scale(0.5, 4.0, lock_aspect_ratio=True)
    def test_scale_fail(self, png_img):
        # At least one factor is required.
        with pytest.raises(ValueError):
            png_img.scale()
class TestImgResize:
    """Exercise Img.resize() absolute sizing and aspect preservation."""
    def test_resize(self, png_img):
        png_img.resize(100, 100)
        assert png_img.width == 100
        assert png_img.height == 100
    def test_resize_width(self, png_img):
        original = png_img.default_size
        aspect = original.width / original.height
        png_img.resize(width=40)
        assert png_img.width == 40
        # Height follows to preserve the original aspect ratio.
        assert (png_img.width / png_img.height) == pytest.approx(aspect)
    def test_resize_height(self, png_img):
        original = png_img.default_size
        aspect = original.width / original.height
        png_img.resize(height=10)
        assert png_img.height == 10
        # Width follows to preserve the original aspect ratio.
        assert (png_img.width / png_img.height) == pytest.approx(aspect)
class TestGetMatchingFiles:
    """Tests for images.get_matching_files name-to-path resolution."""
    def test_audio_volume_muted(self):
        name = 'audio-volume-muted'
        found = images.get_matching_files(DATA_DIR, False, name)[name]
        # Both the png and the svg variant should be discovered.
        assert len(found) == 2
        assert path.join(DATA_DIR, 'png', 'audio-volume-muted.png') in found
        assert path.join(DATA_DIR, 'svg', 'audio-volume-muted.svg') in found
    def test_only_svg(self):
        name = 'audio-volume-muted.svg'
        # With explicit filetypes only the exact svg file matches.
        found = images.get_matching_files(DATA_DIR, True, name)[name]
        assert len(found) == 1
        assert path.join(DATA_DIR, 'svg', 'audio-volume-muted.svg') in found
    def test_multiple(self):
        expected_counts = OrderedDict()
        expected_counts['audio-volume-muted'] = 2
        expected_counts['battery-caution-charging'] = 1
        found = images.get_matching_files(DATA_DIR, False, *expected_counts)
        for name, count in expected_counts.items():
            assert len(found[name]) == count
class TestLoader:
    """Tests for the images.Loader directory-search front end."""
    @pytest.fixture(scope='function')
    def loader(self):
        svg_dir = path.join(DATA_DIR, 'svg')
        png_dir = path.join(DATA_DIR, 'png')
        # svg directory is listed first.
        return images.Loader(svg_dir, png_dir)
    def test_audio_volume_muted(self, loader):
        name = 'audio-volume-muted'
        loaded = loader(name)[name]
        assert isinstance(loaded, images.Img)
        assert loaded.path.endswith('.svg')
    def test_audio_volume_muted_png(self, loader):
        name = 'audio-volume-muted.png'
        loader.explicit_filetype = True
        loaded = loader(name)[name]
        assert isinstance(loaded, images.Img)
        assert loaded.path.endswith('.png')
    def test_load_file_missing(self, loader):
        # One bogus name in the batch must abort the whole load.
        names = ('audio-asdlfjasdvolume-muted', 'audio-volume-muted')
        with pytest.raises(images.LoadingError):
            loader(*names)
| 32.37594
| 71
| 0.639921
|
4a0738a624b479ef3a5fe1d150426f95a30a5156
| 800
|
py
|
Python
|
benchmarks/versionedhdf5file.py
|
asmeurer/versioned-hdf5
|
c5c661b920a2d38e440da4c5ff0a6148379d93d2
|
[
"BSD-3-Clause"
] | 46
|
2020-06-12T21:44:17.000Z
|
2022-02-28T15:26:19.000Z
|
benchmarks/versionedhdf5file.py
|
asmeurer/versioned-hdf5
|
c5c661b920a2d38e440da4c5ff0a6148379d93d2
|
[
"BSD-3-Clause"
] | 135
|
2020-06-12T15:48:05.000Z
|
2022-03-29T21:54:45.000Z
|
benchmarks/versionedhdf5file.py
|
asmeurer/versioned-hdf5
|
c5c661b920a2d38e440da4c5ff0a6148379d93d2
|
[
"BSD-3-Clause"
] | 9
|
2021-04-23T03:58:13.000Z
|
2022-03-19T19:30:50.000Z
|
import h5py
import numpy as np
from versioned_hdf5 import VersionedHDF5File
class TimeDatetimeAccess:
    """ASV-style benchmark: repeatedly look up a version by its timestamp."""
    def setup(self):
        # Stage 100 versions of a small dataset, then remember the
        # timestamp attribute of version '50' for the timed lookup below.
        with h5py.File('foo.h5', 'w') as f:
            vf = VersionedHDF5File(f)
            with vf.stage_version('0') as sv:
                sv.create_dataset('bar', data=np.random.rand(10))
            for i in range(1, 100):
                with vf.stage_version(str(i)) as sv:
                    sv['bar'][:] = np.random.rand(10)
            self.dt = np.datetime64(vf[str(50)].attrs['timestamp'])
    def time_version_by_datetime(self):
        # Based on https://github.com/deshaw/versioned-hdf5/issues/170
        # Time 100 datetime-keyed version lookups plus a full data read.
        with h5py.File('foo.h5', 'r') as f:
            vf = VersionedHDF5File(f)
            for _ in range(100):
                _ = vf[self.dt]['bar'][:]
| 33.333333
| 70
| 0.5625
|
4a073928d9941f359edd22cf84083a110eec0789
| 12,067
|
py
|
Python
|
searchlight-6.0.0/searchlight/common/policies/resource.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | null | null | null |
searchlight-6.0.0/searchlight/common/policies/resource.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
searchlight-6.0.0/searchlight/common/policies/resource.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from searchlight.common.policies import base
RESOURCE = 'resource:OS::%s'

# Every searchable resource exposes the same four Searchlight API
# operations, so the list is declared once and copied into each rule.
_SEARCH_OPERATIONS = [
    {
        'path': '/v1/search',
        'method': 'POST'
    },
    {
        'path': '/v1/search',
        'method': 'GET'
    },
    {
        'path': '/v1/search/plugins',
        'method': 'GET'
    },
    {
        'path': '/v1/search/facets',
        'method': 'GET'
    }
]


def _resource_rule(resource, check_str=base.RULE_ADMIN_OR_OWNER):
    """Build the policy rule for one searchable resource type.

    :param resource: short resource name, e.g. 'Glance::Image'.
    :param check_str: oslo.policy check string; defaults to
        admin-or-owner, matching the per-resource rules this replaces.
    :returns: a DocumentedRuleDefault covering the four search endpoints.
    """
    return policy.DocumentedRuleDefault(
        name=RESOURCE % resource,
        check_str=check_str,
        # e.g. 'Query with Glance Image resource.'
        description='Query with %s resource.' % resource.replace('::', ' '),
        # Copy the dicts so no rule shares mutable state with another.
        operations=[dict(op) for op in _SEARCH_OPERATIONS],
    )


rules = [
    _resource_rule('Glance::Image'),
    _resource_rule('Glance::Metadef'),
    _resource_rule('Nova::Server'),
    # Hypervisor details are admin-only, unlike every other resource.
    _resource_rule('Nova::Hypervisor', check_str="rule:context_is_admin"),
    _resource_rule('Nova::ServerGroup'),
    _resource_rule('Nova::Flavor'),
    _resource_rule('Cinder::Volume'),
    _resource_rule('Cinder::Snapshot'),
    _resource_rule('Designate::Zone'),
    _resource_rule('Designate::RecordSet'),
    _resource_rule('Neutron::Net'),
    _resource_rule('Neutron::Port'),
    _resource_rule('Neutron::Subnet'),
    _resource_rule('Neutron::Router'),
    _resource_rule('Neutron::SecurityGroup'),
    _resource_rule('Ironic::Chassis'),
    _resource_rule('Ironic::Node'),
    _resource_rule('Ironic::Port'),
]
def list_rules():
    """Return the resource-level policy rules for oslo.policy registration."""
    return rules
| 27.425
| 78
| 0.391978
|
4a07396ab932e4250a64e367bc840016a9a408f1
| 1,381
|
py
|
Python
|
scripts/camera.py
|
Irvingao/paddle_inference_ros
|
926b1f2e16c2cd3b00ad8b9928f04297887befc0
|
[
"MIT"
] | 2
|
2021-08-14T06:15:09.000Z
|
2021-12-29T11:20:27.000Z
|
scripts/camera.py
|
Irvingao/paddle_inference_ros
|
926b1f2e16c2cd3b00ad8b9928f04297887befc0
|
[
"MIT"
] | 2
|
2021-08-15T11:39:17.000Z
|
2021-09-04T15:44:30.000Z
|
scripts/camera.py
|
Irvingao/paddle_inference_ros
|
926b1f2e16c2cd3b00ad8b9928f04297887befc0
|
[
"MIT"
] | 1
|
2021-08-15T13:02:07.000Z
|
2021-08-15T13:02:07.000Z
|
#!/usr/bin/env python3
# coding:utf-8
import cv2
import numpy as np
import rospy
from std_msgs.msg import Header
from sensor_msgs.msg import Image
from cv_bridge import CvBridge , CvBridgeError
import time
if __name__=="__main__":
    import sys
    print(sys.version)  # show which Python interpreter is running
    capture = cv2.VideoCapture(0)  # open the default camera device
    rospy.init_node('camera_node', anonymous=True)  # create the ROS node
    image_pub=rospy.Publisher('/image_view/image_raw', Image, queue_size = 1)  # image topic
    # Create the rate limiter ONCE, outside the loop. The original code
    # constructed a new rospy.Rate(25) on every iteration and never slept
    # on it, so the loop ran unthrottled.
    rate = rospy.Rate(25)
    while not rospy.is_shutdown():  # Ctrl-C exits cleanly; abnormal exit leaves the device busy
        start = time.time()
        ret, frame = capture.read()
        if ret:  # only publish when a frame was actually captured
            # frame = cv2.flip(frame,0)  # vertical mirror (disabled)
            frame = cv2.flip(frame,1)  # horizontal mirror
            ros_frame = Image()
            header = Header(stamp = rospy.Time.now())
            header.frame_id = "Camera"
            ros_frame.header=header
            # assumes the camera delivers 640x480 BGR frames — TODO confirm
            ros_frame.width = 640
            ros_frame.height = 480
            ros_frame.encoding = "bgr8"
            ros_frame.step = 1920
            # tobytes() replaces the deprecated numpy tostring()
            ros_frame.data = np.array(frame).tobytes()
            image_pub.publish(ros_frame)
            end = time.time()
            print("cost time:", end-start )  # per-frame processing time
        rate.sleep()  # throttle the publish loop to 25 Hz
    capture.release()
    cv2.destroyAllWindows()
    print("quit successfully!")
| 32.880952
| 83
| 0.614772
|
4a0739ab428e6584fe19555f36d3bf35b2ed13fe
| 1,511
|
py
|
Python
|
mimicro/views.py
|
pokidovea/py-mimicro
|
f80db25e15170bfa6bf58e96d978043205b3a423
|
[
"Apache-2.0"
] | null | null | null |
mimicro/views.py
|
pokidovea/py-mimicro
|
f80db25e15170bfa6bf58e96d978043205b3a423
|
[
"Apache-2.0"
] | null | null | null |
mimicro/views.py
|
pokidovea/py-mimicro
|
f80db25e15170bfa6bf58e96d978043205b3a423
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, Callable
from aiohttp import web
from .store import overrides_storage, requests_storage
async def add_override(request):
    """Register a response override from the JSON request body."""
    payload = await request.json()
    await overrides_storage.add(**payload)
    return web.json_response('OK')
async def remove_override(request):
    """Delete a previously registered override named in the JSON body."""
    payload = await request.json()
    await overrides_storage.remove(**payload)
    return web.json_response('OK')
async def get_requests(request):
    """Return every recorded request as a JSON list of dicts."""
    recorded = [entry.as_dict for entry in requests_storage]
    return web.json_response(recorded)
def get_handler(resource: Dict, response: Dict) -> Callable:
    """Build an aiohttp handler for one configured resource response.

    The handler serves either the configured response or, when one is
    registered for the host/resource/method triple, an override; either
    way the full exchange is recorded for later inspection.
    """
    async def handler(request):
        # Path params merged with the query string; query values win on
        # key collision.
        params = {
            **request.match_info,
            **request.query,
        }
        override = await overrides_storage.get(request.host, resource['name'], response['method'])
        if override:
            # NOTE(review): format(**params) raises KeyError if the content
            # template references a placeholder not in params — confirm
            # templates are validated elsewhere.
            content = override.content.format(**params)
            status = override.status
        else:
            content = response['content'].format(**params)
            status = response['status']
        # Persist the exchange so it is visible via get_requests().
        await requests_storage.add(
            domain=request.host,
            resource=resource['name'],
            method=response['method'],
            path=request.path,
            payload=await request.text(),
            request_headers=request.headers,
            request_params=request.query,
            response=content,
            status=status,
        )
        return web.Response(text=content, status=status)
    return handler
| 24.770492
| 98
| 0.630046
|
4a073a9d3f11e74966e99c9401bb3915b6d673fc
| 5,387
|
py
|
Python
|
yt/visualization/volume_rendering/utils.py
|
cevans216/yt
|
c19c3c615b996c8a6e418362ffea9041a616d673
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/visualization/volume_rendering/utils.py
|
cevans216/yt
|
c19c3c615b996c8a6e418362ffea9041a616d673
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/visualization/volume_rendering/utils.py
|
cevans216/yt
|
c19c3c615b996c8a6e418362ffea9041a616d673
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import numpy as np
from yt.data_objects.data_containers import YTSelectionContainer3D
from yt.data_objects.static_output import Dataset
from yt.utilities.lib import bounding_volume_hierarchy
from yt.utilities.lib.image_samplers import (
InterpolatedProjectionSampler,
ProjectionSampler,
VolumeRenderSampler,
)
from yt.utilities.on_demand_imports import NotAModule
try:
from yt.utilities.lib.embree_mesh import mesh_traversal
# Catch ValueError in case size of objects in Cython change
except (ImportError, ValueError):
mesh_traversal = NotAModule("pyembree")
def data_source_or_all(data_source):
    """Normalize *data_source*: a Dataset is replaced by its all_data()
    container; None and 3D containers pass through; anything else raises.
    """
    if isinstance(data_source, Dataset):
        data_source = data_source.all_data()
    if isinstance(data_source, (YTSelectionContainer3D, type(None))):
        return data_source
    raise RuntimeError(
        "The data_source is not a valid 3D data container.\n"
        "Expected an object of type YTSelectionContainer3D but received "
        "an object of type %s." % type(data_source)
    )
def new_mesh_sampler(camera, render_source, engine):
    """Create a mesh sampler for *camera* and *render_source*.

    :param engine: 'embree' for the pyembree traversal engine, 'yt' for
        the built-in BVH engine.
    :raises ValueError: for an unknown engine name (previously this fell
        through and raised UnboundLocalError on the return).
    """
    params = ensure_code_unit_params(camera._get_sampler_params(render_source))
    args = (
        np.atleast_3d(params["vp_pos"]),
        np.atleast_3d(params["vp_dir"]),
        params["center"],
        params["bounds"],
        # The image buffer must be 3D float64 for the samplers.
        np.atleast_3d(params["image"]).astype("float64"),
        params["x_vec"],
        params["y_vec"],
        params["width"],
    )
    kwargs = {"lens_type": params["lens_type"]}
    if engine == "embree":
        sampler = mesh_traversal.EmbreeMeshSampler(*args, **kwargs)
    elif engine == "yt":
        sampler = bounding_volume_hierarchy.BVHMeshSampler(*args, **kwargs)
    else:
        raise ValueError(
            "Invalid mesh sampler engine %r; expected 'embree' or 'yt'" % (engine,)
        )
    return sampler
def new_volume_render_sampler(camera, render_source):
    """Build a VolumeRenderSampler for *camera* and *render_source*.

    Wires in the source's transfer function and sample count and, when the
    source carries a z-buffer, seeds the image array with its rgba data.
    """
    params = ensure_code_unit_params(camera._get_sampler_params(render_source))
    # The original updated transfer_function twice in a row; once suffices.
    params.update(transfer_function=render_source.transfer_function)
    params.update(num_samples=render_source.num_samples)
    args = (
        np.atleast_3d(params["vp_pos"]),
        np.atleast_3d(params["vp_dir"]),
        params["center"],
        params["bounds"],
        params["image"],
        params["x_vec"],
        params["y_vec"],
        params["width"],
        params["transfer_function"],
        params["num_samples"],
    )
    kwargs = {"lens_type": params["lens_type"]}
    if "camera_data" in params:
        kwargs["camera_data"] = params["camera_data"]
    if render_source.zbuffer is not None:
        kwargs["zbuffer"] = render_source.zbuffer.z
        # Seed the image (args[4]) with the z-buffer's rgba channels.
        args[4][:] = np.reshape(
            render_source.zbuffer.rgba[:],
            (camera.resolution[0], camera.resolution[1], 4),
        )
    else:
        kwargs["zbuffer"] = np.ones(params["image"].shape[:2], "float64")
    sampler = VolumeRenderSampler(*args, **kwargs)
    return sampler
def new_interpolated_projection_sampler(camera, render_source):
    """Build an InterpolatedProjectionSampler for *camera*/*render_source*."""
    params = ensure_code_unit_params(camera._get_sampler_params(render_source))
    params.update(transfer_function=render_source.transfer_function)
    params.update(num_samples=render_source.num_samples)
    args = (
        np.atleast_3d(params["vp_pos"]),
        np.atleast_3d(params["vp_dir"]),
        params["center"],
        params["bounds"],
        params["image"],
        params["x_vec"],
        params["y_vec"],
        params["width"],
        params["num_samples"],
    )
    # Use the render source's z-buffer when present; otherwise a unit buffer.
    if render_source.zbuffer is None:
        depth = np.ones(params["image"].shape[:2], "float64")
    else:
        depth = render_source.zbuffer.z
    return InterpolatedProjectionSampler(
        *args, lens_type=params["lens_type"], zbuffer=depth
    )
def new_projection_sampler(camera, render_source):
    """Build a plain ProjectionSampler for *camera*/*render_source*."""
    params = ensure_code_unit_params(camera._get_sampler_params(render_source))
    params.update(transfer_function=render_source.transfer_function)
    params.update(num_samples=render_source.num_samples)
    args = (
        np.atleast_3d(params["vp_pos"]),
        np.atleast_3d(params["vp_dir"]),
        params["center"],
        params["bounds"],
        params["image"],
        params["x_vec"],
        params["y_vec"],
        params["width"],
        params["num_samples"],
    )
    # Use the render source's z-buffer when present; otherwise a unit buffer.
    if render_source.zbuffer is None:
        depth = np.ones(params["image"].shape[:2], "float64")
    else:
        depth = render_source.zbuffer.z
    return ProjectionSampler(*args, lens_type=params["lens_type"], zbuffer=depth)
def get_corners(le, re):
    """Return the 8 corners of the box spanned by left edge *le* and right
    edge *re* as an (8, 3) float64 array: the four corners at z = le[2]
    first, then the same x/y corners at z = re[2].
    """
    bottom = [
        [le[0], le[1], le[2]],
        [re[0], le[1], le[2]],
        [re[0], re[1], le[2]],
        [le[0], re[1], le[2]],
    ]
    top = [[x, y, re[2]] for x, y, _ in bottom]
    return np.array(bottom + top, dtype="float64")
def ensure_code_unit_params(params):
    """Convert the camera geometry entries of *params* to code_length.

    Values exposing in_units()/units (yt unit arrays) are converted;
    plain unitless values pass through untouched. Mutates and returns
    *params*.
    """
    for key in ("center", "vp_pos", "vp_dir", "width"):
        value = params[key]
        if hasattr(value, "in_units"):
            params[key] = value.in_units("code_length")
    bounds = params["bounds"]
    if hasattr(bounds[0], "units"):
        params["bounds"] = tuple(b.in_units("code_length").d for b in bounds)
    return params
| 34.094937
| 79
| 0.641173
|
4a073b36a55d86b4c4f501eb9b6ce83f9000ecab
| 7,651
|
py
|
Python
|
scrips/porphyrin/porphyrin_library.py
|
lonelu/Metalprot
|
e51bee472c975aa171bdb6ee426a07ca69f110ee
|
[
"MIT"
] | null | null | null |
scrips/porphyrin/porphyrin_library.py
|
lonelu/Metalprot
|
e51bee472c975aa171bdb6ee426a07ca69f110ee
|
[
"MIT"
] | null | null | null |
scrips/porphyrin/porphyrin_library.py
|
lonelu/Metalprot
|
e51bee472c975aa171bdb6ee426a07ca69f110ee
|
[
"MIT"
] | null | null | null |
import prody as pr
import numpy as np
import os
from metalprot import ligand_database
### Default selection
ligands = ['HEM', 'HNI', 'COH', 'HEB', 'FDE', 'ZNH', 'HEC', 'HEA', 'HAS', 'MNH', 'MNR']
ligand_sel = 'resname ' + ' '.join(ligands)
metal_sel = 'name NI MN ZN CO CU MG FE'
def extract_core(pdb, extend = 4):
    '''
    Extract metal-binding cores around porphyrin ligands.

    For each porphyrin ligand residue in *pdb*, locate its metal atom and
    every protein N/O/S atom within 2.83 A of it; each contacting residue
    (grown via ligand_database.extend_res_indices with *extend*) plus the
    metal becomes one (title, selection) tuple in the returned list.
    Returns None when the structure has no porphyrin ligand.
    '''
    metal_cores = []
    _lgd = pdb.select(ligand_sel)
    if not _lgd:
        #print('Failed no ligand: ' + pdb.getTitle())
        return
    count = 0
    for resind in np.unique(_lgd.getResindices()):
        # Metal atom belonging to this ligand residue, if any.
        mt = _lgd.select('resindex ' + str(resind) + ' and ' + metal_sel)
        if not mt:
            #print('Failed no metal: ' + pdb.getTitle())
            continue
        # NOTE(review): these are substring checks against the selection
        # strings (e.g. a name like 'N' is "in" metal_sel) — confirm this
        # filter is intended rather than membership in explicit lists.
        if mt[0].getName() not in metal_sel or mt[0].getResname() not in ligand_sel:
            #print(mt.getIndices())
            #print(mt.getNames())
            #print('Failed others: ' + pdb.getTitle())
            continue
        ni = mt[0]
        ni_index = ni.getIndex()
        #all_near = pdb_prody.select('nitrogen or oxygen or sulfur').select('not water and within 2.83 of index ' + str(ni_index))
        # Protein atoms within metal-coordination distance (2.83 A).
        all_near = pdb.select('protein and within 2.83 of index ' + str(ni_index))
        if not all_near or not all_near.select('nitrogen or oxygen or sulfur'):
            print('Not find: ' + pdb.getTitle())
            continue
        ind_check = set()
        for a_n in all_near.select('nitrogen or oxygen or sulfur'):
            ind = a_n.getResindex()
            if ind in ind_check:
                # Already produced a core for this contacting residue.
                continue
            ind_check.add(ind)
            ext_inds = ligand_database.extend_res_indices([ind], pdb, extend)
            count += 1
            # Core = extended contact residues plus the metal's residue.
            sel_pdb_prody = pdb.select('resindex ' + ' '.join([str(ind) for ind in ext_inds]) + ' '+ str(ni.getResindex()))
            metal_cores.append((pdb.getTitle() + '_' + ni.getResname() + '_'+ str(count), sel_pdb_prody))
    return metal_cores
# --- Pipeline: load all pdbs and extract metal-contact cores. ---
workdir = '/mnt/e/DesignData/ligands/porphyrin/pdbs/all_pdbs/'
pdbs = []
for path in os.listdir(workdir):
    if '.pdb' not in path:
        continue
    pdb = pr.parsePDB(workdir + path)
    pdbs.append(pdb)
all_cores = []
for pdb in pdbs:
    cores = extract_core(pdb)
    if not cores:
        continue
    all_cores.extend(cores)
# Superimpose every core onto the first one and write them to disk.
outdir = '/mnt/e/DesignData/ligands/porphyrin/pdbs/contact_cores/'
ligand_database.superimpose_core_and_writepdb(all_cores, all_cores[0], metal_sel, outdir)
#core_pdbs = [c[1] for c in all_cores]
# Re-read the written cores from disk instead of reusing the selections.
core_pdbs =[]
for path in os.listdir(outdir):
    if '.pdb' not in path:
        continue
    pdb = pr.parsePDB(outdir + path)
    core_pdbs.append(pdb)
# Deduplicate, keep representatives, and record a duplication summary.
clusters = ligand_database.reduce_dup(core_pdbs, metal_sel)
outdir = '/mnt/e/DesignData/ligands/porphyrin/pdbs/contact_core_reps/'
ligand_database.extract_rep_and_writepdb(core_pdbs, clusters, metal_sel, outdir)
ligand_database.write_dup_summary(outdir, core_pdbs, clusters)
### superimpose on the 'metal->bb(N CA C)'
# Load the representative cores back for backbone-based alignment below.
core_pdb_reps = []
for path in os.listdir(outdir):
    if '.pdb' not in path:
        continue
    pdb = pr.parsePDB(outdir + path)
    core_pdb_reps.append(pdb)
def porphyrin_superimpose_sel(pdb, metal_sel):
    """Select the metal atom plus the N/CA/C backbone atoms of its first
    coordinating residue, for use as a superposition reference."""
    metal_index = pdb.select(metal_sel)[0].getIndex()
    contact_res = pdb.select('protein and within 2.83 of index ' + str(metal_index))[0].getResindex()
    backbone_inds = pdb.select('name N CA C and resindex ' + str(contact_res)).getIndices()
    index_tokens = [str(metal_index)] + [str(i) for i in backbone_inds]
    return pdb.select('index ' + ' '.join(index_tokens))
def superimpose_and_writepdb(pdbs, outdir, metal_sel):
    '''
    Superimpose on the metal->bb(N CA C)

    Aligns every structure's metal+backbone selection onto the first
    structure's selection and writes each structure into *outdir*.
    '''
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    first = pdbs[0]
    first_sel = porphyrin_superimpose_sel(first, metal_sel)
    for pdb in pdbs:
        pdb_sel = porphyrin_superimpose_sel(pdb, metal_sel)
        if len(first_sel) != len(pdb_sel):
            # Different atom counts cannot be paired for superposition.
            print('Failed superimpose: ' + pdb.getTitle())
            continue
        # NOTE(review): the transformation is applied only to pdb_sel,
        # yet the whole pdb is written below — confirm the remaining
        # atoms are intended to keep their original coordinates.
        pr.calcTransformation(pdb_sel, first_sel).apply(pdb_sel)
        pr.writePDB(outdir + pdb.getTitle(), pdb)
outdir = '/mnt/e/DesignData/ligands/porphyrin/pdbs/contact_core_reps_aligned/'
superimpose_and_writepdb(core_pdb_reps, outdir, metal_sel)
###########################################################################
def extract_water_core(pdb, extend = 4):
    '''
    Extract cores coordinated through a bridging water molecule.

    For each porphyrin ligand metal, find waters within 2.83 A; for each
    such water, collect protein N/O/S atoms within 3.4 A and build one
    (title, selection) tuple per contacting residue, including the metal
    and the water residues. Returns None when no ligand is present.
    '''
    metal_cores = []
    _lgd = pdb.select(ligand_sel)
    if not _lgd:
        #print('Failed no ligand: ' + pdb.getTitle())
        return
    count = 0
    for resind in np.unique(_lgd.getResindices()):
        # Metal atom belonging to this ligand residue, if any.
        mt = _lgd.select('resindex ' + str(resind) + ' and ' + metal_sel)
        if not mt:
            #print('Failed no metal: ' + pdb.getTitle())
            continue
        # NOTE(review): substring checks against the selection strings —
        # same caveat as extract_core above.
        if mt[0].getName() not in metal_sel or mt[0].getResname() not in ligand_sel:
            #print(mt.getIndices())
            #print(mt.getNames())
            #print('Failed others: ' + pdb.getTitle())
            continue
        ni = mt[0]
        ni_index = ni.getIndex()
        #all_near = pdb_prody.select('nitrogen or oxygen or sulfur').select('not water and within 2.83 of index ' + str(ni_index))
        # Waters directly coordinating the metal (2.83 A cutoff).
        all_water = pdb.select('water and within 2.83 of index ' + str(ni_index))
        if not all_water or not all_water.select('oxygen'):
            print('Not find: ' + pdb.getTitle())
            continue
        ind_check = set()
        for a_n in all_water.select('oxygen'):
            water_ind = a_n.getIndex()
            # Protein atoms hydrogen-bond distance (3.4 A) from the water.
            all_near_water = pdb.select('protein and within 3.4 of index ' + str(water_ind))
            if not all_near_water or not all_near_water.select('nitrogen or oxygen or sulfur'):
                continue
            for w_a_n in all_near_water.select('nitrogen or oxygen or sulfur'):
                ind = w_a_n.getResindex()
                if ind in ind_check:
                    # Already produced a core for this contacting residue.
                    continue
                ind_check.add(ind)
                ext_inds = ligand_database.extend_res_indices([ind], pdb, extend)
                count += 1
                # Core = extended contact residues + metal residue + water.
                sel_pdb_prody = pdb.select('resindex ' + ' '.join([str(ind) for ind in ext_inds]) + ' '+ str(ni.getResindex()) + ' '+ str(a_n.getResindex()) )
                metal_cores.append((pdb.getTitle() + '_' + ni.getResname() + '_'+ str(count), sel_pdb_prody))
    return metal_cores
# --- Pipeline: same as above but for water-bridged cores. ---
workdir = '/mnt/e/DesignData/ligands/porphyrin/pdbs/all_pdbs/'
pdbs = []
for path in os.listdir(workdir):
    if '.pdb' not in path:
        continue
    pdb = pr.parsePDB(workdir + path)
    pdbs.append(pdb)
all_water_cores = []
for pdb in pdbs:
    cores = extract_water_core(pdb)
    if not cores:
        continue
    all_water_cores.extend(cores)
# NOTE(review): 'wather' looks like a typo in the directory name, but it
# is a runtime path — left unchanged so existing data stays reachable.
outdir = '/mnt/e/DesignData/ligands/porphyrin/pdbs/contact_wather_cores/'
ligand_database.superimpose_core_and_writepdb(all_water_cores, all_water_cores[0], metal_sel, outdir)
#core_pdbs = [c[1] for c in all_cores]
# Re-read the written water cores from disk.
core_water_pdbs =[]
for path in os.listdir(outdir):
    if '.pdb' not in path:
        continue
    pdb = pr.parsePDB(outdir + path)
    core_water_pdbs.append(pdb)
# Deduplicate, keep representatives, and record a duplication summary.
water_clusters = ligand_database.reduce_dup(core_water_pdbs, metal_sel)
outdir = '/mnt/e/DesignData/ligands/porphyrin/pdbs/contact_water_core_reps/'
ligand_database.extract_rep_and_writepdb(core_water_pdbs, water_clusters, metal_sel, outdir)
ligand_database.write_dup_summary(outdir, core_water_pdbs, water_clusters)
| 31.101626
| 158
| 0.62567
|
4a073ba84b8d2c5ef58afc4d7633092fafe9203b
| 239
|
py
|
Python
|
GUI/main.py
|
JasonJarvan/EAFT
|
5e252e2d70b76f7a8a015fea4d8840de85d78b88
|
[
"MIT"
] | 1
|
2021-02-02T09:18:20.000Z
|
2021-02-02T09:18:20.000Z
|
GUI/main.py
|
JasonJarvan/EAFT
|
5e252e2d70b76f7a8a015fea4d8840de85d78b88
|
[
"MIT"
] | null | null | null |
GUI/main.py
|
JasonJarvan/EAFT
|
5e252e2d70b76f7a8a015fea4d8840de85d78b88
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5.QtWidgets import QApplication
from GUI import Window
if __name__=='__main__':
    # Build the Qt application, show the main window, then hand control
    # to the event loop and exit with its return code.
    application = QApplication(sys.argv)
    main_window = Window()
    main_window.show()
    sys.exit(application.exec_())
| 14.058824
| 48
| 0.694561
|
4a073bae956a33e6e55d903524f2bbd7ca9c777f
| 1,112
|
py
|
Python
|
backend/apps/api/v1/about/views.py
|
skiv23/portfolio
|
3c1a7b0cf0fb67148ce4b0491132e3a01375c9b0
|
[
"MIT"
] | null | null | null |
backend/apps/api/v1/about/views.py
|
skiv23/portfolio
|
3c1a7b0cf0fb67148ce4b0491132e3a01375c9b0
|
[
"MIT"
] | null | null | null |
backend/apps/api/v1/about/views.py
|
skiv23/portfolio
|
3c1a7b0cf0fb67148ce4b0491132e3a01375c9b0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from rest_framework import viewsets
from rest_framework import response
from apps.about import models
from . import serializers
class AboutViewSet(viewsets.ModelViewSet):
queryset = models.About.objects.all()
serializer_class = serializers.AboutSerializer
def list(self, *args, **kwargs):
serializer = self.get_serializer(self.get_object())
return response.Response(serializer.data)
def get_object(self):
return self.filter_queryset(self.get_queryset()).first()
class AboutInfoViewSet(viewsets.ModelViewSet):
queryset = models.AboutInfo.objects.all()
serializer_class = serializers.AboutInfoSerializer
class WhatIDoViewSet(viewsets.ModelViewSet):
queryset = models.WhatIDo.objects.all()
serializer_class = serializers.WhatIDoSerializer
class SkillViewSet(viewsets.ModelViewSet):
queryset = models.Skill.objects.all()
serializer_class = serializers.SkillSerializer
class TimelineViewSet(viewsets.ModelViewSet):
queryset = models.Timeline.objects.all()
serializer_class = serializers.TimelineSerializer
| 27.121951
| 64
| 0.763489
|
4a073be8e2ce9b703d9bc6e22e7d1e44e61548db
| 1,833
|
py
|
Python
|
riker/permission/simple.py
|
A-UNDERSCORE-D/riker
|
5257d6113a614e54696068b758275e59f71ddf51
|
[
"0BSD"
] | null | null | null |
riker/permission/simple.py
|
A-UNDERSCORE-D/riker
|
5257d6113a614e54696068b758275e59f71ddf51
|
[
"0BSD"
] | null | null | null |
riker/permission/simple.py
|
A-UNDERSCORE-D/riker
|
5257d6113a614e54696068b758275e59f71ddf51
|
[
"0BSD"
] | null | null | null |
from __future__ import annotations
from fnmatch import fnmatch
from irctokens.line import Line
from .base import BasePermissionHandler
# spell-checker: words oper
class SimplePermissionHandler(BasePermissionHandler):
"""SimplePermissionHandler implements a mask and oper based permission handler."""
def __init__(
self,
mask_permissions: dict[str, list[str]],
enable_oper: bool = True,
oper_permissions: dict[str, list[str]] | None = None,
) -> None:
self.mask_permissions: dict[str, list[str]] = mask_permissions
self.enable_oper = enable_oper
self.oper_permissions: dict[str, list[str]] = (
oper_permissions if oper_permissions is not None else {}
)
def _check_masks(self, to_check: str) -> list[str]:
out: list[str] = []
for mask in self.mask_permissions:
if fnmatch(to_check, mask):
out.extend(self.mask_permissions[mask])
return out
def _check_oper(self, oper_name: str) -> list[str]:
out = []
for name in self.oper_permissions:
if fnmatch(oper_name, name):
out.extend(self.oper_permissions[name])
return out
def check_permissions(self, line: Line) -> set[str]:
"""
Return the permissions the sender of a given line has.
:param line: The line to check
:return: a list of permission strings
"""
out: set[str] = set()
out.update(self._check_masks(str(line.hostmask)))
if self.enable_oper and line.tags is not None and "oper" in line.tags:
out.add("oper")
if line.tags["oper"] != "":
out.update(self._check_oper(line.tags["oper"]))
out.add(f'oper.{line.tags["oper"]}')
return out
| 28.640625
| 86
| 0.612111
|
4a073c4f9ebc2a167dff93d7740b930ef91ce28f
| 964
|
py
|
Python
|
aysa_cli/__init__.py
|
alejandrobernardis/aysa-cli
|
20163ea91b837d06be829617f1a0334748edbe64
|
[
"MIT"
] | 1
|
2019-10-30T16:41:21.000Z
|
2019-10-30T16:41:21.000Z
|
aysa_cli/__init__.py
|
alejandrobernardis/aysa-cli
|
20163ea91b837d06be829617f1a0334748edbe64
|
[
"MIT"
] | null | null | null |
aysa_cli/__init__.py
|
alejandrobernardis/aysa-cli
|
20163ea91b837d06be829617f1a0334748edbe64
|
[
"MIT"
] | 1
|
2021-06-09T03:58:23.000Z
|
2021-06-09T03:58:23.000Z
|
# Author: Alejandro M. Bernardis
# Email: alejandro.bernardis at gmail.com
# Created: 2019/10/12
# ~
__all__ = [
'__title__',
'__summary__',
'__uri__',
'__version__',
'__author__',
'__email__',
'__license__',
'__copyright__',
'__commands__',
'SEGMENT',
'VERSION'
]
# version
SEGMENT = 'dev'
VERSION = (1, 0, 0, SEGMENT, 0)
# doc
__title__ = 'aysa-cli'
__summary__ = 'Marco de trabajo para el despliegue de contenedores.'
__uri__ = 'https://github.com/alejandrobernardis/aysa-cli/'
__issues__ = 'https://github.com/alejandrobernardis/aysa-cli/issues/'
__version__ = '.'.join([str(x) for x in VERSION])
__author__ = 'Alejandro M. BERNARDIS and individual contributors.'
__email__ = 'alejandro.bernardis@gmail.com'
__license__ = 'MTI License, Version 2.0'
__copyright__ = 'Copyright 2019-% {}'.format(__author__)
__commands__ = 'https://github.com/alejandrobernardis/aysa-commands/archive' \
'/master.zip'
| 27.542857
| 78
| 0.686722
|
4a073c6156db5e9c8cba76e5d20317e5fa208011
| 34,710
|
py
|
Python
|
datasets/irs_990/irs_990_2014/irs_990_2014_dag.py
|
shanecglass/public-datasets-pipelines
|
b02f47a6d617a6864c78f56ce8247ccaceee8695
|
[
"Apache-2.0"
] | 2
|
2022-02-27T02:31:35.000Z
|
2022-02-27T02:32:49.000Z
|
datasets/irs_990/irs_990_2014/irs_990_2014_dag.py
|
shanecglass/public-datasets-pipelines
|
b02f47a6d617a6864c78f56ce8247ccaceee8695
|
[
"Apache-2.0"
] | null | null | null |
datasets/irs_990/irs_990_2014/irs_990_2014_dag.py
|
shanecglass/public-datasets-pipelines
|
b02f47a6d617a6864c78f56ce8247ccaceee8695
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.contrib.operators import gcs_to_bq, kubernetes_pod_operator
default_args = {
"owner": "Google",
"depends_on_past": False,
"start_date": "2021-03-01",
}
with DAG(
dag_id="irs_990.irs_990_2014",
default_args=default_args,
max_active_runs=1,
schedule_interval="@daily",
catchup=False,
default_view="graph",
) as dag:
# Run CSV transform within kubernetes pod
irs_990_transform_csv = kubernetes_pod_operator.KubernetesPodOperator(
task_id="irs_990_transform_csv",
startup_timeout_seconds=600,
name="irs_990_2014",
namespace="default",
affinity={
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{
"key": "cloud.google.com/gke-nodepool",
"operator": "In",
"values": ["pool-e2-standard-4"],
}
]
}
]
}
}
},
image_pull_policy="Always",
image="{{ var.json.irs_990.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "https://www.irs.gov/pub/irs-soi/14eofinextract990.zip",
"SOURCE_FILE": "files/data.zip",
"TARGET_FILE": "files/data_output.csv",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/irs_990/irs_990_2014/data_output.csv",
"PIPELINE_NAME": "irs_990_2015",
"CSV_HEADERS": '["ein","tax_pd","subseccd","s501c3or4947a1cd","schdbind","politicalactvtscd","lbbyingactvtscd","subjto6033cd","dnradvisedfundscd","prptyintrcvdcd","maintwrkofartcd","crcounselingqstncd","hldassetsintermpermcd","rptlndbldgeqptcd","rptinvstothsecd","rptinvstprgrelcd","rptothasstcd","rptothliabcd","sepcnsldtfinstmtcd","sepindaudfinstmtcd","inclinfinstmtcd","operateschools170cd","frgnofficecd","frgnrevexpnscd","frgngrntscd","frgnaggragrntscd","rptprofndrsngfeescd","rptincfnndrsngcd","rptincgamingcd","operatehosptlcd","hospaudfinstmtcd","rptgrntstogovtcd","rptgrntstoindvcd","rptyestocompnstncd","txexmptbndcd","invstproceedscd","maintescrwaccntcd","actonbehalfcd","engageexcessbnftcd","awarexcessbnftcd","loantofficercd","grantoofficercd","dirbusnreltdcd","fmlybusnreltdcd","servasofficercd","recvnoncashcd","recvartcd","ceaseoperationscd","sellorexchcd","ownsepentcd","reltdorgcd","intincntrlcd","orgtrnsfrcd","conduct5percentcd","compltschocd","f1096cnt","fw2gcnt","wthldngrulescd","noemplyeesw3cnt","filerqrdrtnscd","unrelbusinccd","filedf990tcd","frgnacctcd","prohibtdtxshltrcd","prtynotifyorgcd","filedf8886tcd","solicitcntrbcd","exprstmntcd","providegoodscd","notfydnrvalcd","filedf8282cd","f8282cnt","fndsrcvdcd","premiumspaidcd","filedf8899cd","filedf1098ccd","excbushldngscd","s4966distribcd","distribtodonorcd","initiationfees","grsrcptspublicuse","grsincmembers","grsincother","filedlieuf1041cd","txexmptint","qualhlthplncd","qualhlthreqmntn","qualhlthonhnd","rcvdpdtngcd","filedf720cd","totreprtabled","totcomprelatede","totestcompf","noindiv100kcnt","nocontractor100kcnt","totcntrbgfts","prgmservcode2acd","totrev2acola","prgmservcode2bcd","totrev2bcola","prgmservcode2ccd","totrev2ccola","prgmservcode2dcd","totrev2dcola","prgmservcode2ecd","totrev2ecola","totrev2fcola","totprgmrevnue","invstmntinc","txexmptbndsproceeds","royaltsinc","grsrntsreal","grsrntsprsnl","rntlexpnsreal","rntlexpnsprsnl","rntlincreal","rntlincprsnl","netrntlinc","grsalesecur","grsalesothr",
"cstbasisecur","cstbasisothr","gnlsecur","gnlsothr","netgnls","grsincfndrsng","lessdirfndrsng","netincfndrsng","grsincgaming","lessdirgaming","netincgaming","grsalesinvent","lesscstofgoods","netincsales","miscrev11acd","miscrevtota","miscrev11bcd","miscrevtot11b","miscrev11ccd","miscrevtot11c","miscrevtot11d","miscrevtot11e","totrevenue","grntstogovt","grnsttoindiv","grntstofrgngovt","benifitsmembrs","compnsatncurrofcr","compnsatnandothr","othrsalwages","pensionplancontrb","othremplyeebenef","payrolltx","feesforsrvcmgmt","legalfees","accntingfees","feesforsrvclobby","profndraising","feesforsrvcinvstmgmt","feesforsrvcothr","advrtpromo","officexpns","infotech","royaltsexpns","occupancy","travel","travelofpublicoffcl","converconventmtng","interestamt","pymtoaffiliates","deprcatndepletn","insurance","othrexpnsa","othrexpnsb","othrexpnsc","othrexpnsd","othrexpnse","othrexpnsf","totfuncexpns","nonintcashend","svngstempinvend","pldgegrntrcvblend","accntsrcvblend","currfrmrcvblend","rcvbldisqualend","notesloansrcvblend","invntriesalesend","prepaidexpnsend","lndbldgsequipend","invstmntsend","invstmntsothrend","invstmntsprgmend","intangibleassetsend","othrassetsend","totassetsend","accntspayableend","grntspayableend","deferedrevnuend","txexmptbndsend","escrwaccntliabend","paybletoffcrsend","secrdmrtgsend","unsecurednotesend","othrliabend","totliabend","unrstrctnetasstsend","temprstrctnetasstsend","permrstrctnetasstsend","capitalstktrstend","paidinsurplusend","retainedearnend","totnetassetend","totnetliabastend","nonpfrea","totnooforgscnt","totsupport","gftgrntsrcvd170","txrevnuelevied170","srvcsval170","pubsuppsubtot170","exceeds2pct170","pubsupplesspct170","samepubsuppsubtot170","grsinc170","netincunreltd170","othrinc170","totsupp170","grsrcptsrelated170","totgftgrntrcvd509","grsrcptsadmissn509","grsrcptsactivities509","txrevnuelevied509","srvcsval509","pubsuppsubtot509","rcvdfrmdisqualsub509","exceeds1pct509","subtotpub509","pubsupplesub509","samepubsuppsubtot509","grsinc50
9","unreltxincls511tx509","subtotsuppinc509","netincunrelatd509","othrinc509","totsupp509"]',
"RENAME_MAPPINGS": '{"elf": "elf","EIN": "ein","tax_prd": "tax_pd","subseccd": "subseccd","s50Yc3or4947aYcd": "s501c3or4947a1cd","schdbind": "schdbind","politicalactvtscd": "politicalactvtscd","lbbyingactvtscd": "lbbyingactvtscd","subjto6033cd": "subjto6033cd","dnradvisedfundscd": "dnradvisedfundscd","prptyintrcvdcd": "prptyintrcvdcd","maintwrkofartcd": "maintwrkofartcd","crcounselingqstncd": "crcounselingqstncd","hldassetsintermpermcd": "hldassetsintermpermcd","rptlndbldgeqptcd": "rptlndbldgeqptcd","rptinvstothsecd": "rptinvstothsecd","rptinvstprgrelcd": "rptinvstprgrelcd","rptothasstcd": "rptothasstcd","rptothliabcd": "rptothliabcd","sepcnsldtfinstmtcd": "sepcnsldtfinstmtcd","sepindaudfinstmtcd": "sepindaudfinstmtcd","inclinfinstmtcd": "inclinfinstmtcd","operateschoolsY70cd": "operateschools170cd","frgnofficecd": "frgnofficecd","frgnrevexpnscd": "frgnrevexpnscd","frgngrntscd": "frgngrntscd","frgnaggragrntscd": "frgnaggragrntscd","rptprofndrsngfeescd": "rptprofndrsngfeescd","rptincfnndrsngcd": "rptincfnndrsngcd","rptincgamingcd": "rptincgamingcd","operatehosptlcd": "operatehosptlcd","hospaudfinstmtcd": "hospaudfinstmtcd","rptgrntstogovtcd": "rptgrntstogovtcd","rptgrntstoindvcd": "rptgrntstoindvcd","rptyestocompnstncd": "rptyestocompnstncd","txexmptbndcd": "txexmptbndcd","invstproceedscd": "invstproceedscd","maintescrwaccntcd": "maintescrwaccntcd","actonbehalfcd": "actonbehalfcd","engageexcessbnftcd": "engageexcessbnftcd","awarexcessbnftcd": "awarexcessbnftcd","loantofficercd": "loantofficercd","grantoofficercd": "grantoofficercd","dirbusnreltdcd": "dirbusnreltdcd","fmlybusnreltdcd": "fmlybusnreltdcd","servasofficercd": "servasofficercd","recvnoncashcd": "recvnoncashcd","recvartcd": "recvartcd","ceaseoperationscd": "ceaseoperationscd","sellorexchcd": "sellorexchcd","ownsepentcd": "ownsepentcd","reltdorgcd": "reltdorgcd","intincntrlcd": "intincntrlcd","orgtrnsfrcd": "orgtrnsfrcd","conduct5percentcd": "conduct5percentcd","compltschocd": "compltschocd","f1096cnt": 
"f1096cnt","fw2gcnt": "fw2gcnt","wthldngrulescd": "wthldngrulescd","noemplyeesw3cnt": "noemplyeesw3cnt","filerqrdrtnscd": "filerqrdrtnscd","unrelbusinccd": "unrelbusinccd","filedf990tcd": "filedf990tcd","frgnacctcd": "frgnacctcd","prohibtdtxshltrcd": "prohibtdtxshltrcd","prtynotifyorgcd": "prtynotifyorgcd","filedf8886tcd": "filedf8886tcd","solicitcntrbcd": "solicitcntrbcd","exprstmntcd": "exprstmntcd","providegoodscd": "providegoodscd","notfydnrvalcd": "notfydnrvalcd","filedf8N8Ncd": "filedf8282cd","f8282cnt": "f8282cnt","fndsrcvdcd": "fndsrcvdcd","premiumspaidcd": "premiumspaidcd","filedf8899cd": "filedf8899cd","filedfY098ccd": "filedf1098ccd","excbushldngscd": "excbushldngscd","s4966distribcd": "s4966distribcd","distribtodonorcd": "distribtodonorcd","initiationfees": "initiationfees","grsrcptspublicuse": "grsrcptspublicuse","grsincmembers": "grsincmembers","grsincother": "grsincother","filedlieufY04Ycd": "filedlieuf1041cd","txexmptint": "txexmptint","qualhlthplncd": "qualhlthplncd","qualhlthreqmntn": "qualhlthreqmntn","qualhlthonhnd": "qualhlthonhnd","rcvdpdtngcd": "rcvdpdtngcd","filedf7N0cd": "filedf720cd","totreprtabled": "totreprtabled","totcomprelatede": "totcomprelatede","totestcompf": "totestcompf","noindiv100kcnt": "noindiv100kcnt","nocontractor100kcnt": "nocontractor100kcnt","totcntrbgfts": "totcntrbgfts","prgmservcode2acd": "prgmservcode2acd","totrev2acola": "totrev2acola","prgmservcode2bcd": "prgmservcode2bcd","totrev2bcola": "totrev2bcola","prgmservcode2ccd": "prgmservcode2ccd","totrev2ccola": "totrev2ccola","prgmservcode2dcd": "prgmservcode2dcd","totrev2dcola": "totrev2dcola","prgmservcode2ecd": "prgmservcode2ecd","totrev2ecola": "totrev2ecola","totrev2fcola": "totrev2fcola","totprgmrevnue": "totprgmrevnue","invstmntinc": "invstmntinc","txexmptbndsproceeds": "txexmptbndsproceeds","royaltsinc": "royaltsinc","grsrntsreal": "grsrntsreal","grsrntsprsnl": "grsrntsprsnl","rntlexpnsreal": "rntlexpnsreal","rntlexpnsprsnl": "rntlexpnsprsnl","rntlincreal": 
"rntlincreal","rntlincprsnl": "rntlincprsnl","netrntlinc": "netrntlinc","grsalesecur": "grsalesecur","grsalesothr": "grsalesothr","cstbasisecur": "cstbasisecur","cstbasisothr": "cstbasisothr","gnlsecur": "gnlsecur","gnlsothr": "gnlsothr","netgnls": "netgnls","grsincfndrsng": "grsincfndrsng","lessdirfndrsng": "lessdirfndrsng","netincfndrsng": "netincfndrsng","grsincgaming": "grsincgaming","lessdirgaming": "lessdirgaming","netincgaming": "netincgaming","grsalesinvent": "grsalesinvent","lesscstofgoods": "lesscstofgoods","netincsales": "netincsales","miscrev11acd": "miscrev11acd","miscrevtota": "miscrevtota","miscrev11bcd": "miscrev11bcd","miscrevtot11b": "miscrevtot11b","miscrev11ccd": "miscrev11ccd","miscrevtot11c": "miscrevtot11c","miscrevtot11d": "miscrevtot11d","miscrevtot11e": "miscrevtot11e","totrevenue": "totrevenue","grntstogovt": "grntstogovt","grnsttoindiv": "grnsttoindiv","grntstofrgngovt": "grntstofrgngovt","benifitsmembrs": "benifitsmembrs","compnsatncurrofcr": "compnsatncurrofcr","compnsatnandothr": "compnsatnandothr","othrsalwages": "othrsalwages","pensionplancontrb": "pensionplancontrb","othremplyeebenef": "othremplyeebenef","payrolltx": "payrolltx","feesforsrvcmgmt": "feesforsrvcmgmt","legalfees": "legalfees","accntingfees": "accntingfees","feesforsrvclobby": "feesforsrvclobby","profndraising": "profndraising","feesforsrvcinvstmgmt": "feesforsrvcinvstmgmt","feesforsrvcothr": "feesforsrvcothr","advrtpromo": "advrtpromo","officexpns": "officexpns","infotech": "infotech","royaltsexpns": "royaltsexpns","occupancy": "occupancy","travel": "travel","travelofpublicoffcl": "travelofpublicoffcl","converconventmtng": "converconventmtng","interestamt": "interestamt","pymtoaffiliates": "pymtoaffiliates","deprcatndepletn": "deprcatndepletn","insurance": "insurance","othrexpnsa": "othrexpnsa","othrexpnsb": "othrexpnsb","othrexpnsc": "othrexpnsc","othrexpnsd": "othrexpnsd","othrexpnse": "othrexpnse","othrexpnsf": "othrexpnsf","totfuncexpns": 
"totfuncexpns","nonintcashend": "nonintcashend","svngstempinvend": "svngstempinvend","pldgegrntrcvblend": "pldgegrntrcvblend","accntsrcvblend": "accntsrcvblend","currfrmrcvblend": "currfrmrcvblend","rcvbldisqualend": "rcvbldisqualend","notesloansrcvblend": "notesloansrcvblend","invntriesalesend": "invntriesalesend","prepaidexpnsend": "prepaidexpnsend","lndbldgsequipend": "lndbldgsequipend","invstmntsend": "invstmntsend","invstmntsothrend": "invstmntsothrend","invstmntsprgmend": "invstmntsprgmend","intangibleassetsend": "intangibleassetsend","othrassetsend": "othrassetsend","totassetsend": "totassetsend","accntspayableend": "accntspayableend","grntspayableend": "grntspayableend","deferedrevnuend": "deferedrevnuend","txexmptbndsend": "txexmptbndsend","escrwaccntliabend": "escrwaccntliabend","paybletoffcrsend": "paybletoffcrsend","secrdmrtgsend": "secrdmrtgsend","unsecurednotesend": "unsecurednotesend","othrliabend": "othrliabend","totliabend": "totliabend","unrstrctnetasstsend": "unrstrctnetasstsend","temprstrctnetasstsend": "temprstrctnetasstsend","permrstrctnetasstsend": "permrstrctnetasstsend","capitalstktrstend": "capitalstktrstend","paidinsurplusend": "paidinsurplusend","retainedearnend": "retainedearnend","totnetassetend": "totnetassetend","totnetliabastend": "totnetliabastend","nonpfrea": "nonpfrea","totnooforgscnt": "totnooforgscnt","totsupport": "totsupport","gftgrntsrcvd170": "gftgrntsrcvd170","txrevnuelevied170": "txrevnuelevied170","srvcsval170": "srvcsval170","pubsuppsubtot170": "pubsuppsubtot170","exceeds2pct170": "exceeds2pct170","pubsupplesspct170": "pubsupplesspct170","samepubsuppsubtot170": "samepubsuppsubtot170","grsinc170": "grsinc170","netincunreltd170": "netincunreltd170","othrinc170": "othrinc170","totsupp170": "totsupp170","grsrcptsrelated170": "grsrcptsrelated170","totgftgrntrcvd509": "totgftgrntrcvd509","grsrcptsadmissn509": "grsrcptsadmissn509","grsrcptsactivities509": "grsrcptsactivities509","txrevnuelevied509": 
"txrevnuelevied509","srvcsval509": "srvcsval509","pubsuppsubtot509": "pubsuppsubtot509","rcvdfrmdisqualsub509": "rcvdfrmdisqualsub509","exceeds1pct509": "exceeds1pct509","subtotpub509": "subtotpub509","pubsupplesub509": "pubsupplesub509","samepubsuppsubtot509": "samepubsuppsubtot509","grsinc509": "grsinc509","unreltxincls511tx509": "unreltxincls511tx509","subtotsuppinc509": "subtotsuppinc509","netincunrelatd509": "netincunrelatd509","othrinc509": "othrinc509","totsupp509": "totsupp509"}',
},
resources={"request_memory": "2G", "request_cpu": "1"},
)
# Task to load CSV data to a BigQuery table
load_irs_990_to_bq = gcs_to_bq.GoogleCloudStorageToBigQueryOperator(
task_id="load_irs_990_to_bq",
bucket="{{ var.value.composer_bucket }}",
source_objects=["data/irs_990/irs_990_2014/data_output.csv"],
source_format="CSV",
destination_project_dataset_table="irs_990.irs_990_2014",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{"name": "ein", "type": "string", "mode": "required"},
{"name": "tax_pd", "type": "integer", "mode": "nullable"},
{"name": "subseccd", "type": "integer", "mode": "nullable"},
{"name": "s501c3or4947a1cd", "type": "string", "mode": "nullable"},
{"name": "schdbind", "type": "string", "mode": "nullable"},
{"name": "politicalactvtscd", "type": "string", "mode": "nullable"},
{"name": "lbbyingactvtscd", "type": "string", "mode": "nullable"},
{"name": "subjto6033cd", "type": "string", "mode": "nullable"},
{"name": "dnradvisedfundscd", "type": "string", "mode": "nullable"},
{"name": "prptyintrcvdcd", "type": "string", "mode": "nullable"},
{"name": "maintwrkofartcd", "type": "string", "mode": "nullable"},
{"name": "crcounselingqstncd", "type": "string", "mode": "nullable"},
{"name": "hldassetsintermpermcd", "type": "string", "mode": "nullable"},
{"name": "rptlndbldgeqptcd", "type": "string", "mode": "nullable"},
{"name": "rptinvstothsecd", "type": "string", "mode": "nullable"},
{"name": "rptinvstprgrelcd", "type": "string", "mode": "nullable"},
{"name": "rptothasstcd", "type": "string", "mode": "nullable"},
{"name": "rptothliabcd", "type": "string", "mode": "nullable"},
{"name": "sepcnsldtfinstmtcd", "type": "string", "mode": "nullable"},
{"name": "sepindaudfinstmtcd", "type": "string", "mode": "nullable"},
{"name": "inclinfinstmtcd", "type": "string", "mode": "nullable"},
{"name": "operateschools170cd", "type": "string", "mode": "nullable"},
{"name": "frgnofficecd", "type": "string", "mode": "nullable"},
{"name": "frgnrevexpnscd", "type": "string", "mode": "nullable"},
{"name": "frgngrntscd", "type": "string", "mode": "nullable"},
{"name": "frgnaggragrntscd", "type": "string", "mode": "nullable"},
{"name": "rptprofndrsngfeescd", "type": "string", "mode": "nullable"},
{"name": "rptincfnndrsngcd", "type": "string", "mode": "nullable"},
{"name": "rptincgamingcd", "type": "string", "mode": "nullable"},
{"name": "operatehosptlcd", "type": "string", "mode": "nullable"},
{"name": "hospaudfinstmtcd", "type": "string", "mode": "nullable"},
{"name": "rptgrntstogovtcd", "type": "string", "mode": "nullable"},
{"name": "rptgrntstoindvcd", "type": "string", "mode": "nullable"},
{"name": "rptyestocompnstncd", "type": "string", "mode": "nullable"},
{"name": "txexmptbndcd", "type": "string", "mode": "nullable"},
{"name": "invstproceedscd", "type": "string", "mode": "nullable"},
{"name": "maintescrwaccntcd", "type": "string", "mode": "nullable"},
{"name": "actonbehalfcd", "type": "string", "mode": "nullable"},
{"name": "engageexcessbnftcd", "type": "string", "mode": "nullable"},
{"name": "awarexcessbnftcd", "type": "string", "mode": "nullable"},
{"name": "loantofficercd", "type": "string", "mode": "nullable"},
{"name": "grantoofficercd", "type": "string", "mode": "nullable"},
{"name": "dirbusnreltdcd", "type": "string", "mode": "nullable"},
{"name": "fmlybusnreltdcd", "type": "string", "mode": "nullable"},
{"name": "servasofficercd", "type": "string", "mode": "nullable"},
{"name": "recvnoncashcd", "type": "string", "mode": "nullable"},
{"name": "recvartcd", "type": "string", "mode": "nullable"},
{"name": "ceaseoperationscd", "type": "string", "mode": "nullable"},
{"name": "sellorexchcd", "type": "string", "mode": "nullable"},
{"name": "ownsepentcd", "type": "string", "mode": "nullable"},
{"name": "reltdorgcd", "type": "string", "mode": "nullable"},
{"name": "intincntrlcd", "type": "string", "mode": "nullable"},
{"name": "orgtrnsfrcd", "type": "string", "mode": "nullable"},
{"name": "conduct5percentcd", "type": "string", "mode": "nullable"},
{"name": "compltschocd", "type": "string", "mode": "nullable"},
{"name": "f1096cnt", "type": "integer", "mode": "nullable"},
{"name": "fw2gcnt", "type": "integer", "mode": "nullable"},
{"name": "wthldngrulescd", "type": "string", "mode": "nullable"},
{"name": "noemplyeesw3cnt", "type": "integer", "mode": "nullable"},
{"name": "filerqrdrtnscd", "type": "string", "mode": "nullable"},
{"name": "unrelbusinccd", "type": "string", "mode": "nullable"},
{"name": "filedf990tcd", "type": "string", "mode": "nullable"},
{"name": "frgnacctcd", "type": "string", "mode": "nullable"},
{"name": "prohibtdtxshltrcd", "type": "string", "mode": "nullable"},
{"name": "prtynotifyorgcd", "type": "string", "mode": "nullable"},
{"name": "filedf8886tcd", "type": "string", "mode": "nullable"},
{"name": "solicitcntrbcd", "type": "string", "mode": "nullable"},
{"name": "exprstmntcd", "type": "string", "mode": "nullable"},
{"name": "providegoodscd", "type": "string", "mode": "nullable"},
{"name": "notfydnrvalcd", "type": "string", "mode": "nullable"},
{"name": "filedf8282cd", "type": "string", "mode": "nullable"},
{"name": "f8282cnt", "type": "integer", "mode": "nullable"},
{"name": "fndsrcvdcd", "type": "string", "mode": "nullable"},
{"name": "premiumspaidcd", "type": "string", "mode": "nullable"},
{"name": "filedf8899cd", "type": "string", "mode": "nullable"},
{"name": "filedf1098ccd", "type": "string", "mode": "nullable"},
{"name": "excbushldngscd", "type": "string", "mode": "nullable"},
{"name": "s4966distribcd", "type": "string", "mode": "nullable"},
{"name": "distribtodonorcd", "type": "string", "mode": "nullable"},
{"name": "initiationfees", "type": "integer", "mode": "nullable"},
{"name": "grsrcptspublicuse", "type": "integer", "mode": "nullable"},
{"name": "grsincmembers", "type": "integer", "mode": "nullable"},
{"name": "grsincother", "type": "integer", "mode": "nullable"},
{"name": "filedlieuf1041cd", "type": "string", "mode": "nullable"},
{"name": "txexmptint", "type": "integer", "mode": "nullable"},
{"name": "qualhlthplncd", "type": "string", "mode": "nullable"},
{"name": "qualhlthreqmntn", "type": "integer", "mode": "nullable"},
{"name": "qualhlthonhnd", "type": "integer", "mode": "nullable"},
{"name": "rcvdpdtngcd", "type": "string", "mode": "nullable"},
{"name": "filedf720cd", "type": "string", "mode": "nullable"},
{"name": "totreprtabled", "type": "integer", "mode": "nullable"},
{"name": "totcomprelatede", "type": "integer", "mode": "nullable"},
{"name": "totestcompf", "type": "integer", "mode": "nullable"},
{"name": "noindiv100kcnt", "type": "integer", "mode": "nullable"},
{"name": "nocontractor100kcnt", "type": "integer", "mode": "nullable"},
{"name": "totcntrbgfts", "type": "integer", "mode": "nullable"},
{"name": "prgmservcode2acd", "type": "integer", "mode": "nullable"},
{"name": "totrev2acola", "type": "integer", "mode": "nullable"},
{"name": "prgmservcode2bcd", "type": "integer", "mode": "nullable"},
{"name": "totrev2bcola", "type": "integer", "mode": "nullable"},
{"name": "prgmservcode2ccd", "type": "integer", "mode": "nullable"},
{"name": "totrev2ccola", "type": "integer", "mode": "nullable"},
{"name": "prgmservcode2dcd", "type": "integer", "mode": "nullable"},
{"name": "totrev2dcola", "type": "integer", "mode": "nullable"},
{"name": "prgmservcode2ecd", "type": "integer", "mode": "nullable"},
{"name": "totrev2ecola", "type": "integer", "mode": "nullable"},
{"name": "totrev2fcola", "type": "integer", "mode": "nullable"},
{"name": "totprgmrevnue", "type": "integer", "mode": "nullable"},
{"name": "invstmntinc", "type": "integer", "mode": "nullable"},
{"name": "txexmptbndsproceeds", "type": "integer", "mode": "nullable"},
{"name": "royaltsinc", "type": "integer", "mode": "nullable"},
{"name": "grsrntsreal", "type": "integer", "mode": "nullable"},
{"name": "grsrntsprsnl", "type": "integer", "mode": "nullable"},
{"name": "rntlexpnsreal", "type": "integer", "mode": "nullable"},
{"name": "rntlexpnsprsnl", "type": "integer", "mode": "nullable"},
{"name": "rntlincreal", "type": "integer", "mode": "nullable"},
{"name": "rntlincprsnl", "type": "integer", "mode": "nullable"},
{"name": "netrntlinc", "type": "integer", "mode": "nullable"},
{"name": "grsalesecur", "type": "integer", "mode": "nullable"},
{"name": "grsalesothr", "type": "integer", "mode": "nullable"},
{"name": "cstbasisecur", "type": "integer", "mode": "nullable"},
{"name": "cstbasisothr", "type": "integer", "mode": "nullable"},
{"name": "gnlsecur", "type": "integer", "mode": "nullable"},
{"name": "gnlsothr", "type": "integer", "mode": "nullable"},
{"name": "netgnls", "type": "integer", "mode": "nullable"},
{"name": "grsincfndrsng", "type": "integer", "mode": "nullable"},
{"name": "lessdirfndrsng", "type": "integer", "mode": "nullable"},
{"name": "netincfndrsng", "type": "integer", "mode": "nullable"},
{"name": "grsincgaming", "type": "integer", "mode": "nullable"},
{"name": "lessdirgaming", "type": "integer", "mode": "nullable"},
{"name": "netincgaming", "type": "integer", "mode": "nullable"},
{"name": "grsalesinvent", "type": "integer", "mode": "nullable"},
{"name": "lesscstofgoods", "type": "integer", "mode": "nullable"},
{"name": "netincsales", "type": "integer", "mode": "nullable"},
{"name": "miscrev11acd", "type": "integer", "mode": "nullable"},
{"name": "miscrevtota", "type": "integer", "mode": "nullable"},
{"name": "miscrev11bcd", "type": "integer", "mode": "nullable"},
{"name": "miscrevtot11b", "type": "integer", "mode": "nullable"},
{"name": "miscrev11ccd", "type": "integer", "mode": "nullable"},
{"name": "miscrevtot11c", "type": "integer", "mode": "nullable"},
{"name": "miscrevtot11d", "type": "integer", "mode": "nullable"},
{"name": "miscrevtot11e", "type": "integer", "mode": "nullable"},
{"name": "totrevenue", "type": "integer", "mode": "nullable"},
{"name": "grntstogovt", "type": "integer", "mode": "nullable"},
{"name": "grnsttoindiv", "type": "integer", "mode": "nullable"},
{"name": "grntstofrgngovt", "type": "integer", "mode": "nullable"},
{"name": "benifitsmembrs", "type": "integer", "mode": "nullable"},
{"name": "compnsatncurrofcr", "type": "integer", "mode": "nullable"},
{"name": "compnsatnandothr", "type": "integer", "mode": "nullable"},
{"name": "othrsalwages", "type": "integer", "mode": "nullable"},
{"name": "pensionplancontrb", "type": "integer", "mode": "nullable"},
{"name": "othremplyeebenef", "type": "integer", "mode": "nullable"},
{"name": "payrolltx", "type": "integer", "mode": "nullable"},
{"name": "feesforsrvcmgmt", "type": "integer", "mode": "nullable"},
{"name": "legalfees", "type": "integer", "mode": "nullable"},
{"name": "accntingfees", "type": "integer", "mode": "nullable"},
{"name": "feesforsrvclobby", "type": "integer", "mode": "nullable"},
{"name": "profndraising", "type": "integer", "mode": "nullable"},
{"name": "feesforsrvcinvstmgmt", "type": "integer", "mode": "nullable"},
{"name": "feesforsrvcothr", "type": "integer", "mode": "nullable"},
{"name": "advrtpromo", "type": "integer", "mode": "nullable"},
{"name": "officexpns", "type": "integer", "mode": "nullable"},
{"name": "infotech", "type": "integer", "mode": "nullable"},
{"name": "royaltsexpns", "type": "integer", "mode": "nullable"},
{"name": "occupancy", "type": "integer", "mode": "nullable"},
{"name": "travel", "type": "integer", "mode": "nullable"},
{"name": "travelofpublicoffcl", "type": "integer", "mode": "nullable"},
{"name": "converconventmtng", "type": "integer", "mode": "nullable"},
{"name": "interestamt", "type": "integer", "mode": "nullable"},
{"name": "pymtoaffiliates", "type": "integer", "mode": "nullable"},
{"name": "deprcatndepletn", "type": "integer", "mode": "nullable"},
{"name": "insurance", "type": "integer", "mode": "nullable"},
{"name": "othrexpnsa", "type": "integer", "mode": "nullable"},
{"name": "othrexpnsb", "type": "integer", "mode": "nullable"},
{"name": "othrexpnsc", "type": "integer", "mode": "nullable"},
{"name": "othrexpnsd", "type": "integer", "mode": "nullable"},
{"name": "othrexpnse", "type": "integer", "mode": "nullable"},
{"name": "othrexpnsf", "type": "integer", "mode": "nullable"},
{"name": "totfuncexpns", "type": "integer", "mode": "nullable"},
{"name": "nonintcashend", "type": "integer", "mode": "nullable"},
{"name": "svngstempinvend", "type": "integer", "mode": "nullable"},
{"name": "pldgegrntrcvblend", "type": "integer", "mode": "nullable"},
{"name": "accntsrcvblend", "type": "integer", "mode": "nullable"},
{"name": "currfrmrcvblend", "type": "integer", "mode": "nullable"},
{"name": "rcvbldisqualend", "type": "integer", "mode": "nullable"},
{"name": "notesloansrcvblend", "type": "integer", "mode": "nullable"},
{"name": "invntriesalesend", "type": "integer", "mode": "nullable"},
{"name": "prepaidexpnsend", "type": "integer", "mode": "nullable"},
{"name": "lndbldgsequipend", "type": "integer", "mode": "nullable"},
{"name": "invstmntsend", "type": "integer", "mode": "nullable"},
{"name": "invstmntsothrend", "type": "integer", "mode": "nullable"},
{"name": "invstmntsprgmend", "type": "integer", "mode": "nullable"},
{"name": "intangibleassetsend", "type": "integer", "mode": "nullable"},
{"name": "othrassetsend", "type": "integer", "mode": "nullable"},
{"name": "totassetsend", "type": "integer", "mode": "nullable"},
{"name": "accntspayableend", "type": "integer", "mode": "nullable"},
{"name": "grntspayableend", "type": "integer", "mode": "nullable"},
{"name": "deferedrevnuend", "type": "integer", "mode": "nullable"},
{"name": "txexmptbndsend", "type": "integer", "mode": "nullable"},
{"name": "escrwaccntliabend", "type": "integer", "mode": "nullable"},
{"name": "paybletoffcrsend", "type": "integer", "mode": "nullable"},
{"name": "secrdmrtgsend", "type": "integer", "mode": "nullable"},
{"name": "unsecurednotesend", "type": "integer", "mode": "nullable"},
{"name": "othrliabend", "type": "integer", "mode": "nullable"},
{"name": "totliabend", "type": "integer", "mode": "nullable"},
{"name": "unrstrctnetasstsend", "type": "integer", "mode": "nullable"},
{"name": "temprstrctnetasstsend", "type": "integer", "mode": "nullable"},
{"name": "permrstrctnetasstsend", "type": "integer", "mode": "nullable"},
{"name": "capitalstktrstend", "type": "integer", "mode": "nullable"},
{"name": "paidinsurplusend", "type": "integer", "mode": "nullable"},
{"name": "retainedearnend", "type": "integer", "mode": "nullable"},
{"name": "totnetassetend", "type": "integer", "mode": "nullable"},
{"name": "totnetliabastend", "type": "integer", "mode": "nullable"},
{"name": "nonpfrea", "type": "integer", "mode": "nullable"},
{"name": "totnooforgscnt", "type": "integer", "mode": "nullable"},
{"name": "totsupport", "type": "integer", "mode": "nullable"},
{"name": "gftgrntsrcvd170", "type": "integer", "mode": "nullable"},
{"name": "txrevnuelevied170", "type": "integer", "mode": "nullable"},
{"name": "srvcsval170", "type": "integer", "mode": "nullable"},
{"name": "pubsuppsubtot170", "type": "integer", "mode": "nullable"},
{"name": "exceeds2pct170", "type": "integer", "mode": "nullable"},
{"name": "pubsupplesspct170", "type": "integer", "mode": "nullable"},
{"name": "samepubsuppsubtot170", "type": "integer", "mode": "nullable"},
{"name": "grsinc170", "type": "integer", "mode": "nullable"},
{"name": "netincunreltd170", "type": "integer", "mode": "nullable"},
{"name": "othrinc170", "type": "integer", "mode": "nullable"},
{"name": "totsupp170", "type": "integer", "mode": "nullable"},
{"name": "grsrcptsrelated170", "type": "integer", "mode": "nullable"},
{"name": "totgftgrntrcvd509", "type": "integer", "mode": "nullable"},
{"name": "grsrcptsadmissn509", "type": "integer", "mode": "nullable"},
{"name": "grsrcptsactivities509", "type": "integer", "mode": "nullable"},
{"name": "txrevnuelevied509", "type": "integer", "mode": "nullable"},
{"name": "srvcsval509", "type": "integer", "mode": "nullable"},
{"name": "pubsuppsubtot509", "type": "integer", "mode": "nullable"},
{"name": "rcvdfrmdisqualsub509", "type": "integer", "mode": "nullable"},
{"name": "exceeds1pct509", "type": "integer", "mode": "nullable"},
{"name": "subtotpub509", "type": "integer", "mode": "nullable"},
{"name": "pubsupplesub509", "type": "integer", "mode": "nullable"},
{"name": "samepubsuppsubtot509", "type": "integer", "mode": "nullable"},
{"name": "grsinc509", "type": "integer", "mode": "nullable"},
{"name": "unreltxincls511tx509", "type": "integer", "mode": "nullable"},
{"name": "subtotsuppinc509", "type": "integer", "mode": "nullable"},
{"name": "netincunrelatd509", "type": "integer", "mode": "nullable"},
{"name": "othrinc509", "type": "integer", "mode": "nullable"},
{"name": "totsupp509", "type": "integer", "mode": "nullable"},
],
)
irs_990_transform_csv >> load_irs_990_to_bq
| 104.548193
| 8,442
| 0.625209
|
4a073d6b462e27a8f91570c55f5687eea3e8dc55
| 660
|
py
|
Python
|
dev/Tools/Python/2.7.12/windows/Scripts/rst2html.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 8
|
2019-10-07T16:33:47.000Z
|
2020-12-07T03:59:58.000Z
|
dev/Tools/Python/2.7.12/windows/Scripts/rst2html.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | null | null | null |
dev/Tools/Python/2.7.12/windows/Scripts/rst2html.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 5
|
2020-08-27T20:44:18.000Z
|
2021-08-21T22:54:11.000Z
|
#!C:\workspace\lyengine\branches\testtech\dev\Tools\Python\2.7.12\windows\python.exe
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
| 27.5
| 84
| 0.742424
|
4a073ebbe3524db31f6832b3c16b904302e89b76
| 21,445
|
py
|
Python
|
lib/unparse.py
|
GRAYgoose124/mushishi
|
6dd4512908e39bf6506be023d1834611f58e894b
|
[
"MIT"
] | 2
|
2018-10-19T07:47:19.000Z
|
2020-02-11T05:03:11.000Z
|
lib/unparse.py
|
GRAYgoose124/mushishi
|
6dd4512908e39bf6506be023d1834611f58e894b
|
[
"MIT"
] | null | null | null |
lib/unparse.py
|
GRAYgoose124/mushishi
|
6dd4512908e39bf6506be023d1834611f58e894b
|
[
"MIT"
] | null | null | null |
# modified from unparse from cpython
from ast import NodeVisitor, Name, Constant, Module, Tuple
import sys
from contextlib import contextmanager, nullcontext
from _ast import *
# Large float and imaginary literals get turned into infinities in the AST.
# We unparse those infinities to INFSTR.
_INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1)
class _Unparser(NodeVisitor):
"""Methods in this class recursively traverse an AST and
output source code for the abstract syntax; original formatting
is disregarded."""
def __init__(self):
self._source = []
self._buffer = []
self._indent = 0
def interleave(self, inter, f, seq):
"""Call f on each item in seq, calling inter() in between."""
seq = iter(seq)
try:
f(next(seq))
except StopIteration:
pass
else:
for x in seq:
inter()
f(x)
def fill(self, text=""):
"""Indent a piece of text and append it, according to the current
indentation level"""
self.write("\n" + " " * self._indent + text)
def write(self, text):
"""Append a piece of text"""
self._source.append(text)
def buffer_writer(self, text):
self._buffer.append(text)
@property
def buffer(self):
value = "".join(self._buffer)
self._buffer.clear()
return value
@contextmanager
def block(self):
"""A context manager for preparing the source for blocks. It adds
the character':', increases the indentation on enter and decreases
the indentation on exit."""
self.write(":")
self._indent += 1
yield
self._indent -= 1
@contextmanager
def delimit(self, start, end):
"""A context manager for preparing the source for expressions. It adds
*start* to the buffer and enters, after exit it adds *end*."""
self.write(start)
yield
self.write(end)
def delimit_if(self, start, end, condition):
if condition:
return self.delimit(start, end)
else:
return nullcontext()
def traverse(self, node):
if isinstance(node, list) or isinstance(node, tuple):
for item in node:
self.traverse(item)
else:
if hasattr(node, 'value'):
if node.value is None:
return node
super().visit(node)
def visit(self, node):
"""Outputs a source code string that, if converted back to an ast
(using ast.parse) will generate an AST equivalent to *node*"""
if isinstance(node, Module):
pass
elif hasattr(node, 'value') and node.value is None:
return node
self._source = []
self.traverse(node)
return "".join(self._source)
def visit_Module(self, node):
for subnode in node.body:
self.traverse(subnode)
def visit_Expr(self, node):
if not hasattr(node, '__dict__'):
print(node)
self.fill()
self.traverse(node.value)
def visit_NamedExpr(self, node):
with self.delimit("(", ")"):
self.traverse(node.target)
self.write(" := ")
self.traverse(node.value)
def visit_Import(self, node):
self.fill("import ")
self.interleave(lambda: self.write(", "), self.traverse, node.names)
def visit_ImportFrom(self, node):
self.fill("from ")
self.write("." * node.level)
if node.module:
self.write(node.module)
self.write(" import ")
self.interleave(lambda: self.write(", "), self.traverse, node.names)
def visit_Assign(self, node):
self.fill()
for target in node.targets:
self.traverse(target)
self.write(" = ")
self.traverse(node.value)
def visit_AugAssign(self, node):
self.fill()
self.traverse(node.target)
self.write(" " + self.binop[node.op.__class__.__name__] + "= ")
self.traverse(node.value)
def visit_AnnAssign(self, node):
self.fill()
with self.delimit_if("(", ")", not node.simple and isinstance(node.target, Name)):
self.traverse(node.target)
self.write(": ")
self.traverse(node.annotation)
if node.value:
self.write(" = ")
self.traverse(node.value)
def visit_Return(self, node):
self.fill("return")
if node.value:
self.write(" ")
self.traverse(node.value)
def visit_Pass(self, node):
self.fill("pass")
def visit_Break(self, node):
self.fill("break")
def visit_Continue(self, node):
self.fill("continue")
def visit_Delete(self, node):
self.fill("del ")
self.interleave(lambda: self.write(", "), self.traverse, node.targets)
def visit_Assert(self, node):
self.fill("assert ")
self.traverse(node.test)
if node.msg:
self.write(", ")
self.traverse(node.msg)
def visit_Global(self, node):
self.fill("global ")
self.interleave(lambda: self.write(", "), self.write, node.names)
def visit_Nonlocal(self, node):
self.fill("nonlocal ")
self.interleave(lambda: self.write(", "), self.write, node.names)
def visit_Await(self, node):
with self.delimit("(", ")"):
self.write("await")
if node.value:
self.write(" ")
self.traverse(node.value)
def visit_Yield(self, node):
with self.delimit("(", ")"):
self.write("yield")
if node.value:
self.write(" ")
self.traverse(node.value)
def visit_YieldFrom(self, node):
with self.delimit("(", ")"):
self.write("yield from ")
if not node.value:
raise ValueError("Node can't be used without a value attribute.")
self.traverse(node.value)
def visit_Raise(self, node):
self.fill("raise")
if not node.exc:
if node.cause:
raise ValueError(f"Node can't use cause without an exception.")
return
self.write(" ")
self.traverse(node.exc)
if node.cause:
self.write(" from ")
self.traverse(node.cause)
def visit_Try(self, node):
self.fill("try")
with self.block():
self.traverse(node.body)
for ex in node.handlers:
self.traverse(ex)
if node.orelse:
self.fill("else")
with self.block():
self.traverse(node.orelse)
if node.finalbody:
self.fill("finally")
with self.block():
self.traverse(node.finalbody)
def visit_ExceptHandler(self, node):
self.fill("except")
if node.type:
self.write(" ")
self.traverse(node.type)
if node.name:
self.write(" as ")
self.write(node.name)
with self.block():
self.traverse(node.body)
def visit_ClassDef(self, node):
self.write("\n")
for deco in node.decorator_list:
self.fill("@")
self.traverse(deco)
self.fill("class " + node.name)
with self.delimit("(", ")"):
comma = False
for e in node.bases:
if comma:
self.write(", ")
else:
comma = True
self.traverse(e)
for e in node.keywords:
if comma:
self.write(", ")
else:
comma = True
self.traverse(e)
with self.block():
self.traverse(node.body)
def visit_FunctionDef(self, node):
self.__FunctionDef_helper(node, "def")
def visit_AsyncFunctionDef(self, node):
self.__FunctionDef_helper(node, "async def")
def __FunctionDef_helper(self, node, fill_suffix):
self.write("\n")
for deco in node.decorator_list:
self.fill("@")
self.traverse(deco)
def_str = fill_suffix + " " + node.name
self.fill(def_str)
with self.delimit("(", ")"):
self.traverse(node.args)
if node.returns:
self.write(" -> ")
self.traverse(node.returns)
with self.block():
self.traverse(node.body)
def visit_For(self, node):
self.__For_helper("for ", node)
def visit_AsyncFor(self, node):
self.__For_helper("async for ", node)
def __For_helper(self, fill, node):
self.fill(fill)
self.traverse(node.target)
self.write(" in ")
self.traverse(node.iter)
with self.block():
self.traverse(node.body)
if node.orelse:
self.fill("else")
with self.block():
self.traverse(node.orelse)
def visit_If(self, node):
self.fill("if ")
self.traverse(node.test)
with self.block():
self.traverse(node.body)
# collapse nested ifs into equivalent elifs.
while node.orelse and len(node.orelse) == 1 and isinstance(node.orelse[0], If):
node = node.orelse[0]
self.fill("elif ")
self.traverse(node.test)
with self.block():
self.traverse(node.body)
# final else
if node.orelse:
self.fill("else")
with self.block():
self.traverse(node.orelse)
def visit_While(self, node):
self.fill("while ")
self.traverse(node.test)
with self.block():
self.traverse(node.body)
if node.orelse:
self.fill("else")
with self.block():
self.traverse(node.orelse)
def visit_With(self, node):
self.fill("with ")
self.interleave(lambda: self.write(", "), self.traverse, node.items)
with self.block():
self.traverse(node.body)
def visit_AsyncWith(self, node):
self.fill("async with ")
self.interleave(lambda: self.write(", "), self.traverse, node.items)
with self.block():
self.traverse(node.body)
def visit_JoinedStr(self, node):
self.write("f")
self._fstring_JoinedStr(node, self.buffer_writer)
self.write(repr(self.buffer))
def visit_FormattedValue(self, node):
self.write("f")
self._fstring_FormattedValue(node, self.buffer_writer)
self.write(repr(self.buffer))
def _fstring_JoinedStr(self, node, write):
for value in node.values:
meth = getattr(self, "_fstring_" + type(value).__name__)
meth(value, write)
def _fstring_Constant(self, node, write):
if not isinstance(node.value, str):
raise ValueError("Constants inside JoinedStr should be a string.")
value = node.value.replace("{", "{{").replace("}", "}}")
write(value)
def _fstring_FormattedValue(self, node, write):
write("{")
expr = type(self)().visit(node.value).rstrip("\n")
if expr.startswith("{"):
write(" ") # Separate pair of opening brackets as "{ {"
write(expr)
if node.conversion != -1:
conversion = chr(node.conversion)
if conversion not in "sra":
raise ValueError("Unknown f-string conversion.")
write(f"!{conversion}")
if node.format_spec:
write(":")
meth = getattr(self, "_fstring_" + type(node.format_spec).__name__)
meth(node.format_spec, write)
write("}")
def visit_Name(self, node):
self.write(node.id)
def _write_constant(self, value):
if isinstance(value, (float, complex)):
# Substitute overflowing decimal literal for AST infinities.
self.write(repr(value).replace("inf", _INFSTR))
else:
self.write(repr(value))
def visit_Constant(self, node):
value = node.value
if isinstance(value, tuple):
with self.delimit("(", ")"):
if len(value) == 1:
self._write_constant(value[0])
self.write(",")
else:
self.interleave(lambda: self.write(", "), self._write_constant, value)
elif value is ...:
self.write("...")
else:
if node.kind == "u":
self.write("u")
self._write_constant(node.value)
def visit_List(self, node):
with self.delimit("[", "]"):
self.interleave(lambda: self.write(", "), self.traverse, node.elts)
def visit_ListComp(self, node):
with self.delimit("[", "]"):
self.traverse(node.elt)
for gen in node.generators:
self.traverse(gen)
def visit_GeneratorExp(self, node):
with self.delimit("(", ")"):
self.traverse(node.elt)
for gen in node.generators:
self.traverse(gen)
def visit_SetComp(self, node):
with self.delimit("{", "}"):
self.traverse(node.elt)
for gen in node.generators:
self.traverse(gen)
def visit_DictComp(self, node):
with self.delimit("{", "}"):
self.traverse(node.key)
self.write(": ")
self.traverse(node.value)
for gen in node.generators:
self.traverse(gen)
def visit_comprehension(self, node):
if node.is_async:
self.write(" async for ")
else:
self.write(" for ")
self.traverse(node.target)
self.write(" in ")
self.traverse(node.iter)
for if_clause in node.ifs:
self.write(" if ")
self.traverse(if_clause)
def visit_IfExp(self, node):
with self.delimit("(", ")"):
self.traverse(node.body)
self.write(" if ")
self.traverse(node.test)
self.write(" else ")
self.traverse(node.orelse)
def visit_Set(self, node):
if not node.elts:
raise ValueError("Set node should has at least one item")
with self.delimit("{", "}"):
self.interleave(lambda: self.write(", "), self.traverse, node.elts)
def visit_Tuple(self, node):
with self.delimit("(", ")"):
if len(node.elts) == 1:
elt = node.elts[0]
self.traverse(elt)
self.write(",")
else:
self.interleave(lambda: self.write(", "), self.traverse, node.elts)
def visit_Dict(self, node):
def write_key_value_pair(k, v):
self.traverse(k)
self.write(": ")
self.traverse(v)
def write_item(item):
k, v = item
if k is None:
# for dictionary unpacking operator in dicts {**{'y': 2}}
# see PEP 448 for details
self.write("**")
self.traverse(v)
else:
write_key_value_pair(k, v)
with self.delimit("{", "}"):
self.interleave(
lambda: self.write(", "), write_item, zip(node.keys, node.values))
unop = {"Invert": "~", "Not": "not", "UAdd": "+", "USub": "-"}
def visit_UnaryOp(self, node):
with self.delimit("(", ")"):
self.write(self.unop[node.op.__class__.__name__])
self.write(" ")
self.traverse(node.operand)
binop = {
"Add": "+",
"Sub": "-",
"Mult": "*",
"MatMult": "@",
"Div": "/",
"Mod": "%",
"LShift": "<<",
"RShift": ">>",
"BitOr": "|",
"BitXor": "^",
"BitAnd": "&",
"FloorDiv": "//",
"Pow": "**",
}
def visit_BinOp(self, node):
with self.delimit("(", ")"):
self.traverse(node.left)
self.write(" " + self.binop[node.op.__class__.__name__] + " ")
self.traverse(node.right)
cmpops = {
"Eq": "==",
"NotEq": "!=",
"Lt": "<",
"LtE": "<=",
"Gt": ">",
"GtE": ">=",
"Is": "is",
"IsNot": "is not",
"In": "in",
"NotIn": "not in",
}
def visit_Compare(self, node):
with self.delimit("(", ")"):
self.traverse(node.left)
for o, e in zip(node.ops, node.comparators):
self.write(" " + self.cmpops[o.__class__.__name__] + " ")
self.traverse(e)
boolops = {"And": "and", "Or": "or"}
def visit_BoolOp(self, node):
with self.delimit("(", ")"):
s = " %s " % self.boolops[node.op.__class__.__name__]
self.interleave(lambda: self.write(s), self.traverse, node.values)
def visit_Attribute(self, node):
self.traverse(node.value)
# Special case: 3.__abs__() is a syntax error, so if node.value
# is an integer literal then we need to either parenthesize
# it or add an extra space to get 3 .__abs__().
if isinstance(node.value, Constant) and isinstance(node.value.value, int):
self.write(" ")
self.write(".")
self.write(node.attr)
def visit_Call(self, node):
self.traverse(node.func)
with self.delimit("(", ")"):
comma = False
for e in node.args:
if comma:
self.write(", ")
else:
comma = True
self.traverse(e)
for e in node.keywords:
if comma:
self.write(", ")
else:
comma = True
self.traverse(e)
def visit_Subscript(self, node):
if len(node.__dict__) == 0:
return ''
self.traverse(node.value)
with self.delimit("[", "]"):
self.traverse(node.slice)
def visit_Starred(self, node):
self.write("*")
self.traverse(node.value)
def visit_Ellipsis(self, node):
self.write("...")
def visit_Index(self, node):
self.traverse(node.value)
def visit_Slice(self, node):
if node.lower:
self.traverse(node.lower)
self.write(":")
if node.upper:
self.traverse(node.upper)
if node.step:
self.write(":")
self.traverse(node.step)
def visit_ExtSlice(self, node):
self.interleave(lambda: self.write(", "), self.traverse, node.dims)
def visit_arg(self, node):
self.write(node.arg)
if node.annotation:
self.write(": ")
self.traverse(node.annotation)
def visit_arguments(self, node):
first = True
# normal arguments
all_args = node.posonlyargs + node.args
defaults = [None] * (len(all_args) - len(node.defaults)) + node.defaults
for index, elements in enumerate(zip(all_args, defaults), 1):
a, d = elements
if first:
first = False
else:
self.write(", ")
self.traverse(a)
if d:
self.write("=")
self.traverse(d)
if index == len(node.posonlyargs):
self.write(", /")
# varargs, or bare '*' if no varargs but keyword-only arguments present
if node.vararg or node.kwonlyargs:
if first:
first = False
else:
self.write(", ")
self.write("*")
if node.vararg:
self.write(node.vararg.arg)
if node.vararg.annotation:
self.write(": ")
self.traverse(node.vararg.annotation)
# keyword-only arguments
if node.kwonlyargs:
for a, d in zip(node.kwonlyargs, node.kw_defaults):
self.write(", ")
self.traverse(a)
if d:
self.write("=")
self.traverse(d)
# kwargs
if node.kwarg:
if first:
first = False
else:
self.write(", ")
self.write("**" + node.kwarg.arg)
if node.kwarg.annotation:
self.write(": ")
self.traverse(node.kwarg.annotation)
def visit_keyword(self, node):
if node.arg is None:
self.write("**")
else:
self.write(node.arg)
self.write("=")
self.traverse(node.value)
def visit_Lambda(self, node):
with self.delimit("(", ")"):
self.write("lambda ")
self.traverse(node.args)
self.write(": ")
self.traverse(node.body)
def visit_alias(self, node):
self.write(node.name)
if node.asname:
self.write(" as " + node.asname)
def visit_withitem(self, node):
self.traverse(node.context_expr)
if node.optional_vars:
self.write(" as ")
self.traverse(node.optional_vars)
def unparse(ast_obj):
unparser = _Unparser()
return unparser.visit(ast_obj)
| 30.811782
| 90
| 0.521753
|
4a073fbaa39a93365ae9c9b53ddf528439e53f98
| 648
|
py
|
Python
|
python/group/send_group_message.py
|
StevenMcguffin/examples
|
8ed7cd80c2b70280b274803dd84e9f99403dac9f
|
[
"MIT"
] | 1
|
2019-09-25T08:48:46.000Z
|
2019-09-25T08:48:46.000Z
|
python/group/send_group_message.py
|
StevenMcguffin/examples
|
8ed7cd80c2b70280b274803dd84e9f99403dac9f
|
[
"MIT"
] | 4
|
2019-09-25T02:21:32.000Z
|
2022-02-16T06:52:15.000Z
|
python/group/send_group_message.py
|
StevenMcguffin/examples
|
8ed7cd80c2b70280b274803dd84e9f99403dac9f
|
[
"MIT"
] | 9
|
2019-05-23T06:46:51.000Z
|
2020-12-23T08:21:49.000Z
|
import requests
import configparser
import json
import sys
import os.path
libdir = os.path.dirname(__file__)
sys.path.append(os.path.split(libdir)[0])
from auth import auth
config = configparser.ConfigParser()
config.read('config.ini')
apiKey = config['AUTH']['ApiKey']
apiSecret = config['AUTH']['ApiSecret']
if __name__ == '__main__':
# [INPUT_GROUP_ID] 에 그룹 아이디를 넣어주세요
# ex) G4V20181005122748TESTTESTTESTTES
res = requests.post(config['SERVER']['URI'] + 'groups/[INPUT_GROUP_ID]/send',
headers=auth.get_headers(apiKey, apiSecret))
print(json.dumps(json.loads(res.text), indent=2, ensure_ascii=False))
| 28.173913
| 81
| 0.71142
|
4a0740052bfc706a694329fc27f934dc3140e47f
| 5,192
|
py
|
Python
|
mayan/apps/events/views/subscription_views.py
|
bonitobonita24/Mayan-EDMS
|
7845fe0e1e83c81f5d227a16116397a3d3883b85
|
[
"Apache-2.0"
] | 343
|
2015-01-05T14:19:35.000Z
|
2018-12-10T19:07:48.000Z
|
mayan/apps/events/views/subscription_views.py
|
bonitobonita24/Mayan-EDMS
|
7845fe0e1e83c81f5d227a16116397a3d3883b85
|
[
"Apache-2.0"
] | 191
|
2015-01-03T00:48:19.000Z
|
2018-11-30T09:10:25.000Z
|
mayan/apps/events/views/subscription_views.py
|
bonitobonita24/Mayan-EDMS
|
7845fe0e1e83c81f5d227a16116397a3d3883b85
|
[
"Apache-2.0"
] | 114
|
2015-01-08T20:21:05.000Z
|
2018-12-10T19:07:53.000Z
|
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.models import AccessControlList
from mayan.apps.views.generics import FormView
from ..classes import EventType, ModelEventType
from ..forms import (
EventTypeUserRelationshipFormSet, ObjectEventTypeUserRelationshipFormSet
)
from ..models import StoredEventType
from ..permissions import permission_events_view
__all__ = (
'EventTypeSubscriptionListView', 'ObjectEventTypeSubscriptionListView'
)
class EventTypeSubscriptionListView(FormView):
form_class = EventTypeUserRelationshipFormSet
main_model = 'user'
submodel = StoredEventType
def dispatch(self, *args, **kwargs):
EventType.refresh()
return super().dispatch(*args, **kwargs)
def form_valid(self, form):
try:
for instance in form:
instance.save()
except Exception as exception:
messages.error(
message=_(
'Error updating event subscription; %s'
) % exception, request=self.request
)
else:
messages.success(
message=_('Event subscriptions updated successfully'),
request=self.request
)
return super().form_valid(form=form)
def get_extra_context(self):
return {
'form_display_mode_table': True,
'object': self.get_object(),
'title': _(
'Event subscriptions'
) % self.get_object()
}
def get_initial(self):
obj = self.get_object()
initial = []
for element in self.get_queryset():
initial.append({
'user': obj,
'main_model': self.main_model,
'stored_event_type': element,
})
return initial
def get_object(self):
return self.request.user
def get_queryset(self):
# Return the queryset by name from the sorted list of the class
event_type_ids = [event_type.id for event_type in EventType.all()]
# Preserve the queryset order to that of the sorted ID list by
# namespace label and event label.
# Create a conditional statement to annotate each row with the sort
# index number. Then sort the query set by the custom sort index
# field.
when_list = []
for sort_index, event_type_id in enumerate(iterable=event_type_ids):
when_list.append(models.When(name=event_type_id, then=sort_index))
queryset = self.submodel.objects.filter(name__in=event_type_ids)
queryset = queryset.annotate(
sort_index=models.Case(
*when_list, output_field=models.IntegerField()
)
)
return queryset.order_by('sort_index')
class ObjectEventTypeSubscriptionListView(FormView):
form_class = ObjectEventTypeUserRelationshipFormSet
def dispatch(self, *args, **kwargs):
EventType.refresh()
return super().dispatch(*args, **kwargs)
def form_valid(self, form):
try:
for instance in form:
instance.save()
except Exception as exception:
messages.error(
message=_(
'Error updating object event subscription; %s'
) % exception, request=self.request
)
else:
messages.success(
message=_(
'Object event subscriptions updated successfully'
), request=self.request
)
return super().form_valid(form=form)
def get_extra_context(self):
return {
'form_display_mode_table': True,
'object': self.get_object(),
'title': _(
'Event subscriptions for: %s'
) % self.get_object()
}
def get_initial(self):
obj = self.get_object()
initial = []
for element in self.get_queryset():
initial.append(
{
'user': self.request.user,
'object': obj,
'stored_event_type': element,
}
)
return initial
def get_object(self):
object_content_type = get_object_or_404(
klass=ContentType, app_label=self.kwargs['app_label'],
model=self.kwargs['model_name']
)
try:
content_object = object_content_type.get_object_for_this_type(
pk=self.kwargs['object_id']
)
except object_content_type.model_class().DoesNotExist:
raise Http404
AccessControlList.objects.check_access(
obj=content_object, permissions=(permission_events_view,),
user=self.request.user
)
return content_object
def get_queryset(self):
return ModelEventType.get_for_instance(instance=self.get_object())
| 31.08982
| 78
| 0.601888
|
4a0741e48e2f44532d800e8fcef2db9bc5151c71
| 211
|
py
|
Python
|
nipype/utils/__init__.py
|
felixsc1/nipype
|
e722d6170593583f16ddfcb95473e5d30b5f1d7c
|
[
"Apache-2.0"
] | 8
|
2019-05-29T09:38:30.000Z
|
2021-01-20T03:36:59.000Z
|
nipype/utils/__init__.py
|
felixsc1/nipype
|
e722d6170593583f16ddfcb95473e5d30b5f1d7c
|
[
"Apache-2.0"
] | 12
|
2021-03-09T03:01:16.000Z
|
2022-03-11T23:59:36.000Z
|
nipype/utils/__init__.py
|
felixsc1/nipype
|
e722d6170593583f16ddfcb95473e5d30b5f1d7c
|
[
"Apache-2.0"
] | 2
|
2017-09-23T16:22:00.000Z
|
2019-08-01T14:18:52.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .config import NUMPY_MMAP
from .onetime import OneTimeProperty, setattr_on_read
from .tmpdirs import TemporaryDirectory, InTemporaryDirectory
| 30.142857
| 61
| 0.819905
|
4a0741ed06f83ab3883b440c1cc6fec1e19d41e7
| 5,239
|
py
|
Python
|
ACME-oneM2M-CSE-master/tests/testCIN.py
|
Romsi-Occi/Trash-tracker
|
4490fe181b4e3bdaa8575e83b87ac2d12472a9c7
|
[
"BSD-3-Clause"
] | null | null | null |
ACME-oneM2M-CSE-master/tests/testCIN.py
|
Romsi-Occi/Trash-tracker
|
4490fe181b4e3bdaa8575e83b87ac2d12472a9c7
|
[
"BSD-3-Clause"
] | null | null | null |
ACME-oneM2M-CSE-master/tests/testCIN.py
|
Romsi-Occi/Trash-tracker
|
4490fe181b4e3bdaa8575e83b87ac2d12472a9c7
|
[
"BSD-3-Clause"
] | null | null | null |
#
# testCIN.py
#
# (c) 2020 by Andreas Kraft
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# Unit tests for CIN functionality
#
import unittest, sys
sys.path.append('../acme')
from typing import Tuple
from Constants import Constants as C
from Types import ResourceTypes as T, ResponseCode as RC
from init import *
class TestCIN(unittest.TestCase):
ae = None
cnt = None
originator = None
@classmethod
@unittest.skipIf(noCSE, 'No CSEBase')
def setUpClass(cls) -> None:
dct = { 'm2m:ae' : {
'rn' : aeRN,
'api' : 'NMyApp1Id',
'rr' : False,
'srv' : [ '3' ]
}}
cls.ae, rsc = CREATE(cseURL, 'C', T.AE, dct) # AE to work under
assert rsc == RC.created, 'cannot create parent AE'
cls.originator = findXPath(cls.ae, 'm2m:ae/aei')
dct = { 'm2m:cnt' : {
'rn' : cntRN
}}
cls.cnt, rsc = CREATE(aeURL, cls.originator, T.CNT, dct)
assert rsc == RC.created, 'cannot create container'
@classmethod
@unittest.skipIf(noCSE, 'No CSEBase')
def tearDownClass(cls) -> None:
DELETE(aeURL, ORIGINATOR) # Just delete the AE and everything below it. Ignore whether it exists or not
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createCIN(self) -> None:
""" Create a <CIN> resource """
self.assertIsNotNone(TestCIN.ae)
self.assertIsNotNone(TestCIN.cnt)
dct = { 'm2m:cin' : {
'rn' : cinRN,
'cnf' : 'a',
'con' : 'AnyValue'
}}
r, rsc = CREATE(cntURL, TestCIN.originator, T.CIN, dct)
self.assertEqual(rsc, RC.created)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_retrieveCIN(self) -> None:
""" Retrieve <CIN> resource """
_, rsc = RETRIEVE(cinURL, TestCIN.originator)
self.assertEqual(rsc, RC.OK)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_attributesCIN(self) -> None:
""" Test <CIN> attributes """
r, rsc = RETRIEVE(cinURL, TestCIN.originator)
self.assertEqual(rsc, RC.OK)
# TEST attributess
self.assertEqual(findXPath(r, 'm2m:cin/ty'), T.CIN)
self.assertEqual(findXPath(r, 'm2m:cin/pi'), findXPath(TestCIN.cnt,'m2m:cnt/ri'))
self.assertEqual(findXPath(r, 'm2m:cin/rn'), cinRN)
self.assertIsNotNone(findXPath(r, 'm2m:cin/ct'))
self.assertIsNotNone(findXPath(r, 'm2m:cin/lt'))
self.assertIsNotNone(findXPath(r, 'm2m:cin/et'))
self.assertIsNotNone(findXPath(r, 'm2m:cin/st'))
self.assertIsNone(findXPath(r, 'm2m:cin/cr'))
self.assertIsNotNone(findXPath(r, 'm2m:cin/cnf'))
self.assertEqual(findXPath(r, 'm2m:cin/cnf'), 'a')
self.assertIsNotNone(findXPath(r, 'm2m:cin/con'))
self.assertEqual(findXPath(r, 'm2m:cin/con'), 'AnyValue')
self.assertGreater(findXPath(r, 'm2m:cin/cs'), 0)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_updateCIN(self) -> None:
""" Update <CIN> -> Fail """
dct = { 'm2m:cin' : {
'con' : 'NewValue'
}}
r, rsc = UPDATE(cinURL, TestCIN.originator, dct)
self.assertEqual(rsc, RC.operationNotAllowed)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createCINUnderAE(self) -> None:
""" Create <CIN> resource under <AE> -> Fail """
dct = { 'm2m:cin' : {
'rn' : cinRN,
'cnf' : 'a',
'con' : 'AnyValue'
}}
r, rsc = CREATE(aeURL, TestCIN.originator, T.CIN, dct)
self.assertEqual(rsc, RC.invalidChildResourceType)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_deleteCIN(self) -> None:
""" Delete <CIN> resource """
_, rsc = DELETE(cinURL, TestCIN.originator)
self.assertEqual(rsc, RC.deleted)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createCINWithCreatorWrong(self) -> None:
    """ Create <CIN> with creator attribute (wrong) -> Fail """
    # The creator (cr) attribute may only be sent as Null in a CREATE;
    # supplying an actual value must be rejected.
    dct = { 'm2m:cin' : {
        'cr'  : 'wrong',
        'con' : 'AnyValue'
    }}
    r, rsc = CREATE(cntURL, TestCIN.originator, T.CIN, dct)	# Not allowed
    self.assertEqual(rsc, RC.badRequest)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createCINWithCreator(self) -> None:
    """ Create <CIN> with creator attribute set to Null """
    # Sending cr = Null asks the CSE to fill in the originator as creator.
    dct = { 'm2m:cin' : {
        'con' : 'AnyValue',
        'cr'  : None
    }}
    r, rsc = CREATE(cntURL, TestCIN.originator, T.CIN, dct)
    self.assertEqual(rsc, RC.created)
    self.assertEqual(findXPath(r, 'm2m:cin/cr'), TestCIN.originator)	# Creator should now be set to originator
    # Check whether creator is there in a RETRIEVE
    r, rsc = RETRIEVE(f'{cntURL}/{findXPath(r, "m2m:cin/rn")}', TestCIN.originator)
    self.assertEqual(rsc, RC.OK)
    self.assertEqual(findXPath(r, 'm2m:cin/cr'), TestCIN.originator)
# More tests of la, ol etc in testCNT_CNI.py
def run(testVerbosity:int, testFailFast:bool) -> Tuple[int, int, int]:
    """Run all <CIN> test cases in order.

    Returns a tuple of (tests run, errors + failures, skipped).
    """
    suite = unittest.TestSuite()
    for case_name in (
        'test_createCIN',
        'test_retrieveCIN',
        'test_attributesCIN',
        'test_updateCIN',
        'test_createCINUnderAE',
        'test_deleteCIN',
        'test_createCINWithCreatorWrong',
        'test_createCINWithCreator',
    ):
        suite.addTest(TestCIN(case_name))
    runner = unittest.TextTestRunner(verbosity=testVerbosity, failfast=testFailFast)
    result = runner.run(suite)
    printResult(result)
    return result.testsRun, len(result.errors + result.failures), len(result.skipped)
if __name__ == '__main__':
    # Stand-alone invocation: exit code is the number of failed/errored tests.
    _, errors, _ = run(2, True)
    sys.exit(errors)
| 31
| 108
| 0.675511
|
4a07425e938ebb62b10d4051454fc600bd43395f
| 1,227
|
py
|
Python
|
messagebox/tests/test_models.py
|
HelloMelanieC/FiveUp
|
ab97d311f163b09146fe330e4360d8e75d769f95
|
[
"MIT"
] | 12
|
2017-09-10T01:43:42.000Z
|
2020-09-20T01:17:20.000Z
|
messagebox/tests/test_models.py
|
HelloMelanieC/FiveUp
|
ab97d311f163b09146fe330e4360d8e75d769f95
|
[
"MIT"
] | 22
|
2016-12-26T21:46:10.000Z
|
2022-02-10T08:01:52.000Z
|
messagebox/tests/test_models.py
|
HelloMelanieC/FiveUp
|
ab97d311f163b09146fe330e4360d8e75d769f95
|
[
"MIT"
] | 4
|
2017-08-24T16:01:37.000Z
|
2019-02-14T23:50:17.000Z
|
import datetime
from django.test import TestCase
from freezegun import freeze_time
from fuauth.models import User
from ..models import Message
class TestMessageBox(TestCase):
    """
    Basic tests for the message box.
    """

    @freeze_time("2019-12-28 12:31")
    def setUp(self):
        # One user with a single message, created at a frozen timestamp so
        # the string representation below is deterministic.
        self.noof = User.objects.create_user(
            name="Noofie",
            phone_number="777-777-7777",
            carrier="ATT",
            password="password",
            user_timezone="HAWAII",
            email="noofie@emailzzz.com",
        )
        self.message = Message.objects.create(
            recipient=self.noof, message_text="You are beautiful", sender_name="Melanie"
        )

    def test_message_string_repr(self):
        """
        The message object should have a nice string representation.
        """
        assert str(self.message) == "Melanie_2019-12-28 12:31:00+00:00"

    def test_deleting_a_user_deletes_user_messages(self):
        """
        If a user is deleted, all related messages should be deleted as well.
        """
        # Fix: this method was named `deleting_a_user_deletes_user_messages`,
        # missing the `test_` prefix, so the test runner never executed it.
        assert len(Message.objects.filter(recipient=self.noof)) == 1
        self.noof.delete()
        assert len(Message.objects.filter(recipient=self.noof)) == 0
| 27.886364
| 88
| 0.624287
|
4a0743924ed227b6a10b8b7181cb9500864bad84
| 358
|
py
|
Python
|
CellCulturePy/Plotting/__init__.py
|
Douwe-Spaanderman/Broad_DJ_AI
|
d151b35d2c05b7ca12653abca4f73cf438399b0f
|
[
"MIT"
] | null | null | null |
CellCulturePy/Plotting/__init__.py
|
Douwe-Spaanderman/Broad_DJ_AI
|
d151b35d2c05b7ca12653abca4f73cf438399b0f
|
[
"MIT"
] | null | null | null |
CellCulturePy/Plotting/__init__.py
|
Douwe-Spaanderman/Broad_DJ_AI
|
d151b35d2c05b7ca12653abca4f73cf438399b0f
|
[
"MIT"
] | 1
|
2022-03-14T20:07:15.000Z
|
2022-03-14T20:07:15.000Z
|
# from .Feature_importance import ...  # nothing exported here yet
from .heatmap_media_matrix import heatmap_media_matrix
from .maf_info import maf_info
from .Prediction_plotting import Prediction_plotting

# if somebody does "from somepackage import *", this is what they will
# be able to access:
__all__ = [
    'heatmap_media_matrix',
    'maf_info',
    'Prediction_plotting',
]
| 29.833333
| 70
| 0.782123
|
4a07479471734dbfc5ffc559c5d3fe01d657874d
| 946
|
py
|
Python
|
thefuck/rules/gradle_no_task.py
|
Ishaanahuja7/thefuck
|
c719712b6256f4add4e65e8d4369b36d73342b48
|
[
"MIT"
] | 75,504
|
2015-04-08T18:22:19.000Z
|
2022-03-31T23:59:52.000Z
|
thefuck/rules/gradle_no_task.py
|
Ishaanahuja7/thefuck
|
c719712b6256f4add4e65e8d4369b36d73342b48
|
[
"MIT"
] | 1,160
|
2015-04-17T18:47:12.000Z
|
2022-03-30T20:42:26.000Z
|
thefuck/rules/gradle_no_task.py
|
Ishaanahuja7/thefuck
|
c719712b6256f4add4e65e8d4369b36d73342b48
|
[
"MIT"
] | 4,399
|
2015-04-17T18:36:04.000Z
|
2022-03-31T07:01:03.000Z
|
import re
from subprocess import Popen, PIPE
from thefuck.utils import for_app, eager, replace_command
regex = re.compile(r"Task '(.*)' (is ambiguous|not found)")
@for_app('gradle', 'gradlew')
def match(command):
    """Trigger for gradle/gradlew output reporting an unknown or ambiguous task."""
    return regex.findall(command.output)
@eager
def _get_all_tasks(gradle):
    """Yield the names of all tasks known to the given gradle executable.

    Parses the output of ``gradle tasks``: task names appear in sections
    that are introduced by a ``----`` underline and end at the first blank
    line; the section header line itself is filtered out.
    """
    proc = Popen([gradle, 'tasks'], stdout=PIPE)
    in_task_section = False
    for raw_line in proc.stdout.readlines():
        text = raw_line.decode().strip()
        if text.startswith('----'):
            in_task_section = True
        elif not text:
            in_task_section = False
        elif in_task_section and not text.startswith('All tasks runnable from root project'):
            # First whitespace-separated token is the task name.
            yield text.split(' ')[0]
def get_new_command(command):
    """Suggest the closest real gradle task for the one that failed."""
    bad_task = regex.findall(command.output)[0][0]
    gradle_exe = command.script_parts[0]
    candidates = _get_all_tasks(gradle_exe)
    return replace_command(command, bad_task, candidates)
| 27.028571
| 88
| 0.658562
|
4a0747bbf3e2fbe152a310b5e641c1abc0c63176
| 246
|
py
|
Python
|
goodtechgigs/profiles/urls.py
|
aschn/goodtechgigs
|
f2f74e28a5a21e826ded0f09a6029fe6a24d228b
|
[
"Apache-2.0"
] | null | null | null |
goodtechgigs/profiles/urls.py
|
aschn/goodtechgigs
|
f2f74e28a5a21e826ded0f09a6029fe6a24d228b
|
[
"Apache-2.0"
] | null | null | null |
goodtechgigs/profiles/urls.py
|
aschn/goodtechgigs
|
f2f74e28a5a21e826ded0f09a6029fe6a24d228b
|
[
"Apache-2.0"
] | null | null | null |
from rest_framework import routers

from profiles.views import GigSeekerViewSet, GigPosterViewSet

# DRF router exposing the profile endpoints:
#   orgs/    -> organizations posting gigs
#   helpers/ -> people seeking gigs
router = routers.DefaultRouter()
router.register('orgs', GigPosterViewSet)
router.register('helpers', GigSeekerViewSet)

urlpatterns = router.urls
| 24.6
| 61
| 0.829268
|
4a0748dea7f67bf0f6d7a42a0b3100cd97779db9
| 16,176
|
py
|
Python
|
src/devapp/spec/fs_components.py
|
AXGKl/devapps
|
249ff369de512dcde5fdc83e6d461f82ad6c667f
|
[
"BSD-2-Clause"
] | null | null | null |
src/devapp/spec/fs_components.py
|
AXGKl/devapps
|
249ff369de512dcde5fdc83e6d461f82ad6c667f
|
[
"BSD-2-Clause"
] | null | null | null |
src/devapp/spec/fs_components.py
|
AXGKl/devapps
|
249ff369de512dcde5fdc83e6d461f82ad6c667f
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
# Spec Building
## Dev
### Frequent abbrs:
- rt: runtime (e.g. systemd, docker, k8s,)
- cls: A class in a spec python file, denoting a config entity
- fid: Full id of an item (path in the spec : ['Wifi', 'NB01', 'Redis']
"""
# stream of specs - from anywhere, we will run as daemon some day:
# before that its just a one item stream, with item pushed at proc stasrt:
import os
import subprocess as sp
import rx
from devapp.app import app
from devapp.spec import find_paths_in_fs_components
from devapp.spec.tools import FS, full_url_and_path
from devapp.tools import (
dirname,
download_file,
exists,
into,
is_,
sp_call,
to_list,
url_to_dir,
)
from devapp.utils.rx_tools import chain, do_if, threaded
from rx import operators as op
from rx.operators import filter
# Standard base sub-directories of a deployment dir:
base_dirs = 'var', 'data', 'conf', 'log', 'build', 'etc'

# filesystem components as declared in specs (name -> component dict):
fs_comps = {}

# fs method used per component type when the spec does not set one:
default_methods = {'git': 'mount', 'oci': 'overlay', 'conda_env': 'activate'}

# Identity step for flows that don't define a particular hook:
pass_ = lambda a: a

# Small accessors on component/spec dicts:
# fmt:off
d_oci      = lambda r: r['env']['DA_DIR_OCI'] + '/' + r['path']
props      = lambda r: r['spec']['props']
frm_to     = lambda frm, to: {'from': frm, 'to': to}
d_repo     = lambda r: r['env']['DA_DIR_REPOS'] + '/' + r['path']
comp_name  = lambda p: p if is_(p, str) else p['name']
rfs_comps  = lambda r: [fs_comps[comp_name(p)] for p in filesystem(r)]
filesystem = lambda r: props(r).get('filesystem')
d_exe_bare = lambda env: env['DA_DIR_OCI'] + '/executables'
# fmt:on
def d_checkout(repo):
    """Return the checkout directory for *repo*.

    All checkouts live in ONE flat directory, so any leading path of
    repo['path'] is folded into the name after an '@' (with '/' encoded
    as sep_slash) and the branch is appended after sep_branch.
    An explicit repo['checkout_dir'] wins over the derived name.
    """
    if repo.get('checkout_dir'):
        return repo['checkout_dir']
    prefix, name = '', repo['path']
    if '/' in name:
        head, name = name.rsplit('/', 1)
        prefix = '@' + head.replace('/', sep_slash)
    branch = repo['branch'].replace('/', sep_slash)
    return '%s/%s%s%s%s' % (
        repo['env']['DA_DIR_CHECKOUTS'], name, prefix, sep_branch, branch
    )
# we want one flat dir of checkouts. need to name them somewhat sensibly
# they should show the branch they are based on
sep_branch = '#'   # separates checkout name from branch name
sep_slash = '^'    # stands in for '/' inside encoded path/branch parts
root_fs_mount_ph = '$fs'  # placeholder, replaced by the real root fs mount later
# Phase 1: FETCH FS COMPONENTS------------------------------------------------:
# The First Phase: Walking all filesysem components, independent of classes
class FSComponents:
    """
    Namespace for Component Specific Methods

    We often use the word repo for component, for analogy with git.
    Each inner class implements (a subset of) the flow hooks used by
    _flow_by_type: prepare -> get_bare -> into_fs.
    """

    class Defaults:
        # ready to be adapted, e.g. by flags
        exe_fs_method = 'symlink'
        local_unknown_comptype = 'ext_dir'

    class host_system:
        """host packages"""

        def prepare(repo):
            repo['bare_have'] = True
            repo['skip_completion'] = True
            repo['skip_add_fs_presence_infos'] = True
            repo['packages'] = to_list(repo['packages'])
            # just something meaningful, packages are handled by the distri:
            repo['checkout_dir'] = '/usr/bin'
            return repo

        def get_bare(repo):
            return repo

        def _detect_inst_cmd():
            # Probe for the distro's package manager; die if none is found.
            ps = 'dnf', 'yum', 'apt-get'
            for c in ps:
                cmd = getattr(app.sh, c, None)
                if cmd:
                    break
            if not cmd:
                app.die('Could not derive a package installer', tried=ps)
            return (c, 'install', '-y')

        @classmethod
        def into_fs(pkg, repo):
            inst_cmd = pkg._detect_inst_cmd()
            inst_cmd += tuple(repo['packages'])
            st = sp_call(*inst_cmd, as_root=True, get_all=True)
            if st['exit_status']:
                app.die('Package install failed', **st)
            return repo

    class git:
        def get_bare(repo):
            app.sh.git.clone('--bare', repo['url'], d_repo(repo))
            repo['bare_have'] = True
            return repo

        def checkout(repo):
            # Clone from the local bare repo, then switch to the branch.
            d = d_checkout(repo)
            app.sh.git.clone(d_repo(repo), d)
            app.sh.git.checkout(repo['branch'], _cwd=d)
            repo['checkout_have'] = True
            return repo

        def set_remotes(repo):
            # 'local' points at the bare clone, 'origin' at the real upstream.
            d = d_checkout(repo)
            # have = app.sh.git.remotes('-v', _cwd=d)
            app.sh.git.remote.rename('origin', 'local', _cwd=d)
            app.sh.git.remote.add('origin', repo['url'], _cwd=d)
            return repo

        into_fs = [checkout, set_remotes]

    class oci:
        def prepare(repo):
            repo['branch'] = 'oci'
            repo['run_dir'] = root_fs_mount_ph  # will be replaced later
            # oci always are overlayed, no matter the run_dir:
            repo['fs_method'] = repo.get('fs_method') or default_methods['oci']
            return repo

        def pull_layers(repo):
            url = repo['url']
            # app.sh.dpull('-d', repo['bare_dir'], '--repo', url)
            err = sp.call(['dpull', '-d', repo['bare_dir'], '--repo', url])
            if err:
                app.die('could not pull layers', **repo)
            repo['bare_have'] = True
            return repo

        def merge_layers(repo):
            b, c = repo['bare_dir'], repo['checkout_dir']
            err = sp.call(['dmerge', '-d', b, '--method', 'tar', '-t', c])
            if err:
                app.die('could not merge layers', **repo)
            repo['checkout_have'] = True
            return repo

        get_bare = pull_layers
        into_fs = merge_layers

    class mount:
        def prepare(repo):
            # A mount component is fully user-defined: 'from' is mounted to 'to'.
            repo['branch'], n, D = 'mount', repo['name'], repo['env']['DA_DIR']
            repo['base_name'] = repo['name']
            repo['fs_method'] = 'mount'  # checkout will be mounted to run
            repo['run_dir'] = repo['to']
            repo['checkout_dir'] = repo['bare_dir'] = repo['from']
            repo['bare_have'] = repo['checkout_have'] = True
            repo['skip_completion'] = True
            return repo

        def get_bare(repo):
            return repo

    class conda_env:
        def prepare(repo):
            repo['branch'], n, D = 'env', repo['name'], repo['env']['DA_DIR']
            repo['base_name'] = repo['name']
            repo['checkout_dir'] = repo['bare_dir'] = D + '/envs/' + n
            repo['run_dir'] = repo['checkout_dir']
            repo['bare_have'] = True
            repo['skip_completion'] = True
            repo['fs_method'] = 'pass'  # done via link to env's python (in build.sh)
            return repo

        def get_bare(repo):
            return repo

        def into_fs(repo):
            """Create the conda env, then install per-channel packages and pips."""
            D = repo['env']['DA_DIR']
            cmds = [
                '-p',
                D + '/envs/%s' % repo['name'],
                '-y',
                *[l for l in to_list(repo['packages']) if is_(l, str)],
            ]
            for c in to_list(repo.get('add_channels')):
                cmds.insert(0, c)
                cmds.insert(0, '-c')
            app.info('conda create', args=' '.join(cmds))
            app.sh.conda.create(*cmds)
            # Non-string package entries are (channel, pkg, pkg, ...) lists:
            l = [(l[0], to_list(l[1:])) for l in repo['packages'] if not is_(l, str)]
            icmd = app.sh.conda.install
            if l:
                for chan, ps in l:
                    # Fix: previously this logged the *create* args (cmds)
                    # for every install call; log the actual install args.
                    inst_args = ('-c', chan, '-n', repo['name'], '-y', *ps)
                    app.info('conda.install', args=' '.join(inst_args))
                    icmd(*inst_args)
            pips = repo.get('pips', ())
            if pips:
                m = {'DA_DIR': repo['env']['DA_DIR'], 'name': repo['name']}
                pc = '%(DA_DIR)s/envs/%(name)s/bin/pip' % m
                for pip in pips:
                    if sp.call([pc, 'install', pip]):
                        app.die('Pip failed', pip=pip)
            repo['checkout_have'] = True
            return repo

    class ext_dir:
        """Using anywhere on the filesystem"""

        @classmethod
        def get_bare(sl, repo):
            # All dirs point straight at the external directory itself.
            for d in 'bare_dir', 'checkout_dir', 'run_dir', 'path':
                repo[d] = repo['url'].replace('file://', '')
            return repo

    class exe:
        """Simple static binaries, gitlab runner, hashitools in go...
        We pull to bare, copy to checkout, mount or copy to run_dir
        """

        _dest_dir = '/bin/'

        @classmethod
        def fn(exe, where, repo):
            # Full target filename below repo[where]; create parent dirs.
            fn = repo[where] + exe._dest_dir + repo['name']
            if not exists(dirname(fn)):
                os.makedirs(dirname(fn))
            return fn

        def prepare(repo):
            repo['branch'], env = 'exe', repo['env']
            d = url_to_dir(repo['url'])
            for k, m in (('bare_dir', d_exe_bare),):
                repo[k] = '/'.join((m(env), d))
            repo['branch'] = 'exe'
            repo['fs_method'] = repo.get(
                'fs_method', FSComponents.Defaults.exe_fs_method
            )
            return repo

        @classmethod
        def get_bare(exe, repo):
            fn = exe.fn('bare_dir', repo)
            download_file(repo['url'], local_filename=fn)
            return repo

        @classmethod
        def into_fs(exe, repo):
            fn = exe.fn('checkout_dir', repo)
            app.sh.cp('-f', exe.fn('bare_dir', repo), fn)
            app.sh.chmod('+x', fn)
            return repo

    class lib(exe):
        # Like exe, but placed directly into the component dir (no /bin/).
        _dest_dir = '/'
def fetch_fs_comps(env, processing=[]):
    """At spec loading we walked thru all classes and generated the list of
    fs components in use in the spec"""
    # NOTE(review): the mutable default `processing=[]` looks deliberate —
    # it persists across calls and acts as the "already handled" memo used
    # by the filter below. Confirm before "fixing".
    # todo: Node local only comps filter, either here or at spec walk.
    return chain(
        # sort just to get same download behaviour(order) everywhere:
        rx.from_(sorted(fs_comps.items())),
        filter(lambda kv: kv[0] not in processing),
        op.do_action(lambda kv: processing.append(kv[0])),  # don't do twice
        lambda kv: kv[1],  # we drop the name, was just to sort
        lambda repo, env=env: into(repo, 'env', env),
        do_if(_flow_by_type, which=_guess_component_type),
        # ldd, # uncomment to check produced infos per component
    )
# --------------------------------------------------- Helpers for the main flow
def _guess_component_type(repo_as_from_spec):
"""for group by"""
repo = repo_as_from_spec
if 'xhello' in str(repo):
breakpoint()
un, f = repo.get('url') or repo['name'], 'file://'
# local?
un, type = un.replace(f, ''), repo.get('type')
# yes we allow even relative paths:
if un.startswith('/') or un.startswith('./'):
d = FSComponents.Defaults.local_unknown_comptype
if not os.path.exists(un):
app.die('Not found', url=un)
if os.path.exists(un + '/.git') or un.endswith('.git'):
repo['type'] = type = type or 'git'
elif os.path.exists(un + '/.hg'):
app.die('Mercurial not yet supported', d=un)
elif not type:
repo['type'] = type = type or d
return (
type
if type
else 'git'
if (un.endswith('.git') or '.git#' in un)
else 'oci'
if len(un.split(':')) == 2
else None
)
def _flow_by_type(comp_type):
    """flows are so similar, we offer this for oci and git

    Builds the step list run per component: tag with its type, prepare,
    complete/derive dir infos, then fetch bare and materialize into the
    filesystem only when not already present.
    """
    cls = getattr(FSComponents, comp_type)
    # into_fs may be a single callable or a list of steps:
    ifs = getattr(cls, 'into_fs', pass_)
    into_fs = [ifs] if callable(ifs) else ifs
    return [
        lambda repo: into(repo, 'type', comp_type),
        getattr(cls, 'prepare', pass_),
        _complete_repo_infos,
        _add_fs_presence_infos,
        # bare fetches are slow network ops -> run threaded:
        do_if(cls.get_bare, threaded(10), if_=lambda comp: not comp.get('bare_have'),),
        # do_if(*into_fs, if_=lambda comp: not comp.get('checkout_have')),
        do_if(*into_fs, if_=checkout_missing),
        _find_auto_env_paths,
        _drop_env,
    ]
def checkout_missing(comp):
    """True when the component has no checkout present yet."""
    have = comp.get('checkout_have')
    return not have
def _complete_repo_infos(repo):
    """Repo as derived from initial spec walk
    If in spec there was only string, then it has only 'name' set.

    Fills in base_name, branch, url/path, bare/checkout/run dirs.
    """
    type, env, url = repo['type'], repo['env'], repo.get('url') or repo['name']
    if repo.get('skip_completion'):
        return repo
    app.debug('Repo type', type=type, url=url)
    repo['base_name'] = url.rsplit('/', 1)[-1].replace('.git', '')
    # Append a default branch, then take the first two '#'-parts: if the
    # url already carried '#branch', that one wins and the appended
    # 'master' is dropped by the [:2] slice.
    url += '#' + repo.get('branch', 'master')
    url, repo['branch'] = url.split('#')[:2]
    # d = url.split('/')
    # if len(d) > 1 and exists(d[1]):
    #     d = 'file://' + d
    repo['url'], repo['path'] = full_url_and_path(env, url, mode=repo['type'])
    repo['bare_dir'] = repo.get('bare_dir') or (
        d_repo(repo) if type == 'git' else d_oci(repo)
    )
    repo['checkout_dir'] = dc = d_checkout(repo)
    rd = repo.get('run_dir')
    if rd and rd != dc:
        # if there is a run_dir set, we have to mount over:
        # relative run dirs live below <DA_DIR>/run:
        rd = rd if rd[0] in ('/', '$') else ('%(DA_DIR)s/run/' % env + rd)
        repo['run_dir'] = rd
        repo['fs_method'] = repo.get('fs_method') or default_methods[type]
    else:
        # otherwise we'll just run from the checkout dir:
        # no mount required, then, for git
        repo['run_dir'] = dc
    return repo
def _add_fs_presence_infos(repo):
    """Mark whether bare and checkout dirs are already on the filesystem.

    The optional 'checkout_have_check_dir' lets a component refine what
    counts as "checkout present" (useful e.g. for gitlab_runner/hugo,
    with bin and themes repos).
    """
    if repo.get('skip_add_fs_presence_infos'):
        return repo
    if exists(repo['bare_dir']):
        repo['bare_have'] = True
        repo['bare_is_up_to_date'] = 'maybe'
    checkout_probe = repo.get('checkout_have_check_dir') or repo['checkout_dir']
    if exists(checkout_probe):
        repo['checkout_have'] = True
        repo['checkout_is_up_to_date'] = 'maybe'
    return repo
def _find_auto_env_paths(repo):
    """A filesystem layer usually contains stuff processes want to use ;-)
    Find and define those here

    Scans the checkout dir (and any <checkout>/daemon/* subdirs) for bin
    and python paths via the finders from find_paths_in_fs_components.
    """
    cod = repo['checkout_dir']
    sds = [cod]  # searchdirs
    dd = cod + '/daemon'
    # if 'gbase' in str(repo):
    #     breakpoint()
    finders = find_paths_in_fs_components
    if exists(dd):
        # daemon subdirs are searched first (inserted at the front):
        [sds.insert(0, dd + '/' + d) for d in os.listdir(dd)]
    chain(
        rx.from_([(repo, cod, s) for s in sds]),
        finders.find_bin_paths,
        finders.find_py_paths,
    ).run()
    return repo
def _drop_env(repo):
repo.pop('env')
return repo
def check_mk_dirs(env):
    """Ensure required DA_DIR_* entries exist in *env* and on disk.

    OCI and checkouts dirs default to subdirs of DA_DIR; the repos dir is
    mandatory and we die when it is missing. Missing directories are
    created with mkdir -p.
    """
    for k in ('DA_DIR_OCI', 'DA_DIR_CHECKOUTS'):
        # idiom fix: `k not in env` instead of `not k in env`
        if k not in env:
            env[k] = env['DA_DIR'] + '/' + k.rsplit('_', 1)[-1].lower()
    for k in ('DA_DIR_REPOS',):
        if k not in env:
            app.die('Require', env_key=k)
    # idiom fix: plain loop instead of a side-effect list comprehension
    for k in ('DA_DIR_REPOS', 'DA_DIR_OCI', 'DA_DIR_CHECKOUTS'):
        if not exists(env[k]):
            app.sh.mkdir('-p', env[k])
# Phase 2: Apply Per Class ----------------------------------------------------
# 'r' dicts are sent in per class of the spec:
def set_run_and_checkout_dirs(rc):
    """Copy the component's run/checkout dirs into the class env."""
    cls_spec, comp = rc
    cls_spec['env'].update(
        run_dir=comp['run_dir'],
        checkout_dir=comp['checkout_dir'],
    )
    return rc
def set_mount(rc):
    """Register a bind-mount of the checkout dir onto the run dir.

    For 'exe' components the single binary file (not the dir) is mounted.
    Warns (but still records the mount) when bindfs is unavailable.
    """
    spec, comp = rc
    run_d = comp['run_dir']
    co_d = comp['checkout_dir']
    if comp['type'] == 'exe':
        run_d = run_d + '/' + comp['name']
        co_d = co_d + '/' + comp['name']
    if not FS.can_mount():
        app.warn('Require bindfs', frm=run_d, to=co_d)
    spec['fs'][run_d] = {'meth': 'mount', 'from': co_d}
    return rc
def set_overlay(rc):
    """Add the component's checkout as an overlay layer at its run dir.

    Components sharing a mount point accumulate their checkouts in the
    same 'from' layer list.
    """
    spec, comp = rc
    mount_point = comp['run_dir']
    layer = comp['checkout_dir']
    entry = spec['fs'].setdefault(mount_point, {'meth': 'overlay', 'from': []})
    entry['from'].append(layer)
    return rc
def add_framework_dirs(rc):
    """Mount the framework base dirs into the overlayed root filesystem.

    DA_DIR itself is always mounted; any DA_DIR_<name> (base_dirs plus
    'repos') that lives OUTSIDE of DA_DIR gets its own mount entry too.
    """
    spec, _comp = rc
    env = spec['env']
    da_dir = env['DA_DIR']
    spec['fs']['$fs' + da_dir] = {'meth': 'mount', 'from': da_dir}
    for name in list(base_dirs) + ['repos']:
        outside = env.get('DA_DIR_%s' % name.upper())
        # only dirs outside DA_DIR need an extra mount:
        if outside and not outside.startswith(da_dir):
            spec['fs']['$fs' + outside] = {'meth': 'mount', 'from': outside}
    return rc
# Dispatch table: fs_method name -> step(s) applied per component in phase 2.
fs_methods = {'mount': set_mount, 'overlay': [set_overlay, add_framework_dirs]}
def ldd(r):
    # Debug tap: splice into a chain (see commented usage in
    # fetch_fs_comps) to inspect the items flowing through.
    breakpoint()
    return r
def define_fs_stack(r):
    """Phase 2: build r['fs'] from the class' filesystem components.

    For each component the env dirs are set, then the component's
    fs_method (mount/overlay) step from fs_methods is applied.
    """
    chain(
        rx.from_([(r, c) for c in r['fs_comps']]),
        set_run_and_checkout_dirs,
        do_if(fs_methods, which=lambda rc: rc[1].get('fs_method')),
    ).run()
    return r
| 32.095238
| 92
| 0.542223
|
4a0749299dfe22089797241ace12d544de46c8e1
| 32,735
|
py
|
Python
|
payment_system/models.py
|
OlexandrTopuzov/Data_converter
|
0ac2319ccaae790af35ab2202724c65d83d32ecc
|
[
"MIT"
] | null | null | null |
payment_system/models.py
|
OlexandrTopuzov/Data_converter
|
0ac2319ccaae790af35ab2202724c65d83d32ecc
|
[
"MIT"
] | null | null | null |
payment_system/models.py
|
OlexandrTopuzov/Data_converter
|
0ac2319ccaae790af35ab2202724c65d83d32ecc
|
[
"MIT"
] | null | null | null |
import io
import os
import uuid
from calendar import monthrange
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone, translation
from django.utils.translation import gettext, gettext_lazy as _
from weasyprint import HTML
from data_ocean.models import DataOceanModel
from data_ocean.utils import generate_key
from payment_system import emails
from users.models import DataOceanUser
from users.validators import name_symbols_validator, two_in_row_validator
class Project(DataOceanModel):
    """A billable workspace owned by one user, shared with members.

    Carries an API token, a member list (through UserProject) and a
    subscription history (through ProjectSubscription).
    """

    name = models.CharField(_('name'), max_length=50)
    description = models.CharField(max_length=500, blank=True, default='')
    token = models.CharField(max_length=40, unique=True, db_index=True)
    disabled_at = models.DateTimeField(_('disabled_at'), null=True, blank=True, default=None)
    owner = models.ForeignKey('users.DataOceanUser', models.CASCADE,
                              related_name='owned_projects', verbose_name=_('owner'))
    users = models.ManyToManyField('users.DataOceanUser', through='UserProject',
                                   related_name='projects', verbose_name=_('user'))
    subscriptions = models.ManyToManyField('Subscription', through='ProjectSubscription',
                                           related_name='projects',
                                           verbose_name=_('subscriptions'))

    @property
    def frontend_projects_link(self):
        # Frontend URL of the projects list page.
        return f'{settings.FRONTEND_SITE_URL}/system/profile/projects/'

    @property
    def frontend_link(self):
        # Frontend URL of this project's detail page.
        return f'{self.frontend_projects_link}{self.id}/'

    @property
    def is_disabled(self):
        return bool(self.disabled_at)

    def __str__(self):
        return f'{self.name} of {self.owner}'

    def save(self, *args, **kwargs):
        # Lazily assign an API token on first save.
        if not self.token:
            self.generate_new_token()
        super().save(*args, **kwargs)

    def generate_new_token(self):
        """Assign a fresh API token, retrying on (unlikely) collisions."""
        def get_token_safe():
            new_key = generate_key()
            if Project.objects.filter(token=new_key).exists():
                return get_token_safe()
            else:
                return new_key
        self.token = get_token_safe()

    @classmethod
    def create(cls, owner, name, description='', is_default=False):
        """Create a project with its owner membership and default subscription."""
        new_project = Project.objects.create(
            name=name,
            description=description,
            owner=owner,
        )
        new_project.user_projects.create(
            user=owner,
            role=UserProject.OWNER,
            status=UserProject.ACTIVE,
            is_default=is_default,
        )
        new_project.add_default_subscription()
        return new_project

    def add_default_subscription(self) -> 'ProjectSubscription':
        """Attach the (free) default subscription, creating it if missing."""
        default_subscription, created = Subscription.objects.get_or_create(
            is_default=True,
            defaults={
                'requests_limit': 20,
                'platform_requests_limit': 200,
                'name': settings.DEFAULT_SUBSCRIPTION_NAME,
                'grace_period': 30,
            },
        )
        return ProjectSubscription.objects.create(
            project=self,
            subscription=default_subscription,
            status=ProjectSubscription.ACTIVE,
            start_date=timezone.localdate(),
        )

    def invite_user(self, email: str):
        """Invite *email* to the project; re-activates a soft-deleted invite."""
        if self.user_projects.filter(user__email=email).exists():
            raise ValidationError(_('User already in project'))
        if self.invitations.filter(email=email, deleted_at__isnull=True).exists():
            raise ValidationError(_('User already invited'))
        invitation, created = Invitation.include_deleted_objects.get_or_create(
            email=email, project=self,
        )
        if not created:
            # Previously deleted invitation -> restore instead of duplicating.
            invitation.deleted_at = None
            invitation.save(update_fields=['deleted_at', 'updated_at'])
        invitation.send()

    def _check_user_invitation(self, user):
        # Return the user's open invitation or raise a ValidationError.
        try:
            invitation = self.invitations.get(
                email=user.email, deleted_at__isnull=True,
            )
        except Invitation.DoesNotExist:
            raise ValidationError(_('User is not invited'))
        return invitation

    def reject_invitation(self, user):
        invitation = self._check_user_invitation(user)
        invitation.soft_delete()

    def confirm_invitation(self, user):
        """Turn an open invitation into an active membership."""
        invitation = self._check_user_invitation(user)
        if user in self.users.all():
            raise ValidationError(_('User already in project'))
        self.user_projects.create(
            user=user,
            role=UserProject.MEMBER,
            status=UserProject.ACTIVE,
        )
        invitation.soft_delete()
        emails.membership_confirmed(self.owner, user)

    def deactivate_user(self, user_id):
        u2p = self.user_projects.get(user_id=user_id)
        if u2p.role == UserProject.OWNER:
            raise ValidationError(_('You cannot deactivate an owner from his own project'))
        if u2p.status == UserProject.DEACTIVATED:
            raise ValidationError(_('User already deactivated'))
        u2p.status = UserProject.DEACTIVATED
        u2p.save(update_fields=['status', 'updated_at'])
        emails.member_removed(u2p.user, self)

    def activate_user(self, user_id):
        u2p = self.user_projects.get(user_id=user_id)
        if u2p.status == UserProject.ACTIVE:
            raise ValidationError(_('User already activated'))
        u2p.status = UserProject.ACTIVE
        u2p.save(update_fields=['status', 'updated_at'])
        emails.member_activated(u2p.user, self)

    def delete_user(self, user_id):
        u2p = self.user_projects.get(user_id=user_id)
        if u2p.role == UserProject.OWNER:
            raise ValidationError(_('You cannot delete an owner from his own project'))
        u2p.delete()
        emails.member_deleted(u2p.user, self)

    def disable(self):
        # A user's default project can never be disabled.
        for u2p in self.user_projects.all():
            if u2p.is_default:
                raise ValidationError(_('You cannot disable default project'))
        self.disabled_at = timezone.now()
        self.save(update_fields=['disabled_at', 'updated_at'])

    def activate(self):
        self.disabled_at = None
        self.save(update_fields=['disabled_at', 'updated_at'])

    def refresh_token(self):
        """Rotate the API token and notify project members by email."""
        self.generate_new_token()
        self.save(update_fields=['token', 'updated_at'])
        emails.token_has_been_changed(self)

    def has_read_perms(self, user):
        # NOTE(review): raises UserProject.DoesNotExist for non-members —
        # callers appear to rely on membership being established; verify.
        u2p: UserProject = self.user_projects.get(user=user)
        return u2p.status == UserProject.ACTIVE

    def has_write_perms(self, user):
        u2p: UserProject = self.user_projects.get(user=user)
        return u2p.status == UserProject.ACTIVE and u2p.role == UserProject.OWNER

    def add_subscription(self, subscription: 'Subscription', invoice=None):
        """Switch the project to *subscription*.

        If the current subscription is the default one, the new one becomes
        ACTIVE immediately (with a grace period and an invoice); otherwise
        it is queued as FUTURE, starting when the current one expires.
        Raises ValidationError on duplicates, a pending FUTURE subscription,
        or an unpaid grace period anywhere in the owner's projects.
        """
        assert isinstance(subscription, Subscription)
        current_p2s = ProjectSubscription.objects.get(
            project=self,
            status=ProjectSubscription.ACTIVE,
        )
        # if subscription.is_default:
        #     raise ValidationError(_('Can\'t add default subscription'))
        if ProjectSubscription.objects.filter(
            project=self,
            status=ProjectSubscription.FUTURE,
        ).exists():
            raise ValidationError(_('Can\'t add second future subscription'))
        # Block if ANY of the owner's projects sits on an unpaid grace period:
        grace_period_used = []
        for project in self.owner.owned_projects.all():
            grace_period_used.append(
                project.project_subscriptions.filter(
                    subscription__is_default=False,
                    invoices__grace_period_block=True,
                ).exists()
            )
        if any(grace_period_used):
            raise ValidationError(_('Project have subscription on a grace period, can\'t add new subscription'))
        if current_p2s.subscription == subscription:
            raise ValidationError(gettext('Project already on {}').format(subscription.name))
        if current_p2s.subscription.is_default:
            current_p2s.status = ProjectSubscription.PAST
            current_p2s.save()
            new_p2s = ProjectSubscription.objects.create(
                project=self,
                subscription=subscription,
                status=ProjectSubscription.ACTIVE,
                start_date=timezone.localdate(),
                is_grace_period=True,
            )
            if invoice:
                # Re-use the invoice that triggered this switch.
                invoice.project_subscription = new_p2s
                invoice.project_subscription.paid_up()
                invoice.start_date = new_p2s.start_date
                invoice.end_date = new_p2s.generate_expiring_date()
                invoice.save()
            else:
                Invoice.objects.create(project_subscription=new_p2s)
        else:
            if current_p2s.is_grace_period:
                raise ValidationError(_('Project have subscription on a grace period, can\'t add new subscription'))
            new_p2s = ProjectSubscription.objects.create(
                project=self,
                subscription=subscription,
                status=ProjectSubscription.FUTURE,
                start_date=current_p2s.expiring_date,
                is_grace_period=True,
            )
        emails.new_subscription(new_p2s)
        return new_p2s

    def remove_future_subscription(self):
        try:
            future_p2s = self.project_subscriptions.get(status=ProjectSubscription.FUTURE)
        except ProjectSubscription.DoesNotExist:
            raise ValidationError(_('Project don\'t have future subscription'))
        future_p2s.delete()

    @property
    def is_active(self):
        return self.disabled_at is None

    # @property
    # def owner(self):
    #     return self.user_projects.get(
    #         role=UserProject.OWNER,
    #     ).user

    @property
    def active_subscription(self) -> 'Subscription':
        return self.active_p2s.subscription

    @property
    def active_p2s(self) -> 'ProjectSubscription':
        # Exactly one ACTIVE ProjectSubscription is expected per project.
        return self.project_subscriptions.get(status=ProjectSubscription.ACTIVE)

    class Meta:
        verbose_name = _('project')
        verbose_name_plural = _('projects')
class Subscription(DataOceanModel):
    """A purchasable plan: request limits, price and billing period.

    Exactly one subscription may be the default (free) one; custom
    subscriptions are hidden from the public plan list.
    """

    MONTH_PERIOD = 'month'
    YEAR_PERIOD = 'year'
    PERIODS = (
        (MONTH_PERIOD, 'Month'),
        (YEAR_PERIOD, 'Year'),
    )
    name = models.CharField(_('name'), max_length=50, unique=True)
    description = models.TextField(_('description'), blank=True, default='')
    price = models.PositiveIntegerField(_('price'), default=0)
    requests_limit = models.IntegerField(_('requests limit'), help_text='Limit for API requests from the project')
    platform_requests_limit = models.IntegerField(
        _('platform requests limit'),
        help_text='Limit for API requests from the project via platform',
    )
    periodicity = models.CharField(max_length=5, choices=PERIODS, default=MONTH_PERIOD, help_text='days')
    grace_period = models.SmallIntegerField(_('grace_period'), default=10, help_text='days')
    is_custom = models.BooleanField(
        _('is custom'), blank=True, default=False,
        help_text='Custom subscription not shown to users',
    )
    is_default = models.BooleanField(_('is default'), blank=True, default=False)
    pep_checks = models.BooleanField(
        blank=True, default=False,
        help_text='Allow to use api/pep/check/ endpoint',
    )
    pep_checks_per_minute = models.PositiveSmallIntegerField(default=0)
    pep_db_downloading = models.BooleanField(blank=True, default=False)
    position = models.PositiveSmallIntegerField(default=1, help_text='Position of subscription on frontend')
    yearly_subscription = models.ForeignKey(
        'self', models.PROTECT, blank=True, null=True, default=None,
        help_text='Related yearly subscription. Example: Business -> Business +',
    )

    @classmethod
    def get_default_subscription(cls):
        return cls.objects.get(is_default=True)

    def validate_unique(self, exclude=None):
        super().validate_unique(exclude)
        # check only one default subscription
        if self.is_default:
            exists = Subscription.objects.filter(is_default=True).exclude(pk=self.pk).exists()
            if exists:
                raise ValidationError(_('Default subscription already exists'))

    def save(self, *args, **kwargs):
        # Enforce the single-default invariant on every save path.
        self.validate_unique()
        super().save(*args, **kwargs)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ['position', 'price']
        verbose_name = _('subscription')
        verbose_name_plural = _('subscriptions')
class Invoice(DataOceanModel):
    """Billing document issued for a project's subscription.

    Subscription, project and payer details are denormalized onto the
    invoice when it is created (see ``save`` / ``update_payer_info``) so the
    document stays stable even if the related objects change later.
    """

    # Date the payment was registered; None means the invoice is unpaid.
    paid_at = models.DateField(
        _('paid at'),
        null=True, blank=True,
        help_text='This operation is irreversible, you cannot '
                  'cancel the payment of the subscription for the project.'
    )
    payment_registration_date = models.DateField('payment registration date', auto_now_add=True)
    # Random token embedded in the public PDF URL (see `link`) so the
    # invoice cannot be fetched by guessing sequential ids.
    token = models.UUIDField(db_index=True, default=uuid.uuid4, blank=True)
    project_subscription = models.ForeignKey(
        'ProjectSubscription', on_delete=models.PROTECT,
        related_name='invoices',
        verbose_name=_('project`s subscription'),
    )
    grace_period_block = models.BooleanField(
        _('is grace period blocked'),
        blank=True, default=True,
        help_text='If set to False, then the user will be allowed '
                  'to use "grace period" again'
    )
    note = models.TextField(blank=True, default='')
    # information about payment
    # (snapshot fields filled from the project subscription on creation)
    start_date = models.DateField(_('start date'))
    end_date = models.DateField(_('end date'))
    requests_limit = models.IntegerField(_('requests limit'))
    subscription_name = models.CharField(_("subscription`s name"), max_length=200)
    project_name = models.CharField(_("project`s name"), max_length=100)
    is_custom_subscription = models.BooleanField(_("is subscription custom"), )
    price = models.IntegerField(_("price"))
    # Payer details copied from the project owner (see update_payer_info).
    iban = models.CharField(blank=True, max_length=29)
    person_status = models.CharField(blank=True, max_length=23)
    company_address = models.CharField(blank=True, max_length=150)
    identification_code = models.CharField(blank=True, max_length=10)
    mfo = models.CharField(blank=True, max_length=6)
    company_name = models.CharField(blank=True, max_length=300)
    email = models.EmailField(blank=True)
    full_name = models.CharField(blank=True, max_length=300)

    @property
    def link(self):
        """Token-protected URL for downloading this invoice as PDF."""
        return reverse('payment_system:invoice_pdf', args=[self.id, self.token])

    @property
    def is_paid(self):
        """True once a payment date has been recorded."""
        return bool(self.paid_at)

    @property
    def tax(self):
        """Tax amount; currently hard-coded to 0 (20% computation disabled)."""
        # if not self.price:
        #     return 0
        # return round(self.price * 0.2, 2)
        return 0

    @property
    def price_with_tax(self):
        """Total to pay; equals `price` while `tax` is disabled."""
        # if not self.price:
        #     return 0
        # return round(self.price + self.tax, 2)
        return self.price

    @property
    def grace_period_end_date(self):
        """Last day the invoice may be paid before the grace period runs out.

        Overdue invoices get the extended settings.OVERDUE_INVOICE_DATE_INCREASE
        window; otherwise the subscription's own grace period applies.
        """
        if self.is_overdue:
            return self.start_date + timezone.timedelta(days=settings.OVERDUE_INVOICE_DATE_INCREASE)
        return self.start_date + timezone.timedelta(days=self.project_subscription.grace_period)

    @property
    def is_overdue(self):
        """True when the related subscription already lapsed while unpaid."""
        return self.project_subscription.status == ProjectSubscription.PAST and self.project_subscription.is_grace_period

    def save(self, *args, **kwargs):
        """Create or update the invoice.

        Creation snapshots subscription/project/payer data onto the row and
        e-mails the new invoice. On update, a transition from unpaid to paid
        while the subscription is in its grace period confirms the payment:
        overdue invoices re-attach the subscription to the project, on-time
        ones simply mark the subscription as paid up.
        """
        p2s = self.project_subscription
        if getattr(self, 'id', False):
            invoice_old = Invoice.objects.get(pk=self.pk)
            # Payment was just registered (unpaid -> paid) during grace period.
            if p2s.is_grace_period and not invoice_old.is_paid and self.is_paid:
                if self.is_overdue:
                    self.grace_period_block = False
                    #self.project_subscription.is_grace_period = False
                    # Persist the flag before re-adding the subscription;
                    # the remaining fields are saved by the super().save below.
                    super().save(update_fields=['grace_period_block'])
                    p2s.project.add_subscription(subscription=p2s.subscription, invoice=self)
                    emails.payment_confirmed(self.project_subscription)
                else:
                    p2s.paid_up()
                    self.grace_period_block = False
                    emails.payment_confirmed(p2s)
                self.payment_registration_date = timezone.localdate()
            # else:
            #     if self.grace_period_block and not invoice_old.grace_period_block:
            #         p2s.is_grace_period = False
            #         p2s.save()
            #     elif not self.disable_grace_period_block and invoice_old.disable_grace_period_block:
            #         p2s.is_grace_period = True
            #         p2s.save()
            super().save(*args, **kwargs)
        else:
            # New invoice: snapshot everything from the project subscription.
            self.start_date = p2s.start_date
            self.end_date = p2s.generate_expiring_date()
            self.requests_limit = p2s.subscription.requests_limit
            self.subscription_name = p2s.subscription.name
            self.project_name = p2s.project.name
            self.price = p2s.subscription.price
            self.is_custom_subscription = p2s.subscription.is_custom
            self.update_payer_info()
            super().save(*args, **kwargs)
            emails.new_invoice(self, p2s.project)

    def update_payer_info(self, user=None):
        """Copy the payer's billing details onto the invoice.

        Defaults to the owner of the invoiced project when `user` is omitted.
        Does NOT save — callers persist the change themselves.
        """
        if user is None:
            user = self.project_subscription.project.owner
        self.email = user.email
        self.full_name = user.get_full_name()
        self.iban = user.iban
        self.person_status = user.person_status
        self.company_address = user.company_address
        self.identification_code = user.identification_code
        self.mfo = user.mfo
        self.company_name = user.company_name

    def get_pdf(self, user=None) -> io.BytesIO:
        """Render the invoice to a PDF and return it as an in-memory file.

        For unpaid invoices the payer info (and, when overdue, the start
        date) is refreshed and persisted before rendering. Rendering goes
        through the `HTML` renderer — presumably WeasyPrint; confirm.
        """
        if user is None:
            user = self.project_subscription.project.owner
        current_date = timezone.localdate()
        if self.is_overdue:
            # Overdue invoices are re-dated to "today" before rendering.
            self.start_date = current_date
        if not self.is_paid:
            self.update_payer_info()
            self.save()
        # Invoices are always rendered in Ukrainian regardless of the
        # requester's locale.
        with translation.override('uk'):
            html_string = render_to_string('payment_system/invoice.html', {
                'invoice': self,
                'user': user,
            })
        html = HTML(string=html_string, base_url=os.path.join(settings.BASE_DIR, 'payment_system'))
        result = html.write_pdf()
        file = io.BytesIO(result)
        file.name = 'Invoice from ' + str(self.created_at) + '.pdf'
        file.seek(0)
        return file

    def __str__(self):
        return f'Invoice #{self.id}'

    class Meta:
        ordering = ['-created_at']
        verbose_name = _('invoice')
        verbose_name_plural = _('invoices')
class UserProject(DataOceanModel):
    """Membership link between a user and a project (role + status).

    A user may belong to many projects but can mark only one of them as
    the default (enforced in validate_unique / save).
    """

    OWNER = 'owner'
    MEMBER = 'member'
    ROLES = (
        (OWNER, 'Owner'),
        (MEMBER, 'Member'),
    )
    ACTIVE = 'active'
    DEACTIVATED = 'deactivated'
    STATUSES = (
        (ACTIVE, 'Active'),
        (DEACTIVATED, "Deactivated"),
    )
    user = models.ForeignKey('users.DataOceanUser', on_delete=models.CASCADE,
                             related_name='user_projects')
    project = models.ForeignKey('Project', on_delete=models.CASCADE, related_name='user_projects')
    role = models.CharField(choices=ROLES, max_length=20)
    status = models.CharField(choices=STATUSES, max_length=11)
    is_default = models.BooleanField(blank=True, default=False)

    def validate_unique(self, exclude=None):
        """Extend model validation: at most one default project per user."""
        super().validate_unique(exclude)
        # Validate only one default project
        if self.is_default:
            default_count = UserProject.objects.filter(
                user=self.user, is_default=True,
            ).exclude(id=self.id).count()
            if default_count:
                raise ValidationError(_('User can only have one default project'))

    def save(self, *args, **kwargs):
        """Persist membership, enforcing the single-default-project rule."""
        self.validate_unique()
        super().save(*args, **kwargs)

    def __str__(self):
        return f'User {self.user.get_full_name()} in Project {self.project.name} of {self.project.owner}'

    class Meta:
        # A user can join each project at most once.
        unique_together = [['user', 'project']]
        ordering = ['id']
class ProjectSubscription(DataOceanModel):
    """Link between a project and a subscription plan over a billing period.

    A project has at most one ACTIVE and one FUTURE subscription at a time
    (enforced in validate_unique); lapsed periods are kept with status PAST.
    Plan attributes (periodicity, limits, grace period) are snapshotted onto
    this row so later plan edits do not affect running periods.
    """

    ACTIVE = 'active'
    PAST = 'past'
    FUTURE = 'future'
    STATUSES = (
        (ACTIVE, _('Active')),
        (PAST, _('Past')),
        (FUTURE, _('Future')),
    )
    project = models.ForeignKey('Project', on_delete=models.CASCADE,
                                related_name='project_subscriptions')
    subscription = models.ForeignKey('Subscription', on_delete=models.PROTECT,
                                     related_name='project_subscriptions')
    status = models.CharField(choices=STATUSES, max_length=10, db_index=True)
    # Day-of-month the billing cycle anchors to (1..31, clamped to each
    # month's last day by increase_term).
    start_day = models.SmallIntegerField()
    start_date = models.DateField()
    expiring_date = models.DateField()
    # Next date the request counters are refreshed (monthly, even for
    # yearly plans — see renewal()).
    renewal_date = models.DateField()
    # True while the period awaits payment; cleared by paid_up().
    is_grace_period = models.BooleanField(blank=True, default=True)
    requests_left = models.IntegerField()
    requests_used = models.IntegerField(blank=True, default=0)
    platform_requests_left = models.IntegerField()
    platform_requests_used = models.IntegerField(blank=True, default=0)
    periodicity = models.CharField(max_length=5, choices=Subscription.PERIODS)
    grace_period = models.SmallIntegerField(help_text='days')
    pep_checks_count_per_minute = models.PositiveSmallIntegerField(default=0)
    pep_checks_minute = models.PositiveIntegerField(default=0)

    @staticmethod
    def increase_term(date, start_day, period='month'):
        """Return `date` advanced by one month or one year.

        The returned day is `start_day`, clamped to the target month's last
        day (e.g. anchor 31 yields Feb 28/29). Raises ValueError for an
        unsupported `period`.
        """
        assert 1 <= start_day <= 31
        year = date.year
        month = date.month
        if period == 'month':
            if month == 12:
                year += 1
                month = 1
            else:
                month += 1
        elif period == 'year':
            year += 1
        else:
            raise ValueError(f'period = "{period}" not supported!')
        last_day_of_month = monthrange(year, month)[1]
        if start_day > last_day_of_month:
            day = last_day_of_month
        else:
            day = start_day
        return date.replace(year, month, day)

    def generate_expiring_date(self):
        """Return the natural end of the current period per the periodicity."""
        if self.periodicity == Subscription.MONTH_PERIOD:
            return self.increase_term(self.start_date, self.start_day, 'month')
        elif self.periodicity == Subscription.YEAR_PERIOD:
            return self.increase_term(self.start_date, self.start_day, 'year')
        else:
            raise ValueError(f'periodicity = "{self.periodicity}" not supported!')

    def update_expiring_date(self):
        """Recompute expiring_date.

        Unpaid non-default subscriptions expire when the grace period ends;
        default and paid ones expire at the natural end of the period.
        """
        if self.subscription.is_default:
            self.expiring_date = self.generate_expiring_date()
        elif self.is_grace_period:
            self.expiring_date = self.start_date + timezone.timedelta(days=self.grace_period)
        else:
            self.expiring_date = self.generate_expiring_date()

    def validate_unique(self, exclude=None):
        """Forbid two ACTIVE or two FUTURE subscriptions on one project."""
        super().validate_unique(exclude)

        def check_unique_status(status):
            # Raises ValidationError with the human-readable status label.
            if self.status == status:
                is_exists = ProjectSubscription.objects.filter(
                    project=self.project,
                    status=status,
                ).exclude(pk=self.pk).exists()
                if is_exists:
                    for code, verbose in self.STATUSES:
                        if code == status:
                            status = verbose
                            break
                    raise ValidationError(gettext('Only one {} subscription in project').format(status))

        check_unique_status(ProjectSubscription.ACTIVE)
        check_unique_status(ProjectSubscription.FUTURE)

    @property
    def payment_date(self):
        """Date payment is due: period start while unpaid, else its end."""
        if self.is_grace_period:
            return self.start_date
        return self.expiring_date

    @property
    def payment_overdue_days(self):
        """Days the active period has gone unpaid, or None if not applicable."""
        if self.is_grace_period and self.status == ProjectSubscription.ACTIVE:
            return (timezone.localdate() - self.start_date).days
        return None

    @property
    def is_paid(self):
        """True when the most recent invoice for this period is paid."""
        last_invoice = self.invoices.order_by('-created_at').first()
        return last_invoice and last_invoice.is_paid

    def paid_up(self):
        """Confirm payment: leave grace period and extend to the full term."""
        assert self.is_grace_period
        self.is_grace_period = False
        self.update_expiring_date()
        self.save()

    def reset(self):
        """Roll over into a fresh billing period (does not save).

        Re-snapshots limits/periodicity from the plan, re-enters the grace
        period and shifts the window so the old expiry becomes the new start.
        """
        self.is_grace_period = True
        self.requests_left = self.subscription.requests_limit
        self.requests_used = 0
        self.platform_requests_left = self.subscription.platform_requests_limit
        self.platform_requests_used = 0
        self.periodicity = self.subscription.periodicity
        self.grace_period = self.subscription.grace_period
        self.start_date = self.expiring_date
        self.update_expiring_date()
        self.renewal_date = self.increase_term(self.start_date, self.start_day, 'month')

    def renewal(self):
        """Monthly refresh of request counters for non-monthly plans.

        Monthly plans get fresh limits via expire()/reset(), so this is a
        no-op for them.
        """
        if self.periodicity == Subscription.MONTH_PERIOD:
            return
        self.renewal_date = self.increase_term(self.renewal_date, self.start_day, 'month')
        self.requests_left = self.subscription.requests_limit
        self.requests_used = 0
        self.platform_requests_left = self.subscription.platform_requests_limit
        self.platform_requests_used = 0
        self.save()

    def expire(self):
        """Handle the end of the current period.

        If a FUTURE subscription is queued it takes over (invoicing it unless
        it is the free default plan). Otherwise: default plans simply roll
        over; paid plans roll over with a new invoice when paid, or are
        demoted to the default plan (with a non-payment e-mail) when the
        grace period lapsed without payment.
        """
        try:
            future_p2s = ProjectSubscription.objects.get(
                project=self.project,
                status=ProjectSubscription.FUTURE,
            )
        except ProjectSubscription.DoesNotExist:
            if self.subscription.is_default:
                self.reset()
                self.save()
            else:
                if self.is_grace_period:
                    self.status = ProjectSubscription.PAST
                    self.save()
                    self.project.add_default_subscription()
                    emails.project_non_payment(self.project)
                else:
                    self.reset()
                    self.save()
                    Invoice.objects.create(project_subscription=self)
        else:
            self.status = ProjectSubscription.PAST
            self.save()
            future_p2s.status = ProjectSubscription.ACTIVE
            future_p2s.save()
            if not future_p2s.subscription.is_default:
                Invoice.objects.create(project_subscription=future_p2s)

    @classmethod
    def send_tomorrow_payment_emails(cls):
        """E-mail reminders for unpaid active subscriptions nearing expiry.

        NOTE(review): the filter targets expiry in 2 days although the name
        says "tomorrow" — confirm which is intended.
        """
        project_subscriptions_for_update = cls.objects.filter(
            expiring_date=timezone.localdate() + timezone.timedelta(days=2),
            status=ProjectSubscription.ACTIVE,
            is_grace_period=True,
            subscription__is_default=False,
        )
        for p2s in project_subscriptions_for_update:
            emails.tomorrow_payment_day(p2s)

    @classmethod
    def update_expire_subscriptions(cls) -> str:
        """Daily maintenance: reminders, expiries and monthly renewals.

        Returns a human-readable summary string for the scheduler's log.
        """
        cls.send_tomorrow_payment_emails()
        today = timezone.localdate()
        project_subscriptions_for_expire = cls.objects.filter(
            expiring_date__lte=today,
            status=ProjectSubscription.ACTIVE,
        )
        i = 0
        for p2s in project_subscriptions_for_expire:
            p2s.expire()
            i += 1
        msg = ''
        if i == 0:
            msg += 'No subscriptions to expire'
        else:
            msg += f'{i} subscriptions expired'
        project_subscriptions_for_renewal = cls.objects.filter(
            renewal_date__lte=today,
            status=ProjectSubscription.ACTIVE,
        )
        i = 0
        for p2s in project_subscriptions_for_renewal:
            p2s.renewal()
            i += 1
        if i == 0:
            msg += '\nNo subscriptions to renewal'
        else:
            msg += f'\n{i} subscriptions renewed'
        return msg

    def save(self, *args, **kwargs):
        """Persist; on first save, snapshot plan attributes and derive dates."""
        if not getattr(self, 'id', None):
            # if create
            self.requests_left = self.subscription.requests_limit
            self.platform_requests_left = self.subscription.platform_requests_limit
            self.start_day = self.start_date.day
            self.periodicity = self.subscription.periodicity
            self.grace_period = self.subscription.grace_period
            self.update_expiring_date()
            self.renewal_date = self.increase_term(self.start_date, self.start_day, 'month')
        self.validate_unique()
        super().save(*args, **kwargs)

    @property
    def latest_invoice(self) -> Invoice:
        """Most recently created invoice for this period (or None)."""
        return self.invoices.order_by('-created_at').first()

    def __str__(self):
        return f'{self.project.owner} | {self.project.name} | {self.subscription}'

    class Meta:
        verbose_name = "relation between the project and its subscriptions"
        ordering = ['-created_at']
class Invitation(DataOceanModel):
    """Pending e-mail invitation of a user into a project."""

    email = models.EmailField(_('email'))
    project = models.ForeignKey('Project', models.CASCADE, related_name='invitations',
                                verbose_name=_('project'))
    # who_invited = models.ForeignKey('users.DataOceanUser', models.CASCADE,
    #                                 related_name='who_invitations')

    def send(self):
        """E-mail the invitation unless the record was soft-deleted."""
        # is_deleted presumably comes from DataOceanModel's soft-delete
        # support — confirm against the base model.
        if not self.is_deleted:
            emails.new_invitation(self)

    def __str__(self):
        return f'Invite {self.email} on {self.project}'

    class Meta:
        # The same address can be invited to a given project only once.
        unique_together = [['email', 'project']]
        verbose_name = _('invitation')
        verbose_name_plural = _('invitations')
class CustomSubscriptionRequest(DataOceanModel):
    """Sales lead: a request for an individually priced subscription plan.

    May be submitted anonymously (user is optional); staff flip
    `is_processed` once handled.
    """

    first_name = models.CharField(max_length=30, validators=[
        name_symbols_validator,
        two_in_row_validator,
    ])
    last_name = models.CharField(max_length=150, validators=[
        name_symbols_validator,
        two_in_row_validator,
    ])
    email = models.EmailField()
    phone = models.CharField(max_length=15, blank=True, default='')
    note = models.TextField(blank=True, default='')
    # Optional link to the authenticated requester.
    user = models.ForeignKey(
        'users.DataOceanUser', on_delete=models.PROTECT,
        blank=True, default=None, null=True,
        related_name='custom_subscription_requests'
    )
    is_processed = models.BooleanField(blank=True, default=False)

    @property
    def full_name(self):
        """Requester's name as "First Last"."""
        return f'{self.first_name} {self.last_name}'

    def __str__(self):
        return f'{self.full_name} <{self.email}>'

    def save(self, *args, **kwargs):
        """Persist and notify staff by e-mail on first save only."""
        # Capture "existed before save" now: after super().save() a new
        # record has an id too.
        is_existing_record = getattr(self, 'id', None)
        super().save(*args, **kwargs)
        if not is_existing_record:
            emails.new_custom_sub_request(self)

    class Meta:
        # Unprocessed requests first, newest first within each group.
        ordering = ['is_processed', '-created_at']
        verbose_name = _('custom subscription request')
        verbose_name_plural = _('custom subscription requests')
class InvoiceReport(models.Model):
    """Daily aggregate of invoice payment activity, also e-mailed to staff."""

    created_at = models.DateField(auto_now_add=True)
    should_complete_count = models.SmallIntegerField(default=0)
    was_complete_count = models.SmallIntegerField(default=0)
    was_overdue_count = models.SmallIntegerField(default=0)
    was_overdue_grace_period_count = models.SmallIntegerField(default=0)

    @classmethod
    def create_daily_report(cls):
        """Bucket all invoices by today's payment status and persist counts.

        Buckets (unpaid unless noted):
        - should_complete: payment due today (start_date == today)
        - was_overdue: due 2 days ago and still unpaid
        - was_overdue_grace_period: grace window ends today
        - was_complete: paid, payment registered yesterday

        NOTE(review): iterates every invoice in the table; consider
        filtering in the DB if the table grows large.
        """
        invoices = {
            'should_complete': [],
            'was_complete': [],
            'was_overdue': [],
            'was_overdue_grace_period': [],
        }
        for invoice in Invoice.objects.all():
            # NOTE(review): loop-invariant; could be hoisted above the loop.
            current_date = timezone.localdate()
            if invoice.paid_at is None:
                if invoice.start_date == current_date:
                    invoices['should_complete'].append(invoice)
                elif invoice.start_date == current_date - timezone.timedelta(days=2):
                    invoices['was_overdue'].append(invoice)
                elif current_date == invoice.grace_period_end_date:
                    invoices['was_overdue_grace_period'].append(invoice)
            elif invoice.payment_registration_date == current_date - timezone.timedelta(days=1):
                invoices['was_complete'].append(invoice)
        cls.objects.create(
            should_complete_count=len(invoices['should_complete']),
            was_complete_count=len(invoices['was_complete']),
            was_overdue_count=len(invoices['was_overdue']),
            was_overdue_grace_period_count=len(invoices['was_overdue_grace_period']),
        )
        emails.create_report(invoices)
| 36.988701
| 121
| 0.634092
|
4a074a13308b6162ce83bbbbb038a478dd4a44ac
| 5,943
|
py
|
Python
|
data_vn.py
|
HariWu1995/WaveNet4Vietnamese
|
151b29f553cf8b17585f1d7c8374a035e9e958e3
|
[
"Apache-2.0"
] | null | null | null |
data_vn.py
|
HariWu1995/WaveNet4Vietnamese
|
151b29f553cf8b17585f1d7c8374a035e9e958e3
|
[
"Apache-2.0"
] | null | null | null |
data_vn.py
|
HariWu1995/WaveNet4Vietnamese
|
151b29f553cf8b17585f1d7c8374a035e9e958e3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import sugartensor as tf
import numpy as np
import re as regex # REGular EXpression
import csv, string, codecs, random, librosa, time
from collections import Counter
from six.moves import cPickle, reduce, map
import sys
try:
if sys.version_info[0] == '3': # Python3
sys.stdin = sys.stdin.detach()
sys.stdout = sys.stdout.detach()
elif sys.version_info[0] == '2': # Python2
sys.stdout = codecs.getwriter('utf_8')(sys.stdout)
sys.stdin = codecs.getreader('utf_8')(sys.stdin)
except:
print('Cannot handle utf-8 with sys.stdin/stdout')
__author__ = 'hari.wu.95@gmail.com'
# default data path
data_path = 'asset/data/'
# vocabulary table
index2byte = [ # 91 characters
u' ', # [0]
u'A', u'À', u'Á', u'Ã', u'Ả', u'Ạ', # [1]
u'Ă', u'Ằ', u'Ắ', u'Ẵ', u'Ẳ', u'Ặ',
u'Â', u'Ầ', u'Ấ', u'Ẫ', u'Ẩ', u'Ậ',
u'B', # [19]
u'C', # u'CH',
u'D', # [21]
u'Đ',
u'E', u'È', u'É', u'Ẽ', u'Ẻ', u'Ẹ', # [23]
u'Ê', u'Ề', u'Ế', u'Ễ', u'Ể', u'Ệ',
u'G', # u'GH', u'GI', # [35]
u'H',
u'I', u'Ì', u'Í', u'Ĩ', u'Ỉ', u'Ị', # u'IA', u'IÊ',
u'K', # u'KH', # [43]
u'L',
u'M',
u'N', # u'NH', u'NG', u'NGH',
u'O', u'Ò', u'Ó', u'Õ', u'Ỏ', u'Ọ', # [47]
u'Ô', u'Ồ', u'Ố', u'Ỗ', u'Ổ', u'Ộ',
u'Ơ', u'Ờ', u'Ớ', u'Ỡ', u'Ở', u'Ợ',
u'P', # u'PH', # [65]
u'Q', # u'QU',
u'R',
u'S',
u'T', # u'TH', u'TR', # [69]
u'U', u'Ù', u'Ú', u'Ũ', u'Ủ', u'Ụ', # u'UA', u'UÔ',
u'Ư', u'Ừ', u'Ứ', u'Ữ', u'Ử', u'Ự', # u'ưa', u'ươ',
u'V', # [82]
u'X',
u'Y', u'Ỳ', u'Ý', u'Ỹ', u'Ỷ', u'Ỵ', # u'YÊ',
'<EMP>' # EoS - End of String # [90]
]
for i, char in enumerate(index2byte):
index2byte[i] = char.lower()
# accents table
accents = ['̀', '́', '̃', '̉', '̣']
# byte-to-index mapping
byte2index = {}
for i, char in enumerate(index2byte):
byte2index[char] = i
# vocabulary size
vocab_size = len(index2byte)
# print('#vocab', vocab_size)
# convert sentence to index list
def str2index(str_):
    """Convert a sentence into a list of vocabulary indices.

    Whitespace is normalized, punctuation stripped and the text lower-cased
    before lookup; characters missing from the vocabulary (OOV) are dropped.
    """
    # clean white space
    str_ = u' '.join(str_.split())
    # remove punctuations like ',', '.', '?', '!', etc
    # str_ = str_.translate(None, string.punctuation)  # Python2
    str_ = str_.translate(str.maketrans(u'', u'', string.punctuation))  # Python3
    # make lower case
    str_ = str_.lower()
    res = []
    for char in str_:
        try:
            res.append(byte2index[char])
        except KeyError:
            # drop OOV (Out-Of-Vocabulary) characters.
            # BUG FIX: the original did `pass` and then unconditionally
            # appended `idx`, which re-appended the previous character's
            # index (or raised NameError if the first character was OOV).
            continue
    return res
# Convert accent to non-accent Vietnamese
def de_accent_vnese(s, mode='1-byte-keystroke'):
    """Replace toned Vietnamese vowels with their base letters.

    With mode='ascii' the letter 'đ' is additionally mapped to 'd';
    the input is lower-cased first.
    """
    tone_groups = [
        (u'[àáạảã]', 'a'),
        (u'[ầấậẩẫ]', 'â'),
        (u'[ằắặẳẵ]', 'ă'),
        (u'[èéẹẻẽ]', 'e'),
        (u'[ềếệểễ]', 'ê'),
        (u'[òóọỏõ]', 'o'),
        (u'[ồốộổỗ]', 'ô'),
        (u'[ờớợởỡ]', 'ơ'),
        (u'[ìíịỉĩ]', 'i'),
        (u'[ùúụủũ]', 'u'),
        (u'[ừứựửữ]', 'ư'),
        (u'[ỳýỵỷỹ]', 'y'),
    ]
    result = s.lower()
    for pattern, base_letter in tone_groups:
        result = regex.sub(pattern, base_letter, result)
    if mode == 'ascii':
        result = regex.sub(u'đ', 'd', result)
    return result
# convert index list to string
def index2str(index_list):
    """Decode a list of vocabulary indices back into text.

    Decoding stops at the <EMP> end-of-string marker (the last vocabulary
    index); indices above the vocabulary range are silently skipped.
    """
    decoded = []
    eos = vocab_size - 1
    for idx in index_list:
        if idx == eos:  # <EMP> / end-of-string
            break
        if idx < eos:
            decoded.append(index2byte[idx])
    return u''.join(decoded)
# print list of index list
def print_index(indices):
    """Print each decoded index list on its own line."""
    for one_list in indices:
        print(index2str(one_list))
# real-time wave to mfcc conversion function
@tf.sg_producer_func
def _load_mfcc(src_list):
    """Queue producer: turn (serialized label, mfcc path) into arrays.

    Runs inside the sugartensor input pipeline; the label arrives as a raw
    byte string produced by ndarray.tostring() upstream.
    """
    # label, wave_file
    label, mfcc_file = src_list
    # decode string to integer
    # NOTE(review): np.fromstring and np.int are deprecated/removed in
    # modern NumPy (np.frombuffer / int are the replacements) — revisit
    # before upgrading NumPy.
    label = np.fromstring(label, np.int)
    # load mfcc
    mfcc = np.load(mfcc_file, allow_pickle=False)
    # speed perturbation augmenting
    mfcc = _augment_speech(mfcc)
    return label, mfcc
def _augment_speech(mfcc):
# random frequency shift ( == speed perturbation effect on MFCC )
r = np.random.randint(-2, 2)
# shifting mfcc
mfcc = np.roll(mfcc, r, axis=0)
# zero padding
if r > 0:
mfcc[:r, :] = 0
elif r < 0:
mfcc[r:, :] = 0
return mfcc
# Speech Corpus
class SpeechCorpus(object):
    """TF queue-based input pipeline yielding (label, MFCC) batches."""

    def __init__(self, batch_size=16, set_name='train'):
        """Build the pipeline from the preprocessed meta CSV for `set_name`.

        Exposes `self.label`, `self.mfcc` (batch x time x dim after the
        transpose below) and `self.num_batch`.
        """
        # load meta file
        label, mfcc_file = [], []
        with open(data_path + 'preprocess_vn/meta/%s.csv' % set_name) as csv_file:
            reader = csv.reader(csv_file, delimiter=',')
            for row in reader:  # 11658 rows (=files)
                # mfcc file list
                filename = data_path + 'preprocess_vn/mfcc/' + row[0] + '.npy'
                mfcc_file.append(filename)
                # label info (convert to string object for variable-length support)
                info = np.asarray(row[1:], dtype=np.int)
                label.append(info.tostring())

        # to constant tensor
        label_t = tf.convert_to_tensor(label)
        mfcc_file_t = tf.convert_to_tensor(mfcc_file)

        # create queue from constant tensor
        label_q, mfcc_file_q = tf.train.slice_input_producer(tensor_list=[label_t, mfcc_file_t], shuffle=True, capacity=32)

        # create label, mfcc queue (decoding happens in _load_mfcc workers)
        label_q, mfcc_q = _load_mfcc(source=[label_q, mfcc_file_q],
                                     dtypes=[tf.sg_intx, tf.sg_floatx],
                                     capacity=256,
                                     num_threads=64)

        # create batch queue with dynamic padding
        batch_queue = tf.train.batch([label_q, mfcc_q],
                                     batch_size,
                                     shapes=[(None,), (20, None)],
                                     num_threads=64,
                                     capacity=batch_size*32,
                                     dynamic_pad=True,
                                     allow_smaller_final_batch=True)

        # split data
        self.label, self.mfcc = batch_queue
        # batch * time * dim
        self.mfcc = self.mfcc.sg_transpose(perm=[0, 2, 1])
        # calculate total batch count
        self.num_batch = len(label) // batch_size  # Floor division
        # print info
        tf.sg_info('%s set loaded.(total data=%d, total batch=%d)' % (set_name.upper(), len(label), self.num_batch))
| 24.557851
| 117
| 0.590443
|
4a074a679c554390585d0307ad19621a1d2bbeb2
| 2,129
|
py
|
Python
|
bitmovin_api_sdk/notifications/webhooks/encoding/encodings/encodings_api.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 11
|
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/notifications/webhooks/encoding/encodings/encodings_api.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 8
|
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/notifications/webhooks/encoding/encodings/encodings_api.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 13
|
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
# coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.notifications.webhooks.encoding.encodings.finished.finished_api import FinishedApi
from bitmovin_api_sdk.notifications.webhooks.encoding.encodings.error.error_api import ErrorApi
from bitmovin_api_sdk.notifications.webhooks.encoding.encodings.transfer_error.transfer_error_api import TransferErrorApi
from bitmovin_api_sdk.notifications.webhooks.encoding.encodings.live_input_stream_changed.live_input_stream_changed_api import LiveInputStreamChangedApi
from bitmovin_api_sdk.notifications.webhooks.encoding.encodings.encoding_status_changed.encoding_status_changed_api import EncodingStatusChangedApi
class EncodingsApi(BaseApi):
    """Groups the per-event webhook sub-APIs for encoding notifications."""

    @poscheck_except(2)
    def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
        # type: (str, str, str, BitmovinApiLoggerBase) -> None
        super(EncodingsApi, self).__init__(
            api_key=api_key,
            tenant_org_id=tenant_org_id,
            base_url=base_url,
            logger=logger
        )

        # Every sub-API is constructed with exactly the same arguments.
        shared_kwargs = dict(
            api_key=api_key,
            tenant_org_id=tenant_org_id,
            base_url=base_url,
            logger=logger,
        )
        self.finished = FinishedApi(**shared_kwargs)
        self.error = ErrorApi(**shared_kwargs)
        self.transfer_error = TransferErrorApi(**shared_kwargs)
        self.live_input_stream_changed = LiveInputStreamChangedApi(**shared_kwargs)
        self.encoding_status_changed = EncodingStatusChangedApi(**shared_kwargs)
| 35.483333
| 152
| 0.699389
|
4a074abe9db7bb216ce1ba36f59967adc84ddf85
| 752
|
py
|
Python
|
test/vanilla/Expected/AcceptanceTests/Header/header/__init__.py
|
qwordy/autorest.python
|
6b12df51c2a39a1285546b5a771b69f5896e794f
|
[
"MIT"
] | 35
|
2018-04-03T12:15:53.000Z
|
2022-03-11T14:03:34.000Z
|
test/vanilla/Expected/AcceptanceTests/Header/header/__init__.py
|
qwordy/autorest.python
|
6b12df51c2a39a1285546b5a771b69f5896e794f
|
[
"MIT"
] | 652
|
2017-08-28T22:44:41.000Z
|
2022-03-31T21:20:31.000Z
|
test/vanilla/Expected/AcceptanceTests/Header/header/__init__.py
|
qwordy/autorest.python
|
6b12df51c2a39a1285546b5a771b69f5896e794f
|
[
"MIT"
] | 29
|
2017-08-28T20:57:01.000Z
|
2022-03-11T14:03:38.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._auto_rest_swagger_bat_header_service import AutoRestSwaggerBATHeaderService
from ._version import VERSION
__version__ = VERSION
__all__ = ["AutoRestSwaggerBATHeaderService"]
# Apply hand-written customizations when an optional local `_patch` module
# exists; generated SDKs expose this hook so manual changes survive
# code regeneration.
try:
    from ._patch import patch_sdk  # type: ignore
    patch_sdk()
except ImportError:
    pass
| 35.809524
| 94
| 0.628989
|
4a074bfef11b57f08b8b25039833407d70aac910
| 630
|
py
|
Python
|
nerpy/__init__.py
|
ConstantineLignos/nerpy
|
cfc5b0f76c3f7105f2b881204c10a53c063c9d14
|
[
"MIT"
] | 1
|
2020-05-12T15:19:10.000Z
|
2020-05-12T15:19:10.000Z
|
nerpy/__init__.py
|
ConstantineLignos/nerpy
|
cfc5b0f76c3f7105f2b881204c10a53c063c9d14
|
[
"MIT"
] | null | null | null |
nerpy/__init__.py
|
ConstantineLignos/nerpy
|
cfc5b0f76c3f7105f2b881204c10a53c063c9d14
|
[
"MIT"
] | null | null | null |
from nerpy.annotator import MentionAnnotator, SequenceMentionAnnotator, Trainable
from nerpy.document import (
Document,
DocumentBuilder,
EntityType,
Mention,
MentionType,
Sentence,
Token,
)
from nerpy.encoding import (
BILOU,
BIO,
BIOES,
BIOU,
BMES,
IO,
IOB,
SUPPORTED_ENCODINGS,
MentionEncoder,
get_mention_encoder,
)
from nerpy.ingest.conll import CoNLLIngester, write_conll
from nerpy.ingest.ontonotes import OntoNotesIngester
from nerpy.io import load_json, load_pickled_documents, pickle_documents
from nerpy.scoring import Score, ScoringResult, score_prf
| 23.333333
| 81
| 0.747619
|
4a074e26e72641cb9f633ca38b240e32a3f7565e
| 1,738
|
py
|
Python
|
src/main/python/docs.py
|
bosea/outbrain-click-prediction
|
f6e46f31c8dc7294edacafcecba1ba4824272353
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/docs.py
|
bosea/outbrain-click-prediction
|
f6e46f31c8dc7294edacafcecba1ba4824272353
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/docs.py
|
bosea/outbrain-click-prediction
|
f6e46f31c8dc7294edacafcecba1ba4824272353
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Count how many rows reference each document_id in the Outbrain CSV dumps."""


def count_doc_ids(path, doc_id_column):
    """Return {document_id: row_count} for one CSV file.

    `doc_id_column` is the zero-based index of the column holding the
    document id; the header row (literal 'document_id') is skipped.
    """
    counts = {}
    with open(path, 'r') as fh:
        for row in fh:
            tokens = row.strip('\n').split(',')
            doc_id = tokens[doc_id_column]
            if doc_id != 'document_id':
                counts[doc_id] = counts.get(doc_id, 0) + 1
    return counts


def main():
    f1_in = 'promoted_content.csv'
    f2_in = 'documents_meta.csv'
    f3_in = 'documents_categories.csv'
    f4_in = 'documents_topics.csv'
    f5_in = 'documents_entities.csv'

    # BUG FIX: the original opened f1_in (promoted_content.csv) for f3/f4/f5
    # instead of the categories/topics/entities files, so three of the five
    # counts were duplicates of the wrong dataset. Files are now also closed
    # deterministically via `with` inside count_doc_ids().
    # promoted_content keeps document_id in column 1; the others in column 0.
    doc_id_f1 = count_doc_ids(f1_in, 1)
    doc_id_f2 = count_doc_ids(f2_in, 0)
    doc_id_f3 = count_doc_ids(f3_in, 0)
    doc_id_f4 = count_doc_ids(f4_in, 0)
    doc_id_f5 = count_doc_ids(f5_in, 0)

    # single-argument print() behaves identically under Python 2 and 3
    print('length of promoted_content docids= %d' % len(doc_id_f1))
    print('length of doc_meta docids= %d' % len(doc_id_f2))
    print('length of doc_categories docids= %d' % len(doc_id_f3))
    print('length of doc_topics docids= %d' % len(doc_id_f4))
    print('length of doc_entities docids= %d' % len(doc_id_f5))


if __name__ == '__main__':
    main()
| 24.138889
| 60
| 0.640391
|
4a074e59b7a64d86da935acd5d2cf7c78f566381
| 39,138
|
py
|
Python
|
codalab/lib/worksheet_util.py
|
Matt-F-Wu/codalab_cl
|
e08e82afbc3d478c52a811065701cf53c9d3891e
|
[
"Apache-2.0"
] | null | null | null |
codalab/lib/worksheet_util.py
|
Matt-F-Wu/codalab_cl
|
e08e82afbc3d478c52a811065701cf53c9d3891e
|
[
"Apache-2.0"
] | null | null | null |
codalab/lib/worksheet_util.py
|
Matt-F-Wu/codalab_cl
|
e08e82afbc3d478c52a811065701cf53c9d3891e
|
[
"Apache-2.0"
] | null | null | null |
"""
worksheet_util contains the following public functions:
- request_lines: pops up an editor to allow for full-text editing of a worksheet.
- parse_worksheet_form: takes those lines and generates a set of items (triples)
- interpret_items: takes those triples and returns a structure that interprets all the directives in the worksheet item.
A worksheet contains a list of (worksheet) items, where each item includes
- bundle_uuid (only used if type == bundle)
- subworkheet_uuid (only used if type == worksheet)
- value (used for text and directive)
- type (one of the following)
* markup: just plain plain text (markdown)
* directive: special instructions for determining formatting
* bundle: represents a bundle
* worksheet: represents a worksheet
This is the representation in the DB.
In the code, we have full items of the form (bundle_info, subworkheet_info, value_obj, type).
In other words, there are two representations of worksheet items:
- (bundle_uuid, subworksheet_uuid, value, type) [inserted into the database]
- (bundle_info, subworksheet_info, value_obj, type) [used in the code]
A genpath (generalized path) is either:
- a bundle field (e.g., 'command')
- a metadata field (e.g., 'name')
- a path (starts with '/'), but can descend into a YAML file (e.g., /stats:train/errorRate)
See get_worksheet_lines for documentation on the specification of the directives.
"""
import copy
import os
import re
import sys
from itertools import izip
from codalab.common import PermissionError, UsageError
from codalab.lib import canonicalize, editor_util, formatting
from codalab.objects.permission import group_permissions_str, permission_str
from codalab.rest.worksheet_block_schemas import (
FetchStatusSchema,
BlockModes,
MarkupBlockSchema,
BundleContentsBlockSchema,
BundleImageBlockSchema,
TableBlockSchema,
RecordsRowSchema,
RecordsBlockSchema,
GraphBlockSchema,
SubworksheetsBlock,
BundleUUIDSpecSchema,
)
# Note: this is part of the client's session, not server side.
CURRENT_WORKSHEET = '.'
# Types of (raw) worksheet items
TYPE_MARKUP = 'markup'
TYPE_DIRECTIVE = 'directive'
TYPE_BUNDLE = 'bundle'
TYPE_WORKSHEET = 'worksheet'
WORKSHEET_ITEM_TYPES = (TYPE_MARKUP, TYPE_DIRECTIVE, TYPE_BUNDLE, TYPE_WORKSHEET)
BUNDLE_REGEX = re.compile('^(\[(.*)\])?\s*\{([^{]*)\}$')
SUBWORKSHEET_REGEX = re.compile('^(\[(.*)\])?\s*\{\{(.*)\}\}$')
DIRECTIVE_CHAR = '%'
DIRECTIVE_REGEX = re.compile(r'^' + DIRECTIVE_CHAR + '\s*(.*)$')
# Default number of lines to pull for each display mode.
DEFAULT_CONTENTS_MAX_LINES = 10
def markup_item(x):
    """Wrap markup text `x` in the in-memory worksheet item 4-tuple."""
    return None, None, x, TYPE_MARKUP
def directive_item(x):
    """Wrap directive tokens `x` in the in-memory worksheet item 4-tuple."""
    return None, None, x, TYPE_DIRECTIVE
def bundle_item(x):
    """Wrap a bundle info dict `x` in the in-memory worksheet item 4-tuple."""
    # TODO: replace '' with None when tables.py schema is updated
    return x, None, '', TYPE_BUNDLE
def subworksheet_item(x):
    """Wrap a subworksheet info dict `x` in the in-memory worksheet item 4-tuple."""
    # TODO: replace '' with None when tables.py schema is updated
    return None, x, '', TYPE_WORKSHEET
def bundle_line(description, uuid):
    """Render a bundle reference as worksheet source text: ``[description]{uuid}``."""
    return '[{}]{{{}}}'.format(description, uuid)
def worksheet_line(description, uuid):
    """Render a subworksheet reference as worksheet source text: ``[description]{{uuid}}``."""
    return '[{}]{{{{{}}}}}'.format(description, uuid)
############################################################
def get_worksheet_info_edit_command(raw_command_map):
    """
    Return a cli-command for editing worksheet-info. Return None if raw_command_map contents are invalid.
    Input:
        raw_command: a map containing the info to edit, new_value and the action to perform
    """
    key = raw_command_map.get('k')
    value = raw_command_map.get('v')
    # Reject missing/empty key, missing value or any non-edit action.
    if not key or value is None:
        return None
    if raw_command_map.get('action') != 'worksheet-edit':
        return None
    # e.g. {'k': 'title', 'v': 'x'} -> wedit -t "x" (first letter of the key)
    return 'wedit -{k[0]} "{v}"'.format(**raw_command_map)
def convert_item_to_db(item):
    """Convert an in-memory worksheet item 4-tuple into its DB representation.

    Info dicts are collapsed to their uuids; directive token lists are
    serialized to a string. None values become '' for the value column.
    """
    bundle_info, subworksheet_info, value_obj, item_type = item
    bundle_uuid = bundle_info['uuid'] if bundle_info else None
    subworksheet_uuid = subworksheet_info['uuid'] if subworksheet_info else None
    if item_type == TYPE_DIRECTIVE:
        value = formatting.tokens_to_string(value_obj)
    else:
        value = value_obj
    # TODO: change tables.py so that None's are allowed
    return (bundle_uuid, subworksheet_uuid, value or '', item_type)
def get_worksheet_lines(worksheet_info):
    """
    Return a list of pretty-printed lines of text for the given worksheet.

    Each raw worksheet item is rendered back into its source form: markup
    verbatim, directives with a leading '%' (or '//' for comment directives),
    bundles as '[description]{uuid}', and subworksheets as '[description]{{uuid}}'.

    Raises RuntimeError on an unrecognized item type.
    """
    lines = []
    for item in worksheet_info['items']:
        (bundle_info, subworksheet_info, value_obj, item_type) = item
        if item_type == TYPE_MARKUP:
            lines.append(value_obj)
        elif item_type == TYPE_DIRECTIVE:
            if len(value_obj) > 0 and value_obj[0] == DIRECTIVE_CHAR:
                # A comment directive
                lines.append('//' + ' '.join(value_obj[1:]))
            else:
                # A normal directive
                value = formatting.tokens_to_string(value_obj)
                value = (
                    DIRECTIVE_CHAR
                    + ('' if len(value) == 0 or value.startswith(DIRECTIVE_CHAR) else ' ')
                    + value
                )
                lines.append(value)
        elif item_type == TYPE_BUNDLE:
            if 'metadata' not in bundle_info:
                # This happens when we add bundles by uuid and don't actually make sure they exist
                description = formatting.contents_str(None)
            else:
                metadata = bundle_info['metadata']
                description = bundle_info['bundle_type']
                description += ' ' + metadata['name']
                deps = interpret_genpath(bundle_info, 'dependencies')
                if deps:
                    description += ' -- ' + deps
                command = bundle_info.get('command')
                if command:
                    description += ' : ' + command
            lines.append(bundle_line(description, bundle_info['uuid']))
        elif item_type == TYPE_WORKSHEET:
            lines.append(
                worksheet_line(
                    'worksheet ' + formatting.contents_str(subworksheet_info.get('name')),
                    subworksheet_info['uuid'],
                )
            )
        else:
            # Fix: previously this interpolated the builtin `type` instead of the
            # offending item_type, producing a useless error message.
            raise RuntimeError('Invalid worksheet item type: %s' % item_type)
    return lines
def get_formatted_metadata(cls, metadata, raw=False):
    """
    Input:
        cls: bundle subclass (e.g. DatasetBundle, RunBundle, ProgramBundle)
        metadata: bundle metadata
        raw: when True, return the raw metadata values without formatting
    Return a list of (key, value) tuples, one per metadata spec of |cls|.
    """
    result = []
    for spec in cls.METADATA_SPECS:
        key = spec.key
        if raw:
            result.append((key, metadata.get(key)))
            continue
        # Formatted mode: skip absent or empty values entirely.
        if key not in metadata:
            continue
        if metadata[key] == '' or metadata[key] == []:
            continue
        value = apply_func(spec.formatting, metadata.get(key))
        if isinstance(value, list):
            value = ' | '.join(value)
        result.append((key, value))
    return result
def get_editable_metadata_fields(cls):
    """
    Input:
        cls: bundle subclass (e.g. DatasetBundle, RunBundle, ProgramBundle)
    Return a list of metadata field keys that are editable by the owner
    (i.e. all specs that are not generated by the system).
    """
    return [spec.key for spec in cls.METADATA_SPECS if not spec.generated]
def get_metadata_types(cls):
    """
    Return map from key -> type for the metadata fields in the given bundle class.
    e.g.
        'request_time' -> 'basestring'
        'time' -> 'duration'
        'tags' -> 'list'
    Possible types: 'int', 'float', 'list', 'bool', 'duration',
                    'size', 'date', 'basestring'
    Special types like 'duration' are only indicated when client-side
    formatting/serialization is necessary.
    """
    result = {}
    for spec in cls.METADATA_SPECS:
        # Non-string fields with a formatting hint report the hint; everything
        # else falls back to the plain type name.
        if not issubclass(spec.type, basestring) and spec.formatting:
            result[spec.key] = spec.formatting
        else:
            result[spec.key] = spec.type.__name__
    return result
def request_lines(worksheet_info):
    """
    Input: worksheet_info
    Pop up an editor populated with the current worksheet contents and return
    the list of lines the user typed into it.
    Raises UsageError when the user saved without making any change.
    """
    # Construct a form template with the current value of the worksheet.
    template_lines = get_worksheet_lines(worksheet_info)
    template = ''.join(line + os.linesep for line in template_lines)
    edited = editor_util.open_and_edit(suffix='.md', template=template)
    # Strip the trailing newlines left by the editor.
    form_result = [line.rstrip('\n') for line in edited]
    if form_result == template_lines:
        raise UsageError('No change made; aborting')
    return form_result
def parse_worksheet_form(form_result, model, user, worksheet_uuid):
    """
    Parse the lines the user typed into the worksheet editor back into raw
    worksheet items.

    Input: form_result is a list of lines.
    Return a list of (bundle_info, subworksheet_info, value, type) tuples.
    Lines whose subworksheet spec cannot be resolved are kept as markup
    carrying the error message, so the user's text is never lost.
    """
    # Classify a single line by its leading syntax ('//', [..]{..}, [..]{{..}}, '%').
    def get_line_type(line):
        if line.startswith('//'):
            return 'comment'
        elif BUNDLE_REGEX.match(line) is not None:
            return TYPE_BUNDLE
        elif SUBWORKSHEET_REGEX.match(line) is not None:
            return TYPE_WORKSHEET
        elif DIRECTIVE_REGEX.match(line) is not None:
            return TYPE_DIRECTIVE
        else:
            return TYPE_MARKUP
    line_types = [get_line_type(line) for line in form_result]
    # Extract bundle specs and resolve uuids in one batch
    bundle_lines = [
        (i, BUNDLE_REGEX.match(line).group(3))
        for i, line in enumerate(form_result)
        if line_types[i] == TYPE_BUNDLE
    ]
    # bundle_specs = (line_indices, bundle_specs)
    # NOTE(review): zip(...) being indexed below means this is Python 2 code
    # (izip is used further down as well); on Python 3 zip returns an iterator.
    bundle_specs = zip(*bundle_lines) if len(bundle_lines) > 0 else [(), ()]
    # bundle_uuids = {line_i: bundle_uuid, ...}
    bundle_uuids = dict(
        zip(
            bundle_specs[0],
            canonicalize.get_bundle_uuids(model, user, worksheet_uuid, bundle_specs[1]),
        )
    )
    items = []
    for line_i, (line_type, line) in enumerate(izip(line_types, form_result)):
        if line_type == 'comment':
            # '//' comments round-trip as comment directives.
            comment = line[2:]
            items.append(directive_item([DIRECTIVE_CHAR, comment]))
        elif line_type == TYPE_BUNDLE:
            bundle_info = {
                'uuid': bundle_uuids[line_i]
            }  # info doesn't need anything other than uuid
            items.append(bundle_item(bundle_info))
        elif line_type == TYPE_WORKSHEET:
            subworksheet_spec = SUBWORKSHEET_REGEX.match(line).group(3)
            try:
                subworksheet_uuid = canonicalize.get_worksheet_uuid(
                    model, user, worksheet_uuid, subworksheet_spec
                )
                subworksheet_info = {
                    'uuid': subworksheet_uuid
                }  # info doesn't need anything other than uuid
                items.append(subworksheet_item(subworksheet_info))
            except UsageError as e:
                # Keep the unresolvable line as markup so the text isn't dropped.
                items.append(markup_item(e.message + ': ' + line))
        elif line_type == TYPE_DIRECTIVE:
            directive = DIRECTIVE_REGEX.match(line).group(1)
            items.append(directive_item(formatting.string_to_tokens(directive)))
        elif line_type == TYPE_MARKUP:
            items.append(markup_item(line))
        else:
            raise RuntimeError("Invalid line type %s: this should not happen." % line_type)
    return items
def is_file_genpath(genpath):
    """Return whether the genpath is a file (e.g. '/stdout') or a bundle field (e.g. 'command')."""
    return genpath[:1] == '/'
def interpret_genpath(bundle_info, genpath):
    """
    Quickly interpret the genpaths (generalized path) that only require looking
    bundle_info (e.g., 'time', 'command'). The interpretation of generalized
    paths that require reading files is done by interpret_file_genpath.
    """
    # If genpath is referring to a file, then just returns instructions for
    # fetching that file rather than actually doing it.
    if is_file_genpath(genpath):
        return (bundle_info['uuid'], genpath)
    # Render dependencies
    deps = bundle_info.get('dependencies', [])
    # A single dependency with an empty child_path is rendered without its key.
    anonymous = len(deps) == 1 and deps[0]['child_path'] == ''
    # Render one dependency as "child_path:parent_name/parent_path" (parts optional).
    def render_dep(dep, show_key=True, show_uuid=False):
        if show_key and not anonymous:
            if show_uuid or dep['child_path'] != dep['parent_name']:
                a = dep['child_path'] + ':'
            else:
                # Key equals parent name: elide the redundant key.
                a = ':'
        else:
            a = ''
        b = dep['parent_uuid'] if show_uuid else (dep['parent_name'] or '')
        c = '/' + dep['parent_path'] if dep['parent_path'] else ''
        return a + b + c
    # Special genpaths (dependencies, args)
    if genpath == 'dependencies':
        return ','.join([render_dep(dep) for dep in deps])
    elif genpath.startswith('dependencies/'):
        # Look up the particular dependency
        _, name = genpath.split('/', 1)
        for dep in deps:
            if dep['child_path'] == name:
                return render_dep(dep, show_key=False)
        return formatting.verbose_contents_str(None)
    elif genpath == 'args':
        # Arguments that we would pass to 'cl'
        args = []
        bundle_type = bundle_info.get('bundle_type')
        if bundle_type not in ('make', 'run'):
            # Only make/run bundles have a reproducible command line.
            return None
        args += [bundle_type]
        # Dependencies
        for dep in deps:
            args.append(render_dep(dep, show_uuid=True))
        # Command
        if bundle_info['command']:
            args.append(formatting.quote(bundle_info['command']))
        # Add request arguments from metadata
        for key, value in metadata.items():
            if key.startswith('request_') and value:
                key = key.replace('_', '-')
                if isinstance(value, bool):
                    # Boolean requests become bare flags.
                    args.append('--' + key)
                else:
                    args.extend(['--' + key, formatting.quote(str(value))])
        return ' '.join(args)
    elif genpath == 'summary':
        # Render one dependency as "key{parent:uuid-prefix}" for the summary line.
        def friendly_render_dep(dep):
            key = dep['child_path'] or dep['parent_name']
            friendly_parent_name = formatting.verbose_contents_str(dep['parent_name'])
            value = (
                key
                + '{'
                + (friendly_parent_name + ':' if key != dep['parent_name'] else '')
                + dep['parent_uuid'][0:4]
                + '}'
            )
            return key, value
        # Nice easy-to-ready description of how this bundle got created.
        bundle_type = bundle_info.get('bundle_type')
        if bundle_type in ('dataset', 'program'):
            return '[uploaded]'
        if bundle_type == 'make':
            args = []
            for dep in deps:
                args.append(friendly_render_dep(dep)[1])
            return '= ' + ' '.join(args)
        elif bundle_type == 'run':
            command = bundle_info['command']
            for dep in deps:
                key, value = friendly_render_dep(dep)
                # Replace full-word occurrences of key in the command with an indicator of the dependency.
                # Of course, a string match in the command isn't necessary a semantic reference to the dependency,
                # and there are some dependencies which are not explicit in the command.
                # But this can be seen as a best-effort attempt.
                # NOTE(review): key is interpolated unescaped into the pattern; a key
                # containing regex metacharacters would misbehave (re.escape would fix).
                command = re.sub(r'\b%s\b' % key, value, command)
            return '! ' + command
        # Other bundle types fall through to the generic field lookup below.
    elif genpath == 'host_worksheets':
        if 'host_worksheets' in bundle_info:
            return ' '.join(
                '%s(%s)' % (info['name'], info['uuid']) for info in bundle_info['host_worksheets']
            )
    elif genpath == 'permission':
        if 'permission' in bundle_info:
            return permission_str(bundle_info['permission'])
    elif genpath == 'group_permissions':
        if 'group_permissions' in bundle_info:
            # FIXME(sckoo): we will be passing the old permissions format into this
            # which has been updated to accommodate the new formatting
            return group_permissions_str(bundle_info['group_permissions'])
    # Bundle field?
    value = bundle_info.get(genpath)
    if value is not None:
        return value
    # Metadata field?
    value = bundle_info.get('metadata', {}).get(genpath)
    if value is not None:
        return value
    return None
def format_metadata(metadata):
    """
    Format worksheet item metadata in place, based on the post-processing
    function attached to each field in the default schema.
    """
    if not metadata:
        return
    for (_, name, func) in get_default_schemas()['default']:
        # Only format fields that have a formatter and a truthy value.
        if func and metadata.get(name):
            metadata[name] = apply_func(func, metadata[name])
def canonicalize_schema_item(args):
    """
    Users who type in schema items can specify a partial argument list.
    Return the canonicalized version: a (name, genpath, post-processing) triple.
    Raises UsageError for any other argument count.
    """
    nargs = len(args)
    if nargs == 1:
        # Only a genpath: derive the display name from its last ':'-component.
        genpath = args[0]
        name = os.path.basename(genpath).split(":")[-1]
        return (name, genpath, None)
    if nargs == 2:
        return (args[0], args[1], None)
    if nargs == 3:
        return (args[0], args[1], args[2])
    raise UsageError('Invalid number of arguments: %s' % (args,))
def canonicalize_schema_items(items):
    """Canonicalize every schema item in |items| (see canonicalize_schema_item)."""
    return list(map(canonicalize_schema_item, items))
def apply_func(func, arg):
    """
    Apply post-processing function |func| to |arg|.
    |func| is a string representing a list of functions (separated by ' | ',
    applied to |arg| in succession). Each function is either:
    - 'str', 'duration', 'date', 'size' for special formatting
    - '%...' for sprintf-style formatting
    - s/.../... for regular expression substitution
    - [a:b] for taking substrings
    - 'add k v' / 'key k' for dictionary manipulation
    If |arg| is a (bundle_uuid, genpath) tuple that has not been fleshed out
    yet, |func| is appended to the tuple for later application.
    On any failure, the original |arg| is returned unchanged.
    """
    FUNC_DELIM = ' | '
    if isinstance(arg, tuple):
        # tuples are (bundle_uuid, genpath) which have not been fleshed out
        return arg + (func,)
    try:
        if func is None:
            return arg
        # String encoding of a function: size s/a/b
        for f in func.split(FUNC_DELIM):
            if f == 'str':
                arg = str(arg)
            elif f == 'date':
                arg = formatting.date_str(float(arg)) if arg is not None else None
            elif f == 'duration':
                arg = formatting.duration_str(float(arg)) if arg is not None else None
            elif f == 'size':
                arg = formatting.size_str(float(arg)) if arg is not None else None
            elif f.startswith('%'):
                arg = (f % float(arg)) if arg is not None else None
            elif f.startswith('s/'):  # regular expression: s/<old string>/<new string>
                esc_slash = '_ESC_SLASH_'  # Assume this doesn't occur in s
                # Preserve escaped characters: \/
                tokens = f.replace('\\/', esc_slash).split('/')
                if len(tokens) != 3:
                    return '<invalid regex: %s>' % f
                s = tokens[1].replace(esc_slash, '/')
                t = tokens[2].replace(esc_slash, '/')
                arg = re.sub(s, t, arg)
            elif f.startswith('['):  # substring
                m = re.match(r'\[(.*):(.*)\]', f)
                if m:
                    start = int(m.group(1) or 0)
                    end = int(m.group(2) or len(arg))
                    arg = arg[start:end]
                else:
                    return '<invalid function: %s>' % f
            elif f.startswith('add '):
                # 'add k v' checks if arg is a dictionary and updates it with arg[k] = v
                if isinstance(arg, dict):
                    k, v = f.split(' ')[1:]
                    arg[k] = v
                else:
                    return 'arg (%s) not a dictionary' % type(arg)
            elif f.startswith('key '):
                # 'key k' converts arg into a dictionary where arg[k] = arg
                arg = {f.split(' ')[1]: arg}
            else:
                return '<invalid function: %s>' % f
        return arg
    except Exception:
        # Applying the function failed, so just return the arg.
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; those now propagate.
        return arg
def get_default_schemas():
    """Return the built-in schemas: mapping from name to list of canonical schema items."""
    # Single fields: [genpath] or [name, genpath, post-processing function]
    uuid = ['uuid[0:8]', 'uuid', '[0:8]']
    name = ['name']
    summary = ['summary']
    data_size = ['data_size', 'data_size', 'size']
    time = ['time', 'time', 'duration']
    state = ['state']
    description = ['description']
    created = ['created', 'created', 'date']
    schemas = {
        # Schemas corresponding to one field
        'uuid': [uuid],
        'name': [name],
        'summary': [summary],
        'data_size': [data_size],
        'time': [time],
        'state': [state],
        'description': [description],
        'created': [created],
        # Schemas involving multiple fields
        'default': [uuid, name, summary, data_size, state, description],
        'program': [uuid, name, data_size, description],
        'dataset': [uuid, name, data_size, description],
        'make': [uuid, name, summary, data_size, state, description],
        'run': [uuid, name, summary, data_size, time, state, description],
    }
    return {key: canonicalize_schema_items(value) for key, value in schemas.items()}
def get_command(value_obj):  # For directives only
    """Return the directive's command token, or None for an empty directive."""
    if not value_obj:
        return None
    return value_obj[0]
def interpret_items(schemas, raw_items):
    """
    schemas: initial mapping from name to list of schema items (columns of a table)
    raw_items: list of (raw) worksheet items (triples) to interpret
    Return {'items': interpreted_items, ...}, where interpreted_items is a list of:
    {
        'mode': display mode ('markup' | 'contents' | 'image' | 'html', etc.)
        'interpreted': one of
            - rendered string
            - target = (bundle_uuid, genpath)
            - (header = (col1, ..., coln), rows = [{col1:value1, ..., coln:valuen}, ...]) [for tables]
            - {keywords: [...]} for mode = 'search' or 'wsearch'
        'properties': dict of properties (e.g., width, maxlines, etc.),
        'bundle_info': bundle_info or list of bundle_infos,
        'subworksheet_info': subworksheet,
    }
    In addition, return an alignment between the raw items and the interpreted items.
    Each interpreted item has a focusIndex, and possibly consists of a list of
    table rows (indexed by subFocusIndex). Here is an example:
      --- Raw ---            --- Interpreted ---
      rawIndex               (focusIndex, subFocusIndex)
      0 % display table
      1 [bundle]             [table - row 0   (0, 0)
      2 [bundle]                     - row 1] (0, 1)
      3
      4 hello                [markup          (1, 0)
      5 world                 ]
      6 [worksheet]          [worksheet]      (2, 0)
      7
    The mapping should be computed as follows:
    - Some raw items contribute directly to a particular interpreted item.
    - Others (blank lines, directives, schema definitions) don't.
    - Those that don't should get mapped to the next interpreted item.

    NOTE(review): Python 2 only — uses `StandardError` and the `print >>` statement.
    """
    raw_to_block = []  # rawIndex => (focusIndex, subFocusIndex)
    # Set default schema
    current_schema = None
    default_display = ('table', 'default')
    current_display = default_display
    blocks = []
    bundle_infos = []
    worksheet_infos = []
    # Resolve a list of schema names into one concatenated schema.
    def get_schema(args):  # args is a list of schema names
        args = args if len(args) > 0 else ['default']
        schema = []
        for arg in args:
            # If schema doesn't exist, then treat as item (e.g., uuid).
            schema += schemas.get(arg, canonicalize_schema_items([arg.split(':', 2)]))
        return schema
    # A bundle without 'metadata' could not be fetched (e.g. no permission).
    def is_missing(info):
        return 'metadata' not in info
    # Parse trailing 'key=value' directive arguments into a dict.
    def parse_properties(args):
        properties = {}
        for item in args:
            if '=' not in item:
                raise UsageError('Expected <key>=<value>, but got %s' % item)
            key, value = item.split('=', 1)
            properties[key] = value
        return properties
    def genpath_to_target(bundle_info, genpath):
        # bundle_info, '/stdout' => target = (uuid, 'stdout')
        if not is_file_genpath(genpath):
            raise UsageError('Not file genpath: %s' % genpath)
        # strip off the leading / from genpath to create a subpath in the target.
        return (bundle_info['uuid'], genpath[1:])
    def flush_bundles():
        """
        Having collected bundles in |bundle_infos|, flush them into |blocks|,
        potentially as a single table depending on the mode.
        """
        if len(bundle_infos) == 0:
            return
        def raise_genpath_usage_error():
            raise UsageError(
                'Expected \'% display '
                + mode
                + ' (genpath)\', but got \'% display '
                + ' '.join([mode] + args)
                + '\''
            )
        # Print out the curent bundles somehow
        mode = current_display[0]
        args = current_display[1:]
        if mode == 'hidden':
            pass
        elif mode == 'contents' or mode == 'image':
            # One block per bundle, each targeting a file inside the bundle.
            for item_index, bundle_info in bundle_infos:
                if is_missing(bundle_info):
                    blocks.append(
                        MarkupBlockSchema().load({'text': 'ERROR: cannot access bundle'}).data
                    )
                    continue
                # Parse arguments
                if len(args) == 0:
                    raise_genpath_usage_error()
                # these two are required for the target
                (bundle_uuid, target_genpath) = genpath_to_target(bundle_info, args[0])
                properties = parse_properties(args[1:])
                block_object = {
                    'target_genpath': target_genpath,
                    'bundles_spec': BundleUUIDSpecSchema()
                    .load(BundleUUIDSpecSchema.create_json([bundle_info]))
                    .data,
                    'status': FetchStatusSchema.get_unknown_status(),
                }
                if mode == 'contents':
                    try:
                        block_object['max_lines'] = int(
                            properties.get('maxlines', DEFAULT_CONTENTS_MAX_LINES)
                        )
                    except ValueError:
                        raise UsageError("maxlines must be integer")
                    blocks.append(BundleContentsBlockSchema().load(block_object).data)
                elif mode == 'image':
                    block_object['width'] = properties.get('width', None)
                    block_object['height'] = properties.get('height', None)
                    blocks.append(BundleImageBlockSchema().load(block_object).data)
        elif mode == 'record':
            # display record schema =>
            # key1: value1
            # key2: value2
            # ...
            schema = get_schema(args)
            for item_index, bundle_info in bundle_infos:
                header = ('key', 'value')
                rows = []
                for (name, genpath, post) in schema:
                    rows.append(
                        RecordsRowSchema()
                        .load(
                            {
                                'key': name + ':',
                                'value': apply_func(post, interpret_genpath(bundle_info, genpath)),
                            }
                        )
                        .data
                    )
                blocks.append(
                    RecordsBlockSchema()
                    .load(
                        {
                            'bundles_spec': BundleUUIDSpecSchema()
                            .load(BundleUUIDSpecSchema.create_json([bundle_info]))
                            .data,
                            'status': FetchStatusSchema.get_unknown_status(),
                            'header': header,
                            'rows': rows,
                        }
                    )
                    .data
                )
        elif mode == 'table':
            # display table schema =>
            # key1       key2
            # b1_value1  b1_value2
            # b2_value1  b2_value2
            schema = get_schema(args)
            header = tuple(name for (name, genpath, post) in schema)
            rows = []
            processed_bundle_infos = []
            for item_index, bundle_info in bundle_infos:
                if 'metadata' in bundle_info:
                    rows.append(
                        {
                            name: apply_func(post, interpret_genpath(bundle_info, genpath))
                            for (name, genpath, post) in schema
                        }
                    )
                    processed_bundle_infos.append(copy.deepcopy(bundle_info))
                else:
                    # The front-end relies on the name metadata field existing
                    processed_bundle_info = copy.deepcopy(bundle_info)
                    processed_bundle_info['metadata'] = {'name': '<invalid>'}
                    rows.append(
                        {
                            name: apply_func(
                                post, interpret_genpath(processed_bundle_info, genpath)
                            )
                            for (name, genpath, post) in schema
                        }
                    )
                    processed_bundle_infos.append(processed_bundle_info)
            blocks.append(
                TableBlockSchema()
                .load(
                    {
                        'bundles_spec': BundleUUIDSpecSchema()
                        .load(BundleUUIDSpecSchema.create_json(processed_bundle_infos))
                        .data,
                        'status': FetchStatusSchema.get_unknown_status(),
                        'header': header,
                        'rows': rows,
                    }
                )
                .data
            )
        elif mode == 'graph':
            # display graph <genpath> <properties>
            if len(args) == 0:
                raise_genpath_usage_error()
            # trajectories is list of {
            #   'uuid': ...,
            #   'display_name': ...,  # What to show as the description of a bundle
            #   'target': (bundle_uuid, subpath)
            # }
            properties = parse_properties(args[1:])
            trajectories = [
                {
                    'bundle_uuid': bundle_info['uuid'],
                    'display_name': interpret_genpath(
                        bundle_info, properties.get('display_name', 'name')
                    ),
                    'target_genpath': genpath_to_target(bundle_info, args[0])[1],
                }
                for item_index, bundle_info in bundle_infos
            ]
            try:
                max_lines = int(properties.get('maxlines', DEFAULT_CONTENTS_MAX_LINES))
            except ValueError:
                raise UsageError("maxlines must be integer")
            blocks.append(
                GraphBlockSchema()
                .load(
                    {
                        'trajectories': trajectories,
                        'bundles_spec': BundleUUIDSpecSchema()
                        .load(BundleUUIDSpecSchema.create_json([bundle_infos[0][1]]))
                        .data,  # Only show the first one for now
                        # 'bundles_spec': BundleUUIDSpecSchema().load(BundleUUIDSpecSchema.create_json(
                        #     [copy.deepcopy(bundle_info) for item_index, bundle_info in bundle_infos]).data,
                        'max_lines': max_lines,
                        'xlabel': properties.get('xlabel', None),
                        'ylabel': properties.get('ylabel', None),
                    }
                )
                .data
            )
        else:
            raise UsageError('Unknown display mode: %s' % mode)
        bundle_infos[:] = []  # Clear
    def flush_worksheets():
        # Flush the collected subworksheets into a single block.
        if len(worksheet_infos) == 0:
            return
        blocks.append(
            SubworksheetsBlock().load({'subworksheet_infos': copy.deepcopy(worksheet_infos)}).data
        )
        worksheet_infos[:] = []
    # Go through all the raw items...
    last_was_empty_line = False
    for raw_index, item in enumerate(raw_items):
        new_last_was_empty_line = True
        try:
            (bundle_info, subworksheet_info, value_obj, item_type) = item
            is_bundle = item_type == TYPE_BUNDLE
            is_search = item_type == TYPE_DIRECTIVE and get_command(value_obj) == 'search'
            is_directive = item_type == TYPE_DIRECTIVE
            is_worksheet = item_type == TYPE_WORKSHEET
            if not is_bundle:
                flush_bundles()
            if not is_worksheet:
                flush_worksheets()
            # Reset display to minimize long distance dependencies of directives
            if not (is_bundle or is_search):
                current_display = default_display
            # Reset schema to minimize long distance dependencies of directives
            if not is_directive:
                current_schema = None
            if item_type == TYPE_BUNDLE:
                raw_to_block.append((len(blocks), len(bundle_infos)))
                bundle_infos.append((raw_index, bundle_info))
            elif item_type == TYPE_WORKSHEET:
                raw_to_block.append((len(blocks), len(worksheet_infos)))
                worksheet_infos.append(subworksheet_info)
            elif item_type == TYPE_MARKUP:
                new_last_was_empty_line = value_obj == ''
                if (
                    len(blocks) > 0
                    and blocks[-1]['mode'] == BlockModes.markup_block
                    and not last_was_empty_line
                    and not new_last_was_empty_line
                ):
                    # Join with previous markup item
                    blocks[-1]['text'] += '\n' + value_obj
                elif not new_last_was_empty_line:
                    blocks.append(
                        MarkupBlockSchema().load({'id': len(blocks), 'text': value_obj}).data
                    )
                # Important: set raw_to_block after so we can focus on current item.
                if new_last_was_empty_line:
                    raw_to_block.append(None)
                else:
                    raw_to_block.append((len(blocks) - 1, 0))
            elif item_type == TYPE_DIRECTIVE:
                command = get_command(value_obj)
                if command == '%' or command == '' or command is None:
                    # Comment
                    pass
                elif command == 'schema':
                    # Start defining new schema
                    if len(value_obj) < 2:
                        raise UsageError("`schema` missing name")
                    name = value_obj[1]
                    schemas[name] = current_schema = []
                elif command == 'addschema':
                    # Add to schema
                    if current_schema is None:
                        raise UsageError("`addschema` must be preceded by `schema` directive")
                    if len(value_obj) < 2:
                        raise UsageError("`addschema` missing name")
                    name = value_obj[1]
                    current_schema += schemas[name]
                elif command == 'add':
                    # Add to schema
                    if current_schema is None:
                        raise UsageError("`add` must be preceded by `schema` directive")
                    schema_item = canonicalize_schema_item(value_obj[1:])
                    current_schema.append(schema_item)
                elif command == 'display':
                    # Set display
                    current_display = value_obj[1:]
                else:
                    raise UsageError("unknown directive `%s`" % command)
                raw_to_block.append(None)
            else:
                raise RuntimeError('Unknown worksheet item type: %s' % item_type)
            # Flush bundles once more at the end
            if raw_index == len(raw_items) - 1:
                flush_bundles()
                flush_worksheets()
        except UsageError as e:
            # Render the error into the worksheet and reset interpreter state.
            current_schema = None
            bundle_infos[:] = []
            worksheet_infos[:] = []
            blocks.append(
                MarkupBlockSchema()
                .load({'text': 'Error on line %d: %s' % (raw_index, e.message)})
                .data
            )
            raw_to_block.append((len(blocks) - 1, 0))
        except StandardError:
            # Unexpected failure: log the traceback, surface a generic error block.
            current_schema = None
            bundle_infos[:] = []
            worksheet_infos[:] = []
            import traceback
            traceback.print_exc()
            blocks.append(
                MarkupBlockSchema()
                .load({'text': 'Unexpected error while parsing line %d' % raw_index})
                .data
            )
            raw_to_block.append((len(blocks) - 1, 0))
        finally:
            last_was_empty_line = new_last_was_empty_line
    # TODO: fix inconsistencies resulting from UsageErrors thrown in flush_bundles()
    if len(raw_to_block) != len(raw_items):
        print >>sys.stderr, "WARNING: Length of raw_to_block does not match length of raw_items"
    # Package the result
    block_to_raw = {}
    next_interpreted_index = None
    # Go in reverse order so we can assign raw items that map to None to the next interpreted item
    for raw_index, interpreted_index in reversed(list(enumerate(raw_to_block))):
        if interpreted_index is None:  # e.g., blank line, directive
            interpreted_index = next_interpreted_index
            raw_to_block[raw_index] = interpreted_index
        else:
            interpreted_index_str = str(interpreted_index[0]) + ',' + str(interpreted_index[1])
            if interpreted_index_str not in block_to_raw:  # Bias towards the last item
                block_to_raw[interpreted_index_str] = raw_index
            next_interpreted_index = interpreted_index
    # Return the result
    result = {}
    result['blocks'] = blocks
    result['raw_to_block'] = raw_to_block
    result['block_to_raw'] = block_to_raw
    return result
def check_worksheet_not_frozen(worksheet):
    """Raise PermissionError if the worksheet is frozen (mutation not allowed)."""
    if not worksheet.frozen:
        return
    raise PermissionError(
        'Cannot mutate frozen worksheet %s(%s).' % (worksheet.uuid, worksheet.name)
    )
| 38.673913
| 120
| 0.557847
|
4a074f00ef256ecd533157b2779898086093e0a6
| 3,400
|
py
|
Python
|
cinder/scheduler/driver.py
|
cloudbau/cinder
|
3179f2f42ae940a08b910e326a809556689864d8
|
[
"Apache-2.0"
] | null | null | null |
cinder/scheduler/driver.py
|
cloudbau/cinder
|
3179f2f42ae940a08b910e326a809556689864d8
|
[
"Apache-2.0"
] | null | null | null |
cinder/scheduler/driver.py
|
cloudbau/cinder
|
3179f2f42ae940a08b910e326a809556689864d8
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler base class that all Schedulers should inherit from
"""
from oslo.config import cfg
from cinder import db
from cinder.openstack.common import importutils
from cinder.openstack.common import timeutils
from cinder import utils
from cinder.volume import rpcapi as volume_rpcapi
scheduler_driver_opts = [
cfg.StrOpt('scheduler_host_manager',
default='cinder.scheduler.host_manager.HostManager',
help='The scheduler host manager class to use'),
cfg.IntOpt('scheduler_max_attempts',
default=3,
help='Maximum number of attempts to schedule an volume'),
]
CONF = cfg.CONF
CONF.register_opts(scheduler_driver_opts)
def volume_update_db(context, volume_id, host):
    '''Set the host and set the scheduled_at field of a volume.

    :returns: A Volume with the updated fields set properly.
    '''
    values = {'host': host, 'scheduled_at': timeutils.utcnow()}
    return db.volume_update(context, volume_id, values)
class Scheduler(object):
    """The base class that all Scheduler classes should inherit from."""

    def __init__(self):
        # Host manager class is configurable via scheduler_host_manager.
        self.host_manager = importutils.import_object(
            CONF.scheduler_host_manager)
        self.volume_rpcapi = volume_rpcapi.VolumeAPI()

    def update_service_capabilities(self, service_name, host, capabilities):
        """Process a capability update from a service node."""
        self.host_manager.update_service_capabilities(
            service_name, host, capabilities)

    def hosts_up(self, context, topic):
        """Return the list of hosts that have a running service for topic."""
        services = db.service_get_all_by_topic(context, topic)
        return [service['host'] for service in services
                if utils.service_is_up(service)]

    def host_passes_filters(self, context, volume_id, host, filter_properties):
        """Check if the specified host passes the filters."""
        raise NotImplementedError(_("Must implement host_passes_filters"))

    def schedule(self, context, topic, method, *_args, **_kwargs):
        """Must override schedule method for scheduler to work."""
        raise NotImplementedError(_("Must implement a fallback schedule"))

    def schedule_create_volume(self, context, request_spec, filter_properties):
        """Must override schedule method for scheduler to work."""
        raise NotImplementedError(_("Must implement schedule_create_volume"))
| 38.202247
| 79
| 0.694706
|
4a07510d24fd3b86d4c008667e314b5471238c0e
| 2,515
|
py
|
Python
|
magz/uroot2d.py
|
bru32/magz
|
541e0a5774d51251ce25bf326cdce0c615f347c3
|
[
"MIT"
] | null | null | null |
magz/uroot2d.py
|
bru32/magz
|
541e0a5774d51251ce25bf326cdce0c615f347c3
|
[
"MIT"
] | null | null | null |
magz/uroot2d.py
|
bru32/magz
|
541e0a5774d51251ce25bf326cdce0c615f347c3
|
[
"MIT"
] | null | null | null |
"""
2D Root Finder.
Function f(x,y) must return tuple of 2 error.
Bruce Wernick
10 June 2021
"""
import sys
from const import EPS, TINY
class RootFinder(object):
  """Base class for 2D root finders; f(x, y) must return a pair of errors."""

  maxi = 96    # maximum number of iterations
  tol = 1e-6   # convergence tolerance on the step size

  def __init__(self, f):
    self.f = f
    self.its = 0  # iterations used by the most recent solve

  def djac(self, x, y):
    """Finite-difference Jacobian of f at (x, y).

    Returns (fxy, gxy, J, D) where (fxy, gxy) = f(x, y),
    J = (df/dx, df/dy, dg/dx, dg/dy) and D = det(J).
    """
    h = 3.44e-4
    xo, yo = x, y
    # Relative step sizes, falling back to the absolute step h at zero.
    dx = h * abs(x) or h
    dy = h * abs(y) or h
    x = x + dx
    y = y + dy
    fxy, gxy = self.f(xo, yo)
    # Recompute the steps actually taken (guards against rounding).
    dx = x - xo
    dy = y - yo
    fxyo, gxyo = self.f(x, yo)
    fxoy, gxoy = self.f(xo, y)
    J = (fxyo - fxy) / dx, (fxoy - fxy) / dy, (gxyo - gxy) / dx, (gxoy - gxy) / dy
    D = J[0] * J[3] - J[2] * J[1]
    return fxy, gxy, J, D
class Broyden(RootFinder):
  def __call__(self, xy):
    """Solve f(x, y) = (0, 0) from the starting point xy by Broyden's method.

    Maintains B, an approximation of the inverse Jacobian, updated cheaply
    each step instead of re-evaluating the Jacobian. Raises ValueError when
    the surface is too flat or the iteration limit is reached.
    """
    x,y = xy
    self.its = 0
    # Seed B with the inverse of the finite-difference Jacobian.
    fxy, gxy, J, D = self.djac(x,y)
    if abs(D) < EPS:
      raise ValueError('too flat!')
    B = [J[3]/D, -J[1]/D, -J[2]/D, J[0]/D]
    # First quasi-Newton step: (dx, dy) = -B * (f, g).
    dx = -(B[0]*fxy+B[1]*gxy); dy = -(B[2]*fxy+B[3]*gxy)
    x += dx; y += dy
    f0, g0 = fxy, gxy
    fxy, gxy = self.f(x, y)
    df, dg = fxy-f0, gxy-g0
    for self.its in range(RootFinder.maxi):
      BdF = B[0]*df+B[1]*dg, B[2]*df+B[3]*dg
      # e is the denominator of the Broyden rank-1 update ("good Broyden").
      e = dx*BdF[0] + dy*BdF[1]
      if abs(e) < EPS:
        return x,y
      # Rank-1 update of the inverse Jacobian: B += (s - B*dF) (s^T B) / e.
      u = dx-BdF[0], dy-BdF[1]
      v = B[0]*dx+B[2]*dy, B[1]*dx+B[3]*dy
      B[0]+=u[0]*v[0]/e; B[1]+=u[0]*v[1]/e; B[2]+=u[1]*v[0]/e; B[3]+=u[1]*v[1]/e
      # Next step and convergence test on the step size.
      dx=-(B[0]*fxy+B[1]*gxy); dy=-(B[2]*fxy+B[3]*gxy)
      x += dx; y += dy
      if abs(dx) <= RootFinder.tol and abs(dy) <= RootFinder.tol:
        return x,y
      f0,g0 = fxy,gxy
      fxy,gxy = self.f(x,y)
      df,dg = fxy - f0, gxy - g0
    raise ValueError('max iterations reached!')
class Newton(RootFinder):
  """2D Newton-Raphson root finder."""

  def __call__(self, xy):
    """Solve f(x, y) = (0, 0) starting from the point xy = (x, y)."""
    x, y = xy
    self.its = 0
    for self.its in range(RootFinder.maxi):
      fxy, gxy, J, G = self.djac(x, y)
      if abs(G) < EPS:
        raise ValueError('too flat!')
      # Newton step: solve J * (dx, dy) = -(f, g) by Cramer's rule.
      dx = (gxy * J[1] - fxy * J[3]) / G
      dy = (fxy * J[2] - gxy * J[0]) / G
      x += dx
      y += dy
      if abs(dx) < RootFinder.tol and abs(dy) < RootFinder.tol:
        return x, y
    raise ValueError('max iterations reached!')
# ---------------------------------------------------------------------
if __name__ == '__main__':

  # Example 1, trivial 2D function with root at (2, 7)
  def f(x, y):
    return (x - 2.0, y - 7.0)

  solver = Broyden(f)
  guess = (1.0, 1.0)
  z = solver(guess)
  print(f'roots = {z}')
| 25.663265
| 81
| 0.47833
|
4a07515b8729ea054599b384f31a45be4062bd7a
| 20,173
|
py
|
Python
|
aiohttp/web_request.py
|
Mariatta/aiohttp
|
78d08ce8f629f6ce3d1b3f0758b797c085be944c
|
[
"Apache-2.0"
] | 1
|
2021-01-03T00:58:07.000Z
|
2021-01-03T00:58:07.000Z
|
aiohttp/web_request.py
|
Mariatta/aiohttp
|
78d08ce8f629f6ce3d1b3f0758b797c085be944c
|
[
"Apache-2.0"
] | null | null | null |
aiohttp/web_request.py
|
Mariatta/aiohttp
|
78d08ce8f629f6ce3d1b3f0758b797c085be944c
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import collections
import datetime
import json
import re
import socket
import string
import tempfile
import types
import warnings
from email.utils import parsedate
from http.cookies import SimpleCookie
from types import MappingProxyType
from urllib.parse import parse_qsl
from multidict import CIMultiDict, MultiDict, MultiDictProxy
from yarl import URL
from . import hdrs, multipart
from .helpers import HeadersMixin, reify, sentinel
from .streams import EmptyStreamReader
from .web_exceptions import HTTPRequestEntityTooLarge
__all__ = ('BaseRequest', 'FileField', 'Request')
FileField = collections.namedtuple(
'Field', 'name filename file content_type headers')
_TCHAR = string.digits + string.ascii_letters + r"!#$%&'*+.^_`|~-"
# '-' at the end to prevent interpretation as range in a char class
_TOKEN = r'[{tchar}]+'.format(tchar=_TCHAR)
_QDTEXT = r'[{}]'.format(
r''.join(chr(c) for c in (0x09, 0x20, 0x21) + tuple(range(0x23, 0x7F))))
# qdtext includes 0x5C to escape 0x5D ('\]')
# qdtext excludes obs-text (because obsoleted, and encoding not specified)
_QUOTED_PAIR = r'\\[\t !-~]'
_QUOTED_STRING = r'"(?:{quoted_pair}|{qdtext})*"'.format(
qdtext=_QDTEXT, quoted_pair=_QUOTED_PAIR)
_FORWARDED_PAIR = (
r'({token})=({token}|{quoted_string})'.format(
token=_TOKEN,
quoted_string=_QUOTED_STRING))
_QUOTED_PAIR_REPLACE_RE = re.compile(r'\\([\t !-~])')
# same pattern as _QUOTED_PAIR but contains a capture group
_FORWARDED_PAIR_RE = re.compile(_FORWARDED_PAIR)
############################################################
# HTTP Request
############################################################
class BaseRequest(collections.MutableMapping, HeadersMixin):
POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT,
hdrs.METH_TRACE, hdrs.METH_DELETE}
def __init__(self, message, payload, protocol, payload_writer, task,
loop,
*, client_max_size=1024**2,
state=None,
scheme=None, host=None, remote=None):
if state is None:
state = {}
self._message = message
self._protocol = protocol
self._payload_writer = payload_writer
self._payload = payload
self._headers = message.headers
self._method = message.method
self._version = message.version
self._rel_url = message.url
self._post = None
self._read_bytes = None
self._state = state
self._cache = {}
self._task = task
self._client_max_size = client_max_size
self._loop = loop
self._scheme = scheme
self._host = host
self._remote = remote
def clone(self, *, method=sentinel, rel_url=sentinel,
headers=sentinel, scheme=sentinel, host=sentinel,
remote=sentinel):
"""Clone itself with replacement some attributes.
Creates and returns a new instance of Request object. If no parameters
are given, an exact copy is returned. If a parameter is not passed, it
will reuse the one from the current request object.
"""
if self._read_bytes:
raise RuntimeError("Cannot clone request "
"after reading it's content")
dct = {}
if method is not sentinel:
dct['method'] = method
if rel_url is not sentinel:
rel_url = URL(rel_url)
dct['url'] = rel_url
dct['path'] = str(rel_url)
if headers is not sentinel:
dct['headers'] = CIMultiDict(headers)
dct['raw_headers'] = tuple((k.encode('utf-8'), v.encode('utf-8'))
for k, v in headers.items())
message = self._message._replace(**dct)
kwargs = {}
if scheme is not sentinel:
kwargs['scheme'] = scheme
if host is not sentinel:
kwargs['host'] = host
if remote is not sentinel:
kwargs['remote'] = remote
return self.__class__(
message,
self._payload,
self._protocol,
self._payload_writer,
self._task,
self._loop,
client_max_size=self._client_max_size,
state=self._state.copy(),
**kwargs)
@property
def task(self):
return self._task
@property
def protocol(self):
return self._protocol
@property
def transport(self):
return self._protocol.transport
@property
def writer(self):
return self._payload_writer
@property
def message(self):
return self._message
@property
def rel_url(self):
return self._rel_url
@property
def loop(self):
return self._loop
# MutableMapping API
def __getitem__(self, key):
return self._state[key]
def __setitem__(self, key, value):
self._state[key] = value
def __delitem__(self, key):
del self._state[key]
def __len__(self):
return len(self._state)
def __iter__(self):
return iter(self._state)
########
@property
def secure(self):
"""A bool indicating if the request is handled with SSL."""
return self.scheme == 'https'
@reify
def forwarded(self):
"""A tuple containing all parsed Forwarded header(s).
Makes an effort to parse Forwarded headers as specified by RFC 7239:
- It adds one (immutable) dictionary per Forwarded 'field-value', ie
per proxy. The element corresponds to the data in the Forwarded
field-value added by the first proxy encountered by the client. Each
subsequent item corresponds to those added by later proxies.
- It checks that every value has valid syntax in general as specified
in section 4: either a 'token' or a 'quoted-string'.
- It un-escapes found escape sequences.
- It does NOT validate 'by' and 'for' contents as specified in section
6.
- It does NOT validate 'host' contents (Host ABNF).
- It does NOT validate 'proto' contents for valid URI scheme names.
Returns a tuple containing one or more immutable dicts
"""
elems = []
for field_value in self._message.headers.getall(hdrs.FORWARDED, ()):
length = len(field_value)
pos = 0
need_separator = False
elem = {}
elems.append(types.MappingProxyType(elem))
while 0 <= pos < length:
match = _FORWARDED_PAIR_RE.match(field_value, pos)
if match is not None: # got a valid forwarded-pair
if need_separator:
# bad syntax here, skip to next comma
pos = field_value.find(',', pos)
else:
(name, value) = match.groups()
if value[0] == '"':
# quoted string: remove quotes and unescape
value = _QUOTED_PAIR_REPLACE_RE.sub(r'\1',
value[1:-1])
elem[name.lower()] = value
pos += len(match.group(0))
need_separator = True
elif field_value[pos] == ',': # next forwarded-element
need_separator = False
elem = {}
elems.append(types.MappingProxyType(elem))
pos += 1
elif field_value[pos] == ';': # next forwarded-pair
need_separator = False
pos += 1
elif field_value[pos] in ' \t':
# Allow whitespace even between forwarded-pairs, though
# RFC 7239 doesn't. This simplifies code and is in line
# with Postel's law.
pos += 1
else:
# bad syntax here, skip to next comma
pos = field_value.find(',', pos)
return tuple(elems)
@reify
def scheme(self):
"""A string representing the scheme of the request.
'http' or 'https'.
"""
scheme = self._scheme
if scheme is not None:
return scheme
if self.transport.get_extra_info('sslcontext'):
return 'https'
else:
return 'http'
@property
def method(self):
"""Read only property for getting HTTP method.
The value is upper-cased str like 'GET', 'POST', 'PUT' etc.
"""
return self._method
@property
def version(self):
"""Read only property for getting HTTP version of request.
Returns aiohttp.protocol.HttpVersion instance.
"""
return self._version
@reify
def host(self):
"""Hostname of the request.
Hostname is resolved through the following headers, in this order:
- Forwarded
- X-Forwarded-Host
- Host
Returns str, or None if no hostname is found in the headers.
"""
host = self._host
if host is not None:
return host
host = self._message.headers.get(hdrs.HOST)
if host is not None:
return host
else:
return socket.getfqdn()
@reify
def remote(self):
"""Remote IP of client initiated HTTP request.
The IP is resolved through the following headers, in this order:
- Forwarded
- X-Forwarded-For
- peername of opened socket
"""
remote = self._remote
if remote is not None:
return remote
peername = self.transport.get_extra_info('peername')
if isinstance(peername, (list, tuple)):
return peername[0]
else:
return peername
@reify
def url(self):
url = URL.build(scheme=self.scheme, host=self.host)
return url.join(self._rel_url)
@property
def path(self):
"""The URL including *PATH INFO* without the host or scheme.
E.g., ``/app/blog``
"""
return self._rel_url.path
@reify
def path_qs(self):
"""The URL including PATH_INFO and the query string.
E.g, /app/blog?id=10
"""
return str(self._rel_url)
@property
def raw_path(self):
""" The URL including raw *PATH INFO* without the host or scheme.
Warning, the path is unquoted and may contains non valid URL characters
E.g., ``/my%2Fpath%7Cwith%21some%25strange%24characters``
"""
return self._message.path
@property
def query(self):
"""A multidict with all the variables in the query string."""
return self._rel_url.query
@property
def GET(self):
"""A multidict with all the variables in the query string."""
warnings.warn("GET property is deprecated, use .query instead",
DeprecationWarning)
return self._rel_url.query
@property
def query_string(self):
"""The query string in the URL.
E.g., id=10
"""
return self._rel_url.query_string
@property
def headers(self):
"""A case-insensitive multidict proxy with all headers."""
return self._headers
@property
def raw_headers(self):
"""A sequence of pars for all headers."""
return self._message.raw_headers
@reify
def if_modified_since(self, _IF_MODIFIED_SINCE=hdrs.IF_MODIFIED_SINCE):
"""The value of If-Modified-Since HTTP header, or None.
This header is represented as a `datetime` object.
"""
httpdate = self.headers.get(_IF_MODIFIED_SINCE)
if httpdate is not None:
timetuple = parsedate(httpdate)
if timetuple is not None:
return datetime.datetime(*timetuple[:6],
tzinfo=datetime.timezone.utc)
return None
@property
def keep_alive(self):
"""Is keepalive enabled by client?"""
return not self._message.should_close
@reify
def cookies(self):
"""Return request cookies.
A read-only dictionary-like object.
"""
raw = self.headers.get(hdrs.COOKIE, '')
parsed = SimpleCookie(raw)
return MappingProxyType(
{key: val.value for key, val in parsed.items()})
@property
def http_range(self, *, _RANGE=hdrs.RANGE):
"""The content of Range HTTP header.
Return a slice instance.
"""
rng = self._headers.get(_RANGE)
start, end = None, None
if rng is not None:
try:
pattern = r'^bytes=(\d*)-(\d*)$'
start, end = re.findall(pattern, rng)[0]
except IndexError: # pattern was not found in header
raise ValueError("range not in acceptible format")
end = int(end) if end else None
start = int(start) if start else None
if start is None and end is not None:
# end with no start is to return tail of content
end = -end
if start is not None and end is not None:
# end is inclusive in range header, exclusive for slice
end += 1
if start >= end:
raise ValueError('start cannot be after end')
if start is end is None: # No valid range supplied
raise ValueError('No start or end of range specified')
return slice(start, end, 1)
@property
def content(self):
"""Return raw payload stream."""
return self._payload
@property
def has_body(self):
"""Return True if request's HTTP BODY can be read, False otherwise."""
warnings.warn(
"Deprecated, use .can_read_body #2005",
DeprecationWarning, stacklevel=2)
return not self._payload.at_eof()
@property
def can_read_body(self):
"""Return True if request's HTTP BODY can be read, False otherwise."""
return not self._payload.at_eof()
@property
def body_exists(self):
"""Return True if request has HTTP BODY, False otherwise."""
return type(self._payload) is not EmptyStreamReader
async def release(self):
"""Release request.
Eat unread part of HTTP BODY if present.
"""
while not self._payload.at_eof():
await self._payload.readany()
async def read(self):
"""Read request body if present.
Returns bytes object with full request content.
"""
if self._read_bytes is None:
body = bytearray()
while True:
chunk = await self._payload.readany()
body.extend(chunk)
if self._client_max_size \
and len(body) >= self._client_max_size:
raise HTTPRequestEntityTooLarge
if not chunk:
break
self._read_bytes = bytes(body)
return self._read_bytes
async def text(self):
"""Return BODY as text using encoding from .charset."""
bytes_body = await self.read()
encoding = self.charset or 'utf-8'
return bytes_body.decode(encoding)
async def json(self, *, loads=json.loads):
"""Return BODY as JSON."""
body = await self.text()
return loads(body)
async def multipart(self, *, reader=multipart.MultipartReader):
"""Return async iterator to process BODY as multipart."""
return reader(self._headers, self._payload)
async def post(self):
"""Return POST parameters."""
if self._post is not None:
return self._post
if self._method not in self.POST_METHODS:
self._post = MultiDictProxy(MultiDict())
return self._post
content_type = self.content_type
if (content_type not in ('',
'application/x-www-form-urlencoded',
'multipart/form-data')):
self._post = MultiDictProxy(MultiDict())
return self._post
out = MultiDict()
if content_type == 'multipart/form-data':
multipart = await self.multipart()
field = await multipart.next()
while field is not None:
size = 0
max_size = self._client_max_size
content_type = field.headers.get(hdrs.CONTENT_TYPE)
if field.filename:
# store file in temp file
tmp = tempfile.TemporaryFile()
chunk = await field.read_chunk(size=2**16)
while chunk:
chunk = field.decode(chunk)
tmp.write(chunk)
size += len(chunk)
if max_size > 0 and size > max_size:
raise ValueError(
'Maximum request body size exceeded')
chunk = await field.read_chunk(size=2**16)
tmp.seek(0)
ff = FileField(field.name, field.filename,
tmp, content_type, field.headers)
out.add(field.name, ff)
else:
value = await field.read(decode=True)
if content_type is None or \
content_type.startswith('text/'):
charset = field.get_charset(default='utf-8')
value = value.decode(charset)
out.add(field.name, value)
size += len(value)
if max_size > 0 and size > max_size:
raise ValueError(
'Maximum request body size exceeded')
field = await multipart.next()
else:
data = await self.read()
if data:
charset = self.charset or 'utf-8'
out.extend(
parse_qsl(
data.rstrip().decode(charset),
keep_blank_values=True,
encoding=charset))
self._post = MultiDictProxy(out)
return self._post
def __repr__(self):
ascii_encodable_path = self.path.encode('ascii', 'backslashreplace') \
.decode('ascii')
return "<{} {} {} >".format(self.__class__.__name__,
self._method, ascii_encodable_path)
@asyncio.coroutine
def _prepare_hook(self, response):
return
yield # pragma: no cover
class Request(BaseRequest):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# matchdict, route_name, handler
# or information about traversal lookup
self._match_info = None # initialized after route resolving
def clone(self, *, method=sentinel, rel_url=sentinel,
headers=sentinel, scheme=sentinel, host=sentinel,
remote=sentinel):
ret = super().clone(method=method,
rel_url=rel_url,
headers=headers,
scheme=scheme,
host=host,
remote=remote)
ret._match_info = self._match_info
return ret
@property
def match_info(self):
"""Result of route resolving."""
return self._match_info
@reify
def app(self):
"""Application instance."""
return self._match_info.apps[-1]
async def _prepare_hook(self, response):
match_info = self._match_info
if match_info is None:
return
for app in match_info.apps:
await app.on_response_prepare.send(self, response)
| 32.020635
| 79
| 0.554801
|
4a0751d39869a0f1b15eb964d0b5b9ed433f6300
| 7,027
|
py
|
Python
|
setigen/voltage/polyphase_filterbank.py
|
bbrzycki/setigen
|
3106c32a629c76c71768ea02b7661474e1cf7ff6
|
[
"MIT"
] | 21
|
2019-01-25T20:44:56.000Z
|
2022-03-16T23:30:26.000Z
|
setigen/voltage/polyphase_filterbank.py
|
bbrzycki/setigen
|
3106c32a629c76c71768ea02b7661474e1cf7ff6
|
[
"MIT"
] | 7
|
2020-07-15T08:54:27.000Z
|
2021-09-24T03:57:30.000Z
|
setigen/voltage/polyphase_filterbank.py
|
bbrzycki/setigen
|
3106c32a629c76c71768ea02b7661474e1cf7ff6
|
[
"MIT"
] | 10
|
2020-03-17T17:59:26.000Z
|
2022-02-01T08:33:11.000Z
|
import os
GPU_FLAG = os.getenv('SETIGEN_ENABLE_GPU', '0')
if GPU_FLAG == '1':
try:
import cupy as xp
except ImportError:
import numpy as xp
else:
import numpy as xp
import numpy as np
import scipy.signal
import time
class PolyphaseFilterbank(object):
"""
Implement a polyphase filterbank (PFB) for coarse channelization of real voltage input data.
Follows description in Danny C. Price, Spectrometers and Polyphase Filterbanks in
Radio Astronomy, 2016. Available online at: http://arxiv.org/abs/1607.03579.
"""
def __init__(self, num_taps=8, num_branches=1024, window_fn='hamming'):
"""
Initialize a polyphase filterbank object, with a voltage sample cache that ensures that
consecutive sample retrievals get contiguous data (i.e. without introduced time delays).
Parameters
----------
num_taps : int, optional
Number of PFB taps
num_branches : int, optional
Number of PFB branches. Note that this results in `num_branches / 2` coarse channels.
window_fn : str, optional
Windowing function used for the PFB
"""
self.num_taps = num_taps
self.num_branches = num_branches
self.window_fn = window_fn
self.cache = None
self._get_pfb_window()
# Estimate stds after channelizing Gaussian with mean 0, std 1
self._get_channelized_stds()
def _reset_cache(self):
"""
Clear sample cache.
"""
self.cache = None
def _get_channelized_stds(self):
"""
Estimate standard deviations in real and imaginary components after channelizing
a zero-mean Gaussian distribution with variance 1.
"""
sample_v = xp.random.normal(0, 1, self.num_branches * 10000)
v_pfb = self.channelize(sample_v, use_cache=False)
self.channelized_stds = xp.array([v_pfb.real.std(), v_pfb.imag.std()])
def _get_pfb_window(self):
"""
Creates and saves PFB windowing coefficients. Saves frequency response shape
and ratio of maximum to mean of the frequency response.
"""
self.window = get_pfb_window(self.num_taps, self.num_branches, self.window_fn)
# Somewhat arbitrary length to calculate spectral response, representing
# fftlength in fine channelization. Only needed to estimate peak to mean response
length = 64 * self.num_taps
freq_response_x = xp.zeros(self.num_branches * length)
freq_response_x[:self.num_taps*self.num_branches] = self.window
h = xp.fft.fft(freq_response_x)
half_coarse_chan = (xp.abs(h)**2)[:length//2]+(xp.abs(h)**2)[length//2:length][::-1]
self.response = self.half_coarse_chan = half_coarse_chan
self.max_mean_ratio = xp.max(half_coarse_chan) / xp.mean(half_coarse_chan)
def channelize(self, x, use_cache=True):
"""
Channelize input voltages by applying the PFB and taking a normalized FFT.
Parameters
----------
x : array
Array of voltages
Returns
-------
X_pfb : array
Post-FFT complex voltages
"""
if use_cache:
# Cache last section of data, which is excluded in PFB step
if self.cache is not None:
x = xp.concatenate([self.cache, x])
self.cache = x[-self.num_taps*self.num_branches:]
x = pfb_frontend(x, self.window, self.num_taps, self.num_branches)
X_pfb = xp.fft.fft(x,
self.num_branches,
axis=1)[:, 0:self.num_branches//2] / self.num_branches**0.5
return X_pfb
def pfb_frontend(x, pfb_window, num_taps, num_branches):
"""
Apply windowing function to create polyphase filterbank frontend.
Follows description in Danny C. Price, Spectrometers and Polyphase
Filterbanks in Radio Astronomy, 2016. Available online at:
http://arxiv.org/abs/1607.03579.
Parameters
----------
x : array
Array of voltages
pfb_window : array
Array of PFB windowing coefficients
num_taps : int
Number of PFB taps
num_branches : int
Number of PFB branches. Note that this results in `num_branches / 2` coarse channels.
Returns
-------
x_summed : array
Array of voltages post-PFB weighting
"""
W = int(len(x) / num_taps / num_branches)
# Truncate data stream x to fit reshape step
x_p = x[:W*num_taps*num_branches].reshape((W * num_taps, num_branches))
h_p = pfb_window.reshape((num_taps, num_branches))
# Resulting summed data array will be slightly shorter from windowing coeffs
I = xp.expand_dims(xp.arange(num_taps), 0) + xp.expand_dims(xp.arange((W - 1) * num_taps), 0).T
x_summed = xp.sum(x_p[I] * h_p, axis=1) / num_taps
# x_summed = xp.zeros(((W - 1) * num_taps, num_branches))
# for t in range(0, (W - 1) * num_taps):
# x_weighted = x_p[t:t+num_taps, :] * h_p
# x_summed[t, :] = xp.sum(x_weighted, axis=0)
return x_summed
def get_pfb_window(num_taps, num_branches, window_fn='hamming'):
"""
Get windowing function to multiply to time series data
according to a finite impulse response (FIR) filter.
Parameters
----------
num_taps : int
Number of PFB taps
num_branches : int
Number of PFB branches. Note that this results in `num_branches / 2` coarse channels.
window_fn : str, optional
Windowing function used for the PFB
Returns
-------
window : array
Array of PFB windowing coefficients
"""
window = scipy.signal.firwin(num_taps * num_branches,
cutoff=1.0 / num_branches,
window=window_fn,
scale=True)
window *= num_taps * num_branches
return xp.array(window)
def get_pfb_voltages(x, num_taps, num_branches, window_fn='hamming'):
"""
Produce complex raw voltage data as a function of time and coarse channel.
Parameters
----------
x : array
Array of voltages
num_taps : int
Number of PFB taps
num_branches : int
Number of PFB branches. Note that this results in `num_branches / 2` coarse channels.
window_fn : str, optional
Windowing function used for the PFB
Returns
-------
X_pfb : array
Post-FFT complex voltages
"""
# Generate window coefficients
win_coeffs = get_pfb_window(num_taps, num_branches, window_fn)
# Apply frontend, take FFT, then take power (i.e. square)
x_fir = pfb_frontend(x, win_coeffs, num_taps, num_branches)
X_pfb = xp.fft.rfft(x_fir, num_branches, axis=1) / num_branches**0.5
return X_pfb
| 34.446078
| 99
| 0.616052
|
4a0752779d53d553966c524543bf7caba54d12c6
| 2,413
|
py
|
Python
|
lib/training/schemes/molhiv/scheme.py
|
shamim-hussain/egt_pytorch
|
1e243d6c94c572bde6c5d1456c2152e5852091e7
|
[
"MIT"
] | 10
|
2022-02-02T05:30:17.000Z
|
2022-03-31T12:33:57.000Z
|
lib/training/schemes/molhiv/scheme.py
|
shamim-hussain/egt_pytorch
|
1e243d6c94c572bde6c5d1456c2152e5852091e7
|
[
"MIT"
] | 1
|
2022-03-30T16:11:22.000Z
|
2022-03-31T13:35:41.000Z
|
lib/training/schemes/molhiv/scheme.py
|
shamim-hussain/egt_pytorch
|
1e243d6c94c572bde6c5d1456c2152e5852091e7
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn.functional as F
from lib.training.training import cached_property
from ..egt_mol_training import EGT_MOL_Training
from lib.models.molhiv import EGT_MOLHIV
from lib.data.molhiv import MOLHIVStructuralSVDGraphDataset
class MOLHIV_Training(EGT_MOL_Training):
def get_default_config(self):
config_dict = super().get_default_config()
config_dict.update(
dataset_name = 'molhiv',
dataset_path = 'cache_data/MOLHIV',
evaluation_type = 'prediction',
predict_on = ['test'],
state_file = None,
)
return config_dict
def get_dataset_config(self):
dataset_config, _ = super().get_dataset_config()
return dataset_config, MOLHIVStructuralSVDGraphDataset
def get_model_config(self):
model_config, _ = super().get_model_config()
return model_config, EGT_MOLHIV
def calculate_bce_loss(self, outputs, targets):
outputs = outputs.view(-1)
targets = targets.view(-1)
return F.binary_cross_entropy_with_logits(outputs, targets)
def calculate_loss(self, outputs, inputs):
return self.calculate_bce_loss(outputs, inputs['target'])
@cached_property
def evaluator(self):
from ogb.graphproppred import Evaluator
evaluator = Evaluator(name = "ogbg-molhiv")
return evaluator
def prediction_step(self, batch):
return dict(
predictions = torch.sigmoid(self.model(batch)),
targets = batch['target'],
)
def evaluate_predictions(self, predictions):
input_dict = {"y_true": predictions['targets'],
"y_pred": predictions['predictions']}
results = self.evaluator.eval(input_dict)
xent = self.calculate_bce_loss(torch.from_numpy(predictions['predictions']),
torch.from_numpy(predictions['targets'])).item()
results['xent'] = xent
for k, v in results.items():
if hasattr(v, 'tolist'):
results[k] = v.tolist()
return results
def evaluate_on(self, dataset_name, dataset, predictions):
print(f'Evaluating on {dataset_name}')
results = self.evaluate_predictions(predictions)
return results
SCHEME = MOLHIV_Training
| 34.471429
| 87
| 0.630336
|
4a0753f99f81c3c1f07cedc5b0394aa5c57b4830
| 1,389
|
py
|
Python
|
ecg_classification/__init__.py
|
KISMED-TUDa/ECG_Classification
|
7df7b6d28287f592536cdbf01b6aec73e7b045ef
|
[
"MIT"
] | 3
|
2021-12-07T17:08:00.000Z
|
2021-12-08T23:16:57.000Z
|
ecg_classification/__init__.py
|
KISMED-TUDa/ECG_Classification
|
7df7b6d28287f592536cdbf01b6aec73e7b045ef
|
[
"MIT"
] | 1
|
2021-12-09T00:33:41.000Z
|
2021-12-09T15:59:48.000Z
|
ecg_classification/__init__.py
|
KISMED-TUDa/ECG_Classification
|
7df7b6d28287f592536cdbf01b6aec73e7b045ef
|
[
"MIT"
] | 1
|
2021-07-30T14:53:48.000Z
|
2021-07-30T14:53:48.000Z
|
# Import dataset class
from ecg_classification.dataset import PhysioNetDataset, Icentia11kDataset, icentia11k_dataset_collate_fn
# Import loss functions
from ecg_classification.loss import SoftmaxFocalLoss, SoftmaxCrossEntropyLoss
# Import models
from ecg_classification.model import ECGCNN, ECGAttNet
# Import model configs
from ecg_classification.config import ECGCNN_CONFIG_S, ECGAttNet_CONFIG_S, ECGCNN_CONFIG_M, ECGAttNet_CONFIG_M, \
ECGCNN_CONFIG_L, ECGAttNet_CONFIG_L, ECGCNN_CONFIG_XL, ECGAttNet_CONFIG_XL, ECGAttNet_CONFIG_XXL, \
ECGAttNet_CONFIG_130M
# Import augmentation pipeline config
from ecg_classification.config import AUGMENTATION_PIPELINE_CONFIG
# Import model wrapper
from ecg_classification.model_wrapper import ModelWrapper
# Import data logger
from ecg_classification.logger import Logger
# Import splits
from ecg_classification.config import TRAINING_SPLIT, VALIDATION_SPLIT, TRAINING_SPLIT_PHYSIONET, \
VALIDATION_SPLIT_PHYSIONET, TRAINING_SPLIT_ICENTIA11K, VALIDATION_SPLIT_ICENTIA11K, VALIDATION_SEED_ICENTIA11K, \
TRAINING_SPLIT_CHALLANGE, VALIDATION_SPLIT_CHALLANGE, TRAINING_SPLIT_CHALLANGE_2_CLASSES, \
VALIDATION_SPLIT_CHALLANGE_2_CLASSES, AUGMENTATION_PIPELINE_CONFIG_2C
# Import metrics
from ecg_classification.metric import Accuracy, F1
# Import augmentation pipeline
from ecg_classification.augmentation import AugmentationPipeline
| 53.423077
| 117
| 0.87185
|
4a0755fb333db62c8ae03b95106e967117bb6e8e
| 10,150
|
py
|
Python
|
All_convet/ConvPool_c.py
|
fengjiran/scholar_project
|
35e86b7a8d0226ad0fee3b2983821a3f331f68aa
|
[
"Apache-2.0"
] | 3
|
2017-08-20T08:47:18.000Z
|
2019-06-21T06:09:27.000Z
|
All_convet/ConvPool_c.py
|
fengjiran/scholar_project
|
35e86b7a8d0226ad0fee3b2983821a3f331f68aa
|
[
"Apache-2.0"
] | null | null | null |
All_convet/ConvPool_c.py
|
fengjiran/scholar_project
|
35e86b7a8d0226ad0fee3b2983821a3f331f68aa
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import division
import os
import sys
import time
import csv
import yaml
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers.convolutional import Conv2D, ZeroPadding2D
from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D
from keras.optimizers import SGD
from keras.regularizers import l2
from keras.callbacks import Callback
from keras import backend as K
import numpy as np
from load_cifar10 import load_cifar_10
from load_cifar10 import global_contrast_normalize, zca
class TestCallback(Callback):
"""ref: https://github.com/fchollet/keras/issues/2548."""
def __init__(self, test_data, test_history_filepath):
super(TestCallback, self).__init__()
self.test_data = test_data
self.test_history_filepath = test_history_filepath
def on_epoch_end(self, epoch, logs=None):
x, y = self.test_data
loss, acc = self.model.evaluate(x, y, batch_size=250, verbose=0)
with open(self.test_history_filepath, 'a+') as f:
mywrite = csv.writer(f)
if epoch == 0:
mywrite.writerow(['test_loss', 'test_acc'])
mywrite.writerow([loss, acc])
else:
mywrite.writerow([loss, acc])
print '\nTesting loss: {0}, acc: {1}'.format(loss, acc)
class LrDecay(Callback):
"""Learning rate decay."""
def __init__(self, initial_lr, e1, e2, e3, drop_rate):
super(LrDecay, self).__init__()
self.initial_lr = initial_lr
self.e1 = e1
self.e2 = e2
self.e3 = e3
self.drop_rate = drop_rate
def on_epoch_end(self, epoch, logs=None):
if epoch == self.e1:
K.set_value(self.model.optimizer.lr, self.initial_lr * self.drop_rate)
if epoch == self.e2:
K.set_value(self.model.optimizer.lr, self.initial_lr * (self.drop_rate**2))
if epoch == self.e3:
K.set_value(self.model.optimizer.lr, self.initial_lr * (self.drop_rate**3))
print '\nThe learning rate is: {:.6f}\n'.format(K.eval(self.model.optimizer.lr))
class ConvPool_cnn_c(object):
"""ConvPool_cnn_c."""
def __init__(self, activation='relu', weight_decay=0.001):
"""Construct the network."""
self.activation = activation
self.weight_decay = weight_decay
self.model = Sequential()
self.model.add(Dropout(0.2, input_shape=(3, 32, 32)))
self.model.add(ZeroPadding2D(padding=(1, 1)))
self.model.add(Conv2D(filters=96,
kernel_size=(3, 3),
activation=self.activation,
kernel_regularizer=l2(self.weight_decay)))
self.model.add(ZeroPadding2D(padding=(1, 1)))
self.model.add(Conv2D(filters=96,
kernel_size=(3, 3),
activation=self.activation,
kernel_regularizer=l2(self.weight_decay)))
self.model.add(ZeroPadding2D(padding=(1, 1)))
self.model.add(Conv2D(filters=96,
kernel_size=(3, 3),
activation=self.activation,
kernel_regularizer=l2(self.weight_decay)))
self.model.add(MaxPooling2D(pool_size=(3, 3), strides=2))
self.model.add(Dropout(0.5))
self.model.add(ZeroPadding2D(padding=(1, 1)))
self.model.add(Conv2D(filters=192,
kernel_size=(3, 3),
activation=self.activation,
kernel_regularizer=l2(self.weight_decay)))
self.model.add(ZeroPadding2D(padding=(1, 1)))
self.model.add(Conv2D(filters=192,
kernel_size=(3, 3),
activation=self.activation,
kernel_regularizer=l2(self.weight_decay)))
self.model.add(ZeroPadding2D(padding=(1, 1)))
self.model.add(Conv2D(filters=192,
kernel_size=(3, 3),
activation=self.activation,
kernel_regularizer=l2(self.weight_decay)))
self.model.add(MaxPooling2D(pool_size=(3, 3), strides=2))
self.model.add(Dropout(0.5))
self.model.add(ZeroPadding2D(padding=(1, 1)))
self.model.add(Conv2D(filters=192,
kernel_size=(3, 3),
activation=self.activation,
kernel_regularizer=l2(self.weight_decay)))
self.model.add(Conv2D(filters=192,
kernel_size=(1, 1),
activation=self.activation,
kernel_regularizer=l2(self.weight_decay)))
self.model.add(Conv2D(filters=10,
kernel_size=(1, 1),
activation=self.activation,
kernel_regularizer=l2(self.weight_decay)))
self.model.add(GlobalAveragePooling2D())
self.model.add(Dense(units=10,
activation='softmax',
kernel_regularizer=l2(self.weight_decay)))
print self.model.summary()
def train(self,
initial_lr=0.05,
momentum=0.9,
batch_size=250,
train_epochs=350,
lr_scheduler=True,
e1=100,
e2=200,
e3=300,
drop_rate=0.1,
test_history_filepath='test_history_convpool_c.csv'):
self.initial_lr = initial_lr
(X_train, y_train), (X_test, y_test) = load_cifar_10()
X_train = np.reshape(X_train, (X_train.shape[0], 3 * 32 * 32))
X_test = np.reshape(X_test, (X_test.shape[0], 3 * 32 * 32))
X_train = global_contrast_normalize(X_train)
X_test = global_contrast_normalize(X_test)
X_train = zca(X_train)
X_test = zca(X_test)
X_train = np.reshape(X_train, (X_train.shape[0], 3, 32, 32))
X_test = np.reshape(X_test, (X_test.shape[0], 3, 32, 32))
if lr_scheduler:
sgd = SGD(lr=initial_lr, momentum=momentum, nesterov=True)
self.model.compile(optimizer=sgd,
loss='categorical_crossentropy',
metrics=['accuracy'])
history = self.model.fit(X_train, y_train,
epochs=train_epochs,
batch_size=batch_size,
shuffle=True,
callbacks=[TestCallback((X_test, y_test), test_history_filepath),
LrDecay(self.initial_lr, e1, e2, e3, drop_rate)])
return history
else:
sgd = SGD(lr=initial_lr, momentum=momentum, nesterov=True)
self.model.compile(optimizer=sgd,
loss='categorical_crossentropy',
metrics=['accuracy'])
history = self.model.fit(X_train, y_train,
epochs=train_epochs,
batch_size=batch_size,
shuffle=True,
callbacks=[TestCallback((X_test, y_test), test_history_filepath)])
return history
def get_config(self):
"""Return a dictionary containing the configuration of the model.
The model can be reinstantiated from its config via:
config = model.get_config()
model = Model.from_config(config)
or for Sequential:
model = Sequential.from_config(config).
"""
self.config = self.model.get_config()
def save_model_weights(self, filepath='model_convpool_c_weights.h5'):
self.model.save_weights(filepath)
    def num_params(self):
        """Count the number of parameters in the network."""
        # Delegates directly to the underlying Keras model.
        return self.model.count_params()
if __name__ == '__main__':
    # Load training hyper-parameters from the YAML config file.
    # safe_load avoids executing arbitrary Python tags embedded in the file.
    with open('config_convpool_c.yaml', 'r') as f:
        config = yaml.safe_load(f)
    activation = config['activation']
    weight_decay = config['weight_decay']
    initial_lr = config['initial_lr']
    momentum = config['momentum']
    lr_decay = config['lr_decay']  # NOTE(review): read but not used below
    batch_size = config['batch_size']
    train_epochs = config['train_epochs']
    lr_scheduler = config['lr_scheduler']
    e1 = config['e1']
    e2 = config['e2']
    e3 = config['e3']
    drop_rate = config['drop_rate']
    train_history_filepath = config['train_history_filepath']
    test_history_filepath = config['test_history_filepath']

    model = ConvPool_cnn_c(activation=activation, weight_decay=weight_decay)
    start_time = time.clock()
    hist = model.train(initial_lr=initial_lr,
                       momentum=momentum,
                       batch_size=batch_size,
                       train_epochs=train_epochs,
                       lr_scheduler=lr_scheduler,
                       e1=e1,
                       e2=e2,
                       e3=e3,
                       drop_rate=drop_rate,
                       test_history_filepath=test_history_filepath)
    model.save_model_weights()

    # Append the per-epoch training curve to a CSV file.
    train_loss = hist.history['loss']
    train_acc = hist.history['acc']
    with open(train_history_filepath, 'a+') as f:
        mywrite = csv.writer(f)
        mywrite.writerow(['train_loss', 'train_acc'])
        for loss, acc in zip(train_loss, train_acc):
            mywrite.writerow([loss, acc])
    end_time = time.clock()
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
| 37.043796
| 104
| 0.543153
|
4a07569bc21af9ce642c6e98d449dc86911a2cbf
| 1,056
|
py
|
Python
|
pyleecan/Methods/Mesh/SolutionVector/get_axes_list.py
|
tobsen2code/pyleecan
|
5b1ded9e389e0c79ed7b7c878b6e939f2d9962e9
|
[
"Apache-2.0"
] | 95
|
2019-01-23T04:19:45.000Z
|
2022-03-17T18:22:10.000Z
|
pyleecan/Methods/Mesh/SolutionVector/get_axes_list.py
|
ecs-kev/pyleecan
|
1faedde4b24acc6361fa1fdd4e980eaec4ca3a62
|
[
"Apache-2.0"
] | 366
|
2019-02-20T07:15:08.000Z
|
2022-03-31T13:37:23.000Z
|
pyleecan/Methods/Mesh/SolutionVector/get_axes_list.py
|
ecs-kev/pyleecan
|
1faedde4b24acc6361fa1fdd4e980eaec4ca3a62
|
[
"Apache-2.0"
] | 74
|
2019-01-24T01:47:31.000Z
|
2022-02-25T05:44:42.000Z
|
# -*- coding: utf-8 -*-
import numpy as np
from pyleecan.Functions.make_ndarray_equal import make_ndarray_equal
def get_axes_list(self, *args):
    """Return the axis names and sizes of the field stored in the solution.

    A trailing "component" axis is appended, sized by the number of field
    components.

    Parameters
    ----------
    self : SolutionVector
        a SolutionVector object
    *args :
        arguments forwarded to ``self.field.get_axes``

    Returns
    -------
    ax_name : list
        names of the field axes (plus "component")
    ax_size : list
        lengths of the field axes (plus the component count)
    """
    axes = self.field.get_axes(*args)
    # Validate that the field carries a known component set; the original
    # assigned the component to an unused local, so only the check remains.
    if ("comp_x" not in self.field.components
            and "radial" not in self.field.components):
        raise Exception(
            "self.field.components shall have either " "comp_x" " or " "radial" " key"
        )
    ax_name = [axis.name for axis in axes]
    ax_size = [axis.get_length() for axis in axes]
    ax_name.append("component")
    ax_size.append(len(self.field.components))
    return ax_name, ax_size
| 22.468085
| 86
| 0.621212
|
4a0759beec9d83b4d4486e161ac446cde2b58853
| 362
|
py
|
Python
|
torchpack/runner/hooks/timer.py
|
hellock/torchpack
|
8d7363ff683c8aec5af57e5d53518a22c7e0a807
|
[
"MIT"
] | 25
|
2017-12-16T09:53:14.000Z
|
2021-11-26T14:19:38.000Z
|
torchpack/runner/hooks/timer.py
|
nd1511/torchpack
|
8d7363ff683c8aec5af57e5d53518a22c7e0a807
|
[
"MIT"
] | null | null | null |
torchpack/runner/hooks/timer.py
|
nd1511/torchpack
|
8d7363ff683c8aec5af57e5d53518a22c7e0a807
|
[
"MIT"
] | 9
|
2018-01-17T14:08:05.000Z
|
2021-08-31T14:48:25.000Z
|
import time
from .hook import Hook
class TimerHook(Hook):
    """Measure data-loading and per-iteration wall-clock times.

    Results are pushed into ``runner.log_buffer`` under the keys
    ``data_time`` and ``time``.
    """

    def before_epoch(self, runner):
        # Start the clock at the top of every epoch.
        self.t = time.time()

    def before_iter(self, runner):
        # Everything since the previous mark was spent fetching data.
        elapsed = time.time() - self.t
        runner.log_buffer.update(dict(data_time=elapsed))

    def after_iter(self, runner):
        # Full iteration time, then re-mark for the next measurement.
        elapsed = time.time() - self.t
        runner.log_buffer.update(dict(time=elapsed))
        self.t = time.time()
| 21.294118
| 69
| 0.627072
|
4a0759ff68bf99fa5fe0eec64158731c0d0079c5
| 387
|
py
|
Python
|
jsmdc/asgi.py
|
Freiza/jsmdc
|
22db7975d7b6d52889e334ad7b8af09d2206f506
|
[
"Apache-2.0"
] | null | null | null |
jsmdc/asgi.py
|
Freiza/jsmdc
|
22db7975d7b6d52889e334ad7b8af09d2206f506
|
[
"Apache-2.0"
] | null | null | null |
jsmdc/asgi.py
|
Freiza/jsmdc
|
22db7975d7b6d52889e334ad7b8af09d2206f506
|
[
"Apache-2.0"
] | null | null | null |
"""
ASGI config for jsmdc project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at this project's settings module before the app factory runs.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'jsmdc.settings')

# Module-level ASGI callable, the conventional entry point for ASGI servers.
application = get_asgi_application()
| 22.764706
| 78
| 0.782946
|
4a075a09568109eb23763b3c7516c5ff3bf65351
| 155,791
|
py
|
Python
|
dice/bottle.py
|
manor/OpenSlice
|
e5f3bba181000801060f5ae67f88337ad0c63695
|
[
"MIT"
] | null | null | null |
dice/bottle.py
|
manor/OpenSlice
|
e5f3bba181000801060f5ae67f88337ad0c63695
|
[
"MIT"
] | null | null | null |
dice/bottle.py
|
manor/OpenSlice
|
e5f3bba181000801060f5ae67f88337ad0c63695
|
[
"MIT"
] | 2
|
2016-12-10T19:50:22.000Z
|
2018-02-06T21:23:13.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with URL parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2014, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
import sys
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
###############################################################################
# Command-line interface ########################################################
###############################################################################
# INFO: Some server adapters need to monkey-patch std-lib modules before they
# are imported. This is why some of the command-line handling is done here, but
# the actual call to main() is at the end of the file.
def _cli_parse(args):
from optparse import OptionParser
parser = OptionParser(
usage="usage: %prog [options] package.module:app")
opt = parser.add_option
opt("--version", action="store_true", help="show version number.")
opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
opt("-p", "--plugin", action="append", help="install additional plugin/s.")
opt("-c", "--conf", action="append", metavar="FILE",
help="load config values from FILE.")
opt("-C", "--param", action="append", metavar="NAME=VALUE",
help="override config values.")
opt("--debug", action="store_true", help="start server in debug mode.")
opt("--reload", action="store_true", help="auto-reload on file changes.")
opts, args = parser.parse_args(args[1:])
return opts, args, parser
def _cli_patch(args):
    """If a green-thread server backend was requested on the command line,
    monkey-patch the std-lib *before* the heavy imports below run."""
    opts, _, _ = _cli_parse(args)
    if opts.server:
        if opts.server.startswith('gevent'):
            import gevent.monkey
            gevent.monkey.patch_all()
        elif opts.server.startswith('eventlet'):
            import eventlet
            eventlet.monkey_patch()
# Run the monkey-patch step early, before the std-lib imports below.
if __name__ == '__main__':
    _cli_patch(sys.argv)
###############################################################################
# Imports and Python 2/3 unification ###########################################
###############################################################################
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, tempfile, threading, time, warnings
from types import FunctionType
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from unicodedata import normalize
# inspect.getargspec is deprecated (and removed in Python 3.11); use a
# Signature-based re-implementation where we can (Python 3.3+).
try:
    from inspect import signature

    def getargspec(func):
        # Rebuild the classic (args, varargs, keywords, defaults) tuple
        # from the modern Signature API.
        params = signature(func).parameters
        args, varargs, keywords, defaults = [], None, None, []
        for name, param in params.items():
            if param.kind == param.VAR_POSITIONAL:
                varargs = name
            elif param.kind == param.VAR_KEYWORD:
                keywords = name
            else:
                args.append(name)
                if param.default is not param.empty:
                    defaults.append(param.default)
        return (args, varargs, keywords, tuple(defaults) or None)
except ImportError:
    from inspect import getargspec
# Pick a JSON implementation: simplejson (fast C extension) first, then the
# std-lib json, then Django's bundled copy, else a stub that raises on use.
try:
    from simplejson import dumps as json_dumps, loads as json_lds
except ImportError:  # pragma: no cover
    try:
        from json import dumps as json_dumps, loads as json_lds
    except ImportError:
        try:
            from django.utils.simplejson import dumps as json_dumps, loads as json_lds
        except ImportError:

            def json_dumps(data):
                raise ImportError(
                    "JSON support requires Python 2.6 or simplejson.")

            json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.

py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)

# Workaround for the missing "as" keyword in py3k.
def _e():
    # Return the exception instance currently being handled.
    return sys.exc_info()[1]

# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
    _stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
    _stdout = lambda x: sys.stdout.write(x)
    _stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser, Error as ConfigParserError
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a):
raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser, \
Error as ConfigParserError
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it):
return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
    """Convert *s* to the byte string type (encode text, cast the rest)."""
    if isinstance(s, unicode):
        return s.encode(enc)
    return bytes(s)
def touni(s, enc='utf8', err='strict'):
    """Convert *s* to the unicode string type (decode bytes, cast the rest;
    ``None`` becomes the empty string)."""
    if isinstance(s, bytes):
        return s.decode(enc, err)
    return unicode(s or ("" if s is None else s))
# Converter to the "native" string type: text on py3, bytes on py2.
tonat = touni if py3k else tob

# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
    from io import TextIOWrapper

    class NCTextIOWrapper(TextIOWrapper):
        def close(self):
            pass  # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
    # Best-effort metadata copy: silently skip objects lacking the attributes
    # functools.update_wrapper wants to read (e.g. bound methods, partials).
    try:
        functools.update_wrapper(wrapper, wrapped, *a, **ka)
    except AttributeError:
        pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.

def depr(message, strict=False):
    # Warn about deprecated bottle APIs; stacklevel=3 points at the caller's
    # caller. NOTE(review): ``strict`` is accepted but currently unused.
    warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data):  # This is just too handy
    """Coerce *data* into a list: containers are converted, other truthy
    values are wrapped, and falsy values yield an empty list."""
    if isinstance(data, (tuple, list, set, dict)):
        return list(data)
    return [data] if data else []
class DictProperty(object):
    """ Property that maps to a key in a local dict-like attribute. """

    def __init__(self, attr, key=None, read_only=False):
        self.attr = attr
        self.key = key
        self.read_only = read_only

    def __call__(self, func):
        functools.update_wrapper(self, func, updated=[])
        self.getter = func
        # Fall back to the decorated function's name when no key was given.
        self.key = self.key or func.__name__
        return self

    def __get__(self, obj, cls):
        if obj is None:
            return self
        storage = getattr(obj, self.attr)
        if self.key not in storage:
            # Compute lazily on first access, then serve from the dict.
            storage[self.key] = self.getter(obj)
        return storage[self.key]

    def __set__(self, obj, value):
        if self.read_only:
            raise AttributeError("Read-Only property.")
        getattr(obj, self.attr)[self.key] = value

    def __delete__(self, obj):
        if self.read_only:
            raise AttributeError("Read-Only property.")
        del getattr(obj, self.attr)[self.key]
class cached_property(object):
    """ A property that is only computed once per instance and then replaces
        itself with an ordinary attribute. Deleting the attribute resets the
        property. """

    def __init__(self, func):
        self.__doc__ = getattr(func, '__doc__')
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            return self
        result = self.func(obj)
        # Shadow this descriptor: the instance dict wins on the next lookup.
        obj.__dict__[self.func.__name__] = result
        return result
class lazy_attribute(object):
    """ A property that caches itself to the class object. """

    def __init__(self, func):
        functools.update_wrapper(self, func, updated=[])
        self.getter = func

    def __get__(self, obj, cls):
        # Compute once, then replace this descriptor on the class with the
        # plain value so later lookups are ordinary attribute access.
        result = self.getter(cls)
        setattr(cls, self.__name__, result)
        return result
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
# Root of bottle's exception hierarchy; catch this to handle any bottle error.
class BottleException(Exception):
    """ A base class for exceptions used by bottle. """
    pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
    """ This is a base class for all routing related exceptions """
# Control-flow exception, not an error: signals "re-apply all plugins".
class RouteReset(BottleException):
    """ If raised by a plugin or request handler, the route is reset and all
        plugins are re-applied. """
# Raised for an unrecognized router mode.
class RouterUnknownModeError(RouteError):
    pass
class RouteSyntaxError(RouteError):
    """ The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
    """ The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if
len(m.group(1)) % 2 else m.group(1) + '(?:', p)
class Router(object):
    """ A Router is an ordered collection of route->target pairs. It is used to
        efficiently match WSGI requests against a number of routes and return
        the first target that satisfies the request. The target may be anything,
        usually a string, ID or callable object. A route consists of a path-rule
        and a HTTP method.

        The path-rule is either a static path (e.g. `/contact`) or a dynamic
        path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
        and details on the matching order are described in docs:`routing`.
    """

    default_pattern = '[^/]+'
    default_filter = 're'

    #: The current CPython regexp implementation does not allow more
    #: than 99 matching groups per regular expression.
    _MAX_GROUPS_PER_PATTERN = 99

    def __init__(self, strict=False):
        self.rules = []  # All rules in order
        self._groups = {}  # index of regexes to find them in dyna_routes
        self.builder = {}  # Data structure for the url builder
        self.static = {}  # Search structure for static routes
        self.dyna_routes = {}
        self.dyna_regexes = {}  # Search structure for dynamic routes
        #: If true, static routes are no longer checked first.
        self.strict_order = strict
        # Wildcard filters: name -> conf -> (regexp, to_python, to_url).
        self.filters = {
            're': lambda conf: (_re_flatten(conf or self.default_pattern),
                                None, None),
            'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
            'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
            'path': lambda conf: (r'.+?', None, None)
        }

    def add_filter(self, name, func):
        """ Add a filter. The provided function is called with the configuration
            string as parameter and must return a (regexp, to_python, to_url) tuple.
            The first element is a string, the last two are callables or None. """
        self.filters[name] = func

    # Matches both the old ``:name#conf#`` and the new ``<name:filter:conf>``
    # wildcard syntaxes; group(1) counts preceding backslashes for escapes.
    rule_syntax = re.compile('(\\\\*)'
                             '(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
                             '|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
                             '(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')

    def _itertokens(self, rule):
        # Yield (prefix, None, None) for literal parts and
        # (name, filter, conf) for wildcard parts of *rule*.
        offset, prefix = 0, ''
        for match in self.rule_syntax.finditer(rule):
            prefix += rule[offset:match.start()]
            g = match.groups()
            if len(g[0]) % 2:  # Escaped wildcard
                prefix += match.group(0)[len(g[0]):]
                offset = match.end()
                continue
            if prefix:
                yield prefix, None, None
            name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
            yield name, filtr or 'default', conf or None
            offset, prefix = match.end(), ''
        if offset <= len(rule) or prefix:
            yield prefix + rule[offset:], None, None

    def add(self, rule, method, target, name=None):
        """ Add a new rule or replace the target for an existing rule. """
        anons = 0  # Number of anonymous wildcards found
        keys = []  # Names of keys
        pattern = ''  # Regular expression pattern with named groups
        filters = []  # Lists of wildcard input filters
        builder = []  # Data structure for the URL builder
        is_static = True

        for key, mode, conf in self._itertokens(rule):
            if mode:
                is_static = False
                if mode == 'default': mode = self.default_filter
                mask, in_filter, out_filter = self.filters[mode](conf)
                if not key:
                    pattern += '(?:%s)' % mask
                    key = 'anon%d' % anons
                    anons += 1
                else:
                    pattern += '(?P<%s>%s)' % (key, mask)
                    keys.append(key)
                if in_filter: filters.append((key, in_filter))
                builder.append((key, out_filter or str))
            elif key:
                pattern += re.escape(key)
                builder.append((None, key))

        self.builder[rule] = builder
        if name: self.builder[name] = builder

        if is_static and not self.strict_order:
            # Static routes go into a plain dict for O(1) lookup.
            self.static.setdefault(method, {})
            self.static[method][self.build(rule)] = (target, None)
            return

        try:
            re_pattern = re.compile('^(%s)$' % pattern)
            re_match = re_pattern.match
        except re.error:
            raise RouteSyntaxError("Could not add Route: %s (%s)" %
                                   (rule, _e()))

        if filters:

            def getargs(path):
                url_args = re_match(path).groupdict()
                for name, wildcard_filter in filters:
                    try:
                        url_args[name] = wildcard_filter(url_args[name])
                    except ValueError:
                        raise HTTPError(400, 'Path has wrong format.')
                return url_args
        elif re_pattern.groupindex:

            def getargs(path):
                return re_match(path).groupdict()
        else:
            getargs = None

        flatpat = _re_flatten(pattern)
        whole_rule = (rule, flatpat, target, getargs)

        if (flatpat, method) in self._groups:
            if DEBUG:
                msg = 'Route <%s %s> overwrites a previously defined route'
                warnings.warn(msg % (method, rule), RuntimeWarning)
            self.dyna_routes[method][
                self._groups[flatpat, method]] = whole_rule
        else:
            self.dyna_routes.setdefault(method, []).append(whole_rule)
            self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1

        self._compile(method)

    def _compile(self, method):
        # Combine dynamic rules into big alternation regexes, at most
        # _MAX_GROUPS_PER_PATTERN rules per compiled pattern.
        all_rules = self.dyna_routes[method]
        comborules = self.dyna_regexes[method] = []
        maxgroups = self._MAX_GROUPS_PER_PATTERN
        for x in range(0, len(all_rules), maxgroups):
            some = all_rules[x:x + maxgroups]
            combined = (flatpat for (_, flatpat, _, _) in some)
            combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
            combined = re.compile(combined).match
            rules = [(target, getargs) for (_, _, target, getargs) in some]
            comborules.append((combined, rules))

    def build(self, _name, *anons, **query):
        """ Build an URL by filling the wildcards in a rule. """
        builder = self.builder.get(_name)
        if not builder:
            raise RouteBuildError("No route with that name.", _name)
        try:
            for i, value in enumerate(anons):
                query['anon%d' % i] = value
            # Left-over query items become the query string.
            url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
            return url if not query else url + '?' + urlencode(query)
        except KeyError:
            raise RouteBuildError('Missing URL argument: %r' % _e().args[0])

    def match(self, environ):
        """ Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
        verb = environ['REQUEST_METHOD'].upper()
        path = environ['PATH_INFO'] or '/'

        if verb == 'HEAD':
            # HEAD falls back to GET handlers.
            methods = ['PROXY', verb, 'GET', 'ANY']
        else:
            methods = ['PROXY', verb, 'ANY']

        for method in methods:
            if method in self.static and path in self.static[method]:
                target, getargs = self.static[method][path]
                return target, getargs(path) if getargs else {}
            elif method in self.dyna_regexes:
                for combined, rules in self.dyna_regexes[method]:
                    match = combined(path)
                    if match:
                        # lastindex identifies which alternative matched.
                        target, getargs = rules[match.lastindex - 1]
                        return target, getargs(path) if getargs else {}

        # No matching route found. Collect alternative methods for 405 response
        allowed = set([])
        nocheck = set(methods)
        for method in set(self.static) - nocheck:
            if path in self.static[method]:
                # NOTE(review): adds *verb* (the requested method), not
                # *method* (the one that would match) -- confirm intended.
                allowed.add(verb)
        for method in set(self.dyna_regexes) - allowed - nocheck:
            for combined, rules in self.dyna_regexes[method]:
                match = combined(path)
                if match:
                    allowed.add(method)
        if allowed:
            allow_header = ",".join(sorted(allowed))
            raise HTTPError(405, "Method not allowed.", Allow=allow_header)

        # No matching route and no alternative method found. We give up
        raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
    """ This class wraps a route callback along with route specific metadata and
        configuration and applies Plugins on demand. It is also responsible for
        turning an URL path rule into a regular expression usable by the Router.
    """

    def __init__(self, app, rule, method, callback,
                 name=None,
                 plugins=None,
                 skiplist=None, **config):
        #: The application this route is installed to.
        self.app = app
        #: The path-rule string (e.g. ``/wiki/<page>``).
        self.rule = rule
        #: The HTTP method as a string (e.g. ``GET``).
        self.method = method
        #: The original callback with no plugins applied. Useful for introspection.
        self.callback = callback
        #: The name of the route (if specified) or ``None``.
        self.name = name or None
        #: A list of route-specific plugins (see :meth:`Bottle.route`).
        self.plugins = plugins or []
        #: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
        self.skiplist = skiplist or []
        #: Additional keyword arguments passed to the :meth:`Bottle.route`
        #: decorator are stored in this dictionary. Used for route-specific
        #: plugin configuration and meta-data.
        self.config = ConfigDict().load_dict(config)

    @cached_property
    def call(self):
        """ The route callback with all plugins applied. This property is
            created on demand and then cached to speed up subsequent requests."""
        return self._make_callback()

    def reset(self):
        """ Forget any cached values. The next time :attr:`call` is accessed,
            all plugins are re-applied. """
        self.__dict__.pop('call', None)

    def prepare(self):
        """ Do all on-demand work immediately (useful for debugging)."""
        self.call

    def all_plugins(self):
        """ Yield all Plugins affecting this route. """
        unique = set()
        # App-level plugins first (reversed so later installs wrap earlier
        # ones), then route-specific plugins; honor the skiplist throughout.
        for p in reversed(self.app.plugins + self.plugins):
            if True in self.skiplist: break
            name = getattr(p, 'name', False)
            if name and (name in self.skiplist or name in unique): continue
            if p in self.skiplist or type(p) in self.skiplist: continue
            if name: unique.add(name)
            yield p

    def _make_callback(self):
        # Apply every plugin in order; a RouteReset raised by a plugin
        # restarts the whole chain with the (possibly changed) configuration.
        callback = self.callback
        for plugin in self.all_plugins():
            try:
                if hasattr(plugin, 'apply'):
                    callback = plugin.apply(callback, self)
                else:
                    callback = plugin(callback)
            except RouteReset:  # Try again with changed configuration.
                return self._make_callback()
        if not callback is self.callback:
            update_wrapper(callback, self.callback)
        return callback

    def get_undecorated_callback(self):
        """ Return the callback. If the callback is a decorated function, try to
            recover the original function. """
        func = self.callback
        func = getattr(func, '__func__' if py3k else 'im_func', func)
        closure_attr = '__closure__' if py3k else 'func_closure'
        # Walk down the closure chain left behind by stacked decorators.
        while hasattr(func, closure_attr) and getattr(func, closure_attr):
            attributes = getattr(func, closure_attr)
            func = attributes[0].cell_contents

            # in case of decorators with multiple arguments
            if not isinstance(func, FunctionType):
                # pick first FunctionType instance from multiple arguments
                func = filter(lambda x: isinstance(x, FunctionType),
                              map(lambda x: x.cell_contents, attributes))
                func = list(func)[0]  # py3 support
        return func

    def get_callback_args(self):
        """ Return a list of argument names the callback (most likely) accepts
            as keyword arguments. If the callback is a decorated function, try
            to recover the original function before inspection. """
        return getargspec(self.get_undecorated_callback())[0]

    def get_config(self, key, default=None):
        """ Lookup a config field and return its value, first checking the
            route.config, then route.app.config."""
        for conf in (self.config, self.app.config):
            if key in conf: return conf[key]
        return default

    def __repr__(self):
        cb = self.get_undecorated_callback()
        return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
    def __init__(self, catchall=True, autojson=True):
        """Create a new application with its own config, router and plugins."""
        #: A :class:`ConfigDict` for app specific configuration.
        self.config = ConfigDict()
        # Re-fire the 'config' hook whenever a config value changes.
        self.config._on_change = functools.partial(self.trigger_hook, 'config')
        self.config.meta_set('autojson', 'validate', bool)
        self.config.meta_set('catchall', 'validate', bool)
        self.config['catchall'] = catchall
        self.config['autojson'] = autojson

        #: A :class:`ResourceManager` for application files
        self.resources = ResourceManager()

        self.routes = []  # List of installed :class:`Route` instances.
        self.router = Router()  # Maps requests to :class:`Route` instances.
        self.error_handler = {}

        # Core plugins
        self.plugins = []  # List of installed plugins.
        if self.config['autojson']:
            self.install(JSONPlugin())
        self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
    @cached_property
    def _hooks(self):
        # Lazily build the hook registry: one empty callback list per name.
        return dict((name, []) for name in self.__hook_names)
    def add_hook(self, name, func):
        """ Attach a callback to a hook. Three hooks are currently implemented:

            before_request
                Executed once before each request. The request context is
                available, but no routing has happened yet.
            after_request
                Executed once after each request regardless of its outcome.
            app_reset
                Called whenever :meth:`Bottle.reset` is called.
        """
        if name in self.__hook_reversed:
            # Reversed hooks run newest-first, so prepend instead of append.
            self._hooks[name].insert(0, func)
        else:
            self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
    def hook(self, name):
        """ Return a decorator that attaches a callback to a hook. See
            :meth:`add_hook` for details."""

        def decorator(func):
            # Register the callback and hand it back unchanged (stackable).
            self.add_hook(name, func)
            return func

        return decorator
    def mount(self, prefix, app, **options):
        """ Mount an application (:class:`Bottle` or plain WSGI) to a specific
            URL prefix. Example::

                root_app.mount('/admin/', admin_app)

            :param prefix: path prefix or `mount-point`. If it ends in a slash,
                that slash is mandatory.
            :param app: an instance of :class:`Bottle` or a WSGI application.

            All other parameters are passed to the underlying :meth:`route` call.
        """

        segments = [p for p in prefix.split('/') if p]
        if not segments: raise ValueError('Empty path prefix.')
        path_depth = len(segments)

        def mountpoint_wrapper():
            # Shift the prefix from PATH_INFO to SCRIPT_NAME so the mounted
            # app sees paths relative to its mount point; always shift back.
            try:
                request.path_shift(path_depth)
                rs = HTTPResponse([])

                def start_response(status, headerlist, exc_info=None):
                    if exc_info:
                        _raise(*exc_info)
                    rs.status = status
                    for name, value in headerlist:
                        rs.add_header(name, value)
                    return rs.body.append

                body = app(request.environ, start_response)
                rs.body = itertools.chain(rs.body, body) if rs.body else body
                return rs
            finally:
                request.path_shift(-path_depth)

        options.setdefault('skip', True)
        options.setdefault('method', 'PROXY')
        options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
        options['callback'] = mountpoint_wrapper

        self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
        if not prefix.endswith('/'):
            # Also catch the bare mount point without a trailing slash.
            self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
    def uninstall(self, plugin):
        """ Uninstall plugins. Pass an instance to remove a specific plugin, a type
            object to remove all plugins that match that type, a string to remove
            all plugins with a matching ``name`` attribute or ``True`` to remove all
            plugins. Return the list of removed plugins. """
        removed, remove = [], plugin
        # Iterate in reverse so deleting by index stays valid.
        for i, plugin in list(enumerate(self.plugins))[::-1]:
            if remove is True or remove is plugin or remove is type(plugin) \
            or getattr(plugin, 'name', True) == remove:
                removed.append(plugin)
                del self.plugins[i]
                if hasattr(plugin, 'close'): plugin.close()
        if removed: self.reset()
        return removed
    def reset(self, route=None):
        """ Reset all routes (force plugins to be re-applied) and clear all
            caches. If an ID or route object is given, only that specific route
            is affected. """
        if route is None: routes = self.routes
        elif isinstance(route, Route): routes = [route]
        else: routes = [self.routes[route]]

        for route in routes:
            route.reset()
        # In debug mode, re-apply plugins eagerly so errors surface now.
        if DEBUG:
            for route in routes:
                route.prepare()
        self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
    def run(self, **kwargs):
        """ Calls :func:`run` with the same parameters. """
        # Delegates to the module-level run() helper with this app as target.
        run(self, **kwargs)
    def match(self, environ):
        """ Search for a matching route and return a (:class:`Route` , urlargs)
            tuple. The second value is a dictionary with parameters extracted
            from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
        # Thin wrapper around the router.
        return self.router.match(environ)
    def get_url(self, routename, **kargs):
        """ Return a string that matches a named route """
        # Prepend the current request's SCRIPT_NAME so the URL is valid from
        # the client's point of view.
        scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
        location = self.router.build(routename, **kargs).lstrip('/')
        return urljoin(urljoin('/', scriptname), location)
    def add_route(self, route):
        """ Add a route object, but do not change the :data:`Route.app`
            attribute."""
        self.routes.append(route)
        self.router.add(route.rule, route.method, route, name=route.name)
        # In debug mode, apply plugins eagerly so errors surface at startup.
        if DEBUG: route.prepare()
    def route(self,
              path=None,
              method='GET',
              callback=None,
              name=None,
              apply=None,
              skip=None, **config):
        """ A decorator to bind a function to a request URL. Example::

                @app.route('/hello/<name>')
                def hello(name):
                    return 'Hello %s' % name

            The ``<name>`` part is a wildcard. See :class:`Router` for syntax
            details.

            :param path: Request path or a list of paths to listen to. If no
              path is specified, it is automatically generated from the
              signature of the function.
            :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
              methods to listen to. (default: `GET`)
            :param callback: An optional shortcut to avoid the decorator
              syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
            :param name: The name for this route. (default: None)
            :param apply: A decorator or plugin or a list of plugins. These are
              applied to the route callback in addition to installed plugins.
            :param skip: A list of plugins, plugin classes or names. Matching
              plugins are not installed to this route. ``True`` skips all.

            Any additional keyword arguments are stored as route-specific
            configuration and passed to plugins (see :meth:`Plugin.apply`).
        """
        # Support the bare-decorator form: @app.route (no parentheses).
        if callable(path): path, callback = None, path
        plugins = makelist(apply)
        skiplist = makelist(skip)

        def decorator(callback):
            # Accept a "module:function" string as the callback as well.
            if isinstance(callback, basestring): callback = load(callback)
            for rule in makelist(path) or yieldroutes(callback):
                for verb in makelist(method):
                    verb = verb.upper()
                    route = Route(self, rule, verb, callback,
                                  name=name,
                                  plugins=plugins,
                                  skiplist=skiplist, **config)
                    self.add_route(route)
            return callback

        return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
    def default_error_handler(self, res):
        """ Render the built-in error page for an :class:`HTTPError` and
            return it as a byte string. Used when no custom handler is
            registered for the error code. """
        return tob(template(ERROR_PAGE_TEMPLATE, e=res))
    def _handle(self, environ):
        """ Dispatch a single request: bind the thread-local request/response
            objects, match a route and call its callback. Returns the raw
            route result or an :class:`HTTPError`/:class:`HTTPResponse`
            describing a failure; :meth:`_cast` turns it into WSGI output. """
        path = environ['bottle.raw_path'] = environ['PATH_INFO']
        if py3k:
            try:
                # WSGI hands PATH_INFO over as latin1-decoded text; re-decode
                # the raw bytes as UTF-8 (PEP 3333 transcoding dance).
                environ['PATH_INFO'] = path.encode('latin1').decode('utf8')
            except UnicodeError:
                return HTTPError(400, 'Invalid path string. Expected UTF-8')
        try:
            environ['bottle.app'] = self
            request.bind(environ)
            response.bind()
            try:
                self.trigger_hook('before_request')
                route, args = self.router.match(environ)
                environ['route.handle'] = route
                environ['bottle.route'] = route
                environ['route.url_args'] = args
                return route.call(**args)
            finally:
                # Runs even when the callback raised (including HTTPResponse).
                self.trigger_hook('after_request')
        except HTTPResponse:
            # Raised responses are the normal abort/redirect mechanism.
            return _e()
        except RouteReset:
            # A plugin requested a re-application of plugins; retry once.
            route.reset()
            return self._handle(environ)
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except Exception:
            if not self.catchall: raise
            stacktrace = format_exc()
            environ['wsgi.errors'].write(stacktrace)
            return HTTPError(500, "Internal Server Error", _e(), stacktrace)
    def _cast(self, out, peek=None):
        """ Try to convert the parameter into something WSGI compatible and set
        correct HTTP headers when possible.
        Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
        iterable of strings and iterable of unicodes
        """
        # Empty output is done here
        if not out:
            if 'Content-Length' not in response:
                response['Content-Length'] = 0
            return []
        # Join lists of byte or unicode strings. Mixed lists are NOT supported
        if isinstance(out, (tuple, list))\
        and isinstance(out[0], (bytes, unicode)):
            out = out[0][0:0].join(out)  # b'abc'[0:0] -> b''
        # Encode unicode strings
        if isinstance(out, unicode):
            out = out.encode(response.charset)
        # Byte Strings are just returned
        if isinstance(out, bytes):
            if 'Content-Length' not in response:
                response['Content-Length'] = len(out)
            return [out]
        # HTTPError or HTTPException (recursive, because they may wrap anything)
        # TODO: Handle these explicitly in handle() or make them iterable.
        if isinstance(out, HTTPError):
            out.apply(response)
            out = self.error_handler.get(out.status_code,
                                         self.default_error_handler)(out)
            return self._cast(out)
        if isinstance(out, HTTPResponse):
            out.apply(response)
            return self._cast(out.body)
        # File-like objects.
        if hasattr(out, 'read'):
            if 'wsgi.file_wrapper' in request.environ:
                # Prefer the server-provided wrapper (often uses sendfile).
                return request.environ['wsgi.file_wrapper'](out)
            elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
                return WSGIFileWrapper(out)
        # Handle Iterables. We peek into them to detect their inner type.
        try:
            iout = iter(out)
            first = next(iout)
            # Skip falsy leading chunks ('' or b'') to find the first real one.
            while not first:
                first = next(iout)
        except StopIteration:
            return self._cast('')
        except HTTPResponse:
            first = _e()
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except:
            # Deliberate broad catch: errors raised lazily by a generator
            # body must still produce a 500 page when catchall is on.
            if not self.catchall: raise
            first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
        # These are the inner types allowed in iterator or generator objects.
        if isinstance(first, HTTPResponse):
            return self._cast(first)
        elif isinstance(first, bytes):
            new_iter = itertools.chain([first], iout)
        elif isinstance(first, unicode):
            encoder = lambda x: x.encode(response.charset)
            new_iter = imap(encoder, itertools.chain([first], iout))
        else:
            msg = 'Unsupported response type: %s' % type(first)
            return self._cast(HTTPError(500, msg))
        if hasattr(out, 'close'):
            # Preserve the original iterable's close() (e.g. generators).
            new_iter = _closeiter(new_iter, out.close)
        return new_iter
    def wsgi(self, environ, start_response):
        """ The bottle WSGI-interface. Handles one request and returns the
            response body iterable expected by the WSGI server. """
        try:
            out = self._cast(self._handle(environ))
            # rfc2616 section 4.3: these responses must not carry a body.
            if response._status_code in (100, 101, 204, 304)\
            or environ['REQUEST_METHOD'] == 'HEAD':
                if hasattr(out, 'close'): out.close()
                out = []
            start_response(response._status_line, response.headerlist)
            return out
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except:
            # Last-resort error page: even _cast/start_response failures
            # should yield a readable 500 instead of a server traceback.
            if not self.catchall: raise
            err = '<h1>Critical error while processing request: %s</h1>' \
                  % html_escape(environ.get('PATH_INFO', '/'))
            if DEBUG:
                err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
                       '<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
                       % (html_escape(repr(_e())), html_escape(format_exc()))
            environ['wsgi.errors'].write(err)
            headers = [('Content-Type', 'text/html; charset=UTF-8')]
            # Passing exc_info lets the server replace already-sent headers.
            start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
            return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
    def __enter__(self):
        """ Use this application as default for all module-level shortcuts. """
        # Pushed onto the default-app stack; popped again in __exit__.
        default_app.push(self)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the previous default application. Returns None, so any
        # exception raised inside the with-block propagates unchanged.
        default_app.pop()
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
    """ A wrapper for WSGI environment dictionaries that adds a lot of
        convenient access methods and properties. Most of them are read-only.
        Adding new attributes to a request actually adds them to the environ
        dictionary (as 'bottle.request.ext.<name>'). This is the recommended
        way to store and access request-specific data.
    """
    __slots__ = ('environ', )
    #: Maximum size of memory buffer for :attr:`body` in bytes.
    MEMFILE_MAX = 102400
    def __init__(self, environ=None):
        """ Wrap a WSGI environ dictionary. """
        #: The wrapped WSGI environ dictionary. This is the only real attribute.
        #: All other attributes actually are read-only properties.
        self.environ = {} if environ is None else environ
        self.environ['bottle.request'] = self
    # NOTE: DictProperty caches the computed value in the environ dict;
    # the decorated method bodies below only run when the cache key is
    # missing, so the bare `raise` bodies fire for unbound requests.
    @DictProperty('environ', 'bottle.app', read_only=True)
    def app(self):
        """ Bottle application handling this request. """
        raise RuntimeError('This request is not connected to an application.')
    @DictProperty('environ', 'bottle.route', read_only=True)
    def route(self):
        """ The bottle :class:`Route` object that matches this request. """
        raise RuntimeError('This request is not connected to a route.')
    @DictProperty('environ', 'route.url_args', read_only=True)
    def url_args(self):
        """ The arguments extracted from the URL. """
        raise RuntimeError('This request is not connected to a route.')
    @property
    def path(self):
        """ The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
            broken clients and avoid the "empty path" edge case). """
        return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
    @property
    def method(self):
        """ The ``REQUEST_METHOD`` value as an uppercase string. """
        return self.environ.get('REQUEST_METHOD', 'GET').upper()
    @DictProperty('environ', 'bottle.request.headers', read_only=True)
    def headers(self):
        """ A :class:`WSGIHeaderDict` that provides case-insensitive access to
            HTTP request headers. """
        return WSGIHeaderDict(self.environ)
    def get_header(self, name, default=None):
        """ Return the value of a request header, or a given default value. """
        return self.headers.get(name, default)
    @DictProperty('environ', 'bottle.request.cookies', read_only=True)
    def cookies(self):
        """ Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
            decoded. Use :meth:`get_cookie` if you expect signed cookies. """
        cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values()
        return FormsDict((c.key, c.value) for c in cookies)
    def get_cookie(self, key, default=None, secret=None):
        """ Return the content of a cookie. To read a `Signed Cookie`, the
            `secret` must match the one used to create the cookie (see
            :meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
            cookie or wrong signature), return a default value. """
        value = self.cookies.get(key)
        if secret and value:
            dec = cookie_decode(value, secret)  # (key, value) tuple or None
            return dec[1] if dec and dec[0] == key else default
        return value or default
    @DictProperty('environ', 'bottle.request.query', read_only=True)
    def query(self):
        """ The :attr:`query_string` parsed into a :class:`FormsDict`. These
            values are sometimes called "URL arguments" or "GET parameters", but
            not to be confused with "URL wildcards" as they are provided by the
            :class:`Router`. """
        get = self.environ['bottle.get'] = FormsDict()
        pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
        for key, value in pairs:
            get[key] = value
        return get
    @DictProperty('environ', 'bottle.request.forms', read_only=True)
    def forms(self):
        """ Form values parsed from an `url-encoded` or `multipart/form-data`
            encoded POST or PUT request body. The result is returned as a
            :class:`FormsDict`. All keys and values are strings. File uploads
            are stored separately in :attr:`files`. """
        forms = FormsDict()
        for name, item in self.POST.allitems():
            if not isinstance(item, FileUpload):
                forms[name] = item
        return forms
    @DictProperty('environ', 'bottle.request.params', read_only=True)
    def params(self):
        """ A :class:`FormsDict` with the combined values of :attr:`query` and
            :attr:`forms`. File uploads are stored in :attr:`files`. """
        params = FormsDict()
        # Query values first, so form values of the same name win.
        for key, value in self.query.allitems():
            params[key] = value
        for key, value in self.forms.allitems():
            params[key] = value
        return params
    @DictProperty('environ', 'bottle.request.files', read_only=True)
    def files(self):
        """ File uploads parsed from `multipart/form-data` encoded POST or PUT
            request body. The values are instances of :class:`FileUpload`.
        """
        files = FormsDict()
        for name, item in self.POST.allitems():
            if isinstance(item, FileUpload):
                files[name] = item
        return files
    @DictProperty('environ', 'bottle.request.json', read_only=True)
    def json(self):
        """ If the ``Content-Type`` header is ``application/json``, this
            property holds the parsed content of the request body. Only requests
            smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
            exhaustion. Invalid JSON raises a 400 error response. """
        ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
        if ctype == 'application/json':
            b = self._get_body_string()
            if not b:
                return None
            try:
                return json_loads(b)
            except (ValueError, TypeError):
                raise HTTPError(400, 'Invalid JSON')
        return None
    def _iter_body(self, read, bufsize):
        # Yield the body in chunks, honoring Content-Length as the limit.
        maxread = max(0, self.content_length)
        while maxread:
            part = read(min(maxread, bufsize))
            if not part: break
            yield part
            maxread -= len(part)
    @staticmethod
    def _iter_chunked(read, bufsize):
        """ Yield the chunks of a 'Transfer-Encoding: chunked' body. """
        err = HTTPError(400, 'Error while parsing chunked transfer body.')
        rn, sem, bs = tob('\r\n'), tob(';'), tob('')
        while True:
            # Read the chunk-size line byte-wise up to the terminating CRLF.
            header = read(1)
            while header[-2:] != rn:
                c = read(1)
                header += c
                if not c: raise err
                if len(header) > bufsize: raise err
            # Anything after ';' is a chunk extension and is ignored.
            size, _, _ = header.partition(sem)
            try:
                maxread = int(tonat(size.strip()), 16)
            except ValueError:
                raise err
            if maxread == 0: break  # a zero-sized chunk terminates the body
            buff = bs
            while maxread > 0:
                if not buff:
                    buff = read(min(maxread, bufsize))
                part, buff = buff[:maxread], buff[maxread:]
                if not part: raise err
                yield part
                maxread -= len(part)
            # Each chunk payload is followed by CRLF.
            if read(2) != rn:
                raise err
    @DictProperty('environ', 'bottle.request.body', read_only=True)
    def _body(self):
        # Buffer the body into memory, spilling to a temporary file once
        # it grows beyond MEMFILE_MAX; replaces wsgi.input afterwards.
        try:
            read_func = self.environ['wsgi.input'].read
        except KeyError:
            self.environ['wsgi.input'] = BytesIO()
            return self.environ['wsgi.input']
        body_iter = self._iter_chunked if self.chunked else self._iter_body
        body, body_size, is_temp_file = BytesIO(), 0, False
        for part in body_iter(read_func, self.MEMFILE_MAX):
            body.write(part)
            body_size += len(part)
            if not is_temp_file and body_size > self.MEMFILE_MAX:
                body, tmp = TemporaryFile(mode='w+b'), body
                body.write(tmp.getvalue())
                del tmp
                is_temp_file = True
        self.environ['wsgi.input'] = body
        body.seek(0)
        return body
    def _get_body_string(self):
        """ read body until content-length or MEMFILE_MAX into a string. Raise
            HTTPError(413) on requests that are too large. """
        clen = self.content_length
        if clen > self.MEMFILE_MAX:
            raise HTTPError(413, 'Request entity too large')
        # Unknown length (-1): read up to the limit plus one byte so the
        # over-limit check below can still trigger.
        if clen < 0: clen = self.MEMFILE_MAX + 1
        data = self.body.read(clen)
        if len(data) > self.MEMFILE_MAX:  # Fail fast
            raise HTTPError(413, 'Request entity too large')
        return data
    @property
    def body(self):
        """ The HTTP request body as a seek-able file-like object. Depending on
            :attr:`MEMFILE_MAX`, this is either a temporary file or a
            :class:`io.BytesIO` instance. Accessing this property for the first
            time reads and replaces the ``wsgi.input`` environ variable.
            Subsequent accesses just do a `seek(0)` on the file object. """
        self._body.seek(0)
        return self._body
    @property
    def chunked(self):
        """ True if Chunked transfer encoding was used for the request body. """
        return 'chunked' in self.environ.get(
            'HTTP_TRANSFER_ENCODING', '').lower()
    #: An alias for :attr:`query`.
    GET = query
    @DictProperty('environ', 'bottle.request.post', read_only=True)
    def POST(self):
        """ The values of :attr:`forms` and :attr:`files` combined into a single
            :class:`FormsDict`. Values are either strings (form values) or
            instances of :class:`cgi.FieldStorage` (file uploads).
        """
        post = FormsDict()
        # We default to application/x-www-form-urlencoded for everything that
        # is not multipart and take the fast path (also: 3.1 workaround)
        if not self.content_type.startswith('multipart/'):
            pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
            for key, value in pairs:
                post[key] = value
            return post
        safe_env = {'QUERY_STRING': ''}  # Build a safe environment for cgi
        for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
            if key in self.environ: safe_env[key] = self.environ[key]
        args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
        if py31:
            args['fp'] = NCTextIOWrapper(args['fp'],
                                         encoding='utf8',
                                         newline='\n')
        elif py3k:
            args['encoding'] = 'utf8'
        data = cgi.FieldStorage(**args)
        # Keep a reference to the FieldStorage to prevent premature
        # finalization of the uploaded files.
        self['_cgi.FieldStorage'] = data  #http://bugs.python.org/issue18394
        data = data.list or []
        for item in data:
            if item.filename:
                post[item.name] = FileUpload(item.file, item.name,
                                             item.filename, item.headers)
            else:
                post[item.name] = item.value
        return post
    @property
    def url(self):
        """ The full request URI including hostname and scheme. If your app
            lives behind a reverse proxy or load balancer and you get confusing
            results, make sure that the ``X-Forwarded-Host`` header is set
            correctly. """
        return self.urlparts.geturl()
    @DictProperty('environ', 'bottle.request.urlparts', read_only=True)
    def urlparts(self):
        """ The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
            The tuple contains (scheme, host, path, query_string and fragment),
            but the fragment is always empty because it is not visible to the
            server. """
        env = self.environ
        http = env.get('HTTP_X_FORWARDED_PROTO') \
             or env.get('wsgi.url_scheme', 'http')
        host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
        if not host:
            # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
            host = env.get('SERVER_NAME', '127.0.0.1')
            port = env.get('SERVER_PORT')
            # Only append the port if it is not the scheme's default.
            if port and port != ('80' if http == 'http' else '443'):
                host += ':' + port
        path = urlquote(self.fullpath)
        return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
    @property
    def fullpath(self):
        """ Request path including :attr:`script_name` (if present). """
        return urljoin(self.script_name, self.path.lstrip('/'))
    @property
    def query_string(self):
        """ The raw :attr:`query` part of the URL (everything in between ``?``
            and ``#``) as a string. """
        return self.environ.get('QUERY_STRING', '')
    @property
    def script_name(self):
        """ The initial portion of the URL's `path` that was removed by a higher
            level (server or routing middleware) before the application was
            called. This script path is returned with leading and tailing
            slashes. """
        script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
        return '/' + script_name + '/' if script_name else '/'
    def path_shift(self, shift=1):
        """ Shift path segments from :attr:`path` to :attr:`script_name` and
            vice versa.
            :param shift: The number of path segments to shift. May be negative
              to change the shift direction. (default: 1)
        """
        script, path = path_shift(self.environ.get('SCRIPT_NAME', '/'), self.path, shift)
        self['SCRIPT_NAME'], self['PATH_INFO'] = script, path
    @property
    def content_length(self):
        """ The request body length as an integer. The client is responsible to
            set this header. Otherwise, the real length of the body is unknown
            and -1 is returned. In this case, :attr:`body` will be empty. """
        return int(self.environ.get('CONTENT_LENGTH') or -1)
    @property
    def content_type(self):
        """ The Content-Type header as a lowercase-string (default: empty). """
        return self.environ.get('CONTENT_TYPE', '').lower()
    @property
    def is_xhr(self):
        """ True if the request was triggered by a XMLHttpRequest. This only
            works with JavaScript libraries that support the `X-Requested-With`
            header (most of the popular libraries do). """
        requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
        return requested_with.lower() == 'xmlhttprequest'
    @property
    def is_ajax(self):
        """ Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
        return self.is_xhr
    @property
    def auth(self):
        """ HTTP authentication data as a (user, password) tuple. This
            implementation currently supports basic (not digest) authentication
            only. If the authentication happened at a higher level (e.g. in the
            front web-server or a middleware), the password field is None, but
            the user field is looked up from the ``REMOTE_USER`` environ
            variable. On any errors, None is returned. """
        basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', ''))
        if basic: return basic
        ruser = self.environ.get('REMOTE_USER')
        if ruser: return (ruser, None)
        return None
    @property
    def remote_route(self):
        """ A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This does only
            work if all proxies support the ```X-Forwarded-For`` header. Note
            that this information can be forged by malicious clients. """
        proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
        if proxy: return [ip.strip() for ip in proxy.split(',')]
        remote = self.environ.get('REMOTE_ADDR')
        return [remote] if remote else []
    @property
    def remote_addr(self):
        """ The client IP as a string. Note that this information can be forged
            by malicious clients. """
        route = self.remote_route
        return route[0] if route else None
    def copy(self):
        """ Return a new :class:`Request` with a shallow :attr:`environ` copy. """
        return Request(self.environ.copy())
    def get(self, value, default=None):
        return self.environ.get(value, default)
    def __getitem__(self, key):
        return self.environ[key]
    def __delitem__(self, key):
        # Assigning "" first routes through __setitem__ so dependent caches
        # (body, forms, headers, ...) are invalidated before removal.
        self[key] = ""
        del (self.environ[key])
    def __iter__(self):
        return iter(self.environ)
    def __len__(self):
        return len(self.environ)
    def keys(self):
        return self.environ.keys()
    def __setitem__(self, key, value):
        """ Change an environ value and clear all caches that depend on it. """
        if self.environ.get('bottle.request.readonly'):
            raise KeyError('The environ dictionary is read-only.')
        self.environ[key] = value
        todelete = ()
        if key == 'wsgi.input':
            todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
        elif key == 'QUERY_STRING':
            todelete = ('query', 'params')
        elif key.startswith('HTTP_'):
            todelete = ('headers', 'cookies')
        for key in todelete:
            self.environ.pop('bottle.request.' + key, None)
    def __repr__(self):
        return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
    def __getattr__(self, name):
        """ Search in self.environ for additional user defined attributes. """
        try:
            var = self.environ['bottle.request.ext.%s' % name]
            # Support descriptor-style extension attributes.
            return var.__get__(self) if hasattr(var, '__get__') else var
        except KeyError:
            raise AttributeError('Attribute %r not defined.' % name)
    def __setattr__(self, name, value):
        if name == 'environ': return object.__setattr__(self, name, value)
        self.environ['bottle.request.ext.%s' % name] = value
def _hkey(s):
    """ Normalize a header name to canonical Header-Case with dashes. """
    # '_' and '-' are both word boundaries for str.title(), so replacing
    # before or after titling yields the same result.
    return s.replace('_', '-').title()
class HeaderProperty(object):
    """ Property-like descriptor bound to a single HTTP header on the
        owner's ``headers`` mapping.

        Reading returns the header value (run through *reader* if given, or
        *default* when the header is missing); writing stores
        ``writer(value)``; deleting removes the header. """
    def __init__(self, name, reader=None, writer=str, default=''):
        self.name = name
        self.default = default
        self.reader = reader
        self.writer = writer
        self.__doc__ = 'Current value of the %r header.' % name.title()
    def __get__(self, obj, _):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        raw = obj.headers.get(self.name, self.default)
        if self.reader:
            return self.reader(raw)
        return raw
    def __set__(self, obj, value):
        obj.headers[self.name] = self.writer(value)
    def __delete__(self, obj):
        del obj.headers[self.name]
class BaseResponse(object):
    """ Storage class for a response body as well as headers and cookies.
        This class does support dict-like case-insensitive item-access to
        headers, but is NOT a dict. Most notably, iterating over a response
        yields parts of the body and not the headers.
        :param body: The response body as one of the supported types.
        :param status: Either an HTTP status code (e.g. 200) or a status line
            including the reason phrase (e.g. '200 OK').
        :param headers: A dictionary or a list of name-value pairs.
        Additional keyword arguments are added to the list of headers.
        Underscores in the header name are replaced with dashes.
    """
    default_status = 200
    default_content_type = 'text/html; charset=UTF-8'
    # Header blacklist for specific response codes
    # (rfc2616 section 10.2.3 and 10.3.5)
    bad_headers = {
        204: set(('Content-Type', )),
        304: set(('Allow', 'Content-Encoding', 'Content-Language',
                  'Content-Length', 'Content-Range', 'Content-Type',
                  'Content-Md5', 'Last-Modified'))
    }
    def __init__(self, body='', status=None, headers=None, **more_headers):
        self._cookies = None
        # Internal header store: canonical name -> list of string values.
        self._headers = {}
        self.body = body
        self.status = status or self.default_status
        if headers:
            if isinstance(headers, dict):
                headers = headers.items()
            for name, value in headers:
                self.add_header(name, value)
        if more_headers:
            for name, value in more_headers.items():
                self.add_header(name, value)
    def copy(self, cls=None):
        """ Returns a copy of self. """
        cls = cls or BaseResponse
        assert issubclass(cls, BaseResponse)
        copy = cls()
        copy.status = self.status
        # Copy the value lists too, so the copy is independent.
        copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
        if self._cookies:
            copy._cookies = SimpleCookie()
            copy._cookies.load(self._cookies.output(header=''))
        return copy
    def __iter__(self):
        return iter(self.body)
    def close(self):
        if hasattr(self.body, 'close'):
            self.body.close()
    @property
    def status_line(self):
        """ The HTTP status line as a string (e.g. ``404 Not Found``)."""
        return self._status_line
    @property
    def status_code(self):
        """ The HTTP status code as an integer (e.g. 404)."""
        return self._status_code
    def _set_status(self, status):
        # Accept either an int code or a full "code reason" status line.
        if isinstance(status, int):
            code, status = status, _HTTP_STATUS_LINES.get(status)
        elif ' ' in status:
            status = status.strip()
            code = int(status.split()[0])
        else:
            raise ValueError('String status line without a reason phrase.')
        if not 100 <= code <= 999:
            raise ValueError('Status code out of range.')
        self._status_code = code
        self._status_line = str(status or ('%d Unknown' % code))
    def _get_status(self):
        return self._status_line
    status = property(
        _get_status, _set_status, None,
        ''' A writeable property to change the HTTP response status. It accepts
            either a numeric code (100-999) or a string with a custom reason
            phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
            :data:`status_code` are updated accordingly. The return value is
            always a status string. ''')
    del _get_status, _set_status
    @property
    def headers(self):
        """ An instance of :class:`HeaderDict`, a case-insensitive dict-like
            view on the response headers. """
        hdict = HeaderDict()
        # Share the underlying dict so mutations through the view stick.
        hdict.dict = self._headers
        return hdict
    def __contains__(self, name):
        return _hkey(name) in self._headers
    def __delitem__(self, name):
        del self._headers[_hkey(name)]
    def __getitem__(self, name):
        return self._headers[_hkey(name)][-1]
    def __setitem__(self, name, value):
        self._headers[_hkey(name)] = [value if isinstance(value, unicode) else
                                      str(value)]
    def get_header(self, name, default=None):
        """ Return the value of a previously defined header. If there is no
            header with that name, return a default value. """
        return self._headers.get(_hkey(name), [default])[-1]
    def set_header(self, name, value):
        """ Create a new response header, replacing any previously defined
            headers with the same name. """
        self._headers[_hkey(name)] = [value if isinstance(value, unicode)
                                      else str(value)]
    def add_header(self, name, value):
        """ Add an additional response header, not removing duplicates. """
        self._headers.setdefault(_hkey(name), []).append(
            value if isinstance(value, unicode) else str(value))
    def iter_headers(self):
        """ Yield (header, value) tuples, skipping headers that are not
            allowed with the current response status code. """
        return self.headerlist
    @property
    def headerlist(self):
        """ WSGI conform list of (header, value) tuples. """
        out = []
        headers = list(self._headers.items())
        if 'Content-Type' not in self._headers:
            headers.append(('Content-Type', [self.default_content_type]))
        # Drop headers that are forbidden for this status code (see
        # bad_headers above).
        if self._status_code in self.bad_headers:
            bad_headers = self.bad_headers[self._status_code]
            headers = [h for h in headers if h[0] not in bad_headers]
        out += [(name, val) for (name, vals) in headers for val in vals]
        if self._cookies:
            for c in self._cookies.values():
                out.append(('Set-Cookie', c.OutputString()))
        # PEP 3333 requires native latin1-capable strings on Python 3.
        if py3k:
            return [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
        else:
            return [(k, v.encode('utf8') if isinstance(v, unicode) else v)
                    for (k, v) in out]
    content_type = HeaderProperty('Content-Type')
    content_length = HeaderProperty('Content-Length', reader=int)
    expires = HeaderProperty(
        'Expires',
        reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
        writer=lambda x: http_date(x))
    @property
    def charset(self, default='UTF-8'):
        """ Return the charset specified in the content-type header (default: utf8). """
        # NOTE(review): 'default' can never be passed through property
        # access; it effectively acts as a local constant here.
        if 'charset=' in self.content_type:
            return self.content_type.split('charset=')[-1].split(';')[0].strip()
        return default
    def set_cookie(self, name, value, secret=None, **options):
        """ Create a new cookie or replace an old one. If the `secret` parameter is
            set, create a `Signed Cookie` (described below).
            :param name: the name of the cookie.
            :param value: the value of the cookie.
            :param secret: a signature key required for signed cookies.
            Additionally, this method accepts all RFC 2109 attributes that are
            supported by :class:`cookie.Morsel`, including:
            :param max_age: maximum age in seconds. (default: None)
            :param expires: a datetime object or UNIX timestamp. (default: None)
            :param domain: the domain that is allowed to read the cookie.
              (default: current domain)
            :param path: limits the cookie to a given path (default: current path)
            :param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side javascript to read this cookie
              (default: off, requires Python 2.6 or newer).
            If neither `expires` nor `max_age` is set (default), the cookie will
            expire at the end of the browser session (as soon as the browser
            window is closed).
            Signed cookies may store any pickle-able object and are
            cryptographically signed to prevent manipulation. Keep in mind that
            cookies are limited to 4kb in most browsers.
            Warning: Signed cookies are not encrypted (the client can still see
            the content) and not copy-protected (the client can restore an old
            cookie). The main intention is to make pickling and unpickling
            save, not to store secret information at client side.
        """
        if not self._cookies:
            self._cookies = SimpleCookie()
        if secret:
            value = touni(cookie_encode((name, value), secret))
        elif not isinstance(value, basestring):
            raise TypeError('Secret key missing for non-string Cookie.')
        # Cookie size plus options must not exceed 4kb.
        if len(name) + len(value) > 3800:
            raise ValueError('Content does not fit into a cookie.')
        self._cookies[name] = value
        for key, value in options.items():
            if key == 'max_age':
                if isinstance(value, timedelta):
                    value = value.seconds + value.days * 24 * 3600
            if key == 'expires':
                # Normalize datetime/timestamp values to an RFC 1123 string.
                if isinstance(value, (datedate, datetime)):
                    value = value.timetuple()
                elif isinstance(value, (int, float)):
                    value = time.gmtime(value)
                value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
            # Boolean flags with falsy values must be omitted entirely.
            if key in ('secure', 'httponly') and not value:
                continue
            self._cookies[name][key.replace('_', '-')] = value
    def delete_cookie(self, key, **kwargs):
        """ Delete a cookie. Be sure to use the same `domain` and `path`
            settings as used to create the cookie. """
        kwargs['max_age'] = -1
        kwargs['expires'] = 0
        self.set_cookie(key, '', **kwargs)
    def __repr__(self):
        out = ''
        for name, value in self.headerlist:
            out += '%s: %s\n' % (name.title(), value.strip())
        return out
def _local_property():
    """ Build a property whose value lives in thread-local storage, so each
        thread observes its own independent value. Reading before the first
        assignment raises :exc:`RuntimeError`. """
    storage = threading.local()
    def _get(_):
        try:
            return storage.var
        except AttributeError:
            raise RuntimeError("Request context not initialized.")
    def _set(_, value):
        storage.var = value
    def _del(_):
        del storage.var
    return property(_get, _set, _del, 'Thread-local property')
class LocalRequest(BaseRequest):
    """ A thread-local subclass of :class:`BaseRequest` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`request`). If accessed during a
        request/response cycle, this instance always refers to the *current*
        request (even on a multithreaded server). """
    # Re-initializing for the current thread is just calling __init__ again.
    bind = BaseRequest.__init__
    # The wrapped environ lives in thread-local storage, so each worker
    # thread sees its own request.
    environ = _local_property()
class LocalResponse(BaseResponse):
    """ A thread-local subclass of :class:`BaseResponse` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`response`). Its attributes are used
        to build the HTTP response at the end of the request/response cycle.
    """
    # Re-initializing for the current thread is just calling __init__ again.
    bind = BaseResponse.__init__
    # All mutable response state is stored thread-locally so concurrent
    # requests never see each other's data.
    _status_line = _local_property()
    _status_code = _local_property()
    _cookies = _local_property()
    _headers = _local_property()
    body = _local_property()
#: Backwards-compatible aliases: older code and docs refer to these names.
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
    """ A :class:`Response` that doubles as an exception, so it can be
        raised anywhere during request handling to abort processing with a
        complete HTTP response. """
    def __init__(self, body='', status=None, headers=None, **more_headers):
        super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
    def apply(self, other):
        """ Copy the full state of this response onto *other* (usually the
            thread-local ``response`` object). """
        for attr in ('_status_code', '_status_line', '_headers', '_cookies'):
            setattr(other, attr, getattr(self, attr))
        other.body = self.body
class HTTPError(HTTPResponse):
    """ An :class:`HTTPResponse` used for error pages. Keeps the original
        exception and traceback around for error handlers / debug output. """
    #: Status used when none is given explicitly.
    default_status = 500
    def __init__(self,
                 status=None,
                 body=None,
                 exception=None,
                 traceback=None, **options):
        # The exception instance (if any) that caused this error.
        self.exception = exception
        # A formatted traceback string (if any) for debug pages.
        self.traceback = traceback
        super(HTTPError, self).__init__(body, status, **options)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException):
    """ Raised for plugin-related problems (bad API, conflicts, ...). """
    pass
class JSONPlugin(object):
    """ Plugin that serializes ``dict`` return values (and ``dict`` bodies of
        :class:`HTTPResponse`) to JSON and sets the content type. """
    name = 'json'
    api = 2
    def __init__(self, json_dumps=json_dumps):
        # The serializer to use; a falsy value disables the plugin.
        self.json_dumps = json_dumps
    def apply(self, callback, _):
        dumps = self.json_dumps
        if not dumps: return callback
        def wrapper(*a, **ka):
            try:
                rv = callback(*a, **ka)
            except HTTPError:
                # Treat a raised HTTPError like a return value so its dict
                # body (if any) is serialized as well.
                rv = _e()
            if isinstance(rv, dict):
                #Attempt to serialize, raises exception on failure
                json_response = dumps(rv)
                #Set content type only if serialization successful
                response.content_type = 'application/json'
                return json_response
            elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
                rv.body = dumps(rv.body)
                rv.content_type = 'application/json'
            return rv
        return wrapper
class TemplatePlugin(object):
    """ This plugin applies the :func:`view` decorator to all routes with a
        `template` config parameter. If the parameter is a tuple, the second
        element must be a dict with additional options (e.g. `template_engine`)
        or default variables for the template. """
    name = 'template'
    api = 2
    def apply(self, callback, route):
        conf = route.config.get('template')
        # (name, options-dict) pair -> pass the options on to view().
        if isinstance(conf, (tuple, list)) and len(conf) == 2:
            return view(conf[0], **conf[1])(callback)
        elif isinstance(conf, str):
            return view(conf)(callback)
        else:
            # No template configured: leave the callback untouched.
            return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
    """ PEP 302 import hook that maps ``<name>.x`` imports onto real modules
        named by the ``impmask`` pattern (e.g. ``bottle_%s``). """
    def __init__(self, name, impmask):
        """ Create a virtual package that redirects imports (see PEP 302). """
        self.name = name
        self.impmask = impmask
        # Create (or reuse) the virtual parent package and register it.
        self.module = sys.modules.setdefault(name, imp.new_module(name))
        self.module.__dict__.update({
            '__file__': __file__,
            '__path__': [],
            '__all__': [],
            '__loader__': self
        })
        sys.meta_path.append(self)
    def find_module(self, fullname, path=None):
        # Only handle direct submodules of the virtual package.
        if '.' not in fullname: return
        packname = fullname.rsplit('.', 1)[0]
        if packname != self.name: return
        return self
    def load_module(self, fullname):
        if fullname in sys.modules: return sys.modules[fullname]
        modname = fullname.rsplit('.', 1)[1]
        realname = self.impmask % modname
        __import__(realname)
        # Alias the real module under the virtual name.
        module = sys.modules[fullname] = sys.modules[realname]
        setattr(self.module, modname, module)
        module.__loader__ = self
        return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
    """ This dict stores multiple values per key, but behaves exactly like a
        normal dict in that it returns only the newest value for any given key.
        There are special methods available to access the full list of values.
    """
    def __init__(self, *a, **k):
        # Internal storage maps each key to a *list* of values; plain dict
        # item access always refers to the last element of that list.
        self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
    def __len__(self):
        return len(self.dict)
    def __iter__(self):
        return iter(self.dict)
    def __contains__(self, key):
        return key in self.dict
    def __delitem__(self, key):
        del self.dict[key]
    def __getitem__(self, key):
        return self.dict[key][-1]
    def __setitem__(self, key, value):
        self.append(key, value)
    def keys(self):
        return self.dict.keys()
    if py3k:
        # Python 3: return lazy iterators/generators.
        def values(self):
            return (v[-1] for v in self.dict.values())
        def items(self):
            return ((k, v[-1]) for k, v in self.dict.items())
        def allitems(self):
            return ((k, v) for k, vl in self.dict.items() for v in vl)
        iterkeys = keys
        itervalues = values
        iteritems = items
        iterallitems = allitems
    else:
        # Python 2: list-returning methods plus iter* variants.
        def values(self):
            return [v[-1] for v in self.dict.values()]
        def items(self):
            return [(k, v[-1]) for k, v in self.dict.items()]
        def iterkeys(self):
            return self.dict.iterkeys()
        def itervalues(self):
            return (v[-1] for v in self.dict.itervalues())
        def iteritems(self):
            return ((k, v[-1]) for k, v in self.dict.iteritems())
        def iterallitems(self):
            return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
        def allitems(self):
            return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
    def get(self, key, default=None, index=-1, type=None):
        """ Return the most recent value for a key.
            :param default: The default value to be returned if the key is not
                   present or the type conversion fails.
            :param index: An index for the list of available values.
            :param type: If defined, this callable is used to cast the value
                    into a specific type. Exception are suppressed and result in
                    the default value to be returned.
        """
        try:
            val = self.dict[key][index]
            return type(val) if type else val
        except Exception:
            # Missing key, bad index or failed conversion -> default.
            pass
        return default
    def append(self, key, value):
        """ Add a new value to the list of values for this key. """
        self.dict.setdefault(key, []).append(value)
    def replace(self, key, value):
        """ Replace the list of values with a single value. """
        self.dict[key] = [value]
    def getall(self, key):
        """ Return a (possibly empty) list of values for a key. """
        return self.dict.get(key) or []
    #: Aliases for WTForms to mimic other multi-dict APIs (Django)
    getone = get
    getlist = getall
class FormsDict(MultiDict):
    """ This :class:`MultiDict` subclass is used to store request form data.
        Additionally to the normal dict-like item access methods (which return
        unmodified data as native strings), this container also supports
        attribute-like access to its values. Attributes are automatically de-
        or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
        attributes default to an empty string. """
    #: Encoding used for attribute values.
    input_encoding = 'utf8'
    #: If true (default), unicode strings are first encoded with `latin1`
    #: and then decoded to match :attr:`input_encoding`.
    recode_unicode = True
    def _fix(self, s, encoding=None):
        # Recode a single key or value to the desired encoding.
        if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
            return s.encode('latin1').decode(encoding or self.input_encoding)
        elif isinstance(s, bytes): # Python 2 WSGI
            return s.decode(encoding or self.input_encoding)
        else:
            return s
    def decode(self, encoding=None):
        """ Returns a copy with all keys and values de- or recoded to match
            :attr:`input_encoding`. Some libraries (e.g. WTForms) want a
            unicode dictionary. """
        copy = FormsDict()
        enc = copy.input_encoding = encoding or self.input_encoding
        # The copy holds already-decoded values; do not recode them again.
        copy.recode_unicode = False
        for key, value in self.allitems():
            copy.append(self._fix(key, enc), self._fix(value, enc))
        return copy
    def getunicode(self, name, default=None, encoding=None):
        """ Return the value as a unicode string, or the default. """
        try:
            return self._fix(self[name], encoding)
        except (UnicodeError, KeyError):
            return default
    def __getattr__(self, name, default=unicode()):
        # Without this guard, pickle generates a cryptic TypeError:
        if name.startswith('__') and name.endswith('__'):
            return super(FormsDict, self).__getattr__(name)
        return self.getunicode(name, default=default)
class HeaderDict(MultiDict):
    """ A case-insensitive version of :class:`MultiDict` that defaults to
        replace the old value instead of appending it. """
    def __init__(self, *a, **ka):
        self.dict = {}
        if a or ka: self.update(*a, **ka)
    # All keys are normalized with _hkey() so lookups are case-insensitive;
    # values are coerced to native strings on write.
    def __contains__(self, key):
        return _hkey(key) in self.dict
    def __delitem__(self, key):
        del self.dict[_hkey(key)]
    def __getitem__(self, key):
        return self.dict[_hkey(key)][-1]
    def __setitem__(self, key, value):
        self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
                                 str(value)]
    def append(self, key, value):
        self.dict.setdefault(_hkey(key), []).append(
            value if isinstance(value, unicode) else str(value))
    def replace(self, key, value):
        self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
                                 str(value)]
    def getall(self, key):
        return self.dict.get(_hkey(key)) or []
    def get(self, key, default=None, index=-1):
        return MultiDict.get(self, _hkey(key), default, index)
    def filter(self, names):
        """ Remove all headers listed in *names* (case-insensitive). """
        for name in [_hkey(n) for n in names]:
            if name in self.dict:
                del self.dict[name]
class WSGIHeaderDict(DictMixin):
    """ This dict-like class wraps a WSGI environ dict and provides convenient
        access to HTTP_* fields. Keys and values are native strings
        (2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
        environment contains non-native string values, these are de- or encoded
        using a lossless 'latin1' character set.
        The API will remain stable even on changes to the relevant PEPs.
        Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
        that uses non-native strings.)
    """
    #: List of keys that do not have a ``HTTP_`` prefix.
    cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
    def __init__(self, environ):
        self.environ = environ
    def _ekey(self, key):
        """ Translate header field name to CGI/WSGI environ key. """
        key = key.replace('-', '_').upper()
        if key in self.cgikeys:
            return key
        return 'HTTP_' + key
    def raw(self, key, default=None):
        """ Return the header value as is (may be bytes or unicode). """
        return self.environ.get(self._ekey(key), default)
    def __getitem__(self, key):
        val = self.environ[self._ekey(key)]
        if py3k:
            if isinstance(val, unicode):
                # PEP 3333: environ strings are latin1-decoded bytes;
                # round-trip through latin1 to get proper utf8 text.
                val = val.encode('latin1').decode('utf8')
        else:
            val = val.decode('utf8')
        return val
    def __setitem__(self, key, value):
        raise TypeError("%s is read-only." % self.__class__)
    def __delitem__(self, key):
        raise TypeError("%s is read-only." % self.__class__)
    def __iter__(self):
        # Yield header names (normalized) for all HTTP_* and CGI keys.
        for key in self.environ:
            if key[:5] == 'HTTP_':
                yield _hkey(key[5:])
            elif key in self.cgikeys:
                yield _hkey(key)
    def keys(self):
        return [x for x in self]
    def __len__(self):
        return len(self.keys())
    def __contains__(self, key):
        return self._ekey(key) in self.environ
class ConfigDict(dict):
    """ A dict-like configuration storage with additional support for
        namespaces, validators, meta-data, on_change listeners and more.
    """
    __slots__ = ('_meta', '_on_change')
    def __init__(self):
        # Per-key meta data (e.g. a 'filter' callable applied on writes).
        self._meta = {}
        # Called as _on_change(key, value) on every write; value is None
        # on deletes. Default is a no-op.
        self._on_change = lambda name, value: None
    def load_module(self, path, squash):
        """ Load values from a Python module.
            :param squash: Squash nested dicts into namespaces by using
                load_dict(), otherwise use update()
            Example: load_module('my.app.settings', True)
            Example: load_module('my.app.settings', False)
        """
        config_obj = __import__(path)
        # Only ALL-UPPERCASE module attributes are treated as settings.
        obj = dict([(key, getattr(config_obj, key))
                    for key in dir(config_obj) if key.isupper()])
        if squash:
            self.load_dict(obj)
        else:
            self.update(obj)
        return self
    def load_config(self, filename):
        """ Load values from an ``*.ini`` style config file.
            If the config file contains sections, their names are used as
            namespaces for the values within. The two special sections
            ``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
        """
        conf = ConfigParser()
        conf.read(filename)
        for section in conf.sections():
            for key, value in conf.items(section):
                if section not in ('DEFAULT', 'bottle'):
                    key = section + '.' + key
                self[key] = value
        return self
    def load_dict(self, source, namespace=''):
        """ Load values from a dictionary structure. Nesting can be used to
            represent namespaces.
            >>> c = ConfigDict()
            >>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
            {'some.namespace.key': 'value'}
        """
        for key, value in source.items():
            if isinstance(key, basestring):
                nskey = (namespace + '.' + key).strip('.')
                if isinstance(value, dict):
                    # Recurse into nested dicts, extending the namespace.
                    self.load_dict(value, namespace=nskey)
                else:
                    self[nskey] = value
            else:
                raise TypeError('Key has type %r (not a string)' % type(key))
        return self
    def update(self, *a, **ka):
        """ If the first parameter is a string, all keys are prefixed with this
            namespace. Apart from that it works just as the usual dict.update().
            Example: ``update('some.namespace', key='value')`` """
        prefix = ''
        if a and isinstance(a[0], basestring):
            prefix = a[0].strip('.') + '.'
            a = a[1:]
        for key, value in dict(*a, **ka).items():
            self[prefix + key] = value
    def setdefault(self, key, value):
        """ Set *key* to *value* unless already present; return the value. """
        if key not in self:
            self[key] = value
        return self[key]
    def __setitem__(self, key, value):
        if not isinstance(key, basestring):
            raise TypeError('Key has type %r (not a string)' % type(key))
        # Run the value through the per-key 'filter' meta hook (if any).
        value = self.meta_get(key, 'filter', lambda x: x)(value)
        if key in self and self[key] is value:
            return
        self._on_change(key, value)
        dict.__setitem__(self, key, value)
    def __delitem__(self, key):
        self._on_change(key, None)
        dict.__delitem__(self, key)
    def meta_get(self, key, metafield, default=None):
        """ Return the value of a meta field for a key. """
        return self._meta.get(key, {}).get(metafield, default)
    def meta_set(self, key, metafield, value):
        """ Set the meta field for a key to a new value. This triggers the
            on-change handler for existing keys. """
        self._meta.setdefault(key, {})[metafield] = value
        if key in self:
            # Re-assign so the value passes through the (new) filter/handler.
            self[key] = self[key]
    def meta_list(self, key):
        """ Return an iterable of meta field names defined for a key. """
        return self._meta.get(key, {}).keys()
class AppStack(list):
    """ A stack-like list. Calling it returns the head of the stack. """
    def __call__(self):
        """ Return the current default application. """
        return self[-1]
    def push(self, value=None):
        """ Add a new :class:`Bottle` instance to the stack """
        # Anything that is not a Bottle app is replaced by a fresh one.
        if not isinstance(value, Bottle):
            value = Bottle()
        self.append(value)
        return value
class WSGIFileWrapper(object):
    """ Iterate over a file(-like) object in fixed-size chunks.

        File methods such as ``read`` or ``seek`` (if present on the wrapped
        object) are mirrored onto the wrapper so WSGI servers can use them.
    """
    def __init__(self, fp, buffer_size=1024 * 64):
        self.fp, self.buffer_size = fp, buffer_size
        # Expose the underlying file's API directly on the wrapper.
        for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
            if hasattr(fp, attr):
                setattr(self, attr, getattr(fp, attr))
    def __iter__(self):
        chunk = self.read(self.buffer_size)
        while chunk:
            yield chunk
            chunk = self.read(self.buffer_size)
class _closeiter(object):
    """ This only exists to be able to attach a .close method to iterators that
        do not support attribute assignment (most of itertools). """
    def __init__(self, iterator, close=None):
        self.iterator = iterator
        # `close` may be a single callable or a list of callables.
        self.close_callbacks = makelist(close)
    def __iter__(self):
        return iter(self.iterator)
    def close(self):
        # Called by the WSGI server once the response is fully sent.
        for func in self.close_callbacks:
            func()
class ResourceManager(object):
    """ This class manages a list of search paths and helps to find and open
        application-bound resources (files).
        :param base: default value for :meth:`add_path` calls.
        :param opener: callable used to open resources.
        :param cachemode: controls which lookups are cached. One of 'all',
                         'found' or 'none'.
    """
    def __init__(self, base='./', opener=open, cachemode='all'):
        self.opener = opener
        self.base = base
        self.cachemode = cachemode
        #: A list of search paths. See :meth:`add_path` for details.
        self.path = []
        #: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
        self.cache = {}
    def add_path(self, path, base=None, index=None, create=False):
        """ Add a new path to the list of search paths. Return False if the
            path does not exist.
            :param path: The new search path. Relative paths are turned into
                an absolute and normalized form. If the path looks like a file
                (not ending in `/`), the filename is stripped off.
            :param base: Path used to absolutize relative search paths.
                Defaults to :attr:`base` which defaults to ``os.getcwd()``.
            :param index: Position within the list of search paths. Defaults
                to last index (appends to the list).
            The `base` parameter makes it easy to reference files installed
            along with a python module or package::
                res.add_path('./resources/', __file__)
        """
        base = os.path.abspath(os.path.dirname(base or self.base))
        path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
        path += os.sep
        # Re-adding an existing path moves it to the requested position.
        if path in self.path:
            self.path.remove(path)
        if create and not os.path.isdir(path):
            os.makedirs(path)
        if index is None:
            self.path.append(path)
        else:
            self.path.insert(index, path)
        # Any cached lookup may now resolve differently.
        self.cache.clear()
        return os.path.exists(path)
    def __iter__(self):
        """ Iterate over all existing files in all registered paths. """
        search = self.path[:]
        while search:
            path = search.pop()
            if not os.path.isdir(path): continue
            for name in os.listdir(path):
                full = os.path.join(path, name)
                # Recurse into subdirectories; yield plain files.
                if os.path.isdir(full): search.append(full)
                else: yield full
    def lookup(self, name):
        """ Search for a resource and return an absolute file path, or `None`.
            The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
            future lookups. """
        # In DEBUG mode the cache is bypassed so edits are picked up.
        if name not in self.cache or DEBUG:
            for path in self.path:
                fpath = os.path.join(path, name)
                if os.path.isfile(fpath):
                    if self.cachemode in ('all', 'found'):
                        self.cache[name] = fpath
                    return fpath
            if self.cachemode == 'all':
                # Cache negative results too.
                self.cache[name] = None
        return self.cache[name]
    def open(self, name, mode='r', *args, **kwargs):
        """ Find a resource and return a file object, or raise IOError. """
        fname = self.lookup(name)
        if not fname: raise IOError("Resource %r not found." % name)
        return self.opener(fname, mode=mode, *args, **kwargs)
class FileUpload(object):
    """ Wrapper for a single file uploaded via a multipart form field. """
    def __init__(self, fileobj, name, filename, headers=None):
        """ Wrapper for file uploads. """
        #: Open file(-like) object (BytesIO buffer or temporary file)
        self.file = fileobj
        #: Name of the upload form field
        self.name = name
        #: Raw filename as sent by the client (may contain unsafe characters)
        self.raw_filename = filename
        #: A :class:`HeaderDict` with additional headers (e.g. content-type)
        self.headers = HeaderDict(headers) if headers else HeaderDict()
    # Convenience accessors for common part headers.
    content_type = HeaderProperty('Content-Type')
    content_length = HeaderProperty('Content-Length', reader=int, default=-1)
    @cached_property
    def filename(self):
        """ Name of the file on the client file system, but normalized to ensure
            file system compatibility. An empty filename is returned as 'empty'.
            Only ASCII letters, digits, dashes, underscores and dots are
            allowed in the final filename. Accents are removed, if possible.
            Whitespace is replaced by a single dash. Leading or tailing dots
            or dashes are removed. The filename is limited to 255 characters.
        """
        fname = self.raw_filename
        if not isinstance(fname, unicode):
            fname = fname.decode('utf8', 'ignore')
        # Decompose accented characters, then drop any non-ASCII leftovers.
        fname = normalize('NFKD', fname)
        fname = fname.encode('ASCII', 'ignore').decode('ASCII')
        # Strip any client-side directory components (both separators).
        fname = os.path.basename(fname.replace('\\', os.path.sep))
        fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
        fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
        return fname[:255] or 'empty'
    def _copy_file(self, fp, chunk_size=2 ** 16):
        # Copy in chunks, then restore the original read position.
        read, write, offset = self.file.read, fp.write, self.file.tell()
        while 1:
            buf = read(chunk_size)
            if not buf: break
            write(buf)
        self.file.seek(offset)
    def save(self, destination, overwrite=False, chunk_size=2 ** 16):
        """ Save file to disk or copy its content to an open file(-like) object.
            If *destination* is a directory, :attr:`filename` is added to the
            path. Existing files are not overwritten by default (IOError).
            :param destination: File path, directory or file(-like) object.
            :param overwrite: If True, replace existing files. (default: False)
            :param chunk_size: Bytes to read at a time. (default: 64kb)
        """
        if isinstance(destination, basestring): # Except file-likes here
            if os.path.isdir(destination):
                destination = os.path.join(destination, self.filename)
            if not overwrite and os.path.exists(destination):
                raise IOError('File exists.')
            with open(destination, 'wb') as fp:
                self._copy_file(fp, chunk_size)
        else:
            self._copy_file(destination, chunk_size)
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
    """ Aborts execution and causes a HTTP error.

        :param code: HTTP status code (default: 500)
        :param text: error body text (default: 'Unknown Error.')
        :raises HTTPError: always
    """
    raise HTTPError(code, text)
def redirect(url, code=None):
    """ Aborts execution and causes a 303 or 302 redirect, depending on
        the HTTP protocol version. """
    if not code:
        # 303 (See Other) needs HTTP/1.1; HTTP/1.0 clients only know 302.
        code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
    # Start from the current thread-local response so headers/cookies that
    # were already set are preserved.
    res = response.copy(cls=HTTPResponse)
    res.status = code
    res.body = ""
    # Resolve relative redirect targets against the current request URL.
    res.set_header('Location', urljoin(request.url, url))
    raise res
def _file_iter_range(fp, offset, bytes, maxread=1024 * 1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root,
                mimetype='auto',
                download=False,
                charset='UTF-8'):
    """ Open a file in a safe way and return :exc:`HTTPResponse` with status
        code 200, 304, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
        ``Content-Length`` and ``Last-Modified`` headers are set if possible.
        Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
        requests.
        :param filename: Name or path of the file to send.
        :param root: Root path for file lookups. Should be an absolute directory
            path.
        :param mimetype: Defines the content-type header (default: guess from
            file extension)
        :param download: If True, ask the browser to open a `Save as...` dialog
            instead of opening the file with the associated program. You can
            specify a custom filename as a string. If not specified, the
            original filename is used (default: False).
        :param charset: The charset to use for files with a ``text/*``
            mime-type. (default: UTF-8)
    """
    root = os.path.abspath(root) + os.sep
    filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
    headers = dict()
    # Path traversal guard: the resolved path must stay below *root*.
    if not filename.startswith(root):
        return HTTPError(403, "Access denied.")
    if not os.path.exists(filename) or not os.path.isfile(filename):
        return HTTPError(404, "File does not exist.")
    if not os.access(filename, os.R_OK):
        return HTTPError(403, "You do not have permission to access this file.")
    if mimetype == 'auto':
        # Guess from the download name if one was given, else the real file.
        if download and download != True:
            mimetype, encoding = mimetypes.guess_type(download)
        else:
            mimetype, encoding = mimetypes.guess_type(filename)
        if encoding: headers['Content-Encoding'] = encoding
    if mimetype:
        if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
            mimetype += '; charset=%s' % charset
        headers['Content-Type'] = mimetype
    if download:
        download = os.path.basename(filename if download == True else download)
        headers['Content-Disposition'] = 'attachment; filename="%s"' % download
    stats = os.stat(filename)
    headers['Content-Length'] = clen = stats.st_size
    lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
    headers['Last-Modified'] = lm
    # Conditional GET: answer 304 if the client copy is still fresh.
    ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
    if ims:
        ims = parse_date(ims.split(";")[0].strip())
    if ims is not None and ims >= int(stats.st_mtime):
        headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                                        time.gmtime())
        return HTTPResponse(status=304, **headers)
    # HEAD requests get headers only; everything else streams the file.
    body = '' if request.method == 'HEAD' else open(filename, 'rb')
    headers["Accept-Ranges"] = "bytes"
    ranges = request.environ.get('HTTP_RANGE')
    if 'HTTP_RANGE' in request.environ:
        ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
        if not ranges:
            return HTTPError(416, "Requested Range Not Satisfiable")
        # Only the first requested range is served (no multipart ranges).
        offset, end = ranges[0]
        headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen)
        headers["Content-Length"] = str(end - offset)
        if body: body = _file_iter_range(body, offset, end - offset)
        return HTTPResponse(body, status=206, **headers)
    return HTTPResponse(body, **headers)
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
    """ Switch the global DEBUG flag on or off.
        There is only one debug level supported at the moment."""
    global DEBUG
    if mode:
        # Re-enable DeprecationWarnings and friends while debugging.
        warnings.simplefilter('default')
    DEBUG = bool(mode)
def http_date(value):
    """ Format a date/datetime, an epoch timestamp or a string as an
        RFC 1123 (HTTP) date string. Strings are returned unchanged. """
    if isinstance(value, (datedate, datetime)):
        # NOTE(review): naive datetimes are treated as UTC here — confirm.
        value = value.utctimetuple()
    elif isinstance(value, (int, float)):
        value = time.gmtime(value)
    if not isinstance(value, basestring):
        value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
    return value
def parse_date(ims):
    """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
    try:
        fields = email.utils.parsedate_tz(ims)
        # fields[9] is the numeric timezone offset in seconds (or None).
        local_epoch = time.mktime(fields[:8] + (0, ))
        return local_epoch - (fields[9] or 0) - time.timezone
    except (TypeError, ValueError, IndexError, OverflowError):
        # Unparsable or out-of-range input.
        return None
def parse_auth(header):
    """ Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
    try:
        scheme, data = header.split(None, 1)
        if scheme.lower() == 'basic':
            credentials = touni(base64.b64decode(tob(data)))
            username, password = credentials.split(':', 1)
            return username, password
    except (KeyError, ValueError):
        # Malformed header (missing parts, bad base64 padding, no colon).
        return None
def parse_range_header(header, maxlen=0):
    """ Yield (start, end) ranges parsed from a HTTP Range header. Skip
        unsatisfiable ranges. The end index is non-inclusive."""
    if not header or header[:6] != 'bytes=':
        return
    for spec in header[6:].split(','):
        if '-' not in spec:
            continue
        first, last = spec.split('-', 1)
        try:
            if not first:    # bytes=-100 -> last 100 bytes
                begin, stop = max(0, maxlen - int(last)), maxlen
            elif not last:   # bytes=100- -> all but the first 99 bytes
                begin, stop = int(first), maxlen
            else:            # bytes=100-200 -> bytes 100-200 (inclusive)
                begin, stop = int(first), min(int(last) + 1, maxlen)
            if 0 <= begin < stop <= maxlen:
                yield begin, stop
        except ValueError:
            # Non-numeric bound: skip this range spec.
            pass
def _parse_qsl(qs):
r = []
for pair in qs.replace(';', '&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x == y else 1
for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
    """ Encode and sign a pickle-able object. Return a (byte) string

        :param data: any pickle-able object
        :param key: secret used to sign the payload (HMAC)
        NOTE: signed pickle is only safe while `key` stays secret —
        unpickling attacker-controlled data executes arbitrary code.
    """
    import hashlib  # local import so no top-of-file change is required
    msg = base64.b64encode(pickle.dumps(data, -1))
    # Python 3.8+ requires an explicit digestmod; md5 preserves the historic
    # hmac default so cookies signed by older versions still verify.
    sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())
    return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
    """ Verify and decode an encoded string. Return an object or None.

        :param data: string produced by :func:`cookie_encode`
        :param key: the secret used for signing
    """
    import hashlib  # md5 matches the signing digest used by cookie_encode
    data = tob(data)
    if cookie_is_encoded(data):
        sig, msg = data.split(tob('?'), 1)
        # Constant-time signature check; explicit digestmod is required on
        # Python 3.8+ (hmac.new lost its implicit md5 default).
        expected = base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())
        if _lscmp(sig[1:], expected):
            return pickle.loads(base64.b64decode(msg))
    return None
def cookie_is_encoded(data):
    """ Return True if the argument looks like a encoded cookie."""
    # Encoded cookies start with '!' and contain a '?' separator.
    has_prefix = data.startswith(tob('!'))
    return bool(has_prefix and tob('?') in data)
def html_escape(string):
    """ Escape HTML special characters ``&<>`` and quotes ``'"``. """
    out = string
    # '&' must be replaced first so later entities are not double-escaped.
    for char, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'),
                         ('"', '&quot;'), ("'", '&#039;')):
        out = out.replace(char, entity)
    return out
def html_quote(string):
    """ Escape and quote a string to be used as an HTTP attribute."""
    escaped = html_escape(string)
    # Encode line breaks and tabs as character references so the value
    # survives inside a quoted attribute.
    for char, ref in (('\n', '&#10;'), ('\r', '&#13;'), ('\t', '&#9;')):
        escaped = escaped.replace(char, ref)
    return '"%s"' % escaped
def yieldroutes(func):
    """ Return a generator for routes that match the signature (name, args)
        of the func parameter. This may yield more than one route if the function
        takes optional keyword arguments. The output is best described by example::
        a()                -> '/a'
        b(x, y)            -> '/b/<x>/<y>'
        c(x, y=5)          -> '/c/<x>' and '/c/<x>/<y>'
        d(x=5, y=6)        -> '/d' and '/d/<x>' and '/d/<x>/<y>'
    """
    # Double underscores in the function name become path separators.
    path = '/' + func.__name__.replace('__', '/').lstrip('/')
    spec = getargspec(func)
    # spec[0] = argument names, spec[3] = default values.
    argc = len(spec[0]) - len(spec[3] or [])
    # Required arguments become mandatory wildcards ...
    path += ('/<%s>' * argc) % tuple(spec[0][:argc])
    yield path
    # ... and each optional argument adds one more, longer route.
    for arg in spec[0][argc:]:
        path += '/<%s>' % arg
        yield path
def path_shift(script_name, path_info, shift=1):
    """ Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.

        :return: The modified (script_name, path_info) pair.
        :param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
        :param shift: The number of path fragments to shift. May be negative
            to change the shift direction. (default: 1)
        :raises AssertionError: if there are not enough fragments to shift.
    """
    if shift == 0:
        return script_name, path_info
    fragments = path_info.strip('/').split('/')
    script_frags = script_name.strip('/').split('/')
    # An empty path splits to [''] — normalize that to an empty list.
    if fragments == ['']: fragments = []
    if script_frags == ['']: script_frags = []
    if 0 < shift <= len(fragments):
        # Move the leading PATH_INFO fragments onto SCRIPT_NAME.
        script_frags = script_frags + fragments[:shift]
        fragments = fragments[shift:]
    elif 0 > shift >= -len(script_frags):
        # Move the trailing SCRIPT_NAME fragments back onto PATH_INFO.
        fragments = script_frags[shift:] + fragments
        script_frags = script_frags[:shift]
    else:
        side = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
        raise AssertionError("Cannot shift. Nothing left from %s" % side)
    new_script_name = '/' + '/'.join(script_frags)
    new_path_info = '/' + '/'.join(fragments)
    # Preserve a trailing slash on a non-empty PATH_INFO.
    if path_info.endswith('/') and fragments:
        new_path_info += '/'
    return new_script_name, new_path_info
def auth_basic(check, realm="private", text="Access denied"):
    """ Callback decorator to require HTTP auth (basic).
        TODO: Add route(check_auth=...) parameter.

        :param check: callable(user, password) -> bool deciding access.
        :param realm: realm name sent in the WWW-Authenticate challenge.
        :param text: body of the 401 error response.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*a, **ka):
            # request.auth is (user, password) or None when no/invalid header.
            user, password = request.auth or (None, None)
            if user is None or not check(user, password):
                err = HTTPError(401, text)
                err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
                return err
            return func(*a, **ka)
        return wrapper
    return decorator
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
    """ Return a callable that relays calls to the current default app. """
    @functools.wraps(getattr(Bottle, name))
    def wrapper(*a, **ka):
        # `app()` returns the top of the application stack, so the target
        # application is resolved at call time, not at definition time.
        return getattr(app(), name)(*a, **ka)
    return wrapper
#: Module-level shortcuts that proxy the same-named method on the current
#: default application (the top of the app stack).
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
    """ Base class for server adapters. Stores host, port and any extra
        keyword options; subclasses implement :meth:`run`. """
    quiet = False
    def __init__(self, host='127.0.0.1', port=8080, **options):
        self.options = options
        self.host = host
        self.port = int(port)
    def run(self, handler):  # pragma: no cover
        """ Start the server (blocking). Overridden by subclasses. """
        pass
    def __repr__(self):
        formatted = ['%s=%s' % (key, repr(val))
                     for key, val in self.options.items()]
        return "%s(%s)" % (self.__class__.__name__, ', '.join(formatted))
class CGIServer(ServerAdapter):
    """ Run the application as a one-shot CGI script via wsgiref. """
    quiet = True
    def run(self, handler): # pragma: no cover
        from wsgiref.handlers import CGIHandler
        def fixed_environ(environ, start_response):
            # Some CGI environments omit PATH_INFO entirely.
            environ.setdefault('PATH_INFO', '')
            return handler(environ, start_response)
        CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
    """ FastCGI server based on the third-party `flup` package. """
    def run(self, handler): # pragma: no cover
        import flup.server.fcgi
        # Default to a TCP bind address unless the caller configured one
        # explicitly (e.g. a unix socket path).
        self.options.setdefault('bindAddress', (self.host, self.port))
        flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
    """ Single-threaded server from the standard library (wsgiref). """
    def run(self, app): # pragma: no cover
        from wsgiref.simple_server import make_server
        from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
        import socket
        class FixedHandler(WSGIRequestHandler):
            def address_string(self): # Prevent reverse DNS lookups please.
                return self.client_address[0]
            def log_request(*args, **kw):
                # `self` here is the *adapter* (closure), not the handler.
                if not self.quiet:
                    return WSGIRequestHandler.log_request(*args, **kw)
        handler_cls = self.options.get('handler_class', FixedHandler)
        server_cls = self.options.get('server_class', WSGIServer)
        if ':' in self.host: # Fix wsgiref for IPv6 addresses.
            if getattr(server_cls, 'address_family') == socket.AF_INET:
                class server_cls(server_cls):
                    address_family = socket.AF_INET6
        self.srv = make_server(self.host, self.port, app, server_cls,
                               handler_cls)
        self.port = self.srv.server_port # update port actual port (0 means random)
        try:
            self.srv.serve_forever()
        except KeyboardInterrupt:
            self.srv.server_close() # Prevent ResourceWarning: unclosed socket
            raise
class CherryPyServer(ServerAdapter):
    """ Multi-threaded CherryPy WSGI server with optional SSL support.

        The bottle-level options ``certfile`` and ``keyfile`` enable SSL and
        are consumed here; all other options are passed on to
        ``CherryPyWSGIServer``.
    """
    def run(self, handler): # pragma: no cover
        from cherrypy import wsgiserver
        self.options['bind_addr'] = (self.host, self.port)
        self.options['wsgi_app'] = handler
        # pop() removes the keys even when their value is falsy; the old
        # `get` + conditional `del` left e.g. certfile='' in the options
        # and crashed CherryPyWSGIServer with an unexpected keyword.
        certfile = self.options.pop('certfile', None)
        keyfile = self.options.pop('keyfile', None)
        server = wsgiserver.CherryPyWSGIServer(**self.options)
        if certfile:
            server.ssl_certificate = certfile
        if keyfile:
            server.ssl_private_key = keyfile
        try:
            server.start()
        finally:
            server.stop()
class WaitressServer(ServerAdapter):
    """ Multi-threaded, pure-python server based on `waitress`. """
    def run(self, handler):
        from waitress import serve
        serve(handler, host=self.host, port=self.port, _quiet=self.quiet)
class PasteServer(ServerAdapter):
    """ Multi-threaded server based on `paste.httpserver`. """
    def run(self, handler):  # pragma: no cover
        from paste import httpserver
        from paste.translogger import TransLogger
        # Wrap the app in a request logger; console logging follows `quiet`.
        handler = TransLogger(handler, setup_console_handler=(not self.quiet))
        httpserver.serve(handler,
                         host=self.host,
                         port=str(self.port), **self.options)
class MeinheldServer(ServerAdapter):
    """ Asynchronous server based on `meinheld`. """
    def run(self, handler):
        from meinheld import server
        server.listen((self.host, self.port))
        server.run(handler)
class FapwsServer(ServerAdapter):
    """ Extremely fast webserver using libev. See http://www.fapws.org/ """
    def run(self, handler):  # pragma: no cover
        import fapws._evwsgi as evwsgi
        from fapws import base, config
        port = self.port
        if float(config.SERVER_IDENT[-2:]) > 0.4:
            # fapws3 silently changed its API in 0.5
            port = str(port)
        evwsgi.start(self.host, port)
        # fapws3 never releases the GIL. Complain upstream. I tried. No luck.
        if 'BOTTLE_CHILD' in os.environ and not self.quiet:
            _stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
            _stderr("         (Fapws3 breaks python thread support)\n")
        evwsgi.set_base_module(base)
        def app(environ, start_response):
            # fapws3 is single-process; advertise that to WSGI middleware.
            environ['wsgi.multiprocess'] = False
            return handler(environ, start_response)
        evwsgi.wsgi_cb(('', app))
        evwsgi.run()
class TornadoServer(ServerAdapter):
    """ The super hyped asynchronous server by facebook. Untested. """
    def run(self, handler):  # pragma: no cover
        import tornado.wsgi, tornado.httpserver, tornado.ioloop
        container = tornado.wsgi.WSGIContainer(handler)
        server = tornado.httpserver.HTTPServer(container)
        server.listen(port=self.port, address=self.host)
        # Blocks until the IOLoop is stopped.
        tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
    """ Adapter for Google App Engine. """
    quiet = True
    def run(self, handler):
        from google.appengine.ext.webapp import util
        # A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
        module = sys.modules.get('__main__')
        if module and not hasattr(module, 'main'):
            module.main = lambda: util.run_wsgi_app(handler)
        util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
    """ Untested. Runs the app in a twisted thread pool behind the reactor. """
    def run(self, handler):
        from twisted.web import server, wsgi
        from twisted.python.threadpool import ThreadPool
        from twisted.internet import reactor
        thread_pool = ThreadPool()
        thread_pool.start()
        # Make sure worker threads are stopped when the reactor shuts down.
        reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
        factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
        reactor.listenTCP(self.port, factory, interface=self.host)
        if not reactor.running:
            reactor.run()
class DieselServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from diesel.protocols.wsgi import WSGIApplication
        # NOTE: self.host is not passed on to diesel here; only the port is.
        app = WSGIApplication(handler, port=self.port)
        app.run()
class GeventServer(ServerAdapter):
    """ Untested. Options:
        * `fast` (default: False) uses libevent's http server, but has some
          issues: No streaming, no pipelining, no SSL.
        * See gevent.wsgi.WSGIServer() documentation for more options.
    """
    def run(self, handler):
        from gevent import wsgi, pywsgi, local
        # If monkey patching happened, threading.local() is gevent's local.
        if not isinstance(threading.local(), local.local):
            msg = "Bottle requires gevent.monkey.patch_all() (before import)"
            raise RuntimeError(msg)
        if not self.options.pop('fast', None): wsgi = pywsgi
        self.options['log'] = None if self.quiet else 'default'
        address = (self.host, self.port)
        server = wsgi.WSGIServer(address, handler, **self.options)
        if 'BOTTLE_CHILD' in os.environ:
            # Auto-reloader child: stop cleanly on SIGINT so the parent can
            # restart us.
            import signal
            signal.signal(signal.SIGINT, lambda s, f: server.stop())
        server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
    """ Server for gevent-socketio applications. """
    def run(self, handler):
        from socketio import server
        address = (self.host, self.port)
        server.SocketIOServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
    """ Untested. See http://gunicorn.org/configure.html for options. """
    def run(self, handler):
        from gunicorn.app.base import Application
        config = {'bind': "%s:%d" % (self.host, int(self.port))}
        config.update(self.options)
        class GunicornApplication(Application):
            def init(self, parser, opts, args):
                # The returned dict becomes gunicorn's configuration.
                return config
            def load(self):
                return handler
        GunicornApplication().run()
class EventletServer(ServerAdapter):
    """ Untested. Options:
        * `backlog` adjust the eventlet backlog parameter which is the maximum
          number of queued connections. Should be at least 1; the maximum
          value is system-dependent.
        * `family`: (default is 2) socket family, optional. See socket
          documentation for available families.
    """
    def run(self, handler):
        from eventlet import wsgi, listen, patcher
        if not patcher.is_monkey_patched(os):
            msg = "Bottle requires eventlet.monkey_patch() (before import)"
            raise RuntimeError(msg)
        socket_args = {}
        # Split socket-level options out of the adapter options.
        for arg in ('backlog', 'family'):
            try:
                socket_args[arg] = self.options.pop(arg)
            except KeyError:
                pass
        address = (self.host, self.port)
        try:
            wsgi.server(listen(address, **socket_args), handler,
                        log_output=(not self.quiet))
        except TypeError:
            # Fallback, if we have old version of eventlet
            wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from rocket import Rocket
        server = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
        server.start()
class BjoernServer(ServerAdapter):
    """ Fast server written in C: https://github.com/jonashaag/bjoern """
    def run(self, handler):
        """ Serve the handler via bjoern. """
        from bjoern import run
        run(handler, self.host, self.port)
class AiohttpServer(ServerAdapter):
    """ Untested.
        aiohttp
        https://pypi.python.org/pypi/aiohttp/
    """
    def run(self, handler):
        import asyncio
        from aiohttp.wsgi import WSGIServerHttpProtocol
        # Use a dedicated event loop so repeated run() calls start fresh.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        protocol_factory = lambda: WSGIServerHttpProtocol(
            handler,
            readpayload=True,
            debug=(not self.quiet))
        self.loop.run_until_complete(self.loop.create_server(protocol_factory,
                                                             self.host,
                                                             self.port))
        if 'BOTTLE_CHILD' in os.environ:
            # Auto-reloader child: stop the loop cleanly on SIGINT.
            import signal
            signal.signal(signal.SIGINT, lambda s, f: self.loop.stop())
        try:
            self.loop.run_forever()
        except KeyboardInterrupt:
            self.loop.stop()
class AutoServer(ServerAdapter):
    """ Try each adapter in order and use the first whose backend imports.
        Any adapter whose third-party server is missing raises ImportError
        and is silently skipped.
    """
    adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer,
                WSGIRefServer]
    def run(self, handler):
        for sa in self.adapters:
            try:
                return sa(self.host, self.port, **self.options).run(handler)
            except ImportError:
                pass
# Maps the server name strings accepted by run(server=...) to adapter classes.
server_names = {
    'cgi': CGIServer,
    'flup': FlupFCGIServer,
    'wsgiref': WSGIRefServer,
    'waitress': WaitressServer,
    'cherrypy': CherryPyServer,
    'paste': PasteServer,
    'fapws3': FapwsServer,
    'tornado': TornadoServer,
    'gae': AppEngineServer,
    'twisted': TwistedServer,
    'diesel': DieselServer,
    'meinheld': MeinheldServer,
    'gunicorn': GunicornServer,
    'eventlet': EventletServer,
    'gevent': GeventServer,
    'geventSocketIO': GeventSocketIOServer,
    'rocket': RocketServer,
    'bjoern': BjoernServer,
    'aiohttp': AiohttpServer,
    'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
    """ Import a module or fetch an object from a module.
        * ``package.module`` returns `module` as a module object.
        * ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
        * ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
        The last form accepts not only function calls, but any type of
        expression. Keyword arguments passed to this function are available as
        local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``
    """
    if ':' in target:
        module, expr = target.split(":", 1)
    else:
        module, expr = target, None
    if module not in sys.modules:
        __import__(module)
    if not expr:
        return sys.modules[module]
    if expr.isalnum():
        # A bare identifier: plain attribute lookup, no eval() needed.
        return getattr(sys.modules[module], expr)
    # Arbitrary expression: evaluate it with the root package in scope.
    root_package = module.split('.')[0]
    namespace[root_package] = sys.modules[root_package]
    return eval('%s.%s' % (module, expr), namespace)
def load_app(target):
    """ Load a bottle application from a module and make sure that the import
        does not affect the current default application, but returns a separate
        application object. See :func:`load` for the target parameter. """
    global NORUN
    NORUN, nr_old = True, NORUN  # Disable run() calls during the import.
    tmp = default_app.push()  # Create a new "default application"
    try:
        rv = load(target)  # Import the target module
        # Prefer an explicitly returned app; fall back to the module default.
        return rv if callable(rv) else tmp
    finally:
        default_app.remove(tmp)  # Remove the temporary added default application
        NORUN = nr_old
_debug = debug  # Preserve module-level debug() before run()'s parameter shadows it.
def run(app=None,
        server='wsgiref',
        host='127.0.0.1',
        port=8080,
        interval=1,
        reloader=False,
        quiet=False,
        plugins=None,
        debug=None,
        config=None, **kargs):
    """ Start a server instance. This method blocks until the server terminates.
        :param app: WSGI application or target string supported by
               :func:`load_app`. (default: :func:`default_app`)
        :param server: Server adapter to use. See :data:`server_names` keys
               for valid names or pass a :class:`ServerAdapter` subclass.
               (default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on
               all interfaces including the external one. (default: 127.0.0.1)
        :param port: Server port to bind to. Values below 1024 require root
               privileges. (default: 8080)
        :param reloader: Start auto-reloading server? (default: False)
        :param interval: Auto-reloader interval in seconds (default: 1)
        :param quiet: Suppress output to stdout and stderr? (default: False)
        :param plugins: Plugins (instances or :func:`load` target strings)
               installed on the application before serving.
        :param debug: If not None, set the global DEBUG flag first.
        :param config: Dict merged into the application config.
        :param options: Options passed to the server adapter.
     """
    if NORUN: return
    if reloader and not os.environ.get('BOTTLE_CHILD'):
        # Reloader parent: spawn a child running the same command line and
        # restart it whenever it exits with status 3 (requested reload).
        import subprocess
        lockfile = None
        try:
            fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
            os.close(fd)  # We only need this file to exist. We never write to it
            while os.path.exists(lockfile):
                args = [sys.executable] + sys.argv
                environ = os.environ.copy()
                environ['BOTTLE_CHILD'] = 'true'
                environ['BOTTLE_LOCKFILE'] = lockfile
                p = subprocess.Popen(args, env=environ)
                while p.poll() is None:  # Busy wait...
                    os.utime(lockfile, None)  # I am alive!
                    time.sleep(interval)
                if p.poll() != 3:
                    if os.path.exists(lockfile): os.unlink(lockfile)
                    sys.exit(p.poll())
        except KeyboardInterrupt:
            pass
        finally:
            # BUGFIX: mkstemp() may have failed before `lockfile` was assigned;
            # guard against os.path.exists(None) raising TypeError here and
            # masking the original exception.
            if lockfile and os.path.exists(lockfile):
                os.unlink(lockfile)
        return
    try:
        if debug is not None: _debug(debug)
        app = app or default_app()
        # Accept an import string ('module:app') as well as an app object.
        if isinstance(app, basestring):
            app = load_app(app)
        if not callable(app):
            raise ValueError("Application is not callable: %r" % app)
        for plugin in plugins or []:
            if isinstance(plugin, basestring):
                plugin = load(plugin)
            app.install(plugin)
        if config:
            app.config.update(config)
        # Resolve the server parameter: name -> class -> instance.
        if server in server_names:
            server = server_names.get(server)
        if isinstance(server, basestring):
            server = load(server)
        if isinstance(server, type):
            server = server(host=host, port=port, **kargs)
        if not isinstance(server, ServerAdapter):
            raise ValueError("Unknown or unsupported server: %r" % server)
        server.quiet = server.quiet or quiet
        if not server.quiet:
            _stderr("Bottle v%s server starting up (using %s)...\n" %
                    (__version__, repr(server)))
            _stderr("Listening on http://%s:%d/\n" %
                    (server.host, server.port))
            _stderr("Hit Ctrl-C to quit.\n\n")
        if reloader:
            # Reloader child: watch module files and exit(3) to request restart.
            lockfile = os.environ.get('BOTTLE_LOCKFILE')
            bgcheck = FileCheckerThread(lockfile, interval)
            with bgcheck:
                server.run(app)
            if bgcheck.status == 'reload':
                sys.exit(3)
        else:
            server.run(app)
    except KeyboardInterrupt:
        pass
    except (SystemExit, MemoryError):
        raise
    except:
        if not reloader: raise
        if not getattr(server, 'quiet', quiet):
            print_exc()
        time.sleep(interval)
        sys.exit(3)
class FileCheckerThread(threading.Thread):
    """ Interrupt main-thread as soon as a changed module file is detected,
        the lockfile gets deleted or gets too old. """
    def __init__(self, lockfile, interval):
        threading.Thread.__init__(self)
        self.daemon = True
        self.lockfile, self.interval = lockfile, interval
        #: Is one of 'reload', 'error' or 'exit'
        self.status = None
    def run(self):
        exists = os.path.exists
        mtime = lambda p: os.stat(p).st_mtime
        files = dict()
        # Snapshot the mtime of every module file loaded so far.
        for module in list(sys.modules.values()):
            path = getattr(module, '__file__', '')
            if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
            if path and exists(path): files[path] = mtime(path)
        while not self.status:
            # A missing or stale lockfile means the parent process is gone.
            if not exists(self.lockfile)\
            or mtime(self.lockfile) < time.time() - self.interval - 5:
                self.status = 'error'
                thread.interrupt_main()
            for path, lmtime in list(files.items()):
                if not exists(path) or mtime(path) > lmtime:
                    self.status = 'reload'
                    thread.interrupt_main()
                    break
            time.sleep(self.interval)
    def __enter__(self):
        self.start()
    def __exit__(self, exc_type, *_):
        if not self.status: self.status = 'exit'  # silent exit
        self.join()
        # Swallow the KeyboardInterrupt raised by interrupt_main().
        return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
    """ Raised for template related errors; rendered as an HTTP 500 error. """
    def __init__(self, message):
        super(TemplateError, self).__init__(500, message)
class BaseTemplate(object):
    """ Base class and minimal API for template adapters """
    extensions = ['tpl', 'html', 'thtml', 'stpl']
    settings = {}  #used in prepare()
    defaults = {}  #used in render()
    def __init__(self,
                 source=None,
                 name=None,
                 lookup=None,
                 encoding='utf8', **settings):
        """ Create a new template.
        If the source parameter (str or buffer) is missing, the name argument
        is used to guess a template filename. Subclasses can assume that
        self.source and/or self.filename are set. Both are strings.
        The lookup, encoding and settings parameters are stored as instance
        variables.
        The lookup parameter stores a list containing directory paths.
        The encoding parameter should be used to decode byte strings or files.
        The settings parameter contains a dict for engine-specific settings.
        """
        self.name = name
        self.source = source.read() if hasattr(source, 'read') else source
        self.filename = source.filename if hasattr(source, 'filename') else None
        self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
        self.encoding = encoding
        self.settings = self.settings.copy()  # Copy from class variable
        self.settings.update(settings)  # Apply
        if not self.source and self.name:
            self.filename = self.search(self.name, self.lookup)
            if not self.filename:
                raise TemplateError('Template %s not found.' % repr(name))
        if not self.source and not self.filename:
            raise TemplateError('No template specified.')
        self.prepare(**self.settings)
    @classmethod
    def search(cls, name, lookup=None):
        """ Search name in all directories specified in lookup.
        First without, then with common extensions. Return first hit. """
        if not lookup:
            depr('The template lookup path list should not be empty.',
                 True)  #0.12
            lookup = ['.']
        if os.path.isabs(name) and os.path.isfile(name):
            depr('Absolute template path names are deprecated.', True)  #0.12
            return os.path.abspath(name)
        for spath in lookup:
            spath = os.path.abspath(spath) + os.sep
            fname = os.path.abspath(os.path.join(spath, name))
            # Refuse paths that escape the lookup directory (traversal guard).
            if not fname.startswith(spath): continue
            if os.path.isfile(fname): return fname
            for ext in cls.extensions:
                if os.path.isfile('%s.%s' % (fname, ext)):
                    return '%s.%s' % (fname, ext)
    @classmethod
    def global_config(cls, key, *args):
        """ This reads or sets the global settings stored in class.settings. """
        if args:
            cls.settings = cls.settings.copy()  # Make settings local to class
            cls.settings[key] = args[0]
        else:
            return cls.settings[key]
    def prepare(self, **options):
        """ Run preparations (parsing, caching, ...).
        It should be possible to call this again to refresh a template or to
        update settings.
        """
        raise NotImplementedError
    def render(self, *args, **kwargs):
        """ Render the template with the specified local variables and return
        a single byte or unicode string. If it is a byte string, the encoding
        must match self.encoding. This method must be thread-safe!
        Local variables may be provided in dictionaries (args)
        or directly, as keywords (kwargs).
        """
        raise NotImplementedError
class MakoTemplate(BaseTemplate):
    """ Adapter for the mako template engine. """
    def prepare(self, **options):
        from mako.template import Template
        from mako.lookup import TemplateLookup
        options.update({'input_encoding': self.encoding})
        # Show mako's pretty exception pages while the global DEBUG flag is on.
        options.setdefault('format_exceptions', bool(DEBUG))
        lookup = TemplateLookup(directories=self.lookup, **options)
        if self.source:
            self.tpl = Template(self.source, lookup=lookup, **options)
        else:
            self.tpl = Template(uri=self.name,
                                filename=self.filename,
                                lookup=lookup, **options)
    def render(self, *args, **kwargs):
        """ Render with positional dicts and keyword args as template vars. """
        for dictarg in args:
            kwargs.update(dictarg)
        _defaults = self.defaults.copy()
        _defaults.update(kwargs)
        return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
    """ Adapter for the Cheetah template engine. """
    def prepare(self, **options):
        from Cheetah.Template import Template
        # Template variables are passed via a thread-local searchList so
        # concurrent renders do not see each other's variables.
        self.context = threading.local()
        self.context.vars = {}
        options['searchList'] = [self.context.vars]
        if self.source:
            self.tpl = Template(source=self.source, **options)
        else:
            self.tpl = Template(file=self.filename, **options)
    def render(self, *args, **kwargs):
        """ Render with positional dicts and keyword args as template vars. """
        for dictarg in args:
            kwargs.update(dictarg)
        self.context.vars.update(self.defaults)
        self.context.vars.update(kwargs)
        out = str(self.tpl)
        self.context.vars.clear()
        return out
class Jinja2Template(BaseTemplate):
    """ Adapter for the jinja2 template engine. """
    def prepare(self, filters=None, tests=None, globals=None, **kwargs):
        """ Build the jinja2 Environment and compile the template.
            :param filters: dict merged into the environment's filters.
            :param tests: dict merged into the environment's tests.
            :param globals: dict merged into the environment's globals.
                   (default changed from the mutable ``{}`` to ``None``;
                   both are falsy, so behavior is unchanged.)
            Remaining keyword arguments are passed to jinja2.Environment.
        """
        from jinja2 import Environment, FunctionLoader
        self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
        if filters: self.env.filters.update(filters)
        if tests: self.env.tests.update(tests)
        if globals: self.env.globals.update(globals)
        if self.source:
            self.tpl = self.env.from_string(self.source)
        else:
            self.tpl = self.env.get_template(self.filename)
    def render(self, *args, **kwargs):
        """ Render with positional dicts and keyword args as template vars. """
        for dictarg in args:
            kwargs.update(dictarg)
        _defaults = self.defaults.copy()
        _defaults.update(kwargs)
        return self.tpl.render(**_defaults)
    def loader(self, name):
        """ Template loader callback: resolve `name` via the lookup path and
            return the decoded source, or None if not found. """
        fname = self.search(name, self.lookup)
        if not fname: return
        with open(fname, "rb") as f:
            return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
    """ Bottle's built-in stpl template engine. """
    def prepare(self,
                escape_func=html_escape,
                noescape=False,
                syntax=None, **ka):
        self.cache = {}
        enc = self.encoding
        self._str = lambda x: touni(x, enc)
        self._escape = lambda x: escape_func(touni(x, enc))
        self.syntax = syntax
        if noescape:
            # Swap the helpers so {{x}} is raw and {{!x}} is escaped.
            self._str, self._escape = self._escape, self._str
    @cached_property
    def co(self):
        # Compiled code object for the translated template (computed once).
        return compile(self.code, self.filename or '<string>', 'exec')
    @cached_property
    def code(self):
        # Translate the template source into python code (computed once).
        source = self.source
        if not source:
            with open(self.filename, 'rb') as f:
                source = f.read()
        try:
            source, encoding = touni(source), 'utf8'
        except UnicodeError:
            depr('Template encodings other than utf8 are not supported.')  #0.11
            source, encoding = touni(source, 'latin1'), 'latin1'
        parser = StplParser(source, encoding=encoding, syntax=self.syntax)
        code = parser.translate()
        self.encoding = parser.encoding
        return code
    def _rebase(self, _env, _name=None, **kwargs):
        # Called from template code via rebase(); stored and applied later.
        _env['_rebase'] = (_name, kwargs)
    def _include(self, _env, _name=None, **kwargs):
        # Called from template code via include(); renders a sub-template.
        env = _env.copy()
        env.update(kwargs)
        if _name not in self.cache:
            self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
        return self.cache[_name].execute(env['_stdout'], env)
    def execute(self, _stdout, kwargs):
        """ Execute the compiled template, appending output to `_stdout`. """
        env = self.defaults.copy()
        env.update(kwargs)
        env.update({
            '_stdout': _stdout,
            '_printlist': _stdout.extend,
            'include': functools.partial(self._include, env),
            'rebase': functools.partial(self._rebase, env),
            '_rebase': None,
            '_str': self._str,
            '_escape': self._escape,
            'get': env.get,
            'setdefault': env.setdefault,
            'defined': env.__contains__
        })
        eval(self.co, env)
        if env.get('_rebase'):
            # rebase() was called: render the base template with the current
            # output available as the 'base' variable.
            subtpl, rargs = env.pop('_rebase')
            rargs['base'] = ''.join(_stdout)  #copy stdout
            del _stdout[:]  # clear stdout
            return self._include(env, subtpl, **rargs)
        return env
    def render(self, *args, **kwargs):
        """ Render the template using keyword arguments as local variables. """
        env = {}
        stdout = []
        for dictarg in args:
            env.update(dictarg)
        env.update(kwargs)
        self.execute(stdout, env)
        return ''.join(stdout)
class StplSyntaxError(TemplateError):
    """ Raised for syntax errors in stpl templates. """
    pass
class StplParser(object):
    """ Parser for stpl templates. """
    _re_cache = {}  #: Cache for compiled re patterns
    # This huge pile of voodoo magic splits python code into 8 different tokens.
    # We use the verbose (?x) regex mode to make this more manageable
    _re_tok = _re_inl = r'''((?mx)         # verbose and dot-matches-newline mode
        [urbURB]*
        (?:  ''(?!')
            |""(?!")
            |'{6}
            |"{6}
            |'(?:[^\\']|\\.)+?'
            |"(?:[^\\"]|\\.)+?"
            |'{3}(?:[^\\]|\\.|\n)+?'{3}
            |"{3}(?:[^\\]|\\.|\n)+?"{3}
        )
    )'''
    _re_inl = _re_tok.replace(r'|\n', '')  # We re-use this string pattern later
    _re_tok += r'''
        # 2: Comments (until end of line, but not the newline itself)
        |(\#.*)
        # 3: Open and close (4) grouping tokens
        |([\[\{\(])
        |([\]\}\)])
        # 5,6: Keywords that start or continue a python block (only start of line)
        |^([\ \t]*(?:if|for|while|with|try|def|class)\b)
        |^([\ \t]*(?:elif|else|except|finally)\b)
        # 7: Our special 'end' keyword (but only if it stands alone)
        |((?:^|;)[\ \t]*end[\ \t]*(?=(?:%(block_close)s[\ \t]*)?\r?$|;|\#))
        # 8: A customizable end-of-code-block template token (only end of line)
        |(%(block_close)s[\ \t]*(?=\r?$))
        # 9: And finally, a single newline. The 10th token is 'everything else'
        |(\r?\n)
    '''
    # Match the start tokens of code areas in a template
    _re_split = r'''(?m)^[ \t]*(\\?)((%(line_start)s)|(%(block_start)s))'''
    # Match inline statements (may contain python strings)
    _re_inl = r'''%%(inline_start)s((?:%s|[^'"\n]+?)*?)%%(inline_end)s''' % _re_inl
    default_syntax = '<% %> % {{ }}'
    def __init__(self, source, syntax=None, encoding='utf8'):
        # Normalise the template source to unicode before tokenizing.
        self.source, self.encoding = touni(source, encoding), encoding
        self.set_syntax(syntax or self.default_syntax)
        self.code_buffer, self.text_buffer = [], []
        self.lineno, self.offset = 1, 0
        self.indent, self.indent_mod = 0, 0
        self.paren_depth = 0
    def get_syntax(self):
        """ Tokens as a space separated string (default: <% %> % {{ }}) """
        return self._syntax
    def set_syntax(self, syntax):
        self._syntax = syntax
        self._tokens = syntax.split()
        if not syntax in self._re_cache:
            # Build and cache the three regexes for this syntax variant.
            names = 'block_start block_close line_start inline_start inline_end'
            etokens = map(re.escape, self._tokens)
            pattern_vars = dict(zip(names.split(), etokens))
            patterns = (self._re_split, self._re_tok, self._re_inl)
            patterns = [re.compile(p % pattern_vars) for p in patterns]
            self._re_cache[syntax] = patterns
        self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
    syntax = property(get_syntax, set_syntax)
    def translate(self):
        """ Translate the template into python source (single use). """
        if self.offset: raise RuntimeError('Parser is a one time instance.')
        while True:
            m = self.re_split.search(self.source, pos=self.offset)
            if m:
                # Everything before the code marker is literal text.
                text = self.source[self.offset:m.start()]
                self.text_buffer.append(text)
                self.offset = m.end()
                if m.group(1):  # Escape syntax
                    line, sep, _ = self.source[self.offset:].partition('\n')
                    self.text_buffer.append(self.source[m.start():m.start(1)] +
                                            m.group(2) + line + sep)
                    self.offset += len(line + sep)
                    continue
                self.flush_text()
                self.offset += self.read_code(self.source[self.offset:],
                                              multiline=bool(m.group(4)))
            else:
                break
        self.text_buffer.append(self.source[self.offset:])
        self.flush_text()
        return ''.join(self.code_buffer)
    def read_code(self, pysource, multiline):
        # Consume python tokens until the code line/block ends; returns the
        # number of characters consumed from `pysource`.
        code_line, comment = '', ''
        offset = 0
        while True:
            m = self.re_tok.search(pysource, pos=offset)
            if not m:
                code_line += pysource[offset:]
                offset = len(pysource)
                self.write_code(code_line.strip(), comment)
                break
            code_line += pysource[offset:m.start()]
            offset = m.end()
            _str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
            if self.paren_depth > 0 and (_blk1 or _blk2):  # a if b else c
                code_line += _blk1 or _blk2
                continue
            if _str:  # Python string
                code_line += _str
            elif _com:  # Python comment (up to EOL)
                comment = _com
                if multiline and _com.strip().endswith(self._tokens[1]):
                    multiline = False  # Allow end-of-block in comments
            elif _po:  # open parenthesis
                self.paren_depth += 1
                code_line += _po
            elif _pc:  # close parenthesis
                if self.paren_depth > 0:
                    # we could check for matching parentheses here, but it's
                    # easier to leave that to python - just check counts
                    self.paren_depth -= 1
                code_line += _pc
            elif _blk1:  # Start-block keyword (if/for/while/with/try/def/class)
                code_line, self.indent_mod = _blk1, -1
                self.indent += 1
            elif _blk2:  # Continue-block keyword (else/elif/except/finally)
                code_line, self.indent_mod = _blk2, -1
            elif _end:  # The non-standard 'end'-keyword (ends a block)
                self.indent -= 1
            elif _cend:  # The end-code-block template token (usually '%>')
                if multiline: multiline = False
                else: code_line += _cend
            else:  # \n
                self.write_code(code_line.strip(), comment)
                self.lineno += 1
                code_line, comment, self.indent_mod = '', '', 0
                if not multiline:
                    break
        return offset
    def flush_text(self):
        # Emit buffered literal text as a single _printlist(...) statement.
        text = ''.join(self.text_buffer)
        del self.text_buffer[:]
        if not text: return
        parts, pos, nl = [], 0, '\\\n' + '  ' * self.indent
        for m in self.re_inl.finditer(text):
            prefix, pos = text[pos:m.start()], m.end()
            if prefix:
                parts.append(nl.join(map(repr, prefix.splitlines(True))))
            if prefix.endswith('\n'): parts[-1] += nl
            parts.append(self.process_inline(m.group(1).strip()))
        if pos < len(text):
            prefix = text[pos:]
            lines = prefix.splitlines(True)
            # Strip a trailing line-continuation backslash pair.
            if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
            elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
            parts.append(nl.join(map(repr, lines)))
        code = '_printlist((%s,))' % ', '.join(parts)
        self.lineno += code.count('\n') + 1
        self.write_code(code)
    @staticmethod
    def process_inline(chunk):
        # A leading '!' disables HTML escaping for this expression.
        if chunk[0] == '!': return '_str(%s)' % chunk[1:]
        return '_escape(%s)' % chunk
    def write_code(self, line, comment=''):
        # Append one line of generated python at the current indentation.
        code = '  ' * (self.indent + self.indent_mod)
        code += line.lstrip() + comment + '\n'
        self.code_buffer.append(code)
def template(*args, **kwargs):
    """
    Get a rendered template as a string iterator.
    You can use a name, a filename or a template string as first parameter.
    Template rendering arguments can be passed as dictionaries
    or directly (as keyword arguments).
    """
    tpl = args[0] if args else None
    adapter = kwargs.pop('template_adapter', SimpleTemplate)
    lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
    tplid = (id(lookup), tpl)
    # Compile on first use; in DEBUG mode recompile on every call.
    if tplid not in TEMPLATES or DEBUG:
        settings = kwargs.pop('template_settings', {})
        if isinstance(tpl, adapter):
            TEMPLATES[tplid] = tpl
            if settings: TEMPLATES[tplid].prepare(**settings)
        elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
            # Strings containing template markup are treated as source ...
            TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
        else:
            # ... everything else is treated as a template name to look up.
            TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
    if not TEMPLATES[tplid]:
        abort(500, 'Template (%s) not found' % tpl)
    for dictarg in args[1:]:
        kwargs.update(dictarg)
    return TEMPLATES[tplid].render(kwargs)
# Convenience partials of template() bound to a specific adapter.
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template,
                                     template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
    """ Decorator: renders a template for a handler.
        The handler can control its behavior like that:
        - return a dict of template vars to fill out the template
        - return something other than a dict and the view decorator will not
          process the template, but return the handler result as is.
          This includes returning a HTTPResponse(dict) to get,
          for instance, JSON with autojson or other castfilters.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            if isinstance(result, (dict, DictMixin)):
                # Merge handler-provided vars over the decorator defaults.
                tplvars = defaults.copy()
                tplvars.update(result)
                return template(tpl_name, **tplvars)
            elif result is None:
                return template(tpl_name, defaults)
            return result
        return wrapper
    return decorator
# Convenience partials of view() bound to a specific adapter.
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses.copy()
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v))
for (k, v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multi-threaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else
__name__ + ".ext", 'bottle_%s').module
if __name__ == '__main__':
    opt, args, parser = _cli_parse(sys.argv)
    def _cli_error(msg):
        # Print usage information plus the error message and exit non-zero.
        parser.print_help()
        _stderr('\nError: %s\n' % msg)
        sys.exit(1)
    if opt.version:
        _stdout('Bottle %s\n' % __version__)
        sys.exit(0)
    if not args:
        _cli_error("No application entry point specified.")
    sys.path.insert(0, '.')
    sys.modules.setdefault('bottle', sys.modules['__main__'])
    host, port = (opt.bind or 'localhost'), 8080
    # Split an optional ':port' suffix, taking care of IPv6 '[addr]:port'.
    if ':' in host and host.rfind(']') < host.rfind(':'):
        host, port = host.rsplit(':', 1)
    host = host.strip('[]')
    config = ConfigDict()
    for cfile in opt.conf or []:
        try:
            if cfile.endswith('.json'):
                with open(cfile, 'rb') as fp:
                    config.load_dict(json_loads(fp.read()))
            else:
                config.load_config(cfile)
        except ConfigParserError:
            _cli_error(str(_e()))
        except IOError:
            _cli_error("Unable to read config file %r" % cfile)
        except (UnicodeError, TypeError, ValueError):
            _cli_error("Unable to parse config file %r: %s" % (cfile, _e()))
    # -p key=value sets a config key; a bare -p key sets it to True.
    for cval in opt.param or []:
        if '=' in cval:
            config.update((cval.split('=', 1),))
        else:
            config[cval] = True
    run(args[0],
        host=host,
        port=int(port),
        server=opt.server,
        reloader=opt.reload,
        plugins=opt.plugin,
        debug=opt.debug,
        config=config)
# THE END
| 39.03558
| 104
| 0.565752
|
4a075a69a752e4871ff49100500cd903a22f7955
| 287
|
py
|
Python
|
pythondesafios/desafio049.py
|
matheus-rosario/curso-python
|
ac9ccf7fc4b3f708821e44787a1bdc231d9426ac
|
[
"MIT"
] | null | null | null |
pythondesafios/desafio049.py
|
matheus-rosario/curso-python
|
ac9ccf7fc4b3f708821e44787a1bdc231d9426ac
|
[
"MIT"
] | null | null | null |
pythondesafios/desafio049.py
|
matheus-rosario/curso-python
|
ac9ccf7fc4b3f708821e44787a1bdc231d9426ac
|
[
"MIT"
] | null | null | null |
# Redo challenge 009: show the multiplication table for a number chosen by
# the user, this time built with a for loop.
n = int(input('Digite um número: '))
separator = '-=' * 10
print(separator)
print(f'A tabuada do número {n} é a seguinte:')
for m in range(11):
    print(f'{n} * {m} = {n * m}')
print(separator)
| 35.875
| 79
| 0.648084
|
4a075b15d5ebaef6e02b6d9ddf1f157a8db3dab4
| 215
|
py
|
Python
|
8192_Section 1_code/grab4.py
|
PacktPublishing/Basic-and-low-level-Python-Network-Attacks
|
8d400f70e3570f60844a3d6cb3486bec00fd4c03
|
[
"MIT"
] | 8
|
2018-07-23T20:30:04.000Z
|
2021-11-08T13:10:39.000Z
|
8192_Section 1_code/grab4.py
|
w0x12ef/Basic-and-low-level-Python-Network-Attacks
|
82d7acc2ea4ec0df9e435421adcdc9d5e9897ad6
|
[
"MIT"
] | null | null | null |
8192_Section 1_code/grab4.py
|
w0x12ef/Basic-and-low-level-Python-Network-Attacks
|
82d7acc2ea4ec0df9e435421adcdc9d5e9897ad6
|
[
"MIT"
] | 11
|
2018-01-23T01:52:22.000Z
|
2020-12-12T07:51:05.000Z
|
# NOTE(review): Python 2 code (print statements, raw_input) -- must run
# under a Python 2 interpreter.
import socket

# Banner grab: connect to a user-chosen port and print whatever the
# service sends first (up to 1024 bytes).
s = socket.socket()
s.settimeout(2)  # give up after 2 seconds so closed/filtered ports fail fast
port = raw_input("Port number: ")
try:
    s.connect(("packtpub.samsclass.info", int(port)))
    print s.recv(1024)
    s.close()
except socket.error as err:
    # Timeout / refused connection ends up here; just report it.
    print err
| 13.4375
| 50
| 0.693023
|
4a075ee0ec1803809b40d968c6220da9c0c79530
| 180
|
py
|
Python
|
src/Kale/CodeAnalysis/Exceptions/illegalcharacter.py
|
billyeatcookies/Kale
|
b9f1b42291c3aefd012e92a28eb1d38df2122796
|
[
"MIT"
] | 8
|
2021-09-10T20:11:20.000Z
|
2021-11-16T12:54:15.000Z
|
src/Kale/CodeAnalysis/Exceptions/illegalcharacter.py
|
billyeatcookies/Kale
|
b9f1b42291c3aefd012e92a28eb1d38df2122796
|
[
"MIT"
] | 4
|
2021-09-13T15:27:56.000Z
|
2021-09-13T17:11:27.000Z
|
src/Kale/CodeAnalysis/Exceptions/illegalcharacter.py
|
billyeatcookies/Kale
|
b9f1b42291c3aefd012e92a28eb1d38df2122796
|
[
"MIT"
] | null | null | null |
from CodeAnalysis.Exceptions.error import Error
class IllegalCharacterException(Error):
    """Signals that the lexer met a character it cannot tokenize."""

    def __init__(self, char):
        # Keep the offending character available to handlers, and pass it
        # to the base Error so it shows up in the exception message.
        self.char = char
        super().__init__(char)
| 25.714286
| 47
| 0.716667
|
4a075efc59437257dbc181b230509aff3b7542ac
| 23,067
|
py
|
Python
|
exporting/export_objects.py
|
AssaSch/ExportImportPolicyPackage
|
a2e1a272e0692e375600895d569758fa022a0244
|
[
"Apache-2.0"
] | null | null | null |
exporting/export_objects.py
|
AssaSch/ExportImportPolicyPackage
|
a2e1a272e0692e375600895d569758fa022a0244
|
[
"Apache-2.0"
] | null | null | null |
exporting/export_objects.py
|
AssaSch/ExportImportPolicyPackage
|
a2e1a272e0692e375600895d569758fa022a0244
|
[
"Apache-2.0"
] | null | null | null |
from exporting.special_treatment_objects import handle_fields
from lists_and_dictionaries import no_export_fields_and_subfields, \
singular_to_plural_dictionary, group_objects_field, placeholder_type_by_obj_type, \
no_export_fields_by_api_type, special_treatment_types, no_export_fields
from utils import debug_log, merge_data, flatten_json, find_min_position_group, compare_versions, \
check_for_export_error, \
generate_new_dummy_ip_address
exported_objects = []
def get_query_rulebase_data(client, api_type, payload):
    """Fetch one layer's settings plus its complete rulebase.

    Pages through ``show-<api_type>`` replies, stitches rule sections that
    were split across reply chunks back together, and splits the result into
    sections, rules and the referenced objects dictionary.

    Returns a 4-tuple ``(layer_settings, rulebase_sections, rulebase_rules,
    general_objects)``, or ``(None, None, None, None)`` on any API failure.
    """
    rulebase_items = []
    rulebase_sections = []
    rulebase_rules = []
    general_objects = []
    debug_log("Getting layer information for layer [" + payload["name"] + "]")
    # We use here uid instead of name for supporting MDS env.
    layer_reply = client.api_call("show-" + api_type.split("-")[0] + "-layer", {"uid": payload["uid"]})
    if not layer_reply.success:
        debug_log("Failed to retrieve layer named '" +
                  payload["name"] + "'! Error: " + str(layer_reply.error_message) +
                  ". Layer was not exported!", True, True)
        return None, None, None, None
    layer_data = layer_reply.data
    # Build the layer settings dict per layer kind; field sets differ.
    if layer_data["type"] == "access-layer":
        layer_settings = {"name": layer_data["name"],
                          "uid": layer_data["uid"],
                          "color": layer_data["color"],
                          "comments": layer_data["comments"],
                          "applications-and-url-filtering": 'True',
                          "mobile-access": layer_data["mobile-access"],
                          "firewall": layer_data["firewall"],
                          "type": "access-layer"}
        # API >= 1.1 renamed data-awareness to content-awareness and added "shared".
        if compare_versions(client.api_version, "1.1") != -1:
            layer_settings["shared"] = layer_data["shared"]
            layer_settings["content-awareness"] = layer_data["content-awareness"]
        else:
            layer_settings["data-awareness"] = layer_data["data-awareness"]
    elif layer_data["type"] == "https-layer":
        layer_settings = {"name": layer_data["name"],
                          "uid": layer_data["uid"],
                          "color": layer_data["color"],
                          "comments": layer_data["comments"],
                          "shared": layer_data["shared"],
                          "type": "https-layer"}
    else:
        layer_settings = {"name": layer_data["name"],
                          "uid": layer_data["uid"],
                          "color": layer_data["color"],
                          "comments": layer_data["comments"],
                          "type": "threat-layer"}
        # Optional field: only present on some threat layers.
        if "detect-using-x-forward-for" in layer_data:
            layer_settings["detect-using-x-forward-for"] = layer_data["detect-using-x-forward-for"]
    debug_log("Getting information from show-" + api_type)
    seen_object_uids = []
    # We use here uid instead of name for supporting MDS env.
    queryPayload = {"uid": payload["uid"], "package": payload["package"]}
    if api_type == "threat-rule-exception-rulebase":
        queryPayload = {"uid": payload["uid"], "package": payload["package"], "rule-uid": payload["rule-uid"]}
    rulebase_replies = client.gen_api_query("show-" + api_type, details_level="full", container_keys=["rulebase"], payload=queryPayload)
    for rulebase_reply in rulebase_replies:
        if not rulebase_reply.success:
            debug_log("Failed to retrieve layer named '" +
                      payload["name"] + "'! Error: " + str(rulebase_reply.error_message) +
                      ". Layer was not exported!", True, True)
            return None, None, None, None
        rulebase_data = rulebase_reply.data
        if "total" not in rulebase_data or rulebase_data["total"] == 0:
            break
        # NOTE(review): 'done' is assigned but never read anywhere in this function.
        if rulebase_data["to"] == rulebase_data["total"]:
            done = True
        percentage_complete = int((float(rulebase_data["to"]) / float(rulebase_data["total"])) * 100)
        debug_log("Retrieved " + str(rulebase_data["to"]) +
                  " out of " + str(rulebase_data["total"]) + " rules (" + str(percentage_complete) + "%)", True)
        non_empty_rulebase_items = []
        skipped_first_empty_section = False
        for rulebase_item in rulebase_data["rulebase"]:
            # Drop a leading empty section (no rule-number / no "to" range).
            if not skipped_first_empty_section and "rule-number" not in rulebase_item and "to" not in rulebase_item:
                continue
            else:
                skipped_first_empty_section = True
            non_empty_rulebase_items.append(rulebase_item)
            # Stop once this chunk's last advertised rule has been consumed.
            if ("rule-number" in rulebase_item and rulebase_item["rule-number"] == rulebase_data["to"]) or (
                    "to" in rulebase_item and rulebase_item["to"] == rulebase_data["to"]):
                break
        # A section split across two reply chunks arrives twice with the same
        # uid -- merge its rules into the previously collected copy.
        if non_empty_rulebase_items and rulebase_items and non_empty_rulebase_items[0]["uid"] == \
                rulebase_items[len(rulebase_items) - 1]["uid"]:
            rulebase_items[len(rulebase_items) - 1]["rulebase"].extend(non_empty_rulebase_items[0]["rulebase"])
            rulebase_items[len(rulebase_items) - 1]["to"] = non_empty_rulebase_items[0]["to"]
            non_empty_rulebase_items = non_empty_rulebase_items[1:]
        rulebase_items.extend(non_empty_rulebase_items)
        # Collect referenced objects once per uid across all chunks.
        new_objects = [x for x in rulebase_data["objects-dictionary"] if x["uid"] not in seen_object_uids]
        seen_object_uids.extend([x["uid"] for x in new_objects])
        general_objects.extend(new_objects)
    for general_object in general_objects:
        string = (u"##Show presented object of type {0} " + (
            u"with name {1}" if "name" in general_object else u"with no name")).format(
            general_object["type"], general_object["name"] if "name" in general_object else "")
        debug_log(string)
        if should_export(general_object):
            check_for_export_error(general_object, client)
    debug_log("Analysing rulebase items...")
    for rulebase_item in rulebase_items:
        if any(x in rulebase_item["type"] for x in ["access-rule", "threat-rule", "threat-exception", "https-rule"]):
            string = (u"##Show presented independent rule of type {0} "
                      + (u"with name {1}" if "name" in rulebase_item else u"with no name")).format(
                rulebase_item["type"],
                rulebase_item["name"] if "name" in rulebase_item else "")
            debug_log(string)
            rulebase_rules.append(rulebase_item)
        elif "section" in rulebase_item["type"]:
            for rule in rulebase_item["rulebase"]:
                string = (u"##Show presented dependent rule of type {0} under section {1} " + (u"with name {2}" if
                          "name" in rule else u"with no name")).format(
                    rule["type"], rulebase_item["name"] if "name" in
                    rulebase_item else "???",
                    rule["name"] if "name" in rule else "")
                debug_log(string)
                rulebase_rules.append(rule)
            # Because of 50 items chunks per API query reply, one rule section may spread over several chunks!!!
            if rulebase_sections and rulebase_sections[len(rulebase_sections) - 1]["uid"] == rulebase_item["uid"]:
                if "to" in rulebase_item:
                    rulebase_sections[len(rulebase_sections) - 1]["to"] = rulebase_item["to"]
                continue
            string = (u"##Show presented section of type {0} " + (
                u"with name {1}" if "name" in rulebase_item else u"with no name")).format(
                rulebase_item["type"], rulebase_item["name"] if "name" in rulebase_item else "")
            debug_log(string)
            rulebase_sections.append(rulebase_item)
        else:
            debug_log("Unsupported rulebase object type - '" + rulebase_item["type"] + "'. Continue...",
                      print_to_error_log=True)
    return layer_settings, rulebase_sections, rulebase_rules, general_objects
def get_query_nat_rulebase_data(client, payload):
    """Fetch the package's NAT rulebase, flattened to rules only.

    Sections are deliberately not exported; each rule is tagged with
    ``__before_auto_rules`` recording whether it appeared above the
    "Automatic Generated Rules" section.

    Returns ``(rulebase_rules, general_objects)`` or ``(None, None)`` on
    API failure.
    """
    rulebase_items = []
    rulebase_rules = []
    general_objects = []
    seen_object_uids = []
    before_auto_rules = True
    debug_log("Getting information from show-nat-rulebase", True)
    rulebase_replies = client.gen_api_query("show-nat-rulebase", details_level="full", container_keys=["rulebase"], payload=payload)
    for rulebase_reply in rulebase_replies:
        if not rulebase_reply.success:
            debug_log("Failed to retrieve NAT rulebase! Error: " + str(rulebase_reply.error_message) +
                      ". NAT rulebase was not exported!", True, True)
            return None, None
        rulebase_data = rulebase_reply.data
        if "total" not in rulebase_data or rulebase_data["total"] == 0:
            break
        percentage_complete = int((float(rulebase_data["to"]) / float(rulebase_data["total"])) * 100)
        debug_log("Retrieved " + str(rulebase_data["to"]) +
                  " out of " + str(rulebase_data["total"]) + " rules (" + str(percentage_complete) + "%)", True)
        non_empty_rulebase_items = []
        for rulebase_item in rulebase_data["rulebase"]:
            if "nat-section" in rulebase_item["type"]:
                # Skip system auto generated section
                if "Automatic Generated Rules : " in rulebase_item["name"]:
                    before_auto_rules = False
                    continue
                # Skip empty section (no rules inside...)
                if "from" not in rulebase_item:
                    continue
            rulebase_item["__before_auto_rules"] = before_auto_rules
            non_empty_rulebase_items.append(rulebase_item)
            # Stop once this chunk's last advertised rule has been consumed.
            if ("to" in rulebase_item and rulebase_item["to"] == rulebase_data["to"]):
                break
        # A section split across two reply chunks arrives twice with the same
        # uid -- merge its rules into the previously collected copy.
        if non_empty_rulebase_items and rulebase_items and non_empty_rulebase_items[0]["uid"] == \
                rulebase_items[len(rulebase_items) - 1]["uid"]:
            rulebase_items[len(rulebase_items) - 1]["rulebase"].extend(non_empty_rulebase_items[0]["rulebase"])
            rulebase_items[len(rulebase_items) - 1]["to"] = non_empty_rulebase_items[0]["to"]
            non_empty_rulebase_items = non_empty_rulebase_items[1:]
        rulebase_items.extend(non_empty_rulebase_items)
        # Collect referenced objects once per uid across all chunks.
        new_objects = [x for x in rulebase_data["objects-dictionary"] if x["uid"] not in seen_object_uids]
        seen_object_uids.extend([x["uid"] for x in new_objects])
        general_objects.extend(new_objects)
    for general_object in general_objects:
        string = (u"##Show presented object of type {0} " + (
            u"with name {1}" if "name" in general_object else u"with no name")).format(
            general_object["type"], general_object["name"] if "name" in general_object else "")
        debug_log(string)
        if should_export(general_object):
            check_for_export_error(general_object, client)
    debug_log("Analysing rulebase items...")
    for rulebase_item in rulebase_items:
        if "nat-rule" in rulebase_item["type"]:
            string = (u"##Show presented independent rule of type {0}").format(rulebase_item["type"])
            debug_log(string)
            # "auto-generated" is a read-only server field; drop it before export.
            rulebase_item.pop("auto-generated", None)
            rulebase_rules.append(rulebase_item)
        elif "nat-section" in rulebase_item["type"]:
            # !!! Attention: exporting only NAT rules, without sections !!!
            for rule in rulebase_item["rulebase"]:
                string = (u"##Show presented dependent rule of type {0} under section {1}").format(
                    rule["type"], rulebase_item["name"] if "name" in rulebase_item else "???")
                debug_log(string)
                rule.pop("auto-generated", None)
                rule["__before_auto_rules"] = rulebase_item["__before_auto_rules"]
                rulebase_rules.append(rule)
            string = (u"##Show presented section of type {0} " + (
                u"with name {1}" if "name" in rulebase_item else u"with no name")).format(
                rulebase_item["type"], rulebase_item["name"] if "name" in rulebase_item else "")
            debug_log(string)
        else:
            debug_log("Unsupported NAT rulebase object type - '" + rulebase_item["type"] + "'. Continue...",
                      print_to_error_log=True)
    return rulebase_rules, general_objects
def replace_rule_field_uids_by_name(rule, general_objects):
    """Rewrite a rule in place: move rule-number into position and swap
    every UID reference for the matching object's name."""
    # A rule that already carries "position" was processed on an earlier
    # call -- bail out so the work is never done twice.
    if "position" in rule:
        return
    debug_log("Updating data for rule #" + str(rule["rule-number"]))
    rule["position"] = rule.pop("rule-number")
    replace_data(rule, general_objects)
def replace_exception_data(exception, general_objects, layer=None,
                           rule_number=None, group=None, position_in_group=None):
    """Normalize a threat exception in place for export.

    Sets "position" (from exception-number when attached to a layer, else
    from position_in_group), records the owning exception group or rule
    number, strips the server-side numbering fields, and replaces UID
    references with object names via replace_data().
    """
    # Already processed on an earlier call -- never rewrite twice.
    if "position" in exception:
        return
    position = position_in_group if not layer else exception["exception-number"]
    debug_log("Updating data for rule #" + str(position))
    exception["position"] = position
    if not layer:
        # Exception lives inside an exception group, not directly on a layer.
        exception["exception-group-name"] = group
        if "rule-number" in exception:
            exception.pop("rule-number")
    elif "exception-group-name" not in exception:
        # Directly attached to a rule: remember which rule it belongs to.
        exception["rule-number"] = rule_number
    if "exception-number" in exception:
        exception.pop("exception-number")
    replace_data(exception, general_objects)
def replace_data(obj, general_objects):
    """Recursively replace object UIDs with object names.

    Dicts and lists are rewritten value-by-value in place (and returned);
    a scalar equal to some object's "uid" is replaced by that object's
    display name. Everything else is returned unchanged.
    """
    if isinstance(obj, dict):
        for key in obj:
            obj[key] = replace_data(obj[key], general_objects)
        return obj
    if isinstance(obj, list):
        for index in range(len(obj)):
            obj[index] = replace_data(obj[index], general_objects)
        return obj
    for candidate in general_objects:
        if candidate["uid"] == obj:
            if "cpmiDisplayName" in candidate:
                display_name = candidate["cpmiDisplayName"]
            else:
                display_name = candidate["name"]
            # Preserve the original mapping: "Inner Layer" -> "Apply Layer".
            return display_name if display_name != "Inner Layer" else "Apply Layer"
    return obj
def should_export(obj):
    """Return True when *obj* belongs to an exportable domain.

    Only objects from the local "domain" or the "global domain" are
    exported; the built-in ThreatStandardSubRulebase is always excluded.
    """
    if obj.get("name") == "ThreatStandardSubRulebase":
        return False
    # TODO AdamG consider using domain-type
    if "domain" not in obj:
        return False
    return obj["domain"]["domain-type"] in ("domain", "global domain")
def get_objects(raw_data, version):
    """Partition *raw_data* by exportability for the given API *version*.

    Returns (object_dictionary, unexportable_objects, exportable_types):
    a type -> objects mapping for exportable objects, the list of objects
    whose type is unknown to this version, and the set of non-layer types
    that were seen.
    """
    object_dictionary = {}
    exportable_types = set()
    unexportable_objects = []
    for obj in raw_data:
        if not should_export(obj):
            continue
        api_type = obj["type"]
        if api_type in singular_to_plural_dictionary[version]:
            object_dictionary.setdefault(api_type, []).append(obj)
            # Layers are collected but handled separately, so they are not
            # counted among the exportable types.
            if "layer" not in api_type:
                exportable_types.add(api_type)
        else:
            unexportable_objects.append(obj)
    return object_dictionary, unexportable_objects, exportable_types
def export_general_objects(data_dict, api_type, object_dictionary, unexportable_objects, client):
    """Export the objects of one *api_type* into *data_dict*.

    For group-like types (those listed in group_objects_field) each group is
    expanded via get_group_objects() and its member containers are flattened
    to lists of member names before formatting; other types are formatted
    as-is.
    """
    new_object_dictionary = []
    if api_type in group_objects_field.keys():
        for group_object in object_dictionary:
            full_group_objects = get_group_objects(data_dict,
                                                   api_type, group_object, client, unexportable_objects)
            for full_group_object in full_group_objects:
                # Replace each member container (full objects) with names only.
                for container in group_objects_field[full_group_object["type"]]:
                    full_group_object[container] = [x["name"] for x in full_group_object[container]]
                new_object_dictionary.append(full_group_object)
    # If group expansion produced anything, export the expanded copies
    # instead of the originals.
    if new_object_dictionary:
        object_dictionary = new_object_dictionary
    format_and_merge_data(data_dict, object_dictionary)
def format_and_merge_data(data_dict, objects):
    """Format objects not yet exported and merge them into *data_dict*.

    The module-level ``exported_objects`` uid list makes this idempotent:
    an object already exported by an earlier call is skipped.
    """
    global exported_objects
    fresh_objects = [candidate for candidate in objects
                     if candidate["uid"] not in exported_objects]
    exported_objects.extend(candidate["uid"] for candidate in fresh_objects)
    merge_data(data_dict, format_objects(fresh_objects))
def format_objects(objects):
    """Flatten each object to a single-level dict ready for export.

    Types in special_treatment_types get their fields pre-processed by
    handle_fields() first; data-center objects additionally receive a
    "data-center-name" key copied from the flattened "data-center.name".
    """
    formatted_objects = []
    for obj in objects:
        api_type = obj["type"]
        if api_type in special_treatment_types:
            handle_fields(obj)
        flat_json = flatten_json(obj)
        # Special handling for data-center-object types - prepare the data for the import!
        if "data-center-object" in api_type:
            if "data-center.name" in flat_json.keys():
                flat_json["data-center-name"] = flat_json["data-center.name"]
        string = u"Exporting {0} with uid {1} named {2}" if "name" in obj else u"Exporting {0} with uid {1}"
        message = string.format(api_type, obj["uid"], obj["name"] if 'name' in obj else "").encode("utf-8")
        debug_log(message)
        formatted_objects.append(flat_json)
    return formatted_objects
def format_and_merge_unexportable_objects(data_dict, unexportable_objects):
    """Insert placeholder objects into *data_dict* for unexportable objects.

    Each placeholder keeps the original name and is given the template
    fields from placeholder_type_by_obj_type for the first matching type
    substring; empty template values become fresh dummy IP addresses.
    Objects with no matching template default to type "group".
    """
    formatted_objects = []
    for unexportable_object in unexportable_objects:
        placeholder = {"name": unexportable_object["name"]}
        for unexportable_obj_type in placeholder_type_by_obj_type.keys():
            # Substring match: e.g. a template keyed "host" covers host-like types.
            if unexportable_obj_type in unexportable_object["type"]:
                for field in placeholder_type_by_obj_type[unexportable_obj_type]:
                    field_value = placeholder_type_by_obj_type[unexportable_obj_type][field]
                    if field_value:
                        placeholder[field] = placeholder_type_by_obj_type[unexportable_obj_type][field]
                    else:
                        # Empty template value means "needs a unique dummy IP".
                        placeholder[field] = generate_new_dummy_ip_address()
        if "type" not in placeholder:
            placeholder["type"] = "group"
        formatted_objects.append(placeholder)
        # Placeholders go to the front so they exist before anything that
        # references them is imported.
        if placeholder["type"] in data_dict:
            data_dict[placeholder["type"]].insert(0, placeholder)
        else:
            data_dict[placeholder["type"]] = [placeholder]
def get_group_objects(data_dict, api_type, group, client, unexportable_objects):
    """Fetch one group object in full detail and export its members.

    For group-with-exclusion the include/except sub-groups are resolved
    recursively and returned alongside the group itself. For regular
    groups, member objects are partitioned, exported per type, and
    unexportable members are renamed to their placeholder names.

    Returns a list of fully-detailed group objects, or [] on API failure.
    """
    group_object_reply = client.api_call("show-" + api_type, {"uid": group["uid"], "details-level": "full"})
    if not group_object_reply.success:
        debug_log("Failed to retrieve group named '" +
                  group["name"] + "'! Error: " + str(group_object_reply.error_message) +
                  ". Group was not exported!", True, True)
        return []
    group_object = group_object_reply.data
    if api_type == "group-with-exclusion":
        include_group_object = None
        exclude_group_object = None
        if "include" in group_object:
            # "Any" needs no recursion; otherwise export the included group too.
            if group_object["include"]["type"] != "CpmiAnyObject":
                include_group_object = get_group_objects(data_dict, group_object["include"]["type"],
                                                         group_object["include"], client, unexportable_objects)
            # Keep only the name reference in the exported group itself.
            group_object["include"] = group_object["include"]["name"]
        if "except" in group_object:
            if group_object["except"]["type"] != "CpmiAnyObject":
                exclude_group_object = get_group_objects(data_dict, group_object["except"]["type"],
                                                         group_object["except"], client, unexportable_objects)
            group_object["except"] = group_object["except"]["name"]
        return_list = [group_object]
        if include_group_object:
            return_list.extend(include_group_object)
        if exclude_group_object:
            return_list.extend(exclude_group_object)
        return return_list
    member_objects = []
    for container in group_objects_field[api_type]:
        member_objects.extend(group_object[container])
    object_dictionary, group_unexportable_objects, exportable_types = \
        get_objects(member_objects, client.api_version)
    for member_object in member_objects:
        if should_export(member_object):
            check_for_export_error(member_object, client)
    merge_data(unexportable_objects, group_unexportable_objects)
    # Rename unexportable members inside the group to the placeholder names
    # that format_and_merge_unexportable_objects will create for them.
    for unexportable_object in unexportable_objects:
        for container in group_objects_field[api_type]:
            for member in group_object[container]:
                if unexportable_object["uid"] == member["uid"]:
                    member["name"] = unexportable_object["name"]
                    break
    # NOTE(review): this loop variable shadows the *api_type* parameter,
    # which is not used again afterwards -- confusing but harmless here.
    for api_type in exportable_types:
        debug_log("Exporting " + singular_to_plural_dictionary[client.api_version][api_type] +
                  " from group [" + group["name"] + "]", True)
        export_general_objects(data_dict, api_type, object_dictionary[api_type], unexportable_objects, client)
    return [group_object]
def format_and_merge_exception_groups(data_dict, exception_groups):
    """Sort exception groups by minimal position, strip the helper
    'positions' field, and merge them into *data_dict*.

    NOTE(review): termination relies on find_min_position_group removing
    the returned group from *exception_groups* on each call -- otherwise
    this while loop never ends. Confirm against its implementation.
    """
    sorted_exception_groups = []
    while exception_groups:
        sorted_exception_groups.append(find_min_position_group(exception_groups))
    for exception_group in sorted_exception_groups:
        exception_group.pop('positions')
    format_and_merge_data(data_dict, sorted_exception_groups)
# TODO AdamG
def cleanse_object_dictionary(object_dictionary):
    """Drop every object that fails should_export() from each type bucket.

    Fix: the original called list.remove() on the very list it was
    iterating, which silently skips the element that follows each removal
    (so consecutive unexportable objects were only partially cleansed).
    Rebuild each bucket with a filtered copy instead; slice assignment
    keeps the original list identity for any other holders of the
    reference.
    """
    for api_type in object_dictionary:
        object_dictionary[api_type][:] = [
            obj for obj in object_dictionary[api_type] if should_export(obj)
        ]
def clean_objects(data_dict):
    """Strip non-exportable fields from every object in *data_dict*.

    A field is removed when any dotted sub-field appears in the global
    exclusion list (adjusted per api_type), when its first segment is in
    no_export_fields, or when it matches the per-type exclusions in
    no_export_fields_by_api_type.

    Fixes: (1) the original popped keys while iterating the live
    obj.keys() view, which raises RuntimeError on Python 3 -- iterate a
    snapshot instead; (2) the per-type exclusion list only depends on
    api_type, so build it once per type rather than once per field.
    """
    for api_type in data_dict:
        local_no_export_fields_and_subfields = list(no_export_fields_and_subfields)
        if api_type == "time":
            # For time objects, these two fields are required and must be retained!
            local_no_export_fields_and_subfields.remove("from")
            local_no_export_fields_and_subfields.remove("to")
        for obj in data_dict[api_type]:
            # Snapshot the keys: popping while iterating the live view is illegal.
            for field in list(obj.keys()):
                sub_fields = field.split(".")
                if any(x for x in sub_fields if x in local_no_export_fields_and_subfields) or (
                        sub_fields[0] in no_export_fields) or (api_type in no_export_fields_by_api_type and any(
                        x for x in sub_fields if x in no_export_fields_by_api_type[api_type])):
                    obj.pop(field, None)
| 48.664557
| 141
| 0.613387
|
4a0760bed2db1239112837265380781db21af2a5
| 616
|
py
|
Python
|
social/migrations/0002_friend.py
|
zhongmei57485/SwiperPro
|
b00dde5af05f158d7cd2c649e8a07a2c19623b69
|
[
"Apache-2.0"
] | null | null | null |
social/migrations/0002_friend.py
|
zhongmei57485/SwiperPro
|
b00dde5af05f158d7cd2c649e8a07a2c19623b69
|
[
"Apache-2.0"
] | 9
|
2019-12-04T23:48:54.000Z
|
2021-06-10T18:31:57.000Z
|
social/migrations/0002_friend.py
|
zhongmei57485/SwiperPro
|
b00dde5af05f158d7cd2c649e8a07a2c19623b69
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-07-20 10:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the Friend model: a symmetric relation stored as two user ids
    # (uid1, uid2) in the "friends" table.

    dependencies = [
        ('social', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Friend',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uid1', models.IntegerField()),
                ('uid2', models.IntegerField()),
            ],
            options={
                'db_table': 'friends',
            },
        ),
    ]
| 24.64
| 114
| 0.519481
|
4a07611d6e577bf17f8307012c93ebe0193f7604
| 958
|
py
|
Python
|
algorithms/python/bad_anagrams.py
|
rik0/rk-exempla
|
811f859a0980b0636bbafa2656893d988c4d0e32
|
[
"MIT"
] | 1
|
2017-02-20T21:04:47.000Z
|
2017-02-20T21:04:47.000Z
|
algorithms/python/bad_anagrams.py
|
rik0/rk-exempla
|
811f859a0980b0636bbafa2656893d988c4d0e32
|
[
"MIT"
] | null | null | null |
algorithms/python/bad_anagrams.py
|
rik0/rk-exempla
|
811f859a0980b0636bbafa2656893d988c4d0e32
|
[
"MIT"
] | 2
|
2017-02-20T21:04:49.000Z
|
2021-05-18T11:29:16.000Z
|
import os
import sys
import itertools as it
import wordlists
import timeit
import time
import math
from anagrams import words_iterable
def yield_anagrams(dictionary, word):
    """Return every permutation of *word* that appears in *dictionary*.

    Brute force: all len(word)! letter orderings are generated and tested.
    Duplicate letters in *word* produce duplicate hits, because
    itertools.permutations treats positions as distinct.
    """
    candidates = (''.join(letters) for letters in it.permutations(word))
    return [anagram for anagram in candidates if anagram in dictionary]
def time_yield(min_, max_):
    # NOTE(review): unfinished timing stub -- `sx` is not defined anywhere,
    # so calling this raises NameError; `repetitions` is never used.
    # Confirm the intended repetition formula before wiring this up.
    for i in xrange(min_, max_):
        repetitions = sx
def main():
    # NOTE(review): Python 2 code (`file()`, print statements, time.clock).
    # Loads the word list, reports timing/size, demos an anagram lookup,
    # and returns the word set.
    # WORDLIST_URL is never used -- presumably the manual download source
    # for the default dictionary file; verify before removing.
    WORDLIST_URL = 'ftp://ftp.ox.ac.uk/pub/wordlists/american/dic-0294.tar.gz'
    try:
        DICT_FILE = sys.argv[1]
    except IndexError:
        DICT_FILE = '2of12.txt'
    start = time.clock()
    with file(DICT_FILE) as fh:
        wordlist = set(words_iterable(fh))
    print 'Read file in: ', time.clock() - start
    print len(wordlist), 'words available.'
    print yield_anagrams(wordlist, 'fighter')
    return wordlist
if __name__ == '__main__':
    # Time a single full run of main() (Python 2 print statement).
    print timeit.timeit(main, number=1)
| 24.564103
| 78
| 0.639875
|
4a07611f4742de059ad72ba990ce0ad64d6447a0
| 515
|
py
|
Python
|
PythonToJavascript/converters/TupleConverter.py
|
stoogoff/python-to-javascript
|
4349b09b15ada544501e7091c7ff1574487e7598
|
[
"MIT"
] | 1
|
2021-11-19T09:56:41.000Z
|
2021-11-19T09:56:41.000Z
|
PythonToJavascript/converters/TupleConverter.py
|
stoogoff/python-to-javascript
|
4349b09b15ada544501e7091c7ff1574487e7598
|
[
"MIT"
] | 2
|
2022-02-25T23:11:27.000Z
|
2022-03-04T10:22:14.000Z
|
PythonToJavascript/converters/TupleConverter.py
|
stoogoff/python-to-javascript
|
4349b09b15ada544501e7091c7ff1574487e7598
|
[
"MIT"
] | 4
|
2021-05-06T19:03:19.000Z
|
2022-03-06T13:52:30.000Z
|
from Converter import Converter
from helpers import Treeverser, makeLeaf
class TupleConverter( Converter ):
    """Rewrites parenthesised tuple literals into JavaScript array syntax."""

    # lib2to3-style pattern matching a parenthesised expression list and
    # capturing the two parenthesis leaves for replacement.
    PATTERN = """
    atom< lpar='(' contents=testlist_gexp rpar=')' >
    """

    def gather( self, node ):
        """Collect every parse-tree node under *node* matching PATTERN."""
        tv = Treeverser( node )
        matches = tv.gatherMatches( self.PATTERN )
        return matches

    def processOne( self, match ):
        """Replace the captured parentheses with square brackets in place,
        preserving each leaf's original whitespace prefix."""
        match.lpar.replace( makeLeaf( "LSQB", "[", match.lpar.prefix ) )
        match.rpar.replace( makeLeaf( "RSQB", "]", match.rpar.prefix ) )
| 25.75
| 72
| 0.617476
|
4a0761e2e8140ae7126d8e7ad30c76df9d446fcb
| 2,879
|
py
|
Python
|
ote_sdk/ote_sdk/tests/configuration/test_configurable_parameters.py
|
vraoresearch/openvino_training_extensions
|
5cdade68a1ec25f694efddc40913fe2527e00e82
|
[
"Apache-2.0"
] | null | null | null |
ote_sdk/ote_sdk/tests/configuration/test_configurable_parameters.py
|
vraoresearch/openvino_training_extensions
|
5cdade68a1ec25f694efddc40913fe2527e00e82
|
[
"Apache-2.0"
] | null | null | null |
ote_sdk/ote_sdk/tests/configuration/test_configurable_parameters.py
|
vraoresearch/openvino_training_extensions
|
5cdade68a1ec25f694efddc40913fe2527e00e82
|
[
"Apache-2.0"
] | 1
|
2020-12-13T22:13:51.000Z
|
2020-12-13T22:13:51.000Z
|
# Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import pytest
from ote_sdk.configuration.configurable_parameters import ConfigurableParameters
from ote_sdk.configuration.enums.config_element_type import ConfigElementType
from ote_sdk.entities.id import ID
from ote_sdk.tests.constants.ote_sdk_components import OteSdkComponent
from ote_sdk.tests.constants.requirements import Requirements
@pytest.mark.components(OteSdkComponent.OTE_SDK)
class TestConfigurableParameters:
    @pytest.mark.priority_medium
    @pytest.mark.component
    @pytest.mark.reqids(Requirements.REQ_1)
    def test_configurable_parameters(self):
        """
        <b>Description:</b>
        Check "ConfigurableParameters" class object initialization

        <b>Input data:</b>
        "ConfigurableParameters" class object with specified initialization parameters

        <b>Expected results:</b>
        Test passes if attributes of initialized "ConfigurableParameters" class object are equal to expected
        """

        # Helper: assert every public attribute of the instance matches the
        # expected values (type and groups are fixed by the class itself).
        def check_configurable_parameters_attributes(
            configurable_parameters: ConfigurableParameters,
            expected_header: str,
            expected_description: str,
            expected_id: ID,
            expected_visible_in_ui: bool,
        ):
            assert configurable_parameters.header == expected_header
            assert configurable_parameters.description == expected_description
            assert (
                configurable_parameters.type
                == ConfigElementType.CONFIGURABLE_PARAMETERS
            )
            assert configurable_parameters.groups == []
            assert configurable_parameters.id == expected_id
            assert configurable_parameters.visible_in_ui == expected_visible_in_ui

        header = "Test Header"
        # Checking "ConfigurableParameters" initialized with default optional parameters
        check_configurable_parameters_attributes(
            configurable_parameters=ConfigurableParameters(header=header),
            expected_header=header,
            expected_description="Default parameter group description",
            expected_id=ID(""),
            expected_visible_in_ui=True,
        )
        # Checking "ConfigurableParameters" initialized with specified optional parameters
        description = "Test Description"
        config_id = ID("Test ID")
        visible_in_ui = False
        check_configurable_parameters_attributes(
            configurable_parameters=ConfigurableParameters(
                header=header,
                description=description,
                id=config_id,
                visible_in_ui=visible_in_ui,
            ),
            expected_header=header,
            expected_description=description,
            expected_id=config_id,
            expected_visible_in_ui=visible_in_ui,
        )
| 39.438356
| 108
| 0.689823
|
4a07627a1233c7cb69827e010b64b4cde684456a
| 3,235
|
py
|
Python
|
profiles_project/profiles_project/settings.py
|
chaturvedishruti/profiles-rest-api
|
9518fd0fffc160c7c75cec204d2e10edd2a3af6f
|
[
"MIT"
] | null | null | null |
profiles_project/profiles_project/settings.py
|
chaturvedishruti/profiles-rest-api
|
9518fd0fffc160c7c75cec204d2e10edd2a3af6f
|
[
"MIT"
] | 6
|
2020-06-06T01:52:34.000Z
|
2022-02-10T14:40:03.000Z
|
profiles_project/profiles_project/settings.py
|
chaturvedishruti/profiles-rest-api
|
9518fd0fffc160c7c75cec204d2e10edd2a3af6f
|
[
"MIT"
] | null | null | null |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w_yl@hcjtl$r3nl^(5x)81b!!-_y82c&e1biv)_nq!=5-e7nlz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL= 'profiles_app.UserProfile'
| 25.674603
| 91
| 0.699845
|
4a07629cfd5b59d9602b1a2e7b9b877696f6dade
| 32,823
|
py
|
Python
|
sympy/printing/tests/test_fcode.py
|
JMSS-Unknown/sympy
|
cd98ba006b5c6d6a6d072eafa28ea6d0ebdaf0e7
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/printing/tests/test_fcode.py
|
JMSS-Unknown/sympy
|
cd98ba006b5c6d6a6d072eafa28ea6d0ebdaf0e7
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/printing/tests/test_fcode.py
|
JMSS-Unknown/sympy
|
cd98ba006b5c6d6a6d072eafa28ea6d0ebdaf0e7
|
[
"BSD-3-Clause"
] | 1
|
2018-10-21T06:32:46.000Z
|
2018-10-21T06:32:46.000Z
|
from sympy import (sin, cos, atan2, log, exp, gamma, conjugate, sqrt,
factorial, Integral, Piecewise, Add, diff, symbols, S,
Float, Dummy, Eq, Range, Catalan, EulerGamma, E,
GoldenRatio, I, pi, Function, Rational, Integer, Lambda,
sign, Mod)
from sympy.codegen import For, Assignment, aug_assign
from sympy.codegen.ast import Declaration, Type, Variable, float32, float64, value_const, real, bool_, While
from sympy.core.relational import Relational
from sympy.logic.boolalg import And, Or, Not, Equivalent, Xor
from sympy.printing.fcode import fcode, FCodePrinter
from sympy.tensor import IndexedBase, Idx
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.pytest import raises
from sympy.core.compatibility import range
from sympy.matrices import Matrix, MatrixSymbol
def test_printmethod():
    """A ``_fcode`` method defined on a Function subclass overrides printing."""
    x = symbols('x')
    class nint(Function):
        def _fcode(self, printer):
            return "nint(%s)" % printer._print(self.args[0])
    assert fcode(nint(x)) == "      nint(x)"
def test_fcode_sign(): #issue 12267
    """sign() prints as merge/dsign/isign per argument type; F77 unsupported."""
    x=symbols('x')
    y=symbols('y', integer=True)
    z=symbols('z', complex=True)
    assert fcode(sign(x), standard=95, source_format='free') == "merge(0d0, dsign(1d0, x), x == 0d0)"
    assert fcode(sign(y), standard=95, source_format='free') == "merge(0, isign(1, y), y == 0)"
    assert fcode(sign(z), standard=95, source_format='free') == "merge(cmplx(0d0, 0d0), z/abs(z), abs(z) == 0d0)"
    # merge() requires Fortran 95+, so the default (F77) standard must raise.
    raises(NotImplementedError, lambda: fcode(sign(x)))
def test_fcode_Pow():
    """Powers print with ``**``; rational/float exponents become sqrt or d0
    literals; integer symbols under sqrt are promoted via dble()."""
    x, y = symbols('x,y')
    n = symbols('n', integer=True)
    assert fcode(x**3) == "      x**3"
    assert fcode(x**(y**3)) == "      x**(y**3)"
    assert fcode(1/(sin(x)*3.5)**(x - y**x)/(x**2 + y)) == \
        "      (3.5d0*sin(x))**(-x + y**x)/(x**2 + y)"
    assert fcode(sqrt(x)) == '      sqrt(x)'
    # Integer argument must be converted to double before sqrt.
    assert fcode(sqrt(n)) == '      sqrt(dble(n))'
    # A float exponent of exactly 0.5 is recognised as a square root.
    assert fcode(x**0.5) == '      sqrt(x)'
    assert fcode(sqrt(10)) == '      sqrt(10.0d0)'
    assert fcode(x**-1.0) == '      1d0/x'
    assert fcode(x**-2.0, 'y', source_format='free') == 'y = x**(-2.0d0)'  # 2823
    assert fcode(x**Rational(3, 7)) == '      x**(3.0d0/7.0d0)'
def test_fcode_Rational():
    """Rationals print as double-precision divisions (``3.0d0/7.0d0``)."""
    x = symbols('x')
    assert fcode(Rational(3, 7)) == "      3.0d0/7.0d0"
    # A rational that reduces to an integer prints as a plain integer.
    assert fcode(Rational(18, 9)) == "      2"
    assert fcode(Rational(3, -7)) == "      -3.0d0/7.0d0"
    assert fcode(Rational(-3, -7)) == "      3.0d0/7.0d0"
    assert fcode(x + Rational(3, 7)) == "      x + 3.0d0/7.0d0"
    assert fcode(Rational(3, 7)*x) == "      (3.0d0/7.0d0)*x"
def test_fcode_Integer():
    """Integers print verbatim (fixed-form 6-column statement field)."""
    assert fcode(Integer(67)) == "      67"
    assert fcode(Integer(-1)) == "      -1"
def test_fcode_Float():
    """Floats print with the Fortran double-precision ``d`` exponent marker."""
    assert fcode(Float(42.0)) == "      42.0000000000000d0"
    assert fcode(Float(-1e20)) == "      -1.00000000000000d+20"
def test_fcode_functions():
    """Function printing; ``Mod`` needs the modulo intrinsic (Fortran 90+)."""
    x, y = symbols('x,y')
    assert fcode(sin(x) ** cos(y)) == "      sin(x)**cos(y)"
    # modulo() does not exist in FORTRAN 66/77, so those standards must raise.
    raises(NotImplementedError, lambda: fcode(Mod(x, y), standard=66))
    raises(NotImplementedError, lambda: fcode(x % y, standard=66))
    raises(NotImplementedError, lambda: fcode(Mod(x, y), standard=77))
    raises(NotImplementedError, lambda: fcode(x % y, standard=77))
    for standard in [90, 95, 2003, 2008]:
        assert fcode(Mod(x, y), standard=standard) == "      modulo(x, y)"
        assert fcode(x % y, standard=standard) == "      modulo(x, y)"
def test_case():
ob = FCodePrinter()
x,x_,x__,y,X,X_,Y = symbols('x,x_,x__,y,X,X_,Y')
assert fcode(exp(x_) + sin(x*y) + cos(X*Y)) == \
' exp(x_) + sin(x*y) + cos(X__*Y_)'
assert fcode(exp(x__) + 2*x*Y*X_**Rational(7, 2)) == \
' 2*X_**(7.0d0/2.0d0)*Y*x + exp(x__)'
assert fcode(exp(x_) + sin(x*y) + cos(X*Y), name_mangling=False) == \
' exp(x_) + sin(x*y) + cos(X*Y)'
assert fcode(x - cos(X), name_mangling=False) == ' x - cos(X)'
assert ob.doprint(X*sin(x) + x_, assign_to='me') == ' me = X*sin(x_) + x__'
assert ob.doprint(X*sin(x), assign_to='mu') == ' mu = X*sin(x_)'
assert ob.doprint(x_, assign_to='ad') == ' ad = x__'
n, m = symbols('n,m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
I = Idx('I', n)
assert fcode(A[i, I]*x[I], assign_to=y[i], source_format='free') == (
"do i = 1, m\n"
" y(i) = 0\n"
"end do\n"
"do i = 1, m\n"
" do I_ = 1, n\n"
" y(i) = A(i, I_)*x(I_) + y(i)\n"
" end do\n"
"end do" )
#issue 6814
def test_fcode_functions_with_integers():
    """Integer arguments of log/exp are evaluated to 17-digit d0 literals."""
    x = symbols('x')
    log10_17 = log(10).evalf(17)
    loglog10_17 = '0.8340324452479558d0'
    assert fcode(x * log(10)) == "      x*%sd0" % log10_17
    assert fcode(x * log(S(10))) == "      x*%sd0" % log10_17
    assert fcode(log(S(10))) == "      %sd0" % log10_17
    assert fcode(exp(10)) == "      %sd0" % exp(10).evalf(17)
    # Nested log of an integer also evaluates to a float literal.
    assert fcode(x * log(log(10))) == "      x*%s" % loglog10_17
    assert fcode(x * log(log(S(10)))) == "      x*%s" % loglog10_17
def test_fcode_NumberSymbol():
    """NumberSymbols emit a ``parameter`` declaration plus the symbol name;
    with human=False the declaration is returned as a (constants, not_supported,
    code) triple instead."""
    prec = 17
    p = FCodePrinter()
    assert fcode(Catalan) == '      parameter (Catalan = %sd0)\n      Catalan' % Catalan.evalf(prec)
    assert fcode(EulerGamma) == '      parameter (EulerGamma = %sd0)\n      EulerGamma' % EulerGamma.evalf(prec)
    assert fcode(E) == '      parameter (E = %sd0)\n      E' % E.evalf(prec)
    assert fcode(GoldenRatio) == '      parameter (GoldenRatio = %sd0)\n      GoldenRatio' % GoldenRatio.evalf(prec)
    assert fcode(pi) == '      parameter (pi = %sd0)\n      pi' % pi.evalf(prec)
    assert fcode(
        pi, precision=5) == '      parameter (pi = %sd0)\n      pi' % pi.evalf(5)
    assert fcode(Catalan, human=False) == (set(
        [(Catalan, p._print(Catalan.evalf(prec)))]), set([]), '      Catalan')
    assert fcode(EulerGamma, human=False) == (set([(EulerGamma, p._print(
        EulerGamma.evalf(prec)))]), set([]), '      EulerGamma')
    assert fcode(E, human=False) == (
        set([(E, p._print(E.evalf(prec)))]), set([]), '      E')
    assert fcode(GoldenRatio, human=False) == (set([(GoldenRatio, p._print(
        GoldenRatio.evalf(prec)))]), set([]), '      GoldenRatio')
    assert fcode(pi, human=False) == (
        set([(pi, p._print(pi.evalf(prec)))]), set([]), '      pi')
    assert fcode(pi, precision=5, human=False) == (
        set([(pi, p._print(pi.evalf(5)))]), set([]), '      pi')
def test_fcode_complex():
    """Complex constants print via the cmplx() intrinsic."""
    assert fcode(I) == "      cmplx(0,1)"
    x = symbols('x')
    assert fcode(4*I) == "      cmplx(0,4)"
    assert fcode(3 + 4*I) == "      cmplx(3,4)"
    assert fcode(3 + 4*I + x) == "      cmplx(3,4) + x"
    assert fcode(I*x) == "      cmplx(0,1)*x"
    assert fcode(3 + 4*I - x) == "      cmplx(3,4) - x"
    # Even an explicitly imaginary symbol keeps cmplx() only on literal I.
    x = symbols('x', imaginary=True)
    assert fcode(5*x) == "      5*x"
    assert fcode(I*x) == "      cmplx(0,1)*x"
    assert fcode(3 + x) == "      x + 3"
def test_implicit():
    """Known intrinsics map directly; conjugate becomes conjg."""
    x, y = symbols('x,y')
    assert fcode(sin(x)) == "      sin(x)"
    assert fcode(atan2(x, y)) == "      atan2(x, y)"
    assert fcode(conjugate(x)) == "      conjg(x)"
def test_not_fortran():
    """Unsupported expressions are emitted with a 'Not supported' C comment."""
    x = symbols('x')
    g = Function('g')
    gamma_f = fcode(gamma(x))
    assert gamma_f == "C     Not supported in Fortran:\nC     gamma\n      gamma(x)"
    assert fcode(Integral(sin(x))) == "C     Not supported in Fortran:\nC     Integral\n      Integral(sin(x), x)"
    assert fcode(g(x)) == "C     Not supported in Fortran:\nC     g\n      g(x)"
def test_user_functions():
    """The user_functions mapping renames known and unknown functions."""
    x = symbols('x')
    assert fcode(sin(x), user_functions={"sin": "zsin"}) == "      zsin(x)"
    # Also works for functions fcode would otherwise mark unsupported.
    assert fcode(
        gamma(x), user_functions={"gamma": "mygamma"}) == "      mygamma(x)"
    g = Function('g')
    assert fcode(g(x), user_functions={"g": "great"}) == "      great(x)"
    n = symbols('n', integer=True)
    assert fcode(
        factorial(n), user_functions={"factorial": "fct"}) == "      fct(n)"
def test_inline_function():
    """implemented_function bodies are inlined at the call site; indexed
    arguments expand into a do-loop when assigned to an Indexed target."""
    x = symbols('x')
    g = implemented_function('g', Lambda(x, 2*x))
    assert fcode(g(x)) == "      2*x"
    g = implemented_function('g', Lambda(x, 2*pi/x))
    assert fcode(g(x)) == (
        "      parameter (pi = %sd0)\n"
        "      2*pi/x"
    ) % pi.evalf(17)
    A = IndexedBase('A')
    i = Idx('i', symbols('n', integer=True))
    g = implemented_function('g', Lambda(x, x*(1 + x)*(2 + x)))
    assert fcode(g(A[i]), assign_to=A[i]) == (
        "      do i = 1, n\n"
        "         A(i) = (A(i) + 1)*(A(i) + 2)*A(i)\n"
        "      end do"
    )
def test_assign_to():
    """assign_to turns the expression into an assignment statement."""
    x = symbols('x')
    assert fcode(sin(x), assign_to="s") == "      s = sin(x)"
def test_line_wrapping():
    """Long fixed-form statements wrap at column 72 with '@' continuations."""
    x, y = symbols('x,y')
    assert fcode(((x + y)**10).expand(), assign_to="var") == (
        "      var = x**10 + 10*x**9*y + 45*x**8*y**2 + 120*x**7*y**3 + 210*x**6*\n"
        "     @ y**4 + 252*x**5*y**5 + 210*x**4*y**6 + 120*x**3*y**7 + 45*x**2*y\n"
        "     @ **8 + 10*x*y**9 + y**10"
    )
    e = [x**i for i in range(11)]
    assert fcode(Add(*e)) == (
        "      x**10 + x**9 + x**8 + x**7 + x**6 + x**5 + x**4 + x**3 + x**2 + x\n"
        "     @ + 1"
    )
def test_fcode_precedence():
    """Logical operators print without redundant parentheses around relations."""
    x, y = symbols("x y")
    assert fcode(And(x < y, y < x + 1), source_format="free") == \
        "x < y .and. y < x + 1"
    assert fcode(Or(x < y, y < x + 1), source_format="free") == \
        "x < y .or. y < x + 1"
    assert fcode(Xor(x < y, y < x + 1, evaluate=False),
        source_format="free") == "x < y .neqv. y < x + 1"
    assert fcode(Equivalent(x < y, y < x + 1), source_format="free") == \
        "x < y .eqv. y < x + 1"
def test_fcode_Logical():
x, y, z = symbols("x y z")
# unary Not
assert fcode(Not(x), source_format="free") == ".not. x"
# binary And
assert fcode(And(x, y), source_format="free") == "x .and. y"
assert fcode(And(x, Not(y)), source_format="free") == "x .and. .not. y"
assert fcode(And(Not(x), y), source_format="free") == "y .and. .not. x"
assert fcode(And(Not(x), Not(y)), source_format="free") == \
".not. x .and. .not. y"
assert fcode(Not(And(x, y), evaluate=False), source_format="free") == \
".not. (x .and. y)"
# binary Or
assert fcode(Or(x, y), source_format="free") == "x .or. y"
assert fcode(Or(x, Not(y)), source_format="free") == "x .or. .not. y"
assert fcode(Or(Not(x), y), source_format="free") == "y .or. .not. x"
assert fcode(Or(Not(x), Not(y)), source_format="free") == \
".not. x .or. .not. y"
assert fcode(Not(Or(x, y), evaluate=False), source_format="free") == \
".not. (x .or. y)"
# mixed And/Or
assert fcode(And(Or(y, z), x), source_format="free") == "x .and. (y .or. z)"
assert fcode(And(Or(z, x), y), source_format="free") == "y .and. (x .or. z)"
assert fcode(And(Or(x, y), z), source_format="free") == "z .and. (x .or. y)"
assert fcode(Or(And(y, z), x), source_format="free") == "x .or. y .and. z"
assert fcode(Or(And(z, x), y), source_format="free") == "y .or. x .and. z"
assert fcode(Or(And(x, y), z), source_format="free") == "z .or. x .and. y"
# trinary And
assert fcode(And(x, y, z), source_format="free") == "x .and. y .and. z"
assert fcode(And(x, y, Not(z)), source_format="free") == \
"x .and. y .and. .not. z"
assert fcode(And(x, Not(y), z), source_format="free") == \
"x .and. z .and. .not. y"
assert fcode(And(Not(x), y, z), source_format="free") == \
"y .and. z .and. .not. x"
assert fcode(Not(And(x, y, z), evaluate=False), source_format="free") == \
".not. (x .and. y .and. z)"
# trinary Or
assert fcode(Or(x, y, z), source_format="free") == "x .or. y .or. z"
assert fcode(Or(x, y, Not(z)), source_format="free") == \
"x .or. y .or. .not. z"
assert fcode(Or(x, Not(y), z), source_format="free") == \
"x .or. z .or. .not. y"
assert fcode(Or(Not(x), y, z), source_format="free") == \
"y .or. z .or. .not. x"
assert fcode(Not(Or(x, y, z), evaluate=False), source_format="free") == \
".not. (x .or. y .or. z)"
def test_fcode_Xlogical():
x, y, z = symbols("x y z")
# binary Xor
assert fcode(Xor(x, y, evaluate=False), source_format="free") == \
"x .neqv. y"
assert fcode(Xor(x, Not(y), evaluate=False), source_format="free") == \
"x .neqv. .not. y"
assert fcode(Xor(Not(x), y, evaluate=False), source_format="free") == \
"y .neqv. .not. x"
assert fcode(Xor(Not(x), Not(y), evaluate=False),
source_format="free") == ".not. x .neqv. .not. y"
assert fcode(Not(Xor(x, y, evaluate=False), evaluate=False),
source_format="free") == ".not. (x .neqv. y)"
# binary Equivalent
assert fcode(Equivalent(x, y), source_format="free") == "x .eqv. y"
assert fcode(Equivalent(x, Not(y)), source_format="free") == \
"x .eqv. .not. y"
assert fcode(Equivalent(Not(x), y), source_format="free") == \
"y .eqv. .not. x"
assert fcode(Equivalent(Not(x), Not(y)), source_format="free") == \
".not. x .eqv. .not. y"
assert fcode(Not(Equivalent(x, y), evaluate=False),
source_format="free") == ".not. (x .eqv. y)"
# mixed And/Equivalent
assert fcode(Equivalent(And(y, z), x), source_format="free") == \
"x .eqv. y .and. z"
assert fcode(Equivalent(And(z, x), y), source_format="free") == \
"y .eqv. x .and. z"
assert fcode(Equivalent(And(x, y), z), source_format="free") == \
"z .eqv. x .and. y"
assert fcode(And(Equivalent(y, z), x), source_format="free") == \
"x .and. (y .eqv. z)"
assert fcode(And(Equivalent(z, x), y), source_format="free") == \
"y .and. (x .eqv. z)"
assert fcode(And(Equivalent(x, y), z), source_format="free") == \
"z .and. (x .eqv. y)"
# mixed Or/Equivalent
assert fcode(Equivalent(Or(y, z), x), source_format="free") == \
"x .eqv. y .or. z"
assert fcode(Equivalent(Or(z, x), y), source_format="free") == \
"y .eqv. x .or. z"
assert fcode(Equivalent(Or(x, y), z), source_format="free") == \
"z .eqv. x .or. y"
assert fcode(Or(Equivalent(y, z), x), source_format="free") == \
"x .or. (y .eqv. z)"
assert fcode(Or(Equivalent(z, x), y), source_format="free") == \
"y .or. (x .eqv. z)"
assert fcode(Or(Equivalent(x, y), z), source_format="free") == \
"z .or. (x .eqv. y)"
# mixed Xor/Equivalent
assert fcode(Equivalent(Xor(y, z, evaluate=False), x),
source_format="free") == "x .eqv. (y .neqv. z)"
assert fcode(Equivalent(Xor(z, x, evaluate=False), y),
source_format="free") == "y .eqv. (x .neqv. z)"
assert fcode(Equivalent(Xor(x, y, evaluate=False), z),
source_format="free") == "z .eqv. (x .neqv. y)"
assert fcode(Xor(Equivalent(y, z), x, evaluate=False),
source_format="free") == "x .neqv. (y .eqv. z)"
assert fcode(Xor(Equivalent(z, x), y, evaluate=False),
source_format="free") == "y .neqv. (x .eqv. z)"
assert fcode(Xor(Equivalent(x, y), z, evaluate=False),
source_format="free") == "z .neqv. (x .eqv. y)"
# mixed And/Xor
assert fcode(Xor(And(y, z), x, evaluate=False), source_format="free") == \
"x .neqv. y .and. z"
assert fcode(Xor(And(z, x), y, evaluate=False), source_format="free") == \
"y .neqv. x .and. z"
assert fcode(Xor(And(x, y), z, evaluate=False), source_format="free") == \
"z .neqv. x .and. y"
assert fcode(And(Xor(y, z, evaluate=False), x), source_format="free") == \
"x .and. (y .neqv. z)"
assert fcode(And(Xor(z, x, evaluate=False), y), source_format="free") == \
"y .and. (x .neqv. z)"
assert fcode(And(Xor(x, y, evaluate=False), z), source_format="free") == \
"z .and. (x .neqv. y)"
# mixed Or/Xor
assert fcode(Xor(Or(y, z), x, evaluate=False), source_format="free") == \
"x .neqv. y .or. z"
assert fcode(Xor(Or(z, x), y, evaluate=False), source_format="free") == \
"y .neqv. x .or. z"
assert fcode(Xor(Or(x, y), z, evaluate=False), source_format="free") == \
"z .neqv. x .or. y"
assert fcode(Or(Xor(y, z, evaluate=False), x), source_format="free") == \
"x .or. (y .neqv. z)"
assert fcode(Or(Xor(z, x, evaluate=False), y), source_format="free") == \
"y .or. (x .neqv. z)"
assert fcode(Or(Xor(x, y, evaluate=False), z), source_format="free") == \
"z .or. (x .neqv. y)"
# trinary Xor
assert fcode(Xor(x, y, z, evaluate=False), source_format="free") == \
"x .neqv. y .neqv. z"
assert fcode(Xor(x, y, Not(z), evaluate=False), source_format="free") == \
"x .neqv. y .neqv. .not. z"
assert fcode(Xor(x, Not(y), z, evaluate=False), source_format="free") == \
"x .neqv. z .neqv. .not. y"
assert fcode(Xor(Not(x), y, z, evaluate=False), source_format="free") == \
"y .neqv. z .neqv. .not. x"
def test_fcode_Relational():
    """Relational operators map to their Fortran free-form spellings."""
    x, y = symbols("x y")
    assert fcode(Relational(x, y, "=="), source_format="free") == "x == y"
    # Fortran spells inequality as /=.
    assert fcode(Relational(x, y, "!="), source_format="free") == "x /= y"
    assert fcode(Relational(x, y, ">="), source_format="free") == "x >= y"
    assert fcode(Relational(x, y, "<="), source_format="free") == "x <= y"
    assert fcode(Relational(x, y, ">"), source_format="free") == "x > y"
    assert fcode(Relational(x, y, "<"), source_format="free") == "x < y"
def test_fcode_Piecewise():
x = symbols('x')
expr = Piecewise((x, x < 1), (x**2, True))
# Check that inline conditional (merge) fails if standard isn't 95+
raises(NotImplementedError, lambda: fcode(expr))
code = fcode(expr, standard=95)
expected = " merge(x, x**2, x < 1)"
assert code == expected
assert fcode(Piecewise((x, x < 1), (x**2, True)), assign_to="var") == (
" if (x < 1) then\n"
" var = x\n"
" else\n"
" var = x**2\n"
" end if"
)
a = cos(x)/x
b = sin(x)/x
for i in range(10):
a = diff(a, x)
b = diff(b, x)
expected = (
" if (x < 0) then\n"
" weird_name = -cos(x)/x + 10*sin(x)/x**2 + 90*cos(x)/x**3 - 720*\n"
" @ sin(x)/x**4 - 5040*cos(x)/x**5 + 30240*sin(x)/x**6 + 151200*cos(x\n"
" @ )/x**7 - 604800*sin(x)/x**8 - 1814400*cos(x)/x**9 + 3628800*sin(x\n"
" @ )/x**10 + 3628800*cos(x)/x**11\n"
" else\n"
" weird_name = -sin(x)/x - 10*cos(x)/x**2 + 90*sin(x)/x**3 + 720*\n"
" @ cos(x)/x**4 - 5040*sin(x)/x**5 - 30240*cos(x)/x**6 + 151200*sin(x\n"
" @ )/x**7 + 604800*cos(x)/x**8 - 1814400*sin(x)/x**9 - 3628800*cos(x\n"
" @ )/x**10 + 3628800*sin(x)/x**11\n"
" end if"
)
code = fcode(Piecewise((a, x < 0), (b, True)), assign_to="weird_name")
assert code == expected
code = fcode(Piecewise((x, x < 1), (x**2, x > 1), (sin(x), True)), standard=95)
expected = " merge(x, merge(x**2, sin(x), x > 1), x < 1)"
assert code == expected
# Check that Piecewise without a True (default) condition error
expr = Piecewise((x, x < 1), (x**2, x > 1), (sin(x), x > 0))
raises(ValueError, lambda: fcode(expr))
def test_wrap_fortran():
# "########################################################################"
printer = FCodePrinter()
lines = [
"C This is a long comment on a single line that must be wrapped properly to produce nice output",
" this = is + a + long + and + nasty + fortran + statement + that * must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that * must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that * must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement(that)/must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement(that)/must + be + wrapped + properly",
]
wrapped_lines = printer._wrap_fortran(lines)
expected_lines = [
"C This is a long comment on a single line that must be wrapped",
"C properly to produce nice output",
" this = is + a + long + and + nasty + fortran + statement + that *",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that *",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that",
" @ * must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that",
" @ *must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement +",
" @ that*must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that",
" @ **must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that",
" @ **must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement +",
" @ that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement(that)/",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement(that)",
" @ /must + be + wrapped + properly",
]
for line in wrapped_lines:
assert len(line) <= 72
for w, e in zip(wrapped_lines, expected_lines):
assert w == e
assert len(wrapped_lines) == len(expected_lines)
def test_wrap_fortran_keep_d0():
printer = FCodePrinter()
lines = [
' this_variable_is_very_long_because_we_try_to_test_line_break=1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break = 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break = 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break = 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break = 10.0d0'
]
expected = [
' this_variable_is_very_long_because_we_try_to_test_line_break=1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =',
' @ 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =',
' @ 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =',
' @ 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =',
' @ 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =',
' @ 10.0d0'
]
assert printer._wrap_fortran(lines) == expected
def test_settings():
    """Unknown keyword settings passed to fcode raise TypeError."""
    raises(TypeError, lambda: fcode(S(4), method="garbage"))
def test_free_form_code_line():
    """Free source format emits no leading 6-space statement field."""
    x, y = symbols('x,y')
    assert fcode(cos(x) + sin(y), source_format='free') == "sin(y) + cos(x)"
def test_free_form_continuation_line():
    """Free-form wrapping uses trailing '&' continuation markers."""
    x, y = symbols('x,y')
    result = fcode(((cos(x) + sin(y))**(7)).expand(), source_format='free')
    expected = (
        'sin(y)**7 + 7*sin(y)**6*cos(x) + 21*sin(y)**5*cos(x)**2 + 35*sin(y)**4* &\n'
        '      cos(x)**3 + 35*sin(y)**3*cos(x)**4 + 21*sin(y)**2*cos(x)**5 + 7* &\n'
        '      sin(y)*cos(x)**6 + cos(x)**7'
    )
    assert result == expected
def test_free_form_comment_line():
    """Long '!' comments are wrapped onto continuation comment lines."""
    printer = FCodePrinter({'source_format': 'free'})
    lines = [ "! This is a long comment on a single line that must be wrapped properly to produce nice output"]
    expected = [
        '! This is a long comment on a single line that must be wrapped properly',
        '! to produce nice output']
    assert printer._wrap_fortran(lines) == expected
def test_loops():
    """Tensor contraction over a repeated index expands to nested do-loops
    with a zero-initialisation loop first."""
    n, m = symbols('n,m', integer=True)
    A = IndexedBase('A')
    x = IndexedBase('x')
    y = IndexedBase('y')
    i = Idx('i', m)
    j = Idx('j', n)
    expected = (
        'do i = 1, m\n'
        '   y(i) = 0\n'
        'end do\n'
        'do i = 1, m\n'
        '   do j = 1, n\n'
        '      y(i) = %(rhs)s\n'
        '   end do\n'
        'end do'
    )
    code = fcode(A[i, j]*x[j], assign_to=y[i], source_format='free')
    # The summand ordering is not deterministic, so accept any equivalent form.
    assert (code == expected % {'rhs': 'y(i) + A(i, j)*x(j)'} or
            code == expected % {'rhs': 'y(i) + x(j)*A(i, j)'} or
            code == expected % {'rhs': 'x(j)*A(i, j) + y(i)'} or
            code == expected % {'rhs': 'A(i, j)*x(j) + y(i)'})
def test_dummy_loops():
    """Dummy symbols keep their dummy_index suffix in generated loop names."""
    i, m = symbols('i m', integer=True, cls=Dummy)
    x = IndexedBase('x')
    y = IndexedBase('y')
    i = Idx(i, m)
    expected = (
        'do i_%(icount)i = 1, m_%(mcount)i\n'
        '   y(i_%(icount)i) = x(i_%(icount)i)\n'
        'end do'
    ) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index}
    code = fcode(x[i], assign_to=y[i], source_format='free')
    assert code == expected
def test_fcode_Indexed_without_looking_for_contraction():
    """With contract=False an Indexed assignment prints without loop expansion."""
    len_y = 5
    y = IndexedBase('y', shape=(len_y,))
    x = IndexedBase('x', shape=(len_y,))
    Dy = IndexedBase('Dy', shape=(len_y-1,))
    i = Idx('i', len_y-1)
    e=Eq(Dy[i], (y[i+1]-y[i])/(x[i+1]-x[i]))
    code0 = fcode(e.rhs, assign_to=e.lhs, contract=False)
    assert code0.endswith('Dy(i) = (y(i + 1) - y(i))/(x(i + 1) - x(i))')
def test_derived_classes():
    """Subclassing FCodePrinter with copied default settings keeps behavior."""
    class MyFancyFCodePrinter(FCodePrinter):
        _default_settings = FCodePrinter._default_settings.copy()
    printer = MyFancyFCodePrinter()
    x = symbols('x')
    assert printer.doprint(sin(x), "bork") == "      bork = sin(x)"
def test_indent():
codelines = (
'subroutine test(a)\n'
'integer :: a, i, j\n'
'\n'
'do\n'
'do \n'
'do j = 1, 5\n'
'if (a>b) then\n'
'if(b>0) then\n'
'a = 3\n'
'donot_indent_me = 2\n'
'do_not_indent_me_either = 2\n'
'ifIam_indented_something_went_wrong = 2\n'
'if_I_am_indented_something_went_wrong = 2\n'
'end should not be unindented here\n'
'end if\n'
'endif\n'
'end do\n'
'end do\n'
'enddo\n'
'end subroutine\n'
'\n'
'subroutine test2(a)\n'
'integer :: a\n'
'do\n'
'a = a + 1\n'
'end do \n'
'end subroutine\n'
)
expected = (
'subroutine test(a)\n'
'integer :: a, i, j\n'
'\n'
'do\n'
' do \n'
' do j = 1, 5\n'
' if (a>b) then\n'
' if(b>0) then\n'
' a = 3\n'
' donot_indent_me = 2\n'
' do_not_indent_me_either = 2\n'
' ifIam_indented_something_went_wrong = 2\n'
' if_I_am_indented_something_went_wrong = 2\n'
' end should not be unindented here\n'
' end if\n'
' endif\n'
' end do\n'
' end do\n'
'enddo\n'
'end subroutine\n'
'\n'
'subroutine test2(a)\n'
'integer :: a\n'
'do\n'
' a = a + 1\n'
'end do \n'
'end subroutine\n'
)
p = FCodePrinter({'source_format': 'free'})
result = p.indent_code(codelines)
assert result == expected
def test_Matrix_printing():
x, y, z = symbols('x,y,z')
# Test returning a Matrix
mat = Matrix([x*y, Piecewise((2 + x, y>0), (y, True)), sin(z)])
A = MatrixSymbol('A', 3, 1)
assert fcode(mat, A) == (
" A(1, 1) = x*y\n"
" if (y > 0) then\n"
" A(2, 1) = x + 2\n"
" else\n"
" A(2, 1) = y\n"
" end if\n"
" A(3, 1) = sin(z)")
# Test using MatrixElements in expressions
expr = Piecewise((2*A[2, 0], x > 0), (A[2, 0], True)) + sin(A[1, 0]) + A[0, 0]
assert fcode(expr, standard=95) == (
" merge(2*A(3, 1), A(3, 1), x > 0) + sin(A(2, 1)) + A(1, 1)")
# Test using MatrixElements in a Matrix
q = MatrixSymbol('q', 5, 1)
M = MatrixSymbol('M', 3, 3)
m = Matrix([[sin(q[1,0]), 0, cos(q[2,0])],
[q[1,0] + q[2,0], q[3, 0], 5],
[2*q[4, 0]/q[1,0], sqrt(q[0,0]) + 4, 0]])
assert fcode(m, M) == (
" M(1, 1) = sin(q(2, 1))\n"
" M(2, 1) = q(2, 1) + q(3, 1)\n"
" M(3, 1) = 2*q(5, 1)/q(2, 1)\n"
" M(1, 2) = 0\n"
" M(2, 2) = q(4, 1)\n"
" M(3, 2) = sqrt(q(1, 1)) + 4\n"
" M(1, 3) = cos(q(3, 1))\n"
" M(2, 3) = 5\n"
" M(3, 3) = 0")
def test_fcode_For():
    """A codegen For over a stepped Range prints as a strided do-loop."""
    x, y = symbols('x y')
    f = For(x, Range(0, 10, 2), [Assignment(y, x * y)])
    sol = fcode(f)
    assert sol == ("      do x = 0, 10, 2\n"
                   "         y = x*y\n"
                   "      end do")
def test_fcode_Declaration():
    """Variable declarations map types to integer*4/real*4/real*8/logical,
    with value_const rendered as a parameter initialisation."""
    def check(expr, ref, **kwargs):
        # All declarations are checked in free-form Fortran 95.
        assert fcode(expr, standard=95, source_format='free', **kwargs) == ref
    i = symbols('i', integer=True)
    var1 = Variable.deduced(i)
    dcl1 = Declaration(var1)
    check(dcl1, "integer*4 :: i")
    x, y = symbols('x y')
    var2 = Variable(x, float32, value=42, attrs={value_const})
    dcl2b = Declaration(var2)
    check(dcl2b, 'real*4, parameter :: x = 42')
    var3 = Variable(y, type=bool_)
    dcl3 = Declaration(var3)
    check(dcl3, 'logical :: y')
    check(float32, "real*4")
    check(float64, "real*8")
    # type_aliases controls what abstract 'real' resolves to.
    check(real, "real*4", type_aliases={real: float32})
    check(real, "real*8", type_aliases={real: float64})
def test_MatrixElement_printing():
    """MatrixElement indices convert from 0-based to Fortran 1-based (#11821)."""
    # test cases for issue #11821
    A = MatrixSymbol("A", 1, 3)
    B = MatrixSymbol("B", 1, 3)
    C = MatrixSymbol("C", 1, 3)
    assert(fcode(A[0, 0]) == "      A(1, 1)")
    assert(fcode(3 * A[0, 0]) == "      3*A(1, 1)")
    F = C[0, 0].subs(C, A - B)
    assert(fcode(F) == "      (-B + A)(1, 1)")
def test_aug_assign():
    """Augmented assignment expands to the explicit x = x + 1 form."""
    x = symbols('x')
    assert fcode(aug_assign(x, '+', 1), source_format='free') == 'x = x + 1'
def test_While():
    """A codegen While node prints as a do while loop."""
    x = symbols('x')
    assert fcode(While(abs(x) > 1, [aug_assign(x, '-', 1)]), source_format='free') == (
        'do while (abs(x) > 1)\n'
        '   x = x - 1\n'
        'end do'
    )
| 41.91954
| 116
| 0.517016
|
4a07640f818648cc11e8d8744af854c1bc0074a9
| 5,700
|
py
|
Python
|
docs/source/conf.py
|
0k/sact.epoch
|
6b0a47068992ff6a73f0f1da36090affad7c8be0
|
[
"BSD-3-Clause"
] | null | null | null |
docs/source/conf.py
|
0k/sact.epoch
|
6b0a47068992ff6a73f0f1da36090affad7c8be0
|
[
"BSD-3-Clause"
] | null | null | null |
docs/source/conf.py
|
0k/sact.epoch
|
6b0a47068992ff6a73f0f1da36090affad7c8be0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# NSS documentation build configuration file, created by sphinx-quickstart.
# This file is execfile()d with the current directory set to its containing dir.
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc','sphinx.ext.doctest','sphinx.ext.coverage',]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'sact.epoch'
copyright = '2011, securactive'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
# The short X.Y version.
version = '%%short-version%%'
# The full version, including alpha/beta/rc tags.
release = '%%version%%'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = ['overview']
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'NSSdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'NSS.tex', 'NSS Documentation',
'securactive', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| 32.758621
| 82
| 0.730877
|
4a0764af5766883b021bdbc700bc33355c2891dc
| 6,688
|
py
|
Python
|
eod/data/datasets/transforms.py
|
scott-mao/EOD
|
f10e64de86c0f356ebf5c7e923f4042eec4207b1
|
[
"Apache-2.0"
] | 1
|
2022-01-12T01:51:39.000Z
|
2022-01-12T01:51:39.000Z
|
eod/data/datasets/transforms.py
|
YZW-explorer/EOD
|
f10e64de86c0f356ebf5c7e923f4042eec4207b1
|
[
"Apache-2.0"
] | null | null | null |
eod/data/datasets/transforms.py
|
YZW-explorer/EOD
|
f10e64de86c0f356ebf5c7e923f4042eec4207b1
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import division
import torch
from abc import ABC, abstractmethod
from collections import defaultdict
import numpy as np
from torchvision.transforms import Compose, Normalize
from torchvision.transforms import functional as TF
from eod.utils.general.registry_factory import AUGMENTATION_REGISTRY
import copy
from ..data_utils import (
is_numpy_image,
is_pil_image,
is_tensor_image,
)
# Public API of this module: presence helpers for fields of the data dict,
# the augmentation base class, and the basic tensor transforms.
__all__ = [
    'has_image',
    'has_gt_bboxes',
    'has_gt_ignores',
    'has_gt_keyps',
    'has_gt_masks',
    'has_gt_semantic_seg',
    'check_fake_gt',
    'Augmentation',
    'ImageNormalize',
    'ImageToTensorInverse',
    'CustomImageToTensor'
]
def has_image(data):
    """Return True when *data* carries a non-None 'image' entry."""
    image = data.get('image')
    return image is not None
def has_gt_bboxes(data):
    """Return True when *data* carries a non-None 'gt_bboxes' entry."""
    boxes = data.get('gt_bboxes')
    return boxes is not None
def has_gt_ignores(data):
    """Return True when *data* carries a non-None 'gt_ignores' entry."""
    ignores = data.get('gt_ignores')
    return ignores is not None
def has_gt_keyps(data):
    """Return True when *data* carries a non-None 'gt_keyps' entry."""
    keyps = data.get('gt_keyps')
    return keyps is not None
def has_gt_masks(data):
    """Return True when *data* carries a non-None 'gt_masks' entry."""
    masks = data.get('gt_masks')
    return masks is not None
def has_gt_semantic_seg(data):
    """Return True when *data* carries a non-None 'gt_semantic_seg' entry."""
    seg = data.get('gt_semantic_seg')
    return seg is not None
def check_fake_gt(gts):
    """Return True when *gts* looks like an empty / placeholder ground truth.

    An empty array is fake; a single row whose element sum is <= 1 is also
    treated as a fake placeholder. ``None`` is NOT considered fake (matches
    the original contract: absence is handled elsewhere).
    """
    if gts is None:
        return False
    num_boxes = gts.shape[0]
    if num_boxes == 0:
        return True
    return num_boxes == 1 and gts[0].sum() <= 1
class Augmentation(ABC):
    """Base class for data augmentations.

    Subclasses implement ``augment``; ``__call__`` runs a format sanity check
    before and after the augmentation.
    """
    def __init__(self):
        super(Augmentation, self).__init__()
    def _sanity_check(self, data):
        """Validate the types of the fields present in *data*.

        Builds a field -> format-name mapping ('pil' / 'np' / 'tensor' /
        'numpy' / 'list').

        NOTE(review): the mapping is built but never returned, so the
        format-equality assertion in ``__call__`` compares None with None and
        is effectively a no-op — presumably this should ``return data_format``
        (not changed here because ImageToTensorInverse would then fail the
        check; confirm intent before fixing).

        Raises:
            TypeError: if the image or gt_semantic_seg type is unsupported.
        """
        data_format = defaultdict(str)
        if has_image(data):
            if is_pil_image(data.image):
                image_format = 'pil'
            elif is_numpy_image(data.image):
                image_format = 'np'
            elif is_tensor_image(data.image):
                image_format = 'tensor'
            else:
                raise TypeError('{} format is not supported for data augmentation function'.format(type(data.image)))
            data_format['image'] = image_format
        if has_gt_bboxes(data):
            assert torch.is_tensor(data.gt_bboxes)
            data_format['gt_bboxes'] = 'tensor'
        if has_gt_ignores(data):
            assert torch.is_tensor(data.gt_ignores)
            data_format['gt_ignores'] = 'tensor'
        if has_gt_keyps(data):
            assert torch.is_tensor(data.gt_keyps)
            data_format['gt_keyps'] = 'tensor'
        if has_gt_masks(data):
            assert isinstance(data.gt_masks, list)
            data_format['gt_masks'] = 'list'
        if has_gt_semantic_seg(data):
            if isinstance(data.gt_semantic_seg, np.ndarray):
                data_format['gt_semantic_seg'] = 'numpy'
            elif torch.is_tensor(data.gt_semantic_seg):
                data_format['gt_semantic_seg'] = 'tensor'
            else:
                raise TypeError('{} format is not supported for gt_semantic_seg'.format(type(data.gt_semantic_seg)))
    def _allow_format_change(self):
        # Subclasses that legitimately change the data format override this.
        return False
    def __call__(self, data):
        """Sanity-check, augment, sanity-check again, and return the result."""
        input_format = self._sanity_check(data)
        augmented_data = self.augment(data)
        output_format = self._sanity_check(augmented_data)
        if not self._allow_format_change():
            # NOTE(review): both sides are currently None (see _sanity_check).
            assert input_format == output_format, '{} vs {}'.format(input_format, output_format)
        return augmented_data
    @abstractmethod
    def augment(self, data):
        """Return the augmented version of *data*; implemented by subclasses."""
        raise NotImplementedError
@AUGMENTATION_REGISTRY.register('normalize')
class ImageNormalize(Augmentation):
    """Normalize a tensor image with per-channel mean and std."""

    def __init__(self, mean, std, inplace=False):
        # Keep mean/std as public ndarray attributes: the inverse-transform
        # builder reads them to construct the de-normalization.
        self.mean = np.array(mean)
        self.std = np.array(std)
        self.normalize = Normalize(self.mean, self.std, inplace)

    def augment(self, data):
        """Return a shallow copy of *data* with the image normalized."""
        result = copy.copy(data)
        # Normalize expects float input since its mean/std are float tensors.
        result.image = self.normalize(data.image.float())
        return result
@AUGMENTATION_REGISTRY.register('to_tensor')
class ImageToTensor(Augmentation):
    """Convert a PIL/cv2 image to a tensor, and a numpy semantic-seg map to
    a tensor with a leading batch dimension."""

    def _allow_format_change(self):
        # Converting PIL/numpy inputs into tensors changes the data format.
        return True

    def augment(self, data):
        converted = copy.copy(data)
        converted.image = self.image_to_tensor(data.image)
        if has_gt_semantic_seg(data):
            converted.gt_semantic_seg = self.semantic_seg_to_tensor(data.gt_semantic_seg)
        return converted

    def image_to_tensor(self, image):
        """Convert a PIL or cv2 image into a [C, H, W] tensor.

        Args:
            image: PIL or cv2 image.
        Returns:
            Tensor image in [C, H, W] layout.
        """
        return TF.to_tensor(image)

    def semantic_seg_to_tensor(self, semantic_seg):
        """Convert a [1, height, width] ndarray mask into a
        [1, 1, height, width] tensor."""
        return torch.as_tensor(semantic_seg).unsqueeze(0)
@AUGMENTATION_REGISTRY.register('normalize_inverse')
class ImageToTensorInverse(Augmentation):
    """Inverse of ImageToTensor: rescale the image back to [0, 255] and turn
    the semantic segmentation tensor back into a numpy array.

    NOTE(review): registered as 'normalize_inverse' although it inverts the
    to_tensor step, and it does not override _allow_format_change even though
    inverse_semantic_seg changes tensor -> numpy — confirm intent.
    """
    def augment(self, data):
        output = copy.copy(data)
        output.image = self.inverse_image(data.image)
        if has_gt_semantic_seg(data):
            output.gt_semantic_seg = self.inverse_semantic_seg(data.gt_semantic_seg)
        return output
    def inverse_image(self, image):
        # to_tensor scaled pixel values into [0, 1]; undo that scaling.
        return image * 255
    def inverse_semantic_seg(self, semantic_seg):
        # Drop the leading dim added by semantic_seg_to_tensor and go to numpy.
        return semantic_seg.squeeze(0).cpu().numpy()
@AUGMENTATION_REGISTRY.register('custom_to_tensor')
class CustomImageToTensor(Augmentation):
    """Turn a HWC (or HW) numpy image into a CHW float tensor, in place."""

    def augment(self, data):
        img = data['image']
        # HWC -> CHW for color images; otherwise add a leading channel axis.
        if img.ndim == 3:
            img = img.transpose(2, 0, 1)
        else:
            img = img[None]
        data['image'] = torch.from_numpy(img).float()
        return data
def build_partially_inverse_transformer(compose_transformer):
    """Build a Compose that maps transformed image tensors back to viewable
    images by inverting the to_tensor / normalize steps in reverse order."""
    inverted = []
    for step in reversed(compose_transformer.transforms):
        if isinstance(step, ImageToTensor):
            inverted.append(ImageToTensorInverse())
        elif isinstance(step, ImageNormalize):
            # Normalize(x) = (x - mean) / std, so the inverse is another
            # Normalize with mean' = -mean/std and std' = 1/std.
            inverted.append(ImageNormalize(-step.mean / step.std, 1.0 / step.std))
    return Compose(inverted)
def build_transformer(cfgs):
    """Instantiate each augmentation config and chain them into a Compose."""
    return Compose([AUGMENTATION_REGISTRY.build(cfg) for cfg in cfgs])
| 30.538813
| 117
| 0.652213
|
4a07654e1a495ee6c39c5bef595ff42902f73813
| 6,848
|
py
|
Python
|
multimodelity/datasets/builders/visual_genome/dataset.py
|
hahaxun/mmf
|
6d32c3925ed9bf938e19a071aaa5e72a5cf01ee1
|
[
"BSD-3-Clause"
] | null | null | null |
multimodelity/datasets/builders/visual_genome/dataset.py
|
hahaxun/mmf
|
6d32c3925ed9bf938e19a071aaa5e72a5cf01ee1
|
[
"BSD-3-Clause"
] | null | null | null |
multimodelity/datasets/builders/visual_genome/dataset.py
|
hahaxun/mmf
|
6d32c3925ed9bf938e19a071aaa5e72a5cf01ee1
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import json
import torch
from multimodelity.common.sample import Sample, SampleList
from multimodelity.datasets.builders.vqa2 import VQA2Dataset
from multimodelity.datasets.databases.scene_graph_database import SceneGraphDatabase
# Annotation-dict key under which each sample stores its Visual Genome image id.
_CONSTANTS = {"image_id_key": "image_id"}
class VisualGenomeDataset(VQA2Dataset):
    """VQA2-style dataset over Visual Genome.

    On top of the usual question/answer samples it can attach the image's
    objects, relationships and full scene graph, depending on config flags.
    """

    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(
            config,
            dataset_type,
            imdb_file_index,
            dataset_name="visual_genome",
            *args,
            **kwargs
        )
        self._return_scene_graph = config.return_scene_graph
        self._return_objects = config.return_objects
        self._return_relationships = config.return_relationships
        # When set, samples whose answer maps to <unk> are skipped entirely.
        self._no_unk = config.get("no_unk", False)
        self.scene_graph_db = None
        # Only load the scene graph database when some part of it is requested.
        build_scene_graph_db = (
            self._return_scene_graph
            or self._return_objects
            or self._return_relationships
        )
        if build_scene_graph_db:
            scene_graph_file = config.scene_graph_files[dataset_type][imdb_file_index]
            scene_graph_file = self._get_absolute_path(scene_graph_file)
            self.scene_graph_db = SceneGraphDatabase(config, scene_graph_file)

    def load_item(self, idx):
        """Load sample *idx*, skipping <unk>-answer samples when configured."""
        sample_info = self.annotation_db[idx]
        sample_info = self._preprocess_answer(sample_info)
        sample_info["question_id"] = sample_info["id"]
        if self._check_unk(sample_info):
            # Try the next sample (wrapping around) until a known answer is found.
            return self.load_item((idx + 1) % len(self.annotation_db))
        current_sample = super().load_item(idx)
        current_sample = self._load_scene_graph(idx, current_sample)
        return current_sample

    def _get_image_id(self, idx):
        """Return the Visual Genome image id of annotation *idx*."""
        return self.annotation_db[idx][_CONSTANTS["image_id_key"]]

    def _get_image_info(self, idx):
        """Return a deep copy of the scene graph entry for annotation *idx*.

        Deep copy so the nested dicts can be updated in place without
        corrupting the shared database entry.
        """
        return copy.deepcopy(self.scene_graph_db[self._get_image_id(idx)])

    def _preprocess_answer(self, sample_info):
        """Normalize the first answer (strip punctuation and articles)."""
        sample_info["answers"] = [
            self.vg_answer_preprocessor(
                {"text": sample_info["answers"][0]},
                remove=["?", ",", ".", "a", "an", "the"],
            )["text"]
        ]
        return sample_info

    def _check_unk(self, sample_info):
        """Return True when no_unk is set and the answer maps to <unk>."""
        if not self._no_unk:
            return False
        index = self.answer_processor.word2idx(sample_info["answers"][0])
        return index == self.answer_processor.answer_vocab.UNK_INDEX

    def _load_scene_graph(self, idx, sample):
        """Attach objects / relationships / scene graph to *sample* as configured."""
        if self.scene_graph_db is None:
            return sample
        # Removed a dead _get_image_info(idx) call and "regions" extraction
        # that deep-copied the scene graph entry without using the result;
        # _load_regions below recomputes the processed regions anyway.
        objects, object_map = self._load_objects(idx)
        if self._return_objects:
            sample.objects = objects
        relationships, relationship_map = self._load_relationships(idx, object_map)
        if self._return_relationships:
            sample.relationships = relationships
        regions, _ = self._load_regions(idx, object_map, relationship_map)
        if self._return_scene_graph:
            sample.scene_graph = regions
        return sample

    def _load_objects(self, idx):
        """Build a SampleList of processed objects plus an id -> object map.

        Box coordinates/sizes are normalized by the image dimensions.
        """
        image_info = self._get_image_info(idx)
        image_height = image_info["height"]
        image_width = image_info["width"]
        object_map = {}
        objects = []
        for obj in image_info["objects"]:
            obj["synsets"] = self.synset_processor({"tokens": obj["synsets"]})["text"]
            obj["names"] = self.name_processor({"tokens": obj["names"]})["text"]
            obj["height"] = obj["h"] / image_height
            obj.pop("h")
            obj["width"] = obj["w"] / image_width
            obj.pop("w")
            obj["y"] /= image_height
            obj["x"] /= image_width
            obj["attributes"] = self.attribute_processor({"tokens": obj["attributes"]})[
                "text"
            ]
            obj = Sample(obj)
            object_map[obj["object_id"]] = obj
            objects.append(obj)
        objects = SampleList(objects)
        return objects, object_map

    def _load_relationships(self, idx, object_map):
        """Build a SampleList of relationships plus an id -> relationship map.

        NOTE(review): the guard compares the config flags with ``None`` even
        though they normally hold booleans, so it only short-circuits when the
        flags are literally absent — confirm against the config schema.
        """
        if self._return_relationships is None and self._return_scene_graph is None:
            return None, None
        image_info = self._get_image_info(idx)
        relationship_map = {}
        relationships = []
        for relationship in image_info["relationships"]:
            relationship["synsets"] = self.synset_processor(
                {"tokens": relationship["synsets"]}
            )["text"]
            relationship["predicate"] = self.predicate_processor(
                {"tokens": relationship["predicate"]}
            )["text"]
            relationship["object"] = object_map[relationship["object_id"]]
            relationship["subject"] = object_map[relationship["subject_id"]]
            relationship = Sample(relationship)
            relationship_map[relationship["relationship_id"]] = relationship
            relationships.append(relationship)
        relationships = SampleList(relationships)
        return relationships, relationship_map

    def _load_regions(self, idx, object_map, relationship_map):
        """Build a SampleList of region descriptions wired to their objects and
        relationships, plus an id -> region map."""
        if self._return_scene_graph is None:
            return None, None
        image_info = self._get_image_info(idx)
        image_height = image_info["height"]
        image_width = image_info["width"]
        region_map = {}
        regions = []
        for region in image_info["regions"]:
            for synset in region["synsets"]:
                synset["entity_name"] = self.name_processor(
                    {"tokens": [synset["entity_name"]]}
                )["text"]
                synset["synset_name"] = self.synset_processor(
                    {"tokens": [synset["synset_name"]]}
                )["text"]
            # Normalize region geometry by the image dimensions.
            region["height"] /= image_height
            region["width"] /= image_width
            region["y"] /= image_height
            region["x"] /= image_width
            relationships = []
            objects = []
            for relationship_idx in region["relationships"]:
                relationships.append(relationship_map[relationship_idx])
            for object_idx in region["objects"]:
                objects.append(object_map[object_idx])
            region["relationships"] = relationships
            region["objects"] = objects
            region["phrase"] = self.text_processor({"text": region["phrase"]})["text"]
            region = Sample(region)
            region_map[region["region_id"]] = region
            regions.append(region)
        regions = SampleList(regions)
        return regions, region_map
| 34.938776
| 88
| 0.609375
|
4a07662554b903b949869d486c49f7c0ddcabc98
| 16,768
|
py
|
Python
|
tests/python/relay/test_op_level1.py
|
byungchul/tvm
|
ce72e9b552c14e9636e43782ccb3732d00fa0b6b
|
[
"Apache-2.0"
] | null | null | null |
tests/python/relay/test_op_level1.py
|
byungchul/tvm
|
ce72e9b552c14e9636e43782ccb3732d00fa0b6b
|
[
"Apache-2.0"
] | null | null | null |
tests/python/relay/test_op_level1.py
|
byungchul/tvm
|
ce72e9b552c14e9636e43782ccb3732d00fa0b6b
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import scipy
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import ctx_list
import topi.testing
from tvm.contrib.nvcc import have_fp16
def run_infer_type(expr):
    """Run Relay type inference on *expr* and return the typed expression.

    Functions come back as the module's "main"; any other expression is
    unwrapped from the main function's body.
    """
    mod = transform.InferType()(relay.Module.from_expr(expr))
    main = mod["main"]
    if isinstance(expr, relay.Function):
        return main
    return main.body
def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x)), preserving dtype."""
    return np.ones_like(x) / (np.ones_like(x) + np.exp(-x))
def relu(x):
    """Element-wise rectified linear unit: max(x, 0); *x* is left untouched."""
    return np.maximum(x, 0)
def rsqrt(x):
    """Element-wise reciprocal square root: 1 / sqrt(x), preserving dtype."""
    return np.ones_like(x) / np.sqrt(x)
def test_unary_op():
    """Elementwise unary ops: printer output, type inference and execution
    against numpy/scipy references, for float16 and float32."""
    def check_single_op(opfunc, ref, dtype):
        shape = (10, 4)
        dtype = dtype
        tp = relay.TensorType(shape)
        x = relay.var("x", tp, dtype=dtype)
        y = opfunc(x)
        # test printer
        assert ("{}(%x)".format(y.op.name)) in y.astext()
        # test type inference
        yy = run_infer_type(y)
        assert yy.checked_type == tp
        if ref is not None:
            data = np.random.rand(*shape).astype(dtype)
            ref_res = ref(data)
            func = relay.Function([x], y)
            for target, ctx in ctx_list():
                # use the graph executor for testing, as we need to create the
                # function explicitly to avoid constant-folding.
                if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):
                    continue
                intrp = relay.create_executor("graph", ctx=ctx, target=target)
                op_res = intrp.evaluate(func)(data)
                np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
    for opfunc, ref in [(tvm.relay.log, np.log),
                        (tvm.relay.exp, np.exp),
                        (tvm.relay.erf, scipy.special.erf),
                        (tvm.relay.sqrt, np.sqrt),
                        (tvm.relay.rsqrt, rsqrt),
                        (tvm.relay.sigmoid, sigmoid),
                        (tvm.relay.tanh, np.tanh),
                        (relay.nn.relu, relu),
                        (tvm.relay.cos, np.cos),
                        (tvm.relay.sin, np.sin),
                        (tvm.relay.atan, np.arctan)]:
        for dtype in ['float16', 'float32']:
            check_single_op(opfunc, ref, dtype)
def test_binary_op():
    """Elementwise binary ops: broadcasting type inference and execution
    against numpy references, for float16 and float32."""
    def inst(vars, sh):
        # NOTE(review): this helper appears unused within the test.
        return [vars.get(s, s) for s in sh]
    def check_binary_op(opfunc, ref, dtype):
        # TODO(@jroesch): this piece of code improperly uses type variables.
        n = tvm.var("n")
        s1 = (5, n, 5)
        s2 = (n, 1)
        t1 = relay.TensorType(s1)
        t2 = relay.TensorType(s2)
        x = relay.var("x", t1, dtype=dtype)
        y = relay.var("y", t2, dtype=dtype)
        z = opfunc(x, y)
        # test printer
        assert ("{}(%x, %y)".format(z.op.name)) in z.astext()
        zz = run_infer_type(z)
        # (n, 1) broadcasts against (5, n, 5), so the result keeps t1's shape.
        assert zz.checked_type == t1
        if ref is not None:
            t1 = relay.TensorType((5, 10, 5))
            t2 = relay.TensorType((5, 10, 5))
            x = relay.var("x", t1, dtype=dtype)
            y = relay.var("y", t2, dtype=dtype)
            z = opfunc(x, y)
            x_data = np.random.rand(5, 10, 5).astype(dtype)
            y_data = np.random.rand(5, 10, 5).astype(dtype)
            ref_res = ref(x_data, y_data)
            func = relay.Function([x, y], z)
            for target, ctx in ctx_list():
                # use the graph executor for testing, as we need to create the
                # function explicitly to avoid constant-folding.
                if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):
                    continue
                intrp = relay.create_executor("graph", ctx=ctx, target=target)
                op_res = intrp.evaluate(func)(x_data, y_data)
                np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
    for opfunc, ref in [(relay.add, np.add),
                        (relay.subtract, np.subtract),
                        (relay.multiply, np.multiply),
                        (relay.divide, np.divide)]:
        for dtype in ['float16', 'float32']:
            check_binary_op(opfunc, ref, dtype)
def test_expand_dims():
    """expand_dims execution must match a plain numpy reshape."""
    # based on topi test
    def verify_expand_dims(dshape, dtype, oshape, axis, num_newaxis):
        x = relay.Var("x", relay.TensorType(dshape, dtype))
        func = relay.Function([x], relay.expand_dims(x, axis, num_newaxis))
        for target, ctx in ctx_list():
            # Skip fp16 on CUDA devices without native fp16 support.
            if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):
                continue
            data = np.random.uniform(size=dshape).astype(dtype)
            ref_res = data.reshape(oshape)
            intrp = relay.create_executor("graph", ctx=ctx, target=target)
            op_res = intrp.evaluate(func)(data)
            np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
    for dtype in ['float16', 'float32']:
        verify_expand_dims((3, 10), dtype, (3, 10, 1, 1), 2, 2)
        verify_expand_dims((3, 10), dtype, (1, 3, 10), -3, 1)
def test_bias_add():
    """bias_add must broadcast the bias over the channel axis; the default
    axis must not be printed in the textual form."""
    for dtype in ['float16', 'float32']:
        xshape = (10, 2, 3, 4)
        bshape = (2,)
        # Fixed: use '==' (not 'is') — identity comparison of string literals
        # is implementation-dependent and raises SyntaxWarning on CPython 3.8+.
        rtol = 1e-2 if dtype == 'float16' else 1e-5
        x = relay.var("x", shape=xshape, dtype=dtype)
        bias = relay.var("bias", dtype=dtype)
        z = relay.nn.bias_add(x, bias)
        zz = run_infer_type(z)
        assert "axis=" not in zz.astext()
        assert zz.args[1].checked_type == relay.TensorType(bshape, dtype)
        func = relay.Function([x, bias], z)
        x_data = np.random.uniform(size=xshape).astype(dtype)
        y_data = np.random.uniform(size=bshape).astype(dtype)
        # Reference: bias broadcast over the remaining (H, W) axes.
        ref_res = x_data + y_data.reshape((2, 1, 1))
        for target, ctx in ctx_list():
            if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):
                continue
            intrp = relay.create_executor("graph", ctx=ctx, target=target)
            op_res = intrp.evaluate(func)(x_data, y_data)
            np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=rtol)
def test_expand_dims_infer_type():
    """expand_dims must insert a unit axis at the requested position."""
    for dtype in ['float16', 'float32']:
        n, t, d = tvm.var("n"), tvm.var("t"), 100
        x = relay.var("x", shape=(n, t, d), dtype=dtype)
        y = relay.expand_dims(x, axis=2)
        assert "axis=2" in y.astext()
        inferred = run_infer_type(y)
        assert inferred.checked_type == relay.TensorType((n, t, 1, 100), dtype)
def test_softmax():
    """nn.softmax must keep the input type and match the numpy reference."""
    for dtype in ['float16', 'float32']:
        if dtype == 'float16':
            # Softmax accuracy for float16 is poor; skip this dtype but keep
            # testing the rest. (The original `return` here aborted the whole
            # loop, so float32 was never tested.)
            continue
        shape = (10, 4)
        x = relay.var("x", shape=shape, dtype=dtype)
        y = relay.nn.softmax(x, axis=1)
        assert "nn.softmax" in y.astext()
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType(shape, dtype)
        func = relay.Function([x], y)
        x_data = np.random.uniform(size=shape).astype(dtype)
        ref_res = topi.testing.softmax_python(x_data)
        for target, ctx in ctx_list():
            intrp = relay.create_executor("graph", ctx=ctx, target=target)
            op_res = intrp.evaluate(func)(x_data)
            np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
def test_log_softmax():
    """nn.log_softmax must keep the input type and match the numpy reference."""
    for dtype in ['float16', 'float32']:
        if dtype == 'float16':
            # Softmax accuracy for float16 is poor; skip this dtype but keep
            # testing the rest. (The original `return` here aborted the whole
            # loop, so float32 was never tested.)
            continue
        shape = (10, 4)
        x = relay.var("x", shape=shape, dtype=dtype)
        y = relay.nn.log_softmax(x, axis=1)
        assert "nn.log_softmax" in y.astext()
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType(shape, dtype)
        func = relay.Function([x], y)
        x_data = np.random.uniform(size=shape).astype(dtype)
        ref_res = topi.testing.log_softmax_python(x_data)
        for target, ctx in ctx_list():
            intrp = relay.create_executor("graph", ctx=ctx, target=target)
            op_res = intrp.evaluate(func)(x_data)
            np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
def test_concatenate():
    """concatenate: type inference on several axes, rejection of mismatched
    shapes, and numeric execution against numpy."""
    for dtype in ['float16', 'float32']:
        # NOTE(review): these vars are created without an explicit dtype, so
        # the type-inference checks below run with the default dtype on every
        # loop iteration — confirm whether that is intended.
        n, t, d = tvm.var("n"), tvm.var("t"), 100
        x = relay.var("x", shape=(n, t, d))
        y = relay.var("y", shape=(n, t, d))
        z = relay.concatenate((x, y), axis=-1)
        assert "axis=" in z.astext()
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType((n, t, 200))
        x = relay.exp(x)
        z = relay.concatenate((x, y), axis=2)
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType((n, t, 200))
        z = relay.concatenate((x, y), axis=1)
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType((n, t + t, 100))
        # check shape mismatches (the following case is expected to raise tvm._ffi.base.TVMError.
        try:
            x = relay.var('p1', shape=(2, 5))
            y = relay.var('p2', shape=(2, 3))
            c = relay.concatenate([x, y], axis=0)
            func = relay.Function([x, y], c)
            zz = run_infer_type(func)
        except tvm._ffi.base.TVMError:
            pass
        else:
            assert False
        x = relay.var("x", shape=(10, 5), dtype=dtype)
        y = relay.var("y", shape=(10, 5), dtype=dtype)
        t = relay.var("z", shape=(), dtype=dtype)
        z = relay.concatenate((x, y), axis=1)
        z = relay.add(z, t)
        # Check result.
        func = relay.Function([x, y, t], z)
        x_data = np.random.rand(10, 5).astype(dtype)
        y_data = np.random.rand(10, 5).astype(dtype)
        t_data = np.random.uniform(size=()).astype(dtype)
        ref_res = np.concatenate((x_data, y_data), axis=1) + t_data
        for target, ctx in ctx_list():
            # Skip fp16 on CUDA devices without native fp16 support.
            if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):
                continue
            intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
            intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
            op_res1 = intrp1.evaluate(func)(x_data, y_data, t_data)
            tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=0.01)
            op_res2 = intrp2.evaluate(func)(x_data, y_data, t_data)
            tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=0.01)
def test_dropout():
    """nn.dropout must be type-transparent: output type equals input type."""
    for dtype in ['float16', 'float32']:
        n, t, d = tvm.var("n"), tvm.var("t"), tvm.var("d")
        input_ty = relay.TensorType((n, t, d), dtype)
        x = relay.var("x", input_ty)
        y = relay.nn.dropout(x, rate=0.75)
        assert "rate=" in y.astext()
        checked = run_infer_type(y)
        assert checked.checked_type == input_ty
def test_batch_norm():
    """batch_norm type inference for the default axis, axis=0 and axis=-1.

    The output is a 3-tuple: (normalized data, running mean, running var),
    where the statistics have the length of the chosen axis.
    """
    for dtype in ['float16', 'float32']:
        # beta and gamma ignored
        data = relay.var("data", relay.TensorType((3, 2, 1), dtype))
        beta = relay.var("beta", relay.TensorType((2,), dtype))
        gamma = relay.var("gamma", relay.TensorType((2,), dtype))
        moving_mean = relay.var("moving_mean", relay.TensorType((2,), dtype))
        moving_var = relay.var("moving_var", relay.TensorType((2,), dtype))
        y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,
                                center=False, scale=False)
        yy = run_infer_type(y.astuple())
        assert "center=" in yy.astext()
        assert yy.checked_type == relay.ty.TupleType(tvm.convert([
            relay.TensorType((3, 2, 1), dtype),
            relay.TensorType((2,), dtype),
            relay.TensorType((2,), dtype)
        ]))
        # axis=0: statistics are taken over the first dimension (size 3).
        beta = relay.var("beta", relay.TensorType((3,), dtype))
        gamma = relay.var("gamma", relay.TensorType((3,), dtype))
        moving_mean = relay.var("moving_mean", relay.TensorType((3,), dtype))
        moving_var = relay.var("moving_var", relay.TensorType((3,), dtype))
        y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,
                                axis=0, center=False, scale=False)
        yy = run_infer_type(y.astuple())
        assert yy.checked_type == relay.ty.TupleType(tvm.convert([
            relay.ty.TensorType((3, 2, 1), dtype),
            relay.ty.TensorType((3,), dtype),
            relay.ty.TensorType((3,), dtype)
        ]))
        # axis=-1
        data = relay.var("data", relay.TensorType((1, 2, 3), dtype))
        beta = relay.var("beta", relay.TensorType((3,), dtype))
        gamma = relay.var("gamma", relay.TensorType((3,), dtype))
        moving_mean = relay.var("moving_mean", relay.TensorType((3,), dtype))
        moving_var = relay.var("moving_var", relay.TensorType((3,), dtype))
        y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,
                                axis=-1, center=False, scale=False)
        yy = run_infer_type(y.astuple())
        assert yy.checked_type == relay.ty.TupleType(tvm.convert([
            relay.ty.TensorType((1, 2, 3), dtype),
            relay.ty.TensorType((3,), dtype),
            relay.ty.TensorType((3,), dtype)
        ]))
def test_dense():
    """nn.dense: type inference with explicit units, inferred units, and an
    incomplete weight type, plus numeric execution (dense is x @ w.T)."""
    for dtype in ['float16', 'float32']:
        if dtype == 'float16':
            # Dense accuracy for float16 is poor; skip this dtype but keep
            # testing the rest. (The original `return` here aborted the whole
            # loop, so float32 was never tested.)
            continue
        n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
        x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
        w = relay.var("w", relay.TensorType((2, w), dtype))
        y = relay.nn.dense(x, w, units=2)
        assert "units=2" in y.astext()
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
        # Units inferred from the weight's first dimension.
        n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2
        x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
        wh, ww = tvm.var("wh"), tvm.var("ww")
        w = relay.var("w", relay.TensorType((ww, wh), dtype))
        y = relay.nn.dense(x, w)
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype)
        # Weight type left incomplete; units supplied explicitly.
        n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2
        x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
        w = relay.var("w", relay.IncompleteType())
        y = relay.nn.dense(x, w, units=2)
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
        x = relay.var("x", shape=(10, 5), dtype=dtype)
        w = relay.var("w", shape=(2, 5), dtype=dtype)
        z = relay.nn.dense(x, w)
        # Check the numeric result against numpy.
        func = relay.Function([x, w], z)
        x_data = np.random.rand(10, 5).astype(dtype)
        w_data = np.random.rand(2, 5).astype(dtype)
        ref_res = np.dot(x_data, w_data.T)
        for target, ctx in ctx_list():
            intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
            intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
            op_res1 = intrp1.evaluate(func)(x_data, w_data)
            tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
            op_res2 = intrp2.evaluate(func)(x_data, w_data)
            tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def test_bitserial_dense():
    """bitserial_dense with units=32 must infer an (m, 32) int16 output."""
    m, k = tvm.var("m"), tvm.var("k")
    x = relay.var("x", relay.TensorType((m, k), "int16"))
    w = relay.var("w", relay.TensorType((k, 32), "int16"))
    y = relay.nn.bitserial_dense(x, w, units=32)
    # Fixed: the original line was a bare expression (`"units=8" in ...`) —
    # it neither asserted nor matched the units actually requested (32).
    assert "units=32" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((m, 32), "int16")
if __name__ == "__main__":
    # Run the full level-1 operator test suite when invoked as a script.
    test_concatenate()
    test_bias_add()
    test_unary_op()
    test_binary_op()
    test_expand_dims_infer_type()
    test_expand_dims()
    test_softmax()
    test_log_softmax()
    test_dropout()
    test_batch_norm()
    test_dense()
    test_bitserial_dense()
| 41.098039
| 106
| 0.574487
|
4a0767b4b5d97bc4b4b633240f6928170286ebda
| 4,254
|
py
|
Python
|
backend/src/model/Room.py
|
MNI1996/TIP-Jara-Ibarra-MasDespacioCerebrito
|
45775384382e7069f45e1a5ce932e742eae6629e
|
[
"MIT"
] | null | null | null |
backend/src/model/Room.py
|
MNI1996/TIP-Jara-Ibarra-MasDespacioCerebrito
|
45775384382e7069f45e1a5ce932e742eae6629e
|
[
"MIT"
] | 5
|
2020-10-03T12:14:57.000Z
|
2020-11-28T13:23:58.000Z
|
backend/src/model/Room.py
|
MNI1996/TIP-Jara-Ibarra-MasDespacioCerebrito
|
45775384382e7069f45e1a5ce932e742eae6629e
|
[
"MIT"
] | null | null | null |
import random
from itertools import chain
from mongoengine import Document, ReferenceField, ListField, QuerySet, StringField, EmbeddedDocumentListField, IntField
from backend.src.model.Category import Category
from backend.src.model.Player import Player
from backend.src.model.Question import Question
from backend.src.model.Round import Round
def sort_by_points(e):
    """Sort key: the 'points' value of a ranking entry."""
    points = e['points']
    return points
class RoomManager(QuerySet):
    """Custom QuerySet for Room exposing the game-domain operations."""
    def add_participant(self, room_name, a_participant):
        """Add *a_participant* to the room (add_to_set: no duplicates)."""
        a_room = Room.objects(name=room_name).first()
        a_room.update(add_to_set__participants=a_participant)
    def remove_participant(self, room_name, a_participant):
        """Remove *a_participant* from the room's participant list."""
        a_room = Room.objects(name=room_name).first()
        a_room.update(pull__participants=a_participant)
    def getRoundsFor(self, name):
        """Build the room's rounds, one question per round.

        Prefers questions from the room's categories; when there are not
        enough, tops up with random questions from outside those categories.
        NOTE(review): when the categories supply enough questions the sample
        holds question *ids*, otherwise it mixes Question documents —
        confirm that Round.question accepts both forms.
        """
        a_room = self.get(name=name)
        categories = a_room.categories
        round_amount = a_room.rounds_amount
        questions_of_category = Question.objects.filter(categories__in=categories)
        if len(questions_of_category) >= round_amount:
            questions = [val['id'] for val in random.sample(list(questions_of_category), k=round_amount)]
        else:
            pending_questions_amount = round_amount - len(questions_of_category)
            extra_questions = Question.objects(id__nin=questions_of_category.values_list('id'))
            if len(extra_questions) >= pending_questions_amount:
                extra_questions_random = random.sample(list(extra_questions), k=pending_questions_amount)
                questions = list(chain(questions_of_category, extra_questions_random))
            else:
                # Not enough questions overall: use everything available.
                questions = list(chain(questions_of_category, extra_questions))
        rounds = []
        # Shuffle so that category questions and extras are interleaved.
        shuffled_questions = random.sample(list(questions), k=len(questions))
        for question in shuffled_questions:
            round = Round(question=question)
            rounds.append(round)
        return rounds
    def getPointsFor(self, room_name, player_nick):
        """Score for a player: one point per correct answer plus a one-point
        bonus for each answer that was the first correct one of its round."""
        answers = self.getAllAnswersOf(room_name, player_nick)
        points_rate = 1
        extra_points = 0
        for answer in answers:
            if answer.first:
                extra_points += 1
        currentPoints = len(answers) * points_rate + extra_points
        return currentPoints
    def getAllAnswersOf(self, room_name, player_nick):
        """Return the player's *correct* answers across all rounds of the room."""
        a_room = self.get(name=room_name)
        answers = []
        for a_round in a_room.rounds:
            for answer in a_round.answers:
                if answer.player_id == player_nick and self.isAnswerCorrect(a_round.question, answer.question_option_id):
                    answers.append(answer)
        return answers
    def isAnswerCorrect(self, question, question_option_id):
        """True when the picked option is marked correct on the question."""
        question_option = question.options.get(_id=question_option_id)
        return question_option.correct
    def roundHasAnyCorrectAnswer(self, a_round):
        """True when at least one answer in *a_round* is correct."""
        for answer in a_round.answers:
            if self.isAnswerCorrect(a_round.question, answer.question_option_id):
                return True
        return False
    def getRoundForAQuestion(self, room_name, question_id):
        """Return the round holding *question_id* in the room, or None."""
        a_room = self.get(name=room_name)
        for round_obj in a_room.rounds:
            if str(round_obj.question.id) == question_id:
                return round_obj
        return None
    def getPointsForAllPlayers(self, room_name):
        """Return the ranking [{'player', 'points'}] sorted by points desc."""
        a_room = self.get(name=room_name)
        ranking = []
        for player in a_room.participants:
            points = self.getPointsFor(room_name, player.nick)
            ranking.append({"player": player.nick, "points": points})
        ranking.sort(key=sort_by_points, reverse=True)
        return ranking
class Room(Document):
    """A game room: owner, participants, configured rounds and categories."""
    # The room name doubles as the primary key; at least 5 characters.
    name = StringField(primary_key=True, min_length=5)
    owner = ReferenceField(Player)
    participants = ListField(ReferenceField(Player), default=[])
    # Number of question rounds to play in this room.
    rounds_amount = IntField(default=4)
    # Seconds allowed per round (bounded 10-60).
    round_time = IntField(default=10, min_value=10, max_value=60)
    rounds = EmbeddedDocumentListField(Round, default=[])
    categories = ListField(ReferenceField(Category), default=[])
    # Attach the custom manager so Room.objects exposes the game helpers.
    meta = {'queryset_class': RoomManager}
    def __str__(self):
        return f"ID: {self.id}, participants: {self.participants}"
| 40.514286
| 121
| 0.686413
|
4a0767d1c1fae04b4318e234f84432d5173e061d
| 4,899
|
py
|
Python
|
yandex/cloud/mdb/mysql/v1/database_service_pb2_grpc.py
|
kbespalov/python-sdk
|
e86563ee850e46a35b4c84053ecd4affdf66a963
|
[
"MIT"
] | null | null | null |
yandex/cloud/mdb/mysql/v1/database_service_pb2_grpc.py
|
kbespalov/python-sdk
|
e86563ee850e46a35b4c84053ecd4affdf66a963
|
[
"MIT"
] | null | null | null |
yandex/cloud/mdb/mysql/v1/database_service_pb2_grpc.py
|
kbespalov/python-sdk
|
e86563ee850e46a35b4c84053ecd4affdf66a963
|
[
"MIT"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from yandex.cloud.mdb.mysql.v1 import database_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__pb2
from yandex.cloud.mdb.mysql.v1 import database_service_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__service__pb2
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
class DatabaseServiceStub(object):
    """A set of methods for managing MySQL databases.

    Client-side stub (generated code): each attribute is a callable bound to
    one unary-unary RPC of yandex.cloud.mdb.mysql.v1.DatabaseService.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        self.Get = channel.unary_unary(
            '/yandex.cloud.mdb.mysql.v1.DatabaseService/Get',
            request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__service__pb2.GetDatabaseRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__pb2.Database.FromString,
        )
        self.List = channel.unary_unary(
            '/yandex.cloud.mdb.mysql.v1.DatabaseService/List',
            request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__service__pb2.ListDatabasesRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__service__pb2.ListDatabasesResponse.FromString,
        )
        # Create and Delete are long-running: both respond with an Operation.
        self.Create = channel.unary_unary(
            '/yandex.cloud.mdb.mysql.v1.DatabaseService/Create',
            request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__service__pb2.CreateDatabaseRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        )
        self.Delete = channel.unary_unary(
            '/yandex.cloud.mdb.mysql.v1.DatabaseService/Delete',
            request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__service__pb2.DeleteDatabaseRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        )
class DatabaseServiceServicer(object):
    """A set of methods for managing MySQL databases.

    Server-side skeleton (generated code): subclass and override the methods
    below; each default implementation reports UNIMPLEMENTED.
    """

    def Get(self, request, context):
        """Returns the specified MySQL database.

        To get the list of available MySQL databases, make a [List] request.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def List(self, request, context):
        """Retrieves the list of MySQL databases in the specified cluster.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Create(self, request, context):
        """Creates a new MySQL database in the specified cluster.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Delete(self, request, context):
        """Deletes the specified MySQL database.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_DatabaseServiceServicer_to_server(servicer, server):
    """Register *servicer*'s DatabaseService handlers on a grpc.Server."""
    # One unary-unary handler per RPC; (de)serializers mirror the stub above.
    rpc_method_handlers = {
        'Get': grpc.unary_unary_rpc_method_handler(
            servicer.Get,
            request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__service__pb2.GetDatabaseRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__pb2.Database.SerializeToString,
        ),
        'List': grpc.unary_unary_rpc_method_handler(
            servicer.List,
            request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__service__pb2.ListDatabasesRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__service__pb2.ListDatabasesResponse.SerializeToString,
        ),
        'Create': grpc.unary_unary_rpc_method_handler(
            servicer.Create,
            request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__service__pb2.CreateDatabaseRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
        ),
        'Delete': grpc.unary_unary_rpc_method_handler(
            servicer.Delete,
            request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__service__pb2.DeleteDatabaseRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'yandex.cloud.mdb.mysql.v1.DatabaseService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| 48.029412
| 139
| 0.789753
|
4a07680265426feba99be7217bee3800ba801b09
| 4,045
|
py
|
Python
|
samples/client/petstore/python/petstore_api/models/animal.py
|
FantasyTeddy/openapi-generator
|
866dc03f4fda48800ba52e428f2f1010de8540c0
|
[
"Apache-2.0"
] | null | null | null |
samples/client/petstore/python/petstore_api/models/animal.py
|
FantasyTeddy/openapi-generator
|
866dc03f4fda48800ba52e428f2f1010de8540c0
|
[
"Apache-2.0"
] | null | null | null |
samples/client/petstore/python/petstore_api/models/animal.py
|
FantasyTeddy/openapi-generator
|
866dc03f4fda48800ba52e428f2f1010de8540c0
|
[
"Apache-2.0"
] | 1
|
2019-11-25T15:03:05.000Z
|
2019-11-25T15:03:05.000Z
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Animal(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'class_name': 'str',
        'color': 'str'
    }

    attribute_map = {
        'class_name': 'className',
        'color': 'color'
    }

    # Maps discriminator payload values to concrete child model class names.
    discriminator_value_class_map = {
        'Dog': 'Dog',
        'Cat': 'Cat'
    }

    def __init__(self, class_name=None, color='red'):  # noqa: E501
        """Animal - a model defined in OpenAPI"""  # noqa: E501
        self._class_name = None
        self._color = None
        self.discriminator = 'className'

        self.class_name = class_name
        if color is not None:
            self.color = color

    @property
    def class_name(self):
        """Gets the class_name of this Animal.  # noqa: E501

        :return: The class_name of this Animal.  # noqa: E501
        :rtype: str
        """
        return self._class_name

    @class_name.setter
    def class_name(self, class_name):
        """Sets the class_name of this Animal.

        :param class_name: The class_name of this Animal.  # noqa: E501
        :type: str
        :raises ValueError: class_name is the discriminator and is required.
        """
        if class_name is None:
            raise ValueError("Invalid value for `class_name`, must not be `None`")  # noqa: E501

        self._class_name = class_name

    @property
    def color(self):
        """Gets the color of this Animal.  # noqa: E501

        :return: The color of this Animal.  # noqa: E501
        :rtype: str
        """
        return self._color

    @color.setter
    def color(self, color):
        """Sets the color of this Animal.

        :param color: The color of this Animal.  # noqa: E501
        :type: str
        """
        self._color = color

    def get_real_child_model(self, data):
        """Returns the real base class specified by the discriminator.

        Bug fix: the discriminator value was lower-cased before a lookup in a
        map whose keys are capitalized ('Dog', 'Cat'), so this always
        returned None.  Match case-insensitively instead; returns None only
        when no key matches.
        """
        discriminator_value = data[self.discriminator].lower()
        for key, child_class in self.discriminator_value_class_map.items():
            if key.lower() == discriminator_value:
                return child_class
        return None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # dict.items() replaces six.iteritems(); equivalent on Python 3.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Animal):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 26.966667
| 174
| 0.564648
|
4a076842aa86f03a17cd351ce0471870dc2f4d2d
| 32
|
py
|
Python
|
python/testData/inspections/PyTypeCheckerInspection/StrFormat.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/PyTypeCheckerInspection/StrFormat.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyTypeCheckerInspection/StrFormat.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
# IDE type-checker inspection fixture exercising str.format on bytes vs
# unicode literals.  NOTE(review): bytes has no .format in Python 3, so the
# first line would raise at runtime — presumably intentional test data for
# the inspection; confirm before "fixing".
b'{}'.format(0)
u'{}'.format(0)
| 10.666667
| 15
| 0.5
|
4a0768c36cb620ada06aa9135c09a4fa13aef73f
| 70,553
|
py
|
Python
|
sympy/core/tests/test_expr.py
|
ethankward/sympy
|
44664d9f625a1c68bc492006cfe1012cb0b49ee4
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/core/tests/test_expr.py
|
ethankward/sympy
|
44664d9f625a1c68bc492006cfe1012cb0b49ee4
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/core/tests/test_expr.py
|
ethankward/sympy
|
44664d9f625a1c68bc492006cfe1012cb0b49ee4
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy import (Add, Basic, Expr, S, Symbol, Wild, Float, Integer, Rational, I,
sin, cos, tan, exp, log, nan, oo, sqrt, symbols, Integral, sympify,
WildFunction, Poly, Function, Derivative, Number, pi, NumberSymbol, zoo,
Piecewise, Mul, Pow, nsimplify, ratsimp, trigsimp, radsimp, powsimp,
simplify, together, collect, factorial, apart, combsimp, factor, refine,
cancel, Tuple, default_sort_key, DiracDelta, gamma, Dummy, Sum, E,
exp_polar, expand, diff, O, Heaviside, Si, Max, UnevaluatedExpr,
integrate, gammasimp, Gt)
from sympy.core.expr import ExprBuilder, unchanged
from sympy.core.function import AppliedUndef
from sympy.core.compatibility import round
from sympy.physics.secondquant import FockState
from sympy.physics.units import meter
from sympy.testing.pytest import raises, XFAIL
from sympy.abc import a, b, c, n, t, u, x, y, z
class DummyNumber(object):
    """
    Minimal implementation of a number that works with SymPy.

    If one has a Number class (e.g. Sage Integer, or some other custom class)
    that one wants to work well with SymPy, one has to implement at least the
    methods of this class DummyNumber, resp. its subclasses I5 and F1_1.

    Basically, one just needs to implement either __int__() or __float__() and
    then one needs to make sure that the class works with Python integers and
    with itself.
    """

    # -- addition -----------------------------------------------------------
    def __add__(self, other):
        if isinstance(other, (int, float, DummyNumber)):
            return self.number + other
        return NotImplemented

    def __radd__(self, other):
        if isinstance(other, (int, float)):
            return other + self.number
        return NotImplemented

    # -- subtraction ----------------------------------------------------------
    def __sub__(self, other):
        if isinstance(other, (int, float, DummyNumber)):
            return self.number - other
        return NotImplemented

    def __rsub__(self, other):
        if isinstance(other, (int, float)):
            return other - self.number
        return NotImplemented

    # -- multiplication -------------------------------------------------------
    def __mul__(self, other):
        if isinstance(other, (int, float, DummyNumber)):
            return self.number * other
        return NotImplemented

    def __rmul__(self, other):
        if isinstance(other, (int, float)):
            return other * self.number
        return NotImplemented

    # -- division: true division delegates to the legacy __div__ pair --------
    def __div__(self, other):
        if isinstance(other, (int, float, DummyNumber)):
            return self.number / other
        return NotImplemented

    def __rdiv__(self, other):
        if isinstance(other, (int, float)):
            return other / self.number
        return NotImplemented

    def __truediv__(self, other):
        return self.__div__(other)

    def __rtruediv__(self, other):
        return self.__rdiv__(other)

    # -- exponentiation -------------------------------------------------------
    def __pow__(self, other):
        if isinstance(other, (int, float, DummyNumber)):
            return self.number ** other
        return NotImplemented

    def __rpow__(self, other):
        if isinstance(other, (int, float)):
            return other ** self.number
        return NotImplemented

    # -- unary ops ------------------------------------------------------------
    def __pos__(self):
        return self.number

    def __neg__(self):
        return -self.number
class I5(DummyNumber):
    """Integer-valued DummyNumber: supplies only __int__."""
    number = 5

    def __int__(self):
        return self.number
class F1_1(DummyNumber):
    """Float-valued DummyNumber: supplies only __float__."""
    number = 1.1

    def __float__(self):
        return self.number
# Shared fixture instances used by dotest() below.
i5 = I5()
f1_1 = F1_1()

# basic sympy objects
basic_objs = [
    Rational(2),
    Float("1.3"),
    x,
    y,
    pow(x, y)*y,
]

# all supported objects: the sympy objects above plus plain Python numbers
# and the custom DummyNumber instances
all_objs = basic_objs + [
    5,
    5.5,
    i5,
    f1_1
]
def dotest(s):
    """Invoke the binary-op tester *s* on every ordered pair from all_objs."""
    for left in all_objs:
        for right in all_objs:
            s(left, right)
    return True
def test_basic():
    """All unary and binary arithmetic ops must work for every object pair."""
    def exercise(a, b):
        # Evaluate each supported operation once; the results are discarded.
        for _ in (a, +a, -a, a + b, a - b, a*b, a/b, a**b):
            pass
    assert dotest(exercise)
def test_ibasic():
    """Augmented arithmetic (+=, -=, *=, /=) must work for every object pair."""
    def augmented(left, right):
        acc = left
        acc += right
        acc = left
        acc -= right
        acc = left
        acc *= right
        acc = left
        acc /= right
    assert dotest(augmented)
class NonBasic(object):
    '''An object that knows how to implement binary operations like +, -,
    etc. with Expr without being a subclass of Basic itself.  (The NonExpr
    subclass below does subclass Basic but not Expr.)

    For both NonBasic and NonExpr it should be possible to override
    Expr.__add__ etc. because Expr.__add__ should return NotImplemented for
    non-Expr classes.  Otherwise Expr.__add__ would create meaningless
    objects like Add(Integer(1), FiniteSet(2)) and other classes could not
    override these operations when interacting with Expr.

    Every override records the operation as a SpecialOp(op, lhs, rhs).
    '''
    def __add__(self, rhs):
        return SpecialOp('+', self, rhs)

    def __radd__(self, lhs):
        return SpecialOp('+', lhs, self)

    def __sub__(self, rhs):
        return SpecialOp('-', self, rhs)

    def __rsub__(self, lhs):
        return SpecialOp('-', lhs, self)

    def __mul__(self, rhs):
        return SpecialOp('*', self, rhs)

    def __rmul__(self, lhs):
        return SpecialOp('*', lhs, self)

    def __div__(self, rhs):
        return SpecialOp('/', self, rhs)

    def __rdiv__(self, lhs):
        return SpecialOp('/', lhs, self)

    def __truediv__(self, rhs):
        return SpecialOp('/', self, rhs)

    def __rtruediv__(self, lhs):
        return SpecialOp('/', lhs, self)

    def __floordiv__(self, rhs):
        return SpecialOp('//', self, rhs)

    def __rfloordiv__(self, lhs):
        return SpecialOp('//', lhs, self)

    def __mod__(self, rhs):
        return SpecialOp('%', self, rhs)

    def __rmod__(self, lhs):
        return SpecialOp('%', lhs, self)

    def __divmod__(self, rhs):
        return SpecialOp('divmod', self, rhs)

    def __rdivmod__(self, lhs):
        return SpecialOp('divmod', lhs, self)

    def __pow__(self, rhs):
        return SpecialOp('**', self, rhs)

    def __rpow__(self, lhs):
        return SpecialOp('**', lhs, self)

    def __lt__(self, rhs):
        return SpecialOp('<', self, rhs)

    def __gt__(self, rhs):
        return SpecialOp('>', self, rhs)

    def __le__(self, rhs):
        return SpecialOp('<=', self, rhs)

    def __ge__(self, rhs):
        return SpecialOp('>=', self, rhs)
class NonExpr(Basic, NonBasic):
    '''Like NonBasic above except this is a subclass of Basic but not Expr.

    The operator overrides themselves are inherited from NonBasic.
    '''
    pass
class SpecialOp(Basic):
    '''Represents the results of operations with NonBasic and NonExpr.

    Stored args are (operator-string, lhs, rhs); the tests inspect .args.
    '''
    def __new__(cls, op, arg1, arg2):
        return Basic.__new__(cls, op, arg1, arg2)
class NonArithmetic(Basic):
    '''Represents a Basic subclass that does not support arithmetic operations.

    Mixing it with Expr must raise TypeError (see test_cooperative_operations).
    '''
    pass
def test_cooperative_operations():
    '''Tests that Expr uses binary operations cooperatively.

    In particular it should be possible for non-Expr classes to override
    binary operators like +, - etc when used with Expr instances. This should
    work for non-Expr classes whether they are Basic subclasses or not. Also
    non-Expr classes that do not define binary operators with Expr should give
    TypeError.
    '''
    # A bunch of instances of Expr subclasses
    exprs = [
        Expr(),
        S.Zero,
        S.One,
        S.Infinity,
        S.NegativeInfinity,
        S.ComplexInfinity,
        S.Half,
        Float(0.5),
        Integer(2),
        Symbol('x'),
        Mul(2, Symbol('x')),
        Add(2, Symbol('x')),
        Pow(2, Symbol('x')),
    ]

    for e in exprs:
        # Test that these classes can override arithmetic operations in
        # combination with various Expr types.
        for ne in [NonBasic(), NonExpr()]:
            # Note: for comparisons Python may call the *reflected* method on
            # ne first, which is why e.g. `e < ne` records ('>', ne, e).
            results = [
                (ne + e, ('+', ne, e)),
                (e + ne, ('+', e, ne)),
                (ne - e, ('-', ne, e)),
                (e - ne, ('-', e, ne)),
                (ne * e, ('*', ne, e)),
                (e * ne, ('*', e, ne)),
                (ne / e, ('/', ne, e)),
                (e / ne, ('/', e, ne)),
                (ne // e, ('//', ne, e)),
                (e // ne, ('//', e, ne)),
                (ne % e, ('%', ne, e)),
                (e % ne, ('%', e, ne)),
                (divmod(ne, e), ('divmod', ne, e)),
                (divmod(e, ne), ('divmod', e, ne)),
                (ne ** e, ('**', ne, e)),
                (e ** ne, ('**', e, ne)),
                (e < ne, ('>', ne, e)),
                (ne < e, ('<', ne, e)),
                (e > ne, ('<', ne, e)),
                (ne > e, ('>', ne, e)),
                (e <= ne, ('>=', ne, e)),
                (ne <= e, ('<=', ne, e)),
                (e >= ne, ('<=', ne, e)),
                (ne >= e, ('>=', ne, e)),
            ]
            for res, args in results:
                assert type(res) is SpecialOp and res.args == args

        # These classes do not support binary operators with Expr. Every
        # operation should raise in combination with any of the Expr types.
        for na in [NonArithmetic(), object()]:
            raises(TypeError, lambda : e + na)
            raises(TypeError, lambda : na + e)
            raises(TypeError, lambda : e - na)
            raises(TypeError, lambda : na - e)
            raises(TypeError, lambda : e * na)
            raises(TypeError, lambda : na * e)
            raises(TypeError, lambda : e / na)
            raises(TypeError, lambda : na / e)
            raises(TypeError, lambda : e // na)
            raises(TypeError, lambda : na // e)
            raises(TypeError, lambda : e % na)
            raises(TypeError, lambda : na % e)
            raises(TypeError, lambda : divmod(e, na))
            raises(TypeError, lambda : divmod(na, e))
            raises(TypeError, lambda : e ** na)
            raises(TypeError, lambda : na ** e)
            raises(TypeError, lambda : e > na)
            raises(TypeError, lambda : na > e)
            raises(TypeError, lambda : e < na)
            raises(TypeError, lambda : na < e)
            raises(TypeError, lambda : e >= na)
            raises(TypeError, lambda : na >= e)
            raises(TypeError, lambda : e <= na)
            raises(TypeError, lambda : na <= e)
def test_relational():
    """Relationals with numeric operands evaluate directly to S.true/S.false."""
    from sympy import Lt
    assert (pi < 3) is S.false
    assert (pi <= 3) is S.false
    assert (pi > 3) is S.true
    assert (pi >= 3) is S.true
    assert (-pi < 3) is S.true
    assert (-pi <= 3) is S.true
    assert (-pi > 3) is S.false
    assert (-pi >= 3) is S.false
    r = Symbol('r', real=True)
    assert (r - 2 < r - 3) is S.false
    assert Lt(x + I, x + I + 2).func == Lt  # issue 8288
def test_relational_assumptions():
    """Symbol assumptions decide whether a relational stays symbolic or
    evaluates to S.true/S.false (a negated assumption alone is not enough;
    real=True is also needed for full evaluation)."""
    from sympy import Lt, Gt, Le, Ge
    # Only the assumption negated: the relational stays unevaluated.
    m1 = Symbol("m1", nonnegative=False)
    m2 = Symbol("m2", positive=False)
    m3 = Symbol("m3", nonpositive=False)
    m4 = Symbol("m4", negative=False)
    assert (m1 < 0) == Lt(m1, 0)
    assert (m2 <= 0) == Le(m2, 0)
    assert (m3 > 0) == Gt(m3, 0)
    assert (m4 >= 0) == Ge(m4, 0)
    # Negated assumption plus real=True: evaluates to true.
    m1 = Symbol("m1", nonnegative=False, real=True)
    m2 = Symbol("m2", positive=False, real=True)
    m3 = Symbol("m3", nonpositive=False, real=True)
    m4 = Symbol("m4", negative=False, real=True)
    assert (m1 < 0) is S.true
    assert (m2 <= 0) is S.true
    assert (m3 > 0) is S.true
    assert (m4 >= 0) is S.true
    # Direct positive assumptions: evaluates to true.
    m1 = Symbol("m1", negative=True)
    m2 = Symbol("m2", nonpositive=True)
    m3 = Symbol("m3", positive=True)
    m4 = Symbol("m4", nonnegative=True)
    assert (m1 < 0) is S.true
    assert (m2 <= 0) is S.true
    assert (m3 > 0) is S.true
    assert (m4 >= 0) is S.true
    # Opposite negations plus real=True: evaluates to false.
    m1 = Symbol("m1", negative=False, real=True)
    m2 = Symbol("m2", nonpositive=False, real=True)
    m3 = Symbol("m3", positive=False, real=True)
    m4 = Symbol("m4", nonnegative=False, real=True)
    assert (m1 < 0) is S.false
    assert (m2 <= 0) is S.false
    assert (m3 > 0) is S.false
    assert (m4 >= 0) is S.false
# See https://github.com/sympy/sympy/issues/17708
#def test_relational_noncommutative():
# from sympy import Lt, Gt, Le, Ge
# A, B = symbols('A,B', commutative=False)
# assert (A < B) == Lt(A, B)
# assert (A <= B) == Le(A, B)
# assert (A > B) == Gt(A, B)
# assert (A >= B) == Ge(A, B)
def test_basic_nostr():
    """Mixing sympy objects with plain strings must raise TypeError."""
    for obj in basic_objs:
        raises(TypeError, lambda: obj + '1')
        raises(TypeError, lambda: obj - '1')
        if obj == 2:
            # Python sequence repetition: 2 * '1' is legal and yields '11'.
            assert obj * '1' == '11'
        else:
            raises(TypeError, lambda: obj * '1')
        raises(TypeError, lambda: obj / '1')
        raises(TypeError, lambda: obj ** '1')
def test_series_expansion_for_uniform_order():
    """series(x, 0, n) keeps exactly the terms below order n plus O(x**n)."""
    assert (1/x + y + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + y + x).series(x, 0, 1) == 1/x + y + O(x)
    assert (1/x + 1 + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + 1 + x).series(x, 0, 1) == 1/x + 1 + O(x)
    assert (1/x + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + y + y*x + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + y + y*x + x).series(x, 0, 1) == 1/x + y + O(x)
def test_leadterm():
    """leadterm(x) returns (coefficient, exponent) of the lowest-order term."""
    assert (3 + 2*x**(log(3)/log(2) - 1)).leadterm(x) == (3, 0)

    assert (1/x**2 + 1 + x + x**2).leadterm(x)[1] == -2
    assert (1/x + 1 + x + x**2).leadterm(x)[1] == -1
    assert (x**2 + 1/x).leadterm(x)[1] == -1
    assert (1 + x**2).leadterm(x)[1] == 0
    assert (x + 1).leadterm(x)[1] == 0
    assert (x + x**2).leadterm(x)[1] == 1
    assert (x**2).leadterm(x)[1] == 2
def test_as_leading_term():
    """as_leading_term(x) extracts the lowest-order term as an expression."""
    assert (3 + 2*x**(log(3)/log(2) - 1)).as_leading_term(x) == 3
    assert (1/x**2 + 1 + x + x**2).as_leading_term(x) == 1/x**2
    assert (1/x + 1 + x + x**2).as_leading_term(x) == 1/x
    assert (x**2 + 1/x).as_leading_term(x) == 1/x
    assert (1 + x**2).as_leading_term(x) == 1
    assert (x + 1).as_leading_term(x) == 1
    assert (x + x**2).as_leading_term(x) == x
    assert (x**2).as_leading_term(x) == x**2
    assert (x + oo).as_leading_term(x) is oo

    # The argument must be a Symbol.
    raises(ValueError, lambda: (x + 1).as_leading_term(1))
def test_leadterm2():
    """leadterm with a constant (x-free) additive part."""
    assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).leadterm(x) == \
        (sin(1 + sin(1)), 0)
def test_leadterm3():
    """leadterm keeps the full x-free sum as the coefficient."""
    assert (y + z + x).leadterm(x) == (y + z, 0)
def test_as_leading_term2():
    """as_leading_term with a constant (x-free) additive part."""
    assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).as_leading_term(x) == \
        sin(1 + sin(1))
def test_as_leading_term3():
    """as_leading_term collects all same-order contributions."""
    assert (2 + pi + x).as_leading_term(x) == 2 + pi
    assert (2*x + pi*x + x**2).as_leading_term(x) == (2 + pi)*x
def test_as_leading_term4():
    """Leading term of a large rational expression (regression test)."""
    # see issue 6843
    n = Symbol('n', integer=True, positive=True)
    r = -n**3/(2*n**2 + 4*n + 2) - n**2/(n**2 + 2*n + 1) + \
        n**2/(n + 1) - n/(2*n**2 + 4*n + 2) + n/(n*x + x) + 2*n/(n + 1) - \
        1 + 1/(n*x + x) + 1/(n + 1) - 1/x
    assert r.as_leading_term(x).cancel() == n/2
def test_as_leading_term_stub():
    """Undefined functions: leading term is computable only when the
    argument's leading behavior is (1/x or a constant), else it must raise."""
    class foo(Function):
        pass
    assert foo(1/x).as_leading_term(x) == foo(1/x)
    assert foo(1).as_leading_term(x) == foo(1)
    raises(NotImplementedError, lambda: foo(x).as_leading_term(x))
def test_as_leading_term_deriv_integral():
    """as_leading_term works through Derivative and Integral wrappers."""
    # related to issue 11313
    assert Derivative(x ** 3, x).as_leading_term(x) == 3*x**2
    assert Derivative(x ** 3, y).as_leading_term(x) == 0
    assert Integral(x ** 3, x).as_leading_term(x) == x**4/4
    assert Integral(x ** 3, y).as_leading_term(x) == y*x**3
    assert Derivative(exp(x), x).as_leading_term(x) == 1
    assert Derivative(log(x), x).as_leading_term(x) == (1/x).as_leading_term(x)
def test_atoms():
    """Expr.atoms(): collect leaf subexpressions, optionally filtered by type."""
    assert x.atoms() == {x}
    assert (1 + x).atoms() == {x, S.One}

    assert (1 + 2*cos(x)).atoms(Symbol) == {x}
    assert (1 + 2*cos(x)).atoms(Symbol, Number) == {S.One, S(2), x}

    assert (2*(x**(y**x))).atoms() == {S(2), x, y}

    assert S.Half.atoms() == {S.Half}
    assert S.Half.atoms(Symbol) == set([])

    assert sin(oo).atoms(oo) == set()

    assert Poly(0, x).atoms() == {S.Zero, x}
    assert Poly(1, x).atoms() == {S.One, x}

    assert Poly(x, x).atoms() == {x}
    assert Poly(x, x, y).atoms() == {x, y}
    assert Poly(x + y, x, y).atoms() == {x, y}
    assert Poly(x + y, x, y, z).atoms() == {x, y, z}
    assert Poly(x + y*t, x, y, z).atoms() == {t, x, y, z}

    assert (I*pi).atoms(NumberSymbol) == {pi}
    assert (I*pi).atoms(NumberSymbol, I) == \
        (I*pi).atoms(I, NumberSymbol) == {pi, I}

    assert exp(exp(x)).atoms(exp) == {exp(exp(x)), exp(x)}
    assert (1 + x*(2 + y) + exp(3 + z)).atoms(Add) == \
        {1 + x*(2 + y) + exp(3 + z), 2 + y, 3 + z}

    # issue 6132
    f = Function('f')
    e = (f(x) + sin(x) + 2)
    assert e.atoms(AppliedUndef) == \
        {f(x)}
    assert e.atoms(AppliedUndef, Function) == \
        {f(x), sin(x)}
    assert e.atoms(Function) == \
        {f(x), sin(x)}
    assert e.atoms(AppliedUndef, Number) == \
        {f(x), S(2)}
    assert e.atoms(Function, Number) == \
        {S(2), sin(x), f(x)}
def test_is_polynomial():
    """is_polynomial(*syms): polynomial in the given symbols (any expression
    free of a symbol counts as polynomial in it)."""
    k = Symbol('k', nonnegative=True, integer=True)

    assert Rational(2).is_polynomial(x, y, z) is True
    assert (S.Pi).is_polynomial(x, y, z) is True

    assert x.is_polynomial(x) is True
    assert x.is_polynomial(y) is True

    assert (x**2).is_polynomial(x) is True
    assert (x**2).is_polynomial(y) is True

    assert (x**(-2)).is_polynomial(x) is False
    assert (x**(-2)).is_polynomial(y) is True

    assert (2**x).is_polynomial(x) is False
    assert (2**x).is_polynomial(y) is True

    # Symbolic exponents are not polynomial even with integer assumptions.
    assert (x**k).is_polynomial(x) is False
    assert (x**k).is_polynomial(k) is False
    assert (x**x).is_polynomial(x) is False
    assert (k**k).is_polynomial(k) is False
    assert (k**x).is_polynomial(k) is False

    assert (x**(-k)).is_polynomial(x) is False
    assert ((2*x)**k).is_polynomial(x) is False

    assert (x**2 + 3*x - 8).is_polynomial(x) is True
    assert (x**2 + 3*x - 8).is_polynomial(y) is True

    assert (x**2 + 3*x - 8).is_polynomial() is True

    assert sqrt(x).is_polynomial(x) is False
    assert (sqrt(x)**3).is_polynomial(x) is False

    assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(x) is True
    assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(y) is False

    assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial() is True
    assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial() is False

    assert (
        (x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial(x, y) is True
    assert (
        (x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial(x, y) is False
def test_is_rational_function():
    """is_rational_function(*syms): ratio of polynomials in the given symbols;
    NaN and the infinities are never rational functions."""
    assert Integer(1).is_rational_function() is True
    assert Integer(1).is_rational_function(x) is True

    assert Rational(17, 54).is_rational_function() is True
    assert Rational(17, 54).is_rational_function(x) is True

    assert (12/x).is_rational_function() is True
    assert (12/x).is_rational_function(x) is True

    assert (x/y).is_rational_function() is True
    assert (x/y).is_rational_function(x) is True
    assert (x/y).is_rational_function(x, y) is True

    assert (x**2 + 1/x/y).is_rational_function() is True
    assert (x**2 + 1/x/y).is_rational_function(x) is True
    assert (x**2 + 1/x/y).is_rational_function(x, y) is True

    assert (sin(y)/x).is_rational_function() is False
    assert (sin(y)/x).is_rational_function(y) is False
    assert (sin(y)/x).is_rational_function(x) is True
    assert (sin(y)/x).is_rational_function(x, y) is False

    assert (S.NaN).is_rational_function() is False
    assert (S.Infinity).is_rational_function() is False
    assert (S.NegativeInfinity).is_rational_function() is False
    assert (S.ComplexInfinity).is_rational_function() is False
def test_is_algebraic_expr():
    """is_algebraic_expr(*syms): built from rational operations and rational
    powers of the given symbols (transcendental functions disqualify)."""
    assert sqrt(3).is_algebraic_expr(x) is True
    assert sqrt(3).is_algebraic_expr() is True

    eq = ((1 + x**2)/(1 - y**2))**(S.One/3)
    assert eq.is_algebraic_expr(x) is True
    assert eq.is_algebraic_expr(y) is True

    assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr(x) is True
    assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr(y) is True
    assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr() is True

    assert (cos(y)/sqrt(x)).is_algebraic_expr() is False
    assert (cos(y)/sqrt(x)).is_algebraic_expr(x) is True
    assert (cos(y)/sqrt(x)).is_algebraic_expr(y) is False
    assert (cos(y)/sqrt(x)).is_algebraic_expr(x, y) is False
def test_SAGE1():
    """Objects exposing _sympy_() are converted when mixed with sympy numbers."""
    # see https://github.com/sympy/sympy/issues/3346
    class MyInt:
        def _sympy_(self):
            return Integer(5)
    m = MyInt()
    e = Rational(2)*m
    assert e == 10

    # The class object itself (not an instance) is not sympifiable.
    raises(TypeError, lambda: Rational(2)*MyInt)
def test_SAGE2():
    """Objects exposing __int__ are sympified via their integer value."""
    class MyInt(object):
        def __int__(self):
            return 5
    assert sympify(MyInt()) == 5
    e = Rational(2)*MyInt()
    assert e == 10

    # The class object itself (not an instance) is not sympifiable.
    raises(TypeError, lambda: Rational(2)*MyInt)
def test_SAGE3():
    """A foreign __rmul__ wins when sympy's __mul__ returns NotImplemented."""
    class MySymbol:
        def __rmul__(self, other):
            return ('mys', other, self)

    o = MySymbol()
    e = x*o

    assert e == ('mys', x, o)
def test_len():
    """len(expr.args) counts the immediate arguments of Mul/Add nodes."""
    e = x*y
    assert len(e.args) == 2
    e = x + y + z
    assert len(e.args) == 3
def test_doit():
    """doit() evaluates deferred nodes; hints can suppress evaluation."""
    a = Integral(x**2, x)

    assert isinstance(a.doit(), Integral) is False
    assert isinstance(a.doit(integrals=True), Integral) is False
    # integrals=False keeps the Integral unevaluated.
    assert isinstance(a.doit(integrals=False), Integral) is True

    assert (2*Integral(x, x)).doit() == x**2
def test_attribute_error():
    """Symbols must not grow method-style math attributes (x.cos() etc.)."""
    raises(AttributeError, lambda: x.cos())
    raises(AttributeError, lambda: x.sin())
    raises(AttributeError, lambda: x.exp())
def test_args():
    """.args exposes a node's arguments; commutative ops may order them
    either way, so membership is tested against both orderings."""
    assert (x*y).args in ((x, y), (y, x))
    assert (x + y).args in ((x, y), (y, x))
    assert (x*y + 1).args in ((x*y, 1), (1, x*y))
    assert sin(x*y).args == (x*y,)
    assert sin(x*y).args[0] == x*y
    assert (x**y).args == (x, y)
    assert (x**y).args[0] == x
    assert (x**y).args[1] == y
def test_noncommutative_expand_issue_3757():
    """expand() must preserve operand order for noncommutative symbols."""
    A, B, C = symbols('A,B,C', commutative=False)
    assert A*B - B*A != 0
    assert (A*(A + B)*B).expand() == A**2*B + A*B**2
    assert (A*(A + B + C)*B).expand() == A**2*B + A*B**2 + A*C*B
def test_as_numer_denom():
    """as_numer_denom() splits an expression into (numerator, denominator),
    combining additive fractions over a common denominator."""
    a, b, c = symbols('a, b, c')

    assert nan.as_numer_denom() == (nan, 1)
    assert oo.as_numer_denom() == (oo, 1)
    assert (-oo).as_numer_denom() == (-oo, 1)
    assert zoo.as_numer_denom() == (zoo, 1)
    assert (-zoo).as_numer_denom() == (zoo, 1)

    assert x.as_numer_denom() == (x, 1)
    assert (1/x).as_numer_denom() == (1, x)
    assert (x/y).as_numer_denom() == (x, y)
    assert (x/2).as_numer_denom() == (x, 2)
    assert (x*y/z).as_numer_denom() == (x*y, z)
    assert (x/(y*z)).as_numer_denom() == (x, y*z)
    assert S.Half.as_numer_denom() == (1, 2)
    assert (1/y**2).as_numer_denom() == (1, y**2)
    assert (x/y**2).as_numer_denom() == (x, y**2)
    assert ((x**2 + 1)/y).as_numer_denom() == (x**2 + 1, y)
    assert (x*(y + 1)/y**7).as_numer_denom() == (x*(y + 1), y**7)
    assert (x**-2).as_numer_denom() == (1, x**2)
    assert (a/x + b/2/x + c/3/x).as_numer_denom() == \
        (6*a + 3*b + 2*c, 6*x)
    assert (a/x + b/2/x + c/3/y).as_numer_denom() == \
        (2*c*x + y*(6*a + 3*b), 6*x*y)
    assert (a/x + b/2/x + c/.5/x).as_numer_denom() == \
        (2*a + b + 4.0*c, 2*x)
    # this should take no more than a few seconds
    assert int(log(Add(*[Dummy()/i/x for i in range(1, 705)]
                       ).as_numer_denom()[1]/x).n(4)) == 705
    # Infinite quantities stay in the numerator.
    for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
        assert (i + x/3).as_numer_denom() == \
            (x + i, 3)
    assert (S.Infinity + x/3 + y/4).as_numer_denom() == \
        (4*x + 3*y + S.Infinity, 12)
    assert (oo*x + zoo*y).as_numer_denom() == \
        (zoo*y + oo*x, 1)

    # Noncommutative factors are never rearranged into a fraction.
    A, B, C = symbols('A,B,C', commutative=False)

    assert (A*B*C**-1).as_numer_denom() == (A*B*C**-1, 1)
    assert (A*B*C**-1/x).as_numer_denom() == (A*B*C**-1, x)
    assert (C**-1*A*B).as_numer_denom() == (C**-1*A*B, 1)
    assert (C**-1*A*B/x).as_numer_denom() == (C**-1*A*B, x)
    assert ((A*B*C)**-1).as_numer_denom() == ((A*B*C)**-1, 1)
    assert ((A*B*C)**-1/x).as_numer_denom() == ((A*B*C)**-1, x)
def test_trunc():
    """math.trunc works on numeric sympy expressions and raises TypeError on
    symbolic or infinite ones."""
    import math
    x, y = symbols('x y')
    assert math.trunc(2) == 2
    assert math.trunc(4.57) == 4
    assert math.trunc(-5.79) == -5
    assert math.trunc(pi) == 3
    assert math.trunc(log(7)) == 1
    assert math.trunc(exp(5)) == 148
    assert math.trunc(cos(pi)) == -1
    assert math.trunc(sin(5)) == 0

    raises(TypeError, lambda: math.trunc(x))
    raises(TypeError, lambda: math.trunc(x + y**2))
    raises(TypeError, lambda: math.trunc(oo))
def test_as_independent():
    """as_independent(*syms) splits an expression into (independent,
    dependent) parts, additively (as_Add=True) or multiplicatively."""
    assert S.Zero.as_independent(x, as_Add=True) == (0, 0)
    assert S.Zero.as_independent(x, as_Add=False) == (0, 0)
    assert (2*x*sin(x) + y + x).as_independent(x) == (y, x + 2*x*sin(x))
    assert (2*x*sin(x) + y + x).as_independent(y) == (x + 2*x*sin(x), y)
    assert (2*x*sin(x) + y + x).as_independent(x, y) == (0, y + x + 2*x*sin(x))

    assert (x*sin(x)*cos(y)).as_independent(x) == (cos(y), x*sin(x))
    assert (x*sin(x)*cos(y)).as_independent(y) == (x*sin(x), cos(y))
    assert (x*sin(x)*cos(y)).as_independent(x, y) == (1, x*sin(x)*cos(y))

    assert (sin(x)).as_independent(x) == (1, sin(x))
    assert (sin(x)).as_independent(y) == (sin(x), 1)
    assert (2*sin(x)).as_independent(x) == (2, sin(x))
    assert (2*sin(x)).as_independent(y) == (2*sin(x), 1)

    # issue 4903 = 1766b
    n1, n2, n3 = symbols('n1 n2 n3', commutative=False)
    assert (n1 + n1*n2).as_independent(n2) == (n1, n1*n2)
    assert (n2*n1 + n1*n2).as_independent(n2) == (0, n1*n2 + n2*n1)
    assert (n1*n2*n1).as_independent(n2) == (n1, n2*n1)
    assert (n1*n2*n1).as_independent(n1) == (1, n1*n2*n1)

    assert (3*x).as_independent(x, as_Add=True) == (0, 3*x)
    assert (3*x).as_independent(x, as_Add=False) == (3, x)
    assert (3 + x).as_independent(x, as_Add=True) == (3, x)
    assert (3 + x).as_independent(x, as_Add=False) == (1, 3 + x)

    # issue 5479
    assert (3*x).as_independent(Symbol) == (3, x)

    # issue 5648
    assert (n1*x*y).as_independent(x) == (n1*y, x)
    assert ((x + n1)*(x - y)).as_independent(x) == (1, (x + n1)*(x - y))
    assert ((x + n1)*(x - y)).as_independent(y) == (x + n1, x - y)
    assert (DiracDelta(x - n1)*DiracDelta(x - y)).as_independent(x) \
        == (1, DiracDelta(x - n1)*DiracDelta(x - y))
    assert (x*y*n1*n2*n3).as_independent(n2) == (x*y*n1, n2*n3)
    assert (x*y*n1*n2*n3).as_independent(n1) == (x*y, n1*n2*n3)
    assert (x*y*n1*n2*n3).as_independent(n3) == (x*y*n1*n2, n3)
    assert (DiracDelta(x - n1)*DiracDelta(y - n1)*DiracDelta(x - n2)).as_independent(y) == \
        (DiracDelta(x - n1)*DiracDelta(x - n2), DiracDelta(y - n1))

    # issue 5784
    assert (x + Integral(x, (x, 1, 2))).as_independent(x, strict=True) == \
        (Integral(x, (x, 1, 2)), x)

    eq = Add(x, -x, 2, -3, evaluate=False)
    assert eq.as_independent(x) == (-1, Add(x, -x, evaluate=False))
    eq = Mul(x, 1/x, 2, -3, evaluate=False)
    # NOTE(review): the next line is missing an `assert` — the comparison's
    # result is silently discarded.  Confirm the expected value holds before
    # enabling it as an assertion.
    eq.as_independent(x) == (-6, Mul(x, 1/x, evaluate=False))

    assert (x*y).as_independent(z, as_Add=True) == (x*y, 0)
@XFAIL
def test_call_2():
    """Calling a scaled undefined function should distribute: (2*f)(x) == 2*f(x)."""
    # TODO UndefinedFunction does not subclass Expr
    f = Function('f')
    assert (2*f)(x) == 2*f(x)
def test_replace():
    """Test Expr.replace with type, pattern, and callable rules, plus the
    exact, map, and simultaneous flags and noncommutative arguments."""
    f = log(sin(x)) + tan(sin(x**2))
    assert f.replace(sin, cos) == log(cos(x)) + tan(cos(x**2))
    assert f.replace(
        sin, lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
    a = Wild('a')
    b = Wild('b')
    assert f.replace(sin(a), cos(a)) == log(cos(x)) + tan(cos(x**2))
    assert f.replace(
        sin(a), lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
    # test exact
    assert (2*x).replace(a*x + b, b - a, exact=True) == 2*x
    assert (2*x).replace(a*x + b, b - a) == 2*x
    assert (2*x).replace(a*x + b, b - a, exact=False) == 2/x
    assert (2*x).replace(a*x + b, lambda a, b: b - a, exact=True) == 2*x
    assert (2*x).replace(a*x + b, lambda a, b: b - a) == 2*x
    assert (2*x).replace(a*x + b, lambda a, b: b - a, exact=False) == 2/x
    g = 2*sin(x**3)
    assert g.replace(
        lambda expr: expr.is_Number, lambda expr: expr**2) == 4*sin(x**9)
    assert cos(x).replace(cos, sin, map=True) == (sin(x), {cos(x): sin(x)})
    assert sin(x).replace(cos, sin) == sin(x)
    cond, func = lambda x: x.is_Mul, lambda x: 2*x
    assert (x*y).replace(cond, func, map=True) == (2*x*y, {x*y: 2*x*y})
    assert (x*(1 + x*y)).replace(cond, func, map=True) == \
        (2*x*(2*x*y + 1), {x*(2*x*y + 1): 2*x*(2*x*y + 1), x*y: 2*x*y})
    assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y, map=True) == \
        (sin(x), {sin(x): sin(x)/y})
    # if not simultaneous then y*sin(x) -> y*sin(x)/y = sin(x) -> sin(x)/y
    assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y,
        simultaneous=False) == sin(x)/y
    assert (x**2 + O(x**3)).replace(Pow, lambda b, e: b**e/e) == O(1, x)
    assert (x**2 + O(x**3)).replace(Pow, lambda b, e: b**e/e,
        simultaneous=False) == x**2/2 + O(x**3)
    assert (x*(x*y + 3)).replace(lambda x: x.is_Mul, lambda x: 2 + x) == \
        x*(x*y + 5) + 2
    e = (x*y + 1)*(2*x*y + 1) + 1
    assert e.replace(cond, func, map=True) == (
        2*((2*x*y + 1)*(4*x*y + 1)) + 1,
        {2*x*y: 4*x*y, x*y: 2*x*y, (2*x*y + 1)*(4*x*y + 1):
        2*((2*x*y + 1)*(4*x*y + 1))})
    assert x.replace(x, y) == y
    assert (x + 1).replace(1, 2) == x + 2
    # https://groups.google.com/forum/#!topic/sympy/8wCgeC95tz0
    n1, n2, n3 = symbols('n1:4', commutative=False)
    f = Function('f')
    assert (n1*f(n2)).replace(f, lambda x: x) == n1*n2
    assert (n3*f(n2)).replace(f, lambda x: x) == n3*n2
    # issue 16725
    assert S.Zero.replace(Wild('x'), 1) == 1
    # let the user override the default decision of False
    assert S.Zero.replace(Wild('x'), 1, exact=True) == 0
def test_find():
    """Test Expr.find with callables, classes, and Wild patterns; group=True
    returns a {match: count} dict instead of a set."""
    expr = (x + y + 2 + sin(3*x))
    assert expr.find(lambda u: u.is_Integer) == {S(2), S(3)}
    assert expr.find(lambda u: u.is_Symbol) == {x, y}
    assert expr.find(lambda u: u.is_Integer, group=True) == {S(2): 1, S(3): 1}
    assert expr.find(lambda u: u.is_Symbol, group=True) == {x: 2, y: 1}
    assert expr.find(Integer) == {S(2), S(3)}
    assert expr.find(Symbol) == {x, y}
    assert expr.find(Integer, group=True) == {S(2): 1, S(3): 1}
    assert expr.find(Symbol, group=True) == {x: 2, y: 1}
    a = Wild('a')
    expr = sin(sin(x)) + sin(x) + cos(x) + x
    assert expr.find(lambda u: type(u) is sin) == {sin(x), sin(sin(x))}
    assert expr.find(
        lambda u: type(u) is sin, group=True) == {sin(x): 2, sin(sin(x)): 1}
    assert expr.find(sin(a)) == {sin(x), sin(sin(x))}
    assert expr.find(sin(a), group=True) == {sin(x): 2, sin(sin(x)): 1}
    assert expr.find(sin) == {sin(x), sin(sin(x))}
    assert expr.find(sin, group=True) == {sin(x): 2, sin(sin(x)): 1}
def test_count():
    """Test Expr.count with callables, classes, literal values, and patterns."""
    expr = (x + y + 2 + sin(3*x))
    assert expr.count(lambda u: u.is_Integer) == 2
    assert expr.count(lambda u: u.is_Symbol) == 3
    assert expr.count(Integer) == 2
    assert expr.count(Symbol) == 3
    assert expr.count(2) == 1
    a = Wild('a')
    assert expr.count(sin) == 1
    assert expr.count(sin(a)) == 1
    assert expr.count(lambda u: type(u) is sin) == 1
    f = Function('f')
    assert f(x).count(f(x)) == 1
    assert f(x).diff(x).count(f(x)) == 1
    assert f(x).diff(x).count(x) == 2
def test_has_basics():
    """Test basic Expr.has queries: symbols, function heads, Derivative,
    class-level matches (Symbol, Wild), and the empty-argument case."""
    f = Function('f')
    g = Function('g')
    p = Wild('p')
    assert sin(x).has(x)
    assert sin(x).has(sin)
    assert not sin(x).has(y)
    assert not sin(x).has(cos)
    assert f(x).has(x)
    assert f(x).has(f)
    assert not f(x).has(y)
    assert not f(x).has(g)
    assert f(x).diff(x).has(x)
    assert f(x).diff(x).has(f)
    assert f(x).diff(x).has(Derivative)
    assert not f(x).diff(x).has(y)
    assert not f(x).diff(x).has(g)
    assert not f(x).diff(x).has(sin)
    assert (x**2).has(Symbol)
    assert not (x**2).has(Wild)
    assert (2*p).has(Wild)
    # has() with no arguments is vacuously False
    assert not x.has()
def test_has_multiple():
    """Test Expr.has with several arguments: True if ANY of them is present."""
    f = x**2*y + sin(2**t + log(z))
    assert f.has(x)
    assert f.has(y)
    assert f.has(z)
    assert f.has(t)
    assert not f.has(u)
    assert f.has(x, y, z, t)
    # u is absent, but the others are present, so this is still True
    assert f.has(x, y, z, t, u)
    i = Integer(4400)
    assert not i.has(x)
    assert (i*x**i).has(x)
    assert not (i*y**i).has(x)
    assert (i*y**i).has(x, y)
    assert not (i*y**i).has(x, z)
def test_has_piecewise():
    """Test that Expr.has looks inside every branch and condition of a Piecewise."""
    f = (x*y + 3/y)**(3 + 2)
    g = Function('g')
    h = Function('h')
    p = Piecewise((g(x), x < -1), (1, x <= 1), (f, True))
    assert p.has(x)
    assert p.has(y)
    assert not p.has(z)
    assert p.has(1)
    assert p.has(3)
    assert not p.has(4)
    assert p.has(f)
    assert p.has(g)
    assert not p.has(h)
def test_has_iterative():
    """Test Expr.has with compound (Mul) patterns, including noncommutative
    factors where operand order matters, and boolean expressions."""
    A, B, C = symbols('A,B,C', commutative=False)
    f = x*gamma(x)*sin(x)*exp(x*y)*A*B*C*cos(x*A*B)
    assert f.has(x)
    assert f.has(x*y)
    assert f.has(x*sin(x))
    assert not f.has(x*sin(y))
    assert f.has(x*A)
    assert f.has(x*A*B)
    assert not f.has(x*A*C)
    assert f.has(x*A*B*C)
    # A*C*B is not a contiguous noncommutative subproduct of A*B*C
    assert not f.has(x*A*C*B)
    assert f.has(x*sin(x)*A*B*C)
    assert not f.has(x*sin(x)*A*C*B)
    assert not f.has(x*sin(y)*A*B*C)
    assert f.has(x*gamma(x))
    assert not f.has(x + sin(x))
    assert (x & y & z).has(x & z)
def test_has_integrals():
    """Test Expr.has on an Integral: sub-sums/products of the limits and
    integrand are found, but combinations not literally present are not."""
    f = Integral(x**2 + sin(x*y*z), (x, 0, x + y + z))
    assert f.has(x + y)
    assert f.has(x + z)
    assert f.has(y + z)
    assert f.has(x*y)
    assert f.has(x*z)
    assert f.has(y*z)
    assert not f.has(2*x + y)
    assert not f.has(2*x*y)
def test_has_tuple():
    """Test Expr.has on Tuple containers of symbols and applied functions."""
    f = Function('f')
    g = Function('g')
    h = Function('h')
    assert Tuple(x, y).has(x)
    assert not Tuple(x, y).has(z)
    assert Tuple(f(x), g(x)).has(x)
    assert not Tuple(f(x), g(x)).has(y)
    assert Tuple(f(x), g(x)).has(f)
    assert Tuple(f(x), g(x)).has(f(x))
    assert not Tuple(f, g).has(x)
    assert Tuple(f, g).has(f)
    assert not Tuple(f, g).has(h)
    assert Tuple(True).has(True) is True  # .has(1) will also be True
def test_has_units():
    """Test Expr.has on expressions containing physical units."""
    from sympy.physics.units import m, s
    assert (x*m/s).has(x)
    assert (x*m/s).has(y, z) is False
def test_has_polys():
    """Test Basic.has on Poly objects, including extra/absent generators."""
    poly = Poly(x**2 + x*y*sin(z), x, y, t)
    assert poly.has(x)
    assert poly.has(x, y, z)
    assert poly.has(x, y, z, t)
def test_has_physics():
    """Test Basic.has on a physics object (secondquant FockState)."""
    assert FockState((x, y)).has(x)
def test_as_poly_as_expr():
    """Test the round trip Expr.as_poly()/Poly.as_expr(); non-polynomial
    expressions give None and non-Expr containers raise AttributeError."""
    f = x**2 + 2*x*y
    assert f.as_poly().as_expr() == f
    assert f.as_poly(x, y).as_expr() == f
    assert (f + sin(x)).as_poly(x, y) is None
    p = Poly(f, x, y)
    assert p.as_poly() == p
    raises(AttributeError, lambda: Tuple(x, x).as_poly(x))
    raises(AttributeError, lambda: Tuple(x ** 2, x, y).as_poly(x))
def test_nonzero():
    """Test bool() of expressions: only an expression that evaluates to the
    number zero is falsy; any symbolic (possibly-nonzero) expression is truthy."""
    assert bool(S.Zero) is False
    assert bool(S.One) is True
    assert bool(x) is True
    assert bool(x + y) is True
    assert bool(x - x) is False
    assert bool(x*y) is True
    assert bool(x*1) is True
    assert bool(x*0) is False
def test_is_number():
    """Test Expr.is_number: True for free-symbol-free expressions, False
    otherwise, and False (by default) for non-Expr Basic subclasses."""
    assert Float(3.14).is_number is True
    assert Integer(737).is_number is True
    assert Rational(3, 2).is_number is True
    assert Rational(8).is_number is True
    assert x.is_number is False
    assert (2*x).is_number is False
    assert (x + y).is_number is False
    assert log(2).is_number is True
    assert log(x).is_number is False
    assert (2 + log(2)).is_number is True
    assert (8 + log(2)).is_number is True
    assert (2 + log(x)).is_number is False
    assert (8 + log(2) + x).is_number is False
    # x**2/x - x auto-simplifies to 0, leaving the number 1
    assert (1 + x**2/x - x).is_number is True
    assert Tuple(Integer(1)).is_number is False
    assert Add(2, x).is_number is False
    assert Mul(3, 4).is_number is True
    assert Pow(log(2), 2).is_number is True
    assert oo.is_number is True
    g = WildFunction('g')
    assert g.is_number is False
    assert (2*g).is_number is False
    assert (x**2).subs(x, 3).is_number is True
    # test extensibility of .is_number
    # on subinstances of Basic
    class A(Basic):
        pass
    a = A()
    assert a.is_number is False
def test_as_coeff_add():
    """Test Expr.as_coeff_add: split into (rational constant, other terms)
    without expanding the expression."""
    assert S(2).as_coeff_add() == (2, ())
    assert S(3.0).as_coeff_add() == (0, (S(3.0),))
    assert S(-3.0).as_coeff_add() == (0, (S(-3.0),))
    assert x.as_coeff_add() == (0, (x,))
    assert (x - 1).as_coeff_add() == (-1, (x,))
    assert (x + 1).as_coeff_add() == (1, (x,))
    assert (x + 2).as_coeff_add() == (2, (x,))
    assert (x + y).as_coeff_add(y) == (x, (y,))
    assert (3*x).as_coeff_add(y) == (3*x, ())
    # don't do expansion
    e = (x + y)**2
    assert e.as_coeff_add(y) == (0, (e,))
def test_as_coeff_mul():
    """Test Expr.as_coeff_mul: split into (rational coefficient, other
    factors); rational=False lets a Float be pulled out as the coefficient."""
    assert S(2).as_coeff_mul() == (2, ())
    assert S(3.0).as_coeff_mul() == (1, (S(3.0),))
    assert S(-3.0).as_coeff_mul() == (-1, (S(3.0),))
    assert S(-3.0).as_coeff_mul(rational=False) == (-S(3.0), ())
    assert x.as_coeff_mul() == (1, (x,))
    assert (-x).as_coeff_mul() == (-1, (x,))
    assert (2*x).as_coeff_mul() == (2, (x,))
    assert (x*y).as_coeff_mul(y) == (x, (y,))
    assert (3 + x).as_coeff_mul() == (1, (3 + x,))
    assert (3 + x).as_coeff_mul(y) == (3 + x, ())
    # don't do expansion
    e = exp(x + y)
    assert e.as_coeff_mul(y) == (1, (e,))
    e = 2**(x + y)
    assert e.as_coeff_mul(y) == (1, (e,))
    assert (1.1*x).as_coeff_mul(rational=False) == (1.1, (x,))
    assert (1.1*x).as_coeff_mul() == (1, (1.1, x))
    assert (-oo*x).as_coeff_mul(rational=True) == (-1, (oo, x))
def test_as_coeff_exponent():
    """Test Expr.as_coeff_exponent(x): factor as c*x**e and return (c, e)."""
    assert (3*x**4).as_coeff_exponent(x) == (3, 4)
    assert (2*x**3).as_coeff_exponent(x) == (2, 3)
    assert (4*x**2).as_coeff_exponent(x) == (4, 2)
    assert (6*x**1).as_coeff_exponent(x) == (6, 1)
    assert (3*x**0).as_coeff_exponent(x) == (3, 0)
    assert (2*x**0).as_coeff_exponent(x) == (2, 0)
    assert (1*x**0).as_coeff_exponent(x) == (1, 0)
    assert (0*x**0).as_coeff_exponent(x) == (0, 0)
    assert (-1*x**0).as_coeff_exponent(x) == (-1, 0)
    assert (-2*x**0).as_coeff_exponent(x) == (-2, 0)
    assert (2*x**3 + pi*x**3).as_coeff_exponent(x) == (2 + pi, 3)
    assert (x*log(2)/(2*x + pi*x)).as_coeff_exponent(x) == \
        (log(2)/(2 + pi), 0)
    # issue 4784
    D = Derivative
    f = Function('f')
    fx = D(f(x), x)
    # f(x) does not factor out of its own derivative: exponent 0
    assert fx.as_coeff_exponent(f(x)) == (fx, 0)
def test_extractions():
    """Test extract_multiplicatively, extract_additively, and
    could_extract_minus_sign (None is returned when extraction fails)."""
    assert ((x*y)**3).extract_multiplicatively(x**2 * y) == x*y**2
    assert ((x*y)**3).extract_multiplicatively(x**4 * y) is None
    assert (2*x).extract_multiplicatively(2) == x
    assert (2*x).extract_multiplicatively(3) is None
    assert (2*x).extract_multiplicatively(-1) is None
    assert (S.Half*x).extract_multiplicatively(3) == x/6
    assert (sqrt(x)).extract_multiplicatively(x) is None
    assert (sqrt(x)).extract_multiplicatively(1/x) is None
    assert x.extract_multiplicatively(-x) is None
    assert (-2 - 4*I).extract_multiplicatively(-2) == 1 + 2*I
    assert (-2 - 4*I).extract_multiplicatively(3) is None
    assert (-2*x - 4*y - 8).extract_multiplicatively(-2) == x + 2*y + 4
    assert (-2*x*y - 4*x**2*y).extract_multiplicatively(-2*y) == 2*x**2 + x
    assert (2*x*y + 4*x**2*y).extract_multiplicatively(2*y) == 2*x**2 + x
    assert (-4*y**2*x).extract_multiplicatively(-3*y) is None
    assert (2*x).extract_multiplicatively(1) == 2*x
    assert (-oo).extract_multiplicatively(5) is -oo
    assert (oo).extract_multiplicatively(5) is oo
    assert ((x*y)**3).extract_additively(1) is None
    assert (x + 1).extract_additively(x) == 1
    assert (x + 1).extract_additively(2*x) is None
    assert (x + 1).extract_additively(-x) is None
    assert (-x + 1).extract_additively(2*x) is None
    assert (2*x + 3).extract_additively(x) == x + 3
    assert (2*x + 3).extract_additively(2) == 2*x + 1
    assert (2*x + 3).extract_additively(3) == 2*x
    assert (2*x + 3).extract_additively(-2) is None
    assert (2*x + 3).extract_additively(3*x) is None
    assert (2*x + 3).extract_additively(2*x) == 3
    assert x.extract_additively(0) == x
    assert S(2).extract_additively(x) is None
    assert S(2.).extract_additively(2) is S.Zero
    assert S(2*x + 3).extract_additively(x + 1) == x + 2
    assert S(2*x + 3).extract_additively(y + 1) is None
    assert S(2*x - 3).extract_additively(x + 1) is None
    assert S(2*x - 3).extract_additively(y + z) is None
    assert ((a + 1)*x*4 + y).extract_additively(x).expand() == \
        4*a*x + 3*x + y
    assert ((a + 1)*x*4 + 3*y).extract_additively(x + 2*y).expand() == \
        4*a*x + 3*x + y
    assert (y*(x + 1)).extract_additively(x + 1) is None
    assert ((y + 1)*(x + 1) + 3).extract_additively(x + 1) == \
        y*(x + 1) + 3
    assert ((x + y)*(x + 1) + x + y + 3).extract_additively(x + y) == \
        x*(x + y) + 3
    assert (x + y + 2*((x + y)*(x + 1)) + 3).extract_additively((x + y)*(x + 1)) == \
        x + y + (x + 1)*(x + y) + 3
    assert ((y + 1)*(x + 2*y + 1) + 3).extract_additively(y + 1) == \
        (x + 2*y)*(y + 1) + 3
    n = Symbol("n", integer=True)
    assert (Integer(-3)).could_extract_minus_sign() is True
    assert (-n*x + x).could_extract_minus_sign() != \
        (n*x - x).could_extract_minus_sign()
    assert (x - y).could_extract_minus_sign() != \
        (-x + y).could_extract_minus_sign()
    assert (1 - x - y).could_extract_minus_sign() is True
    assert (1 - x + y).could_extract_minus_sign() is False
    assert ((-x - x*y)/y).could_extract_minus_sign() is True
    assert (-(x + x*y)/y).could_extract_minus_sign() is True
    assert ((x + x*y)/(-y)).could_extract_minus_sign() is True
    assert ((x + x*y)/y).could_extract_minus_sign() is False
    assert (x*(-x - x**3)).could_extract_minus_sign() is True
    assert ((-x - y)/(x + y)).could_extract_minus_sign() is True
    # an expression equal to its own negation must report False to avoid
    # infinite sign-flipping
    class sign_invariant(Function, Expr):
        nargs = 1
        def __neg__(self):
            return self
    foo = sign_invariant(x)
    assert foo == -foo
    assert foo.could_extract_minus_sign() is False
    # The results of each of these will vary on different machines, e.g.
    # the first one might be False and the other (then) is true or vice versa,
    # so both are included.
    assert ((-x - y)/(x - y)).could_extract_minus_sign() is False or \
        ((-x - y)/(y - x)).could_extract_minus_sign() is False
    assert (x - y).could_extract_minus_sign() is False
    assert (-x + y).could_extract_minus_sign() is True
    # check that result is canonical
    eq = (3*x + 15*y).extract_multiplicatively(3)
    assert eq.args == eq.func(*eq.args).args
def test_nan_extractions():
    """Nothing can be extracted from NaN, additively or multiplicatively."""
    for r in (1, 0, I, nan):
        assert nan.extract_additively(r) is None
        assert nan.extract_multiplicatively(r) is None
def test_coeff():
    """Test Expr.coeff for symbols, powers, compound patterns, the n=0
    ("everything without x") case, and noncommutative factors (right=...)."""
    assert (x + 1).coeff(x + 1) == 1
    assert (3*x).coeff(0) == 0
    assert (z*(1 + x)*x**2).coeff(1 + x) == z*x**2
    assert (1 + 2*x*x**(1 + x)).coeff(x*x**(1 + x)) == 2
    assert (1 + 2*x**(y + z)).coeff(x**(y + z)) == 2
    assert (3 + 2*x + 4*x**2).coeff(1) == 0
    assert (3 + 2*x + 4*x**2).coeff(-1) == 0
    assert (3 + 2*x + 4*x**2).coeff(x) == 2
    assert (3 + 2*x + 4*x**2).coeff(x**2) == 4
    assert (3 + 2*x + 4*x**2).coeff(x**3) == 0
    assert (-x/8 + x*y).coeff(x) == Rational(-1, 8) + y
    assert (-x/8 + x*y).coeff(-x) == S.One/8
    assert (4*x).coeff(2*x) == 0
    assert (2*x).coeff(2*x) == 1
    assert (-oo*x).coeff(x*oo) == -1
    assert (10*x).coeff(x, 0) == 0
    assert (10*x).coeff(10*x, 0) == 0
    n1, n2 = symbols('n1 n2', commutative=False)
    assert (n1*n2).coeff(n1) == 1
    assert (n1*n2).coeff(n2) == n1
    assert (n1*n2 + x*n1).coeff(n1) == 1  # 1*n1*(n2+x)
    assert (n2*n1 + x*n1).coeff(n1) == n2 + x
    assert (n2*n1 + x*n1**2).coeff(n1) == n2
    assert (n1**x).coeff(n1) == 0
    assert (n1*n2 + n2*n1).coeff(n1) == 0
    assert (2*(n1 + n2)*n2).coeff(n1 + n2, right=1) == n2
    assert (2*(n1 + n2)*n2).coeff(n1 + n2, right=0) == 2
    f = Function('f')
    assert (2*f(x) + 3*f(x).diff(x)).coeff(f(x)) == 2
    expr = z*(x + y)**2
    expr2 = z*(x + y)**2 + z*(2*x + 2*y)**2
    assert expr.coeff(z) == (x + y)**2
    assert expr.coeff(x + y) == 0
    assert expr2.coeff(z) == (x + y)**2 + (2*x + 2*y)**2
    assert (x + y + 3*z).coeff(1) == x + y
    assert (-x + 2*y).coeff(-1) == x
    assert (x - 2*y).coeff(-1) == 2*y
    assert (3 + 2*x + 4*x**2).coeff(1) == 0
    assert (-x - 2*y).coeff(2) == -y
    assert (x + sqrt(2)*x).coeff(sqrt(2)) == x
    assert (3 + 2*x + 4*x**2).coeff(x) == 2
    assert (3 + 2*x + 4*x**2).coeff(x**2) == 4
    assert (3 + 2*x + 4*x**2).coeff(x**3) == 0
    assert (z*(x + y)**2).coeff((x + y)**2) == z
    assert (z*(x + y)**2).coeff(x + y) == 0
    assert (2 + 2*x + (x + 1)*y).coeff(x + 1) == y
    assert (x + 2*y + 3).coeff(1) == x
    assert (x + 2*y + 3).coeff(x, 0) == 2*y + 3
    assert (x**2 + 2*y + 3*x).coeff(x**2, 0) == 2*y + 3*x
    assert x.coeff(0, 0) == 0
    assert x.coeff(x, 0) == 0
    n, m, o, l = symbols('n m o l', commutative=False)
    assert n.coeff(n) == 1
    assert y.coeff(n) == 0
    assert (3*n).coeff(n) == 3
    assert (2 + n).coeff(x*m) == 0
    assert (2*x*n*m).coeff(x) == 2*n*m
    assert (2 + n).coeff(x*m*n + y) == 0
    assert (2*x*n*m).coeff(3*n) == 0
    assert (n*m + m*n*m).coeff(n) == 1 + m
    assert (n*m + m*n*m).coeff(n, right=True) == m  # = (1 + m)*n*m
    assert (n*m + m*n).coeff(n) == 0
    assert (n*m + o*m*n).coeff(m*n) == o
    assert (n*m + o*m*n).coeff(m*n, right=1) == 1
    assert (n*m + n*m*n).coeff(n*m, right=1) == 1 + n  # = n*m*(n + 1)
    assert (x*y).coeff(z, 0) == x*y
def test_coeff2():
    """Test Expr.coeff against a first-order Derivative term in an expanded
    radial-Laplacian-style expression: the coefficient of psi'(r) is 2/r."""
    r = symbols('r')  # only r is needed; the original's unused kappa removed
    psi = Function("psi")
    g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2))
    # expand so that coeff() sees a plain Add of terms
    g = g.expand()
    assert g.coeff(psi(r).diff(r)) == 2/r
def test_coeff2_0():
    """Test Expr.coeff against a second-order Derivative term: the
    coefficient of psi''(r) after expansion is 1."""
    r = symbols('r')  # only r is needed; the original's unused kappa removed
    psi = Function("psi")
    g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2))
    # expand so that coeff() sees a plain Add of terms
    g = g.expand()
    assert g.coeff(psi(r).diff(r, 2)) == 1
def test_coeff_expand():
    """coeff() must not expand its argument: structurally distinct squared
    sums stay distinct in the result."""
    expr = z*(x + y)**2
    expr2 = z*(x + y)**2 + z*(2*x + 2*y)**2
    assert expr.coeff(z) == (x + y)**2
    assert expr2.coeff(z) == (x + y)**2 + (2*x + 2*y)**2
def test_integrate():
    """Test the Expr.integrate convenience method (indefinite and definite)."""
    assert x.integrate(x) == x**2/2
    assert x.integrate((x, 0, 1)) == S.Half
def test_as_base_exp():
    """Test Expr.as_base_exp: non-Pow expressions return (self, 1)."""
    assert x.as_base_exp() == (x, S.One)
    assert (x*y*z).as_base_exp() == (x*y*z, S.One)
    assert (x + y + z).as_base_exp() == (x + y + z, S.One)
    assert ((x + y)**z).as_base_exp() == (x + y, z)
def test_issue_4963():
    """Regression test for issue 4963: is_commutative must be present even
    on unevaluated Mul/Pow objects."""
    assert hasattr(Mul(x, y), "is_commutative")
    assert hasattr(Mul(x, y, evaluate=False), "is_commutative")
    assert hasattr(Pow(x, y), "is_commutative")
    assert hasattr(Pow(x, y, evaluate=False), "is_commutative")
    expr = Mul(Pow(2, 2, evaluate=False), 3, evaluate=False) + 1
    assert hasattr(expr, "is_commutative")
def test_action_verbs():
    """Each simplification function has a matching Expr method; check that
    the method form gives the same result as the function form."""
    assert nsimplify((1/(exp(3*pi*x/5) + 1))) == \
        (1/(exp(3*pi*x/5) + 1)).nsimplify()
    assert ratsimp(1/x + 1/y) == (1/x + 1/y).ratsimp()
    assert trigsimp(log(x), deep=True) == (log(x)).trigsimp(deep=True)
    assert radsimp(1/(2 + sqrt(2))) == (1/(2 + sqrt(2))).radsimp()
    assert radsimp(1/(a + b*sqrt(c)), symbolic=False) == \
        (1/(a + b*sqrt(c))).radsimp(symbolic=False)
    assert powsimp(x**y*x**z*y**z, combine='all') == \
        (x**y*x**z*y**z).powsimp(combine='all')
    assert (x**t*y**t).powsimp(force=True) == (x*y)**t
    assert simplify(x**y*x**z*y**z) == (x**y*x**z*y**z).simplify()
    assert together(1/x + 1/y) == (1/x + 1/y).together()
    assert collect(a*x**2 + b*x**2 + a*x - b*x + c, x) == \
        (a*x**2 + b*x**2 + a*x - b*x + c).collect(x)
    assert apart(y/(y + 2)/(y + 1), y) == (y/(y + 2)/(y + 1)).apart(y)
    assert combsimp(y/(x + 2)/(x + 1)) == (y/(x + 2)/(x + 1)).combsimp()
    assert gammasimp(gamma(x)/gamma(x-5)) == (gamma(x)/gamma(x-5)).gammasimp()
    assert factor(x**2 + 5*x + 6) == (x**2 + 5*x + 6).factor()
    assert refine(sqrt(x**2)) == sqrt(x**2).refine()
    assert cancel((x**2 + 5*x + 6)/(x + 2)) == ((x**2 + 5*x + 6)/(x + 2)).cancel()
def test_as_powers_dict():
    """Test Expr.as_powers_dict: a {base: exponent} defaultdict-like mapping
    (missing bases report exponent 0)."""
    assert x.as_powers_dict() == {x: 1}
    assert (x**y*z).as_powers_dict() == {x: y, z: 1}
    assert Mul(2, 2, evaluate=False).as_powers_dict() == {S(2): S(2)}
    assert (x*y).as_powers_dict()[z] == 0
    assert (x + y).as_powers_dict()[z] == 0
def test_as_coefficients_dict():
    """Test Expr.as_coefficients_dict: {term: coefficient} mapping, with 0
    for absent terms, on evaluated and unevaluated Adds and on Muls."""
    check = [S.One, x, y, x*y, 1]
    assert [Add(3*x, 2*x, y, 3).as_coefficients_dict()[i] for i in check] == \
        [3, 5, 1, 0, 3]
    assert [Add(3*x, 2*x, y, 3, evaluate=False).as_coefficients_dict()[i]
        for i in check] == [3, 5, 1, 0, 3]
    assert [(3*x*y).as_coefficients_dict()[i] for i in check] == \
        [0, 0, 0, 3, 0]
    assert [(3.0*x*y).as_coefficients_dict()[i] for i in check] == \
        [0, 0, 0, 3.0, 0]
    assert (3.0*x*y).as_coefficients_dict()[3.0*x*y] == 0
def test_args_cnc():
    """Test Expr.args_cnc: split args into [commutative, noncommutative];
    cset=True returns a set (and warns on repeated commutative args)."""
    A = symbols('A', commutative=False)
    assert (x + A).args_cnc() == \
        [[], [x + A]]
    assert (x + a).args_cnc() == \
        [[a + x], []]
    assert (x*a).args_cnc() == \
        [[a, x], []]
    assert (x*y*A*(A + 1)).args_cnc(cset=True) == \
        [{x, y}, [A, 1 + A]]
    assert Mul(x, x, evaluate=False).args_cnc(cset=True, warn=False) == \
        [{x}, []]
    assert Mul(x, x**2, evaluate=False).args_cnc(cset=True, warn=False) == \
        [{x, x**2}, []]
    raises(ValueError, lambda: Mul(x, x, evaluate=False).args_cnc(cset=True))
    assert Mul(x, y, x, evaluate=False).args_cnc() == \
        [[x, y, x], []]
    # always split -1 from leading number
    assert (-1.*x).args_cnc() == [[-1, 1.0, x], []]
def test_new_rawargs():
    """Test Expr._new_rawargs: rebuild an Add/Mul from raw args, re-deriving
    commutativity (unless reeval=False) and collapsing the identity element."""
    n = Symbol('n', commutative=False)
    a = x + n
    assert a.is_commutative is False
    assert a._new_rawargs(x).is_commutative
    assert a._new_rawargs(x, y).is_commutative
    assert a._new_rawargs(x, n).is_commutative is False
    assert a._new_rawargs(x, y, n).is_commutative is False
    m = x*n
    assert m.is_commutative is False
    assert m._new_rawargs(x).is_commutative
    assert m._new_rawargs(n).is_commutative is False
    assert m._new_rawargs(x, y).is_commutative
    assert m._new_rawargs(x, n).is_commutative is False
    assert m._new_rawargs(x, y, n).is_commutative is False
    assert m._new_rawargs(x, n, reeval=False).is_commutative is False
    assert m._new_rawargs(S.One) is S.One
def test_issue_5226():
    """Regression test for issue 5226: nullary Add/Mul give their identity,
    and a single-argument unevaluated Mul keeps its argument's head."""
    assert Add(evaluate=False) == 0
    assert Mul(evaluate=False) == 1
    assert Mul(x + y, evaluate=False).is_Add
def test_free_symbols():
    # free_symbols should return the free symbols of an object
    """Test free_symbols: bound integration variables are excluded; units
    carry no free symbols."""
    assert S.One.free_symbols == set()
    assert x.free_symbols == {x}
    assert Integral(x, (x, 1, y)).free_symbols == {y}
    assert (-Integral(x, (x, 1, y))).free_symbols == {y}
    assert meter.free_symbols == set()
    assert (meter**x).free_symbols == {x}
def test_issue_5300():
    """Regression test for issue 5300: radical simplification must work when
    multiplied by a noncommutative symbol."""
    x = Symbol('x', commutative=False)
    assert x*sqrt(2)/sqrt(6) == x*sqrt(3)/3
def test_floordiv():
    """Symbolic // should produce floor of the quotient."""
    from sympy.functions.elementary.integers import floor
    assert x // y == floor(x / y)
def test_as_coeff_Mul():
    """Test Expr.as_coeff_Mul: split into (numeric coefficient, remaining
    Mul); zero is treated as 0 = 1*0."""
    assert S.Zero.as_coeff_Mul() == (S.One, S.Zero)
    assert Integer(3).as_coeff_Mul() == (Integer(3), Integer(1))
    assert Rational(3, 4).as_coeff_Mul() == (Rational(3, 4), Integer(1))
    assert Float(5.0).as_coeff_Mul() == (Float(5.0), Integer(1))
    assert (Integer(3)*x).as_coeff_Mul() == (Integer(3), x)
    assert (Rational(3, 4)*x).as_coeff_Mul() == (Rational(3, 4), x)
    assert (Float(5.0)*x).as_coeff_Mul() == (Float(5.0), x)
    assert (Integer(3)*x*y).as_coeff_Mul() == (Integer(3), x*y)
    assert (Rational(3, 4)*x*y).as_coeff_Mul() == (Rational(3, 4), x*y)
    assert (Float(5.0)*x*y).as_coeff_Mul() == (Float(5.0), x*y)
    assert (x).as_coeff_Mul() == (S.One, x)
    assert (x*y).as_coeff_Mul() == (S.One, x*y)
    assert (-oo*x).as_coeff_Mul(rational=True) == (-1, oo*x)
def test_as_coeff_Add():
    """Test Expr.as_coeff_Add: split into (numeric constant, remaining Add);
    rational=True refuses to pull out a Float."""
    assert Integer(3).as_coeff_Add() == (Integer(3), Integer(0))
    assert Rational(3, 4).as_coeff_Add() == (Rational(3, 4), Integer(0))
    assert Float(5.0).as_coeff_Add() == (Float(5.0), Integer(0))
    assert (Integer(3) + x).as_coeff_Add() == (Integer(3), x)
    assert (Rational(3, 4) + x).as_coeff_Add() == (Rational(3, 4), x)
    assert (Float(5.0) + x).as_coeff_Add() == (Float(5.0), x)
    assert (Float(5.0) + x).as_coeff_Add(rational=True) == (0, Float(5.0) + x)
    assert (Integer(3) + x + y).as_coeff_Add() == (Integer(3), x + y)
    assert (Rational(3, 4) + x + y).as_coeff_Add() == (Rational(3, 4), x + y)
    assert (Float(5.0) + x + y).as_coeff_Add() == (Float(5.0), x + y)
    assert (x).as_coeff_Add() == (S.Zero, x)
    assert (x*y).as_coeff_Add() == (S.Zero, x*y)
def test_expr_sorting():
    """Each list below is already in default_sort_key order; sorting must be
    a no-op (covers powers, functions, containers, dicts, sets, Dummies)."""
    f, g = symbols('f,g', cls=Function)
    exprs = [1/x**2, 1/x, sqrt(sqrt(x)), sqrt(x), x, sqrt(x)**3, x**2]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [x, 2*x, 2*x**2, 2*x**3, x**n, 2*x**n, sin(x), sin(x)**n,
        sin(x**2), cos(x), cos(x**2), tan(x)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [x + 1, x**2 + x + 1, x**3 + x**2 + x + 1]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [S(4), x - 3*I/2, x + 3*I/2, x - 4*I + 1, x + 4*I + 1]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [f(x), g(x), exp(x), sin(x), cos(x), factorial(x)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [Tuple(x, y), Tuple(x, z), Tuple(x, y, z)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [[3], [1, 2]]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [[1, 2], [2, 3]]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [[1, 2], [1, 2, 3]]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [{x: -y}, {x: y}]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [{1}, {1, 2}]
    assert sorted(exprs, key=default_sort_key) == exprs
    # two Dummies with the same name sort by creation index
    a, b = exprs = [Dummy('x'), Dummy('x')]
    assert sorted([b, a], key=default_sort_key) == exprs
def test_as_ordered_factors():
    """Test Mul.as_ordered_factors; noncommutative products preserve their
    original order."""
    f, g = symbols('f,g', cls=Function)
    assert x.as_ordered_factors() == [x]
    assert (2*x*x**n*sin(x)*cos(x)).as_ordered_factors() \
        == [Integer(2), x, x**n, sin(x), cos(x)]
    args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
    expr = Mul(*args)
    assert expr.as_ordered_factors() == args
    A, B = symbols('A,B', commutative=False)
    assert (A*B).as_ordered_factors() == [A, B]
    assert (B*A).as_ordered_factors() == [B, A]
def test_as_ordered_terms():
    """Test Add.as_ordered_terms, including complex constants, the four
    monomial orders (lex/grlex and reversed), and data=True output."""
    f, g = symbols('f,g', cls=Function)
    assert x.as_ordered_terms() == [x]
    assert (sin(x)**2*cos(x) + sin(x)*cos(x)**2 + 1).as_ordered_terms() \
        == [sin(x)**2*cos(x), sin(x)*cos(x)**2, 1]
    args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
    expr = Add(*args)
    assert expr.as_ordered_terms() == args
    assert (1 + 4*sqrt(3)*pi*x).as_ordered_terms() == [4*pi*x*sqrt(3), 1]
    assert ( 2 + 3*I).as_ordered_terms() == [2, 3*I]
    assert (-2 + 3*I).as_ordered_terms() == [-2, 3*I]
    assert ( 2 - 3*I).as_ordered_terms() == [2, -3*I]
    assert (-2 - 3*I).as_ordered_terms() == [-2, -3*I]
    assert ( 4 + 3*I).as_ordered_terms() == [4, 3*I]
    assert (-4 + 3*I).as_ordered_terms() == [-4, 3*I]
    assert ( 4 - 3*I).as_ordered_terms() == [4, -3*I]
    assert (-4 - 3*I).as_ordered_terms() == [-4, -3*I]
    f = x**2*y**2 + x*y**4 + y + 2
    assert f.as_ordered_terms(order="lex") == [x**2*y**2, x*y**4, y, 2]
    assert f.as_ordered_terms(order="grlex") == [x*y**4, x**2*y**2, y, 2]
    assert f.as_ordered_terms(order="rev-lex") == [2, y, x*y**4, x**2*y**2]
    assert f.as_ordered_terms(order="rev-grlex") == [2, y, x**2*y**2, x*y**4]
    k = symbols('k')
    assert k.as_ordered_terms(data=True) == ([(k, ((1.0, 0.0), (1,), ()))], [k])
def test_sort_key_atomic_expr():
    """Units (AtomicExpr subclasses) must be sortable via sort_key."""
    from sympy.physics.units import m, s
    assert sorted([-m, s], key=lambda arg: arg.sort_key()) == [-m, s]
def test_eval_interval():
    """Test Expr._eval_interval F(b) - F(a), including NaN from infinite
    endpoints, the error on missing limits, and Heaviside discontinuities."""
    assert exp(x)._eval_interval(*Tuple(x, 0, 1)) == exp(1) - exp(0)
    # issue 4199
    # first subs and limit gives NaN
    a = x/y
    assert a._eval_interval(x, S.Zero, oo)._eval_interval(y, oo, S.Zero) is S.NaN
    # second subs and limit gives NaN
    assert a._eval_interval(x, S.Zero, oo)._eval_interval(y, S.Zero, oo) is S.NaN
    # difference gives S.NaN
    a = x - y
    assert a._eval_interval(x, S.One, oo)._eval_interval(y, oo, S.One) is S.NaN
    raises(ValueError, lambda: x._eval_interval(x, None, None))
    a = -y*Heaviside(x - y)
    assert a._eval_interval(x, -oo, oo) == -y
    assert a._eval_interval(x, oo, -oo) == y
def test_eval_interval_zoo():
    # Test that limit is used when zoo is returned
    assert Si(1/x)._eval_interval(x, S.Zero, S.One) == -pi/2 + Si(1)
def test_primitive():
    """Test Expr.primitive: pull out the rational content as (content,
    primitive part); Floats keep content 1 unless a Rational forces scaling."""
    assert (3*(x + 1)**2).primitive() == (3, (x + 1)**2)
    assert (6*x + 2).primitive() == (2, 3*x + 1)
    assert (x/2 + 3).primitive() == (S.Half, x + 6)
    eq = (6*x + 2)*(x/2 + 3)
    assert eq.primitive()[0] == 1
    eq = (2 + 2*x)**2
    assert eq.primitive()[0] == 1
    assert (4.0*x).primitive() == (1, 4.0*x)
    assert (4.0*x + y/2).primitive() == (S.Half, 8.0*x + y)
    assert (-2*x).primitive() == (2, -x)
    assert Add(5*z/7, 0.5*x, 3*y/2, evaluate=False).primitive() == \
        (S.One/14, 7.0*x + 21*y + 10*z)
    # infinities absorb multiplication but the finite terms still scale
    for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
        assert (i + x/3).primitive() == \
            (S.One/3, i + x)
    assert (S.Infinity + 2*x/3 + 4*y/7).primitive() == \
        (S.One/21, 14*x + 12*y + oo)
    assert S.Zero.primitive() == (S.One, S.Zero)
def test_issue_5843():
    """Regression test for issue 5843: extract_multiplicatively with a
    compound (Add) factor."""
    a = 1 + x
    assert (2*a).extract_multiplicatively(a) == 2
    assert (4*a).extract_multiplicatively(2*a) == 2
    assert ((3*a)*(2*a)).extract_multiplicatively(a) == 6*a
def test_is_constant():
    """Test Expr.is_constant() with respect to all or selected symbols.

    Fixes two defects in the original: the first seven checks were bare
    expressions with no ``assert`` (they never tested anything), and one
    ``checksol`` assertion was duplicated verbatim.
    """
    from sympy.solvers.solvers import checksol
    assert Sum(x, (x, 1, 10)).is_constant() is True
    assert Sum(x, (x, 1, n)).is_constant() is False
    assert Sum(x, (x, 1, n)).is_constant(y) is True
    assert Sum(x, (x, 1, n)).is_constant(n) is False
    # x is a bound (dummy) variable of the Sum, so the Sum is constant in x
    assert Sum(x, (x, 1, n)).is_constant(x) is True
    eq = a*cos(x)**2 + a*sin(x)**2 - a
    # identically zero, so constant; verify by substitution as well
    assert eq.is_constant() is True
    assert eq.subs({x: pi, a: 2}) == eq.subs({x: pi, a: 3}) == 0
    assert x.is_constant() is False
    assert x.is_constant(y) is True
    assert checksol(x, x, Sum(x, (x, 1, n))) is False
    f = Function('f')
    # NOTE(review): f(1).is_constant is a bound method, hence always truthy;
    # presumably this only checks the attribute exists — confirm intent
    assert f(1).is_constant
    assert checksol(x, x, f(x)) is False
    assert Pow(x, S.Zero, evaluate=False).is_constant() is True  # == 1
    assert Pow(S.Zero, x, evaluate=False).is_constant() is False  # == 0 or 1
    assert (2**x).is_constant() is False
    assert Pow(S(2), S(3), evaluate=False).is_constant() is True
    z1, z2 = symbols('z1 z2', zero=True)
    assert (z1 + 2*z2).is_constant() is True
    assert meter.is_constant() is True
    assert (3*meter).is_constant() is True
    assert (x*meter).is_constant() is False
    assert Poly(3, x).is_constant() is True
def test_equals():
    """Test Expr.equals: mathematical (not structural) equality, including
    radicals, trig identities, units, and a large algebraic-number case."""
    assert (-3 - sqrt(5) + (-sqrt(10)/2 - sqrt(2)/2)**2).equals(0)
    assert (x**2 - 1).equals((x + 1)*(x - 1))
    assert (cos(x)**2 + sin(x)**2).equals(1)
    assert (a*cos(x)**2 + a*sin(x)**2).equals(a)
    r = sqrt(2)
    assert (-1/(r + r*x) + 1/r/(1 + x)).equals(0)
    assert factorial(x + 1).equals((x + 1)*factorial(x))
    assert sqrt(3).equals(2*sqrt(3)) is False
    assert (sqrt(5)*sqrt(3)).equals(sqrt(3)) is False
    assert (sqrt(5) + sqrt(3)).equals(0) is False
    assert (sqrt(5) + pi).equals(0) is False
    assert meter.equals(0) is False
    assert (3*meter**2).equals(0) is False
    eq = -(-1)**(S(3)/4)*6**(S.One/4) + (-6)**(S.One/4)*I
    if eq != 0:  # if canonicalization makes this zero, skip the test
        assert eq.equals(0)
    assert sqrt(x).equals(0) is False
    # from integrate(x*sqrt(1 + 2*x), x);
    # diff is zero only when assumptions allow
    i = 2*sqrt(2)*x**(S(5)/2)*(1 + 1/(2*x))**(S(5)/2)/5 + \
        2*sqrt(2)*x**(S(3)/2)*(1 + 1/(2*x))**(S(5)/2)/(-6 - 3/x)
    ans = sqrt(2*x + 1)*(6*x**2 + x - 1)/15
    diff = i - ans
    assert diff.equals(0) is False
    assert diff.subs(x, Rational(-1, 2)/2) == 7*sqrt(2)/120
    # there are regions for x for which the expression is True, for
    # example, when x < -1/2 or x > 0 the expression is zero
    p = Symbol('p', positive=True)
    assert diff.subs(x, p).equals(0) is True
    assert diff.subs(x, -1).equals(0) is True
    # prove via minimal_polynomial or self-consistency
    eq = sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3)) - sqrt(10 + 6*sqrt(3))
    assert eq.equals(0)
    q = 3**Rational(1, 3) + 3
    p = expand(q**3)**Rational(1, 3)
    assert (p - q).equals(0)
    # issue 6829
    # eq = q*x + q/4 + x**4 + x**3 + 2*x**2 - S.One/3
    # z = eq.subs(x, solve(eq, x)[0])
    q = symbols('q')
    z = (q*(-sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) -
        S(13)/12)/2 - sqrt((2*q - S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S.One/3) - S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S.One/3) - S(13)/6)/2 - S.One/4) + q/4 + (-sqrt(-2*(-(q
        - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) - S(13)/12)/2 - sqrt((2*q
        - S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) -
        S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) -
        S(13)/6)/2 - S.One/4)**4 + (-sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S.One/3) - S(13)/12)/2 - sqrt((2*q -
        S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) -
        S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) -
        S(13)/6)/2 - S.One/4)**3 + 2*(-sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S.One/3) - S(13)/12)/2 - sqrt((2*q -
        S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) -
        S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S.One/3) -
        S(13)/6)/2 - S.One/4)**2 - Rational(1, 3))
    assert z.equals(0)
def test_random():
    """Test Expr._random: returns a sample value when possible, None when
    the expression cannot be evaluated numerically."""
    from sympy import posify, lucas
    assert posify(x)[0]._random() is not None
    assert lucas(n)._random(2, -2, 0, -1, 1) is None
    # issue 8662
    assert Piecewise((Max(x, y), z))._random() is None
def test_round():
    """Exercise Expr.round: precision handling, banker's rounding,
    integer/Float return types, complex parts, and special values."""
    from sympy.abc import x
    assert str(Float('0.1249999').round(2)) == '0.12'
    d20 = 12345678901234567890
    ans = S(d20).round(2)
    # rounding an Integer to positive digits is a no-op and stays Integer
    assert ans.is_Integer and ans == d20
    ans = S(d20).round(-2)
    assert ans.is_Integer and ans == 12345678901234567900
    assert str(S('1/7').round(4)) == '0.1429'
    assert str(S('.[12345]').round(4)) == '0.1235'
    assert str(S('.1349').round(2)) == '0.13'
    n = S(12345)
    ans = n.round()
    assert ans.is_Integer
    assert ans == n
    ans = n.round(1)
    assert ans.is_Integer
    assert ans == n
    ans = n.round(4)
    assert ans.is_Integer
    assert ans == n
    assert n.round(-1) == 12340
    r = Float(str(n)).round(-4)
    assert r == 10000
    assert n.round(-5) == 0
    assert str((pi + sqrt(2)).round(2)) == '4.56'
    assert (10*(pi + sqrt(2))).round(-1) == 50
    raises(TypeError, lambda: round(x + 2, 2))
    assert str(S(2.3).round(1)) == '2.3'
    # rounding in SymPy (as in Decimal) should be
    # exact for the given precision; we check here
    # that when a 5 follows the last digit that
    # the rounded digit will be even (round-half-to-even).
    for i in range(-99, 100):
        # construct a decimal that ends in 5, e.g. 123 -> 0.1235
        s = str(abs(i))
        p = len(s)  # we are going to round to the last digit of i
        n = '0.%s5' % s  # put a 5 after i's digits
        j = p + 2  # 2 for '0.'
        if i < 0:  # 1 for '-'
            j += 1
            n = '-' + n
        v = str(Float(n).round(p))[:j]  # pertinent digits
        if v.endswith('.'):
            continue  # it ends with 0 which is even
        L = int(v[-1])  # last digit
        assert L % 2 == 0, (n, '->', v)
    assert (Float(.3, 3) + 2*pi).round() == 7
    assert (Float(.3, 3) + 2*pi*100).round() == 629
    assert (pi + 2*E*I).round() == 3 + 5*I
    # don't let request for extra precision give more than
    # what is known (in this case, only 3 digits)
    assert str((Float(.03, 3) + 2*pi/100).round(5)) == '0.0928'
    assert str((Float(.03, 3) + 2*pi/100).round(4)) == '0.0928'
    assert S.Zero.round() == 0
    # an unevaluated Add keeps full precision of its Float term
    a = (Add(1, Float('1.' + '9'*27, ''), evaluate=0))
    assert a.round(10) == Float('3.0000000000', '')
    assert a.round(25) == Float('3.0000000000000000000000000', '')
    assert a.round(26) == Float('3.00000000000000000000000000', '')
    assert a.round(27) == Float('2.999999999999999999999999999', '')
    assert a.round(30) == Float('2.999999999999999999999999999', '')
    # non-numeric expressions cannot be rounded
    raises(TypeError, lambda: x.round())
    f = Function('f')
    raises(TypeError, lambda: f(1).round())
    # exact magnitude of 10
    assert str(S.One.round()) == '1'
    assert str(S(100).round()) == '100'
    # applied to real and imaginary portions
    assert (2*pi + E*I).round() == 6 + 3*I
    assert (2*pi + I/10).round() == 6
    assert (pi/10 + 2*I).round() == 2*I
    # the lhs re and im parts are Float with dps of 2
    # and those on the right have dps of 15 so they won't compare
    # equal unless we use string or compare components (which will
    # then coerce the floats to the same precision) or re-create
    # the floats
    assert str((pi/10 + E*I).round(2)) == '0.31 + 2.72*I'
    assert str((pi/10 + E*I).round(2).as_real_imag()) == '(0.31, 2.72)'
    assert str((pi/10 + E*I).round(2)) == '0.31 + 2.72*I'
    # issue 6914
    assert (I**(I + 3)).round(3) == Float('-0.208', '')*I
    # issue 8720
    assert S(-123.6).round() == -124
    assert S(-1.5).round() == -2
    assert S(-100.5).round() == -100
    assert S(-1.5 - 10.5*I).round() == -2 - 10*I
    # issue 7961
    assert str(S(0.006).round(2)) == '0.01'
    assert str(S(0.00106).round(4)) == '0.0011'
    # issue 8147: special values round to themselves
    assert S.NaN.round() is S.NaN
    assert S.Infinity.round() is S.Infinity
    assert S.NegativeInfinity.round() is S.NegativeInfinity
    assert S.ComplexInfinity.round() is S.ComplexInfinity
    # check that types match
    for i in range(2):
        f = float(i)
        # 2 args
        assert all(type(round(i, p)) is int for p in (-1, 0, 1))
        assert all(S(i).round(p).is_Integer for p in (-1, 0, 1))
        assert all(type(round(f, p)) is float for p in (-1, 0, 1))
        assert all(S(f).round(p).is_Float for p in (-1, 0, 1))
        # 1 arg (p is None)
        assert type(round(i)) is int
        assert S(i).round().is_Integer
        assert type(round(f)) is int
        assert S(f).round().is_Integer
def test_held_expression_UnevaluatedExpr():
    """UnevaluatedExpr keeps its argument inert until doit() is called."""
    x = symbols("x")
    held = UnevaluatedExpr(1/x)

    # multiplying by a held value does not cancel
    product = x*held
    assert isinstance(product, Mul)
    assert product.args == (x, held)
    assert product.doit() == 1

    # doit(deep=False) unwraps only the outer UnevaluatedExpr
    assert UnevaluatedExpr(Derivative(x, x)).doit(deep=False
        ) == Derivative(x, x)
    assert UnevaluatedExpr(Derivative(x, x)).doit() == 1

    unevaluated_sq = Mul(x, x, evaluate=False)
    assert unevaluated_sq != x**2
    wrapped = UnevaluatedExpr(unevaluated_sq)
    assert isinstance(wrapped, UnevaluatedExpr)
    assert wrapped.args == (unevaluated_sq,)
    assert wrapped.doit() == x**2
    assert wrapped.doit(deep=False) == unevaluated_sq

    # arithmetic with a held number stays an unevaluated Mul
    two_prod = UnevaluatedExpr(2)*2
    assert type(two_prod) is Mul
    assert two_prod.args == (2, UnevaluatedExpr(2))
def test_round_exception_nostr():
    # The TypeError raised by round() must not embed the printed form of
    # the expression: building that string is too slow for large inputs.
    bad_sym = Symbol('bad')
    raised = False
    try:
        bad_sym.round()
    except TypeError as err:
        raised = True
        assert 'bad' not in str(err)
    if not raised:
        raise AssertionError("Did not raise")
def test_extract_branch_factor():
    # a full 2*pi polar turn extracts as branch factor 1, exponent 1
    result = exp_polar(2.0*I*pi).extract_branch_factor()
    assert result == (1, 1)


def test_identity_removal():
    # adding 0 / multiplying by 1 must leave no trace in the args
    assert Add.make_args(0 + x) == (x,)
    assert Mul.make_args(1*x) == (x,)


def test_float_0():
    assert 1 + Float(0.0) == Float(1.0)


@XFAIL
def test_float_0_fail():
    # known failures: Float(0.0) does not absorb/survive as expected
    assert Float(0.0)*x == Float(0.0)
    assert (x + Float(0.0)).is_Add
def test_issue_6325():
    """Second derivative of sqrt((a+b*t)**2 + (c+z*t)**2) simplifies to ans."""
    ans = (b**2 + z**2 - (b*(a + b*t) + z*(c + t*z))**2/(
        (a + b*t)**2 + (c + t*z)**2))/sqrt((a + b*t)**2 + (c + t*z)**2)
    e = sqrt((a + b*t)**2 + (c + z*t)**2)
    assert diff(e, t, 2) == ans
    # BUGFIX: this comparison was previously a bare expression whose result
    # was silently discarded; assert it so the method form is checked too.
    assert e.diff(t, 2) == ans
    # without simplification the raw derivative differs from ans
    assert diff(e, t, 2, simplify=False) != ans
def test_issue_7426():
    # Mods of unrelated symbols: equality cannot be decided either way.
    lhs = a % c
    rhs = x % z
    assert lhs.equals(rhs) is None


def test_issue_11122():
    x = Symbol('x', extended_positive=False)
    assert unchanged(Gt, x, 0)  # (x > 0)
    # (x > 0) should remain unevaluated after PR #16956
    x = Symbol('x', positive=False, real=True)
    assert (x > 0) is S.false


def test_issue_10651():
    x = Symbol('x', real=True)
    # each expression is 0 except on a measure-zero set, so constancy
    # cannot be decided
    undecidable = [
        (-1 + x)/(1 - x),
        (4*x**2 - 4)/((1 - x)*(1 + x)),
        1/(cos(x)**2) - (tan(x))**2,
    ]
    x = Symbol('x', positive=True)
    decidable = (1 + x)/x
    for expr in undecidable:
        assert expr.is_constant() is None
    assert decidable.is_constant() is False


def test_issue_10161():
    # for real x, abs(x)*abs(x) == x**2, so the product collapses
    x = symbols('x', real=True)
    assert abs(x)*abs(x)*x == x**3


def test_issue_10755():
    # log of a bare Symbol has no numeric value to convert or round
    x = symbols('x')
    raises(TypeError, lambda: int(log(x)))
    raises(TypeError, lambda: log(x).round(2))


def test_issue_11877():
    x = symbols('x')
    expected = Rational(-1, 2) - log(2)/2
    assert integrate(log(S.Half - x), (x, 0, S.Half)) == expected
def test_normal():
    x = symbols('x')
    # an unevaluated Mul is already in "normal" (p/q) form
    unevaluated = Mul(S.Half, 1 + x, evaluate=False)
    assert unevaluated.normal() == unevaluated


def test_expr():
    x = symbols('x')
    # series() rejects a non-integer number of terms
    raises(TypeError, lambda: tan(x).series(x, 2, oo, "+"))


def test_ExprBuilder():
    builder = ExprBuilder(Mul)
    builder.args.extend([x, x])
    assert builder.build() == x**2
def test_non_string_equality():
    # Expressions should never compare equal to their string form.
    x = symbols('x')
    one = sympify(1)
    for expr, text in [(x, 'x'), (one, '1'), (x + 1, 'x + 1')]:
        assert (expr == text) is False
        assert (expr != text) is True

    # == must not try to convert the other operand via its repr
    # (i.e. _sympify, not sympify, must be used internally)
    class BadRepr(object):
        def __repr__(self):
            raise RuntimeError

    assert (x == BadRepr()) is False
    assert (x != BadRepr()) is True
| 33.469165
| 92
| 0.557808
|
4a07690c1655c2d3f46da713d0eff86e1fbe7e9b
| 1,267
|
py
|
Python
|
home/Alessandruino/Tracking.faceDetection.py
|
sola1993/inmoov
|
34e7bb6e214bd9bf3eee808c19f0ab09ec79345f
|
[
"Apache-2.0"
] | 1
|
2021-02-24T17:05:52.000Z
|
2021-02-24T17:05:52.000Z
|
home/Alessandruino/Tracking.faceDetection.py
|
kwatters/pyrobotlab
|
87ef8975638c3d0f8e2d842ac2abe03da465fd51
|
[
"Apache-2.0"
] | null | null | null |
home/Alessandruino/Tracking.faceDetection.py
|
kwatters/pyrobotlab
|
87ef8975638c3d0f8e2d842ac2abe03da465fd51
|
[
"Apache-2.0"
] | 1
|
2021-02-24T17:05:56.000Z
|
2021-02-24T17:05:56.000Z
|
# A minimal MyRobotLab face-tracking script: it starts all peer services
# (Tracking, OpenCV, Arduino, Servos, PID) and attaches them appropriately.
# Change the parameters below to match your pan/tilt rig, servo pins and
# Arduino serial details.  All commented-out code is optional customisation.
port = "COM15"  # serial port passed to tracker.connect below
xServoPin = 13  # pan servo pin
yServoPin = 12  # tilt servo pin
tracker = Runtime.createAndStart("tracker", "Tracking")
# set specifics on each Servo: pin and safe travel limits (degrees)
servoX = tracker.getX()
servoX.setPin(xServoPin)
servoX.setMinMax(30, 150)
servoY = tracker.getY()
servoY.setPin(yServoPin)
servoY.setMinMax(30, 150)
# changing PID values changes the speed and "jumpyness" of the Servos
# (xpid/ypid are kept here so the tuning lines below can be uncommented)
xpid = tracker.getXPID()
ypid = tracker.getYPID()
# these are default settings; adjust to make movement smoother or faster
# xpid.setPID(5.0, 5.0, 0.1)
# ypid.setPID(5.0, 5.0, 0.1)
# optional filter settings
opencv = tracker.getOpenCV()
# setting camera index to 1, default is 0
opencv.setCameraIndex(1)
# connect to the Arduino
tracker.connect(port)
# Gray & PyramidDown make face tracking
# faster - if you dont like these filters - you
# may remove them before you select a tracking type with
# the following command
# tracker.clearPreFilters()
# simple face detection and tracking
tracker.faceDetect()
# scans for faces - tracks if found
# tracker.findFace()
| 23.462963
| 56
| 0.751381
|
4a076958bede620f626036ec4c79c754c347e681
| 788
|
py
|
Python
|
blog/migrations/0003_blogpage_body.py
|
StratoBallooning/website
|
dad8db401279674d03355fdf5557c73074ff851f
|
[
"MIT"
] | null | null | null |
blog/migrations/0003_blogpage_body.py
|
StratoBallooning/website
|
dad8db401279674d03355fdf5557c73074ff851f
|
[
"MIT"
] | 19
|
2016-02-17T03:41:17.000Z
|
2016-02-24T03:01:18.000Z
|
blog/migrations/0003_blogpage_body.py
|
StratoBallooning/website
|
dad8db401279674d03355fdf5557c73074ff851f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-13 22:26
from __future__ import unicode_literals
from django.db import migrations
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
    """Add the ``body`` StreamField to ``BlogPage``.

    The field offers three Wagtail block types (heading / paragraph /
    image); ``default={}`` only populates pre-existing rows during the
    migration, hence ``preserve_default=False``.
    """
    dependencies = [
        ('blog', '0002_blogindexpage'),
    ]
    operations = [
        migrations.AddField(
            model_name='blogpage',
            name='body',
            field=wagtail.wagtailcore.fields.StreamField([('heading', wagtail.wagtailcore.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())], default={}),
            preserve_default=False,
        ),
    ]
| 31.52
| 266
| 0.686548
|
4a076a3dd6870c220e39bb67ee7bf8ad3b00b05e
| 3,976
|
py
|
Python
|
airflow/sensors/named_hive_partition_sensor.py
|
abhishek-ch/incubator-airflow
|
3358551c8e73d9019900f7a85f18ebfd88591450
|
[
"Apache-2.0"
] | 4
|
2019-01-17T06:21:45.000Z
|
2020-06-20T01:59:57.000Z
|
airflow/sensors/named_hive_partition_sensor.py
|
abhishek-ch/incubator-airflow
|
3358551c8e73d9019900f7a85f18ebfd88591450
|
[
"Apache-2.0"
] | 14
|
2018-10-24T03:15:11.000Z
|
2019-01-02T19:02:58.000Z
|
airflow/sensors/named_hive_partition_sensor.py
|
abhishek-ch/incubator-airflow
|
3358551c8e73d9019900f7a85f18ebfd88591450
|
[
"Apache-2.0"
] | 6
|
2018-12-04T12:15:23.000Z
|
2020-11-23T03:51:41.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from past.builtins import basestring
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class NamedHivePartitionSensor(BaseSensorOperator):
    """
    Waits for a set of partitions to show up in Hive.

    :param partition_names: List of fully qualified names of the
        partitions to wait for. A fully qualified name is of the
        form ``schema.table/pk1=pv1/pk2=pv2``, for example,
        default.users/ds=2016-01-01. This is passed as is to the metastore
        Thrift client ``get_partitions_by_name`` method. Note that
        you cannot use logical or comparison operators as in
        HivePartitionSensor.
    :type partition_names: list of strings
    :param metastore_conn_id: reference to the metastore thrift service
        connection id
    :type metastore_conn_id: str
    """
    template_fields = ('partition_names',)
    ui_color = '#8d99ae'

    @apply_defaults
    def __init__(self,
                 partition_names,
                 metastore_conn_id='metastore_default',
                 poke_interval=60 * 3,
                 hook=None,
                 *args,
                 **kwargs):
        super(NamedHivePartitionSensor, self).__init__(
            poke_interval=poke_interval, *args, **kwargs)
        # a plain string would iterate as characters; require a list
        if isinstance(partition_names, basestring):
            raise TypeError('partition_names must be an array of strings')
        self.metastore_conn_id = metastore_conn_id
        self.partition_names = partition_names
        self.hook = hook
        if self.hook and metastore_conn_id != 'metastore_default':
            # BUGFIX: the original implicit string concatenation was missing
            # spaces and rendered as "...non defaultmetastore_conn_id=...";
            # also use lazy %-style args per logging best practice.
            self.log.warning(
                'A hook was passed but a non default '
                'metastore_conn_id=%s was used', metastore_conn_id)

    @staticmethod
    def parse_partition_name(partition):
        """Split ``schema.table/partition`` into (schema, table, partition).

        The schema defaults to ``'default'`` when no dot is present.

        :raises ValueError: if no ``/`` separates table from partition spec.
        """
        first_split = partition.split('.', 1)
        if len(first_split) == 1:
            schema = 'default'
            table_partition = max(first_split)  # poor man first
        else:
            schema, table_partition = first_split
        second_split = table_partition.split('/', 1)
        if len(second_split) == 1:
            # BUGFIX: add the space that was missing before 'into'
            raise ValueError('Could not parse ' + partition +
                             ' into table, partition')
        else:
            table, partition = second_split
        return schema, table, partition

    def poke_partition(self, partition):
        """Return True if the named partition exists in the metastore."""
        if not self.hook:
            # import lazily so this module imports without Hive dependencies
            from airflow.hooks.hive_hooks import HiveMetastoreHook
            self.hook = HiveMetastoreHook(
                metastore_conn_id=self.metastore_conn_id)
        schema, table, partition = self.parse_partition_name(partition)
        self.log.info(
            'Poking for {schema}.{table}/{partition}'.format(**locals())
        )
        return self.hook.check_for_named_partition(
            schema, table, partition)

    def poke(self, context):
        # drop partitions as they appear; the sensor succeeds once the
        # pending list is empty
        self.partition_names = [
            partition_name for partition_name in self.partition_names
            if not self.poke_partition(partition_name)
        ]
        return not self.partition_names
| 37.866667
| 74
| 0.657948
|
4a076ab4bdef701809ba3db11cb8e429bd5cfee3
| 11,848
|
py
|
Python
|
padertorch/contrib/examples/source_separation/tasnet/train.py
|
boeddeker/padertorch
|
2298a4e6cb1368e50add0b78bc751fd6a13129a8
|
[
"MIT"
] | null | null | null |
padertorch/contrib/examples/source_separation/tasnet/train.py
|
boeddeker/padertorch
|
2298a4e6cb1368e50add0b78bc751fd6a13129a8
|
[
"MIT"
] | null | null | null |
padertorch/contrib/examples/source_separation/tasnet/train.py
|
boeddeker/padertorch
|
2298a4e6cb1368e50add0b78bc751fd6a13129a8
|
[
"MIT"
] | null | null | null |
"""
Example call on NT infrastructure:
export STORAGE_ROOT=<your desired storage root>
export OMP_NUM_THREADS=1
export MKL_NUM_THREADS=1
python -m padertorch.contrib.examples.source_separation.tasnet.train with database_json=${paths to your JSON}
"""
import os
import numpy as np
import paderbox as pb
import sacred.commands
import torch
from lazy_dataset.database import JsonDatabase
from pathlib import Path
from sacred import Experiment
from sacred.observers.file_storage import FileStorageObserver
from sacred.utils import InvalidConfigError, MissingConfigError
import padertorch as pt
import padertorch.contrib.examples.source_separation.tasnet.model
from padertorch.data.segment import Segmenter
sacred.SETTINGS.CONFIG.READ_ONLY_CONFIG = False
experiment_name = "tasnet"
ex = Experiment(experiment_name)
JSON_BASE = os.environ.get('NT_DATABASE_JSONS_DIR', None)
@ex.config
def config():
    # Sacred config function: every local below becomes a config entry.
    debug = False
    batch_size = 4  # Runs on 4GB GPU mem. Can safely be set to 12 on 12 GB (e.g., GTX1080)
    chunk_size = 32000  # 4s chunks @8kHz
    train_dataset = "mix_2_spk_min_tr"
    validate_dataset = "mix_2_spk_min_cv"
    target = 'speech_source'
    lr_scheduler_step = 2
    lr_scheduler_gamma = 0.98
    load_model_from = None  # optional path to warm-start weights
    database_json = None
    # fall back to the site-wide JSON directory when available
    if database_json is None and JSON_BASE:
        database_json = Path(JSON_BASE) / 'wsj0_2mix_8k.json'
    if database_json is None:
        raise MissingConfigError(
            'You have to set the path to the database JSON!', 'database_json')
    if not Path(database_json).exists():
        raise InvalidConfigError('The database JSON does not exist!',
                                 'database_json')
    feat_size = 64
    encoder_window_size = 16
    # trainer config resolved by padertorch's Configurable machinery
    trainer = {
        "model": {
            "factory": padertorch.contrib.examples.source_separation.tasnet.TasNet,
            'encoder': {
                'factory': padertorch.contrib.examples.source_separation.tasnet.tas_coders.TasEncoder,
                'window_length': encoder_window_size,
                'feature_size': feat_size,
            },
            'decoder': {
                'factory': padertorch.contrib.examples.source_separation.tasnet.tas_coders.TasDecoder,
                'window_length': encoder_window_size,
                'feature_size': feat_size,
            },
        },
        "storage_dir": None,
        "optimizer": {
            "factory": pt.optimizer.Adam,
            "gradient_clipping": 1
        },
        "summary_trigger": (1000, "iteration"),
        "stop_trigger": (100, "epoch"),
        # only si-sdr is active by default; see the loss named_configs
        "loss_weights": {
            "si-sdr": 1.0,
            "log-mse": 0.0,
            "log1p-mse": 0.0,
        }
    }
    pt.Trainer.get_config(trainer)
    if trainer['storage_dir'] is None:
        trainer['storage_dir'] = pt.io.get_new_storage_dir(experiment_name)
    # record the sacred run inside the storage dir
    ex.observers.append(FileStorageObserver(
        Path(trainer['storage_dir']) / 'sacred')
    )
@ex.named_config
def win2():
    """
    This is the configuration for the best performing model from the DPRNN
    paper. Training takes very long time with this configuration.
    """
    # The model becomes very memory consuming with this small window size.
    # You might have to reduce the chunk size as well.
    batch_size = 1
    trainer = {
        'model': {
            'encoder': {
                'window_length': 2
            },
            'separator': {
                'window_length': 250,
                'hop_size': 125,  # Half of window length
            },
            'decoder': {
                'window_length': 2
            }
        }
    }


@ex.named_config
def stft():
    """
    Use the STFT and iSTFT as encoder and decoder instead of a learned
    transformation
    """
    trainer = {
        'model': {
            'encoder': {
                'factory': 'padertorch.contrib.examples.source_separation.tasnet.tas_coders.StftEncoder'
            },
            'decoder': {
                'factory': 'padertorch.contrib.examples.source_separation.tasnet.tas_coders.IstftDecoder'
            },
        }
    }


@ex.named_config
def dprnn():
    """Use a Dual-Path RNN separator."""
    trainer = {'model': {'separator': {
        'factory': pt.modules.dual_path_rnn.DPRNN,
        'input_size': 64,
        'rnn_size': 128,
        'window_length': 100,
        'hop_size': 50,
        'num_blocks': 6,
    }}}


@ex.named_config
def convnet():
    """Use a (Conv-TasNet style) convolutional separator."""
    feat_size = 256
    trainer = {'model': {'separator': {
        'factory': 'padertorch.modules.convnet.ConvNet',
        'input_size': feat_size,
        'num_blocks': 8,
        'num_repeats': 4,
        'hidden_channels': 512,
        'kernel_size': 3,
        'norm': "gLN",
    }}}


@ex.named_config
def log_mse():
    """
    Use the log_mse loss
    """
    trainer = {
        'loss_weights': {
            'si-sdr': 0.0,
            'log-mse': 1.0,
        }
    }


@ex.named_config
def log1p_mse():
    """
    Use the log1p_mse loss
    """
    trainer = {
        'loss_weights': {
            'si-sdr': 0.0,
            'log1p-mse': 1.0,
        }
    }


@ex.named_config
def on_wsj0_2mix_max():
    """Train on the max-length WSJ0-2mix variant without chunking."""
    chunk_size = -1  # -1 disables chunking; use full-length examples
    train_dataset = "mix_2_spk_max_tr"
    validate_dataset = "mix_2_spk_max_cv"
@ex.capture
def pre_batch_transform(inputs):
    """Load the audio for one example and arrange it for batching."""
    paths = inputs['audio_path']
    # stack the clean source signals into one float32 array
    sources = [pb.io.load_audio(source_path)
               for source_path in paths['speech_source']]
    return {
        's': np.ascontiguousarray(sources, np.float32),
        'y': np.ascontiguousarray(
            pb.io.load_audio(paths['observation']), np.float32),
        'num_samples': inputs['num_samples'],
        'example_id': inputs['example_id'],
        'audio_path': inputs['audio_path'],
    }
def prepare_dataset(
        db, dataset: str, batch_size, chunk_size, shuffle=True,
        prefetch=True, dataset_slice=None,
):
    """
    Build the lazy input pipeline for one dataset split.

    This is re-used in the evaluate script.
    ``chunk_size == -1`` disables chunking and keeps full-length examples.
    """
    dataset = db.get_dataset(dataset)
    if dataset_slice is not None:
        dataset = dataset[dataset_slice]
    # cut random segments for training, left-aligned segments otherwise
    segmenter = Segmenter(
        chunk_size, include_keys=('y', 's'), axis=-1,
        anchor='random' if shuffle else 'left',
    )
    def _set_num_samples(example):
        # num_samples must reflect the (possibly chunked) signal length
        example['num_samples'] = example['y'].shape[-1]
        return example
    if shuffle:
        dataset = dataset.shuffle(reshuffle=True)
    dataset = dataset.map(pre_batch_transform)
    dataset = dataset.map(segmenter)
    # FilterExceptions are only raised inside the chunking code if the
    # example is too short. If chunk_size == -1, no filter exception is raised.
    catch_exception = segmenter.length > 0
    if prefetch:
        dataset = dataset.prefetch(
            8, 16, catch_filter_exception=catch_exception)
    elif catch_exception:
        dataset = dataset.catch()
    if chunk_size != -1:
        # the segmenter emits a list of chunks per example; flatten it
        dataset = dataset.unbatch()
    else:
        def unbatch(example):
            # without chunking the segmenter emits a single-element list
            assert len(example) == 1, example
            return example[0]
        dataset = dataset.map(unbatch)
    dataset = dataset.map(_set_num_samples)
    if shuffle:
        dataset = dataset.shuffle(reshuffle=True, buffer_size=128)
    dataset = dataset.batch(batch_size)
    # sort within the batch so padding is minimal, then collate
    dataset = dataset.map(pt.data.batch.Sorter('num_samples'))
    dataset = dataset.map(pt.data.utils.collate_fn)
    return dataset
@ex.capture
def prepare_dataset_captured(
        database_obj, dataset, batch_size, debug, chunk_size,
        shuffle, dataset_slice=None,
):
    """Sacred-captured wrapper around ``prepare_dataset``.

    In debug mode (and without an explicit slice) only the first 100
    examples are used, and prefetching is disabled.
    """
    if dataset_slice is None and debug:
        dataset_slice = slice(0, 100, 1)

    return prepare_dataset(
        database_obj, dataset, batch_size, chunk_size,
        shuffle=shuffle,
        prefetch=not debug,
        dataset_slice=dataset_slice,
    )
@ex.capture
def dump_config_and_makefile(_config):
    """
    Dumps the configuration into the experiment dir and creates a Makefile
    next to it. If a Makefile already exists, it does not do anything.
    """
    experiment_dir = Path(_config['trainer']['storage_dir'])
    makefile_path = Path(experiment_dir) / "Makefile"
    # the Makefile doubles as the "already initialized" marker
    if not makefile_path.exists():
        from padertorch.contrib.examples.source_separation.tasnet.templates import \
            MAKEFILE_TEMPLATE_TRAIN
        config_path = experiment_dir / "config.json"
        pt.io.dump_config(_config, config_path)
        makefile_path.write_text(
            MAKEFILE_TEMPLATE_TRAIN.format(
                main_python_path=pt.configurable.resolve_main_python_path(),
                experiment_name=experiment_name,
                # evaluate module lives next to this train module
                eval_python_path=('.'.join(
                    pt.configurable.resolve_main_python_path().split('.')[:-1]
                ) + '.evaluate')
            )
        )
@ex.command(unobserved=True)
def init(_config, _run):
    """Create a storage dir, write Makefile. Do not start any training."""
    sacred.commands.print_config(_run)
    dump_config_and_makefile()
    storage_dir = _config['trainer']['storage_dir']
    print()
    print('Initialized storage dir. Now run these commands:')
    print(f"cd {storage_dir}")
    # was an f-string with no placeholder (ruff F541); plain literal now
    print("make train")
    print()
    print('or')
    print()
    print(f"cd {storage_dir}")
    print('make ccsalloc')
@ex.capture
def prepare_and_train(_run, _log, trainer, train_dataset, validate_dataset,
                      lr_scheduler_step, lr_scheduler_gamma,
                      load_model_from, database_json):
    """Build trainer and datasets, run a test pass, then train to completion."""
    trainer = get_trainer(trainer, load_model_from, _log)
    db = JsonDatabase(database_json)
    train_dataset = prepare_dataset_captured(db, train_dataset, shuffle=True)
    # validation uses full-length examples (chunk_size=-1), no shuffling
    validate_dataset = prepare_dataset_captured(
        db, validate_dataset, shuffle=False, chunk_size=-1
    )
    # Perform a test run to check if everything works
    trainer.test_run(train_dataset, validate_dataset)
    # Register hooks and start the actual training
    # Learning rate scheduler
    if lr_scheduler_step:
        trainer.register_hook(pt.train.hooks.LRSchedulerHook(
            torch.optim.lr_scheduler.StepLR(
                trainer.optimizer.optimizer,
                step_size=lr_scheduler_step,
                gamma=lr_scheduler_gamma,
            )
        ))
        # Don't use LR back-off
        trainer.register_validation_hook(validate_dataset)
    else:
        # Use LR back-off
        trainer.register_validation_hook(
            validate_dataset, n_back_off=5, back_off_patience=3
        )
    # resume automatically when a checkpoint directory already exists
    trainer.train(train_dataset, resume=trainer.checkpoint_dir.exists())
def get_trainer(trainer_config, load_model_from, _log):
    """Build a trainer; optionally warm-start weights from another model.

    External weights are only loaded when this run has not yet produced a
    checkpoint of its own (so resuming takes precedence).
    """
    trainer = pt.Trainer.from_config(trainer_config)
    latest_checkpoint = trainer.checkpoint_dir / 'ckpt_latest.pth'
    if load_model_from is not None and not latest_checkpoint.is_file():
        _log.info(f'Loading model weights from {load_model_from}')
        state = torch.load(load_model_from)
        trainer.model.load_state_dict(state['model'])
    return trainer
@ex.command
def test_run(_run, _log, trainer, train_dataset, validate_dataset,
             load_model_from, database_json):
    """Run only the trainer's smoke test, without starting real training."""
    trainer = get_trainer(trainer, load_model_from, _log)
    db = JsonDatabase(database_json)
    # Perform a test run to check if everything works
    trainer.test_run(
        prepare_dataset_captured(db, train_dataset, shuffle=True),
        prepare_dataset_captured(db, validate_dataset, shuffle=True),
    )
@ex.main
def main(_config, _run):
    """Main does resume directly.

    It also writes the `Makefile` and `config.json` again, even when you are
    resuming from an initialized storage dir. This way, the `config.json` is
    always up to date. Historic configuration can be found in Sacred's folder.
    """
    sacred.commands.print_config(_run)
    dump_config_and_makefile()
    prepare_and_train()
if __name__ == '__main__':
    # drop into a debugger on any unhandled exception during the run
    with pb.utils.debug_utils.debug_on(Exception):
        ex.run_commandline()
| 29.039216
| 109
| 0.636394
|
4a076baca299c44d17519e6070b87df62ea14d13
| 9,726
|
py
|
Python
|
train.py
|
OctopusRice/LineNet
|
a03ce3bf741dc497ae6bc680cd779128bda1a34b
|
[
"BSD-3-Clause"
] | 1
|
2020-12-03T03:07:15.000Z
|
2020-12-03T03:07:15.000Z
|
train.py
|
OctopusRice/LineNet
|
a03ce3bf741dc497ae6bc680cd779128bda1a34b
|
[
"BSD-3-Clause"
] | 1
|
2019-08-23T09:27:33.000Z
|
2019-08-29T01:46:12.000Z
|
train.py
|
OctopusRice/LineNet
|
a03ce3bf741dc497ae6bc680cd779128bda1a34b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import os
import json
import torch
import numpy as np
import queue
import pprint
import random
import argparse
import importlib
import threading
import traceback
import torch.distributed as dist
import torch.multiprocessing as mp
from tqdm import tqdm
from torch.multiprocessing import Process, Queue, Pool
import config_debug
from core.dbs import datasets
from core.utils import stdout_to_tqdm
from core.config import SystemConfig
from core.sample import data_sampling_func
from core.nnet.py_factory import NetworkFactory
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def parse_args():
    """Parse command-line arguments for the training script.

    Defaults for ``--cfg_file`` and ``--workers`` come from the
    ``config_debug`` module.
    """
    parser = argparse.ArgumentParser(description="Training Script")
    parser.add_argument("--cfg_file", default=config_debug.cfg_file, help="config file", type=str)
    parser.add_argument("--iter", dest="start_iter",
                        help="train at iteration i",
                        default=0, type=int)
    parser.add_argument("--workers", default=config_debug.workers, type=int)
    parser.add_argument("--initialize", action="store_true")
    parser.add_argument("--distributed", action="store_true")
    parser.add_argument("--world-size", default=-1, type=int,
                        help="number of nodes of distributed training")
    parser.add_argument("--rank", default=0, type=int,
                        help="node rank for distributed training")
    parser.add_argument("--dist-url", default=None, type=str,
                        help="url used to set up distributed training")
    parser.add_argument("--dist-backend", default="nccl", type=str)
    args = parser.parse_args()
    return args
def prefetch_data(system_config, db, queue, sample_data, data_aug):
    """Worker-process loop: endlessly sample batches from ``db`` into ``queue``.

    ``ind`` is the rolling example index that ``sample_data`` advances on
    every call; the loop never returns (the process is terminated externally).
    """
    ind = 0
    print("start prefetching data...")
    # seed with the PID so each worker process draws different samples
    np.random.seed(os.getpid())
    while True:
        try:
            data, ind = sample_data(system_config, db, ind, data_aug=data_aug)
            queue.put(data)
        except Exception as e:
            traceback.print_exc()
            raise e
def _pin_memory(ts):
if type(ts) is list:
return [t.pin_memory() for t in ts]
return ts.pin_memory()
def pin_memory(data_queue, pinned_data_queue, sema):
    """Thread loop: move batches into pinned (page-locked) host memory.

    Exits once ``sema`` is released by the main process (checked after
    every forwarded batch).
    """
    while True:
        data = data_queue.get()
        data["xs"] = [_pin_memory(x) for x in data["xs"]]
        data["ys"] = [_pin_memory(y) for y in data["ys"]]
        pinned_data_queue.put(data)
        # non-blocking acquire succeeds only after the main process
        # releases the semaphore, signalling shutdown
        if sema.acquire(blocking=False):
            return
def init_parallel_jobs(system_config, dbs, queue, fn, data_aug):
    """Spawn one daemon prefetch process per database and return them."""
    tasks = []
    for db in dbs:
        worker = Process(
            target=prefetch_data,
            args=(system_config, db, queue, fn, data_aug))
        worker.daemon = True
        worker.start()
        tasks.append(worker)
    return tasks
def terminate_tasks(tasks):
    """Terminate every worker process in ``tasks``."""
    for worker in tasks:
        worker.terminate()
def train(training_dbs, validation_db, system_config, model, args):
    """Run the full training loop for one process.

    Args:
        training_dbs: list of dataset objects, one per prefetch worker.
        validation_db: dataset object used for periodic validation.
        system_config: SystemConfig carrying training hyper-parameters.
        model: network module to train.
        args: parsed command-line arguments (rank, gpu, distributed, ...).
    """
    # reading arguments from command
    start_iter = args.start_iter
    distributed = args.distributed
    initialize = args.initialize
    gpu = args.gpu
    rank = args.rank
    # reading arguments from json file (the duplicate stepsize assignment
    # and the unused batch_size/world_size locals were removed)
    learning_rate = system_config.learning_rate
    max_iteration = system_config.max_iter
    pretrained_model = system_config.pretrain
    stepsize = system_config.stepsize
    snapshot = system_config.snapshot
    val_iter = system_config.val_iter
    display = system_config.display
    decay_rate = system_config.decay_rate
    # debug/validation override: fine-tune from a released snapshot
    if config_debug.validation:
        if config_debug.cfg_file == 'CornerNet_ifp_Squeeze':
            pretrained_model = "./cache/nnet/CornerNet_Squeeze/CornerNet_Squeeze_500000.pkl"
        elif config_debug.cfg_file == 'CornerNet_ifp_Saccade':
            pretrained_model = "./cache/nnet/CornerNet_Saccade/CornerNet_Saccade_500000.pkl"
        # pretrained_model = "./cache/nnet/CornerNet/CornerNet_500000.pkl"
        # start_iter = 500000
        max_iteration = 100000
        stepsize = 100000
    print("Process {}: building model...".format(rank))
    nnet = NetworkFactory(system_config, model, distributed=distributed, gpu=gpu)
    if initialize:
        # only write the initial snapshot, then quit
        nnet.save_params(0)
        exit(0)
    # queues storing data for training
    training_queue = Queue(system_config.prefetch_size)
    validation_queue = Queue(5)
    # queues storing pinned data for training
    pinned_training_queue = queue.Queue(system_config.prefetch_size)
    pinned_validation_queue = queue.Queue(5)
    # allocating resources for parallel reading
    training_tasks = init_parallel_jobs(system_config, training_dbs, training_queue, data_sampling_func, True)
    # BUGFIX: validation_tasks was previously undefined when val_iter was
    # falsy, so terminate_tasks(validation_tasks) at the end raised a
    # NameError; default to an empty list.
    validation_tasks = []
    if val_iter:
        validation_tasks = init_parallel_jobs(system_config, [validation_db], validation_queue, data_sampling_func, False)
    # both semaphores start acquired; releasing them later tells the
    # pinning threads to exit
    training_pin_semaphore = threading.Semaphore()
    validation_pin_semaphore = threading.Semaphore()
    training_pin_semaphore.acquire()
    validation_pin_semaphore.acquire()
    training_pin_args = (training_queue, pinned_training_queue, training_pin_semaphore)
    training_pin_thread = threading.Thread(target=pin_memory, args=training_pin_args)
    training_pin_thread.daemon = True
    training_pin_thread.start()
    validation_pin_args = (validation_queue, pinned_validation_queue, validation_pin_semaphore)
    validation_pin_thread = threading.Thread(target=pin_memory, args=validation_pin_args)
    validation_pin_thread.daemon = True
    validation_pin_thread.start()
    if pretrained_model is not None:
        if not os.path.exists(pretrained_model):
            raise ValueError("pretrained model does not exist")
        print("Process {}: loading from pretrained model".format(rank))
        nnet.load_pretrained_params(pretrained_model)
    if start_iter:
        # resume: restore weights and decay the LR as if already trained
        nnet.load_params(start_iter)
        learning_rate /= (decay_rate ** (start_iter // stepsize))
        nnet.set_lr(learning_rate)
        print("Process {}: training starts from iteration {} with learning_rate {}".format(rank, start_iter + 1, learning_rate))
    else:
        nnet.set_lr(learning_rate)
    if rank == 0:
        print("training start...")
    nnet.cuda()
    nnet.train_mode()
    with stdout_to_tqdm() as save_stdout:
        for iteration in tqdm(range(start_iter + 1, max_iteration + 1), file=save_stdout, ncols=80):
            training = pinned_training_queue.get(block=True)
            training_loss = nnet.train(**training)
            if display and iteration % display == 0:
                print("Process {}: training loss at iteration {}: {}".format(rank, iteration, training_loss.item()))
            del training_loss
            if val_iter and validation_db.db_inds.size and iteration % val_iter == 0:
                nnet.eval_mode()
                validation = pinned_validation_queue.get(block=True)
                validation_loss = nnet.validate(**validation)
                print("Process {}: validation loss at iteration {}: {}".format(rank, iteration, validation_loss.item()))
                nnet.train_mode()
            if iteration % snapshot == 0 and rank == 0:
                nnet.save_params(iteration)
            if iteration % stepsize == 0:
                learning_rate /= decay_rate
                nnet.set_lr(learning_rate)
    # sending signal to kill the pinning threads
    training_pin_semaphore.release()
    validation_pin_semaphore.release()
    # terminating data fetching processes
    terminate_tasks(training_tasks)
    terminate_tasks(validation_tasks)
def main(gpu, ngpus_per_node, args):
    """Per-process entry point: set up (optionally distributed) process
    state, load the JSON config and model module, build the datasets and
    hand off to train().

    gpu            -- local GPU index (None in non-distributed mode)
    ngpus_per_node -- number of GPUs on this node
    args           -- parsed command-line arguments
    """
    args.gpu = gpu
    if args.distributed:
        # Global rank = node rank * GPUs-per-node + local GPU index.
        args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)

    rank = args.rank

    # Config file name doubles as the model module / snapshot name.
    cfg_file = os.path.join("./configs", args.cfg_file + ".json")
    with open(cfg_file, "r") as f:
        config = json.load(f)

    config["system"]["snapshot_name"] = args.cfg_file
    system_config = SystemConfig().update_config(config["system"])

    # The model is defined by a module named after the config file.
    model_file = "core.models.{}".format(args.cfg_file)
    model_file = importlib.import_module(model_file)
    model = model_file.model()

    train_split = system_config.train_split
    val_split = system_config.val_split

    print("Process {}: loading all datasets...".format(rank))
    dataset = system_config.dataset
    workers = args.workers
    print("Process {}: using {} workers".format(rank, workers))
    # One training dataset object per reader worker — presumably each
    # prefetching task needs its own independent state (TODO confirm).
    training_dbs = [datasets[dataset](config["db"], split=train_split, sys_config=system_config) for _ in range(workers)]
    validation_db = datasets[dataset](config["db"], split=val_split, sys_config=system_config)

    if rank == 0:
        # Only the first process prints the full configuration.
        print("system config...")
        pprint.pprint(system_config.full)

        print("db config...")
        pprint.pprint(training_dbs[0].configs)

        print("len of db: {}".format(len(training_dbs[0].db_inds)))
        print("distributed: {}".format(args.distributed))

    train(training_dbs, validation_db, system_config, model, args)
if __name__ == "__main__":
args = parse_args()
distributed = args.distributed
world_size = args.world_size
if distributed and world_size < 0:
raise ValueError("world size must be greater than 0 in distributed training")
ngpus_per_node = torch.cuda.device_count()
if distributed:
args.world_size = ngpus_per_node * args.world_size
mp.spawn(main, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
main(None, ngpus_per_node, args)
| 37.264368
| 128
| 0.681061
|
4a076c10b651b98fcdc4d7e18b2ef341ef786c56
| 316
|
py
|
Python
|
projects/migrations/0002_remove_project_image.py
|
jimmybutton/portfolio
|
c5e20fa9c3198e73b8f8434659d2a67704c62d06
|
[
"MIT"
] | 1
|
2021-03-26T11:12:40.000Z
|
2021-03-26T11:12:40.000Z
|
projects/migrations/0002_remove_project_image.py
|
jimmybutton/rp_portfolio
|
c5e20fa9c3198e73b8f8434659d2a67704c62d06
|
[
"MIT"
] | null | null | null |
projects/migrations/0002_remove_project_image.py
|
jimmybutton/rp_portfolio
|
c5e20fa9c3198e73b8f8434659d2a67704c62d06
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1 on 2020-08-21 16:45
from django.db import migrations
# Auto-generated schema migration: drops the ``image`` column from the
# ``Project`` model.
class Migration(migrations.Migration):

    # Must run after the initial projects migration.
    dependencies = [
        ('projects', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='project',
            name='image',
        ),
    ]
| 17.555556
| 45
| 0.579114
|
4a076d496c08da29fb65a95b366a4f03bad417a6
| 848
|
py
|
Python
|
Snapchat/find_all_anagrams.py
|
mohamedsugal/Leetcode-solutions
|
539c4a70ab0fd23b7e5a9d450ae5ac2270325f77
|
[
"MIT"
] | 3
|
2020-11-12T06:51:44.000Z
|
2021-09-19T00:26:33.000Z
|
Snapchat/find_all_anagrams.py
|
mohamedsugal/Leetcode-solutions
|
539c4a70ab0fd23b7e5a9d450ae5ac2270325f77
|
[
"MIT"
] | null | null | null |
Snapchat/find_all_anagrams.py
|
mohamedsugal/Leetcode-solutions
|
539c4a70ab0fd23b7e5a9d450ae5ac2270325f77
|
[
"MIT"
] | null | null | null |
from typing import List
from collections import defaultdict
class Solution:
    def findAnagrams(self, s: str, p: str) -> List[int]:
        """Return the start indices of every substring of ``s`` that is an
        anagram of ``p``, using a fixed-size sliding window of letter counts.
        """
        window = len(p)
        if len(s) < window:
            return []

        # Frequency table for the pattern.
        target = {}
        for ch in p:
            target[ch] = target.get(ch, 0) + 1

        counts = {}          # letter counts of the current window
        starts = []          # collected anagram start indices
        lo = 0               # left edge of the window

        for hi, ch in enumerate(s):
            counts[ch] = counts.get(ch, 0) + 1
            if hi >= window:
                # Window exceeded len(p): evict the leftmost character,
                # deleting the key so dict equality with `target` works.
                out = s[lo]
                if counts[out] == 1:
                    del counts[out]
                else:
                    counts[out] -= 1
                lo += 1
            if counts == target:
                starts.append(lo)

        return starts
s = "cbaebabacd"
p = "abc"
print(Solution().findAnagrams(s, p))
| 30.285714
| 60
| 0.459906
|
4a076e3bbf5426bbf1ef0522938e3457b8e428c0
| 151,858
|
py
|
Python
|
Lib/test/test_os.py
|
marcoramirezmx/cpython
|
8b31a11a698cb5aa9b439b349c8de4e388846f73
|
[
"CNRI-Python-GPL-Compatible"
] | 5
|
2018-07-02T19:10:39.000Z
|
2021-09-27T04:05:10.000Z
|
Lib/test/test_os.py
|
marcoramirezmx/cpython
|
8b31a11a698cb5aa9b439b349c8de4e388846f73
|
[
"CNRI-Python-GPL-Compatible"
] | 3
|
2018-06-10T06:28:06.000Z
|
2021-09-24T13:54:19.000Z
|
Lib/test/test_os.py
|
marcoramirezmx/cpython
|
8b31a11a698cb5aa9b439b349c8de4e388846f73
|
[
"CNRI-Python-GPL-Compatible"
] | 1
|
2020-10-09T12:23:55.000Z
|
2020-10-09T12:23:55.000Z
|
# As a test suite for the os module, this is woefully inadequate, but this
# does add tests for a few functions which have been determined to be more
# portable than they had been thought to be.
import asynchat
import asyncore
import codecs
import contextlib
import decimal
import errno
import fnmatch
import fractions
import itertools
import locale
import mmap
import os
import pickle
import shutil
import signal
import socket
import stat
import subprocess
import sys
import sysconfig
import tempfile
import threading
import time
import unittest
import uuid
import warnings
from test import support
from platform import win32_is_iot
try:
import resource
except ImportError:
resource = None
try:
import fcntl
except ImportError:
fcntl = None
try:
import _winapi
except ImportError:
_winapi = None
try:
import pwd
all_users = [u.pw_uid for u in pwd.getpwall()]
except (ImportError, AttributeError):
all_users = []
try:
from _testcapi import INT_MAX, PY_SSIZE_T_MAX
except ImportError:
INT_MAX = PY_SSIZE_T_MAX = sys.maxsize
from test.support.script_helper import assert_python_ok
from test.support import unix_shell, FakePath
# True when running as the POSIX superuser; some permission tests depend on it.
root_in_posix = False
if hasattr(os, 'geteuid'):
    root_in_posix = (os.geteuid() == 0)

# Detect whether we're on a Linux system that uses the (now outdated
# and unmaintained) linuxthreads threading library. There's an issue
# when combining linuxthreads with a failed execv call: see
# http://bugs.python.org/issue4970.
if hasattr(sys, 'thread_info') and sys.thread_info.version:
    USING_LINUXTHREADS = sys.thread_info.version.startswith("linuxthreads")
else:
    USING_LINUXTHREADS = False

# Issue #14110: Some tests fail on FreeBSD if the user is in the wheel group.
HAVE_WHEEL_GROUP = sys.platform.startswith('freebsd') and os.getgid() == 0
def requires_os_func(name):
    """Return a skip decorator that runs the test only when ``os`` exposes
    an attribute called *name*."""
    available = hasattr(os, name)
    return unittest.skipUnless(available, 'requires os.%s' % name)
def create_file(filename, content=b'content'):
    """Create *filename* holding *content* (bytes).

    Opens with mode "xb" (exclusive, unbuffered), so a FileExistsError is
    raised if the file is already present.
    """
    fp = open(filename, "xb", 0)
    try:
        fp.write(content)
    finally:
        fp.close()
class MiscTests(unittest.TestCase):
    # Sanity checks for miscellaneous top-level os functions (getcwd family).

    def test_getcwd(self):
        cwd = os.getcwd()
        self.assertIsInstance(cwd, str)

    def test_getcwd_long_path(self):
        # bpo-37412: On Linux, PATH_MAX is usually around 4096 bytes. On
        # Windows, MAX_PATH is defined as 260 characters, but Windows supports
        # longer path if longer paths support is enabled. Internally, the os
        # module uses MAXPATHLEN which is at least 1024.
        #
        # Use a directory name of 200 characters to fit into Windows MAX_PATH
        # limit.
        #
        # On Windows, the test can stop when trying to create a path longer
        # than MAX_PATH if long paths support is disabled:
        # see RtlAreLongPathsEnabled().
        min_len = 2000   # characters
        dirlen = 200     # characters
        dirname = 'python_test_dir_'
        dirname = dirname + ('a' * (dirlen - len(dirname)))

        with tempfile.TemporaryDirectory() as tmpdir:
            with support.change_cwd(tmpdir) as path:
                expected = path

                # Keep nesting directories until the cwd is at least
                # min_len characters long (or the OS refuses).
                while True:
                    cwd = os.getcwd()
                    self.assertEqual(cwd, expected)

                    need = min_len - (len(cwd) + len(os.path.sep))
                    if need <= 0:
                        break
                    if len(dirname) > need and need > 0:
                        dirname = dirname[:need]

                    path = os.path.join(path, dirname)
                    try:
                        os.mkdir(path)
                        # On Windows, chdir() can fail
                        # even if mkdir() succeeded
                        os.chdir(path)
                    except FileNotFoundError:
                        # On Windows, catch ERROR_PATH_NOT_FOUND (3) and
                        # ERROR_FILENAME_EXCED_RANGE (206) errors
                        # ("The filename or extension is too long")
                        break
                    except OSError as exc:
                        if exc.errno == errno.ENAMETOOLONG:
                            break
                        else:
                            raise

                    expected = path

                if support.verbose:
                    print(f"Tested current directory length: {len(cwd)}")

    def test_getcwdb(self):
        # getcwdb() must return bytes that decode back to getcwd().
        cwd = os.getcwdb()
        self.assertIsInstance(cwd, bytes)
        self.assertEqual(os.fsdecode(cwd), os.getcwd())
# Tests creating TESTFN
class FileTests(unittest.TestCase):
    # Tests for fd-level file operations (os.open/read/write/replace/
    # copy_file_range). Each test works on support.TESTFN.

    def setUp(self):
        if os.path.lexists(support.TESTFN):
            os.unlink(support.TESTFN)
    # Same cleanup before and after each test.
    tearDown = setUp

    def test_access(self):
        f = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
        os.close(f)
        self.assertTrue(os.access(support.TESTFN, os.W_OK))

    def test_closerange(self):
        first = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
        # We must allocate two consecutive file descriptors, otherwise
        # it will mess up other file descriptors (perhaps even the three
        # standard ones).
        second = os.dup(first)
        try:
            retries = 0
            while second != first + 1:
                os.close(first)
                retries += 1
                if retries > 10:
                    # XXX test skipped
                    self.skipTest("couldn't allocate two consecutive fds")
                first, second = second, os.dup(second)
        finally:
            os.close(second)
        # close a fd that is open, and one that isn't
        os.closerange(first, first + 2)
        self.assertRaises(OSError, os.write, first, b"a")

    @support.cpython_only
    def test_rename(self):
        # A failing rename must not leak a reference to the path object.
        path = support.TESTFN
        old = sys.getrefcount(path)
        self.assertRaises(TypeError, os.rename, path, 0)
        new = sys.getrefcount(path)
        self.assertEqual(old, new)

    def test_read(self):
        with open(support.TESTFN, "w+b") as fobj:
            fobj.write(b"spam")
            fobj.flush()
            fd = fobj.fileno()
            os.lseek(fd, 0, 0)
            s = os.read(fd, 4)
            self.assertEqual(type(s), bytes)
            self.assertEqual(s, b"spam")

    @support.cpython_only
    # Skip the test on 32-bit platforms: the number of bytes must fit in a
    # Py_ssize_t type
    @unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX,
                         "needs INT_MAX < PY_SSIZE_T_MAX")
    @support.bigmemtest(size=INT_MAX + 10, memuse=1, dry_run=False)
    def test_large_read(self, size):
        self.addCleanup(support.unlink, support.TESTFN)
        create_file(support.TESTFN, b'test')

        # Issue #21932: Make sure that os.read() does not raise an
        # OverflowError for size larger than INT_MAX
        with open(support.TESTFN, "rb") as fp:
            data = os.read(fp.fileno(), size)

        # The test does not try to read more than 2 GiB at once because the
        # operating system is free to return less bytes than requested.
        self.assertEqual(data, b'test')

    def test_write(self):
        # os.write() accepts bytes- and buffer-like objects but not strings
        fd = os.open(support.TESTFN, os.O_CREAT | os.O_WRONLY)
        self.assertRaises(TypeError, os.write, fd, "beans")
        os.write(fd, b"bacon\n")
        os.write(fd, bytearray(b"eggs\n"))
        os.write(fd, memoryview(b"spam\n"))
        os.close(fd)
        with open(support.TESTFN, "rb") as fobj:
            self.assertEqual(fobj.read().splitlines(),
                             [b"bacon", b"eggs", b"spam"])

    def write_windows_console(self, *args):
        # Helper: run *args* in a fresh hidden console and expect success.
        retcode = subprocess.call(args,
            # use a new console to not flood the test output
            creationflags=subprocess.CREATE_NEW_CONSOLE,
            # use a shell to hide the console window (SW_HIDE)
            shell=True)
        self.assertEqual(retcode, 0)

    @unittest.skipUnless(sys.platform == 'win32',
                         'test specific to the Windows console')
    def test_write_windows_console(self):
        # Issue #11395: the Windows console returns an error (12: not enough
        # space error) on writing into stdout if stdout mode is binary and the
        # length is greater than 66,000 bytes (or less, depending on heap
        # usage).
        code = "print('x' * 100000)"
        self.write_windows_console(sys.executable, "-c", code)
        self.write_windows_console(sys.executable, "-u", "-c", code)

    def fdopen_helper(self, *args):
        # Helper: fdopen TESTFN with the given mode arguments and close it.
        fd = os.open(support.TESTFN, os.O_RDONLY)
        f = os.fdopen(fd, *args)
        f.close()

    def test_fdopen(self):
        fd = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
        os.close(fd)

        self.fdopen_helper()
        self.fdopen_helper('r')
        self.fdopen_helper('r', 100)

    def test_replace(self):
        TESTFN2 = support.TESTFN + ".2"
        self.addCleanup(support.unlink, support.TESTFN)
        self.addCleanup(support.unlink, TESTFN2)

        create_file(support.TESTFN, b"1")
        create_file(TESTFN2, b"2")

        # replace() must atomically overwrite the destination.
        os.replace(support.TESTFN, TESTFN2)
        self.assertRaises(FileNotFoundError, os.stat, support.TESTFN)
        with open(TESTFN2, 'r') as f:
            self.assertEqual(f.read(), "1")

    def test_open_keywords(self):
        f = os.open(path=__file__, flags=os.O_RDONLY, mode=0o777,
            dir_fd=None)
        os.close(f)

    def test_symlink_keywords(self):
        symlink = support.get_attribute(os, "symlink")
        try:
            symlink(src='target', dst=support.TESTFN,
                target_is_directory=False, dir_fd=None)
        except (NotImplementedError, OSError):
            pass  # No OS support or unprivileged user

    @unittest.skipUnless(hasattr(os, 'copy_file_range'), 'test needs os.copy_file_range()')
    def test_copy_file_range_invalid_values(self):
        with self.assertRaises(ValueError):
            os.copy_file_range(0, 1, -10)

    @unittest.skipUnless(hasattr(os, 'copy_file_range'), 'test needs os.copy_file_range()')
    def test_copy_file_range(self):
        TESTFN2 = support.TESTFN + ".3"
        data = b'0123456789'

        create_file(support.TESTFN, data)
        self.addCleanup(support.unlink, support.TESTFN)

        in_file = open(support.TESTFN, 'rb')
        self.addCleanup(in_file.close)
        in_fd = in_file.fileno()

        out_file = open(TESTFN2, 'w+b')
        self.addCleanup(support.unlink, TESTFN2)
        self.addCleanup(out_file.close)
        out_fd = out_file.fileno()

        try:
            i = os.copy_file_range(in_fd, out_fd, 5)
        except OSError as e:
            # Handle the case in which Python was compiled
            # in a system with the syscall but without support
            # in the kernel.
            if e.errno != errno.ENOSYS:
                raise
            self.skipTest(e)
        else:
            # The number of copied bytes can be less than
            # the number of bytes originally requested.
            self.assertIn(i, range(0, 6));

            with open(TESTFN2, 'rb') as in_file:
                self.assertEqual(in_file.read(), data[:i])

    @unittest.skipUnless(hasattr(os, 'copy_file_range'), 'test needs os.copy_file_range()')
    def test_copy_file_range_offset(self):
        TESTFN4 = support.TESTFN + ".4"
        data = b'0123456789'
        bytes_to_copy = 6
        in_skip = 3
        out_seek = 5

        create_file(support.TESTFN, data)
        self.addCleanup(support.unlink, support.TESTFN)

        in_file = open(support.TESTFN, 'rb')
        self.addCleanup(in_file.close)
        in_fd = in_file.fileno()

        out_file = open(TESTFN4, 'w+b')
        self.addCleanup(support.unlink, TESTFN4)
        self.addCleanup(out_file.close)
        out_fd = out_file.fileno()

        try:
            i = os.copy_file_range(in_fd, out_fd, bytes_to_copy,
                                   offset_src=in_skip,
                                   offset_dst=out_seek)
        except OSError as e:
            # Handle the case in which Python was compiled
            # in a system with the syscall but without support
            # in the kernel.
            if e.errno != errno.ENOSYS:
                raise
            self.skipTest(e)
        else:
            # The number of copied bytes can be less than
            # the number of bytes originally requested.
            self.assertIn(i, range(0, bytes_to_copy+1));

            with open(TESTFN4, 'rb') as in_file:
                read = in_file.read()
            # seeked bytes (5) are zero'ed
            self.assertEqual(read[:out_seek], b'\x00'*out_seek)
            # 012 are skipped (in_skip)
            # 345678 are copied in the file (in_skip + bytes_to_copy)
            self.assertEqual(read[out_seek:],
                             data[in_skip:in_skip+i])
# Test attributes on return values from os.*stat* family.
class StatAttributeTests(unittest.TestCase):
    # Checks attribute access, immutability, pickling and Windows-specific
    # behaviour of os.stat() / os.statvfs() result objects.

    def setUp(self):
        self.fname = support.TESTFN
        self.addCleanup(support.unlink, self.fname)
        create_file(self.fname, b"ABC")

    def check_stat_attributes(self, fname):
        result = os.stat(fname)

        # Make sure direct access works
        self.assertEqual(result[stat.ST_SIZE], 3)
        self.assertEqual(result.st_size, 3)

        # Make sure all the attributes are there
        members = dir(result)
        for name in dir(stat):
            if name[:3] == 'ST_':
                attr = name.lower()
                # Time fields are truncated to int when read by index.
                if name.endswith("TIME"):
                    def trunc(x): return int(x)
                else:
                    def trunc(x): return x
                self.assertEqual(trunc(getattr(result, attr)),
                                 result[getattr(stat, name)])
                self.assertIn(attr, members)

        # Make sure that the st_?time and st_?time_ns fields roughly agree
        # (they should always agree up to around tens-of-microseconds)
        for name in 'st_atime st_mtime st_ctime'.split():
            floaty = int(getattr(result, name) * 100000)
            nanosecondy = getattr(result, name + "_ns") // 10000
            self.assertAlmostEqual(floaty, nanosecondy, delta=2)

        # Out-of-range indexing must raise IndexError.
        try:
            result[200]
            self.fail("No exception raised")
        except IndexError:
            pass

        # Make sure that assignment fails
        try:
            result.st_mode = 1
            self.fail("No exception raised")
        except AttributeError:
            pass

        try:
            result.st_rdev = 1
            self.fail("No exception raised")
        except (AttributeError, TypeError):
            pass

        try:
            result.parrot = 1
            self.fail("No exception raised")
        except AttributeError:
            pass

        # Use the stat_result constructor with a too-short tuple.
        try:
            result2 = os.stat_result((10,))
            self.fail("No exception raised")
        except TypeError:
            pass

        # Use the constructor with a too-long tuple.
        try:
            result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
        except TypeError:
            pass

    def test_stat_attributes(self):
        self.check_stat_attributes(self.fname)

    def test_stat_attributes_bytes(self):
        try:
            fname = self.fname.encode(sys.getfilesystemencoding())
        except UnicodeEncodeError:
            self.skipTest("cannot encode %a for the filesystem" % self.fname)
        self.check_stat_attributes(fname)

    def test_stat_result_pickle(self):
        # stat results must round-trip through every pickle protocol.
        result = os.stat(self.fname)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            p = pickle.dumps(result, proto)
            self.assertIn(b'stat_result', p)
            if proto < 4:
                self.assertIn(b'cos\nstat_result\n', p)
            unpickled = pickle.loads(p)
            self.assertEqual(result, unpickled)

    @unittest.skipUnless(hasattr(os, 'statvfs'), 'test needs os.statvfs()')
    def test_statvfs_attributes(self):
        result = os.statvfs(self.fname)

        # Make sure direct access works
        self.assertEqual(result.f_bfree, result[3])

        # Make sure all the attributes are there.
        members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files',
                   'ffree', 'favail', 'flag', 'namemax')
        for value, member in enumerate(members):
            self.assertEqual(getattr(result, 'f_' + member), result[value])

        self.assertTrue(isinstance(result.f_fsid, int))

        # Test that the size of the tuple doesn't change
        self.assertEqual(len(result), 10)

        # Make sure that assignment really fails
        try:
            result.f_bfree = 1
            self.fail("No exception raised")
        except AttributeError:
            pass

        try:
            result.parrot = 1
            self.fail("No exception raised")
        except AttributeError:
            pass

        # Use the constructor with a too-short tuple.
        try:
            result2 = os.statvfs_result((10,))
            self.fail("No exception raised")
        except TypeError:
            pass

        # Use the constructor with a too-long tuple.
        try:
            result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
        except TypeError:
            pass

    @unittest.skipUnless(hasattr(os, 'statvfs'),
                         "need os.statvfs()")
    def test_statvfs_result_pickle(self):
        result = os.statvfs(self.fname)

        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            p = pickle.dumps(result, proto)
            self.assertIn(b'statvfs_result', p)
            if proto < 4:
                self.assertIn(b'cos\nstatvfs_result\n', p)
            unpickled = pickle.loads(p)
            self.assertEqual(result, unpickled)

    @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
    def test_1686475(self):
        # Verify that an open file can be stat'ed
        try:
            os.stat(r"c:\pagefile.sys")
        except FileNotFoundError:
            self.skipTest(r'c:\pagefile.sys does not exist')
        except OSError as e:
            self.fail("Could not stat pagefile.sys")

    @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
    @unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
    def test_15261(self):
        # Verify that stat'ing a closed fd does not cause crash
        r, w = os.pipe()
        try:
            os.stat(r)          # should not raise error
        finally:
            os.close(r)
            os.close(w)
        with self.assertRaises(OSError) as ctx:
            os.stat(r)
        self.assertEqual(ctx.exception.errno, errno.EBADF)

    def check_file_attributes(self, result):
        # st_file_attributes must be a 32-bit unsigned int.
        self.assertTrue(hasattr(result, 'st_file_attributes'))
        self.assertTrue(isinstance(result.st_file_attributes, int))
        self.assertTrue(0 <= result.st_file_attributes <= 0xFFFFFFFF)

    @unittest.skipUnless(sys.platform == "win32",
                         "st_file_attributes is Win32 specific")
    def test_file_attributes(self):
        # test file st_file_attributes (FILE_ATTRIBUTE_DIRECTORY not set)
        result = os.stat(self.fname)
        self.check_file_attributes(result)
        self.assertEqual(
            result.st_file_attributes & stat.FILE_ATTRIBUTE_DIRECTORY,
            0)

        # test directory st_file_attributes (FILE_ATTRIBUTE_DIRECTORY set)
        dirname = support.TESTFN + "dir"
        os.mkdir(dirname)
        self.addCleanup(os.rmdir, dirname)

        result = os.stat(dirname)
        self.check_file_attributes(result)
        self.assertEqual(
            result.st_file_attributes & stat.FILE_ATTRIBUTE_DIRECTORY,
            stat.FILE_ATTRIBUTE_DIRECTORY)

    @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
    def test_access_denied(self):
        # Default to FindFirstFile WIN32_FIND_DATA when access is
        # denied. See issue 28075.
        # os.environ['TEMP'] should be located on a volume that
        # supports file ACLs.
        fname = os.path.join(os.environ['TEMP'], self.fname)
        self.addCleanup(support.unlink, fname)
        create_file(fname, b'ABC')
        # Deny the right to [S]YNCHRONIZE on the file to
        # force CreateFile to fail with ERROR_ACCESS_DENIED.
        DETACHED_PROCESS = 8
        subprocess.check_call(
            # bpo-30584: Use security identifier *S-1-5-32-545 instead
            # of localized "Users" to not depend on the locale.
            ['icacls.exe', fname, '/deny', '*S-1-5-32-545:(S)'],
            creationflags=DETACHED_PROCESS
        )
        result = os.stat(fname)
        self.assertNotEqual(result.st_size, 0)

    @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
    def test_stat_block_device(self):
        # bpo-38030: os.stat fails for block devices
        # Test a filename like "//./C:"
        fname = "//./" + os.path.splitdrive(os.getcwd())[0]
        result = os.stat(fname)
        self.assertEqual(result.st_mode, stat.S_IFBLK)
class UtimeTests(unittest.TestCase):
    # Exercises os.utime() through all its parameter spellings (ns=, times=,
    # positional tuple, fd, dir_fd, follow_symlinks) against a fresh test dir.

    def setUp(self):
        self.dirname = support.TESTFN
        self.fname = os.path.join(self.dirname, "f1")

        self.addCleanup(support.rmtree, self.dirname)
        os.mkdir(self.dirname)
        create_file(self.fname)

    def support_subsecond(self, filename):
        # Heuristic to check if the filesystem supports timestamp with
        # subsecond resolution: check if float and int timestamps are different
        st = os.stat(filename)
        return ((st.st_atime != st[7])
                or (st.st_mtime != st[8])
                or (st.st_ctime != st[9]))

    def _test_utime(self, set_time, filename=None):
        # Shared driver: *set_time* applies (atime_ns, mtime_ns) to the file,
        # then the resulting stat times are verified.
        if not filename:
            filename = self.fname

        support_subsecond = self.support_subsecond(filename)
        if support_subsecond:
            # Timestamp with a resolution of 1 microsecond (10^-6).
            #
            # The resolution of the C internal function used by os.utime()
            # depends on the platform: 1 sec, 1 us, 1 ns. Writing a portable
            # test with a resolution of 1 ns requires more work:
            # see the issue #15745.
            atime_ns = 1002003000   # 1.002003 seconds
            mtime_ns = 4005006000   # 4.005006 seconds
        else:
            # use a resolution of 1 second
            atime_ns = 5 * 10**9
            mtime_ns = 8 * 10**9

        set_time(filename, (atime_ns, mtime_ns))
        st = os.stat(filename)

        if support_subsecond:
            self.assertAlmostEqual(st.st_atime, atime_ns * 1e-9, delta=1e-6)
            self.assertAlmostEqual(st.st_mtime, mtime_ns * 1e-9, delta=1e-6)
        else:
            self.assertEqual(st.st_atime, atime_ns * 1e-9)
            self.assertEqual(st.st_mtime, mtime_ns * 1e-9)
        self.assertEqual(st.st_atime_ns, atime_ns)
        self.assertEqual(st.st_mtime_ns, mtime_ns)

    def test_utime(self):
        def set_time(filename, ns):
            # test the ns keyword parameter
            os.utime(filename, ns=ns)
        self._test_utime(set_time)

    @staticmethod
    def ns_to_sec(ns):
        # Convert a number of nanosecond (int) to a number of seconds (float).
        # Round towards infinity by adding 0.5 nanosecond to avoid rounding
        # issue, os.utime() rounds towards minus infinity.
        return (ns * 1e-9) + 0.5e-9

    def test_utime_by_indexed(self):
        # pass times as floating point seconds as the second indexed parameter
        def set_time(filename, ns):
            atime_ns, mtime_ns = ns
            atime = self.ns_to_sec(atime_ns)
            mtime = self.ns_to_sec(mtime_ns)
            # test utimensat(timespec), utimes(timeval), utime(utimbuf)
            # or utime(time_t)
            os.utime(filename, (atime, mtime))
        self._test_utime(set_time)

    def test_utime_by_times(self):
        def set_time(filename, ns):
            atime_ns, mtime_ns = ns
            atime = self.ns_to_sec(atime_ns)
            mtime = self.ns_to_sec(mtime_ns)
            # test the times keyword parameter
            os.utime(filename, times=(atime, mtime))
        self._test_utime(set_time)

    @unittest.skipUnless(os.utime in os.supports_follow_symlinks,
                         "follow_symlinks support for utime required "
                         "for this test.")
    def test_utime_nofollow_symlinks(self):
        def set_time(filename, ns):
            # use follow_symlinks=False to test utimensat(timespec)
            # or lutimes(timeval)
            os.utime(filename, ns=ns, follow_symlinks=False)
        self._test_utime(set_time)

    @unittest.skipUnless(os.utime in os.supports_fd,
                         "fd support for utime required for this test.")
    def test_utime_fd(self):
        def set_time(filename, ns):
            with open(filename, 'wb', 0) as fp:
                # use a file descriptor to test futimens(timespec)
                # or futimes(timeval)
                os.utime(fp.fileno(), ns=ns)
        self._test_utime(set_time)

    @unittest.skipUnless(os.utime in os.supports_dir_fd,
                         "dir_fd support for utime required for this test.")
    def test_utime_dir_fd(self):
        def set_time(filename, ns):
            dirname, name = os.path.split(filename)
            dirfd = os.open(dirname, os.O_RDONLY)
            try:
                # pass dir_fd to test utimensat(timespec) or futimesat(timeval)
                os.utime(name, dir_fd=dirfd, ns=ns)
            finally:
                os.close(dirfd)
        self._test_utime(set_time)

    def test_utime_directory(self):
        def set_time(filename, ns):
            # test calling os.utime() on a directory
            os.utime(filename, ns=ns)
        self._test_utime(set_time, filename=self.dirname)

    def _test_utime_current(self, set_time):
        # Get the system clock
        current = time.time()

        # Call os.utime() to set the timestamp to the current system clock
        set_time(self.fname)

        if not self.support_subsecond(self.fname):
            delta = 1.0
        else:
            # On Windows, the usual resolution of time.time() is 15.6 ms.
            # bpo-30649: Tolerate 50 ms for slow Windows buildbots.
            #
            # x86 Gentoo Refleaks 3.x once failed with dt=20.2 ms. So use
            # also 50 ms on other platforms.
            delta = 0.050
        st = os.stat(self.fname)
        msg = ("st_time=%r, current=%r, dt=%r"
               % (st.st_mtime, current, st.st_mtime - current))
        self.assertAlmostEqual(st.st_mtime, current,
                               delta=delta, msg=msg)

    def test_utime_current(self):
        def set_time(filename):
            # Set to the current time in the new way
            os.utime(self.fname)
        self._test_utime_current(set_time)

    def test_utime_current_old(self):
        def set_time(filename):
            # Set to the current time in the old explicit way.
            os.utime(self.fname, None)
        self._test_utime_current(set_time)

    def get_file_system(self, path):
        # Return the filesystem name of *path* on Windows (e.g. "NTFS").
        if sys.platform == 'win32':
            root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
            import ctypes
            kernel32 = ctypes.windll.kernel32
            buf = ctypes.create_unicode_buffer("", 100)
            ok = kernel32.GetVolumeInformationW(root, None, 0,
                                                None, None, None,
                                                buf, len(buf))
            if ok:
                return buf.value
        # return None if the filesystem is unknown

    def test_large_time(self):
        # Many filesystems are limited to the year 2038. At least, the test
        # pass with NTFS filesystem.
        if self.get_file_system(self.dirname) != "NTFS":
            self.skipTest("requires NTFS")

        large = 5000000000   # some day in 2128
        os.utime(self.fname, (large, large))
        self.assertEqual(os.stat(self.fname).st_mtime, large)

    def test_utime_invalid_arguments(self):
        # seconds and nanoseconds parameters are mutually exclusive
        with self.assertRaises(ValueError):
            os.utime(self.fname, (5, 5), ns=(5, 5))
        with self.assertRaises(TypeError):
            os.utime(self.fname, [5, 5])
        with self.assertRaises(TypeError):
            os.utime(self.fname, (5,))
        with self.assertRaises(TypeError):
            os.utime(self.fname, (5, 5, 5))
        with self.assertRaises(TypeError):
            os.utime(self.fname, ns=[5, 5])
        with self.assertRaises(TypeError):
            os.utime(self.fname, ns=(5,))
        with self.assertRaises(TypeError):
            os.utime(self.fname, ns=(5, 5, 5))
        if os.utime not in os.supports_follow_symlinks:
            with self.assertRaises(NotImplementedError):
                os.utime(self.fname, (5, 5), follow_symlinks=False)
        if os.utime not in os.supports_fd:
            with open(self.fname, 'wb', 0) as fp:
                with self.assertRaises(TypeError):
                    os.utime(fp.fileno(), (5, 5))
        if os.utime not in os.supports_dir_fd:
            with self.assertRaises(NotImplementedError):
                os.utime(self.fname, (5, 5), dir_fd=0)

    @support.cpython_only
    def test_issue31577(self):
        # The interpreter shouldn't crash in case utime() received a bad
        # ns argument.
        def get_bad_int(divmod_ret_val):
            class BadInt:
                def __divmod__(*args):
                    return divmod_ret_val
            return BadInt()
        with self.assertRaises(TypeError):
            os.utime(self.fname, ns=(get_bad_int(42), 1))
        with self.assertRaises(TypeError):
            os.utime(self.fname, ns=(get_bad_int(()), 1))
        with self.assertRaises(TypeError):
            os.utime(self.fname, ns=(get_bad_int((1, 2, 3)), 1))
from test import mapping_tests
class EnvironTests(mapping_tests.BasicTestMappingProtocol):
"""check that os.environ object conform to mapping protocol"""
type2test = None
def setUp(self):
self.__save = dict(os.environ)
if os.supports_bytes_environ:
self.__saveb = dict(os.environb)
for key, value in self._reference().items():
os.environ[key] = value
def tearDown(self):
os.environ.clear()
os.environ.update(self.__save)
if os.supports_bytes_environ:
os.environb.clear()
os.environb.update(self.__saveb)
def _reference(self):
return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"}
def _empty_mapping(self):
os.environ.clear()
return os.environ
# Bug 1110478
@unittest.skipUnless(unix_shell and os.path.exists(unix_shell),
'requires a shell')
def test_update2(self):
os.environ.clear()
os.environ.update(HELLO="World")
with os.popen("%s -c 'echo $HELLO'" % unix_shell) as popen:
value = popen.read().strip()
self.assertEqual(value, "World")
@unittest.skipUnless(unix_shell and os.path.exists(unix_shell),
'requires a shell')
def test_os_popen_iter(self):
with os.popen("%s -c 'echo \"line1\nline2\nline3\"'"
% unix_shell) as popen:
it = iter(popen)
self.assertEqual(next(it), "line1\n")
self.assertEqual(next(it), "line2\n")
self.assertEqual(next(it), "line3\n")
self.assertRaises(StopIteration, next, it)
# Verify environ keys and values from the OS are of the
# correct str type.
def test_keyvalue_types(self):
for key, val in os.environ.items():
self.assertEqual(type(key), str)
self.assertEqual(type(val), str)
def test_items(self):
for key, value in self._reference().items():
self.assertEqual(os.environ.get(key), value)
# Issue 7310
def test___repr__(self):
"""Check that the repr() of os.environ looks like environ({...})."""
env = os.environ
self.assertEqual(repr(env), 'environ({{{}}})'.format(', '.join(
'{!r}: {!r}'.format(key, value)
for key, value in env.items())))
def test_get_exec_path(self):
defpath_list = os.defpath.split(os.pathsep)
test_path = ['/monty', '/python', '', '/flying/circus']
test_env = {'PATH': os.pathsep.join(test_path)}
saved_environ = os.environ
try:
os.environ = dict(test_env)
# Test that defaulting to os.environ works.
self.assertSequenceEqual(test_path, os.get_exec_path())
self.assertSequenceEqual(test_path, os.get_exec_path(env=None))
finally:
os.environ = saved_environ
# No PATH environment variable
self.assertSequenceEqual(defpath_list, os.get_exec_path({}))
# Empty PATH environment variable
self.assertSequenceEqual(('',), os.get_exec_path({'PATH':''}))
# Supplied PATH environment variable
self.assertSequenceEqual(test_path, os.get_exec_path(test_env))
if os.supports_bytes_environ:
# env cannot contain 'PATH' and b'PATH' keys
try:
# ignore BytesWarning warning
with warnings.catch_warnings(record=True):
mixed_env = {'PATH': '1', b'PATH': b'2'}
except BytesWarning:
# mixed_env cannot be created with python -bb
pass
else:
self.assertRaises(ValueError, os.get_exec_path, mixed_env)
# bytes key and/or value
self.assertSequenceEqual(os.get_exec_path({b'PATH': b'abc'}),
['abc'])
self.assertSequenceEqual(os.get_exec_path({b'PATH': 'abc'}),
['abc'])
self.assertSequenceEqual(os.get_exec_path({'PATH': b'abc'}),
['abc'])
@unittest.skipUnless(os.supports_bytes_environ,
"os.environb required for this test.")
def test_environb(self):
# os.environ -> os.environb
value = 'euro\u20ac'
try:
value_bytes = value.encode(sys.getfilesystemencoding(),
'surrogateescape')
except UnicodeEncodeError:
msg = "U+20AC character is not encodable to %s" % (
sys.getfilesystemencoding(),)
self.skipTest(msg)
os.environ['unicode'] = value
self.assertEqual(os.environ['unicode'], value)
self.assertEqual(os.environb[b'unicode'], value_bytes)
# os.environb -> os.environ
value = b'\xff'
os.environb[b'bytes'] = value
self.assertEqual(os.environb[b'bytes'], value)
value_str = value.decode(sys.getfilesystemencoding(), 'surrogateescape')
self.assertEqual(os.environ['bytes'], value_str)
    # On OS X < 10.6, unsetenv() doesn't return a value (bpo-13415).
    @support.requires_mac_ver(10, 6)
    def test_unset_error(self):
        """Deleting an invalid environment variable name must raise."""
        if sys.platform == "win32":
            # an environment variable is limited to 32,767 characters
            key = 'x' * 50000
            self.assertRaises(ValueError, os.environ.__delitem__, key)
        else:
            # "=" is not allowed in a variable name
            key = 'key='
            self.assertRaises(OSError, os.environ.__delitem__, key)
    def test_key_type(self):
        """The KeyError raised by os.environ must carry the missing key
        object itself and suppress its internal exception context."""
        missing = 'missingkey'
        self.assertNotIn(missing, os.environ)
        with self.assertRaises(KeyError) as cm:
            os.environ[missing]
        # assertIs: the very same key object, not merely an equal string.
        self.assertIs(cm.exception.args[0], missing)
        # __suppress_context__ shows the mapping re-raised with `from None`.
        self.assertTrue(cm.exception.__suppress_context__)
        with self.assertRaises(KeyError) as cm:
            del os.environ[missing]
        self.assertIs(cm.exception.args[0], missing)
        self.assertTrue(cm.exception.__suppress_context__)
    def _test_environ_iteration(self, collection):
        # Helper: start iterating *collection*, mutate os.environ mid-way,
        # then resume iteration.  Iteration must keep working and the new
        # key must be visible in the mapping afterwards.
        iterator = iter(collection)
        new_key = "__new_key__"
        next(iterator)  # start iteration over os.environ.items
        # add a new key in os.environ mapping
        os.environ[new_key] = "test_environ_iteration"
        try:
            next(iterator)  # force iteration over modified mapping
            self.assertEqual(os.environ[new_key], "test_environ_iteration")
        finally:
            # Always undo the mutation so other tests see a clean environ.
            del os.environ[new_key]
    def test_iter_error_when_changing_os_environ(self):
        # Mutate os.environ while iterating the mapping itself.
        self._test_environ_iteration(os.environ)
    def test_iter_error_when_changing_os_environ_items(self):
        # Mutate os.environ while iterating its items() view.
        self._test_environ_iteration(os.environ.items())
    def test_iter_error_when_changing_os_environ_values(self):
        # Mutate os.environ while iterating its values() view.
        self._test_environ_iteration(os.environ.values())
class WalkTests(unittest.TestCase):
    """Tests for os.walk()."""

    # Wrapper to hide minor differences between os.walk and os.fwalk
    # to tests both functions with the same code base
    def walk(self, top, **kwargs):
        # Translate fwalk()'s keyword spelling into walk()'s.
        if 'follow_symlinks' in kwargs:
            kwargs['followlinks'] = kwargs.pop('follow_symlinks')
        return os.walk(top, **kwargs)

    def setUp(self):
        join = os.path.join
        self.addCleanup(support.rmtree, support.TESTFN)

        # Build:
        #     TESTFN/
        #       TEST1/            a file kid and two directory kids
        #         tmp1
        #         SUB1/           a file kid and a directory kid
        #           tmp2
        #           SUB11/        no kids
        #         SUB2/           a file kid and a dirsymlink kid
        #           tmp3
        #           SUB21/        not readable
        #             tmp5
        #           link/         a symlink to TESTFN.2
        #           broken_link
        #           broken_link2
        #           broken_link3
        #       TEST2/
        #         tmp4            a lone file
        self.walk_path = join(support.TESTFN, "TEST1")
        self.sub1_path = join(self.walk_path, "SUB1")
        self.sub11_path = join(self.sub1_path, "SUB11")
        sub2_path = join(self.walk_path, "SUB2")
        sub21_path = join(sub2_path, "SUB21")
        tmp1_path = join(self.walk_path, "tmp1")
        tmp2_path = join(self.sub1_path, "tmp2")
        tmp3_path = join(sub2_path, "tmp3")
        # NOTE(review): the diagram calls this file "tmp5" but it is created
        # as SUB21/"tmp3" -- confirm whether the name mismatch is intended
        # (broken_link3 below points at SUB21/'tmp5').
        tmp5_path = join(sub21_path, "tmp3")
        self.link_path = join(sub2_path, "link")
        t2_path = join(support.TESTFN, "TEST2")
        tmp4_path = join(support.TESTFN, "TEST2", "tmp4")
        broken_link_path = join(sub2_path, "broken_link")
        broken_link2_path = join(sub2_path, "broken_link2")
        broken_link3_path = join(sub2_path, "broken_link3")

        # Create stuff.
        os.makedirs(self.sub11_path)
        os.makedirs(sub2_path)
        os.makedirs(sub21_path)
        os.makedirs(t2_path)

        for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path, tmp5_path:
            with open(path, "x") as f:
                f.write("I'm " + path + " and proud of it. Blame test_os.\n")

        if support.can_symlink():
            os.symlink(os.path.abspath(t2_path), self.link_path)
            os.symlink('broken', broken_link_path, True)
            os.symlink(join('tmp3', 'broken'), broken_link2_path, True)
            os.symlink(join('SUB21', 'tmp5'), broken_link3_path, True)
            # Expected (root, dirs, files) triple for SUB2; the file lists
            # are compared after sorting by the individual tests.
            self.sub2_tree = (sub2_path, ["SUB21", "link"],
                              ["broken_link", "broken_link2", "broken_link3",
                               "tmp3"])
        else:
            self.sub2_tree = (sub2_path, ["SUB21"], ["tmp3"])

        # Make SUB21 unreadable; if the platform honors the mode (listdir
        # raises PermissionError) just register a cleanup, otherwise (e.g.
        # running as root, or on Windows) drop SUB21 from the fixture.
        os.chmod(sub21_path, 0)
        try:
            os.listdir(sub21_path)
        except PermissionError:
            self.addCleanup(os.chmod, sub21_path, stat.S_IRWXU)
        else:
            os.chmod(sub21_path, stat.S_IRWXU)
            os.unlink(tmp5_path)
            os.rmdir(sub21_path)
            # Remove "SUB21" from the expected dirs list.
            del self.sub2_tree[1][:1]

    def test_walk_topdown(self):
        # Walk top-down.
        all = list(self.walk(self.walk_path))

        self.assertEqual(len(all), 4)
        # We can't know which order SUB1 and SUB2 will appear in.
        # Not flipped:  TESTFN, SUB1, SUB11, SUB2
        #     flipped:  TESTFN, SUB2, SUB1, SUB11
        flipped = all[0][1][0] != "SUB1"
        all[0][1].sort()
        all[3 - 2 * flipped][-1].sort()
        all[3 - 2 * flipped][1].sort()
        self.assertEqual(all[0], (self.walk_path, ["SUB1", "SUB2"], ["tmp1"]))
        self.assertEqual(all[1 + flipped], (self.sub1_path, ["SUB11"], ["tmp2"]))
        self.assertEqual(all[2 + flipped], (self.sub11_path, [], []))
        self.assertEqual(all[3 - 2 * flipped], self.sub2_tree)

    def test_walk_prune(self, walk_path=None):
        if walk_path is None:
            walk_path = self.walk_path
        # Prune the search.
        all = []
        for root, dirs, files in self.walk(walk_path):
            all.append((root, dirs, files))
            # Don't descend into SUB1.
            if 'SUB1' in dirs:
                # Note that this also mutates the dirs we appended to all!
                dirs.remove('SUB1')

        self.assertEqual(len(all), 2)
        self.assertEqual(all[0], (self.walk_path, ["SUB2"], ["tmp1"]))

        all[1][-1].sort()
        all[1][1].sort()
        self.assertEqual(all[1], self.sub2_tree)

    def test_file_like_path(self):
        # Same pruning walk, but with a path-like object as top.
        self.test_walk_prune(FakePath(self.walk_path))

    def test_walk_bottom_up(self):
        # Walk bottom-up.
        all = list(self.walk(self.walk_path, topdown=False))

        self.assertEqual(len(all), 4, all)
        # We can't know which order SUB1 and SUB2 will appear in.
        # Not flipped:  SUB11, SUB1, SUB2, TESTFN
        #     flipped:  SUB2, SUB11, SUB1, TESTFN
        flipped = all[3][1][0] != "SUB1"
        all[3][1].sort()
        all[2 - 2 * flipped][-1].sort()
        all[2 - 2 * flipped][1].sort()
        self.assertEqual(all[3],
                         (self.walk_path, ["SUB1", "SUB2"], ["tmp1"]))
        self.assertEqual(all[flipped],
                         (self.sub11_path, [], []))
        self.assertEqual(all[flipped + 1],
                         (self.sub1_path, ["SUB11"], ["tmp2"]))
        self.assertEqual(all[2 - 2 * flipped],
                         self.sub2_tree)

    def test_walk_symlink(self):
        if not support.can_symlink():
            self.skipTest("need symlink support")

        # Walk, following symlinks.
        walk_it = self.walk(self.walk_path, follow_symlinks=True)
        for root, dirs, files in walk_it:
            if root == self.link_path:
                self.assertEqual(dirs, [])
                self.assertEqual(files, ["tmp4"])
                break
        else:
            self.fail("Didn't follow symlink with followlinks=True")

    def test_walk_bad_dir(self):
        # Walk top-down.
        errors = []
        walk_it = self.walk(self.walk_path, onerror=errors.append)
        root, dirs, files = next(walk_it)
        self.assertEqual(errors, [])
        # Rename SUB1 out from under the generator: the error must be
        # reported through onerror, not raised, and the rest of the walk
        # must continue.
        dir1 = 'SUB1'
        path1 = os.path.join(root, dir1)
        path1new = os.path.join(root, dir1 + '.new')
        os.rename(path1, path1new)
        try:
            roots = [r for r, d, f in walk_it]
            self.assertTrue(errors)
            self.assertNotIn(path1, roots)
            self.assertNotIn(path1new, roots)
            for dir2 in dirs:
                if dir2 != dir1:
                    self.assertIn(os.path.join(root, dir2), roots)
        finally:
            os.rename(path1new, path1)

    def test_walk_many_open_files(self):
        # 100 concurrent generators over a 30-deep tree must not exhaust
        # file descriptors (os.walk holds no fd between yields).
        depth = 30
        base = os.path.join(support.TESTFN, 'deep')
        p = os.path.join(base, *(['d']*depth))
        os.makedirs(p)

        iters = [self.walk(base, topdown=False) for j in range(100)]
        for i in range(depth + 1):
            expected = (p, ['d'] if i else [], [])
            for it in iters:
                self.assertEqual(next(it), expected)
            p = os.path.dirname(p)

        iters = [self.walk(base, topdown=True) for j in range(100)]
        p = base
        for i in range(depth + 1):
            expected = (p, ['d'] if i < depth else [], [])
            for it in iters:
                self.assertEqual(next(it), expected)
            p = os.path.join(p, 'd')
@unittest.skipUnless(hasattr(os, 'fwalk'), "Test needs os.fwalk()")
class FwalkTests(WalkTests):
    """Tests for os.fwalk()."""

    def walk(self, top, **kwargs):
        # Adapt fwalk()'s 4-tuples to walk()'s 3-tuples so the test methods
        # inherited from WalkTests run unchanged against os.fwalk().
        for root, dirs, files, root_fd in self.fwalk(top, **kwargs):
            yield (root, dirs, files)

    def fwalk(self, *args, **kwargs):
        return os.fwalk(*args, **kwargs)

    def _compare_to_walk(self, walk_kwargs, fwalk_kwargs):
        """
        compare with walk() results.
        """
        walk_kwargs = walk_kwargs.copy()
        fwalk_kwargs = fwalk_kwargs.copy()
        # Exercise all four (topdown, follow_symlinks) combinations.
        for topdown, follow_symlinks in itertools.product((True, False), repeat=2):
            walk_kwargs.update(topdown=topdown, followlinks=follow_symlinks)
            fwalk_kwargs.update(topdown=topdown, follow_symlinks=follow_symlinks)

            expected = {}
            for root, dirs, files in os.walk(**walk_kwargs):
                expected[root] = (set(dirs), set(files))

            for root, dirs, files, rootfd in self.fwalk(**fwalk_kwargs):
                self.assertIn(root, expected)
                self.assertEqual(expected[root], (set(dirs), set(files)))

    def test_compare_to_walk(self):
        kwargs = {'top': support.TESTFN}
        self._compare_to_walk(kwargs, kwargs)

    def test_dir_fd(self):
        # Fix: open the fd *before* the try block.  Previously the open was
        # inside the try, so a failing os.open() raised NameError on `fd`
        # in the finally clause, masking the original error.
        fd = os.open(".", os.O_RDONLY)
        try:
            walk_kwargs = {'top': support.TESTFN}
            fwalk_kwargs = walk_kwargs.copy()
            fwalk_kwargs['dir_fd'] = fd
            self._compare_to_walk(walk_kwargs, fwalk_kwargs)
        finally:
            os.close(fd)

    def test_yields_correct_dir_fd(self):
        # check returned file descriptors
        for topdown, follow_symlinks in itertools.product((True, False), repeat=2):
            args = support.TESTFN, topdown, None
            for root, dirs, files, rootfd in self.fwalk(*args, follow_symlinks=follow_symlinks):
                # check that the FD is valid
                os.fstat(rootfd)
                # redundant check
                os.stat(rootfd)
                # check that listdir() returns consistent information
                self.assertEqual(set(os.listdir(rootfd)), set(dirs) | set(files))

    def test_fd_leak(self):
        # Since we're opening a lot of FDs, we must be careful to avoid leaks:
        # we both check that calling fwalk() a large number of times doesn't
        # yield EMFILE, and that the minimum allocated FD hasn't changed.
        minfd = os.dup(1)
        os.close(minfd)
        for i in range(256):
            for x in self.fwalk(support.TESTFN):
                pass
        newfd = os.dup(1)
        self.addCleanup(os.close, newfd)
        self.assertEqual(newfd, minfd)

    # fwalk() keeps file descriptors open
    test_walk_many_open_files = None
class BytesWalkTests(WalkTests):
    """Tests for os.walk() with bytes."""
    def walk(self, top, **kwargs):
        # walk() spells the symlink option 'followlinks'.
        if 'follow_symlinks' in kwargs:
            kwargs['followlinks'] = kwargs.pop('follow_symlinks')
        # Drive os.walk() with a bytes top path, expose str triples to the
        # inherited tests, then mirror any in-place pruning of the yielded
        # lists back into the bytes lists that os.walk() actually uses.
        for encoded_root, encoded_dirs, encoded_files in os.walk(os.fsencode(top), **kwargs):
            decoded_dirs = [os.fsdecode(name) for name in encoded_dirs]
            decoded_files = [os.fsdecode(name) for name in encoded_files]
            yield (os.fsdecode(encoded_root), decoded_dirs, decoded_files)
            encoded_dirs[:] = [os.fsencode(name) for name in decoded_dirs]
            encoded_files[:] = [os.fsencode(name) for name in decoded_files]
@unittest.skipUnless(hasattr(os, 'fwalk'), "Test needs os.fwalk()")
class BytesFwalkTests(FwalkTests):
    """Tests for os.walk() with bytes."""
    def fwalk(self, top='.', *args, **kwargs):
        # Drive os.fwalk() with a bytes top path, expose str results (plus
        # the directory fd) to the inherited tests, and write any pruning of
        # the yielded lists back into the bytes lists os.fwalk() uses.
        for encoded_root, encoded_dirs, encoded_files, topfd in os.fwalk(os.fsencode(top), *args, **kwargs):
            decoded_dirs = [os.fsdecode(name) for name in encoded_dirs]
            decoded_files = [os.fsdecode(name) for name in encoded_files]
            yield (os.fsdecode(encoded_root), decoded_dirs, decoded_files, topfd)
            encoded_dirs[:] = [os.fsencode(name) for name in decoded_dirs]
            encoded_files[:] = [os.fsencode(name) for name in decoded_files]
class MakedirTests(unittest.TestCase):
    """Tests for os.makedirs() (nested creation, modes, exist_ok)."""

    def setUp(self):
        os.mkdir(support.TESTFN)

    def test_makedir(self):
        base = support.TESTFN
        # Several nested levels at once must work.
        path = os.path.join(base, 'dir1', 'dir2', 'dir3')
        os.makedirs(path)             # Should work
        path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4')
        os.makedirs(path)

        # Try paths with a '.' in them
        self.assertRaises(OSError, os.makedirs, os.curdir)
        path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', os.curdir)
        os.makedirs(path)
        path = os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3', 'dir4',
                            'dir5', 'dir6')
        os.makedirs(path)

    def test_mode(self):
        # The explicit mode applies to the leaf directory; intermediate
        # directories are created with the default mode filtered by umask.
        with support.temp_umask(0o002):
            base = support.TESTFN
            parent = os.path.join(base, 'dir1')
            path = os.path.join(parent, 'dir2')
            os.makedirs(path, 0o555)
            self.assertTrue(os.path.exists(path))
            self.assertTrue(os.path.isdir(path))
            if os.name != 'nt':
                self.assertEqual(os.stat(path).st_mode & 0o777, 0o555)
                self.assertEqual(os.stat(parent).st_mode & 0o777, 0o775)

    def test_exist_ok_existing_directory(self):
        path = os.path.join(support.TESTFN, 'dir1')
        mode = 0o777
        old_mask = os.umask(0o022)
        # Fix: restore the umask even when an assertion fails (matches the
        # try/finally discipline of test_exist_ok_s_isgid_directory below).
        try:
            os.makedirs(path, mode)
            self.assertRaises(OSError, os.makedirs, path, mode)
            self.assertRaises(OSError, os.makedirs, path, mode, exist_ok=False)
            os.makedirs(path, 0o776, exist_ok=True)
            os.makedirs(path, mode=mode, exist_ok=True)
        finally:
            os.umask(old_mask)

        # Issue #25583: A drive root could raise PermissionError on Windows
        os.makedirs(os.path.abspath('/'), exist_ok=True)

    def test_exist_ok_s_isgid_directory(self):
        path = os.path.join(support.TESTFN, 'dir1')
        S_ISGID = stat.S_ISGID
        mode = 0o777
        old_mask = os.umask(0o022)
        try:
            existing_testfn_mode = stat.S_IMODE(
                os.lstat(support.TESTFN).st_mode)
            try:
                os.chmod(support.TESTFN, existing_testfn_mode | S_ISGID)
            except PermissionError:
                raise unittest.SkipTest('Cannot set S_ISGID for dir.')
            if (os.lstat(support.TESTFN).st_mode & S_ISGID != S_ISGID):
                raise unittest.SkipTest('No support for S_ISGID dir mode.')
            # The os should apply S_ISGID from the parent dir for us, but
            # this test need not depend on that behavior. Be explicit.
            os.makedirs(path, mode | S_ISGID)
            # http://bugs.python.org/issue14992
            # Should not fail when the bit is already set.
            os.makedirs(path, mode, exist_ok=True)
            # remove the bit.
            os.chmod(path, stat.S_IMODE(os.lstat(path).st_mode) & ~S_ISGID)
            # May work even when the bit is not already set when demanded.
            os.makedirs(path, mode | S_ISGID, exist_ok=True)
        finally:
            os.umask(old_mask)

    def test_exist_ok_existing_regular_file(self):
        # makedirs() must fail on a path occupied by a regular file,
        # regardless of exist_ok.  (Removed an unused local `base`.)
        path = os.path.join(support.TESTFN, 'dir1')
        with open(path, 'w') as f:
            f.write('abc')
        self.assertRaises(OSError, os.makedirs, path)
        self.assertRaises(OSError, os.makedirs, path, exist_ok=False)
        self.assertRaises(OSError, os.makedirs, path, exist_ok=True)
        os.remove(path)

    def tearDown(self):
        path = os.path.join(support.TESTFN, 'dir1', 'dir2', 'dir3',
                            'dir4', 'dir5', 'dir6')
        # If the tests failed, the bottom-most directory ('../dir6')
        # may not have been created, so we look for the outermost directory
        # that exists.
        while not os.path.exists(path) and path != support.TESTFN:
            path = os.path.dirname(path)

        os.removedirs(path)
@unittest.skipUnless(hasattr(os, 'chown'), "Test needs chown")
class ChownFileTests(unittest.TestCase):
    """Tests for os.chown() against a shared test directory."""

    @classmethod
    def setUpClass(cls):
        os.mkdir(support.TESTFN)

    def test_chown_uid_gid_arguments_must_be_index(self):
        # Fix: renamed the local from `stat` to `st` -- it shadowed the
        # stdlib `stat` module imported at the top of this file.
        st = os.stat(support.TESTFN)
        uid = st.st_uid
        gid = st.st_gid
        # Numeric types without __index__ must be rejected with TypeError.
        for value in (-1.0, -1j, decimal.Decimal(-1), fractions.Fraction(-2, 2)):
            self.assertRaises(TypeError, os.chown, support.TESTFN, value, gid)
            self.assertRaises(TypeError, os.chown, support.TESTFN, uid, value)
        self.assertIsNone(os.chown(support.TESTFN, uid, gid))
        # -1 means "leave this id unchanged".
        self.assertIsNone(os.chown(support.TESTFN, -1, -1))

    @unittest.skipUnless(hasattr(os, 'getgroups'), 'need os.getgroups')
    def test_chown_gid(self):
        groups = os.getgroups()
        if len(groups) < 2:
            self.skipTest("test needs at least 2 groups")

        gid_1, gid_2 = groups[:2]
        uid = os.stat(support.TESTFN).st_uid

        os.chown(support.TESTFN, uid, gid_1)
        gid = os.stat(support.TESTFN).st_gid
        self.assertEqual(gid, gid_1)

        os.chown(support.TESTFN, uid, gid_2)
        gid = os.stat(support.TESTFN).st_gid
        self.assertEqual(gid, gid_2)

    @unittest.skipUnless(root_in_posix and len(all_users) > 1,
                         "test needs root privilege and more than one user")
    def test_chown_with_root(self):
        uid_1, uid_2 = all_users[:2]
        gid = os.stat(support.TESTFN).st_gid
        os.chown(support.TESTFN, uid_1, gid)
        uid = os.stat(support.TESTFN).st_uid
        self.assertEqual(uid, uid_1)
        os.chown(support.TESTFN, uid_2, gid)
        uid = os.stat(support.TESTFN).st_uid
        self.assertEqual(uid, uid_2)

    @unittest.skipUnless(not root_in_posix and len(all_users) > 1,
                         "test needs non-root account and more than one user")
    def test_chown_without_permission(self):
        # An unprivileged user cannot give files away to other users.
        uid_1, uid_2 = all_users[:2]
        gid = os.stat(support.TESTFN).st_gid
        with self.assertRaises(PermissionError):
            os.chown(support.TESTFN, uid_1, gid)
            os.chown(support.TESTFN, uid_2, gid)

    @classmethod
    def tearDownClass(cls):
        os.rmdir(support.TESTFN)
class RemoveDirsTests(unittest.TestCase):
    """Tests for os.removedirs(): removing a leaf directory also prunes any
    empty parent directories above it."""

    def setUp(self):
        os.makedirs(support.TESTFN)

    def tearDown(self):
        support.rmtree(support.TESTFN)

    def _make_nested(self):
        # Create TESTFN/dira/dirb and return (outer, inner) paths.
        outer = os.path.join(support.TESTFN, 'dira')
        inner = os.path.join(outer, 'dirb')
        os.mkdir(outer)
        os.mkdir(inner)
        return outer, inner

    def test_remove_all(self):
        # Every level is empty, so removal propagates all the way up.
        outer, inner = self._make_nested()
        os.removedirs(inner)
        for gone in (inner, outer, support.TESTFN):
            self.assertFalse(os.path.exists(gone))

    def test_remove_partial(self):
        # A file inside 'dira' stops the upward pruning at that level.
        outer, inner = self._make_nested()
        create_file(os.path.join(outer, 'file.txt'))
        os.removedirs(inner)
        self.assertFalse(os.path.exists(inner))
        self.assertTrue(os.path.exists(outer))
        self.assertTrue(os.path.exists(support.TESTFN))

    def test_remove_nothing(self):
        # A non-empty leaf directory cannot be removed at all.
        outer, inner = self._make_nested()
        create_file(os.path.join(inner, 'file.txt'))
        with self.assertRaises(OSError):
            os.removedirs(inner)
        for kept in (inner, outer, support.TESTFN):
            self.assertTrue(os.path.exists(kept))
class DevNullTests(unittest.TestCase):
    """Sanity checks for os.devnull: writes are discarded, reads hit EOF."""

    def test_devnull(self):
        # Writing to the null device must succeed (the data is discarded).
        # Fix: removed a redundant f.close() inside the `with` block -- the
        # context manager already closes the file on exit.
        with open(os.devnull, 'wb', 0) as f:
            f.write(b'hello')
        # Reading the null device always yields EOF immediately.
        with open(os.devnull, 'rb') as f:
            self.assertEqual(f.read(), b'')
class URandomTests(unittest.TestCase):
def test_urandom_length(self):
self.assertEqual(len(os.urandom(0)), 0)
self.assertEqual(len(os.urandom(1)), 1)
self.assertEqual(len(os.urandom(10)), 10)
self.assertEqual(len(os.urandom(100)), 100)
self.assertEqual(len(os.urandom(1000)), 1000)
def test_urandom_value(self):
data1 = os.urandom(16)
self.assertIsInstance(data1, bytes)
data2 = os.urandom(16)
self.assertNotEqual(data1, data2)
def get_urandom_subprocess(self, count):
code = '\n'.join((
'import os, sys',
'data = os.urandom(%s)' % count,
'sys.stdout.buffer.write(data)',
'sys.stdout.buffer.flush()'))
out = assert_python_ok('-c', code)
stdout = out[1]
self.assertEqual(len(stdout), count)
return stdout
def test_urandom_subprocess(self):
data1 = self.get_urandom_subprocess(16)
data2 = self.get_urandom_subprocess(16)
self.assertNotEqual(data1, data2)
@unittest.skipUnless(hasattr(os, 'getrandom'), 'need os.getrandom()')
class GetRandomTests(unittest.TestCase):
    """Tests for os.getrandom()."""

    @classmethod
    def setUpClass(cls):
        # Probe once: the wrapper may exist while the running kernel lacks
        # the syscall (ENOSYS); skip the whole class in that case.
        try:
            os.getrandom(1)
        except OSError as exc:
            if exc.errno == errno.ENOSYS:
                # Python compiled on a more recent Linux version
                # than the current Linux kernel
                raise unittest.SkipTest("getrandom() syscall fails with ENOSYS")
            else:
                raise

    def test_getrandom_type(self):
        # The result is a bytes object of exactly the requested size.
        data = os.getrandom(16)
        self.assertIsInstance(data, bytes)
        self.assertEqual(len(data), 16)

    def test_getrandom0(self):
        # A zero-length request yields an empty bytes object.
        empty = os.getrandom(0)
        self.assertEqual(empty, b'')

    def test_getrandom_random(self):
        # Only check that the flag constant exists.
        self.assertTrue(hasattr(os, 'GRND_RANDOM'))

        # Don't test os.getrandom(1, os.GRND_RANDOM) to not consume the rare
        # resource /dev/random

    def test_getrandom_nonblock(self):
        # The call must not fail. Check also that the flag exists
        try:
            os.getrandom(1, os.GRND_NONBLOCK)
        except BlockingIOError:
            # System urandom is not initialized yet
            pass

    def test_getrandom_value(self):
        # Two independent reads must differ.
        data1 = os.getrandom(16)
        data2 = os.getrandom(16)
        self.assertNotEqual(data1, data2)
# os.urandom() doesn't use a file descriptor when it is implemented with the
# getentropy() function, the getrandom() function or the getrandom() syscall
# (detected from the build-time configuration recorded by sysconfig).
OS_URANDOM_DONT_USE_FD = (
    sysconfig.get_config_var('HAVE_GETENTROPY') == 1
    or sysconfig.get_config_var('HAVE_GETRANDOM') == 1
    or sysconfig.get_config_var('HAVE_GETRANDOM_SYSCALL') == 1)
@unittest.skipIf(OS_URANDOM_DONT_USE_FD,
                 "os.urandom() does not use a file descriptor")
@unittest.skipIf(sys.platform == "vxworks",
                 "VxWorks can't set RLIMIT_NOFILE to 1")
class URandomFDTests(unittest.TestCase):
    """Tests for the /dev/urandom file-descriptor path of os.urandom().

    Fix: the first skip message previously read "os.random()"; it now names
    os.urandom() (the stray space before the comma is also gone).
    """

    @unittest.skipUnless(resource, "test requires the resource module")
    def test_urandom_failure(self):
        # Check urandom() failing when it is not able to open /dev/random.
        # We spawn a new process to make the test more robust (if getrlimit()
        # failed to restore the file descriptor limit after this, the whole
        # test suite would crash; this actually happened on the OS X Tiger
        # buildbot).
        code = """if 1:
            import errno
            import os
            import resource

            soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(resource.RLIMIT_NOFILE, (1, hard_limit))
            try:
                os.urandom(16)
            except OSError as e:
                assert e.errno == errno.EMFILE, e.errno
            else:
                raise AssertionError("OSError not raised")
            """
        assert_python_ok('-c', code)

    def test_urandom_fd_closed(self):
        # Issue #21207: urandom() should reopen its fd to /dev/urandom if
        # closed.
        code = """if 1:
            import os
            import sys
            import test.support
            os.urandom(4)
            with test.support.SuppressCrashReport():
                os.closerange(3, 256)
            sys.stdout.buffer.write(os.urandom(4))
            """
        rc, out, err = assert_python_ok('-Sc', code)

    def test_urandom_fd_reopened(self):
        # Issue #21207: urandom() should detect its fd to /dev/urandom
        # changed to something else, and reopen it.
        self.addCleanup(support.unlink, support.TESTFN)
        create_file(support.TESTFN, b"x" * 256)

        code = """if 1:
            import os
            import sys
            import test.support
            os.urandom(4)
            with test.support.SuppressCrashReport():
                for fd in range(3, 256):
                    try:
                        os.close(fd)
                    except OSError:
                        pass
                    else:
                        # Found the urandom fd (XXX hopefully)
                        break
                os.closerange(3, 256)
            with open({TESTFN!r}, 'rb') as f:
                new_fd = f.fileno()
                # Issue #26935: posix allows new_fd and fd to be equal but
                # some libc implementations have dup2 return an error in this
                # case.
                if new_fd != fd:
                    os.dup2(new_fd, fd)
                sys.stdout.buffer.write(os.urandom(4))
                sys.stdout.buffer.write(os.urandom(4))
            """.format(TESTFN=support.TESTFN)
        rc, out, err = assert_python_ok('-Sc', code)
        self.assertEqual(len(out), 8)
        self.assertNotEqual(out[0:4], out[4:8])
        rc, out2, err2 = assert_python_ok('-Sc', code)
        self.assertEqual(len(out2), 8)
        self.assertNotEqual(out2, out)
@contextlib.contextmanager
def _execvpe_mockup(defpath=None):
"""
Stubs out execv and execve functions when used as context manager.
Records exec calls. The mock execv and execve functions always raise an
exception as they would normally never return.
"""
# A list of tuples containing (function name, first arg, args)
# of calls to execv or execve that have been made.
calls = []
def mock_execv(name, *args):
calls.append(('execv', name, args))
raise RuntimeError("execv called")
def mock_execve(name, *args):
calls.append(('execve', name, args))
raise OSError(errno.ENOTDIR, "execve called")
try:
orig_execv = os.execv
orig_execve = os.execve
orig_defpath = os.defpath
os.execv = mock_execv
os.execve = mock_execve
if defpath is not None:
os.defpath = defpath
yield calls
finally:
os.execv = orig_execv
os.execve = orig_execve
os.defpath = orig_defpath
@unittest.skipUnless(hasattr(os, 'execv'),
                     "need os.execv()")
class ExecTests(unittest.TestCase):
    """Tests for the os.exec*() family and the internal os._execvpe()."""

    @unittest.skipIf(USING_LINUXTHREADS,
                     "avoid triggering a linuxthreads bug: see issue #4970")
    def test_execvpe_with_bad_program(self):
        # A nonexistent program must raise OSError.
        self.assertRaises(OSError, os.execvpe, 'no such app-',
                          ['no such app-'], None)

    def test_execv_with_bad_arglist(self):
        # argv must be non-empty and argv[0] must be a non-empty string.
        self.assertRaises(ValueError, os.execv, 'notepad', ())
        self.assertRaises(ValueError, os.execv, 'notepad', [])
        self.assertRaises(ValueError, os.execv, 'notepad', ('',))
        self.assertRaises(ValueError, os.execv, 'notepad', [''])

    def test_execvpe_with_bad_arglist(self):
        # Same argv validation through the execvpe() wrapper.
        self.assertRaises(ValueError, os.execvpe, 'notepad', [], None)
        self.assertRaises(ValueError, os.execvpe, 'notepad', [], {})
        self.assertRaises(ValueError, os.execvpe, 'notepad', [''], {})

    @unittest.skipUnless(hasattr(os, '_execvpe'),
                         "No internal os._execvpe function to test.")
    def _test_internal_execvpe(self, test_type):
        # Driver: check which of execv/execve os._execvpe() dispatches to
        # for absolute vs. relative program paths, using the mocks from
        # _execvpe_mockup().  *test_type* is str or bytes.
        program_path = os.sep + 'absolutepath'
        if test_type is bytes:
            program = b'executable'
            fullpath = os.path.join(os.fsencode(program_path), program)
            native_fullpath = fullpath
            # NOTE(review): argv mixes bytes and str here -- confirm intended.
            arguments = [b'progname', 'arg1', 'arg2']
        else:
            program = 'executable'
            arguments = ['progname', 'arg1', 'arg2']
            fullpath = os.path.join(program_path, program)
            if os.name != "nt":
                native_fullpath = os.fsencode(fullpath)
            else:
                native_fullpath = fullpath
        env = {'spam': 'beans'}

        # test os._execvpe() with an absolute path
        with _execvpe_mockup() as calls:
            self.assertRaises(RuntimeError,
                os._execvpe, fullpath, arguments)
            self.assertEqual(len(calls), 1)
            self.assertEqual(calls[0], ('execv', fullpath, (arguments,)))

        # test os._execvpe() with a relative path:
        # os.get_exec_path() returns defpath
        with _execvpe_mockup(defpath=program_path) as calls:
            self.assertRaises(OSError,
                os._execvpe, program, arguments, env=env)
            self.assertEqual(len(calls), 1)
            self.assertSequenceEqual(calls[0],
                ('execve', native_fullpath, (arguments, env)))

        # test os._execvpe() with a relative path:
        # os.get_exec_path() reads the 'PATH' variable
        with _execvpe_mockup() as calls:
            env_path = env.copy()
            if test_type is bytes:
                env_path[b'PATH'] = program_path
            else:
                env_path['PATH'] = program_path
            self.assertRaises(OSError,
                os._execvpe, program, arguments, env=env_path)
            self.assertEqual(len(calls), 1)
            self.assertSequenceEqual(calls[0],
                ('execve', native_fullpath, (arguments, env_path)))

    def test_internal_execvpe_str(self):
        self._test_internal_execvpe(str)
        if os.name != "nt":
            self._test_internal_execvpe(bytes)

    def test_execve_invalid_env(self):
        """Malformed environment entries must raise ValueError."""
        args = [sys.executable, '-c', 'pass']

        # null character in the environment variable name
        newenv = os.environ.copy()
        newenv["FRUIT\0VEGETABLE"] = "cabbage"
        with self.assertRaises(ValueError):
            os.execve(args[0], args, newenv)

        # null character in the environment variable value
        newenv = os.environ.copy()
        newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
        with self.assertRaises(ValueError):
            os.execve(args[0], args, newenv)

        # equal character in the environment variable name
        newenv = os.environ.copy()
        newenv["FRUIT=ORANGE"] = "lemon"
        with self.assertRaises(ValueError):
            os.execve(args[0], args, newenv)

    @unittest.skipUnless(sys.platform == "win32", "Win32-specific test")
    def test_execve_with_empty_path(self):
        # bpo-32890: Check GetLastError() misuse
        try:
            os.execve('', ['arg'], {})
        except OSError as e:
            self.assertTrue(e.winerror is None or e.winerror != 0)
        else:
            self.fail('No OSError raised')
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32ErrorTests(unittest.TestCase):
    """Check that os functions raise OSError for a nonexistent path on
    Windows."""

    def setUp(self):
        # Precondition: support.TESTFN must not exist.
        # Fix: removed dead `exists = False/True` assignments -- the variable
        # was never read.
        try:
            os.stat(support.TESTFN)
        except FileNotFoundError:
            pass
        except OSError as exc:
            self.fail("file %s must not exist; os.stat failed with %s"
                      % (support.TESTFN, exc))
        else:
            self.fail("file %s must not exist" % support.TESTFN)

    def test_rename(self):
        self.assertRaises(OSError, os.rename, support.TESTFN, support.TESTFN+".bak")

    def test_remove(self):
        self.assertRaises(OSError, os.remove, support.TESTFN)

    def test_chdir(self):
        self.assertRaises(OSError, os.chdir, support.TESTFN)

    def test_mkdir(self):
        self.addCleanup(support.unlink, support.TESTFN)

        # mkdir() must fail while the name is occupied by an open file.
        # (Removed the unused `as f` binding.)
        with open(support.TESTFN, "x"):
            self.assertRaises(OSError, os.mkdir, support.TESTFN)

    def test_utime(self):
        self.assertRaises(OSError, os.utime, support.TESTFN, None)

    def test_chmod(self):
        self.assertRaises(OSError, os.chmod, support.TESTFN, 0)
class TestInvalidFD(unittest.TestCase):
    """Every fd-taking os function must raise OSError(EBADF) for a bad fd."""

    # Functions taking only the fd; a test_<name> method is generated for
    # each one at class-creation time (see the loop below).
    singles = ["fchdir", "dup", "fdopen", "fdatasync", "fstat",
               "fstatvfs", "fsync", "tcgetpgrp", "ttyname"]
    #singles.append("close")
    #We omit close because it doesn't raise an exception on some platforms
    def get_single(f):
        # Factory executed in the class body (so no `self` here): returns a
        # test method checking os.<f> if the platform provides it.
        def helper(self):
            if  hasattr(os, f):
                self.check(getattr(os, f))
        return helper
    for f in singles:
        # Inject test_fchdir, test_dup, ... into the class namespace.
        locals()["test_"+f] = get_single(f)

    def check(self, f, *args):
        # Shared assertion: calling f on a known-bad fd raises EBADF.
        try:
            f(support.make_bad_fd(), *args)
        except OSError as e:
            self.assertEqual(e.errno, errno.EBADF)
        else:
            self.fail("%r didn't raise an OSError with a bad file descriptor"
                      % f)

    @unittest.skipUnless(hasattr(os, 'isatty'), 'test needs os.isatty()')
    def test_isatty(self):
        # isatty() reports False rather than raising for a bad fd.
        self.assertEqual(os.isatty(support.make_bad_fd()), False)

    @unittest.skipUnless(hasattr(os, 'closerange'), 'test needs os.closerange()')
    def test_closerange(self):
        fd = support.make_bad_fd()
        # Make sure none of the descriptors we are about to close are
        # currently valid (issue 6542).
        for i in range(10):
            try: os.fstat(fd+i)
            except OSError:
                pass
            else:
                break
        if i < 2:
            raise unittest.SkipTest(
                "Unable to acquire a range of invalid file descriptors")
        self.assertEqual(os.closerange(fd, fd + i-1), None)

    @unittest.skipUnless(hasattr(os, 'dup2'), 'test needs os.dup2()')
    def test_dup2(self):
        self.check(os.dup2, 20)

    @unittest.skipUnless(hasattr(os, 'fchmod'), 'test needs os.fchmod()')
    def test_fchmod(self):
        self.check(os.fchmod, 0)

    @unittest.skipUnless(hasattr(os, 'fchown'), 'test needs os.fchown()')
    def test_fchown(self):
        self.check(os.fchown, -1, -1)

    @unittest.skipUnless(hasattr(os, 'fpathconf'), 'test needs os.fpathconf()')
    def test_fpathconf(self):
        self.check(os.pathconf, "PC_NAME_MAX")
        self.check(os.fpathconf, "PC_NAME_MAX")

    @unittest.skipUnless(hasattr(os, 'ftruncate'), 'test needs os.ftruncate()')
    def test_ftruncate(self):
        self.check(os.truncate, 0)
        self.check(os.ftruncate, 0)

    @unittest.skipUnless(hasattr(os, 'lseek'), 'test needs os.lseek()')
    def test_lseek(self):
        self.check(os.lseek, 0, 0)

    @unittest.skipUnless(hasattr(os, 'read'), 'test needs os.read()')
    def test_read(self):
        self.check(os.read, 1)

    @unittest.skipUnless(hasattr(os, 'readv'), 'test needs os.readv()')
    def test_readv(self):
        buf = bytearray(10)
        self.check(os.readv, [buf])

    @unittest.skipUnless(hasattr(os, 'tcsetpgrp'), 'test needs os.tcsetpgrp()')
    def test_tcsetpgrpt(self):
        self.check(os.tcsetpgrp, 0)

    @unittest.skipUnless(hasattr(os, 'write'), 'test needs os.write()')
    def test_write(self):
        self.check(os.write, b" ")

    @unittest.skipUnless(hasattr(os, 'writev'), 'test needs os.writev()')
    def test_writev(self):
        self.check(os.writev, [b'abc'])

    def test_inheritable(self):
        self.check(os.get_inheritable)
        self.check(os.set_inheritable, True)

    @unittest.skipUnless(hasattr(os, 'get_blocking'),
                         'needs os.get_blocking() and os.set_blocking()')
    def test_blocking(self):
        self.check(os.get_blocking)
        self.check(os.set_blocking, True)
class LinkTests(unittest.TestCase):
    """Tests for os.link() (hard links)."""

    def setUp(self):
        self.file1 = support.TESTFN
        # Fix: os.path.join() with a single argument was a no-op wrapper.
        self.file2 = support.TESTFN + "2"

    def tearDown(self):
        for file in (self.file1, self.file2):
            if os.path.exists(file):
                os.unlink(file)

    def _test_link(self, file1, file2):
        # Create file1, hard-link it to file2, and verify both names refer
        # to the same underlying file.
        create_file(file1)

        try:
            os.link(file1, file2)
        except PermissionError as e:
            # e.g. filesystems without hard-link support, or sandboxing.
            self.skipTest('os.link(): %s' % e)
        with open(file1, "r") as f1, open(file2, "r") as f2:
            self.assertTrue(os.path.sameopenfile(f1.fileno(), f2.fileno()))

    def test_link(self):
        self._test_link(self.file1, self.file2)

    def test_link_bytes(self):
        self._test_link(bytes(self.file1, sys.getfilesystemencoding()),
                        bytes(self.file2, sys.getfilesystemencoding()))

    def test_unicode_name(self):
        try:
            os.fsencode("\xf1")
        except UnicodeError:
            raise unittest.SkipTest("Unable to encode for this platform.")

        self.file1 += "\xf1"
        self.file2 = self.file1 + "2"
        self._test_link(self.file1, self.file2)
@unittest.skipIf(sys.platform == "win32", "Posix specific tests")
class PosixUidGidTests(unittest.TestCase):
    """Argument validation for the POSIX uid/gid setter functions.

    The OSError checks only apply when running unprivileged; the TypeError
    and OverflowError checks are expected to fail during argument
    conversion, before any id change is attempted.
    """
    # uid_t and gid_t are 32-bit unsigned integers on Linux
    UID_OVERFLOW = (1 << 32)
    GID_OVERFLOW = (1 << 32)

    @unittest.skipUnless(hasattr(os, 'setuid'), 'test needs os.setuid()')
    def test_setuid(self):
        if os.getuid() != 0:
            self.assertRaises(OSError, os.setuid, 0)
        self.assertRaises(TypeError, os.setuid, 'not an int')
        self.assertRaises(OverflowError, os.setuid, self.UID_OVERFLOW)

    @unittest.skipUnless(hasattr(os, 'setgid'), 'test needs os.setgid()')
    def test_setgid(self):
        if os.getuid() != 0 and not HAVE_WHEEL_GROUP:
            self.assertRaises(OSError, os.setgid, 0)
        self.assertRaises(TypeError, os.setgid, 'not an int')
        self.assertRaises(OverflowError, os.setgid, self.GID_OVERFLOW)

    @unittest.skipUnless(hasattr(os, 'seteuid'), 'test needs os.seteuid()')
    def test_seteuid(self):
        if os.getuid() != 0:
            self.assertRaises(OSError, os.seteuid, 0)
        # Fix: this previously called os.setegid -- a copy/paste slip; the
        # seteuid test must exercise os.seteuid's own argument checking.
        self.assertRaises(TypeError, os.seteuid, 'not an int')
        self.assertRaises(OverflowError, os.seteuid, self.UID_OVERFLOW)

    @unittest.skipUnless(hasattr(os, 'setegid'), 'test needs os.setegid()')
    def test_setegid(self):
        if os.getuid() != 0 and not HAVE_WHEEL_GROUP:
            self.assertRaises(OSError, os.setegid, 0)
        self.assertRaises(TypeError, os.setegid, 'not an int')
        self.assertRaises(OverflowError, os.setegid, self.GID_OVERFLOW)

    @unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()')
    def test_setreuid(self):
        if os.getuid() != 0:
            self.assertRaises(OSError, os.setreuid, 0, 0)
        self.assertRaises(TypeError, os.setreuid, 'not an int', 0)
        self.assertRaises(TypeError, os.setreuid, 0, 'not an int')
        self.assertRaises(OverflowError, os.setreuid, self.UID_OVERFLOW, 0)
        self.assertRaises(OverflowError, os.setreuid, 0, self.UID_OVERFLOW)

    @unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()')
    def test_setreuid_neg1(self):
        # Needs to accept -1.  We run this in a subprocess to avoid
        # altering the test runner's process state (issue8045).
        subprocess.check_call([
                sys.executable, '-c',
                'import os,sys;os.setreuid(-1,-1);sys.exit(0)'])

    @unittest.skipUnless(hasattr(os, 'setregid'), 'test needs os.setregid()')
    def test_setregid(self):
        if os.getuid() != 0 and not HAVE_WHEEL_GROUP:
            self.assertRaises(OSError, os.setregid, 0, 0)
        self.assertRaises(TypeError, os.setregid, 'not an int', 0)
        self.assertRaises(TypeError, os.setregid, 0, 'not an int')
        self.assertRaises(OverflowError, os.setregid, self.GID_OVERFLOW, 0)
        self.assertRaises(OverflowError, os.setregid, 0, self.GID_OVERFLOW)

    @unittest.skipUnless(hasattr(os, 'setregid'), 'test needs os.setregid()')
    def test_setregid_neg1(self):
        # Needs to accept -1.  We run this in a subprocess to avoid
        # altering the test runner's process state (issue8045).
        subprocess.check_call([
                sys.executable, '-c',
                'import os,sys;os.setregid(-1,-1);sys.exit(0)'])
@unittest.skipIf(sys.platform == "win32", "Posix specific tests")
class Pep383Tests(unittest.TestCase):
    # PEP 383 (surrogateescape) coverage: non-ASCII / unencodable filenames
    # must round-trip through os.fsencode()/os.fsdecode() and be accepted by
    # the filename-taking os functions (listdir, stat, statvfs, open).
    def setUp(self):
        # Pick the most hostile directory name the platform can represent:
        # prefer an unencodable name, then a non-ASCII one, then plain TESTFN.
        if support.TESTFN_UNENCODABLE:
            self.dir = support.TESTFN_UNENCODABLE
        elif support.TESTFN_NONASCII:
            self.dir = support.TESTFN_NONASCII
        else:
            self.dir = support.TESTFN
        self.bdir = os.fsencode(self.dir)
        bytesfn = []
        def add_filename(fn):
            # Keep only names the filesystem encoding can actually encode.
            try:
                fn = os.fsencode(fn)
            except UnicodeEncodeError:
                return
            bytesfn.append(fn)
        add_filename(support.TESTFN_UNICODE)
        if support.TESTFN_UNENCODABLE:
            add_filename(support.TESTFN_UNENCODABLE)
        if support.TESTFN_NONASCII:
            add_filename(support.TESTFN_NONASCII)
        if not bytesfn:
            self.skipTest("couldn't create any non-ascii filename")
        self.unicodefn = set()
        os.mkdir(self.dir)
        try:
            # Create each file (as bytes) and remember its decoded (str) name.
            for fn in bytesfn:
                support.create_empty_file(os.path.join(self.bdir, fn))
                fn = os.fsdecode(fn)
                if fn in self.unicodefn:
                    raise ValueError("duplicate filename")
                self.unicodefn.add(fn)
        except:
            # Don't leave a half-built fixture behind if creation fails.
            shutil.rmtree(self.dir)
            raise
    def tearDown(self):
        shutil.rmtree(self.dir)
    def test_listdir(self):
        # os.listdir() must decode every created name back to str exactly.
        expected = self.unicodefn
        found = set(os.listdir(self.dir))
        self.assertEqual(found, expected)
        # test listdir without arguments
        current_directory = os.getcwd()
        try:
            os.chdir(os.sep)
            self.assertEqual(set(os.listdir()), set(os.listdir(os.sep)))
        finally:
            os.chdir(current_directory)
    def test_open(self):
        # open() must accept the decoded (str) form of each filename.
        for fn in self.unicodefn:
            f = open(os.path.join(self.dir, fn), 'rb')
            f.close()
    @unittest.skipUnless(hasattr(os, 'statvfs'),
                            "need os.statvfs()")
    def test_statvfs(self):
        # issue #9645
        for fn in self.unicodefn:
            # should not fail with file not found error
            fullname = os.path.join(self.dir, fn)
            os.statvfs(fullname)
    def test_stat(self):
        # os.stat() must accept the decoded (str) form of each filename.
        for fn in self.unicodefn:
            os.stat(os.path.join(self.dir, fn))
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32KillTests(unittest.TestCase):
    # Tests for os.kill() on Windows: plain "signals" set the child's exit
    # code, while CTRL_C/CTRL_BREAK are delivered as console control events.
    def _kill(self, sig):
        # Start sys.executable as a subprocess and communicate from the
        # subprocess to the parent that the interpreter is ready. When it
        # becomes ready, send *sig* via os.kill to the subprocess and check
        # that the return code is equal to *sig*.
        import ctypes
        from ctypes import wintypes
        import msvcrt
        # Since we can't access the contents of the process' stdout until the
        # process has exited, use PeekNamedPipe to see what's inside stdout
        # without waiting. This is done so we can tell that the interpreter
        # is started and running at a point where it could handle a signal.
        PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
        PeekNamedPipe.restype = wintypes.BOOL
        PeekNamedPipe.argtypes = (wintypes.HANDLE, # Pipe handle
                                  ctypes.POINTER(ctypes.c_char), # stdout buf
                                  wintypes.DWORD, # Buffer size
                                  ctypes.POINTER(wintypes.DWORD), # bytes read
                                  ctypes.POINTER(wintypes.DWORD), # bytes avail
                                  ctypes.POINTER(wintypes.DWORD)) # bytes left
        msg = "running"
        proc = subprocess.Popen([sys.executable, "-c",
                                 "import sys;"
                                 "sys.stdout.write('{}');"
                                 "sys.stdout.flush();"
                                 "input()".format(msg)],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                stdin=subprocess.PIPE)
        self.addCleanup(proc.stdout.close)
        self.addCleanup(proc.stderr.close)
        self.addCleanup(proc.stdin.close)
        # Poll up to ~10 seconds (100 * 0.1s) for the child's "running" marker.
        count, max = 0, 100
        while count < max and proc.poll() is None:
            # Create a string buffer to store the result of stdout from the pipe
            buf = ctypes.create_string_buffer(len(msg))
            # Obtain the text currently in proc.stdout
            # Bytes read/avail/left are left as NULL and unused
            rslt = PeekNamedPipe(msvcrt.get_osfhandle(proc.stdout.fileno()),
                                 buf, ctypes.sizeof(buf), None, None, None)
            self.assertNotEqual(rslt, 0, "PeekNamedPipe failed")
            if buf.value:
                self.assertEqual(msg, buf.value.decode())
                break
            time.sleep(0.1)
            count += 1
        else:
            self.fail("Did not receive communication from the subprocess")
        os.kill(proc.pid, sig)
        self.assertEqual(proc.wait(), sig)
    def test_kill_sigterm(self):
        # SIGTERM doesn't mean anything special, but make sure it works
        self._kill(signal.SIGTERM)
    def test_kill_int(self):
        # os.kill on Windows can take an int which gets set as the exit code
        self._kill(100)
    def _kill_with_event(self, event, name):
        # Send console control *event* to a child that signals readiness
        # through a shared memory-mapped flag; fail if it never exits.
        tagname = "test_os_%s" % uuid.uuid1()
        m = mmap.mmap(-1, 1, tagname)
        m[0] = 0
        # Run a script which has console control handling enabled.
        proc = subprocess.Popen([sys.executable,
                   os.path.join(os.path.dirname(__file__),
                                "win_console_handler.py"), tagname],
                   creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
        # Let the interpreter startup before we send signals. See #3137.
        count, max = 0, 100
        while count < max and proc.poll() is None:
            if m[0] == 1:
                break
            time.sleep(0.1)
            count += 1
        else:
            # Forcefully kill the process if we weren't able to signal it.
            os.kill(proc.pid, signal.SIGINT)
            self.fail("Subprocess didn't finish initialization")
        os.kill(proc.pid, event)
        # proc.send_signal(event) could also be done here.
        # Allow time for the signal to be passed and the process to exit.
        time.sleep(0.5)
        if not proc.poll():
            # Forcefully kill the process if we weren't able to signal it.
            os.kill(proc.pid, signal.SIGINT)
            self.fail("subprocess did not stop on {}".format(name))
    @unittest.skip("subprocesses aren't inheriting Ctrl+C property")
    def test_CTRL_C_EVENT(self):
        from ctypes import wintypes
        import ctypes
        # Make a NULL value by creating a pointer with no argument.
        NULL = ctypes.POINTER(ctypes.c_int)()
        SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler
        SetConsoleCtrlHandler.argtypes = (ctypes.POINTER(ctypes.c_int),
                                          wintypes.BOOL)
        SetConsoleCtrlHandler.restype = wintypes.BOOL
        # Calling this with NULL and FALSE causes the calling process to
        # handle Ctrl+C, rather than ignore it. This property is inherited
        # by subprocesses.
        SetConsoleCtrlHandler(NULL, 0)
        self._kill_with_event(signal.CTRL_C_EVENT, "CTRL_C_EVENT")
    def test_CTRL_BREAK_EVENT(self):
        self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT")
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32ListdirTests(unittest.TestCase):
    """Test listdir on Windows."""
    def setUp(self):
        # Create two subdirectories (SUB0/SUB1) and two files (FILE0/FILE1)
        # under support.TESTFN, and remember their names sorted.
        self.created_paths = []
        for i in range(2):
            dir_name = 'SUB%d' % i
            dir_path = os.path.join(support.TESTFN, dir_name)
            file_name = 'FILE%d' % i
            file_path = os.path.join(support.TESTFN, file_name)
            os.makedirs(dir_path)
            with open(file_path, 'w') as f:
                f.write("I'm %s and proud of it. Blame test_os.\n" % file_path)
            self.created_paths.extend([dir_name, file_name])
        self.created_paths.sort()
    def tearDown(self):
        shutil.rmtree(support.TESTFN)
    def test_listdir_no_extended_path(self):
        """Test when the path is not an "extended" path."""
        # unicode
        self.assertEqual(
                sorted(os.listdir(support.TESTFN)),
                self.created_paths)
        # bytes: a bytes argument yields bytes results
        self.assertEqual(
                sorted(os.listdir(os.fsencode(support.TESTFN))),
                [os.fsencode(path) for path in self.created_paths])
    def test_listdir_extended_path(self):
        """Test when the path starts with '\\\\?\\'."""
        # See: http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
        # unicode
        path = '\\\\?\\' + os.path.abspath(support.TESTFN)
        self.assertEqual(
                sorted(os.listdir(path)),
                self.created_paths)
        # bytes
        path = b'\\\\?\\' + os.fsencode(os.path.abspath(support.TESTFN))
        self.assertEqual(
                sorted(os.listdir(path)),
                [os.fsencode(path) for path in self.created_paths])
@unittest.skipUnless(hasattr(os, 'readlink'), 'needs os.readlink()')
class ReadlinkTests(unittest.TestCase):
    # Tests for os.readlink() with str, bytes and path-like arguments.
    filelink = 'readlinktest'
    filelink_target = os.path.abspath(__file__)
    filelinkb = os.fsencode(filelink)
    filelinkb_target = os.fsencode(filelink_target)
    def assertPathEqual(self, left, right):
        # Compare two paths case-normalized; on Windows also ignore a
        # leading '\\?\' extended-path prefix on either side.
        left = os.path.normcase(left)
        right = os.path.normcase(right)
        if sys.platform == 'win32':
            # Bad practice to blindly strip the prefix as it may be required to
            # correctly refer to the file, but we're only comparing paths here.
            has_prefix = lambda p: p.startswith(
                b'\\\\?\\' if isinstance(p, bytes) else '\\\\?\\')
            if has_prefix(left):
                left = left[4:]
            if has_prefix(right):
                right = right[4:]
        self.assertEqual(left, right)
    def setUp(self):
        # Link targets must exist; the link names must be unused.
        self.assertTrue(os.path.exists(self.filelink_target))
        self.assertTrue(os.path.exists(self.filelinkb_target))
        self.assertFalse(os.path.exists(self.filelink))
        self.assertFalse(os.path.exists(self.filelinkb))
    def test_not_symlink(self):
        # readlink() on a regular (non-link) file raises OSError,
        # for plain and path-like arguments alike.
        filelink_target = FakePath(self.filelink_target)
        self.assertRaises(OSError, os.readlink, self.filelink_target)
        self.assertRaises(OSError, os.readlink, filelink_target)
    def test_missing_link(self):
        self.assertRaises(FileNotFoundError, os.readlink, 'missing-link')
        self.assertRaises(FileNotFoundError, os.readlink,
                          FakePath('missing-link'))
    @support.skip_unless_symlink
    def test_pathlike(self):
        os.symlink(self.filelink_target, self.filelink)
        self.addCleanup(support.unlink, self.filelink)
        filelink = FakePath(self.filelink)
        self.assertPathEqual(os.readlink(filelink), self.filelink_target)
    @support.skip_unless_symlink
    def test_pathlike_bytes(self):
        # A bytes path-like argument yields a bytes result.
        os.symlink(self.filelinkb_target, self.filelinkb)
        self.addCleanup(support.unlink, self.filelinkb)
        path = os.readlink(FakePath(self.filelinkb))
        self.assertPathEqual(path, self.filelinkb_target)
        self.assertIsInstance(path, bytes)
    @support.skip_unless_symlink
    def test_bytes(self):
        # A plain bytes argument also yields a bytes result.
        os.symlink(self.filelinkb_target, self.filelinkb)
        self.addCleanup(support.unlink, self.filelinkb)
        path = os.readlink(self.filelinkb)
        self.assertPathEqual(path, self.filelinkb_target)
        self.assertIsInstance(path, bytes)
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
@support.skip_unless_symlink
class Win32SymlinkTests(unittest.TestCase):
    # Windows-specific symlink behavior: file vs. directory links, links to
    # missing targets, stat()/lstat() interaction, and historical regressions.
    filelink = 'filelinktest'
    filelink_target = os.path.abspath(__file__)
    dirlink = 'dirlinktest'
    dirlink_target = os.path.dirname(filelink_target)
    missing_link = 'missing link'
    def setUp(self):
        # Targets must exist; the link names must be free.
        assert os.path.exists(self.dirlink_target)
        assert os.path.exists(self.filelink_target)
        assert not os.path.exists(self.dirlink)
        assert not os.path.exists(self.filelink)
        assert not os.path.exists(self.missing_link)
    def tearDown(self):
        # Remove whatever links a test left behind.
        if os.path.exists(self.filelink):
            os.remove(self.filelink)
        if os.path.exists(self.dirlink):
            os.rmdir(self.dirlink)
        if os.path.lexists(self.missing_link):
            os.remove(self.missing_link)
    def test_directory_link(self):
        os.symlink(self.dirlink_target, self.dirlink)
        self.assertTrue(os.path.exists(self.dirlink))
        self.assertTrue(os.path.isdir(self.dirlink))
        self.assertTrue(os.path.islink(self.dirlink))
        self.check_stat(self.dirlink, self.dirlink_target)
    def test_file_link(self):
        os.symlink(self.filelink_target, self.filelink)
        self.assertTrue(os.path.exists(self.filelink))
        self.assertTrue(os.path.isfile(self.filelink))
        self.assertTrue(os.path.islink(self.filelink))
        self.check_stat(self.filelink, self.filelink_target)
    def _create_missing_dir_link(self):
        'Create a "directory" link to a non-existent target'
        linkname = self.missing_link
        if os.path.lexists(linkname):
            os.remove(linkname)
        target = r'c:\\target does not exist.29r3c740'
        assert not os.path.exists(target)
        target_is_dir = True
        os.symlink(target, linkname, target_is_dir)
    def test_remove_directory_link_to_missing_target(self):
        self._create_missing_dir_link()
        # For compatibility with Unix, os.remove will check the
        # directory status and call RemoveDirectory if the symlink
        # was created with target_is_dir==True.
        os.remove(self.missing_link)
    def test_isdir_on_directory_link_to_missing_target(self):
        # A dangling "directory" link is not reported as a directory.
        self._create_missing_dir_link()
        self.assertFalse(os.path.isdir(self.missing_link))
    def test_rmdir_on_directory_link_to_missing_target(self):
        # rmdir() must still be able to remove the dangling link itself.
        self._create_missing_dir_link()
        os.rmdir(self.missing_link)
    def check_stat(self, link, target):
        # stat() follows the link (equals the target's stat); lstat() does
        # not (differs from stat()); same for the bytes form of the path.
        self.assertEqual(os.stat(link), os.stat(target))
        self.assertNotEqual(os.lstat(link), os.stat(link))
        bytes_link = os.fsencode(link)
        self.assertEqual(os.stat(bytes_link), os.stat(target))
        self.assertNotEqual(os.lstat(bytes_link), os.stat(bytes_link))
    def test_12084(self):
        # Relative symlink targets must resolve relative to the link's own
        # directory, regardless of the current working directory (issue 12084).
        level1 = os.path.abspath(support.TESTFN)
        level2 = os.path.join(level1, "level2")
        level3 = os.path.join(level2, "level3")
        self.addCleanup(support.rmtree, level1)
        os.mkdir(level1)
        os.mkdir(level2)
        os.mkdir(level3)
        file1 = os.path.abspath(os.path.join(level1, "file1"))
        create_file(file1)
        orig_dir = os.getcwd()
        try:
            os.chdir(level2)
            link = os.path.join(level2, "link")
            os.symlink(os.path.relpath(file1), "link")
            self.assertIn("link", os.listdir(os.getcwd()))
            # Check os.stat calls from the same dir as the link
            self.assertEqual(os.stat(file1), os.stat("link"))
            # Check os.stat calls from a dir below the link
            os.chdir(level1)
            self.assertEqual(os.stat(file1),
                             os.stat(os.path.relpath(link)))
            # Check os.stat calls from a dir above the link
            os.chdir(level3)
            self.assertEqual(os.stat(file1),
                             os.stat(os.path.relpath(link)))
        finally:
            os.chdir(orig_dir)
    @unittest.skipUnless(os.path.lexists(r'C:\Users\All Users')
                            and os.path.exists(r'C:\ProgramData'),
                            'Test directories not found')
    def test_29248(self):
        # os.symlink() calls CreateSymbolicLink, which creates
        # the reparse data buffer with the print name stored
        # first, so the offset is always 0. CreateSymbolicLink
        # stores the "PrintName" DOS path (e.g. "C:\") first,
        # with an offset of 0, followed by the "SubstituteName"
        # NT path (e.g. "\??\C:\"). The "All Users" link, on
        # the other hand, seems to have been created manually
        # with an inverted order.
        target = os.readlink(r'C:\Users\All Users')
        self.assertTrue(os.path.samefile(target, r'C:\ProgramData'))
    def test_buffer_overflow(self):
        # Older versions would have a buffer overflow when detecting
        # whether a link source was a directory. This test ensures we
        # no longer crash, but does not otherwise validate the behavior
        segment = 'X' * 27
        path = os.path.join(*[segment] * 10)
        test_cases = [
            # overflow with absolute src
            ('\\' + path, segment),
            # overflow dest with relative src
            (segment, path),
            # overflow when joining src
            (path[:180], path[:180]),
        ]
        for src, dest in test_cases:
            try:
                os.symlink(src, dest)
            except FileNotFoundError:
                pass
            else:
                try:
                    os.remove(dest)
                except OSError:
                    pass
            # Also test with bytes, since that is a separate code path.
            try:
                os.symlink(os.fsencode(src), os.fsencode(dest))
            except FileNotFoundError:
                pass
            else:
                try:
                    os.remove(dest)
                except OSError:
                    pass
    def test_appexeclink(self):
        # App execution aliases: lstat() equals stat() for them and they are
        # not reported as symlinks; their reparse tag is APPEXECLINK.
        root = os.path.expandvars(r'%LOCALAPPDATA%\Microsoft\WindowsApps')
        if not os.path.isdir(root):
            self.skipTest("test requires a WindowsApps directory")
        aliases = [os.path.join(root, a)
                   for a in fnmatch.filter(os.listdir(root), '*.exe')]
        for alias in aliases:
            if support.verbose:
                print()
                print("Testing with", alias)
            st = os.lstat(alias)
            self.assertEqual(st, os.stat(alias))
            self.assertFalse(stat.S_ISLNK(st.st_mode))
            self.assertEqual(st.st_reparse_tag, stat.IO_REPARSE_TAG_APPEXECLINK)
            # testing the first one we see is sufficient
            break
        else:
            self.skipTest("test requires an app execution alias")
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32JunctionTests(unittest.TestCase):
    """NTFS junction handling, created via _winapi.CreateJunction()."""
    junction = 'junctiontest'
    junction_target = os.path.dirname(os.path.abspath(__file__))

    def setUp(self):
        # The target must exist and the junction name must be free.
        assert os.path.exists(self.junction_target)
        assert not os.path.lexists(self.junction)

    def tearDown(self):
        # Clean up any junction a test left behind.
        if os.path.lexists(self.junction):
            os.unlink(self.junction)

    def test_create_junction(self):
        _winapi.CreateJunction(self.junction_target, self.junction)
        self.assertTrue(os.path.lexists(self.junction))
        self.assertTrue(os.path.exists(self.junction))
        self.assertTrue(os.path.isdir(self.junction))
        # A junction stats like its target, while lstat sees the junction.
        self.assertNotEqual(os.stat(self.junction), os.lstat(self.junction))
        self.assertEqual(os.stat(self.junction), os.stat(self.junction_target))
        # bpo-37834: Junctions are not recognized as links.
        self.assertFalse(os.path.islink(self.junction))
        self.assertEqual(os.path.normcase("\\\\?\\" + self.junction_target),
                         os.path.normcase(os.readlink(self.junction)))

    def test_unlink_removes_junction(self):
        _winapi.CreateJunction(self.junction_target, self.junction)
        self.assertTrue(os.path.exists(self.junction))
        self.assertTrue(os.path.lexists(self.junction))
        os.unlink(self.junction)
        self.assertFalse(os.path.exists(self.junction))
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32NtTests(unittest.TestCase):
    def test_getfinalpathname_handles(self):
        # nt._getfinalpathname() and os.stat() must not leak OS handles:
        # the process handle count is sampled before and after repeatedly
        # exercising both the failure and the success paths.
        nt = support.import_module('nt')
        ctypes = support.import_module('ctypes')
        import ctypes.wintypes
        kernel = ctypes.WinDLL('Kernel32.dll', use_last_error=True)
        kernel.GetCurrentProcess.restype = ctypes.wintypes.HANDLE
        kernel.GetProcessHandleCount.restype = ctypes.wintypes.BOOL
        kernel.GetProcessHandleCount.argtypes = (ctypes.wintypes.HANDLE,
                                                 ctypes.wintypes.LPDWORD)
        # This is a pseudo-handle that doesn't need to be closed
        hproc = kernel.GetCurrentProcess()
        handle_count = ctypes.wintypes.DWORD()
        ok = kernel.GetProcessHandleCount(hproc, ctypes.byref(handle_count))
        self.assertEqual(1, ok)
        before_count = handle_count.value
        # The first two test the error path, __file__ tests the success path
        filenames = [
            r'\\?\C:',
            r'\\?\NUL',
            r'\\?\CONIN',
            __file__,
            ]
        for _ in range(10):
            for name in filenames:
                try:
                    nt._getfinalpathname(name)
                except Exception:
                    # Failure is expected
                    pass
                try:
                    os.stat(name)
                except Exception:
                    pass
        # Re-sample the handle count: a leak would show up as a positive delta.
        ok = kernel.GetProcessHandleCount(hproc, ctypes.byref(handle_count))
        self.assertEqual(1, ok)
        handle_delta = handle_count.value - before_count
        self.assertEqual(0, handle_delta)
@support.skip_unless_symlink
class NonLocalSymlinkTests(unittest.TestCase):
    r"""Regression tests for symlink target resolution (issue13772).

    Each test runs inside this fixture, created in the current directory:
        base
         \___ some_dir
    """

    def setUp(self):
        os.makedirs('base/some_dir')

    def tearDown(self):
        shutil.rmtree('base')

    def test_directory_link_nonlocal(self):
        """
        The symlink target should resolve relative to the link, not relative
        to the current directory.
        Then, link base/some_link -> base/some_dir and ensure that some_link
        is resolved as a directory.
        In issue13772, it was discovered that directory detection failed if
        the symlink target was not specified relative to the current
        directory, which was a defect in the implementation.
        """
        link_path = os.path.join('base', 'some_link')
        os.symlink('some_dir', link_path)
        assert os.path.isdir(link_path)
class FSEncodingTests(unittest.TestCase):
    """Round-trip tests for os.fsencode()/os.fsdecode()."""

    def test_nop(self):
        # bytes pass through fsencode unchanged; str passes through fsdecode.
        self.assertEqual(os.fsencode(b'abc\xff'), b'abc\xff')
        self.assertEqual(os.fsdecode('abc\u0141'), 'abc\u0141')

    def test_identity(self):
        # fsdecode(fsencode(x)) == x for every name the filesystem
        # encoding can represent; unencodable names are simply skipped.
        for name in ('unicode\u0141', 'latin\xe9', 'ascii'):
            try:
                encoded = os.fsencode(name)
            except UnicodeEncodeError:
                continue
            self.assertEqual(os.fsdecode(encoded), name)
class DeviceEncodingTests(unittest.TestCase):
    """Tests for os.device_encoding()."""

    def test_bad_fd(self):
        # A file descriptor that does not exist has no encoding.
        self.assertIsNone(os.device_encoding(123456))

    @unittest.skipUnless(os.isatty(0) and not win32_is_iot() and (sys.platform.startswith('win') or
            (hasattr(locale, 'nl_langinfo') and hasattr(locale, 'CODESET'))),
            'test requires a tty and either Windows or nl_langinfo(CODESET)')
    def test_device_encoding(self):
        # stdin is a tty here, so it must report a real, resolvable codec.
        name = os.device_encoding(0)
        self.assertIsNotNone(name)
        self.assertTrue(codecs.lookup(name))
class PidTests(unittest.TestCase):
@unittest.skipUnless(hasattr(os, 'getppid'), "test needs os.getppid")
def test_getppid(self):
p = subprocess.Popen([sys.executable, '-c',
'import os; print(os.getppid())'],
stdout=subprocess.PIPE)
stdout, _ = p.communicate()
# We are the parent of our subprocess
self.assertEqual(int(stdout), os.getpid())
def test_waitpid(self):
args = [sys.executable, '-c', 'pass']
# Add an implicit test for PyUnicode_FSConverter().
pid = os.spawnv(os.P_NOWAIT, FakePath(args[0]), args)
status = os.waitpid(pid, 0)
self.assertEqual(status, (pid, 0))
class SpawnTests(unittest.TestCase):
    # Coverage for the whole os.spawn*() family: success exit codes,
    # environment passing, bytes arguments, and argument validation.
    def create_args(self, *, with_env=False, use_bytes=False):
        # Write a tiny script to support.TESTFN that exits with
        # self.exitcode; with with_env=True the script additionally requires
        # a unique key (self.key, stored in self.env) in its environment.
        # Returns the argv list to spawn (optionally encoded to bytes).
        self.exitcode = 17
        filename = support.TESTFN
        self.addCleanup(support.unlink, filename)
        if not with_env:
            code = 'import sys; sys.exit(%s)' % self.exitcode
        else:
            self.env = dict(os.environ)
            # create an unique key
            self.key = str(uuid.uuid4())
            self.env[self.key] = self.key
            # read the variable from os.environ to check that it exists
            code = ('import sys, os; magic = os.environ[%r]; sys.exit(%s)'
                    % (self.key, self.exitcode))
        with open(filename, "w") as fp:
            fp.write(code)
        args = [sys.executable, filename]
        if use_bytes:
            args = [os.fsencode(a) for a in args]
            self.env = {os.fsencode(k): os.fsencode(v)
                        for k, v in self.env.items()}
        return args
    @requires_os_func('spawnl')
    def test_spawnl(self):
        args = self.create_args()
        exitcode = os.spawnl(os.P_WAIT, args[0], *args)
        self.assertEqual(exitcode, self.exitcode)
    @requires_os_func('spawnle')
    def test_spawnle(self):
        args = self.create_args(with_env=True)
        exitcode = os.spawnle(os.P_WAIT, args[0], *args, self.env)
        self.assertEqual(exitcode, self.exitcode)
    @requires_os_func('spawnlp')
    def test_spawnlp(self):
        args = self.create_args()
        exitcode = os.spawnlp(os.P_WAIT, args[0], *args)
        self.assertEqual(exitcode, self.exitcode)
    @requires_os_func('spawnlpe')
    def test_spawnlpe(self):
        args = self.create_args(with_env=True)
        exitcode = os.spawnlpe(os.P_WAIT, args[0], *args, self.env)
        self.assertEqual(exitcode, self.exitcode)
    @requires_os_func('spawnv')
    def test_spawnv(self):
        args = self.create_args()
        exitcode = os.spawnv(os.P_WAIT, args[0], args)
        self.assertEqual(exitcode, self.exitcode)
    @requires_os_func('spawnve')
    def test_spawnve(self):
        args = self.create_args(with_env=True)
        exitcode = os.spawnve(os.P_WAIT, args[0], args, self.env)
        self.assertEqual(exitcode, self.exitcode)
    @requires_os_func('spawnvp')
    def test_spawnvp(self):
        args = self.create_args()
        exitcode = os.spawnvp(os.P_WAIT, args[0], args)
        self.assertEqual(exitcode, self.exitcode)
    @requires_os_func('spawnvpe')
    def test_spawnvpe(self):
        args = self.create_args(with_env=True)
        exitcode = os.spawnvpe(os.P_WAIT, args[0], args, self.env)
        self.assertEqual(exitcode, self.exitcode)
    @requires_os_func('spawnv')
    def test_nowait(self):
        # With P_NOWAIT the pid is returned immediately; the exit status is
        # then collected via os.waitpid() and decoded where WIFEXITED exists.
        args = self.create_args()
        pid = os.spawnv(os.P_NOWAIT, args[0], args)
        result = os.waitpid(pid, 0)
        self.assertEqual(result[0], pid)
        status = result[1]
        if hasattr(os, 'WIFEXITED'):
            self.assertTrue(os.WIFEXITED(status))
            self.assertEqual(os.WEXITSTATUS(status), self.exitcode)
        else:
            self.assertEqual(status, self.exitcode << 8)
    @requires_os_func('spawnve')
    def test_spawnve_bytes(self):
        # Test bytes handling in parse_arglist and parse_envlist (#28114)
        args = self.create_args(with_env=True, use_bytes=True)
        exitcode = os.spawnve(os.P_WAIT, args[0], args, self.env)
        self.assertEqual(exitcode, self.exitcode)
    @requires_os_func('spawnl')
    def test_spawnl_noargs(self):
        # An empty (or empty-string) argument list must be rejected.
        args = self.create_args()
        self.assertRaises(ValueError, os.spawnl, os.P_NOWAIT, args[0])
        self.assertRaises(ValueError, os.spawnl, os.P_NOWAIT, args[0], '')
    @requires_os_func('spawnle')
    def test_spawnle_noargs(self):
        args = self.create_args()
        self.assertRaises(ValueError, os.spawnle, os.P_NOWAIT, args[0], {})
        self.assertRaises(ValueError, os.spawnle, os.P_NOWAIT, args[0], '', {})
    @requires_os_func('spawnv')
    def test_spawnv_noargs(self):
        args = self.create_args()
        self.assertRaises(ValueError, os.spawnv, os.P_NOWAIT, args[0], ())
        self.assertRaises(ValueError, os.spawnv, os.P_NOWAIT, args[0], [])
        self.assertRaises(ValueError, os.spawnv, os.P_NOWAIT, args[0], ('',))
        self.assertRaises(ValueError, os.spawnv, os.P_NOWAIT, args[0], [''])
    @requires_os_func('spawnve')
    def test_spawnve_noargs(self):
        args = self.create_args()
        self.assertRaises(ValueError, os.spawnve, os.P_NOWAIT, args[0], (), {})
        self.assertRaises(ValueError, os.spawnve, os.P_NOWAIT, args[0], [], {})
        self.assertRaises(ValueError, os.spawnve, os.P_NOWAIT, args[0], ('',), {})
        self.assertRaises(ValueError, os.spawnve, os.P_NOWAIT, args[0], [''], {})
    def _test_invalid_env(self, spawn):
        # Environment mappings with NUL bytes, or '=' in a variable name,
        # must either raise ValueError or make the child fail to start (127);
        # '=' in a variable *value* must be passed through untouched.
        args = [sys.executable, '-c', 'pass']
        # null character in the environment variable name
        newenv = os.environ.copy()
        newenv["FRUIT\0VEGETABLE"] = "cabbage"
        try:
            exitcode = spawn(os.P_WAIT, args[0], args, newenv)
        except ValueError:
            pass
        else:
            self.assertEqual(exitcode, 127)
        # null character in the environment variable value
        newenv = os.environ.copy()
        newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
        try:
            exitcode = spawn(os.P_WAIT, args[0], args, newenv)
        except ValueError:
            pass
        else:
            self.assertEqual(exitcode, 127)
        # equal character in the environment variable name
        newenv = os.environ.copy()
        newenv["FRUIT=ORANGE"] = "lemon"
        try:
            exitcode = spawn(os.P_WAIT, args[0], args, newenv)
        except ValueError:
            pass
        else:
            self.assertEqual(exitcode, 127)
        # equal character in the environment variable value
        filename = support.TESTFN
        self.addCleanup(support.unlink, filename)
        with open(filename, "w") as fp:
            fp.write('import sys, os\n'
                     'if os.getenv("FRUIT") != "orange=lemon":\n'
                     '    raise AssertionError')
        args = [sys.executable, filename]
        newenv = os.environ.copy()
        newenv["FRUIT"] = "orange=lemon"
        exitcode = spawn(os.P_WAIT, args[0], args, newenv)
        self.assertEqual(exitcode, 0)
    @requires_os_func('spawnve')
    def test_spawnve_invalid_env(self):
        self._test_invalid_env(os.spawnve)
    @requires_os_func('spawnvpe')
    def test_spawnvpe_invalid_env(self):
        self._test_invalid_env(os.spawnvpe)
# The introduction of this TestCase caused at least two different errors on
# *nix buildbots. Temporarily skip this to let the buildbots move along.
@unittest.skip("Skip due to platform/environment differences on *NIX buildbots")
@unittest.skipUnless(hasattr(os, 'getlogin'), "test needs os.getlogin")
class LoginTests(unittest.TestCase):
    """Smoke test for os.getlogin() (currently disabled, see above)."""

    def test_getlogin(self):
        # The login name must be a non-empty string.
        self.assertNotEqual(len(os.getlogin()), 0)
@unittest.skipUnless(hasattr(os, 'getpriority') and hasattr(os, 'setpriority'),
                     "needs os.getpriority and os.setpriority")
class ProgramPriorityTests(unittest.TestCase):
    """Tests for os.getpriority() and os.setpriority()."""

    def test_set_get_priority(self):
        # Raise our own nice value by one, read it back, then restore it.
        original = os.getpriority(os.PRIO_PROCESS, os.getpid())
        os.setpriority(os.PRIO_PROCESS, os.getpid(), original + 1)
        try:
            updated = os.getpriority(os.PRIO_PROCESS, os.getpid())
            if original >= 19 and updated <= 19:
                # Already at (or clamped to) the maximum nice level; the
                # increment cannot be observed reliably.
                raise unittest.SkipTest("unable to reliably test setpriority "
                                        "at current nice level of %s" % original)
            else:
                self.assertEqual(updated, original + 1)
        finally:
            try:
                os.setpriority(os.PRIO_PROCESS, os.getpid(), original)
            except OSError as err:
                # Unprivileged processes may not lower their nice value back.
                if err.errno != errno.EACCES:
                    raise
class SendfileTestServer(asyncore.dispatcher, threading.Thread):
    # A TCP server running the asyncore loop in its own thread.  It accepts
    # one connection and accumulates everything the client sends, for later
    # inspection via handler_instance.get_data(); used by TestSendfile.
    # NOTE(review): asyncore/asynchat are deprecated/removed in modern
    # Python — this helper only works on versions that still ship them.
    class Handler(asynchat.async_chat):
        # Per-connection handler: greets the client, then buffers all
        # received data (unless .accumulate is cleared by the test).
        def __init__(self, conn):
            asynchat.async_chat.__init__(self, conn)
            self.in_buffer = []
            self.accumulate = True
            self.closed = False
            self.push(b"220 ready\r\n")
        def handle_read(self):
            data = self.recv(4096)
            if self.accumulate:
                self.in_buffer.append(data)
        def get_data(self):
            # Everything received so far, as a single bytes object.
            return b''.join(self.in_buffer)
        def handle_close(self):
            self.close()
            self.closed = True
        def handle_error(self):
            # Surface unexpected asyncore errors instead of swallowing them.
            raise
    def __init__(self, address):
        threading.Thread.__init__(self)
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.bind(address)
        self.listen(5)
        self.host, self.port = self.socket.getsockname()[:2]
        self.handler_instance = None
        self._active = False
        self._active_lock = threading.Lock()
    # --- public API
    @property
    def running(self):
        return self._active
    def start(self):
        assert not self.running
        # Block until the server thread has actually entered its loop.
        self.__flag = threading.Event()
        threading.Thread.start(self)
        self.__flag.wait()
    def stop(self):
        assert self.running
        self._active = False
        self.join()
    def wait(self):
        # wait for handler connection to be closed, then stop the server
        while not getattr(self.handler_instance, "closed", False):
            time.sleep(0.001)
        self.stop()
    # --- internals
    def run(self):
        # Thread body: poll the asyncore loop until stop() clears _active
        # or every socket has been closed.
        self._active = True
        self.__flag.set()
        while self._active and asyncore.socket_map:
            self._active_lock.acquire()
            asyncore.loop(timeout=0.001, count=1)
            self._active_lock.release()
        asyncore.close_all()
    def handle_accept(self):
        conn, addr = self.accept()
        self.handler_instance = self.Handler(conn)
    def handle_connect(self):
        self.close()
    handle_read = handle_connect
    def writable(self):
        # The listening socket never has anything to write.
        return 0
    def handle_error(self):
        raise
@unittest.skipUnless(hasattr(os, 'sendfile'), "test needs os.sendfile()")
class TestSendfile(unittest.TestCase):
DATA = b"12345abcde" * 16 * 1024 # 160 KiB
SUPPORT_HEADERS_TRAILERS = not sys.platform.startswith("linux") and \
not sys.platform.startswith("solaris") and \
not sys.platform.startswith("sunos")
requires_headers_trailers = unittest.skipUnless(SUPPORT_HEADERS_TRAILERS,
'requires headers and trailers support')
requires_32b = unittest.skipUnless(sys.maxsize < 2**32,
'test is only meaningful on 32-bit builds')
@classmethod
def setUpClass(cls):
cls.key = support.threading_setup()
create_file(support.TESTFN, cls.DATA)
@classmethod
def tearDownClass(cls):
support.threading_cleanup(*cls.key)
support.unlink(support.TESTFN)
def setUp(self):
self.server = SendfileTestServer((support.HOST, 0))
self.server.start()
self.client = socket.socket()
self.client.connect((self.server.host, self.server.port))
self.client.settimeout(1)
# synchronize by waiting for "220 ready" response
self.client.recv(1024)
self.sockno = self.client.fileno()
self.file = open(support.TESTFN, 'rb')
self.fileno = self.file.fileno()
def tearDown(self):
self.file.close()
self.client.close()
if self.server.running:
self.server.stop()
self.server = None
def sendfile_wrapper(self, *args, **kwargs):
"""A higher level wrapper representing how an application is
supposed to use sendfile().
"""
while True:
try:
return os.sendfile(*args, **kwargs)
except OSError as err:
if err.errno == errno.ECONNRESET:
# disconnected
raise
elif err.errno in (errno.EAGAIN, errno.EBUSY):
# we have to retry send data
continue
else:
raise
def test_send_whole_file(self):
# normal send
total_sent = 0
offset = 0
nbytes = 4096
while total_sent < len(self.DATA):
sent = self.sendfile_wrapper(self.sockno, self.fileno, offset, nbytes)
if sent == 0:
break
offset += sent
total_sent += sent
self.assertTrue(sent <= nbytes)
self.assertEqual(offset, total_sent)
self.assertEqual(total_sent, len(self.DATA))
self.client.shutdown(socket.SHUT_RDWR)
self.client.close()
self.server.wait()
data = self.server.handler_instance.get_data()
self.assertEqual(len(data), len(self.DATA))
self.assertEqual(data, self.DATA)
def test_send_at_certain_offset(self):
# start sending a file at a certain offset
total_sent = 0
offset = len(self.DATA) // 2
must_send = len(self.DATA) - offset
nbytes = 4096
while total_sent < must_send:
sent = self.sendfile_wrapper(self.sockno, self.fileno, offset, nbytes)
if sent == 0:
break
offset += sent
total_sent += sent
self.assertTrue(sent <= nbytes)
self.client.shutdown(socket.SHUT_RDWR)
self.client.close()
self.server.wait()
data = self.server.handler_instance.get_data()
expected = self.DATA[len(self.DATA) // 2:]
self.assertEqual(total_sent, len(expected))
self.assertEqual(len(data), len(expected))
self.assertEqual(data, expected)
def test_offset_overflow(self):
# specify an offset > file size
offset = len(self.DATA) + 4096
try:
sent = os.sendfile(self.sockno, self.fileno, offset, 4096)
except OSError as e:
# Solaris can raise EINVAL if offset >= file length, ignore.
if e.errno != errno.EINVAL:
raise
else:
self.assertEqual(sent, 0)
self.client.shutdown(socket.SHUT_RDWR)
self.client.close()
self.server.wait()
data = self.server.handler_instance.get_data()
self.assertEqual(data, b'')
def test_invalid_offset(self):
with self.assertRaises(OSError) as cm:
os.sendfile(self.sockno, self.fileno, -1, 4096)
self.assertEqual(cm.exception.errno, errno.EINVAL)
def test_keywords(self):
    # Keyword arguments should be supported.
    # 'in' is a Python keyword, hence the **-dict to pass it by name.
    os.sendfile(out=self.sockno, offset=0, count=4096,
                **{'in': self.fileno})
    if self.SUPPORT_HEADERS_TRAILERS:
        os.sendfile(self.sockno, self.fileno, offset=0, count=4096,
                    headers=(), trailers=(), flags=0)
# --- headers / trailers tests

@requires_headers_trailers
def test_headers(self):
    """os.sendfile() must transmit *headers* before the file body."""
    total_sent = 0
    # The peer must see both header blobs followed by the WHOLE file.
    # (Was self.DATA[:-1], which silently stopped one byte short and
    # never verified that the final file byte is transferable.)
    expected_data = b"x" * 512 + b"y" * 256 + self.DATA
    sent = os.sendfile(self.sockno, self.fileno, 0, 4096,
                       headers=[b"x" * 512, b"y" * 256])
    self.assertLessEqual(sent, 512 + 256 + 4096)
    total_sent += sent
    offset = 4096
    while total_sent < len(expected_data):
        nbytes = min(len(expected_data) - total_sent, 4096)
        sent = self.sendfile_wrapper(self.sockno, self.fileno,
                                     offset, nbytes)
        if sent == 0:
            break
        self.assertLessEqual(sent, nbytes)
        total_sent += sent
        offset += sent

    self.assertEqual(total_sent, len(expected_data))
    self.client.close()
    self.server.wait()
    data = self.server.handler_instance.get_data()
    # Compare hashes to keep a potential failure message short.
    self.assertEqual(hash(data), hash(expected_data))
@requires_headers_trailers
def test_trailers(self):
    # *trailers* must be appended after the file body.
    TESTFN2 = support.TESTFN + "2"
    file_data = b"abcdef"

    self.addCleanup(support.unlink, TESTFN2)
    create_file(TESTFN2, file_data)

    with open(TESTFN2, 'rb') as f:
        # Send only the first 5 bytes of the file, then both trailers.
        os.sendfile(self.sockno, f.fileno(), 0, 5,
                    trailers=[b"123456", b"789"])
        self.client.close()
        self.server.wait()
        data = self.server.handler_instance.get_data()
        self.assertEqual(data, b"abcde123456789")
@requires_headers_trailers
@requires_32b
def test_headers_overflow_32bits(self):
    # On 32-bit builds a headers payload of 2**31 bytes must fail
    # cleanly with EINVAL instead of overflowing.
    self.server.handler_instance.accumulate = False
    with self.assertRaises(OSError) as cm:
        os.sendfile(self.sockno, self.fileno, 0, 0,
                    headers=[b"x" * 2**16] * 2**15)
    self.assertEqual(cm.exception.errno, errno.EINVAL)
@requires_headers_trailers
@requires_32b
def test_trailers_overflow_32bits(self):
    # Same overflow check as test_headers_overflow_32bits, but for the
    # trailers argument.
    self.server.handler_instance.accumulate = False
    with self.assertRaises(OSError) as cm:
        os.sendfile(self.sockno, self.fileno, 0, 0,
                    trailers=[b"x" * 2**16] * 2**15)
    self.assertEqual(cm.exception.errno, errno.EINVAL)
@requires_headers_trailers
@unittest.skipUnless(hasattr(os, 'SF_NODISKIO'),
                     'test needs os.SF_NODISKIO')
def test_flags(self):
    # Passing a platform-specific flag must be accepted; EBUSY/EAGAIN
    # just mean the data was not cached and are not failures here.
    try:
        os.sendfile(self.sockno, self.fileno, 0, 4096,
                    flags=os.SF_NODISKIO)
    except OSError as err:
        if err.errno not in (errno.EBUSY, errno.EAGAIN):
            raise
def supports_extended_attributes():
    """Return True when the platform and filesystem support xattrs.

    Probes by creating a scratch file and attaching an empty
    ``user.test`` attribute to it; the scratch file is removed again
    whatever the outcome.
    """
    if not hasattr(os, "setxattr"):
        return False
    probe = support.TESTFN
    supported = True
    try:
        with open(probe, "xb", 0) as fp:
            try:
                os.setxattr(fp.fileno(), b"user.test", b"")
            except OSError:
                supported = False
    finally:
        support.unlink(probe)
    return supported
@unittest.skipUnless(supports_extended_attributes(),
                     "no non-broken extended attribute support")
# Kernels < 2.6.39 don't respect setxattr flags.
@support.requires_linux_version(2, 6, 39)
class ExtendedAttributeTests(unittest.TestCase):
    """Tests for os.{get,set,list,remove}xattr() and their fd variants."""

    def _check_xattrs_str(self, s, getxattr, setxattr, removexattr, listxattr, **kwargs):
        # *s* converts attribute names (str or os.fsencode), so the same
        # scenario is replayed once with str names and once with bytes.
        fn = support.TESTFN
        self.addCleanup(support.unlink, fn)
        create_file(fn)

        # Reading a missing attribute must fail with ENODATA.
        with self.assertRaises(OSError) as cm:
            getxattr(fn, s("user.test"), **kwargs)
        self.assertEqual(cm.exception.errno, errno.ENODATA)

        init_xattr = listxattr(fn)
        self.assertIsInstance(init_xattr, list)

        setxattr(fn, s("user.test"), b"", **kwargs)
        xattr = set(init_xattr)
        xattr.add("user.test")
        self.assertEqual(set(listxattr(fn)), xattr)
        self.assertEqual(getxattr(fn, b"user.test", **kwargs), b"")
        setxattr(fn, s("user.test"), b"hello", os.XATTR_REPLACE, **kwargs)
        self.assertEqual(getxattr(fn, b"user.test", **kwargs), b"hello")

        # XATTR_CREATE on an existing attribute -> EEXIST.
        with self.assertRaises(OSError) as cm:
            setxattr(fn, s("user.test"), b"bye", os.XATTR_CREATE, **kwargs)
        self.assertEqual(cm.exception.errno, errno.EEXIST)

        # XATTR_REPLACE on a missing attribute -> ENODATA.
        with self.assertRaises(OSError) as cm:
            setxattr(fn, s("user.test2"), b"bye", os.XATTR_REPLACE, **kwargs)
        self.assertEqual(cm.exception.errno, errno.ENODATA)

        setxattr(fn, s("user.test2"), b"foo", os.XATTR_CREATE, **kwargs)
        xattr.add("user.test2")
        self.assertEqual(set(listxattr(fn)), xattr)
        removexattr(fn, s("user.test"), **kwargs)

        # The removed attribute must be gone again.
        with self.assertRaises(OSError) as cm:
            getxattr(fn, s("user.test"), **kwargs)
        self.assertEqual(cm.exception.errno, errno.ENODATA)

        xattr.remove("user.test")
        self.assertEqual(set(listxattr(fn)), xattr)
        self.assertEqual(getxattr(fn, s("user.test2"), **kwargs), b"foo")

        # Larger values must round-trip as well.
        setxattr(fn, s("user.test"), b"a"*1024, **kwargs)
        self.assertEqual(getxattr(fn, s("user.test"), **kwargs), b"a"*1024)
        removexattr(fn, s("user.test"), **kwargs)

        # Many attributes on one file must all be listed.
        many = sorted("user.test{}".format(i) for i in range(100))
        for thing in many:
            setxattr(fn, thing, b"x", **kwargs)
        self.assertEqual(set(listxattr(fn)), set(init_xattr) | set(many))

    def _check_xattrs(self, *args, **kwargs):
        # Run the scenario with str names, then again with bytes names.
        self._check_xattrs_str(str, *args, **kwargs)
        support.unlink(support.TESTFN)

        self._check_xattrs_str(os.fsencode, *args, **kwargs)
        support.unlink(support.TESTFN)

    def test_simple(self):
        self._check_xattrs(os.getxattr, os.setxattr, os.removexattr,
                           os.listxattr)

    def test_lpath(self):
        self._check_xattrs(os.getxattr, os.setxattr, os.removexattr,
                           os.listxattr, follow_symlinks=False)

    def test_fds(self):
        # Same scenario, driven through file-descriptor based wrappers.
        def getxattr(path, *args):
            with open(path, "rb") as fp:
                return os.getxattr(fp.fileno(), *args)
        def setxattr(path, *args):
            with open(path, "wb", 0) as fp:
                os.setxattr(fp.fileno(), *args)
        def removexattr(path, *args):
            with open(path, "wb", 0) as fp:
                os.removexattr(fp.fileno(), *args)
        def listxattr(path, *args):
            with open(path, "rb") as fp:
                return os.listxattr(fp.fileno(), *args)
        self._check_xattrs(getxattr, setxattr, removexattr, listxattr)
@unittest.skipUnless(hasattr(os, 'get_terminal_size'), "requires os.get_terminal_size")
class TermsizeTests(unittest.TestCase):
    def test_does_not_crash(self):
        """Check if get_terminal_size() returns a meaningful value.

        There's no easy portable way to actually check the size of the
        terminal, so let's check if it returns something sensible instead.
        """
        try:
            size = os.get_terminal_size()
        except OSError as e:
            if sys.platform == "win32" or e.errno in (errno.EINVAL, errno.ENOTTY):
                # Under win32 a generic OSError can be thrown if the
                # handle cannot be retrieved
                self.skipTest("failed to query terminal size")
            raise

        self.assertGreaterEqual(size.columns, 0)
        self.assertGreaterEqual(size.lines, 0)

    def test_stty_match(self):
        """Check if stty returns the same results

        stty actually tests stdin, so get_terminal_size is invoked on
        stdin explicitly. If stty succeeded, then get_terminal_size()
        should work too.
        """
        try:
            size = subprocess.check_output(['stty', 'size']).decode().split()
        except (FileNotFoundError, subprocess.CalledProcessError,
                PermissionError):
            self.skipTest("stty invocation failed")
        # stty prints "<rows> <cols>"; terminal_size is (columns, lines).
        expected = (int(size[1]), int(size[0])) # reversed order

        try:
            actual = os.get_terminal_size(sys.__stdin__.fileno())
        except OSError as e:
            if sys.platform == "win32" or e.errno in (errno.EINVAL, errno.ENOTTY):
                # Under win32 a generic OSError can be thrown if the
                # handle cannot be retrieved
                self.skipTest("failed to query terminal size")
            raise
        self.assertEqual(expected, actual)
@unittest.skipUnless(hasattr(os, 'memfd_create'), 'requires os.memfd_create')
@support.requires_linux_version(3, 17)
class MemfdCreateTests(unittest.TestCase):
    def test_memfd_create(self):
        """memfd_create() must return a usable, non-inheritable fd."""
        fd = os.memfd_create("Hi", os.MFD_CLOEXEC)
        self.assertNotEqual(fd, -1)
        self.addCleanup(os.close, fd)
        self.assertFalse(os.get_inheritable(fd))
        with open(fd, "wb", closefd=False) as f:
            f.write(b'memfd_create')
            self.assertEqual(f.tell(), 12)

        # Default flags (MFD_CLOEXEC) also give a non-inheritable fd.
        fd2 = os.memfd_create("Hi")
        self.addCleanup(os.close, fd2)
        self.assertFalse(os.get_inheritable(fd2))
class OSErrorTests(unittest.TestCase):
    """OSError raised by filesystem functions must carry the filename."""

    def setUp(self):
        # Plain str subclass, used to exercise the non-exact-str path.
        class Str(str):
            pass

        self.bytes_filenames = []
        self.unicode_filenames = []
        if support.TESTFN_UNENCODABLE is not None:
            decoded = support.TESTFN_UNENCODABLE
        else:
            decoded = support.TESTFN
        self.unicode_filenames.append(decoded)
        self.unicode_filenames.append(Str(decoded))
        if support.TESTFN_UNDECODABLE is not None:
            encoded = support.TESTFN_UNDECODABLE
        else:
            encoded = os.fsencode(support.TESTFN)
        self.bytes_filenames.append(encoded)
        self.bytes_filenames.append(bytearray(encoded))
        self.bytes_filenames.append(memoryview(encoded))

        self.filenames = self.bytes_filenames + self.unicode_filenames

    def test_oserror_filename(self):
        # Each entry is (filenames to try, function, *extra args).  The
        # call must fail (none of the files exist) and the raised
        # OSError must reference the offending name via err.filename.
        funcs = [
            (self.filenames, os.chdir,),
            (self.filenames, os.chmod, 0o777),
            (self.filenames, os.lstat,),
            (self.filenames, os.open, os.O_RDONLY),
            (self.filenames, os.rmdir,),
            (self.filenames, os.stat,),
            (self.filenames, os.unlink,),
        ]
        if sys.platform == "win32":
            funcs.extend((
                (self.bytes_filenames, os.rename, b"dst"),
                (self.bytes_filenames, os.replace, b"dst"),
                (self.unicode_filenames, os.rename, "dst"),
                (self.unicode_filenames, os.replace, "dst"),
                (self.unicode_filenames, os.listdir, ),
            ))
        else:
            funcs.extend((
                (self.filenames, os.listdir,),
                (self.filenames, os.rename, "dst"),
                (self.filenames, os.replace, "dst"),
            ))
        # Platform-optional functions are only checked when available.
        if hasattr(os, "chown"):
            funcs.append((self.filenames, os.chown, 0, 0))
        if hasattr(os, "lchown"):
            funcs.append((self.filenames, os.lchown, 0, 0))
        if hasattr(os, "truncate"):
            funcs.append((self.filenames, os.truncate, 0))
        if hasattr(os, "chflags"):
            funcs.append((self.filenames, os.chflags, 0))
        if hasattr(os, "lchflags"):
            funcs.append((self.filenames, os.lchflags, 0))
        if hasattr(os, "chroot"):
            funcs.append((self.filenames, os.chroot,))
        if hasattr(os, "link"):
            if sys.platform == "win32":
                funcs.append((self.bytes_filenames, os.link, b"dst"))
                funcs.append((self.unicode_filenames, os.link, "dst"))
            else:
                funcs.append((self.filenames, os.link, "dst"))
        if hasattr(os, "listxattr"):
            funcs.extend((
                (self.filenames, os.listxattr,),
                (self.filenames, os.getxattr, "user.test"),
                (self.filenames, os.setxattr, "user.test", b'user'),
                (self.filenames, os.removexattr, "user.test"),
            ))
        if hasattr(os, "lchmod"):
            funcs.append((self.filenames, os.lchmod, 0o777))
        if hasattr(os, "readlink"):
            funcs.append((self.filenames, os.readlink,))

        for filenames, func, *func_args in funcs:
            for name in filenames:
                try:
                    if isinstance(name, (str, bytes)):
                        func(name, *func_args)
                    else:
                        # bytes-like names (bytearray/memoryview) are
                        # deprecated and must warn before failing.
                        with self.assertWarnsRegex(DeprecationWarning, 'should be'):
                            func(name, *func_args)
                except OSError as err:
                    self.assertIs(err.filename, name, str(func))
                except UnicodeDecodeError:
                    pass
                else:
                    self.fail("No exception thrown by {}".format(func))
class CPUCountTests(unittest.TestCase):
    def test_cpu_count(self):
        """os.cpu_count() returns a positive int, or None if unknown."""
        count = os.cpu_count()
        if count is None:
            self.skipTest("Could not determine the number of CPUs")
        self.assertIsInstance(count, int)
        self.assertGreater(count, 0)
class FDInheritanceTests(unittest.TestCase):
    """File descriptors created by os are non-inheritable by default (PEP 446)."""

    def test_get_set_inheritable(self):
        fd = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd)
        self.assertEqual(os.get_inheritable(fd), False)

        os.set_inheritable(fd, True)
        self.assertEqual(os.get_inheritable(fd), True)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_get_inheritable_cloexec(self):
        fd = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd)
        self.assertEqual(os.get_inheritable(fd), False)

        # clear FD_CLOEXEC flag
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        flags &= ~fcntl.FD_CLOEXEC
        fcntl.fcntl(fd, fcntl.F_SETFD, flags)

        # Without FD_CLOEXEC the fd must now report as inheritable.
        self.assertEqual(os.get_inheritable(fd), True)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_set_inheritable_cloexec(self):
        fd = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd)
        # os.open() sets FD_CLOEXEC ...
        self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                         fcntl.FD_CLOEXEC)

        # ... and set_inheritable(True) must clear it again.
        os.set_inheritable(fd, True)
        self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                         0)

    def test_open(self):
        fd = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd)
        self.assertEqual(os.get_inheritable(fd), False)

    @unittest.skipUnless(hasattr(os, 'pipe'), "need os.pipe()")
    def test_pipe(self):
        rfd, wfd = os.pipe()
        self.addCleanup(os.close, rfd)
        self.addCleanup(os.close, wfd)
        self.assertEqual(os.get_inheritable(rfd), False)
        self.assertEqual(os.get_inheritable(wfd), False)

    def test_dup(self):
        fd1 = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd1)

        fd2 = os.dup(fd1)
        self.addCleanup(os.close, fd2)
        self.assertEqual(os.get_inheritable(fd2), False)

    def test_dup_standard_stream(self):
        fd = os.dup(1)
        self.addCleanup(os.close, fd)
        self.assertGreater(fd, 0)

    @unittest.skipUnless(sys.platform == 'win32', 'win32-specific test')
    def test_dup_nul(self):
        # os.dup() was creating inheritable fds for character files.
        fd1 = os.open('NUL', os.O_RDONLY)
        self.addCleanup(os.close, fd1)
        fd2 = os.dup(fd1)
        self.addCleanup(os.close, fd2)
        self.assertFalse(os.get_inheritable(fd2))

    @unittest.skipUnless(hasattr(os, 'dup2'), "need os.dup2()")
    def test_dup2(self):
        fd = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd)

        # inheritable by default
        fd2 = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd2)
        self.assertEqual(os.dup2(fd, fd2), fd2)
        self.assertTrue(os.get_inheritable(fd2))

        # force non-inheritable
        fd3 = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd3)
        self.assertEqual(os.dup2(fd, fd3, inheritable=False), fd3)
        self.assertFalse(os.get_inheritable(fd3))

    @unittest.skipUnless(hasattr(os, 'openpty'), "need os.openpty()")
    def test_openpty(self):
        master_fd, slave_fd = os.openpty()
        self.addCleanup(os.close, master_fd)
        self.addCleanup(os.close, slave_fd)
        self.assertEqual(os.get_inheritable(master_fd), False)
        self.assertEqual(os.get_inheritable(slave_fd), False)
class PathTConverterTests(unittest.TestCase):
    # tuples of (function name, allows fd arguments, additional arguments to
    # function, cleanup function)
    functions = [
        ('stat', True, (), None),
        ('lstat', False, (), None),
        ('access', False, (os.F_OK,), None),
        ('chflags', False, (0,), None),
        ('lchflags', False, (0,), None),
        ('open', False, (0,), getattr(os, 'close', None)),
    ]

    def test_path_t_converter(self):
        # Every function must accept str, bytes and os.PathLike paths,
        # and a raw fd only when documented to support it.
        str_filename = support.TESTFN
        if os.name == 'nt':
            bytes_fspath = bytes_filename = None
        else:
            bytes_filename = support.TESTFN.encode('ascii')
            bytes_fspath = FakePath(bytes_filename)
        fd = os.open(FakePath(str_filename), os.O_WRONLY|os.O_CREAT)
        self.addCleanup(support.unlink, support.TESTFN)
        self.addCleanup(os.close, fd)

        int_fspath = FakePath(fd)
        str_fspath = FakePath(str_filename)

        for name, allow_fd, extra_args, cleanup_fn in self.functions:
            with self.subTest(name=name):
                try:
                    fn = getattr(os, name)
                except AttributeError:
                    continue

                for path in (str_filename, bytes_filename, str_fspath,
                             bytes_fspath):
                    if path is None:
                        continue
                    with self.subTest(name=name, path=path):
                        result = fn(path, *extra_args)
                        if cleanup_fn is not None:
                            cleanup_fn(result)

                # A __fspath__ returning an int is always a TypeError.
                with self.assertRaisesRegex(
                        TypeError, 'to return str or bytes'):
                    fn(int_fspath, *extra_args)

                if allow_fd:
                    result = fn(fd, *extra_args)  # should not fail
                    if cleanup_fn is not None:
                        cleanup_fn(result)
                else:
                    with self.assertRaisesRegex(
                            TypeError,
                            'os.PathLike'):
                        fn(fd, *extra_args)

    def test_path_t_converter_and_custom_class(self):
        # The error message must name the offending __fspath__ type.
        msg = r'__fspath__\(\) to return str or bytes, not %s'
        with self.assertRaisesRegex(TypeError, msg % r'int'):
            os.stat(FakePath(2))
        with self.assertRaisesRegex(TypeError, msg % r'float'):
            os.stat(FakePath(2.34))
        with self.assertRaisesRegex(TypeError, msg % r'object'):
            os.stat(FakePath(object()))
@unittest.skipUnless(hasattr(os, 'get_blocking'),
                     'needs os.get_blocking() and os.set_blocking()')
class BlockingTests(unittest.TestCase):
    def test_blocking(self):
        """os.set_blocking() must be reflected by os.get_blocking()."""
        fd = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd)
        # A regular file descriptor starts out blocking.
        self.assertEqual(os.get_blocking(fd), True)
        for flag in (False, True):
            os.set_blocking(fd, flag)
            self.assertEqual(os.get_blocking(fd), flag)
class ExportsTests(unittest.TestCase):
    def test_os_all(self):
        """Well-known names must be exported through os.__all__."""
        for name in ('open', 'walk'):
            self.assertIn(name, os.__all__)
class TestScandir(unittest.TestCase):
    """Tests for os.scandir() and the os.DirEntry objects it yields."""

    check_no_resource_warning = support.check_no_resource_warning

    def setUp(self):
        # Fresh scratch directory per test, removed on cleanup.
        self.path = os.path.realpath(support.TESTFN)
        self.bytes_path = os.fsencode(self.path)
        self.addCleanup(support.rmtree, self.path)
        os.mkdir(self.path)

    def create_file(self, name="file.txt"):
        """Create a small file inside self.path and return its full path."""
        path = self.bytes_path if isinstance(name, bytes) else self.path
        filename = os.path.join(path, name)
        create_file(filename, b'python')
        return filename

    def get_entries(self, names):
        """scandir() self.path and return a name -> DirEntry mapping."""
        entries = dict((entry.name, entry)
                       for entry in os.scandir(self.path))
        self.assertEqual(sorted(entries.keys()), names)
        return entries

    def assert_stat_equal(self, stat1, stat2, skip_fields):
        # skip_fields: ignore fields Windows reports differently between
        # stat() and DirEntry.stat().
        if skip_fields:
            for attr in dir(stat1):
                if not attr.startswith("st_"):
                    continue
                if attr in ("st_dev", "st_ino", "st_nlink"):
                    continue
                self.assertEqual(getattr(stat1, attr),
                                 getattr(stat2, attr),
                                 (stat1, stat2, attr))
        else:
            self.assertEqual(stat1, stat2)

    def check_entry(self, entry, name, is_dir, is_file, is_symlink):
        """Cross-check a DirEntry against os.stat()/os.path results."""
        self.assertIsInstance(entry, os.DirEntry)
        self.assertEqual(entry.name, name)
        self.assertEqual(entry.path, os.path.join(self.path, name))
        self.assertEqual(entry.inode(),
                         os.stat(entry.path, follow_symlinks=False).st_ino)

        entry_stat = os.stat(entry.path)
        self.assertEqual(entry.is_dir(),
                         stat.S_ISDIR(entry_stat.st_mode))
        self.assertEqual(entry.is_file(),
                         stat.S_ISREG(entry_stat.st_mode))
        self.assertEqual(entry.is_symlink(),
                         os.path.islink(entry.path))

        entry_lstat = os.stat(entry.path, follow_symlinks=False)
        self.assertEqual(entry.is_dir(follow_symlinks=False),
                         stat.S_ISDIR(entry_lstat.st_mode))
        self.assertEqual(entry.is_file(follow_symlinks=False),
                         stat.S_ISREG(entry_lstat.st_mode))

        self.assert_stat_equal(entry.stat(),
                               entry_stat,
                               os.name == 'nt' and not is_symlink)
        self.assert_stat_equal(entry.stat(follow_symlinks=False),
                               entry_lstat,
                               os.name == 'nt')

    def test_attributes(self):
        link = hasattr(os, 'link')
        symlink = support.can_symlink()

        dirname = os.path.join(self.path, "dir")
        os.mkdir(dirname)
        filename = self.create_file("file.txt")
        if link:
            try:
                os.link(filename, os.path.join(self.path, "link_file.txt"))
            except PermissionError as e:
                self.skipTest('os.link(): %s' % e)
        if symlink:
            os.symlink(dirname, os.path.join(self.path, "symlink_dir"),
                       target_is_directory=True)
            os.symlink(filename, os.path.join(self.path, "symlink_file.txt"))

        names = ['dir', 'file.txt']
        if link:
            names.append('link_file.txt')
        if symlink:
            names.extend(('symlink_dir', 'symlink_file.txt'))
        entries = self.get_entries(names)

        entry = entries['dir']
        self.check_entry(entry, 'dir', True, False, False)

        entry = entries['file.txt']
        self.check_entry(entry, 'file.txt', False, True, False)

        if link:
            entry = entries['link_file.txt']
            self.check_entry(entry, 'link_file.txt', False, True, False)

        if symlink:
            entry = entries['symlink_dir']
            self.check_entry(entry, 'symlink_dir', True, False, True)

            entry = entries['symlink_file.txt']
            self.check_entry(entry, 'symlink_file.txt', False, True, True)

    def get_entry(self, name):
        """Return the single DirEntry in self.path; it must be *name*."""
        path = self.bytes_path if isinstance(name, bytes) else self.path
        entries = list(os.scandir(path))
        self.assertEqual(len(entries), 1)

        entry = entries[0]
        self.assertEqual(entry.name, name)
        return entry

    def create_file_entry(self, name='file.txt'):
        """Create a file and return its DirEntry."""
        filename = self.create_file(name=name)
        return self.get_entry(os.path.basename(filename))

    def test_current_directory(self):
        filename = self.create_file()
        old_dir = os.getcwd()
        try:
            os.chdir(self.path)

            # call scandir() without parameter: it must list the content
            # of the current directory
            entries = dict((entry.name, entry) for entry in os.scandir())
            self.assertEqual(sorted(entries.keys()),
                             [os.path.basename(filename)])
        finally:
            os.chdir(old_dir)

    def test_repr(self):
        entry = self.create_file_entry()
        self.assertEqual(repr(entry), "<DirEntry 'file.txt'>")

    def test_fspath_protocol(self):
        entry = self.create_file_entry()
        self.assertEqual(os.fspath(entry), os.path.join(self.path, 'file.txt'))

    def test_fspath_protocol_bytes(self):
        bytes_filename = os.fsencode('bytesfile.txt')
        bytes_entry = self.create_file_entry(name=bytes_filename)
        fspath = os.fspath(bytes_entry)
        self.assertIsInstance(fspath, bytes)
        self.assertEqual(fspath,
                         os.path.join(os.fsencode(self.path),bytes_filename))

    def test_removed_dir(self):
        path = os.path.join(self.path, 'dir')

        os.mkdir(path)
        entry = self.get_entry('dir')
        os.rmdir(path)

        # On POSIX, is_dir() result depends if scandir() filled d_type or not
        if os.name == 'nt':
            self.assertTrue(entry.is_dir())
        self.assertFalse(entry.is_file())
        self.assertFalse(entry.is_symlink())
        if os.name == 'nt':
            self.assertRaises(FileNotFoundError, entry.inode)
            # don't fail
            entry.stat()
            entry.stat(follow_symlinks=False)
        else:
            self.assertGreater(entry.inode(), 0)
            self.assertRaises(FileNotFoundError, entry.stat)
            self.assertRaises(FileNotFoundError, entry.stat, follow_symlinks=False)

    def test_removed_file(self):
        entry = self.create_file_entry()
        os.unlink(entry.path)

        self.assertFalse(entry.is_dir())
        # On POSIX, is_dir() result depends if scandir() filled d_type or not
        if os.name == 'nt':
            self.assertTrue(entry.is_file())
        self.assertFalse(entry.is_symlink())
        if os.name == 'nt':
            self.assertRaises(FileNotFoundError, entry.inode)
            # don't fail
            entry.stat()
            entry.stat(follow_symlinks=False)
        else:
            self.assertGreater(entry.inode(), 0)
            self.assertRaises(FileNotFoundError, entry.stat)
            self.assertRaises(FileNotFoundError, entry.stat, follow_symlinks=False)

    def test_broken_symlink(self):
        if not support.can_symlink():
            return self.skipTest('cannot create symbolic link')

        filename = self.create_file("file.txt")
        os.symlink(filename,
                   os.path.join(self.path, "symlink.txt"))
        entries = self.get_entries(['file.txt', 'symlink.txt'])
        entry = entries['symlink.txt']
        os.unlink(filename)

        self.assertGreater(entry.inode(), 0)
        self.assertFalse(entry.is_dir())
        self.assertFalse(entry.is_file())  # broken symlink returns False
        self.assertFalse(entry.is_dir(follow_symlinks=False))
        self.assertFalse(entry.is_file(follow_symlinks=False))
        self.assertTrue(entry.is_symlink())
        self.assertRaises(FileNotFoundError, entry.stat)
        # don't fail
        entry.stat(follow_symlinks=False)

    def test_bytes(self):
        self.create_file("file.txt")

        # A bytes path must yield bytes names and paths.
        path_bytes = os.fsencode(self.path)
        entries = list(os.scandir(path_bytes))
        self.assertEqual(len(entries), 1, entries)
        entry = entries[0]

        self.assertEqual(entry.name, b'file.txt')
        self.assertEqual(entry.path,
                         os.fsencode(os.path.join(self.path, 'file.txt')))

    def test_bytes_like(self):
        self.create_file("file.txt")

        # bytes-like paths (bytearray/memoryview) are deprecated but must
        # still work, emitting a DeprecationWarning.
        for cls in bytearray, memoryview:
            path_bytes = cls(os.fsencode(self.path))
            with self.assertWarns(DeprecationWarning):
                entries = list(os.scandir(path_bytes))
            self.assertEqual(len(entries), 1, entries)
            entry = entries[0]

            self.assertEqual(entry.name, b'file.txt')
            self.assertEqual(entry.path,
                             os.fsencode(os.path.join(self.path, 'file.txt')))
            self.assertIs(type(entry.name), bytes)
            self.assertIs(type(entry.path), bytes)

    @unittest.skipUnless(os.listdir in os.supports_fd,
                         'fd support for listdir required for this test.')
    def test_fd(self):
        self.assertIn(os.scandir, os.supports_fd)
        self.create_file('file.txt')
        expected_names = ['file.txt']
        if support.can_symlink():
            os.symlink('file.txt', os.path.join(self.path, 'link'))
            expected_names.append('link')

        fd = os.open(self.path, os.O_RDONLY)
        try:
            with os.scandir(fd) as it:
                entries = list(it)
            names = [entry.name for entry in entries]
            self.assertEqual(sorted(names), expected_names)
            self.assertEqual(names, os.listdir(fd))
            for entry in entries:
                # With an fd, DirEntry.path degrades to the bare name.
                self.assertEqual(entry.path, entry.name)
                self.assertEqual(os.fspath(entry), entry.name)
                self.assertEqual(entry.is_symlink(), entry.name == 'link')
                if os.stat in os.supports_dir_fd:
                    st = os.stat(entry.name, dir_fd=fd)
                    self.assertEqual(entry.stat(), st)
                    st = os.stat(entry.name, dir_fd=fd, follow_symlinks=False)
                    self.assertEqual(entry.stat(follow_symlinks=False), st)
        finally:
            os.close(fd)

    def test_empty_path(self):
        self.assertRaises(FileNotFoundError, os.scandir, '')

    def test_consume_iterator_twice(self):
        self.create_file("file.txt")
        iterator = os.scandir(self.path)
        entries = list(iterator)
        self.assertEqual(len(entries), 1, entries)

        # check than consuming the iterator twice doesn't raise exception
        entries2 = list(iterator)
        self.assertEqual(len(entries2), 0, entries2)

    def test_bad_path_type(self):
        for obj in [1.234, {}, []]:
            self.assertRaises(TypeError, os.scandir, obj)

    def test_close(self):
        self.create_file("file.txt")
        self.create_file("file2.txt")
        iterator = os.scandir(self.path)
        next(iterator)
        iterator.close()
        # multiple closes
        iterator.close()
        with self.check_no_resource_warning():
            del iterator

    def test_context_manager(self):
        self.create_file("file.txt")
        self.create_file("file2.txt")
        with os.scandir(self.path) as iterator:
            next(iterator)
        with self.check_no_resource_warning():
            del iterator

    def test_context_manager_close(self):
        self.create_file("file.txt")
        self.create_file("file2.txt")
        with os.scandir(self.path) as iterator:
            next(iterator)
            iterator.close()

    def test_context_manager_exception(self):
        self.create_file("file.txt")
        self.create_file("file2.txt")
        with self.assertRaises(ZeroDivisionError):
            with os.scandir(self.path) as iterator:
                next(iterator)
                1/0
        with self.check_no_resource_warning():
            del iterator

    def test_resource_warning(self):
        self.create_file("file.txt")
        self.create_file("file2.txt")
        # Dropping a half-consumed iterator must warn ...
        iterator = os.scandir(self.path)
        next(iterator)
        with self.assertWarns(ResourceWarning):
            del iterator
            support.gc_collect()
        # exhausted iterator
        iterator = os.scandir(self.path)
        list(iterator)
        with self.check_no_resource_warning():
            del iterator
class TestPEP519(unittest.TestCase):
# Abstracted so it can be overridden to test pure Python implementation
# if a C version is provided.
fspath = staticmethod(os.fspath)
def test_return_bytes(self):
for b in b'hello', b'goodbye', b'some/path/and/file':
self.assertEqual(b, self.fspath(b))
def test_return_string(self):
for s in 'hello', 'goodbye', 'some/path/and/file':
self.assertEqual(s, self.fspath(s))
def test_fsencode_fsdecode(self):
for p in "path/like/object", b"path/like/object":
pathlike = FakePath(p)
self.assertEqual(p, self.fspath(pathlike))
self.assertEqual(b"path/like/object", os.fsencode(pathlike))
self.assertEqual("path/like/object", os.fsdecode(pathlike))
def test_pathlike(self):
self.assertEqual('#feelthegil', self.fspath(FakePath('#feelthegil')))
self.assertTrue(issubclass(FakePath, os.PathLike))
self.assertTrue(isinstance(FakePath('x'), os.PathLike))
def test_garbage_in_exception_out(self):
vapor = type('blah', (), {})
for o in int, type, os, vapor():
self.assertRaises(TypeError, self.fspath, o)
def test_argument_required(self):
self.assertRaises(TypeError, self.fspath)
def test_bad_pathlike(self):
# __fspath__ returns a value other than str or bytes.
self.assertRaises(TypeError, self.fspath, FakePath(42))
# __fspath__ attribute that is not callable.
c = type('foo', (), {})
c.__fspath__ = 1
self.assertRaises(TypeError, self.fspath, c())
# __fspath__ raises an exception.
self.assertRaises(ZeroDivisionError, self.fspath,
FakePath(ZeroDivisionError()))
class TimesTests(unittest.TestCase):
    def test_times(self):
        """os.times() returns an os.times_result of five floats."""
        result = os.times()
        self.assertIsInstance(result, os.times_result)
        names = ('user', 'system', 'children_user', 'children_system',
                 'elapsed')
        for name in names:
            self.assertIsInstance(getattr(result, name), float)
        if os.name == 'nt':
            # Windows cannot report per-child or elapsed times.
            self.assertEqual(result.children_user, 0)
            self.assertEqual(result.children_system, 0)
            self.assertEqual(result.elapsed, 0)
# Only test if the C version is provided, otherwise TestPEP519 already tested
# the pure Python implementation.
if hasattr(os, "_fspath"):
    class TestPEP519PurePython(TestPEP519):

        """Explicitly test the pure Python implementation of os.fspath()."""

        fspath = staticmethod(os._fspath)


# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| 37.505063
| 101
| 0.595023
|
4a076e8d7a97457db02c8b8523c05992112be651
| 1,340
|
py
|
Python
|
setup.py
|
THAVASIGTI/india_covid
|
800e2786094438b3f8e298eafed0fbfd6f9c5910
|
[
"MIT"
] | 2
|
2021-07-09T16:34:03.000Z
|
2021-08-03T16:46:54.000Z
|
setup.py
|
THAVASIGTI/india_covid
|
800e2786094438b3f8e298eafed0fbfd6f9c5910
|
[
"MIT"
] | null | null | null |
setup.py
|
THAVASIGTI/india_covid
|
800e2786094438b3f8e298eafed0fbfd6f9c5910
|
[
"MIT"
] | 1
|
2021-07-09T16:34:03.000Z
|
2021-07-09T16:34:03.000Z
|
import setuptools
from os import path

# Use the README as the long description shown on PyPI.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setuptools.setup(
    name="pycovid_india",
    version="0.0.8",
    author="T.THAVASI GTI",
    license="MIT",
    author_email="ganeshanthavasigti1032000@gmail.com",
    description="Indian COVID-19 Vaccine and Cases Status Information",
    long_description=long_description,
    long_description_content_type="text/markdown",
    project_urls={
        "Source":"https://github.com/THAVASIGTI/pycovid_india.git",
        "download_url":"https://github.com/THAVASIGTI/pycovid_india/archive/refs/heads/master.zip",
        "Tracker":"https://github.com/THAVASIGTI/pycovid_india/issues",
    },
    zip_safe=True,
    # Ship the JSON lookup tables alongside the package code.
    data_files=[('config', ['pycovid_india/config.json']),
                ('statecode', ['pycovid_india/statecode.json'])],
    packages=setuptools.find_packages(),
    include_package_data=True,
    classifiers=[
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    install_requires=["requests"],
    python_requires='>=3',
)
| 37.222222
| 99
| 0.679851
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.