Column schema:

| column | type | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 – 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 – 245 |
| max_stars_repo_name | string | length 6 – 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 – 245 |
| max_issues_repo_name | string | length 6 – 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 – 245 |
| max_forks_repo_name | string | length 6 – 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 2 – 1.02M |
| avg_line_length | float64 | 1 – 417k |
| max_line_length | int64 | 1 – 987k |
| alphanum_fraction | float64 | 0 – 1 |
| content_no_comment | string | length 0 – 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
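The records below are easier to query programmatically than to read linearly. As a minimal sketch, assuming the dump is exported as JSON Lines (the filename `code_dump.jsonl` is hypothetical, not from the source), the schema above can be loaded and filtered with the `datasets` library:

```python
# Minimal sketch; "code_dump.jsonl" is a hypothetical export of the records below.
from datasets import load_dataset

ds = load_dataset("json", data_files="code_dump.jsonl", split="train")

# Keep small, starred Python files; max_stars_count is nullable, hence `or 0`.
starred = ds.filter(
    lambda row: row["ext"] == "py"
    and row["size"] < 10_000
    and (row["max_stars_count"] or 0) >= 1
)
print(starred[0]["max_stars_repo_name"], starred[0]["avg_line_length"])
```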
Record 1
hexsha: 1c478d69dfa8ae825ea6fc0e5a10dfc164798605 | size: 3,750 | ext: py | lang: Python
max_stars: panguan737/nova @ 0d177185a439baa228b42c948cab4e934d6ac7b8 | path: nova/scheduler/filters/disk_filter.py | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: panguan737/nova @ 0d177185a439baa228b42c948cab4e934d6ac7b8 | path: nova/scheduler/filters/disk_filter.py | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: panguan737/nova @ 0d177185a439baa228b42c948cab4e934d6ac7b8 | path: nova/scheduler/filters/disk_filter.py | licenses: ["Apache-2.0"] | count: 1 | events: 2020-11-02T10:17:13.000Z to 2020-11-02T10:17:13.000Z
content:
```python
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import nova.conf
from nova.i18n import _LW
from nova.scheduler import filters
from nova.scheduler.filters import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class DiskFilter(filters.BaseHostFilter):
"""Disk Filter with over subscription flag."""
RUN_ON_REBUILD = False
def _get_disk_allocation_ratio(self, host_state, spec_obj):
return host_state.disk_allocation_ratio
def host_passes(self, host_state, spec_obj):
"""Filter based on disk usage."""
requested_disk = (1024 * (spec_obj.root_gb +
spec_obj.ephemeral_gb) +
spec_obj.swap)
free_disk_mb = host_state.free_disk_mb
total_usable_disk_mb = host_state.total_usable_disk_gb * 1024
# Do not allow an instance to overcommit against itself, only against
# other instances. In other words, if there isn't room for even just
# this one instance in total_usable_disk space, consider the host full.
if total_usable_disk_mb < requested_disk:
LOG.debug("%(host_state)s does not have %(requested_disk)s "
"MB usable disk space before overcommit, it only "
"has %(physical_disk_size)s MB.",
{'host_state': host_state,
'requested_disk': requested_disk,
'physical_disk_size':
total_usable_disk_mb})
return False
disk_allocation_ratio = self._get_disk_allocation_ratio(
host_state, spec_obj)
disk_mb_limit = total_usable_disk_mb * disk_allocation_ratio
used_disk_mb = total_usable_disk_mb - free_disk_mb
usable_disk_mb = disk_mb_limit - used_disk_mb
if not usable_disk_mb >= requested_disk:
LOG.debug("%(host_state)s does not have %(requested_disk)s MB "
"usable disk, it only has %(usable_disk_mb)s MB usable "
"disk.", {'host_state': host_state,
'requested_disk': requested_disk,
'usable_disk_mb': usable_disk_mb})
return False
disk_gb_limit = disk_mb_limit / 1024
host_state.limits['disk_gb'] = disk_gb_limit
return True
class AggregateDiskFilter(DiskFilter):
"""AggregateDiskFilter with per-aggregate disk allocation ratio flag.
Fall back to global disk_allocation_ratio if no per-aggregate setting
found.
"""
RUN_ON_REBUILD = False
def _get_disk_allocation_ratio(self, host_state, spec_obj):
aggregate_vals = utils.aggregate_values_from_key(
host_state,
'disk_allocation_ratio')
try:
ratio = utils.validate_num_values(
aggregate_vals, host_state.disk_allocation_ratio,
cast_to=float)
except ValueError as e:
LOG.warning(_LW("Could not decode disk_allocation_ratio: '%s'"), e)
ratio = host_state.disk_allocation_ratio
return ratio
```
avg_line_length: 37.5 | max_line_length: 79 | alphanum_fraction: 0.6496
content_no_comment:
```python
from oslo_log import log as logging
import nova.conf
from nova.i18n import _LW
from nova.scheduler import filters
from nova.scheduler.filters import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class DiskFilter(filters.BaseHostFilter):
RUN_ON_REBUILD = False
def _get_disk_allocation_ratio(self, host_state, spec_obj):
return host_state.disk_allocation_ratio
def host_passes(self, host_state, spec_obj):
requested_disk = (1024 * (spec_obj.root_gb +
spec_obj.ephemeral_gb) +
spec_obj.swap)
free_disk_mb = host_state.free_disk_mb
total_usable_disk_mb = host_state.total_usable_disk_gb * 1024
# this one instance in total_usable_disk space, consider the host full.
if total_usable_disk_mb < requested_disk:
LOG.debug("%(host_state)s does not have %(requested_disk)s "
"MB usable disk space before overcommit, it only "
"has %(physical_disk_size)s MB.",
{'host_state': host_state,
'requested_disk': requested_disk,
'physical_disk_size':
total_usable_disk_mb})
return False
disk_allocation_ratio = self._get_disk_allocation_ratio(
host_state, spec_obj)
disk_mb_limit = total_usable_disk_mb * disk_allocation_ratio
used_disk_mb = total_usable_disk_mb - free_disk_mb
usable_disk_mb = disk_mb_limit - used_disk_mb
if not usable_disk_mb >= requested_disk:
LOG.debug("%(host_state)s does not have %(requested_disk)s MB "
"usable disk, it only has %(usable_disk_mb)s MB usable "
"disk.", {'host_state': host_state,
'requested_disk': requested_disk,
'usable_disk_mb': usable_disk_mb})
return False
disk_gb_limit = disk_mb_limit / 1024
host_state.limits['disk_gb'] = disk_gb_limit
return True
class AggregateDiskFilter(DiskFilter):
RUN_ON_REBUILD = False
def _get_disk_allocation_ratio(self, host_state, spec_obj):
aggregate_vals = utils.aggregate_values_from_key(
host_state,
'disk_allocation_ratio')
try:
ratio = utils.validate_num_values(
aggregate_vals, host_state.disk_allocation_ratio,
cast_to=float)
except ValueError as e:
LOG.warning(_LW("Could not decode disk_allocation_ratio: '%s'"), e)
ratio = host_state.disk_allocation_ratio
return ratio
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
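The pass/fail logic in this record reduces to two inequalities: the request must fit on physical disk before overcommit, and within (limit minus used) after it. A standalone sketch of that arithmetic; the numbers in the example call are made up:

```python
# Standalone restatement of the DiskFilter arithmetic above; example numbers
# are hypothetical, not from the source.
def disk_filter_passes(total_gb, free_mb, ratio, root_gb, ephemeral_gb, swap_mb):
    requested_mb = 1024 * (root_gb + ephemeral_gb) + swap_mb
    total_mb = total_gb * 1024
    if total_mb < requested_mb:   # an instance may not overcommit against itself
        return False
    limit_mb = total_mb * ratio   # overcommit ceiling
    used_mb = total_mb - free_mb
    return limit_mb - used_mb >= requested_mb

# 100 GB host, 40 GB free, allocation ratio 1.5, flavor with a 20 GB root disk:
print(disk_filter_passes(100, 40 * 1024, 1.5, 20, 0, 0))  # True
```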
Record 2
hexsha: 1c478dfbc5d80108de891f08ddbc1d37b7c4fa6e | size: 7,930 | ext: py | lang: Python
max_stars: HYU-ICEWALL/CTFd @ d2d95d882663d39d32527afd4382f07188ecb89a | path: tests/user/test_scoreboard.py | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: HYU-ICEWALL/CTFd @ d2d95d882663d39d32527afd4382f07188ecb89a | path: tests/user/test_scoreboard.py | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: HYU-ICEWALL/CTFd @ d2d95d882663d39d32527afd4382f07188ecb89a | path: tests/user/test_scoreboard.py | licenses: ["Apache-2.0"] | count: null | events: null
content:
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from CTFd.models import Teams, Solves, WrongKeys
from CTFd.utils import get_config, set_config
from CTFd import utils
from tests.helpers import *
from freezegun import freeze_time
from mock import patch
import json
def test_top_10():
'''Make sure top10 returns correct information'''
app = create_ctfd()
with app.app_context():
register_user(app, name="user1", email="user1@hanyang.ac.kr")
register_user(app, name="user2", email="user2@hanyang.ac.kr")
chal1 = gen_challenge(app.db)
flag1 = gen_flag(app.db, chal=chal1.id, flag='flag')
chal1_id = chal1.id
chal2 = gen_challenge(app.db)
flag2 = gen_flag(app.db, chal=chal2.id, flag='flag')
chal2_id = chal2.id
# Generates solve for user1
with freeze_time("2017-10-3 03:21:34"):
gen_solve(app.db, teamid=2, chalid=chal1_id)
with freeze_time("2017-10-4 03:25:45"):
gen_solve(app.db, teamid=2, chalid=chal2_id)
# Generate solve for user2
with freeze_time("2017-10-3 03:21:34"):
gen_solve(app.db, teamid=3, chalid=chal1_id)
client = login_as_user(app)
r = client.get('/top/10')
response = r.get_data(as_text=True)
saved = '''{
"places": {
"1": {
"id": 2,
"name": "user1",
"solves": [
{
"chal": 1,
"team": 2,
"time": 1507000894,
"value": 100
},
{
"chal": 2,
"team": 2,
"time": 1507087545,
"value": 100
}
]
},
"2": {
"id": 3,
"name": "user2",
"solves": [
{
"chal": 1,
"team": 3,
"time": 1507000894,
"value": 100
}
]
}
}
}'''
saved = json.loads(saved)
received = json.loads(response)
assert saved == received
destroy_ctfd(app)
def test_scoring_logic():
"""Test that scoring logic is correct"""
app = create_ctfd()
with app.app_context():
admin = login_as_user(app, name="admin", password="password")
register_user(app, name="user1", email="user1@hanyang.ac.kr", password="password")
client1 = login_as_user(app, name="user1", password="password")
register_user(app, name="user2", email="user2@hanyang.ac.kr", password="password")
client2 = login_as_user(app, name="user2", password="password")
chal1 = gen_challenge(app.db)
flag1 = gen_flag(app.db, chal=chal1.id, flag='flag')
chal1_id = chal1.id
chal2 = gen_challenge(app.db)
flag2 = gen_flag(app.db, chal=chal2.id, flag='flag')
chal2_id = chal2.id
# user1 solves chal1
with freeze_time("2017-10-3 03:21:34"):
with client1.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client1.post('/chal/{}'.format(chal1_id), data=data)
# user1 is now on top
scores = get_scores(admin)
assert scores[0]['team'] == 'user1'
# user2 solves chal1 and chal2
with freeze_time("2017-10-4 03:30:34"):
with client2.session_transaction() as sess:
# solve chal1
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client2.post('/chal/{}'.format(chal1_id), data=data)
# solve chal2
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client2.post('/chal/{}'.format(chal2_id), data=data)
# user2 is now on top
scores = get_scores(admin)
assert scores[0]['team'] == 'user2'
# user1 solves chal2
with freeze_time("2017-10-5 03:50:34"):
with client1.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client1.post('/chal/{}'.format(chal2_id), data=data)
# user2 should still be on top because they solved chal2 first
scores = get_scores(admin)
assert scores[0]['team'] == 'user2'
destroy_ctfd(app)
def test_scoring_logic_with_zero_point_challenges():
"""Test that scoring logic is correct with zero point challenges. Zero point challenges should not tie break"""
app = create_ctfd()
with app.app_context():
admin = login_as_user(app, name="admin", password="password")
register_user(app, name="user1", email="user1@hanyang.ac.kr", password="password")
client1 = login_as_user(app, name="user1", password="password")
register_user(app, name="user2", email="user2@hanyang.ac.kr", password="password")
client2 = login_as_user(app, name="user2", password="password")
chal1 = gen_challenge(app.db)
flag1 = gen_flag(app.db, chal=chal1.id, flag='flag')
chal1_id = chal1.id
chal2 = gen_challenge(app.db)
flag2 = gen_flag(app.db, chal=chal2.id, flag='flag')
chal2_id = chal2.id
# A 0 point challenge shouldn't influence the scoreboard (see #577)
chal0 = gen_challenge(app.db, value=0)
flag0 = gen_flag(app.db, chal=chal0.id, flag='flag')
chal0_id = chal0.id
# user1 solves chal1
with freeze_time("2017-10-3 03:21:34"):
with client1.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client1.post('/chal/{}'.format(chal1_id), data=data)
# user1 is now on top
scores = get_scores(admin)
assert scores[0]['team'] == 'user1'
# user2 solves chal1 and chal2
with freeze_time("2017-10-4 03:30:34"):
with client2.session_transaction() as sess:
# solve chal1
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client2.post('/chal/{}'.format(chal1_id), data=data)
# solve chal2
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client2.post('/chal/{}'.format(chal2_id), data=data)
# user2 is now on top
scores = get_scores(admin)
assert scores[0]['team'] == 'user2'
# user1 solves chal2
with freeze_time("2017-10-5 03:50:34"):
with client1.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client1.post('/chal/{}'.format(chal2_id), data=data)
# user2 should still be on top because they solved chal2 first
scores = get_scores(admin)
assert scores[0]['team'] == 'user2'
# user2 solves a 0 point challenge
with freeze_time("2017-10-5 03:55:34"):
with client2.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client2.post('/chal/{}'.format(chal0_id), data=data)
# user2 should still be on top because 0 point challenges should not tie break
scores = get_scores(admin)
assert scores[0]['team'] == 'user2'
destroy_ctfd(app)
```
avg_line_length: 34.034335 | max_line_length: 115 | alphanum_fraction: 0.509458
content_no_comment:
```python
from CTFd.models import Teams, Solves, WrongKeys
from CTFd.utils import get_config, set_config
from CTFd import utils
from tests.helpers import *
from freezegun import freeze_time
from mock import patch
import json
def test_top_10():
app = create_ctfd()
with app.app_context():
register_user(app, name="user1", email="user1@hanyang.ac.kr")
register_user(app, name="user2", email="user2@hanyang.ac.kr")
chal1 = gen_challenge(app.db)
flag1 = gen_flag(app.db, chal=chal1.id, flag='flag')
chal1_id = chal1.id
chal2 = gen_challenge(app.db)
flag2 = gen_flag(app.db, chal=chal2.id, flag='flag')
chal2_id = chal2.id
with freeze_time("2017-10-3 03:21:34"):
gen_solve(app.db, teamid=2, chalid=chal1_id)
with freeze_time("2017-10-4 03:25:45"):
gen_solve(app.db, teamid=2, chalid=chal2_id)
with freeze_time("2017-10-3 03:21:34"):
gen_solve(app.db, teamid=3, chalid=chal1_id)
client = login_as_user(app)
r = client.get('/top/10')
response = r.get_data(as_text=True)
saved = '''{
"places": {
"1": {
"id": 2,
"name": "user1",
"solves": [
{
"chal": 1,
"team": 2,
"time": 1507000894,
"value": 100
},
{
"chal": 2,
"team": 2,
"time": 1507087545,
"value": 100
}
]
},
"2": {
"id": 3,
"name": "user2",
"solves": [
{
"chal": 1,
"team": 3,
"time": 1507000894,
"value": 100
}
]
}
}
}'''
saved = json.loads(saved)
received = json.loads(response)
assert saved == received
destroy_ctfd(app)
def test_scoring_logic():
app = create_ctfd()
with app.app_context():
admin = login_as_user(app, name="admin", password="password")
register_user(app, name="user1", email="user1@hanyang.ac.kr", password="password")
client1 = login_as_user(app, name="user1", password="password")
register_user(app, name="user2", email="user2@hanyang.ac.kr", password="password")
client2 = login_as_user(app, name="user2", password="password")
chal1 = gen_challenge(app.db)
flag1 = gen_flag(app.db, chal=chal1.id, flag='flag')
chal1_id = chal1.id
chal2 = gen_challenge(app.db)
flag2 = gen_flag(app.db, chal=chal2.id, flag='flag')
chal2_id = chal2.id
with freeze_time("2017-10-3 03:21:34"):
with client1.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client1.post('/chal/{}'.format(chal1_id), data=data)
scores = get_scores(admin)
assert scores[0]['team'] == 'user1'
with freeze_time("2017-10-4 03:30:34"):
with client2.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client2.post('/chal/{}'.format(chal1_id), data=data)
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client2.post('/chal/{}'.format(chal2_id), data=data)
scores = get_scores(admin)
assert scores[0]['team'] == 'user2'
with freeze_time("2017-10-5 03:50:34"):
with client1.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client1.post('/chal/{}'.format(chal2_id), data=data)
scores = get_scores(admin)
assert scores[0]['team'] == 'user2'
destroy_ctfd(app)
def test_scoring_logic_with_zero_point_challenges():
app = create_ctfd()
with app.app_context():
admin = login_as_user(app, name="admin", password="password")
register_user(app, name="user1", email="user1@hanyang.ac.kr", password="password")
client1 = login_as_user(app, name="user1", password="password")
register_user(app, name="user2", email="user2@hanyang.ac.kr", password="password")
client2 = login_as_user(app, name="user2", password="password")
chal1 = gen_challenge(app.db)
flag1 = gen_flag(app.db, chal=chal1.id, flag='flag')
chal1_id = chal1.id
chal2 = gen_challenge(app.db)
flag2 = gen_flag(app.db, chal=chal2.id, flag='flag')
chal2_id = chal2.id
chal0 = gen_challenge(app.db, value=0)
flag0 = gen_flag(app.db, chal=chal0.id, flag='flag')
chal0_id = chal0.id
# user1 solves chal1
with freeze_time("2017-10-3 03:21:34"):
with client1.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client1.post('/chal/{}'.format(chal1_id), data=data)
# user1 is now on top
scores = get_scores(admin)
assert scores[0]['team'] == 'user1'
# user2 solves chal1 and chal2
with freeze_time("2017-10-4 03:30:34"):
with client2.session_transaction() as sess:
# solve chal1
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client2.post('/chal/{}'.format(chal1_id), data=data)
# solve chal2
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client2.post('/chal/{}'.format(chal2_id), data=data)
# user2 is now on top
scores = get_scores(admin)
assert scores[0]['team'] == 'user2'
# user1 solves chal2
with freeze_time("2017-10-5 03:50:34"):
with client1.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client1.post('/chal/{}'.format(chal2_id), data=data)
# user2 should still be on top because they solved chal2 first
scores = get_scores(admin)
assert scores[0]['team'] == 'user2'
# user2 solves a 0 point challenge
with freeze_time("2017-10-5 03:55:34"):
with client2.session_transaction() as sess:
data = {
"key": 'flag',
"nonce": sess.get('nonce')
}
r = client2.post('/chal/{}'.format(chal0_id), data=data)
# user2 should still be on top because 0 point challenges should not tie break
scores = get_scores(admin)
assert scores[0]['team'] == 'user2'
destroy_ctfd(app)
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
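The tie-break assertions in this record depend on `freeze_time` pinning the clock for each solve; a minimal standalone sketch of that behaviour (not part of the test file above):

```python
# freezegun pins "now" inside the block, which makes the solve timestamps
# in the tests above deterministic.
from datetime import datetime
from freezegun import freeze_time

with freeze_time("2017-10-03 03:21:34"):
    assert datetime.utcnow() == datetime(2017, 10, 3, 3, 21, 34)
```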
Record 3
hexsha: 1c478ed2cb7df85c8da293ebd6985cd21b3671a5 | size: 3,769 | ext: py | lang: Python
max_stars: kufsa/pynventory @ 708e7950c38e873b2a4b7bdc779c0533888ac811 | path: pynventory/hosts.py | licenses: ["MIT"] | count: null | events: null
max_issues: kufsa/pynventory @ 708e7950c38e873b2a4b7bdc779c0533888ac811 | path: pynventory/hosts.py | licenses: ["MIT"] | count: null | events: null
max_forks: kufsa/pynventory @ 708e7950c38e873b2a4b7bdc779c0533888ac811 | path: pynventory/hosts.py | licenses: ["MIT"] | count: null | events: null
content:
```python
from fabric import Connection
from invoke.exceptions import UnexpectedExit
class LinuxHost:
def __init__(self, host, user):
self.connection = Connection(host, connect_timeout=1, user=user, )
self.host = host
@staticmethod
def display_name():
return 'Host'
def __str__(self):
return self.host
class GetOsRelease:
def __init__(self, parent):
try:
self.output = parent.connection.run('cat /etc/os-release | grep "PRETTY_NAME"', hide=True)
self.output = self.output.stdout.split('=')[1].replace('"', '')
except UnexpectedExit:
try:
self.output = parent.connection.run(' cat /etc/redhat-release', hide=True)
self.output = self.output.stdout
except UnexpectedExit:
self.output = "Failed to retrieve OS Release"
def __str__(self):
# some words to remove from output as they are redundant
clean_up = ['Linux', 'Server', 'release']
_out = []
for i in self.output.split():
if i not in clean_up:
_out.append(i)
return ' '.join(_out)
@staticmethod
def display_name():
return 'OS Version'
class GetHostname:
def __init__(self, parent):
self.output = parent.connection.run('hostname', hide=True).stdout
@staticmethod
def display_name():
return 'Hostname'
def __str__(self):
return self.output.strip()
class GetNtpServer:
def __init__(self, parent):
output = parent.connection.run('ntpq -pn', hide=True)
# ntpq will output error if daemon is not running
if output.stderr:
self.output = [output.stderr.strip(), ]
else:
# remove header from ntpq output
self.output = output.stdout.strip().split('\n')[2:]
def __str__(self):
# Filter out details and only return server ip
servers = []
for line in self.output:
servers.append(line.split(' ')[0])
return ', '.join(servers)
@staticmethod
def display_name():
return 'NTP Server'
class GetCpuCores:
def __init__(self, parent):
self.output = parent.connection.run('nproc', hide=True).stdout
def __str__(self):
return self.output.strip()
@staticmethod
def display_name():
return 'Core count'
class GetMemory:
def __init__(self, parent):
output = parent.connection.run('free -h', hide=True).stdout
# Split output into lines, then split the columns and take total memory value
self.memory = output.split('\n')[1].split()[1]
def __str__(self):
return self.memory
@staticmethod
def display_name():
return 'Memory'
class GetDiskSize:
def __init__(self, parent):
output = parent.connection.run('df -h -l --total', hide=True).stdout
# Split output into lines, then split the columns and take disk size
self.disk_size = output.split('\n')[-2].split()[1]
def __str__(self):
return self.disk_size
@staticmethod
def display_name():
return 'Disk size'
class GetKernelVersion:
def __init__(self, parent):
self.output = parent.connection.run('uname -r', hide=True).stdout
def __str__(self):
return self.output.strip()
@staticmethod
def display_name():
return 'Kernel version'
```
avg_line_length: 30.893443 | max_line_length: 106 | alphanum_fraction: 0.555585
content_no_comment:
```python
from fabric import Connection
from invoke.exceptions import UnexpectedExit
class LinuxHost:
def __init__(self, host, user):
self.connection = Connection(host, connect_timeout=1, user=user, )
self.host = host
@staticmethod
def display_name():
return 'Host'
def __str__(self):
return self.host
class GetOsRelease:
def __init__(self, parent):
try:
self.output = parent.connection.run('cat /etc/os-release | grep "PRETTY_NAME"', hide=True)
self.output = self.output.stdout.split('=')[1].replace('"', '')
except UnexpectedExit:
try:
self.output = parent.connection.run(' cat /etc/redhat-release', hide=True)
self.output = self.output.stdout
except UnexpectedExit:
self.output = "Failed to retrieve OS Release"
def __str__(self):
# some words to remove from output as they are redundant
clean_up = ['Linux', 'Server', 'release']
_out = []
for i in self.output.split():
if i not in clean_up:
_out.append(i)
return ' '.join(_out)
@staticmethod
def display_name():
return 'OS Version'
class GetHostname:
def __init__(self, parent):
self.output = parent.connection.run('hostname', hide=True).stdout
@staticmethod
def display_name():
return 'Hostname'
def __str__(self):
return self.output.strip()
class GetNtpServer:
def __init__(self, parent):
output = parent.connection.run('ntpq -pn', hide=True)
# ntpq will output error if daemon is not running
if output.stderr:
self.output = [output.stderr.strip(), ]
else:
# remove header from ntpq output
self.output = output.stdout.strip().split('\n')[2:]
def __str__(self):
# Filter out details and only return server ip
servers = []
for line in self.output:
servers.append(line.split(' ')[0])
return ', '.join(servers)
@staticmethod
def display_name():
return 'NTP Server'
class GetCpuCores:
def __init__(self, parent):
self.output = parent.connection.run('nproc', hide=True).stdout
def __str__(self):
return self.output.strip()
@staticmethod
def display_name():
return 'Core count'
class GetMemory:
def __init__(self, parent):
output = parent.connection.run('free -h', hide=True).stdout
# Split output into lines, then split the columns and take total memory value
self.memory = output.split('\n')[1].split()[1]
def __str__(self):
return self.memory
@staticmethod
def display_name():
return 'Memory'
class GetDiskSize:
def __init__(self, parent):
output = parent.connection.run('df -h -l --total', hide=True).stdout
# Split output into lines, then split the columns and take disk size
self.disk_size = output.split('\n')[-2].split()[1]
def __str__(self):
return self.disk_size
@staticmethod
def display_name():
return 'Disk size'
class GetKernelVersion:
def __init__(self, parent):
self.output = parent.connection.run('uname -r', hide=True).stdout
def __str__(self):
return self.output.strip()
@staticmethod
def display_name():
return 'Kernel version'
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
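A hypothetical driver for the check classes in this record; the address and user are placeholders, and an SSH-reachable host is required for it to do anything:

```python
# Hypothetical usage of the classes above; "192.0.2.10" and "root" are
# placeholders, not from the source.
from pynventory.hosts import (LinuxHost, GetHostname, GetOsRelease,
                              GetCpuCores, GetMemory, GetDiskSize)

host = LinuxHost("192.0.2.10", user="root")
# Each check runs its command in __init__ and renders itself via __str__.
for check in (GetHostname, GetOsRelease, GetCpuCores, GetMemory, GetDiskSize):
    print(f"{check.display_name()}: {check(host)}")
```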
Record 4
hexsha: 1c478ee5315f97fa1a7ac3ba3481af09e56571ff | size: 786 | ext: py | lang: Python
max_stars: AndrewRPorter/stocki @ 0793fe05735c8c803f5cb3ef2ea029a82243dbbd | path: setup.py | licenses: ["MIT"] | count: 33 | events: 2018-07-11T19:22:00.000Z to 2021-01-02T13:01:10.000Z
max_issues: AndrewRPorter/stocki @ 0793fe05735c8c803f5cb3ef2ea029a82243dbbd | path: setup.py | licenses: ["MIT"] | count: 2 | events: 2018-07-12T12:33:46.000Z to 2018-07-16T13:07:59.000Z
max_forks: AndrewRPorter/stocki @ 0793fe05735c8c803f5cb3ef2ea029a82243dbbd | path: setup.py | licenses: ["MIT"] | count: 5 | events: 2018-07-11T17:22:07.000Z to 2019-03-19T08:48:08.000Z
content:
```python
from setuptools import setup
try:
with open("LICENSE.txt", "r") as f:
_license = f.read()
except Exception:
_license = ""
try:
with open("README.md", "r") as f:
_readme = f.read()
except Exception:
_readme = ""
install_requires = ["requests", "urwid", "pycodestyle"]
setup(
name="stocki",
version="0.2.0",
description="The CLI for fetching stock market data.",
long_description=_readme,
license=_license,
install_requires=install_requires,
packages=["stocki"],
entry_points={"console_scripts": ["stocki = stocki.stocki:main"]},
include_package_data=True,
python_requires=">=2.7",
url="https://github.com/andrewrporter/stocki",
author="AndrewRPorter",
author_email="porter.r.andrew@gmail.com",
)
```
avg_line_length: 22.457143 | max_line_length: 70 | alphanum_fraction: 0.652672
content_no_comment:
```python
from setuptools import setup
try:
with open("LICENSE.txt", "r") as f:
_license = f.read()
except Exception:
_license = ""
try:
with open("README.md", "r") as f:
_readme = f.read()
except Exception:
_readme = ""
install_requires = ["requests", "urwid", "pycodestyle"]
setup(
name="stocki",
version="0.2.0",
description="The CLI for fetching stock market data.",
long_description=_readme,
license=_license,
install_requires=install_requires,
packages=["stocki"],
entry_points={"console_scripts": ["stocki = stocki.stocki:main"]},
include_package_data=True,
python_requires=">=2.7",
url="https://github.com/andrewrporter/stocki",
author="AndrewRPorter",
author_email="porter.r.andrew@gmail.com",
)
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
Record 5
hexsha: 1c478fc5baec380c9474bb2707520c938527aa52 | size: 1,730 | ext: py | lang: Python
max_stars: manasharma90/AoC-2020-Python @ 6a979eff34136b6b74a340c40121da76e35451da | path: Puzzle5/binaryPartitioning.py | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: manasharma90/AoC-2020-Python @ 6a979eff34136b6b74a340c40121da76e35451da | path: Puzzle5/binaryPartitioning.py | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: manasharma90/AoC-2020-Python @ 6a979eff34136b6b74a340c40121da76e35451da | path: Puzzle5/binaryPartitioning.py | licenses: ["Apache-2.0"] | count: null | events: null
content:
```python
# defining a function to execute binary partitioning of a list
# input = list; output = tuple with two lists ie. ([first half list], [second half list])
def list_half(input_list):
half = len(input_list)//2
lower_half = input_list[:half]
upper_half = input_list[half:]
return lower_half, upper_half
with open('input.txt', 'r') as f:
a = f.read()
boarding_passes = a.split('\n')
#cleaning the file by validating that the elements are 10 characters and each character is either F,B,R or L
boarding_passes_cleaned = []
for bp in boarding_passes:
if len(bp) == 10:
valid = True
for l in bp:
if l not in ['F', 'B', 'R', 'L']:
valid = False
if valid:
boarding_passes_cleaned.append(bp)
largest_sID = 0
#defining a function to decode the row number from the boarding pass code
def decode_row(bp_code):
rows = list(range(128))
for letter in bp_code:
if letter == 'F':
rows = list_half(rows)[0]
if letter == 'B':
rows = list_half(rows)[1]
return rows[0]
#defining a function to decode the column number from the boarding pass code
def decode_column(bp_code):
columns = list(range(8))
for letter in bp_code:
if letter == 'L':
columns = list_half(columns)[0]
if letter == 'R':
columns = list_half(columns)[1]
return columns[0]
# finding out the largest seat ID on the given list of boarding passes
for bp_code in boarding_passes_cleaned:
r = decode_row(bp_code)
c = decode_column(bp_code)
sID = (r * 8) + c
if sID > largest_sID:
largest_sID = sID
print(largest_sID)
```
avg_line_length: 25.441176 | max_line_length: 108 | alphanum_fraction: 0.616185
content_no_comment:
```python
def list_half(input_list):
half = len(input_list)//2
lower_half = input_list[:half]
upper_half = input_list[half:]
return lower_half, upper_half
with open('input.txt', 'r') as f:
a = f.read()
boarding_passes = a.split('\n')
boarding_passes_cleaned = []
for bp in boarding_passes:
if len(bp) == 10:
valid = True
for l in bp:
if l not in ['F', 'B', 'R', 'L']:
valid = False
if valid:
boarding_passes_cleaned.append(bp)
largest_sID = 0
def decode_row(bp_code):
rows = list(range(128))
for letter in bp_code:
if letter == 'F':
rows = list_half(rows)[0]
if letter == 'B':
rows = list_half(rows)[1]
return rows[0]
def decode_column(bp_code):
columns = list(range(8))
for letter in bp_code:
if letter == 'L':
columns = list_half(columns)[0]
if letter == 'R':
columns = list_half(columns)[1]
return columns[0]
for bp_code in boarding_passes_cleaned:
r = decode_row(bp_code)
c = decode_column(bp_code)
sID = (r * 8) + c
if sID > largest_sID:
largest_sID = sID
print(largest_sID)
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
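The repeated halving in this record is equivalent to reading the boarding pass as a 10-bit binary number (B/R as 1, F/L as 0), in which case seat ID = row * 8 + col is exactly that number. A sketch of the equivalence, which is an observation not present in the original file:

```python
# Equivalent one-liner for the binary partitioning above: B/R -> 1, F/L -> 0.
def seat_id(bp_code):
    return int(bp_code.translate(str.maketrans("FBLR", "0101")), 2)

print(seat_id("FBFBBFFRLR"))  # 357, the worked example from the AoC 2020 puzzle
```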
Record 6
hexsha: 1c4790bd2a51657327ca769fe5588e04bb77bab6 | size: 2,878 | ext: py | lang: Python
max_stars: chunxiaosz/nnabla @ 9f4249313129d0fd23d304453830157fee96a2e5 | path: python/src/nnabla/backward_function/div2.py | licenses: ["Apache-2.0"] | count: 1 | events: 2019-09-10T06:51:37.000Z to 2019-09-10T06:51:37.000Z
max_issues: langbin2014/nnabla @ e94bac5bed65337010e2ac07a5937fb862ab2dd8 | path: python/src/nnabla/backward_function/div2.py | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: langbin2014/nnabla @ e94bac5bed65337010e2ac07a5937fb862ab2dd8 | path: python/src/nnabla/backward_function/div2.py | licenses: ["Apache-2.0"] | count: null | events: null
content:
```python
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nnabla as nn
import nnabla.functions as F
from .backward_function import BackwardFunction
class Div2Backward(BackwardFunction):
def name(self):
return 'Div2Backward'
def _create_forward_inputs_and_outputs(self, inputs, outputs):
# Inputs on the forward graph
inputs_fwd = []
for i in range(self._num_inputs_fwd):
need_grad = self.forward_func.inputs[i].need_grad
v = nn.Variable(inputs[i].shape, need_grad=need_grad)
v.data = inputs[i].data
v.grad = outputs[i].data
inputs_fwd += [v]
# Outputs on the forward graph
outputs_fwd = []
for i in range(self._num_outputs_fwd):
inp = inputs[self._num_inputs_fwd + i]
v = nn.Variable(inp.shape)
v.grad = inp.data
outputs_fwd += [v]
return inputs_fwd, outputs_fwd
def backward_impl(self, inputs, outputs, prop_down, accum):
# inputs: [inputs_fwd_graph] + [inputs_bwd_graph] or
# [inputs_fwd_graph] + [outputs_fwd_graph] + [inputs_bwd_graph]
# Inputs
x0 = inputs[0].data
x1 = inputs[1].data
dy = inputs[2].data
# Outputs
dx0 = outputs[0].data
dx1 = outputs[1].data
# Grads of inputs
g_x0 = inputs[0].grad
g_x1 = inputs[1].grad
g_dy = inputs[2].grad
# Grads of outputs
g_dx0 = outputs[0].grad
g_dx1 = outputs[1].grad
# Computation
x1_inv_square = F.pow_scalar(x1, -2.0)
if prop_down[0]:
if accum[0]:
g_x0 -= g_dx1 * dy * x1_inv_square
else:
g_x0.copy_from(- g_dx1 * dy * x1_inv_square)
if prop_down[1]:
if accum[1]:
g_x1 += dy * (g_dx1 * 2 * x0 *
F.pow_scalar(x1, -3.0) - g_dx0 * x1_inv_square)
else:
g_x1.copy_from(
dy * (2 * g_dx1 * x0 * F.pow_scalar(x1, -3.0) - g_dx0 * x1_inv_square))
if prop_down[2]:
if accum[2]:
g_dy += g_dx0 / x1 - g_dx1 * x0 * x1_inv_square
else:
g_dy.copy_from(g_dx0 / x1 - g_dx1 * x0 * x1_inv_square)
```
avg_line_length: 35.097561 | max_line_length: 91 | alphanum_fraction: 0.589298
content_no_comment:
```python
import numpy as np
import nnabla as nn
import nnabla.functions as F
from .backward_function import BackwardFunction
class Div2Backward(BackwardFunction):
def name(self):
return 'Div2Backward'
def _create_forward_inputs_and_outputs(self, inputs, outputs):
inputs_fwd = []
for i in range(self._num_inputs_fwd):
need_grad = self.forward_func.inputs[i].need_grad
v = nn.Variable(inputs[i].shape, need_grad=need_grad)
v.data = inputs[i].data
v.grad = outputs[i].data
inputs_fwd += [v]
outputs_fwd = []
for i in range(self._num_outputs_fwd):
inp = inputs[self._num_inputs_fwd + i]
v = nn.Variable(inp.shape)
v.grad = inp.data
outputs_fwd += [v]
return inputs_fwd, outputs_fwd
def backward_impl(self, inputs, outputs, prop_down, accum):
x0 = inputs[0].data
x1 = inputs[1].data
dy = inputs[2].data
dx0 = outputs[0].data
dx1 = outputs[1].data
g_x0 = inputs[0].grad
g_x1 = inputs[1].grad
g_dy = inputs[2].grad
g_dx0 = outputs[0].grad
g_dx1 = outputs[1].grad
x1_inv_square = F.pow_scalar(x1, -2.0)
if prop_down[0]:
if accum[0]:
g_x0 -= g_dx1 * dy * x1_inv_square
else:
g_x0.copy_from(- g_dx1 * dy * x1_inv_square)
if prop_down[1]:
if accum[1]:
g_x1 += dy * (g_dx1 * 2 * x0 *
F.pow_scalar(x1, -3.0) - g_dx0 * x1_inv_square)
else:
g_x1.copy_from(
dy * (2 * g_dx1 * x0 * F.pow_scalar(x1, -3.0) - g_dx0 * x1_inv_square))
if prop_down[2]:
if accum[2]:
g_dy += g_dx0 / x1 - g_dx1 * x0 * x1_inv_square
else:
g_dy.copy_from(g_dx0 / x1 - g_dx1 * x0 * x1_inv_square)
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
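All of the second-order expressions in this record follow from the first-order gradients of y = x0 / x1, namely dy/dx0 = 1/x1 and dy/dx1 = -x0 / x1**2, which `Div2Backward` differentiates a second time. A quick finite-difference check of those two identities (not from the source):

```python
# Finite-difference sanity check of the first-order gradients of y = x0 / x1.
import numpy as np

x0, x1, eps = 3.0, 2.0, 1e-6
dy_dx0 = ((x0 + eps) / x1 - x0 / x1) / eps
dy_dx1 = (x0 / (x1 + eps) - x0 / x1) / eps
assert np.isclose(dy_dx0, 1 / x1, atol=1e-4)
assert np.isclose(dy_dx1, -x0 / x1 ** 2, atol=1e-4)
```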
Record 7
hexsha: 1c4791f5de8986417f5d44fefb3cdffd7192c28f | size: 2,568 | ext: py | lang: Python
max_stars: odotreppe/scikit-learn-mooc @ da97773fc9b860371e94e3c72791b0c92471b22d | path: python_scripts/linear_models_sol_03.py | licenses: ["CC-BY-4.0"] | count: 2 | events: 2021-09-30T11:07:28.000Z to 2021-09-30T11:07:31.000Z
max_issues: Ravimk07/scikit-learn-mooc @ c3aaf8c5a9aa4f1d749ebc1b7d5ae24619fee4bf | path: python_scripts/linear_models_sol_03.py | licenses: ["CC-BY-4.0"] | count: null | events: null
max_forks: Ravimk07/scikit-learn-mooc @ c3aaf8c5a9aa4f1d749ebc1b7d5ae24619fee4bf | path: python_scripts/linear_models_sol_03.py | licenses: ["CC-BY-4.0"] | count: null | events: null
content:
```python
# %% [markdown]
# # 📃 Solution for Exercise M4.03
#
# In all previous notebooks, we only used a single feature in `data`. But we
# have already shown that we could add new features to make the model more
# expressive by deriving new features, based on the original feature.
#
# The aim of this notebook is to train a linear regression algorithm on a
# dataset with more than a single feature.
#
# We will load a dataset about house prices in California.
# The dataset consists of 8 features regarding the demography and geography of
# districts in California and the aim is to predict the median house price of
# each district. We will use all 8 features to predict the target, the median
# house price.
# %% [markdown]
# ```{note}
# If you want a deeper overview regarding this dataset, you can refer to the
# Appendix - Datasets description section at the end of this MOOC.
# ```
# %%
from sklearn.datasets import fetch_california_housing
data, target = fetch_california_housing(as_frame=True, return_X_y=True)
target *= 100 # rescale the target in k$
data.head()
# %% [markdown]
# Now this is your turn to train a linear regression model on this dataset.
# You will need to:
# * create a linear regression model;
# * execute a cross-validation with 10 folds and use the mean absolute error
# (MAE) as metric. Ensure to return the fitted estimators;
# * compute mean and std of the MAE in thousands of dollars (k$);
# * show the values of the coefficients for each feature using a boxplot by
# inspecting the fitted model returned from the cross-validation. Hint: you
# use the function
# [`df.plot.box()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.box.html)
# to plot a box plot.
# %%
from sklearn.linear_model import LinearRegression
linear_regression = LinearRegression()
# %%
from sklearn.model_selection import cross_validate
cv_results = cross_validate(linear_regression, data, target,
scoring="neg_mean_absolute_error",
return_estimator=True, cv=10, n_jobs=2)
# %%
print(f"Mean absolute error on testing set: "
f"{-cv_results['test_score'].mean():.3f} k$ +/- "
f"{cv_results['test_score'].std():.3f}")
# %%
import pandas as pd
weights = pd.DataFrame(
[est.coef_ for est in cv_results["estimator"]], columns=data.columns)
# %%
import matplotlib.pyplot as plt
color = {"whiskers": "black", "medians": "black", "caps": "black"}
weights.plot.box(color=color, vert=False)
_ = plt.title("Value of linear regression coefficients")
```
avg_line_length: 35.666667 | max_line_length: 112 | alphanum_fraction: 0.720405
content_no_comment:
```python
from sklearn.datasets import fetch_california_housing
data, target = fetch_california_housing(as_frame=True, return_X_y=True)
target *= 100
data.head()
from sklearn.linear_model import LinearRegression
linear_regression = LinearRegression()
from sklearn.model_selection import cross_validate
cv_results = cross_validate(linear_regression, data, target,
scoring="neg_mean_absolute_error",
return_estimator=True, cv=10, n_jobs=2)
print(f"Mean absolute error on testing set: "
f"{-cv_results['test_score'].mean():.3f} k$ +/- "
f"{cv_results['test_score'].std():.3f}")
import pandas as pd
weights = pd.DataFrame(
[est.coef_ for est in cv_results["estimator"]], columns=data.columns)
import matplotlib.pyplot as plt
color = {"whiskers": "black", "medians": "black", "caps": "black"}
weights.plot.box(color=color, vert=False)
_ = plt.title("Value of linear regression coefficients")
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
Record 8
hexsha: 1c47920152539c32902149b890e26eb84bfb3c09 | size: 5,674 | ext: py | lang: Python
max_stars: citrix-openstack-build/python-novaclient @ 3d73fb36e7c5e5f933560760f46ff6aec74ff093 | path: novaclient/v1_1/volumes.py | licenses: ["Apache-1.1"] | count: 1 | events: 2015-02-16T09:37:00.000Z to 2015-02-16T09:37:00.000Z
max_issues: sivel/python-novaclient @ 810857849ed32773c38df12785715f89d33e83af | path: novaclient/v1_1/volumes.py | licenses: ["Apache-1.1"] | count: null | events: null
max_forks: sivel/python-novaclient @ 810857849ed32773c38df12785715f89d33e83af | path: novaclient/v1_1/volumes.py | licenses: ["Apache-1.1"] | count: null | events: null
content:
```python
# Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume interface (1.1 extension).
"""
import six
from novaclient import base
from novaclient.openstack.common.py3kcompat import urlutils
class Volume(base.Resource):
"""
A volume is an extra block level storage to the OpenStack instances.
"""
NAME_ATTR = 'display_name'
def __repr__(self):
return "<Volume: %s>" % self.id
def delete(self):
"""
Delete this volume.
"""
self.manager.delete(self)
class VolumeManager(base.ManagerWithFind):
"""
Manage :class:`Volume` resources.
"""
resource_class = Volume
def create(self, size, snapshot_id=None,
display_name=None, display_description=None,
volume_type=None, availability_zone=None,
imageRef=None):
"""
Create a volume.
:param size: Size of volume in GB
:param snapshot_id: ID of the snapshot
:param display_name: Name of the volume
:param display_description: Description of the volume
:param volume_type: Type of volume
:param availability_zone: Availability Zone for volume
:rtype: :class:`Volume`
:param imageRef: reference to an image stored in glance
"""
body = {'volume': {'size': size,
'snapshot_id': snapshot_id,
'display_name': display_name,
'display_description': display_description,
'volume_type': volume_type,
'availability_zone': availability_zone,
'imageRef': imageRef}}
return self._create('/volumes', body, 'volume')
def get(self, volume_id):
"""
Get a volume.
:param volume_id: The ID of the volume to delete.
:rtype: :class:`Volume`
"""
return self._get("/volumes/%s" % volume_id, "volume")
def list(self, detailed=True, search_opts=None):
"""
Get a list of all volumes.
:rtype: list of :class:`Volume`
"""
search_opts = search_opts or {}
qparams = dict((k, v) for (k, v) in six.iteritems(search_opts) if v)
query_string = '?%s' % urlutils.urlencode(qparams) if qparams else ''
if detailed is True:
return self._list("/volumes/detail%s" % query_string, "volumes")
else:
return self._list("/volumes%s" % query_string, "volumes")
def delete(self, volume):
"""
Delete a volume.
:param volume: The :class:`Volume` to delete.
"""
self._delete("/volumes/%s" % base.getid(volume))
def create_server_volume(self, server_id, volume_id, device):
"""
Attach a volume identified by the volume ID to the given server ID
:param server_id: The ID of the server
:param volume_id: The ID of the volume to attach.
:param device: The device name
:rtype: :class:`Volume`
"""
body = {'volumeAttachment': {'volumeId': volume_id,
'device': device}}
return self._create("/servers/%s/os-volume_attachments" % server_id,
body, "volumeAttachment")
def update_server_volume(self, server_id, attachment_id, new_volume_id):
"""
Update the volume identified by the attachment ID, that is attached to
the given server ID
:param server_id: The ID of the server
:param attachment_id: The ID of the attachment
:param new_volume_id: The ID of the new volume to attach
:rtype: :class:`Volume`
"""
body = {'volumeAttachment': {'volumeId': new_volume_id}}
return self._update("/servers/%s/os-volume_attachments/%s" %
(server_id, attachment_id,), body, "volumeAttachment")
def get_server_volume(self, server_id, attachment_id):
"""
Get the volume identified by the attachment ID, that is attached to
the given server ID
:param server_id: The ID of the server
:param attachment_id: The ID of the attachment
:rtype: :class:`Volume`
"""
return self._get("/servers/%s/os-volume_attachments/%s" % (server_id,
attachment_id,), "volumeAttachment")
def get_server_volumes(self, server_id):
"""
Get a list of all the attached volumes for the given server ID
:param server_id: The ID of the server
:rtype: list of :class:`Volume`
"""
return self._list("/servers/%s/os-volume_attachments" % server_id,
"volumeAttachments")
def delete_server_volume(self, server_id, attachment_id):
"""
Detach a volume identified by the attachment ID from the given server
:param server_id: The ID of the server
:param attachment_id: The ID of the attachment
"""
self._delete("/servers/%s/os-volume_attachments/%s" %
(server_id, attachment_id,))
```
avg_line_length: 34.180723 | max_line_length: 78 | alphanum_fraction: 0.605217
content_no_comment:
```python
import six
from novaclient import base
from novaclient.openstack.common.py3kcompat import urlutils
class Volume(base.Resource):
NAME_ATTR = 'display_name'
def __repr__(self):
return "<Volume: %s>" % self.id
def delete(self):
self.manager.delete(self)
class VolumeManager(base.ManagerWithFind):
resource_class = Volume
def create(self, size, snapshot_id=None,
display_name=None, display_description=None,
volume_type=None, availability_zone=None,
imageRef=None):
body = {'volume': {'size': size,
'snapshot_id': snapshot_id,
'display_name': display_name,
'display_description': display_description,
'volume_type': volume_type,
'availability_zone': availability_zone,
'imageRef': imageRef}}
return self._create('/volumes', body, 'volume')
def get(self, volume_id):
return self._get("/volumes/%s" % volume_id, "volume")
def list(self, detailed=True, search_opts=None):
search_opts = search_opts or {}
qparams = dict((k, v) for (k, v) in six.iteritems(search_opts) if v)
query_string = '?%s' % urlutils.urlencode(qparams) if qparams else ''
if detailed is True:
return self._list("/volumes/detail%s" % query_string, "volumes")
else:
return self._list("/volumes%s" % query_string, "volumes")
def delete(self, volume):
self._delete("/volumes/%s" % base.getid(volume))
def create_server_volume(self, server_id, volume_id, device):
body = {'volumeAttachment': {'volumeId': volume_id,
'device': device}}
return self._create("/servers/%s/os-volume_attachments" % server_id,
body, "volumeAttachment")
def update_server_volume(self, server_id, attachment_id, new_volume_id):
body = {'volumeAttachment': {'volumeId': new_volume_id}}
return self._update("/servers/%s/os-volume_attachments/%s" %
(server_id, attachment_id,), body, "volumeAttachment")
def get_server_volume(self, server_id, attachment_id):
return self._get("/servers/%s/os-volume_attachments/%s" % (server_id,
attachment_id,), "volumeAttachment")
def get_server_volumes(self, server_id):
return self._list("/servers/%s/os-volume_attachments" % server_id,
"volumeAttachments")
def delete_server_volume(self, server_id, attachment_id):
self._delete("/servers/%s/os-volume_attachments/%s" %
(server_id, attachment_id,))
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
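A hypothetical end-to-end use of the `VolumeManager` in this record; the credentials, endpoint, and server UUID are placeholders, and the classic `novaclient.v1_1.client.Client(username, api_key, project_id, auth_url)` signature is assumed rather than taken from the source:

```python
# Hypothetical usage sketch; every credential and ID below is a placeholder.
from novaclient.v1_1 import client

nova = client.Client("demo", "secret", "demo-project",
                     "http://keystone.example.com:5000/v2.0/")
vol = nova.volumes.create(size=10, display_name="scratch")  # VolumeManager.create
nova.volumes.create_server_volume(server_id="<server-uuid>",
                                  volume_id=vol.id, device="/dev/vdb")
```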
Record 9
hexsha: 1c47922da6f61b01101caee74d5b39091250523f | size: 5,778 | ext: py | lang: Python
max_stars: RaphaelOlivier/deepspeech.pytorch @ eb73ef61807ab01fad3662ad03dfea8fd44439aa | path: deepspeech_pytorch/validation.py | licenses: ["MIT"] | count: 1 | events: 2021-08-07T07:12:40.000Z to 2021-08-07T07:12:40.000Z
max_issues: RaphaelOlivier/deepspeech.pytorch @ eb73ef61807ab01fad3662ad03dfea8fd44439aa | path: deepspeech_pytorch/validation.py | licenses: ["MIT"] | count: 1 | events: 2019-02-07T12:52:46.000Z to 2019-02-07T12:52:46.000Z
max_forks: RaphaelOlivier/deepspeech.pytorch @ eb73ef61807ab01fad3662ad03dfea8fd44439aa | path: deepspeech_pytorch/validation.py | licenses: ["MIT"] | count: null | events: null
content:
```python
from abc import ABC, abstractmethod
import torch
from torch.cuda.amp import autocast
from tqdm import tqdm
from deepspeech_pytorch.decoder import Decoder, GreedyDecoder
from pytorch_lightning.metrics import Metric
import Levenshtein as Lev
class ErrorRate(Metric, ABC):
def __init__(self,
decoder: Decoder,
target_decoder: GreedyDecoder,
save_output: bool = False,
dist_sync_on_step: bool = False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.decoder = decoder
self.target_decoder = target_decoder
self.save_output = save_output
@abstractmethod
def calculate_metric(self, transcript, reference):
raise NotImplementedError
def update(self, preds: torch.Tensor,
preds_sizes: torch.Tensor,
targets: torch.Tensor,
target_sizes: torch.Tensor):
# unflatten targets
split_targets = []
offset = 0
for size in target_sizes:
split_targets.append(targets[offset:offset + size])
offset += size
decoded_output, _ = self.decoder.decode(preds, preds_sizes)
target_strings = self.target_decoder.convert_to_strings(split_targets)
for x in range(len(target_strings)):
transcript, reference = decoded_output[x][0], target_strings[x][0]
self.calculate_metric(
transcript=transcript,
reference=reference
)
class CharErrorRate(ErrorRate):
def __init__(self,
decoder: Decoder,
target_decoder: GreedyDecoder,
save_output: bool = False,
dist_sync_on_step: bool = False):
super().__init__(
decoder=decoder,
target_decoder=target_decoder,
save_output=save_output,
dist_sync_on_step=dist_sync_on_step
)
self.decoder = decoder
self.target_decoder = target_decoder
self.save_output = save_output
self.add_state("cer", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("n_chars", default=torch.tensor(0), dist_reduce_fx="sum")
def calculate_metric(self, transcript, reference):
cer_inst = self.cer_calc(transcript, reference)
self.cer += cer_inst
self.n_chars += len(reference.replace(' ', ''))
def compute(self):
cer = float(self.cer) / self.n_chars
return cer.item() * 100
def cer_calc(self, s1, s2):
"""
Computes the Character Error Rate, defined as the edit distance.
Arguments:
s1 (string): space-separated sentence
s2 (string): space-separated sentence
"""
s1, s2, = s1.replace(' ', ''), s2.replace(' ', '')
return Lev.distance(s1, s2)
class WordErrorRate(ErrorRate):
def __init__(self,
decoder: Decoder,
target_decoder: GreedyDecoder,
save_output: bool = False,
dist_sync_on_step: bool = False):
super().__init__(
decoder=decoder,
target_decoder=target_decoder,
save_output=save_output,
dist_sync_on_step=dist_sync_on_step
)
self.decoder = decoder
self.target_decoder = target_decoder
self.save_output = save_output
self.add_state("wer", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("n_tokens", default=torch.tensor(0), dist_reduce_fx="sum")
def calculate_metric(self, transcript, reference):
wer_inst = self.wer_calc(transcript, reference)
self.wer += wer_inst
self.n_tokens += len(reference.split())
def compute(self):
wer = float(self.wer) / self.n_tokens
return wer.item() * 100
def wer_calc(self, s1, s2):
"""
Computes the Word Error Rate, defined as the edit distance between the
two provided sentences after tokenizing to words.
Arguments:
s1 (string): space-separated sentence
s2 (string): space-separated sentence
"""
# build mapping of words to integers
b = set(s1.split() + s2.split())
word2char = dict(zip(b, range(len(b))))
# map the words to a char array (Levenshtein packages only accepts
# strings)
w1 = [chr(word2char[w]) for w in s1.split()]
w2 = [chr(word2char[w]) for w in s2.split()]
return Lev.distance(''.join(w1), ''.join(w2))
@torch.no_grad()
def run_evaluation(test_loader,
model,
decoder: Decoder,
device: torch.device,
target_decoder: Decoder,
precision: int):
model.eval()
wer = WordErrorRate(
decoder=decoder,
target_decoder=target_decoder
)
cer = CharErrorRate(
decoder=decoder,
target_decoder=target_decoder
)
for i, (batch) in tqdm(enumerate(test_loader), total=len(test_loader)):
inputs, targets, input_percentages, target_sizes = batch
input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
inputs = inputs.to(device)
with autocast(enabled=precision == 16):
out, output_sizes = model(inputs, input_sizes)
decoded_output, _ = decoder.decode(out, output_sizes)
wer.update(
preds=out,
preds_sizes=output_sizes,
targets=targets,
target_sizes=target_sizes
)
cer.update(
preds=out,
preds_sizes=output_sizes,
targets=targets,
target_sizes=target_sizes
)
return wer.compute(), cer.compute()
```
avg_line_length: 33.789474 | max_line_length: 81 | alphanum_fraction: 0.602631
content_no_comment:
```python
from abc import ABC, abstractmethod
import torch
from torch.cuda.amp import autocast
from tqdm import tqdm
from deepspeech_pytorch.decoder import Decoder, GreedyDecoder
from pytorch_lightning.metrics import Metric
import Levenshtein as Lev
class ErrorRate(Metric, ABC):
def __init__(self,
decoder: Decoder,
target_decoder: GreedyDecoder,
save_output: bool = False,
dist_sync_on_step: bool = False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.decoder = decoder
self.target_decoder = target_decoder
self.save_output = save_output
@abstractmethod
def calculate_metric(self, transcript, reference):
raise NotImplementedError
def update(self, preds: torch.Tensor,
preds_sizes: torch.Tensor,
targets: torch.Tensor,
target_sizes: torch.Tensor):
split_targets = []
offset = 0
for size in target_sizes:
split_targets.append(targets[offset:offset + size])
offset += size
decoded_output, _ = self.decoder.decode(preds, preds_sizes)
target_strings = self.target_decoder.convert_to_strings(split_targets)
for x in range(len(target_strings)):
transcript, reference = decoded_output[x][0], target_strings[x][0]
self.calculate_metric(
transcript=transcript,
reference=reference
)
class CharErrorRate(ErrorRate):
def __init__(self,
decoder: Decoder,
target_decoder: GreedyDecoder,
save_output: bool = False,
dist_sync_on_step: bool = False):
super().__init__(
decoder=decoder,
target_decoder=target_decoder,
save_output=save_output,
dist_sync_on_step=dist_sync_on_step
)
self.decoder = decoder
self.target_decoder = target_decoder
self.save_output = save_output
self.add_state("cer", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("n_chars", default=torch.tensor(0), dist_reduce_fx="sum")
def calculate_metric(self, transcript, reference):
cer_inst = self.cer_calc(transcript, reference)
self.cer += cer_inst
self.n_chars += len(reference.replace(' ', ''))
def compute(self):
cer = float(self.cer) / self.n_chars
return cer.item() * 100
def cer_calc(self, s1, s2):
s1, s2, = s1.replace(' ', ''), s2.replace(' ', '')
return Lev.distance(s1, s2)
class WordErrorRate(ErrorRate):
def __init__(self,
decoder: Decoder,
target_decoder: GreedyDecoder,
save_output: bool = False,
dist_sync_on_step: bool = False):
super().__init__(
decoder=decoder,
target_decoder=target_decoder,
save_output=save_output,
dist_sync_on_step=dist_sync_on_step
)
self.decoder = decoder
self.target_decoder = target_decoder
self.save_output = save_output
self.add_state("wer", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("n_tokens", default=torch.tensor(0), dist_reduce_fx="sum")
def calculate_metric(self, transcript, reference):
wer_inst = self.wer_calc(transcript, reference)
self.wer += wer_inst
self.n_tokens += len(reference.split())
def compute(self):
wer = float(self.wer) / self.n_tokens
return wer.item() * 100
def wer_calc(self, s1, s2):
b = set(s1.split() + s2.split())
word2char = dict(zip(b, range(len(b))))
w1 = [chr(word2char[w]) for w in s1.split()]
w2 = [chr(word2char[w]) for w in s2.split()]
return Lev.distance(''.join(w1), ''.join(w2))
@torch.no_grad()
def run_evaluation(test_loader,
model,
decoder: Decoder,
device: torch.device,
target_decoder: Decoder,
precision: int):
model.eval()
wer = WordErrorRate(
decoder=decoder,
target_decoder=target_decoder
)
cer = CharErrorRate(
decoder=decoder,
target_decoder=target_decoder
)
for i, (batch) in tqdm(enumerate(test_loader), total=len(test_loader)):
inputs, targets, input_percentages, target_sizes = batch
input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
inputs = inputs.to(device)
with autocast(enabled=precision == 16):
out, output_sizes = model(inputs, input_sizes)
decoded_output, _ = decoder.decode(out, output_sizes)
wer.update(
preds=out,
preds_sizes=output_sizes,
targets=targets,
target_sizes=target_sizes
)
cer.update(
preds=out,
preds_sizes=output_sizes,
targets=targets,
target_sizes=target_sizes
)
return wer.compute(), cer.compute()
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
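The `wer_calc` in this record encodes each distinct word as a single character so that a plain character-level Levenshtein distance counts word-level edits; a standalone illustration of that trick:

```python
# Standalone illustration of the word-to-char mapping used in wer_calc above.
import Levenshtein as Lev

s1, s2 = "the cat sat", "the cat sat down"
vocab = {w: chr(65 + i) for i, w in enumerate(set(s1.split() + s2.split()))}
w1 = "".join(vocab[w] for w in s1.split())
w2 = "".join(vocab[w] for w in s2.split())
print(Lev.distance(w1, w2))  # 1 -> one inserted word
```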
Record 10
hexsha: 1c479507647de6ce6ea1f9c6b660694c87468544 | size: 4,167 | ext: py | lang: Python
max_stars: kinoute/google-research @ 4a59cab927579ea9722e43252c695de5da4eb5e2 | path: polish/utils/host_call_fn.py | licenses: ["Apache-2.0"] | count: 11 | events: 2020-01-29T07:25:04.000Z to 2022-03-05T16:01:21.000Z
max_issues: RubensZimbres/google-research @ 562c7c6ef959cb3cb382b1b660ccc45e8f5289c4 | path: polish/utils/host_call_fn.py | licenses: ["Apache-2.0"] | count: 13 | events: 2020-01-28T22:19:53.000Z to 2022-02-10T00:39:26.000Z
max_forks: RubensZimbres/google-research @ 562c7c6ef959cb3cb382b1b660ccc45e8f5289c4 | path: polish/utils/host_call_fn.py | licenses: ["Apache-2.0"] | count: 2 | events: 2020-02-27T11:09:49.000Z to 2021-08-25T07:32:15.000Z
content:
```python
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""APIs for building host call function for TF estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import tensorflow as tf
from tensorflow.contrib import summary as contrib_summary
@gin.configurable
def build_host_call_fn_every_n_global_steps(
params,
names_and_tensors,
n,
summary_dir=None):
"""Wrapper to build `host_call` for `TPUEstimator`.
This function records the summaries if global_step % n == 0
Args:
params: A `tf.contrib.train.HParams` object.
names_and_tensors: List of elemens such as `("loss", loss)`. These are the
tensors' names and values.
n: Defines the frequency of recording the summaries.
Performance-wise on TPU, it is better to set n equal to
the number of iterations per loop.
In TPU, each training loop (each call to estimator.train)
consists of multiple iterations. There is a communication overhead
between host and TPU per training loop to send/receive data.
As such, it is better not to interrupt the TPU loop for saving
the summaries. You may also need to save the summaries
after multiple training loops.
summary_dir: Summary directory used to store TF summaries.
Returns:
A pair of `(host_call_fn, tensors)` for `TPUEstimatorSpec`.
"""
del params
assert summary_dir, 'Please specify a directory for summaries.'
names, tensors = zip(*names_and_tensors)
def host_call_fn(global_step, *tensors):
"""Training host call."""
global_step = global_step[0]
with contrib_summary.create_file_writer(summary_dir +
'/metrics').as_default():
with contrib_summary.record_summaries_every_n_global_steps(
n=n, global_step=global_step):
for i, tensor in enumerate(tensors):
contrib_summary.scalar(names[i], tensor[0], step=global_step)
return contrib_summary.all_summary_ops()
global_step = tf.reshape(tf.train.get_or_create_global_step(), [1])
tensors = [
tf.expand_dims(tf.cast(t, dtype=tf.float32), axis=0) for t in tensors
]
return (host_call_fn, [global_step] + tensors)
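# Hedged usage sketch (not part of the original module; `loss`, `train_op` and
# `FLAGS.model_dir` are assumed to exist in the caller's model_fn): the
# returned `(host_call_fn, tensors)` pair is passed to `TPUEstimatorSpec` as
# `host_call`.
#
#   host_call = build_host_call_fn_every_n_global_steps(
#       params=None,
#       names_and_tensors=[("loss", loss)],
#       n=100,  # e.g. one summary per 100-iteration training loop
#       summary_dir=FLAGS.model_dir)
#   return tf.contrib.tpu.TPUEstimatorSpec(
#       mode=tf.estimator.ModeKeys.TRAIN,
#       loss=loss,
#       train_op=train_op,
#       host_call=host_call)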
@gin.configurable
def build_host_call_fn(
params,
names_and_tensors,
summary_dir=None):
"""Wrapper to build `host_call` for `TPUEstimator`.
  Adapted from: experimental/users/hyhieu/patch_based_unsup/utils.py
Args:
params: A `tf.contrib.train.HParams` object.
    names_and_tensors: List of elements such as `("loss", loss)`. These are the
tensors' names and values.
summary_dir: Summary directory used to store TF summaries.
Returns:
A pair of `(host_call_fn, tensors)` for `TPUEstimatorSpec`.
"""
del params
assert summary_dir, 'Please specify a directory for summaries.'
names, tensors = zip(*names_and_tensors)
def host_call_fn(global_step, *tensors):
"""Training host call."""
global_step = global_step[0]
with contrib_summary.create_file_writer(summary_dir +
'/metrics').as_default():
with contrib_summary.always_record_summaries():
for i, tensor in enumerate(tensors):
contrib_summary.scalar(names[i], tensor[0], step=global_step)
return contrib_summary.all_summary_ops()
global_step = tf.reshape(tf.train.get_or_create_global_step(), [1])
tensors = [
tf.expand_dims(tf.cast(t, dtype=tf.float32), axis=0) for t in tensors
]
return (host_call_fn, [global_step] + tensors)
| 36.552632
| 78
| 0.708903
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import tensorflow as tf
from tensorflow.contrib import summary as contrib_summary
@gin.configurable
def build_host_call_fn_every_n_global_steps(
params,
names_and_tensors,
n,
summary_dir=None):
del params
assert summary_dir, 'Please specify a directory for summaries.'
names, tensors = zip(*names_and_tensors)
def host_call_fn(global_step, *tensors):
global_step = global_step[0]
with contrib_summary.create_file_writer(summary_dir +
'/metrics').as_default():
with contrib_summary.record_summaries_every_n_global_steps(
n=n, global_step=global_step):
for i, tensor in enumerate(tensors):
contrib_summary.scalar(names[i], tensor[0], step=global_step)
return contrib_summary.all_summary_ops()
global_step = tf.reshape(tf.train.get_or_create_global_step(), [1])
tensors = [
tf.expand_dims(tf.cast(t, dtype=tf.float32), axis=0) for t in tensors
]
return (host_call_fn, [global_step] + tensors)
@gin.configurable
def build_host_call_fn(
params,
names_and_tensors,
summary_dir=None):
del params
assert summary_dir, 'Please specify a directory for summaries.'
names, tensors = zip(*names_and_tensors)
def host_call_fn(global_step, *tensors):
global_step = global_step[0]
with contrib_summary.create_file_writer(summary_dir +
'/metrics').as_default():
with contrib_summary.always_record_summaries():
for i, tensor in enumerate(tensors):
contrib_summary.scalar(names[i], tensor[0], step=global_step)
return contrib_summary.all_summary_ops()
global_step = tf.reshape(tf.train.get_or_create_global_step(), [1])
tensors = [
tf.expand_dims(tf.cast(t, dtype=tf.float32), axis=0) for t in tensors
]
return (host_call_fn, [global_step] + tensors)
| true
| true
|
1c47967d6dc098c03dfcc9f615566eb99f55f87c
| 78,681
|
py
|
Python
|
src/transformers/modeling_tf_utils.py
|
holazzer/transformers
|
53191d75ecca21c028077b3227f9ac47379e4690
|
[
"Apache-2.0"
] | 9
|
2021-07-31T12:02:20.000Z
|
2021-09-21T00:40:43.000Z
|
src/transformers/modeling_tf_utils.py
|
holazzer/transformers
|
53191d75ecca21c028077b3227f9ac47379e4690
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/modeling_tf_utils.py
|
holazzer/transformers
|
53191d75ecca21c028077b3227f9ac47379e4690
|
[
"Apache-2.0"
] | 1
|
2021-10-01T05:32:22.000Z
|
2021-10-01T05:32:22.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF general model utils."""
import functools
import inspect
import os
import re
import warnings
from typing import Dict, List, Optional, Union
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.saving import hdf5_format
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
WEIGHTS_NAME,
ModelOutput,
PushToHubMixin,
cached_path,
copy_func,
hf_bucket_url,
is_offline_mode,
is_remote_url,
)
from .generation_tf_utils import TFGenerationMixin
from .tokenization_utils_base import BatchEncoding
from .utils import logging
logger = logging.get_logger(__name__)
tf_logger = tf.get_logger()
TFModelInputType = Union[
List[tf.Tensor], List[np.ndarray], Dict[str, tf.Tensor], Dict[str, np.ndarray], np.ndarray, tf.Tensor
]
class TFModelUtilsMixin:
"""
A few utilities for :obj:`tf.keras.Model`, to be used as a mixin.
"""
def num_parameters(self, only_trainable: bool = False) -> int:
"""
Get the number of (optionally, trainable) parameters in the model.
Args:
only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of trainable parameters
Returns:
:obj:`int`: The number of parameters.
"""
if only_trainable:
return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables))
else:
return self.count_params()
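    # Hedged usage sketch (illustrative, not part of the original file): any
    # model inheriting this mixin can report its parameter counts.
    #
    #   total = model.num_parameters()
    #   trainable = model.num_parameters(only_trainable=True)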
def keras_serializable(cls):
"""
Decorate a Keras Layer class to support Keras serialization.
This is done by:
1. Adding a :obj:`transformers_config` dict to the Keras config dictionary in :obj:`get_config` (called by Keras at
       serialization time).
2. Wrapping :obj:`__init__` to accept that :obj:`transformers_config` dict (passed by Keras at deserialization
time) and convert it to a config object for the actual layer initializer.
3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not
need to be supplied in :obj:`custom_objects` in the call to :obj:`tf.keras.models.load_model`.
Args:
cls (a :obj:`tf.keras.layers.Layers subclass`):
Typically a :obj:`TF.MainLayer` class in this project, in general must accept a :obj:`config` argument to
its initializer.
Returns:
The same class object, with modifications for Keras deserialization.
"""
initializer = cls.__init__
config_class = getattr(cls, "config_class", None)
if config_class is None:
raise AttributeError("Must set `config_class` to use @keras_serializable")
@functools.wraps(initializer)
def wrapped_init(self, *args, **kwargs):
config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None)
if isinstance(config, dict):
config = config_class.from_dict(config)
initializer(self, config, *args, **kwargs)
elif isinstance(config, PretrainedConfig):
if len(args) > 0:
initializer(self, *args, **kwargs)
else:
initializer(self, config, *args, **kwargs)
else:
raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)")
self._config = config
self._kwargs = kwargs
cls.__init__ = wrapped_init
if not hasattr(cls, "get_config"):
raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses")
if hasattr(cls.get_config, "_is_default"):
def get_config(self):
cfg = super(cls, self).get_config()
cfg["config"] = self._config.to_dict()
cfg.update(self._kwargs)
return cfg
cls.get_config = get_config
cls._keras_serializable = True
if hasattr(tf.keras.utils, "register_keras_serializable"):
cls = tf.keras.utils.register_keras_serializable()(cls)
return cls
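# Hedged usage sketch (hypothetical layer and config, not from this file): a
# main layer that declares `config_class` becomes round-trippable through
# Keras serialization once decorated.
#
#   @keras_serializable
#   class TFMyMainLayer(tf.keras.layers.Layer):
#       config_class = MyConfig  # assumed PretrainedConfig subclass
#
#       def __init__(self, config, **kwargs):
#           super().__init__(**kwargs)
#           self.config = config
#
#   # After saving, tf.keras.models.load_model(...) restores the layer without
#   # passing `custom_objects` (on TF versions that support registration).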
class TFCausalLanguageModelingLoss:
"""
Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token.
.. note::
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
"""
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
# make sure only labels that are not equal to -100 affect the loss
active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
return loss_fn(labels, reduced_logits)
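# Hedged numeric sketch (relies on the module-level `shape_list` helper defined
# later in this file): labels of -100 are dropped before the loss is computed,
# so with `reduction=NONE` you get one loss value per *unmasked* token.
#
#   logits = tf.random.normal((1, 4, 10))    # (batch, seq_len, vocab)
#   labels = tf.constant([[5, 2, -100, 7]])  # third position is ignored
#   per_token = TFCausalLanguageModelingLoss().compute_loss(labels, logits)
#   assert per_token.shape == (3,)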
class TFQuestionAnsweringLoss:
"""
Loss function suitable for question answering.
"""
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
start_loss = loss_fn(labels["start_position"], logits[0])
end_loss = loss_fn(labels["end_position"], logits[1])
return (start_loss + end_loss) / 2.0
class TFTokenClassificationLoss:
"""
Loss function suitable for token classification.
.. note::
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
"""
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
# make sure only labels that are not equal to -100
# are taken into account as loss
if tf.math.reduce_any(labels == -1):
warnings.warn("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.")
active_loss = tf.reshape(labels, (-1,)) != -1
else:
active_loss = tf.reshape(labels, (-1,)) != -100
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
return loss_fn(labels, reduced_logits)
class TFSequenceClassificationLoss:
"""
Loss function suitable for sequence classification.
"""
def compute_loss(self, labels, logits):
if len(shape_list(logits)) == 1 or shape_list(logits)[1] == 1:
loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
else:
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
return loss_fn(labels, logits)
class TFMultipleChoiceLoss:
"""Loss function suitable for multiple choice tasks."""
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
return loss_fn(labels, logits)
class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss):
"""
Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens.
.. note::
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
"""
class TFNextSentencePredictionLoss:
"""
Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence.
.. note::
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
"""
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
# make sure only labels that are not equal to -100
# are taken into account as loss
next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss)
next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss)
return loss_fn(next_sentence_label, next_sentence_reduced_logits)
def booleans_processing(config, **kwargs):
"""
Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or
graph)
Args:
config (:class:`~transformers.PretrainedConfig`):
The config of the running model.
**kwargs:
The boolean parameters
Returns:
A dictionary with the proper values for each boolean
"""
final_booleans = {}
if tf.executing_eagerly():
final_booleans["output_attentions"] = (
kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
)
final_booleans["output_hidden_states"] = (
kwargs["output_hidden_states"]
if kwargs["output_hidden_states"] is not None
else config.output_hidden_states
)
final_booleans["return_dict"] = (
kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict
)
if "use_cache" in kwargs:
final_booleans["use_cache"] = kwargs["use_cache"] if kwargs["use_cache"] is not None else config.use_cache
else:
if (
kwargs["output_attentions"] is not None
or kwargs["output_hidden_states"] is not None
or ("use_cache" in kwargs and kwargs["use_cache"] is not None)
):
tf_logger.warning(
"The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model."
"They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)."
)
final_booleans["output_attentions"] = config.output_attentions
final_booleans["output_hidden_states"] = config.output_hidden_states
if kwargs["return_dict"] is not None:
tf_logger.warning(
"The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."
)
final_booleans["return_dict"] = True
if "use_cache" in kwargs:
final_booleans["use_cache"] = config.use_cache
return final_booleans
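# Hedged usage sketch (assumes `config` is a loaded `PretrainedConfig`): in
# eager mode an explicit call-time flag wins, while `None` falls back to the
# config value.
#
#   flags = booleans_processing(
#       config,
#       output_attentions=True,     # explicit: kept as-is
#       output_hidden_states=None,  # -> config.output_hidden_states
#       return_dict=None)           # -> config.return_dict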
def input_processing(func, config, input_ids, **kwargs):
"""
Process the input of each TensorFlow model including the booleans. In case of a list of symbolic inputs, each input
has to be named accordingly to the parameters name, i.e. `input_ids = tf.keras.Input(shape=(128,), dtype='int32',
name="input_ids")` otherwise the order of the tensors will not be guaranteed during the training.
Args:
func (:obj:`callable`):
The callable function of the TensorFlow model.
config (:class:`~transformers.PretrainedConfig`):
The config of the running model.
**kwargs:
The inputs of the model.
Returns:
        A dictionary of the parsed inputs, ready to be passed to the model.
"""
signature = dict(inspect.signature(func).parameters)
signature.pop("kwargs", None)
signature.pop("self", None)
parameter_names = list(signature.keys())
output = {}
allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray)
if "inputs" in kwargs["kwargs_call"]:
warnings.warn(
"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
FutureWarning,
)
output["input_ids"] = kwargs["kwargs_call"].pop("inputs")
if "decoder_cached_states" in kwargs["kwargs_call"]:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states")
if len(kwargs["kwargs_call"]) > 0:
raise ValueError(
f"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}."
)
kwargs.pop("kwargs_call")
for k, v in kwargs.items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
if isinstance(input_ids, (tuple, list)):
for i, input in enumerate(input_ids):
# EagerTensors don't allow to use the .name property so we check for a real Tensor
if type(input) == tf.Tensor:
# Tensor names have always the pattern `name:id` then we check only the
# `name` part
tensor_name = input.name.split(":")[0]
if tensor_name in parameter_names:
output[tensor_name] = input
else:
output[parameter_names[i]] = input
elif isinstance(input, allowed_types) or input is None:
output[parameter_names[i]] = input
else:
raise ValueError(
f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}."
)
elif isinstance(input_ids, (dict, BatchEncoding)):
if "inputs" in input_ids:
warnings.warn(
"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
FutureWarning,
)
output["input_ids"] = input_ids.pop("inputs")
if "decoder_cached_states" in input_ids:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
output["past_key_values"] = input_ids.pop("decoder_cached_states")
for k, v in dict(input_ids).items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
elif k not in parameter_names and "args" not in parameter_names:
logger.warning(
f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored."
)
continue
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
else:
if isinstance(input_ids, tf.Tensor) or input_ids is None:
output[parameter_names[0]] = input_ids
else:
raise ValueError(
f"Data of type {type(input_ids)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}."
)
for name in parameter_names:
if name not in list(output.keys()) and name != "args":
output[name] = kwargs.pop(name, signature[name].default)
# When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)
# So to respect the proper output we have to add this exception
if "args" in output:
if output["args"] is not None and type(output["args"]) == tf.Tensor:
tensor_name = output["args"].name.split(":")[0]
output[tensor_name] = output["args"]
else:
# `args` in this case is always the first parameter, then `input_ids`
output["input_ids"] = output["args"]
del output["args"]
if "kwargs" in output:
del output["kwargs"]
boolean_dict = {
k: v
for k, v in output.items()
if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
}
output.update(
booleans_processing(
config=config,
**boolean_dict,
)
)
return output
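# Hedged sketch echoing the docstring requirement (hypothetical `model`): when
# a plain list of symbolic tensors is passed, each must be named after a
# parameter of the model's `call()` so the mapping above can recover it from
# `input.name`.
#
#   input_ids = tf.keras.Input(shape=(128,), dtype="int32", name="input_ids")
#   attention_mask = tf.keras.Input(shape=(128,), dtype="int32",
#                                   name="attention_mask")
#   outputs = model([input_ids, attention_mask])  # mapped by name, not order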
def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
"""
Detect missing and unexpected layers and load the TF weights accordingly to their names and shapes.
Args:
model (:obj:`tf.keras.models.Model`):
The model to load the weights into.
resolved_archive_file (:obj:`str`):
The location of the H5 file.
ignore_mismatched_sizes (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to ignore weights with shapes that don't match between the checkpoint of the model.
Returns:
Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the
mismatched layers.
"""
missing_layers = []
unexpected_layers = []
mismatched_layers = []
# Read the H5 file
with h5py.File(resolved_archive_file, "r") as f:
# Retrieve the name of each layer from the H5 file
saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names"))
# Find the missing layers from the high level list of layers
missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name)
# Find the unexpected layers from the high level list of layers
unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers]))
saved_weight_names_set = set()
symbolic_weights_names = set()
weight_value_tuples = []
# Compute missing and unexpected sub layers
# Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]
for layer in model.layers:
# if layer_name from the H5 file belongs to the layers from the instantiated model
if layer.name in saved_h5_model_layers_name:
# Get the H5 layer object from its name
h5_layer_object = f[layer.name]
# Get all the weights as a list from the layer object
symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
saved_weights = {}
# Create a dict from the H5 saved model that looks like {"weight_name": weight_value}
# And a set with only the names
for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"):
# TF names always start with the model name so we ignore it
name = "/".join(weight_name.split("/")[1:])
if _prefix is not None:
name = _prefix + "/" + name
saved_weights[name] = np.asarray(h5_layer_object[weight_name])
# Add the updated name to the final list for computing missing/unexpected values
saved_weight_names_set.add(name)
# Loop over each weights from the instantiated model and compare with the weights from the H5 file
for symbolic_weight in symbolic_weights:
# TF names always start with the model name so we ignore it
if _prefix is not None:
                        delimiter = len(_prefix.split("/"))
                        symbolic_weight_name = "/".join(
                            symbolic_weight.name.split("/")[:delimiter]
                            + symbolic_weight.name.split("/")[delimiter + 1 :]
)
else:
symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:])
# here we check if the current weight is among the weights from the H5 file
# If yes, get the weight_value of the corresponding weight from the H5 file
# If not, make the value to None
saved_weight_value = saved_weights.get(symbolic_weight_name, None)
# Add the updated name to the final list for computing missing/unexpected values
symbolic_weights_names.add(symbolic_weight_name)
# If the current weight is found
if saved_weight_value is not None:
# Check if the shape of the current weight and the one from the H5 file are different
if K.int_shape(symbolic_weight) != saved_weight_value.shape:
# If yes we reshape the weight from the H5 file accordingly to the current weight
# If the two shapes are not compatible we raise an issue
try:
array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
except ValueError as e:
if ignore_mismatched_sizes:
mismatched_layers.append(
(symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight))
)
continue
else:
raise e
else:
array = saved_weight_value
# We create the tuple that will be loaded and add it to the final list
weight_value_tuples.append((symbolic_weight, array))
# Load all the weights
K.batch_set_value(weight_value_tuples)
# Compute the missing and unexpected layers
missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set))
unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names))
return missing_layers, unexpected_layers, mismatched_layers
def init_copy_embeddings(old_embeddings, new_num_tokens):
r"""
This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case
new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be
kept or not. Example:
- if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4]
- mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1]
- if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5]
- mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4]
"""
old_num_tokens, old_embedding_dim = shape_list(old_embeddings)
size_diff = new_num_tokens - old_num_tokens
# initialize new embeddings
# Copy token embeddings from the previous ones
if tf.math.greater(size_diff, 0):
# if the new size is greater than the old one, we extend the current embeddings with a padding until getting new size
# and we create a mask to properly identify the padded values and be replaced by the values of the newly created
# embeddings
current_weights = tf.pad(
old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1
)
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True)
mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False)
else:
        # if the new size is lower than the old one, we take the current embeddings until the new size
current_weights = tf.slice(
old_embeddings.value(),
tf.convert_to_tensor([0, 0]),
tf.convert_to_tensor([new_num_tokens, old_embedding_dim]),
)
mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True)
return mask, current_weights
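# Hedged numeric sketch of the docstring example (hypothetical values):
#
#   old = tf.Variable(tf.ones((4, 2)))  # 4 tokens, embedding dim 2
#   mask, weights = init_copy_embeddings(old, new_num_tokens=5)
#   # mask    -> [[True], [True], [True], [True], [False]]
#   # weights -> rows 0-3 copied from `old`, row 4 padded with -1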
class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin):
r"""
Base class for all TF models.
:class:`~transformers.TFPreTrainedModel` takes care of storing the configuration of the models and handles methods
for loading, downloading and saving models as well as a few methods common to all models to:
* resize the input embeddings,
* prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
:class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
derived classes of the same architecture adding modules on top of the base model.
"""
config_class = None
base_model_prefix = ""
# a list of re pattern of tensor names to ignore from the model when loading the model weights
# (and avoid unnecessary warnings).
_keys_to_ignore_on_load_missing = None
# a list of re pattern of tensor names to ignore from the weights when loading the model weights
# (and avoid unnecessary warnings).
_keys_to_ignore_on_load_unexpected = None
_requires_load_weight_prefix = False
@property
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
"""
Dummy inputs to build the network.
Returns:
:obj:`Dict[str, tf.Tensor]`: The dummy inputs.
"""
return {
"input_ids": tf.constant(DUMMY_INPUTS),
}
def __init__(self, config, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
if not isinstance(config, PretrainedConfig):
raise ValueError(
f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
"`PretrainedConfig`. To create a model from a pretrained model use "
f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
)
# Save config and origin of the pretrained weights if given in model
self.config = config
self.name_or_path = config.name_or_path
@classmethod
def _from_config(cls, config, **kwargs):
"""
All context managers that the model should be initialized under go here.
"""
return cls(config, **kwargs)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
}
]
)
def serving(self, inputs):
"""
Method used for serving the model.
Args:
inputs (:obj:`Dict[str, tf.Tensor]`):
The input of the saved model as a dictionary of tensors.
"""
output = self.call(inputs)
return self.serving_output(output)
    def serving_output(self, output):
"""
Prepare the output of the saved model. Each model must implement this function.
Args:
output (:obj:`~transformers.TFBaseModelOutput`):
The output returned by the model.
"""
raise NotImplementedError
def get_input_embeddings(self) -> tf.keras.layers.Layer:
"""
Returns the model's input embeddings layer.
Returns:
:obj:`tf.Variable`: The embeddings layer mapping vocabulary to hidden states.
"""
main_layer = getattr(self, self.base_model_prefix, self)
if main_layer is not self:
return main_layer.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value):
"""
Set model's input embeddings
Args:
value (:obj:`tf.Variable`):
The new weights mapping hidden states to vocabulary.
"""
main_layer = getattr(self, self.base_model_prefix)
if main_layer is None:
raise NotImplementedError("The model does not implements the base_model_prefix attribute.")
try:
main_layer.set_input_embeddings(value)
except AttributeError:
logger.info("Building the model")
self(self.dummy_inputs)
main_layer.set_input_embeddings(value)
def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]:
"""
Returns the model's output embeddings
Returns:
:obj:`tf.Variable`: The new weights mapping vocabulary to hidden states.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
return lm_head.get_output_embeddings()
except AttributeError:
logger.info("Building the model")
self(self.dummy_inputs)
                return lm_head.get_output_embeddings()
return None # Overwrite for models with output embeddings
def set_output_embeddings(self, value):
"""
Set model's output embeddings
Args:
value (:obj:`tf.Variable`):
The new weights mapping hidden states to vocabulary.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
lm_head.set_output_embeddings(value)
except AttributeError:
logger.info("Building the model")
self(self.dummy_inputs)
lm_head.set_output_embeddings(value)
def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]:
"""
Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the
embeddings
Return:
:obj:`tf.keras.layers.Layer`: The layer that handles the bias, None if not an LM model.
"""
warnings.warn(
"The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning
)
return self.get_lm_head()
def get_prefix_bias_name(self) -> Union[None, str]:
"""
Get the concatenated _prefix name of the bias from the model name to the parent layer
Return:
:obj:`str`: The _prefix name of the bias.
"""
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
return None
def get_bias(self) -> Union[None, Dict[str, tf.Variable]]:
"""
Dict of bias attached to an LM head. The key represents the name of the bias attribute.
Return:
:obj:`tf.Variable`: The weights representing the bias, None if not an LM model.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
return lm_head.get_bias()
except AttributeError:
self(self.dummy_inputs)
return lm_head.get_bias()
return None
def set_bias(self, value):
"""
Set all the bias in the LM head.
Args:
            value (:obj:`Dict[str, tf.Variable]`):
All the new bias attached to an LM head.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
lm_head.set_bias(value)
except AttributeError:
self(self.dummy_inputs)
lm_head.set_bias(value)
def get_lm_head(self) -> tf.keras.layers.Layer:
"""
        The LM Head layer. This method must be overwritten by all the models that have an LM head.
Return:
:obj:`tf.keras.layers.Layer`: The LM head layer if the model has one, None if not.
"""
return None
def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable:
"""
Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.
Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.
Arguments:
new_num_tokens (:obj:`int`, `optional`):
The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
just returns a pointer to the input tokens :obj:`tf.Variable` module of the model without doing
anything.
Return:
:obj:`tf.Variable`: Pointer to the input tokens Embeddings Module of the model.
"""
if new_num_tokens is None or new_num_tokens == self.config.vocab_size:
return self._get_word_embedding_weight(self.get_input_embeddings())
model_embeds = self._resize_token_embeddings(new_num_tokens)
# Update base model and current model config
self.config.vocab_size = new_num_tokens
return model_embeds
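    # Usage sketch (standard pattern; `tokenizer` is assumed to exist): after
    # adding tokens to a tokenizer, grow the embedding matrix to match.
    #
    #   tokenizer.add_tokens(["<new_token>"])
    #   model.resize_token_embeddings(len(tokenizer))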
def _get_word_embedding_weight(model, embedding_layer):
embeds = getattr(embedding_layer, "weight", None)
if embeds is not None:
return embeds
embeds = getattr(embedding_layer, "decoder", None)
if embeds is not None:
return embeds
# The reason why the attributes don't exist might be
# because the model is not built, so retry getting
# the argument after building the model
model(model.dummy_inputs)
embeds = getattr(embedding_layer, "weight", None)
if embeds is not None:
return embeds
embeds = getattr(embedding_layer, "decoder", None)
if embeds is not None:
return embeds
return None
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings())
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
# if word embeddings are not tied, make sure that lm head bias is resized as well
if self.get_bias() is not None:
old_lm_head_bias = self.get_bias()
new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens)
self.set_bias(new_lm_head_bias)
# if word embeddings are not tied, make sure that lm head decoder is resized as well
if self.get_output_embeddings() is not None:
old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings())
new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens)
self.set_output_embeddings(new_lm_head_decoder)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens):
"""
Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end.
Reducing the size will remove vectors from the end
Args:
old_lm_head_bias (:obj:`tf.Variable`):
Old lm head bias to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the linear matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns None
Return:
:obj:`tf.Variable`: Pointer to the resized bias.
"""
new_lm_head_bias = {}
for attr, weight in old_lm_head_bias.items():
first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)
size_diff = new_num_tokens - old_num_tokens
final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens]
# initialize new bias
if tf.math.greater(size_diff, 0):
padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]
current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1)
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy]
bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True)
bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False)
else:
slice_from = [0] if first_dim is None else [0, 0]
current_bias = tf.slice(
weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape)
)
bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True)
new_bias = self.add_weight(
shape=final_shape,
initializer="zeros",
trainable=True,
name=weight.name.split(":")[0],
)
init_bias = tf.where(bias_mask, current_bias, new_bias.value())
new_bias.assign(init_bias)
new_lm_head_bias[attr] = new_bias
return new_lm_head_bias
def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens):
"""
Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end.
Reducing the size will remove vectors from the end
Args:
old_lm_head_decoder (:obj:`tf.Variable`):
Old lm head decoder to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the linear matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns None
Return:
:obj:`tf.Variable`: Pointer to the resized decoder or None if the output embeddings are different from the
input ones.
"""
new_lm_head_decoder = old_lm_head_decoder
is_input_output_equals = tf.reduce_any(
self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder
)
if old_lm_head_decoder is not None and not is_input_output_equals:
old_embedding_dim = shape_list(old_lm_head_decoder)[1]
decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens)
new_lm_head_decoder = self.add_weight(
shape=(new_num_tokens, old_embedding_dim),
initializer="zeros",
trainable=True,
name=old_lm_head_decoder.name.split(":")[0],
)
init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value())
new_lm_head_decoder.assign(init_decoder)
return new_lm_head_decoder
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable:
"""
Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly
initialized vectors at the end. Reducing the size will remove vectors from the end
Args:
old_embeddings (:obj:`tf.Variable`):
Old embeddings to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
                :obj:`tf.Variable` module of the model without doing anything.
Return:
:obj:`tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if
:obj:`new_num_tokens` is :obj:`None`
"""
old_embedding_dim = shape_list(old_embeddings)[1]
init_range = getattr(self.config, "initializer_range", 0.02)
embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens)
new_embeddings = self.add_weight(
name=old_embeddings.name.split(":")[0],
shape=[new_num_tokens, old_embedding_dim],
initializer=get_initializer(init_range),
dtype=tf.float32,
)
init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value())
new_embeddings.assign(init_embeddings)
return new_embeddings
def prune_heads(self, heads_to_prune):
"""
Prunes heads of the base model.
Arguments:
heads_to_prune (:obj:`Dict[int, List[int]]`):
Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of
heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads
0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
raise NotImplementedError
def save_pretrained(self, save_directory, saved_model=False, version=1, push_to_hub=False, **kwargs):
"""
Save a model and its configuration file to a directory, so that it can be re-loaded using the
:func:`~transformers.TFPreTrainedModel.from_pretrained` class method.
Arguments:
save_directory (:obj:`str`):
Directory to which to save. Will be created if it doesn't exist.
saved_model (:obj:`bool`, `optional`, defaults to :obj:`False`):
If the model has to be saved in saved model format as well or not.
version (:obj:`int`, `optional`, defaults to 1):
The version of the saved model. A saved model needs to be versioned in order to be properly loaded by
TensorFlow Serving as detailed in the official documentation
https://www.tensorflow.org/tfx/serving/serving_basic
push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to push your model to the Hugging Face model hub after saving it.
.. warning::
Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with
:obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are
pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory
instead.
kwargs:
Additional key word arguments passed along to the
:meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method.
"""
if os.path.isfile(save_directory):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
return
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo = self._create_or_get_repo(save_directory, **kwargs)
os.makedirs(save_directory, exist_ok=True)
if saved_model:
saved_model_dir = os.path.join(save_directory, "saved_model", str(version))
self.save(saved_model_dir, include_optimizer=False, signatures=self.serving)
logger.info(f"Saved model created in {saved_model_dir}")
# Save configuration file
self.config.architectures = [self.__class__.__name__[2:]]
self.config.save_pretrained(save_directory)
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME)
self.save_weights(output_model_file)
logger.info(f"Model weights saved in {output_model_file}")
if push_to_hub:
url = self._push_to_hub(repo, commit_message=commit_message)
logger.info(f"Model pushed to the hub in this commit: {url}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Instantiate a pretrained TF 2.0 model from a pre-trained model configuration.
The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
task.
The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
weights are discarded.
Parameters:
pretrained_model_name_or_path (:obj:`str`, `optional`):
Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.TFPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `PyTorch state_dict save file` (e.g, ``./pt_model/pytorch_model.bin``). In
this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the PyTorch model in a
TensorFlow model using the provided conversion scripts and loading the TensorFlow model
afterwards.
- :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
arguments ``config`` and ``state_dict``).
model_args (sequence of positional arguments, `optional`):
All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
config (:obj:`Union[PretrainedConfig, str]`, `optional`):
Can be either:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`,
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `model id` string of a pretrained
model).
- The model was saved using :func:`~transformers.TFPreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
from_pt: (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a PyTorch state_dict save file (see docstring of
``pretrained_model_name_or_path`` argument).
ignore_mismatched_sizes (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
checkpoint with 3 labels).
cache_dir (:obj:`str`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
use_auth_token (:obj:`str` or `bool`, `optional`):
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
mirror(:obj:`str`, `optional`):
Mirror source to accelerate downloads in China. If you are from China and have an accessibility
problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
Please refer to the mirror site for more information.
kwargs (remaining dictionary of keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
.. note::
Passing :obj:`use_auth_token=True` is required when you want to use a private model.
Examples::
>>> from transformers import BertConfig, TFBertModel
>>> # Download model and configuration from huggingface.co and cache.
>>> model = TFBertModel.from_pretrained('bert-base-uncased')
>>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
>>> model = TFBertModel.from_pretrained('./test/saved_model/')
>>> # Update configuration during loading.
>>> model = TFBertModel.from_pretrained('bert-base-uncased', output_attentions=True)
>>> assert model.config.output_attentions == True
>>> # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable).
>>> config = BertConfig.from_json_file('./pt_model/my_pt_model_config.json')
>>> model = TFBertModel.from_pretrained('./pt_model/my_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
cache_dir = kwargs.pop("cache_dir", None)
from_pt = kwargs.pop("from_pt", False)
ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
use_auth_token = kwargs.pop("use_auth_token", None)
revision = kwargs.pop("revision", None)
mirror = kwargs.pop("mirror", None)
load_weight_prefix = kwargs.pop("load_weight_prefix", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
revision=revision,
_from_auto=from_auto_class,
_from_pipeline=from_pipeline,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if os.path.isdir(pretrained_model_name_or_path):
if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint in priority if from_pt
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
else:
raise EnvironmentError(
f"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME]} found in directory "
f"{pretrained_model_name_or_path} or `from_pt` set to False"
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path,
filename=(WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME),
revision=revision,
mirror=mirror,
)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
except EnvironmentError as err:
logger.error(err)
msg = (
f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {TF2_WEIGHTS_NAME}, {WEIGHTS_NAME}.\n\n"
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info(f"loading weights file {archive_file}")
else:
logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
else:
resolved_archive_file = None
config.name_or_path = pretrained_model_name_or_path
# composed models, *e.g.* TFRag, require special treatment when it comes to loading
# pre-trained weights.
if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None:
model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name")
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if from_pt:
from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
# Load from a PyTorch checkpoint
return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True)
# we might need to extend the variable scope for composite models
if load_weight_prefix is not None:
with tf.compat.v1.variable_scope(load_weight_prefix):
model(model.dummy_inputs) # build the network with dummy inputs
else:
model(model.dummy_inputs) # build the network with dummy inputs
assert os.path.isfile(resolved_archive_file), f"Error retrieving file {resolved_archive_file}"
        # 'by_name' allows us to do transfer learning by skipping/adding layers
# see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357
try:
missing_keys, unexpected_keys, mismatched_keys = load_tf_weights(
model,
resolved_archive_file,
ignore_mismatched_sizes=ignore_mismatched_sizes,
_prefix=load_weight_prefix,
)
except OSError as e:
try:
with open(resolved_archive_file) as f:
if f.read().startswith("version"):
raise OSError(
"You seem to have cloned a repository without having git-lfs installed. Please install "
"git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
"you cloned."
)
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise OSError(
"Unable to load weights from h5 file. "
"If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
)
model(model.dummy_inputs) # Make sure restore ops are run
if cls._keys_to_ignore_on_load_missing is not None:
for pat in cls._keys_to_ignore_on_load_missing:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if cls._keys_to_ignore_on_load_unexpected is not None:
for pat in cls._keys_to_ignore_on_load_unexpected:
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
if len(unexpected_keys) > 0:
logger.warning(
f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when "
f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
elif len(mismatched_keys) == 0:
logger.warning(
f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {model.__class__.__name__} for predictions without further training."
)
if len(mismatched_keys) > 0:
mismatched_warning = "\n".join(
[
f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
for key, shape1, shape2 in mismatched_keys
]
)
logger.warning(
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"mismatched_keys": mismatched_keys,
}
return model, loading_info
return model
# To update the docstring, we need to copy the method, otherwise we change the original docstring.
TFPreTrainedModel.push_to_hub = copy_func(TFPreTrainedModel.push_to_hub)
TFPreTrainedModel.push_to_hub.__doc__ = TFPreTrainedModel.push_to_hub.__doc__.format(
object="model", object_class="TFAutoModel", object_files="model checkpoint"
)
class TFConv1D(tf.keras.layers.Layer):
"""
1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
Basically works like a linear layer but the weights are transposed.
Args:
nf (:obj:`int`):
The number of output features.
nx (:obj:`int`):
The number of input features.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation to use to initialize the weights.
kwargs:
Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
"""
def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
super().__init__(**kwargs)
self.nf = nf
self.nx = nx
self.initializer_range = initializer_range
def build(self, input_shape):
self.weight = self.add_weight(
"weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
)
self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())
def call(self, x):
bz, sl = shape_list(x)[:2]
x = tf.reshape(x, [-1, self.nx])
x = tf.matmul(x, self.weight) + self.bias
x = tf.reshape(x, [bz, sl, self.nf])
        return x
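# --- Hedged usage sketch (added for illustration; not part of the original file).
# TFConv1D behaves like a Dense layer whose kernel is stored transposed:
# y = x @ W + b with W of shape [nx, nf]. The layer name below is illustrative.
def _tfconv1d_usage_example():
    conv = TFConv1D(nf=3 * 768, nx=768, name="c_attn")  # GPT-2-style qkv projection
    hidden_states = tf.random.uniform([2, 5, 768])      # [batch, seq_len, nx]
    projected = conv(hidden_states)                     # [batch, seq_len, nf]
    return shape_list(projected)                        # -> [2, 5, 2304]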
class TFSharedEmbeddings(tf.keras.layers.Layer):
r"""
Construct shared token embeddings.
    The weights of the embedding layer are usually shared with the weights of the linear decoder when doing language
modeling.
Args:
vocab_size (:obj:`int`):
The size of the vocabulary, e.g., the number of unique tokens.
hidden_size (:obj:`int`):
The size of the embedding vectors.
initializer_range (:obj:`float`, `optional`):
The standard deviation to use when initializing the weights. If no value is provided, it will default to
:math:`1/\sqrt{hidden\_size}`.
kwargs:
Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
"""
def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.initializer_range = hidden_size ** -0.5 if initializer_range is None else initializer_range
def build(self, input_shape):
"""
        Build the shared token embedding layer. Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
self.weight = self.add_weight(
"weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)
)
super().build(input_shape)
def get_config(self):
config = {
"vocab_size": self.vocab_size,
"hidden_size": self.hidden_size,
"initializer_range": self.initializer_range,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor:
"""
Get token embeddings of inputs or decode final hidden state.
Args:
inputs (:obj:`tf.Tensor`):
In embedding mode, should be an int64 tensor with shape :obj:`[batch_size, length]`.
In linear mode, should be a float tensor with shape :obj:`[batch_size, length, hidden_size]`.
mode (:obj:`str`, defaults to :obj:`"embedding"`):
                A valid value is either :obj:`"embedding"` or :obj:`"linear"`; the first indicates that the layer
                should be used as an embedding layer, the second that the layer should be used as a linear decoder.
Returns:
:obj:`tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape
:obj:`[batch_size, length, embedding_size]`.
            In linear mode, the output is a float32 tensor with shape :obj:`[batch_size, length, vocab_size]`.
Raises:
ValueError: if :obj:`mode` is not valid.
Shared weights logic is adapted from `here
<https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24>`__.
"""
if mode == "embedding":
return self._embedding(inputs)
elif mode == "linear":
return self._linear(inputs)
else:
raise ValueError(f"mode {mode} is not valid.")
def _embedding(self, input_ids):
"""Applies embedding based on inputs tensor."""
return tf.gather(self.weight, input_ids)
def _linear(self, inputs):
"""
Computes logits by running inputs through a linear layer.
Args:
inputs: A float32 tensor with shape [..., hidden_size]
Returns:
float32 tensor with shape [..., vocab_size].
"""
first_dims = shape_list(inputs)[:-1]
x = tf.reshape(inputs, [-1, self.hidden_size])
logits = tf.matmul(x, self.weight, transpose_b=True)
return tf.reshape(logits, first_dims + [self.vocab_size])
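# --- Hedged usage sketch (added for illustration; not part of the original file).
# The same weight matrix serves as the input embedding ("embedding" mode) and as
# a tied output projection ("linear" mode).
def _tfsharedembeddings_usage_example():
    shared = TFSharedEmbeddings(vocab_size=100, hidden_size=16, name="shared")
    input_ids = tf.constant([[1, 2, 3]])
    embeds = shared(input_ids, mode="embedding")  # [1, 3, 16]
    logits = shared(embeds, mode="linear")        # [1, 3, 100]
    return shape_list(logits)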
class TFSequenceSummary(tf.keras.layers.Layer):
"""
    Compute a single vector summary of a sequence of hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
config class of your model for the default values it uses):
- **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are:
- :obj:`"last"` -- Take the last token hidden state (like XLNet)
- :obj:`"first"` -- Take the first token hidden state (like Bert)
- :obj:`"mean"` -- Take the mean of all tokens hidden states
- :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
                - :obj:`"attn"` -- Not implemented for now; would use multi-head attention
- **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction.
- **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to
:obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`).
- **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the
output, another string or :obj:`None` will add no activation.
- **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and
activation.
            - **summary_last_dropout** (:obj:`float`) -- Optional dropout probability after the projection and
activation.
initializer_range (:obj:`float`, defaults to 0.02): The standard deviation to use to initialize the weights.
kwargs:
Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
"""
def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs):
super().__init__(**kwargs)
        self.summary_type = config.summary_type if hasattr(config, "summary_type") else "last"
if self.summary_type == "attn":
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj
if self.has_summary:
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = tf.keras.layers.Dense(
num_classes, kernel_initializer=get_initializer(initializer_range), name="summary"
)
self.has_activation = hasattr(config, "summary_activation") and config.summary_activation == "tanh"
if self.has_activation:
self.activation = tf.keras.activations.tanh
self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0
if self.has_first_dropout:
self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout)
self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0
if self.has_last_dropout:
self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout)
def call(self, inputs, cls_index=None, training=False):
if not isinstance(inputs, (dict, tuple, list)):
hidden_states = inputs
elif isinstance(inputs, (tuple, list)):
hidden_states = inputs[0]
cls_index = inputs[1] if len(inputs) > 1 else None
assert len(inputs) <= 2, "Too many inputs."
else:
hidden_states = inputs.get("hidden_states")
cls_index = inputs.get("cls_index", None)
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = tf.reduce_mean(hidden_states, axis=1)
elif self.summary_type == "cls_index":
hidden_shape = shape_list(hidden_states) # e.g. [batch, num choices, seq length, hidden dims]
if cls_index is None:
cls_index = tf.fill(
hidden_shape[:-2], hidden_shape[-2] - 1
                )  # A tensor of shape [batch] or [batch, num choices] filled with the last-token index (sequence length - 1)
cls_shape = shape_list(cls_index)
if len(cls_shape) <= len(hidden_shape) - 2:
cls_index = tf.expand_dims(cls_index, axis=-1)
# else:
# cls_index = cls_index[..., tf.newaxis]
# cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),))
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
output = tf.squeeze(
output, axis=len(hidden_shape) - 2
) # shape of output: (batch, num choices, hidden_size)
elif self.summary_type == "attn":
raise NotImplementedError
if self.has_first_dropout:
output = self.first_dropout(output, training=training)
if self.has_summary:
output = self.summary(output)
if self.has_activation:
output = self.activation(output)
if self.has_last_dropout:
output = self.last_dropout(output, training=training)
return output
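# --- Hedged usage sketch (added for illustration; SimpleNamespace stands in for
# a real PretrainedConfig, and the attribute values below are illustrative).
def _tfsequencesummary_usage_example():
    from types import SimpleNamespace
    config = SimpleNamespace(
        summary_type="cls_index",
        summary_use_proj=True,
        summary_proj_to_labels=False,
        hidden_size=16,
        num_labels=0,
        summary_activation="tanh",
        summary_first_dropout=0.0,
        summary_last_dropout=0.0,
    )
    summary = TFSequenceSummary(config, name="sequence_summary")
    hidden_states = tf.random.uniform([2, 7, 16])  # [batch, seq_len, hidden]
    pooled = summary(hidden_states)                # [2, 16]: last-token state, projected
    return shape_list(pooled)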
def shape_list(tensor: tf.Tensor) -> List[int]:
"""
Deal with dynamic shape in tensorflow cleanly.
Args:
tensor (:obj:`tf.Tensor`): The tensor we want the shape of.
Returns:
:obj:`List[int]`: The shape of the tensor as a list.
"""
dynamic = tf.shape(tensor)
if tensor.shape == tf.TensorShape(None):
return dynamic
static = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
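# --- Hedged usage sketch (added for illustration; not part of the original file).
# shape_list mixes static Python ints with dynamic tensors, so downstream code
# can reshape without knowing every dimension at trace time.
def _shape_list_usage_example():
    static = shape_list(tf.zeros([2, 3]))  # fully known -> [2, 3]
    @tf.function(input_signature=[tf.TensorSpec([None, 3], tf.float32)])
    def batch_dim(t):
        return shape_list(t)[0]  # unknown dim resolved via tf.shape at runtime
    return static, batch_dim(tf.zeros([4, 3]))  # -> ([2, 3], <tf.Tensor: 4>)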
def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal:
"""
Creates a :obj:`tf.initializers.TruncatedNormal` with the given range.
Args:
initializer_range (`float`, defaults to 0.02): Standard deviation of the initializer range.
Returns:
:obj:`tf.initializers.TruncatedNormal`: The truncated normal initializer.
"""
return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
class TFWrappedEmbeddings:
"""
    This class wraps the shared token embedding layer in a plain Python (non-Keras-layer) class to avoid problems
    with weight restoring. It also makes sure that the layer is called from the correct scope so that the correct
    weights are saved and restored.
"""
def __init__(self, layer, abs_scope_name=None):
self._layer = layer
self._abs_scope_name = abs_scope_name
def call(self, inputs, mode="embedding"):
if self._abs_scope_name is None:
return self._layer.call(inputs, mode)
# if an abs scope name is given to the embedding variable, call variable from absolute scope
with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
with tf.name_scope(abs_scope_name.original_name_scope):
return self._layer.call(inputs, mode)
def __call__(self, inputs, mode="embedding"):
if self._abs_scope_name is None:
return self._layer(inputs, mode)
# if an abs scope name is given to the embedding variable, call variable from absolute scope
with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
with tf.name_scope(abs_scope_name.original_name_scope):
return self._layer(inputs, mode)
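# --- Hedged usage sketch (added for illustration; not part of the original file).
# Without an absolute scope name the wrapper is plain delegation; with one, the
# call is re-entered under that scope so variables keep consistent names.
def _tfwrappedembeddings_usage_example():
    shared = TFSharedEmbeddings(vocab_size=100, hidden_size=16, name="shared")
    wrapped = TFWrappedEmbeddings(shared)  # abs_scope_name=None -> delegation
    return wrapped(tf.constant([[1, 2, 3]]), mode="embedding")  # [1, 3, 16]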
| 45.192993
| 167
| 0.63202
|
import functools
import inspect
import os
import re
import warnings
from typing import Dict, List, Optional, Union
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.saving import hdf5_format
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
WEIGHTS_NAME,
ModelOutput,
PushToHubMixin,
cached_path,
copy_func,
hf_bucket_url,
is_offline_mode,
is_remote_url,
)
from .generation_tf_utils import TFGenerationMixin
from .tokenization_utils_base import BatchEncoding
from .utils import logging
logger = logging.get_logger(__name__)
tf_logger = tf.get_logger()
TFModelInputType = Union[
List[tf.Tensor], List[np.ndarray], Dict[str, tf.Tensor], Dict[str, np.ndarray], np.ndarray, tf.Tensor
]
class TFModelUtilsMixin:
def num_parameters(self, only_trainable: bool = False) -> int:
if only_trainable:
return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables))
else:
return self.count_params()
def keras_serializable(cls):
initializer = cls.__init__
config_class = getattr(cls, "config_class", None)
if config_class is None:
raise AttributeError("Must set `config_class` to use @keras_serializable")
@functools.wraps(initializer)
def wrapped_init(self, *args, **kwargs):
config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None)
if isinstance(config, dict):
config = config_class.from_dict(config)
initializer(self, config, *args, **kwargs)
elif isinstance(config, PretrainedConfig):
if len(args) > 0:
initializer(self, *args, **kwargs)
else:
initializer(self, config, *args, **kwargs)
else:
raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)")
self._config = config
self._kwargs = kwargs
cls.__init__ = wrapped_init
if not hasattr(cls, "get_config"):
raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses")
if hasattr(cls.get_config, "_is_default"):
def get_config(self):
cfg = super(cls, self).get_config()
cfg["config"] = self._config.to_dict()
cfg.update(self._kwargs)
return cfg
cls.get_config = get_config
cls._keras_serializable = True
if hasattr(tf.keras.utils, "register_keras_serializable"):
cls = tf.keras.utils.register_keras_serializable()(cls)
return cls
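# --- Hedged usage sketch (added for illustration; the class below is a toy,
# not part of the library). A main layer decorated with @keras_serializable
# accepts either a PretrainedConfig instance or its dict form.
@keras_serializable
class _ToyMainLayer(tf.keras.layers.Layer):
    config_class = PretrainedConfig
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
def _keras_serializable_usage_example():
    layer_from_obj = _ToyMainLayer(PretrainedConfig())
    layer_from_dict = _ToyMainLayer(config=PretrainedConfig().to_dict())
    return layer_from_obj.config, layer_from_dict.config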
class TFCausalLanguageModelingLoss:
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
return loss_fn(labels, reduced_logits)
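# --- Hedged usage sketch (added for illustration; not part of the original file).
# Label positions set to -100 are masked out; the loss comes back per token
# (Reduction.NONE), so callers can average or sum as they see fit.
def _causal_lm_loss_usage_example():
    class _Demo(TFCausalLanguageModelingLoss):
        pass
    labels = tf.constant([[5, -100, 7]])         # -100 marks ignored positions
    logits = tf.random.uniform([1, 3, 10])       # [batch, seq_len, vocab_size]
    return _Demo().compute_loss(labels, logits)  # shape [2]: one loss per kept token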
class TFQuestionAnsweringLoss:
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
start_loss = loss_fn(labels["start_position"], logits[0])
end_loss = loss_fn(labels["end_position"], logits[1])
return (start_loss + end_loss) / 2.0
class TFTokenClassificationLoss:
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
if tf.math.reduce_any(labels == -1):
warnings.warn("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.")
active_loss = tf.reshape(labels, (-1,)) != -1
else:
active_loss = tf.reshape(labels, (-1,)) != -100
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
return loss_fn(labels, reduced_logits)
class TFSequenceClassificationLoss:
def compute_loss(self, labels, logits):
if len(shape_list(logits)) == 1 or shape_list(logits)[1] == 1:
loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
else:
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
return loss_fn(labels, logits)
class TFMultipleChoiceLoss:
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
return loss_fn(labels, logits)
class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss):
    pass
class TFNextSentencePredictionLoss:
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss)
next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss)
return loss_fn(next_sentence_label, next_sentence_reduced_logits)
def booleans_processing(config, **kwargs):
final_booleans = {}
if tf.executing_eagerly():
final_booleans["output_attentions"] = (
kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
)
final_booleans["output_hidden_states"] = (
kwargs["output_hidden_states"]
if kwargs["output_hidden_states"] is not None
else config.output_hidden_states
)
final_booleans["return_dict"] = (
kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict
)
if "use_cache" in kwargs:
final_booleans["use_cache"] = kwargs["use_cache"] if kwargs["use_cache"] is not None else config.use_cache
else:
if (
kwargs["output_attentions"] is not None
or kwargs["output_hidden_states"] is not None
or ("use_cache" in kwargs and kwargs["use_cache"] is not None)
):
tf_logger.warning(
"The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model."
"They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)."
)
final_booleans["output_attentions"] = config.output_attentions
final_booleans["output_hidden_states"] = config.output_hidden_states
if kwargs["return_dict"] is not None:
tf_logger.warning(
"The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."
)
final_booleans["return_dict"] = True
if "use_cache" in kwargs:
final_booleans["use_cache"] = config.use_cache
return final_booleans
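# --- Hedged usage sketch (added for illustration; not part of the original file).
# In eager mode, explicit call-time values win and `None` falls back to the config.
def _booleans_processing_usage_example():
    config = PretrainedConfig(output_attentions=False)
    return booleans_processing(
        config,
        output_attentions=True,     # explicit value overrides the config
        output_hidden_states=None,  # -> config.output_hidden_states
        return_dict=None,           # -> config.return_dict
    )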
def input_processing(func, config, input_ids, **kwargs):
signature = dict(inspect.signature(func).parameters)
signature.pop("kwargs", None)
signature.pop("self", None)
parameter_names = list(signature.keys())
output = {}
allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray)
if "inputs" in kwargs["kwargs_call"]:
warnings.warn(
"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
FutureWarning,
)
output["input_ids"] = kwargs["kwargs_call"].pop("inputs")
if "decoder_cached_states" in kwargs["kwargs_call"]:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states")
if len(kwargs["kwargs_call"]) > 0:
raise ValueError(
f"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}."
)
kwargs.pop("kwargs_call")
for k, v in kwargs.items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
if isinstance(input_ids, (tuple, list)):
for i, input in enumerate(input_ids):
if type(input) == tf.Tensor:
                # Tensor names always follow the pattern `name:id`, so we check only the
                # `name` part
tensor_name = input.name.split(":")[0]
if tensor_name in parameter_names:
output[tensor_name] = input
else:
output[parameter_names[i]] = input
elif isinstance(input, allowed_types) or input is None:
output[parameter_names[i]] = input
else:
raise ValueError(
f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}."
)
elif isinstance(input_ids, (dict, BatchEncoding)):
if "inputs" in input_ids:
warnings.warn(
"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
FutureWarning,
)
output["input_ids"] = input_ids.pop("inputs")
if "decoder_cached_states" in input_ids:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
output["past_key_values"] = input_ids.pop("decoder_cached_states")
for k, v in dict(input_ids).items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
elif k not in parameter_names and "args" not in parameter_names:
logger.warning(
f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored."
)
continue
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
else:
if isinstance(input_ids, tf.Tensor) or input_ids is None:
output[parameter_names[0]] = input_ids
else:
raise ValueError(
f"Data of type {type(input_ids)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}."
)
for name in parameter_names:
if name not in list(output.keys()) and name != "args":
output[name] = kwargs.pop(name, signature[name].default)
    # When creating a SavedModel, TF calls the method with LayerCall.__call__(args, **kwargs),
    # so to produce the proper output we have to handle this case separately.
if "args" in output:
if output["args"] is not None and type(output["args"]) == tf.Tensor:
tensor_name = output["args"].name.split(":")[0]
output[tensor_name] = output["args"]
else:
# `args` in this case is always the first parameter, then `input_ids`
output["input_ids"] = output["args"]
del output["args"]
if "kwargs" in output:
del output["kwargs"]
boolean_dict = {
k: v
for k, v in output.items()
if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
}
output.update(
booleans_processing(
config=config,
**boolean_dict,
)
)
return output
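# --- Hedged usage sketch (added for illustration; `dummy_call` is a toy
# signature, not a real model method).
def _input_processing_usage_example():
    def dummy_call(self, input_ids=None, attention_mask=None, output_attentions=None,
                   output_hidden_states=None, return_dict=None, **kwargs):
        pass
    return input_processing(
        func=dummy_call,
        config=PretrainedConfig(),
        input_ids=tf.constant([[1, 2, 3]]),
        attention_mask=tf.constant([[1, 1, 1]]),
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        kwargs_call={},
    )  # -> dict of named tensors plus resolved booleans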
def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
missing_layers = []
unexpected_layers = []
mismatched_layers = []
# Read the H5 file
with h5py.File(resolved_archive_file, "r") as f:
# Retrieve the name of each layer from the H5 file
saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names"))
# Find the missing layers from the high level list of layers
missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name)
# Find the unexpected layers from the high level list of layers
unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers]))
saved_weight_names_set = set()
symbolic_weights_names = set()
weight_value_tuples = []
# Compute missing and unexpected sub layers
# Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]
for layer in model.layers:
# if layer_name from the H5 file belongs to the layers from the instantiated model
if layer.name in saved_h5_model_layers_name:
# Get the H5 layer object from its name
h5_layer_object = f[layer.name]
# Get all the weights as a list from the layer object
symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
saved_weights = {}
# Create a dict from the H5 saved model that looks like {"weight_name": weight_value}
# And a set with only the names
for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"):
# TF names always start with the model name so we ignore it
name = "/".join(weight_name.split("/")[1:])
if _prefix is not None:
name = _prefix + "/" + name
saved_weights[name] = np.asarray(h5_layer_object[weight_name])
# Add the updated name to the final list for computing missing/unexpected values
saved_weight_names_set.add(name)
# Loop over each weights from the instantiated model and compare with the weights from the H5 file
for symbolic_weight in symbolic_weights:
# TF names always start with the model name so we ignore it
if _prefix is not None:
                        delimiter = len(_prefix.split("/"))
                        symbolic_weight_name = "/".join(
                            symbolic_weight.name.split("/")[:delimiter]
                            + symbolic_weight.name.split("/")[delimiter + 1 :]
)
else:
symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:])
                    # Check if the current weight is among the weights from the H5 file.
                    # If yes, get the corresponding weight value from the H5 file;
                    # if not, set the value to None.
saved_weight_value = saved_weights.get(symbolic_weight_name, None)
# Add the updated name to the final list for computing missing/unexpected values
symbolic_weights_names.add(symbolic_weight_name)
# If the current weight is found
if saved_weight_value is not None:
# Check if the shape of the current weight and the one from the H5 file are different
if K.int_shape(symbolic_weight) != saved_weight_value.shape:
                            # If yes, reshape the weight from the H5 file to match the current weight.
                            # If the two shapes are not compatible, raise an error.
try:
array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
except ValueError as e:
if ignore_mismatched_sizes:
mismatched_layers.append(
(symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight))
)
continue
else:
raise e
else:
array = saved_weight_value
# We create the tuple that will be loaded and add it to the final list
weight_value_tuples.append((symbolic_weight, array))
# Load all the weights
K.batch_set_value(weight_value_tuples)
# Compute the missing and unexpected layers
missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set))
unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names))
return missing_layers, unexpected_layers, mismatched_layers
def init_copy_embeddings(old_embeddings, new_num_tokens):
old_num_tokens, old_embedding_dim = shape_list(old_embeddings)
size_diff = new_num_tokens - old_num_tokens
# initialize new embeddings
# Copy token embeddings from the previous ones
if tf.math.greater(size_diff, 0):
        # If the new size is greater than the old one, pad the current embeddings up to the new size,
        # and create a mask identifying the padded rows so they can later be replaced by the values
        # of the newly created embeddings.
current_weights = tf.pad(
old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1
)
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True)
mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False)
else:
        # If the new size is lower than the old one, truncate the current embeddings to the new size.
current_weights = tf.slice(
old_embeddings.value(),
tf.convert_to_tensor([0, 0]),
tf.convert_to_tensor([new_num_tokens, old_embedding_dim]),
)
mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True)
return mask, current_weights
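# --- Hedged usage sketch (added for illustration; not part of the original file).
# Growing 4 -> 6 tokens pads two rows with -1 placeholders and returns a mask
# marking which rows carry copied (True) vs. to-be-initialized (False) values.
def _init_copy_embeddings_usage_example():
    old = tf.Variable(tf.random.uniform([4, 8]))  # 4 tokens, embedding dim 8
    mask, current = init_copy_embeddings(old, new_num_tokens=6)
    return shape_list(mask), shape_list(current)  # -> [6, 1], [6, 8]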
class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin):
config_class = None
base_model_prefix = ""
# a list of re pattern of tensor names to ignore from the model when loading the model weights
# (and avoid unnecessary warnings).
_keys_to_ignore_on_load_missing = None
# a list of re pattern of tensor names to ignore from the weights when loading the model weights
# (and avoid unnecessary warnings).
_keys_to_ignore_on_load_unexpected = None
_requires_load_weight_prefix = False
@property
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
return {
"input_ids": tf.constant(DUMMY_INPUTS),
}
def __init__(self, config, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
if not isinstance(config, PretrainedConfig):
raise ValueError(
f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
"`PretrainedConfig`. To create a model from a pretrained model use "
f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
)
# Save config and origin of the pretrained weights if given in model
self.config = config
self.name_or_path = config.name_or_path
@classmethod
def _from_config(cls, config, **kwargs):
return cls(config, **kwargs)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
    def serving_output(self, output):
raise NotImplementedError
def get_input_embeddings(self) -> tf.keras.layers.Layer:
main_layer = getattr(self, self.base_model_prefix, self)
if main_layer is not self:
return main_layer.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value):
main_layer = getattr(self, self.base_model_prefix)
if main_layer is None:
raise NotImplementedError("The model does not implements the base_model_prefix attribute.")
try:
main_layer.set_input_embeddings(value)
except AttributeError:
logger.info("Building the model")
self(self.dummy_inputs)
main_layer.set_input_embeddings(value)
def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]:
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
return lm_head.get_output_embeddings()
except AttributeError:
logger.info("Building the model")
self(self.dummy_inputs)
                return lm_head.get_output_embeddings()
return None # Overwrite for models with output embeddings
def set_output_embeddings(self, value):
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
lm_head.set_output_embeddings(value)
except AttributeError:
logger.info("Building the model")
self(self.dummy_inputs)
lm_head.set_output_embeddings(value)
def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]:
warnings.warn(
"The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning
)
return self.get_lm_head()
def get_prefix_bias_name(self) -> Union[None, str]:
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
return None
def get_bias(self) -> Union[None, Dict[str, tf.Variable]]:
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
return lm_head.get_bias()
except AttributeError:
self(self.dummy_inputs)
return lm_head.get_bias()
return None
def set_bias(self, value):
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
lm_head.set_bias(value)
except AttributeError:
self(self.dummy_inputs)
lm_head.set_bias(value)
def get_lm_head(self) -> tf.keras.layers.Layer:
return None
def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable:
if new_num_tokens is None or new_num_tokens == self.config.vocab_size:
return self._get_word_embedding_weight(self.get_input_embeddings())
model_embeds = self._resize_token_embeddings(new_num_tokens)
# Update base model and current model config
self.config.vocab_size = new_num_tokens
return model_embeds
def _get_word_embedding_weight(model, embedding_layer):
embeds = getattr(embedding_layer, "weight", None)
if embeds is not None:
return embeds
embeds = getattr(embedding_layer, "decoder", None)
if embeds is not None:
return embeds
        # The reason why the attributes don't exist might be because the model is not
        # built yet, so build it with a forward pass on the dummy inputs and retry.
        model(model.dummy_inputs)
embeds = getattr(embedding_layer, "weight", None)
if embeds is not None:
return embeds
embeds = getattr(embedding_layer, "decoder", None)
if embeds is not None:
return embeds
return None
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings())
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
if self.get_bias() is not None:
old_lm_head_bias = self.get_bias()
new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens)
self.set_bias(new_lm_head_bias)
if self.get_output_embeddings() is not None:
old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings())
new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens)
self.set_output_embeddings(new_lm_head_decoder)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens):
new_lm_head_bias = {}
for attr, weight in old_lm_head_bias.items():
first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)
size_diff = new_num_tokens - old_num_tokens
final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens]
if tf.math.greater(size_diff, 0):
padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]
current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1)
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy]
bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True)
bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False)
else:
slice_from = [0] if first_dim is None else [0, 0]
current_bias = tf.slice(
weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape)
)
bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True)
new_bias = self.add_weight(
shape=final_shape,
initializer="zeros",
trainable=True,
name=weight.name.split(":")[0],
)
init_bias = tf.where(bias_mask, current_bias, new_bias.value())
new_bias.assign(init_bias)
new_lm_head_bias[attr] = new_bias
return new_lm_head_bias
def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens):
new_lm_head_decoder = old_lm_head_decoder
is_input_output_equals = tf.reduce_any(
self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder
)
if old_lm_head_decoder is not None and not is_input_output_equals:
old_embedding_dim = shape_list(old_lm_head_decoder)[1]
decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens)
new_lm_head_decoder = self.add_weight(
shape=(new_num_tokens, old_embedding_dim),
initializer="zeros",
trainable=True,
name=old_lm_head_decoder.name.split(":")[0],
)
init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value())
new_lm_head_decoder.assign(init_decoder)
return new_lm_head_decoder
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable:
old_embedding_dim = shape_list(old_embeddings)[1]
init_range = getattr(self.config, "initializer_range", 0.02)
embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens)
new_embeddings = self.add_weight(
name=old_embeddings.name.split(":")[0],
shape=[new_num_tokens, old_embedding_dim],
initializer=get_initializer(init_range),
dtype=tf.float32,
)
init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value())
new_embeddings.assign(init_embeddings)
return new_embeddings
def prune_heads(self, heads_to_prune):
raise NotImplementedError
def save_pretrained(self, save_directory, saved_model=False, version=1, push_to_hub=False, **kwargs):
if os.path.isfile(save_directory):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
return
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo = self._create_or_get_repo(save_directory, **kwargs)
os.makedirs(save_directory, exist_ok=True)
if saved_model:
saved_model_dir = os.path.join(save_directory, "saved_model", str(version))
self.save(saved_model_dir, include_optimizer=False, signatures=self.serving)
logger.info(f"Saved model created in {saved_model_dir}")
self.config.architectures = [self.__class__.__name__[2:]]
self.config.save_pretrained(save_directory)
output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME)
self.save_weights(output_model_file)
logger.info(f"Model weights saved in {output_model_file}")
if push_to_hub:
url = self._push_to_hub(repo, commit_message=commit_message)
logger.info(f"Model pushed to the hub in this commit: {url}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
config = kwargs.pop("config", None)
cache_dir = kwargs.pop("cache_dir", None)
from_pt = kwargs.pop("from_pt", False)
ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
use_auth_token = kwargs.pop("use_auth_token", None)
revision = kwargs.pop("revision", None)
mirror = kwargs.pop("mirror", None)
load_weight_prefix = kwargs.pop("load_weight_prefix", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
revision=revision,
_from_auto=from_auto_class,
_from_pipeline=from_pipeline,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if os.path.isdir(pretrained_model_name_or_path):
if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint in priority if from_pt
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
else:
raise EnvironmentError(
f"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME]} found in directory "
f"{pretrained_model_name_or_path} or `from_pt` set to False"
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path,
filename=(WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME),
revision=revision,
mirror=mirror,
)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
except EnvironmentError as err:
logger.error(err)
msg = (
f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {TF2_WEIGHTS_NAME}, {WEIGHTS_NAME}.\n\n"
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info(f"loading weights file {archive_file}")
else:
logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
else:
resolved_archive_file = None
config.name_or_path = pretrained_model_name_or_path
if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None:
model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name")
model = cls(config, *model_args, **model_kwargs)
if from_pt:
from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True)
if load_weight_prefix is not None:
with tf.compat.v1.variable_scope(load_weight_prefix):
model(model.dummy_inputs)
else:
model(model.dummy_inputs)
assert os.path.isfile(resolved_archive_file), f"Error retrieving file {resolved_archive_file}"
        try:
missing_keys, unexpected_keys, mismatched_keys = load_tf_weights(
model,
resolved_archive_file,
ignore_mismatched_sizes=ignore_mismatched_sizes,
_prefix=load_weight_prefix,
)
except OSError as e:
try:
with open(resolved_archive_file) as f:
if f.read().startswith("version"):
raise OSError(
"You seem to have cloned a repository without having git-lfs installed. Please install "
"git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
"you cloned."
)
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise OSError(
"Unable to load weights from h5 file. "
"If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
)
model(model.dummy_inputs)
if cls._keys_to_ignore_on_load_missing is not None:
for pat in cls._keys_to_ignore_on_load_missing:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if cls._keys_to_ignore_on_load_unexpected is not None:
for pat in cls._keys_to_ignore_on_load_unexpected:
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
if len(unexpected_keys) > 0:
logger.warning(
f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when "
f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
elif len(mismatched_keys) == 0:
logger.warning(
f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {model.__class__.__name__} for predictions without further training."
)
if len(mismatched_keys) > 0:
mismatched_warning = "\n".join(
[
f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
for key, shape1, shape2 in mismatched_keys
]
)
logger.warning(
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"mismatched_keys": mismatched_keys,
}
return model, loading_info
return model
TFPreTrainedModel.push_to_hub = copy_func(TFPreTrainedModel.push_to_hub)
TFPreTrainedModel.push_to_hub.__doc__ = TFPreTrainedModel.push_to_hub.__doc__.format(
object="model", object_class="TFAutoModel", object_files="model checkpoint"
)
class TFConv1D(tf.keras.layers.Layer):
def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
super().__init__(**kwargs)
self.nf = nf
self.nx = nx
self.initializer_range = initializer_range
def build(self, input_shape):
self.weight = self.add_weight(
"weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
)
self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())
def call(self, x):
bz, sl = shape_list(x)[:2]
x = tf.reshape(x, [-1, self.nx])
x = tf.matmul(x, self.weight) + self.bias
x = tf.reshape(x, [bz, sl, self.nf])
return x
class TFSharedEmbeddings(tf.keras.layers.Layer):
def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.initializer_range = hidden_size ** -0.5 if initializer_range is None else initializer_range
def build(self, input_shape):
self.weight = self.add_weight(
"weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)
)
super().build(input_shape)
def get_config(self):
config = {
"vocab_size": self.vocab_size,
"hidden_size": self.hidden_size,
"initializer_range": self.initializer_range,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor:
if mode == "embedding":
return self._embedding(inputs)
elif mode == "linear":
return self._linear(inputs)
else:
raise ValueError(f"mode {mode} is not valid.")
def _embedding(self, input_ids):
return tf.gather(self.weight, input_ids)
def _linear(self, inputs):
first_dims = shape_list(inputs)[:-1]
x = tf.reshape(inputs, [-1, self.hidden_size])
logits = tf.matmul(x, self.weight, transpose_b=True)
return tf.reshape(logits, first_dims + [self.vocab_size])
class TFSequenceSummary(tf.keras.layers.Layer):
def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs):
super().__init__(**kwargs)
        self.summary_type = config.summary_type if hasattr(config, "summary_type") else "last"
if self.summary_type == "attn":
raise NotImplementedError
self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj
if self.has_summary:
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = tf.keras.layers.Dense(
num_classes, kernel_initializer=get_initializer(initializer_range), name="summary"
)
self.has_activation = hasattr(config, "summary_activation") and config.summary_activation == "tanh"
if self.has_activation:
self.activation = tf.keras.activations.tanh
self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0
if self.has_first_dropout:
self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout)
self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0
if self.has_last_dropout:
self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout)
def call(self, inputs, cls_index=None, training=False):
if not isinstance(inputs, (dict, tuple, list)):
hidden_states = inputs
elif isinstance(inputs, (tuple, list)):
hidden_states = inputs[0]
cls_index = inputs[1] if len(inputs) > 1 else None
assert len(inputs) <= 2, "Too many inputs."
else:
hidden_states = inputs.get("hidden_states")
cls_index = inputs.get("cls_index", None)
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = tf.reduce_mean(hidden_states, axis=1)
elif self.summary_type == "cls_index":
hidden_shape = shape_list(hidden_states)
if cls_index is None:
cls_index = tf.fill(
hidden_shape[:-2], hidden_shape[-2] - 1
)
cls_shape = shape_list(cls_index)
if len(cls_shape) <= len(hidden_shape) - 2:
cls_index = tf.expand_dims(cls_index, axis=-1)
output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
output = tf.squeeze(
output, axis=len(hidden_shape) - 2
)
elif self.summary_type == "attn":
raise NotImplementedError
if self.has_first_dropout:
output = self.first_dropout(output, training=training)
if self.has_summary:
output = self.summary(output)
if self.has_activation:
output = self.activation(output)
if self.has_last_dropout:
output = self.last_dropout(output, training=training)
return output
def shape_list(tensor: tf.Tensor) -> List[int]:
dynamic = tf.shape(tensor)
if tensor.shape == tf.TensorShape(None):
return dynamic
static = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal:
return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
class TFWrappedEmbeddings:
def __init__(self, layer, abs_scope_name=None):
self._layer = layer
self._abs_scope_name = abs_scope_name
def call(self, inputs, mode="embedding"):
if self._abs_scope_name is None:
return self._layer.call(inputs, mode)
with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
with tf.name_scope(abs_scope_name.original_name_scope):
return self._layer.call(inputs, mode)
def __call__(self, inputs, mode="embedding"):
if self._abs_scope_name is None:
return self._layer(inputs, mode)
with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
with tf.name_scope(abs_scope_name.original_name_scope):
return self._layer(inputs, mode)
| true
| true
|
1c47973c175cf48b3b9eebccc97189614023378a
| 3,319
|
py
|
Python
|
zerver/lib/sessions.py
|
DD2480-group7-2020/zulip
|
9a1e18bcf383c38c35da168563a7345768c6d784
|
[
"Apache-2.0"
] | 1
|
2020-03-17T14:58:50.000Z
|
2020-03-17T14:58:50.000Z
|
zerver/lib/sessions.py
|
DD2480-group7-2020/zulip
|
9a1e18bcf383c38c35da168563a7345768c6d784
|
[
"Apache-2.0"
] | null | null | null |
zerver/lib/sessions.py
|
DD2480-group7-2020/zulip
|
9a1e18bcf383c38c35da168563a7345768c6d784
|
[
"Apache-2.0"
] | null | null | null |
import logging
from datetime import timedelta
from django.conf import settings
from django.contrib.auth import SESSION_KEY, get_user_model
from django.contrib.sessions.models import Session
from django.utils.timezone import now as timezone_now
from importlib import import_module
from typing import Any, List, Mapping, Optional
from zerver.models import Realm, UserProfile, get_user_profile_by_id
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
session_engine = import_module(settings.SESSION_ENGINE)
def get_session_dict_user(session_dict: Mapping[str, int]) -> Optional[int]:
# Compare django.contrib.auth._get_user_session_key
try:
return get_user_model()._meta.pk.to_python(session_dict[SESSION_KEY])
except KeyError:
return None
def get_session_user(session: Session) -> Optional[int]:
return get_session_dict_user(session.get_decoded())
def user_sessions(user_profile: UserProfile) -> List[Session]:
return [s for s in Session.objects.all()
if get_session_user(s) == user_profile.id]
def delete_session(session: Session) -> None:
session_engine.SessionStore(session.session_key).delete() # type: ignore # import_module
def delete_user_sessions(user_profile: UserProfile) -> None:
for session in Session.objects.all():
if get_session_user(session) == user_profile.id:
delete_session(session)
def delete_realm_user_sessions(realm: Realm) -> None:
realm_user_ids = [user_profile.id for user_profile in
UserProfile.objects.filter(realm=realm)]
for session in Session.objects.filter(expire_date__gte=timezone_now()):
if get_session_user(session) in realm_user_ids:
delete_session(session)
def delete_all_user_sessions() -> None:
for session in Session.objects.all():
delete_session(session)
def delete_all_deactivated_user_sessions() -> None:
for session in Session.objects.all():
user_profile_id = get_session_user(session)
if user_profile_id is None: # nocoverage # TODO: Investigate why we lost coverage on this
continue
user_profile = get_user_profile_by_id(user_profile_id)
if not user_profile.is_active or user_profile.realm.deactivated:
logging.info("Deactivating session for deactivated user %s" % (user_profile.id,))
delete_session(session)
def set_expirable_session_var(session: Session, var_name: str, var_value: Any, expiry_seconds: int) -> None:
expire_at = datetime_to_timestamp(timezone_now() + timedelta(seconds=expiry_seconds))
session[var_name] = {'value': var_value, 'expire_at': expire_at}
def get_expirable_session_var(session: Session, var_name: str, default_value: Any=None,
delete: bool=False) -> Any:
if var_name not in session:
return default_value
try:
value, expire_at = (session[var_name]['value'], session[var_name]['expire_at'])
except (KeyError, TypeError) as e:
logging.warning("get_expirable_session_var: Variable {}: {}".format(var_name, e))
return default_value
if timestamp_to_datetime(expire_at) < timezone_now():
del session[var_name]
return default_value
if delete:
del session[var_name]
return value
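# --- Hedged usage sketch (added for illustration; not part of the original file).
# Assumes a Django request object, so `request.session` is a live session.
def _expirable_session_var_example(request: Any) -> Any:
    # Store a one-time token that silently expires after ten minutes.
    set_expirable_session_var(request.session, 'confirmation_key', 'abc123',
                              expiry_seconds=600)
    # Read it back and delete it so it can only be consumed once.
    return get_expirable_session_var(request.session, 'confirmation_key',
                                     default_value=None, delete=True)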
| 40.975309
| 108
| 0.730642
|
import logging
from datetime import timedelta
from django.conf import settings
from django.contrib.auth import SESSION_KEY, get_user_model
from django.contrib.sessions.models import Session
from django.utils.timezone import now as timezone_now
from importlib import import_module
from typing import Any, List, Mapping, Optional
from zerver.models import Realm, UserProfile, get_user_profile_by_id
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
session_engine = import_module(settings.SESSION_ENGINE)
def get_session_dict_user(session_dict: Mapping[str, int]) -> Optional[int]:
try:
return get_user_model()._meta.pk.to_python(session_dict[SESSION_KEY])
except KeyError:
return None
def get_session_user(session: Session) -> Optional[int]:
return get_session_dict_user(session.get_decoded())
def user_sessions(user_profile: UserProfile) -> List[Session]:
return [s for s in Session.objects.all()
if get_session_user(s) == user_profile.id]
def delete_session(session: Session) -> None:
    session_engine.SessionStore(session.session_key).delete()
def delete_user_sessions(user_profile: UserProfile) -> None:
for session in Session.objects.all():
if get_session_user(session) == user_profile.id:
delete_session(session)
def delete_realm_user_sessions(realm: Realm) -> None:
realm_user_ids = [user_profile.id for user_profile in
UserProfile.objects.filter(realm=realm)]
for session in Session.objects.filter(expire_date__gte=timezone_now()):
if get_session_user(session) in realm_user_ids:
delete_session(session)
def delete_all_user_sessions() -> None:
for session in Session.objects.all():
delete_session(session)
def delete_all_deactivated_user_sessions() -> None:
for session in Session.objects.all():
user_profile_id = get_session_user(session)
        if user_profile_id is None:
            continue
        user_profile = get_user_profile_by_id(user_profile_id)
if not user_profile.is_active or user_profile.realm.deactivated:
logging.info("Deactivating session for deactivated user %s" % (user_profile.id,))
delete_session(session)
def set_expirable_session_var(session: Session, var_name: str, var_value: Any, expiry_seconds: int) -> None:
expire_at = datetime_to_timestamp(timezone_now() + timedelta(seconds=expiry_seconds))
session[var_name] = {'value': var_value, 'expire_at': expire_at}
def get_expirable_session_var(session: Session, var_name: str, default_value: Any=None,
delete: bool=False) -> Any:
if var_name not in session:
return default_value
try:
value, expire_at = (session[var_name]['value'], session[var_name]['expire_at'])
except (KeyError, TypeError) as e:
logging.warning("get_expirable_session_var: Variable {}: {}".format(var_name, e))
return default_value
if timestamp_to_datetime(expire_at) < timezone_now():
del session[var_name]
return default_value
if delete:
del session[var_name]
return value
| true
| true
|
1c47973f15053c421fd0ceb6b824666a3ce5fbc4
| 50,742
|
py
|
Python
|
Funções Analíticas/Virtualenv/Lib/site-packages/matplotlib/__init__.py
|
Leonardo-Maciel/PSO_Maciel
|
3939448da45716260f3ac7811afdd13be670f346
|
[
"MIT"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
Funções Analíticas/Virtualenv/Lib/site-packages/matplotlib/__init__.py
|
Leonardo-Maciel/PSO_Maciel
|
3939448da45716260f3ac7811afdd13be670f346
|
[
"MIT"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
Funções Analíticas/Virtualenv/Lib/site-packages/matplotlib/__init__.py
|
Leonardo-Maciel/PSO_Maciel
|
3939448da45716260f3ac7811afdd13be670f346
|
[
"MIT"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
"""
An object-oriented plotting library.
A procedural interface is provided by the companion pyplot module,
which may be imported directly, e.g.::
import matplotlib.pyplot as plt
or using ipython::
ipython
at your terminal, followed by::
In [1]: %matplotlib
In [2]: import matplotlib.pyplot as plt
at the ipython shell prompt.
For the most part, direct use of the object-oriented library is encouraged when
programming; pyplot is primarily for working interactively. The exceptions are
the pyplot functions `.pyplot.figure`, `.pyplot.subplot`, `.pyplot.subplots`,
and `.pyplot.savefig`, which can greatly simplify scripting.
Modules include:
:mod:`matplotlib.axes`
The `~.axes.Axes` class. Most pyplot functions are wrappers for
`~.axes.Axes` methods. The axes module is the highest level of OO
access to the library.
:mod:`matplotlib.figure`
The `.Figure` class.
:mod:`matplotlib.artist`
The `.Artist` base class for all classes that draw things.
:mod:`matplotlib.lines`
The `.Line2D` class for drawing lines and markers.
:mod:`matplotlib.patches`
Classes for drawing polygons.
:mod:`matplotlib.text`
The `.Text` and `.Annotation` classes.
:mod:`matplotlib.image`
The `.AxesImage` and `.FigureImage` classes.
:mod:`matplotlib.collections`
Classes for efficient drawing of groups of lines or polygons.
:mod:`matplotlib.colors`
Color specifications and making colormaps.
:mod:`matplotlib.cm`
Colormaps, and the `.ScalarMappable` mixin class for providing color
mapping functionality to other classes.
:mod:`matplotlib.ticker`
Calculation of tick mark locations and formatting of tick labels.
:mod:`matplotlib.backends`
A subpackage with modules for various GUI libraries and output formats.
The base matplotlib namespace includes:
`~matplotlib.rcParams`
Default configuration settings; their defaults may be overridden using
a :file:`matplotlibrc` file.
`~matplotlib.use`
Setting the Matplotlib backend. This should be called before any
figure is created, because it is not possible to switch between
different GUI backends after that.
Matplotlib was initially written by John D. Hunter (1968-2012) and is now
developed and maintained by a host of others.
Occasionally the internal documentation (python docstrings) will refer
to MATLAB®, a registered trademark of The MathWorks, Inc.
"""
import atexit
from collections import namedtuple
from collections.abc import MutableMapping
import contextlib
from distutils.version import LooseVersion
import functools
import importlib
import inspect
from inspect import Parameter
import locale
import logging
import os
from pathlib import Path
import pprint
import re
import shutil
import subprocess
import sys
import tempfile
import warnings
# cbook must import matplotlib only within function
# definitions, so it is safe to import from it here.
from . import cbook, rcsetup
from matplotlib.cbook import MatplotlibDeprecationWarning, sanitize_sequence
from matplotlib.cbook import mplDeprecation # deprecated
from matplotlib.rcsetup import validate_backend, cycler
import numpy
# Get the version from the _version.py versioneer file. For a git checkout,
# this is computed based on the number of commits since the last tag.
from ._version import get_versions
__version__ = str(get_versions()['version'])
del get_versions
_log = logging.getLogger(__name__)
__bibtex__ = r"""@Article{Hunter:2007,
Author = {Hunter, J. D.},
Title = {Matplotlib: A 2D graphics environment},
Journal = {Computing in Science \& Engineering},
Volume = {9},
Number = {3},
Pages = {90--95},
abstract = {Matplotlib is a 2D graphics package used for Python
for application development, interactive scripting, and
publication-quality image generation across user
interfaces and operating systems.},
publisher = {IEEE COMPUTER SOC},
year = 2007
}"""
@cbook.deprecated("3.2")
def compare_versions(a, b):
"""Return whether version *a* is greater than or equal to version *b*."""
if isinstance(a, bytes):
cbook.warn_deprecated(
"3.0", message="compare_versions arguments should be strs.")
a = a.decode('ascii')
if isinstance(b, bytes):
cbook.warn_deprecated(
"3.0", message="compare_versions arguments should be strs.")
b = b.decode('ascii')
if a:
return LooseVersion(a) >= LooseVersion(b)
else:
return False
def _check_versions():
# Quickfix to ensure Microsoft Visual C++ redistributable
# DLLs are loaded before importing kiwisolver
from . import ft2font
for modname, minver in [
("cycler", "0.10"),
("dateutil", "2.1"),
("kiwisolver", "1.0.1"),
("numpy", "1.15"),
("pyparsing", "2.0.1"),
]:
module = importlib.import_module(modname)
if LooseVersion(module.__version__) < minver:
raise ImportError("Matplotlib requires {}>={}; you have {}"
.format(modname, minver, module.__version__))
_check_versions()
# The decorator ensures this always returns the same handler (and it is only
# attached once).
@functools.lru_cache()
def _ensure_handler():
"""
The first time this function is called, attach a `StreamHandler` using the
same format as `logging.basicConfig` to the Matplotlib root logger.
Return this handler every time this function is called.
"""
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
_log.addHandler(handler)
return handler
def set_loglevel(level):
"""
Set Matplotlib's root logger and root logger handler level, creating
the handler if it does not exist yet.
Typically, one should call ``set_loglevel("info")`` or
``set_loglevel("debug")`` to get additional debugging information.
Parameters
----------
level : {"notset", "debug", "info", "warning", "error", "critical"}
The log level of the handler.
Notes
-----
The first time this function is called, an additional handler is attached
to Matplotlib's root handler; this handler is reused every time and this
function simply manipulates the logger and handler's level.
"""
_log.setLevel(level.upper())
_ensure_handler().setLevel(level.upper())
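# Illustrative usage sketch (assumed, not part of the original file): turn on
# verbose logging while chasing a rendering problem, then quiet it again.
#
#     import matplotlib
#     matplotlib.set_loglevel("debug")    # first call attaches the handler
#     ...                                 # run the code under investigation
#     matplotlib.set_loglevel("warning")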
def _logged_cached(fmt, func=None):
"""
Decorator that logs a function's return value, and memoizes that value.
After ::
@_logged_cached(fmt)
def func(): ...
the first call to *func* will log its return value at the DEBUG level using
%-format string *fmt*, and memoize it; later calls to *func* will directly
return that value.
"""
if func is None: # Return the actual decorator.
return functools.partial(_logged_cached, fmt)
called = False
ret = None
@functools.wraps(func)
def wrapper(**kwargs):
nonlocal called, ret
if not called:
ret = func(**kwargs)
called = True
_log.debug(fmt, ret)
return ret
return wrapper
_ExecInfo = namedtuple("_ExecInfo", "executable version")
class ExecutableNotFoundError(FileNotFoundError):
"""
Error raised when an executable that Matplotlib optionally
depends on can't be found.
"""
pass
@functools.lru_cache()
def _get_executable_info(name):
"""
Get the version of some executable that Matplotlib optionally depends on.
    .. warning::
The list of executables that this function supports is set according to
Matplotlib's internal needs, and may change without notice.
Parameters
----------
name : str
The executable to query. The following values are currently supported:
"dvipng", "gs", "inkscape", "magick", "pdftops". This list is subject
to change without notice.
Returns
-------
If the executable is found, a namedtuple with fields ``executable`` (`str`)
and ``version`` (`distutils.version.LooseVersion`, or ``None`` if the
version cannot be determined).
Raises
------
ExecutableNotFoundError
If the executable is not found or older than the oldest version
supported by Matplotlib.
ValueError
If the executable is not one that we know how to query.
"""
def impl(args, regex, min_ver=None, ignore_exit_code=False):
# Execute the subprocess specified by args; capture stdout and stderr.
# Search for a regex match in the output; if the match succeeds, the
# first group of the match is the version.
# Return an _ExecInfo if the executable exists, and has a version of
# at least min_ver (if set); else, raise ExecutableNotFoundError.
try:
output = subprocess.check_output(
args, stderr=subprocess.STDOUT,
universal_newlines=True, errors="replace")
except subprocess.CalledProcessError as _cpe:
if ignore_exit_code:
output = _cpe.output
else:
raise ExecutableNotFoundError(str(_cpe)) from _cpe
except OSError as _ose:
raise ExecutableNotFoundError(str(_ose)) from _ose
match = re.search(regex, output)
if match:
version = LooseVersion(match.group(1))
if min_ver is not None and version < min_ver:
raise ExecutableNotFoundError(
f"You have {args[0]} version {version} but the minimum "
f"version supported by Matplotlib is {min_ver}")
return _ExecInfo(args[0], version)
else:
raise ExecutableNotFoundError(
f"Failed to determine the version of {args[0]} from "
f"{' '.join(args)}, which output {output}")
if name == "dvipng":
return impl(["dvipng", "-version"], "(?m)^dvipng(?: .*)? (.+)", "1.6")
elif name == "gs":
execs = (["gswin32c", "gswin64c", "mgs", "gs"] # "mgs" for miktex.
if sys.platform == "win32" else
["gs"])
for e in execs:
try:
return impl([e, "--version"], "(.*)", "9")
except ExecutableNotFoundError:
pass
message = "Failed to find a Ghostscript installation"
raise ExecutableNotFoundError(message)
elif name == "inkscape":
try:
# Try headless option first (needed for Inkscape version < 1.0):
return impl(["inkscape", "--without-gui", "-V"],
"Inkscape ([^ ]*)")
except ExecutableNotFoundError:
pass # Suppress exception chaining.
# If --without-gui is not accepted, we may be using Inkscape >= 1.0 so
# try without it:
return impl(["inkscape", "-V"], "Inkscape ([^ ]*)")
elif name == "magick":
path = None
if sys.platform == "win32":
# Check the registry to avoid confusing ImageMagick's convert with
# Windows's builtin convert.exe.
import winreg
binpath = ""
for flag in [0, winreg.KEY_WOW64_32KEY, winreg.KEY_WOW64_64KEY]:
try:
with winreg.OpenKeyEx(
winreg.HKEY_LOCAL_MACHINE,
r"Software\Imagemagick\Current",
0, winreg.KEY_QUERY_VALUE | flag) as hkey:
binpath = winreg.QueryValueEx(hkey, "BinPath")[0]
except OSError:
pass
if binpath:
for name in ["convert.exe", "magick.exe"]:
candidate = Path(binpath, name)
if candidate.exists():
path = str(candidate)
break
else:
path = "convert"
if path is None:
raise ExecutableNotFoundError(
"Failed to find an ImageMagick installation")
return impl([path, "--version"], r"^Version: ImageMagick (\S*)")
elif name == "pdftops":
info = impl(["pdftops", "-v"], "^pdftops version (.*)",
ignore_exit_code=True)
if info and not ("3.0" <= info.version
# poppler version numbers.
or "0.9" <= info.version <= "1.0"):
raise ExecutableNotFoundError(
f"You have pdftops version {info.version} but the minimum "
f"version supported by Matplotlib is 3.0")
return info
else:
raise ValueError("Unknown executable: {!r}".format(name))
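# Illustrative usage sketch (assumed, not part of the original file): probe an
# optional helper and degrade gracefully when it is missing or too old.
#
#     try:
#         info = _get_executable_info("dvipng")
#         print(info.executable, info.version)
#     except ExecutableNotFoundError:
#         pass  # usetex PNG output would be unavailable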
@cbook.deprecated("3.2")
def checkdep_ps_distiller(s):
if not s:
return False
try:
_get_executable_info("gs")
except ExecutableNotFoundError:
_log.warning(
"Setting rcParams['ps.usedistiller'] requires ghostscript.")
return False
if s == "xpdf":
try:
_get_executable_info("pdftops")
except ExecutableNotFoundError:
_log.warning(
"Setting rcParams['ps.usedistiller'] to 'xpdf' requires xpdf.")
return False
return s
def checkdep_usetex(s):
if not s:
return False
if not shutil.which("tex"):
_log.warning("usetex mode requires TeX.")
return False
try:
_get_executable_info("dvipng")
except ExecutableNotFoundError:
_log.warning("usetex mode requires dvipng.")
return False
try:
_get_executable_info("gs")
except ExecutableNotFoundError:
_log.warning("usetex mode requires ghostscript.")
return False
return True
@cbook.deprecated("3.2", alternative="os.path.expanduser('~')")
@_logged_cached('$HOME=%s')
def get_home():
"""
Return the user's home directory.
If the user's home directory cannot be found, return None.
"""
try:
return str(Path.home())
except Exception:
return None
def _get_xdg_config_dir():
"""
Return the XDG configuration directory, according to the XDG base
directory spec:
https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
"""
return os.environ.get('XDG_CONFIG_HOME') or str(Path.home() / ".config")
def _get_xdg_cache_dir():
"""
Return the XDG cache directory, according to the XDG base directory spec:
https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
"""
return os.environ.get('XDG_CACHE_HOME') or str(Path.home() / ".cache")
def _get_config_or_cache_dir(xdg_base):
configdir = os.environ.get('MPLCONFIGDIR')
if configdir:
configdir = Path(configdir).resolve()
elif sys.platform.startswith(('linux', 'freebsd')) and xdg_base:
configdir = Path(xdg_base, "matplotlib")
else:
configdir = Path.home() / ".matplotlib"
try:
configdir.mkdir(parents=True, exist_ok=True)
except OSError:
pass
else:
if os.access(str(configdir), os.W_OK) and configdir.is_dir():
return str(configdir)
# If the config or cache directory cannot be created or is not a writable
# directory, create a temporary one.
tmpdir = os.environ["MPLCONFIGDIR"] = \
tempfile.mkdtemp(prefix="matplotlib-")
atexit.register(shutil.rmtree, tmpdir)
_log.warning(
"Matplotlib created a temporary config/cache directory at %s because "
"the default path (%s) is not a writable directory; it is highly "
"recommended to set the MPLCONFIGDIR environment variable to a "
"writable directory, in particular to speed up the import of "
"Matplotlib and to better support multiprocessing.",
tmpdir, configdir)
return tmpdir
@_logged_cached('CONFIGDIR=%s')
def get_configdir():
"""
    Return the string path of the configuration directory.
The directory is chosen as follows:
1. If the MPLCONFIGDIR environment variable is supplied, choose that.
2. On Linux, follow the XDG specification and look first in
``$XDG_CONFIG_HOME``, if defined, or ``$HOME/.config``. On other
platforms, choose ``$HOME/.matplotlib``.
3. If the chosen directory exists and is writable, use that as the
configuration directory.
4. Else, create a temporary directory, and use it as the configuration
directory.
"""
return _get_config_or_cache_dir(_get_xdg_config_dir())
@_logged_cached('CACHEDIR=%s')
def get_cachedir():
"""
Return the string path of the cache directory.
The procedure used to find the directory is the same as for
_get_config_dir, except using ``$XDG_CACHE_HOME``/``$HOME/.cache`` instead.
"""
return _get_config_or_cache_dir(_get_xdg_cache_dir())
@_logged_cached('matplotlib data path: %s')
def get_data_path(*, _from_rc=None):
"""Return the path to Matplotlib data."""
if _from_rc is not None:
cbook.warn_deprecated(
"3.2",
message=("Setting the datapath via matplotlibrc is deprecated "
"%(since)s and will be removed %(removal)s."),
removal='3.4')
path = Path(_from_rc)
if path.is_dir():
return str(path)
else:
warnings.warn(f"You passed datapath: {_from_rc!r} in your "
f"matplotribrc file ({matplotlib_fname()}). "
"However this path does not exist, falling back "
"to standard paths.")
return _get_data_path()
@_logged_cached('(private) matplotlib data path: %s')
def _get_data_path():
path = Path(__file__).with_name("mpl-data")
if path.is_dir():
return str(path)
cbook.warn_deprecated(
"3.2", message="Matplotlib installs where the data is not in the "
"mpl-data subdirectory of the package are deprecated since %(since)s "
"and support for them will be removed %(removal)s.")
def get_candidate_paths():
# setuptools' namespace_packages may hijack this init file
# so need to try something known to be in Matplotlib, not basemap.
import matplotlib.afm
yield Path(matplotlib.afm.__file__).with_name('mpl-data')
# py2exe zips pure python, so still need special check.
if getattr(sys, 'frozen', None):
yield Path(sys.executable).with_name('mpl-data')
# Try again assuming we need to step up one more directory.
yield Path(sys.executable).parent.with_name('mpl-data')
        # Try again assuming sys.path[0] is a dir, not an exe.
yield Path(sys.path[0]) / 'mpl-data'
for path in get_candidate_paths():
if path.is_dir():
defaultParams['datapath'][0] = str(path)
return str(path)
raise RuntimeError('Could not find the matplotlib data files')
def matplotlib_fname():
"""
Get the location of the config file.
The file location is determined in the following order
- ``$PWD/matplotlibrc``
- ``$MATPLOTLIBRC`` if it is not a directory
- ``$MATPLOTLIBRC/matplotlibrc``
- ``$MPLCONFIGDIR/matplotlibrc``
- On Linux,
- ``$XDG_CONFIG_HOME/matplotlib/matplotlibrc`` (if ``$XDG_CONFIG_HOME``
is defined)
- or ``$HOME/.config/matplotlib/matplotlibrc`` (if ``$XDG_CONFIG_HOME``
is not defined)
- On other platforms,
- ``$HOME/.matplotlib/matplotlibrc`` if ``$HOME`` is defined
- Lastly, it looks in ``$MATPLOTLIBDATA/matplotlibrc``, which should always
exist.
"""
def gen_candidates():
yield os.path.join(os.getcwd(), 'matplotlibrc')
try:
matplotlibrc = os.environ['MATPLOTLIBRC']
except KeyError:
pass
else:
yield matplotlibrc
yield os.path.join(matplotlibrc, 'matplotlibrc')
yield os.path.join(get_configdir(), 'matplotlibrc')
yield os.path.join(_get_data_path(), 'matplotlibrc')
for fname in gen_candidates():
if os.path.exists(fname) and not os.path.isdir(fname):
return fname
raise RuntimeError("Could not find matplotlibrc file; your Matplotlib "
"install is broken")
# rcParams deprecated and automatically mapped to another key.
# Values are tuples of (version, new_name, f_old2new, f_new2old).
_deprecated_map = {}
# rcParams deprecated; some can manually be mapped to another key.
# Values are tuples of (version, new_name_or_None).
_deprecated_ignore_map = {
}
# rcParams deprecated; can use None to suppress warnings; remain actually
# listed in the rcParams (not included in _all_deprecated).
# Values are tuples of (version,)
_deprecated_remain_as_none = {
'datapath': ('3.2.1',),
'animation.avconv_path': ('3.3',),
'animation.avconv_args': ('3.3',),
'animation.html_args': ('3.3',),
'mathtext.fallback_to_cm': ('3.3',),
'keymap.all_axes': ('3.3',),
'savefig.jpeg_quality': ('3.3',),
'text.latex.preview': ('3.3',),
}
_all_deprecated = {*_deprecated_map, *_deprecated_ignore_map}
class RcParams(MutableMapping, dict):
"""
A dictionary object including validation.
Validating functions are defined and associated with rc parameters in
:mod:`matplotlib.rcsetup`.
See Also
--------
:ref:`customizing-with-matplotlibrc-files`
"""
validate = rcsetup._validators
# validate values on the way in
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
def __setitem__(self, key, val):
try:
if key in _deprecated_map:
version, alt_key, alt_val, inverse_alt = _deprecated_map[key]
cbook.warn_deprecated(
version, name=key, obj_type="rcparam", alternative=alt_key)
key = alt_key
val = alt_val(val)
elif key in _deprecated_remain_as_none and val is not None:
version, = _deprecated_remain_as_none[key]
cbook.warn_deprecated(
version, name=key, obj_type="rcparam")
elif key in _deprecated_ignore_map:
version, alt_key = _deprecated_ignore_map[key]
cbook.warn_deprecated(
version, name=key, obj_type="rcparam", alternative=alt_key)
return
elif key == 'backend':
if val is rcsetup._auto_backend_sentinel:
if 'backend' in self:
return
try:
cval = self.validate[key](val)
except ValueError as ve:
raise ValueError(f"Key {key}: {ve}") from None
dict.__setitem__(self, key, cval)
except KeyError as err:
raise KeyError(
f"{key} is not a valid rc parameter (see rcParams.keys() for "
f"a list of valid parameters)") from err
def __getitem__(self, key):
if key in _deprecated_map:
version, alt_key, alt_val, inverse_alt = _deprecated_map[key]
cbook.warn_deprecated(
version, name=key, obj_type="rcparam", alternative=alt_key)
return inverse_alt(dict.__getitem__(self, alt_key))
elif key in _deprecated_ignore_map:
version, alt_key = _deprecated_ignore_map[key]
cbook.warn_deprecated(
version, name=key, obj_type="rcparam", alternative=alt_key)
return dict.__getitem__(self, alt_key) if alt_key else None
elif key == "backend":
val = dict.__getitem__(self, key)
if val is rcsetup._auto_backend_sentinel:
from matplotlib import pyplot as plt
plt.switch_backend(rcsetup._auto_backend_sentinel)
elif key == "datapath":
return get_data_path()
return dict.__getitem__(self, key)
def __repr__(self):
class_name = self.__class__.__name__
indent = len(class_name) + 1
with cbook._suppress_matplotlib_deprecation_warning():
repr_split = pprint.pformat(dict(self), indent=1,
width=80 - indent).split('\n')
repr_indented = ('\n' + ' ' * indent).join(repr_split)
return '{}({})'.format(class_name, repr_indented)
def __str__(self):
return '\n'.join(map('{0[0]}: {0[1]}'.format, sorted(self.items())))
def __iter__(self):
"""Yield sorted list of keys."""
with cbook._suppress_matplotlib_deprecation_warning():
yield from sorted(dict.__iter__(self))
def __len__(self):
return dict.__len__(self)
def find_all(self, pattern):
"""
Return the subset of this RcParams dictionary whose keys match,
using :func:`re.search`, the given ``pattern``.
.. note::
Changes to the returned dictionary are *not* propagated to
the parent RcParams dictionary.
"""
pattern_re = re.compile(pattern)
return RcParams((key, value)
for key, value in self.items()
if pattern_re.search(key))
def copy(self):
return {k: dict.__getitem__(self, k) for k in self}
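# Illustrative usage sketch (assumed, not part of the original file): RcParams
# validates on assignment, and find_all returns a detached filtered copy.
#
#     rcParams['lines.linewidth'] = 2           # validated by rcsetup
#     subset = rcParams.find_all(r'^lines\.')   # changes do not propagate back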
def rc_params(fail_on_error=False):
"""Construct a `RcParams` instance from the default Matplotlib rc file."""
return rc_params_from_file(matplotlib_fname(), fail_on_error)
URL_REGEX = re.compile(r'^http://|^https://|^ftp://|^file:')
def is_url(filename):
"""Return True if string is an http, ftp, or file URL path."""
return URL_REGEX.match(filename) is not None
@functools.lru_cache()
def _get_ssl_context():
try:
import certifi
except ImportError:
_log.debug("Could not import certifi.")
return None
import ssl
return ssl.create_default_context(cafile=certifi.where())
@contextlib.contextmanager
def _open_file_or_url(fname):
if not isinstance(fname, Path) and is_url(fname):
import urllib.request
ssl_ctx = _get_ssl_context()
if ssl_ctx is None:
_log.debug(
"Could not get certifi ssl context, https may not work."
)
with urllib.request.urlopen(fname, context=ssl_ctx) as f:
yield (line.decode('utf-8') for line in f)
else:
fname = os.path.expanduser(fname)
encoding = locale.getpreferredencoding(do_setlocale=False)
if encoding is None:
encoding = "utf-8"
with open(fname, encoding=encoding) as f:
yield f
def _rc_params_in_file(fname, transform=lambda x: x, fail_on_error=False):
"""
Construct a `RcParams` instance from file *fname*.
Unlike `rc_params_from_file`, the configuration class only contains the
parameters specified in the file (i.e. default values are not filled in).
Parameters
----------
fname : path-like
The loaded file.
transform : callable, default: the identity function
A function called on each individual line of the file to transform it,
before further parsing.
fail_on_error : bool, default: False
Whether invalid entries should result in an exception or a warning.
"""
rc_temp = {}
with _open_file_or_url(fname) as fd:
try:
for line_no, line in enumerate(fd, 1):
line = transform(line)
strippedline = line.split('#', 1)[0].strip()
if not strippedline:
continue
tup = strippedline.split(':', 1)
if len(tup) != 2:
_log.warning('Missing colon in file %r, line %d (%r)',
fname, line_no, line.rstrip('\n'))
continue
key, val = tup
key = key.strip()
val = val.strip()
if key in rc_temp:
_log.warning('Duplicate key in file %r, line %d (%r)',
fname, line_no, line.rstrip('\n'))
rc_temp[key] = (val, line, line_no)
except UnicodeDecodeError:
_log.warning('Cannot decode configuration file %s with encoding '
'%s, check LANG and LC_* variables.',
fname,
locale.getpreferredencoding(do_setlocale=False)
or 'utf-8 (default)')
raise
config = RcParams()
for key, (val, line, line_no) in rc_temp.items():
if key in rcsetup._validators:
if fail_on_error:
config[key] = val # try to convert to proper type or raise
else:
try:
config[key] = val # try to convert to proper type or skip
except Exception as msg:
_log.warning('Bad value in file %r, line %d (%r): %s',
fname, line_no, line.rstrip('\n'), msg)
elif key in _deprecated_ignore_map:
version, alt_key = _deprecated_ignore_map[key]
cbook.warn_deprecated(
version, name=key, alternative=alt_key,
addendum="Please update your matplotlibrc.")
else:
version = 'master' if '.post' in __version__ else f'v{__version__}'
_log.warning("""
Bad key %(key)s in file %(fname)s, line %(line_no)s (%(line)r)
You probably need to get an updated matplotlibrc file from
https://github.com/matplotlib/matplotlib/blob/%(version)s/matplotlibrc.template
or from the matplotlib source distribution""",
dict(key=key, fname=fname, line_no=line_no,
line=line.rstrip('\n'), version=version))
return config
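# Illustrative sketch of the format parsed above (example values are assumed):
# each significant line of a matplotlibrc file is "key : value", and '#'
# starts a comment.
#
#     lines.linewidth : 2.5   # parsed as key 'lines.linewidth', value '2.5'
#     figure.dpi      : 120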
def rc_params_from_file(fname, fail_on_error=False, use_default_template=True):
"""
Construct a `RcParams` from file *fname*.
Parameters
----------
fname : str or path-like
A file with Matplotlib rc settings.
fail_on_error : bool
If True, raise an error when the parser fails to convert a parameter.
use_default_template : bool
If True, initialize with default parameters before updating with those
in the given file. If False, the configuration class only contains the
parameters specified in the file. (Useful for updating dicts.)
"""
config_from_file = _rc_params_in_file(fname, fail_on_error=fail_on_error)
if not use_default_template:
return config_from_file
with cbook._suppress_matplotlib_deprecation_warning():
config = RcParams({**rcParamsDefault, **config_from_file})
with cbook._suppress_matplotlib_deprecation_warning():
if config['datapath'] is None:
config['datapath'] = _get_data_path()
else:
config['datapath'] = get_data_path(_from_rc=config['datapath'])
if "".join(config['text.latex.preamble']):
_log.info("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
""", '\n'.join(config['text.latex.preamble']))
_log.debug('loaded rc file %s', fname)
return config
# When constructing the global instances, we need to perform certain updates
# by explicitly calling the superclass (dict.update, dict.items) to avoid
# triggering resolution of _auto_backend_sentinel.
rcParamsDefault = _rc_params_in_file(
cbook._get_data_path("matplotlibrc"),
# Strip leading comment.
transform=lambda line: line[1:] if line.startswith("#") else line,
fail_on_error=True)
dict.update(rcParamsDefault, rcsetup._hardcoded_defaults)
rcParams = RcParams() # The global instance.
dict.update(rcParams, dict.items(rcParamsDefault))
dict.update(rcParams, _rc_params_in_file(matplotlib_fname()))
with cbook._suppress_matplotlib_deprecation_warning():
rcParamsOrig = RcParams(rcParams.copy())
# This also checks that all rcParams are indeed listed in the template.
# Assigning to rcsetup.defaultParams is left only for backcompat.
defaultParams = rcsetup.defaultParams = {
# We want to resolve deprecated rcParams, but not backend...
key: [(rcsetup._auto_backend_sentinel if key == "backend" else
rcParamsDefault[key]),
validator]
for key, validator in rcsetup._validators.items()}
if rcParams['axes.formatter.use_locale']:
locale.setlocale(locale.LC_ALL, '')
def rc(group, **kwargs):
"""
Set the current `.rcParams`. *group* is the grouping for the rc, e.g.,
for ``lines.linewidth`` the group is ``lines``, for
``axes.facecolor``, the group is ``axes``, and so on. Group may
also be a list or tuple of group names, e.g., (*xtick*, *ytick*).
*kwargs* is a dictionary attribute name/value pairs, e.g.,::
rc('lines', linewidth=2, color='r')
sets the current `.rcParams` and is equivalent to::
rcParams['lines.linewidth'] = 2
rcParams['lines.color'] = 'r'
The following aliases are available to save typing for interactive users:
===== =================
Alias Property
===== =================
'lw' 'linewidth'
'ls' 'linestyle'
'c' 'color'
'fc' 'facecolor'
'ec' 'edgecolor'
'mew' 'markeredgewidth'
'aa' 'antialiased'
===== =================
Thus you could abbreviate the above call as::
rc('lines', lw=2, c='r')
Note you can use python's kwargs dictionary facility to store
dictionaries of default parameters. e.g., you can customize the
font rc as follows::
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 'larger'}
rc('font', **font) # pass in the font dict as kwargs
This enables you to easily switch between several configurations. Use
``matplotlib.style.use('default')`` or :func:`~matplotlib.rcdefaults` to
restore the default `.rcParams` after changes.
Notes
-----
Similar functionality is available by using the normal dict interface, i.e.
``rcParams.update({"lines.linewidth": 2, ...})`` (but ``rcParams.update``
does not support abbreviations or grouping).
"""
aliases = {
'lw': 'linewidth',
'ls': 'linestyle',
'c': 'color',
'fc': 'facecolor',
'ec': 'edgecolor',
'mew': 'markeredgewidth',
'aa': 'antialiased',
}
if isinstance(group, str):
group = (group,)
for g in group:
for k, v in kwargs.items():
name = aliases.get(k) or k
key = '%s.%s' % (g, name)
try:
rcParams[key] = v
except KeyError as err:
raise KeyError(('Unrecognized key "%s" for group "%s" and '
'name "%s"') % (key, g, name)) from err
def rcdefaults():
"""
Restore the `.rcParams` from Matplotlib's internal default style.
Style-blacklisted `.rcParams` (defined in
`matplotlib.style.core.STYLE_BLACKLIST`) are not updated.
See Also
--------
matplotlib.rc_file_defaults
Restore the `.rcParams` from the rc file originally loaded by
Matplotlib.
matplotlib.style.use
Use a specific style file. Call ``style.use('default')`` to restore
the default style.
"""
# Deprecation warnings were already handled when creating rcParamsDefault,
# no need to reemit them here.
with cbook._suppress_matplotlib_deprecation_warning():
from .style.core import STYLE_BLACKLIST
rcParams.clear()
rcParams.update({k: v for k, v in rcParamsDefault.items()
if k not in STYLE_BLACKLIST})
def rc_file_defaults():
"""
Restore the `.rcParams` from the original rc file loaded by Matplotlib.
Style-blacklisted `.rcParams` (defined in
`matplotlib.style.core.STYLE_BLACKLIST`) are not updated.
"""
# Deprecation warnings were already handled when creating rcParamsOrig, no
# need to reemit them here.
with cbook._suppress_matplotlib_deprecation_warning():
from .style.core import STYLE_BLACKLIST
rcParams.update({k: rcParamsOrig[k] for k in rcParamsOrig
if k not in STYLE_BLACKLIST})
def rc_file(fname, *, use_default_template=True):
"""
Update `.rcParams` from file.
Style-blacklisted `.rcParams` (defined in
`matplotlib.style.core.STYLE_BLACKLIST`) are not updated.
Parameters
----------
fname : str or path-like
A file with Matplotlib rc settings.
use_default_template : bool
If True, initialize with default parameters before updating with those
in the given file. If False, the current configuration persists
and only the parameters specified in the file are updated.
"""
# Deprecation warnings were already handled in rc_params_from_file, no need
# to reemit them here.
with cbook._suppress_matplotlib_deprecation_warning():
from .style.core import STYLE_BLACKLIST
rc_from_file = rc_params_from_file(
fname, use_default_template=use_default_template)
rcParams.update({k: rc_from_file[k] for k in rc_from_file
if k not in STYLE_BLACKLIST})
@contextlib.contextmanager
def rc_context(rc=None, fname=None):
"""
Return a context manager for temporarily changing rcParams.
Parameters
----------
rc : dict
The rcParams to temporarily set.
fname : str or path-like
A file with Matplotlib rc settings. If both *fname* and *rc* are given,
settings from *rc* take precedence.
See Also
--------
:ref:`customizing-with-matplotlibrc-files`
Examples
--------
Passing explicit values via a dict::
with mpl.rc_context({'interactive': False}):
fig, ax = plt.subplots()
ax.plot(range(3), range(3))
fig.savefig('example.png')
plt.close(fig)
Loading settings from a file::
with mpl.rc_context(fname='print.rc'):
plt.plot(x, y) # uses 'print.rc'
"""
orig = rcParams.copy()
try:
if fname:
rc_file(fname)
if rc:
rcParams.update(rc)
yield
finally:
dict.update(rcParams, orig) # Revert to the original rcs.
def use(backend, *, force=True):
"""
Select the backend used for rendering and GUI integration.
Parameters
----------
backend : str
The backend to switch to. This can either be one of the standard
backend names, which are case-insensitive:
- interactive backends:
GTK3Agg, GTK3Cairo, MacOSX, nbAgg,
Qt4Agg, Qt4Cairo, Qt5Agg, Qt5Cairo,
TkAgg, TkCairo, WebAgg, WX, WXAgg, WXCairo
- non-interactive backends:
agg, cairo, pdf, pgf, ps, svg, template
or a string of the form: ``module://my.module.name``.
force : bool, default: True
If True (the default), raise an `ImportError` if the backend cannot be
set up (either because it fails to import, or because an incompatible
GUI interactive framework is already running); if False, ignore the
failure.
See Also
--------
:ref:`backends`
matplotlib.get_backend
"""
name = validate_backend(backend)
# we need to use the base-class method here to avoid (prematurely)
# resolving the "auto" backend setting
if dict.__getitem__(rcParams, 'backend') == name:
# Nothing to do if the requested backend is already set
pass
else:
# if pyplot is not already imported, do not import it. Doing
# so may trigger a `plt.switch_backend` to the _default_ backend
# before we get a chance to change to the one the user just requested
plt = sys.modules.get('matplotlib.pyplot')
# if pyplot is imported, then try to change backends
if plt is not None:
try:
# we need this import check here to re-raise if the
# user does not have the libraries to support their
# chosen backend installed.
plt.switch_backend(name)
except ImportError:
if force:
raise
# if we have not imported pyplot, then we can set the rcParam
# value which will be respected when the user finally imports
# pyplot
else:
rcParams['backend'] = backend
# if the user has asked for a given backend, do not helpfully
# fallback
rcParams['backend_fallback'] = False
if os.environ.get('MPLBACKEND'):
rcParams['backend'] = os.environ.get('MPLBACKEND')
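# Illustrative usage sketch (assumed, not part of the original file): pick a
# non-interactive backend for a headless job, before pyplot is imported.
#
#     import matplotlib
#     matplotlib.use("Agg")               # must precede pyplot's first import
#     import matplotlib.pyplot as plt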
def get_backend():
"""
Return the name of the current backend.
See Also
--------
matplotlib.use
"""
return rcParams['backend']
def interactive(b):
"""
Set whether to redraw after every plotting command (e.g. `.pyplot.xlabel`).
"""
rcParams['interactive'] = b
def is_interactive():
"""Return whether to redraw after every plotting command."""
return rcParams['interactive']
default_test_modules = [
'matplotlib.tests',
'mpl_toolkits.tests',
]
def _init_tests():
# The version of FreeType to install locally for running the
# tests. This must match the value in `setupext.py`
LOCAL_FREETYPE_VERSION = '2.6.1'
from matplotlib import ft2font
if (ft2font.__freetype_version__ != LOCAL_FREETYPE_VERSION or
ft2font.__freetype_build_type__ != 'local'):
_log.warning(
f"Matplotlib is not built with the correct FreeType version to "
f"run tests. Rebuild without setting system_freetype=1 in "
f"setup.cfg. Expect many image comparison failures below. "
f"Expected freetype version {LOCAL_FREETYPE_VERSION}. "
f"Found freetype version {ft2font.__freetype_version__}. "
"Freetype build type is {}local".format(
"" if ft2font.__freetype_build_type__ == 'local' else "not "))
@cbook._delete_parameter("3.2", "switch_backend_warn")
@cbook._delete_parameter("3.3", "recursionlimit")
def test(verbosity=None, coverage=False, switch_backend_warn=True,
recursionlimit=0, **kwargs):
"""Run the matplotlib test suite."""
try:
import pytest
except ImportError:
print("matplotlib.test requires pytest to run.")
return -1
if not os.path.isdir(os.path.join(os.path.dirname(__file__), 'tests')):
print("Matplotlib test data is not installed")
return -1
old_backend = get_backend()
old_recursionlimit = sys.getrecursionlimit()
try:
use('agg')
if recursionlimit:
sys.setrecursionlimit(recursionlimit)
args = kwargs.pop('argv', [])
provide_default_modules = True
use_pyargs = True
for arg in args:
if any(arg.startswith(module_path)
for module_path in default_test_modules):
provide_default_modules = False
break
if os.path.exists(arg):
provide_default_modules = False
use_pyargs = False
break
if use_pyargs:
args += ['--pyargs']
if provide_default_modules:
args += default_test_modules
if coverage:
args += ['--cov']
if verbosity:
args += ['-' + 'v' * verbosity]
retcode = pytest.main(args, **kwargs)
finally:
if old_backend.lower() != 'agg':
use(old_backend)
if recursionlimit:
sys.setrecursionlimit(old_recursionlimit)
return retcode
test.__test__ = False # pytest: this function is not a test
def _replacer(data, value):
"""
    Return ``data[value]`` if *value* is a string key of *data*; otherwise
    pass *value* through unchanged. Either way, convert the result to a
    sequence.
"""
try:
# if key isn't a string don't bother
if isinstance(value, str):
# try to use __getitem__
value = data[value]
except Exception:
# key does not exist, silently fall back to key
pass
return sanitize_sequence(value)
def _label_from_arg(y, default_name):
try:
return y.name
except AttributeError:
if isinstance(default_name, str):
return default_name
return None
_DATA_DOC_TITLE = """
Notes
-----
"""
_DATA_DOC_APPENDIX = """
.. note::
In addition to the above described arguments, this function can take
a *data* keyword argument. If such a *data* argument is given,
{replaced}
Objects passed as **data** must support item access (``data[s]``) and
membership test (``s in data``).
"""
def _add_data_doc(docstring, replace_names):
"""
Add documentation for a *data* field to the given docstring.
Parameters
----------
docstring : str
The input docstring.
replace_names : list of str or None
The list of parameter names which arguments should be replaced by
``data[name]`` (if ``data[name]`` does not throw an exception). If
None, replacement is attempted for all arguments.
Returns
-------
str
The augmented docstring.
"""
if (docstring is None
or replace_names is not None and len(replace_names) == 0):
return docstring
docstring = inspect.cleandoc(docstring)
repl = (
(" every other argument can also be string ``s``, which is\n"
" interpreted as ``data[s]`` (unless this raises an exception).")
if replace_names is None else
(" the following arguments can also be string ``s``, which is\n"
" interpreted as ``data[s]`` (unless this raises an exception):\n"
" " + ", ".join(map("*{}*".format, replace_names))) + ".")
addendum = _DATA_DOC_APPENDIX.format(replaced=repl)
if _DATA_DOC_TITLE not in docstring:
addendum = _DATA_DOC_TITLE + addendum
return docstring + addendum
def _preprocess_data(func=None, *, replace_names=None, label_namer=None):
"""
A decorator to add a 'data' kwarg to a function.
When applied::
@_preprocess_data()
def func(ax, *args, **kwargs): ...
the signature is modified to ``decorated(ax, *args, data=None, **kwargs)``
with the following behavior:
- if called with ``data=None``, forward the other arguments to ``func``;
- otherwise, *data* must be a mapping; for any argument passed in as a
string ``name``, replace the argument by ``data[name]`` (if this does not
throw an exception), then forward the arguments to ``func``.
In either case, any argument that is a `MappingView` is also converted to a
list.
Parameters
----------
replace_names : list of str or None, default: None
The list of parameter names for which lookup into *data* should be
attempted. If None, replacement is attempted for all arguments.
label_namer : str, default: None
If set e.g. to "namer" (which must be a kwarg in the function's
signature -- not as ``**kwargs``), if the *namer* argument passed in is
a (string) key of *data* and no *label* kwarg is passed, then use the
(string) value of the *namer* as *label*. ::
@_preprocess_data(label_namer="foo")
def func(foo, label=None): ...
func("key", data={"key": value})
# is equivalent to
func.__wrapped__(value, label="key")
"""
if func is None: # Return the actual decorator.
return functools.partial(
_preprocess_data,
replace_names=replace_names, label_namer=label_namer)
sig = inspect.signature(func)
varargs_name = None
varkwargs_name = None
arg_names = []
params = list(sig.parameters.values())
for p in params:
if p.kind is Parameter.VAR_POSITIONAL:
varargs_name = p.name
elif p.kind is Parameter.VAR_KEYWORD:
varkwargs_name = p.name
else:
arg_names.append(p.name)
data_param = Parameter("data", Parameter.KEYWORD_ONLY, default=None)
if varkwargs_name:
params.insert(-1, data_param)
else:
params.append(data_param)
new_sig = sig.replace(parameters=params)
arg_names = arg_names[1:] # remove the first "ax" / self arg
assert {*arg_names}.issuperset(replace_names or []) or varkwargs_name, (
"Matplotlib internal error: invalid replace_names ({!r}) for {!r}"
.format(replace_names, func.__name__))
assert label_namer is None or label_namer in arg_names, (
"Matplotlib internal error: invalid label_namer ({!r}) for {!r}"
.format(label_namer, func.__name__))
@functools.wraps(func)
def inner(ax, *args, data=None, **kwargs):
if data is None:
return func(ax, *map(sanitize_sequence, args), **kwargs)
bound = new_sig.bind(ax, *args, **kwargs)
auto_label = (bound.arguments.get(label_namer)
or bound.kwargs.get(label_namer))
for k, v in bound.arguments.items():
if k == varkwargs_name:
for k1, v1 in v.items():
if replace_names is None or k1 in replace_names:
v[k1] = _replacer(data, v1)
elif k == varargs_name:
if replace_names is None:
bound.arguments[k] = tuple(_replacer(data, v1) for v1 in v)
else:
if replace_names is None or k in replace_names:
bound.arguments[k] = _replacer(data, v)
new_args = bound.args
new_kwargs = bound.kwargs
args_and_kwargs = {**bound.arguments, **bound.kwargs}
if label_namer and "label" not in args_and_kwargs:
new_kwargs["label"] = _label_from_arg(
args_and_kwargs.get(label_namer), auto_label)
return func(*new_args, **new_kwargs)
inner.__doc__ = _add_data_doc(inner.__doc__, replace_names)
inner.__signature__ = new_sig
return inner
_log.debug('matplotlib version %s', __version__)
_log.debug('interactive is %s', is_interactive())
_log.debug('platform is %s', sys.platform)
_log.debug('loaded modules: %s', list(sys.modules))
| 34.192722
| 79
| 0.619566
|
import atexit
from collections import namedtuple
from collections.abc import MutableMapping
import contextlib
from distutils.version import LooseVersion
import functools
import importlib
import inspect
from inspect import Parameter
import locale
import logging
import os
from pathlib import Path
import pprint
import re
import shutil
import subprocess
import sys
import tempfile
import warnings
from . import cbook, rcsetup
from matplotlib.cbook import MatplotlibDeprecationWarning, sanitize_sequence
from matplotlib.cbook import mplDeprecation
from matplotlib.rcsetup import validate_backend, cycler
import numpy
from ._version import get_versions
__version__ = str(get_versions()['version'])
del get_versions
_log = logging.getLogger(__name__)
__bibtex__ = r"""@Article{Hunter:2007,
Author = {Hunter, J. D.},
Title = {Matplotlib: A 2D graphics environment},
Journal = {Computing in Science \& Engineering},
Volume = {9},
Number = {3},
Pages = {90--95},
abstract = {Matplotlib is a 2D graphics package used for Python
for application development, interactive scripting, and
publication-quality image generation across user
interfaces and operating systems.},
publisher = {IEEE COMPUTER SOC},
year = 2007
}"""
@cbook.deprecated("3.2")
def compare_versions(a, b):
if isinstance(a, bytes):
cbook.warn_deprecated(
"3.0", message="compare_versions arguments should be strs.")
a = a.decode('ascii')
if isinstance(b, bytes):
cbook.warn_deprecated(
"3.0", message="compare_versions arguments should be strs.")
b = b.decode('ascii')
if a:
return LooseVersion(a) >= LooseVersion(b)
else:
return False
def _check_versions():
from . import ft2font
for modname, minver in [
("cycler", "0.10"),
("dateutil", "2.1"),
("kiwisolver", "1.0.1"),
("numpy", "1.15"),
("pyparsing", "2.0.1"),
]:
module = importlib.import_module(modname)
if LooseVersion(module.__version__) < minver:
raise ImportError("Matplotlib requires {}>={}; you have {}"
.format(modname, minver, module.__version__))
_check_versions()
@functools.lru_cache()
def _ensure_handler():
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
_log.addHandler(handler)
return handler
def set_loglevel(level):
_log.setLevel(level.upper())
_ensure_handler().setLevel(level.upper())
def _logged_cached(fmt, func=None):
if func is None:
return functools.partial(_logged_cached, fmt)
called = False
ret = None
@functools.wraps(func)
def wrapper(**kwargs):
nonlocal called, ret
if not called:
ret = func(**kwargs)
called = True
_log.debug(fmt, ret)
return ret
return wrapper
_ExecInfo = namedtuple("_ExecInfo", "executable version")
class ExecutableNotFoundError(FileNotFoundError):
pass
@functools.lru_cache()
def _get_executable_info(name):
def impl(args, regex, min_ver=None, ignore_exit_code=False):
try:
output = subprocess.check_output(
args, stderr=subprocess.STDOUT,
universal_newlines=True, errors="replace")
except subprocess.CalledProcessError as _cpe:
if ignore_exit_code:
output = _cpe.output
else:
raise ExecutableNotFoundError(str(_cpe)) from _cpe
except OSError as _ose:
raise ExecutableNotFoundError(str(_ose)) from _ose
match = re.search(regex, output)
if match:
version = LooseVersion(match.group(1))
if min_ver is not None and version < min_ver:
raise ExecutableNotFoundError(
f"You have {args[0]} version {version} but the minimum "
f"version supported by Matplotlib is {min_ver}")
return _ExecInfo(args[0], version)
else:
raise ExecutableNotFoundError(
f"Failed to determine the version of {args[0]} from "
f"{' '.join(args)}, which output {output}")
if name == "dvipng":
return impl(["dvipng", "-version"], "(?m)^dvipng(?: .*)? (.+)", "1.6")
elif name == "gs":
execs = (["gswin32c", "gswin64c", "mgs", "gs"]
if sys.platform == "win32" else
["gs"])
for e in execs:
try:
return impl([e, "--version"], "(.*)", "9")
except ExecutableNotFoundError:
pass
message = "Failed to find a Ghostscript installation"
raise ExecutableNotFoundError(message)
elif name == "inkscape":
try:
return impl(["inkscape", "--without-gui", "-V"],
"Inkscape ([^ ]*)")
except ExecutableNotFoundError:
pass
return impl(["inkscape", "-V"], "Inkscape ([^ ]*)")
elif name == "magick":
path = None
if sys.platform == "win32":
import winreg
binpath = ""
for flag in [0, winreg.KEY_WOW64_32KEY, winreg.KEY_WOW64_64KEY]:
try:
with winreg.OpenKeyEx(
winreg.HKEY_LOCAL_MACHINE,
r"Software\Imagemagick\Current",
0, winreg.KEY_QUERY_VALUE | flag) as hkey:
binpath = winreg.QueryValueEx(hkey, "BinPath")[0]
except OSError:
pass
if binpath:
for name in ["convert.exe", "magick.exe"]:
candidate = Path(binpath, name)
if candidate.exists():
path = str(candidate)
break
else:
path = "convert"
if path is None:
raise ExecutableNotFoundError(
"Failed to find an ImageMagick installation")
return impl([path, "--version"], r"^Version: ImageMagick (\S*)")
elif name == "pdftops":
info = impl(["pdftops", "-v"], "^pdftops version (.*)",
ignore_exit_code=True)
if info and not ("3.0" <= info.version
or "0.9" <= info.version <= "1.0"):
raise ExecutableNotFoundError(
f"You have pdftops version {info.version} but the minimum "
f"version supported by Matplotlib is 3.0")
return info
else:
raise ValueError("Unknown executable: {!r}".format(name))
@cbook.deprecated("3.2")
def checkdep_ps_distiller(s):
if not s:
return False
try:
_get_executable_info("gs")
except ExecutableNotFoundError:
_log.warning(
"Setting rcParams['ps.usedistiller'] requires ghostscript.")
return False
if s == "xpdf":
try:
_get_executable_info("pdftops")
except ExecutableNotFoundError:
_log.warning(
"Setting rcParams['ps.usedistiller'] to 'xpdf' requires xpdf.")
return False
return s
def checkdep_usetex(s):
if not s:
return False
if not shutil.which("tex"):
_log.warning("usetex mode requires TeX.")
return False
try:
_get_executable_info("dvipng")
except ExecutableNotFoundError:
_log.warning("usetex mode requires dvipng.")
return False
try:
_get_executable_info("gs")
except ExecutableNotFoundError:
_log.warning("usetex mode requires ghostscript.")
return False
return True
@cbook.deprecated("3.2", alternative="os.path.expanduser('~')")
@_logged_cached('$HOME=%s')
def get_home():
try:
return str(Path.home())
except Exception:
return None
def _get_xdg_config_dir():
return os.environ.get('XDG_CONFIG_HOME') or str(Path.home() / ".config")
def _get_xdg_cache_dir():
return os.environ.get('XDG_CACHE_HOME') or str(Path.home() / ".cache")
def _get_config_or_cache_dir(xdg_base):
configdir = os.environ.get('MPLCONFIGDIR')
if configdir:
configdir = Path(configdir).resolve()
elif sys.platform.startswith(('linux', 'freebsd')) and xdg_base:
configdir = Path(xdg_base, "matplotlib")
else:
configdir = Path.home() / ".matplotlib"
try:
configdir.mkdir(parents=True, exist_ok=True)
except OSError:
pass
else:
if os.access(str(configdir), os.W_OK) and configdir.is_dir():
return str(configdir)
tmpdir = os.environ["MPLCONFIGDIR"] = \
tempfile.mkdtemp(prefix="matplotlib-")
atexit.register(shutil.rmtree, tmpdir)
_log.warning(
"Matplotlib created a temporary config/cache directory at %s because "
"the default path (%s) is not a writable directory; it is highly "
"recommended to set the MPLCONFIGDIR environment variable to a "
"writable directory, in particular to speed up the import of "
"Matplotlib and to better support multiprocessing.",
tmpdir, configdir)
return tmpdir
@_logged_cached('CONFIGDIR=%s')
def get_configdir():
return _get_config_or_cache_dir(_get_xdg_config_dir())
@_logged_cached('CACHEDIR=%s')
def get_cachedir():
return _get_config_or_cache_dir(_get_xdg_cache_dir())
@_logged_cached('matplotlib data path: %s')
def get_data_path(*, _from_rc=None):
if _from_rc is not None:
cbook.warn_deprecated(
"3.2",
message=("Setting the datapath via matplotlibrc is deprecated "
"%(since)s and will be removed %(removal)s."),
removal='3.4')
path = Path(_from_rc)
if path.is_dir():
return str(path)
else:
warnings.warn(f"You passed datapath: {_from_rc!r} in your "
f"matplotribrc file ({matplotlib_fname()}). "
"However this path does not exist, falling back "
"to standard paths.")
return _get_data_path()
@_logged_cached('(private) matplotlib data path: %s')
def _get_data_path():
path = Path(__file__).with_name("mpl-data")
if path.is_dir():
return str(path)
cbook.warn_deprecated(
"3.2", message="Matplotlib installs where the data is not in the "
"mpl-data subdirectory of the package are deprecated since %(since)s "
"and support for them will be removed %(removal)s.")
def get_candidate_paths():
import matplotlib.afm
yield Path(matplotlib.afm.__file__).with_name('mpl-data')
# py2exe zips pure python, so still need special check.
if getattr(sys, 'frozen', None):
yield Path(sys.executable).with_name('mpl-data')
# Try again assuming we need to step up one more directory.
yield Path(sys.executable).parent.with_name('mpl-data')
        # Try again assuming sys.path[0] is a dir, not an exe.
yield Path(sys.path[0]) / 'mpl-data'
for path in get_candidate_paths():
if path.is_dir():
defaultParams['datapath'][0] = str(path)
return str(path)
raise RuntimeError('Could not find the matplotlib data files')
def matplotlib_fname():
def gen_candidates():
yield os.path.join(os.getcwd(), 'matplotlibrc')
try:
matplotlibrc = os.environ['MATPLOTLIBRC']
except KeyError:
pass
else:
yield matplotlibrc
yield os.path.join(matplotlibrc, 'matplotlibrc')
yield os.path.join(get_configdir(), 'matplotlibrc')
yield os.path.join(_get_data_path(), 'matplotlibrc')
for fname in gen_candidates():
if os.path.exists(fname) and not os.path.isdir(fname):
return fname
raise RuntimeError("Could not find matplotlibrc file; your Matplotlib "
"install is broken")
# rcParams deprecated and automatically mapped to another key.
# Values are tuples of (version, new_name, f_old2new, f_new2old).
_deprecated_map = {}
# rcParams deprecated; some can manually be mapped to another key.
# Values are tuples of (version, new_name_or_None).
_deprecated_ignore_map = {
}
# rcParams deprecated; can use None to suppress warnings; remain actually
# listed in the rcParams (not included in _all_deprecated).
# Values are tuples of (version,)
_deprecated_remain_as_none = {
'datapath': ('3.2.1',),
'animation.avconv_path': ('3.3',),
'animation.avconv_args': ('3.3',),
'animation.html_args': ('3.3',),
'mathtext.fallback_to_cm': ('3.3',),
'keymap.all_axes': ('3.3',),
'savefig.jpeg_quality': ('3.3',),
'text.latex.preview': ('3.3',),
}
_all_deprecated = {*_deprecated_map, *_deprecated_ignore_map}
class RcParams(MutableMapping, dict):
validate = rcsetup._validators
# validate values on the way in
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
def __setitem__(self, key, val):
try:
if key in _deprecated_map:
version, alt_key, alt_val, inverse_alt = _deprecated_map[key]
cbook.warn_deprecated(
version, name=key, obj_type="rcparam", alternative=alt_key)
key = alt_key
val = alt_val(val)
elif key in _deprecated_remain_as_none and val is not None:
version, = _deprecated_remain_as_none[key]
cbook.warn_deprecated(
version, name=key, obj_type="rcparam")
elif key in _deprecated_ignore_map:
version, alt_key = _deprecated_ignore_map[key]
cbook.warn_deprecated(
version, name=key, obj_type="rcparam", alternative=alt_key)
return
elif key == 'backend':
if val is rcsetup._auto_backend_sentinel:
if 'backend' in self:
return
try:
cval = self.validate[key](val)
except ValueError as ve:
raise ValueError(f"Key {key}: {ve}") from None
dict.__setitem__(self, key, cval)
except KeyError as err:
raise KeyError(
f"{key} is not a valid rc parameter (see rcParams.keys() for "
f"a list of valid parameters)") from err
def __getitem__(self, key):
if key in _deprecated_map:
version, alt_key, alt_val, inverse_alt = _deprecated_map[key]
cbook.warn_deprecated(
version, name=key, obj_type="rcparam", alternative=alt_key)
return inverse_alt(dict.__getitem__(self, alt_key))
elif key in _deprecated_ignore_map:
version, alt_key = _deprecated_ignore_map[key]
cbook.warn_deprecated(
version, name=key, obj_type="rcparam", alternative=alt_key)
return dict.__getitem__(self, alt_key) if alt_key else None
elif key == "backend":
val = dict.__getitem__(self, key)
if val is rcsetup._auto_backend_sentinel:
from matplotlib import pyplot as plt
plt.switch_backend(rcsetup._auto_backend_sentinel)
elif key == "datapath":
return get_data_path()
return dict.__getitem__(self, key)
def __repr__(self):
class_name = self.__class__.__name__
indent = len(class_name) + 1
with cbook._suppress_matplotlib_deprecation_warning():
repr_split = pprint.pformat(dict(self), indent=1,
width=80 - indent).split('\n')
repr_indented = ('\n' + ' ' * indent).join(repr_split)
return '{}({})'.format(class_name, repr_indented)
def __str__(self):
return '\n'.join(map('{0[0]}: {0[1]}'.format, sorted(self.items())))
def __iter__(self):
with cbook._suppress_matplotlib_deprecation_warning():
yield from sorted(dict.__iter__(self))
def __len__(self):
return dict.__len__(self)
def find_all(self, pattern):
pattern_re = re.compile(pattern)
return RcParams((key, value)
for key, value in self.items()
if pattern_re.search(key))
def copy(self):
return {k: dict.__getitem__(self, k) for k in self}
def rc_params(fail_on_error=False):
return rc_params_from_file(matplotlib_fname(), fail_on_error)
URL_REGEX = re.compile(r'^http://|^https://|^ftp://|^file:')
def is_url(filename):
return URL_REGEX.match(filename) is not None
@functools.lru_cache()
def _get_ssl_context():
try:
import certifi
except ImportError:
_log.debug("Could not import certifi.")
return None
import ssl
return ssl.create_default_context(cafile=certifi.where())
@contextlib.contextmanager
def _open_file_or_url(fname):
if not isinstance(fname, Path) and is_url(fname):
import urllib.request
ssl_ctx = _get_ssl_context()
if ssl_ctx is None:
_log.debug(
"Could not get certifi ssl context, https may not work."
)
with urllib.request.urlopen(fname, context=ssl_ctx) as f:
yield (line.decode('utf-8') for line in f)
else:
fname = os.path.expanduser(fname)
encoding = locale.getpreferredencoding(do_setlocale=False)
if encoding is None:
encoding = "utf-8"
with open(fname, encoding=encoding) as f:
yield f
def _rc_params_in_file(fname, transform=lambda x: x, fail_on_error=False):
rc_temp = {}
with _open_file_or_url(fname) as fd:
try:
for line_no, line in enumerate(fd, 1):
line = transform(line)
                strippedline = line.split('#', 1)[0].strip()
if not strippedline:
continue
tup = strippedline.split(':', 1)
if len(tup) != 2:
_log.warning('Missing colon in file %r, line %d (%r)',
fname, line_no, line.rstrip('\n'))
continue
key, val = tup
key = key.strip()
val = val.strip()
if key in rc_temp:
_log.warning('Duplicate key in file %r, line %d (%r)',
fname, line_no, line.rstrip('\n'))
rc_temp[key] = (val, line, line_no)
except UnicodeDecodeError:
_log.warning('Cannot decode configuration file %s with encoding '
'%s, check LANG and LC_* variables.',
fname,
locale.getpreferredencoding(do_setlocale=False)
or 'utf-8 (default)')
raise
config = RcParams()
for key, (val, line, line_no) in rc_temp.items():
if key in rcsetup._validators:
if fail_on_error:
config[key] = val # try to convert to proper type or raise
else:
try:
config[key] = val # try to convert to proper type or skip
except Exception as msg:
_log.warning('Bad value in file %r, line %d (%r): %s',
fname, line_no, line.rstrip('\n'), msg)
elif key in _deprecated_ignore_map:
version, alt_key = _deprecated_ignore_map[key]
cbook.warn_deprecated(
version, name=key, alternative=alt_key,
addendum="Please update your matplotlibrc.")
else:
version = 'master' if '.post' in __version__ else f'v{__version__}'
_log.warning("""
Bad key %(key)s in file %(fname)s, line %(line_no)s (%(line)r)
You probably need to get an updated matplotlibrc file from
https://github.com/matplotlib/matplotlib/blob/%(version)s/matplotlibrc.template
or from the matplotlib source distribution""",
dict(key=key, fname=fname, line_no=line_no,
line=line.rstrip('\n'), version=version))
return config
def rc_params_from_file(fname, fail_on_error=False, use_default_template=True):
config_from_file = _rc_params_in_file(fname, fail_on_error=fail_on_error)
if not use_default_template:
return config_from_file
with cbook._suppress_matplotlib_deprecation_warning():
config = RcParams({**rcParamsDefault, **config_from_file})
with cbook._suppress_matplotlib_deprecation_warning():
if config['datapath'] is None:
config['datapath'] = _get_data_path()
else:
config['datapath'] = get_data_path(_from_rc=config['datapath'])
if "".join(config['text.latex.preamble']):
_log.info("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
""", '\n'.join(config['text.latex.preamble']))
_log.debug('loaded rc file %s', fname)
return config
# When constructing the global instances, we need to perform certain updates
# by explicitly calling the superclass (dict.update, dict.items) to avoid
# triggering resolution of _auto_backend_sentinel.
rcParamsDefault = _rc_params_in_file(
cbook._get_data_path("matplotlibrc"),
# Strip leading comment.
transform=lambda line: line[1:] if line.startswith("#") else line,
fail_on_error=True)
dict.update(rcParamsDefault, rcsetup._hardcoded_defaults)
rcParams = RcParams() # The global instance.
dict.update(rcParams, dict.items(rcParamsDefault))
dict.update(rcParams, _rc_params_in_file(matplotlib_fname()))
with cbook._suppress_matplotlib_deprecation_warning():
rcParamsOrig = RcParams(rcParams.copy())
# This also checks that all rcParams are indeed listed in the template.
# Assigning to rcsetup.defaultParams is left only for backcompat.
defaultParams = rcsetup.defaultParams = {
# We want to resolve deprecated rcParams, but not backend...
key: [(rcsetup._auto_backend_sentinel if key == "backend" else
rcParamsDefault[key]),
validator]
for key, validator in rcsetup._validators.items()}
if rcParams['axes.formatter.use_locale']:
locale.setlocale(locale.LC_ALL, '')
def rc(group, **kwargs):
aliases = {
'lw': 'linewidth',
'ls': 'linestyle',
'c': 'color',
'fc': 'facecolor',
'ec': 'edgecolor',
'mew': 'markeredgewidth',
'aa': 'antialiased',
}
if isinstance(group, str):
group = (group,)
for g in group:
for k, v in kwargs.items():
name = aliases.get(k) or k
key = '%s.%s' % (g, name)
try:
rcParams[key] = v
except KeyError as err:
raise KeyError(('Unrecognized key "%s" for group "%s" and '
'name "%s"') % (key, g, name)) from err
def rcdefaults():
# Deprecation warnings were already handled when creating rcParamsDefault,
# no need to reemit them here.
with cbook._suppress_matplotlib_deprecation_warning():
from .style.core import STYLE_BLACKLIST
rcParams.clear()
rcParams.update({k: v for k, v in rcParamsDefault.items()
if k not in STYLE_BLACKLIST})
def rc_file_defaults():
# Deprecation warnings were already handled when creating rcParamsOrig, no
# need to reemit them here.
with cbook._suppress_matplotlib_deprecation_warning():
from .style.core import STYLE_BLACKLIST
rcParams.update({k: rcParamsOrig[k] for k in rcParamsOrig
if k not in STYLE_BLACKLIST})
def rc_file(fname, *, use_default_template=True):
# Deprecation warnings were already handled in rc_params_from_file, no need
# to reemit them here.
with cbook._suppress_matplotlib_deprecation_warning():
from .style.core import STYLE_BLACKLIST
rc_from_file = rc_params_from_file(
fname, use_default_template=use_default_template)
rcParams.update({k: rc_from_file[k] for k in rc_from_file
if k not in STYLE_BLACKLIST})
@contextlib.contextmanager
def rc_context(rc=None, fname=None):
orig = rcParams.copy()
try:
if fname:
rc_file(fname)
if rc:
rcParams.update(rc)
yield
finally:
dict.update(rcParams, orig) # Revert to the original rcs.
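# Usage sketch: rc_context temporarily overrides settings and restores the
# snapshot taken above even if the body raises:
#     with rc_context({'interactive': False}):
#         ...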
def use(backend, *, force=True):
name = validate_backend(backend)
# we need to use the base-class method here to avoid (prematurely)
# resolving the "auto" backend setting
if dict.__getitem__(rcParams, 'backend') == name:
# Nothing to do if the requested backend is already set
pass
else:
# if pyplot is not already imported, do not import it. Doing
# so may trigger a `plt.switch_backend` to the _default_ backend
# before we get a chance to change to the one the user just requested
plt = sys.modules.get('matplotlib.pyplot')
# if pyplot is imported, then try to change backends
if plt is not None:
try:
# we need this import check here to re-raise if the
# user does not have the libraries to support their
# chosen backend installed.
plt.switch_backend(name)
except ImportError:
if force:
raise
# if we have not imported pyplot, then we can set the rcParam
# value which will be respected when the user finally imports
# pyplot
else:
rcParams['backend'] = backend
# if the user has asked for a given backend, do not helpfully
# fallback
rcParams['backend_fallback'] = False
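# Example: use("agg") switches the canvas immediately when pyplot is already
# imported; with force=False a failed backend import is swallowed and the
# current backend is kept.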
if os.environ.get('MPLBACKEND'):
rcParams['backend'] = os.environ.get('MPLBACKEND')
def get_backend():
return rcParams['backend']
def interactive(b):
rcParams['interactive'] = b
def is_interactive():
return rcParams['interactive']
default_test_modules = [
'matplotlib.tests',
'mpl_toolkits.tests',
]
def _init_tests():
# The version of FreeType to install locally for running the
# tests. This must match the value in `setupext.py`
LOCAL_FREETYPE_VERSION = '2.6.1'
from matplotlib import ft2font
if (ft2font.__freetype_version__ != LOCAL_FREETYPE_VERSION or
ft2font.__freetype_build_type__ != 'local'):
_log.warning(
f"Matplotlib is not built with the correct FreeType version to "
f"run tests. Rebuild without setting system_freetype=1 in "
f"setup.cfg. Expect many image comparison failures below. "
f"Expected freetype version {LOCAL_FREETYPE_VERSION}. "
f"Found freetype version {ft2font.__freetype_version__}. "
"Freetype build type is {}local".format(
"" if ft2font.__freetype_build_type__ == 'local' else "not "))
@cbook._delete_parameter("3.2", "switch_backend_warn")
@cbook._delete_parameter("3.3", "recursionlimit")
def test(verbosity=None, coverage=False, switch_backend_warn=True,
recursionlimit=0, **kwargs):
try:
import pytest
except ImportError:
print("matplotlib.test requires pytest to run.")
return -1
if not os.path.isdir(os.path.join(os.path.dirname(__file__), 'tests')):
print("Matplotlib test data is not installed")
return -1
old_backend = get_backend()
old_recursionlimit = sys.getrecursionlimit()
try:
use('agg')
if recursionlimit:
sys.setrecursionlimit(recursionlimit)
args = kwargs.pop('argv', [])
provide_default_modules = True
use_pyargs = True
for arg in args:
if any(arg.startswith(module_path)
for module_path in default_test_modules):
provide_default_modules = False
break
if os.path.exists(arg):
provide_default_modules = False
use_pyargs = False
break
if use_pyargs:
args += ['--pyargs']
if provide_default_modules:
args += default_test_modules
if coverage:
args += ['--cov']
if verbosity:
args += ['-' + 'v' * verbosity]
retcode = pytest.main(args, **kwargs)
finally:
if old_backend.lower() != 'agg':
use(old_backend)
if recursionlimit:
sys.setrecursionlimit(old_recursionlimit)
return retcode
test.__test__ = False # pytest: this function is not a test
def _replacer(data, value):
try:
# if key isn't a string don't bother
if isinstance(value, str):
# try to use __getitem__
value = data[value]
except Exception:
# key does not exist, silently fall back to key
pass
return sanitize_sequence(value)
def _label_from_arg(y, default_name):
try:
return y.name
except AttributeError:
if isinstance(default_name, str):
return default_name
return None
_DATA_DOC_TITLE = """
Notes
-----
"""
_DATA_DOC_APPENDIX = """
.. note::
In addition to the above described arguments, this function can take
a *data* keyword argument. If such a *data* argument is given,
{replaced}
Objects passed as **data** must support item access (``data[s]``) and
membership test (``s in data``).
"""
def _add_data_doc(docstring, replace_names):
if (docstring is None
or replace_names is not None and len(replace_names) == 0):
return docstring
docstring = inspect.cleandoc(docstring)
repl = (
(" every other argument can also be string ``s``, which is\n"
" interpreted as ``data[s]`` (unless this raises an exception).")
if replace_names is None else
(" the following arguments can also be string ``s``, which is\n"
" interpreted as ``data[s]`` (unless this raises an exception):\n"
" " + ", ".join(map("*{}*".format, replace_names))) + ".")
addendum = _DATA_DOC_APPENDIX.format(replaced=repl)
if _DATA_DOC_TITLE not in docstring:
addendum = _DATA_DOC_TITLE + addendum
return docstring + addendum
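# _preprocess_data wraps an Axes method so that, when a `data` kwarg is
# passed, string arguments named in replace_names (or all of them when it is
# None) are looked up as data[s], and a default "label" kwarg is derived from
# the argument named by label_namer.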
def _preprocess_data(func=None, *, replace_names=None, label_namer=None):
if func is None: # Return the actual decorator.
return functools.partial(
_preprocess_data,
replace_names=replace_names, label_namer=label_namer)
sig = inspect.signature(func)
varargs_name = None
varkwargs_name = None
arg_names = []
params = list(sig.parameters.values())
for p in params:
if p.kind is Parameter.VAR_POSITIONAL:
varargs_name = p.name
elif p.kind is Parameter.VAR_KEYWORD:
varkwargs_name = p.name
else:
arg_names.append(p.name)
data_param = Parameter("data", Parameter.KEYWORD_ONLY, default=None)
if varkwargs_name:
params.insert(-1, data_param)
else:
params.append(data_param)
new_sig = sig.replace(parameters=params)
arg_names = arg_names[1:] # remove the first "ax" / self arg
assert {*arg_names}.issuperset(replace_names or []) or varkwargs_name, (
"Matplotlib internal error: invalid replace_names ({!r}) for {!r}"
.format(replace_names, func.__name__))
assert label_namer is None or label_namer in arg_names, (
"Matplotlib internal error: invalid label_namer ({!r}) for {!r}"
.format(label_namer, func.__name__))
@functools.wraps(func)
def inner(ax, *args, data=None, **kwargs):
if data is None:
return func(ax, *map(sanitize_sequence, args), **kwargs)
bound = new_sig.bind(ax, *args, **kwargs)
auto_label = (bound.arguments.get(label_namer)
or bound.kwargs.get(label_namer))
for k, v in bound.arguments.items():
if k == varkwargs_name:
for k1, v1 in v.items():
if replace_names is None or k1 in replace_names:
v[k1] = _replacer(data, v1)
elif k == varargs_name:
if replace_names is None:
bound.arguments[k] = tuple(_replacer(data, v1) for v1 in v)
else:
if replace_names is None or k in replace_names:
bound.arguments[k] = _replacer(data, v)
new_args = bound.args
new_kwargs = bound.kwargs
args_and_kwargs = {**bound.arguments, **bound.kwargs}
if label_namer and "label" not in args_and_kwargs:
new_kwargs["label"] = _label_from_arg(
args_and_kwargs.get(label_namer), auto_label)
return func(*new_args, **new_kwargs)
inner.__doc__ = _add_data_doc(inner.__doc__, replace_names)
inner.__signature__ = new_sig
return inner
_log.debug('matplotlib version %s', __version__)
_log.debug('interactive is %s', is_interactive())
_log.debug('platform is %s', sys.platform)
_log.debug('loaded modules: %s', list(sys.modules))
| true
| true
|
1c4797802e5895313ae0514ecdda3acd949bd084
| 6,958
|
py
|
Python
|
Lib/objc/CoreTelephony.py
|
snazari/Pyto
|
bcea7bbef35cab21ce73087b1a0c00a07d07ec72
|
[
"MIT"
] | 701
|
2018-10-22T11:54:09.000Z
|
2022-03-31T14:39:30.000Z
|
Lib/objc/CoreTelephony.py
|
snazari/Pyto
|
bcea7bbef35cab21ce73087b1a0c00a07d07ec72
|
[
"MIT"
] | 229
|
2018-10-24T09:15:31.000Z
|
2021-12-24T16:51:37.000Z
|
Lib/objc/CoreTelephony.py
|
snazari/Pyto
|
bcea7bbef35cab21ce73087b1a0c00a07d07ec72
|
[
"MIT"
] | 131
|
2018-11-25T18:33:03.000Z
|
2022-03-24T03:18:07.000Z
|
"""
Classes from the 'CoreTelephony' framework.
"""
try:
    from rubicon.objc import ObjCClass
except (ImportError, ValueError):
    # rubicon-objc may be absent entirely (ImportError) or fail to load the
    # Objective-C runtime (ValueError); either way fall back to a stub.
    def ObjCClass(name):
        return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
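# Each binding below resolves its Objective-C class at import time, falling
# back to None when the class (or the runtime itself) is unavailable.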
MuxNotificationSink = _Class("MuxNotificationSink")
CoreTelephonyClientRemoteAsyncProxy = _Class("CoreTelephonyClientRemoteAsyncProxy")
CoreTelephonyClientDelegateProxy = _Class("CoreTelephonyClientDelegateProxy")
CTDisplayPlanList = _Class("CTDisplayPlanList")
CTDisplayPlan = _Class("CTDisplayPlan")
CTEmergencyModeResult = _Class("CTEmergencyModeResult")
CTDeviceDataUsage = _Class("CTDeviceDataUsage")
CTPerAppDataUsage = _Class("CTPerAppDataUsage")
CTAppDataUsage = _Class("CTAppDataUsage")
CTDataUsed = _Class("CTDataUsed")
CTDataUsage = _Class("CTDataUsage")
CTXPCContexts = _Class("CTXPCContexts")
CTXPCContextInfo = _Class("CTXPCContextInfo")
CTXPCSimLessContexts = _Class("CTXPCSimLessContexts")
CTXPCSimLessContextInfo = _Class("CTXPCSimLessContextInfo")
CTXPCServiceSubscriptionInfo = _Class("CTXPCServiceSubscriptionInfo")
CTXPCServiceSubscriptionContext = _Class("CTXPCServiceSubscriptionContext")
CTBandInfo = _Class("CTBandInfo")
CTRadioAccessTechnology = _Class("CTRadioAccessTechnology")
CTSweetgumUsageAccountMetrics = _Class("CTSweetgumUsageAccountMetrics")
CTLocalDevice = _Class("CTLocalDevice")
CTSubscriber = _Class("CTSubscriber")
CTBundle = _Class("CTBundle")
CTCellularData = _Class("CTCellularData")
CTSubscriberInfo = _Class("CTSubscriberInfo")
CTCallForwardingValue = _Class("CTCallForwardingValue")
CTVoicemailInfoType = _Class("CTVoicemailInfoType")
CTSweetgumDataPlanMetrics = _Class("CTSweetgumDataPlanMetrics")
CTBinarySMS = _Class("CTBinarySMS")
CTSMSDataType = _Class("CTSMSDataType")
CTPlanList = _Class("CTPlanList")
CTSuppServicesNotificationData = _Class("CTSuppServicesNotificationData")
CTInstalledPlan = _Class("CTInstalledPlan")
CTRemoteDeviceList = _Class("CTRemoteDeviceList")
CTSubscriberAuthDataHolder = _Class("CTSubscriberAuthDataHolder")
CTDataSettings = _Class("CTDataSettings")
CTSweetgumCapabilities = _Class("CTSweetgumCapabilities")
CTPhoneBookEntry = _Class("CTPhoneBookEntry")
CTSweetgumUsagePlanItemMessages = _Class("CTSweetgumUsagePlanItemMessages")
CTIMSRegistrationTransportInfo = _Class("CTIMSRegistrationTransportInfo")
CTCallCapabilities = _Class("CTCallCapabilities")
CTDeviceIdentifier = _Class("CTDeviceIdentifier")
CTActivationPolicyState = _Class("CTActivationPolicyState")
CTSweetgumAppsInfo = _Class("CTSweetgumAppsInfo")
CTEmergencyMode = _Class("CTEmergencyMode")
CTPhoneNumberInfo = _Class("CTPhoneNumberInfo")
CTCellInfo = _Class("CTCellInfo")
CTSubscriberAuthResult = _Class("CTSubscriberAuthResult")
CTSubscriberAuthRequest = _Class("CTSubscriberAuthRequest")
CTSubscriberAlgorithm = _Class("CTSubscriberAlgorithm")
CTSubscriberAlgorithmEAPAKA = _Class("CTSubscriberAlgorithmEAPAKA")
CTSubscriberAlgorithmEAPSIM = _Class("CTSubscriberAlgorithmEAPSIM")
CTRemoteDevice = _Class("CTRemoteDevice")
CTSweetgumPlan = _Class("CTSweetgumPlan")
CTNetworkList = _Class("CTNetworkList")
CTSweetgumPlansInfo = _Class("CTSweetgumPlansInfo")
CTSIMToolkitMenu = _Class("CTSIMToolkitMenu")
CoreTelephonyClient = _Class("CoreTelephonyClient")
CTSignalStrengthMeasurements = _Class("CTSignalStrengthMeasurements")
CTSignalStrengthInfo = _Class("CTSignalStrengthInfo")
CTCall = _Class("CTCall")
CTCallCenter = _Class("CTCallCenter")
CoreTelephonyClientMux = _Class("CoreTelephonyClientMux")
CTRadioFrequencyFrontEndScanData = _Class("CTRadioFrequencyFrontEndScanData")
CTNetworkSelectionInfo = _Class("CTNetworkSelectionInfo")
CTEncryptionStatusInfo = _Class("CTEncryptionStatusInfo")
CTRemotePlanIdentifierList = _Class("CTRemotePlanIdentifierList")
CTPlanIdentifier = _Class("CTPlanIdentifier")
CTRemotePlanIdentifier = _Class("CTRemotePlanIdentifier")
CTXPCError = _Class("CTXPCError")
CTTelephonyNetworkInfo = _Class("CTTelephonyNetworkInfo")
CTPhoneNumber = _Class("CTPhoneNumber")
CTCarrier = _Class("CTCarrier")
CTCellularPlanProvisioningRequest = _Class("CTCellularPlanProvisioningRequest")
CTMobileEquipmentInfoList = _Class("CTMobileEquipmentInfoList")
CTMobileEquipmentInfo = _Class("CTMobileEquipmentInfo")
CTDataStatus = _Class("CTDataStatus")
CTEnhancedLinkQualityMetric = _Class("CTEnhancedLinkQualityMetric")
CTEnhancedDataLinkQualityMetric = _Class("CTEnhancedDataLinkQualityMetric")
CTVoiceLinkQualityMetric = _Class("CTVoiceLinkQualityMetric")
CTCellularPlanManagerCameraScanAction = _Class("CTCellularPlanManagerCameraScanAction")
CTCellularPlanProvisioning = _Class("CTCellularPlanProvisioning")
CTIMSRegistrationStatus = _Class("CTIMSRegistrationStatus")
CTServiceDescriptorContainer = _Class("CTServiceDescriptorContainer")
CTServiceDescriptor = _Class("CTServiceDescriptor")
CTEmailAddress = _Class("CTEmailAddress")
CTSIMToolkitItemList = _Class("CTSIMToolkitItemList")
CTSIMToolkitItem = _Class("CTSIMToolkitItem")
CTMessageStatus = _Class("CTMessageStatus")
CTCellularPlanProvisioningOnDeviceActivationRequest = _Class(
"CTCellularPlanProvisioningOnDeviceActivationRequest"
)
CTPNRContextInfo = _Class("CTPNRContextInfo")
CTPNRRequestSentInfo = _Class("CTPNRRequestSentInfo")
CTPNRRequestType = _Class("CTPNRRequestType")
CTPNRDataType = _Class("CTPNRDataType")
CTDataConnectionStatus = _Class("CTDataConnectionStatus")
CTAudioCodecInfo = _Class("CTAudioCodecInfo")
CTSimLabel = _Class("CTSimLabel")
CTMessagePart = _Class("CTMessagePart")
CTMmsEncoder = _Class("CTMmsEncoder")
CTCellIdInfo = _Class("CTCellIdInfo")
CTMmsRegistrationFailureInfoType = _Class("CTMmsRegistrationFailureInfoType")
CTMessageCenter = _Class("CTMessageCenter")
CTPlan = _Class("CTPlan")
CTRemotePlan = _Class("CTRemotePlan")
CTRemoteBlacklistPlan = _Class("CTRemoteBlacklistPlan")
CTPendingPlan = _Class("CTPendingPlan")
CTSweetgumUsagePlanItemData = _Class("CTSweetgumUsagePlanItemData")
CTSweetgumUserConsentFlowInfo = _Class("CTSweetgumUserConsentFlowInfo")
CTNetwork = _Class("CTNetwork")
CTSweetgumDataPlanMetricsItem = _Class("CTSweetgumDataPlanMetricsItem")
CTRegistrationDisplayStatus = _Class("CTRegistrationDisplayStatus")
CTRatSelection = _Class("CTRatSelection")
CTAsciiAddress = _Class("CTAsciiAddress")
CTSweetgumPlanGroup = _Class("CTSweetgumPlanGroup")
CTDataConnectionAvailabilityStatus = _Class("CTDataConnectionAvailabilityStatus")
CTSweetgumUsageInfo = _Class("CTSweetgumUsageInfo")
CTSupportedMaxDataRates = _Class("CTSupportedMaxDataRates")
CTMessage = _Class("CTMessage")
CTSweetgumUsagePlanMetrics = _Class("CTSweetgumUsagePlanMetrics")
CTServiceDisconnectionStatus = _Class("CTServiceDisconnectionStatus")
CTPlanTransferAttributes = _Class("CTPlanTransferAttributes")
CTTetheringStatus = _Class("CTTetheringStatus")
CTPriVersion = _Class("CTPriVersion")
CTSweetgumUsagePlanItemVoice = _Class("CTSweetgumUsagePlanItemVoice")
CTSweetgumDataPlanMetricsError = _Class("CTSweetgumDataPlanMetricsError")
| 47.333333
| 87
| 0.841765
|
try:
    from rubicon.objc import ObjCClass
except (ImportError, ValueError):
    def ObjCClass(name):
        return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
MuxNotificationSink = _Class("MuxNotificationSink")
CoreTelephonyClientRemoteAsyncProxy = _Class("CoreTelephonyClientRemoteAsyncProxy")
CoreTelephonyClientDelegateProxy = _Class("CoreTelephonyClientDelegateProxy")
CTDisplayPlanList = _Class("CTDisplayPlanList")
CTDisplayPlan = _Class("CTDisplayPlan")
CTEmergencyModeResult = _Class("CTEmergencyModeResult")
CTDeviceDataUsage = _Class("CTDeviceDataUsage")
CTPerAppDataUsage = _Class("CTPerAppDataUsage")
CTAppDataUsage = _Class("CTAppDataUsage")
CTDataUsed = _Class("CTDataUsed")
CTDataUsage = _Class("CTDataUsage")
CTXPCContexts = _Class("CTXPCContexts")
CTXPCContextInfo = _Class("CTXPCContextInfo")
CTXPCSimLessContexts = _Class("CTXPCSimLessContexts")
CTXPCSimLessContextInfo = _Class("CTXPCSimLessContextInfo")
CTXPCServiceSubscriptionInfo = _Class("CTXPCServiceSubscriptionInfo")
CTXPCServiceSubscriptionContext = _Class("CTXPCServiceSubscriptionContext")
CTBandInfo = _Class("CTBandInfo")
CTRadioAccessTechnology = _Class("CTRadioAccessTechnology")
CTSweetgumUsageAccountMetrics = _Class("CTSweetgumUsageAccountMetrics")
CTLocalDevice = _Class("CTLocalDevice")
CTSubscriber = _Class("CTSubscriber")
CTBundle = _Class("CTBundle")
CTCellularData = _Class("CTCellularData")
CTSubscriberInfo = _Class("CTSubscriberInfo")
CTCallForwardingValue = _Class("CTCallForwardingValue")
CTVoicemailInfoType = _Class("CTVoicemailInfoType")
CTSweetgumDataPlanMetrics = _Class("CTSweetgumDataPlanMetrics")
CTBinarySMS = _Class("CTBinarySMS")
CTSMSDataType = _Class("CTSMSDataType")
CTPlanList = _Class("CTPlanList")
CTSuppServicesNotificationData = _Class("CTSuppServicesNotificationData")
CTInstalledPlan = _Class("CTInstalledPlan")
CTRemoteDeviceList = _Class("CTRemoteDeviceList")
CTSubscriberAuthDataHolder = _Class("CTSubscriberAuthDataHolder")
CTDataSettings = _Class("CTDataSettings")
CTSweetgumCapabilities = _Class("CTSweetgumCapabilities")
CTPhoneBookEntry = _Class("CTPhoneBookEntry")
CTSweetgumUsagePlanItemMessages = _Class("CTSweetgumUsagePlanItemMessages")
CTIMSRegistrationTransportInfo = _Class("CTIMSRegistrationTransportInfo")
CTCallCapabilities = _Class("CTCallCapabilities")
CTDeviceIdentifier = _Class("CTDeviceIdentifier")
CTActivationPolicyState = _Class("CTActivationPolicyState")
CTSweetgumAppsInfo = _Class("CTSweetgumAppsInfo")
CTEmergencyMode = _Class("CTEmergencyMode")
CTPhoneNumberInfo = _Class("CTPhoneNumberInfo")
CTCellInfo = _Class("CTCellInfo")
CTSubscriberAuthResult = _Class("CTSubscriberAuthResult")
CTSubscriberAuthRequest = _Class("CTSubscriberAuthRequest")
CTSubscriberAlgorithm = _Class("CTSubscriberAlgorithm")
CTSubscriberAlgorithmEAPAKA = _Class("CTSubscriberAlgorithmEAPAKA")
CTSubscriberAlgorithmEAPSIM = _Class("CTSubscriberAlgorithmEAPSIM")
CTRemoteDevice = _Class("CTRemoteDevice")
CTSweetgumPlan = _Class("CTSweetgumPlan")
CTNetworkList = _Class("CTNetworkList")
CTSweetgumPlansInfo = _Class("CTSweetgumPlansInfo")
CTSIMToolkitMenu = _Class("CTSIMToolkitMenu")
CoreTelephonyClient = _Class("CoreTelephonyClient")
CTSignalStrengthMeasurements = _Class("CTSignalStrengthMeasurements")
CTSignalStrengthInfo = _Class("CTSignalStrengthInfo")
CTCall = _Class("CTCall")
CTCallCenter = _Class("CTCallCenter")
CoreTelephonyClientMux = _Class("CoreTelephonyClientMux")
CTRadioFrequencyFrontEndScanData = _Class("CTRadioFrequencyFrontEndScanData")
CTNetworkSelectionInfo = _Class("CTNetworkSelectionInfo")
CTEncryptionStatusInfo = _Class("CTEncryptionStatusInfo")
CTRemotePlanIdentifierList = _Class("CTRemotePlanIdentifierList")
CTPlanIdentifier = _Class("CTPlanIdentifier")
CTRemotePlanIdentifier = _Class("CTRemotePlanIdentifier")
CTXPCError = _Class("CTXPCError")
CTTelephonyNetworkInfo = _Class("CTTelephonyNetworkInfo")
CTPhoneNumber = _Class("CTPhoneNumber")
CTCarrier = _Class("CTCarrier")
CTCellularPlanProvisioningRequest = _Class("CTCellularPlanProvisioningRequest")
CTMobileEquipmentInfoList = _Class("CTMobileEquipmentInfoList")
CTMobileEquipmentInfo = _Class("CTMobileEquipmentInfo")
CTDataStatus = _Class("CTDataStatus")
CTEnhancedLinkQualityMetric = _Class("CTEnhancedLinkQualityMetric")
CTEnhancedDataLinkQualityMetric = _Class("CTEnhancedDataLinkQualityMetric")
CTVoiceLinkQualityMetric = _Class("CTVoiceLinkQualityMetric")
CTCellularPlanManagerCameraScanAction = _Class("CTCellularPlanManagerCameraScanAction")
CTCellularPlanProvisioning = _Class("CTCellularPlanProvisioning")
CTIMSRegistrationStatus = _Class("CTIMSRegistrationStatus")
CTServiceDescriptorContainer = _Class("CTServiceDescriptorContainer")
CTServiceDescriptor = _Class("CTServiceDescriptor")
CTEmailAddress = _Class("CTEmailAddress")
CTSIMToolkitItemList = _Class("CTSIMToolkitItemList")
CTSIMToolkitItem = _Class("CTSIMToolkitItem")
CTMessageStatus = _Class("CTMessageStatus")
CTCellularPlanProvisioningOnDeviceActivationRequest = _Class(
"CTCellularPlanProvisioningOnDeviceActivationRequest"
)
CTPNRContextInfo = _Class("CTPNRContextInfo")
CTPNRRequestSentInfo = _Class("CTPNRRequestSentInfo")
CTPNRRequestType = _Class("CTPNRRequestType")
CTPNRDataType = _Class("CTPNRDataType")
CTDataConnectionStatus = _Class("CTDataConnectionStatus")
CTAudioCodecInfo = _Class("CTAudioCodecInfo")
CTSimLabel = _Class("CTSimLabel")
CTMessagePart = _Class("CTMessagePart")
CTMmsEncoder = _Class("CTMmsEncoder")
CTCellIdInfo = _Class("CTCellIdInfo")
CTMmsRegistrationFailureInfoType = _Class("CTMmsRegistrationFailureInfoType")
CTMessageCenter = _Class("CTMessageCenter")
CTPlan = _Class("CTPlan")
CTRemotePlan = _Class("CTRemotePlan")
CTRemoteBlacklistPlan = _Class("CTRemoteBlacklistPlan")
CTPendingPlan = _Class("CTPendingPlan")
CTSweetgumUsagePlanItemData = _Class("CTSweetgumUsagePlanItemData")
CTSweetgumUserConsentFlowInfo = _Class("CTSweetgumUserConsentFlowInfo")
CTNetwork = _Class("CTNetwork")
CTSweetgumDataPlanMetricsItem = _Class("CTSweetgumDataPlanMetricsItem")
CTRegistrationDisplayStatus = _Class("CTRegistrationDisplayStatus")
CTRatSelection = _Class("CTRatSelection")
CTAsciiAddress = _Class("CTAsciiAddress")
CTSweetgumPlanGroup = _Class("CTSweetgumPlanGroup")
CTDataConnectionAvailabilityStatus = _Class("CTDataConnectionAvailabilityStatus")
CTSweetgumUsageInfo = _Class("CTSweetgumUsageInfo")
CTSupportedMaxDataRates = _Class("CTSupportedMaxDataRates")
CTMessage = _Class("CTMessage")
CTSweetgumUsagePlanMetrics = _Class("CTSweetgumUsagePlanMetrics")
CTServiceDisconnectionStatus = _Class("CTServiceDisconnectionStatus")
CTPlanTransferAttributes = _Class("CTPlanTransferAttributes")
CTTetheringStatus = _Class("CTTetheringStatus")
CTPriVersion = _Class("CTPriVersion")
CTSweetgumUsagePlanItemVoice = _Class("CTSweetgumUsagePlanItemVoice")
CTSweetgumDataPlanMetricsError = _Class("CTSweetgumDataPlanMetricsError")
| true
| true
|
1c4798111c6d8c070c9d6fc6c731414b5eeea115
| 34
|
py
|
Python
|
main/views/admin/profile/__init__.py
|
tiberiucorbu/av-website
|
f26f44a367d718316442506b130a7034697670b8
|
[
"MIT"
] | null | null | null |
main/views/admin/profile/__init__.py
|
tiberiucorbu/av-website
|
f26f44a367d718316442506b130a7034697670b8
|
[
"MIT"
] | null | null | null |
main/views/admin/profile/__init__.py
|
tiberiucorbu/av-website
|
f26f44a367d718316442506b130a7034697670b8
|
[
"MIT"
] | null | null | null |
from .profile_controller import *
| 17
| 33
| 0.823529
|
from .profile_controller import *
| true
| true
|
1c4799987b867024deedfd8f407f6c7f0bdfb743
| 1,772
|
py
|
Python
|
keylime/tornado_requests.py
|
ansasaki/keylime
|
6aeb105975e8f2b3e9c83417dcf69b25dc2d69e4
|
[
"Apache-2.0"
] | null | null | null |
keylime/tornado_requests.py
|
ansasaki/keylime
|
6aeb105975e8f2b3e9c83417dcf69b25dc2d69e4
|
[
"Apache-2.0"
] | null | null | null |
keylime/tornado_requests.py
|
ansasaki/keylime
|
6aeb105975e8f2b3e9c83417dcf69b25dc2d69e4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
import ssl
from tornado import httpclient
from keylime import json
async def request(method, url, params=None, data=None, context=None, headers=None):
http_client = httpclient.AsyncHTTPClient()
if params is not None and len(list(params.keys())) > 0:
url += '?'
for key in list(params.keys()):
url += f"{key}={params[key]}&"
url = url[:-1]
if context is not None:
url = url.replace('http://', 'https://', 1)
# Convert dict to JSON before sending
if isinstance(data, dict):
data = json.dumps(data)
if headers is None:
headers = {}
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
try:
req = httpclient.HTTPRequest(url=url,
method=method,
ssl_options=context,
body=data,
headers=headers)
response = await http_client.fetch(req)
except httpclient.HTTPError as e:
if e.response is None:
return TornadoResponse(500, str(e))
return TornadoResponse(e.response.code, e.response.body)
except ConnectionError as e:
return TornadoResponse(599, f"Connection error: {str(e)}")
except ssl.SSLError as e:
return TornadoResponse(599, f"SSL connection error: {str(e)}")
if response is None:
return None
return TornadoResponse(response.code, response.body)
class TornadoResponse:
def __init__(self, code, body):
self.status_code = code
self.body = body
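# Usage sketch (hypothetical URL, inside a running event loop):
#     resp = await request("GET", "http://localhost:9002/version")
#     print(resp.status_code, resp.body)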
| 28.580645
| 83
| 0.586907
|
import ssl
from tornado import httpclient
from keylime import json
async def request(method, url, params=None, data=None, context=None, headers=None):
http_client = httpclient.AsyncHTTPClient()
if params is not None and len(list(params.keys())) > 0:
url += '?'
for key in list(params.keys()):
url += f"{key}={params[key]}&"
url = url[:-1]
if context is not None:
url = url.replace('http://', 'https://', 1)
if isinstance(data, dict):
data = json.dumps(data)
if headers is None:
headers = {}
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
try:
req = httpclient.HTTPRequest(url=url,
method=method,
ssl_options=context,
body=data,
headers=headers)
response = await http_client.fetch(req)
except httpclient.HTTPError as e:
if e.response is None:
return TornadoResponse(500, str(e))
return TornadoResponse(e.response.code, e.response.body)
except ConnectionError as e:
return TornadoResponse(599, f"Connection error: {str(e)}")
except ssl.SSLError as e:
return TornadoResponse(599, f"SSL connection error: {str(e)}")
if response is None:
return None
return TornadoResponse(response.code, response.body)
class TornadoResponse:
def __init__(self, code, body):
self.status_code = code
self.body = body
| true
| true
|
1c479a27833091f86e7dce2d076b0b29113122e0
| 1,190
|
py
|
Python
|
rgbContrast.py
|
tsarjak/gsoc_code_library
|
961cea8e0833d28e5c78e7dd06f7c3823b38cbfb
|
[
"MIT"
] | null | null | null |
rgbContrast.py
|
tsarjak/gsoc_code_library
|
961cea8e0833d28e5c78e7dd06f7c3823b38cbfb
|
[
"MIT"
] | null | null | null |
rgbContrast.py
|
tsarjak/gsoc_code_library
|
961cea8e0833d28e5c78e7dd06f7c3823b38cbfb
|
[
"MIT"
] | null | null | null |
import cv2
from PIL import Image
import numpy as np
def arrayToImage(img,sizeX,sizeY,saveAs):
rgbArray = np.zeros((sizeX,sizeY,3),'uint8')
for i in range(0,sizeX):
for j in range(0,sizeY):
for k in range(0,3):
rgbArray[i,j,k] = img[i,j,k] * 255
img = Image.fromarray(rgbArray)
img.save(saveAs)
im = Image.open("inImage.jpg")
sizeX = im.size[0]
sizeY = im.size[1]
photo = im.load()
img = np.zeros((sizeX,sizeY,3),'float')
for i in range(0,sizeX):
for j in range(0,sizeY):
for k in range(0,3):
img[i,j,k] = photo[i,j][k]
img[i,j,k] = ((img[i,j,k])/255)
factor = 0.4
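# new = old + (1 - old) * factor moves a channel `factor` of the way toward
# full intensity: red and green are always lightened, while blue is darkened
# when red dominates green and lightened otherwise.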
for i in range(0, sizeX):
for j in range(0,sizeY):
img[i,j,0] = ((1 - img[i,j,0]) * factor) + img[i,j,0]
img[i,j,1] = ((1 - img[i,j,1]) * factor) + img[i,j,1]
        # Change in blue can be rectified for sure!
if img[i,j,0] > img[i,j,1] :
img[i,j,2] = img[i,j,2] - (img[i,j,2] * factor)
else:
img[i,j,2] = ((1 - img[i,j,2]) * factor) + img[i,j,2]
arrayToImage(img, sizeX, sizeY, "outImage6.jpg")
'''
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
| 25.869565
| 65
| 0.544538
|
import cv2
from PIL import Image
import numpy as np
def arrayToImage(img,sizeX,sizeY,saveAs):
rgbArray = np.zeros((sizeX,sizeY,3),'uint8')
for i in range(0,sizeX):
for j in range(0,sizeY):
for k in range(0,3):
rgbArray[i,j,k] = img[i,j,k] * 255
img = Image.fromarray(rgbArray)
img.save(saveAs)
im = Image.open("inImage.jpg")
sizeX = im.size[0]
sizeY = im.size[1]
photo = im.load()
img = np.zeros((sizeX,sizeY,3),'float')
for i in range(0,sizeX):
for j in range(0,sizeY):
for k in range(0,3):
img[i,j,k] = photo[i,j][k]
img[i,j,k] = ((img[i,j,k])/255)
factor = 0.4
for i in range(0, sizeX):
for j in range(0,sizeY):
img[i,j,0] = ((1 - img[i,j,0]) * factor) + img[i,j,0]
img[i,j,1] = ((1 - img[i,j,1]) * factor) + img[i,j,1]
if img[i,j,0] > img[i,j,1] :
img[i,j,2] = img[i,j,2] - (img[i,j,2] * factor)
else:
img[i,j,2] = ((1 - img[i,j,2]) * factor) + img[i,j,2]
arrayToImage(img, sizeX, sizeY, "outImage6.jpg")
| true
| true
|
1c479cab6063cd842005ff2b64e355a6610808bd
| 31,229
|
py
|
Python
|
fstunes/__init__.py
|
raxod502/fstunes
|
d54860ba1a709ce75855e6897d7f8019ecb92640
|
[
"MIT"
] | 1
|
2019-05-03T04:08:17.000Z
|
2019-05-03T04:08:17.000Z
|
fstunes/__init__.py
|
raxod502/fstunes
|
d54860ba1a709ce75855e6897d7f8019ecb92640
|
[
"MIT"
] | null | null | null |
fstunes/__init__.py
|
raxod502/fstunes
|
d54860ba1a709ce75855e6897d7f8019ecb92640
|
[
"MIT"
] | null | null | null |
import argparse
import bisect
import collections
import math
import mutagen
import os
import pathlib
import random
import re
import shutil
import string
import sys
def has_duplicates(l):
return len(l) != len(set(l))
def iter_len(iterable):
return sum(1 for _ in iterable)
def plural(n):
return "s" if n != 1 else ""
def pluralen(n):
return plural(len(n))
def plurals(n):
return n, plural(n)
def pluralens(n):
return plurals(len(n))
def log(message, *args, **kwargs):
print("fstunes: {}".format(message), *args, file=sys.stderr, **kwargs)
def die(message=None, *args, **kwargs):
if os.environ.get("FSTUNES_DEBUG"):
assert False, "stacktrace requested"
if message is not None:
log(message, *args, **kwargs)
sys.exit(1)
def are_you_sure(default, yes):
prompt = "[Y/n]" if default else "[y/N]"
print("Proceed? {} ".format(prompt), end="")
if yes:
response = "y (from command-line options)"
print(response)
else:
response = input()
if response.lower().startswith("y"):
return True
if response.lower().startswith("n"):
return False
return default
def add_yes_option(parser):
parser.add_argument("-y", "--yes", action="store_true",
help="Don't ask for confirmation")
def add_fields_option(parser):
parser.add_argument("-f", "--fields", metavar="FIELD1,FIELD2,...",
help="Which metadata fields to include")
def add_match_options(parser):
parser.add_argument("-m", "--match", metavar="FIELD=EXPR", action="append",
help="Filter songs")
parser.add_argument("--match-literal", metavar="FIELD=VALUE",
action="append", help="Filter songs by literal match")
parser.add_argument("--match-set", metavar="FIELD=VALUE1,VALUE2,...",
action="append", help="Filter songs by set membership")
parser.add_argument("--match-range", metavar="FIELD=LOW-HIGH",
action="append",
help="Filter songs by range inclusion")
parser.add_argument("-M", "--match-all", metavar="FIELD", action="append",
help="Do not filter songs")
parser.add_argument("--set-delimiter", default=",", metavar="DELIM",
help="Delimiter to use for set filtering")
parser.add_argument("--range-delimiter", default="-", metavar="DELIM",
help="Delimiter to use for range filtering")
SORT_OPTION_STRINGS = ("-s", "--sort")
REVERSE_OPTION_STRINGS = ("-r", "--reverse")
SHUFFLE_OPTION_STRINGS = ("-x", "--shuffle")
class SortAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string):
if option_string in SORT_OPTION_STRINGS:
modifier = "sort"
elif option_string in REVERSE_OPTION_STRINGS:
modifier = "reverse"
elif option_string in SHUFFLE_OPTION_STRINGS:
modifier = "shuffle"
else:
assert False, "unexpected modifier: {}".format(modifier)
if namespace.sort is None:
namespace.sort = []
namespace.sort.append({
"field": value,
"modifier": modifier,
})
def add_sort_options(parser):
parser.add_argument(*SORT_OPTION_STRINGS, action=SortAction,
help="Sort by field")
parser.add_argument(*REVERSE_OPTION_STRINGS, action=SortAction,
help="Sort by field in reverse order")
parser.add_argument(*SHUFFLE_OPTION_STRINGS, action=SortAction,
help="Shuffle by field")
def get_parser():
parser = argparse.ArgumentParser(
description=(
"Minimal command-line music library manager and media player."))
subparsers = parser.add_subparsers(dest="subcommand")
parser_import = subparsers.add_parser(
"import", help="Add media files to library")
parser_import.add_argument(
"paths", nargs="+", metavar="path", help="Media file or directory")
parser_playlist = subparsers.add_parser(
"playlist", help="Create or delete playlists")
subparsers_playlist = parser_playlist.add_subparsers(
dest="subcommand_playlist")
parser_playlist_create = subparsers_playlist.add_parser(
"create", help="Create a playlist")
parser_playlist_create.add_argument(
"playlists", nargs="+", metavar="playlist",
help="Name of playlist to create")
parser_playlist_delete = subparsers_playlist.add_parser(
"delete", help="Delete a playlist")
parser_playlist_delete.add_argument(
"playlists", nargs="+", metavar="playlist",
help="Name of playlist to delete")
add_yes_option(parser_playlist_delete)
parser_insert = subparsers.add_parser(
"insert", help="Add songs to a playlist or the queue")
add_match_options(parser_insert)
add_sort_options(parser_insert)
parser_insert.add_argument(
"-t", "--transfer", action="store_true",
help="Also remove songs from original playlists")
add_yes_option(parser_insert)
group_insert_before = parser_insert.add_mutually_exclusive_group()
group_insert_before.add_argument(
"--after", action="store_false", dest="before",
help="Insert after given index")
group_insert_before.add_argument(
"--before", action="store_true", help="Insert before given index")
parser_insert.add_argument(
"playlist", help="Name of playlist in which to insert")
parser_insert.add_argument(
"index", type=int, help="Index at which to insert")
parser_remove = subparsers.add_parser(
"remove", help="Remove songs from a playlist or the queue")
add_match_options(parser_remove)
add_yes_option(parser_remove)
parser_edit = subparsers.add_parser(
"edit", help="Edit song metadata")
add_match_options(parser_edit)
add_sort_options(parser_edit)
add_fields_option(parser_edit)
parser_edit.add_argument(
"-e", "--editor", help="Shell command to run text editor")
add_yes_option(parser_edit)
parser_list = subparsers.add_parser(
"list", help="List songs and associated information")
add_match_options(parser_list)
add_sort_options(parser_list)
add_fields_option(parser_list)
parser_delete = subparsers.add_parser(
"delete", help="Delete media files from library")
add_match_options(parser_delete)
add_yes_option(parser_delete)
parser_seek = subparsers.add_parser(
"seek", help="Change place in queue and play/pause")
group_seek_play_pause = parser_seek.add_mutually_exclusive_group()
group_seek_play_pause.add_argument(
"-p", "--play", action="store_true", help="Start playing")
group_seek_play_pause.add_argument(
"-P", "--pause", action="store_true", help="Stop playing")
parser_seek.add_argument(
"index", type=int, nargs="?", help="Relative index to which to seek")
return parser
def read_mutagen_key(m, key):
try:
return ", ".join(m[key].text) or None
except KeyError:
return None
def read_metadata(filepath):
m = mutagen.File(filepath)
metadata = {}
metadata["artist"] = (read_mutagen_key(m, "TPE2") or
read_mutagen_key(m, "TPE1"))
metadata["album"] = read_mutagen_key(m, "TALB")
metadata["disk"] = None
disk_and_total = read_mutagen_key(m, "TPOS")
if disk_and_total:
match = re.match(r"[0-9]+", disk_and_total)
if match:
metadata["disk"] = int(match.group())
metadata["track"] = None
track_and_total = read_mutagen_key(m, "TRCK")
if track_and_total:
match = re.match(r"[0-9]+", track_and_total)
if match:
metadata["track"] = int(match.group())
metadata["song"] = read_mutagen_key(m, "TIT2")
metadata["extension"] = filepath.suffix
return metadata
SAFE_CHARS = (
string.ascii_letters + string.digits + " !\"$%&'()*+,-.[]^_`{|}~")
ESCAPE_CHAR = "#"
def escape_string(s):
results = []
for char in s:
if char in SAFE_CHARS:
results.append(char)
else:
results.append("{0}{1:x}{0}".format(ESCAPE_CHAR, ord(char)))
return "".join(results)
def unescape_string(s):
return re.sub(r"#([0-9a-f]+)#", lambda m: chr(int(m.group(1), base=16)), s)
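# Round trip example: escape_string("AC/DC") == "AC#2f#DC" and
# unescape_string("AC#2f#DC") == "AC/DC".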
MISSING_FIELD = "---"
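# Media files live at <artist>/<album>/<disk>-<track> <song><ext> under the
# media directory, with missing text fields stored as "---" and a missing
# disk or track omitted.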
def create_relpath(metadata):
    # read_metadata always sets "disk" and "track" (possibly to None), so test
    # the values, not key membership, to avoid "None-None" in filenames.
    disk_str = (
        "{}-".format(metadata["disk"]) if metadata.get("disk") else "")
    return pathlib.Path("{}/{}/{}{} {}{}".format(
        escape_string(metadata["artist"] or MISSING_FIELD),
        escape_string(metadata["album"] or MISSING_FIELD),
        disk_str,
        metadata.get("track") or "",
        escape_string(metadata.get("song") or MISSING_FIELD),
        metadata["extension"]))
def parse_relpath(relpath):
match = re.fullmatch(
r"([^/]+)/([^/]+)/(?:([0-9]+)-)?([0-9]+)? (.+)", str(relpath))
artist = unescape_string(match.group(1))
if artist == MISSING_FIELD:
artist = None
album = unescape_string(match.group(2))
if album == MISSING_FIELD:
album = None
disk = match.group(3)
if disk:
disk = int(disk)
track = match.group(4)
if track:
track = int(track)
song_and_extension = match.group(5)
song_match = re.fullmatch(r"(.+?)(\..*)", song_and_extension)
if song_match:
song, extension = song_match.groups()
else:
song = song_and_extension
extension = ""
song = unescape_string(song)
if song == MISSING_FIELD:
song = None
return {
"artist": artist,
"album": album,
"disk": disk,
"track": track,
"song": song,
"extension": extension,
}
def import_song(env, filepath):
metadata = read_metadata(filepath)
relpath = create_relpath(metadata)
target = env["media"] / relpath
if target.exists() or target.is_symlink():
log("skipping, already exists: {} => {}"
.format(filepath, target))
return False
target.parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(filepath, target)
return True
MEDIA_EXTENSIONS = [".mp3"]
def import_music(env, paths):
copied = 0
already_present = 0
skipped = 0
for path in paths:
path = pathlib.Path(path).resolve()
for dirpath, dirnames, filenames in os.walk(path):
dirnames.sort()
filenames.sort()
already_reported_dir = False
for filename in filenames:
filepath = pathlib.Path(dirpath) / filename
suffix = filepath.suffix
if suffix not in MEDIA_EXTENSIONS:
log("skipping, extension {} not recognized: {}"
.format(repr(suffix), filepath))
skipped += 1
continue
if not already_reported_dir:
log("importing media from directory: {}"
.format(filepath.parent))
already_reported_dir = True
if import_song(env, filepath):
copied += 1
else:
already_present += 1
log(("imported {} media file{}, skipped {} "
"already present and {} unrecognized")
.format(*plurals(copied), already_present, skipped))
MEDIA_PLAYLIST = "media"
QUEUE_PLAYLIST = "queue"
RESERVED_PLAYLISTS = (MEDIA_PLAYLIST, QUEUE_PLAYLIST)
def create_playlists(env, playlists):
for reserved_name in RESERVED_PLAYLISTS:
if reserved_name in playlists:
die("playlist name is reserved for fstunes: {}"
.format(reserved_name))
if has_duplicates(playlists):
die("more than one playlist with the same name")
paths = [env["playlists"] / escape_string(p) for p in playlists]
should_die = False
for playlist, path in zip(playlists, paths):
if path.exists() or path.is_symlink():
if path.is_dir():
log("playlist already exists: {}".format(playlist))
else:
log("already exists and not a directory: {}".format(path))
should_die = True
if should_die:
die()
for path in paths:
path.mkdir(parents=True)
log("created {} playlist{}".format(*pluralens(playlists)))
def delete_playlists(env, playlists, yes):
for reserved_name in RESERVED_PLAYLISTS:
if reserved_name in playlists:
die("playlist name is reserved for fstunes: {}"
.format(reserved_name))
if has_duplicates(playlists):
die("more than one playlist with the same name")
paths = [env["playlists"] / escape_string(p) for p in playlists]
should_die = False
for playlist, path in zip(playlists, paths):
if not path.is_dir():
if path.exists() or path.is_symlink():
log("already exists and not a directory: {}".format(path))
else:
log("playlist does not exist: {}".format(playlist))
should_die = True
if should_die:
die()
total_songs = 0
deletion_list = []
for playlist, path in zip(playlists, paths):
num_songs = 0
for entry_path in path.iterdir():
if not entry_path.is_symlink():
continue
try:
int(entry_path.name)
except ValueError:
continue
num_songs += 1
total_songs += num_songs
deletion_list.append(
"\n {} ({} song{})"
.format(playlist, *plurals(num_songs)))
log("will delete the following {} playlist{} with {} total songs:{}"
.format(*pluralens(paths), total_songs, "".join(deletion_list)))
if not are_you_sure(default=total_songs == 0, yes=yes):
die()
for path in paths:
shutil.rmtree(path)
log("deleted {} playlist{}".format(*pluralens(playlists)))
FSTUNES_HOME_ENV_VAR = "FSTUNES_HOME"
FSTUNES_QUEUE_LENGTH_ENV_VAR = "FSTUNES_QUEUE_LENGTH"
METADATA_FIELDS = (
"artist",
"album",
"disk",
"track",
"song",
"extension",
"from",
"index",
)
METADATA_INT_FIELDS = (
"disk",
"track",
"index",
)
assert set(METADATA_INT_FIELDS).issubset(set(METADATA_FIELDS))
def split_matcher(matcher):
return matcher.split("=", maxsplit=1)
def combine_matchers(true_matchers, false_matchers):
return ([(True, t) for t in true_matchers] +
[(False, f) for f in false_matchers])
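# Matchers are parsed into descriptors: {"type": "literal", "value": v},
# {"type": "set", "values": [...]}, {"type": "range", "low": lo, "high": hi},
# or {"type": "all"}.  With -m/--match the type is guessed from the
# expression: "track=3" is a literal, "track=1,2" a set, "track=1-5" a range.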
def parse_matchers(args, default_to_media):
match = args.match or []
match_literal = args.match_literal or []
match_set = args.match_set or []
match_range = args.match_range or []
match_all = args.match_all or []
matchers = collections.defaultdict(list)
for matcher_type, unparsed_matchers in (
("guess", match),
("literal", match_literal),
("set", match_set),
("range", match_range),
("all", match_all)):
for unparsed_matcher in unparsed_matchers:
if matcher_type != "all":
try:
field, orig_expr = unparsed_matcher.split("=", maxsplit=1)
except ValueError:
die("invalid match expression: {}"
.format(unparsed_matcher))
else:
field = unparsed_matcher
if field not in METADATA_FIELDS:
die("unsupported field: {}".format(field))
desc = {}
if matcher_type not in ("guess", "literal", "set", "range", "all"):
assert False, (
"unexpected matcher type: {}".format(matcher_type))
if matcher_type in ("literal", "guess") and "type" not in desc:
skip = False
expr = orig_expr
if field in METADATA_INT_FIELDS:
try:
expr = int(orig_expr)
except ValueError:
if matcher_type != "guess":
die("invalid integer literal: {}"
.format(orig_expr))
else:
skip = True
if not skip:
desc["type"] = "literal"
desc["value"] = expr
if matcher_type in ("set", "guess") and "type" not in desc:
skip = False
expr = orig_expr.split(args.set_delimiter)
if field in METADATA_INT_FIELDS:
try:
expr = list(map(int, expr))
except ValueError:
if matcher_type != "guess":
die("invalid integer set: {}".format(orig_expr))
else:
skip = True
if not skip:
desc["type"] = "set"
desc["values"] = expr
if matcher_type in ("range", "guess") and "type" not in desc:
skip = False
try:
low, high = orig_expr.split(
args.range_delimiter, maxsplit=1)
except ValueError:
if matcher_type != "guess":
die("invalid range (does not contain {}): {}"
.format(repr(args.range_delimiter), orig_expr))
else:
skip = True
if not skip and field in METADATA_INT_FIELDS:
try:
low = int(low)
high = int(high)
except ValueError:
if matcher_type != "guess":
die("invalid integer range: {}".format(orig_expr))
else:
skip = True
if not skip:
desc["type"] = "range"
desc["low"] = low
desc["high"] = high
if matcher_type == "all" and "type" not in desc:
desc["type"] = "all"
if "type" not in desc:
die("invalid match expression: {}".format(orig_expr))
matchers[field].append(desc)
if not matchers["from"]:
if default_to_media:
matchers["from"] = [{
"type": "literal",
"value": "media",
}]
else:
die("you must select a playlist using -m from=PLAYLIST or similar")
return matchers
def parse_sorters(args):
sorters = []
for sorter in args.sort or []:
field = sorter["field"]
if field not in METADATA_FIELDS:
die("unsupported field: {}".format(field))
sorters.append(dict(sorter))
for field in (
"from", "index", "artist", "album", "disk", "track",
"song", "extension"):
sorters.append({
"field": field,
"modifier": "sort",
})
sorters.reverse()
return sorters
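# A value passes if any matcher for its field accepts it; a field with no
# matchers at all accepts everything (hence the trailing "not matchers").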
def apply_matchers(matchers, value):
for matcher in matchers:
if matcher["type"] == "all":
return True
elif matcher["type"] == "literal":
if value == matcher["value"]:
return True
elif matcher["type"] == "set":
if value in matcher["values"]:
return True
elif matcher["type"] == "range":
if matcher["low"] <= value <= matcher["high"]:
return True
else:
assert False, "unexpected matcher type: {}".format(matcher["type"])
return not matchers
def get_queue_index(env):
try:
index = int(os.readlink(env["queue_current"]))
except (OSError, ValueError):
min_value = math.inf
try:
for entry_path in env["queue"].iterdir():
try:
min_value = min(min_value, int(entry_path.name))
except ValueError:
continue
except OSError:
pass
index = min_value if min_value != math.inf else 0
return index
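# The queue position is stored as a symlink whose target encodes the index;
# it is replaced atomically by creating the new link under temp/ and renaming
# it over the old one.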
def set_queue_index(env, index):
queue_current_path = env["queue_current"]
queue_current_path.parent.mkdir(parents=True, exist_ok=True)
queue_current_path_new = env["temp"] / env["queue_current"].name
queue_current_path_new.parent.mkdir(parents=True, exist_ok=True)
queue_current_path_new.symlink_to(str(index))
queue_current_path_new.rename(queue_current_path)
def collect_matched_songs(env, matchers):
songs = []
matches_media = (
apply_matchers(matchers["from"], MEDIA_PLAYLIST) and
env["media"].is_dir())
if matches_media:
for artist_path in env["media"].iterdir():
artist = unescape_string(artist_path.name)
if not apply_matchers(matchers["artist"], artist):
continue
if not artist_path.is_dir():
continue
for album_path in artist_path.iterdir():
album = unescape_string(album_path.name)
if not apply_matchers(matchers["album"], album):
continue
if not album_path.is_dir():
continue
for song_path in album_path.iterdir():
if song_path.suffix not in MEDIA_EXTENSIONS:
continue
if not song_path.is_file():
continue
relpath = song_path.relative_to(env["media"])
metadata = parse_relpath(relpath)
disqualified = False
for field in ("disk", "track", "song", "extension"):
if not apply_matchers(
matchers[field], metadata[field]):
disqualified = True
break
if disqualified:
continue
metadata["relpath"] = relpath
songs.append(metadata)
if env["playlists"].is_dir():
for playlist_path in env["playlists"].iterdir():
playlist = unescape_string(playlist_path.name)
if not apply_matchers(matchers["from"], playlist):
continue
if not playlist_path.is_dir():
continue
offset = get_queue_index(env) if playlist == QUEUE_PLAYLIST else 0
for entry_path in playlist_path.iterdir():
try:
index = int(entry_path.name)
except ValueError:
continue
index += offset
if not apply_matchers(matchers["index"], index):
continue
if not entry_path.is_symlink():
continue
song_path = entry_path.resolve()
relpath = song_path.relative_to(env["media"])
metadata = parse_relpath(relpath)
disqualified = False
for field in ("artist", "album", "disk", "track", "song",
"extension"):
if not apply_matchers(matchers[field], metadata[field]):
disqualified = True
break
if disqualified:
continue
metadata["from"] = playlist
metadata["index"] = index
metadata["relpath"] = relpath
songs.append(metadata)
return songs
def sort_songs(songs, sorters):
for sorter in sorters:
field = sorter["field"]
modifier = sorter["modifier"]
reverse = False
assert modifier in ("sort", "reverse", "shuffle"), (
"unexpected sort modifier: {}".format(modifier))
if modifier == "shuffle":
memo = collections.defaultdict(lambda: random.getrandbits(64))
def key(value):
if field in value:
return memo[value[field]]
elif field in METADATA_INT_FIELDS:
return -math.inf
else:
return ""
else:
def key(value):
if field in value:
return value[field]
elif field in METADATA_INT_FIELDS:
return -math.inf
else:
return ""
reverse = modifier == "reverse"
songs.sort(key=key, reverse=reverse)
CONTEXT = 3
def song_description(song, index):
    # As in create_relpath, the "disk" and "track" keys are always present but
    # may be None, so test the values when deciding what to print.
    return ("\n [{}]. {}{}{}{} ({}, {})"
            .format(index,
                    "{}-".format(song["disk"]) if song.get("disk") else "",
                    song.get("track") or "",
                    " " if song.get("disk") or song.get("track") else "",
                    song["song"], song["album"], song["artist"]))
CONTEXT_DIVIDER = "\n-----"
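# insert_in_playlist shifts the symlinks at and after the insertion point up
# by len(songs) (renaming highest-first so nothing collides), links the new
# songs into the gap, and, for the queue, prunes entries more than
# queue_length behind the current pointer.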
def insert_in_playlist(env, songs, playlist, insert_index, before, yes):
if not before:
insert_index += 1
if playlist == MEDIA_PLAYLIST:
die("playlist name is reserved for fstunes: {}"
.format(MEDIA_PLAYLIST))
if playlist == QUEUE_PLAYLIST:
current_index = get_queue_index(env)
insert_index += current_index
global_offset = current_index
else:
global_offset = 0
playlist_path = env["playlists"] / playlist
if playlist == QUEUE_PLAYLIST:
playlist_path.mkdir(parents=True, exist_ok=True)
elif not playlist_path.is_dir():
die("playlist does not exist: {}".format(playlist))
existing_indices = []
for entry_path in playlist_path.iterdir():
try:
index = int(entry_path.name)
except ValueError:
continue
existing_indices.append(index)
existing_indices.sort()
insertion_point = bisect.bisect_left(existing_indices, insert_index)
insertion_list = []
removals = []
if playlist == QUEUE_PLAYLIST:
removal_point = bisect.bisect_left(existing_indices, current_index)
for i in range(removal_point - env["queue_length"]):
index = existing_indices[i]
removals.append(playlist_path / str(index))
for i in range(max(0, insertion_point - CONTEXT), insertion_point):
index = existing_indices[i]
song = parse_relpath(
(playlist_path / str(index)).resolve().relative_to(env["media"]))
insertion_list.append(song_description(song, index - global_offset))
insertion_list.append(CONTEXT_DIVIDER)
creates = []
for offset, song in enumerate(songs):
song_index = insert_index + offset
target = pathlib.Path("..") / ".." / MEDIA_PLAYLIST / song["relpath"]
creates.append((playlist_path / str(song_index), target))
insertion_list.append(
song_description(song, song_index - global_offset))
insertion_list.append(CONTEXT_DIVIDER)
for i in range(insertion_point,
min(insertion_point + CONTEXT, len(existing_indices))):
index = existing_indices[i]
song = parse_relpath(
(playlist_path / str(index)).resolve().relative_to(env["media"]))
insertion_list.append(
song_description(song, index + len(songs) - global_offset))
renames = []
for i in range(insertion_point, len(existing_indices)):
old_index = existing_indices[i]
new_index = old_index + len(songs)
renames.append((playlist_path / str(old_index),
playlist_path / str(new_index)))
renames.reverse()
advance = False
if playlist == QUEUE_PLAYLIST:
if current_index > insert_index:
new_current_index = current_index + len(songs)
advance = True
log(("will insert the following {} song{} into "
"playlist {} with {} song{} already:{}")
.format(*pluralens(songs), repr(playlist),
*pluralens(existing_indices),
"".join(insertion_list)))
log("will move {} symlink{}, insert {}, prune {}{}"
.format(*pluralens(renames), len(creates), len(removals),
", advance pointer" if advance else ""))
if not are_you_sure(default=True, yes=yes):
die()
for removal in removals:
removal.unlink()
for rename, target in renames:
rename.rename(target)
for create, target in creates:
create.symlink_to(target)
if advance:
set_queue_index(env, new_current_index)
log("inserted {} song{} into playlist {} and pruned {} (length {} -> {})"
.format(*pluralens(songs), repr(playlist),
len(removals), len(existing_indices),
len(existing_indices) + len(songs) - len(removals)))
def insert_songs(
env, matchers, sorters, playlist, index, transfer, before, yes):
if transfer:
raise NotImplementedError
songs = collect_matched_songs(env, matchers)
if not songs:
die("no songs matched")
sort_songs(songs, sorters)
insert_in_playlist(env, songs, playlist, index, before=before, yes=yes)
def handle_args(args):
home = os.environ.get(FSTUNES_HOME_ENV_VAR)
if not home:
die("environment variable not set: {}".format(FSTUNES_HOME_ENV_VAR))
home = pathlib.Path(home)
if not home.is_dir():
if home.exists() or home.is_symlink():
die("not a directory: {}".format(home))
die("directory does not exist: {}".format(home))
queue_length = os.environ.get(FSTUNES_QUEUE_LENGTH_ENV_VAR)
if queue_length:
try:
queue_length = int(queue_length)
except ValueError:
die("invalid integer literal in {}: {}"
.format(FSTUNES_QUEUE_LENGTH_ENV_VAR, queue_length))
if queue_length < 0:
die("queue length cannot be negative in {}: {}"
.format(FSTUNES_QUEUE_LENGTH_ENV_VAR, queue_length))
else:
queue_length = 10000
env = {
"home": home,
"media": home / MEDIA_PLAYLIST,
"playlists": home / "playlists",
"queue": home / "playlists" / QUEUE_PLAYLIST,
"queue_current": home / "playlists" / QUEUE_PLAYLIST / "_current",
"queue_length": queue_length,
"temp": home / "temp",
}
if args.subcommand == "import":
import_music(env, args.paths)
elif args.subcommand == "playlist":
if args.subcommand_playlist == "create":
create_playlists(env, args.playlists)
else:
delete_playlists(env, args.playlists, yes=args.yes)
elif args.subcommand == "insert":
matchers = parse_matchers(args, default_to_media=True)
sorters = parse_sorters(args)
insert_songs(
env, matchers, sorters, args.playlist, args.index,
transfer=args.transfer, before=args.before, yes=args.yes)
else:
raise NotImplementedError
def main():
parser = get_parser()
args = parser.parse_args()
handle_args(args)
| 36.957396
| 79
| 0.573666
|
import argparse
import bisect
import collections
import math
import mutagen
import os
import pathlib
import random
import re
import shutil
import string
import sys
def has_duplicates(l):
return len(l) != len(set(l))
def iter_len(iterable):
return sum(1 for _ in iterable)
def plural(n):
return "s" if n != 1 else ""
def pluralen(n):
return plural(len(n))
def plurals(n):
return n, plural(n)
def pluralens(n):
return plurals(len(n))
def log(message, *args, **kwargs):
print("fstunes: {}".format(message), *args, file=sys.stderr, **kwargs)
def die(message=None, *args, **kwargs):
if os.environ.get("FSTUNES_DEBUG"):
assert False, "stacktrace requested"
if message is not None:
log(message, *args, **kwargs)
sys.exit(1)
def are_you_sure(default, yes):
prompt = "[Y/n]" if default else "[y/N]"
print("Proceed? {} ".format(prompt), end="")
if yes:
response = "y (from command-line options)"
print(response)
else:
response = input()
if response.lower().startswith("y"):
return True
if response.lower().startswith("n"):
return False
return default
def add_yes_option(parser):
parser.add_argument("-y", "--yes", action="store_true",
help="Don't ask for confirmation")
def add_fields_option(parser):
parser.add_argument("-f", "--fields", metavar="FIELD1,FIELD2,...",
help="Which metadata fields to include")
def add_match_options(parser):
parser.add_argument("-m", "--match", metavar="FIELD=EXPR", action="append",
help="Filter songs")
parser.add_argument("--match-literal", metavar="FIELD=VALUE",
action="append", help="Filter songs by literal match")
parser.add_argument("--match-set", metavar="FIELD=VALUE1,VALUE2,...",
action="append", help="Filter songs by set membership")
parser.add_argument("--match-range", metavar="FIELD=LOW-HIGH",
action="append",
help="Filter songs by range inclusion")
parser.add_argument("-M", "--match-all", metavar="FIELD", action="append",
help="Do not filter songs")
parser.add_argument("--set-delimiter", default=",", metavar="DELIM",
help="Delimiter to use for set filtering")
parser.add_argument("--range-delimiter", default="-", metavar="DELIM",
help="Delimiter to use for range filtering")
SORT_OPTION_STRINGS = ("-s", "--sort")
REVERSE_OPTION_STRINGS = ("-r", "--reverse")
SHUFFLE_OPTION_STRINGS = ("-x", "--shuffle")
class SortAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string):
if option_string in SORT_OPTION_STRINGS:
modifier = "sort"
elif option_string in REVERSE_OPTION_STRINGS:
modifier = "reverse"
elif option_string in SHUFFLE_OPTION_STRINGS:
modifier = "shuffle"
else:
assert False, "unexpected modifier: {}".format(modifier)
if namespace.sort is None:
namespace.sort = []
namespace.sort.append({
"field": value,
"modifier": modifier,
})
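# Illustrative sketch (not in the original source; program name hypothetical):
# argparse invokes SortAction once per flag, in command-line order, so
#   prog list -x album -s track
# would leave namespace.sort == [{"field": "album", "modifier": "shuffle"},
#                                {"field": "track", "modifier": "sort"}].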
def add_sort_options(parser):
parser.add_argument(*SORT_OPTION_STRINGS, action=SortAction,
help="Sort by field")
parser.add_argument(*REVERSE_OPTION_STRINGS, action=SortAction,
help="Sort by field in reverse order")
parser.add_argument(*SHUFFLE_OPTION_STRINGS, action=SortAction,
help="Shuffle by field")
def get_parser():
parser = argparse.ArgumentParser(
description=(
"Minimal command-line music library manager and media player."))
subparsers = parser.add_subparsers(dest="subcommand")
parser_import = subparsers.add_parser(
"import", help="Add media files to library")
parser_import.add_argument(
"paths", nargs="+", metavar="path", help="Media file or directory")
parser_playlist = subparsers.add_parser(
"playlist", help="Create or delete playlists")
subparsers_playlist = parser_playlist.add_subparsers(
dest="subcommand_playlist")
parser_playlist_create = subparsers_playlist.add_parser(
"create", help="Create a playlist")
parser_playlist_create.add_argument(
"playlists", nargs="+", metavar="playlist",
help="Name of playlist to create")
parser_playlist_delete = subparsers_playlist.add_parser(
"delete", help="Delete a playlist")
parser_playlist_delete.add_argument(
"playlists", nargs="+", metavar="playlist",
help="Name of playlist to delete")
add_yes_option(parser_playlist_delete)
parser_insert = subparsers.add_parser(
"insert", help="Add songs to a playlist or the queue")
add_match_options(parser_insert)
add_sort_options(parser_insert)
parser_insert.add_argument(
"-t", "--transfer", action="store_true",
help="Also remove songs from original playlists")
add_yes_option(parser_insert)
group_insert_before = parser_insert.add_mutually_exclusive_group()
group_insert_before.add_argument(
"--after", action="store_false", dest="before",
help="Insert after given index")
group_insert_before.add_argument(
"--before", action="store_true", help="Insert before given index")
parser_insert.add_argument(
"playlist", help="Name of playlist in which to insert")
parser_insert.add_argument(
"index", type=int, help="Index at which to insert")
parser_remove = subparsers.add_parser(
"remove", help="Remove songs from a playlist or the queue")
add_match_options(parser_remove)
add_yes_option(parser_remove)
parser_edit = subparsers.add_parser(
"edit", help="Edit song metadata")
add_match_options(parser_edit)
add_sort_options(parser_edit)
add_fields_option(parser_edit)
parser_edit.add_argument(
"-e", "--editor", help="Shell command to run text editor")
add_yes_option(parser_edit)
parser_list = subparsers.add_parser(
"list", help="List songs and associated information")
add_match_options(parser_list)
add_sort_options(parser_list)
add_fields_option(parser_list)
parser_delete = subparsers.add_parser(
"delete", help="Delete media files from library")
add_match_options(parser_delete)
add_yes_option(parser_delete)
parser_seek = subparsers.add_parser(
"seek", help="Change place in queue and play/pause")
group_seek_play_pause = parser_seek.add_mutually_exclusive_group()
group_seek_play_pause.add_argument(
"-p", "--play", action="store_true", help="Start playing")
group_seek_play_pause.add_argument(
"-P", "--pause", action="store_true", help="Stop playing")
parser_seek.add_argument(
"index", type=int, nargs="?", help="Relative index to which to seek")
return parser
def read_mutagen_key(m, key):
try:
return ", ".join(m[key].text) or None
except KeyError:
return None
def read_metadata(filepath):
m = mutagen.File(filepath)
metadata = {}
metadata["artist"] = (read_mutagen_key(m, "TPE2") or
read_mutagen_key(m, "TPE1"))
metadata["album"] = read_mutagen_key(m, "TALB")
metadata["disk"] = None
disk_and_total = read_mutagen_key(m, "TPOS")
if disk_and_total:
match = re.match(r"[0-9]+", disk_and_total)
if match:
metadata["disk"] = int(match.group())
metadata["track"] = None
track_and_total = read_mutagen_key(m, "TRCK")
if track_and_total:
match = re.match(r"[0-9]+", track_and_total)
if match:
metadata["track"] = int(match.group())
metadata["song"] = read_mutagen_key(m, "TIT2")
metadata["extension"] = filepath.suffix
return metadata
SAFE_CHARS = (
string.ascii_letters + string.digits + " !\"$%&'()*+,-.[]^_`{|}~")
ESCAPE_CHAR = "
def escape_string(s):
results = []
for char in s:
if char in SAFE_CHARS:
results.append(char)
else:
results.append("{0}{1:x}{0}".format(ESCAPE_CHAR, ord(char)))
return "".join(results)
def unescape_string(s):
return re.sub(r"adata):
disk_str = (
"{}-".format(metadata["disk"]) if "disk" in metadata else "")
return pathlib.Path("{}/{}/{}{} {}{}".format(
escape_string(metadata["artist"] or MISSING_FIELD),
escape_string(metadata["album"] or MISSING_FIELD),
disk_str,
metadata.get("track", ""),
escape_string(metadata.get("song") or MISSING_FIELD),
metadata["extension"]))
def parse_relpath(relpath):
match = re.fullmatch(
r"([^/]+)/([^/]+)/(?:([0-9]+)-)?([0-9]+)? (.+)", str(relpath))
artist = unescape_string(match.group(1))
if artist == MISSING_FIELD:
artist = None
album = unescape_string(match.group(2))
if album == MISSING_FIELD:
album = None
disk = match.group(3)
if disk:
disk = int(disk)
track = match.group(4)
if track:
track = int(track)
song_and_extension = match.group(5)
song_match = re.fullmatch(r"(.+?)(\..*)", song_and_extension)
if song_match:
song, extension = song_match.groups()
else:
song = song_and_extension
extension = ""
song = unescape_string(song)
if song == MISSING_FIELD:
song = None
return {
"artist": artist,
"album": album,
"disk": disk,
"track": track,
"song": song,
"extension": extension,
}
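# Round-trip sketch (hypothetical values): create_relpath() renders
# {"artist": "Daft Punk", "album": "Discovery", "disk": 1, "track": 3,
#  "song": "Digital Love", "extension": ".mp3"} as
# "Daft Punk/Discovery/1-3 Digital Love.mp3", and parse_relpath()
# recovers the same dict from that path.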
def import_song(env, filepath):
metadata = read_metadata(filepath)
relpath = create_relpath(metadata)
target = env["media"] / relpath
if target.exists() or target.is_symlink():
log("skipping, already exists: {} => {}"
.format(filepath, target))
return False
target.parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(filepath, target)
return True
MEDIA_EXTENSIONS = [".mp3"]
def import_music(env, paths):
copied = 0
already_present = 0
skipped = 0
for path in paths:
path = pathlib.Path(path).resolve()
for dirpath, dirnames, filenames in os.walk(path):
dirnames.sort()
filenames.sort()
already_reported_dir = False
for filename in filenames:
filepath = pathlib.Path(dirpath) / filename
suffix = filepath.suffix
if suffix not in MEDIA_EXTENSIONS:
log("skipping, extension {} not recognized: {}"
.format(repr(suffix), filepath))
skipped += 1
continue
if not already_reported_dir:
log("importing media from directory: {}"
.format(filepath.parent))
already_reported_dir = True
if import_song(env, filepath):
copied += 1
else:
already_present += 1
log(("imported {} media file{}, skipped {} "
"already present and {} unrecognized")
.format(*plurals(copied), already_present, skipped))
MEDIA_PLAYLIST = "media"
QUEUE_PLAYLIST = "queue"
RESERVED_PLAYLISTS = (MEDIA_PLAYLIST, QUEUE_PLAYLIST)
def create_playlists(env, playlists):
for reserved_name in RESERVED_PLAYLISTS:
if reserved_name in playlists:
die("playlist name is reserved for fstunes: {}"
.format(reserved_name))
if has_duplicates(playlists):
die("more than one playlist with the same name")
paths = [env["playlists"] / escape_string(p) for p in playlists]
should_die = False
for playlist, path in zip(playlists, paths):
if path.exists() or path.is_symlink():
if path.is_dir():
log("playlist already exists: {}".format(playlist))
else:
log("already exists and not a directory: {}".format(path))
should_die = True
if should_die:
die()
for path in paths:
path.mkdir(parents=True)
log("created {} playlist{}".format(*pluralens(playlists)))
def delete_playlists(env, playlists, yes):
for reserved_name in RESERVED_PLAYLISTS:
if reserved_name in playlists:
die("playlist name is reserved for fstunes: {}"
.format(reserved_name))
if has_duplicates(playlists):
die("more than one playlist with the same name")
paths = [env["playlists"] / escape_string(p) for p in playlists]
should_die = False
for playlist, path in zip(playlists, paths):
if not path.is_dir():
if path.exists() or path.is_symlink():
log("already exists and not a directory: {}".format(path))
else:
log("playlist does not exist: {}".format(playlist))
should_die = True
if should_die:
die()
total_songs = 0
deletion_list = []
for playlist, path in zip(playlists, paths):
num_songs = 0
for entry_path in path.iterdir():
if not entry_path.is_symlink():
continue
try:
int(entry_path.name)
except ValueError:
continue
num_songs += 1
total_songs += num_songs
deletion_list.append(
"\n {} ({} song{})"
.format(playlist, *plurals(num_songs)))
log("will delete the following {} playlist{} with {} total songs:{}"
.format(*pluralens(paths), total_songs, "".join(deletion_list)))
if not are_you_sure(default=total_songs == 0, yes=yes):
die()
for path in paths:
shutil.rmtree(path)
log("deleted {} playlist{}".format(*pluralens(playlists)))
FSTUNES_HOME_ENV_VAR = "FSTUNES_HOME"
FSTUNES_QUEUE_LENGTH_ENV_VAR = "FSTUNES_QUEUE_LENGTH"
METADATA_FIELDS = (
"artist",
"album",
"disk",
"track",
"song",
"extension",
"from",
"index",
)
METADATA_INT_FIELDS = (
"disk",
"track",
"index",
)
assert set(METADATA_INT_FIELDS).issubset(set(METADATA_FIELDS))
def split_matcher(matcher):
return matcher.split("=", maxsplit=1)
def combine_matchers(true_matchers, false_matchers):
return ([(True, t) for t in true_matchers] +
[(False, f) for f in false_matchers])
def parse_matchers(args, default_to_media):
match = args.match or []
match_literal = args.match_literal or []
match_set = args.match_set or []
match_range = args.match_range or []
match_all = args.match_all or []
matchers = collections.defaultdict(list)
for matcher_type, unparsed_matchers in (
("guess", match),
("literal", match_literal),
("set", match_set),
("range", match_range),
("all", match_all)):
for unparsed_matcher in unparsed_matchers:
if matcher_type != "all":
try:
field, orig_expr = unparsed_matcher.split("=", maxsplit=1)
except ValueError:
die("invalid match expression: {}"
.format(unparsed_matcher))
else:
field = unparsed_matcher
if field not in METADATA_FIELDS:
die("unsupported field: {}".format(field))
desc = {}
if matcher_type not in ("guess", "literal", "set", "range", "all"):
assert False, (
"unexpected matcher type: {}".format(matcher_type))
if matcher_type in ("literal", "guess") and "type" not in desc:
skip = False
expr = orig_expr
if field in METADATA_INT_FIELDS:
try:
expr = int(orig_expr)
except ValueError:
if matcher_type != "guess":
die("invalid integer literal: {}"
.format(orig_expr))
else:
skip = True
if not skip:
desc["type"] = "literal"
desc["value"] = expr
if matcher_type in ("set", "guess") and "type" not in desc:
skip = False
expr = orig_expr.split(args.set_delimiter)
if field in METADATA_INT_FIELDS:
try:
expr = list(map(int, expr))
except ValueError:
if matcher_type != "guess":
die("invalid integer set: {}".format(orig_expr))
else:
skip = True
if not skip:
desc["type"] = "set"
desc["values"] = expr
if matcher_type in ("range", "guess") and "type" not in desc:
skip = False
try:
low, high = orig_expr.split(
args.range_delimiter, maxsplit=1)
except ValueError:
if matcher_type != "guess":
die("invalid range (does not contain {}): {}"
.format(repr(args.range_delimiter), orig_expr))
else:
skip = True
if not skip and field in METADATA_INT_FIELDS:
try:
low = int(low)
high = int(high)
except ValueError:
if matcher_type != "guess":
die("invalid integer range: {}".format(orig_expr))
else:
skip = True
if not skip:
desc["type"] = "range"
desc["low"] = low
desc["high"] = high
if matcher_type == "all" and "type" not in desc:
desc["type"] = "all"
if "type" not in desc:
die("invalid match expression: {}".format(orig_expr))
matchers[field].append(desc)
if not matchers["from"]:
if default_to_media:
matchers["from"] = [{
"type": "literal",
"value": "media",
}]
else:
die("you must select a playlist using -m from=PLAYLIST or similar")
return matchers
def parse_sorters(args):
sorters = []
for sorter in args.sort or []:
field = sorter["field"]
if field not in METADATA_FIELDS:
die("unsupported field: {}".format(field))
sorters.append(dict(sorter))
for field in (
"from", "index", "artist", "album", "disk", "track",
"song", "extension"):
sorters.append({
"field": field,
"modifier": "sort",
})
sorters.reverse()
return sorters
def apply_matchers(matchers, value):
for matcher in matchers:
if matcher["type"] == "all":
return True
elif matcher["type"] == "literal":
if value == matcher["value"]:
return True
elif matcher["type"] == "set":
if value in matcher["values"]:
return True
elif matcher["type"] == "range":
if matcher["low"] <= value <= matcher["high"]:
return True
else:
assert False, "unexpected matcher type: {}".format(matcher["type"])
return not matchers
def get_queue_index(env):
try:
index = int(os.readlink(env["queue_current"]))
except (OSError, ValueError):
min_value = math.inf
try:
for entry_path in env["queue"].iterdir():
try:
min_value = min(min_value, int(entry_path.name))
except ValueError:
continue
except OSError:
pass
index = min_value if min_value != math.inf else 0
return index
def set_queue_index(env, index):
queue_current_path = env["queue_current"]
queue_current_path.parent.mkdir(parents=True, exist_ok=True)
queue_current_path_new = env["temp"] / env["queue_current"].name
queue_current_path_new.parent.mkdir(parents=True, exist_ok=True)
queue_current_path_new.symlink_to(str(index))
queue_current_path_new.rename(queue_current_path)
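# Design note: the queue position is persisted as a symlink whose target
# string is the integer index. Building the new link under temp/ and then
# rename()-ing it over playlists/queue/_current makes the pointer update
# atomic on POSIX filesystems, so a crash never leaves a half-written index.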
def collect_matched_songs(env, matchers):
songs = []
matches_media = (
apply_matchers(matchers["from"], MEDIA_PLAYLIST) and
env["media"].is_dir())
if matches_media:
for artist_path in env["media"].iterdir():
artist = unescape_string(artist_path.name)
if not apply_matchers(matchers["artist"], artist):
continue
if not artist_path.is_dir():
continue
for album_path in artist_path.iterdir():
album = unescape_string(album_path.name)
if not apply_matchers(matchers["album"], album):
continue
if not album_path.is_dir():
continue
for song_path in album_path.iterdir():
if song_path.suffix not in MEDIA_EXTENSIONS:
continue
if not song_path.is_file():
continue
relpath = song_path.relative_to(env["media"])
metadata = parse_relpath(relpath)
disqualified = False
for field in ("disk", "track", "song", "extension"):
if not apply_matchers(
matchers[field], metadata[field]):
disqualified = True
break
if disqualified:
continue
metadata["relpath"] = relpath
songs.append(metadata)
if env["playlists"].is_dir():
for playlist_path in env["playlists"].iterdir():
playlist = unescape_string(playlist_path.name)
if not apply_matchers(matchers["from"], playlist):
continue
if not playlist_path.is_dir():
continue
offset = get_queue_index(env) if playlist == QUEUE_PLAYLIST else 0
for entry_path in playlist_path.iterdir():
try:
index = int(entry_path.name)
except ValueError:
continue
index += offset
if not apply_matchers(matchers["index"], index):
continue
if not entry_path.is_symlink():
continue
song_path = entry_path.resolve()
relpath = song_path.relative_to(env["media"])
metadata = parse_relpath(relpath)
disqualified = False
for field in ("artist", "album", "disk", "track", "song",
"extension"):
if not apply_matchers(matchers[field], metadata[field]):
disqualified = True
break
if disqualified:
continue
metadata["from"] = playlist
metadata["index"] = index
metadata["relpath"] = relpath
songs.append(metadata)
return songs
def sort_songs(songs, sorters):
for sorter in sorters:
field = sorter["field"]
modifier = sorter["modifier"]
reverse = False
assert modifier in ("sort", "reverse", "shuffle"), (
"unexpected sort modifier: {}".format(modifier))
if modifier == "shuffle":
memo = collections.defaultdict(lambda: random.getrandbits(64))
def key(value):
if field in value:
return memo[value[field]]
elif field in METADATA_INT_FIELDS:
return -math.inf
else:
return ""
else:
def key(value):
if field in value:
return value[field]
elif field in METADATA_INT_FIELDS:
return -math.inf
else:
return ""
reverse = modifier == "reverse"
songs.sort(key=key, reverse=reverse)
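# Why the sorters run in reverse order (see parse_sorters): list.sort() is
# stable, so sorting last by the highest-priority key preserves the ordering
# the earlier passes established for lower-priority keys. E.g. sorting by
# track first and album last groups songs by album while keeping tracks
# ordered within each album, just like a single multi-key sort.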
CONTEXT = 3
def song_description(song, index):
return ("\n [{}]. {}{}{}{} ({}, {})"
.format(index,
"{}-".format(song["disk"]) if "disk" in song else "",
song.get("track", ""),
" " if "disk" in song or "track" in song else "",
song["song"], song["album"], song["artist"]))
CONTEXT_DIVIDER = "\n-----"
def insert_in_playlist(env, songs, playlist, insert_index, before, yes):
if not before:
insert_index += 1
if playlist == MEDIA_PLAYLIST:
die("playlist name is reserved for fstunes: {}"
.format(MEDIA_PLAYLIST))
if playlist == QUEUE_PLAYLIST:
current_index = get_queue_index(env)
insert_index += current_index
global_offset = current_index
else:
global_offset = 0
playlist_path = env["playlists"] / playlist
if playlist == QUEUE_PLAYLIST:
playlist_path.mkdir(parents=True, exist_ok=True)
elif not playlist_path.is_dir():
die("playlist does not exist: {}".format(playlist))
existing_indices = []
for entry_path in playlist_path.iterdir():
try:
index = int(entry_path.name)
except ValueError:
continue
existing_indices.append(index)
existing_indices.sort()
insertion_point = bisect.bisect_left(existing_indices, insert_index)
insertion_list = []
removals = []
if playlist == QUEUE_PLAYLIST:
removal_point = bisect.bisect_left(existing_indices, current_index)
for i in range(removal_point - env["queue_length"]):
index = existing_indices[i]
removals.append(playlist_path / str(index))
for i in range(max(0, insertion_point - CONTEXT), insertion_point):
index = existing_indices[i]
song = parse_relpath(
(playlist_path / str(index)).resolve().relative_to(env["media"]))
insertion_list.append(song_description(song, index - global_offset))
insertion_list.append(CONTEXT_DIVIDER)
creates = []
for offset, song in enumerate(songs):
song_index = insert_index + offset
target = pathlib.Path("..") / ".." / MEDIA_PLAYLIST / song["relpath"]
creates.append((playlist_path / str(song_index), target))
insertion_list.append(
song_description(song, song_index - global_offset))
insertion_list.append(CONTEXT_DIVIDER)
for i in range(insertion_point,
min(insertion_point + CONTEXT, len(existing_indices))):
index = existing_indices[i]
song = parse_relpath(
(playlist_path / str(index)).resolve().relative_to(env["media"]))
insertion_list.append(
song_description(song, index + len(songs) - global_offset))
renames = []
for i in range(insertion_point, len(existing_indices)):
old_index = existing_indices[i]
new_index = old_index + len(songs)
renames.append((playlist_path / str(old_index),
playlist_path / str(new_index)))
renames.reverse()
advance = False
if playlist == QUEUE_PLAYLIST:
if current_index > insert_index:
new_current_index = current_index + len(songs)
advance = True
log(("will insert the following {} song{} into "
"playlist {} with {} song{} already:{}")
.format(*pluralens(songs), repr(playlist),
*pluralens(existing_indices),
"".join(insertion_list)))
log("will move {} symlink{}, insert {}, prune {}{}"
.format(*pluralens(renames), len(creates), len(removals),
", advance pointer" if advance else ""))
if not are_you_sure(default=True, yes=yes):
die()
for removal in removals:
removal.unlink()
for rename, target in renames:
rename.rename(target)
for create, target in creates:
create.symlink_to(target)
if advance:
set_queue_index(env, new_current_index)
log("inserted {} song{} into playlist {} and pruned {} (length {} -> {})"
.format(*pluralens(songs), repr(playlist),
len(removals), len(existing_indices),
len(existing_indices) + len(songs) - len(removals)))
def insert_songs(
env, matchers, sorters, playlist, index, transfer, before, yes):
if transfer:
raise NotImplementedError
songs = collect_matched_songs(env, matchers)
if not songs:
die("no songs matched")
sort_songs(songs, sorters)
insert_in_playlist(env, songs, playlist, index, before=before, yes=yes)
def handle_args(args):
home = os.environ.get(FSTUNES_HOME_ENV_VAR)
if not home:
die("environment variable not set: {}".format(FSTUNES_HOME_ENV_VAR))
home = pathlib.Path(home)
if not home.is_dir():
if home.exists() or home.is_symlink():
die("not a directory: {}".format(home))
die("directory does not exist: {}".format(home))
queue_length = os.environ.get(FSTUNES_QUEUE_LENGTH_ENV_VAR)
if queue_length:
try:
queue_length = int(queue_length)
except ValueError:
die("invalid integer literal in {}: {}"
.format(FSTUNES_QUEUE_LENGTH_ENV_VAR, queue_length))
if queue_length < 0:
die("queue length cannot be negative in {}: {}"
.format(FSTUNES_QUEUE_LENGTH_ENV_VAR, queue_length))
else:
queue_length = 10000
env = {
"home": home,
"media": home / MEDIA_PLAYLIST,
"playlists": home / "playlists",
"queue": home / "playlists" / QUEUE_PLAYLIST,
"queue_current": home / "playlists" / QUEUE_PLAYLIST / "_current",
"queue_length": queue_length,
"temp": home / "temp",
}
if args.subcommand == "import":
import_music(env, args.paths)
elif args.subcommand == "playlist":
if args.subcommand_playlist == "create":
create_playlists(env, args.playlists)
else:
delete_playlists(env, args.playlists, yes=args.yes)
elif args.subcommand == "insert":
matchers = parse_matchers(args, default_to_media=True)
sorters = parse_sorters(args)
insert_songs(
env, matchers, sorters, args.playlist, args.index,
transfer=args.transfer, before=args.before, yes=args.yes)
else:
raise NotImplementedError
def main():
parser = get_parser()
args = parser.parse_args()
handle_args(args)
| true
| true
|
1c479d15f72832953af2ac415b7d3ec3543095c2
| 1,214
|
py
|
Python
|
setup.py
|
DanielR59/mljar-supervised
|
04a90ffbff33b2c93a7c212825b987e73b7f62fe
|
[
"MIT"
] | null | null | null |
setup.py
|
DanielR59/mljar-supervised
|
04a90ffbff33b2c93a7c212825b987e73b7f62fe
|
[
"MIT"
] | null | null | null |
setup.py
|
DanielR59/mljar-supervised
|
04a90ffbff33b2c93a7c212825b987e73b7f62fe
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="mljar-supervised",
version="0.11.2",
description="Automated Machine Learning for Humans",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mljar/mljar-supervised",
author="MLJAR, Sp. z o.o.",
author_email="contact@mljar.com",
license="MIT",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=open("requirements.txt").readlines(),
include_package_data=True,
python_requires='>=3.7.1',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
keywords=[
"automated machine learning",
"automl",
"machine learning",
"data science",
"data mining",
"mljar"
],
)
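# Usage sketch (standard setuptools workflow, not part of the original file):
#   python -m pip install .    # install from a source checkout
#   python setup.py sdist      # build a source distribution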
| 30.35
| 81
| 0.644152
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="mljar-supervised",
version="0.11.2",
description="Automated Machine Learning for Humans",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mljar/mljar-supervised",
author="MLJAR, Sp. z o.o.",
author_email="contact@mljar.com",
license="MIT",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=open("requirements.txt").readlines(),
include_package_data=True,
python_requires='>=3.7.1',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
keywords=[
"automated machine learning",
"automl",
"machine learning",
"data science",
"data mining",
"mljar"
],
)
| true
| true
|
1c479d38ba2d385729e4a2e779104cd41110084d
| 1,146
|
py
|
Python
|
tekstovni_vmesnik.py
|
kavcicm/Vislice
|
04c3c09bad456321ee9da04c6af8deaeaa509842
|
[
"MIT"
] | null | null | null |
tekstovni_vmesnik.py
|
kavcicm/Vislice
|
04c3c09bad456321ee9da04c6af8deaeaa509842
|
[
"MIT"
] | null | null | null |
tekstovni_vmesnik.py
|
kavcicm/Vislice
|
04c3c09bad456321ee9da04c6af8deaeaa509842
|
[
"MIT"
] | null | null | null |
import model
lojtrice = "#############################\n"
def izpis_zmage(igra):
tekst = lojtrice + "Uganili ste geslo {0}.\n".format(igra.geslo)
return tekst
def izpis_poraza(igra):
tekst = lojtrice + "Obešeni ste! Pravilno geslo je blio {0}.\n".format(igra.geslo)
return tekst
def izpis_igre(igra):
tekst = (lojtrice +
igra.pravilni_del_gesla() + "\n" +
("Preostalo število poizkusov: {0}\n Napačni ugibi: {1} "
).format(model.STEVILO_DOVOLJENIH_NAPAK - igra.stevilo_napak() + 1,
igra.nepravilni_ugibi()) + lojtrice)
return tekst
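# Example (hypothetical values): with model.STEVILO_DOVOLJENIH_NAPAK == 10 and
# igra.stevilo_napak() == 3, the line above reports 10 - 3 + 1 = 8 remaining
# attempts; the "+ 1" presumably counts the final guess that triggers the loss.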
def zahtevaj_vnos():
return input("Ugibaj črko: ")
def pozeni_vmesnik():
igra = model.nova_igra()
while True:
        # Print the game state
print(izpis_igre(igra))
        # Ask the user for a guess
poskus = zahtevaj_vnos()
igra.ugibaj(poskus)
        # Check whether the game is over
if igra.poraz():
print(izpis_poraza(igra))
break
if igra.zmaga():
print(izpis_zmage(igra))
break
else:
pass
return None
pozeni_vmesnik()
| 26.651163
| 86
| 0.579407
|
import model
lojtrice = "#############################\n"
def izpis_zmage(igra):
tekst = lojtrice + "Uganili ste geslo {0}.\n".format(igra.geslo)
return tekst
def izpis_poraza(igra):
tekst = lojtrice + "Obešeni ste! Pravilno geslo je blio {0}.\n".format(igra.geslo)
return tekst
def izpis_igre(igra):
tekst = (lojtrice +
igra.pravilni_del_gesla() + "\n" +
("Preostalo število poizkusov: {0}\n Napačni ugibi: {1} "
).format(model.STEVILO_DOVOLJENIH_NAPAK - igra.stevilo_napak() + 1,
igra.nepravilni_ugibi()) + lojtrice)
return tekst
def zahtevaj_vnos():
return input("Ugibaj črko: ")
def pozeni_vmesnik():
igra = model.nova_igra()
while True:
print(izpis_igre(igra))
poskus = zahtevaj_vnos()
igra.ugibaj(poskus)
if igra.poraz():
print(izpis_poraza(igra))
break
if igra.zmaga():
print(izpis_zmage(igra))
break
else:
pass
return None
pozeni_vmesnik()
| true
| true
|
1c479e4d6b65a786785934f82983844d7a1b5553
| 443
|
py
|
Python
|
run_blast.py
|
denkovarik/EC-Scrape
|
e6340fe852b204f4813ec6ede4d20138a85644b6
|
[
"MIT"
] | null | null | null |
run_blast.py
|
denkovarik/EC-Scrape
|
e6340fe852b204f4813ec6ede4d20138a85644b6
|
[
"MIT"
] | null | null | null |
run_blast.py
|
denkovarik/EC-Scrape
|
e6340fe852b204f4813ec6ede4d20138a85644b6
|
[
"MIT"
] | null | null | null |
import sys, os, time
from utils import *
import shutil
from run_blast_utils import *
blast_rslt_dir = 'blast_rslts\\'
blast_working_dir = 'temp_blast\\'
commands = []
args = parse_args(sys.argv)
# Compile command line arguments
commands = compile_cmd(args, blast_rslt_dir, blast_working_dir)
start_time = time.time()
exec_commands(commands)
shutil.rmtree(blast_working_dir)
print("---%s seconds ---" % (time.time() - start_time))
| 24.611111
| 63
| 0.740406
|
import sys, os, time
from utils import *
import shutil
from run_blast_utils import *
blast_rslt_dir = 'blast_rslts\\'
blast_working_dir = 'temp_blast\\'
commands = []
args = parse_args(sys.argv)
commands = compile_cmd(args, blast_rslt_dir, blast_working_dir)
start_time = time.time()
exec_commands(commands)
shutil.rmtree(blast_working_dir)
print("---%s seconds ---" % (time.time() - start_time))
| true
| true
|
1c479e5971d949fcf67c534f48a3d16b3e4c4a28
| 2,063
|
py
|
Python
|
zfused_maya/zfused_maya/core/color.py
|
qinningfx/zfused_outsource
|
bfc5558f05e3d6005653794a47bd863b61b009b1
|
[
"Apache-2.0"
] | 2
|
2019-02-22T03:33:26.000Z
|
2019-02-23T03:29:26.000Z
|
zfused_maya/zfused_maya/core/color.py
|
qinningfx/zfused_outsource
|
bfc5558f05e3d6005653794a47bd863b61b009b1
|
[
"Apache-2.0"
] | null | null | null |
zfused_maya/zfused_maya/core/color.py
|
qinningfx/zfused_outsource
|
bfc5558f05e3d6005653794a47bd863b61b009b1
|
[
"Apache-2.0"
] | null | null | null |
# coding:utf-8
# --author-- lanhua.zhou
import os
import json
import logging
__all__ = ["get_component_color_data", "LetterColor"]
DIRNAME = os.path.dirname(__file__)
MENU_DIRNAME = os.path.dirname(os.path.dirname(DIRNAME))
COMPONENT_COLOR_FILE = "{}/conf/componentcolor.json".format(MENU_DIRNAME)
logger = logging.getLogger(__name__)
def get_component_color_data():
"""
get menu scripts
rtype: list
"""
_menu_data = []
logger.info("read menu json file data")
with open(COMPONENT_COLOR_FILE, "r") as _file_handle:
_data = _file_handle.read()
_menu_data = json.loads(_data)
return _menu_data
class LetterColor(object):
_color_dict = {
"a":"#E5A3B4",
"b":"#EDC89A",
"c":"#F2F08F",
"d":"#E0E67A",
"e":"#BBDB97",
"f":"#ACD9BA",
"g":"#A1DAE1",
"h":"#C19FCA",
"i":"#CF2027",
"j":"#D96927",
"k":"#ECDA42",
"l":"#A5C33B",
"m":"#77C258",
"n":"#54958C",
"o":"#486EB6",
"p":"#77449A",
"q":"#7F7E80",
"r":"#7C1214",
"s":"#83421B",
"t":"#86792F",
"u":"#587232",
"v":"#417135",
"w":"#3D6C4C",
"x":"#253676",
"y":"#462165",
"z":"#1D1D1D"
}
@classmethod
def color(cls, letter):
return cls._color_dict[letter]
def convert(value):
""" change color value type
"""
digit = list(map(str, range(10))) + list("ABCDEF")
if isinstance(value, tuple):
string = '#'
for i in value:
a1 = i // 16
a2 = i % 16
string += digit[a1] + digit[a2]
return string
elif isinstance(value, str):
a1 = digit.index(value[1]) * 16 + digit.index(value[2])
a2 = digit.index(value[3]) * 16 + digit.index(value[4])
a3 = digit.index(value[5]) * 16 + digit.index(value[6])
return (a1, a2, a3)
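# Usage sketch (doctest-style, values chosen for illustration):
#   >>> convert((245, 245, 245))
#   '#F5F5F5'
#   >>> convert('#F5F5F5')
#   (245, 245, 245)
# Note the string branch indexes into "0..9ABCDEF", so hex input must be
# uppercase.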
| 25.158537
| 73
| 0.492002
|
import os
import json
import logging
__all__ = ["get_component_color_data", "LetterColor"]
DIRNAME = os.path.dirname(__file__)
MENU_DIRNAME = os.path.dirname(os.path.dirname(DIRNAME))
COMPONENT_COLOR_FILE = "{}/conf/componentcolor.json".format(MENU_DIRNAME)
logger = logging.getLogger(__name__)
def get_component_color_data():
_menu_data = []
logger.info("read menu json file data")
with open(COMPONENT_COLOR_FILE, "r") as _file_handle:
_data = _file_handle.read()
_menu_data = json.loads(_data)
return _menu_data
class LetterColor(object):
_color_dict = {
"a":"#E5A3B4",
"b":"#EDC89A",
"c":"#F2F08F",
"d":"#E0E67A",
"e":"#BBDB97",
"f":"#ACD9BA",
"g":"#A1DAE1",
"h":"#C19FCA",
"i":"#CF2027",
"j":"#D96927",
"k":"#ECDA42",
"l":"#A5C33B",
"m":"#77C258",
"n":"#54958C",
"o":"#486EB6",
"p":"#77449A",
"q":"#7F7E80",
"r":"#7C1214",
"s":"#83421B",
"t":"#86792F",
"u":"#587232",
"v":"#417135",
"w":"#3D6C4C",
"x":"#253676",
"y":"#462165",
"z":"#1D1D1D"
}
@classmethod
def color(cls, letter):
return cls._color_dict[letter]
def convert(value):
digit = list(map(str, range(10))) + list("ABCDEF")
if isinstance(value, tuple):
string = '#'
for i in value:
a1 = i // 16
a2 = i % 16
string += digit[a1] + digit[a2]
return string
elif isinstance(value, str):
a1 = digit.index(value[1]) * 16 + digit.index(value[2])
a2 = digit.index(value[3]) * 16 + digit.index(value[4])
a3 = digit.index(value[5]) * 16 + digit.index(value[6])
return (a1, a2, a3)
| true
| true
|
1c479e9907ca8ed897efe7210ee012940850571b
| 181
|
py
|
Python
|
DJANGO PROJECT/Configurator/ConfigWebApp/forms.py
|
BobbyElmes/Fusion-Configurator-Source-Code
|
08e6c14789a2e8d073b312422ce893ee463369f5
|
[
"MIT"
] | null | null | null |
DJANGO PROJECT/Configurator/ConfigWebApp/forms.py
|
BobbyElmes/Fusion-Configurator-Source-Code
|
08e6c14789a2e8d073b312422ce893ee463369f5
|
[
"MIT"
] | null | null | null |
DJANGO PROJECT/Configurator/ConfigWebApp/forms.py
|
BobbyElmes/Fusion-Configurator-Source-Code
|
08e6c14789a2e8d073b312422ce893ee463369f5
|
[
"MIT"
] | null | null | null |
from django import forms
class Register(forms.Form):
username = forms.CharField(label='username', max_length=35)
password = forms.CharField(label='password', max_length=35)
| 36.2
| 63
| 0.756906
|
from django import forms
class Register(forms.Form):
username = forms.CharField(label='username', max_length=35)
password = forms.CharField(label='password', max_length=35)
| true
| true
|
1c479fcc08d0b2f40c0963da403abaa4ff01ae81
| 4,853
|
py
|
Python
|
qa/rpc-tests/httpbasics.py
|
PapicoinProject/Papicoin
|
c971fcd1f81d07fe9de2e2c3893f362d9a8529f5
|
[
"MIT"
] | 1
|
2022-03-19T16:50:57.000Z
|
2022-03-19T16:50:57.000Z
|
qa/rpc-tests/httpbasics.py
|
PapicoinProject/Papicoin
|
c971fcd1f81d07fe9de2e2c3893f362d9a8529f5
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/httpbasics.py
|
PapicoinProject/Papicoin
|
c971fcd1f81d07fe9de2e2c3893f362d9a8529f5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test rpc http basics
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
self.setup_clean_chain = False
def setup_network(self):
self.nodes = self.setup_nodes()
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock==None) #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
        assert(conn.sock!=None) #connection must still be open because bitcoind should use keep-alive by default
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
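        # Note (editorial, not from upstream): the two probes above bracket
        # the server's URI length limit -- a 1000-byte path is still routed
        # (404, unknown endpoint) while a 10000-byte path is refused outright
        # (400), so the limit presumably lies somewhere in between.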
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| 42.570175
| 108
| 0.632186
|
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
self.setup_clean_chain = False
def setup_network(self):
self.nodes = self.setup_nodes()
def run_test(self):
        url = urllib.parse.urlparse(self.nodes[0].url)
        authpair = url.username + ':' + url.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock!=None)
        conn.request('POST', '/', '{"method": "getchaintips"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock!=None)
        conn.close()
        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock!=None)
        conn.request('POST', '/', '{"method": "getchaintips"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock!=None)
        conn.close()
        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock==None)
        urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
        authpair = urlNode1.username + ':' + urlNode1.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
        authpair = urlNode2.username + ':' + urlNode2.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| true
| true
|
1c47a0df09096f9bfb11acb7116db2b3e4c3ba4a
| 1,076
|
py
|
Python
|
api/views.py
|
masoodmomin/django-react-todoapp
|
06fb4f7603bba726e6b0b13cf7dfc5e0aa068f0c
|
[
"MIT"
] | 1
|
2020-12-06T12:32:23.000Z
|
2020-12-06T12:32:23.000Z
|
api/views.py
|
masoodmomin/django-react-todoapp
|
06fb4f7603bba726e6b0b13cf7dfc5e0aa068f0c
|
[
"MIT"
] | null | null | null |
api/views.py
|
masoodmomin/django-react-todoapp
|
06fb4f7603bba726e6b0b13cf7dfc5e0aa068f0c
|
[
"MIT"
] | null | null | null |
from django.http import request
from django.shortcuts import render
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import TodoSerializer
from .models import Todo
from django.http import JsonResponse
@api_view(['GET'])
def all(request):
todo = Todo.objects.all()
serializer = TodoSerializer(todo, many=True)
return JsonResponse(serializer.data, safe=False)
@api_view(['POST'])
def create(request):
serializer = TodoSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response("Created successfully.")
@api_view(['DELETE'])
def delete(request):
text = request.data["text"]
todo = Todo.objects.get(text=text)
todo.delete()
return Response("Deleted successfully.")
@api_view(['PUT'])
def status(request):
text = request.data["text"]
todo = Todo.objects.get(text=text)
serializer = TodoSerializer(instance = todo,data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
| 29.081081
| 66
| 0.727695
|
from django.http import request
from django.shortcuts import render
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import TodoSerializer
from .models import Todo
from django.http import JsonResponse
@api_view(['GET'])
def all(request):
todo = Todo.objects.all()
serializer = TodoSerializer(todo, many=True)
return JsonResponse(serializer.data, safe=False)
@api_view(['POST'])
def create(request):
serializer = TodoSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response("Created successfully.")
@api_view(['DELETE'])
def delete(request):
text = request.data["text"]
todo = Todo.objects.get(text=text)
todo.delete()
return Response("Deleted successfully.")
@api_view(['PUT'])
def status(request):
text = request.data["text"]
todo = Todo.objects.get(text=text)
serializer = TodoSerializer(instance = todo,data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
| true
| true
|
1c47a10742a03a90e69f50b632ec06af813dc613
| 18,268
|
py
|
Python
|
core/controllers/suggestion.py
|
ReshuKumari/oppia
|
cb89b633275b3d0b2d02e0d22e0c472d8b8da0e1
|
[
"Apache-2.0"
] | null | null | null |
core/controllers/suggestion.py
|
ReshuKumari/oppia
|
cb89b633275b3d0b2d02e0d22e0c472d8b8da0e1
|
[
"Apache-2.0"
] | null | null | null |
core/controllers/suggestion.py
|
ReshuKumari/oppia
|
cb89b633275b3d0b2d02e0d22e0c472d8b8da0e1
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for suggestions."""
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import fs_services
from core.domain import html_cleaner
from core.domain import image_validation_services
from core.domain import opportunity_services
from core.domain import skill_fetchers
from core.domain import state_domain
from core.domain import suggestion_services
import feconf
import python_utils
import utils
def _get_target_id_to_exploration_opportunity_dict(suggestions):
"""Returns a dict of target_id to exploration opportunity summary dict.
Args:
suggestions: list(BaseSuggestion). A list of suggestions to retrieve
opportunity dicts.
Returns:
dict. Dict mapping target_id to corresponding exploration opportunity
summary dict.
"""
target_ids = set([s.target_id for s in suggestions])
opportunity_id_to_opportunity_dict = {
opp_id: (opp.to_dict() if opp is not None else None)
for opp_id, opp in (
opportunity_services.get_exploration_opportunity_summaries_by_ids(
list(target_ids)).items())
}
return opportunity_id_to_opportunity_dict
def _get_target_id_to_skill_opportunity_dict(suggestions):
"""Returns a dict of target_id to skill opportunity summary dict.
Args:
suggestions: list(BaseSuggestion). A list of suggestions to retrieve
opportunity dicts.
Returns:
dict. Dict mapping target_id to corresponding skill opportunity dict.
"""
target_ids = set([s.target_id for s in suggestions])
opportunity_id_to_opportunity_dict = {
opp_id: (opp.to_dict() if opp is not None else None)
for opp_id, opp in opportunity_services.get_skill_opportunities_by_ids(
list(target_ids)).items()
}
opportunity_id_to_skill = {
skill.id: skill
for skill in skill_fetchers.get_multi_skills([
opp['id']
for opp in opportunity_id_to_opportunity_dict.values()
if opp is not None])
}
for opp_id, skill in opportunity_id_to_skill.items():
if skill is not None:
opportunity_id_to_opportunity_dict[opp_id]['skill_rubrics'] = [
rubric.to_dict() for rubric in skill.rubrics]
return opportunity_id_to_opportunity_dict
class SuggestionHandler(base.BaseHandler):
""""Handles operations relating to suggestions."""
@acl_decorators.can_suggest_changes
def post(self):
"""Handles POST requests."""
if (self.payload.get('suggestion_type') ==
feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT):
raise self.InvalidInputException(
'Content suggestion submissions are no longer supported.')
try:
suggestion = suggestion_services.create_suggestion(
self.payload.get('suggestion_type'),
self.payload.get('target_type'), self.payload.get('target_id'),
self.payload.get('target_version_at_submission'),
self.user_id, self.payload.get('change'),
self.payload.get('description'))
except utils.ValidationError as e:
raise self.InvalidInputException(e)
# TODO(#10513) : Find a way to save the images before the suggestion is
# created.
suggestion_image_context = suggestion.image_context
new_image_filenames = (
suggestion.get_new_image_filenames_added_in_suggestion())
for filename in new_image_filenames:
image = self.request.get(filename)
if not image:
logging.exception(
'Image not provided for file with name %s when the '
' suggestion with target id %s was created.' % (
filename, suggestion.target_id))
raise self.InvalidInputException(
'No image data provided for file with name %s.'
% (filename))
try:
file_format = (
image_validation_services.validate_image_and_filename(
image, filename))
except utils.ValidationError as e:
raise self.InvalidInputException('%s' % (e))
image_is_compressible = (
file_format in feconf.COMPRESSIBLE_IMAGE_FORMATS)
fs_services.save_original_and_compressed_versions_of_image(
filename, suggestion_image_context, suggestion.target_id,
image, 'image', image_is_compressible)
target_entity_html_list = suggestion.get_target_entity_html_strings()
target_image_filenames = (
html_cleaner.get_image_filenames_from_html_strings(
target_entity_html_list))
fs_services.copy_images(
suggestion.target_type, suggestion.target_id,
suggestion_image_context, suggestion.target_id,
target_image_filenames)
self.render_json(self.values)
class SuggestionToExplorationActionHandler(base.BaseHandler):
"""Handles actions performed on suggestions to explorations."""
@acl_decorators.get_decorator_for_accepting_suggestion(
acl_decorators.can_edit_exploration)
def put(self, target_id, suggestion_id):
"""Handles PUT requests.
Args:
target_id: str. The ID of the suggestion target.
suggestion_id: str. The ID of the suggestion.
"""
if (
suggestion_id.split('.')[0] !=
feconf.ENTITY_TYPE_EXPLORATION):
raise self.InvalidInputException(
'This handler allows actions only'
' on suggestions to explorations.')
if suggestion_id.split('.')[1] != target_id:
raise self.InvalidInputException(
'The exploration id provided does not match the exploration id '
'present as part of the suggestion_id')
action = self.payload.get('action')
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
if suggestion.author_id == self.user_id:
raise self.UnauthorizedUserException(
'You cannot accept/reject your own suggestion.')
if action == constants.ACTION_ACCEPT_SUGGESTION:
commit_message = self.payload.get('commit_message')
if (commit_message is not None and
len(commit_message) > constants.MAX_COMMIT_MESSAGE_LENGTH):
raise self.InvalidInputException(
'Commit messages must be at most %s characters long.'
% constants.MAX_COMMIT_MESSAGE_LENGTH)
suggestion_services.accept_suggestion(
suggestion_id, self.user_id, self.payload.get('commit_message'),
self.payload.get('review_message'))
elif action == constants.ACTION_REJECT_SUGGESTION:
suggestion_services.reject_suggestion(
suggestion_id, self.user_id, self.payload.get('review_message'))
else:
raise self.InvalidInputException('Invalid action.')
self.render_json(self.values)
class ResubmitSuggestionHandler(base.BaseHandler):
"""Handler to reopen a rejected suggestion."""
@acl_decorators.can_resubmit_suggestion
def put(self, suggestion_id):
"""Handles PUT requests.
Args:
suggestion_id: str. The ID of the suggestion.
"""
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
new_change = self.payload.get('change')
change_cls = type(suggestion.change)
change_object = change_cls(new_change)
summary_message = self.payload.get('summary_message')
suggestion_services.resubmit_rejected_suggestion(
suggestion_id, summary_message, self.user_id, change_object)
self.render_json(self.values)
class SuggestionToSkillActionHandler(base.BaseHandler):
"""Handles actions performed on suggestions to skills."""
@acl_decorators.get_decorator_for_accepting_suggestion(
acl_decorators.can_edit_skill)
def put(self, target_id, suggestion_id):
"""Handles PUT requests.
Args:
target_id: str. The ID of the suggestion target.
suggestion_id: str. The ID of the suggestion.
"""
if suggestion_id.split('.')[0] != feconf.ENTITY_TYPE_SKILL:
raise self.InvalidInputException(
'This handler allows actions only on suggestions to skills.')
if suggestion_id.split('.')[1] != target_id:
raise self.InvalidInputException(
'The skill id provided does not match the skill id present as '
'part of the suggestion_id')
action = self.payload.get('action')
if action == constants.ACTION_ACCEPT_SUGGESTION:
# Question suggestions do not use commit messages.
suggestion_services.accept_suggestion(
suggestion_id, self.user_id, 'UNUSED_COMMIT_MESSAGE',
self.payload.get('review_message'))
elif action == constants.ACTION_REJECT_SUGGESTION:
suggestion_services.reject_suggestion(
suggestion_id, self.user_id, self.payload.get('review_message'))
else:
raise self.InvalidInputException('Invalid action.')
self.render_json(self.values)
class SuggestionsProviderHandler(base.BaseHandler):
"""Provides suggestions for a user and given suggestion type."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
def _require_valid_suggestion_and_target_types(
self, target_type, suggestion_type):
"""Checks whether the given target_type and suggestion_type are valid.
Args:
target_type: str. The type of the suggestion target.
suggestion_type: str. The type of the suggestion.
Raises:
InvalidInputException. If the given target_type of suggestion_type
are invalid.
"""
if target_type not in feconf.SUGGESTION_TARGET_TYPE_CHOICES:
raise self.InvalidInputException(
'Invalid target_type: %s' % target_type)
if suggestion_type not in feconf.SUGGESTION_TYPE_CHOICES:
raise self.InvalidInputException(
'Invalid suggestion_type: %s' % suggestion_type)
def _render_suggestions(self, target_type, suggestions):
"""Renders retrieved suggestions.
Args:
target_type: str. The suggestion type.
suggestions: list(BaseSuggestion). A list of suggestions to render.
"""
if target_type == feconf.ENTITY_TYPE_EXPLORATION:
target_id_to_opportunity_dict = (
_get_target_id_to_exploration_opportunity_dict(suggestions))
self.render_json({
'suggestions': [s.to_dict() for s in suggestions],
'target_id_to_opportunity_dict':
target_id_to_opportunity_dict
})
elif target_type == feconf.ENTITY_TYPE_SKILL:
target_id_to_opportunity_dict = (
_get_target_id_to_skill_opportunity_dict(suggestions))
self.render_json({
'suggestions': [s.to_dict() for s in suggestions],
'target_id_to_opportunity_dict':
target_id_to_opportunity_dict
})
else:
self.render_json({})
class ReviewableSuggestionsHandler(SuggestionsProviderHandler):
"""Provides all suggestions which can be reviewed by the user for a given
suggestion type.
"""
@acl_decorators.can_view_reviewable_suggestions
def get(self, target_type, suggestion_type):
"""Handles GET requests.
Args:
target_type: str. The type of the suggestion target.
suggestion_type: str. The type of the suggestion.
"""
self._require_valid_suggestion_and_target_types(
target_type, suggestion_type)
suggestions = suggestion_services.get_reviewable_suggestions(
self.user_id, suggestion_type)
self._render_suggestions(target_type, suggestions)
class UserSubmittedSuggestionsHandler(SuggestionsProviderHandler):
"""Provides all suggestions which are submitted by the user for a given
suggestion type.
"""
@acl_decorators.can_suggest_changes
def get(self, target_type, suggestion_type):
"""Handles GET requests.
Args:
target_type: str. The type of the suggestion target.
suggestion_type: str. The type of the suggestion.
"""
self._require_valid_suggestion_and_target_types(
target_type, suggestion_type)
suggestions = suggestion_services.get_submitted_suggestions(
self.user_id, suggestion_type)
self._render_suggestions(target_type, suggestions)
class SuggestionListHandler(base.BaseHandler):
"""Handles list operations on suggestions."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.open_access
def get(self):
"""Handles GET requests."""
# The query_fields_and_values variable is a list of tuples. The first
# element in each tuple is the field being queried and the second
# element is the value of the field being queried.
# request.GET.items() parses the params from the url into the above
# format. So in the url, the query should be passed as:
# ?field1=value1&field2=value2...fieldN=valueN.
query_fields_and_values = list(self.request.GET.items())
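        # Illustrative request (field names hypothetical): a URL such as
        # ...?author_id=uid_abc&status=review parses to
        # [('author_id', 'uid_abc'), ('status', 'review')]; each queried
        # field must appear in feconf.ALLOWED_SUGGESTION_QUERY_FIELDS.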
for query in query_fields_and_values:
if query[0] not in feconf.ALLOWED_SUGGESTION_QUERY_FIELDS:
raise self.InvalidInputException(
'Not allowed to query on field %s' % query[0])
suggestions = suggestion_services.query_suggestions(
query_fields_and_values)
self.values.update({'suggestions': [s.to_dict() for s in suggestions]})
self.render_json(self.values)
class UpdateTranslationSuggestionHandler(base.BaseHandler):
"""Handles update operations relating to translation suggestions."""
@acl_decorators.can_update_suggestion
def put(self, suggestion_id):
"""Handles PUT requests.
Raises:
InvalidInputException. The suggestion is already handled.
InvalidInputException. The 'translation_html' parameter is missing.
InvalidInputException. The 'translation_html' parameter is not a
string.
"""
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
if suggestion.is_handled:
raise self.InvalidInputException(
'The suggestion with id %s has been accepted or rejected'
% (suggestion_id)
)
if self.payload.get('translation_html') is None:
raise self.InvalidInputException(
'The parameter \'translation_html\' is missing.'
)
if not isinstance(
self.payload.get('translation_html'), python_utils.BASESTRING):
raise self.InvalidInputException(
'The parameter \'translation_html\' should be a string.'
)
suggestion_services.update_translation_suggestion(
suggestion_id, self.payload.get('translation_html'))
self.render_json(self.values)
class UpdateQuestionSuggestionHandler(base.BaseHandler):
"""Handles update operations relating to question suggestions."""
@acl_decorators.can_update_suggestion
def put(self, suggestion_id):
"""Handles PUT requests.
Raises:
InvalidInputException. The suggestion is already handled.
InvalidInputException. The 'skill_difficulty' parameter is missing.
InvalidInputException. The 'skill_difficulty' is not a decimal.
InvalidInputException. The 'question_state_data' parameter is
missing.
InvalidInputException. The 'question_state_data' parameter is
invalid.
"""
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
if suggestion.is_handled:
raise self.InvalidInputException(
'The suggestion with id %s has been accepted or rejected'
% (suggestion_id)
)
if self.payload.get('skill_difficulty') is None:
raise self.InvalidInputException(
'The parameter \'skill_difficulty\' is missing.'
)
if not isinstance(self.payload.get('skill_difficulty'), float):
raise self.InvalidInputException(
'The parameter \'skill_difficulty\' should be a decimal.'
)
if self.payload.get('question_state_data') is None:
raise self.InvalidInputException(
'The parameter \'question_state_data\' is missing.'
)
question_state_data_obj = state_domain.State.from_dict(
self.payload.get('question_state_data'))
question_state_data_obj.validate(None, False)
suggestion_services.update_question_suggestion(
suggestion_id,
self.payload.get('skill_difficulty'),
self.payload.get('question_state_data'))
self.render_json(self.values)
| 39.117773
| 80
| 0.661047
|
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import fs_services
from core.domain import html_cleaner
from core.domain import image_validation_services
from core.domain import opportunity_services
from core.domain import skill_fetchers
from core.domain import state_domain
from core.domain import suggestion_services
import feconf
import python_utils
import utils
def _get_target_id_to_exploration_opportunity_dict(suggestions):
target_ids = set([s.target_id for s in suggestions])
opportunity_id_to_opportunity_dict = {
opp_id: (opp.to_dict() if opp is not None else None)
for opp_id, opp in (
opportunity_services.get_exploration_opportunity_summaries_by_ids(
list(target_ids)).items())
}
return opportunity_id_to_opportunity_dict
def _get_target_id_to_skill_opportunity_dict(suggestions):
target_ids = set([s.target_id for s in suggestions])
opportunity_id_to_opportunity_dict = {
opp_id: (opp.to_dict() if opp is not None else None)
for opp_id, opp in opportunity_services.get_skill_opportunities_by_ids(
list(target_ids)).items()
}
opportunity_id_to_skill = {
skill.id: skill
for skill in skill_fetchers.get_multi_skills([
opp['id']
for opp in opportunity_id_to_opportunity_dict.values()
if opp is not None])
}
for opp_id, skill in opportunity_id_to_skill.items():
if skill is not None:
opportunity_id_to_opportunity_dict[opp_id]['skill_rubrics'] = [
rubric.to_dict() for rubric in skill.rubrics]
return opportunity_id_to_opportunity_dict
class SuggestionHandler(base.BaseHandler):
@acl_decorators.can_suggest_changes
def post(self):
if (self.payload.get('suggestion_type') ==
feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT):
raise self.InvalidInputException(
'Content suggestion submissions are no longer supported.')
try:
suggestion = suggestion_services.create_suggestion(
self.payload.get('suggestion_type'),
self.payload.get('target_type'), self.payload.get('target_id'),
self.payload.get('target_version_at_submission'),
self.user_id, self.payload.get('change'),
self.payload.get('description'))
except utils.ValidationError as e:
raise self.InvalidInputException(e)
        suggestion_image_context = suggestion.image_context
new_image_filenames = (
suggestion.get_new_image_filenames_added_in_suggestion())
for filename in new_image_filenames:
image = self.request.get(filename)
if not image:
logging.exception(
'Image not provided for file with name %s when the '
' suggestion with target id %s was created.' % (
filename, suggestion.target_id))
raise self.InvalidInputException(
'No image data provided for file with name %s.'
% (filename))
try:
file_format = (
image_validation_services.validate_image_and_filename(
image, filename))
except utils.ValidationError as e:
raise self.InvalidInputException('%s' % (e))
image_is_compressible = (
file_format in feconf.COMPRESSIBLE_IMAGE_FORMATS)
fs_services.save_original_and_compressed_versions_of_image(
filename, suggestion_image_context, suggestion.target_id,
image, 'image', image_is_compressible)
target_entity_html_list = suggestion.get_target_entity_html_strings()
target_image_filenames = (
html_cleaner.get_image_filenames_from_html_strings(
target_entity_html_list))
fs_services.copy_images(
suggestion.target_type, suggestion.target_id,
suggestion_image_context, suggestion.target_id,
target_image_filenames)
self.render_json(self.values)
class SuggestionToExplorationActionHandler(base.BaseHandler):
@acl_decorators.get_decorator_for_accepting_suggestion(
acl_decorators.can_edit_exploration)
def put(self, target_id, suggestion_id):
if (
suggestion_id.split('.')[0] !=
feconf.ENTITY_TYPE_EXPLORATION):
raise self.InvalidInputException(
'This handler allows actions only'
' on suggestions to explorations.')
if suggestion_id.split('.')[1] != target_id:
raise self.InvalidInputException(
'The exploration id provided does not match the exploration id '
'present as part of the suggestion_id')
action = self.payload.get('action')
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
if suggestion.author_id == self.user_id:
raise self.UnauthorizedUserException(
'You cannot accept/reject your own suggestion.')
if action == constants.ACTION_ACCEPT_SUGGESTION:
commit_message = self.payload.get('commit_message')
if (commit_message is not None and
len(commit_message) > constants.MAX_COMMIT_MESSAGE_LENGTH):
raise self.InvalidInputException(
'Commit messages must be at most %s characters long.'
% constants.MAX_COMMIT_MESSAGE_LENGTH)
suggestion_services.accept_suggestion(
suggestion_id, self.user_id, self.payload.get('commit_message'),
self.payload.get('review_message'))
elif action == constants.ACTION_REJECT_SUGGESTION:
suggestion_services.reject_suggestion(
suggestion_id, self.user_id, self.payload.get('review_message'))
else:
raise self.InvalidInputException('Invalid action.')
self.render_json(self.values)
class ResubmitSuggestionHandler(base.BaseHandler):
@acl_decorators.can_resubmit_suggestion
def put(self, suggestion_id):
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
new_change = self.payload.get('change')
change_cls = type(suggestion.change)
change_object = change_cls(new_change)
summary_message = self.payload.get('summary_message')
suggestion_services.resubmit_rejected_suggestion(
suggestion_id, summary_message, self.user_id, change_object)
self.render_json(self.values)
class SuggestionToSkillActionHandler(base.BaseHandler):
@acl_decorators.get_decorator_for_accepting_suggestion(
acl_decorators.can_edit_skill)
def put(self, target_id, suggestion_id):
if suggestion_id.split('.')[0] != feconf.ENTITY_TYPE_SKILL:
raise self.InvalidInputException(
'This handler allows actions only on suggestions to skills.')
if suggestion_id.split('.')[1] != target_id:
raise self.InvalidInputException(
'The skill id provided does not match the skill id present as '
'part of the suggestion_id')
action = self.payload.get('action')
if action == constants.ACTION_ACCEPT_SUGGESTION:
suggestion_services.accept_suggestion(
suggestion_id, self.user_id, 'UNUSED_COMMIT_MESSAGE',
self.payload.get('review_message'))
elif action == constants.ACTION_REJECT_SUGGESTION:
suggestion_services.reject_suggestion(
suggestion_id, self.user_id, self.payload.get('review_message'))
else:
raise self.InvalidInputException('Invalid action.')
self.render_json(self.values)
class SuggestionsProviderHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
def _require_valid_suggestion_and_target_types(
self, target_type, suggestion_type):
if target_type not in feconf.SUGGESTION_TARGET_TYPE_CHOICES:
raise self.InvalidInputException(
'Invalid target_type: %s' % target_type)
if suggestion_type not in feconf.SUGGESTION_TYPE_CHOICES:
raise self.InvalidInputException(
'Invalid suggestion_type: %s' % suggestion_type)
def _render_suggestions(self, target_type, suggestions):
if target_type == feconf.ENTITY_TYPE_EXPLORATION:
target_id_to_opportunity_dict = (
_get_target_id_to_exploration_opportunity_dict(suggestions))
self.render_json({
'suggestions': [s.to_dict() for s in suggestions],
'target_id_to_opportunity_dict':
target_id_to_opportunity_dict
})
elif target_type == feconf.ENTITY_TYPE_SKILL:
target_id_to_opportunity_dict = (
_get_target_id_to_skill_opportunity_dict(suggestions))
self.render_json({
'suggestions': [s.to_dict() for s in suggestions],
'target_id_to_opportunity_dict':
target_id_to_opportunity_dict
})
else:
self.render_json({})
class ReviewableSuggestionsHandler(SuggestionsProviderHandler):
@acl_decorators.can_view_reviewable_suggestions
def get(self, target_type, suggestion_type):
self._require_valid_suggestion_and_target_types(
target_type, suggestion_type)
suggestions = suggestion_services.get_reviewable_suggestions(
self.user_id, suggestion_type)
self._render_suggestions(target_type, suggestions)
class UserSubmittedSuggestionsHandler(SuggestionsProviderHandler):
@acl_decorators.can_suggest_changes
def get(self, target_type, suggestion_type):
self._require_valid_suggestion_and_target_types(
target_type, suggestion_type)
suggestions = suggestion_services.get_submitted_suggestions(
self.user_id, suggestion_type)
self._render_suggestions(target_type, suggestions)
class SuggestionListHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.open_access
def get(self):
query_fields_and_values = list(self.request.GET.items())
for query in query_fields_and_values:
if query[0] not in feconf.ALLOWED_SUGGESTION_QUERY_FIELDS:
raise self.InvalidInputException(
'Not allowed to query on field %s' % query[0])
suggestions = suggestion_services.query_suggestions(
query_fields_and_values)
self.values.update({'suggestions': [s.to_dict() for s in suggestions]})
self.render_json(self.values)
class UpdateTranslationSuggestionHandler(base.BaseHandler):
@acl_decorators.can_update_suggestion
def put(self, suggestion_id):
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
if suggestion.is_handled:
raise self.InvalidInputException(
'The suggestion with id %s has been accepted or rejected'
% (suggestion_id)
)
if self.payload.get('translation_html') is None:
raise self.InvalidInputException(
'The parameter \'translation_html\' is missing.'
)
if not isinstance(
self.payload.get('translation_html'), python_utils.BASESTRING):
raise self.InvalidInputException(
'The parameter \'translation_html\' should be a string.'
)
suggestion_services.update_translation_suggestion(
suggestion_id, self.payload.get('translation_html'))
self.render_json(self.values)
class UpdateQuestionSuggestionHandler(base.BaseHandler):
@acl_decorators.can_update_suggestion
def put(self, suggestion_id):
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
if suggestion.is_handled:
raise self.InvalidInputException(
'The suggestion with id %s has been accepted or rejected'
% (suggestion_id)
)
if self.payload.get('skill_difficulty') is None:
raise self.InvalidInputException(
'The parameter \'skill_difficulty\' is missing.'
)
if not isinstance(self.payload.get('skill_difficulty'), float):
raise self.InvalidInputException(
'The parameter \'skill_difficulty\' should be a decimal.'
)
if self.payload.get('question_state_data') is None:
raise self.InvalidInputException(
'The parameter \'question_state_data\' is missing.'
)
question_state_data_obj = state_domain.State.from_dict(
self.payload.get('question_state_data'))
question_state_data_obj.validate(None, False)
suggestion_services.update_question_suggestion(
suggestion_id,
self.payload.get('skill_difficulty'),
self.payload.get('question_state_data'))
self.render_json(self.values)
| true
| true
|
1c47a212af9aebe31a9460335ed92e68251a9076
| 3,160
|
py
|
Python
|
Exec/testing/Viscous-Vortex/check.py
|
darylbond/cerberus
|
a1b99f6b50ba6876d4705f26e6be98ed6e1c5c6a
|
[
"MIT"
] | 5
|
2021-05-10T01:21:52.000Z
|
2022-03-10T17:26:41.000Z
|
Exec/testing/Viscous-Vortex/check.py
|
darylbond/cerberus
|
a1b99f6b50ba6876d4705f26e6be98ed6e1c5c6a
|
[
"MIT"
] | 3
|
2021-05-26T01:12:12.000Z
|
2021-12-14T00:34:06.000Z
|
Exec/testing/Viscous-Vortex/check.py
|
darylbond/cerberus
|
a1b99f6b50ba6876d4705f26e6be98ed6e1c5c6a
|
[
"MIT"
] | 3
|
2021-05-11T02:45:27.000Z
|
2021-09-06T12:08:23.000Z
|
import sys
cmd_folder = "../../../vis"
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from get_boxlib import ReadBoxLib, get_files
import numpy as np
import pylab as plt
import matplotlib.ticker as ticker
def check():
#==============================================================================
# Simulation results
#==============================================================================
# get a list of all the files in this directory
files = get_files('.', include=['plt'], exclude=["temp"], get_all=True)
f = files[-1]
data = ReadBoxLib(f)
t = data.time
data = ReadBoxLib(f, max_level=-1)
xc, u = data.get("x_vel-air")
xc, v = data.get("y_vel-air")
vel = np.sqrt(u**2 + v**2)
yc, xc = np.meshgrid(xc[1], xc[0])
R = np.sqrt(xc**2 + yc**2)
R_linear = np.ravel(R)
vel_linear = np.ravel(vel)
r_max = 8.0
R_linear = np.ma.masked_where(R_linear>r_max, R_linear)
vel_linear = np.ma.masked_where(R_linear>r_max, vel_linear)
I = np.argsort(R_linear)
R_linear = R_linear[I]
vel_linear = vel_linear[I]
# =============================================================================
# analytical solution
# =============================================================================
# D. J. Munoz, V. Springel, R. Marcus, M. Vogelsberger, L. Hernquist,
# Multidimensional, compressible viscous flow on a moving Voronoi mesh,
# Monthly Notices of the Royal Astronomical Society,
# Volume 428, Issue 1, 1 January 2013, Pages 254-279,
# https://doi.org/10.1093/mnras/sts015
G = 1.0
mu0 = 0.08
rho0 = 1.0
nu = mu0/rho0
t0 = 10.0
def vtheta(R,t):
return G/(2*np.pi*R)*(1-np.exp(-R**2/(4*nu*t)))
vt = vtheta(R_linear, data.time+t0)
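    # Illustration only (a sketch added for clarity, not part of the original
    # test): the profile above is the Lamb-Oseen viscous vortex, so far
    # outside the core (R**2 >> 4*nu*t) it should reduce to the ideal line
    # vortex G/(2*pi*R). A quick sanity check of that limit:
    r_far = 100.0  # hypothetical radius, well outside the viscous core
    assert abs(vtheta(r_far, t0) - G/(2*np.pi*r_far)) < 1e-9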
# =============================================================================
# check
# =============================================================================
success = 0
rel_err = np.abs((vel_linear - vt)/vt)
if np.max(rel_err) > 0.01:
success = 1
# =============================================================================
# plot
# =============================================================================
plt.rc("font", family="serif")
plt.rc("font", size=8)
plt.rc("mathtext", fontset="cm")
# matplotlib.rc('text', usetex = True)
    params = {'text.latex.preamble': r'\usepackage{amsmath}'}  # a str; a list raises in matplotlib >= 3.3
plt.rcParams.update(params)
fig = plt.figure(figsize=(5,2))
ax = fig.add_subplot(111)
ax.plot(R_linear, vel_linear,'.', ms=2, mfc='none')
ax.plot(R_linear, vt, 'k--', lw=1)
ax.set_xlabel(r"$r$")
ax.set_ylabel(r"$v_\theta$")
ax = ax.twinx()
ax.plot(R_linear, rel_err*1000, 'r.', ms=0.5)
ax.set_ylabel(r'$\left| \frac{\hat{v}_\theta - v_\theta}{v_\theta} \right|\times 10^3$')
ax.set_xlim(0,8)
ylim = ax.get_ylim()
ax.set_ylim(0, ylim[1])
fig.tight_layout()
fig.savefig("plot.pdf", dpi=300)
return success
if __name__ == "__main__":
sys.exit(check())
| 27.241379
| 92
| 0.472468
|
import sys
cmd_folder = "../../../vis"
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from get_boxlib import ReadBoxLib, get_files
import numpy as np
import pylab as plt
import matplotlib.ticker as ticker
def check():
files = get_files('.', include=['plt'], exclude=["temp"], get_all=True)
f = files[-1]
data = ReadBoxLib(f)
t = data.time
data = ReadBoxLib(f, max_level=-1)
xc, u = data.get("x_vel-air")
xc, v = data.get("y_vel-air")
vel = np.sqrt(u**2 + v**2)
yc, xc = np.meshgrid(xc[1], xc[0])
R = np.sqrt(xc**2 + yc**2)
R_linear = np.ravel(R)
vel_linear = np.ravel(vel)
r_max = 8.0
R_linear = np.ma.masked_where(R_linear>r_max, R_linear)
vel_linear = np.ma.masked_where(R_linear>r_max, vel_linear)
I = np.argsort(R_linear)
R_linear = R_linear[I]
vel_linear = vel_linear[I]
G = 1.0
mu0 = 0.08
rho0 = 1.0
nu = mu0/rho0
t0 = 10.0
def vtheta(R,t):
return G/(2*np.pi*R)*(1-np.exp(-R**2/(4*nu*t)))
vt = vtheta(R_linear, data.time+t0)
success = 0
rel_err = np.abs((vel_linear - vt)/vt)
if np.max(rel_err) > 0.01:
success = 1
plt.rc("font", family="serif")
plt.rc("font", size=8)
plt.rc("mathtext", fontset="cm")
    params = {'text.latex.preamble': r'\usepackage{amsmath}'}
plt.rcParams.update(params)
fig = plt.figure(figsize=(5,2))
ax = fig.add_subplot(111)
ax.plot(R_linear, vel_linear,'.', ms=2, mfc='none')
ax.plot(R_linear, vt, 'k--', lw=1)
ax.set_xlabel(r"$r$")
ax.set_ylabel(r"$v_\theta$")
ax = ax.twinx()
ax.plot(R_linear, rel_err*1000, 'r.', ms=0.5)
ax.set_ylabel(r'$\left| \frac{\hat{v}_\theta - v_\theta}{v_\theta} \right|\times 10^3$')
ax.set_xlim(0,8)
ylim = ax.get_ylim()
ax.set_ylim(0, ylim[1])
fig.tight_layout()
fig.savefig("plot.pdf", dpi=300)
return success
if __name__ == "__main__":
sys.exit(check())
| true
| true
|
1c47a21bcb817eb9aae5fdc55c17b7fec9d7bcef
| 1,310
|
py
|
Python
|
auger_cli/commands/experiment_sessions.py
|
deeplearninc/auger-cli
|
afa52224043834e11f40d69d2042d53dfccc5ae5
|
[
"MIT"
] | 1
|
2019-04-17T12:40:58.000Z
|
2019-04-17T12:40:58.000Z
|
auger_cli/commands/experiment_sessions.py
|
deeplearninc/auger-cli
|
afa52224043834e11f40d69d2042d53dfccc5ae5
|
[
"MIT"
] | 25
|
2019-03-06T08:20:04.000Z
|
2019-07-07T06:00:20.000Z
|
auger_cli/commands/experiment_sessions.py
|
deeplearninc/auger-cli
|
afa52224043834e11f40d69d2042d53dfccc5ae5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import click
from auger_cli.cli_client import pass_client
from auger_cli.formatter import (
print_list,
print_record,
print_table
)
from auger_cli.api import experiment_sessions
@click.group(
'experiment_sessions',
invoke_without_command=True,
short_help='Manage Auger project experiment sessions.'
)
@click.option(
'--project-id',
'-p',
default='',
help='Experiment sessions project ID.'
)
@click.option(
'--experiment-id',
'-e',
default='',
help='Experiment sessions experiment ID.'
)
@click.pass_context
def experiment_sessions_group(ctx, project_id, experiment_id):
if ctx.invoked_subcommand is None:
with ctx.obj.cli_error_handler():
print_table(
experiment_sessions.list(ctx.obj, project_id, experiment_id),
attributes=experiment_sessions.display_list_attributes
)
else:
pass
@click.command(short_help='Display experiment session details.')
@click.argument('experiment_session_id')
@pass_client
def show(client, experiment_session_id):
with client.cli_error_handler():
print_record(experiment_sessions.read(client, experiment_session_id), experiment_sessions.display_attributes)
experiment_sessions_group.add_command(show)
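# Illustrative usage (assuming the package installs a console entry point
# named `auger`; that entry point is not defined in this file):
#   auger experiment_sessions --project-id 42 --experiment-id 7
#   auger experiment_sessions show <experiment_session_id>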
| 24.716981
| 117
| 0.714504
|
import click
from auger_cli.cli_client import pass_client
from auger_cli.formatter import (
print_list,
print_record,
print_table
)
from auger_cli.api import experiment_sessions
@click.group(
'experiment_sessions',
invoke_without_command=True,
short_help='Manage Auger project experiment sessions.'
)
@click.option(
'--project-id',
'-p',
default='',
help='Experiment sessions project ID.'
)
@click.option(
'--experiment-id',
'-e',
default='',
help='Experiment sessions experiment ID.'
)
@click.pass_context
def experiment_sessions_group(ctx, project_id, experiment_id):
if ctx.invoked_subcommand is None:
with ctx.obj.cli_error_handler():
print_table(
experiment_sessions.list(ctx.obj, project_id, experiment_id),
attributes=experiment_sessions.display_list_attributes
)
else:
pass
@click.command(short_help='Display experiment session details.')
@click.argument('experiment_session_id')
@pass_client
def show(client, experiment_session_id):
with client.cli_error_handler():
print_record(experiment_sessions.read(client, experiment_session_id), experiment_sessions.display_attributes)
experiment_sessions_group.add_command(show)
| true
| true
|
1c47a2331be5ca842b9b76d50b82dda69ffca458
| 5,055
|
py
|
Python
|
test/functional/test_framework/netutil.py
|
knotcoin/knotcoin
|
3f4ade4e2cabf94acd80bc043deec3d9a4209938
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/netutil.py
|
knotcoin/knotcoin
|
3f4ade4e2cabf94acd80bc043deec3d9a4209938
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/netutil.py
|
knotcoin/knotcoin
|
3f4ade4e2cabf94acd80bc043deec3d9a4209938
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Knotcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Linux network utilities.
Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
"""
import sys
import socket
import fcntl
import struct
import array
import os
from binascii import unhexlify, hexlify
# STATE_ESTABLISHED = '01'
# STATE_SYN_SENT = '02'
# STATE_SYN_RECV = '03'
# STATE_FIN_WAIT1 = '04'
# STATE_FIN_WAIT2 = '05'
# STATE_TIME_WAIT = '06'
# STATE_CLOSE = '07'
# STATE_CLOSE_WAIT = '08'
# STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
# STATE_CLOSING = '0B'
def get_socket_inodes(pid):
'''
Get list of socket inodes for process pid.
'''
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = unhexlify(host)
host_out = ''
for x in range(0, len(host) // 4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
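# Illustration (assumes a little-endian kernel, as on x86; the values are
# examples, not taken from this file): /proc/net/tcp stores 127.0.0.1:8333
# as '0100007F:208D', which this helper converts back to network byte order
# and a decimal port:
#   _convert_ip_port('0100007F:208D') == ('7f000001', 8333)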
def netstat(typ='tcp'):
'''
    Return a list with the status of TCP connections on a Linux system.
    To get the pid of every networked process on the system, you must run
    this script as superuser.
'''
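    # An abridged, illustrative /proc/net/tcp entry looks like:
    #   sl  local_address rem_address   st ... uid timeout inode
    #   0:  0100007F:208D 00000000:0000 0A ... 0   0       12345
    # so after splitting, line_array[1] is the local host:port, [2] the
    # remote host:port, [3] the state and [9] the socket inode.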
with open('/proc/net/'+typ,'r',encoding='utf8') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces.
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9]) # Need the inode to match with process pid.
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
'''
Get bind addresses as (host,port) tuples for process pid.
'''
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
'''
Return all interfaces that are up
'''
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8 # initial value
while True:
bytes = max_possible * struct_size
names = array.array('B', b'\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
    namestr = names.tobytes()  # array.array.tostring() was removed in Python 3.9
return [(namestr[i:i+16].split(b'\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
'''
Convert string IPv4 or IPv6 address to binary address as returned by
get_bind_addrs.
Very naive implementation that certainly doesn't work for all IPv6 variants.
'''
if '.' in addr: # IPv4
addr = [int(x) for x in addr.split('.')]
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return hexlify(bytearray(addr)).decode('ascii')
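# Illustrative round-trips (example addresses, not taken from this file):
#   addr_to_hex('127.0.0.1') == '7f000001'
#   addr_to_hex('::1')       == '00000000000000000000000000000001'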
def test_ipv6_local():
'''
Check for (local) IPv6 support.
'''
import socket
# By using SOCK_DGRAM this will not actually make a connection, but it will
# fail if there is no route to IPv6 localhost.
have_ipv6 = True
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(('::1', 0))
except socket.error:
have_ipv6 = False
return have_ipv6
| 32.197452
| 111
| 0.600198
|
import sys
import socket
import fcntl
import struct
import array
import os
from binascii import unhexlify, hexlify
STATE_LISTEN = '0A'
def get_socket_inodes(pid):
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
host = unhexlify(host)
host_out = ''
for x in range(0, len(host) // 4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
with open('/proc/net/'+typ,'r',encoding='utf8') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' '))
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9])
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
def all_interfaces():
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8
while True:
bytes = max_possible * struct_size
names = array.array('B', b'\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912,
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
    namestr = names.tobytes()
return [(namestr[i:i+16].split(b'\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
if '.' in addr:
addr = [int(x) for x in addr.split('.')]
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return hexlify(bytearray(addr)).decode('ascii')
def test_ipv6_local():
import socket
have_ipv6 = True
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(('::1', 0))
except socket.error:
have_ipv6 = False
return have_ipv6
| true
| true
|
1c47a240eda919b8a1cb429d2d0afedc165532f8
| 263
|
py
|
Python
|
Statistics/randomNum.py
|
ssm29njit/calculator601SheethalJedidiah
|
2812fbabcf5249eeee8a2f34edd6152cfa2d175e
|
[
"MIT"
] | 1
|
2020-11-08T05:11:27.000Z
|
2020-11-08T05:11:27.000Z
|
Statistics/randomNum.py
|
ssm29njit/calculator601SheethalJedidiah
|
2812fbabcf5249eeee8a2f34edd6152cfa2d175e
|
[
"MIT"
] | null | null | null |
Statistics/randomNum.py
|
ssm29njit/calculator601SheethalJedidiah
|
2812fbabcf5249eeee8a2f34edd6152cfa2d175e
|
[
"MIT"
] | 1
|
2020-12-09T15:37:51.000Z
|
2020-12-09T15:37:51.000Z
|
import random
def getRandomNum(data, sample_size):
    # Draw sample_size-1 distinct elements from data without replacement.
    random_values = random.sample(data, k=sample_size-1)
return random_values
#def getSample(data, sample_size):
# random_values = random.sample(data, k=sample_size)
# return random_values
| 26.3
| 56
| 0.760456
|
import random
def getRandomNum(data, sample_size):
    random_values = random.sample(data, k=sample_size-1)
return random_values
| true
| true
|
1c47a26a1e9d995623a6018575abb2f888b8d25f
| 11,579
|
py
|
Python
|
tests/learning/test_rumelhart_semantic_network.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
tests/learning/test_rumelhart_semantic_network.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
tests/learning/test_rumelhart_semantic_network.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import psyneulink as pnl
import psyneulink.core.components.functions.transferfunctions
def validate_learning_mechs(sys):
def get_learning_mech(name):
return next(lm for lm in sys.learning_mechanisms if lm.name == name)
REP_IN_to_REP_HIDDEN_LM = get_learning_mech('LearningMechanism for MappingProjection from REP_IN to REP_HIDDEN')
REP_HIDDEN_to_REL_HIDDEN_LM = get_learning_mech('LearningMechanism for MappingProjection from REP_HIDDEN to REL_HIDDEN')
REL_IN_to_REL_HIDDEN_LM = get_learning_mech('LearningMechanism for MappingProjection from REL_IN to REL_HIDDEN')
REL_HIDDEN_to_REP_OUT_LM = get_learning_mech('LearningMechanism for MappingProjection from REL_HIDDEN to REP_OUT')
REL_HIDDEN_to_PROP_OUT_LM = get_learning_mech('LearningMechanism for MappingProjection from REL_HIDDEN to PROP_OUT')
REL_HIDDEN_to_QUAL_OUT_LM = get_learning_mech('LearningMechanism for MappingProjection from REL_HIDDEN to QUAL_OUT')
REL_HIDDEN_to_ACT_OUT_LM = get_learning_mech('LearningMechanism for MappingProjection from REL_HIDDEN to ACT_OUT')
# Validate error_signal Projections for REP_IN to REP_HIDDEN
assert len(REP_IN_to_REP_HIDDEN_LM.input_states) == 3
assert REP_IN_to_REP_HIDDEN_LM.input_states[pnl.ERROR_SIGNAL].path_afferents[0].sender.owner == \
REP_HIDDEN_to_REL_HIDDEN_LM
# Validate error_signal Projections to LearningMechanisms for REP_HIDDEN_to REL_HIDDEN Projections
assert all(lm in [input_state.path_afferents[0].sender.owner for input_state in
REP_HIDDEN_to_REL_HIDDEN_LM.input_states]
for lm in {REL_HIDDEN_to_REP_OUT_LM, REL_HIDDEN_to_PROP_OUT_LM,
REL_HIDDEN_to_QUAL_OUT_LM, REL_HIDDEN_to_ACT_OUT_LM})
# Validate error_signal Projections to LearningMechanisms for REL_IN to REL_HIDDEN Projections
assert all(lm in [input_state.path_afferents[0].sender.owner for input_state in
REL_IN_to_REL_HIDDEN_LM.input_states]
for lm in {REL_HIDDEN_to_REP_OUT_LM, REL_HIDDEN_to_PROP_OUT_LM,
REL_HIDDEN_to_QUAL_OUT_LM, REL_HIDDEN_to_ACT_OUT_LM})
class TestRumelhartSemanticNetwork:
"""
    Tests construction and training of a network with both convergent and
    divergent pathways, with the following structure:
# Semantic Network:
# _
# REP PROP QUAL ACT |
# \___\__/____/ |
# | _ | Output Processes
# HIDDEN | _|
# / \ |
# HIDDEN REL_IN | Input Processes
# / |
# REP_IN _|
"""
def test_rumelhart_semantic_network_sequential(self):
rep_in = pnl.TransferMechanism(size=10, name='REP_IN')
rel_in = pnl.TransferMechanism(size=11, name='REL_IN')
rep_hidden = pnl.TransferMechanism(size=4, function=psyneulink.core.components.functions.transferfunctions
.Logistic, name='REP_HIDDEN')
rel_hidden = pnl.TransferMechanism(size=5, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REL_HIDDEN')
rep_out = pnl.TransferMechanism(size=10, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REP_OUT')
prop_out = pnl.TransferMechanism(size=12, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='PROP_OUT')
qual_out = pnl.TransferMechanism(size=13, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='QUAL_OUT')
act_out = pnl.TransferMechanism(size=14, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='ACT_OUT')
rep_hidden_proc = pnl.Process(pathway=[rep_in, rep_hidden, rel_hidden],
learning=pnl.LEARNING,
name='REP_HIDDEN_PROC')
rel_hidden_proc = pnl.Process(pathway=[rel_in, rel_hidden],
learning=pnl.LEARNING,
name='REL_HIDDEN_PROC')
rel_rep_proc = pnl.Process(pathway=[rel_hidden, rep_out],
learning=pnl.LEARNING,
name='REL_REP_PROC')
rel_prop_proc = pnl.Process(pathway=[rel_hidden, prop_out],
learning=pnl.LEARNING,
name='REL_PROP_PROC')
rel_qual_proc = pnl.Process(pathway=[rel_hidden, qual_out],
learning=pnl.LEARNING,
name='REL_QUAL_PROC')
rel_act_proc = pnl.Process(pathway=[rel_hidden, act_out],
learning=pnl.LEARNING,
name='REL_ACT_PROC')
S = pnl.System(processes=[rep_hidden_proc,
rel_hidden_proc,
rel_rep_proc,
rel_prop_proc,
rel_qual_proc,
rel_act_proc])
# S.show_graph(show_learning=pnl.ALL, show_dimensions=True)
validate_learning_mechs(S)
print(S.origin_mechanisms)
print(S.terminal_mechanisms)
S.run(inputs={rel_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
rep_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]},
# targets={rep_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
# prop_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
# qual_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
# act_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]}
)
def test_rumelhart_semantic_network_convergent(self):
rep_in = pnl.TransferMechanism(size=10, name='REP_IN')
rel_in = pnl.TransferMechanism(size=11, name='REL_IN')
rep_hidden = pnl.TransferMechanism(size=4, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REP_HIDDEN')
rel_hidden = pnl.TransferMechanism(size=5, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REL_HIDDEN')
rep_out = pnl.TransferMechanism(size=10, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REP_OUT')
prop_out = pnl.TransferMechanism(size=12, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='PROP_OUT')
qual_out = pnl.TransferMechanism(size=13, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='QUAL_OUT')
act_out = pnl.TransferMechanism(size=14, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='ACT_OUT')
rep_proc = pnl.Process(pathway=[rep_in, rep_hidden, rel_hidden, rep_out],
learning=pnl.LEARNING,
name='REP_PROC')
rel_proc = pnl.Process(pathway=[rel_in, rel_hidden],
learning=pnl.LEARNING,
name='REL_PROC')
rel_prop_proc = pnl.Process(pathway=[rel_hidden, prop_out],
learning=pnl.LEARNING,
name='REL_PROP_PROC')
rel_qual_proc = pnl.Process(pathway=[rel_hidden, qual_out],
learning=pnl.LEARNING,
name='REL_QUAL_PROC')
rel_act_proc = pnl.Process(pathway=[rel_hidden, act_out],
learning=pnl.LEARNING,
name='REL_ACT_PROC')
S = pnl.System(processes=[rep_proc,
rel_proc,
rel_prop_proc,
rel_qual_proc,
rel_act_proc])
# S.show_graph(show_learning=pnl.ALL, show_dimensions=True)
validate_learning_mechs(S)
print(S.origin_mechanisms)
print(S.terminal_mechanisms)
S.run(inputs={rel_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
rep_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]},
# targets={rep_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
# prop_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
# qual_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
# act_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]}
)
def test_rumelhart_semantic_network_crossing(self):
rep_in = pnl.TransferMechanism(size=10, name='REP_IN')
rel_in = pnl.TransferMechanism(size=11, name='REL_IN')
rep_hidden = pnl.TransferMechanism(size=4, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REP_HIDDEN')
rel_hidden = pnl.TransferMechanism(size=5, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REL_HIDDEN')
rep_out = pnl.TransferMechanism(size=10, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REP_OUT')
prop_out = pnl.TransferMechanism(size=12, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='PROP_OUT')
qual_out = pnl.TransferMechanism(size=13, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='QUAL_OUT')
act_out = pnl.TransferMechanism(size=14, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='ACT_OUT')
rep_proc = pnl.Process(pathway=[rep_in, rep_hidden, rel_hidden, rep_out],
learning=pnl.LEARNING,
name='REP_PROC')
rel_proc = pnl.Process(pathway=[rel_in, rel_hidden, prop_out],
learning=pnl.LEARNING,
name='REL_PROC')
rel_qual_proc = pnl.Process(pathway=[rel_hidden, qual_out],
learning=pnl.LEARNING,
name='REL_QUAL_PROC')
rel_act_proc = pnl.Process(pathway=[rel_hidden, act_out],
learning=pnl.LEARNING,
name='REL_ACT_PROC')
S = pnl.System(processes=[rep_proc,
rel_proc,
rel_qual_proc,
rel_act_proc])
# S.show_graph(show_learning=pnl.ALL, show_dimensions=True)
validate_learning_mechs(S)
print(S.origin_mechanisms)
print(S.terminal_mechanisms)
S.run(inputs={rel_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
rep_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]},
# targets={rep_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
# prop_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
# qual_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
# act_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]}
)
| 62.589189
| 143
| 0.586925
|
import pytest
import psyneulink as pnl
import psyneulink.core.components.functions.transferfunctions
def validate_learning_mechs(sys):
def get_learning_mech(name):
return next(lm for lm in sys.learning_mechanisms if lm.name == name)
REP_IN_to_REP_HIDDEN_LM = get_learning_mech('LearningMechanism for MappingProjection from REP_IN to REP_HIDDEN')
REP_HIDDEN_to_REL_HIDDEN_LM = get_learning_mech('LearningMechanism for MappingProjection from REP_HIDDEN to REL_HIDDEN')
REL_IN_to_REL_HIDDEN_LM = get_learning_mech('LearningMechanism for MappingProjection from REL_IN to REL_HIDDEN')
REL_HIDDEN_to_REP_OUT_LM = get_learning_mech('LearningMechanism for MappingProjection from REL_HIDDEN to REP_OUT')
REL_HIDDEN_to_PROP_OUT_LM = get_learning_mech('LearningMechanism for MappingProjection from REL_HIDDEN to PROP_OUT')
REL_HIDDEN_to_QUAL_OUT_LM = get_learning_mech('LearningMechanism for MappingProjection from REL_HIDDEN to QUAL_OUT')
REL_HIDDEN_to_ACT_OUT_LM = get_learning_mech('LearningMechanism for MappingProjection from REL_HIDDEN to ACT_OUT')
assert len(REP_IN_to_REP_HIDDEN_LM.input_states) == 3
assert REP_IN_to_REP_HIDDEN_LM.input_states[pnl.ERROR_SIGNAL].path_afferents[0].sender.owner == \
REP_HIDDEN_to_REL_HIDDEN_LM
assert all(lm in [input_state.path_afferents[0].sender.owner for input_state in
REP_HIDDEN_to_REL_HIDDEN_LM.input_states]
for lm in {REL_HIDDEN_to_REP_OUT_LM, REL_HIDDEN_to_PROP_OUT_LM,
REL_HIDDEN_to_QUAL_OUT_LM, REL_HIDDEN_to_ACT_OUT_LM})
assert all(lm in [input_state.path_afferents[0].sender.owner for input_state in
REL_IN_to_REL_HIDDEN_LM.input_states]
for lm in {REL_HIDDEN_to_REP_OUT_LM, REL_HIDDEN_to_PROP_OUT_LM,
REL_HIDDEN_to_QUAL_OUT_LM, REL_HIDDEN_to_ACT_OUT_LM})
class TestRumelhartSemanticNetwork:
def test_rumelhart_semantic_network_sequential(self):
rep_in = pnl.TransferMechanism(size=10, name='REP_IN')
rel_in = pnl.TransferMechanism(size=11, name='REL_IN')
rep_hidden = pnl.TransferMechanism(size=4, function=psyneulink.core.components.functions.transferfunctions
.Logistic, name='REP_HIDDEN')
rel_hidden = pnl.TransferMechanism(size=5, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REL_HIDDEN')
rep_out = pnl.TransferMechanism(size=10, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REP_OUT')
prop_out = pnl.TransferMechanism(size=12, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='PROP_OUT')
qual_out = pnl.TransferMechanism(size=13, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='QUAL_OUT')
act_out = pnl.TransferMechanism(size=14, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='ACT_OUT')
rep_hidden_proc = pnl.Process(pathway=[rep_in, rep_hidden, rel_hidden],
learning=pnl.LEARNING,
name='REP_HIDDEN_PROC')
rel_hidden_proc = pnl.Process(pathway=[rel_in, rel_hidden],
learning=pnl.LEARNING,
name='REL_HIDDEN_PROC')
rel_rep_proc = pnl.Process(pathway=[rel_hidden, rep_out],
learning=pnl.LEARNING,
name='REL_REP_PROC')
rel_prop_proc = pnl.Process(pathway=[rel_hidden, prop_out],
learning=pnl.LEARNING,
name='REL_PROP_PROC')
rel_qual_proc = pnl.Process(pathway=[rel_hidden, qual_out],
learning=pnl.LEARNING,
name='REL_QUAL_PROC')
rel_act_proc = pnl.Process(pathway=[rel_hidden, act_out],
learning=pnl.LEARNING,
name='REL_ACT_PROC')
S = pnl.System(processes=[rep_hidden_proc,
rel_hidden_proc,
rel_rep_proc,
rel_prop_proc,
rel_qual_proc,
rel_act_proc])
validate_learning_mechs(S)
print(S.origin_mechanisms)
print(S.terminal_mechanisms)
S.run(inputs={rel_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
rep_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]},
)
def test_rumelhart_semantic_network_convergent(self):
rep_in = pnl.TransferMechanism(size=10, name='REP_IN')
rel_in = pnl.TransferMechanism(size=11, name='REL_IN')
rep_hidden = pnl.TransferMechanism(size=4, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REP_HIDDEN')
rel_hidden = pnl.TransferMechanism(size=5, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REL_HIDDEN')
rep_out = pnl.TransferMechanism(size=10, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REP_OUT')
prop_out = pnl.TransferMechanism(size=12, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='PROP_OUT')
qual_out = pnl.TransferMechanism(size=13, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='QUAL_OUT')
act_out = pnl.TransferMechanism(size=14, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='ACT_OUT')
rep_proc = pnl.Process(pathway=[rep_in, rep_hidden, rel_hidden, rep_out],
learning=pnl.LEARNING,
name='REP_PROC')
rel_proc = pnl.Process(pathway=[rel_in, rel_hidden],
learning=pnl.LEARNING,
name='REL_PROC')
rel_prop_proc = pnl.Process(pathway=[rel_hidden, prop_out],
learning=pnl.LEARNING,
name='REL_PROP_PROC')
rel_qual_proc = pnl.Process(pathway=[rel_hidden, qual_out],
learning=pnl.LEARNING,
name='REL_QUAL_PROC')
rel_act_proc = pnl.Process(pathway=[rel_hidden, act_out],
learning=pnl.LEARNING,
name='REL_ACT_PROC')
S = pnl.System(processes=[rep_proc,
rel_proc,
rel_prop_proc,
rel_qual_proc,
rel_act_proc])
validate_learning_mechs(S)
print(S.origin_mechanisms)
print(S.terminal_mechanisms)
S.run(inputs={rel_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
rep_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]},
)
def test_rumelhart_semantic_network_crossing(self):
rep_in = pnl.TransferMechanism(size=10, name='REP_IN')
rel_in = pnl.TransferMechanism(size=11, name='REL_IN')
rep_hidden = pnl.TransferMechanism(size=4, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REP_HIDDEN')
rel_hidden = pnl.TransferMechanism(size=5, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REL_HIDDEN')
rep_out = pnl.TransferMechanism(size=10, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REP_OUT')
prop_out = pnl.TransferMechanism(size=12, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='PROP_OUT')
qual_out = pnl.TransferMechanism(size=13, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='QUAL_OUT')
act_out = pnl.TransferMechanism(size=14, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='ACT_OUT')
rep_proc = pnl.Process(pathway=[rep_in, rep_hidden, rel_hidden, rep_out],
learning=pnl.LEARNING,
name='REP_PROC')
rel_proc = pnl.Process(pathway=[rel_in, rel_hidden, prop_out],
learning=pnl.LEARNING,
name='REL_PROC')
rel_qual_proc = pnl.Process(pathway=[rel_hidden, qual_out],
learning=pnl.LEARNING,
name='REL_QUAL_PROC')
rel_act_proc = pnl.Process(pathway=[rel_hidden, act_out],
learning=pnl.LEARNING,
name='REL_ACT_PROC')
S = pnl.System(processes=[rep_proc,
rel_proc,
rel_qual_proc,
rel_act_proc])
validate_learning_mechs(S)
print(S.origin_mechanisms)
print(S.terminal_mechanisms)
S.run(inputs={rel_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
rep_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]},
)
| true
| true
|
1c47a3c567eca3d2d1212e401d44eb434aeea753
| 124
|
py
|
Python
|
blog/blog/api/urls.py
|
akiracadet/django-rest-sandbox
|
d5eb8667328b20b85b41b814e1071aad4627fac3
|
[
"MIT"
] | null | null | null |
blog/blog/api/urls.py
|
akiracadet/django-rest-sandbox
|
d5eb8667328b20b85b41b814e1071aad4627fac3
|
[
"MIT"
] | 4
|
2021-04-08T19:39:29.000Z
|
2021-09-22T19:33:36.000Z
|
blog/blog/api/urls.py
|
akiracadet/django-rest-sandbox
|
d5eb8667328b20b85b41b814e1071aad4627fac3
|
[
"MIT"
] | null | null | null |
from django.urls import include
from django.urls import path
urlpatterns = [
path('posts/', include('posts.urls')),
]
| 15.5
| 42
| 0.701613
|
from django.urls import include
from django.urls import path
urlpatterns = [
path('posts/', include('posts.urls')),
]
| true
| true
|
1c47a4d77aa127fc90e8639b68a267f11d0041c2
| 403
|
py
|
Python
|
bookshop_proj/asgi.py
|
ravenda900/bookshop-django
|
d66308a75c69854d55f8093aa8d35d4940cb5689
|
[
"MIT"
] | null | null | null |
bookshop_proj/asgi.py
|
ravenda900/bookshop-django
|
d66308a75c69854d55f8093aa8d35d4940cb5689
|
[
"MIT"
] | null | null | null |
bookshop_proj/asgi.py
|
ravenda900/bookshop-django
|
d66308a75c69854d55f8093aa8d35d4940cb5689
|
[
"MIT"
] | null | null | null |
"""
ASGI config for bookshop_proj project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bookshop_proj.settings')
application = get_asgi_application()
| 23.705882
| 78
| 0.791563
|
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bookshop_proj.settings')
application = get_asgi_application()
| true
| true
|
1c47a53589ababd0727d0971d389fb95baaeab43
| 4,168
|
py
|
Python
|
research/minigo/evaluation.py
|
SimiaCryptus/models
|
c652a23a650070b71e286f1ded93726670161940
|
[
"Apache-2.0"
] | null | null | null |
research/minigo/evaluation.py
|
SimiaCryptus/models
|
c652a23a650070b71e286f1ded93726670161940
|
[
"Apache-2.0"
] | null | null | null |
research/minigo/evaluation.py
|
SimiaCryptus/models
|
c652a23a650070b71e286f1ded93726670161940
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluation of playing games between two neural nets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import go
import sgf_wrapper
from gtp_wrapper import MCTSPlayer
def play_match(params, black_net, white_net, games, readouts,
sgf_dir, verbosity):
"""Plays matches between two neural nets.
  The net that wins by a margin of 55% is declared the winner.
Args:
params: An object of hyperparameters.
black_net: Instance of the DualNetRunner class to play as black.
white_net: Instance of the DualNetRunner class to play as white.
    games: Number of games to play; the games are played one after another.
readouts: Number of readouts to perform for each step in each game.
sgf_dir: Directory to write the sgf results.
verbosity: Verbosity to show evaluation process.
Returns:
    'B' if the winner is black_net, otherwise 'W'.
"""
  # Create one black player and one white player; the same pair plays all games.
black = MCTSPlayer(
params.board_size, black_net, verbosity=verbosity, two_player_mode=True,
num_parallel=params.simultaneous_leaves)
white = MCTSPlayer(
params.board_size, white_net, verbosity=verbosity, two_player_mode=True,
num_parallel=params.simultaneous_leaves)
black_name = os.path.basename(black_net.save_file)
white_name = os.path.basename(white_net.save_file)
black_win_counts = 0
white_win_counts = 0
for i in range(games):
num_move = 0 # The move number of the current game
black.initialize_game()
white.initialize_game()
while True:
start = time.time()
active = white if num_move % 2 else black
inactive = black if num_move % 2 else white
current_readouts = active.root.N
while active.root.N < current_readouts + readouts:
active.tree_search()
# print some stats on the search
if verbosity >= 3:
print(active.root.position)
# First, check the roots for hopeless games.
if active.should_resign(): # Force resign
active.set_result(-active.root.position.to_play, was_resign=True)
inactive.set_result(
active.root.position.to_play, was_resign=True)
if active.is_done():
fname = '{:d}-{:s}-vs-{:s}-{:d}.sgf'.format(
int(time.time()), white_name, black_name, i)
with open(os.path.join(sgf_dir, fname), 'w') as f:
sgfstr = sgf_wrapper.make_sgf(
params.board_size, active.position.recent, active.result_string,
black_name=black_name, white_name=white_name)
f.write(sgfstr)
print('Finished game', i, active.result_string)
if active.result_string is not None:
if active.result_string[0] == 'B':
black_win_counts += 1
elif active.result_string[0] == 'W':
white_win_counts += 1
break
move = active.pick_move()
active.play_move(move)
inactive.play_move(move)
dur = time.time() - start
num_move += 1
if (verbosity > 1) or (verbosity == 1 and num_move % 10 == 9):
timeper = (dur / readouts) * 100.0
print(active.root.position)
print('{:d}: {:d} readouts, {:.3f} s/100. ({:.2f} sec)'.format(
num_move, readouts, timeper, dur))
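  # Illustrative margin (example numbers, not from this file): with
  # eval_win_rate=0.55 and games=100, black is declared the winner only if
  # it finishes more than 55 wins ahead of white.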
if (black_win_counts - white_win_counts) > params.eval_win_rate * games:
return go.BLACK_NAME
else:
return go.WHITE_NAME
| 35.02521
| 80
| 0.669626
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import go
import sgf_wrapper
from gtp_wrapper import MCTSPlayer
def play_match(params, black_net, white_net, games, readouts,
sgf_dir, verbosity):
black = MCTSPlayer(
params.board_size, black_net, verbosity=verbosity, two_player_mode=True,
num_parallel=params.simultaneous_leaves)
white = MCTSPlayer(
params.board_size, white_net, verbosity=verbosity, two_player_mode=True,
num_parallel=params.simultaneous_leaves)
black_name = os.path.basename(black_net.save_file)
white_name = os.path.basename(white_net.save_file)
black_win_counts = 0
white_win_counts = 0
for i in range(games):
num_move = 0
black.initialize_game()
white.initialize_game()
while True:
start = time.time()
active = white if num_move % 2 else black
inactive = black if num_move % 2 else white
current_readouts = active.root.N
while active.root.N < current_readouts + readouts:
active.tree_search()
if verbosity >= 3:
print(active.root.position)
if active.should_resign():
active.set_result(-active.root.position.to_play, was_resign=True)
inactive.set_result(
active.root.position.to_play, was_resign=True)
if active.is_done():
fname = '{:d}-{:s}-vs-{:s}-{:d}.sgf'.format(
int(time.time()), white_name, black_name, i)
with open(os.path.join(sgf_dir, fname), 'w') as f:
sgfstr = sgf_wrapper.make_sgf(
params.board_size, active.position.recent, active.result_string,
black_name=black_name, white_name=white_name)
f.write(sgfstr)
print('Finished game', i, active.result_string)
if active.result_string is not None:
if active.result_string[0] == 'B':
black_win_counts += 1
elif active.result_string[0] == 'W':
white_win_counts += 1
break
move = active.pick_move()
active.play_move(move)
inactive.play_move(move)
dur = time.time() - start
num_move += 1
if (verbosity > 1) or (verbosity == 1 and num_move % 10 == 9):
timeper = (dur / readouts) * 100.0
print(active.root.position)
print('{:d}: {:d} readouts, {:.3f} s/100. ({:.2f} sec)'.format(
num_move, readouts, timeper, dur))
if (black_win_counts - white_win_counts) > params.eval_win_rate * games:
return go.BLACK_NAME
else:
return go.WHITE_NAME
| true
| true
|
1c47a5a2c3724fb74c6a56157c990c41856f9b53
| 401
|
py
|
Python
|
tweepy/error.py
|
skoczen/tweepy
|
3b4bbabe1ecafee40d9d5942fbd59c4056c8997c
|
[
"MIT"
] | 24
|
2015-11-12T06:33:24.000Z
|
2019-04-16T11:11:13.000Z
|
tweepy/error.py
|
skoczen/tweepy
|
3b4bbabe1ecafee40d9d5942fbd59c4056c8997c
|
[
"MIT"
] | 3
|
2015-11-12T22:16:22.000Z
|
2021-08-09T07:00:27.000Z
|
tweepy/error.py
|
skoczen/tweepy
|
3b4bbabe1ecafee40d9d5942fbd59c4056c8997c
|
[
"MIT"
] | 7
|
2015-11-12T20:09:56.000Z
|
2020-12-16T17:59:02.000Z
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from __future__ import print_function
import six
class TweepError(Exception):
"""Tweepy exception"""
def __init__(self, reason, response=None):
self.reason = six.text_type(reason)
self.response = response
Exception.__init__(self, reason)
def __str__(self):
return self.reason
| 20.05
| 46
| 0.693267
|
from __future__ import print_function
import six
class TweepError(Exception):
def __init__(self, reason, response=None):
self.reason = six.text_type(reason)
self.response = response
Exception.__init__(self, reason)
def __str__(self):
return self.reason
| true
| true
|
1c47a9283d75ce997bacf6c8e784da408d98f090
| 164
|
py
|
Python
|
python/kyu-6/detect-pangram/test_detect_pangram.py
|
ledwindra/codewars
|
0552669a69e801cfe5f9a3696a4d98be63a96951
|
[
"WTFPL"
] | 1
|
2020-11-13T16:55:04.000Z
|
2020-11-13T16:55:04.000Z
|
python/kyu-6/detect-pangram/test_detect_pangram.py
|
ledwindra/codewars
|
0552669a69e801cfe5f9a3696a4d98be63a96951
|
[
"WTFPL"
] | 1
|
2020-01-28T15:48:17.000Z
|
2020-01-28T15:48:17.000Z
|
python/kyu-6/detect-pangram/test_detect_pangram.py
|
ledwindra/codewars
|
0552669a69e801cfe5f9a3696a4d98be63a96951
|
[
"WTFPL"
] | null | null | null |
from detect_pangram import is_pangram
class TestPangram:
def test_0(self):
assert is_pangram('The quick, brown fox jumps over the lazy dog!') == True
| 23.428571
| 82
| 0.719512
|
from detect_pangram import is_pangram
class TestPangram:
def test_0(self):
assert is_pangram('The quick, brown fox jumps over the lazy dog!') == True
| true
| true
|
1c47a928f0a5aff8aff873bfd002dda97fcd6bb1
| 15,931
|
py
|
Python
|
notebooks/__code/metadata_overlapping_images/metadata_overlapping_images.py
|
ornlneutronimaging/notebooks
|
d219cdc9ec103fd8bb45891b984f45d3d6facecd
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/__code/metadata_overlapping_images/metadata_overlapping_images.py
|
ornlneutronimaging/notebooks
|
d219cdc9ec103fd8bb45891b984f45d3d6facecd
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/__code/metadata_overlapping_images/metadata_overlapping_images.py
|
ornlneutronimaging/notebooks
|
d219cdc9ec103fd8bb45891b984f45d3d6facecd
|
[
"BSD-3-Clause"
] | null | null | null |
from IPython.core.display import HTML
from IPython.core.display import display
import os
import copy
from qtpy.QtWidgets import QMainWindow, QFileDialog
from qtpy import QtGui
from collections import OrderedDict
from __code import load_ui
from .initialization import Initializer
from .event_handler import MetadataTableHandler
from __code.metadata_overlapping_images.export_images import ExportImages
from .display import DisplayImages, DisplayScalePyqtUi, DisplayMetadataPyqtUi
from .export_table import ExportTable
from __code.metadata_overlapping_images import HELP_PAGE
class MetadataOverlappingImagesUi(QMainWindow):
x_axis_column_index = 0
y_axis_column_index = 2
    xy_axis_menu_logo = {'enable': u"\u2713 ",  # check mark; u"\u25CF" (dark circle) is an alternative glyph
'disable': " "}
metadata_operation = {0: {"first_part_of_string_to_remove": "",
"last_part_of_string_to_remove": "",
"math_1": "+",
"value_1": "",
"math_2": "+",
"value_2": "",
"index_of_metadata": -1,
},
2: {"first_part_of_string_to_remove": "",
"last_part_of_string_to_remove": "",
"math_1": "+",
"value_1": "",
"math_2": "+",
"value_2": "",
"index_of_metadata": -1,
},
3: {"first_part_of_string_to_remove": "",
"last_part_of_string_to_remove": "",
"math_1": "+",
"value_1": "",
"math_2": "+",
"value_2": "",
"index_of_metadata": -1,
},
}
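    # Each key above is a metadata-table column index; its entry records how
    # that column's raw metadata string is cleaned and rescaled (a reading
    # inferred from the key names, so treat it as illustrative): strip the
    # leading and trailing substrings, then apply value_1 via math_1 and
    # value_2 via math_2 to the remaining number.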
data_dict = {}
data_dict_raw = {}
timestamp_dict = {}
default_scale_roi = None
rotation_angle = 0
histogram_level = []
# scale pyqtgraph
scale_pyqt_ui = None
scale_legend_pyqt_ui = None
metadata1_pyqt_ui = None # metadata 1 text
metadata2_pyqt_ui = None # metadata 2 text
graph_pyqt_ui = None
# size of tables
guide_table_width = [40, 400, 150, 150]
live_image = []
display_ui = []
# guide and profile pg ROIs
list_guide_pyqt_roi = list()
list_profile_pyqt_roi = list()
list_table_widget_checkbox = list()
list_metadata = []
dict_list_metadata = OrderedDict() # {0: '10', 1: 'hfir', ...}
    # Scale units offered in the UI, as plain strings and as HTML equivalents.
    list_scale_units = {'string': ["mm", u"\u00B5m", "nm"],
                        'html': ["mm", "<span>µm</span>", "nm"]}
rgba_color = {'white': (255, 255, 255, 255, None),
'red': (255, 0, 0, 255, None),
'green': (0, 255, 0, 255, None),
'blue': (0, 0, 255, 255, None),
'black': (0, 0, 0, 255, None)}
rgb_color = {'white': (255, 255, 255),
'red': (255, 0, 0),
'green': (0, 255, 0),
'blue': (0, 0, 255),
'black': (0, 0, 0)}
html_color = {'white': "#FFF",
'red': "#F00",
'green': "#0F0",
'blue': "#00F",
'black': "#000"}
    # UI of the pop-up window that lets the user define (format) a metadata column value
metadata_string_format_ui = None
def __init__(self, parent=None, working_dir='', data_dict=None):
display(HTML('<span style="font-size: 20px; color:blue">Check UI that popped up \
(maybe hidden behind this browser!)</span>'))
super(MetadataOverlappingImagesUi, self).__init__(parent)
ui_full_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
os.path.join('ui', 'ui_metadata_overlapping_images.ui'))
self.ui = load_ui(ui_full_path, baseinstance=self)
self.setWindowTitle("Metadata Overlapping Images")
self.working_dir = working_dir
        self.data_dict = data_dict  # Normalization data dictionary: {'file_name': [],
                                    #  'data': [[...], [...]],
                                    #  'metadata': [],
                                    #  'shape': {}}
# untouched array of images (used to move and rotate images)
self.data_dict_raw = copy.deepcopy(data_dict)
# initialization
o_initialization = Initializer(parent=self)
o_initialization.pyqtgraph()
o_initialization.parameters()
o_initialization.statusbar()
o_initialization.table()
o_initialization.widgets()
o_initialization.event()
# display first images
self.slider_file_changed(0)
self.text_metadata_1_enable_pressed(self.ui.checkBox.isChecked())
self.text_metadata_2_enable_pressed(self.ui.checkBox_2.isChecked())
# ========================================================================================
# MAIN UI EVENTs
def metadata_table_right_click(self, position):
o_metadata_table = MetadataTableHandler(parent=self)
o_metadata_table.right_click(position)
def previous_image_button_clicked(self):
self.change_slider(offset=-1)
self.update_metadata_pyqt_ui()
def next_image_button_clicked(self):
        self.change_slider(offset=+1)
self.update_metadata_pyqt_ui()
def help_button_clicked(self):
import webbrowser
webbrowser.open(HELP_PAGE)
def closeEvent(self, event=None):
if self.metadata_string_format_ui:
self.metadata_string_format_ui.close()
def slider_file_changed(self, slider_value):
self.display_image()
self.ui.image_slider_value.setText(str(slider_value))
self.check_status_next_prev_image_button()
self.update_metadata_pyqt_ui()
def slider_file_clicked(self):
current_slider_value = self.ui.file_slider.value()
self.slider_file_changed(current_slider_value)
self.update_metadata_pyqt_ui()
def scale_checkbox_clicked(self, status):
self.ui.scale_groupbox.setEnabled(status)
self.ui.scale_position_frame.setEnabled(status)
o_display = DisplayScalePyqtUi(parent=self)
o_display.run()
def metadata_checkbox_clicked(self, status):
self.ui.metadata_groupbox.setEnabled(status)
self.ui.metadata_position_frame.setEnabled(status)
self.ui.enable_graph_checkbox.setEnabled(status)
self.ui.text_graph_tabWidget.setEnabled(status)
self.ui.toolBox.setEnabled(status)
if status:
self.ui.graph_groupBox.setEnabled(self.ui.enable_graph_checkbox.isChecked())
else:
self.ui.graph_groupBox.setEnabled(False)
o_display = DisplayMetadataPyqtUi(parent=self)
o_display.run()
def select_metadata_checkbox_clicked(self, status):
self.ui.select_metadata_combobox.setEnabled(status)
self.update_metadata_pyqt_ui()
def font_size_slider_pressed(self):
self.update_metadata_pyqt_ui()
def font_size_slider_moved(self, value):
self.update_metadata_pyqt_ui()
def graph_font_size_slider_pressed(self):
self.update_metadata_pyqt_ui()
def graph_font_size_slider_moved(self, value):
self.update_metadata_pyqt_ui()
def metadata_list_changed(self, index, column):
o_event = MetadataTableHandler(parent=self)
o_event.metadata_list_changed(index, column)
def scale_orientation_clicked(self):
o_init = Initializer(parent=self)
o_init.set_scale_spinbox_max_value()
self.update_scale_pyqt_ui()
def scale_thickness_value_changed(self, value):
self.update_scale_pyqt_ui()
def scale_color_changed(self, value):
self.update_scale_pyqt_ui()
def scale_size_changed(self, value):
self.update_scale_pyqt_ui()
def scale_real_size_changed(self):
"""update the label of the scale"""
self.update_scale_pyqt_ui()
def scale_units_changed(self):
self.update_scale_pyqt_ui()
def scale_position_moved(self, new_value):
self.update_scale_pyqt_ui()
def scale_position_clicked(self):
self.update_scale_pyqt_ui()
def metadata_position_moved(self, new_value):
self.update_metadata_pyqt_ui()
def metadata_position_clicked(self):
self.update_metadata_pyqt_ui()
def metadata2_position_moved(self, new_value):
self.update_metadata_pyqt_ui()
def metadata2_position_clicked(self):
self.update_metadata_pyqt_ui()
def metadata_color_changed(self, value):
self.update_metadata_pyqt_ui()
def metadata_name_return_pressed(self):
self.update_metadata_pyqt_ui()
def graph_position_moved(self, value):
self.update_metadata_pyqt_ui()
def graph_position_clicked(self):
self.update_metadata_pyqt_ui()
def graph_color_changed(self, value):
self.update_metadata_pyqt_ui()
def graph_axis_label_changed(self, new_value):
self.update_metadata_pyqt_ui()
def metadata_text_or_graph_clicked(self):
status = self.ui.metadata_graph_option.isChecked()
self.ui.metadata_graph_size_label.setVisible(status)
self.ui.metadata_graph_size_slider.setVisible(status)
self.update_metadata_pyqt_ui()
def metadata_graph_size_pressed(self):
self.update_metadata_pyqt_ui()
def metadata_graph_size_moved(self, slider_value):
self.update_metadata_pyqt_ui()
def table_cell_changed(self, row, column):
self.update_metadata_pyqt_ui()
def export_table_clicked(self):
_export_folder = QFileDialog.getExistingDirectory(self,
directory=os.path.dirname(self.working_dir),
caption="Select Output Folder",
options=QFileDialog.ShowDirsOnly)
QtGui.QGuiApplication.processEvents()
if _export_folder:
o_export = ExportTable(parent=self,
export_folder=_export_folder)
o_export.run()
def export_button_clicked(self):
_export_folder = QFileDialog.getExistingDirectory(self,
directory=os.path.dirname(self.working_dir),
caption="Select Output Folder",
options=QFileDialog.ShowDirsOnly)
QtGui.QGuiApplication.processEvents()
if _export_folder:
o_export = ExportImages(parent=self,
export_folder=_export_folder)
o_export.run()
# def import_table_pressed(self):
# _table_file = QFileDialog.getOpenFileName(self,
# directory=os.path.dirname(self.working_dir),
# caption="Select Input File")
# QtGui.QGuiApplication.processEvents()
#
# if type(_table_file) is tuple:
# _table_file = _table_file[0]
#
# if _table_file:
# o_import = TableLoader(parent=self,
# filename=str(_table_file))
# o_import.load_table()
# o_import.populate()
# self.update_metadata_pyqt_ui()
def enable_graph_button_clicked(self, new_state):
self.ui.graph_groupBox.setEnabled(new_state)
self.ui.metadata_position_frame_3.setEnabled(new_state)
self.ui.graph_position_y.setEnabled(new_state)
self.ui.graph_position_x.setEnabled(new_state)
self.ui.label_15.setEnabled(new_state)
self.ui.label_16.setEnabled(new_state)
self.update_metadata_pyqt_ui()
def display_red_vertical_marker_clicked(self):
self.update_metadata_pyqt_ui()
def text_metadata_1_enable_pressed(self, status):
self.ui.metadata_position_frame.setEnabled(status)
self.ui.metadata_position_x.setEnabled(status)
self.ui.metadata_position_y.setEnabled(status)
self.ui.label_10.setEnabled(status)
self.ui.label_11.setEnabled(status)
self.ui.label_14.setEnabled(status)
self.ui.font_size_slider.setEnabled(status)
self.ui.prefix_label_1.setEnabled(status)
self.ui.suffix_label_1.setEnabled(status)
self.ui.prefix_lineEdit_1.setEnabled(status)
self.ui.suffix_lineEdit_1.setEnabled(status)
self.ui.metadata_1_name_groupBox.setEnabled(status)
self.update_metadata_pyqt_ui()
def text_metadata_2_enable_pressed(self, status):
self.ui.metadata_position_frame_2.setEnabled(status)
self.ui.metadata_position_x_2.setEnabled(status)
self.ui.metadata_position_y_2.setEnabled(status)
self.ui.label_18.setEnabled(status)
self.ui.label_19.setEnabled(status)
self.ui.label_20.setEnabled(status)
self.ui.font_size_slider_2.setEnabled(status)
self.ui.prefix_label_2.setEnabled(status)
self.ui.suffix_label_2.setEnabled(status)
self.ui.prefix_lineEdit_2.setEnabled(status)
self.ui.suffix_lineEdit_2.setEnabled(status)
self.ui.metadata_2_name_groupBox.setEnabled(status)
self.update_metadata_pyqt_ui()
def metadata_1_suffix_prefix_changed(self, new_text):
self.update_metadata_pyqt_ui()
def metadata_2_suffix_prefix_changed(self, new_text):
self.update_metadata_pyqt_ui()
# ========================================================================================
def update_metadata_pyqt_ui(self):
o_display = DisplayMetadataPyqtUi(parent=self)
o_display.clear_pyqt_items()
o_display.run()
def update_scale_pyqt_ui(self):
# if self.scale_pyqt_ui:
# self.ui.image_view.removeItem(self.scale_pyqt_ui)
# if self.scale_legend_pyqt_ui:
# self.ui.image_view.removeItem(self.scale_legend_pyqt_ui)
o_display = DisplayScalePyqtUi(parent=self)
o_display.clear_pyqt_items()
o_display.run()
def display_image(self, recalculate_image=False):
"""display the image selected by the file slider"""
DisplayImages(parent=self, recalculate_image=recalculate_image)
    def check_status_next_prev_image_button(self):
        """Enable or disable the prev/next buttons next to the image file slider."""
current_slider_value = self.ui.file_slider.value()
min_slider_value = self.ui.file_slider.minimum()
max_slider_value = self.ui.file_slider.maximum()
_prev = True
_next = True
if current_slider_value == min_slider_value:
_prev = False
elif current_slider_value == max_slider_value:
_next = False
self.ui.previous_image_button.setEnabled(_prev)
self.ui.next_image_button.setEnabled(_next)
def change_slider(self, offset=+1):
self.ui.file_slider.blockSignals(True)
current_slider_value = self.ui.file_slider.value()
new_row_selected = current_slider_value + offset
self.ui.image_slider_value.setText(str(new_row_selected))
self.ui.file_slider.setValue(new_row_selected)
self.check_status_next_prev_image_button()
self.display_image()
self.ui.file_slider.blockSignals(False)
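A hedged usage sketch for the class above, assuming it is launched from a Jupyter notebook after images have been loaded; the file name, pixel data, and working directory are hypothetical, and the dictionary layout follows the constructor comment:

# Hypothetical notebook invocation.
data_dict = {'file_name': ['image_001.tiff'],
             'data': [[[0, 1], [2, 3]]],
             'metadata': [{}],
             'shape': {}}
window = MetadataOverlappingImagesUi(working_dir='/hypothetical/data', data_dict=data_dict)
window.show()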
| 37.751185
| 102
| 0.612203
|
from IPython.core.display import HTML
from IPython.core.display import display
import os
import copy
from qtpy.QtWidgets import QMainWindow, QFileDialog
from qtpy import QtGui
from collections import OrderedDict
from __code import load_ui
from .initialization import Initializer
from .event_handler import MetadataTableHandler
from __code.metadata_overlapping_images.export_images import ExportImages
from .display import DisplayImages, DisplayScalePyqtUi, DisplayMetadataPyqtUi
from .export_table import ExportTable
from __code.metadata_overlapping_images import HELP_PAGE
class MetadataOverlappingImagesUi(QMainWindow):
x_axis_column_index = 0
y_axis_column_index = 2
xy_axis_menu_logo = {'enable': u"\u2713 ",
'disable': " "}
metadata_operation = {0: {"first_part_of_string_to_remove": "",
"last_part_of_string_to_remove": "",
"math_1": "+",
"value_1": "",
"math_2": "+",
"value_2": "",
"index_of_metadata": -1,
},
2: {"first_part_of_string_to_remove": "",
"last_part_of_string_to_remove": "",
"math_1": "+",
"value_1": "",
"math_2": "+",
"value_2": "",
"index_of_metadata": -1,
},
3: {"first_part_of_string_to_remove": "",
"last_part_of_string_to_remove": "",
"math_1": "+",
"value_1": "",
"math_2": "+",
"value_2": "",
"index_of_metadata": -1,
},
}
data_dict = {}
data_dict_raw = {}
timestamp_dict = {}
default_scale_roi = None
rotation_angle = 0
histogram_level = []
scale_pyqt_ui = None
scale_legend_pyqt_ui = None
metadata1_pyqt_ui = None
metadata2_pyqt_ui = None
graph_pyqt_ui = None
guide_table_width = [40, 400, 150, 150]
live_image = []
display_ui = []
list_guide_pyqt_roi = list()
list_profile_pyqt_roi = list()
list_table_widget_checkbox = list()
list_metadata = []
dict_list_metadata = OrderedDict()
    list_scale_units = {'string': ["mm", u"\u00B5m", "nm"],
                        'html': ["mm", "<span>µm</span>", "nm"]}
rgba_color = {'white': (255, 255, 255, 255, None),
'red': (255, 0, 0, 255, None),
'green': (0, 255, 0, 255, None),
'blue': (0, 0, 255, 255, None),
'black': (0, 0, 0, 255, None)}
rgb_color = {'white': (255, 255, 255),
'red': (255, 0, 0),
'green': (0, 255, 0),
'blue': (0, 0, 255),
'black': (0, 0, 0)}
html_color = {'white': "#FFF",
'red': "#F00",
'green': "#0F0",
'blue': "#00F",
'black': "#000"}
metadata_string_format_ui = None
def __init__(self, parent=None, working_dir='', data_dict=None):
display(HTML('<span style="font-size: 20px; color:blue">Check UI that popped up \
(maybe hidden behind this browser!)</span>'))
super(MetadataOverlappingImagesUi, self).__init__(parent)
ui_full_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
os.path.join('ui', 'ui_metadata_overlapping_images.ui'))
self.ui = load_ui(ui_full_path, baseinstance=self)
self.setWindowTitle("Metadata Overlapping Images")
self.working_dir = working_dir
self.data_dict = data_dict
self.data_dict_raw = copy.deepcopy(data_dict)
o_initialization = Initializer(parent=self)
o_initialization.pyqtgraph()
o_initialization.parameters()
o_initialization.statusbar()
o_initialization.table()
o_initialization.widgets()
o_initialization.event()
self.slider_file_changed(0)
self.text_metadata_1_enable_pressed(self.ui.checkBox.isChecked())
self.text_metadata_2_enable_pressed(self.ui.checkBox_2.isChecked())
def metadata_table_right_click(self, position):
o_metadata_table = MetadataTableHandler(parent=self)
o_metadata_table.right_click(position)
def previous_image_button_clicked(self):
self.change_slider(offset=-1)
self.update_metadata_pyqt_ui()
def next_image_button_clicked(self):
        self.change_slider(offset=+1)
self.update_metadata_pyqt_ui()
def help_button_clicked(self):
import webbrowser
webbrowser.open(HELP_PAGE)
def closeEvent(self, event=None):
if self.metadata_string_format_ui:
self.metadata_string_format_ui.close()
def slider_file_changed(self, slider_value):
self.display_image()
self.ui.image_slider_value.setText(str(slider_value))
self.check_status_next_prev_image_button()
self.update_metadata_pyqt_ui()
def slider_file_clicked(self):
current_slider_value = self.ui.file_slider.value()
self.slider_file_changed(current_slider_value)
self.update_metadata_pyqt_ui()
def scale_checkbox_clicked(self, status):
self.ui.scale_groupbox.setEnabled(status)
self.ui.scale_position_frame.setEnabled(status)
o_display = DisplayScalePyqtUi(parent=self)
o_display.run()
def metadata_checkbox_clicked(self, status):
self.ui.metadata_groupbox.setEnabled(status)
self.ui.metadata_position_frame.setEnabled(status)
self.ui.enable_graph_checkbox.setEnabled(status)
self.ui.text_graph_tabWidget.setEnabled(status)
self.ui.toolBox.setEnabled(status)
if status:
self.ui.graph_groupBox.setEnabled(self.ui.enable_graph_checkbox.isChecked())
else:
self.ui.graph_groupBox.setEnabled(False)
o_display = DisplayMetadataPyqtUi(parent=self)
o_display.run()
def select_metadata_checkbox_clicked(self, status):
self.ui.select_metadata_combobox.setEnabled(status)
self.update_metadata_pyqt_ui()
def font_size_slider_pressed(self):
self.update_metadata_pyqt_ui()
def font_size_slider_moved(self, value):
self.update_metadata_pyqt_ui()
def graph_font_size_slider_pressed(self):
self.update_metadata_pyqt_ui()
def graph_font_size_slider_moved(self, value):
self.update_metadata_pyqt_ui()
def metadata_list_changed(self, index, column):
o_event = MetadataTableHandler(parent=self)
o_event.metadata_list_changed(index, column)
def scale_orientation_clicked(self):
o_init = Initializer(parent=self)
o_init.set_scale_spinbox_max_value()
self.update_scale_pyqt_ui()
def scale_thickness_value_changed(self, value):
self.update_scale_pyqt_ui()
def scale_color_changed(self, value):
self.update_scale_pyqt_ui()
def scale_size_changed(self, value):
self.update_scale_pyqt_ui()
def scale_real_size_changed(self):
self.update_scale_pyqt_ui()
def scale_units_changed(self):
self.update_scale_pyqt_ui()
def scale_position_moved(self, new_value):
self.update_scale_pyqt_ui()
def scale_position_clicked(self):
self.update_scale_pyqt_ui()
def metadata_position_moved(self, new_value):
self.update_metadata_pyqt_ui()
def metadata_position_clicked(self):
self.update_metadata_pyqt_ui()
def metadata2_position_moved(self, new_value):
self.update_metadata_pyqt_ui()
def metadata2_position_clicked(self):
self.update_metadata_pyqt_ui()
def metadata_color_changed(self, value):
self.update_metadata_pyqt_ui()
def metadata_name_return_pressed(self):
self.update_metadata_pyqt_ui()
def graph_position_moved(self, value):
self.update_metadata_pyqt_ui()
def graph_position_clicked(self):
self.update_metadata_pyqt_ui()
def graph_color_changed(self, value):
self.update_metadata_pyqt_ui()
def graph_axis_label_changed(self, new_value):
self.update_metadata_pyqt_ui()
def metadata_text_or_graph_clicked(self):
status = self.ui.metadata_graph_option.isChecked()
self.ui.metadata_graph_size_label.setVisible(status)
self.ui.metadata_graph_size_slider.setVisible(status)
self.update_metadata_pyqt_ui()
def metadata_graph_size_pressed(self):
self.update_metadata_pyqt_ui()
def metadata_graph_size_moved(self, slider_value):
self.update_metadata_pyqt_ui()
def table_cell_changed(self, row, column):
self.update_metadata_pyqt_ui()
def export_table_clicked(self):
_export_folder = QFileDialog.getExistingDirectory(self,
directory=os.path.dirname(self.working_dir),
caption="Select Output Folder",
options=QFileDialog.ShowDirsOnly)
QtGui.QGuiApplication.processEvents()
if _export_folder:
o_export = ExportTable(parent=self,
export_folder=_export_folder)
o_export.run()
def export_button_clicked(self):
_export_folder = QFileDialog.getExistingDirectory(self,
directory=os.path.dirname(self.working_dir),
caption="Select Output Folder",
options=QFileDialog.ShowDirsOnly)
QtGui.QGuiApplication.processEvents()
if _export_folder:
o_export = ExportImages(parent=self,
export_folder=_export_folder)
o_export.run()
def enable_graph_button_clicked(self, new_state):
self.ui.graph_groupBox.setEnabled(new_state)
self.ui.metadata_position_frame_3.setEnabled(new_state)
self.ui.graph_position_y.setEnabled(new_state)
self.ui.graph_position_x.setEnabled(new_state)
self.ui.label_15.setEnabled(new_state)
self.ui.label_16.setEnabled(new_state)
self.update_metadata_pyqt_ui()
def display_red_vertical_marker_clicked(self):
self.update_metadata_pyqt_ui()
def text_metadata_1_enable_pressed(self, status):
self.ui.metadata_position_frame.setEnabled(status)
self.ui.metadata_position_x.setEnabled(status)
self.ui.metadata_position_y.setEnabled(status)
self.ui.label_10.setEnabled(status)
self.ui.label_11.setEnabled(status)
self.ui.label_14.setEnabled(status)
self.ui.font_size_slider.setEnabled(status)
self.ui.prefix_label_1.setEnabled(status)
self.ui.suffix_label_1.setEnabled(status)
self.ui.prefix_lineEdit_1.setEnabled(status)
self.ui.suffix_lineEdit_1.setEnabled(status)
self.ui.metadata_1_name_groupBox.setEnabled(status)
self.update_metadata_pyqt_ui()
def text_metadata_2_enable_pressed(self, status):
self.ui.metadata_position_frame_2.setEnabled(status)
self.ui.metadata_position_x_2.setEnabled(status)
self.ui.metadata_position_y_2.setEnabled(status)
self.ui.label_18.setEnabled(status)
self.ui.label_19.setEnabled(status)
self.ui.label_20.setEnabled(status)
self.ui.font_size_slider_2.setEnabled(status)
self.ui.prefix_label_2.setEnabled(status)
self.ui.suffix_label_2.setEnabled(status)
self.ui.prefix_lineEdit_2.setEnabled(status)
self.ui.suffix_lineEdit_2.setEnabled(status)
self.ui.metadata_2_name_groupBox.setEnabled(status)
self.update_metadata_pyqt_ui()
def metadata_1_suffix_prefix_changed(self, new_text):
self.update_metadata_pyqt_ui()
def metadata_2_suffix_prefix_changed(self, new_text):
self.update_metadata_pyqt_ui()
def update_metadata_pyqt_ui(self):
o_display = DisplayMetadataPyqtUi(parent=self)
o_display.clear_pyqt_items()
o_display.run()
def update_scale_pyqt_ui(self):
o_display = DisplayScalePyqtUi(parent=self)
o_display.clear_pyqt_items()
o_display.run()
def display_image(self, recalculate_image=False):
DisplayImages(parent=self, recalculate_image=recalculate_image)
def check_status_next_prev_image_button(self):
current_slider_value = self.ui.file_slider.value()
min_slider_value = self.ui.file_slider.minimum()
max_slider_value = self.ui.file_slider.maximum()
_prev = True
_next = True
if current_slider_value == min_slider_value:
_prev = False
elif current_slider_value == max_slider_value:
_next = False
self.ui.previous_image_button.setEnabled(_prev)
self.ui.next_image_button.setEnabled(_next)
def change_slider(self, offset=+1):
self.ui.file_slider.blockSignals(True)
current_slider_value = self.ui.file_slider.value()
new_row_selected = current_slider_value + offset
self.ui.image_slider_value.setText(str(new_row_selected))
self.ui.file_slider.setValue(new_row_selected)
self.check_status_next_prev_image_button()
self.display_image()
self.ui.file_slider.blockSignals(False)
| true
| true
|
1c47a9ba768369e5fcda639a537396d54a754795
| 142
|
py
|
Python
|
mysite/users/apps.py
|
saademad200/SE_Visualri
|
f01e22a5e47a44eb9219199027b68d1bd0bb4bca
|
[
"BSL-1.0"
] | null | null | null |
mysite/users/apps.py
|
saademad200/SE_Visualri
|
f01e22a5e47a44eb9219199027b68d1bd0bb4bca
|
[
"BSL-1.0"
] | null | null | null |
mysite/users/apps.py
|
saademad200/SE_Visualri
|
f01e22a5e47a44eb9219199027b68d1bd0bb4bca
|
[
"BSL-1.0"
] | null | null | null |
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'users'
def ready(self):
import users.signals
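The ready() hook imports users.signals purely for its side effect: registering signal receivers when Django finishes loading the app. The signals module is not shown in this record; a common shape for it, with a hypothetical receiver name and assuming the default auth User model, is:

from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver

@receiver(post_save, sender=User)
def handle_user_saved(sender, instance, created, **kwargs):
    # Hypothetical receiver: run one-time setup when a user row is first created.
    if created:
        pass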
| 17.75
| 34
| 0.65493
|
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'users'
def ready(self):
import users.signals
| true
| true
|
1c47aaec11d06eff56c121c77fb592d8b28a697b
| 13,676
|
py
|
Python
|
tornado/autoreload.py
|
DengJackNo1/tornado
|
895a4fa69817c24fbf6ada6c5fb07351c6e91cd5
|
[
"Apache-2.0"
] | 640
|
2018-09-12T03:14:13.000Z
|
2022-03-30T04:38:09.000Z
|
tornado/autoreload.py
|
DengJackNo1/tornado
|
895a4fa69817c24fbf6ada6c5fb07351c6e91cd5
|
[
"Apache-2.0"
] | 242
|
2019-01-29T15:48:27.000Z
|
2022-03-31T22:09:21.000Z
|
tornado/autoreload.py
|
DengJackNo1/tornado
|
895a4fa69817c24fbf6ada6c5fb07351c6e91cd5
|
[
"Apache-2.0"
] | 230
|
2018-09-13T02:40:49.000Z
|
2022-03-29T11:53:58.000Z
|
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Automatically restart the server when a source file is modified.
Most applications should not access this module directly. Instead,
pass the keyword argument ``autoreload=True`` to the
`tornado.web.Application` constructor (or ``debug=True``, which
enables this setting and several others). This will enable autoreload
mode as well as checking for changes to templates and static
resources. Note that restarting is a destructive operation and any
requests in progress will be aborted when the process restarts. (If
you want to disable autoreload while using other debug-mode features,
pass both ``debug=True`` and ``autoreload=False``).
This module can also be used as a command-line wrapper around scripts
such as unit test runners. See the `main` method for details.
The command-line wrapper and Application debug modes can be used together.
This combination is encouraged as the wrapper catches syntax errors and
other import-time failures, while debug mode catches changes once
the server has started.
This module will not work correctly when `.HTTPServer`'s multi-process
mode is used.
Reloading loses any Python interpreter command-line arguments (e.g. ``-u``)
because it re-executes Python using ``sys.executable`` and ``sys.argv``.
Additionally, modifying these variables will cause reloading to behave
incorrectly.
"""
import os
import sys
# sys.path handling
# -----------------
#
# If a module is run with "python -m", the current directory (i.e. "")
# is automatically prepended to sys.path, but not if it is run as
# "path/to/file.py". The processing for "-m" rewrites the former to
# the latter, so subsequent executions won't have the same path as the
# original.
#
# Conversely, when run as path/to/file.py, the directory containing
# file.py gets added to the path, which can cause confusion as imports
# may become relative in spite of the future import.
#
# We address the former problem by reconstructing the original command
# line (Python >= 3.4) or by setting the $PYTHONPATH environment
# variable (Python < 3.4) before re-execution so the new process will
# see the correct path. We attempt to address the latter problem when
# tornado.autoreload is run as __main__.
if __name__ == "__main__":
# This sys.path manipulation must come before our imports (as much
# as possible - if we introduced a tornado.sys or tornado.os
# module we'd be in trouble), or else our imports would become
# relative again despite the future import.
#
# There is a separate __main__ block at the end of the file to call main().
if sys.path[0] == os.path.dirname(__file__):
del sys.path[0]
import functools
import logging
import os
import pkgutil # type: ignore
import sys
import traceback
import types
import subprocess
import weakref
from tornado import ioloop
from tornado.log import gen_log
from tornado import process
from tornado.util import exec_in
try:
import signal
except ImportError:
signal = None # type: ignore
import typing
from typing import Callable, Dict
if typing.TYPE_CHECKING:
from typing import List, Optional, Union # noqa: F401
# os.execv is broken on Windows and can't properly parse command line
# arguments and executable name if they contain whitespaces. subprocess
# fixes that behavior.
_has_execv = sys.platform != "win32"
_watched_files = set()
_reload_hooks = []
_reload_attempted = False
_io_loops = weakref.WeakKeyDictionary() # type: ignore
_autoreload_is_main = False
_original_argv = None # type: Optional[List[str]]
_original_spec = None
def start(check_time: int = 500) -> None:
"""Begins watching source files for changes.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
io_loop = ioloop.IOLoop.current()
if io_loop in _io_loops:
return
_io_loops[io_loop] = True
if len(_io_loops) > 1:
gen_log.warning("tornado.autoreload started more than once in the same process")
modify_times = {} # type: Dict[str, float]
callback = functools.partial(_reload_on_update, modify_times)
scheduler = ioloop.PeriodicCallback(callback, check_time)
scheduler.start()
def wait() -> None:
"""Wait for a watched file to change, then restart the process.
Intended to be used at the end of scripts like unit test runners,
to run the tests again after any source file changes (but see also
the command-line interface in `main`)
"""
io_loop = ioloop.IOLoop()
io_loop.add_callback(start)
io_loop.start()
def watch(filename: str) -> None:
"""Add a file to the watch list.
All imported modules are watched by default.
"""
_watched_files.add(filename)
def add_reload_hook(fn: Callable[[], None]) -> None:
"""Add a function to be called before reloading the process.
Note that for open file and socket handles it is generally
preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or
``tornado.platform.auto.set_close_exec``) instead
of using a reload hook to close them.
"""
_reload_hooks.append(fn)
def _reload_on_update(modify_times: Dict[str, float]) -> None:
if _reload_attempted:
# We already tried to reload and it didn't work, so don't try again.
return
if process.task_id() is not None:
# We're in a child process created by fork_processes. If child
# processes restarted themselves, they'd all restart and then
# all call fork_processes again.
return
for module in list(sys.modules.values()):
# Some modules play games with sys.modules (e.g. email/__init__.py
# in the standard library), and occasionally this can cause strange
# failures in getattr. Just ignore anything that's not an ordinary
# module.
if not isinstance(module, types.ModuleType):
continue
path = getattr(module, "__file__", None)
if not path:
continue
if path.endswith(".pyc") or path.endswith(".pyo"):
path = path[:-1]
_check_file(modify_times, path)
for path in _watched_files:
_check_file(modify_times, path)
def _check_file(modify_times: Dict[str, float], path: str) -> None:
try:
modified = os.stat(path).st_mtime
except Exception:
return
if path not in modify_times:
modify_times[path] = modified
return
if modify_times[path] != modified:
gen_log.info("%s modified; restarting server", path)
_reload()
def _reload() -> None:
global _reload_attempted
_reload_attempted = True
for fn in _reload_hooks:
fn()
if hasattr(signal, "setitimer"):
# Clear the alarm signal set by
# ioloop.set_blocking_log_threshold so it doesn't fire
# after the exec.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
# sys.path fixes: see comments at top of file. If __main__.__spec__
# exists, we were invoked with -m and the effective path is about to
# change on re-exec. Reconstruct the original command line to
# ensure that the new process sees the same path we did. If
# __spec__ is not available (Python < 3.4), check instead if
# sys.path[0] is an empty string and add the current directory to
# $PYTHONPATH.
if _autoreload_is_main:
assert _original_argv is not None
spec = _original_spec
argv = _original_argv
else:
spec = getattr(sys.modules["__main__"], "__spec__", None)
argv = sys.argv
if spec:
argv = ["-m", spec.name] + argv[1:]
else:
path_prefix = "." + os.pathsep
if sys.path[0] == "" and not os.environ.get("PYTHONPATH", "").startswith(
path_prefix
):
os.environ["PYTHONPATH"] = path_prefix + os.environ.get("PYTHONPATH", "")
if not _has_execv:
subprocess.Popen([sys.executable] + argv)
os._exit(0)
else:
try:
os.execv(sys.executable, [sys.executable] + argv)
except OSError:
# Mac OS X versions prior to 10.6 do not support execv in
# a process that contains multiple threads. Instead of
# re-executing in the current process, start a new one
# and cause the current process to exit. This isn't
# ideal since the new process is detached from the parent
# terminal and thus cannot easily be killed with ctrl-C,
# but it's better than not being able to autoreload at
# all.
# Unfortunately the errno returned in this case does not
# appear to be consistent, so we can't easily check for
# this error specifically.
os.spawnv( # type: ignore
os.P_NOWAIT, sys.executable, [sys.executable] + argv
)
# At this point the IOLoop has been closed and finally
# blocks will experience errors if we allow the stack to
# unwind, so just exit uncleanly.
os._exit(0)
_USAGE = """\
Usage:
python -m tornado.autoreload -m module.to.run [args...]
python -m tornado.autoreload path/to/script.py [args...]
"""
def main() -> None:
"""Command-line wrapper to re-run a script whenever its source changes.
Scripts may be specified by filename or module name::
python -m tornado.autoreload -m tornado.test.runtests
python -m tornado.autoreload tornado/test/runtests.py
Running a script with this wrapper is similar to calling
`tornado.autoreload.wait` at the end of the script, but this wrapper
can catch import-time problems like syntax errors that would otherwise
prevent the script from reaching its call to `wait`.
"""
# Remember that we were launched with autoreload as main.
# The main module can be tricky; set the variables both in our globals
# (which may be __main__) and the real importable version.
import tornado.autoreload
global _autoreload_is_main
global _original_argv, _original_spec
tornado.autoreload._autoreload_is_main = _autoreload_is_main = True
original_argv = sys.argv
tornado.autoreload._original_argv = _original_argv = original_argv
original_spec = getattr(sys.modules["__main__"], "__spec__", None)
tornado.autoreload._original_spec = _original_spec = original_spec
sys.argv = sys.argv[:]
if len(sys.argv) >= 3 and sys.argv[1] == "-m":
mode = "module"
module = sys.argv[2]
del sys.argv[1:3]
elif len(sys.argv) >= 2:
mode = "script"
script = sys.argv[1]
sys.argv = sys.argv[1:]
else:
print(_USAGE, file=sys.stderr)
sys.exit(1)
try:
if mode == "module":
import runpy
runpy.run_module(module, run_name="__main__", alter_sys=True)
elif mode == "script":
with open(script) as f:
# Execute the script in our namespace instead of creating
# a new one so that something that tries to import __main__
# (e.g. the unittest module) will see names defined in the
# script instead of just those defined in this module.
global __file__
__file__ = script
# If __package__ is defined, imports may be incorrectly
# interpreted as relative to this module.
global __package__
del __package__
exec_in(f.read(), globals(), globals())
except SystemExit as e:
logging.basicConfig()
gen_log.info("Script exited with status %s", e.code)
except Exception as e:
logging.basicConfig()
gen_log.warning("Script exited with uncaught exception", exc_info=True)
# If an exception occurred at import time, the file with the error
# never made it into sys.modules and so we won't know to watch it.
# Just to make sure we've covered everything, walk the stack trace
# from the exception and watch every file.
for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]):
watch(filename)
if isinstance(e, SyntaxError):
# SyntaxErrors are special: their innermost stack frame is fake
# so extract_tb won't see it and we have to get the filename
# from the exception object.
watch(e.filename)
else:
logging.basicConfig()
gen_log.info("Script exited normally")
# restore sys.argv so subsequent executions will include autoreload
sys.argv = original_argv
if mode == "module":
# runpy did a fake import of the module as __main__, but now it's
# no longer in sys.modules. Figure out where it is and watch it.
loader = pkgutil.get_loader(module)
if loader is not None:
watch(loader.get_filename()) # type: ignore
wait()
if __name__ == "__main__":
# See also the other __main__ block at the top of the file, which modifies
# sys.path before our imports
main()
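As the module docstring notes, most applications enable autoreload indirectly through tornado.web.Application rather than using this module directly. A minimal sketch, with an illustrative handler, route, and port:

import tornado.ioloop
import tornado.web

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write('hello')

# debug=True turns on autoreload along with other debug features; pass
# autoreload=True (without debug) to enable only the reloader.
app = tornado.web.Application([(r'/', MainHandler)], debug=True)
app.listen(8888)
tornado.ioloop.IOLoop.current().start()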
| 37.468493
| 88
| 0.674101
|
import os
import sys
# original.
#
# Conversely, when run as path/to/file.py, the directory containing
# file.py gets added to the path, which can cause confusion as imports
# may become relative in spite of the future import.
#
# We address the former problem by reconstructing the original command
# line (Python >= 3.4) or by setting the $PYTHONPATH environment
# variable (Python < 3.4) before re-execution so the new process will
# see the correct path. We attempt to address the latter problem when
# tornado.autoreload is run as __main__.
if __name__ == "__main__":
# This sys.path manipulation must come before our imports (as much
# as possible - if we introduced a tornado.sys or tornado.os
# module we'd be in trouble), or else our imports would become
if sys.path[0] == os.path.dirname(__file__):
del sys.path[0]
import functools
import logging
import os
import pkgutil
import sys
import traceback
import types
import subprocess
import weakref
from tornado import ioloop
from tornado.log import gen_log
from tornado import process
from tornado.util import exec_in
try:
import signal
except ImportError:
signal = None
import typing
from typing import Callable, Dict
if typing.TYPE_CHECKING:
from typing import List, Optional, Union
# arguments and executable name if they contain whitespaces. subprocess
# fixes that behavior.
_has_execv = sys.platform != "win32"
_watched_files = set()
_reload_hooks = []
_reload_attempted = False
_io_loops = weakref.WeakKeyDictionary() # type: ignore
_autoreload_is_main = False
_original_argv = None # type: Optional[List[str]]
_original_spec = None
def start(check_time: int = 500) -> None:
io_loop = ioloop.IOLoop.current()
if io_loop in _io_loops:
return
_io_loops[io_loop] = True
if len(_io_loops) > 1:
gen_log.warning("tornado.autoreload started more than once in the same process")
modify_times = {} # type: Dict[str, float]
callback = functools.partial(_reload_on_update, modify_times)
scheduler = ioloop.PeriodicCallback(callback, check_time)
scheduler.start()
def wait() -> None:
io_loop = ioloop.IOLoop()
io_loop.add_callback(start)
io_loop.start()
def watch(filename: str) -> None:
_watched_files.add(filename)
def add_reload_hook(fn: Callable[[], None]) -> None:
_reload_hooks.append(fn)
def _reload_on_update(modify_times: Dict[str, float]) -> None:
if _reload_attempted:
# We already tried to reload and it didn't work, so don't try again.
return
if process.task_id() is not None:
# We're in a child process created by fork_processes. If child
# all call fork_processes again.
return
for module in list(sys.modules.values()):
# Some modules play games with sys.modules (e.g. email/__init__.py
# in the standard library), and occasionally this can cause strange
# failures in getattr. Just ignore anything that's not an ordinary
if not isinstance(module, types.ModuleType):
continue
path = getattr(module, "__file__", None)
if not path:
continue
if path.endswith(".pyc") or path.endswith(".pyo"):
path = path[:-1]
_check_file(modify_times, path)
for path in _watched_files:
_check_file(modify_times, path)
def _check_file(modify_times: Dict[str, float], path: str) -> None:
try:
modified = os.stat(path).st_mtime
except Exception:
return
if path not in modify_times:
modify_times[path] = modified
return
if modify_times[path] != modified:
gen_log.info("%s modified; restarting server", path)
_reload()
def _reload() -> None:
global _reload_attempted
_reload_attempted = True
for fn in _reload_hooks:
fn()
if hasattr(signal, "setitimer"):
# after the exec.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
# sys.path fixes: see comments at top of file. If __main__.__spec__
# exists, we were invoked with -m and the effective path is about to
# change on re-exec. Reconstruct the original command line to
# ensure that the new process sees the same path we did. If
# __spec__ is not available (Python < 3.4), check instead if
# sys.path[0] is an empty string and add the current directory to
# $PYTHONPATH.
if _autoreload_is_main:
assert _original_argv is not None
spec = _original_spec
argv = _original_argv
else:
spec = getattr(sys.modules["__main__"], "__spec__", None)
argv = sys.argv
if spec:
argv = ["-m", spec.name] + argv[1:]
else:
path_prefix = "." + os.pathsep
if sys.path[0] == "" and not os.environ.get("PYTHONPATH", "").startswith(
path_prefix
):
os.environ["PYTHONPATH"] = path_prefix + os.environ.get("PYTHONPATH", "")
if not _has_execv:
subprocess.Popen([sys.executable] + argv)
os._exit(0)
else:
try:
os.execv(sys.executable, [sys.executable] + argv)
except OSError:
# Mac OS X versions prior to 10.6 do not support execv in
# a process that contains multiple threads. Instead of
# re-executing in the current process, start a new one
# and cause the current process to exit. This isn't
# all.
# Unfortunately the errno returned in this case does not
# appear to be consistent, so we can't easily check for
os.spawnv(
os.P_NOWAIT, sys.executable, [sys.executable] + argv
)
os._exit(0)
_USAGE = """\
Usage:
python -m tornado.autoreload -m module.to.run [args...]
python -m tornado.autoreload path/to/script.py [args...]
"""
def main() -> None:
import tornado.autoreload
global _autoreload_is_main
global _original_argv, _original_spec
tornado.autoreload._autoreload_is_main = _autoreload_is_main = True
original_argv = sys.argv
tornado.autoreload._original_argv = _original_argv = original_argv
original_spec = getattr(sys.modules["__main__"], "__spec__", None)
tornado.autoreload._original_spec = _original_spec = original_spec
sys.argv = sys.argv[:]
if len(sys.argv) >= 3 and sys.argv[1] == "-m":
mode = "module"
module = sys.argv[2]
del sys.argv[1:3]
elif len(sys.argv) >= 2:
mode = "script"
script = sys.argv[1]
sys.argv = sys.argv[1:]
else:
print(_USAGE, file=sys.stderr)
sys.exit(1)
try:
if mode == "module":
import runpy
runpy.run_module(module, run_name="__main__", alter_sys=True)
elif mode == "script":
with open(script) as f:
global __file__
__file__ = script
global __package__
del __package__
exec_in(f.read(), globals(), globals())
except SystemExit as e:
logging.basicConfig()
gen_log.info("Script exited with status %s", e.code)
except Exception as e:
logging.basicConfig()
gen_log.warning("Script exited with uncaught exception", exc_info=True)
# Just to make sure we've covered everything, walk the stack trace
for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]):
watch(filename)
if isinstance(e, SyntaxError):
# from the exception object.
watch(e.filename)
else:
logging.basicConfig()
gen_log.info("Script exited normally")
# restore sys.argv so subsequent executions will include autoreload
sys.argv = original_argv
if mode == "module":
# runpy did a fake import of the module as __main__, but now it's
loader = pkgutil.get_loader(module)
if loader is not None:
watch(loader.get_filename())
wait()
if __name__ == "__main__":
main()
| true
| true
|
1c47ab394fb23448ceb4c13702c16990ae7535cf
| 649
|
py
|
Python
|
Ex056.py
|
andrade-lcs/ex_curso_em_video_python
|
f2d029efe7a20cdf0fcb5b602f9992e27d37c263
|
[
"MIT"
] | null | null | null |
Ex056.py
|
andrade-lcs/ex_curso_em_video_python
|
f2d029efe7a20cdf0fcb5b602f9992e27d37c263
|
[
"MIT"
] | null | null | null |
Ex056.py
|
andrade-lcs/ex_curso_em_video_python
|
f2d029efe7a20cdf0fcb5b602f9992e27d37c263
|
[
"MIT"
] | null | null | null |
m = 0
q = 0
ma = 0
mm = 0
me = 0.0
nma = ''
a = int(input('Enter how many people are in the group: '))
for c in range(0, a):
    n = str(input('Enter the name: '))
    i = int(input('Enter the age: '))
    s = int(input('Enter the sex:\n[1] for male\n[2] for female\n'))
    m = m + i
    q = q + 1
    me = m / q
    if s == 1 and i > ma:
        ma = i
        nma = n
    elif s == 2 and i < 20:
        mm = mm + 1
print('The average age of the people entered is {:.2f} years.\nThe oldest man is {}, aged {}.\nAnd the group '
      'has {} women under 20 years old.'.format(me, nma, ma, mm))
print('END')
| 28.217391
| 118
| 0.534669
|
m = 0
q = 0
ma = 0
mm = 0
me = 0.0
nma = ''
a = int(input('Enter how many people are in the group: '))
for c in range(0, a):
    n = str(input('Enter the name: '))
    i = int(input('Enter the age: '))
    s = int(input('Enter the sex:\n[1] for male\n[2] for female\n'))
    m = m + i
    q = q + 1
    me = m / q
    if s == 1 and i > ma:
        ma = i
        nma = n
    elif s == 2 and i < 20:
        mm = mm + 1
print('The average age of the people entered is {:.2f} years.\nThe oldest man is {}, aged {}.\nAnd the group '
      'has {} women under 20 years old.'.format(me, nma, ma, mm))
print('END')
| true
| true
|
1c47ac62262bb7d3b7efc480a2952496dfd81d53
| 571
|
py
|
Python
|
core/da/sqlitedriver.py
|
ramkj/xman
|
8ab14b0754e0ef3c44c27259c0df7c10697d3502
|
[
"Apache-2.0"
] | null | null | null |
core/da/sqlitedriver.py
|
ramkj/xman
|
8ab14b0754e0ef3c44c27259c0df7c10697d3502
|
[
"Apache-2.0"
] | null | null | null |
core/da/sqlitedriver.py
|
ramkj/xman
|
8ab14b0754e0ef3c44c27259c0df7c10697d3502
|
[
"Apache-2.0"
] | null | null | null |
import sqlite3
class SQLiteDriver:
    def __init__(self, dbname: str):
        self.config = dbname
    def __enter__(self) -> sqlite3.Cursor:
        self.connection = sqlite3.connect(self.config)
        assert self.connection is not None, 'failed getting connection from DB'
        self.connection.execute('PRAGMA foreign_keys=ON')
self.cursor = self.connection.cursor()
return self.cursor
def __exit__(self, exc_type, exc_value, exc_trace) -> None:
self.connection.commit()
self.cursor.close()
self.connection.close()
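A usage sketch for the context manager above; the database file and schema are illustrative. Note that __exit__ always commits, even when the block raised an exception, so callers wanting rollback-on-error would need to inspect exc_type:

with SQLiteDriver('example.db') as cursor:
    cursor.execute('CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, name TEXT)')
    cursor.execute('INSERT INTO users (name) VALUES (?)', ('alice',))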
| 30.052632
| 79
| 0.654991
|
import sqlite3
class SQLiteDriver:
    def __init__(self, dbname: str):
        self.config = dbname
    def __enter__(self) -> sqlite3.Cursor:
        self.connection = sqlite3.connect(self.config)
        assert self.connection is not None, 'failed getting connection from DB'
        self.connection.execute('PRAGMA foreign_keys=ON')
self.cursor = self.connection.cursor()
return self.cursor
def __exit__(self, exc_type, exc_value, exc_trace) -> None:
self.connection.commit()
self.cursor.close()
self.connection.close()
| true
| true
|
1c47ac976cbf51fb5ea1439ce4c43e00aa534a40
| 1,027
|
py
|
Python
|
salt/runners/mine.py
|
bruce-one/salt
|
0715f6c29a8e19c3cf7a67ad41aff84801c9f5ae
|
[
"Apache-2.0"
] | 1
|
2016-04-20T08:18:07.000Z
|
2016-04-20T08:18:07.000Z
|
salt/runners/mine.py
|
quantonganh/salt
|
8f1df678573153970c08b33978fe185d9ed1b71c
|
[
"Apache-2.0"
] | null | null | null |
salt/runners/mine.py
|
quantonganh/salt
|
8f1df678573153970c08b33978fe185d9ed1b71c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
A runner to access data from the salt mine
'''
# Import python libs
import os
# Import salt libs
import salt.payload
import salt.utils.minions
import salt.utils
def get(tgt, fun, tgt_type='glob'):
'''
    Gather the data from the specified minions' mine. Pass in the target,
    the function to look up, and the target type.
CLI Example::
salt-run mine.get '*' network.interfaces
'''
ret = {}
serial = salt.payload.Serial(__opts__)
checker = salt.utils.minions.CkMinions(__opts__)
minions = checker.check_minions(
tgt,
tgt_type)
for minion in minions:
mine = os.path.join(
__opts__['cachedir'],
'minions',
minion,
'mine.p')
try:
with salt.utils.fopen(mine) as fp_:
fdata = serial.load(fp_).get(fun)
if fdata:
ret[minion] = fdata
except Exception:
continue
return ret
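Besides the CLI shown in the docstring, the runner can be invoked from Python through Salt's RunnerClient; the master config path and target below are illustrative:

import salt.config
import salt.runner

opts = salt.config.master_config('/etc/salt/master')
runner = salt.runner.RunnerClient(opts)
# Equivalent to `salt-run mine.get '*' network.interfaces`; the result maps
# each matched minion id to its cached mine data.
result = runner.cmd('mine.get', ['*', 'network.interfaces'])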
| 23.883721
| 74
| 0.558909
|
import os
import salt.payload
import salt.utils.minions
import salt.utils
def get(tgt, fun, tgt_type='glob'):
ret = {}
serial = salt.payload.Serial(__opts__)
checker = salt.utils.minions.CkMinions(__opts__)
minions = checker.check_minions(
tgt,
tgt_type)
for minion in minions:
mine = os.path.join(
__opts__['cachedir'],
'minions',
minion,
'mine.p')
try:
with salt.utils.fopen(mine) as fp_:
fdata = serial.load(fp_).get(fun)
if fdata:
ret[minion] = fdata
except Exception:
continue
return ret
| true
| true
|
1c47ade190e28d7400249b8c5dab37fe86d3fefc
| 1,489
|
py
|
Python
|
parser/team08/Tytus_SQLPARSER_G8/Instrucciones/FunctionTrigonometric/Tanh.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
parser/team08/Tytus_SQLPARSER_G8/Instrucciones/FunctionTrigonometric/Tanh.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
parser/team08/Tytus_SQLPARSER_G8/Instrucciones/FunctionTrigonometric/Tanh.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
import math
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from Instrucciones.Excepcion import Excepcion
class Tanh(Instruccion):
def __init__(self, valor, strGram, linea, columna):
Instruccion.__init__(self,Tipo(Tipo_Dato.DOUBLE_PRECISION),linea,columna,strGram)
self.valor = valor
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
resultado = self.valor.ejecutar(tabla,arbol)
if isinstance(resultado, Excepcion):
return resultado
        numeric_types = (Tipo_Dato.SMALLINT, Tipo_Dato.INTEGER, Tipo_Dato.BIGINT, Tipo_Dato.DECIMAL,
                         Tipo_Dato.NUMERIC, Tipo_Dato.REAL, Tipo_Dato.DOUBLE_PRECISION)
        if self.valor.tipo.tipo not in numeric_types:
error = Excepcion('42883',"Semántico","No existe la función tanh("+self.valor.tipo.toString()+")",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
try:
return math.tanh(resultado)
        except ValueError:
error = Excepcion('22003',"Semántico","La entrada está fuera de rango",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
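For reference, math.tanh is defined for all finite floats and saturates at plus or minus 1, so the ValueError branch above is defensive rather than commonly hit:

import math

print(math.tanh(0.5))    # approximately 0.4621
print(math.tanh(710.0))  # 1.0; saturates, whereas math.exp(710.0) would raise OverflowError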
| 55.148148
| 335
| 0.697112
|
import math
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from Instrucciones.Excepcion import Excepcion
class Tanh(Instruccion):
def __init__(self, valor, strGram, linea, columna):
Instruccion.__init__(self,Tipo(Tipo_Dato.DOUBLE_PRECISION),linea,columna,strGram)
self.valor = valor
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
resultado = self.valor.ejecutar(tabla,arbol)
if isinstance(resultado, Excepcion):
return resultado
        numeric_types = (Tipo_Dato.SMALLINT, Tipo_Dato.INTEGER, Tipo_Dato.BIGINT, Tipo_Dato.DECIMAL,
                         Tipo_Dato.NUMERIC, Tipo_Dato.REAL, Tipo_Dato.DOUBLE_PRECISION)
        if self.valor.tipo.tipo not in numeric_types:
error = Excepcion('42883',"Semántico","No existe la función tanh("+self.valor.tipo.toString()+")",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
try:
return math.tanh(resultado)
        except ValueError:
error = Excepcion('22003',"Semántico","La entrada está fuera de rango",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
| true
| true
|
1c47ae46f17c882072873a49257d173aa670600d
| 6,395
|
py
|
Python
|
examples/benchmarks/json/errors.py
|
eerimoq/textparser
|
cc4a85f8b7e6d6be83f5072f45af4a7baf6c35df
|
[
"MIT"
] | 23
|
2018-09-01T14:39:07.000Z
|
2021-11-08T11:52:43.000Z
|
examples/benchmarks/json/errors.py
|
risingdeveloper007/TextParser
|
c0f7b0268f86b77f4eb8366016987140792faff8
|
[
"MIT"
] | 1
|
2020-07-06T13:19:25.000Z
|
2020-08-01T08:16:34.000Z
|
examples/benchmarks/json/errors.py
|
risingdeveloper007/TextParser
|
c0f7b0268f86b77f4eb8366016987140792faff8
|
[
"MIT"
] | 6
|
2019-05-01T21:31:03.000Z
|
2021-08-24T11:57:21.000Z
|
#!/usr/bin/env python
"""Parse error comparison for a few JSON parsers.
Example execution:
$ env PYTHONPATH=. python3 examples/benchmarks/json/errors.py
-----------------------------------------------------------------
Input string between BEGIN and END:
BEGIN
END
textparser: "Invalid syntax at line 1, column 1: ">>!<<""
lark_lalr: "'NoneType' object has no attribute 'pos_in_stream'"
lark_earley: "Incomplete parse: Could not find a solution to input"
pyparsing: "Expected {string enclosed in double quotes | real number with scientific notation | real number | signed integer | Group:(Forward: ...) | Group:({Suppress:("[") [Forward: ... [, Forward: ...]...] Suppress:("]")}) | "true" | "false" | "null"} (at char 0), (line:1, col:1)"
parsita: "No exception raised!"
funcparserlib: "no tokens left in the stream: <EOF>"
parsy: "expected one of '"', '-?(0|[1-9][0-9]*)([.][0-9]+)?([eE][+-]?[0-9]+)?', '[', 'false', 'null', 'true', '{' at 0:0"
parsimonious: "Rule 'json_file' didn't match at '' (line 1, column 1)."
pyleri: "No exception raised!"
textx: "None:1:1: error: Expected '[' or '{' at position (1, 1) => '*'."
-----------------------------------------------------------------
Input string between BEGIN and END:
BEGIN
[
1,
{"a": {]}
]
END
textparser: "Invalid syntax at line 3, column 10: " {"a": {>>!<<]}""
lark_lalr: "Unexpected token Token(RSQB, ']') at line 3, column 10.
Expected: ESCAPED_STRING, RBRACE, string, pair
"
lark_earley: "Unexpected token Token(RSQB, ']') at line 3, column 10.
Expected: ESCAPED_STRING, RBRACE
"
pyparsing: "Expected {string enclosed in double quotes | real number with scientific notation | real number | signed integer | Group:(Forward: ...) | Group:({Suppress:("[") [Forward: ... [, Forward: ...]...] Suppress:("]")}) | "true" | "false" | "null"} (at char 5), (line:2, col:4)"
parsita: "No exception raised!"
funcparserlib: "got unexpected token: 3,10-3,10: Op ']'"
parsy: "expected one of '"', '}' at 2:9"
parsimonious: "Rule 'members' didn't match at ']}
]
' (line 3, column 10)."
pyleri: "No exception raised!"
textx: "None:3:10: error: Expected STRING or '}' at position (3, 10) => ' {"a": {*]} ] '."
-----------------------------------------------------------------
Input string between BEGIN and END:
BEGIN
[
1,
{3: null}
]
END
textparser: "Invalid syntax at line 3, column 4: " {>>!<<3: null}""
lark_lalr: "Unexpected token Token(SIGNED_NUMBER, '3') at line 3, column 4.
Expected: RBRACE, pair, string, ESCAPED_STRING
"
lark_earley: "Unexpected token Token(SIGNED_NUMBER, '3') at line 3, column 4.
Expected: ESCAPED_STRING, RBRACE
"
pyparsing: "Expected {string enclosed in double quotes | real number with scientific notation | real number | signed integer | Group:(Forward: ...) | Group:({Suppress:("[") [Forward: ... [, Forward: ...]...] Suppress:("]")}) | "true" | "false" | "null"} (at char 5), (line:2, col:4)"
parsita: "No exception raised!"
funcparserlib: "got unexpected token: 3,4-3,4: Number '3'"
parsy: "expected one of '"', '}' at 2:3"
parsimonious: "Rule 'members' didn't match at '3: null}
]
' (line 3, column 4)."
pyleri: "No exception raised!"
textx: "None:3:4: error: Expected STRING or '}' at position (3, 4) => '[ 1, {*3: null} ]'."
-----------------------------------------------------------------
Input string between BEGIN and END:
BEGIN
nul
END
textparser: "Invalid syntax at line 1, column 1: ">>!<<nul""
lark_lalr: "No terminal defined for 'n' at line 1 col 1
nul
^
"
lark_earley: "No terminal defined for 'n' at line 1 col 1
nul
^
"
pyparsing: "Expected {string enclosed in double quotes | real number with scientific notation | real number | signed integer | Group:(Forward: ...) | Group:({Suppress:("[") [Forward: ... [, Forward: ...]...] Suppress:("]")}) | "true" | "false" | "null"} (at char 0), (line:1, col:1)"
parsita: "No exception raised!"
funcparserlib: "got unexpected token: 1,1-1,3: Name 'nul'"
parsy: "expected one of '"', '-?(0|[1-9][0-9]*)([.][0-9]+)?([eE][+-]?[0-9]+)?', '[', 'false', 'null', 'true', '{' at 0:0"
parsimonious: "Rule 'json_file' didn't match at 'nul
' (line 1, column 1)."
pyleri: "No exception raised!"
textx: "None:1:1: error: Expected '[' or '{' at position (1, 1) => '*nul '."
$
"""
from __future__ import print_function
from parsers import textparser_json
from parsers import lark_json
from parsers import pyparsing_json
from parsers import funcparserlib_json
from parsers import parsimonious_json
from parsers import textx_json
try:
from parsers import parsita_json
except ImportError:
class parsita_json(object):
@staticmethod
def parse(_json_string):
raise Exception('Import failed!')
try:
from parsers import parsy_json
except ImportError:
class parsy_json(object):
@staticmethod
def parse(_json_string):
raise Exception('Import failed!')
try:
from parsers import pyleri_json
except ImportError:
class pyleri_json(object):
@staticmethod
def parse(_json_string):
raise Exception('Import failed!')
def parse(string):
def _parse(function):
try:
function(string)
except Exception as e:
return str(e)
return 'No exception raised!'
results = [
('textparser', _parse(textparser_json.parse)),
('lark_lalr', _parse(lark_json.parse_lalr)),
('lark_earley', _parse(lark_json.parse_earley)),
('pyparsing', _parse(pyparsing_json.parse)),
('parsita', _parse(parsita_json.parse)),
('funcparserlib', _parse(funcparserlib_json.parse)),
('parsy', _parse(parsy_json.parse)),
('parsimonious', _parse(parsimonious_json.parse)),
('pyleri', _parse(pyleri_json.parse)),
('textx', _parse(textx_json.parse))
]
print('-----------------------------------------------------------------')
print()
print('Input string between BEGIN and END:')
print()
print('BEGIN')
print(string, end='')
print('END')
print()
for parser, error in results:
print('{}: "{}"'.format(parser, error))
print()
EMPTY_STRING = '''\
'''
BAD_DICT_END_STRING = '''\
[
1,
{"a": {]}
]
'''
BAD_DICT_KEY_STRING = '''\
[
1,
{3: null}
]
'''
BAD_NULL_STRING = '''\
nul
'''
parse(EMPTY_STRING)
parse(BAD_DICT_END_STRING)
parse(BAD_DICT_KEY_STRING)
parse(BAD_NULL_STRING)
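For comparison, the same malformed inputs can be fed to the standard library's json module; this stand-alone sketch is not part of the benchmark's parsers package:

import json

for text in ('', 'nul', '[\n    1,\n    {"a": {]}\n]\n'):
    try:
        json.loads(text)
    except json.JSONDecodeError as e:
        # e.g. "Expecting value: line 1 column 1 (char 0)" for the empty string
        print(e)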
| 25.682731
| 283
| 0.602033
|
from __future__ import print_function
from parsers import textparser_json
from parsers import lark_json
from parsers import pyparsing_json
from parsers import funcparserlib_json
from parsers import parsimonious_json
from parsers import textx_json
try:
from parsers import parsita_json
except ImportError:
class parsita_json(object):
@staticmethod
def parse(_json_string):
raise Exception('Import failed!')
try:
from parsers import parsy_json
except ImportError:
class parsy_json(object):
@staticmethod
def parse(_json_string):
raise Exception('Import failed!')
try:
from parsers import pyleri_json
except ImportError:
class pyleri_json(object):
@staticmethod
def parse(_json_string):
raise Exception('Import failed!')
def parse(string):
def _parse(function):
try:
function(string)
except Exception as e:
return str(e)
return 'No exception raised!'
results = [
('textparser', _parse(textparser_json.parse)),
('lark_lalr', _parse(lark_json.parse_lalr)),
('lark_earley', _parse(lark_json.parse_earley)),
('pyparsing', _parse(pyparsing_json.parse)),
('parsita', _parse(parsita_json.parse)),
('funcparserlib', _parse(funcparserlib_json.parse)),
('parsy', _parse(parsy_json.parse)),
('parsimonious', _parse(parsimonious_json.parse)),
('pyleri', _parse(pyleri_json.parse)),
('textx', _parse(textx_json.parse))
]
print('-----------------------------------------------------------------')
print()
print('Input string between BEGIN and END:')
print()
print('BEGIN')
print(string, end='')
print('END')
print()
for parser, error in results:
print('{}: "{}"'.format(parser, error))
print()
EMPTY_STRING = '''\
'''
BAD_DICT_END_STRING = '''\
[
1,
{"a": {]}
]
'''
BAD_DICT_KEY_STRING = '''\
[
1,
{3: null}
]
'''
BAD_NULL_STRING = '''\
nul
'''
parse(EMPTY_STRING)
parse(BAD_DICT_END_STRING)
parse(BAD_DICT_KEY_STRING)
parse(BAD_NULL_STRING)
| true
| true
|
hexsha: 1c47af64e57d9e011aed97ff68c6f130de74836b | size: 1,067 | ext: py | lang: Python
path: setup.py | repo: timmypidashev/poilet | head: 40535f9d22f1722de130458e9e487a945abd653f | licenses: ["MIT"] | stars/issues/forks counts & dates: null
import re
from setuptools import setup
# README will be shown on PyPi
with open('README.md') as file:
readme = file.read()
# Track version number (read __version__ from the package without importing it)
with open('poilet/__init__.py') as file:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', file.read(), re.MULTILINE).group(1)
setup(
name='poilet',
author='timmypidashev',
url='https://github.com/timmypidashev/poilet',
project_urls={
'Discussions': 'https://github.com/timmypidashev/poilet/discussions',
'Issues': 'https://github.com/timmypidashev/poilet/issues',
},
version=version,
packages=['poilet'],
license='MIT',
description='Python variant of The Other Implementation of figLET',
long_description=readme,
long_description_content_type='text/markdown',
python_requires='>=3.10.4',
classifiers=[
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.10'
]
)
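A small, self-contained sketch of the version-extraction pattern used above
(the sample text is hypothetical; the real source is poilet/__init__.py):

import re

sample = '__version__ = "1.2.3"\n'
match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', sample, re.MULTILINE)
assert match is not None
print(match.group(1))  # prints: 1.2.3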
avg_line_length: 30.485714 | max_line_length: 93 | alphanum_fraction: 0.645736
hexsha: 1c47affeae4e58845137235341df557f0710b03f | size: 50,416 | ext: py | lang: Python
path: mypy/main.py | repo: noudald/mypy | head: ecdd4b2e81945d998eb1e1116fb901ff7b63a703 | licenses: ["PSF-2.0"] | stars/issues/forks counts & dates: null
"""Mypy type checker command line tool."""
import argparse
import ast
import configparser
import os
import re
import subprocess
import sys
import time
from typing import Any, Dict, List, Mapping, Optional, Tuple, Callable
from mypy import build
from mypy import defaults
from mypy import experiments
from mypy import util
from mypy.build import BuildResult
from mypy.modulefinder import BuildSource, FindModuleCache, mypy_path, SearchPaths
from mypy.find_sources import create_source_list, InvalidSourceList
from mypy.fscache import FileSystemCache
from mypy.errors import CompileError
from mypy.options import Options, BuildType, PER_MODULE_OPTIONS
from mypy.report import reporter_classes
from mypy.version import __version__
MYPY = False
if MYPY:
from typing_extensions import Final
orig_stat = os.stat # type: Final
MEM_PROFILE = False # type: Final # If True, dump memory profile
def stat_proxy(path: str) -> os.stat_result:
try:
st = orig_stat(path)
except os.error as err:
print("stat(%r) -> %s" % (path, err))
raise
else:
print("stat(%r) -> (st_mode=%o, st_mtime=%d, st_size=%d)" %
(path, st.st_mode, st.st_mtime, st.st_size))
return st
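# Hedged usage sketch: swap the proxy in and out around a region of interest.
#     os.stat = stat_proxy   # every stat() call is printed, then behaves normally
#     ...                    # run the code being investigated
#     os.stat = orig_stat    # restore the real implementation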
def main(script_path: Optional[str], args: Optional[List[str]] = None) -> None:
"""Main entry point to the type checker.
Args:
script_path: Path to the 'mypy' script (used for finding data files).
args: Custom command-line arguments. If not given, sys.argv[1:] will
be used.
"""
# Check for known bad Python versions.
if sys.version_info[:2] < (3, 4):
sys.exit("Running mypy with Python 3.3 or lower is not supported; "
"please upgrade to 3.4 or newer")
if sys.version_info[:3] == (3, 5, 0):
sys.exit("Running mypy with Python 3.5.0 is not supported; "
"please upgrade to 3.5.1 or newer")
t0 = time.time()
# To log stat() calls: os.stat = stat_proxy
sys.setrecursionlimit(2 ** 14)
if args is None:
args = sys.argv[1:]
fscache = FileSystemCache()
sources, options = process_options(args, fscache=fscache)
messages = []
def flush_errors(new_messages: List[str], serious: bool) -> None:
messages.extend(new_messages)
f = sys.stderr if serious else sys.stdout
try:
for msg in new_messages:
f.write(msg + '\n')
f.flush()
except BrokenPipeError:
sys.exit(2)
serious = False
blockers = False
res = None
try:
# Keep a dummy reference (res) for memory profiling below, as otherwise
# the result could be freed.
res = build.build(sources, options, None, flush_errors, fscache)
except CompileError as e:
blockers = True
if not e.use_stdout:
serious = True
if options.warn_unused_configs and options.unused_configs:
print("Warning: unused section(s) in %s: %s" %
(options.config_file,
", ".join("[mypy-%s]" % glob for glob in options.per_module_options.keys()
if glob in options.unused_configs)),
file=sys.stderr)
if options.junit_xml:
t1 = time.time()
util.write_junit_xml(t1 - t0, serious, messages, options.junit_xml)
if MEM_PROFILE:
from mypy.memprofile import print_memory_profile
print_memory_profile()
del res # Now it's safe to delete
code = 0
if messages:
code = 2 if blockers else 1
if options.fast_exit:
# Exit without freeing objects -- it's faster.
#
# NOTE: We don't flush all open files on exit (or run other destructors)!
util.hard_exit(code)
elif code:
sys.exit(code)
def readlinkabs(link: str) -> str:
"""Return an absolute path to symbolic link destination."""
# Adapted from code by Greg Smith.
assert os.path.islink(link)
path = os.readlink(link)
if os.path.isabs(path):
return path
return os.path.join(os.path.dirname(link), path)
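# Illustrative sketch (hypothetical link): if /usr/bin/foo is a symlink to
# '../lib/foo', readlinkabs('/usr/bin/foo') returns '/usr/bin/../lib/foo';
# an absolute link target is returned unchanged.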
class SplitNamespace(argparse.Namespace):
def __init__(self, standard_namespace: object, alt_namespace: object, alt_prefix: str) -> None:
self.__dict__['_standard_namespace'] = standard_namespace
self.__dict__['_alt_namespace'] = alt_namespace
self.__dict__['_alt_prefix'] = alt_prefix
def _get(self) -> Tuple[Any, Any]:
return (self._standard_namespace, self._alt_namespace)
def __setattr__(self, name: str, value: Any) -> None:
if name.startswith(self._alt_prefix):
setattr(self._alt_namespace, name[len(self._alt_prefix):], value)
else:
setattr(self._standard_namespace, name, value)
def __getattr__(self, name: str) -> Any:
if name.startswith(self._alt_prefix):
return getattr(self._alt_namespace, name[len(self._alt_prefix):])
else:
return getattr(self._standard_namespace, name)
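# A minimal routing sketch for SplitNamespace (illustrative, not part of mypy's
# tests): names carrying the prefix land in the alternate namespace, everything
# else in the standard one.
#     standard, special = argparse.Namespace(), argparse.Namespace()
#     ns = SplitNamespace(standard, special, 'special-opts:')
#     setattr(ns, 'verbosity', 2)                  # -> standard.verbosity == 2
#     setattr(ns, 'special-opts:files', ['a.py'])  # -> special.files == ['a.py']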
def parse_version(v: str) -> Tuple[int, int]:
m = re.match(r'\A(\d)\.(\d+)\Z', v)
if not m:
raise argparse.ArgumentTypeError(
"Invalid python version '{}' (expected format: 'x.y')".format(v))
major, minor = int(m.group(1)), int(m.group(2))
if major == 2:
if minor != 7:
raise argparse.ArgumentTypeError(
"Python 2.{} is not supported (must be 2.7)".format(minor))
elif major == 3:
if minor < defaults.PYTHON3_VERSION_MIN[1]:
raise argparse.ArgumentTypeError(
"Python 3.{0} is not supported (must be {1}.{2} or higher)".format(minor,
*defaults.PYTHON3_VERSION_MIN))
else:
raise argparse.ArgumentTypeError(
"Python major version '{}' out of range (must be 2 or 3)".format(major))
return major, minor
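# Behaviour sketch for parse_version:
#     parse_version('3.7')  -> (3, 7)
#     parse_version('2.7')  -> (2, 7)
#     parse_version('4.0')  -> raises argparse.ArgumentTypeError (major out of range)
#     parse_version('3')    -> raises argparse.ArgumentTypeError (not in 'x.y' form)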
# Make the help output a little less jarring.
class AugmentedHelpFormatter(argparse.RawDescriptionHelpFormatter):
def __init__(self, prog: str) -> None:
super().__init__(prog=prog, max_help_position=28)
def _fill_text(self, text: str, width: int, indent: int) -> str:
if '\n' in text:
# Assume we want to manually format the text
return super()._fill_text(text, width, indent)
else:
            # Assume we want argparse to manage wrapping, indenting, and
# formatting the text for us.
return argparse.HelpFormatter._fill_text(self, text, width, indent)
# Define pairs of flag prefixes with inverse meaning.
flag_prefix_pairs = [
('allow', 'disallow'),
('show', 'hide'),
] # type: Final
flag_prefix_map = {} # type: Final[Dict[str, str]]
for a, b in flag_prefix_pairs:
flag_prefix_map[a] = b
flag_prefix_map[b] = a
def invert_flag_name(flag: str) -> str:
split = flag[2:].split('-', 1)
if len(split) == 2:
prefix, rest = split
if prefix in flag_prefix_map:
return '--{}-{}'.format(flag_prefix_map[prefix], rest)
elif prefix == 'no':
return '--{}'.format(rest)
return '--no-{}'.format(flag[2:])
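# Behaviour sketch for invert_flag_name:
#     invert_flag_name('--allow-untyped-globals')  -> '--disallow-untyped-globals'
#     invert_flag_name('--no-warn-no-return')      -> '--warn-no-return'
#     invert_flag_name('--strict-optional')        -> '--no-strict-optional'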
class PythonExecutableInferenceError(Exception):
"""Represents a failure to infer the version or executable while searching."""
def python_executable_prefix(v: str) -> List[str]:
if sys.platform == 'win32':
# on Windows, all Python executables are named `python`. To handle this, there
# is the `py` launcher, which can be passed a version e.g. `py -3.5`, and it will
# execute an installed Python 3.5 interpreter. See also:
# https://docs.python.org/3/using/windows.html#python-launcher-for-windows
return ['py', '-{}'.format(v)]
else:
return ['python{}'.format(v)]
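# Behaviour sketch: python_executable_prefix('3.6') is ['py', '-3.6'] on
# Windows (via the py launcher) and ['python3.6'] elsewhere.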
def _python_version_from_executable(python_executable: str) -> Tuple[int, int]:
try:
check = subprocess.check_output([python_executable, '-c',
'import sys; print(repr(sys.version_info[:2]))'],
stderr=subprocess.STDOUT).decode()
return ast.literal_eval(check)
except (subprocess.CalledProcessError, FileNotFoundError):
raise PythonExecutableInferenceError(
'invalid Python executable {}'.format(python_executable))
def _python_executable_from_version(python_version: Tuple[int, int]) -> str:
if sys.version_info[:2] == python_version:
return sys.executable
str_ver = '.'.join(map(str, python_version))
try:
sys_exe = subprocess.check_output(python_executable_prefix(str_ver) +
['-c', 'import sys; print(sys.executable)'],
stderr=subprocess.STDOUT).decode().strip()
return sys_exe
except (subprocess.CalledProcessError, FileNotFoundError):
raise PythonExecutableInferenceError(
'failed to find a Python executable matching version {},'
' perhaps try --python-executable, or --no-site-packages?'.format(python_version))
def infer_python_version_and_executable(options: Options,
special_opts: argparse.Namespace) -> None:
"""Infer the Python version or executable from each other. Check they are consistent.
This function mutates options based on special_opts to infer the correct Python version and
executable to use.
"""
# Infer Python version and/or executable if one is not given
# TODO: (ethanhs) Look at folding these checks and the site packages subprocess calls into
# one subprocess call for speed.
if special_opts.python_executable is not None and special_opts.python_version is not None:
py_exe_ver = _python_version_from_executable(special_opts.python_executable)
if py_exe_ver != special_opts.python_version:
raise PythonExecutableInferenceError(
'Python version {} did not match executable {}, got version {}.'.format(
special_opts.python_version, special_opts.python_executable, py_exe_ver
))
else:
options.python_version = special_opts.python_version
options.python_executable = special_opts.python_executable
elif special_opts.python_executable is None and special_opts.python_version is not None:
options.python_version = special_opts.python_version
py_exe = None
if not special_opts.no_executable:
py_exe = _python_executable_from_version(special_opts.python_version)
options.python_executable = py_exe
elif special_opts.python_version is None and special_opts.python_executable is not None:
options.python_version = _python_version_from_executable(
special_opts.python_executable)
options.python_executable = special_opts.python_executable
HEADER = """%(prog)s [-h] [-v] [-V] [more options; see below]
[-m MODULE] [-p PACKAGE] [-c PROGRAM_TEXT] [files ...]""" # type: Final
DESCRIPTION = """
Mypy is a program that will type check your Python code.
Pass in any files or folders you want to type check. Mypy will
recursively traverse any provided folders to find .py files:
$ mypy my_program.py my_src_folder
For more information on getting started, see:
- http://mypy.readthedocs.io/en/latest/getting_started.html
For more details on both running mypy and using the flags below, see:
- http://mypy.readthedocs.io/en/latest/running_mypy.html
- http://mypy.readthedocs.io/en/latest/command_line.html
You can also use a config file to configure mypy instead of using
command line flags. For more details, see:
- http://mypy.readthedocs.io/en/latest/config_file.html
""" # type: Final
FOOTER = """Environment variables:
Define MYPYPATH for additional module search path entries.""" # type: Final
def process_options(args: List[str],
require_targets: bool = True,
server_options: bool = False,
fscache: Optional[FileSystemCache] = None,
) -> Tuple[List[BuildSource], Options]:
"""Parse command line arguments.
If a FileSystemCache is passed in, and package_root options are given,
call fscache.set_package_root() to set the cache's package root.
"""
parser = argparse.ArgumentParser(prog='mypy',
usage=HEADER,
description=DESCRIPTION,
epilog=FOOTER,
fromfile_prefix_chars='@',
formatter_class=AugmentedHelpFormatter,
add_help=False)
strict_flag_names = [] # type: List[str]
strict_flag_assignments = [] # type: List[Tuple[str, bool]]
def add_invertible_flag(flag: str,
*,
inverse: Optional[str] = None,
default: bool,
dest: Optional[str] = None,
help: str,
strict_flag: bool = False,
group: Optional[argparse._ActionsContainer] = None
) -> None:
if inverse is None:
inverse = invert_flag_name(flag)
if group is None:
group = parser
if help is not argparse.SUPPRESS:
help += " (inverse: {})".format(inverse)
arg = group.add_argument(flag,
action='store_false' if default else 'store_true',
dest=dest,
help=help)
dest = arg.dest
arg = group.add_argument(inverse,
action='store_true' if default else 'store_false',
dest=dest,
help=argparse.SUPPRESS)
if strict_flag:
assert dest is not None
strict_flag_names.append(flag)
strict_flag_assignments.append((dest, not default))
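    # Illustrative sketch (not a call mypy makes here):
    # add_invertible_flag('--foo', default=False, help=...) registers '--foo'
    # as store_true plus a hidden '--no-foo' as store_false on the same dest,
    # so whichever flag appears last on the command line wins.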
# Unless otherwise specified, arguments will be parsed directly onto an
# Options object. Options that require further processing should have
# their `dest` prefixed with `special-opts:`, which will cause them to be
# parsed into the separate special_opts namespace object.
# Note: we have a style guide for formatting the mypy --help text. See
# https://github.com/python/mypy/wiki/Documentation-Conventions
general_group = parser.add_argument_group(
title='Optional arguments')
general_group.add_argument(
'-h', '--help', action='help',
help="Show this help message and exit")
general_group.add_argument(
'-v', '--verbose', action='count', dest='verbosity',
help="More verbose messages")
general_group.add_argument(
'-V', '--version', action='version',
version='%(prog)s ' + __version__,
help="Show program's version number and exit")
config_group = parser.add_argument_group(
title='Config file',
description="Use a config file instead of command line arguments. "
"This is useful if you are using many flags or want "
"to set different options per each module.")
config_group.add_argument(
'--config-file',
help="Configuration file, must have a [mypy] section "
"(defaults to {})".format(', '.join(defaults.CONFIG_FILES)))
add_invertible_flag('--warn-unused-configs', default=False, strict_flag=True,
help="Warn about unused '[mypy-<pattern>]' config sections",
group=config_group)
imports_group = parser.add_argument_group(
title='Import discovery',
description="Configure how imports are discovered and followed.")
imports_group.add_argument(
'--ignore-missing-imports', action='store_true',
help="Silently ignore imports of missing modules")
imports_group.add_argument(
'--follow-imports', choices=['normal', 'silent', 'skip', 'error'],
default='normal', help="How to treat imports (default normal)")
imports_group.add_argument(
'--python-executable', action='store', metavar='EXECUTABLE',
help="Python executable used for finding PEP 561 compliant installed"
" packages and stubs",
dest='special-opts:python_executable')
imports_group.add_argument(
'--no-site-packages', action='store_true',
dest='special-opts:no_executable',
help="Do not search for installed PEP 561 compliant packages")
imports_group.add_argument(
'--no-silence-site-packages', action='store_true',
help="Do not silence errors in PEP 561 compliant installed packages")
add_invertible_flag(
'--namespace-packages', default=False,
help="Support namespace packages (PEP 420, __init__.py-less)",
group=imports_group)
platform_group = parser.add_argument_group(
title='Platform configuration',
description="Type check code assuming it will be run under certain "
"runtime conditions. By default, mypy assumes your code "
"will be run using the same operating system and Python "
"version you are using to run mypy itself.")
platform_group.add_argument(
'--python-version', type=parse_version, metavar='x.y',
help='Type check code assuming it will be running on Python x.y',
dest='special-opts:python_version')
platform_group.add_argument(
'-2', '--py2', dest='special-opts:python_version', action='store_const',
const=defaults.PYTHON2_VERSION,
help="Use Python 2 mode (same as --python-version 2.7)")
platform_group.add_argument(
'--platform', action='store', metavar='PLATFORM',
help="Type check special-cased code for the given OS platform "
"(defaults to sys.platform)")
platform_group.add_argument(
'--always-true', metavar='NAME', action='append', default=[],
help="Additional variable to be considered True (may be repeated)")
platform_group.add_argument(
'--always-false', metavar='NAME', action='append', default=[],
help="Additional variable to be considered False (may be repeated)")
disallow_any_group = parser.add_argument_group(
title='Dynamic typing',
description="Disallow the use of the dynamic 'Any' type under certain conditions.")
disallow_any_group.add_argument(
'--disallow-any-unimported', default=False, action='store_true',
help="Disallow Any types resulting from unfollowed imports")
add_invertible_flag('--disallow-subclassing-any', default=False, strict_flag=True,
help="Disallow subclassing values of type 'Any' when defining classes",
group=disallow_any_group)
disallow_any_group.add_argument(
'--disallow-any-expr', default=False, action='store_true',
help='Disallow all expressions that have type Any')
disallow_any_group.add_argument(
'--disallow-any-decorated', default=False, action='store_true',
help='Disallow functions that have Any in their signature '
'after decorator transformation')
disallow_any_group.add_argument(
'--disallow-any-explicit', default=False, action='store_true',
help='Disallow explicit Any in type positions')
disallow_any_group.add_argument(
'--disallow-any-generics', default=False, action='store_true',
help='Disallow usage of generic types that do not specify explicit '
'type parameters')
untyped_group = parser.add_argument_group(
title='Untyped definitions and calls',
description="Configure how untyped definitions and calls are handled. "
"Note: by default, mypy ignores any untyped function definitions "
"and assumes any calls to such functions have a return "
"type of 'Any'.")
add_invertible_flag('--disallow-untyped-calls', default=False, strict_flag=True,
help="Disallow calling functions without type annotations"
" from functions with type annotations",
group=untyped_group)
add_invertible_flag('--disallow-untyped-defs', default=False, strict_flag=True,
help="Disallow defining functions without type annotations"
" or with incomplete type annotations",
group=untyped_group)
add_invertible_flag('--disallow-incomplete-defs', default=False, strict_flag=True,
help="Disallow defining functions with incomplete type annotations",
group=untyped_group)
add_invertible_flag('--check-untyped-defs', default=False, strict_flag=True,
help="Type check the interior of functions without type annotations",
group=untyped_group)
add_invertible_flag('--disallow-untyped-decorators', default=False, strict_flag=True,
help="Disallow decorating typed functions with untyped decorators",
group=untyped_group)
none_group = parser.add_argument_group(
title='None and Optional handling',
description="Adjust how values of type 'None' are handled. For more context on "
"how mypy handles values of type 'None', see: "
"mypy.readthedocs.io/en/latest/kinds_of_types.html#no-strict-optional")
add_invertible_flag('--no-implicit-optional', default=False, strict_flag=True,
help="Don't assume arguments with default values of None are Optional",
group=none_group)
none_group.add_argument(
'--strict-optional', action='store_true',
help=argparse.SUPPRESS)
none_group.add_argument(
'--no-strict-optional', action='store_false', dest='strict_optional',
help="Disable strict Optional checks (inverse: --strict-optional)")
none_group.add_argument(
'--strict-optional-whitelist', metavar='GLOB', nargs='*',
help="Suppress strict Optional errors in all but the provided files; "
"implies --strict-optional (may suppress certain other errors "
"in non-whitelisted files)")
lint_group = parser.add_argument_group(
title='Warnings',
description="Detect code that is sound but redundant or problematic.")
add_invertible_flag('--warn-redundant-casts', default=False, strict_flag=True,
help="Warn about casting an expression to its inferred type",
group=lint_group)
add_invertible_flag('--warn-unused-ignores', default=False, strict_flag=True,
help="Warn about unneeded '# type: ignore' comments",
group=lint_group)
add_invertible_flag('--no-warn-no-return', dest='warn_no_return', default=True,
help="Do not warn about functions that end without returning",
group=lint_group)
add_invertible_flag('--warn-return-any', default=False, strict_flag=True,
help="Warn about returning values of type Any"
" from non-Any typed functions",
group=lint_group)
# Note: this group is intentionally added here even though we don't add
# --strict to this group near the end.
#
# That way, this group will appear after the various strictness groups
# but before the remaining flags.
# We add `--strict` near the end so we don't accidentally miss any strictness
# flags that are added after this group.
strictness_group = parser.add_argument_group(
title='Other strictness checks')
add_invertible_flag('--allow-untyped-globals', default=False, strict_flag=False,
help="Suppress toplevel errors caused by missing annotations",
group=strictness_group)
incremental_group = parser.add_argument_group(
title='Incremental mode',
description="Adjust how mypy incrementally type checks and caches modules. "
"Mypy caches type information about modules into a cache to "
"let you speed up future invocations of mypy. Also see "
"mypy's daemon mode: "
"mypy.readthedocs.io/en/latest/mypy_daemon.html#mypy-daemon")
incremental_group.add_argument(
'-i', '--incremental', action='store_true',
help=argparse.SUPPRESS)
incremental_group.add_argument(
'--no-incremental', action='store_false', dest='incremental',
help="Disable module cache (inverse: --incremental)")
incremental_group.add_argument(
'--cache-dir', action='store', metavar='DIR',
help="Store module cache info in the given folder in incremental mode "
"(defaults to '{}')".format(defaults.CACHE_DIR))
incremental_group.add_argument(
'--cache-fine-grained', action='store_true',
help="Include fine-grained dependency information in the cache for the mypy daemon")
incremental_group.add_argument(
'--quick-and-dirty', action='store_true',
help="Use cache even if dependencies out of date (implies --incremental)")
incremental_group.add_argument(
'--skip-version-check', action='store_true',
help="Allow using cache written by older mypy version")
internals_group = parser.add_argument_group(
title='Mypy internals',
description="Debug and customize mypy internals.")
internals_group.add_argument(
'--pdb', action='store_true', help="Invoke pdb on fatal error")
internals_group.add_argument(
'--show-traceback', '--tb', action='store_true',
help="Show traceback on fatal error")
internals_group.add_argument(
'--custom-typing', metavar='MODULE', dest='custom_typing_module',
help="Use a custom typing module")
internals_group.add_argument(
'--custom-typeshed-dir', metavar='DIR',
help="Use the custom typeshed in DIR")
add_invertible_flag('--warn-incomplete-stub', default=False,
help="Warn if missing type annotation in typeshed, only relevant with"
" --disallow-untyped-defs or --disallow-incomplete-defs enabled",
group=internals_group)
internals_group.add_argument(
'--shadow-file', nargs=2, metavar=('SOURCE_FILE', 'SHADOW_FILE'),
dest='shadow_file', action='append',
help="When encountering SOURCE_FILE, read and type check "
"the contents of SHADOW_FILE instead.")
add_invertible_flag('--fast-exit', default=False, help=argparse.SUPPRESS,
group=internals_group)
error_group = parser.add_argument_group(
title='Error reporting',
description="Adjust the amount of detail shown in error messages.")
add_invertible_flag('--show-error-context', default=False,
dest='show_error_context',
help='Precede errors with "note:" messages explaining context',
group=error_group)
add_invertible_flag('--show-column-numbers', default=False,
help="Show column numbers in error messages",
group=error_group)
strict_help = "Strict mode; enables the following flags: {}".format(
", ".join(strict_flag_names))
strictness_group.add_argument(
'--strict', action='store_true', dest='special-opts:strict',
help=strict_help)
report_group = parser.add_argument_group(
title='Report generation',
description='Generate a report in the specified format.')
for report_type in sorted(reporter_classes):
report_group.add_argument('--%s-report' % report_type.replace('_', '-'),
metavar='DIR',
dest='special-opts:%s_report' % report_type)
other_group = parser.add_argument_group(
title='Miscellaneous')
other_group.add_argument(
'--junit-xml', help="Write junit.xml to the given file")
other_group.add_argument(
'--scripts-are-modules', action='store_true',
help="Script x becomes module x instead of __main__")
other_group.add_argument(
'--find-occurrences', metavar='CLASS.MEMBER',
dest='special-opts:find_occurrences',
help="Print out all usages of a class member (experimental)")
if server_options:
# TODO: This flag is superfluous; remove after a short transition (2018-03-16)
other_group.add_argument(
'--experimental', action='store_true', dest='fine_grained_incremental',
help="Enable fine-grained incremental mode")
other_group.add_argument(
'--use-fine-grained-cache', action='store_true',
help="Use the cache in fine-grained incremental mode")
# hidden options
parser.add_argument(
'--stats', action='store_true', dest='dump_type_stats', help=argparse.SUPPRESS)
parser.add_argument(
'--inferstats', action='store_true', dest='dump_inference_stats',
help=argparse.SUPPRESS)
# --debug-cache will disable any cache-related compressions/optimizations,
# which will make the cache writing process output pretty-printed JSON (which
# is easier to debug).
parser.add_argument('--debug-cache', action='store_true', help=argparse.SUPPRESS)
# --dump-deps will dump all fine-grained dependencies to stdout
parser.add_argument('--dump-deps', action='store_true', help=argparse.SUPPRESS)
# --dump-graph will dump the contents of the graph of SCCs and exit.
parser.add_argument('--dump-graph', action='store_true', help=argparse.SUPPRESS)
# --semantic-analysis-only does exactly that.
parser.add_argument('--semantic-analysis-only', action='store_true', help=argparse.SUPPRESS)
# --local-partial-types disallows partial types spanning module top level and a function
# (implicitly defined in fine-grained incremental mode)
parser.add_argument('--local-partial-types', action='store_true', help=argparse.SUPPRESS)
# --logical-deps adds some more dependencies that are not semantically needed, but
# may be helpful to determine relative importance of classes and functions for overall
    # type precision in a code base. It also _removes_ some deps, so this flag should never
    # be used except for generating code stats. This also automatically enables --cache-fine-grained.
# NOTE: This is an experimental option that may be modified or removed at any time.
parser.add_argument('--logical-deps', action='store_true', help=argparse.SUPPRESS)
# --bazel changes some behaviors for use with Bazel (https://bazel.build).
parser.add_argument('--bazel', action='store_true', help=argparse.SUPPRESS)
# --package-root adds a directory below which directories are considered
# packages even without __init__.py. May be repeated.
parser.add_argument('--package-root', metavar='ROOT', action='append', default=[],
help=argparse.SUPPRESS)
# --cache-map FILE ... gives a mapping from source files to cache files.
# Each triple of arguments is a source file, a cache meta file, and a cache data file.
# Modules not mentioned in the file will go through cache_dir.
# Must be followed by another flag or by '--' (and then only file args may follow).
parser.add_argument('--cache-map', nargs='+', dest='special-opts:cache_map',
help=argparse.SUPPRESS)
# deprecated options
parser.add_argument('--disallow-any', dest='special-opts:disallow_any',
help=argparse.SUPPRESS)
add_invertible_flag('--strict-boolean', default=False,
help=argparse.SUPPRESS)
parser.add_argument('-f', '--dirty-stubs', action='store_true',
dest='special-opts:dirty_stubs',
help=argparse.SUPPRESS)
parser.add_argument('--use-python-path', action='store_true',
dest='special-opts:use_python_path',
help=argparse.SUPPRESS)
parser.add_argument('-s', '--silent-imports', action='store_true',
dest='special-opts:silent_imports',
help=argparse.SUPPRESS)
parser.add_argument('--almost-silent', action='store_true',
dest='special-opts:almost_silent',
help=argparse.SUPPRESS)
parser.add_argument('--fast-parser', action='store_true', dest='special-opts:fast_parser',
help=argparse.SUPPRESS)
parser.add_argument('--no-fast-parser', action='store_true',
dest='special-opts:no_fast_parser',
help=argparse.SUPPRESS)
code_group = parser.add_argument_group(
title="Running code",
description="Specify the code you want to type check. For more details, see "
"mypy.readthedocs.io/en/latest/running_mypy.html#running-mypy")
code_group.add_argument(
'-m', '--module', action='append', metavar='MODULE',
default=[],
dest='special-opts:modules',
help="Type-check module; can repeat for more modules")
code_group.add_argument(
'-p', '--package', action='append', metavar='PACKAGE',
default=[],
dest='special-opts:packages',
help="Type-check package recursively; can be repeated")
code_group.add_argument(
'-c', '--command', action='append', metavar='PROGRAM_TEXT',
dest='special-opts:command',
help="Type-check program passed in as string")
code_group.add_argument(
metavar='files', nargs='*', dest='special-opts:files',
help="Type-check given files or directories")
# Parse arguments once into a dummy namespace so we can get the
# filename for the config file and know if the user requested all strict options.
dummy = argparse.Namespace()
parser.parse_args(args, dummy)
config_file = dummy.config_file
if config_file is not None and not os.path.exists(config_file):
parser.error("Cannot find config file '%s'" % config_file)
# Parse config file first, so command line can override.
options = Options()
parse_config_file(options, config_file)
# Set strict flags before parsing (if strict mode enabled), so other command
# line options can override.
if getattr(dummy, 'special-opts:strict'):
for dest, value in strict_flag_assignments:
setattr(options, dest, value)
# Parse command line for real, using a split namespace.
special_opts = argparse.Namespace()
parser.parse_args(args, SplitNamespace(options, special_opts, 'special-opts:'))
# --use-python-path is no longer supported; explain why.
if special_opts.use_python_path:
parser.error("Sorry, --use-python-path is no longer supported.\n"
"If you are trying this because your code depends on a library module,\n"
"you should really investigate how to obtain stubs for that module.\n"
"See https://github.com/python/mypy/issues/1411 for more discussion."
)
# Process deprecated options
if special_opts.disallow_any:
print("--disallow-any option was split up into multiple flags. "
"See http://mypy.readthedocs.io/en/latest/command_line.html#disallow-dynamic-typing")
if options.strict_boolean:
print("Warning: --strict-boolean is deprecated; "
"see https://github.com/python/mypy/issues/3195", file=sys.stderr)
if special_opts.almost_silent:
print("Warning: --almost-silent has been replaced by "
"--follow-imports=errors", file=sys.stderr)
if options.follow_imports == 'normal':
options.follow_imports = 'errors'
elif special_opts.silent_imports:
print("Warning: --silent-imports has been replaced by "
"--ignore-missing-imports --follow-imports=skip", file=sys.stderr)
options.ignore_missing_imports = True
if options.follow_imports == 'normal':
options.follow_imports = 'skip'
if special_opts.dirty_stubs:
print("Warning: -f/--dirty-stubs is deprecated and no longer necessary. Mypy no longer "
"checks the git status of stubs.",
file=sys.stderr)
if special_opts.fast_parser:
print("Warning: --fast-parser is now the default (and only) parser.")
if special_opts.no_fast_parser:
print("Warning: --no-fast-parser no longer has any effect. The fast parser "
"is now mypy's default and only parser.")
try:
infer_python_version_and_executable(options, special_opts)
except PythonExecutableInferenceError as e:
parser.error(str(e))
if special_opts.no_executable:
options.python_executable = None
# Check for invalid argument combinations.
if require_targets:
code_methods = sum(bool(c) for c in [special_opts.modules + special_opts.packages,
special_opts.command,
special_opts.files])
if code_methods == 0:
parser.error("Missing target module, package, files, or command.")
elif code_methods > 1:
parser.error("May only specify one of: module/package, files, or command.")
# Check for overlapping `--always-true` and `--always-false` flags.
overlap = set(options.always_true) & set(options.always_false)
if overlap:
parser.error("You can't make a variable always true and always false (%s)" %
', '.join(sorted(overlap)))
# Set build flags.
if options.strict_optional_whitelist is not None:
# TODO: Deprecate, then kill this flag
options.strict_optional = True
if special_opts.find_occurrences:
experiments.find_occurrences = special_opts.find_occurrences.split('.')
assert experiments.find_occurrences is not None
if len(experiments.find_occurrences) < 2:
parser.error("Can only find occurrences of class members.")
if len(experiments.find_occurrences) != 2:
parser.error("Can only find occurrences of non-nested class members.")
# Set reports.
for flag, val in vars(special_opts).items():
if flag.endswith('_report') and val is not None:
report_type = flag[:-7].replace('_', '-')
report_dir = val
options.report_dirs[report_type] = report_dir
# Process --package-root.
if options.package_root:
process_package_roots(fscache, parser, options)
# Process --cache-map.
if special_opts.cache_map:
process_cache_map(parser, special_opts, options)
# Let quick_and_dirty imply incremental.
if options.quick_and_dirty:
options.incremental = True
# Let logical_deps imply cache_fine_grained (otherwise the former is useless).
if options.logical_deps:
options.cache_fine_grained = True
# Set target.
if special_opts.modules + special_opts.packages:
options.build_type = BuildType.MODULE
search_paths = SearchPaths((os.getcwd(),), tuple(mypy_path()), (), ())
targets = []
# TODO: use the same cache that the BuildManager will
cache = FindModuleCache(search_paths, fscache)
for p in special_opts.packages:
            if os.sep in p or (os.altsep and os.altsep in p):
fail("Package name '{}' cannot have a slash in it.".format(p))
p_targets = cache.find_modules_recursive(p)
if not p_targets:
fail("Can't find package '{}'".format(p))
targets.extend(p_targets)
for m in special_opts.modules:
targets.append(BuildSource(None, m, None))
return targets, options
elif special_opts.command:
options.build_type = BuildType.PROGRAM_TEXT
targets = [BuildSource(None, None, '\n'.join(special_opts.command))]
return targets, options
else:
try:
targets = create_source_list(special_opts.files, options, fscache)
except InvalidSourceList as e:
fail(str(e))
return targets, options
def process_package_roots(fscache: Optional[FileSystemCache],
parser: argparse.ArgumentParser,
options: Options) -> None:
"""Validate and normalize package_root."""
if fscache is None:
parser.error("--package-root does not work here (no fscache)")
assert fscache is not None # Since mypy doesn't know parser.error() raises.
# Do some stuff with drive letters to make Windows happy (esp. tests).
current_drive, _ = os.path.splitdrive(os.getcwd())
dot = os.curdir
dotslash = os.curdir + os.sep
dotdotslash = os.pardir + os.sep
trivial_paths = {dot, dotslash}
package_root = []
for root in options.package_root:
if os.path.isabs(root):
parser.error("Package root cannot be absolute: %r" % root)
drive, root = os.path.splitdrive(root)
if drive and drive != current_drive:
parser.error("Package root must be on current drive: %r" % (drive + root))
# Empty package root is always okay.
if root:
root = os.path.relpath(root) # Normalize the heck out of it.
if root.startswith(dotdotslash):
parser.error("Package root cannot be above current directory: %r" % root)
if root in trivial_paths:
root = ''
elif not root.endswith(os.sep):
root = root + os.sep
package_root.append(root)
options.package_root = package_root
    # Pass the package root on to the filesystem cache.
fscache.set_package_root(package_root)
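# Normalization sketch for --package-root (hypothetical values):
#     'src'   -> 'src' + os.sep   (trailing separator added)
#     '.'     -> ''               (trivial roots collapse to the empty string)
#     '../x'  -> rejected         (cannot point above the current directory)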
def process_cache_map(parser: argparse.ArgumentParser,
special_opts: argparse.Namespace,
options: Options) -> None:
"""Validate cache_map and copy into options.cache_map."""
n = len(special_opts.cache_map)
if n % 3 != 0:
parser.error("--cache-map requires one or more triples (see source)")
for i in range(0, n, 3):
source, meta_file, data_file = special_opts.cache_map[i:i + 3]
if source in options.cache_map:
parser.error("Duplicate --cache-map source %s)" % source)
if not source.endswith('.py') and not source.endswith('.pyi'):
parser.error("Invalid --cache-map source %s (triple[0] must be *.py[i])" % source)
if not meta_file.endswith('.meta.json'):
parser.error("Invalid --cache-map meta_file %s (triple[1] must be *.meta.json)" %
meta_file)
if not data_file.endswith('.data.json'):
parser.error("Invalid --cache-map data_file %s (triple[2] must be *.data.json)" %
data_file)
options.cache_map[source] = (meta_file, data_file)
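# Invocation sketch (hypothetical file names): each --cache-map triple is
# (source, meta file, data file), e.g.
#     mypy --cache-map foo.py foo.meta.json foo.data.json -- foo.py
# so foo.py's cache entries bypass the default cache_dir.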
# For most options, the type of the default value set in options.py is
# sufficient, and we don't have to do anything here. This table
# exists to specify types for values initialized to None or container
# types.
config_types = {
'python_version': parse_version,
'strict_optional_whitelist': lambda s: s.split(),
'custom_typing_module': str,
'custom_typeshed_dir': str,
'mypy_path': lambda s: [p.strip() for p in re.split('[,:]', s)],
'junit_xml': str,
# These two are for backwards compatibility
'silent_imports': bool,
'almost_silent': bool,
'plugins': lambda s: [p.strip() for p in s.split(',')],
'always_true': lambda s: [p.strip() for p in s.split(',')],
'always_false': lambda s: [p.strip() for p in s.split(',')],
'package_root': lambda s: [p.strip() for p in s.split(',')],
} # type: Final
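# Conversion sketch: with the table above, a config line such as
#     mypy_path = stubs:more_stubs, extra
# parses to ['stubs', 'more_stubs', 'extra'], and
#     python_version = 3.7
# parses to (3, 7) via parse_version.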
def parse_config_file(options: Options, filename: Optional[str]) -> None:
"""Parse a config file into an Options object.
Errors are written to stderr but are not fatal.
If filename is None, fall back to default config files.
"""
if filename is not None:
config_files = (filename,) # type: Tuple[str, ...]
else:
config_files = tuple(map(os.path.expanduser, defaults.CONFIG_FILES))
parser = configparser.RawConfigParser()
for config_file in config_files:
if not os.path.exists(config_file):
continue
try:
parser.read(config_file)
except configparser.Error as err:
print("%s: %s" % (config_file, err), file=sys.stderr)
else:
file_read = config_file
options.config_file = file_read
break
else:
return
if 'mypy' not in parser:
if filename or file_read not in defaults.SHARED_CONFIG_FILES:
print("%s: No [mypy] section in config file" % file_read, file=sys.stderr)
else:
section = parser['mypy']
prefix = '%s: [%s]' % (file_read, 'mypy')
updates, report_dirs = parse_section(prefix, options, section)
for k, v in updates.items():
setattr(options, k, v)
options.report_dirs.update(report_dirs)
for name, section in parser.items():
if name.startswith('mypy-'):
prefix = '%s: [%s]' % (file_read, name)
updates, report_dirs = parse_section(prefix, options, section)
if report_dirs:
print("%s: Per-module sections should not specify reports (%s)" %
(prefix, ', '.join(s + '_report' for s in sorted(report_dirs))),
file=sys.stderr)
if set(updates) - PER_MODULE_OPTIONS:
print("%s: Per-module sections should only specify per-module flags (%s)" %
(prefix, ', '.join(sorted(set(updates) - PER_MODULE_OPTIONS))),
file=sys.stderr)
updates = {k: v for k, v in updates.items() if k in PER_MODULE_OPTIONS}
globs = name[5:]
for glob in globs.split(','):
# For backwards compatibility, replace (back)slashes with dots.
glob = glob.replace(os.sep, '.')
if os.altsep:
glob = glob.replace(os.altsep, '.')
if (any(c in glob for c in '?[]!') or
any('*' in x and x != '*' for x in glob.split('.'))):
print("%s: Patterns must be fully-qualified module names, optionally "
"with '*' in some components (e.g spam.*.eggs.*)"
% prefix,
file=sys.stderr)
else:
options.per_module_options[glob] = updates
def parse_section(prefix: str, template: Options,
section: Mapping[str, str]) -> Tuple[Dict[str, object], Dict[str, str]]:
"""Parse one section of a config file.
Returns a dict of option values encountered, and a dict of report directories.
"""
results = {} # type: Dict[str, object]
report_dirs = {} # type: Dict[str, str]
for key in section:
if key in config_types:
ct = config_types[key]
else:
dv = getattr(template, key, None)
if dv is None:
if key.endswith('_report'):
report_type = key[:-7].replace('_', '-')
if report_type in reporter_classes:
report_dirs[report_type] = section[key]
else:
print("%s: Unrecognized report type: %s" % (prefix, key),
file=sys.stderr)
continue
if key.startswith('x_'):
continue # Don't complain about `x_blah` flags
elif key == 'strict':
print("%s: Strict mode is not supported in configuration files: specify "
"individual flags instead (see 'mypy -h' for the list of flags enabled "
"in strict mode)" % prefix, file=sys.stderr)
else:
print("%s: Unrecognized option: %s = %s" % (prefix, key, section[key]),
file=sys.stderr)
continue
ct = type(dv)
v = None # type: Any
try:
if ct is bool:
v = section.getboolean(key) # type: ignore # Until better stub
elif callable(ct):
try:
v = ct(section.get(key))
except argparse.ArgumentTypeError as err:
print("%s: %s: %s" % (prefix, key, err), file=sys.stderr)
continue
else:
print("%s: Don't know what type %s should have" % (prefix, key), file=sys.stderr)
continue
except ValueError as err:
print("%s: %s: %s" % (prefix, key, err), file=sys.stderr)
continue
if key == 'silent_imports':
print("%s: silent_imports has been replaced by "
"ignore_missing_imports=True; follow_imports=skip" % prefix, file=sys.stderr)
if v:
if 'ignore_missing_imports' not in results:
results['ignore_missing_imports'] = True
if 'follow_imports' not in results:
results['follow_imports'] = 'skip'
if key == 'almost_silent':
print("%s: almost_silent has been replaced by "
"follow_imports=error" % prefix, file=sys.stderr)
if v:
if 'follow_imports' not in results:
results['follow_imports'] = 'error'
results[key] = v
return results, report_dirs
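# Illustrative sketch: for a section containing
#     disallow_untyped_defs = True
#     html_report = reports
# parse_section returns ({'disallow_untyped_defs': True}, {'html': 'reports'}),
# assuming 'html' is among reporter_classes.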
def fail(msg: str) -> None:
sys.stderr.write('%s\n' % msg)
sys.exit(1)
avg_line_length: 45.708069 | max_line_length: 99 | alphanum_fraction: 0.623433
group=untyped_group)
add_invertible_flag('--disallow-untyped-decorators', default=False, strict_flag=True,
help="Disallow decorating typed functions with untyped decorators",
group=untyped_group)
none_group = parser.add_argument_group(
title='None and Optional handling',
description="Adjust how values of type 'None' are handled. For more context on "
"how mypy handles values of type 'None', see: "
"mypy.readthedocs.io/en/latest/kinds_of_types.html#no-strict-optional")
add_invertible_flag('--no-implicit-optional', default=False, strict_flag=True,
help="Don't assume arguments with default values of None are Optional",
group=none_group)
none_group.add_argument(
'--strict-optional', action='store_true',
help=argparse.SUPPRESS)
none_group.add_argument(
'--no-strict-optional', action='store_false', dest='strict_optional',
help="Disable strict Optional checks (inverse: --strict-optional)")
none_group.add_argument(
'--strict-optional-whitelist', metavar='GLOB', nargs='*',
help="Suppress strict Optional errors in all but the provided files; "
"implies --strict-optional (may suppress certain other errors "
"in non-whitelisted files)")
lint_group = parser.add_argument_group(
title='Warnings',
description="Detect code that is sound but redundant or problematic.")
add_invertible_flag('--warn-redundant-casts', default=False, strict_flag=True,
help="Warn about casting an expression to its inferred type",
group=lint_group)
add_invertible_flag('--warn-unused-ignores', default=False, strict_flag=True,
help="Warn about unneeded '# type: ignore' comments",
group=lint_group)
add_invertible_flag('--no-warn-no-return', dest='warn_no_return', default=True,
help="Do not warn about functions that end without returning",
group=lint_group)
add_invertible_flag('--warn-return-any', default=False, strict_flag=True,
help="Warn about returning values of type Any"
" from non-Any typed functions",
group=lint_group)
# Note: this group is intentionally added here even though we don't add
# flags that are added after this group.
strictness_group = parser.add_argument_group(
title='Other strictness checks')
add_invertible_flag('--allow-untyped-globals', default=False, strict_flag=False,
help="Suppress toplevel errors caused by missing annotations",
group=strictness_group)
incremental_group = parser.add_argument_group(
title='Incremental mode',
description="Adjust how mypy incrementally type checks and caches modules. "
"Mypy caches type information about modules into a cache to "
"let you speed up future invocations of mypy. Also see "
"mypy's daemon mode: "
"mypy.readthedocs.io/en/latest/mypy_daemon.html#mypy-daemon")
incremental_group.add_argument(
'-i', '--incremental', action='store_true',
help=argparse.SUPPRESS)
incremental_group.add_argument(
'--no-incremental', action='store_false', dest='incremental',
help="Disable module cache (inverse: --incremental)")
incremental_group.add_argument(
'--cache-dir', action='store', metavar='DIR',
help="Store module cache info in the given folder in incremental mode "
"(defaults to '{}')".format(defaults.CACHE_DIR))
incremental_group.add_argument(
'--cache-fine-grained', action='store_true',
help="Include fine-grained dependency information in the cache for the mypy daemon")
incremental_group.add_argument(
'--quick-and-dirty', action='store_true',
help="Use cache even if dependencies out of date (implies --incremental)")
incremental_group.add_argument(
'--skip-version-check', action='store_true',
help="Allow using cache written by older mypy version")
internals_group = parser.add_argument_group(
title='Mypy internals',
description="Debug and customize mypy internals.")
internals_group.add_argument(
'--pdb', action='store_true', help="Invoke pdb on fatal error")
internals_group.add_argument(
'--show-traceback', '--tb', action='store_true',
help="Show traceback on fatal error")
internals_group.add_argument(
'--custom-typing', metavar='MODULE', dest='custom_typing_module',
help="Use a custom typing module")
internals_group.add_argument(
'--custom-typeshed-dir', metavar='DIR',
help="Use the custom typeshed in DIR")
add_invertible_flag('--warn-incomplete-stub', default=False,
help="Warn if missing type annotation in typeshed, only relevant with"
" --disallow-untyped-defs or --disallow-incomplete-defs enabled",
group=internals_group)
internals_group.add_argument(
'--shadow-file', nargs=2, metavar=('SOURCE_FILE', 'SHADOW_FILE'),
dest='shadow_file', action='append',
help="When encountering SOURCE_FILE, read and type check "
"the contents of SHADOW_FILE instead.")
add_invertible_flag('--fast-exit', default=False, help=argparse.SUPPRESS,
group=internals_group)
error_group = parser.add_argument_group(
title='Error reporting',
description="Adjust the amount of detail shown in error messages.")
add_invertible_flag('--show-error-context', default=False,
dest='show_error_context',
help='Precede errors with "note:" messages explaining context',
group=error_group)
add_invertible_flag('--show-column-numbers', default=False,
help="Show column numbers in error messages",
group=error_group)
strict_help = "Strict mode; enables the following flags: {}".format(
", ".join(strict_flag_names))
strictness_group.add_argument(
'--strict', action='store_true', dest='special-opts:strict',
help=strict_help)
report_group = parser.add_argument_group(
title='Report generation',
description='Generate a report in the specified format.')
for report_type in sorted(reporter_classes):
report_group.add_argument('--%s-report' % report_type.replace('_', '-'),
metavar='DIR',
dest='special-opts:%s_report' % report_type)
other_group = parser.add_argument_group(
title='Miscellaneous')
other_group.add_argument(
'--junit-xml', help="Write junit.xml to the given file")
other_group.add_argument(
'--scripts-are-modules', action='store_true',
help="Script x becomes module x instead of __main__")
other_group.add_argument(
'--find-occurrences', metavar='CLASS.MEMBER',
dest='special-opts:find_occurrences',
help="Print out all usages of a class member (experimental)")
if server_options:
other_group.add_argument(
'--experimental', action='store_true', dest='fine_grained_incremental',
help="Enable fine-grained incremental mode")
other_group.add_argument(
'--use-fine-grained-cache', action='store_true',
help="Use the cache in fine-grained incremental mode")
parser.add_argument(
'--stats', action='store_true', dest='dump_type_stats', help=argparse.SUPPRESS)
parser.add_argument(
'--inferstats', action='store_true', dest='dump_inference_stats',
help=argparse.SUPPRESS)
parser.add_argument('--debug-cache', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--dump-deps', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--dump-graph', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--semantic-analysis-only', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--local-partial-types', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--logical-deps', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--bazel', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--package-root', metavar='ROOT', action='append', default=[],
help=argparse.SUPPRESS)
parser.add_argument('--cache-map', nargs='+', dest='special-opts:cache_map',
help=argparse.SUPPRESS)
parser.add_argument('--disallow-any', dest='special-opts:disallow_any',
help=argparse.SUPPRESS)
add_invertible_flag('--strict-boolean', default=False,
help=argparse.SUPPRESS)
parser.add_argument('-f', '--dirty-stubs', action='store_true',
dest='special-opts:dirty_stubs',
help=argparse.SUPPRESS)
parser.add_argument('--use-python-path', action='store_true',
dest='special-opts:use_python_path',
help=argparse.SUPPRESS)
parser.add_argument('-s', '--silent-imports', action='store_true',
dest='special-opts:silent_imports',
help=argparse.SUPPRESS)
parser.add_argument('--almost-silent', action='store_true',
dest='special-opts:almost_silent',
help=argparse.SUPPRESS)
parser.add_argument('--fast-parser', action='store_true', dest='special-opts:fast_parser',
help=argparse.SUPPRESS)
parser.add_argument('--no-fast-parser', action='store_true',
dest='special-opts:no_fast_parser',
help=argparse.SUPPRESS)
code_group = parser.add_argument_group(
title="Running code",
description="Specify the code you want to type check. For more details, see "
"mypy.readthedocs.io/en/latest/running_mypy.html#running-mypy")
code_group.add_argument(
'-m', '--module', action='append', metavar='MODULE',
default=[],
dest='special-opts:modules',
help="Type-check module; can repeat for more modules")
code_group.add_argument(
'-p', '--package', action='append', metavar='PACKAGE',
default=[],
dest='special-opts:packages',
help="Type-check package recursively; can be repeated")
code_group.add_argument(
'-c', '--command', action='append', metavar='PROGRAM_TEXT',
dest='special-opts:command',
help="Type-check program passed in as string")
code_group.add_argument(
metavar='files', nargs='*', dest='special-opts:files',
help="Type-check given files or directories")
dummy = argparse.Namespace()
parser.parse_args(args, dummy)
config_file = dummy.config_file
if config_file is not None and not os.path.exists(config_file):
parser.error("Cannot find config file '%s'" % config_file)
options = Options()
parse_config_file(options, config_file)
if getattr(dummy, 'special-opts:strict'):
for dest, value in strict_flag_assignments:
setattr(options, dest, value)
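    # Second pass: parse for real, routing any 'special-opts:'-prefixed
    # dests into a separate namespace via SplitNamespace.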
special_opts = argparse.Namespace()
parser.parse_args(args, SplitNamespace(options, special_opts, 'special-opts:'))
if special_opts.use_python_path:
parser.error("Sorry, --use-python-path is no longer supported.\n"
"If you are trying this because your code depends on a library module,\n"
"you should really investigate how to obtain stubs for that module.\n"
"See https://github.com/python/mypy/issues/1411 for more discussion."
)
if special_opts.disallow_any:
print("--disallow-any option was split up into multiple flags. "
"See http://mypy.readthedocs.io/en/latest/command_line.html#disallow-dynamic-typing")
if options.strict_boolean:
print("Warning: --strict-boolean is deprecated; "
"see https://github.com/python/mypy/issues/3195", file=sys.stderr)
if special_opts.almost_silent:
print("Warning: --almost-silent has been replaced by "
"--follow-imports=errors", file=sys.stderr)
if options.follow_imports == 'normal':
options.follow_imports = 'errors'
elif special_opts.silent_imports:
print("Warning: --silent-imports has been replaced by "
"--ignore-missing-imports --follow-imports=skip", file=sys.stderr)
options.ignore_missing_imports = True
if options.follow_imports == 'normal':
options.follow_imports = 'skip'
if special_opts.dirty_stubs:
print("Warning: -f/--dirty-stubs is deprecated and no longer necessary. Mypy no longer "
"checks the git status of stubs.",
file=sys.stderr)
if special_opts.fast_parser:
print("Warning: --fast-parser is now the default (and only) parser.")
if special_opts.no_fast_parser:
print("Warning: --no-fast-parser no longer has any effect. The fast parser "
"is now mypy's default and only parser.")
try:
infer_python_version_and_executable(options, special_opts)
except PythonExecutableInferenceError as e:
parser.error(str(e))
if special_opts.no_executable:
options.python_executable = None
# Check for invalid argument combinations.
if require_targets:
code_methods = sum(bool(c) for c in [special_opts.modules + special_opts.packages,
special_opts.command,
special_opts.files])
if code_methods == 0:
parser.error("Missing target module, package, files, or command.")
elif code_methods > 1:
parser.error("May only specify one of: module/package, files, or command.")
# Check for overlapping `--always-true` and `--always-false` flags.
overlap = set(options.always_true) & set(options.always_false)
if overlap:
parser.error("You can't make a variable always true and always false (%s)" %
', '.join(sorted(overlap)))
if options.strict_optional_whitelist is not None:
options.strict_optional = True
if special_opts.find_occurrences:
experiments.find_occurrences = special_opts.find_occurrences.split('.')
assert experiments.find_occurrences is not None
if len(experiments.find_occurrences) < 2:
parser.error("Can only find occurrences of class members.")
if len(experiments.find_occurrences) != 2:
parser.error("Can only find occurrences of non-nested class members.")
for flag, val in vars(special_opts).items():
if flag.endswith('_report') and val is not None:
report_type = flag[:-7].replace('_', '-')
report_dir = val
options.report_dirs[report_type] = report_dir
if options.package_root:
process_package_roots(fscache, parser, options)
if special_opts.cache_map:
process_cache_map(parser, special_opts, options)
if options.quick_and_dirty:
options.incremental = True
if options.logical_deps:
options.cache_fine_grained = True
if special_opts.modules + special_opts.packages:
options.build_type = BuildType.MODULE
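        # SearchPaths groups module search roots; its fields are
        # (python_path, mypy_path, package_path, typeshed_path), of which
        # only the first two are populated here.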
search_paths = SearchPaths((os.getcwd(),), tuple(mypy_path()), (), ())
targets = []
cache = FindModuleCache(search_paths, fscache)
for p in special_opts.packages:
if os.sep in p or os.altsep and os.altsep in p:
fail("Package name '{}' cannot have a slash in it.".format(p))
p_targets = cache.find_modules_recursive(p)
if not p_targets:
fail("Can't find package '{}'".format(p))
targets.extend(p_targets)
for m in special_opts.modules:
targets.append(BuildSource(None, m, None))
return targets, options
elif special_opts.command:
options.build_type = BuildType.PROGRAM_TEXT
targets = [BuildSource(None, None, '\n'.join(special_opts.command))]
return targets, options
else:
try:
targets = create_source_list(special_opts.files, options, fscache)
except InvalidSourceList as e:
fail(str(e))
return targets, options
def process_package_roots(fscache: Optional[FileSystemCache],
parser: argparse.ArgumentParser,
options: Options) -> None:
if fscache is None:
parser.error("--package-root does not work here (no fscache)")
assert fscache is not None # Since mypy doesn't know parser.error() raises.
current_drive, _ = os.path.splitdrive(os.getcwd())
dot = os.curdir
dotslash = os.curdir + os.sep
dotdotslash = os.pardir + os.sep
trivial_paths = {dot, dotslash}
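    # Normalization examples for the loop below: '.' and './' collapse to '',
    # 'src' becomes 'src' + os.sep, and anything starting with '../' is
    # rejected as being above the current directory.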
package_root = []
for root in options.package_root:
if os.path.isabs(root):
parser.error("Package root cannot be absolute: %r" % root)
drive, root = os.path.splitdrive(root)
if drive and drive != current_drive:
parser.error("Package root must be on current drive: %r" % (drive + root))
if root:
root = os.path.relpath(root)
if root.startswith(dotdotslash):
parser.error("Package root cannot be above current directory: %r" % root)
if root in trivial_paths:
root = ''
elif not root.endswith(os.sep):
root = root + os.sep
package_root.append(root)
options.package_root = package_root
fscache.set_package_root(package_root)
def process_cache_map(parser: argparse.ArgumentParser,
special_opts: argparse.Namespace,
options: Options) -> None:
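    # special_opts.cache_map is a flat list of triples, e.g. (hypothetical
    # names) --cache-map pkg/mod.py pkg/mod.meta.json pkg/mod.data.json,
    # mapping each source file to its cache meta/data files.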
n = len(special_opts.cache_map)
if n % 3 != 0:
parser.error("--cache-map requires one or more triples (see source)")
for i in range(0, n, 3):
source, meta_file, data_file = special_opts.cache_map[i:i + 3]
if source in options.cache_map:
parser.error("Duplicate --cache-map source %s)" % source)
if not source.endswith('.py') and not source.endswith('.pyi'):
parser.error("Invalid --cache-map source %s (triple[0] must be *.py[i])" % source)
if not meta_file.endswith('.meta.json'):
parser.error("Invalid --cache-map meta_file %s (triple[1] must be *.meta.json)" %
meta_file)
if not data_file.endswith('.data.json'):
parser.error("Invalid --cache-map data_file %s (triple[2] must be *.data.json)" %
data_file)
options.cache_map[source] = (meta_file, data_file)
# exists to specify types for values initialized to None or container
# types.
config_types = {
'python_version': parse_version,
'strict_optional_whitelist': lambda s: s.split(),
'custom_typing_module': str,
'custom_typeshed_dir': str,
'mypy_path': lambda s: [p.strip() for p in re.split('[,:]', s)],
'junit_xml': str,
# These two are for backwards compatibility
'silent_imports': bool,
'almost_silent': bool,
'plugins': lambda s: [p.strip() for p in s.split(',')],
'always_true': lambda s: [p.strip() for p in s.split(',')],
'always_false': lambda s: [p.strip() for p in s.split(',')],
'package_root': lambda s: [p.strip() for p in s.split(',')],
} # type: Final
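# Illustrative parses for the converters above (hypothetical values):
#   mypy_path = foo:bar, baz      -> ['foo', 'bar', 'baz']
#   always_true = FLAG_A,FLAG_B   -> ['FLAG_A', 'FLAG_B']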
def parse_config_file(options: Options, filename: Optional[str]) -> None:
if filename is not None:
config_files = (filename,) # type: Tuple[str, ...]
else:
config_files = tuple(map(os.path.expanduser, defaults.CONFIG_FILES))
parser = configparser.RawConfigParser()
for config_file in config_files:
if not os.path.exists(config_file):
continue
try:
parser.read(config_file)
except configparser.Error as err:
print("%s: %s" % (config_file, err), file=sys.stderr)
else:
file_read = config_file
options.config_file = file_read
break
else:
return
if 'mypy' not in parser:
if filename or file_read not in defaults.SHARED_CONFIG_FILES:
print("%s: No [mypy] section in config file" % file_read, file=sys.stderr)
else:
section = parser['mypy']
prefix = '%s: [%s]' % (file_read, 'mypy')
updates, report_dirs = parse_section(prefix, options, section)
for k, v in updates.items():
setattr(options, k, v)
options.report_dirs.update(report_dirs)
for name, section in parser.items():
if name.startswith('mypy-'):
prefix = '%s: [%s]' % (file_read, name)
updates, report_dirs = parse_section(prefix, options, section)
if report_dirs:
print("%s: Per-module sections should not specify reports (%s)" %
(prefix, ', '.join(s + '_report' for s in sorted(report_dirs))),
file=sys.stderr)
if set(updates) - PER_MODULE_OPTIONS:
print("%s: Per-module sections should only specify per-module flags (%s)" %
(prefix, ', '.join(sorted(set(updates) - PER_MODULE_OPTIONS))),
file=sys.stderr)
updates = {k: v for k, v in updates.items() if k in PER_MODULE_OPTIONS}
globs = name[5:]
for glob in globs.split(','):
# For backwards compatibility, replace (back)slashes with dots.
glob = glob.replace(os.sep, '.')
if os.altsep:
glob = glob.replace(os.altsep, '.')
if (any(c in glob for c in '?[]!') or
any('*' in x and x != '*' for x in glob.split('.'))):
print("%s: Patterns must be fully-qualified module names, optionally "
"with '*' in some components (e.g spam.*.eggs.*)"
% prefix,
file=sys.stderr)
else:
options.per_module_options[glob] = updates
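# A minimal config file accepted by parse_config_file might look like this
# (hypothetical contents):
#   [mypy]
#   warn_unused_configs = True
#   [mypy-somepackage.*]
#   ignore_missing_imports = True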
def parse_section(prefix: str, template: Options,
section: Mapping[str, str]) -> Tuple[Dict[str, object], Dict[str, str]]:
results = {} # type: Dict[str, object]
report_dirs = {} # type: Dict[str, str]
for key in section:
if key in config_types:
ct = config_types[key]
else:
dv = getattr(template, key, None)
if dv is None:
if key.endswith('_report'):
report_type = key[:-7].replace('_', '-')
if report_type in reporter_classes:
report_dirs[report_type] = section[key]
else:
print("%s: Unrecognized report type: %s" % (prefix, key),
file=sys.stderr)
continue
if key.startswith('x_'):
continue # Don't complain about `x_blah` flags
elif key == 'strict':
print("%s: Strict mode is not supported in configuration files: specify "
"individual flags instead (see 'mypy -h' for the list of flags enabled "
"in strict mode)" % prefix, file=sys.stderr)
else:
print("%s: Unrecognized option: %s = %s" % (prefix, key, section[key]),
file=sys.stderr)
continue
ct = type(dv)
v = None
try:
if ct is bool:
                v = section.getboolean(key)  # type: ignore
            elif callable(ct):
try:
v = ct(section.get(key))
except argparse.ArgumentTypeError as err:
print("%s: %s: %s" % (prefix, key, err), file=sys.stderr)
continue
else:
print("%s: Don't know what type %s should have" % (prefix, key), file=sys.stderr)
continue
except ValueError as err:
print("%s: %s: %s" % (prefix, key, err), file=sys.stderr)
continue
if key == 'silent_imports':
print("%s: silent_imports has been replaced by "
"ignore_missing_imports=True; follow_imports=skip" % prefix, file=sys.stderr)
if v:
if 'ignore_missing_imports' not in results:
results['ignore_missing_imports'] = True
if 'follow_imports' not in results:
results['follow_imports'] = 'skip'
if key == 'almost_silent':
print("%s: almost_silent has been replaced by "
"follow_imports=error" % prefix, file=sys.stderr)
if v:
if 'follow_imports' not in results:
results['follow_imports'] = 'error'
results[key] = v
return results, report_dirs
def fail(msg: str) -> None:
sys.stderr.write('%s\n' % msg)
sys.exit(1)
| true
| true
|
1c47b20f4f8dc841c057a6f528ecd4be3beca08f
| 10390
|
py
|
Python
|
wbb/modules/misc.py
|
TAMILVIP007/WilliamButcherBot
|
e7a02edcd57ec62c7f80c601484e92e257e1d5bf
|
[
"MIT"
] | 1
|
2021-06-30T07:09:45.000Z
|
2021-06-30T07:09:45.000Z
|
wbb/modules/misc.py
|
fakeenemy01/GroupBot
|
e7a02edcd57ec62c7f80c601484e92e257e1d5bf
|
[
"MIT"
] | null | null | null |
wbb/modules/misc.py
|
fakeenemy01/GroupBot
|
e7a02edcd57ec62c7f80c601484e92e257e1d5bf
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2021 TheHamkerCat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import secrets
import string
import aiohttp
from cryptography.fernet import Fernet
from pyrogram import filters
from wbb import FERNET_ENCRYPTION_KEY, app, arq
from wbb.core.decorators.errors import capture_err
from wbb.utils import random_line
from wbb.utils.fetch import fetch
from wbb.utils.json_prettify import json_prettify
from wbb.utils.pastebin import paste
__MODULE__ = "Misc"
__HELP__ = """
/commit - Generate Funny Commit Messages
/runs - Idk Test Yourself
/id - Get Chat_ID or User_ID
/random [Length] - Generate Random Complex Passwords
/encrypt - Encrypt Text [Can Only Be Decrypted By This Bot]
/decrypt - Decrypt Text
/cheat [Language] [Query] - Get Programming Related Help
/weather [City] - To Get Weather Info
/tr [en] - Translate A Message
/json [URL] - Get JSON Response From An API or Something.
/arq - Statistics Of ARQ API.
/webss [URL] - Take A Screenshot Of A Webpage
/reverse - Reverse search an image.
/carbon - Make Carbon from code.
#RTFM - Tell noobs to read the manual
"""
@app.on_message(filters.command("commit") & ~filters.edited)
async def commit(_, message):
await message.reply_text(
(await random_line("wbb/utils/commit.txt"))
)
@app.on_message(filters.command("RTFM", "#"))
async def rtfm(_, message):
await message.delete()
if not message.reply_to_message:
return await message.reply_text("Reply To A Message lol")
await message.reply_to_message.reply_text(
"Are You Lost? READ THE FUCKING DOCS!"
)
@app.on_message(filters.command("runs") & ~filters.edited)
async def runs(_, message):
await message.reply_text(
(await random_line("wbb/utils/runs.txt"))
)
@app.on_message(filters.command("id"))
async def getid(_, message):
if len(message.command) == 2:
try:
id = (
await app.get_users(
message.text.split(None, 1)[1].strip()
)
).id
except Exception:
return await message.reply_text("No Such User")
text = f"**ID:** `{id}`"
return await message.reply_text(text, parse_mode="html")
text_unping = "<b>Chat ID:</b>"
if message.chat.username:
text_unping = f'<a href="https://t.me/{message.chat.username}">{text_unping}</a>'
text_unping += f" <code>{message.chat.id}</code>\n"
text = "<b>Message ID:</b>"
if message.link:
text = f'<a href="{message.link}">{text}</a>'
text += f" <code>{message.message_id}</code>\n"
text_unping += text
if message.from_user:
text_unping += f'<b><a href="tg://user?id={message.from_user.id}">User ID:</a></b> <code>{message.from_user.id}</code>\n'
text_ping = text_unping
reply = message.reply_to_message
if not getattr(reply, "empty", True):
text_unping += "\n"
text = "<b>Replied Message ID:</b>"
if reply.link:
text = f'<a href="{reply.link}">{text}</a>'
text += f" <code>{reply.message_id}</code>\n"
text_unping += text
text_ping = text_unping
if reply.from_user:
text = "<b>Replied User ID:</b>"
if reply.from_user.username:
text = f'<a href="https://t.me/{reply.from_user.username}">{text}</a>'
text += f" <code>{reply.from_user.id}</code>\n"
text_unping += text
text_ping += f'<b><a href="tg://user?id={reply.from_user.id}">Replied User ID:</a></b> <code>{reply.from_user.id}</code>\n'
if reply.forward_from:
text_unping += "\n"
text = "<b>Forwarded User ID:</b>"
if reply.forward_from.username:
text = f'<a href="https://t.me/{reply.forward_from.username}">{text}</a>'
text += f" <code>{reply.forward_from.id}</code>\n"
text_unping += text
text_ping += f'\n<b><a href="tg://user?id={reply.forward_from.id}">Forwarded User ID:</a></b> <code>{reply.forward_from.id}</code>\n'
reply = await message.reply_text(
text_unping, disable_web_page_preview=True, parse_mode="html"
)
if text_unping != text_ping:
await reply.edit_text(
text_ping,
disable_web_page_preview=True,
parse_mode="html",
)
# Random
@app.on_message(filters.command("random") & ~filters.edited)
@capture_err
async def random(_, message):
if len(message.command) != 2:
return await message.reply_text(
'"/random" Needs An Argurment.' " Ex: `/random 5`"
)
length = message.text.split(None, 1)[1]
try:
if 1 < int(length) < 1000:
alphabet = string.ascii_letters + string.digits
password = "".join(
secrets.choice(alphabet) for i in range(int(length))
)
await message.reply_text(f"`{password}`")
else:
await message.reply_text(
"Specify A Length Between 1-1000"
)
except ValueError:
await message.reply_text(
"Strings Won't Work!, Pass A Positive Integer Less Than 1000"
)
# Encrypt
@app.on_message(filters.command("encrypt") & ~filters.edited)
@capture_err
async def encrypt(_, message):
if not message.reply_to_message:
return await message.reply_text(
"Reply To A Message To Encrypt It."
)
text = message.reply_to_message.text
text_in_bytes = bytes(text, "utf-8")
cipher_suite = Fernet(FERNET_ENCRYPTION_KEY)
encrypted_text = cipher_suite.encrypt(text_in_bytes)
bytes_in_text = encrypted_text.decode("utf-8")
await message.reply_text(bytes_in_text)
# Decrypt
@app.on_message(filters.command("decrypt") & ~filters.edited)
@capture_err
async def decrypt(_, message):
if not message.reply_to_message:
return await message.reply_text(
"Reply To A Message To Decrypt It."
)
text = message.reply_to_message.text
text_in_bytes = bytes(text, "utf-8")
cipher_suite = Fernet(FERNET_ENCRYPTION_KEY)
try:
decoded_text = cipher_suite.decrypt(text_in_bytes)
except Exception:
return await message.reply_text("Incorrect token")
bytes_in_text = decoded_text.decode("utf-8")
await message.reply_text(bytes_in_text)
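# Note: Fernet output is a urlsafe-base64 token; /decrypt only succeeds on
# tokens produced with this bot's FERNET_ENCRYPTION_KEY, hence the
# "Incorrect token" reply for anything else.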
async def fetch_text(url):
async with aiohttp.ClientSession(
headers={"user-agent": "curl"}
) as session:
async with session.get(url) as resp:
data = await resp.text()
return data
# Cheat.sh
@app.on_message(filters.command("cheat") & ~filters.edited)
@capture_err
async def cheat(_, message):
if len(message.command) < 3:
return await message.reply_text("/cheat [language] [query]")
text = message.text.split(None, 1)[1]
m = await message.reply_text("Searching")
try:
ftext = text.split()
language = ftext[0]
query = ftext[1]
data = await fetch_text(
f"http://cht.sh/{language}/{query}?QT"
)
if not data:
return await m.edit("Found Literally Nothing!")
await m.edit(f"`{data}`")
except Exception as e:
await m.edit(str(e))
print(str(e))
# Translate
@app.on_message(filters.command("tr") & ~filters.edited)
@capture_err
async def tr(_, message):
if len(message.command) != 2:
return await message.reply_text("/tr [LANGUAGE_CODE]")
lang = message.text.split(None, 1)[1]
if not message.reply_to_message or not lang:
return await message.reply_text(
"Reply to a message with /tr [language code]"
+ "\nGet supported language list from here -"
+ " https://py-googletrans.readthedocs.io/en"
+ "/latest/#googletrans-languages"
)
reply = message.reply_to_message
    # Translate the replied-to message rather than the /tr command text.
    text = reply.text
if not text:
return await message.reply_text(
"Reply to a text to translate it"
)
result = await arq.translate(text, lang)
if not result.ok:
return await message.reply_text(result.result)
await message.reply_text(result.result.translatedText)
@app.on_message(filters.command("json") & ~filters.edited)
@capture_err
async def json_fetch(_, message):
if len(message.command) != 2:
return await message.reply_text("/json [URL]")
url = message.text.split(None, 1)[1]
m = await message.reply_text("Fetching")
try:
data = await fetch(url)
data = await json_prettify(data)
if len(data) < 4090:
await m.edit(data)
else:
link = await paste(data)
await m.edit(
f"[OUTPUT_TOO_LONG]({link})",
disable_web_page_preview=True,
)
except Exception as e:
await m.edit(str(e))
@app.on_message(filters.command("webss"))
@capture_err
async def take_ss(_, message):
if len(message.command) != 2:
return await message.reply_text(
"Give A Url To Fetch Screenshot."
)
url = message.text.split(None, 1)[1]
m = await message.reply_text("**Uploading**")
try:
await app.send_photo(
message.chat.id,
photo=f"https://webshot.amanoteam.com/print?q={url}",
)
except Exception:
return await m.edit("No Such Website.")
await m.delete()
| 33.516129
| 145
| 0.64052
|
import secrets
import string
import aiohttp
from cryptography.fernet import Fernet
from pyrogram import filters
from wbb import FERNET_ENCRYPTION_KEY, app, arq
from wbb.core.decorators.errors import capture_err
from wbb.utils import random_line
from wbb.utils.fetch import fetch
from wbb.utils.json_prettify import json_prettify
from wbb.utils.pastebin import paste
__MODULE__ = "Misc"
__HELP__ = """
/commit - Generate Funny Commit Messages
/runs - Idk Test Yourself
/id - Get Chat_ID or User_ID
/random [Length] - Generate Random Complex Passwords
/encrypt - Encrypt Text [Can Only Be Decrypted By This Bot]
/decrypt - Decrypt Text
/cheat [Language] [Query] - Get Programming Related Help
/weather [City] - To Get Weather Info
/tr [en] - Translate A Message
/json [URL] - Get JSON Response From An API or Something.
/arq - Statistics Of ARQ API.
/webss [URL] - Take A Screenshot Of A Webpage
/reverse - Reverse search an image.
/carbon - Make Carbon from code.
#RTFM - Tell noobs to read the manual
"""
@app.on_message(filters.command("commit") & ~filters.edited)
async def commit(_, message):
await message.reply_text(
(await random_line("wbb/utils/commit.txt"))
)
@app.on_message(filters.command("RTFM", "#"))
async def rtfm(_, message):
await message.delete()
if not message.reply_to_message:
return await message.reply_text("Reply To A Message lol")
await message.reply_to_message.reply_text(
"Are You Lost? READ THE FUCKING DOCS!"
)
@app.on_message(filters.command("runs") & ~filters.edited)
async def runs(_, message):
await message.reply_text(
(await random_line("wbb/utils/runs.txt"))
)
@app.on_message(filters.command("id"))
async def getid(_, message):
if len(message.command) == 2:
try:
id = (
await app.get_users(
message.text.split(None, 1)[1].strip()
)
).id
except Exception:
return await message.reply_text("No Such User")
text = f"**ID:** `{id}`"
return await message.reply_text(text, parse_mode="html")
text_unping = "<b>Chat ID:</b>"
if message.chat.username:
text_unping = f'<a href="https://t.me/{message.chat.username}">{text_unping}</a>'
text_unping += f" <code>{message.chat.id}</code>\n"
text = "<b>Message ID:</b>"
if message.link:
text = f'<a href="{message.link}">{text}</a>'
text += f" <code>{message.message_id}</code>\n"
text_unping += text
if message.from_user:
text_unping += f'<b><a href="tg://user?id={message.from_user.id}">User ID:</a></b> <code>{message.from_user.id}</code>\n'
text_ping = text_unping
reply = message.reply_to_message
if not getattr(reply, "empty", True):
text_unping += "\n"
text = "<b>Replied Message ID:</b>"
if reply.link:
text = f'<a href="{reply.link}">{text}</a>'
text += f" <code>{reply.message_id}</code>\n"
text_unping += text
text_ping = text_unping
if reply.from_user:
text = "<b>Replied User ID:</b>"
if reply.from_user.username:
text = f'<a href="https://t.me/{reply.from_user.username}">{text}</a>'
text += f" <code>{reply.from_user.id}</code>\n"
text_unping += text
text_ping += f'<b><a href="tg://user?id={reply.from_user.id}">Replied User ID:</a></b> <code>{reply.from_user.id}</code>\n'
if reply.forward_from:
text_unping += "\n"
text = "<b>Forwarded User ID:</b>"
if reply.forward_from.username:
text = f'<a href="https://t.me/{reply.forward_from.username}">{text}</a>'
text += f" <code>{reply.forward_from.id}</code>\n"
text_unping += text
text_ping += f'\n<b><a href="tg://user?id={reply.forward_from.id}">Forwarded User ID:</a></b> <code>{reply.forward_from.id}</code>\n'
reply = await message.reply_text(
text_unping, disable_web_page_preview=True, parse_mode="html"
)
if text_unping != text_ping:
await reply.edit_text(
text_ping,
disable_web_page_preview=True,
parse_mode="html",
)
@app.on_message(filters.command("random") & ~filters.edited)
@capture_err
async def random(_, message):
if len(message.command) != 2:
return await message.reply_text(
'"/random" Needs An Argurment.' " Ex: `/random 5`"
)
length = message.text.split(None, 1)[1]
try:
if 1 < int(length) < 1000:
alphabet = string.ascii_letters + string.digits
password = "".join(
secrets.choice(alphabet) for i in range(int(length))
)
await message.reply_text(f"`{password}`")
else:
await message.reply_text(
"Specify A Length Between 1-1000"
)
except ValueError:
await message.reply_text(
"Strings Won't Work!, Pass A Positive Integer Less Than 1000"
)
# Encrypt
@app.on_message(filters.command("encrypt") & ~filters.edited)
@capture_err
async def encrypt(_, message):
if not message.reply_to_message:
return await message.reply_text(
"Reply To A Message To Encrypt It."
)
text = message.reply_to_message.text
text_in_bytes = bytes(text, "utf-8")
cipher_suite = Fernet(FERNET_ENCRYPTION_KEY)
encrypted_text = cipher_suite.encrypt(text_in_bytes)
bytes_in_text = encrypted_text.decode("utf-8")
await message.reply_text(bytes_in_text)
# Decrypt
@app.on_message(filters.command("decrypt") & ~filters.edited)
@capture_err
async def decrypt(_, message):
if not message.reply_to_message:
return await message.reply_text(
"Reply To A Message To Decrypt It."
)
text = message.reply_to_message.text
text_in_bytes = bytes(text, "utf-8")
cipher_suite = Fernet(FERNET_ENCRYPTION_KEY)
try:
decoded_text = cipher_suite.decrypt(text_in_bytes)
except Exception:
return await message.reply_text("Incorrect token")
bytes_in_text = decoded_text.decode("utf-8")
await message.reply_text(bytes_in_text)
async def fetch_text(url):
async with aiohttp.ClientSession(
headers={"user-agent": "curl"}
) as session:
async with session.get(url) as resp:
data = await resp.text()
return data
# Cheat.sh
@app.on_message(filters.command("cheat") & ~filters.edited)
@capture_err
async def cheat(_, message):
if len(message.command) < 3:
return await message.reply_text("/cheat [language] [query]")
text = message.text.split(None, 1)[1]
m = await message.reply_text("Searching")
try:
ftext = text.split()
language = ftext[0]
query = ftext[1]
data = await fetch_text(
f"http://cht.sh/{language}/{query}?QT"
)
if not data:
return await m.edit("Found Literally Nothing!")
await m.edit(f"`{data}`")
except Exception as e:
await m.edit(str(e))
print(str(e))
# Translate
@app.on_message(filters.command("tr") & ~filters.edited)
@capture_err
async def tr(_, message):
if len(message.command) != 2:
return await message.reply_text("/tr [LANGUAGE_CODE]")
lang = message.text.split(None, 1)[1]
if not message.reply_to_message or not lang:
return await message.reply_text(
"Reply to a message with /tr [language code]"
+ "\nGet supported language list from here -"
+ " https://py-googletrans.readthedocs.io/en"
+ "/latest/#googletrans-languages"
)
reply = message.reply_to_message
    text = reply.text
if not text:
return await message.reply_text(
"Reply to a text to translate it"
)
result = await arq.translate(text, lang)
if not result.ok:
return await message.reply_text(result.result)
await message.reply_text(result.result.translatedText)
@app.on_message(filters.command("json") & ~filters.edited)
@capture_err
async def json_fetch(_, message):
if len(message.command) != 2:
return await message.reply_text("/json [URL]")
url = message.text.split(None, 1)[1]
m = await message.reply_text("Fetching")
try:
data = await fetch(url)
data = await json_prettify(data)
if len(data) < 4090:
await m.edit(data)
else:
link = await paste(data)
await m.edit(
f"[OUTPUT_TOO_LONG]({link})",
disable_web_page_preview=True,
)
except Exception as e:
await m.edit(str(e))
@app.on_message(filters.command("webss"))
@capture_err
async def take_ss(_, message):
if len(message.command) != 2:
return await message.reply_text(
"Give A Url To Fetch Screenshot."
)
url = message.text.split(None, 1)[1]
m = await message.reply_text("**Uploading**")
try:
await app.send_photo(
message.chat.id,
photo=f"https://webshot.amanoteam.com/print?q={url}",
)
except Exception:
return await m.edit("No Such Website.")
await m.delete()
| true
| true
|
1c47b21893ab3220005fe7fa5a3318ed874a4750
| 592
|
py
|
Python
|
python/tests/test_merge_sort.py
|
YahyaOmari/data-structures-and-algorithms
|
86c1bc892ef3b62238555548f460065ac24c5ce3
|
[
"MIT"
] | null | null | null |
python/tests/test_merge_sort.py
|
YahyaOmari/data-structures-and-algorithms
|
86c1bc892ef3b62238555548f460065ac24c5ce3
|
[
"MIT"
] | 1
|
2021-05-04T21:33:34.000Z
|
2021-05-04T21:33:34.000Z
|
python/tests/test_merge_sort.py
|
YahyaOmari/data-structures-and-algorithms
|
86c1bc892ef3b62238555548f460065ac24c5ce3
|
[
"MIT"
] | null | null | null |
import pytest
from challenges.merge_sort.merge_sort import merge_sort
def test_merge_sort():
    actual = merge_sort([5,2,6,0])
    expected = [0, 2, 5, 6]
    assert expected == actual
def test_merge_sort2():
    actual = merge_sort([20,18,12,8,5,-2])
    expected = [-2, 5, 8, 12, 18, 20]
    assert expected == actual
def test_merge_sort3():
    actual = merge_sort([5,12,7,5,5,7])
    expected = [5, 5, 5, 7, 7, 12]
    assert expected == actual
def test_merge_sort4():
    actual = merge_sort([2,3,5,7,13,11])
    expected = [2, 3, 5, 7, 11, 13]
    assert expected == actual
| 26.909091
| 55
| 0.636824
|
import pytest
from challenges.merge_sort.merge_sort import merge_sort
def test_merge_sort():
    actual = merge_sort([5,2,6,0])
    expected = [0, 2, 5, 6]
    assert expected == actual
def test_merge_sort2():
    actual = merge_sort([20,18,12,8,5,-2])
    expected = [-2, 5, 8, 12, 18, 20]
    assert expected == actual
def test_merge_sort3():
    actual = merge_sort([5,12,7,5,5,7])
    expected = [5, 5, 5, 7, 7, 12]
    assert expected == actual
def test_merge_sort4():
    actual = merge_sort([2,3,5,7,13,11])
    expected = [2, 3, 5, 7, 11, 13]
    assert expected == actual
| true
| true
|
1c47b32f4ca4a9f1fa63baf4c55c2e109438b7d7
| 3730
|
py
|
Python
|
pychron/dashboard/process_value.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/dashboard/process_value.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/dashboard/process_value.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import time
from traits.api import HasTraits, Str, Either, Property, Float, Int, Bool, List, Enum
from traitsui.api import (
View,
VGroup,
HGroup,
UItem,
ListEditor,
InstanceEditor,
Readonly,
)
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.helpers.datetime_tools import convert_timestamp
from pychron.dashboard.conditional import DashboardConditional
from pychron.dashboard.constants import NOERROR, CRITICAL, WARNING
class ProcessValue(HasTraits):
name = Str
units = Str
tag = Str
func_name = Str
change_threshold = Float(1e-20)
period = Either(Float, Str) # "on_change" or number of seconds
last_time = Float
last_time_str = Property(depends_on="last_time")
enabled = Bool
last_value = Float
timeout = Float
plotid = Int
conditionals = List(DashboardConditional)
flag = Enum(NOERROR, WARNING, CRITICAL)
path = Str
record = Bool(False)
display_name = Property
def is_different(self, v):
ret = None
ct = time.time()
tt = 60 * 60 # max time (s) allowed without a measurement taken
# even if the current value is the same as the last value
threshold = self.change_threshold
if abs(self.last_value - v) > threshold or (
self.last_time and ct - self.last_time > tt
):
# a = abs(self.last_value - v) > threshold
# b = (self.last_time and ct - self.last_time > tt)
# self.debug('a={} {}-{}>{}, b={}'.format(a, self.last_value, v,threshold, b))
self.last_value = v
ret = True
return ret
def _get_display_name(self):
n = self.name
if self.units:
n = "{} ({})".format(n, self.units)
return n
def traits_view(self):
v = View(
VGroup(
HGroup(UItem("enabled"), Readonly("name")),
VGroup(
HGroup(Readonly("tag"), Readonly("period")),
HGroup(Readonly("last_time_str"), Readonly("last_value")),
VGroup(
UItem(
"conditionals",
editor=ListEditor(
editor=InstanceEditor(), style="custom", mutable=False
),
),
show_border=True,
label="Conditionals",
),
enabled_when="enabled",
),
)
)
return v
def _get_last_time_str(self):
r = ""
if self.last_time:
r = convert_timestamp(self.last_time)
return r
# ============= EOF =============================================
| 32.434783
| 90
| 0.531635
|
from __future__ import absolute_import
import time
from traits.api import HasTraits, Str, Either, Property, Float, Int, Bool, List, Enum
from traitsui.api import (
View,
VGroup,
HGroup,
UItem,
ListEditor,
InstanceEditor,
Readonly,
)
from pychron.core.helpers.datetime_tools import convert_timestamp
from pychron.dashboard.conditional import DashboardConditional
from pychron.dashboard.constants import NOERROR, CRITICAL, WARNING
class ProcessValue(HasTraits):
name = Str
units = Str
tag = Str
func_name = Str
change_threshold = Float(1e-20)
period = Either(Float, Str)
last_time = Float
last_time_str = Property(depends_on="last_time")
enabled = Bool
last_value = Float
timeout = Float
plotid = Int
conditionals = List(DashboardConditional)
flag = Enum(NOERROR, WARNING, CRITICAL)
path = Str
record = Bool(False)
display_name = Property
def is_different(self, v):
ret = None
ct = time.time()
tt = 60 * 60
threshold = self.change_threshold
if abs(self.last_value - v) > threshold or (
self.last_time and ct - self.last_time > tt
):
self.last_value = v
ret = True
return ret
def _get_display_name(self):
n = self.name
if self.units:
n = "{} ({})".format(n, self.units)
return n
def traits_view(self):
v = View(
VGroup(
HGroup(UItem("enabled"), Readonly("name")),
VGroup(
HGroup(Readonly("tag"), Readonly("period")),
HGroup(Readonly("last_time_str"), Readonly("last_value")),
VGroup(
UItem(
"conditionals",
editor=ListEditor(
editor=InstanceEditor(), style="custom", mutable=False
),
),
show_border=True,
label="Conditionals",
),
enabled_when="enabled",
),
)
)
return v
def _get_last_time_str(self):
r = ""
if self.last_time:
r = convert_timestamp(self.last_time)
return r
| true
| true
|
1c47b4039bfa2cc4e0a27db2b332508a8ada0804
| 1964
|
py
|
Python
|
facelib/InsightFace/models/data/data_pipe.py
|
ffletcherr/FaceLib
|
fc1b8496f90ba2c6a76bfb8a59e2e2af7a439a63
|
[
"MIT"
] | null | null | null |
facelib/InsightFace/models/data/data_pipe.py
|
ffletcherr/FaceLib
|
fc1b8496f90ba2c6a76bfb8a59e2e2af7a439a63
|
[
"MIT"
] | null | null | null |
facelib/InsightFace/models/data/data_pipe.py
|
ffletcherr/FaceLib
|
fc1b8496f90ba2c6a76bfb8a59e2e2af7a439a63
|
[
"MIT"
] | null | null | null |
from torch.utils.data import Dataset, ConcatDataset, DataLoader
from torchvision import transforms as trans
from torchvision.datasets import ImageFolder
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import numpy as np
def de_preprocess(tensor):
return tensor * 0.5 + 0.5
def get_train_dataset(imgs_folder):
train_transform = trans.Compose([
trans.RandomHorizontalFlip(),
trans.ToTensor(),
trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
ds = ImageFolder(imgs_folder, train_transform)
class_num = ds[-1][1] + 1
return ds, class_num
def get_train_loader(conf):
if conf.data_mode in ['ms1m', 'concat']:
ms1m_ds, ms1m_class_num = get_train_dataset(conf.ms1m_folder / 'imgs')
print('ms1m loader generated')
if conf.data_mode in ['vgg', 'concat']:
vgg_ds, vgg_class_num = get_train_dataset(conf.vgg_folder / 'imgs')
print('vgg loader generated')
if conf.data_mode == 'vgg':
ds = vgg_ds
class_num = vgg_class_num
elif conf.data_mode == 'ms1m':
ds = ms1m_ds
class_num = ms1m_class_num
elif conf.data_mode == 'concat':
for i, (url, label) in enumerate(vgg_ds.imgs):
vgg_ds.imgs[i] = (url, label + ms1m_class_num)
ds = ConcatDataset([ms1m_ds, vgg_ds])
class_num = vgg_class_num + ms1m_class_num
elif conf.data_mode == 'emore':
ds, class_num = get_train_dataset(conf.emore_folder / 'imgs')
loader = DataLoader(ds, batch_size=conf.batch_size, shuffle=True, pin_memory=conf.pin_memory,
num_workers=conf.num_workers)
return loader, class_num
def get_val_data(data_path):
agedb_30, agedb_30_issame = get_val_pair(data_path, 'agedb_30')
cfp_fp, cfp_fp_issame = get_val_pair(data_path, 'cfp_fp')
lfw, lfw_issame = get_val_pair(data_path, 'lfw')
return agedb_30, cfp_fp, lfw, agedb_30_issame, cfp_fp_issame, lfw_issame
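# Note: get_val_pair is neither defined nor imported in this module and is
# assumed to be provided elsewhere; in the upstream InsightFace_Pytorch code
# it loads a bcolz carray plus an 'issame' .npy label file per benchmark.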
| 34.45614
| 97
| 0.679735
|
from torch.utils.data import Dataset, ConcatDataset, DataLoader
from torchvision import transforms as trans
from torchvision.datasets import ImageFolder
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import numpy as np
def de_preprocess(tensor):
return tensor * 0.5 + 0.5
def get_train_dataset(imgs_folder):
train_transform = trans.Compose([
trans.RandomHorizontalFlip(),
trans.ToTensor(),
trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
ds = ImageFolder(imgs_folder, train_transform)
class_num = ds[-1][1] + 1
return ds, class_num
def get_train_loader(conf):
if conf.data_mode in ['ms1m', 'concat']:
ms1m_ds, ms1m_class_num = get_train_dataset(conf.ms1m_folder / 'imgs')
print('ms1m loader generated')
if conf.data_mode in ['vgg', 'concat']:
vgg_ds, vgg_class_num = get_train_dataset(conf.vgg_folder / 'imgs')
print('vgg loader generated')
if conf.data_mode == 'vgg':
ds = vgg_ds
class_num = vgg_class_num
elif conf.data_mode == 'ms1m':
ds = ms1m_ds
class_num = ms1m_class_num
elif conf.data_mode == 'concat':
for i, (url, label) in enumerate(vgg_ds.imgs):
vgg_ds.imgs[i] = (url, label + ms1m_class_num)
ds = ConcatDataset([ms1m_ds, vgg_ds])
class_num = vgg_class_num + ms1m_class_num
elif conf.data_mode == 'emore':
ds, class_num = get_train_dataset(conf.emore_folder / 'imgs')
loader = DataLoader(ds, batch_size=conf.batch_size, shuffle=True, pin_memory=conf.pin_memory,
num_workers=conf.num_workers)
return loader, class_num
def get_val_data(data_path):
agedb_30, agedb_30_issame = get_val_pair(data_path, 'agedb_30')
cfp_fp, cfp_fp_issame = get_val_pair(data_path, 'cfp_fp')
lfw, lfw_issame = get_val_pair(data_path, 'lfw')
return agedb_30, cfp_fp, lfw, agedb_30_issame, cfp_fp_issame, lfw_issame
| true
| true
|
1c47b4fbe727de9a582c4425c5640a77c610d033
| 588
|
py
|
Python
|
Challenge 1/script.py
|
kutyel/tuenti-challenge-6
|
63b4f1843cc55c0d409dd610a3b297c276b63a83
|
[
"MIT"
] | 1
|
2016-06-27T18:28:37.000Z
|
2016-06-27T18:28:37.000Z
|
Challenge 1/script.py
|
kutyel/tuenti-challenge-6
|
63b4f1843cc55c0d409dd610a3b297c276b63a83
|
[
"MIT"
] | null | null | null |
Challenge 1/script.py
|
kutyel/tuenti-challenge-6
|
63b4f1843cc55c0d409dd610a3b297c276b63a83
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
with open('output.txt', 'w') as output:
with open('submitInput.txt', 'r') as input_:
cases = int(input_.readline())
lines = input_.readlines()
for test, line in enumerate(lines):
result = 0
people = int(line)
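            # Seating model inferred from the arithmetic below: the first
            # table seats 4 people and each additional table seats 2 more.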
if people == 4:
result = 1
else:
while people > 0:
people -= 4 if result < 1 else 2
result += 1
print("Case #{0}: {1}".format(test+1, result), file=output)
| 28
| 72
| 0.472789
|
from __future__ import print_function
with open('output.txt', 'w') as output:
with open('submitInput.txt', 'r') as input_:
cases = int(input_.readline())
lines = input_.readlines()
for test, line in enumerate(lines):
result = 0
people = int(line)
if people == 4:
result = 1
else:
while people > 0:
people -= 4 if result < 1 else 2
result += 1
print("Case #{0}: {1}".format(test+1, result), file=output)
| true
| true
|
1c47b4fd441724e07fa4f7a33443a0d5dca4808b
| 1228
|
py
|
Python
|
zclassifiershiftedae/prepare_data.py
|
VAShibaev/text_style_transfer
|
42a4a653d7c47b5f04fe8c2b043f70a28b924e1f
|
[
"Apache-2.0"
] | 38
|
2019-09-05T16:39:19.000Z
|
2022-03-07T18:04:06.000Z
|
zclassifiershiftedae/prepare_data.py
|
VAShibaev/text_style_transfer
|
42a4a653d7c47b5f04fe8c2b043f70a28b924e1f
|
[
"Apache-2.0"
] | 1
|
2020-12-08T05:12:29.000Z
|
2020-12-08T05:12:29.000Z
|
zclassifiershiftedae/prepare_data.py
|
VAShibaev/text_style_transfer
|
42a4a653d7c47b5f04fe8c2b043f70a28b924e1f
|
[
"Apache-2.0"
] | 5
|
2019-10-21T22:46:05.000Z
|
2020-10-20T02:28:45.000Z
|
# -*- coding: utf-8 -*-
# This code is from
# Toward Controlled Generation of Text, ICML2017
# Zhiting Hu, Zichao Yang, Xiaodan Liang, Ruslan Salakhutdinov, Eric Xing
# https://github.com/asyml/texar/tree/master/examples/text_style_transfer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Downloads data.
"""
import texar as tx
# pylint: disable=invalid-name
def prepare_data():
"""Downloads data.
"""
tx.data.maybe_download(
urls='https://drive.google.com/file/d/'
'1HaUKEYDBEk6GlJGmXwqYteB-4rS9q8Lg/view?usp=sharing',
path='./',
filenames='yelp.zip',
extract=True)
def main():
"""Entrypoint.
"""
prepare_data()
if __name__ == '__main__':
main()
| 27.909091
| 74
| 0.694625
|
# Toward Controlled Generation of Text, ICML2017
# Zhiting Hu, Zichao Yang, Xiaodan Liang, Ruslan Salakhutdinov, Eric Xing
# https://github.com/asyml/texar/tree/master/examples/text_style_transfer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import texar as tx
# pylint: disable=invalid-name
def prepare_data():
tx.data.maybe_download(
urls='https://drive.google.com/file/d/'
'1HaUKEYDBEk6GlJGmXwqYteB-4rS9q8Lg/view?usp=sharing',
path='./',
filenames='yelp.zip',
extract=True)
def main():
prepare_data()
if __name__ == '__main__':
main()
| true
| true
|
1c47b6c5780ab8f0347dbfcc2cf7a16e0039e94d
| 450
|
py
|
Python
|
_app/posts/serializers.py
|
OmarThinks/DRF-Social-Project
|
e012c0d9e42e07948ef2fd7e391211ecf566a79a
|
[
"MIT"
] | null | null | null |
_app/posts/serializers.py
|
OmarThinks/DRF-Social-Project
|
e012c0d9e42e07948ef2fd7e391211ecf566a79a
|
[
"MIT"
] | null | null | null |
_app/posts/serializers.py
|
OmarThinks/DRF-Social-Project
|
e012c0d9e42e07948ef2fd7e391211ecf566a79a
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from .models import Post
from comments.serializers import CommentSerializer
#from django.conf import settings
# Serializers define the API representation.
class PostSerializer(serializers.HyperlinkedModelSerializer):
#comments = CommentSerializer(many=True, read_only=True)
class Meta:
model = Post
#fields = "__all__"
fields = ('id',"author" ,'content', "comments","url")
| 25
| 61
| 0.735556
|
from rest_framework import serializers
from .models import Post
from comments.serializers import CommentSerializer
class PostSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Post
fields = ('id',"author" ,'content', "comments","url")
| true
| true
|
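A HyperlinkedModelSerializer like the one above only resolves its "url" field when a matching route exists, which usually means a viewset registered on a router. A minimal wiring sketch under that assumption (the viewset name and import paths are illustrative, inferred from the record's app layout):

from rest_framework import routers, viewsets

from posts.models import Post                  # path assumed from the record
from posts.serializers import PostSerializer

class PostViewSet(viewsets.ModelViewSet):
    # Standard CRUD viewset; the router below generates the 'post-detail'
    # route that the serializer's "url" field reverses.
    queryset = Post.objects.all()
    serializer_class = PostSerializer

router = routers.DefaultRouter()
router.register(r'posts', PostViewSet)
# In the project urls.py: urlpatterns = [path('', include(router.urls))]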
1c47b7a20cbcab7a8a56ae19a8d8c0cabb9a422d
| 577
|
py
|
Python
|
Class Work/composing-methods-more/burger_toppings.py
|
Pondorasti/SPD-2.3
|
42728c1f2dfc371fb6bdf1ba008c5d41266f2fa8
|
[
"MIT"
] | null | null | null |
Class Work/composing-methods-more/burger_toppings.py
|
Pondorasti/SPD-2.3
|
42728c1f2dfc371fb6bdf1ba008c5d41266f2fa8
|
[
"MIT"
] | null | null | null |
Class Work/composing-methods-more/burger_toppings.py
|
Pondorasti/SPD-2.3
|
42728c1f2dfc371fb6bdf1ba008c5d41266f2fa8
|
[
"MIT"
] | null | null | null |
# by Kami Bigdely
# Split temporary variable
patty = 70 # [gr]
pickle = 20 # [gr]
tomatoes = 25 # [gr]
lettuce = 15 # [gr]
buns = 95 # [gr]
ny_burger_weight = (2 * patty + 4 * pickle + 3 *
tomatoes + 2 * lettuce + 2 * buns)
print("NY Burger Weight", ny_burger_weight)
kimchi = 30 # [gr]
mayo = 5 # [gr]
golden_fried_onion = 20 # [gr]
seoul_kimchi_burger_weight = (2 * patty + 4 * pickle + 3 * tomatoes
+ kimchi + mayo + golden_fried_onion + 2 * buns)
print("Seoul Kimchi Burger Weight", seoul_kimchi_burger_weight)
| 27.47619
| 78
| 0.59792
|
patty = 70
pickle = 20
tomatoes = 25
lettuce = 15
buns = 95
ny_burger_weight = (2 * patty + 4 * pickle + 3 *
tomatoes + 2 * lettuce + 2 * buns)
print("NY Burger Weight", ny_burger_weight)
kimchi = 30
mayo = 5
golden_fried_onion = 20
seoul_kimchi_burger_weight = (2 * patty + 4 * pickle + 3 * tomatoes
+ kimchi + mayo + golden_fried_onion + 2 * buns)
print("Seoul Kimchi Burger Weight", seoul_kimchi_burger_weight)
| true
| true
|
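The record illustrates the split-temporary-variable refactoring: each burger weight gets its own well-named variable instead of reusing one accumulator. Taken one step further, the shared ingredients can move into a helper so each variant lists only what differs; a sketch (the function and base/extras split are illustrative, the gram values come from the record and reproduce the same totals):

def burger_weight(*extras_gr):
    # Shared base: 2 patties (70 g), 4 pickles (20 g), 3 tomato slices (25 g)
    # and 2 buns (95 g), as in the record above.
    base = 2 * 70 + 4 * 20 + 3 * 25 + 2 * 95
    return base + sum(extras_gr)

ny_burger_weight = burger_weight(2 * 15)               # plus 2 lettuce leaves
seoul_kimchi_burger_weight = burger_weight(30, 5, 20)  # kimchi, mayo, fried onion
print("NY Burger Weight", ny_burger_weight)                      # 515
print("Seoul Kimchi Burger Weight", seoul_kimchi_burger_weight)  # 540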
1c47b7b8a1f8b36aa064bd1292aa46d379b22d4a
| 67
|
py
|
Python
|
ApplicationServer/descriptors/__init__.py
|
paltmey/scias
|
9006b85ad5a0084d7501413649e0679ba8adbe63
|
[
"MIT"
] | null | null | null |
ApplicationServer/descriptors/__init__.py
|
paltmey/scias
|
9006b85ad5a0084d7501413649e0679ba8adbe63
|
[
"MIT"
] | null | null | null |
ApplicationServer/descriptors/__init__.py
|
paltmey/scias
|
9006b85ad5a0084d7501413649e0679ba8adbe63
|
[
"MIT"
] | null | null | null |
from calculateDescriptors_cython import calculateDescriptors_cython
| 67
| 67
| 0.955224
|
from calculateDescriptors_cython import calculateDescriptors_cython
| true
| true
|
1c47b8b7abc09b5031051f41169039d786791bfa
| 10,082
|
py
|
Python
|
configs/vrd/VRD_SgDet_heth_area_gnn_faster_rcnn_x101_64x4d_fpn_1x.py
|
yizhe-ang/MMSceneGraph
|
d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba
|
[
"MIT"
] | 24
|
2021-10-14T03:28:28.000Z
|
2022-03-29T09:30:04.000Z
|
configs/vrd/VRD_SgDet_heth_area_gnn_faster_rcnn_x101_64x4d_fpn_1x.py
|
yizhe-ang/MMSceneGraph
|
d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba
|
[
"MIT"
] | 4
|
2021-12-14T15:04:49.000Z
|
2022-02-19T09:54:42.000Z
|
configs/vrd/VRD_SgDet_heth_area_gnn_faster_rcnn_x101_64x4d_fpn_1x.py
|
yizhe-ang/MMSceneGraph
|
d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba
|
[
"MIT"
] | 4
|
2021-10-31T11:23:06.000Z
|
2021-12-17T06:38:50.000Z
|
# dataset settings
dataset_type = 'VrdDataset'
data_root = 'data/vrd/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_rels', 'gt_relmaps']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
# Since the forward process may need gt info, annos must be loaded.
dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
# NOTE: Do not change the img to DC.
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
dict(type='ToDataContainer', fields=(dict(key='gt_bboxes'), dict(key='gt_labels'))),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
])
]
data = dict(
imgs_per_gpu=8,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'sg_annotations/train_sgs.json',
dict_file=data_root + 'sg_annotations/labels.json',
image_file=data_root + 'sg_annotations/train_images.json',
pipeline=train_pipeline,
num_im=-1,
split='train',
img_prefix=data_root + 'sg_dataset/sg_train_images'),
val=dict(
type=dataset_type,
ann_file=data_root + 'sg_annotations/test_sgs.json',
dict_file=data_root + 'sg_annotations/labels.json',
image_file=data_root + 'sg_annotations/test_images.json',
pipeline=test_pipeline,
num_im=-1,
split='val',
img_prefix=data_root + 'sg_dataset/sg_test_images/'),
test=dict(
type=dataset_type,
ann_file=data_root + 'sg_annotations/test_sgs.json',
dict_file=data_root + 'sg_annotations/labels.json',
image_file=data_root + 'sg_annotations/test_images.json',
pipeline=test_pipeline,
num_im=-1,
split='test',
img_prefix=data_root + 'sg_dataset/sg_test_images/'))
# model settings
dataset_config = data['train'].copy()
dataset_config.update(dict(cache=data_root + 'VRD_statistics.cache'))
model = dict(
type='FasterRCNN',
pretrained='checkpoints/mmlab/imnet/resnext101_64x4d-ee2c6f71.pth',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=101,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
relation_head=dict(
type='HETHead',
dataset_config=dataset_config,
num_classes=101,
num_predicates=71,
use_bias=True,
head_config=dict(
use_gt_box=False,
use_gt_label=False,
use_vision=True,
embed_dim=200,
hidden_dim=512,
roi_dim=1024,
context_pooling_dim=4096,
dropout_rate=0.2,
context_object_layer=1,
context_edge_layer=2,
glove_dir='data/glove/',
pick_parent='area',
isc_thresh=0.9,
child_order='confidence',
chain_style='GNN',
causal_effect_analysis=False),
bbox_roi_extractor=dict(
type='VisualSpatialExtractor',
bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
# mask_roi_layer=dict(type='ShapeAwareRoIAlign', out_size=7, sample_num=2),
with_visual_bbox=True,
with_visual_mask=False,
with_visual_point=False,
with_spatial=False,
in_channels=256,
fc_out_channels=1024,
featmap_strides=[4, 8, 16, 32]),
relation_roi_extractor=dict(
type='VisualSpatialExtractor',
bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
# mask_roi_layer=dict(type='ShapeAwareRoIAlign', out_size=7, sample_num=2),
with_visual_bbox=True,
with_visual_mask=False,
with_visual_point=False,
with_spatial=True,
separate_spatial=False,
in_channels=256,
fc_out_channels=1024,
featmap_strides=[4, 8, 16, 32]),
relation_sampler=dict(
type='Motif',
pos_iou_thr=0.5,
            require_overlap=False,  # not required for sgdet training
num_sample_per_gt_rel=4,
num_rel_per_image=1024,
pos_fraction=0.25,
test_overlap=True # for testing
),
loss_object=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_relation=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=50, # Follow the setting in TDE, 80 Bboxes are selected.
mask_thr_binary=0.5,
rle_mask_encode=False, # do not transform the mask into rle.
crop_mask=True, # so that the mask shape is the same as bbox, instead of image shape
format_mask_result=False, # do not transform to the result format like bbox
to_tensor=True))
find_unused_parameters = True
evaluation = dict(interval=1, metric='sgdet', relation_mode=True, classwise=True, nogc_thres_num=[10, 70])
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001,
freeze_modules=['backbone', 'neck', 'rpn_head', 'bbox_head', 'mask_head'])
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=50,
warmup_ratio=1.0 / 3,
step=[7, 10])
checkpoint_config = dict(interval=1)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './new_experiments/VRD_SgDet_heth_area_gnn_faster_rcnn_x101_64x4d_fpn_1x'
load_from = './experiments/VRD_Detection_faster_rcnn_x101_64x4d_fpn_1x_ftCOCO/latest.pth'
# load_mapping = dict(align_dict={'relation_head.bbox_roi_extractor.visual_bbox_head': 'bbox_head.shared_fcs',
# 'relation_head.relation_roi_extractor.visual_bbox_head': 'bbox_head.shared_fcs'})
resume_from = None
workflow = [('train', 1), ('val', 1)]
# yapf:disable
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook'),
dict(type='WandbLoggerHook',
init_kwargs=dict(
project=work_dir.split('/')[-1],
name='train-1',
config=work_dir + '/cfg.yaml'))
])
| 36.930403
| 115
| 0.587681
|
dataset_type = 'VrdDataset'
data_root = 'data/vrd/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_rels', 'gt_relmaps']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
dict(type='ToDataContainer', fields=(dict(key='gt_bboxes'), dict(key='gt_labels'))),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
])
]
data = dict(
imgs_per_gpu=8,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'sg_annotations/train_sgs.json',
dict_file=data_root + 'sg_annotations/labels.json',
image_file=data_root + 'sg_annotations/train_images.json',
pipeline=train_pipeline,
num_im=-1,
split='train',
img_prefix=data_root + 'sg_dataset/sg_train_images'),
val=dict(
type=dataset_type,
ann_file=data_root + 'sg_annotations/test_sgs.json',
dict_file=data_root + 'sg_annotations/labels.json',
image_file=data_root + 'sg_annotations/test_images.json',
pipeline=test_pipeline,
num_im=-1,
split='val',
img_prefix=data_root + 'sg_dataset/sg_test_images/'),
test=dict(
type=dataset_type,
ann_file=data_root + 'sg_annotations/test_sgs.json',
dict_file=data_root + 'sg_annotations/labels.json',
image_file=data_root + 'sg_annotations/test_images.json',
pipeline=test_pipeline,
num_im=-1,
split='test',
img_prefix=data_root + 'sg_dataset/sg_test_images/'))
dataset_config = data['train'].copy()
dataset_config.update(dict(cache=data_root + 'VRD_statistics.cache'))
model = dict(
type='FasterRCNN',
pretrained='checkpoints/mmlab/imnet/resnext101_64x4d-ee2c6f71.pth',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=101,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
relation_head=dict(
type='HETHead',
dataset_config=dataset_config,
num_classes=101,
num_predicates=71,
use_bias=True,
head_config=dict(
use_gt_box=False,
use_gt_label=False,
use_vision=True,
embed_dim=200,
hidden_dim=512,
roi_dim=1024,
context_pooling_dim=4096,
dropout_rate=0.2,
context_object_layer=1,
context_edge_layer=2,
glove_dir='data/glove/',
pick_parent='area',
isc_thresh=0.9,
child_order='confidence',
chain_style='GNN',
causal_effect_analysis=False),
bbox_roi_extractor=dict(
type='VisualSpatialExtractor',
bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
with_visual_bbox=True,
with_visual_mask=False,
with_visual_point=False,
with_spatial=False,
in_channels=256,
fc_out_channels=1024,
featmap_strides=[4, 8, 16, 32]),
relation_roi_extractor=dict(
type='VisualSpatialExtractor',
bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
with_visual_bbox=True,
with_visual_mask=False,
with_visual_point=False,
with_spatial=True,
separate_spatial=False,
in_channels=256,
fc_out_channels=1024,
featmap_strides=[4, 8, 16, 32]),
relation_sampler=dict(
type='Motif',
pos_iou_thr=0.5,
require_overlap=False,
num_sample_per_gt_rel=4,
num_rel_per_image=1024,
pos_fraction=0.25,
test_overlap=True
),
loss_object=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_relation=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=50,
mask_thr_binary=0.5,
rle_mask_encode=False,
crop_mask=True,
format_mask_result=False,
to_tensor=True))
find_unused_parameters = True
evaluation = dict(interval=1, metric='sgdet', relation_mode=True, classwise=True, nogc_thres_num=[10, 70])
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001,
freeze_modules=['backbone', 'neck', 'rpn_head', 'bbox_head', 'mask_head'])
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=50,
warmup_ratio=1.0 / 3,
step=[7, 10])
checkpoint_config = dict(interval=1)
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './new_experiments/VRD_SgDet_heth_area_gnn_faster_rcnn_x101_64x4d_fpn_1x'
load_from = './experiments/VRD_Detection_faster_rcnn_x101_64x4d_fpn_1x_ftCOCO/latest.pth'
resume_from = None
workflow = [('train', 1), ('val', 1)]
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook'),
dict(type='WandbLoggerHook',
init_kwargs=dict(
project=work_dir.split('/')[-1],
name='train-1',
config=work_dir + '/cfg.yaml'))
])
| true
| true
|
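Configs like the one above are plain Python modules consumed by mmcv-era tooling, which typically loads them with mmcv.Config.fromfile and exposes the top-level names as attributes. A minimal loading sketch (the import location assumes the older mmcv generation this repo appears to use; newer mmengine-based stacks moved the class):

from mmcv import Config

cfg = Config.fromfile(
    'configs/vrd/VRD_SgDet_heth_area_gnn_faster_rcnn_x101_64x4d_fpn_1x.py')
print(cfg.model['type'])         # FasterRCNN
print(cfg.total_epochs)          # 12
print(cfg.data['imgs_per_gpu'])  # 8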
1c47b9e4144242f50539d655abe8afb3386e443d
| 1,209
|
py
|
Python
|
examples/MERAOpt.py
|
vnechaev/QGOpt
|
697f02d89df67a576cd6953ffdd2db62970727da
|
[
"Apache-2.0"
] | null | null | null |
examples/MERAOpt.py
|
vnechaev/QGOpt
|
697f02d89df67a576cd6953ffdd2db62970727da
|
[
"Apache-2.0"
] | null | null | null |
examples/MERAOpt.py
|
vnechaev/QGOpt
|
697f02d89df67a576cd6953ffdd2db62970727da
|
[
"Apache-2.0"
] | null | null | null |
import QGOpt.manifolds as m
from tensorflow.python.keras.optimizer_v2 import optimizer_v2 as opt
import tensorflow as tf
def adj(A):
"""Correct adjoint
Args:
A: tf.tensor of shape (..., n, m)
Returns:
tf tensor of shape (..., m, n), adjoint matrix"""
return tf.math.conj(tf.linalg.matrix_transpose(A))
class MERAOpt(opt.OptimizerV2):
def __init__(self,
name="Fast"):
"""Constructs a new MERA inspired optimizer.
Returns:
object of class MERAOpt"""
super(MERAOpt, self).__init__(name)
def _create_slots(self, var_list):
# MERAOpt does not need slots
pass
def _resource_apply_dense(self, grad, var):
# Complex version of grad
complex_grad = m.real_to_complex(grad)
# MERA like update
_, u, v = tf.linalg.svd(adj(complex_grad))
var.assign(m.convert.complex_to_real(-v @ adj(u)))
def _resource_apply_sparse(self, grad, var):
raise NotImplementedError("Sparse gradient updates are not supported.")
def get_config(self):
config = super(MERAOpt, self).get_config()
config.update({
})
return config
| 25.723404
| 79
| 0.623656
|
import QGOpt.manifolds as m
from tensorflow.python.keras.optimizer_v2 import optimizer_v2 as opt
import tensorflow as tf
def adj(A):
return tf.math.conj(tf.linalg.matrix_transpose(A))
class MERAOpt(opt.OptimizerV2):
def __init__(self,
name="Fast"):
super(MERAOpt, self).__init__(name)
def _create_slots(self, var_list):
pass
def _resource_apply_dense(self, grad, var):
complex_grad = m.real_to_complex(grad)
_, u, v = tf.linalg.svd(adj(complex_grad))
var.assign(m.convert.complex_to_real(-v @ adj(u)))
def _resource_apply_sparse(self, grad, var):
raise NotImplementedError("Sparse gradient updates are not supported.")
def get_config(self):
config = super(MERAOpt, self).get_config()
config.update({
})
return config
| true
| true
|
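MERAOpt above is parameter-free: each update replaces the variable with the unitary closest to the negative gradient direction, obtained from an SVD of the adjoint gradient. A toy driver loop is sketched below; the trailing (..., 2) axis for real/imaginary parts and the loss function are assumptions for illustration, not QGOpt's documented API:

import tensorflow as tf

# 4x4 complex matrix in the assumed real packing (last axis = Re/Im).
u = tf.Variable(tf.random.normal((4, 4, 2)))
opt = MERAOpt()

def loss_fn(v):
    # Any differentiable scalar works here; this one is purely illustrative.
    return tf.reduce_sum(v ** 2)

for _ in range(10):
    with tf.GradientTape() as tape:
        loss = loss_fn(u)
    grads = tape.gradient(loss, [u])
    opt.apply_gradients(zip(grads, [u]))  # var <- closest unitary, per the class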
1c47b9f5723d75dc27d382fcc620139929908569
| 5,099
|
py
|
Python
|
sdk/AsposeEmailCloudSdk/models/object_exist.py
|
aspose-email-cloud/aspose-email-cloud-python
|
c5c13839cbbbfa5b6617bd1aedf3cf30cd664227
|
[
"MIT"
] | 1
|
2020-02-26T13:19:06.000Z
|
2020-02-26T13:19:06.000Z
|
sdk/AsposeEmailCloudSdk/models/object_exist.py
|
aspose-email-cloud/aspose-email-cloud-python
|
c5c13839cbbbfa5b6617bd1aedf3cf30cd664227
|
[
"MIT"
] | null | null | null |
sdk/AsposeEmailCloudSdk/models/object_exist.py
|
aspose-email-cloud/aspose-email-cloud-python
|
c5c13839cbbbfa5b6617bd1aedf3cf30cd664227
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="ObjectExist.py">
# Copyright (c) 2018-2020 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
import pprint
import re
import six
from typing import List, Set, Dict, Tuple, Optional
from datetime import datetime
class ObjectExist(object):
"""Object exists
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'exists': 'bool',
'is_folder': 'bool'
}
attribute_map = {
'exists': 'exists',
'is_folder': 'isFolder'
}
def __init__(self, exists: bool = None, is_folder: bool = None):
"""
Object exists
:param exists: Indicates that the file or folder exists.
:type exists: bool
:param is_folder: True if it is a folder, false if it is a file.
:type is_folder: bool
"""
self._exists = None
self._is_folder = None
if exists is not None:
self.exists = exists
if is_folder is not None:
self.is_folder = is_folder
@property
def exists(self) -> bool:
"""
Indicates that the file or folder exists.
:return: The exists of this ObjectExist.
:rtype: bool
"""
return self._exists
@exists.setter
def exists(self, exists: bool):
"""
Indicates that the file or folder exists.
:param exists: The exists of this ObjectExist.
:type: bool
"""
if exists is None:
raise ValueError("Invalid value for `exists`, must not be `None`")
self._exists = exists
@property
def is_folder(self) -> bool:
"""
True if it is a folder, false if it is a file.
:return: The is_folder of this ObjectExist.
:rtype: bool
"""
return self._is_folder
@is_folder.setter
def is_folder(self, is_folder: bool):
"""
True if it is a folder, false if it is a file.
:param is_folder: The is_folder of this ObjectExist.
:type: bool
"""
if is_folder is None:
raise ValueError("Invalid value for `is_folder`, must not be `None`")
self._is_folder = is_folder
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ObjectExist):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 32.069182
| 81
| 0.576388
|
import pprint
import re
import six
from typing import List, Set, Dict, Tuple, Optional
from datetime import datetime
class ObjectExist(object):
swagger_types = {
'exists': 'bool',
'is_folder': 'bool'
}
attribute_map = {
'exists': 'exists',
'is_folder': 'isFolder'
}
def __init__(self, exists: bool = None, is_folder: bool = None):
self._exists = None
self._is_folder = None
if exists is not None:
self.exists = exists
if is_folder is not None:
self.is_folder = is_folder
@property
def exists(self) -> bool:
return self._exists
@exists.setter
def exists(self, exists: bool):
if exists is None:
raise ValueError("Invalid value for `exists`, must not be `None`")
self._exists = exists
@property
def is_folder(self) -> bool:
return self._is_folder
@is_folder.setter
def is_folder(self, is_folder: bool):
if is_folder is None:
raise ValueError("Invalid value for `is_folder`, must not be `None`")
self._is_folder = is_folder
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ObjectExist):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
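The generated model above validates on assignment and round-trips through to_dict(); a short usage sketch:

obj = ObjectExist(exists=True, is_folder=False)
print(obj.to_dict())  # {'exists': True, 'is_folder': False}
print(obj == ObjectExist(exists=True, is_folder=False))  # True, via __eq__

# The setters reject None, so a field cannot be cleared once set:
try:
    obj.exists = None
except ValueError as err:
    print(err)  # Invalid value for `exists`, must not be `None`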
1c47ba7cf688f8310a64c27916a4b31c58e71077
| 178
|
py
|
Python
|
PythonDocs/src/002.py
|
Bean-jun/LearnGuide
|
30a8567b222d18b15d3e9027a435b5bfe640a046
|
[
"MIT"
] | 1
|
2022-02-23T13:42:01.000Z
|
2022-02-23T13:42:01.000Z
|
PythonDocs/src/002.py
|
Bean-jun/LearnGuide
|
30a8567b222d18b15d3e9027a435b5bfe640a046
|
[
"MIT"
] | null | null | null |
PythonDocs/src/002.py
|
Bean-jun/LearnGuide
|
30a8567b222d18b15d3e9027a435b5bfe640a046
|
[
"MIT"
] | null | null | null |
# Assign a single variable
name = "小明"
print(name)
# Assign the same value to multiple variables
tom_age = jerry_age = 10
print(f"tom's age is {tom_age}, jerry's age is {jerry_age}")
# Assign different values to multiple variables
name, age = "小明", 23
print(f"{name} is {age} years old")
| 14.833333
| 48
| 0.679775
|
name = "小明"
print(name)
tom_age = jerry_age = 10
print(f"tom的年龄为{tom_age}, jerry的年龄为{jerry_age}")
name, age = "小明", 23
print(f"{name}的年龄是{age}岁")
| true
| true
|
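The same tuple unpacking behind the multi-target assignments above also gives Python its no-temporary swap and starred unpacking; a short sketch:

a, b = 1, 2
a, b = b, a          # swap without a temporary variable
print(a, b)          # 2 1

first, *rest = [10, 20, 30]
print(first, rest)   # 10 [20, 30]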
1c47bc9b26db9cf25c8c537f793dfeaff97f5c14
| 4,813
|
py
|
Python
|
homeassistant/components/ecobee/sensor.py
|
ottersen/home-assistant
|
7a57c3a66af0e47cb6a1f9971dd2b14e6acae1bf
|
[
"Apache-2.0"
] | 2
|
2017-06-18T15:09:59.000Z
|
2017-06-18T15:11:09.000Z
|
homeassistant/components/ecobee/sensor.py
|
ottersen/home-assistant
|
7a57c3a66af0e47cb6a1f9971dd2b14e6acae1bf
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/ecobee/sensor.py
|
ottersen/home-assistant
|
7a57c3a66af0e47cb6a1f9971dd2b14e6acae1bf
|
[
"Apache-2.0"
] | null | null | null |
"""Support for Ecobee sensors."""
from pyecobee.const import ECOBEE_STATE_CALIBRATING, ECOBEE_STATE_UNKNOWN
from homeassistant.const import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers.entity import Entity
from .const import DOMAIN, ECOBEE_MODEL_TO_NAME, MANUFACTURER, _LOGGER
SENSOR_TYPES = {
"temperature": ["Temperature", TEMP_FAHRENHEIT],
"humidity": ["Humidity", "%"],
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up ecobee sensors."""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up ecobee (temperature and humidity) sensors."""
data = hass.data[DOMAIN]
dev = list()
for index in range(len(data.ecobee.thermostats)):
for sensor in data.ecobee.get_remote_sensors(index):
for item in sensor["capability"]:
if item["type"] not in ("temperature", "humidity"):
continue
dev.append(EcobeeSensor(data, sensor["name"], item["type"], index))
async_add_entities(dev, True)
class EcobeeSensor(Entity):
"""Representation of an Ecobee sensor."""
def __init__(self, data, sensor_name, sensor_type, sensor_index):
"""Initialize the sensor."""
self.data = data
self._name = "{} {}".format(sensor_name, SENSOR_TYPES[sensor_type][0])
self.sensor_name = sensor_name
self.type = sensor_type
self.index = sensor_index
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the Ecobee sensor."""
return self._name
@property
def unique_id(self):
"""Return a unique identifier for this sensor."""
for sensor in self.data.ecobee.get_remote_sensors(self.index):
if sensor["name"] == self.sensor_name:
if "code" in sensor:
return f"{sensor['code']}-{self.device_class}"
thermostat = self.data.ecobee.get_thermostat(self.index)
return f"{thermostat['identifier']}-{sensor['id']}-{self.device_class}"
@property
def device_info(self):
"""Return device information for this sensor."""
identifier = None
model = None
for sensor in self.data.ecobee.get_remote_sensors(self.index):
if sensor["name"] != self.sensor_name:
continue
if "code" in sensor:
identifier = sensor["code"]
model = "ecobee Room Sensor"
else:
thermostat = self.data.ecobee.get_thermostat(self.index)
identifier = thermostat["identifier"]
try:
model = (
f"{ECOBEE_MODEL_TO_NAME[thermostat['modelNumber']]} Thermostat"
)
except KeyError:
_LOGGER.error(
"Model number for ecobee thermostat %s not recognized. "
"Please visit this link and provide the following information: "
"https://github.com/home-assistant/home-assistant/issues/27172 "
"Unrecognized model number: %s",
thermostat["name"],
thermostat["modelNumber"],
)
break
if identifier is not None and model is not None:
return {
"identifiers": {(DOMAIN, identifier)},
"name": self.sensor_name,
"manufacturer": MANUFACTURER,
"model": model,
}
return None
@property
def device_class(self):
"""Return the device class of the sensor."""
if self.type in (DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_TEMPERATURE):
return self.type
return None
@property
def state(self):
"""Return the state of the sensor."""
if self._state in [ECOBEE_STATE_CALIBRATING, ECOBEE_STATE_UNKNOWN]:
return None
if self.type == "temperature":
return float(self._state) / 10
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
async def async_update(self):
"""Get the latest state of the sensor."""
await self.data.update()
for sensor in self.data.ecobee.get_remote_sensors(self.index):
for item in sensor["capability"]:
if item["type"] == self.type and self.sensor_name == sensor["name"]:
self._state = item["value"]
| 35.651852
| 88
| 0.5909
|
from pyecobee.const import ECOBEE_STATE_CALIBRATING, ECOBEE_STATE_UNKNOWN
from homeassistant.const import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers.entity import Entity
from .const import DOMAIN, ECOBEE_MODEL_TO_NAME, MANUFACTURER, _LOGGER
SENSOR_TYPES = {
"temperature": ["Temperature", TEMP_FAHRENHEIT],
"humidity": ["Humidity", "%"],
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
data = hass.data[DOMAIN]
dev = list()
for index in range(len(data.ecobee.thermostats)):
for sensor in data.ecobee.get_remote_sensors(index):
for item in sensor["capability"]:
if item["type"] not in ("temperature", "humidity"):
continue
dev.append(EcobeeSensor(data, sensor["name"], item["type"], index))
async_add_entities(dev, True)
class EcobeeSensor(Entity):
def __init__(self, data, sensor_name, sensor_type, sensor_index):
self.data = data
self._name = "{} {}".format(sensor_name, SENSOR_TYPES[sensor_type][0])
self.sensor_name = sensor_name
self.type = sensor_type
self.index = sensor_index
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
return self._name
@property
def unique_id(self):
for sensor in self.data.ecobee.get_remote_sensors(self.index):
if sensor["name"] == self.sensor_name:
if "code" in sensor:
return f"{sensor['code']}-{self.device_class}"
thermostat = self.data.ecobee.get_thermostat(self.index)
return f"{thermostat['identifier']}-{sensor['id']}-{self.device_class}"
@property
def device_info(self):
identifier = None
model = None
for sensor in self.data.ecobee.get_remote_sensors(self.index):
if sensor["name"] != self.sensor_name:
continue
if "code" in sensor:
identifier = sensor["code"]
model = "ecobee Room Sensor"
else:
thermostat = self.data.ecobee.get_thermostat(self.index)
identifier = thermostat["identifier"]
try:
model = (
f"{ECOBEE_MODEL_TO_NAME[thermostat['modelNumber']]} Thermostat"
)
except KeyError:
_LOGGER.error(
"Model number for ecobee thermostat %s not recognized. "
"Please visit this link and provide the following information: "
"https://github.com/home-assistant/home-assistant/issues/27172 "
"Unrecognized model number: %s",
thermostat["name"],
thermostat["modelNumber"],
)
break
if identifier is not None and model is not None:
return {
"identifiers": {(DOMAIN, identifier)},
"name": self.sensor_name,
"manufacturer": MANUFACTURER,
"model": model,
}
return None
@property
def device_class(self):
if self.type in (DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_TEMPERATURE):
return self.type
return None
@property
def state(self):
if self._state in [ECOBEE_STATE_CALIBRATING, ECOBEE_STATE_UNKNOWN]:
return None
if self.type == "temperature":
return float(self._state) / 10
return self._state
@property
def unit_of_measurement(self):
return self._unit_of_measurement
async def async_update(self):
await self.data.update()
for sensor in self.data.ecobee.get_remote_sensors(self.index):
for item in sensor["capability"]:
if item["type"] == self.type and self.sensor_name == sensor["name"]:
self._state = item["value"]
| true
| true
|
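One unit detail hides in the state property above: ecobee reports temperature in tenths of a degree Fahrenheit, so a raw 723 means 72.3 °F, and the calibrating/unknown sentinels map to no reading. A standalone sketch of that conversion (the sentinel strings here are placeholders; the real values come from pyecobee's constants):

def ecobee_to_degrees(raw, sentinels=("calibrating", "unknown")):
    # Mirrors EcobeeSensor.state: sentinel readings become None,
    # numeric readings are tenths of a degree Fahrenheit.
    if raw in sentinels:
        return None
    return float(raw) / 10

print(ecobee_to_degrees(723))            # 72.3
print(ecobee_to_degrees("calibrating"))  # None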
1c47bca4679357156bbce5a4240e93b0d106e17f
| 1,959
|
py
|
Python
|
pycontracts/forward_solidity.py
|
rpip/contracts
|
a2d831e1ac4a728bc7342f8d2856bdeb79c37cc4
|
[
"MIT"
] | null | null | null |
pycontracts/forward_solidity.py
|
rpip/contracts
|
a2d831e1ac4a728bc7342f8d2856bdeb79c37cc4
|
[
"MIT"
] | null | null | null |
pycontracts/forward_solidity.py
|
rpip/contracts
|
a2d831e1ac4a728bc7342f8d2856bdeb79c37cc4
|
[
"MIT"
] | 4
|
2019-02-01T13:46:47.000Z
|
2020-01-17T00:46:44.000Z
|
from web3 import Web3
from pycontracts import contracts
from pycontracts.forward import Forward, CallReverted
class ForwardSolidity(Forward):
def __init__(self, contract, owner = None):
self.contract = contract
super().__init__(contract.address)
self._owner = owner
@staticmethod
def wrap(w3, address, owner = None):
return ForwardSolidity(
contract = w3.eth.contract(
address = address,
abi = contracts['Forward']['abi'],
),
owner = owner
)
@staticmethod
def deploy(w3, owner, originator = None):
c = w3.eth.contract(
bytecode = contracts['Forward']['deploy'],
abi = contracts['Forward']['abi'],
)
tx_hash = c.constructor(owner).transact({
'from': originator or w3.eth.defaultAccount,
})
r = w3.eth.waitForTransactionReceipt(tx_hash)
return ForwardSolidity.wrap(w3, r.contractAddress, owner = owner)
@property
def owner(self):
if not self._owner:
self._owner = self.contract.functions.getOwner().call()
return self._owner
def nonce(self):
return self.contract.functions.getNonce().call()
def _build(self, call):
return self.contract.functions.forward(
27 + call.signature.v,
call.signature.r.to_bytes(32, "big"),
call.signature.s.to_bytes(32, "big"),
call.target, call.value, call.data
)
def build(self, call):
t = self._build(call).buildTransaction({"nonce": 0, "gas": 0, "gasPrice": 0})
return Web3.toBytes(hexstr = t["data"])
def transact(self, call, originator):
return self._build(call).transact({ 'from': originator })
def call(self, call, type=bytes):
success, return_data = self._build(call).call()
return self._handle_result(success, return_data, call, type)
| 32.114754
| 85
| 0.600306
|
from web3 import Web3
from pycontracts import contracts
from pycontracts.forward import Forward, CallReverted
class ForwardSolidity(Forward):
def __init__(self, contract, owner = None):
self.contract = contract
super().__init__(contract.address)
self._owner = owner
@staticmethod
def wrap(w3, address, owner = None):
return ForwardSolidity(
contract = w3.eth.contract(
address = address,
abi = contracts['Forward']['abi'],
),
owner = owner
)
@staticmethod
def deploy(w3, owner, originator = None):
c = w3.eth.contract(
bytecode = contracts['Forward']['deploy'],
abi = contracts['Forward']['abi'],
)
tx_hash = c.constructor(owner).transact({
'from': originator or w3.eth.defaultAccount,
})
r = w3.eth.waitForTransactionReceipt(tx_hash)
return ForwardSolidity.wrap(w3, r.contractAddress, owner = owner)
@property
def owner(self):
if not self._owner:
self._owner = self.contract.functions.getOwner().call()
return self._owner
def nonce(self):
return self.contract.functions.getNonce().call()
def _build(self, call):
return self.contract.functions.forward(
27 + call.signature.v,
call.signature.r.to_bytes(32, "big"),
call.signature.s.to_bytes(32, "big"),
call.target, call.value, call.data
)
def build(self, call):
t = self._build(call).buildTransaction({"nonce": 0, "gas": 0, "gasPrice": 0})
return Web3.toBytes(hexstr = t["data"])
def transact(self, call, originator):
return self._build(call).transact({ 'from': originator })
def call(self, call, type=bytes):
success, return_data = self._build(call).call()
return self._handle_result(success, return_data, call, type)
| true
| true
|
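End to end, the wrapper above is deployed once and then fed signed calls built by the pycontracts Forward base class. A usage sketch, assuming a funded local node (the endpoint and account indices are illustrative):

from web3 import Web3

w3 = Web3(Web3.HTTPProvider('http://localhost:8545'))  # illustrative endpoint
w3.eth.defaultAccount = w3.eth.accounts[0]

fwd = ForwardSolidity.deploy(w3, owner=w3.eth.accounts[1])
print(fwd.owner)    # the owner passed at deployment
print(fwd.nonce())  # 0 for a fresh contract

# Attach to an already-deployed instance by address instead:
same = ForwardSolidity.wrap(w3, fwd.contract.address, owner=fwd.owner)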
1c47bcf3b91293c8818a278695ef22bba118cc44
| 605
|
py
|
Python
|
setup.py
|
lmijovic/pylhe
|
afd270044a5c37fec409daa1be45e67ac5fe9c82
|
[
"Apache-2.0"
] | 1
|
2020-05-18T17:25:58.000Z
|
2020-05-18T17:25:58.000Z
|
setup.py
|
8me/pylhe
|
a165fba7f9cda1d3f28ae679e41571d52534dc9d
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
8me/pylhe
|
a165fba7f9cda1d3f28ae679e41571d52534dc9d
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup
extras_require = {
"test": [
"pytest",
"pytest-cov>=2.5.1",
"scikit-hep-testdata>=0.3.1",
"pydocstyle",
"check-manifest",
"flake8",
],
}
extras_require["lint"] = sorted(set(["pyflakes", "black;python_version>='3.6'"]))
extras_require["develop"] = sorted(
set(extras_require["test"] + ["pre-commit", "check-manifest", "twine"])
)
extras_require["complete"] = sorted(set(sum(extras_require.values(), [])))
setup(
extras_require=extras_require,
use_scm_version=lambda: {"local_scheme": lambda version: ""},
)
| 26.304348
| 81
| 0.618182
|
from setuptools import setup
extras_require = {
"test": [
"pytest",
"pytest-cov>=2.5.1",
"scikit-hep-testdata>=0.3.1",
"pydocstyle",
"check-manifest",
"flake8",
],
}
extras_require["lint"] = sorted(set(["pyflakes", "black;python_version>='3.6'"]))
extras_require["develop"] = sorted(
set(extras_require["test"] + ["pre-commit", "check-manifest", "twine"])
)
extras_require["complete"] = sorted(set(sum(extras_require.values(), [])))
setup(
extras_require=extras_require,
use_scm_version=lambda: {"local_scheme": lambda version: ""},
)
| true
| true
|
1c47bd99ea1abbad60f1ccb8e2ccf3f9e0e37943
| 7,863
|
py
|
Python
|
tests/vhdl/test_decoder.py
|
jvanstraten/vhdmmio
|
f166b07074a9159311a01af88497df91c19e09d1
|
[
"Apache-2.0"
] | 4
|
2019-07-01T14:41:38.000Z
|
2021-11-28T12:54:49.000Z
|
tests/vhdl/test_decoder.py
|
jvanstraten/vhdmmio
|
f166b07074a9159311a01af88497df91c19e09d1
|
[
"Apache-2.0"
] | 4
|
2019-08-23T15:05:24.000Z
|
2020-12-16T10:02:20.000Z
|
tests/vhdl/test_decoder.py
|
jvanstraten/vhdmmio
|
f166b07074a9159311a01af88497df91c19e09d1
|
[
"Apache-2.0"
] | 1
|
2021-07-16T13:41:21.000Z
|
2021-07-16T13:41:21.000Z
|
"""Unit tests for the VHDL address decoder generator."""
from unittest import TestCase
from vhdmmio.vhdl.address_decoder import AddressDecoder
from vhdmmio.core.address import MaskedAddress
from vhdmmio.template import TemplateEngine
class TestVhdlDecoder(TestCase):
"""Unit tests for the VHDL address decoder generator."""
maxDiff = None
def _test_decoder(self, addresses, match=None,
optimize=False, allow_overlap=False, allow_duplicate=False):
dec = AddressDecoder('address', 32, optimize, allow_overlap, allow_duplicate)
for address in addresses:
dec[MaskedAddress.parse_config(address)] = str(address)
result = str(dec)
if match is not None:
self.assertEqual(result, '\n'.join(match))
return dec
def test_empty(self):
"""tests constructing an empty address decoder"""
self._test_decoder([], [''])
def test_if(self):
"""tests address decoder if statement construction"""
self._test_decoder(['8|3'], [
'if address(31 downto 2) = "000000000000000000000000000010" then',
' -- address = 000000000000000000000000000010--',
'',
' 8|3',
'',
'end if;',
])
self._test_decoder(['8|3'], optimize=True, match=[
'-- address = 000000000000000000000000000010--',
'',
'8|3',
])
def test_if_else(self):
"""tests address decoder if-else statement construction"""
self._test_decoder(['4|3', '0|3'], match=[
'if address(31 downto 3) = "00000000000000000000000000000" then',
' if address(2) = \'0\' then',
' -- address = 000000000000000000000000000000--',
'',
' 0|3',
'',
' else',
' -- address = 000000000000000000000000000001--',
'',
' 4|3',
'',
' end if;',
'end if;',
])
self._test_decoder(['4|3', '0|3'], optimize=True, match=[
'if address(2) = \'0\' then',
' -- address = 000000000000000000000000000000--',
'',
' 0|3',
'',
'else',
' -- address = 000000000000000000000000000001--',
'',
' 4|3',
'',
'end if;',
])
def test_if_elsif(self):
"""tests address decoder if-elsif statement construction"""
self._test_decoder(['8|7', '4|3', '0|3'], optimize=True, match=[
'if address(3) = \'1\' then',
' -- address = 00000000000000000000000000001---',
'',
' 8|7',
'',
'elsif address(2) = \'0\' then',
' -- address = 000000000000000000000000000000--',
'',
' 0|3',
'',
'else',
' -- address = 000000000000000000000000000001--',
'',
' 4|3',
'',
'end if;',
])
self._test_decoder(['12|3', '8|3', '0|7'], optimize=True, match=[
'if address(3) = \'0\' then',
' -- address = 00000000000000000000000000000---',
'',
' 0|7',
'',
'elsif address(2) = \'0\' then',
' -- address = 000000000000000000000000000010--',
'',
' 8|3',
'',
'else',
' -- address = 000000000000000000000000000011--',
'',
' 12|3',
'',
'end if;',
])
def test_case_statement(self):
"""tests address decoder case statement construction"""
self._test_decoder(['8|3', '4|3'], match=[
'if address(31 downto 4) = "0000000000000000000000000000" then',
' case address(3 downto 2) is',
' when "01" =>',
' -- address = 000000000000000000000000000001--',
'',
' 4|3',
'',
' when "10" =>',
' -- address = 000000000000000000000000000010--',
'',
' 8|3',
'',
' when others =>',
' null;',
' end case;',
'end if;',
])
self._test_decoder(['8|3', '4|3'], optimize=True, match=[
'case address(3 downto 2) is',
' when "01" =>',
' -- address = 000000000000000000000000000001--',
'',
' 4|3',
'',
' when others => -- "10"',
' -- address = 000000000000000000000000000010--',
'',
' 8|3',
'',
'end case;',
])
def test_common_suffix(self):
"""tests address decoder common suffix detection"""
self._test_decoder([16, 32], match=[
'if address(31 downto 6) = "00000000000000000000000000" then',
' if address(3 downto 0) = "0000" then',
' case address(5 downto 4) is',
' when "01" =>',
' -- address = 00000000000000000000000000010000',
'',
' 16',
'',
' when "10" =>',
' -- address = 00000000000000000000000000100000',
'',
' 32',
'',
' when others =>',
' null;',
' end case;',
' end if;',
'end if;',
])
self._test_decoder([16, 32], optimize=True, match=[
'case address(5 downto 4) is',
' when "01" =>',
' -- address = 00000000000000000000000000010000',
'',
' 16',
'',
' when others => -- "10"',
' -- address = 00000000000000000000000000100000',
'',
' 32',
'',
'end case;',
])
def test_duplicate(self):
"""tests address decoder duplicate address error"""
with self.assertRaisesRegex(ValueError, 'duplicate'):
self._test_decoder([3, '3|0'])
self._test_decoder([3, '3|0'], allow_duplicate=True, match=[
'if address(31 downto 0) = "00000000000000000000000000000011" then',
' -- address = 00000000000000000000000000000011',
'',
' 3',
'',
' 3|0',
'',
'end if;',
])
def test_overlapping(self):
"""tests address decoder overlapping address error"""
with self.assertRaisesRegex(ValueError, 'overlap'):
self._test_decoder([3, '3|3'])
self._test_decoder([3, '3|3'], allow_overlap=True, match=[
'if address(31 downto 2) = "000000000000000000000000000000" then',
' if address(1 downto 0) = "11" then',
' -- address = 00000000000000000000000000000011',
'',
' 3',
'',
' end if;',
'',
' -- address = 000000000000000000000000000000--',
'',
' 3|3',
'',
'end if;',
])
def test_template(self):
"""tests adding decoders to templates"""
tple = TemplateEngine()
self._test_decoder([3]).append_to_template(tple, 'BLOCK', 'comment for decoder')
self.assertEqual(tple.apply_str_to_str('$BLOCK', comment='-- '), '\n'.join([
'-- comment for decoder',
'if address(31 downto 0) = "00000000000000000000000000000011" then',
' -- address = 00000000000000000000000000000011',
'',
' 3',
'',
'end if;',
''
]))
| 32.626556
| 88
| 0.452499
|
from unittest import TestCase
from vhdmmio.vhdl.address_decoder import AddressDecoder
from vhdmmio.core.address import MaskedAddress
from vhdmmio.template import TemplateEngine
class TestVhdlDecoder(TestCase):
maxDiff = None
def _test_decoder(self, addresses, match=None,
optimize=False, allow_overlap=False, allow_duplicate=False):
dec = AddressDecoder('address', 32, optimize, allow_overlap, allow_duplicate)
for address in addresses:
dec[MaskedAddress.parse_config(address)] = str(address)
result = str(dec)
if match is not None:
self.assertEqual(result, '\n'.join(match))
return dec
def test_empty(self):
self._test_decoder([], [''])
def test_if(self):
self._test_decoder(['8|3'], [
'if address(31 downto 2) = "000000000000000000000000000010" then',
' -- address = 000000000000000000000000000010--',
'',
' 8|3',
'',
'end if;',
])
self._test_decoder(['8|3'], optimize=True, match=[
'-- address = 000000000000000000000000000010--',
'',
'8|3',
])
def test_if_else(self):
self._test_decoder(['4|3', '0|3'], match=[
'if address(31 downto 3) = "00000000000000000000000000000" then',
' if address(2) = \'0\' then',
' -- address = 000000000000000000000000000000--',
'',
' 0|3',
'',
' else',
' -- address = 000000000000000000000000000001--',
'',
' 4|3',
'',
' end if;',
'end if;',
])
self._test_decoder(['4|3', '0|3'], optimize=True, match=[
'if address(2) = \'0\' then',
' -- address = 000000000000000000000000000000--',
'',
' 0|3',
'',
'else',
' -- address = 000000000000000000000000000001--',
'',
' 4|3',
'',
'end if;',
])
def test_if_elsif(self):
self._test_decoder(['8|7', '4|3', '0|3'], optimize=True, match=[
'if address(3) = \'1\' then',
' -- address = 00000000000000000000000000001---',
'',
' 8|7',
'',
'elsif address(2) = \'0\' then',
' -- address = 000000000000000000000000000000--',
'',
' 0|3',
'',
'else',
' -- address = 000000000000000000000000000001--',
'',
' 4|3',
'',
'end if;',
])
self._test_decoder(['12|3', '8|3', '0|7'], optimize=True, match=[
'if address(3) = \'0\' then',
' -- address = 00000000000000000000000000000---',
'',
' 0|7',
'',
'elsif address(2) = \'0\' then',
' -- address = 000000000000000000000000000010--',
'',
' 8|3',
'',
'else',
' -- address = 000000000000000000000000000011--',
'',
' 12|3',
'',
'end if;',
])
def test_case_statement(self):
self._test_decoder(['8|3', '4|3'], match=[
'if address(31 downto 4) = "0000000000000000000000000000" then',
' case address(3 downto 2) is',
' when "01" =>',
' -- address = 000000000000000000000000000001--',
'',
' 4|3',
'',
' when "10" =>',
' -- address = 000000000000000000000000000010--',
'',
' 8|3',
'',
' when others =>',
' null;',
' end case;',
'end if;',
])
self._test_decoder(['8|3', '4|3'], optimize=True, match=[
'case address(3 downto 2) is',
' when "01" =>',
' -- address = 000000000000000000000000000001--',
'',
' 4|3',
'',
' when others => -- "10"',
' -- address = 000000000000000000000000000010--',
'',
' 8|3',
'',
'end case;',
])
def test_common_suffix(self):
self._test_decoder([16, 32], match=[
'if address(31 downto 6) = "00000000000000000000000000" then',
' if address(3 downto 0) = "0000" then',
' case address(5 downto 4) is',
' when "01" =>',
' -- address = 00000000000000000000000000010000',
'',
' 16',
'',
' when "10" =>',
' -- address = 00000000000000000000000000100000',
'',
' 32',
'',
' when others =>',
' null;',
' end case;',
' end if;',
'end if;',
])
self._test_decoder([16, 32], optimize=True, match=[
'case address(5 downto 4) is',
' when "01" =>',
' -- address = 00000000000000000000000000010000',
'',
' 16',
'',
' when others => -- "10"',
' -- address = 00000000000000000000000000100000',
'',
' 32',
'',
'end case;',
])
def test_duplicate(self):
with self.assertRaisesRegex(ValueError, 'duplicate'):
self._test_decoder([3, '3|0'])
self._test_decoder([3, '3|0'], allow_duplicate=True, match=[
'if address(31 downto 0) = "00000000000000000000000000000011" then',
' -- address = 00000000000000000000000000000011',
'',
' 3',
'',
' 3|0',
'',
'end if;',
])
def test_overlapping(self):
with self.assertRaisesRegex(ValueError, 'overlap'):
self._test_decoder([3, '3|3'])
self._test_decoder([3, '3|3'], allow_overlap=True, match=[
'if address(31 downto 2) = "000000000000000000000000000000" then',
' if address(1 downto 0) = "11" then',
' -- address = 00000000000000000000000000000011',
'',
' 3',
'',
' end if;',
'',
' -- address = 000000000000000000000000000000--',
'',
' 3|3',
'',
'end if;',
])
def test_template(self):
tple = TemplateEngine()
self._test_decoder([3]).append_to_template(tple, 'BLOCK', 'comment for decoder')
self.assertEqual(tple.apply_str_to_str('$BLOCK', comment='-- '), '\n'.join([
'-- comment for decoder',
'if address(31 downto 0) = "00000000000000000000000000000011" then',
' -- address = 00000000000000000000000000000011',
'',
' 3',
'',
'end if;',
''
]))
| true
| true
|
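The 'value|mask' strings fed to _test_decoder above read, judging by the expected "address = ...10--" comments in the generated VHDL, as a base address plus a set of don't-care bits: '8|3' matches every address equal to 8 outside the two low bits masked by 3. A tiny pure-Python sketch of that interpretation (an assumption drawn from the test expectations, not MaskedAddress's actual implementation):

def matches(address, base, ignore_mask, width=32):
    # Compare only the bits outside ignore_mask, as the tests' expected
    # decoder comments suggest.
    keep = ~ignore_mask & ((1 << width) - 1)
    return (address & keep) == (base & keep)

assert all(matches(a, 8, 3) for a in (8, 9, 10, 11))
assert not matches(12, 8, 3)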
1c47bd9fc2b2b2f8e378fb299617e772a61d05cc
| 704
|
py
|
Python
|
0x0F-python-object_relational_mapping/4-cities_by_state.py
|
Rmolimock/holbertonschool-higher_level_programming
|
cf0421cbb6463b3960dc581badf7d4bbe1622b7d
|
[
"MIT"
] | 1
|
2019-05-21T09:34:41.000Z
|
2019-05-21T09:34:41.000Z
|
0x0F-python-object_relational_mapping/4-cities_by_state.py
|
Rmolimock/holbertonschool-higher_level_programming
|
cf0421cbb6463b3960dc581badf7d4bbe1622b7d
|
[
"MIT"
] | null | null | null |
0x0F-python-object_relational_mapping/4-cities_by_state.py
|
Rmolimock/holbertonschool-higher_level_programming
|
cf0421cbb6463b3960dc581badf7d4bbe1622b7d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
'''
Lists all cities from a given database, each with its state name.
The query takes no external input, so it is safe from SQL injection.
'''
import MySQLdb
from sys import argv
if __name__ == "__main__":
connection = MySQLdb.connect(host="localhost", port=3306, charset="utf8",
user=argv[1], passwd=argv[2], db=argv[3])
cursor = connection.cursor()
cursor.execute("SELECT cities.id, cities.name, states.name"
" FROM cities LEFT JOIN states"
" ON cities.state_id = states.id"
" ORDER BY cities.id ASC")
rows = cursor.fetchall()
for eachRow in rows:
print(eachRow)
cursor.close()
connection.close()
| 30.608696
| 77
| 0.599432
|
import MySQLdb
from sys import argv
if __name__ == "__main__":
connection = MySQLdb.connect(host="localhost", port=3306, charset="utf8",
user=argv[1], passwd=argv[2], db=argv[3])
cursor = connection.cursor()
cursor.execute("SELECT cities.id, cities.name, states.name"
" FROM cities LEFT JOIN states"
" ON cities.state_id = states.id"
" ORDER BY cities.id ASC")
rows = cursor.fetchall()
for eachRow in rows:
print(eachRow)
cursor.close()
connection.close()
| true
| true
|
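The query in the record takes no external input, so parameterization is moot there; the moment a filter such as a state name enters from argv, the way to keep the no-injection property is to let the driver bind it. A sketch (the extra argv slot is illustrative):

import MySQLdb
from sys import argv

db = MySQLdb.connect(host="localhost", port=3306, charset="utf8",
                     user=argv[1], passwd=argv[2], db=argv[3])
cur = db.cursor()
# The %s placeholder makes MySQLdb escape the value, so a hostile argv[4]
# cannot break out of the string literal.
cur.execute("SELECT cities.id, cities.name FROM cities"
            " JOIN states ON cities.state_id = states.id"
            " WHERE states.name = %s ORDER BY cities.id ASC", (argv[4],))
for row in cur.fetchall():
    print(row)
cur.close()
db.close()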
1c47be45651c7c68c942bf5b7c7f590e320b1cd0
| 49,438
|
py
|
Python
|
homeassistant/components/google_assistant/trait.py
|
unverbraucht/core
|
312af53935a1bffd58b3b35e82e31292a6ec22ad
|
[
"Apache-2.0"
] | 2
|
2019-11-20T20:56:59.000Z
|
2021-01-03T08:52:18.000Z
|
homeassistant/components/google_assistant/trait.py
|
shownor/core
|
b50281a9173e7fb4a37b3f813ca92876088eaac3
|
[
"Apache-2.0"
] | 5
|
2020-04-26T10:50:01.000Z
|
2021-03-16T21:19:46.000Z
|
homeassistant/components/google_assistant/trait.py
|
winterscar/core
|
5a55d508791aae65f16396691d014c73fb2095f0
|
[
"Apache-2.0"
] | 1
|
2021-04-18T19:36:34.000Z
|
2021-04-18T19:36:34.000Z
|
"""Implement the Google Smart Home traits."""
import logging
from homeassistant.components import (
alarm_control_panel,
binary_sensor,
camera,
cover,
fan,
group,
input_boolean,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_CODE,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import DOMAIN as HA_DOMAIN
from homeassistant.util import color as color_util, temperature as temp_util
from .const import (
CHALLENGE_ACK_NEEDED,
CHALLENGE_FAILED_PIN_NEEDED,
CHALLENGE_PIN_NEEDED,
ERR_ALREADY_ARMED,
ERR_ALREADY_DISARMED,
ERR_CHALLENGE_NOT_SETUP,
ERR_FUNCTION_NOT_SUPPORTED,
ERR_NOT_SUPPORTED,
ERR_VALUE_OUT_OF_RANGE,
)
from .error import ChallengeNeeded, SmartHomeError
_LOGGER = logging.getLogger(__name__)
PREFIX_TRAITS = "action.devices.traits."
TRAIT_CAMERA_STREAM = PREFIX_TRAITS + "CameraStream"
TRAIT_ONOFF = PREFIX_TRAITS + "OnOff"
TRAIT_DOCK = PREFIX_TRAITS + "Dock"
TRAIT_STARTSTOP = PREFIX_TRAITS + "StartStop"
TRAIT_BRIGHTNESS = PREFIX_TRAITS + "Brightness"
TRAIT_COLOR_SETTING = PREFIX_TRAITS + "ColorSetting"
TRAIT_SCENE = PREFIX_TRAITS + "Scene"
TRAIT_TEMPERATURE_SETTING = PREFIX_TRAITS + "TemperatureSetting"
TRAIT_LOCKUNLOCK = PREFIX_TRAITS + "LockUnlock"
TRAIT_FANSPEED = PREFIX_TRAITS + "FanSpeed"
TRAIT_MODES = PREFIX_TRAITS + "Modes"
TRAIT_OPENCLOSE = PREFIX_TRAITS + "OpenClose"
TRAIT_VOLUME = PREFIX_TRAITS + "Volume"
TRAIT_ARMDISARM = PREFIX_TRAITS + "ArmDisarm"
TRAIT_HUMIDITY_SETTING = PREFIX_TRAITS + "HumiditySetting"
PREFIX_COMMANDS = "action.devices.commands."
COMMAND_ONOFF = PREFIX_COMMANDS + "OnOff"
COMMAND_GET_CAMERA_STREAM = PREFIX_COMMANDS + "GetCameraStream"
COMMAND_DOCK = PREFIX_COMMANDS + "Dock"
COMMAND_STARTSTOP = PREFIX_COMMANDS + "StartStop"
COMMAND_PAUSEUNPAUSE = PREFIX_COMMANDS + "PauseUnpause"
COMMAND_BRIGHTNESS_ABSOLUTE = PREFIX_COMMANDS + "BrightnessAbsolute"
COMMAND_COLOR_ABSOLUTE = PREFIX_COMMANDS + "ColorAbsolute"
COMMAND_ACTIVATE_SCENE = PREFIX_COMMANDS + "ActivateScene"
COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT = (
PREFIX_COMMANDS + "ThermostatTemperatureSetpoint"
)
COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE = (
PREFIX_COMMANDS + "ThermostatTemperatureSetRange"
)
COMMAND_THERMOSTAT_SET_MODE = PREFIX_COMMANDS + "ThermostatSetMode"
COMMAND_LOCKUNLOCK = PREFIX_COMMANDS + "LockUnlock"
COMMAND_FANSPEED = PREFIX_COMMANDS + "SetFanSpeed"
COMMAND_MODES = PREFIX_COMMANDS + "SetModes"
COMMAND_OPENCLOSE = PREFIX_COMMANDS + "OpenClose"
COMMAND_SET_VOLUME = PREFIX_COMMANDS + "setVolume"
COMMAND_VOLUME_RELATIVE = PREFIX_COMMANDS + "volumeRelative"
COMMAND_ARMDISARM = PREFIX_COMMANDS + "ArmDisarm"
TRAITS = []
def register_trait(trait):
"""Decorate a function to register a trait."""
TRAITS.append(trait)
return trait
def _google_temp_unit(units):
"""Return Google temperature unit."""
if units == TEMP_FAHRENHEIT:
return "F"
return "C"
class _Trait:
"""Represents a Trait inside Google Assistant skill."""
commands = []
@staticmethod
def might_2fa(domain, features, device_class):
"""Return if the trait might ask for 2FA."""
return False
def __init__(self, hass, state, config):
"""Initialize a trait for a state."""
self.hass = hass
self.state = state
self.config = config
def sync_attributes(self):
"""Return attributes for a sync request."""
raise NotImplementedError
def query_attributes(self):
"""Return the attributes of this trait for this entity."""
raise NotImplementedError
def can_execute(self, command, params):
"""Test if command can be executed."""
return command in self.commands
async def execute(self, command, data, params, challenge):
"""Execute a trait command."""
raise NotImplementedError
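# Minimal sketch (assumption, not a real trait): the shape a concrete trait
# takes. The real traits below additionally declare the commands they accept
# and are registered with @register_trait so they end up in TRAITS.
class _ExampleTrait(_Trait):
    """Hypothetical trait that accepts no commands and reports nothing."""
    name = PREFIX_TRAITS + "Example"  # hypothetical trait identifier
    commands = []
    @staticmethod
    def supported(domain, features, device_class):
        """No entity supports this example trait."""
        return False
    def sync_attributes(self):
        """Report nothing during SYNC."""
        return {}
    def query_attributes(self):
        """Report nothing during QUERY."""
        return {}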
@register_trait
class BrightnessTrait(_Trait):
"""Trait to control brightness of a device.
https://developers.google.com/actions/smarthome/traits/brightness
"""
name = TRAIT_BRIGHTNESS
commands = [COMMAND_BRIGHTNESS_ABSOLUTE]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain == light.DOMAIN:
return features & light.SUPPORT_BRIGHTNESS
return False
def sync_attributes(self):
"""Return brightness attributes for a sync request."""
return {}
def query_attributes(self):
"""Return brightness query attributes."""
domain = self.state.domain
response = {}
if domain == light.DOMAIN:
brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS)
if brightness is not None:
response["brightness"] = int(100 * (brightness / 255))
else:
response["brightness"] = 0
return response
async def execute(self, command, data, params, challenge):
"""Execute a brightness command."""
domain = self.state.domain
if domain == light.DOMAIN:
await self.hass.services.async_call(
light.DOMAIN,
light.SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: self.state.entity_id,
light.ATTR_BRIGHTNESS_PCT: params["brightness"],
},
blocking=True,
context=data.context,
)
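# Illustrative helper (assumption): the 0-255 to 0-100 conversion used by
# BrightnessTrait.query_attributes above; the reverse direction is delegated
# to the light service via ATTR_BRIGHTNESS_PCT.
def _example_brightness_percent(brightness):
    """Convert Home Assistant's 0-255 brightness to Google's 0-100 scale."""
    # 255 -> 100, 128 -> 50, 1 -> 0 (integer truncation, as above).
    return int(100 * (brightness / 255))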
@register_trait
class CameraStreamTrait(_Trait):
"""Trait to stream from cameras.
https://developers.google.com/actions/smarthome/traits/camerastream
"""
name = TRAIT_CAMERA_STREAM
commands = [COMMAND_GET_CAMERA_STREAM]
stream_info = None
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain == camera.DOMAIN:
return features & camera.SUPPORT_STREAM
return False
def sync_attributes(self):
"""Return stream attributes for a sync request."""
return {
"cameraStreamSupportedProtocols": ["hls"],
"cameraStreamNeedAuthToken": False,
"cameraStreamNeedDrmEncryption": False,
}
def query_attributes(self):
"""Return camera stream attributes."""
return self.stream_info or {}
async def execute(self, command, data, params, challenge):
"""Execute a get camera stream command."""
url = await self.hass.components.camera.async_request_stream(
self.state.entity_id, "hls"
)
self.stream_info = {
"cameraStreamAccessUrl": self.hass.config.api.base_url + url
}
@register_trait
class OnOffTrait(_Trait):
"""Trait to offer basic on and off functionality.
https://developers.google.com/actions/smarthome/traits/onoff
"""
name = TRAIT_ONOFF
commands = [COMMAND_ONOFF]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
return domain in (
group.DOMAIN,
input_boolean.DOMAIN,
switch.DOMAIN,
fan.DOMAIN,
light.DOMAIN,
media_player.DOMAIN,
)
def sync_attributes(self):
"""Return OnOff attributes for a sync request."""
return {}
def query_attributes(self):
"""Return OnOff query attributes."""
return {"on": self.state.state != STATE_OFF}
async def execute(self, command, data, params, challenge):
"""Execute an OnOff command."""
domain = self.state.domain
        # Groups are toggled through the homeassistant domain's services.
        service_domain = HA_DOMAIN if domain == group.DOMAIN else domain
        service = SERVICE_TURN_ON if params["on"] else SERVICE_TURN_OFF
await self.hass.services.async_call(
service_domain,
service,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=True,
context=data.context,
)
@register_trait
class ColorSettingTrait(_Trait):
"""Trait to offer color temperature functionality.
https://developers.google.com/actions/smarthome/traits/colortemperature
"""
name = TRAIT_COLOR_SETTING
commands = [COMMAND_COLOR_ABSOLUTE]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain != light.DOMAIN:
return False
return features & light.SUPPORT_COLOR_TEMP or features & light.SUPPORT_COLOR
def sync_attributes(self):
"""Return color temperature attributes for a sync request."""
attrs = self.state.attributes
features = attrs.get(ATTR_SUPPORTED_FEATURES, 0)
response = {}
if features & light.SUPPORT_COLOR:
response["colorModel"] = "hsv"
if features & light.SUPPORT_COLOR_TEMP:
            # Kelvin and mireds are inversely related (K = 1000000 / mireds):
            # max Kelvin comes from min mireds, min Kelvin from max mireds.
response["colorTemperatureRange"] = {
"temperatureMaxK": color_util.color_temperature_mired_to_kelvin(
attrs.get(light.ATTR_MIN_MIREDS)
),
"temperatureMinK": color_util.color_temperature_mired_to_kelvin(
attrs.get(light.ATTR_MAX_MIREDS)
),
}
return response
def query_attributes(self):
"""Return color temperature query attributes."""
features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
color = {}
if features & light.SUPPORT_COLOR:
color_hs = self.state.attributes.get(light.ATTR_HS_COLOR)
brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS, 1)
if color_hs is not None:
color["spectrumHsv"] = {
"hue": color_hs[0],
"saturation": color_hs[1] / 100,
"value": brightness / 255,
}
if features & light.SUPPORT_COLOR_TEMP:
temp = self.state.attributes.get(light.ATTR_COLOR_TEMP)
# Some faulty integrations might put 0 in here, raising exception.
if temp == 0:
_LOGGER.warning(
"Entity %s has incorrect color temperature %s",
self.state.entity_id,
temp,
)
elif temp is not None:
color["temperatureK"] = color_util.color_temperature_mired_to_kelvin(
temp
)
response = {}
if color:
response["color"] = color
return response
async def execute(self, command, data, params, challenge):
"""Execute a color temperature command."""
if "temperature" in params["color"]:
temp = color_util.color_temperature_kelvin_to_mired(
params["color"]["temperature"]
)
min_temp = self.state.attributes[light.ATTR_MIN_MIREDS]
max_temp = self.state.attributes[light.ATTR_MAX_MIREDS]
if temp < min_temp or temp > max_temp:
raise SmartHomeError(
ERR_VALUE_OUT_OF_RANGE,
f"Temperature should be between {min_temp} and {max_temp}",
)
await self.hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_COLOR_TEMP: temp},
blocking=True,
context=data.context,
)
elif "spectrumRGB" in params["color"]:
            # Convert the integer to hex and left-pad with zeros to length 6.
hex_value = f"{params['color']['spectrumRGB']:06x}"
color = color_util.color_RGB_to_hs(
*color_util.rgb_hex_to_rgb_list(hex_value)
)
await self.hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_HS_COLOR: color},
blocking=True,
context=data.context,
)
elif "spectrumHSV" in params["color"]:
color = params["color"]["spectrumHSV"]
saturation = color["saturation"] * 100
brightness = color["value"] * 255
await self.hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: self.state.entity_id,
light.ATTR_HS_COLOR: [color["hue"], saturation],
light.ATTR_BRIGHTNESS: brightness,
},
blocking=True,
context=data.context,
)
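# Illustrative sketch (assumption): the two conversions ColorSettingTrait
# relies on - mireds to Kelvin (K = 1000000 / mireds) and Google's
# spectrumRGB integer to the hex string expected by color_util.
def _example_color_conversions(spectrum_rgb, mireds):
    """Return (hs_color, kelvin) for the given Google color parameters."""
    # 16711680 (0xFF0000) becomes "ff0000", i.e. pure red.
    hex_value = f"{spectrum_rgb:06x}"
    hs_color = color_util.color_RGB_to_hs(*color_util.rgb_hex_to_rgb_list(hex_value))
    # 153 mireds (a common cold-white limit) is roughly 6500 K.
    kelvin = 1000000 / mireds
    return hs_color, kelvin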
@register_trait
class SceneTrait(_Trait):
"""Trait to offer scene functionality.
https://developers.google.com/actions/smarthome/traits/scene
"""
name = TRAIT_SCENE
commands = [COMMAND_ACTIVATE_SCENE]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
return domain in (scene.DOMAIN, script.DOMAIN)
def sync_attributes(self):
"""Return scene attributes for a sync request."""
# Neither supported domain can support sceneReversible
return {}
def query_attributes(self):
"""Return scene query attributes."""
return {}
async def execute(self, command, data, params, challenge):
"""Execute a scene command."""
# Don't block for scripts as they can be slow.
await self.hass.services.async_call(
self.state.domain,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=self.state.domain != script.DOMAIN,
context=data.context,
)
@register_trait
class DockTrait(_Trait):
"""Trait to offer dock functionality.
https://developers.google.com/actions/smarthome/traits/dock
"""
name = TRAIT_DOCK
commands = [COMMAND_DOCK]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
return domain == vacuum.DOMAIN
def sync_attributes(self):
"""Return dock attributes for a sync request."""
return {}
def query_attributes(self):
"""Return dock query attributes."""
return {"isDocked": self.state.state == vacuum.STATE_DOCKED}
async def execute(self, command, data, params, challenge):
"""Execute a dock command."""
await self.hass.services.async_call(
self.state.domain,
vacuum.SERVICE_RETURN_TO_BASE,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=True,
context=data.context,
)
@register_trait
class StartStopTrait(_Trait):
"""Trait to offer StartStop functionality.
https://developers.google.com/actions/smarthome/traits/startstop
"""
name = TRAIT_STARTSTOP
commands = [COMMAND_STARTSTOP, COMMAND_PAUSEUNPAUSE]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
return domain == vacuum.DOMAIN
def sync_attributes(self):
"""Return StartStop attributes for a sync request."""
return {
"pausable": self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
& vacuum.SUPPORT_PAUSE
!= 0
}
def query_attributes(self):
"""Return StartStop query attributes."""
return {
"isRunning": self.state.state == vacuum.STATE_CLEANING,
"isPaused": self.state.state == vacuum.STATE_PAUSED,
}
async def execute(self, command, data, params, challenge):
"""Execute a StartStop command."""
if command == COMMAND_STARTSTOP:
if params["start"]:
await self.hass.services.async_call(
self.state.domain,
vacuum.SERVICE_START,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=True,
context=data.context,
)
else:
await self.hass.services.async_call(
self.state.domain,
vacuum.SERVICE_STOP,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=True,
context=data.context,
)
elif command == COMMAND_PAUSEUNPAUSE:
if params["pause"]:
await self.hass.services.async_call(
self.state.domain,
vacuum.SERVICE_PAUSE,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=True,
context=data.context,
)
else:
await self.hass.services.async_call(
self.state.domain,
vacuum.SERVICE_START,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=True,
context=data.context,
)
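# Illustrative helper (assumption): how StartStopTrait.sync_attributes derives
# the "pausable" flag from the entity's supported-features bitmask.
def _example_vacuum_pausable(features):
    """Return True when a vacuum's feature bitmask includes pause support."""
    return features & vacuum.SUPPORT_PAUSE != 0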
@register_trait
class TemperatureSettingTrait(_Trait):
"""Trait to offer handling both temperature point and modes functionality.
https://developers.google.com/actions/smarthome/traits/temperaturesetting
"""
name = TRAIT_TEMPERATURE_SETTING
commands = [
COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT,
COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE,
COMMAND_THERMOSTAT_SET_MODE,
]
    # "on" is deliberately absent from this mapping: we are unable to know
    # which mode to restore, so it is handled separately in execute().
hvac_to_google = {
climate.HVAC_MODE_HEAT: "heat",
climate.HVAC_MODE_COOL: "cool",
climate.HVAC_MODE_OFF: "off",
climate.HVAC_MODE_AUTO: "auto",
climate.HVAC_MODE_HEAT_COOL: "heatcool",
climate.HVAC_MODE_FAN_ONLY: "fan-only",
climate.HVAC_MODE_DRY: "dry",
}
google_to_hvac = {value: key for key, value in hvac_to_google.items()}
preset_to_google = {climate.PRESET_ECO: "eco"}
google_to_preset = {value: key for key, value in preset_to_google.items()}
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain == climate.DOMAIN:
return True
return (
domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_TEMPERATURE
)
@property
def climate_google_modes(self):
"""Return supported Google modes."""
modes = []
attrs = self.state.attributes
for mode in attrs.get(climate.ATTR_HVAC_MODES, []):
google_mode = self.hvac_to_google.get(mode)
if google_mode and google_mode not in modes:
modes.append(google_mode)
for preset in attrs.get(climate.ATTR_PRESET_MODES, []):
google_mode = self.preset_to_google.get(preset)
if google_mode and google_mode not in modes:
modes.append(google_mode)
return modes
def sync_attributes(self):
"""Return temperature point and modes attributes for a sync request."""
response = {}
attrs = self.state.attributes
domain = self.state.domain
response["thermostatTemperatureUnit"] = _google_temp_unit(
self.hass.config.units.temperature_unit
)
if domain == sensor.DOMAIN:
device_class = attrs.get(ATTR_DEVICE_CLASS)
if device_class == sensor.DEVICE_CLASS_TEMPERATURE:
response["queryOnlyTemperatureSetting"] = True
elif domain == climate.DOMAIN:
modes = self.climate_google_modes
            # Some integrations don't support modes (e.g. opentherm), but
            # Google doesn't support changing the temperature if there are no
            # modes. If there is only one mode, Google doesn't allow changing
            # it either, so the default mode added here is only cosmetic.
if len(modes) == 0:
modes.append("heat")
if "off" in modes and any(
mode in modes for mode in ("heatcool", "heat", "cool")
):
modes.append("on")
response["availableThermostatModes"] = ",".join(modes)
return response
def query_attributes(self):
"""Return temperature point and modes query attributes."""
response = {}
attrs = self.state.attributes
domain = self.state.domain
unit = self.hass.config.units.temperature_unit
if domain == sensor.DOMAIN:
device_class = attrs.get(ATTR_DEVICE_CLASS)
if device_class == sensor.DEVICE_CLASS_TEMPERATURE:
current_temp = self.state.state
if current_temp not in (STATE_UNKNOWN, STATE_UNAVAILABLE):
response["thermostatTemperatureAmbient"] = round(
temp_util.convert(float(current_temp), unit, TEMP_CELSIUS), 1
)
elif domain == climate.DOMAIN:
operation = self.state.state
preset = attrs.get(climate.ATTR_PRESET_MODE)
supported = attrs.get(ATTR_SUPPORTED_FEATURES, 0)
if preset in self.preset_to_google:
response["thermostatMode"] = self.preset_to_google[preset]
else:
response["thermostatMode"] = self.hvac_to_google.get(operation)
current_temp = attrs.get(climate.ATTR_CURRENT_TEMPERATURE)
if current_temp is not None:
response["thermostatTemperatureAmbient"] = round(
temp_util.convert(current_temp, unit, TEMP_CELSIUS), 1
)
current_humidity = attrs.get(climate.ATTR_CURRENT_HUMIDITY)
if current_humidity is not None:
response["thermostatHumidityAmbient"] = current_humidity
if operation in (climate.HVAC_MODE_AUTO, climate.HVAC_MODE_HEAT_COOL):
if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
response["thermostatTemperatureSetpointHigh"] = round(
temp_util.convert(
attrs[climate.ATTR_TARGET_TEMP_HIGH], unit, TEMP_CELSIUS
),
1,
)
response["thermostatTemperatureSetpointLow"] = round(
temp_util.convert(
attrs[climate.ATTR_TARGET_TEMP_LOW], unit, TEMP_CELSIUS
),
1,
)
else:
target_temp = attrs.get(ATTR_TEMPERATURE)
if target_temp is not None:
target_temp = round(
temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
)
response["thermostatTemperatureSetpointHigh"] = target_temp
response["thermostatTemperatureSetpointLow"] = target_temp
else:
target_temp = attrs.get(ATTR_TEMPERATURE)
if target_temp is not None:
response["thermostatTemperatureSetpoint"] = round(
temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
)
return response
async def execute(self, command, data, params, challenge):
"""Execute a temperature point or mode command."""
domain = self.state.domain
if domain == sensor.DOMAIN:
raise SmartHomeError(
ERR_NOT_SUPPORTED, "Execute is not supported by sensor"
)
# All sent in temperatures are always in Celsius
unit = self.hass.config.units.temperature_unit
min_temp = self.state.attributes[climate.ATTR_MIN_TEMP]
max_temp = self.state.attributes[climate.ATTR_MAX_TEMP]
if command == COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT:
temp = temp_util.convert(
params["thermostatTemperatureSetpoint"], TEMP_CELSIUS, unit
)
if unit == TEMP_FAHRENHEIT:
temp = round(temp)
if temp < min_temp or temp > max_temp:
raise SmartHomeError(
ERR_VALUE_OUT_OF_RANGE,
f"Temperature should be between {min_temp} and {max_temp}",
)
await self.hass.services.async_call(
climate.DOMAIN,
climate.SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: self.state.entity_id, ATTR_TEMPERATURE: temp},
blocking=True,
context=data.context,
)
elif command == COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE:
temp_high = temp_util.convert(
params["thermostatTemperatureSetpointHigh"], TEMP_CELSIUS, unit
)
if unit == TEMP_FAHRENHEIT:
temp_high = round(temp_high)
if temp_high < min_temp or temp_high > max_temp:
raise SmartHomeError(
ERR_VALUE_OUT_OF_RANGE,
(
f"Upper bound for temperature range should be between "
f"{min_temp} and {max_temp}"
),
)
temp_low = temp_util.convert(
params["thermostatTemperatureSetpointLow"], TEMP_CELSIUS, unit
)
if unit == TEMP_FAHRENHEIT:
temp_low = round(temp_low)
if temp_low < min_temp or temp_low > max_temp:
raise SmartHomeError(
ERR_VALUE_OUT_OF_RANGE,
(
f"Lower bound for temperature range should be between "
f"{min_temp} and {max_temp}"
),
)
supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)
svc_data = {ATTR_ENTITY_ID: self.state.entity_id}
if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
svc_data[climate.ATTR_TARGET_TEMP_HIGH] = temp_high
svc_data[climate.ATTR_TARGET_TEMP_LOW] = temp_low
else:
svc_data[ATTR_TEMPERATURE] = (temp_high + temp_low) / 2
await self.hass.services.async_call(
climate.DOMAIN,
climate.SERVICE_SET_TEMPERATURE,
svc_data,
blocking=True,
context=data.context,
)
elif command == COMMAND_THERMOSTAT_SET_MODE:
target_mode = params["thermostatMode"]
supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)
if target_mode == "on":
await self.hass.services.async_call(
climate.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=True,
context=data.context,
)
return
if target_mode == "off":
await self.hass.services.async_call(
climate.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=True,
context=data.context,
)
return
if target_mode in self.google_to_preset:
await self.hass.services.async_call(
climate.DOMAIN,
climate.SERVICE_SET_PRESET_MODE,
{
climate.ATTR_PRESET_MODE: self.google_to_preset[target_mode],
ATTR_ENTITY_ID: self.state.entity_id,
},
blocking=True,
context=data.context,
)
return
await self.hass.services.async_call(
climate.DOMAIN,
climate.SERVICE_SET_HVAC_MODE,
{
ATTR_ENTITY_ID: self.state.entity_id,
climate.ATTR_HVAC_MODE: self.google_to_hvac[target_mode],
},
blocking=True,
context=data.context,
)
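# Illustrative sketch (assumption): the HVAC mapping is bidirectional, so a
# Google mode name derived from an HA mode maps straight back in execute().
def _example_hvac_round_trip(hvac_mode):
    """Map an HA HVAC mode to Google and back; unknown modes return None."""
    google_mode = TemperatureSettingTrait.hvac_to_google.get(hvac_mode)
    if google_mode is None:
        return None
    # e.g. climate.HVAC_MODE_HEAT -> "heat" -> climate.HVAC_MODE_HEAT
    return TemperatureSettingTrait.google_to_hvac[google_mode]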
@register_trait
class HumiditySettingTrait(_Trait):
"""Trait to offer humidity setting functionality.
https://developers.google.com/actions/smarthome/traits/humiditysetting
"""
name = TRAIT_HUMIDITY_SETTING
commands = []
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
return domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_HUMIDITY
def sync_attributes(self):
"""Return humidity attributes for a sync request."""
response = {}
attrs = self.state.attributes
domain = self.state.domain
if domain == sensor.DOMAIN:
device_class = attrs.get(ATTR_DEVICE_CLASS)
if device_class == sensor.DEVICE_CLASS_HUMIDITY:
response["queryOnlyHumiditySetting"] = True
return response
def query_attributes(self):
"""Return humidity query attributes."""
response = {}
attrs = self.state.attributes
domain = self.state.domain
if domain == sensor.DOMAIN:
device_class = attrs.get(ATTR_DEVICE_CLASS)
if device_class == sensor.DEVICE_CLASS_HUMIDITY:
current_humidity = self.state.state
if current_humidity not in (STATE_UNKNOWN, STATE_UNAVAILABLE):
response["humidityAmbientPercent"] = round(float(current_humidity))
return response
async def execute(self, command, data, params, challenge):
"""Execute a humidity command."""
domain = self.state.domain
if domain == sensor.DOMAIN:
raise SmartHomeError(
ERR_NOT_SUPPORTED, "Execute is not supported by sensor"
)
@register_trait
class LockUnlockTrait(_Trait):
"""Trait to lock or unlock a lock.
https://developers.google.com/actions/smarthome/traits/lockunlock
"""
name = TRAIT_LOCKUNLOCK
commands = [COMMAND_LOCKUNLOCK]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
return domain == lock.DOMAIN
@staticmethod
def might_2fa(domain, features, device_class):
"""Return if the trait might ask for 2FA."""
return True
def sync_attributes(self):
"""Return LockUnlock attributes for a sync request."""
return {}
def query_attributes(self):
"""Return LockUnlock query attributes."""
return {"isLocked": self.state.state == STATE_LOCKED}
async def execute(self, command, data, params, challenge):
"""Execute an LockUnlock command."""
if params["lock"]:
service = lock.SERVICE_LOCK
else:
_verify_pin_challenge(data, self.state, challenge)
service = lock.SERVICE_UNLOCK
await self.hass.services.async_call(
lock.DOMAIN,
service,
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=True,
context=data.context,
)
@register_trait
class ArmDisArmTrait(_Trait):
"""Trait to Arm or Disarm a Security System.
https://developers.google.com/actions/smarthome/traits/armdisarm
"""
name = TRAIT_ARMDISARM
commands = [COMMAND_ARMDISARM]
state_to_service = {
STATE_ALARM_ARMED_HOME: SERVICE_ALARM_ARM_HOME,
STATE_ALARM_ARMED_AWAY: SERVICE_ALARM_ARM_AWAY,
STATE_ALARM_ARMED_NIGHT: SERVICE_ALARM_ARM_NIGHT,
STATE_ALARM_ARMED_CUSTOM_BYPASS: SERVICE_ALARM_ARM_CUSTOM_BYPASS,
STATE_ALARM_TRIGGERED: SERVICE_ALARM_TRIGGER,
}
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
return domain == alarm_control_panel.DOMAIN
@staticmethod
def might_2fa(domain, features, device_class):
"""Return if the trait might ask for 2FA."""
return True
def sync_attributes(self):
"""Return ArmDisarm attributes for a sync request."""
response = {}
levels = []
for state in self.state_to_service:
# level synonyms are generated from state names
# 'armed_away' becomes 'armed away' or 'away'
level_synonym = [state.replace("_", " ")]
if state != STATE_ALARM_TRIGGERED:
level_synonym.append(state.split("_")[1])
level = {
"level_name": state,
"level_values": [{"level_synonym": level_synonym, "lang": "en"}],
}
levels.append(level)
response["availableArmLevels"] = {"levels": levels, "ordered": False}
return response
def query_attributes(self):
"""Return ArmDisarm query attributes."""
if "post_pending_state" in self.state.attributes:
armed_state = self.state.attributes["post_pending_state"]
else:
armed_state = self.state.state
response = {"isArmed": armed_state in self.state_to_service}
if response["isArmed"]:
response.update({"currentArmLevel": armed_state})
return response
async def execute(self, command, data, params, challenge):
"""Execute an ArmDisarm command."""
if params["arm"] and not params.get("cancel"):
if self.state.state == params["armLevel"]:
raise SmartHomeError(ERR_ALREADY_ARMED, "System is already armed")
if self.state.attributes["code_arm_required"]:
_verify_pin_challenge(data, self.state, challenge)
service = self.state_to_service[params["armLevel"]]
        # Disarm the system without asking for the code when a 'cancel'
        # arming action is received while the current status is pending.
elif (
params["arm"]
and params.get("cancel")
and self.state.state == STATE_ALARM_PENDING
):
service = SERVICE_ALARM_DISARM
else:
if self.state.state == STATE_ALARM_DISARMED:
raise SmartHomeError(ERR_ALREADY_DISARMED, "System is already disarmed")
_verify_pin_challenge(data, self.state, challenge)
service = SERVICE_ALARM_DISARM
await self.hass.services.async_call(
alarm_control_panel.DOMAIN,
service,
{
ATTR_ENTITY_ID: self.state.entity_id,
ATTR_CODE: data.config.secure_devices_pin,
},
blocking=True,
context=data.context,
)
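# Illustrative helper (assumption): the synonym generation described in
# ArmDisArmTrait.sync_attributes above.
def _example_arm_level_synonyms(state):
    """Return the spoken synonyms generated for an alarm state name."""
    # "armed_away" yields ["armed away", "away"]; the triggered state keeps
    # only the spaced form.
    synonyms = [state.replace("_", " ")]
    if state != STATE_ALARM_TRIGGERED:
        synonyms.append(state.split("_")[1])
    return synonyms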
@register_trait
class FanSpeedTrait(_Trait):
"""Trait to control speed of Fan.
https://developers.google.com/actions/smarthome/traits/fanspeed
"""
name = TRAIT_FANSPEED
commands = [COMMAND_FANSPEED]
speed_synonyms = {
fan.SPEED_OFF: ["stop", "off"],
fan.SPEED_LOW: ["slow", "low", "slowest", "lowest"],
fan.SPEED_MEDIUM: ["medium", "mid", "middle"],
fan.SPEED_HIGH: ["high", "max", "fast", "highest", "fastest", "maximum"],
}
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain != fan.DOMAIN:
return False
return features & fan.SUPPORT_SET_SPEED
def sync_attributes(self):
"""Return speed point and modes attributes for a sync request."""
modes = self.state.attributes.get(fan.ATTR_SPEED_LIST, [])
speeds = []
for mode in modes:
if mode not in self.speed_synonyms:
continue
speed = {
"speed_name": mode,
"speed_values": [
{"speed_synonym": self.speed_synonyms.get(mode), "lang": "en"}
],
}
speeds.append(speed)
return {
"availableFanSpeeds": {"speeds": speeds, "ordered": True},
"reversible": bool(
self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
& fan.SUPPORT_DIRECTION
),
}
def query_attributes(self):
"""Return speed point and modes query attributes."""
attrs = self.state.attributes
response = {}
speed = attrs.get(fan.ATTR_SPEED)
if speed is not None:
response["on"] = speed != fan.SPEED_OFF
response["online"] = True
response["currentFanSpeedSetting"] = speed
return response
async def execute(self, command, data, params, challenge):
"""Execute an SetFanSpeed command."""
await self.hass.services.async_call(
fan.DOMAIN,
fan.SERVICE_SET_SPEED,
{ATTR_ENTITY_ID: self.state.entity_id, fan.ATTR_SPEED: params["fanSpeed"]},
blocking=True,
context=data.context,
)
@register_trait
class ModesTrait(_Trait):
"""Trait to set modes.
https://developers.google.com/actions/smarthome/traits/modes
"""
name = TRAIT_MODES
commands = [COMMAND_MODES]
SYNONYMS = {
"input source": ["input source", "input", "source"],
"sound mode": ["sound mode", "effects"],
}
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain != media_player.DOMAIN:
return False
return (
features & media_player.SUPPORT_SELECT_SOURCE
or features & media_player.SUPPORT_SELECT_SOUND_MODE
)
def sync_attributes(self):
"""Return mode attributes for a sync request."""
def _generate(name, settings):
mode = {
"name": name,
"name_values": [
{"name_synonym": self.SYNONYMS.get(name, [name]), "lang": "en"}
],
"settings": [],
"ordered": False,
}
for setting in settings:
mode["settings"].append(
{
"setting_name": setting,
"setting_values": [
{
"setting_synonym": self.SYNONYMS.get(
setting, [setting]
),
"lang": "en",
}
],
}
)
return mode
attrs = self.state.attributes
modes = []
if media_player.ATTR_INPUT_SOURCE_LIST in attrs:
modes.append(
_generate("input source", attrs[media_player.ATTR_INPUT_SOURCE_LIST])
)
if media_player.ATTR_SOUND_MODE_LIST in attrs:
modes.append(
_generate("sound mode", attrs[media_player.ATTR_SOUND_MODE_LIST])
)
payload = {"availableModes": modes}
return payload
def query_attributes(self):
"""Return current modes."""
attrs = self.state.attributes
response = {}
mode_settings = {}
if media_player.ATTR_INPUT_SOURCE_LIST in attrs:
mode_settings["input source"] = attrs.get(media_player.ATTR_INPUT_SOURCE)
if media_player.ATTR_SOUND_MODE_LIST in attrs:
mode_settings["sound mode"] = attrs.get(media_player.ATTR_SOUND_MODE)
if mode_settings:
response["on"] = self.state.state != STATE_OFF
response["online"] = True
response["currentModeSettings"] = mode_settings
return response
async def execute(self, command, data, params, challenge):
"""Execute an SetModes command."""
settings = params.get("updateModeSettings")
requested_source = settings.get("input source")
sound_mode = settings.get("sound mode")
if requested_source:
await self.hass.services.async_call(
media_player.DOMAIN,
media_player.SERVICE_SELECT_SOURCE,
{
ATTR_ENTITY_ID: self.state.entity_id,
media_player.ATTR_INPUT_SOURCE: requested_source,
},
blocking=True,
context=data.context,
)
if sound_mode:
await self.hass.services.async_call(
media_player.DOMAIN,
media_player.SERVICE_SELECT_SOUND_MODE,
{
ATTR_ENTITY_ID: self.state.entity_id,
media_player.ATTR_SOUND_MODE: sound_mode,
},
blocking=True,
context=data.context,
)
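# Minimal sketch (assumption): the availableModes entry _generate() builds for
# a media player exposing two hypothetical input sources.
def _example_mode_settings():
    """Return the mode payload produced for ["tv", "hdmi"] input sources."""
    return {
        "name": "input source",
        "name_values": [
            {"name_synonym": ["input source", "input", "source"], "lang": "en"}
        ],
        "settings": [
            {
                "setting_name": source,
                "setting_values": [{"setting_synonym": [source], "lang": "en"}],
            }
            for source in ("tv", "hdmi")
        ],
        "ordered": False,
    }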
@register_trait
class OpenCloseTrait(_Trait):
"""Trait to open and close a cover.
https://developers.google.com/actions/smarthome/traits/openclose
"""
# Cover device classes that require 2FA
COVER_2FA = (cover.DEVICE_CLASS_DOOR, cover.DEVICE_CLASS_GARAGE)
name = TRAIT_OPENCLOSE
commands = [COMMAND_OPENCLOSE]
override_position = None
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain == cover.DOMAIN:
return True
return domain == binary_sensor.DOMAIN and device_class in (
binary_sensor.DEVICE_CLASS_DOOR,
binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
binary_sensor.DEVICE_CLASS_LOCK,
binary_sensor.DEVICE_CLASS_OPENING,
binary_sensor.DEVICE_CLASS_WINDOW,
)
@staticmethod
def might_2fa(domain, features, device_class):
"""Return if the trait might ask for 2FA."""
return domain == cover.DOMAIN and device_class in OpenCloseTrait.COVER_2FA
def sync_attributes(self):
"""Return opening direction."""
response = {}
if self.state.domain == binary_sensor.DOMAIN:
response["queryOnlyOpenClose"] = True
return response
def query_attributes(self):
"""Return state query attributes."""
domain = self.state.domain
response = {}
if self.override_position is not None:
response["openPercent"] = self.override_position
elif domain == cover.DOMAIN:
# When it's an assumed state, we will return that querying state
# is not supported.
if self.state.attributes.get(ATTR_ASSUMED_STATE):
raise SmartHomeError(
ERR_NOT_SUPPORTED, "Querying state is not supported"
)
if self.state.state == STATE_UNKNOWN:
raise SmartHomeError(
ERR_NOT_SUPPORTED, "Querying state is not supported"
)
position = self.override_position or self.state.attributes.get(
cover.ATTR_CURRENT_POSITION
)
if position is not None:
response["openPercent"] = position
elif self.state.state != cover.STATE_CLOSED:
response["openPercent"] = 100
else:
response["openPercent"] = 0
elif domain == binary_sensor.DOMAIN:
if self.state.state == STATE_ON:
response["openPercent"] = 100
else:
response["openPercent"] = 0
return response
async def execute(self, command, data, params, challenge):
"""Execute an Open, close, Set position command."""
domain = self.state.domain
if domain == cover.DOMAIN:
svc_params = {ATTR_ENTITY_ID: self.state.entity_id}
if params["openPercent"] == 0:
service = cover.SERVICE_CLOSE_COVER
should_verify = False
elif params["openPercent"] == 100:
service = cover.SERVICE_OPEN_COVER
should_verify = True
elif (
self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
& cover.SUPPORT_SET_POSITION
):
service = cover.SERVICE_SET_COVER_POSITION
should_verify = True
svc_params[cover.ATTR_POSITION] = params["openPercent"]
else:
raise SmartHomeError(
ERR_FUNCTION_NOT_SUPPORTED, "Setting a position is not supported"
)
if (
should_verify
and self.state.attributes.get(ATTR_DEVICE_CLASS)
in OpenCloseTrait.COVER_2FA
):
_verify_pin_challenge(data, self.state, challenge)
await self.hass.services.async_call(
cover.DOMAIN, service, svc_params, blocking=True, context=data.context
)
if (
self.state.attributes.get(ATTR_ASSUMED_STATE)
or self.state.state == STATE_UNKNOWN
):
self.override_position = params["openPercent"]
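# Illustrative helper (assumption): the openPercent fallback used by
# OpenCloseTrait.query_attributes when a cover reports no position attribute.
def _example_open_percent(position, state):
    """Return the openPercent reported for a cover."""
    if position is not None:
        return position
    # Covers without position reporting are treated as fully open or closed.
    return 0 if state == cover.STATE_CLOSED else 100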
@register_trait
class VolumeTrait(_Trait):
"""Trait to control brightness of a device.
https://developers.google.com/actions/smarthome/traits/volume
"""
name = TRAIT_VOLUME
commands = [COMMAND_SET_VOLUME, COMMAND_VOLUME_RELATIVE]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain == media_player.DOMAIN:
return features & media_player.SUPPORT_VOLUME_SET
return False
def sync_attributes(self):
"""Return brightness attributes for a sync request."""
return {}
def query_attributes(self):
"""Return brightness query attributes."""
response = {}
level = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
muted = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_MUTED)
if level is not None:
# Convert 0.0-1.0 to 0-100
response["currentVolume"] = int(level * 100)
response["isMuted"] = bool(muted)
return response
async def _execute_set_volume(self, data, params):
level = params["volumeLevel"]
await self.hass.services.async_call(
media_player.DOMAIN,
media_player.SERVICE_VOLUME_SET,
{
ATTR_ENTITY_ID: self.state.entity_id,
media_player.ATTR_MEDIA_VOLUME_LEVEL: level / 100,
},
blocking=True,
context=data.context,
)
async def _execute_volume_relative(self, data, params):
# This could also support up/down commands using relativeSteps
relative = params["volumeRelativeLevel"]
current = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
await self.hass.services.async_call(
media_player.DOMAIN,
media_player.SERVICE_VOLUME_SET,
{
ATTR_ENTITY_ID: self.state.entity_id,
media_player.ATTR_MEDIA_VOLUME_LEVEL: current + relative / 100,
},
blocking=True,
context=data.context,
)
async def execute(self, command, data, params, challenge):
"""Execute a brightness command."""
if command == COMMAND_SET_VOLUME:
await self._execute_set_volume(data, params)
elif command == COMMAND_VOLUME_RELATIVE:
await self._execute_volume_relative(data, params)
else:
raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
def _verify_pin_challenge(data, state, challenge):
"""Verify a pin challenge."""
if not data.config.should_2fa(state):
return
if not data.config.secure_devices_pin:
raise SmartHomeError(ERR_CHALLENGE_NOT_SETUP, "Challenge is not set up")
if not challenge:
raise ChallengeNeeded(CHALLENGE_PIN_NEEDED)
pin = challenge.get("pin")
if pin != data.config.secure_devices_pin:
raise ChallengeNeeded(CHALLENGE_FAILED_PIN_NEEDED)
def _verify_ack_challenge(data, state, challenge):
"""Verify an ack challenge."""
if not data.config.should_2fa(state):
return
if not challenge or not challenge.get("ack"):
raise ChallengeNeeded(CHALLENGE_ACK_NEEDED)
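# Minimal sketch (assumption): the decision flow the two verifiers implement.
# A Google request may carry {"pin": "..."} or {"ack": True}; until the
# challenge is satisfied, the verifiers raise ChallengeNeeded with the
# matching challenge type.
def _example_pin_flow(secure_pin, challenge):
    """Mirror _verify_pin_challenge's outcome for a configured pin."""
    if not challenge:
        return CHALLENGE_PIN_NEEDED  # prompt the user for a pin
    if challenge.get("pin") != secure_pin:
        return CHALLENGE_FAILED_PIN_NEEDED  # wrong pin, prompt again
    return None  # challenge satisfied, the command may proceed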
name = TRAIT_OPENCLOSE
commands = [COMMAND_OPENCLOSE]
override_position = None
@staticmethod
def supported(domain, features, device_class):
if domain == cover.DOMAIN:
return True
return domain == binary_sensor.DOMAIN and device_class in (
binary_sensor.DEVICE_CLASS_DOOR,
binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
binary_sensor.DEVICE_CLASS_LOCK,
binary_sensor.DEVICE_CLASS_OPENING,
binary_sensor.DEVICE_CLASS_WINDOW,
)
@staticmethod
def might_2fa(domain, features, device_class):
return domain == cover.DOMAIN and device_class in OpenCloseTrait.COVER_2FA
def sync_attributes(self):
response = {}
if self.state.domain == binary_sensor.DOMAIN:
response["queryOnlyOpenClose"] = True
return response
def query_attributes(self):
domain = self.state.domain
response = {}
if self.override_position is not None:
response["openPercent"] = self.override_position
elif domain == cover.DOMAIN:
            # When the state is assumed, querying it is not supported
if self.state.attributes.get(ATTR_ASSUMED_STATE):
raise SmartHomeError(
ERR_NOT_SUPPORTED, "Querying state is not supported"
)
if self.state.state == STATE_UNKNOWN:
raise SmartHomeError(
ERR_NOT_SUPPORTED, "Querying state is not supported"
)
position = self.override_position or self.state.attributes.get(
cover.ATTR_CURRENT_POSITION
)
if position is not None:
response["openPercent"] = position
elif self.state.state != cover.STATE_CLOSED:
response["openPercent"] = 100
else:
response["openPercent"] = 0
elif domain == binary_sensor.DOMAIN:
if self.state.state == STATE_ON:
response["openPercent"] = 100
else:
response["openPercent"] = 0
return response
async def execute(self, command, data, params, challenge):
domain = self.state.domain
if domain == cover.DOMAIN:
svc_params = {ATTR_ENTITY_ID: self.state.entity_id}
if params["openPercent"] == 0:
service = cover.SERVICE_CLOSE_COVER
should_verify = False
elif params["openPercent"] == 100:
service = cover.SERVICE_OPEN_COVER
should_verify = True
elif (
self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
& cover.SUPPORT_SET_POSITION
):
service = cover.SERVICE_SET_COVER_POSITION
should_verify = True
svc_params[cover.ATTR_POSITION] = params["openPercent"]
else:
raise SmartHomeError(
ERR_FUNCTION_NOT_SUPPORTED, "Setting a position is not supported"
)
if (
should_verify
and self.state.attributes.get(ATTR_DEVICE_CLASS)
in OpenCloseTrait.COVER_2FA
):
_verify_pin_challenge(data, self.state, challenge)
await self.hass.services.async_call(
cover.DOMAIN, service, svc_params, blocking=True, context=data.context
)
if (
self.state.attributes.get(ATTR_ASSUMED_STATE)
or self.state.state == STATE_UNKNOWN
):
self.override_position = params["openPercent"]
@register_trait
class VolumeTrait(_Trait):
name = TRAIT_VOLUME
commands = [COMMAND_SET_VOLUME, COMMAND_VOLUME_RELATIVE]
@staticmethod
def supported(domain, features, device_class):
if domain == media_player.DOMAIN:
return features & media_player.SUPPORT_VOLUME_SET
return False
def sync_attributes(self):
return {}
def query_attributes(self):
response = {}
level = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
muted = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_MUTED)
if level is not None:
response["currentVolume"] = int(level * 100)
response["isMuted"] = bool(muted)
return response
async def _execute_set_volume(self, data, params):
level = params["volumeLevel"]
await self.hass.services.async_call(
media_player.DOMAIN,
media_player.SERVICE_VOLUME_SET,
{
ATTR_ENTITY_ID: self.state.entity_id,
media_player.ATTR_MEDIA_VOLUME_LEVEL: level / 100,
},
blocking=True,
context=data.context,
)
async def _execute_volume_relative(self, data, params):
relative = params["volumeRelativeLevel"]
current = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
await self.hass.services.async_call(
media_player.DOMAIN,
media_player.SERVICE_VOLUME_SET,
{
ATTR_ENTITY_ID: self.state.entity_id,
media_player.ATTR_MEDIA_VOLUME_LEVEL: current + relative / 100,
},
blocking=True,
context=data.context,
)
async def execute(self, command, data, params, challenge):
if command == COMMAND_SET_VOLUME:
await self._execute_set_volume(data, params)
elif command == COMMAND_VOLUME_RELATIVE:
await self._execute_volume_relative(data, params)
else:
raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
def _verify_pin_challenge(data, state, challenge):
if not data.config.should_2fa(state):
return
if not data.config.secure_devices_pin:
raise SmartHomeError(ERR_CHALLENGE_NOT_SETUP, "Challenge is not set up")
if not challenge:
raise ChallengeNeeded(CHALLENGE_PIN_NEEDED)
pin = challenge.get("pin")
if pin != data.config.secure_devices_pin:
raise ChallengeNeeded(CHALLENGE_FAILED_PIN_NEEDED)
def _verify_ack_challenge(data, state, challenge):
if not data.config.should_2fa(state):
return
if not challenge or not challenge.get("ack"):
raise ChallengeNeeded(CHALLENGE_ACK_NEEDED)
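
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal trait
# following the protocol used above -- register it, declare trait/command
# identifiers, report support, expose sync/query attributes, and implement
# execute(). The identifier strings are placeholders, not real Google
# Assistant trait names.
@register_trait
class ExampleOnOffTrait(_Trait):
    name = "action.devices.traits.ExampleOnOff"  # placeholder identifier
    commands = ["action.devices.commands.ExampleOnOff"]  # placeholder

    @staticmethod
    def supported(domain, features, device_class):
        # Keep the sketch narrow: only the switch domain.
        return domain == "switch"

    def sync_attributes(self):
        # This sketch needs no extra sync attributes.
        return {}

    def query_attributes(self):
        return {"on": self.state.state == STATE_ON}

    async def execute(self, command, data, params, challenge):
        # Reuse the PIN challenge helper defined above; it is a no-op when
        # the config does not require 2FA for this entity.
        _verify_pin_challenge(data, self.state, challenge)
        service = "turn_on" if params["on"] else "turn_off"
        await self.hass.services.async_call(
            "switch",
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )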
| true
| true
|
1c47be6838a559b898608b686a690144038060ab
| 811
|
py
|
Python
|
mysite/mysite/urls.py
|
xinkaiwang/robotJump
|
622e97451f450b755aecbd60e15b2cd47d875f47
|
[
"MIT"
] | null | null | null |
mysite/mysite/urls.py
|
xinkaiwang/robotJump
|
622e97451f450b755aecbd60e15b2cd47d875f47
|
[
"MIT"
] | null | null | null |
mysite/mysite/urls.py
|
xinkaiwang/robotJump
|
622e97451f450b755aecbd60e15b2cd47d875f47
|
[
"MIT"
] | null | null | null |
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('^', include('buckets.urls')),
]
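
# Illustrative sketch (commented out): the three routing patterns described
# in the docstring above. 'my_app', 'other_app' and 'blog' are placeholder
# names from that docstring, not apps in this project.
#
# from my_app import views
# from other_app.views import Home
#
# urlpatterns += [
#     url(r'^$', views.home, name='home'),           # function view
#     url(r'^home/$', Home.as_view(), name='home'),  # class-based view
#     url(r'^blog/', include('blog.urls')),          # include another URLconf
# ]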
| 35.26087
| 79
| 0.696671
|
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('^', include('buckets.urls')),
]
| true
| true
|
1c47be6c708b01f8c5d2442695b7f5df61fef530
| 1,547
|
py
|
Python
|
tests/python/gaia-ui-tests/gaiatest/tests/functional/clock/test_clock_run_stopwatch_laps.py
|
NickProgramm/gaia
|
975a35c0f5010df341e96d6c5ec60217f5347412
|
[
"Apache-2.0"
] | 3
|
2016-08-17T08:52:51.000Z
|
2020-03-29T04:56:45.000Z
|
tests/python/gaia-ui-tests/gaiatest/tests/functional/clock/test_clock_run_stopwatch_laps.py
|
NickProgramm/gaia
|
975a35c0f5010df341e96d6c5ec60217f5347412
|
[
"Apache-2.0"
] | null | null | null |
tests/python/gaia-ui-tests/gaiatest/tests/functional/clock/test_clock_run_stopwatch_laps.py
|
NickProgramm/gaia
|
975a35c0f5010df341e96d6c5ec60217f5347412
|
[
"Apache-2.0"
] | 1
|
2021-11-18T21:21:19.000Z
|
2021-11-18T21:21:19.000Z
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.clock.app import Clock
import time
class TestClockRunStopWatch(GaiaTestCase):
def setUp(self):
GaiaTestCase.setUp(self)
self.clock = Clock(self.marionette)
self.clock.launch()
def test_click_run_stopwatch_laps(self):
stopwatch_view = self.clock.switch_view("stopwatch")
self.assertEqual(stopwatch_view.current_time, '00:00.00')
stopwatch_view.tap_start()
time.sleep(0.2)
self.assertNotEqual(stopwatch_view.current_time, '00:00.00')
stopwatch_view.tap_lap()
time.sleep(0.2)
self.assertEqual(len(stopwatch_view.lap_items), 2)
self.assertNotEqual(stopwatch_view.lap_items[0].time, '00:00.00')
self.assertNotEqual(stopwatch_view.lap_items[1].time, '00:00.00')
self.assertNotEqual(stopwatch_view.lap_items[0].time, stopwatch_view.lap_items[1].time)
stopwatch_view.tap_pause()
recorded_time = stopwatch_view.current_time
stopwatch_view.tap_resume()
time.sleep(0.2)
self.assertNotEqual(stopwatch_view.current_time, recorded_time)
stopwatch_view.tap_pause()
stopwatch_view.tap_reset()
self.assertEqual(len(stopwatch_view.lap_items), 0)
self.assertEqual(stopwatch_view.current_time, '00:00.00')
| 30.94
| 95
| 0.701357
|
from gaiatest import GaiaTestCase
from gaiatest.apps.clock.app import Clock
import time
class TestClockRunStopWatch(GaiaTestCase):
def setUp(self):
GaiaTestCase.setUp(self)
self.clock = Clock(self.marionette)
self.clock.launch()
def test_click_run_stopwatch_laps(self):
stopwatch_view = self.clock.switch_view("stopwatch")
self.assertEqual(stopwatch_view.current_time, '00:00.00')
stopwatch_view.tap_start()
time.sleep(0.2)
self.assertNotEqual(stopwatch_view.current_time, '00:00.00')
stopwatch_view.tap_lap()
time.sleep(0.2)
self.assertEqual(len(stopwatch_view.lap_items), 2)
self.assertNotEqual(stopwatch_view.lap_items[0].time, '00:00.00')
self.assertNotEqual(stopwatch_view.lap_items[1].time, '00:00.00')
self.assertNotEqual(stopwatch_view.lap_items[0].time, stopwatch_view.lap_items[1].time)
stopwatch_view.tap_pause()
recorded_time = stopwatch_view.current_time
stopwatch_view.tap_resume()
time.sleep(0.2)
self.assertNotEqual(stopwatch_view.current_time, recorded_time)
stopwatch_view.tap_pause()
stopwatch_view.tap_reset()
self.assertEqual(len(stopwatch_view.lap_items), 0)
self.assertEqual(stopwatch_view.current_time, '00:00.00')
| true
| true
|
1c47be9cac33d18c0c0a8c405deb236cf91a9e3f
| 14,282
|
py
|
Python
|
test/functional/p2p_unrequested_blocks.py
|
Quirky-Turt-Crypto/Quirky-Turt-Coin
|
2fce9fe4f3be715a8ad3269ed9cefb4e5b6fad59
|
[
"MIT"
] | null | null | null |
test/functional/p2p_unrequested_blocks.py
|
Quirky-Turt-Crypto/Quirky-Turt-Coin
|
2fce9fe4f3be715a8ad3269ed9cefb4e5b6fad59
|
[
"MIT"
] | null | null | null |
test/functional/p2p_unrequested_blocks.py
|
Quirky-Turt-Crypto/Quirky-Turt-Coin
|
2fce9fe4f3be715a8ad3269ed9cefb4e5b6fad59
|
[
"MIT"
] | 1
|
2021-05-16T16:09:23.000Z
|
2021-05-16T16:09:23.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c. Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that it is missing the height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
from test_framework.mininode import *
from test_framework.test_framework import quirkyturtTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(quirkyturtTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "quirkyturtd"),
help="quirkyturtd binary to test")
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 (started with
        # -minimumchainwork=0x10) will be used to test the interaction with
        # nMinimumChainWork.
self.setup_nodes()
def run_test(self):
# Setup the p2p connections and start up the network thread.
# test_node connects to node0 (not whitelisted)
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        # min_work_node connects to node1 (which has nMinimumChainWork set)
min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
network_thread_start()
# Test logic begins here
test_node.wait_for_verack()
min_work_node.wait_for_verack()
# 1. Have nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
min_work_node.send_message(msg_block(blocks_h2[1]))
for x in [test_node, min_work_node]:
x.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
block_time += 1
block_h1f.solve()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
        # 4. Send another two blocks that build on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
block_time += 1
block_h2f.solve()
test_node.send_message(msg_block(block_h2f))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
# But this block should be accepted by node since it has equal work.
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
block_h3.solve()
test_node.send_message(msg_block(block_h3))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
self.nodes[0].getblock(block_h3.hash)
# But this block should be accepted by node since it has more work.
self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
# 4c. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height too high) on the node (as long as it's not missing any headers)
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
next_block.solve()
all_blocks.append(next_block)
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted (missing header)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
        # 5. Test handling of an unrequested block on the node that didn't
        # process it. It should still not be processed (even though it has a
        # child that has more work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
self.nodes[1].disconnect_p2ps()
network_thread_join()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
# 8. Create a chain which is invalid at a height longer than the
# current chain, but which has more blocks on top of that
block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
block_289f.solve()
block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
block_290f.solve()
block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
# block_291 spends a coinbase below maturity!
block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
block_291.hashMerkleRoot = block_291.calc_merkle_root()
block_291.solve()
block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
block_292.solve()
# Now send all the headers on the chain and enough blocks to trigger reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
headers_message.headers.append(CBlockHeader(block_291))
headers_message.headers.append(CBlockHeader(block_292))
test_node.send_message(headers_message)
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_292.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_message(msg_block(block_290f))
test_node.sync_with_ping()
self.nodes[0].getblock(block_289f.hash)
self.nodes[0].getblock(block_290f.hash)
test_node.send_message(msg_block(block_291))
        # At this point we've sent an obviously-bogus block; wait for full
        # processing without assuming whether we will be disconnected or not
try:
# Only wait a short while so the test doesn't take forever if we do get
# disconnected
test_node.sync_with_ping(timeout=1)
except AssertionError:
test_node.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
# We should have failed reorg and switched back to 290 (but have block 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
# Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
block_293.solve()
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_293))
test_node.send_message(headers_message)
test_node.wait_for_disconnect()
# 9. Connect node1 to node0 and ensure it is able to sync
connect_nodes(self.nodes[0], 1)
self.sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
| 44.080247
| 113
| 0.676096
|
from test_framework.mininode import *
from test_framework.test_framework import quirkyturtTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(quirkyturtTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "quirkyturtd"),
help="quirkyturtd binary to test")
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
self.setup_nodes()
def run_test(self):
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
min_work_node.wait_for_verack()
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
blocks_h2 = []
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
min_work_node.send_message(msg_block(blocks_h2[1]))
for x in [test_node, min_work_node]:
x.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
block_time += 1
block_h1f.solve()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
        # 4. Send another two blocks that build on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
block_time += 1
block_h2f.solve()
test_node.send_message(msg_block(block_h2f))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
block_h3.solve()
test_node.send_message(msg_block(block_h3))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
self.nodes[0].getblock(block_h3.hash)
self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
next_block.solve()
all_blocks.append(next_block)
tip = next_block
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
        # It should still not be processed (even though it has a child that
        # has more work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
self.nodes[1].disconnect_p2ps()
network_thread_join()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
# 8. Create a chain which is invalid at a height longer than the
# current chain, but which has more blocks on top of that
block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
block_289f.solve()
block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
block_290f.solve()
block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
# block_291 spends a coinbase below maturity!
block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
block_291.hashMerkleRoot = block_291.calc_merkle_root()
block_291.solve()
block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
block_292.solve()
# Now send all the headers on the chain and enough blocks to trigger reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
headers_message.headers.append(CBlockHeader(block_291))
headers_message.headers.append(CBlockHeader(block_292))
test_node.send_message(headers_message)
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_292.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_message(msg_block(block_290f))
test_node.sync_with_ping()
self.nodes[0].getblock(block_289f.hash)
self.nodes[0].getblock(block_290f.hash)
test_node.send_message(msg_block(block_291))
# At this point we've sent an obviously-bogus block, wait for full processing
try:
test_node.sync_with_ping(timeout=1)
except AssertionError:
test_node.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
# We should have failed reorg and switched back to 290 (but have block 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
# Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
block_293.solve()
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_293))
test_node.send_message(headers_message)
test_node.wait_for_disconnect()
connect_nodes(self.nodes[0], 1)
self.sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
| true
| true
|
1c47beb831ed519d0ec874e7fd8ab065c7a7379d
| 6,290
|
py
|
Python
|
patent_example/patent_example.py
|
RobKraft/dedupe-examples
|
bf02a805f8d1a0581b07c1eb81503c769b9541f1
|
[
"MIT"
] | null | null | null |
patent_example/patent_example.py
|
RobKraft/dedupe-examples
|
bf02a805f8d1a0581b07c1eb81503c769b9541f1
|
[
"MIT"
] | null | null | null |
patent_example/patent_example.py
|
RobKraft/dedupe-examples
|
bf02a805f8d1a0581b07c1eb81503c769b9541f1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This code demonstrates how to use dedupe to disambiguate patent
authors and demonstrates the Set and LatLong data types.
"""
import os
import csv
import logging
import optparse
import dedupe
def readData(filename, set_delim='**'):
"""
Remap columns for the following cases:
- Lat and Long are mapped into a single LatLong tuple
- Class and Coauthor are stored as delimited strings but mapped into
tuples
"""
data_d = {}
with open(filename) as f:
reader = csv.DictReader(f)
for idx, row in enumerate(reader):
row = dict((k, v.lower()) for k, v in row.items())
if row['Lat'] == row['Lng'] == '0.0':
row['LatLong'] = None
else:
row['LatLong'] = (float(row['Lat']), float(row['Lng']))
row['Class'] = tuple(sorted(row['Class'].split(set_delim))) if row['Class'] else None
row['Coauthor'] = tuple(sorted([author for author
in row['Coauthor'].split(set_delim)
if author != 'none']))
if row['Name'] == '':
row['Name'] = None
data_d[idx] = row
return data_d
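
# Illustrative example of the remapping above, on a hypothetical input row:
#   {'Name': 'ACME', 'Lat': '52.1', 'Lng': '4.3',
#    'Class': 'b**a', 'Coauthor': 'none'}
# becomes (values lower-cased, keys unchanged):
#   {'Name': 'acme', 'Lat': '52.1', 'Lng': '4.3', 'LatLong': (52.1, 4.3),
#    'Class': ('a', 'b'), 'Coauthor': ()}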
# These generators will give us the corpora setting up the Set
# distance metrics
def classes(data):
for record in data.values():
yield record['Class']
def coauthors(data):
for record in data.values():
yield record['Coauthor']
def names(data):
for record in data.values():
yield record['Name']
if __name__ == '__main__':
# ## Logging
# Dedupe uses Python logging to show or suppress verbose output. Added
# for convenience. To enable verbose logging, run `python
# patent_example.py -v`
optp = optparse.OptionParser()
optp.add_option('-v', '--verbose', dest='verbose', action='count',
help='Increase verbosity (specify multiple times for more)'
)
(opts, args) = optp.parse_args()
log_level = logging.WARNING
if opts.verbose:
if opts.verbose == 1:
log_level = logging.INFO
elif opts.verbose > 1:
log_level = logging.DEBUG
logging.getLogger().setLevel(log_level)
input_file = 'patstat_input.csv'
output_file = 'patstat_output.csv'
settings_file = 'patstat_settings.json'
training_file = 'patstat_training.json'
scriptpath = os.path.dirname(__file__)
input_file = os.path.join(scriptpath, input_file)
output_file = os.path.join(scriptpath, output_file)
settings_file = os.path.join(scriptpath, settings_file)
training_file = os.path.join(scriptpath, training_file)
print('importing data ...')
data_d = readData(input_file)
# ## Training
if os.path.exists(settings_file):
print('reading from', settings_file)
with open(settings_file, 'rb') as sf:
deduper = dedupe.StaticDedupe(sf, num_cores=2)
else:
# Define the fields dedupe will pay attention to
fields = [
{'field': 'Name',
'variable name': 'Name',
'type': 'String',
'has missing': True},
{'field': 'LatLong',
'type': 'LatLong',
'has missing': True},
{'field': 'Class',
'variable name': 'Class',
'type': 'Set',
'corpus': classes(data_d),
'has missing': True},
{'field': 'Coauthor',
'variable name': 'Coauthor',
'type': 'Set',
'corpus': coauthors(data_d),
'has missing': True},
{'field': 'Name',
'variable name': 'Name Text',
'type': 'Text',
'corpus': names(data_d),
'has missing': True},
{'type': 'Interaction',
'interaction variables': ['Name', 'Name Text']}
]
# Create a new deduper object and pass our data model to it.
deduper = dedupe.Dedupe(fields, num_cores=2)
# If we have training data saved from a previous run of dedupe,
        # look for it and load it in.
if os.path.exists(training_file):
print('reading labeled examples from ', training_file)
with open(training_file) as tf:
deduper.prepare_training(data_d, training_file=tf)
else:
deduper.prepare_training(data_d)
# ## Active learning
# Starts the training loop. Dedupe will find the next pair of records
# it is least certain about and ask you to label them as duplicates
# or not.
# use 'y', 'n' and 'u' keys to flag duplicates
# press 'f' when you are finished
print('starting active labeling...')
dedupe.console_label(deduper)
deduper.train()
# When finished, save our training away to disk
with open(training_file, 'w') as tf:
deduper.write_training(tf)
# Save our weights and predicates to disk. If the settings file
# exists, we will skip all the training and learning next time we run
# this file.
with open(settings_file, 'wb') as sf:
deduper.write_settings(sf)
clustered_dupes = deduper.partition(data_d, 0.5)
print('# duplicate sets', len(clustered_dupes))
# ## Writing Results
# Write our original data back out to a CSV with a new column called
# 'Cluster ID' which indicates which records refer to each other.
cluster_membership = {}
for cluster_id, (records, scores) in enumerate(clustered_dupes):
for record_id, score in zip(records, scores):
cluster_membership[record_id] = {
"Cluster ID": cluster_id,
"confidence_score": score
}
with open(output_file, 'w') as f_output, open(input_file) as f_input:
reader = csv.DictReader(f_input)
fieldnames = ['Cluster ID', 'confidence_score'] + reader.fieldnames
writer = csv.DictWriter(f_output, fieldnames=fieldnames)
writer.writeheader()
for row_id, row in enumerate(reader):
row.update(cluster_membership[row_id])
writer.writerow(row)
| 31.767677
| 97
| 0.583466
|
import os
import csv
import logging
import optparse
import dedupe
def readData(filename, set_delim='**'):
data_d = {}
with open(filename) as f:
reader = csv.DictReader(f)
for idx, row in enumerate(reader):
row = dict((k, v.lower()) for k, v in row.items())
if row['Lat'] == row['Lng'] == '0.0':
row['LatLong'] = None
else:
row['LatLong'] = (float(row['Lat']), float(row['Lng']))
row['Class'] = tuple(sorted(row['Class'].split(set_delim))) if row['Class'] else None
row['Coauthor'] = tuple(sorted([author for author
in row['Coauthor'].split(set_delim)
if author != 'none']))
if row['Name'] == '':
row['Name'] = None
data_d[idx] = row
return data_d
def classes(data):
for record in data.values():
yield record['Class']
def coauthors(data):
for record in data.values():
yield record['Coauthor']
def names(data):
for record in data.values():
yield record['Name']
if __name__ == '__main__':
optp = optparse.OptionParser()
optp.add_option('-v', '--verbose', dest='verbose', action='count',
help='Increase verbosity (specify multiple times for more)'
)
(opts, args) = optp.parse_args()
log_level = logging.WARNING
if opts.verbose:
if opts.verbose == 1:
log_level = logging.INFO
elif opts.verbose > 1:
log_level = logging.DEBUG
logging.getLogger().setLevel(log_level)
input_file = 'patstat_input.csv'
output_file = 'patstat_output.csv'
settings_file = 'patstat_settings.json'
training_file = 'patstat_training.json'
scriptpath = os.path.dirname(__file__)
input_file = os.path.join(scriptpath, input_file)
output_file = os.path.join(scriptpath, output_file)
settings_file = os.path.join(scriptpath, settings_file)
training_file = os.path.join(scriptpath, training_file)
print('importing data ...')
data_d = readData(input_file)
    if os.path.exists(settings_file):
print('reading from', settings_file)
with open(settings_file, 'rb') as sf:
deduper = dedupe.StaticDedupe(sf, num_cores=2)
else:
fields = [
{'field': 'Name',
'variable name': 'Name',
'type': 'String',
'has missing': True},
{'field': 'LatLong',
'type': 'LatLong',
'has missing': True},
{'field': 'Class',
'variable name': 'Class',
'type': 'Set',
'corpus': classes(data_d),
'has missing': True},
{'field': 'Coauthor',
'variable name': 'Coauthor',
'type': 'Set',
'corpus': coauthors(data_d),
'has missing': True},
{'field': 'Name',
'variable name': 'Name Text',
'type': 'Text',
'corpus': names(data_d),
'has missing': True},
{'type': 'Interaction',
'interaction variables': ['Name', 'Name Text']}
]
deduper = dedupe.Dedupe(fields, num_cores=2)
if os.path.exists(training_file):
print('reading labeled examples from ', training_file)
with open(training_file) as tf:
deduper.prepare_training(data_d, training_file=tf)
else:
deduper.prepare_training(data_d)
print('starting active labeling...')
dedupe.console_label(deduper)
deduper.train()
with open(training_file, 'w') as tf:
deduper.write_training(tf)
with open(settings_file, 'wb') as sf:
deduper.write_settings(sf)
clustered_dupes = deduper.partition(data_d, 0.5)
print('# duplicate sets', len(clustered_dupes))
    cluster_membership = {}
for cluster_id, (records, scores) in enumerate(clustered_dupes):
for record_id, score in zip(records, scores):
cluster_membership[record_id] = {
"Cluster ID": cluster_id,
"confidence_score": score
}
with open(output_file, 'w') as f_output, open(input_file) as f_input:
reader = csv.DictReader(f_input)
fieldnames = ['Cluster ID', 'confidence_score'] + reader.fieldnames
writer = csv.DictWriter(f_output, fieldnames=fieldnames)
writer.writeheader()
for row_id, row in enumerate(reader):
row.update(cluster_membership[row_id])
writer.writerow(row)
| true
| true
|
1c47c26b2239aaaa497597e10ff585638018c10a
| 446
|
py
|
Python
|
oo/teste_carro.py
|
vladimirvinicius/pythonbirds
|
2c0c6bfcda6fbeaffc36f6f04ccd94ab704e0b1a
|
[
"MIT"
] | 1
|
2020-10-04T03:29:20.000Z
|
2020-10-04T03:29:20.000Z
|
oo/teste_carro.py
|
JosemarBrito/pythonbirds
|
eaa80f98bd4365b1146556b5f144dbab03fbf9bb
|
[
"MIT"
] | null | null | null |
oo/teste_carro.py
|
JosemarBrito/pythonbirds
|
eaa80f98bd4365b1146556b5f144dbab03fbf9bb
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from oo.carro import Motor
class CarroTestCase(TestCase):
def teste_velocidade_inicial(self):
motor = Motor()
self.assertEqual(0, motor.velocidade)
def teste_acelerar(self):
motor = Motor()
motor.acelerar()
self.assertEqual(1, motor.velocidade)
def teste_frear(self):
motor = Motor()
motor.frear()
self.assertEqual(0, motor.velocidade)
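
# Illustrative sketch: the Motor class under test is not included in this
# file; a minimal implementation consistent with the assertions above (an
# assumption, not the project's actual oo/carro.py) might be:
#
# class Motor:
#     def __init__(self):
#         self.velocidade = 0
#
#     def acelerar(self):
#         self.velocidade += 1
#
#     def frear(self):
#         self.velocidade = max(0, self.velocidade - 1)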
| 23.473684
| 45
| 0.650224
|
from unittest import TestCase
from oo.carro import Motor
class CarroTestCase(TestCase):
def teste_velocidade_inicial(self):
motor = Motor()
self.assertEqual(0, motor.velocidade)
def teste_acelerar(self):
motor = Motor()
motor.acelerar()
self.assertEqual(1, motor.velocidade)
def teste_frear(self):
motor = Motor()
motor.frear()
self.assertEqual(0, motor.velocidade)
| true
| true
|
1c47c3ee33915e701135e1412bec7e390f756847
| 2,676
|
py
|
Python
|
gamma_cloudinary/config.py
|
barakaVictor/django-gamma-cloudinary
|
598af46844ca7b2de3cc832cb0d8dd3f9742e625
|
[
"BSD-3-Clause"
] | 1
|
2022-03-13T13:44:19.000Z
|
2022-03-13T13:44:19.000Z
|
gamma_cloudinary/config.py
|
barakaVictor/django-gamma-cloudinary
|
598af46844ca7b2de3cc832cb0d8dd3f9742e625
|
[
"BSD-3-Clause"
] | 4
|
2021-09-22T11:44:24.000Z
|
2022-01-13T11:06:54.000Z
|
gamma_cloudinary/config.py
|
barakaVictor/django-gamma-cloudinary
|
598af46844ca7b2de3cc832cb0d8dd3f9742e625
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import cloudinary
from operator import itemgetter
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
#Execute setup code for cloudinary configuration
def setup_cloudinary():
if settings.configured:
try:
#check for the existence of CLOUDINARY_STORAGE object in django settings module
cloudinary_settings = getattr(settings, 'CLOUDINARY_STORAGE')
#if CLOUDINARY_STORAGE exists check for the minimum required keys to get cloudinary up and running
itemgetter('CLOUD_NAME', 'API_KEY', 'API_SECRET')(cloudinary_settings)
except AttributeError:
#if CLOUDINARY_STORAGE is not set check for the existence of
#either CLOUDINARY_URL or (CLOUDINARY_CLOUD_NAME, CLOUDINARY_API_KEY, CLOUDINARY_API_SECRET)
#environment variables and exit silently if they have been set
if os.environ.get('CLOUDINARY_URL'):
pass
            elif (os.environ.get('CLOUDINARY_CLOUD_NAME') and os.environ.get('CLOUDINARY_API_KEY') and os.environ.get('CLOUDINARY_API_SECRET')):
pass
else:
            #else raise an ImproperlyConfigured exception if CLOUDINARY_STORAGE does not exist in
#the django settings module and CLOUDINARY_URL or (CLOUDINARY_CLOUD_NAME, CLOUDINARY_API_KEY, CLOUDINARY_API_SECRET)
#environment variables have not been set
raise ImproperlyConfigured('In order to use cloudinary storage, you need to provide '
'CLOUDINARY_STORAGE dictionary with CLOUD_NAME, API_SECRET '
                    'and API_KEY in the django settings module, or set the CLOUDINARY_URL '
                    '(or CLOUDINARY_CLOUD_NAME, CLOUDINARY_API_KEY, CLOUDINARY_API_SECRET) '
                    'environment variables.')
except KeyError as e:
#raise ImproperlyConfigured exception if CLOUDINARY_STORAGE has been set in the django settings
#module but without all of the minimum required attributes(CLOUD_NAME, API_KEY, API_SECRET)
#to get cloudinary working
raise ImproperlyConfigured(f'{e.args[0]} is a required setting in the cloudinary config.')
else:
            #While passing config parameters to cloudinary.config(), run a dictionary
            #comprehension to convert all keys to snake_case format, as required by
            #cloudinary's configuration conventions
cloudinary.config(**{key.lower(): value for key, value in cloudinary_settings.items()})
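
# Illustrative sketch of the settings this function expects (all values are
# placeholders):
#
# CLOUDINARY_STORAGE = {
#     'CLOUD_NAME': 'my-cloud',
#     'API_KEY': '123456789012345',
#     'API_SECRET': 'abcdefghijklmnopqrstuvwxyz0',
# }
#
# or, equivalently, a single CLOUDINARY_URL environment variable, e.g.
# CLOUDINARY_URL=cloudinary://<API_KEY>:<API_SECRET>@<CLOUD_NAME>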
| 58.173913
| 142
| 0.664051
|
import os
import cloudinary
from operator import itemgetter
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def setup_cloudinary():
if settings.configured:
try:
cloudinary_settings = getattr(settings, 'CLOUDINARY_STORAGE')
itemgetter('CLOUD_NAME', 'API_KEY', 'API_SECRET')(cloudinary_settings)
except AttributeError:
if os.environ.get('CLOUDINARY_URL'):
pass
            elif (os.environ.get('CLOUDINARY_CLOUD_NAME') and os.environ.get('CLOUDINARY_API_KEY') and os.environ.get('CLOUDINARY_API_SECRET')):
pass
else:
raise ImproperlyConfigured('In order to use cloudinary storage, you need to provide '
'CLOUDINARY_STORAGE dictionary with CLOUD_NAME, API_SECRET '
                    'and API_KEY in the django settings module, or set the CLOUDINARY_URL '
                    '(or CLOUDINARY_CLOUD_NAME, CLOUDINARY_API_KEY, CLOUDINARY_API_SECRET) '
                    'environment variables.')
except KeyError as e:
raise ImproperlyConfigured(f'{e.args[0]} is a required setting in the cloudinary config.')
else:
cloudinary.config(**{key.lower(): value for key, value in cloudinary_settings.items()})
| true
| true
|
1c47c4f4f4455be041aae5c83a2d2cfc01c700b7
| 1,554
|
py
|
Python
|
pytest_curl_report/plugin.py
|
t2y/pytest-curl-report
|
8690d8e6b78ad578af07ffad592556119304dac8
|
[
"Apache-2.0"
] | null | null | null |
pytest_curl_report/plugin.py
|
t2y/pytest-curl-report
|
8690d8e6b78ad578af07ffad592556119304dac8
|
[
"Apache-2.0"
] | null | null | null |
pytest_curl_report/plugin.py
|
t2y/pytest-curl-report
|
8690d8e6b78ad578af07ffad592556119304dac8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
from .curl import Curl
from .utils import get_inspect_functions
PLUGIN_NAMESPACE = 'curl_report'
def pytest_addoption(parser):
group = parser.getgroup('curlreport', 'curl report')
group.addoption(
'--no-curl-report', dest='no_curl_report',
action='store_true', default=False,
help='not generate curl report when a testcase is failed'
)
group.addoption(
'--curl-report-only', dest='curl_report_only',
action='store_true', default=False,
help='strip pytest assertion log and generate curl report only'
)
def pytest_runtest_makereport(__multicall__, item, call):
if item.config.option.no_curl_report:
return
report = __multicall__.execute()
if report.longrepr is None:
return report
if item.config.option.curl_report_only:
if hasattr(report, 'longrepr'):
if hasattr(report.longrepr, 'reprtraceback'):
# HACK: set dummy reporting function for traceback report
report.longrepr.reprtraceback.toterminal = lambda x: None
extra_info = getattr(pytest, PLUGIN_NAMESPACE, object())
inspect_funcs = get_inspect_functions()
for _, obj in call.excinfo.traceback[0].frame.f_locals.items():
for func in inspect_funcs:
r = func(obj)
if r is not None:
cmd = Curl(r, extra_info).make_command()
report.longrepr.addsection('How to reproduce with curl', cmd)
break
return report
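
# Illustrative usage (based on the options registered above; behavior as
# implemented in pytest_runtest_makereport):
#   pytest                      -> failed tests that expose a known HTTP
#                                  object get a "How to reproduce with curl"
#                                  section appended to the failure report
#   pytest --curl-report-only   -> the traceback output is suppressed,
#                                  leaving only the curl section
#   pytest --no-curl-report     -> the plugin leaves the report untouched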
| 31.08
| 77
| 0.651866
|
import pytest
from .curl import Curl
from .utils import get_inspect_functions
PLUGIN_NAMESPACE = 'curl_report'
def pytest_addoption(parser):
group = parser.getgroup('curlreport', 'curl report')
group.addoption(
'--no-curl-report', dest='no_curl_report',
action='store_true', default=False,
help='not generate curl report when a testcase is failed'
)
group.addoption(
'--curl-report-only', dest='curl_report_only',
action='store_true', default=False,
help='strip pytest assertion log and generate curl report only'
)
def pytest_runtest_makereport(__multicall__, item, call):
if item.config.option.no_curl_report:
return
report = __multicall__.execute()
if report.longrepr is None:
return report
if item.config.option.curl_report_only:
if hasattr(report, 'longrepr'):
if hasattr(report.longrepr, 'reprtraceback'):
report.longrepr.reprtraceback.toterminal = lambda x: None
extra_info = getattr(pytest, PLUGIN_NAMESPACE, object())
inspect_funcs = get_inspect_functions()
for _, obj in call.excinfo.traceback[0].frame.f_locals.items():
for func in inspect_funcs:
r = func(obj)
if r is not None:
cmd = Curl(r, extra_info).make_command()
report.longrepr.addsection('How to reproduce with curl', cmd)
break
return report
| true
| true
|
1c47c521e31ebadae1e4b554a33840207018eda8
| 336
|
py
|
Python
|
quilljs_example/example/models.py
|
muke5hy/django-quill
|
16250b9c9418907123c8b40ddc66523af5d4e4d4
|
[
"BSD-3-Clause"
] | 11
|
2019-02-20T08:58:43.000Z
|
2021-01-03T16:41:07.000Z
|
quilljs_example/example/models.py
|
muke5hy/django-quill
|
16250b9c9418907123c8b40ddc66523af5d4e4d4
|
[
"BSD-3-Clause"
] | null | null | null |
quilljs_example/example/models.py
|
muke5hy/django-quill
|
16250b9c9418907123c8b40ddc66523af5d4e4d4
|
[
"BSD-3-Clause"
] | 3
|
2019-10-08T18:04:01.000Z
|
2020-11-02T12:15:03.000Z
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from quilljs.fields import RichTextField
@python_2_unicode_compatible
class ExampleModel(models.Model):
editor = RichTextField()
editor2 = RichTextField(config='basic')
def __str__(self):
return 'This is just an example'
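
# Illustrative sketch (an assumption, not part of this app): exposing the
# two rich-text fields through a standard Django ModelForm.
#
# from django.forms import ModelForm
#
# class ExampleForm(ModelForm):
#     class Meta:
#         model = ExampleModel
#         fields = ['editor', 'editor2']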
| 24
| 61
| 0.77381
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from quilljs.fields import RichTextField
@python_2_unicode_compatible
class ExampleModel(models.Model):
editor = RichTextField()
editor2 = RichTextField(config='basic')
def __str__(self):
return 'This is just an example'
| true
| true
|
1c47c5651fa334d977285c340e3c9f7fa5d3f735
| 2,263
|
py
|
Python
|
setup.py
|
RobertDeRose/python-robin-srv
|
dcb3b8a0dff71f2b63695fdab48b322998328fc2
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
RobertDeRose/python-robin-srv
|
dcb3b8a0dff71f2b63695fdab48b322998328fc2
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
RobertDeRose/python-robin-srv
|
dcb3b8a0dff71f2b63695fdab48b322998328fc2
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
setup(
name='robin-srv',
version='0.1.0',
license='BSD',
description='A utility library to help with client-side load balancing based on SRV records.',
long_description='%s\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
author='Robert DeRose',
author_email='RobertDeRose@gmail.com',
url='https://github.com/RobertDeRose/python-robin-srv',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Utilities',
],
keywords=[
# eg: 'keyword1', 'keyword2', 'keyword3',
],
install_requires=[
'dnspython'
],
extras_require={
# eg:
# 'rst': ['docutils>=0.11'],
# ':python_version=="2.6"': ['argparse'],
},
entry_points={
'console_scripts': [
'robin-srv = robin_srv.cli:main',
]
},
)
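
# Illustrative usage (from the entry_points above): `pip install .` installs
# a `robin-srv` console script that dispatches to robin_srv.cli:main.
#
#   $ pip install .
#   $ robin-srv   # arguments, if any, are defined by robin_srv.cli:main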
| 30.581081
| 98
| 0.606717
|
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
setup(
name='robin-srv',
version='0.1.0',
license='BSD',
description='A utility library to help with client-side load balancing based on SRV records.',
long_description='%s\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
author='Robert DeRose',
author_email='RobertDeRose@gmail.com',
url='https://github.com/RobertDeRose/python-robin-srv',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Utilities',
],
keywords=[
],
install_requires=[
'dnspython'
],
extras_require={
},
entry_points={
'console_scripts': [
'robin-srv = robin_srv.cli:main',
]
},
)
| true
| true
|
1c47c5ded622153fdda38f1bf3179dad8b91b2a3
| 2,653
|
py
|
Python
|
tests/test_build.py
|
martinruenz/pytorch3d
|
7f1e63aed1252ba8145d4a66ce2272331d60cdae
|
[
"BSD-3-Clause"
] | 3
|
2022-03-09T08:12:54.000Z
|
2022-03-10T01:57:03.000Z
|
tests/test_build.py
|
martinruenz/pytorch3d
|
7f1e63aed1252ba8145d4a66ce2272331d60cdae
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_build.py
|
martinruenz/pytorch3d
|
7f1e63aed1252ba8145d4a66ce2272331d60cdae
|
[
"BSD-3-Clause"
] | 1
|
2020-09-15T06:01:18.000Z
|
2020-09-15T06:01:18.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import os
import unittest
from collections import Counter
from pathlib import Path
# This file groups together tests which look at the code without running it.
# When running the tests inside conda's build, the code is not available.
in_conda_build = os.environ.get("CONDA_BUILD_STATE", "") == "TEST"
class TestBuild(unittest.TestCase):
@unittest.skipIf(in_conda_build, "In conda build")
def test_name_clash(self):
# For setup.py, all translation units need distinct names, so we
# cannot have foo.cu and foo.cpp, even in different directories.
test_dir = Path(__file__).resolve().parent
source_dir = test_dir.parent / "pytorch3d"
stems = []
for extension in [".cu", ".cpp"]:
files = source_dir.glob(f"**/*{extension}")
stems.extend(f.stem for f in files)
counter = Counter(stems)
for k, v in counter.items():
self.assertEqual(v, 1, f"Too many files with stem {k}.")
@unittest.skipIf(in_conda_build, "In conda build")
def test_deprecated_usage(self):
# Check certain expressions do not occur in the csrc code
test_dir = Path(__file__).resolve().parent
source_dir = test_dir.parent / "pytorch3d" / "csrc"
files = sorted(source_dir.glob("**/*.*"))
self.assertGreater(len(files), 4)
patterns = [".type()", ".data()"]
for file in files:
with open(file) as f:
text = f.read()
for pattern in patterns:
found = pattern in text
msg = (
f"{pattern} found in {file.name}"
+ ", this has been deprecated."
)
self.assertFalse(found, msg)
@unittest.skipIf(in_conda_build, "In conda build")
def test_copyright(self):
test_dir = Path(__file__).resolve().parent
root_dir = test_dir.parent
extensions = ("py", "cu", "cuh", "cpp", "h", "hpp", "sh")
expect = (
"Copyright (c) Facebook, Inc. and its affiliates."
+ " All rights reserved.\n"
)
for extension in extensions:
for i in root_dir.glob(f"**/*.{extension}"):
with open(i) as f:
firstline = f.readline()
if firstline.startswith(("# -*-", "#!")):
firstline = f.readline()
self.assertTrue(
firstline.endswith(expect), f"{i} missing copyright header."
)
| 36.342466
| 84
| 0.560498
|
import os
import unittest
from collections import Counter
from pathlib import Path
in_conda_build = os.environ.get("CONDA_BUILD_STATE", "") == "TEST"
class TestBuild(unittest.TestCase):
@unittest.skipIf(in_conda_build, "In conda build")
def test_name_clash(self):
# For setup.py, all translation units need distinct names, so we
# cannot have foo.cu and foo.cpp, even in different directories.
test_dir = Path(__file__).resolve().parent
source_dir = test_dir.parent / "pytorch3d"
stems = []
for extension in [".cu", ".cpp"]:
files = source_dir.glob(f"**/*{extension}")
stems.extend(f.stem for f in files)
counter = Counter(stems)
for k, v in counter.items():
self.assertEqual(v, 1, f"Too many files with stem {k}.")
@unittest.skipIf(in_conda_build, "In conda build")
def test_deprecated_usage(self):
# Check certain expressions do not occur in the csrc code
test_dir = Path(__file__).resolve().parent
source_dir = test_dir.parent / "pytorch3d" / "csrc"
files = sorted(source_dir.glob("**/*.*"))
self.assertGreater(len(files), 4)
patterns = [".type()", ".data()"]
for file in files:
with open(file) as f:
text = f.read()
for pattern in patterns:
found = pattern in text
msg = (
f"{pattern} found in {file.name}"
+ ", this has been deprecated."
)
self.assertFalse(found, msg)
@unittest.skipIf(in_conda_build, "In conda build")
def test_copyright(self):
test_dir = Path(__file__).resolve().parent
root_dir = test_dir.parent
extensions = ("py", "cu", "cuh", "cpp", "h", "hpp", "sh")
expect = (
"Copyright (c) Facebook, Inc. and its affiliates."
+ " All rights reserved.\n"
)
for extension in extensions:
for i in root_dir.glob(f"**/*.{extension}"):
with open(i) as f:
firstline = f.readline()
if firstline.startswith(("# -*-", "#!")):
firstline = f.readline()
self.assertTrue(
firstline.endswith(expect), f"{i} missing copyright header."
)
| true
| true
|
1c47c5f47a3efdc6396fd4dbe3e492f94d567901
| 8,567
|
py
|
Python
|
pytorch_toolkit/face_recognition/dump_features.py
|
xzry6/openvino_training_extensions
|
b8b17bbcc352633b0f0d3a99d6179a9ec616e426
|
[
"Apache-2.0"
] | 158
|
2019-03-01T15:47:39.000Z
|
2022-02-10T15:10:48.000Z
|
dump_features.py
|
sacchinbhg/face_recognition.pytorch
|
05cb9b30e8220445fcb27988926d88f330091c12
|
[
"Apache-2.0"
] | 6
|
2020-03-08T22:58:13.000Z
|
2022-03-12T00:15:14.000Z
|
dump_features.py
|
sacchinbhg/face_recognition.pytorch
|
05cb9b30e8220445fcb27988926d88f330091c12
|
[
"Apache-2.0"
] | 23
|
2019-03-02T09:18:19.000Z
|
2021-11-06T22:01:56.000Z
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import argparse
import os
import os.path as osp
from tqdm import tqdm
import numpy as np
import glog as log
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms as t
from scripts.matio import save_mat
from model.common import models_backbones
from datasets.megaface import MegaFace
from datasets.trillion_pairs import TrillionPairs
from utils.utils import load_model_state
from utils.augmentation import ResizeNumpy, NumpyToTensor
def clean_megaface(filenames, features, noises_list_path):
"""Filters megaface from outliers"""
with open(noises_list_path, 'r') as f:
noises_list = f.readlines()
noises_list = [line.strip() for line in noises_list]
clean_features = np.zeros((features.shape[0], features.shape[1] + 1), dtype=np.float32)
for i, filename in enumerate(tqdm(filenames)):
clean_features[i, 0: features.shape[1]] = features[i, :]
for line in noises_list:
if line in filename:
clean_features[i, features.shape[1]] = 100.0
break
return clean_features
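# Illustrative sketch (not part of the original script): clean_megaface widens
# each feature vector by one flag column, set to 100.0 when the file matches
# the noise list, so MegaFace tooling can discount it. Shapes are assumptions.
def _example_noise_flag():
    feats = np.ones((2, 4), dtype=np.float32)      # two 4-d embeddings
    clean = np.zeros((2, 5), dtype=np.float32)     # one extra flag column
    clean[:, :4] = feats
    clean[1, 4] = 100.0                            # second file flagged noisy
    return clean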
def clean_facescrub(filenames, features, noises_list_path):
"""Replaces wrong instances of identities from the Facescrub with the centroids of these identities"""
clean_feature_size = features.shape[1] + 1
with open(noises_list_path, 'r') as f:
noises_list = f.readlines()
noises_list = [osp.splitext(line.strip())[0] for line in noises_list]
clean_features = np.zeros((features.shape[0], clean_feature_size), dtype=np.float32)
centroids = {}
for i, filename in enumerate(tqdm(filenames)):
clean_features[i, 0: features.shape[1]] = features[i, :]
id_name = osp.basename(filename).split('_')[0]
        if id_name not in centroids:
centroids[id_name] = np.zeros(clean_feature_size, dtype=np.float32)
centroids[id_name] += clean_features[i, :]
for i, file_path in enumerate(tqdm(filenames)):
filename = osp.basename(file_path)
for line in noises_list:
if line in filename.replace(' ', '_'):
id_name = filename.split('_')[0]
clean_features[i, :] = centroids[id_name] + np.random.uniform(-0.001, 0.001, clean_feature_size)
clean_features[i, :] /= np.linalg.norm(clean_features[i, :])
break
return clean_features
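# Illustrative sketch (not part of the original script): a noisy Facescrub
# photo is replaced by its identity centroid plus tiny jitter and then
# re-normalized, mirroring clean_facescrub above. The vectors are assumptions.
def _example_centroid_replace():
    centroid = np.array([1.0, 0.0, 0.0, 0.0], dtype=np.float32)
    replaced = centroid + np.random.uniform(-0.001, 0.001, centroid.shape)
    return replaced / np.linalg.norm(replaced)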
@torch.no_grad()
def main(args):
input_filenames = []
output_filenames = []
input_dir = os.path.abspath(args.input_dir)
output_dir = os.path.abspath(args.output_dir)
if not args.trillion_format:
log.info('Reading info...')
with open(os.path.join(args.input_dir, os.path.basename(args.input_list)), 'r') as f:
lines = f.readlines()
for line in tqdm(lines):
info = line.strip().split('|')
file = info[0].strip()
filename = os.path.join(input_dir, file)
path, _ = osp.split(filename)
out_folder = path.replace(input_dir, output_dir)
if not osp.isdir(out_folder):
os.makedirs(out_folder)
landmarks = None
bbox = None
if len(info) > 2:
landmarks = info[1].strip().split(' ')
landmarks = [float(x) for x in landmarks]
bbox = info[2].strip().split(' ')
bbox = [int(float(x)) for x in bbox]
outname = filename.replace(input_dir, output_dir) + args.file_ending
input_filenames.append({'path': filename, 'landmarks': landmarks, 'bbox': bbox})
output_filenames += [outname]
nrof_images = len(input_filenames)
log.info("Total number of images: ", nrof_images)
dataset = MegaFace(input_filenames)
else:
dataset = TrillionPairs(args.input_dir, osp.join(args.input_dir, 'testdata_lmk.txt'), test_mode=True)
nrof_images = len(dataset)
emb_array = np.zeros((nrof_images, args.embedding_size), dtype=np.float32)
dataset.transform = t.Compose([ResizeNumpy(models_backbones[args.model].get_input_res()),
NumpyToTensor(switch_rb=True)])
val_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=5, shuffle=False)
model = models_backbones[args.model](embedding_size=args.embedding_size, feature=True)
assert args.snap is not None
log.info('Snapshot ' + args.snap + ' ...')
log.info('Extracting embeddings ...')
model = load_model_state(model, args.snap, args.devices[0], eval_state=True)
model = torch.nn.DataParallel(model, device_ids=args.devices, output_device=args.devices[0])
f_output_filenames = []
with torch.cuda.device(args.devices[0]):
for i, data in enumerate(tqdm(val_loader), 0):
idxs, imgs = data['idx'], data['img']
batch_embeddings = F.normalize(model(imgs), p=2, dim=1).data.cpu().numpy()
batch_embeddings = batch_embeddings.reshape(batch_embeddings.shape[0], -1)
path_indices = idxs.data.cpu().numpy()
start_index = i*args.batch_size
end_index = min((i+1)*args.batch_size, nrof_images)
assert start_index == path_indices[0]
assert end_index == path_indices[-1] + 1
assert emb_array[start_index:end_index, :].shape == batch_embeddings.shape
emb_array[start_index:end_index, :] = batch_embeddings
if not args.trillion_format:
for index in path_indices:
f_output_filenames.append(output_filenames[index])
        assert len(f_output_filenames) == len(output_filenames)
log.info('Extracting features Done.')
if args.trillion_format:
save_mat(args.file_ending, emb_array)
else:
if 'megaface_noises.txt' in args.noises_list:
log.info('Cleaning Megaface features')
emb_array = clean_megaface(f_output_filenames, emb_array, args.noises_list)
elif 'facescrub_noises.txt' in args.noises_list:
log.info('Cleaning Facescrub features')
emb_array = clean_facescrub(f_output_filenames, emb_array, args.noises_list)
else:
log.info('Megaface features are not cleaned up.')
log.info('Saving features to files...')
for i in tqdm(range(len(f_output_filenames))):
save_mat(f_output_filenames[i], emb_array[i, :])
def parse_argument(argv):
parser = argparse.ArgumentParser(description='Save embeddings to MegaFace features files')
parser.add_argument('--model', choices=models_backbones.keys(), type=str, default='rmnet', help='Model type.')
parser.add_argument('input_dir', help='Path to MegaFace Features')
parser.add_argument('output_dir', help='Path to FaceScrub Features')
parser.add_argument('--input_list', default='list.txt', type=str, required=False)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--embedding_size', type=int, default=128)
parser.add_argument('--devices', type=int, nargs='+', default=[0], help='CUDA devices to use.')
parser.add_argument('--snap', type=str, required=True, help='Snapshot to evaluate.')
parser.add_argument('--noises_list', type=str, default='', required=False, help='A list of the Megaface or Facescrub noises produced by insightface. \
See https://github.com/deepinsight/insightface/blob/master/src/megaface/README.md')
    parser.add_argument('--file_ending', help='Ending appended to original photo files. e.g.\
11084833664_0.jpg_LBP_100x100.bin => _LBP_100x100.bin', default='_rmnet.bin')
parser.add_argument('--trillion_format', action='store_true')
return parser.parse_args(argv)
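# Illustrative invocation (not from the original docs; paths, snapshot, and
# noise-list names below are assumptions):
#   python dump_features.py /data/megaface /data/megaface_feats \
#       --model rmnet --snap snapshot.pt --noises_list megaface_noises.txt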
if __name__ == '__main__':
main(parse_argument(sys.argv[1:]))
| 44.159794
| 155
| 0.65834
|
import sys
import argparse
import os
import os.path as osp
from tqdm import tqdm
import numpy as np
import glog as log
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms as t
from scripts.matio import save_mat
from model.common import models_backbones
from datasets.megaface import MegaFace
from datasets.trillion_pairs import TrillionPairs
from utils.utils import load_model_state
from utils.augmentation import ResizeNumpy, NumpyToTensor
def clean_megaface(filenames, features, noises_list_path):
with open(noises_list_path, 'r') as f:
noises_list = f.readlines()
noises_list = [line.strip() for line in noises_list]
clean_features = np.zeros((features.shape[0], features.shape[1] + 1), dtype=np.float32)
for i, filename in enumerate(tqdm(filenames)):
clean_features[i, 0: features.shape[1]] = features[i, :]
for line in noises_list:
if line in filename:
clean_features[i, features.shape[1]] = 100.0
break
return clean_features
def clean_facescrub(filenames, features, noises_list_path):
clean_feature_size = features.shape[1] + 1
with open(noises_list_path, 'r') as f:
noises_list = f.readlines()
noises_list = [osp.splitext(line.strip())[0] for line in noises_list]
clean_features = np.zeros((features.shape[0], clean_feature_size), dtype=np.float32)
centroids = {}
for i, filename in enumerate(tqdm(filenames)):
clean_features[i, 0: features.shape[1]] = features[i, :]
id_name = osp.basename(filename).split('_')[0]
        if id_name not in centroids:
centroids[id_name] = np.zeros(clean_feature_size, dtype=np.float32)
centroids[id_name] += clean_features[i, :]
for i, file_path in enumerate(tqdm(filenames)):
filename = osp.basename(file_path)
for line in noises_list:
if line in filename.replace(' ', '_'):
id_name = filename.split('_')[0]
clean_features[i, :] = centroids[id_name] + np.random.uniform(-0.001, 0.001, clean_feature_size)
clean_features[i, :] /= np.linalg.norm(clean_features[i, :])
break
return clean_features
@torch.no_grad()
def main(args):
input_filenames = []
output_filenames = []
input_dir = os.path.abspath(args.input_dir)
output_dir = os.path.abspath(args.output_dir)
if not args.trillion_format:
log.info('Reading info...')
with open(os.path.join(args.input_dir, os.path.basename(args.input_list)), 'r') as f:
lines = f.readlines()
for line in tqdm(lines):
info = line.strip().split('|')
file = info[0].strip()
filename = os.path.join(input_dir, file)
path, _ = osp.split(filename)
out_folder = path.replace(input_dir, output_dir)
if not osp.isdir(out_folder):
os.makedirs(out_folder)
landmarks = None
bbox = None
if len(info) > 2:
landmarks = info[1].strip().split(' ')
landmarks = [float(x) for x in landmarks]
bbox = info[2].strip().split(' ')
bbox = [int(float(x)) for x in bbox]
outname = filename.replace(input_dir, output_dir) + args.file_ending
input_filenames.append({'path': filename, 'landmarks': landmarks, 'bbox': bbox})
output_filenames += [outname]
nrof_images = len(input_filenames)
log.info("Total number of images: ", nrof_images)
dataset = MegaFace(input_filenames)
else:
dataset = TrillionPairs(args.input_dir, osp.join(args.input_dir, 'testdata_lmk.txt'), test_mode=True)
nrof_images = len(dataset)
emb_array = np.zeros((nrof_images, args.embedding_size), dtype=np.float32)
dataset.transform = t.Compose([ResizeNumpy(models_backbones[args.model].get_input_res()),
NumpyToTensor(switch_rb=True)])
val_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=5, shuffle=False)
model = models_backbones[args.model](embedding_size=args.embedding_size, feature=True)
assert args.snap is not None
log.info('Snapshot ' + args.snap + ' ...')
log.info('Extracting embeddings ...')
model = load_model_state(model, args.snap, args.devices[0], eval_state=True)
model = torch.nn.DataParallel(model, device_ids=args.devices, output_device=args.devices[0])
f_output_filenames = []
with torch.cuda.device(args.devices[0]):
for i, data in enumerate(tqdm(val_loader), 0):
idxs, imgs = data['idx'], data['img']
batch_embeddings = F.normalize(model(imgs), p=2, dim=1).data.cpu().numpy()
batch_embeddings = batch_embeddings.reshape(batch_embeddings.shape[0], -1)
path_indices = idxs.data.cpu().numpy()
start_index = i*args.batch_size
end_index = min((i+1)*args.batch_size, nrof_images)
assert start_index == path_indices[0]
assert end_index == path_indices[-1] + 1
assert emb_array[start_index:end_index, :].shape == batch_embeddings.shape
emb_array[start_index:end_index, :] = batch_embeddings
if not args.trillion_format:
for index in path_indices:
f_output_filenames.append(output_filenames[index])
        assert len(f_output_filenames) == len(output_filenames)
log.info('Extracting features Done.')
if args.trillion_format:
save_mat(args.file_ending, emb_array)
else:
if 'megaface_noises.txt' in args.noises_list:
log.info('Cleaning Megaface features')
emb_array = clean_megaface(f_output_filenames, emb_array, args.noises_list)
elif 'facescrub_noises.txt' in args.noises_list:
log.info('Cleaning Facescrub features')
emb_array = clean_facescrub(f_output_filenames, emb_array, args.noises_list)
else:
log.info('Megaface features are not cleaned up.')
log.info('Saving features to files...')
for i in tqdm(range(len(f_output_filenames))):
save_mat(f_output_filenames[i], emb_array[i, :])
def parse_argument(argv):
parser = argparse.ArgumentParser(description='Save embeddings to MegaFace features files')
parser.add_argument('--model', choices=models_backbones.keys(), type=str, default='rmnet', help='Model type.')
parser.add_argument('input_dir', help='Path to MegaFace Features')
parser.add_argument('output_dir', help='Path to FaceScrub Features')
parser.add_argument('--input_list', default='list.txt', type=str, required=False)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--embedding_size', type=int, default=128)
parser.add_argument('--devices', type=int, nargs='+', default=[0], help='CUDA devices to use.')
parser.add_argument('--snap', type=str, required=True, help='Snapshot to evaluate.')
parser.add_argument('--noises_list', type=str, default='', required=False, help='A list of the Megaface or Facescrub noises produced by insightface. \
See https://github.com/deepinsight/insightface/blob/master/src/megaface/README.md')
    parser.add_argument('--file_ending', help='Ending appended to original photo files. e.g.\
11084833664_0.jpg_LBP_100x100.bin => _LBP_100x100.bin', default='_rmnet.bin')
parser.add_argument('--trillion_format', action='store_true')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_argument(sys.argv[1:]))
| true
| true
|
1c47c6a57ba1e3e281016c90e86575d8ae9b3a68
| 11,286
|
py
|
Python
|
fpga/lib/eth/tb/test_axis_gmii_tx.py
|
totuwei/corundum
|
e983ad519fb4523d0ffca32f5e436195bcfc945c
|
[
"BSD-2-Clause-FreeBSD"
] | 1,121
|
2015-05-26T14:41:44.000Z
|
2022-03-31T07:17:48.000Z
|
tb/test_axis_gmii_tx.py
|
yuzu762/verilog-ethernet
|
108c02d721aada8a8f51e22328f6ca6c64b70d33
|
[
"MIT"
] | 98
|
2016-02-12T21:15:45.000Z
|
2022-03-31T03:13:00.000Z
|
tb/test_axis_gmii_tx.py
|
yuzu762/verilog-ethernet
|
108c02d721aada8a8f51e22328f6ca6c64b70d33
|
[
"MIT"
] | 368
|
2015-05-05T20:49:01.000Z
|
2022-03-31T09:43:53.000Z
|
#!/usr/bin/env python
"""
Copyright (c) 2015-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
import eth_ep
import gmii_ep
module = 'axis_gmii_tx'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/lfsr.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
DATA_WIDTH = 8
ENABLE_PADDING = 1
MIN_FRAME_LENGTH = 64
PTP_TS_ENABLE = 0
PTP_TS_WIDTH = 96
PTP_TAG_ENABLE = PTP_TS_ENABLE
PTP_TAG_WIDTH = 16
USER_WIDTH = (PTP_TAG_WIDTH if PTP_TAG_ENABLE else 0) + 1
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
s_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
s_axis_tvalid = Signal(bool(0))
s_axis_tlast = Signal(bool(0))
s_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
ptp_ts = Signal(intbv(0)[PTP_TS_WIDTH:])
clk_enable = Signal(bool(1))
mii_select = Signal(bool(0))
ifg_delay = Signal(intbv(0)[8:])
# Outputs
s_axis_tready = Signal(bool(0))
gmii_txd = Signal(intbv(0)[DATA_WIDTH:])
gmii_tx_en = Signal(bool(0))
gmii_tx_er = Signal(bool(0))
m_axis_ptp_ts = Signal(intbv(0)[PTP_TS_WIDTH:])
m_axis_ptp_ts_tag = Signal(intbv(0)[PTP_TAG_WIDTH:])
m_axis_ptp_ts_valid = Signal(bool(0))
start_packet = Signal(bool(0))
error_underflow = Signal(bool(0))
# sources and sinks
source_pause = Signal(bool(0))
source = axis_ep.AXIStreamSource()
source_logic = source.create_logic(
clk,
rst,
tdata=s_axis_tdata,
tvalid=s_axis_tvalid,
tready=s_axis_tready,
tlast=s_axis_tlast,
tuser=s_axis_tuser,
pause=source_pause,
name='source'
)
sink = gmii_ep.GMIISink()
sink_logic = sink.create_logic(
clk,
rst,
rxd=gmii_txd,
rx_dv=gmii_tx_en,
rx_er=gmii_tx_er,
clk_enable=clk_enable,
mii_select=mii_select,
name='sink'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
s_axis_tdata=s_axis_tdata,
s_axis_tvalid=s_axis_tvalid,
s_axis_tready=s_axis_tready,
s_axis_tlast=s_axis_tlast,
s_axis_tuser=s_axis_tuser,
gmii_txd=gmii_txd,
gmii_tx_en=gmii_tx_en,
gmii_tx_er=gmii_tx_er,
ptp_ts=ptp_ts,
m_axis_ptp_ts=m_axis_ptp_ts,
m_axis_ptp_ts_tag=m_axis_ptp_ts_tag,
m_axis_ptp_ts_valid=m_axis_ptp_ts_valid,
clk_enable=clk_enable,
mii_select=mii_select,
ifg_delay=ifg_delay,
start_packet=start_packet,
error_underflow=error_underflow
)
@always(delay(4))
def clkgen():
clk.next = not clk
clk_enable_rate = Signal(int(1))
clk_enable_div = Signal(int(0))
@always(clk.posedge)
def clk_enable_gen():
if clk_enable_div.next > 0:
clk_enable.next = 0
clk_enable_div.next = clk_enable_div - 1
else:
clk_enable.next = 1
clk_enable_div.next = clk_enable_rate - 1
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
ifg_delay.next = 12
# testbench stimulus
for rate, mii in [(1, 0), (10, 0), (5, 1)]:
clk_enable_rate.next = rate
mii_select.next = mii
yield delay(100)
for payload_len in list(range(1,18))+list(range(64,82)):
yield clk.posedge
print("test 1: test packet, length %d" % payload_len)
current_test.next = 1
test_frame = eth_ep.EthFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x8000
test_frame.payload = bytearray(range(payload_len))
test_frame.update_fcs()
axis_frame = test_frame.build_axis()
source.send(axis_frame)
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis_fcs(rx_frame.data[8:])
print(hex(eth_frame.eth_fcs))
print(hex(eth_frame.calc_fcs()))
assert len(eth_frame.payload.data) == max(payload_len, 46)
assert eth_frame.eth_fcs == eth_frame.calc_fcs()
assert eth_frame.eth_dest_mac == test_frame.eth_dest_mac
assert eth_frame.eth_src_mac == test_frame.eth_src_mac
assert eth_frame.eth_type == test_frame.eth_type
assert eth_frame.payload.data.index(test_frame.payload.data) == 0
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 2: back-to-back packets, length %d" % payload_len)
current_test.next = 2
test_frame1 = eth_ep.EthFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.payload = bytearray(range(payload_len))
test_frame1.update_fcs()
test_frame2 = eth_ep.EthFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.payload = bytearray(range(payload_len))
test_frame2.update_fcs()
axis_frame1 = test_frame1.build_axis()
axis_frame2 = test_frame2.build_axis()
source.send(axis_frame1)
source.send(axis_frame2)
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis_fcs(rx_frame.data[8:])
print(hex(eth_frame.eth_fcs))
print(hex(eth_frame.calc_fcs()))
assert len(eth_frame.payload.data) == max(payload_len, 46)
assert eth_frame.eth_fcs == eth_frame.calc_fcs()
assert eth_frame.eth_dest_mac == test_frame1.eth_dest_mac
assert eth_frame.eth_src_mac == test_frame1.eth_src_mac
assert eth_frame.eth_type == test_frame1.eth_type
assert eth_frame.payload.data.index(test_frame1.payload.data) == 0
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis_fcs(rx_frame.data[8:])
print(hex(eth_frame.eth_fcs))
print(hex(eth_frame.calc_fcs()))
assert len(eth_frame.payload.data) == max(payload_len, 46)
assert eth_frame.eth_fcs == eth_frame.calc_fcs()
assert eth_frame.eth_dest_mac == test_frame2.eth_dest_mac
assert eth_frame.eth_src_mac == test_frame2.eth_src_mac
assert eth_frame.eth_type == test_frame2.eth_type
assert eth_frame.payload.data.index(test_frame2.payload.data) == 0
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 3: tuser assert, length %d" % payload_len)
current_test.next = 3
test_frame1 = eth_ep.EthFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.payload = bytearray(range(payload_len))
test_frame1.update_fcs()
test_frame2 = eth_ep.EthFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.payload = bytearray(range(payload_len))
test_frame2.update_fcs()
axis_frame1 = test_frame1.build_axis()
axis_frame2 = test_frame2.build_axis()
axis_frame1.user = 1
source.send(axis_frame1)
source.send(axis_frame2)
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
assert rx_frame.error[-1]
# bad packet
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis_fcs(rx_frame.data[8:])
print(hex(eth_frame.eth_fcs))
print(hex(eth_frame.calc_fcs()))
assert len(eth_frame.payload.data) == max(payload_len, 46)
assert eth_frame.eth_fcs == eth_frame.calc_fcs()
assert eth_frame.eth_dest_mac == test_frame2.eth_dest_mac
assert eth_frame.eth_src_mac == test_frame2.eth_src_mac
assert eth_frame.eth_type == test_frame2.eth_type
assert eth_frame.payload.data.index(test_frame2.payload.data) == 0
assert sink.empty()
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| 32.153846
| 91
| 0.601985
|
from myhdl import *
import os
import axis_ep
import eth_ep
import gmii_ep
module = 'axis_gmii_tx'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/lfsr.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
DATA_WIDTH = 8
ENABLE_PADDING = 1
MIN_FRAME_LENGTH = 64
PTP_TS_ENABLE = 0
PTP_TS_WIDTH = 96
PTP_TAG_ENABLE = PTP_TS_ENABLE
PTP_TAG_WIDTH = 16
USER_WIDTH = (PTP_TAG_WIDTH if PTP_TAG_ENABLE else 0) + 1
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
s_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
s_axis_tvalid = Signal(bool(0))
s_axis_tlast = Signal(bool(0))
s_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
ptp_ts = Signal(intbv(0)[PTP_TS_WIDTH:])
clk_enable = Signal(bool(1))
mii_select = Signal(bool(0))
ifg_delay = Signal(intbv(0)[8:])
s_axis_tready = Signal(bool(0))
gmii_txd = Signal(intbv(0)[DATA_WIDTH:])
gmii_tx_en = Signal(bool(0))
gmii_tx_er = Signal(bool(0))
m_axis_ptp_ts = Signal(intbv(0)[PTP_TS_WIDTH:])
m_axis_ptp_ts_tag = Signal(intbv(0)[PTP_TAG_WIDTH:])
m_axis_ptp_ts_valid = Signal(bool(0))
start_packet = Signal(bool(0))
error_underflow = Signal(bool(0))
source_pause = Signal(bool(0))
source = axis_ep.AXIStreamSource()
source_logic = source.create_logic(
clk,
rst,
tdata=s_axis_tdata,
tvalid=s_axis_tvalid,
tready=s_axis_tready,
tlast=s_axis_tlast,
tuser=s_axis_tuser,
pause=source_pause,
name='source'
)
sink = gmii_ep.GMIISink()
sink_logic = sink.create_logic(
clk,
rst,
rxd=gmii_txd,
rx_dv=gmii_tx_en,
rx_er=gmii_tx_er,
clk_enable=clk_enable,
mii_select=mii_select,
name='sink'
)
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
s_axis_tdata=s_axis_tdata,
s_axis_tvalid=s_axis_tvalid,
s_axis_tready=s_axis_tready,
s_axis_tlast=s_axis_tlast,
s_axis_tuser=s_axis_tuser,
gmii_txd=gmii_txd,
gmii_tx_en=gmii_tx_en,
gmii_tx_er=gmii_tx_er,
ptp_ts=ptp_ts,
m_axis_ptp_ts=m_axis_ptp_ts,
m_axis_ptp_ts_tag=m_axis_ptp_ts_tag,
m_axis_ptp_ts_valid=m_axis_ptp_ts_valid,
clk_enable=clk_enable,
mii_select=mii_select,
ifg_delay=ifg_delay,
start_packet=start_packet,
error_underflow=error_underflow
)
@always(delay(4))
def clkgen():
clk.next = not clk
clk_enable_rate = Signal(int(1))
clk_enable_div = Signal(int(0))
@always(clk.posedge)
def clk_enable_gen():
if clk_enable_div.next > 0:
clk_enable.next = 0
clk_enable_div.next = clk_enable_div - 1
else:
clk_enable.next = 1
clk_enable_div.next = clk_enable_rate - 1
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
ifg_delay.next = 12
for rate, mii in [(1, 0), (10, 0), (5, 1)]:
clk_enable_rate.next = rate
mii_select.next = mii
yield delay(100)
for payload_len in list(range(1,18))+list(range(64,82)):
yield clk.posedge
print("test 1: test packet, length %d" % payload_len)
current_test.next = 1
test_frame = eth_ep.EthFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x8000
test_frame.payload = bytearray(range(payload_len))
test_frame.update_fcs()
axis_frame = test_frame.build_axis()
source.send(axis_frame)
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis_fcs(rx_frame.data[8:])
print(hex(eth_frame.eth_fcs))
print(hex(eth_frame.calc_fcs()))
assert len(eth_frame.payload.data) == max(payload_len, 46)
assert eth_frame.eth_fcs == eth_frame.calc_fcs()
assert eth_frame.eth_dest_mac == test_frame.eth_dest_mac
assert eth_frame.eth_src_mac == test_frame.eth_src_mac
assert eth_frame.eth_type == test_frame.eth_type
assert eth_frame.payload.data.index(test_frame.payload.data) == 0
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 2: back-to-back packets, length %d" % payload_len)
current_test.next = 2
test_frame1 = eth_ep.EthFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.payload = bytearray(range(payload_len))
test_frame1.update_fcs()
test_frame2 = eth_ep.EthFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.payload = bytearray(range(payload_len))
test_frame2.update_fcs()
axis_frame1 = test_frame1.build_axis()
axis_frame2 = test_frame2.build_axis()
source.send(axis_frame1)
source.send(axis_frame2)
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis_fcs(rx_frame.data[8:])
print(hex(eth_frame.eth_fcs))
print(hex(eth_frame.calc_fcs()))
assert len(eth_frame.payload.data) == max(payload_len, 46)
assert eth_frame.eth_fcs == eth_frame.calc_fcs()
assert eth_frame.eth_dest_mac == test_frame1.eth_dest_mac
assert eth_frame.eth_src_mac == test_frame1.eth_src_mac
assert eth_frame.eth_type == test_frame1.eth_type
assert eth_frame.payload.data.index(test_frame1.payload.data) == 0
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis_fcs(rx_frame.data[8:])
print(hex(eth_frame.eth_fcs))
print(hex(eth_frame.calc_fcs()))
assert len(eth_frame.payload.data) == max(payload_len, 46)
assert eth_frame.eth_fcs == eth_frame.calc_fcs()
assert eth_frame.eth_dest_mac == test_frame2.eth_dest_mac
assert eth_frame.eth_src_mac == test_frame2.eth_src_mac
assert eth_frame.eth_type == test_frame2.eth_type
assert eth_frame.payload.data.index(test_frame2.payload.data) == 0
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 3: tuser assert, length %d" % payload_len)
current_test.next = 3
test_frame1 = eth_ep.EthFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.payload = bytearray(range(payload_len))
test_frame1.update_fcs()
test_frame2 = eth_ep.EthFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.payload = bytearray(range(payload_len))
test_frame2.update_fcs()
axis_frame1 = test_frame1.build_axis()
axis_frame2 = test_frame2.build_axis()
axis_frame1.user = 1
source.send(axis_frame1)
source.send(axis_frame2)
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
assert rx_frame.error[-1]
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis_fcs(rx_frame.data[8:])
print(hex(eth_frame.eth_fcs))
print(hex(eth_frame.calc_fcs()))
assert len(eth_frame.payload.data) == max(payload_len, 46)
assert eth_frame.eth_fcs == eth_frame.calc_fcs()
assert eth_frame.eth_dest_mac == test_frame2.eth_dest_mac
assert eth_frame.eth_src_mac == test_frame2.eth_src_mac
assert eth_frame.eth_type == test_frame2.eth_type
assert eth_frame.payload.data.index(test_frame2.payload.data) == 0
assert sink.empty()
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| true
| true
|
1c47c743603061855d979f37a27b7acaf2a74e4b
| 7,496
|
py
|
Python
|
python-modules/twisted/twisted/internet/ssl.py
|
stormtheh4ck3r/python-for-android
|
b9ea9161392f60566b81482b1e25cd77004d5c45
|
[
"Apache-2.0"
] | 267
|
2015-03-22T15:23:48.000Z
|
2022-03-05T21:57:34.000Z
|
python-modules/twisted/twisted/internet/ssl.py
|
rockyzhang/zhangyanhit-python-for-android-mips
|
799dd5ca16f72135f2eab71e144a68842e2aaee0
|
[
"Apache-2.0"
] | 133
|
2015-03-21T15:13:43.000Z
|
2021-12-11T23:37:58.000Z
|
python-modules/twisted/twisted/internet/ssl.py
|
rockyzhang/zhangyanhit-python-for-android-mips
|
799dd5ca16f72135f2eab71e144a68842e2aaee0
|
[
"Apache-2.0"
] | 119
|
2015-04-28T16:07:10.000Z
|
2022-03-18T03:49:48.000Z
|
# -*- test-case-name: twisted.test.test_ssl -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
SSL transport. Requires PyOpenSSL (http://pyopenssl.sf.net).
SSL connections require a ContextFactory so they can create SSL contexts.
End users should only use the ContextFactory classes directly - for SSL
connections use the reactor.connectSSL/listenSSL and so on, as documented
in IReactorSSL.
All server context factories should inherit from ContextFactory, and all
client context factories should inherit from ClientContextFactory. At the
moment this is not enforced, but in the future it might be.
Future Plans:
- split module so reactor-specific classes are in a separate module
- support for switching TCP into SSL
- more options
Maintainer: Itamar Shtull-Trauring
"""
# If something goes wrong, most notably an OpenSSL import failure,
# sys.modules['twisted.internet.ssl'] will be bound to a partially
# initialized module object. This is wacko, but we will take advantage
# of it to publish whether or not SSL is available.
# See the end of this module for the other half of this solution.
# The correct idiom to import this module is thus:
# try:
# from twisted.internet import ssl
# except ImportError:
# # happens the first time the interpreter tries to import it
# ssl = None
# if ssl and not ssl.supported:
# # happens second and later times
# ssl = None
supported = False
# System imports
from OpenSSL import SSL
from zope.interface import implements, implementsOnly, implementedBy
# Twisted imports
from twisted.internet import tcp, interfaces, base, address
class ContextFactory:
"""A factory for SSL context objects, for server SSL connections."""
isClient = 0
def getContext(self):
"""Return a SSL.Context object. override in subclasses."""
raise NotImplementedError
class DefaultOpenSSLContextFactory(ContextFactory):
"""
L{DefaultOpenSSLContextFactory} is a factory for server-side SSL context
objects. These objects define certain parameters related to SSL
handshakes and the subsequent connection.
@ivar _contextFactory: A callable which will be used to create new
context objects. This is typically L{SSL.Context}.
"""
_context = None
def __init__(self, privateKeyFileName, certificateFileName,
sslmethod=SSL.SSLv23_METHOD, _contextFactory=SSL.Context):
"""
@param privateKeyFileName: Name of a file containing a private key
@param certificateFileName: Name of a file containing a certificate
@param sslmethod: The SSL method to use
"""
self.privateKeyFileName = privateKeyFileName
self.certificateFileName = certificateFileName
self.sslmethod = sslmethod
self._contextFactory = _contextFactory
# Create a context object right now. This is to force validation of
# the given parameters so that errors are detected earlier rather
# than later.
self.cacheContext()
def cacheContext(self):
if self._context is None:
ctx = self._contextFactory(self.sslmethod)
# Disallow SSLv2! It's insecure! SSLv3 has been around since
# 1996. It's time to move on.
ctx.set_options(SSL.OP_NO_SSLv2)
ctx.use_certificate_file(self.certificateFileName)
ctx.use_privatekey_file(self.privateKeyFileName)
self._context = ctx
def __getstate__(self):
d = self.__dict__.copy()
del d['_context']
return d
def __setstate__(self, state):
self.__dict__ = state
def getContext(self):
"""
Return an SSL context.
"""
return self._context
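# Illustrative sketch (not part of Twisted): typical server-side use of the
# factory above; the key/certificate file names are assumptions.
def exampleListenSSL(reactor, siteFactory):
    contextFactory = DefaultOpenSSLContextFactory(
        'server.key', 'server.crt')
    return reactor.listenSSL(443, siteFactory, contextFactory)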
class ClientContextFactory:
"""A context factory for SSL clients."""
isClient = 1
# SSLv23_METHOD allows SSLv2, SSLv3, and TLSv1. We disable SSLv2 below,
# though.
method = SSL.SSLv23_METHOD
_contextFactory = SSL.Context
def getContext(self):
ctx = self._contextFactory(self.method)
# See comment in DefaultOpenSSLContextFactory about SSLv2.
ctx.set_options(SSL.OP_NO_SSLv2)
return ctx
class Client(tcp.Client):
"""I am an SSL client."""
implementsOnly(interfaces.ISSLTransport,
*[i for i in implementedBy(tcp.Client) if i != interfaces.ITLSTransport])
def __init__(self, host, port, bindAddress, ctxFactory, connector, reactor=None):
# tcp.Client.__init__ depends on self.ctxFactory being set
self.ctxFactory = ctxFactory
tcp.Client.__init__(self, host, port, bindAddress, connector, reactor)
def getHost(self):
"""Returns the address from which I am connecting."""
h, p = self.socket.getsockname()
return address.IPv4Address('TCP', h, p, 'SSL')
def getPeer(self):
"""Returns the address that I am connected."""
return address.IPv4Address('TCP', self.addr[0], self.addr[1], 'SSL')
def _connectDone(self):
self.startTLS(self.ctxFactory)
self.startWriting()
tcp.Client._connectDone(self)
class Server(tcp.Server):
"""I am an SSL server.
"""
implements(interfaces.ISSLTransport)
def getHost(self):
"""Return server's address."""
h, p = self.socket.getsockname()
return address.IPv4Address('TCP', h, p, 'SSL')
def getPeer(self):
"""Return address of peer."""
h, p = self.client
return address.IPv4Address('TCP', h, p, 'SSL')
class Port(tcp.Port):
"""I am an SSL port."""
_socketShutdownMethod = 'sock_shutdown'
transport = Server
def __init__(self, port, factory, ctxFactory, backlog=50, interface='', reactor=None):
tcp.Port.__init__(self, port, factory, backlog, interface, reactor)
self.ctxFactory = ctxFactory
def createInternetSocket(self):
"""(internal) create an SSL socket
"""
sock = tcp.Port.createInternetSocket(self)
return SSL.Connection(self.ctxFactory.getContext(), sock)
def _preMakeConnection(self, transport):
# *Don't* call startTLS here
# The transport already has the SSL.Connection object from above
transport._startTLS()
return tcp.Port._preMakeConnection(self, transport)
class Connector(base.BaseConnector):
def __init__(self, host, port, factory, contextFactory, timeout, bindAddress, reactor=None):
self.host = host
self.port = port
self.bindAddress = bindAddress
self.contextFactory = contextFactory
base.BaseConnector.__init__(self, factory, timeout, reactor)
def _makeTransport(self):
return Client(self.host, self.port, self.bindAddress, self.contextFactory, self, self.reactor)
def getDestination(self):
return address.IPv4Address('TCP', self.host, self.port, 'SSL')
from twisted.internet._sslverify import DistinguishedName, DN, Certificate
from twisted.internet._sslverify import CertificateRequest, PrivateCertificate
from twisted.internet._sslverify import KeyPair
from twisted.internet._sslverify import OpenSSLCertificateOptions as CertificateOptions
__all__ = [
"ContextFactory", "DefaultOpenSSLContextFactory", "ClientContextFactory",
'DistinguishedName', 'DN',
'Certificate', 'CertificateRequest', 'PrivateCertificate',
'KeyPair',
'CertificateOptions',
]
supported = True
| 32.034188
| 102
| 0.689301
|
supported = False
from OpenSSL import SSL
from zope.interface import implements, implementsOnly, implementedBy
from twisted.internet import tcp, interfaces, base, address
class ContextFactory:
isClient = 0
def getContext(self):
raise NotImplementedError
class DefaultOpenSSLContextFactory(ContextFactory):
_context = None
def __init__(self, privateKeyFileName, certificateFileName,
sslmethod=SSL.SSLv23_METHOD, _contextFactory=SSL.Context):
self.privateKeyFileName = privateKeyFileName
self.certificateFileName = certificateFileName
self.sslmethod = sslmethod
self._contextFactory = _contextFactory
self.cacheContext()
def cacheContext(self):
if self._context is None:
ctx = self._contextFactory(self.sslmethod)
# 1996. It's time to move on.
ctx.set_options(SSL.OP_NO_SSLv2)
ctx.use_certificate_file(self.certificateFileName)
ctx.use_privatekey_file(self.privateKeyFileName)
self._context = ctx
def __getstate__(self):
d = self.__dict__.copy()
del d['_context']
return d
def __setstate__(self, state):
self.__dict__ = state
def getContext(self):
return self._context
class ClientContextFactory:
isClient = 1
method = SSL.SSLv23_METHOD
_contextFactory = SSL.Context
def getContext(self):
ctx = self._contextFactory(self.method)
ctx.set_options(SSL.OP_NO_SSLv2)
return ctx
class Client(tcp.Client):
implementsOnly(interfaces.ISSLTransport,
*[i for i in implementedBy(tcp.Client) if i != interfaces.ITLSTransport])
def __init__(self, host, port, bindAddress, ctxFactory, connector, reactor=None):
self.ctxFactory = ctxFactory
tcp.Client.__init__(self, host, port, bindAddress, connector, reactor)
def getHost(self):
h, p = self.socket.getsockname()
return address.IPv4Address('TCP', h, p, 'SSL')
def getPeer(self):
return address.IPv4Address('TCP', self.addr[0], self.addr[1], 'SSL')
def _connectDone(self):
self.startTLS(self.ctxFactory)
self.startWriting()
tcp.Client._connectDone(self)
class Server(tcp.Server):
implements(interfaces.ISSLTransport)
def getHost(self):
h, p = self.socket.getsockname()
return address.IPv4Address('TCP', h, p, 'SSL')
def getPeer(self):
h, p = self.client
return address.IPv4Address('TCP', h, p, 'SSL')
class Port(tcp.Port):
_socketShutdownMethod = 'sock_shutdown'
transport = Server
def __init__(self, port, factory, ctxFactory, backlog=50, interface='', reactor=None):
tcp.Port.__init__(self, port, factory, backlog, interface, reactor)
self.ctxFactory = ctxFactory
def createInternetSocket(self):
sock = tcp.Port.createInternetSocket(self)
return SSL.Connection(self.ctxFactory.getContext(), sock)
def _preMakeConnection(self, transport):
# The transport already has the SSL.Connection object from above
transport._startTLS()
return tcp.Port._preMakeConnection(self, transport)
class Connector(base.BaseConnector):
def __init__(self, host, port, factory, contextFactory, timeout, bindAddress, reactor=None):
self.host = host
self.port = port
self.bindAddress = bindAddress
self.contextFactory = contextFactory
base.BaseConnector.__init__(self, factory, timeout, reactor)
def _makeTransport(self):
return Client(self.host, self.port, self.bindAddress, self.contextFactory, self, self.reactor)
def getDestination(self):
return address.IPv4Address('TCP', self.host, self.port, 'SSL')
from twisted.internet._sslverify import DistinguishedName, DN, Certificate
from twisted.internet._sslverify import CertificateRequest, PrivateCertificate
from twisted.internet._sslverify import KeyPair
from twisted.internet._sslverify import OpenSSLCertificateOptions as CertificateOptions
__all__ = [
"ContextFactory", "DefaultOpenSSLContextFactory", "ClientContextFactory",
'DistinguishedName', 'DN',
'Certificate', 'CertificateRequest', 'PrivateCertificate',
'KeyPair',
'CertificateOptions',
]
supported = True
| true
| true
|
1c47c7b7ad1cb5f4dbaadc84f69896248dc1ef93
| 1,850
|
py
|
Python
|
wikum-env3/lib/python3.7/site-packages/mwparserfromhell/nodes/text.py
|
xuericlin/wikum
|
f0171f1697efa91d6957f976f473c9201db85648
|
[
"MIT"
] | 8
|
2021-04-29T16:49:45.000Z
|
2021-08-09T18:56:35.000Z
|
wikum-env3/lib/python3.7/site-packages/mwparserfromhell/nodes/text.py
|
xuericlin/wikum
|
f0171f1697efa91d6957f976f473c9201db85648
|
[
"MIT"
] | null | null | null |
wikum-env3/lib/python3.7/site-packages/mwparserfromhell/nodes/text.py
|
xuericlin/wikum
|
f0171f1697efa91d6957f976f473c9201db85648
|
[
"MIT"
] | 2
|
2020-08-03T13:02:06.000Z
|
2020-11-04T03:15:44.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2019 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
from . import Node
from ..compat import str
__all__ = ["Text"]
class Text(Node):
"""Represents ordinary, unformatted text with no special properties."""
def __init__(self, value):
super(Text, self).__init__()
self.value = value
def __unicode__(self):
return self.value
def __strip__(self, **kwargs):
return self
def __showtree__(self, write, get, mark):
write(str(self).encode("unicode_escape").decode("utf8"))
@property
def value(self):
"""The actual text itself."""
return self._value
@value.setter
def value(self, newval):
self._value = str(newval)
| 34.259259
| 79
| 0.718378
|
from __future__ import unicode_literals
from . import Node
from ..compat import str
__all__ = ["Text"]
class Text(Node):
def __init__(self, value):
super(Text, self).__init__()
self.value = value
def __unicode__(self):
return self.value
def __strip__(self, **kwargs):
return self
def __showtree__(self, write, get, mark):
write(str(self).encode("unicode_escape").decode("utf8"))
@property
def value(self):
return self._value
@value.setter
def value(self, newval):
self._value = str(newval)
| true
| true
|
1c47c8b56a82daffb467121923485a7868336d49
| 981
|
py
|
Python
|
ratelimit/rule.py
|
abersheeran/asgi-ratelimit
|
504de6dca1eb99762114a0886d502679a608799e
|
[
"Apache-2.0"
] | 136
|
2020-06-08T10:38:19.000Z
|
2022-03-24T14:45:51.000Z
|
ratelimit/rule.py
|
abersheeran/asgi-ratelimit
|
504de6dca1eb99762114a0886d502679a608799e
|
[
"Apache-2.0"
] | 38
|
2020-07-12T15:35:15.000Z
|
2022-03-25T03:27:45.000Z
|
ratelimit/rule.py
|
abersheeran/asgi-ratelimit
|
504de6dca1eb99762114a0886d502679a608799e
|
[
"Apache-2.0"
] | 15
|
2021-01-19T13:48:37.000Z
|
2022-03-18T02:34:52.000Z
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
@dataclass
class Rule:
group: str = "default"
second: Optional[int] = None
minute: Optional[int] = None
hour: Optional[int] = None
day: Optional[int] = None
month: Optional[int] = None
block_time: Optional[int] = None
zone: Optional[str] = None
def ruleset(self, path: str, user: str) -> Dict[str, Tuple[int, int]]:
"""
builds a dictionary of keys, values where keys are
the redis keys and values is a tuple of (limit, ttl)
"""
return {
f"{path}:{user}:{name}": (limit, TTL[name])
for name, limit in map(lambda name: (name, getattr(self, name)), RULENAMES)
if limit is not None
}
TTL = {
"second": 1,
"minute": 60,
"hour": 60 * 60,
"day": 24 * 60 * 60,
"month": 31 * 24 * 60 * 60,
}
RULENAMES: Tuple[str, ...] = ("second", "minute", "hour", "day", "month")
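# Illustrative sketch (not part of the library): what ruleset() yields for a
# hypothetical path/user pair; key names below follow the f-string above.
if __name__ == "__main__":
    rule = Rule(second=5, minute=100)
    # -> {"/api:alice:second": (5, 1), "/api:alice:minute": (100, 60)}
    print(rule.ruleset("/api", "alice"))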
| 24.525
| 87
| 0.566769
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
@dataclass
class Rule:
group: str = "default"
second: Optional[int] = None
minute: Optional[int] = None
hour: Optional[int] = None
day: Optional[int] = None
month: Optional[int] = None
block_time: Optional[int] = None
zone: Optional[str] = None
def ruleset(self, path: str, user: str) -> Dict[str, Tuple[int, int]]:
return {
f"{path}:{user}:{name}": (limit, TTL[name])
for name, limit in map(lambda name: (name, getattr(self, name)), RULENAMES)
if limit is not None
}
TTL = {
"second": 1,
"minute": 60,
"hour": 60 * 60,
"day": 24 * 60 * 60,
"month": 31 * 24 * 60 * 60,
}
RULENAMES: Tuple[str, ...] = ("second", "minute", "hour", "day", "month")
| true
| true
|
1c47c90a7ae040e58e2550f867ee1a2872a42dce
| 32,446
|
py
|
Python
|
cirq/sim/simulator.py
|
zchen088/Cirq
|
8cf782554adbafed724987de3067de7ca565fa0c
|
[
"Apache-2.0"
] | 1
|
2019-12-18T17:42:14.000Z
|
2019-12-18T17:42:14.000Z
|
cirq/sim/simulator.py
|
zchen088/Cirq
|
8cf782554adbafed724987de3067de7ca565fa0c
|
[
"Apache-2.0"
] | null | null | null |
cirq/sim/simulator.py
|
zchen088/Cirq
|
8cf782554adbafed724987de3067de7ca565fa0c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract base classes for different types of simulators.
Simulator types include:
SimulatesSamples: mimics the interface of quantum hardware.
SimulatesAmplitudes: computes amplitudes of desired bitstrings in the
final state of the simulation.
SimulatesFinalState: allows access to the final state of the simulation.
SimulatesIntermediateState: allows for access to the state of the simulation
    as the simulation iterates through the moments of a circuit.
"""
from typing import (
Any,
Dict,
Iterator,
List,
Sequence,
Tuple,
Union,
Optional,
TYPE_CHECKING,
Set,
cast,
Callable,
TypeVar,
Generic,
)
import abc
import collections
import numpy as np
from cirq import circuits, ops, protocols, study, value, work
from cirq._compat import deprecated
if TYPE_CHECKING:
import cirq
TStepResult = TypeVar('TStepResult', bound='StepResult')
TSimulationTrialResult = TypeVar('TSimulationTrialResult', bound='SimulationTrialResult')
TSimulatorState = TypeVar('TSimulatorState')
class SimulatesSamples(work.Sampler, metaclass=abc.ABCMeta):
"""Simulator that mimics running on quantum hardware.
Implementors of this interface should implement the _run method.
"""
def run_sweep(
self,
program: 'cirq.Circuit',
params: study.Sweepable,
repetitions: int = 1,
) -> List[study.Result]:
"""Runs the supplied Circuit, mimicking quantum hardware.
In contrast to run, this allows for sweeping over different parameter
values.
Args:
program: The circuit to simulate.
params: Parameters to run with the program.
repetitions: The number of repetitions to simulate.
Returns:
Result list for this run; one for each possible parameter
resolver.
"""
if not program.has_measurements():
raise ValueError("Circuit has no measurements to sample.")
_verify_unique_measurement_keys(program)
trial_results = [] # type: List[study.Result]
for param_resolver in study.to_resolvers(params):
measurements = {}
if repetitions == 0:
for _, op, _ in program.findall_operations_with_gate_type(ops.MeasurementGate):
measurements[protocols.measurement_key(op)] = np.empty([0, 1])
else:
measurements = self._run(
circuit=program, param_resolver=param_resolver, repetitions=repetitions
)
trial_results.append(
study.Result.from_single_parameter_set(
params=param_resolver, measurements=measurements
)
)
return trial_results
@abc.abstractmethod
def _run(
self, circuit: circuits.Circuit, param_resolver: study.ParamResolver, repetitions: int
) -> Dict[str, np.ndarray]:
"""Run a simulation, mimicking quantum hardware.
Args:
circuit: The circuit to simulate.
param_resolver: Parameters to run with the program.
repetitions: Number of times to repeat the run. It is expected that
this is validated greater than zero before calling this method.
Returns:
A dictionary from measurement gate key to measurement
results. Measurement results are stored in a 2-dimensional
numpy array, the first dimension corresponding to the repetition
and the second to the actual boolean measurement results (ordered
by the qubits being measured.)
"""
raise NotImplementedError()
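# Illustrative sketch (not part of this module): driving a SimulatesSamples
# implementation through run_sweep. cirq.Simulator is the stock
# implementation; the circuit, sweep, and measurement key are assumptions.
def _example_run_sweep():
    import cirq
    import sympy
    q = cirq.LineQubit(0)
    t = sympy.Symbol('t')
    circuit = cirq.Circuit(cirq.X(q) ** t, cirq.measure(q, key='m'))
    results = cirq.Simulator().run_sweep(
        circuit, params=cirq.Points('t', [0.0, 1.0]), repetitions=10)
    return [r.measurements['m'] for r in results]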
class SimulatesAmplitudes(metaclass=abc.ABCMeta):
"""Simulator that computes final amplitudes of given bitstrings.
Given a circuit and a list of bitstrings, computes the amplitudes
of the given bitstrings in the state obtained by applying the circuit
to the all zeros state. Implementors of this interface should implement
the compute_amplitudes_sweep method.
"""
def compute_amplitudes(
self,
program: 'cirq.Circuit',
bitstrings: Sequence[int],
param_resolver: 'study.ParamResolverOrSimilarType' = None,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
) -> Sequence[complex]:
"""Computes the desired amplitudes.
The initial state is assumed to be the all zeros state.
Args:
program: The circuit to simulate.
bitstrings: The bitstrings whose amplitudes are desired, input
as an integer array where each integer is formed from measured
qubit values according to `qubit_order` from most to least
significant qubit, i.e. in big-endian ordering.
param_resolver: Parameters to run with the program.
qubit_order: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
Returns:
List of amplitudes.
"""
return self.compute_amplitudes_sweep(
program, bitstrings, study.ParamResolver(param_resolver), qubit_order
)[0]
@abc.abstractmethod
def compute_amplitudes_sweep(
self,
program: 'cirq.Circuit',
bitstrings: Sequence[int],
params: study.Sweepable,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
) -> Sequence[Sequence[complex]]:
"""Computes the desired amplitudes.
The initial state is assumed to be the all zeros state.
Args:
program: The circuit to simulate.
bitstrings: The bitstrings whose amplitudes are desired, input
as an integer array where each integer is formed from measured
qubit values according to `qubit_order` from most to least
significant qubit, i.e. in big-endian ordering.
params: Parameters to run with the program.
qubit_order: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
Returns:
List of lists of amplitudes. The outer dimension indexes the
circuit parameters and the inner dimension indexes the bitstrings.
"""
raise NotImplementedError()
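# Editor's note: a hedged sketch of the big-endian bitstring convention
# documented above (assumes `cirq` is installed; the helper is hypothetical).
# With two qubits, the integer 3 == 0b11 selects the |11> basis state.
def _example_compute_amplitudes_usage():
    import cirq

    a, b = cirq.LineQubit.range(2)
    bell = cirq.Circuit([cirq.H(a), cirq.CNOT(a, b)])
    # Amplitudes of |00> (0b00) and |11> (0b11); each should be ~1/sqrt(2).
    return cirq.Simulator().compute_amplitudes(bell, bitstrings=[0, 3])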
class SimulatesExpectationValues(metaclass=abc.ABCMeta):
"""Simulator that computes exact expectation values of observables.
Given a circuit and an observable map, computes exact (to float precision)
expectation values for each observable at the end of the circuit.
Implementors of this interface should implement the
simulate_expectation_values_sweep method.
"""
def simulate_expectation_values(
self,
program: 'cirq.Circuit',
observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],
param_resolver: 'study.ParamResolverOrSimilarType' = None,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
initial_state: Any = None,
permit_terminal_measurements: bool = False,
) -> List[float]:
"""Simulates the supplied circuit and calculates exact expectation
values for the given observables on its final state.
This method has no perfect analogy in hardware. Instead compare with
Sampler.sample_expectation_values, which calculates estimated
expectation values by sampling multiple times.
Args:
program: The circuit to simulate.
observables: An observable or list of observables.
param_resolver: Parameters to run with the program.
qubit_order: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
initial_state: The initial state for the simulation. The form of
this state depends on the simulation implementation. See
documentation of the implementing class for details.
permit_terminal_measurements: If the provided circuit ends with
measurement(s), this method will generate an error unless this
is set to True. This is meant to prevent measurements from
ruining expectation value calculations.
Returns:
A list of expectation values, with the value at index `n`
corresponding to `observables[n]` from the input.
Raises:
ValueError if 'program' has terminal measurement(s) and
'permit_terminal_measurements' is False.
"""
return self.simulate_expectation_values_sweep(
program,
observables,
study.ParamResolver(param_resolver),
qubit_order,
initial_state,
permit_terminal_measurements,
)[0]
@abc.abstractmethod
def simulate_expectation_values_sweep(
self,
program: 'cirq.Circuit',
observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],
params: 'study.Sweepable',
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
initial_state: Any = None,
permit_terminal_measurements: bool = False,
) -> List[List[float]]:
"""Simulates the supplied circuit and calculates exact expectation
values for the given observables on its final state, sweeping over the
given params.
This method has no perfect analogy in hardware. Instead compare with
Sampler.sample_expectation_values, which calculates estimated
expectation values by sampling multiple times.
Args:
program: The circuit to simulate.
observables: An observable or list of observables.
params: Parameters to run with the program.
qubit_order: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
initial_state: The initial state for the simulation. The form of
this state depends on the simulation implementation. See
documentation of the implementing class for details.
permit_terminal_measurements: If the provided circuit ends in a
measurement, this method will generate an error unless this
is set to True. This is meant to prevent measurements from
ruining expectation value calculations.
Returns:
A list of expectation-value lists. The outer index determines the
sweep, and the inner index determines the observable. For instance,
results[1][3] would select the fourth observable measured in the
second sweep.
Raises:
ValueError if 'program' has terminal measurement(s) and
'permit_terminal_measurements' is False.
"""
class SimulatesFinalState(Generic[TSimulationTrialResult], metaclass=abc.ABCMeta):
"""Simulator that allows access to the simulator's final state.
Implementors of this interface should implement the simulate_sweep
method. This simulator only returns the state of the quantum system
for the final step of a simulation. This simulator state may be a state
vector, the density matrix, or another representation, depending on the
implementation. For simulators that also allow stepping through
a circuit see `SimulatesIntermediateState`.
"""
def simulate(
self,
program: 'cirq.Circuit',
param_resolver: 'study.ParamResolverOrSimilarType' = None,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
initial_state: Any = None,
) -> TSimulationTrialResult:
"""Simulates the supplied Circuit.
This method returns a result which allows access to the entire
simulator's final state.
Args:
program: The circuit to simulate.
param_resolver: Parameters to run with the program.
qubit_order: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
initial_state: The initial state for the simulation. The form of
this state depends on the simulation implementation. See
documentation of the implementing class for details.
Returns:
SimulationTrialResults for the simulation. Includes the final state.
"""
return self.simulate_sweep(
program, study.ParamResolver(param_resolver), qubit_order, initial_state
)[0]
@abc.abstractmethod
def simulate_sweep(
self,
program: 'cirq.Circuit',
params: study.Sweepable,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
initial_state: Any = None,
) -> List[TSimulationTrialResult]:
"""Simulates the supplied Circuit.
This method returns a result which allows access to the entire final
simulator state. In contrast to simulate, this allows for sweeping
over different parameter values.
Args:
program: The circuit to simulate.
params: Parameters to run with the program.
qubit_order: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
initial_state: The initial state for the simulation. The form of
this state depends on the simulation implementation. See
documentation of the implementing class for details.
Returns:
List of SimulationTrialResults for this run, one for each
possible parameter resolver.
"""
raise NotImplementedError()
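# Editor's note: a hedged sketch of simulate() returning a trial result that
# carries the final simulator state (assumes `cirq` is installed and a
# state-vector simulator; the helper is hypothetical).
def _example_simulate_usage():
    import cirq

    q = cirq.LineQubit(0)
    result = cirq.Simulator().simulate(cirq.Circuit([cirq.X(q)]))
    # For the state-vector simulator this is the amplitudes of |1>.
    return result.final_state_vector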
class SimulatesIntermediateState(
Generic[TStepResult, TSimulationTrialResult, TSimulatorState],
SimulatesFinalState[TSimulationTrialResult],
metaclass=abc.ABCMeta,
):
"""A SimulatesFinalState that simulates a circuit by moments.
Whereas a general SimulatesFinalState may return the entire simulator
state at the end of a circuit, a SimulatesIntermediateState can
simulate stepping through the moments of a circuit.
Implementors of this interface should implement the _base_iterator
method.
Note that state here refers to simulator state, which is not necessarily
a state vector.
"""
def simulate_sweep(
self,
program: 'cirq.Circuit',
params: study.Sweepable,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
initial_state: Any = None,
) -> List[TSimulationTrialResult]:
"""Simulates the supplied Circuit.
This method returns a result which allows access to the entire
state vector. In contrast to simulate, this allows for sweeping
over different parameter values.
Args:
program: The circuit to simulate.
params: Parameters to run with the program.
qubit_order: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
initial_state: The initial state for the simulation. The form of
this state depends on the simulation implementation. See
documentation of the implementing class for details.
Returns:
List of SimulationTrialResults for this run, one for each
possible parameter resolver.
"""
trial_results = []
qubit_order = ops.QubitOrder.as_qubit_order(qubit_order)
for param_resolver in study.to_resolvers(params):
all_step_results = self.simulate_moment_steps(
program, param_resolver, qubit_order, initial_state
)
measurements = {} # type: Dict[str, np.ndarray]
for step_result in all_step_results:
for k, v in step_result.measurements.items():
measurements[k] = np.array(v, dtype=np.uint8)
trial_results.append(
self._create_simulator_trial_result(
params=param_resolver,
measurements=measurements,
final_simulator_state=step_result._simulator_state(),
)
)
return trial_results
def simulate_moment_steps(
self,
circuit: circuits.Circuit,
param_resolver: 'study.ParamResolverOrSimilarType' = None,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
initial_state: Any = None,
) -> Iterator[TStepResult]:
"""Returns an iterator of StepResults for each moment simulated.
If the circuit being simulated is empty, a single step result should
be returned with the state being set to the initial state.
Args:
circuit: The Circuit to simulate.
param_resolver: A ParamResolver for determining values of Symbols.
qubit_order: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
initial_state: The initial state for the simulation. The form of
this state depends on the simulation implementation. See
documentation of the implementing class for details.
Returns:
Iterator that steps through the simulation, simulating each
moment and returning a StepResult for each moment.
"""
param_resolver = study.ParamResolver(param_resolver)
resolved_circuit = protocols.resolve_parameters(circuit, param_resolver)
check_all_resolved(resolved_circuit)
actual_initial_state = 0 if initial_state is None else initial_state
return self._base_iterator(resolved_circuit, qubit_order, actual_initial_state)
@deprecated(deadline='v0.11', fix='Override _base_iterator instead')
def _simulator_iterator(
self,
circuit: circuits.Circuit,
param_resolver: study.ParamResolver,
qubit_order: ops.QubitOrderOrList,
initial_state: Any,
) -> Iterator[TStepResult]:
"""Iterator over StepResult from Moments of a Circuit.
If the initial state is an int, the state is set to the computational
basis state corresponding to this state. Otherwise if the initial
state is a np.ndarray it is the full initial state, either a pure state
or the full density matrix. If it is the pure state it must be the
correct size, be normalized (an L2 norm of 1), and be safely castable
to an appropriate dtype for the simulator. If it is a mixed state
it must be correctly sized and positive semidefinite with trace one.
Args:
circuit: The circuit to simulate.
param_resolver: A ParamResolver for determining values of
Symbols.
qubit_order: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
initial_state: The initial state for the simulation. The form of
this state depends on the simulation implementation. See
documentation of the implementing class for details.
Yields:
StepResults from simulating a Moment of the Circuit.
"""
return self.simulate_moment_steps(circuit, param_resolver, qubit_order, initial_state)
@abc.abstractmethod
def _base_iterator(
self,
circuit: circuits.Circuit,
qubit_order: ops.QubitOrderOrList,
initial_state: Any,
) -> Iterator[TStepResult]:
"""Iterator over StepResult from Moments of a Circuit.
Args:
circuit: The circuit to simulate.
param_resolver: A ParamResolver for determining values of
Symbols.
qubit_order: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
initial_state: The initial state for the simulation. The form of
this state depends on the simulation implementation. See
documentation of the implementing class for details.
Yields:
StepResults from simulating a Moment of the Circuit.
"""
raise NotImplementedError()
@abc.abstractmethod
def _create_simulator_trial_result(
self,
params: study.ParamResolver,
measurements: Dict[str, np.ndarray],
final_simulator_state: TSimulatorState,
) -> TSimulationTrialResult:
"""This method can be implemented to create a trial result.
Args:
params: The ParamResolver for this trial.
measurements: The measurement results for this trial.
final_simulator_state: The final state of the simulator for the
StepResult.
Returns:
The SimulationTrialResult.
"""
raise NotImplementedError()
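# Editor's note: a hedged sketch of stepping through a circuit moment by
# moment (assumes `cirq` is installed and that `cirq.Simulator` is a
# SimulatesIntermediateState; the helper is hypothetical).
def _example_moment_steps_usage():
    import cirq

    q = cirq.LineQubit(0)
    circuit = cirq.Circuit([cirq.H(q), cirq.measure(q, key='m')])
    # One StepResult per moment; the last one carries the measurement.
    steps = list(cirq.Simulator().simulate_moment_steps(circuit))
    return steps[-1].measurements['m']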
class StepResult(Generic[TSimulatorState], metaclass=abc.ABCMeta):
"""Results of a step of a SimulatesIntermediateState.
Attributes:
measurements: A dictionary from measurement gate key to measurement
results, ordered by the qubits that the measurement operates on.
"""
def __init__(self, measurements: Optional[Dict[str, List[int]]] = None) -> None:
self.measurements = measurements or collections.defaultdict(list)
@abc.abstractmethod
def _simulator_state(self) -> TSimulatorState:
"""Returns the simulator state of the simulator after this step.
This method starts with an underscore to indicate that it is private.
To access public state, see public methods on StepResult.
        The form of the simulator_state depends on the implementation of the
        simulation; see documentation of the implementing class for details.
"""
@abc.abstractmethod
def sample(
self,
qubits: List[ops.Qid],
repetitions: int = 1,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> np.ndarray:
"""Samples from the system at this point in the computation.
Note that this does not collapse the state vector.
Args:
            qubits: The qubits to be sampled in an order that influences the
                returned measurement results.
repetitions: The number of samples to take.
seed: A seed for the pseudorandom number generator.
Returns:
Measurement results with True corresponding to the ``|1⟩`` state.
The outer list is for repetitions, and the inner corresponds to
measurements ordered by the supplied qubits. These lists
            are wrapped as a numpy ndarray.
"""
raise NotImplementedError()
def sample_measurement_ops(
self,
measurement_ops: List[ops.GateOperation],
repetitions: int = 1,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> Dict[str, np.ndarray]:
"""Samples from the system at this point in the computation.
Note that this does not collapse the state vector.
In contrast to `sample` which samples qubits, this takes a list of
`cirq.GateOperation` instances whose gates are `cirq.MeasurementGate`
instances and then returns a mapping from the key in the measurement
gate to the resulting bit strings. Different measurement operations must
not act on the same qubits.
Args:
measurement_ops: `GateOperation` instances whose gates are
                `MeasurementGate` instances to be sampled from.
repetitions: The number of samples to take.
seed: A seed for the pseudorandom number generator.
Returns: A dictionary from measurement gate key to measurement
results. Measurement results are stored in a 2-dimensional
numpy array, the first dimension corresponding to the repetition
and the second to the actual boolean measurement results (ordered
by the qubits being measured.)
Raises:
ValueError: If the operation's gates are not `MeasurementGate`
instances or a qubit is acted upon multiple times by different
operations from `measurement_ops`.
"""
# Sanity checks.
seen_measurement_keys: Set[str] = set()
for op in measurement_ops:
gate = op.gate
if not isinstance(gate, ops.MeasurementGate):
raise ValueError(f'{op.gate} was not a MeasurementGate')
key = protocols.measurement_key(gate)
if key in seen_measurement_keys:
raise ValueError(f'Duplicate MeasurementGate with key {key}')
seen_measurement_keys.add(key)
# Find measured qubits, ensuring a consistent ordering.
measured_qubits = []
seen_qubits: Set[cirq.Qid] = set()
for op in measurement_ops:
for q in op.qubits:
if q not in seen_qubits:
seen_qubits.add(q)
measured_qubits.append(q)
# Perform whole-system sampling of the measured qubits.
indexed_sample = self.sample(measured_qubits, repetitions, seed=seed)
# Extract results for each measurement.
results: Dict[str, np.ndarray] = {}
qubits_to_index = {q: i for i, q in enumerate(measured_qubits)}
for op in measurement_ops:
gate = cast(ops.MeasurementGate, op.gate)
out = np.zeros(shape=(repetitions, len(op.qubits)), dtype=np.int8)
inv_mask = gate.full_invert_mask()
for i, q in enumerate(op.qubits):
out[:, i] = indexed_sample[:, qubits_to_index[q]]
if inv_mask[i]:
out[:, i] ^= out[:, i] < 2
results[gate.key] = out
return results
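# Editor's note: a hedged, self-contained illustration of the invert-mask
# trick used in sample_measurement_ops above (`out[:, i] ^= out[:, i] < 2`):
# XOR-ing with the boolean mask flips qubit results 0 <-> 1 while leaving
# higher-dimensional (qudit) results untouched. Values are hypothetical.
def _example_invert_trick():
    import numpy as np

    col = np.array([0, 1, 2], dtype=np.int8)  # two qubit outcomes, one qutrit
    col ^= col < 2  # flips 0 <-> 1; 2 stays 2 because the mask is False there
    return col  # array([1, 0, 2], dtype=int8)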
@value.value_equality(unhashable=True)
class SimulationTrialResult:
"""Results of a simulation by a SimulatesFinalState.
Unlike Result these results contain the final simulator_state of the
system. This simulator_state is dependent on the simulation implementation
and may be, for example, the state vector or the density matrix of the
system.
Attributes:
params: A ParamResolver of settings used for this result.
measurements: A dictionary from measurement gate key to measurement
results. Measurement results are a numpy ndarray of actual boolean
measurement results (ordered by the qubits acted on by the
measurement gate.)
"""
def __init__(
self,
params: study.ParamResolver,
measurements: Dict[str, np.ndarray],
final_simulator_state: Any,
) -> None:
self.params = params
self.measurements = measurements
self._final_simulator_state = final_simulator_state
def __repr__(self) -> str:
return (
f'cirq.SimulationTrialResult(params={self.params!r}, '
f'measurements={self.measurements!r}, '
f'final_simulator_state={self._final_simulator_state!r})'
)
def __str__(self) -> str:
def bitstring(vals):
separator = ' ' if np.max(vals) >= 10 else ''
return separator.join(str(int(v)) for v in vals)
results = sorted([(key, bitstring(val)) for key, val in self.measurements.items()])
if not results:
return '(no measurements)'
return ' '.join([f'{key}={val}' for key, val in results])
def _repr_pretty_(self, p: Any, cycle: bool) -> None:
"""Text output in Jupyter."""
if cycle:
# There should never be a cycle. This is just in case.
p.text('SimulationTrialResult(...)')
else:
p.text(str(self))
def _value_equality_values_(self) -> Any:
measurements = {k: v.tolist() for k, v in sorted(self.measurements.items())}
return (self.params, measurements, self._final_simulator_state)
@property
def qubit_map(self) -> Dict[ops.Qid, int]:
"""A map from Qid to index used to define the ordering of the basis in
the result.
"""
return self._final_simulator_state.qubit_map
def _qid_shape_(self) -> Tuple[int, ...]:
return _qubit_map_to_shape(self.qubit_map)
def _qubit_map_to_shape(qubit_map: Dict[ops.Qid, int]) -> Tuple[int, ...]:
qid_shape: List[int] = [-1] * len(qubit_map)
try:
for q, i in qubit_map.items():
qid_shape[i] = q.dimension
except IndexError:
raise ValueError(f'Invalid qubit_map. Qubit index out of bounds. Map is <{qubit_map!r}>.')
if -1 in qid_shape:
raise ValueError(f'Invalid qubit_map. Duplicate qubit index. Map is <{qubit_map!r}>.')
return tuple(qid_shape)
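# Editor's note: a hedged example of _qubit_map_to_shape (assumes `cirq` is
# installed; the helper is hypothetical). A qubit (dimension 2) at index 0 and
# a qutrit (dimension 3) at index 1 yield the qid shape (2, 3).
def _example_qubit_map_to_shape():
    import cirq

    q0 = cirq.LineQubit(0)  # dimension 2
    q1 = cirq.LineQid(1, dimension=3)  # dimension 3
    return _qubit_map_to_shape({q0: 0, q1: 1})  # (2, 3)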
def _verify_unique_measurement_keys(circuit: circuits.Circuit):
result = collections.Counter(
key for op in ops.flatten_op_tree(iter(circuit)) for key in protocols.measurement_keys(op)
)
if result:
duplicates = [k for k, v in result.most_common() if v > 1]
if duplicates:
raise ValueError(f"Measurement key {','.join(duplicates)} repeated")
def check_all_resolved(circuit):
"""Raises if the circuit contains unresolved symbols."""
if protocols.is_parameterized(circuit):
unresolved = [op for moment in circuit for op in moment if protocols.is_parameterized(op)]
raise ValueError(
'Circuit contains ops whose symbols were not specified in '
'parameter sweep. Ops: {}'.format(unresolved)
)
def split_into_matching_protocol_then_general(
circuit: 'cirq.Circuit',
predicate: Callable[['cirq.Operation'], bool],
) -> Tuple['cirq.Circuit', 'cirq.Circuit']:
"""Splits the circuit into a matching prefix and non-matching suffix.
The splitting happens in a per-qubit fashion. A non-matching operation on
qubit A will cause later operations on A to be part of the non-matching
suffix, but later operations on other qubits will continue to be put into
the matching part (as long as those qubits have had no non-matching operation
up to that point).
"""
blocked_qubits: Set[cirq.Qid] = set()
matching_prefix = circuits.Circuit()
general_suffix = circuits.Circuit()
for moment in circuit:
matching_part = []
general_part = []
for op in moment:
qs = set(op.qubits)
if not predicate(op) or not qs.isdisjoint(blocked_qubits):
blocked_qubits |= qs
if qs.isdisjoint(blocked_qubits):
matching_part.append(op)
else:
general_part.append(op)
if matching_part:
matching_prefix.append(ops.Moment(matching_part))
if general_part:
general_suffix.append(ops.Moment(general_part))
return matching_prefix, general_suffix
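# Editor's note: a hedged example of the per-qubit split described in the
# docstring above (assumes `cirq` is installed; the helper is hypothetical).
# Ops before the first measurement on each qubit land in the matching prefix.
def _example_split_usage():
    import cirq

    a, b = cirq.LineQubit.range(2)
    circuit = cirq.Circuit([cirq.H(a), cirq.measure(a), cirq.X(b)])
    prefix, suffix = split_into_matching_protocol_then_general(
        circuit, lambda op: not cirq.is_measurement(op)
    )
    return prefix, suffix  # prefix: H(a) and X(b); suffix: measure(a)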
| 40.355721
| 98
| 0.654719
|
from typing import (
Any,
Dict,
Iterator,
List,
Sequence,
Tuple,
Union,
Optional,
TYPE_CHECKING,
Set,
cast,
Callable,
TypeVar,
Generic,
)
import abc
import collections
import numpy as np
from cirq import circuits, ops, protocols, study, value, work
from cirq._compat import deprecated
if TYPE_CHECKING:
import cirq
TStepResult = TypeVar('TStepResult', bound='StepResult')
TSimulationTrialResult = TypeVar('TSimulationTrialResult', bound='SimulationTrialResult')
TSimulatorState = TypeVar('TSimulatorState')
class SimulatesSamples(work.Sampler, metaclass=abc.ABCMeta):
def run_sweep(
self,
program: 'cirq.Circuit',
params: study.Sweepable,
repetitions: int = 1,
) -> List[study.Result]:
if not program.has_measurements():
raise ValueError("Circuit has no measurements to sample.")
_verify_unique_measurement_keys(program)
trial_results = []
for param_resolver in study.to_resolvers(params):
measurements = {}
if repetitions == 0:
for _, op, _ in program.findall_operations_with_gate_type(ops.MeasurementGate):
measurements[protocols.measurement_key(op)] = np.empty([0, 1])
else:
measurements = self._run(
circuit=program, param_resolver=param_resolver, repetitions=repetitions
)
trial_results.append(
study.Result.from_single_parameter_set(
params=param_resolver, measurements=measurements
)
)
return trial_results
@abc.abstractmethod
def _run(
self, circuit: circuits.Circuit, param_resolver: study.ParamResolver, repetitions: int
) -> Dict[str, np.ndarray]:
raise NotImplementedError()
class SimulatesAmplitudes(metaclass=abc.ABCMeta):
def compute_amplitudes(
self,
program: 'cirq.Circuit',
bitstrings: Sequence[int],
param_resolver: 'study.ParamResolverOrSimilarType' = None,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
) -> Sequence[complex]:
return self.compute_amplitudes_sweep(
program, bitstrings, study.ParamResolver(param_resolver), qubit_order
)[0]
@abc.abstractmethod
def compute_amplitudes_sweep(
self,
program: 'cirq.Circuit',
bitstrings: Sequence[int],
params: study.Sweepable,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
) -> Sequence[Sequence[complex]]:
raise NotImplementedError()
class SimulatesExpectationValues(metaclass=abc.ABCMeta):
def simulate_expectation_values(
self,
program: 'cirq.Circuit',
observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],
param_resolver: 'study.ParamResolverOrSimilarType' = None,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
initial_state: Any = None,
permit_terminal_measurements: bool = False,
) -> List[float]:
return self.simulate_expectation_values_sweep(
program,
observables,
study.ParamResolver(param_resolver),
qubit_order,
initial_state,
permit_terminal_measurements,
)[0]
@abc.abstractmethod
def simulate_expectation_values_sweep(
self,
program: 'cirq.Circuit',
observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],
params: 'study.Sweepable',
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
initial_state: Any = None,
permit_terminal_measurements: bool = False,
    ) -> List[List[float]]:
        raise NotImplementedError()
class SimulatesFinalState(Generic[TSimulationTrialResult], metaclass=abc.ABCMeta):
def simulate(
self,
program: 'cirq.Circuit',
param_resolver: 'study.ParamResolverOrSimilarType' = None,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
initial_state: Any = None,
) -> TSimulationTrialResult:
return self.simulate_sweep(
program, study.ParamResolver(param_resolver), qubit_order, initial_state
)[0]
@abc.abstractmethod
def simulate_sweep(
self,
program: 'cirq.Circuit',
params: study.Sweepable,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
initial_state: Any = None,
) -> List[TSimulationTrialResult]:
raise NotImplementedError()
class SimulatesIntermediateState(
Generic[TStepResult, TSimulationTrialResult, TSimulatorState],
SimulatesFinalState[TSimulationTrialResult],
metaclass=abc.ABCMeta,
):
def simulate_sweep(
self,
program: 'cirq.Circuit',
params: study.Sweepable,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
initial_state: Any = None,
) -> List[TSimulationTrialResult]:
trial_results = []
qubit_order = ops.QubitOrder.as_qubit_order(qubit_order)
for param_resolver in study.to_resolvers(params):
all_step_results = self.simulate_moment_steps(
program, param_resolver, qubit_order, initial_state
)
measurements = {}
for step_result in all_step_results:
for k, v in step_result.measurements.items():
measurements[k] = np.array(v, dtype=np.uint8)
trial_results.append(
self._create_simulator_trial_result(
params=param_resolver,
measurements=measurements,
final_simulator_state=step_result._simulator_state(),
)
)
return trial_results
def simulate_moment_steps(
self,
circuit: circuits.Circuit,
param_resolver: 'study.ParamResolverOrSimilarType' = None,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
initial_state: Any = None,
) -> Iterator[TStepResult]:
param_resolver = study.ParamResolver(param_resolver)
resolved_circuit = protocols.resolve_parameters(circuit, param_resolver)
check_all_resolved(resolved_circuit)
actual_initial_state = 0 if initial_state is None else initial_state
return self._base_iterator(resolved_circuit, qubit_order, actual_initial_state)
@deprecated(deadline='v0.11', fix='Override _base_iterator instead')
def _simulator_iterator(
self,
circuit: circuits.Circuit,
param_resolver: study.ParamResolver,
qubit_order: ops.QubitOrderOrList,
initial_state: Any,
) -> Iterator[TStepResult]:
return self.simulate_moment_steps(circuit, param_resolver, qubit_order, initial_state)
@abc.abstractmethod
def _base_iterator(
self,
circuit: circuits.Circuit,
qubit_order: ops.QubitOrderOrList,
initial_state: Any,
) -> Iterator[TStepResult]:
raise NotImplementedError()
@abc.abstractmethod
def _create_simulator_trial_result(
self,
params: study.ParamResolver,
measurements: Dict[str, np.ndarray],
final_simulator_state: TSimulatorState,
) -> TSimulationTrialResult:
raise NotImplementedError()
class StepResult(Generic[TSimulatorState], metaclass=abc.ABCMeta):
def __init__(self, measurements: Optional[Dict[str, List[int]]] = None) -> None:
self.measurements = measurements or collections.defaultdict(list)
@abc.abstractmethod
    def _simulator_state(self) -> TSimulatorState:
        raise NotImplementedError()
@abc.abstractmethod
def sample(
self,
qubits: List[ops.Qid],
repetitions: int = 1,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> np.ndarray:
raise NotImplementedError()
def sample_measurement_ops(
self,
measurement_ops: List[ops.GateOperation],
repetitions: int = 1,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> Dict[str, np.ndarray]:
seen_measurement_keys: Set[str] = set()
for op in measurement_ops:
gate = op.gate
if not isinstance(gate, ops.MeasurementGate):
raise ValueError(f'{op.gate} was not a MeasurementGate')
key = protocols.measurement_key(gate)
if key in seen_measurement_keys:
raise ValueError(f'Duplicate MeasurementGate with key {key}')
seen_measurement_keys.add(key)
measured_qubits = []
seen_qubits: Set[cirq.Qid] = set()
for op in measurement_ops:
for q in op.qubits:
if q not in seen_qubits:
seen_qubits.add(q)
measured_qubits.append(q)
indexed_sample = self.sample(measured_qubits, repetitions, seed=seed)
results: Dict[str, np.ndarray] = {}
qubits_to_index = {q: i for i, q in enumerate(measured_qubits)}
for op in measurement_ops:
gate = cast(ops.MeasurementGate, op.gate)
out = np.zeros(shape=(repetitions, len(op.qubits)), dtype=np.int8)
inv_mask = gate.full_invert_mask()
for i, q in enumerate(op.qubits):
out[:, i] = indexed_sample[:, qubits_to_index[q]]
if inv_mask[i]:
out[:, i] ^= out[:, i] < 2
results[gate.key] = out
return results
@value.value_equality(unhashable=True)
class SimulationTrialResult:
def __init__(
self,
params: study.ParamResolver,
measurements: Dict[str, np.ndarray],
final_simulator_state: Any,
) -> None:
self.params = params
self.measurements = measurements
self._final_simulator_state = final_simulator_state
def __repr__(self) -> str:
return (
f'cirq.SimulationTrialResult(params={self.params!r}, '
f'measurements={self.measurements!r}, '
f'final_simulator_state={self._final_simulator_state!r})'
)
def __str__(self) -> str:
def bitstring(vals):
separator = ' ' if np.max(vals) >= 10 else ''
return separator.join(str(int(v)) for v in vals)
results = sorted([(key, bitstring(val)) for key, val in self.measurements.items()])
if not results:
return '(no measurements)'
return ' '.join([f'{key}={val}' for key, val in results])
def _repr_pretty_(self, p: Any, cycle: bool) -> None:
if cycle:
p.text('SimulationTrialResult(...)')
else:
p.text(str(self))
def _value_equality_values_(self) -> Any:
measurements = {k: v.tolist() for k, v in sorted(self.measurements.items())}
return (self.params, measurements, self._final_simulator_state)
@property
def qubit_map(self) -> Dict[ops.Qid, int]:
return self._final_simulator_state.qubit_map
def _qid_shape_(self) -> Tuple[int, ...]:
return _qubit_map_to_shape(self.qubit_map)
def _qubit_map_to_shape(qubit_map: Dict[ops.Qid, int]) -> Tuple[int, ...]:
qid_shape: List[int] = [-1] * len(qubit_map)
try:
for q, i in qubit_map.items():
qid_shape[i] = q.dimension
except IndexError:
raise ValueError(f'Invalid qubit_map. Qubit index out of bounds. Map is <{qubit_map!r}>.')
if -1 in qid_shape:
raise ValueError(f'Invalid qubit_map. Duplicate qubit index. Map is <{qubit_map!r}>.')
return tuple(qid_shape)
def _verify_unique_measurement_keys(circuit: circuits.Circuit):
result = collections.Counter(
key for op in ops.flatten_op_tree(iter(circuit)) for key in protocols.measurement_keys(op)
)
if result:
duplicates = [k for k, v in result.most_common() if v > 1]
if duplicates:
raise ValueError(f"Measurement key {','.join(duplicates)} repeated")
def check_all_resolved(circuit):
if protocols.is_parameterized(circuit):
unresolved = [op for moment in circuit for op in moment if protocols.is_parameterized(op)]
raise ValueError(
'Circuit contains ops whose symbols were not specified in '
'parameter sweep. Ops: {}'.format(unresolved)
)
def split_into_matching_protocol_then_general(
circuit: 'cirq.Circuit',
predicate: Callable[['cirq.Operation'], bool],
) -> Tuple['cirq.Circuit', 'cirq.Circuit']:
blocked_qubits: Set[cirq.Qid] = set()
matching_prefix = circuits.Circuit()
general_suffix = circuits.Circuit()
for moment in circuit:
matching_part = []
general_part = []
for op in moment:
qs = set(op.qubits)
if not predicate(op) or not qs.isdisjoint(blocked_qubits):
blocked_qubits |= qs
if qs.isdisjoint(blocked_qubits):
matching_part.append(op)
else:
general_part.append(op)
if matching_part:
matching_prefix.append(ops.Moment(matching_part))
if general_part:
general_suffix.append(ops.Moment(general_part))
return matching_prefix, general_suffix
| true
| true
|
1c47cab40dab1478d28390903e21858b737bfe1a
| 1,859
|
py
|
Python
|
tools/site_compare/commands/scrape.py
|
rwatson/chromium-capsicum
|
b03da8e897f897c6ad2cda03ceda217b760fd528
|
[
"BSD-3-Clause"
] | 11
|
2015-03-20T04:08:08.000Z
|
2021-11-15T15:51:36.000Z
|
tools/site_compare/commands/scrape.py
|
changbai1980/chromium
|
c4625eefca763df86471d798ee5a4a054b4716ae
|
[
"BSD-3-Clause"
] | null | null | null |
tools/site_compare/commands/scrape.py
|
changbai1980/chromium
|
c4625eefca763df86471d798ee5a4a054b4716ae
|
[
"BSD-3-Clause"
] | 1
|
2020-04-13T05:45:10.000Z
|
2020-04-13T05:45:10.000Z
|
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Command for scraping images from a URL or list of URLs.
Prerequisites:
1. The command_line package from tools/site_compare
2. Either the IE BHO or Firefox extension (or both)
Installation:
1. Build the IE BHO, or call regsvr32 on a prebuilt binary
2. Add a file called "measurepageloadtimeextension@google.com" to
the default Firefox profile directory under extensions, containing
the path to the Firefox extension root
Invoke with the command line arguments as documented in the
command-line help.
"""
import command_line
from drivers import windowing
from utils import browser_iterate
def CreateCommand(cmdline):
"""Inserts the command and arguments into a command line for parsing."""
cmd = cmdline.AddCommand(
["scrape"],
"Scrapes an image from a URL or series of URLs.",
None,
ExecuteScrape)
browser_iterate.SetupIterationCommandLine(cmd)
cmd.AddArgument(
["-log", "--logfile"], "File to write text output", type="string")
cmd.AddArgument(
["-out", "--outdir"], "Directory to store scrapes", type="string", required=True)
def ExecuteScrape(command):
"""Executes the Scrape command."""
def ScrapeResult(url, proc, wnd, result):
"""Capture and save the scrape."""
if log_file: log_file.write(result)
# Scrape the page
image = windowing.ScrapeWindow(wnd)
filename = windowing.URLtoFilename(url, command["--outdir"], ".bmp")
image.save(filename)
if command["--logfile"]: log_file = open(command["--logfile"], "w")
else: log_file = None
browser_iterate.Iterate(command, ScrapeResult)
# Close the log file and return. We're done.
if log_file: log_file.close()
| 29.983871
| 85
| 0.720818
|
import command_line
from drivers import windowing
from utils import browser_iterate
def CreateCommand(cmdline):
cmd = cmdline.AddCommand(
["scrape"],
"Scrapes an image from a URL or series of URLs.",
None,
ExecuteScrape)
browser_iterate.SetupIterationCommandLine(cmd)
cmd.AddArgument(
["-log", "--logfile"], "File to write text output", type="string")
cmd.AddArgument(
["-out", "--outdir"], "Directory to store scrapes", type="string", required=True)
def ExecuteScrape(command):
def ScrapeResult(url, proc, wnd, result):
if log_file: log_file.write(result)
image = windowing.ScrapeWindow(wnd)
filename = windowing.URLtoFilename(url, command["--outdir"], ".bmp")
image.save(filename)
if command["--logfile"]: log_file = open(command["--logfile"], "w")
else: log_file = None
browser_iterate.Iterate(command, ScrapeResult)
if log_file: log_file.close()
| true
| true
|
1c47cad05b01e57c60e8dd11e39f42258a462d95
| 2,910
|
py
|
Python
|
examples/orbslam_mono_kitti.py
|
frasermcghan/ORB_SLAM3-PythonBindings
|
a4fca4dbfbd70f31490e593f6c9e54c570827524
|
[
"BSD-2-Clause",
"MIT"
] | 3
|
2021-11-12T06:11:19.000Z
|
2022-03-17T04:24:25.000Z
|
examples/orbslam_mono_kitti.py
|
frasermcghan/ORB_SLAM3-PythonBindings
|
a4fca4dbfbd70f31490e593f6c9e54c570827524
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
examples/orbslam_mono_kitti.py
|
frasermcghan/ORB_SLAM3-PythonBindings
|
a4fca4dbfbd70f31490e593f6c9e54c570827524
|
[
"BSD-2-Clause",
"MIT"
] | 1
|
2021-11-12T06:11:23.000Z
|
2021-11-12T06:11:23.000Z
|
#!/usr/bin/env python3
import sys
import os.path
import orbslam3
import time
import cv2
def main(vocab_path, settings_path, sequence_path):
image_filenames, timestamps = load_images(sequence_path)
num_images = len(image_filenames)
slam = orbslam3.System(vocab_path, settings_path, orbslam3.Sensor.MONOCULAR)
slam.set_use_viewer(False)
slam.initialize()
times_track = [0 for _ in range(num_images)]
print("-----")
print("Start processing sequence ...")
print("Images in the sequence: {0}".format(num_images))
for idx in range(num_images):
image = cv2.imread(image_filenames[idx], cv2.IMREAD_UNCHANGED)
tframe = timestamps[idx]
if image is None:
print("failed to load image at {0}".format(image_filenames[idx]))
return 1
t1 = time.time()
slam.process_image_mono(image, tframe)
t2 = time.time()
ttrack = t2 - t1
times_track[idx] = ttrack
t = 0
if idx < num_images - 1:
t = timestamps[idx + 1] - tframe
elif idx > 0:
t = tframe - timestamps[idx - 1]
if ttrack < t:
time.sleep(t - ttrack)
save_trajectory(slam.get_trajectory_points(), "trajectory.txt")
slam.shutdown()
times_track = sorted(times_track)
total_time = sum(times_track)
print("-----")
print("median tracking time: {0}".format(times_track[num_images // 2]))
print("mean tracking time: {0}".format(total_time / num_images))
return 0
def load_images(path_to_sequence):
timestamps = []
with open(os.path.join(path_to_sequence, "times.txt")) as times_file:
for line in times_file:
if len(line) > 0:
timestamps.append(float(line))
return (
[
os.path.join(path_to_sequence, "image_0", "{0:06}.png".format(idx))
for idx in range(len(timestamps))
],
timestamps,
)
def save_trajectory(trajectory, filename):
with open(filename, "w") as traj_file:
traj_file.writelines(
"{time} {r00} {r01} {r02} {t0} {r10} {r11} {r12} {t1} {r20} {r21} {r22} {t2}\n".format(
time=repr(t),
r00=repr(r00),
r01=repr(r01),
r02=repr(r02),
t0=repr(t0),
r10=repr(r10),
r11=repr(r11),
r12=repr(r12),
t1=repr(t1),
r20=repr(r20),
r21=repr(r21),
r22=repr(r22),
t2=repr(t2),
)
for t, r00, r01, r02, t0, r10, r11, r12, t1, r20, r21, r22, t2 in trajectory
)
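# Editor's note (hedged, illustrative only): each line written above is the
# frame timestamp followed by the flattened 3x4 [R | t] pose matrix in
# row-major order, e.g. with hypothetical values:
#   1.23 1.0 0.0 0.0 0.5 0.0 1.0 0.0 0.0 0.0 0.0 1.0 -0.2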
if __name__ == "__main__":
if len(sys.argv) != 4:
print(
"Usage: ./orbslam_mono_kitti path_to_vocabulary path_to_settings path_to_sequence"
        )
        sys.exit(1)
main(sys.argv[1], sys.argv[2], sys.argv[3])
| 27.980769
| 99
| 0.56323
|
import sys
import os.path
import orbslam3
import time
import cv2
def main(vocab_path, settings_path, sequence_path):
image_filenames, timestamps = load_images(sequence_path)
num_images = len(image_filenames)
slam = orbslam3.System(vocab_path, settings_path, orbslam3.Sensor.MONOCULAR)
slam.set_use_viewer(False)
slam.initialize()
times_track = [0 for _ in range(num_images)]
print("-----")
print("Start processing sequence ...")
print("Images in the sequence: {0}".format(num_images))
for idx in range(num_images):
image = cv2.imread(image_filenames[idx], cv2.IMREAD_UNCHANGED)
tframe = timestamps[idx]
if image is None:
print("failed to load image at {0}".format(image_filenames[idx]))
return 1
t1 = time.time()
slam.process_image_mono(image, tframe)
t2 = time.time()
ttrack = t2 - t1
times_track[idx] = ttrack
t = 0
if idx < num_images - 1:
t = timestamps[idx + 1] - tframe
elif idx > 0:
t = tframe - timestamps[idx - 1]
if ttrack < t:
time.sleep(t - ttrack)
save_trajectory(slam.get_trajectory_points(), "trajectory.txt")
slam.shutdown()
times_track = sorted(times_track)
total_time = sum(times_track)
print("-----")
print("median tracking time: {0}".format(times_track[num_images // 2]))
print("mean tracking time: {0}".format(total_time / num_images))
return 0
def load_images(path_to_sequence):
timestamps = []
with open(os.path.join(path_to_sequence, "times.txt")) as times_file:
for line in times_file:
if len(line) > 0:
timestamps.append(float(line))
return (
[
os.path.join(path_to_sequence, "image_0", "{0:06}.png".format(idx))
for idx in range(len(timestamps))
],
timestamps,
)
def save_trajectory(trajectory, filename):
with open(filename, "w") as traj_file:
traj_file.writelines(
"{time} {r00} {r01} {r02} {t0} {r10} {r11} {r12} {t1} {r20} {r21} {r22} {t2}\n".format(
time=repr(t),
r00=repr(r00),
r01=repr(r01),
r02=repr(r02),
t0=repr(t0),
r10=repr(r10),
r11=repr(r11),
r12=repr(r12),
t1=repr(t1),
r20=repr(r20),
r21=repr(r21),
r22=repr(r22),
t2=repr(t2),
)
for t, r00, r01, r02, t0, r10, r11, r12, t1, r20, r21, r22, t2 in trajectory
)
if __name__ == "__main__":
if len(sys.argv) != 4:
print(
"Usage: ./orbslam_mono_kitti path_to_vocabulary path_to_settings path_to_sequence"
        )
        sys.exit(1)
main(sys.argv[1], sys.argv[2], sys.argv[3])
| true
| true
|
1c47cae1d8d4dc028de321451ca5cca46d806629
| 2,498
|
py
|
Python
|
utils/nodes_key_pair_updator/NodesKeyPairUpdator.py
|
dawidsielski/medical-data-share
|
e462ffcfe0650b4fed2bb113c331a2a7438a8509
|
[
"MIT"
] | null | null | null |
utils/nodes_key_pair_updator/NodesKeyPairUpdator.py
|
dawidsielski/medical-data-share
|
e462ffcfe0650b4fed2bb113c331a2a7438a8509
|
[
"MIT"
] | null | null | null |
utils/nodes_key_pair_updator/NodesKeyPairUpdator.py
|
dawidsielski/medical-data-share
|
e462ffcfe0650b4fed2bb113c331a2a7438a8509
|
[
"MIT"
] | null | null | null |
import os
import requests
import logging
from logging.handlers import TimedRotatingFileHandler
from urllib.parse import urljoin
from configparser import ConfigParser
from data_share import DataShare
from data_share.KeyGeneration import KeyGeneration
from nodes_available.NodesChecker import NodesChecker
from utils.request_id_generator.RequestIdGenerator import RequestIdGenerator
key_path = lambda name: os.path.join('keys', name)
config = ConfigParser()
config.read(os.path.join(os.getcwd(), 'config.ini'), encoding='utf-8')
os.makedirs('logs', exist_ok=True)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(funcName)s:%(message)s')
website_file_rotating_handler = TimedRotatingFileHandler('logs/node_updates.log', when="midnight", interval=1)
website_file_rotating_handler.setLevel(logging.INFO)
website_file_rotating_handler.setFormatter(formatter)
website_file_rotating_handler.suffix = "%Y-%m-%d"
logger.addHandler(website_file_rotating_handler)
class NodeKeyPairUpdator(object):
@staticmethod
def rename_old_keys():
os.rename(key_path('public.key'), key_path('public.old.key'))
os.rename(key_path('private.key'), key_path('private.old.key'))
@staticmethod
def update_keys():
NodeKeyPairUpdator().rename_old_keys()
kg = KeyGeneration()
kg.generate_keys()
kg.save_keys()
NodeKeyPairUpdator.update_key_on_available_nodes()
os.remove(key_path('public.old.key'))
os.remove(key_path('private.old.key'))
logger.info('New_keys_generated')
@staticmethod
def update_key_on_available_nodes():
available_nodes = NodesChecker.get_all_nodes_availability()
logger.info(available_nodes)
for key, value in available_nodes.items():
url = urljoin(value['address'], 'update-keys')
logger.info('Sending for {}'.format(key))
keys = KeyGeneration()
keys.load_keys()
data = {
'node': config.get('NODE', 'LABORATORY_NAME'),
'public_key': keys.public_key.exportKey().decode(),
'request_id': RequestIdGenerator.generate_request_id(),
}
data.update({'signature': DataShare.get_signature_for_message(data, filename='private.old.key').decode()})
r = requests.post(url, json=data)
logger.info('{} {} {} {}'.format(key, url, r.status_code, data))
| 33.756757
| 118
| 0.700961
|
import os
import requests
import logging
from logging.handlers import TimedRotatingFileHandler
from urllib.parse import urljoin
from configparser import ConfigParser
from data_share import DataShare
from data_share.KeyGeneration import KeyGeneration
from nodes_available.NodesChecker import NodesChecker
from utils.request_id_generator.RequestIdGenerator import RequestIdGenerator
key_path = lambda name: os.path.join('keys', name)
config = ConfigParser()
config.read(os.path.join(os.getcwd(), 'config.ini'), encoding='utf-8')
os.makedirs('logs', exist_ok=True)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(funcName)s:%(message)s')
website_file_rotating_handler = TimedRotatingFileHandler('logs/node_updates.log', when="midnight", interval=1)
website_file_rotating_handler.setLevel(logging.INFO)
website_file_rotating_handler.setFormatter(formatter)
website_file_rotating_handler.suffix = "%Y-%m-%d"
logger.addHandler(website_file_rotating_handler)
class NodeKeyPairUpdator(object):
@staticmethod
def rename_old_keys():
os.rename(key_path('public.key'), key_path('public.old.key'))
os.rename(key_path('private.key'), key_path('private.old.key'))
@staticmethod
def update_keys():
NodeKeyPairUpdator().rename_old_keys()
kg = KeyGeneration()
kg.generate_keys()
kg.save_keys()
NodeKeyPairUpdator.update_key_on_available_nodes()
os.remove(key_path('public.old.key'))
os.remove(key_path('private.old.key'))
logger.info('New_keys_generated')
@staticmethod
def update_key_on_available_nodes():
available_nodes = NodesChecker.get_all_nodes_availability()
logger.info(available_nodes)
for key, value in available_nodes.items():
url = urljoin(value['address'], 'update-keys')
logger.info('Sending for {}'.format(key))
keys = KeyGeneration()
keys.load_keys()
data = {
'node': config.get('NODE', 'LABORATORY_NAME'),
'public_key': keys.public_key.exportKey().decode(),
'request_id': RequestIdGenerator.generate_request_id(),
}
data.update({'signature': DataShare.get_signature_for_message(data, filename='private.old.key').decode()})
r = requests.post(url, json=data)
logger.info('{} {} {} {}'.format(key, url, r.status_code, data))
| true
| true
|
1c47cc9cf70b865d84b86c603de769862667adeb
| 1,701
|
py
|
Python
|
pioneer/temp/mujoco_test.py
|
xdralex/pioneer
|
1fb9ea947d1b1cc2eb1f27bc4e8a7f206019b607
|
[
"MIT"
] | 2
|
2020-07-29T07:49:06.000Z
|
2021-04-13T20:57:45.000Z
|
pioneer/temp/mujoco_test.py
|
xdralex/pioneer
|
1fb9ea947d1b1cc2eb1f27bc4e8a7f206019b607
|
[
"MIT"
] | null | null | null |
pioneer/temp/mujoco_test.py
|
xdralex/pioneer
|
1fb9ea947d1b1cc2eb1f27bc4e8a7f206019b607
|
[
"MIT"
] | 2
|
2020-07-25T11:45:54.000Z
|
2021-01-11T07:12:07.000Z
|
import mujoco_py
import numpy as np
from gym import spaces
model = mujoco_py.load_model_from_path('pioneer/envs/assets/pioneer2.xml')
sim = mujoco_py.MjSim(model)
print(f'timestep: {model.opt.timestep}')
bounds = model.jnt_range.copy().astype(np.float32)
low, high = bounds.T
position_space = spaces.Box(low=low, high=high, dtype=np.float32)
print(f'bounds: {bounds}')
print(f'nq={model.nq}, nv={model.nv}')
a0 = sim.get_state()
print(f'qpos={a0.qpos}, qvel={a0.qvel}')
a1 = mujoco_py.MjSimState(a0.time, a0.qpos, [0.2, -0.2], a0.act, a0.udd_state)
sim.set_state(a1)
sim.step()
sim.forward()
print(sim.data.qpos.flat[:])
print(sim.data.qvel.flat[:2])
exit(0)
#
# print(position_space.sample())
#
# sim.step()
#
# print(f"{sim.data.get_body_xpos('pointer')}")
#
# a0 = sim.get_state()
# print(a0)
#
# a1 = mujoco_py.MjSimState(a0.time, -1.0, 0.0, a0.act, a0.udd_state)
# print(a1)
# sim.set_state(a1)
#
# bounds = model.actuator_ctrlrange.copy().astype(np.float32)
# print(bounds)
# print(sim.data.ctrl)
#
# # sim.data.ctrl[:] = [10.0]
#
# sim.step()
# sim.forward()
# a1 = mujoco_py.MjSimState(a0.time, 0.0, 1.0, a0.act, a0.udd_state)
# sim.set_state(a1)
#
# sim.step()
# sim.forward()
#
viewer = mujoco_py.mjviewer.MjViewer(sim)
DEFAULT_CAMERA_CONFIG = {
'trackbodyid': 0,
'distance': 20.0,
'lookat': np.array((0.0, 0.0, 0.0)),
'elevation': -35.0,
'azimuth': 135.0
}
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(viewer.cam, key)[:] = value
else:
setattr(viewer.cam, key, value)
while True:
sim.step()
viewer.render()
# print(f'{sim.get_state()} - {sim.data.get_body_xpos("pointer")}')
| 21
| 78
| 0.661376
|
import mujoco_py
import numpy as np
from gym import spaces
model = mujoco_py.load_model_from_path('pioneer/envs/assets/pioneer2.xml')
sim = mujoco_py.MjSim(model)
print(f'timestep: {model.opt.timestep}')
bounds = model.jnt_range.copy().astype(np.float32)
low, high = bounds.T
position_space = spaces.Box(low=low, high=high, dtype=np.float32)
print(f'bounds: {bounds}')
print(f'nq={model.nq}, nv={model.nv}')
a0 = sim.get_state()
print(f'qpos={a0.qpos}, qvel={a0.qvel}')
a1 = mujoco_py.MjSimState(a0.time, a0.qpos, [0.2, -0.2], a0.act, a0.udd_state)
sim.set_state(a1)
sim.step()
sim.forward()
print(sim.data.qpos.flat[:])
print(sim.data.qvel.flat[:2])
exit(0)
viewer = mujoco_py.mjviewer.MjViewer(sim)
DEFAULT_CAMERA_CONFIG = {
'trackbodyid': 0,
'distance': 20.0,
'lookat': np.array((0.0, 0.0, 0.0)),
'elevation': -35.0,
'azimuth': 135.0
}
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(viewer.cam, key)[:] = value
else:
setattr(viewer.cam, key, value)
while True:
sim.step()
viewer.render()
| true
| true
|
1c47cd19af43c4d1becad7a2ad917dd2ed58f098
| 10,620
|
py
|
Python
|
aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DescribeScalingGroupsRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | 1
|
2019-12-23T12:36:43.000Z
|
2019-12-23T12:36:43.000Z
|
aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DescribeScalingGroupsRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DescribeScalingGroupsRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | 1
|
2021-02-23T11:27:54.000Z
|
2021-02-23T11:27:54.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeScalingGroupsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ess', '2014-08-28', 'DescribeScalingGroups','ess')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ScalingGroupId10(self):
return self.get_query_params().get('ScalingGroupId.10')
def set_ScalingGroupId10(self,ScalingGroupId10):
self.add_query_param('ScalingGroupId.10',ScalingGroupId10)
def get_ScalingGroupId12(self):
return self.get_query_params().get('ScalingGroupId.12')
def set_ScalingGroupId12(self,ScalingGroupId12):
self.add_query_param('ScalingGroupId.12',ScalingGroupId12)
def get_ScalingGroupId13(self):
return self.get_query_params().get('ScalingGroupId.13')
def set_ScalingGroupId13(self,ScalingGroupId13):
self.add_query_param('ScalingGroupId.13',ScalingGroupId13)
def get_ScalingGroupId14(self):
return self.get_query_params().get('ScalingGroupId.14')
def set_ScalingGroupId14(self,ScalingGroupId14):
self.add_query_param('ScalingGroupId.14',ScalingGroupId14)
def get_ScalingGroupId15(self):
return self.get_query_params().get('ScalingGroupId.15')
def set_ScalingGroupId15(self,ScalingGroupId15):
self.add_query_param('ScalingGroupId.15',ScalingGroupId15)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_ScalingGroupName20(self):
return self.get_query_params().get('ScalingGroupName.20')
def set_ScalingGroupName20(self,ScalingGroupName20):
self.add_query_param('ScalingGroupName.20',ScalingGroupName20)
def get_ScalingGroupName19(self):
return self.get_query_params().get('ScalingGroupName.19')
def set_ScalingGroupName19(self,ScalingGroupName19):
self.add_query_param('ScalingGroupName.19',ScalingGroupName19)
def get_ScalingGroupId20(self):
return self.get_query_params().get('ScalingGroupId.20')
def set_ScalingGroupId20(self,ScalingGroupId20):
self.add_query_param('ScalingGroupId.20',ScalingGroupId20)
def get_ScalingGroupName18(self):
return self.get_query_params().get('ScalingGroupName.18')
def set_ScalingGroupName18(self,ScalingGroupName18):
self.add_query_param('ScalingGroupName.18',ScalingGroupName18)
def get_ScalingGroupName17(self):
return self.get_query_params().get('ScalingGroupName.17')
def set_ScalingGroupName17(self,ScalingGroupName17):
self.add_query_param('ScalingGroupName.17',ScalingGroupName17)
def get_ScalingGroupName16(self):
return self.get_query_params().get('ScalingGroupName.16')
def set_ScalingGroupName16(self,ScalingGroupName16):
self.add_query_param('ScalingGroupName.16',ScalingGroupName16)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_ScalingGroupName(self):
return self.get_query_params().get('ScalingGroupName')
def set_ScalingGroupName(self,ScalingGroupName):
self.add_query_param('ScalingGroupName',ScalingGroupName)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_ScalingGroupName1(self):
return self.get_query_params().get('ScalingGroupName.1')
def set_ScalingGroupName1(self,ScalingGroupName1):
self.add_query_param('ScalingGroupName.1',ScalingGroupName1)
def get_ScalingGroupName2(self):
return self.get_query_params().get('ScalingGroupName.2')
def set_ScalingGroupName2(self,ScalingGroupName2):
self.add_query_param('ScalingGroupName.2',ScalingGroupName2)
def get_ScalingGroupId2(self):
return self.get_query_params().get('ScalingGroupId.2')
def set_ScalingGroupId2(self,ScalingGroupId2):
self.add_query_param('ScalingGroupId.2',ScalingGroupId2)
def get_ScalingGroupId1(self):
return self.get_query_params().get('ScalingGroupId.1')
def set_ScalingGroupId1(self,ScalingGroupId1):
self.add_query_param('ScalingGroupId.1',ScalingGroupId1)
def get_ScalingGroupId6(self):
return self.get_query_params().get('ScalingGroupId.6')
def set_ScalingGroupId6(self,ScalingGroupId6):
self.add_query_param('ScalingGroupId.6',ScalingGroupId6)
def get_ScalingGroupId16(self):
return self.get_query_params().get('ScalingGroupId.16')
def set_ScalingGroupId16(self,ScalingGroupId16):
self.add_query_param('ScalingGroupId.16',ScalingGroupId16)
def get_ScalingGroupName7(self):
return self.get_query_params().get('ScalingGroupName.7')
def set_ScalingGroupName7(self,ScalingGroupName7):
self.add_query_param('ScalingGroupName.7',ScalingGroupName7)
def get_ScalingGroupName11(self):
return self.get_query_params().get('ScalingGroupName.11')
def set_ScalingGroupName11(self,ScalingGroupName11):
self.add_query_param('ScalingGroupName.11',ScalingGroupName11)
def get_ScalingGroupId5(self):
return self.get_query_params().get('ScalingGroupId.5')
def set_ScalingGroupId5(self,ScalingGroupId5):
self.add_query_param('ScalingGroupId.5',ScalingGroupId5)
def get_ScalingGroupId17(self):
return self.get_query_params().get('ScalingGroupId.17')
def set_ScalingGroupId17(self,ScalingGroupId17):
self.add_query_param('ScalingGroupId.17',ScalingGroupId17)
def get_ScalingGroupName8(self):
return self.get_query_params().get('ScalingGroupName.8')
def set_ScalingGroupName8(self,ScalingGroupName8):
self.add_query_param('ScalingGroupName.8',ScalingGroupName8)
def get_ScalingGroupName10(self):
return self.get_query_params().get('ScalingGroupName.10')
def set_ScalingGroupName10(self,ScalingGroupName10):
self.add_query_param('ScalingGroupName.10',ScalingGroupName10)
def get_ScalingGroupId4(self):
return self.get_query_params().get('ScalingGroupId.4')
def set_ScalingGroupId4(self,ScalingGroupId4):
self.add_query_param('ScalingGroupId.4',ScalingGroupId4)
def get_ScalingGroupId18(self):
return self.get_query_params().get('ScalingGroupId.18')
def set_ScalingGroupId18(self,ScalingGroupId18):
self.add_query_param('ScalingGroupId.18',ScalingGroupId18)
def get_ScalingGroupName9(self):
return self.get_query_params().get('ScalingGroupName.9')
def set_ScalingGroupName9(self,ScalingGroupName9):
self.add_query_param('ScalingGroupName.9',ScalingGroupName9)
def get_ScalingGroupId3(self):
return self.get_query_params().get('ScalingGroupId.3')
def set_ScalingGroupId3(self,ScalingGroupId3):
self.add_query_param('ScalingGroupId.3',ScalingGroupId3)
def get_ScalingGroupId19(self):
return self.get_query_params().get('ScalingGroupId.19')
def set_ScalingGroupId19(self,ScalingGroupId19):
self.add_query_param('ScalingGroupId.19',ScalingGroupId19)
def get_ScalingGroupName3(self):
return self.get_query_params().get('ScalingGroupName.3')
def set_ScalingGroupName3(self,ScalingGroupName3):
self.add_query_param('ScalingGroupName.3',ScalingGroupName3)
def get_ScalingGroupName15(self):
return self.get_query_params().get('ScalingGroupName.15')
def set_ScalingGroupName15(self,ScalingGroupName15):
self.add_query_param('ScalingGroupName.15',ScalingGroupName15)
def get_ScalingGroupId9(self):
return self.get_query_params().get('ScalingGroupId.9')
def set_ScalingGroupId9(self,ScalingGroupId9):
self.add_query_param('ScalingGroupId.9',ScalingGroupId9)
def get_ScalingGroupName4(self):
return self.get_query_params().get('ScalingGroupName.4')
def set_ScalingGroupName4(self,ScalingGroupName4):
self.add_query_param('ScalingGroupName.4',ScalingGroupName4)
def get_ScalingGroupName14(self):
return self.get_query_params().get('ScalingGroupName.14')
def set_ScalingGroupName14(self,ScalingGroupName14):
self.add_query_param('ScalingGroupName.14',ScalingGroupName14)
def get_ScalingGroupId8(self):
return self.get_query_params().get('ScalingGroupId.8')
def set_ScalingGroupId8(self,ScalingGroupId8):
self.add_query_param('ScalingGroupId.8',ScalingGroupId8)
def get_ScalingGroupName5(self):
return self.get_query_params().get('ScalingGroupName.5')
def set_ScalingGroupName5(self,ScalingGroupName5):
self.add_query_param('ScalingGroupName.5',ScalingGroupName5)
def get_ScalingGroupName13(self):
return self.get_query_params().get('ScalingGroupName.13')
def set_ScalingGroupName13(self,ScalingGroupName13):
self.add_query_param('ScalingGroupName.13',ScalingGroupName13)
def get_ScalingGroupId7(self):
return self.get_query_params().get('ScalingGroupId.7')
def set_ScalingGroupId7(self,ScalingGroupId7):
self.add_query_param('ScalingGroupId.7',ScalingGroupId7)
def get_ScalingGroupName6(self):
return self.get_query_params().get('ScalingGroupName.6')
def set_ScalingGroupName6(self,ScalingGroupName6):
self.add_query_param('ScalingGroupName.6',ScalingGroupName6)
def get_ScalingGroupName12(self):
return self.get_query_params().get('ScalingGroupName.12')
def set_ScalingGroupName12(self,ScalingGroupName12):
self.add_query_param('ScalingGroupName.12',ScalingGroupName12)
| 35.4
| 80
| 0.789642
|
from aliyunsdkcore.request import RpcRequest
class DescribeScalingGroupsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ess', '2014-08-28', 'DescribeScalingGroups','ess')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ScalingGroupId10(self):
return self.get_query_params().get('ScalingGroupId.10')
def set_ScalingGroupId10(self,ScalingGroupId10):
self.add_query_param('ScalingGroupId.10',ScalingGroupId10)
def get_ScalingGroupId12(self):
return self.get_query_params().get('ScalingGroupId.12')
def set_ScalingGroupId12(self,ScalingGroupId12):
self.add_query_param('ScalingGroupId.12',ScalingGroupId12)
def get_ScalingGroupId13(self):
return self.get_query_params().get('ScalingGroupId.13')
def set_ScalingGroupId13(self,ScalingGroupId13):
self.add_query_param('ScalingGroupId.13',ScalingGroupId13)
def get_ScalingGroupId14(self):
return self.get_query_params().get('ScalingGroupId.14')
def set_ScalingGroupId14(self,ScalingGroupId14):
self.add_query_param('ScalingGroupId.14',ScalingGroupId14)
def get_ScalingGroupId15(self):
return self.get_query_params().get('ScalingGroupId.15')
def set_ScalingGroupId15(self,ScalingGroupId15):
self.add_query_param('ScalingGroupId.15',ScalingGroupId15)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_ScalingGroupName20(self):
return self.get_query_params().get('ScalingGroupName.20')
def set_ScalingGroupName20(self,ScalingGroupName20):
self.add_query_param('ScalingGroupName.20',ScalingGroupName20)
def get_ScalingGroupName19(self):
return self.get_query_params().get('ScalingGroupName.19')
def set_ScalingGroupName19(self,ScalingGroupName19):
self.add_query_param('ScalingGroupName.19',ScalingGroupName19)
def get_ScalingGroupId20(self):
return self.get_query_params().get('ScalingGroupId.20')
def set_ScalingGroupId20(self,ScalingGroupId20):
self.add_query_param('ScalingGroupId.20',ScalingGroupId20)
def get_ScalingGroupName18(self):
return self.get_query_params().get('ScalingGroupName.18')
def set_ScalingGroupName18(self,ScalingGroupName18):
self.add_query_param('ScalingGroupName.18',ScalingGroupName18)
def get_ScalingGroupName17(self):
return self.get_query_params().get('ScalingGroupName.17')
def set_ScalingGroupName17(self,ScalingGroupName17):
self.add_query_param('ScalingGroupName.17',ScalingGroupName17)
def get_ScalingGroupName16(self):
return self.get_query_params().get('ScalingGroupName.16')
def set_ScalingGroupName16(self,ScalingGroupName16):
self.add_query_param('ScalingGroupName.16',ScalingGroupName16)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_ScalingGroupName(self):
return self.get_query_params().get('ScalingGroupName')
def set_ScalingGroupName(self,ScalingGroupName):
self.add_query_param('ScalingGroupName',ScalingGroupName)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_ScalingGroupName1(self):
return self.get_query_params().get('ScalingGroupName.1')
def set_ScalingGroupName1(self,ScalingGroupName1):
self.add_query_param('ScalingGroupName.1',ScalingGroupName1)
def get_ScalingGroupName2(self):
return self.get_query_params().get('ScalingGroupName.2')
def set_ScalingGroupName2(self,ScalingGroupName2):
self.add_query_param('ScalingGroupName.2',ScalingGroupName2)
def get_ScalingGroupId2(self):
return self.get_query_params().get('ScalingGroupId.2')
def set_ScalingGroupId2(self,ScalingGroupId2):
self.add_query_param('ScalingGroupId.2',ScalingGroupId2)
def get_ScalingGroupId1(self):
return self.get_query_params().get('ScalingGroupId.1')
def set_ScalingGroupId1(self,ScalingGroupId1):
self.add_query_param('ScalingGroupId.1',ScalingGroupId1)
def get_ScalingGroupId6(self):
return self.get_query_params().get('ScalingGroupId.6')
def set_ScalingGroupId6(self,ScalingGroupId6):
self.add_query_param('ScalingGroupId.6',ScalingGroupId6)
def get_ScalingGroupId16(self):
return self.get_query_params().get('ScalingGroupId.16')
def set_ScalingGroupId16(self,ScalingGroupId16):
self.add_query_param('ScalingGroupId.16',ScalingGroupId16)
def get_ScalingGroupName7(self):
return self.get_query_params().get('ScalingGroupName.7')
def set_ScalingGroupName7(self,ScalingGroupName7):
self.add_query_param('ScalingGroupName.7',ScalingGroupName7)
def get_ScalingGroupName11(self):
return self.get_query_params().get('ScalingGroupName.11')
def set_ScalingGroupName11(self,ScalingGroupName11):
self.add_query_param('ScalingGroupName.11',ScalingGroupName11)
def get_ScalingGroupId5(self):
return self.get_query_params().get('ScalingGroupId.5')
def set_ScalingGroupId5(self,ScalingGroupId5):
self.add_query_param('ScalingGroupId.5',ScalingGroupId5)
def get_ScalingGroupId17(self):
return self.get_query_params().get('ScalingGroupId.17')
def set_ScalingGroupId17(self,ScalingGroupId17):
self.add_query_param('ScalingGroupId.17',ScalingGroupId17)
def get_ScalingGroupName8(self):
return self.get_query_params().get('ScalingGroupName.8')
def set_ScalingGroupName8(self,ScalingGroupName8):
self.add_query_param('ScalingGroupName.8',ScalingGroupName8)
def get_ScalingGroupName10(self):
return self.get_query_params().get('ScalingGroupName.10')
def set_ScalingGroupName10(self,ScalingGroupName10):
self.add_query_param('ScalingGroupName.10',ScalingGroupName10)
def get_ScalingGroupId4(self):
return self.get_query_params().get('ScalingGroupId.4')
def set_ScalingGroupId4(self,ScalingGroupId4):
self.add_query_param('ScalingGroupId.4',ScalingGroupId4)
def get_ScalingGroupId18(self):
return self.get_query_params().get('ScalingGroupId.18')
def set_ScalingGroupId18(self,ScalingGroupId18):
self.add_query_param('ScalingGroupId.18',ScalingGroupId18)
def get_ScalingGroupName9(self):
return self.get_query_params().get('ScalingGroupName.9')
def set_ScalingGroupName9(self,ScalingGroupName9):
self.add_query_param('ScalingGroupName.9',ScalingGroupName9)
def get_ScalingGroupId3(self):
return self.get_query_params().get('ScalingGroupId.3')
def set_ScalingGroupId3(self,ScalingGroupId3):
self.add_query_param('ScalingGroupId.3',ScalingGroupId3)
def get_ScalingGroupId19(self):
return self.get_query_params().get('ScalingGroupId.19')
def set_ScalingGroupId19(self,ScalingGroupId19):
self.add_query_param('ScalingGroupId.19',ScalingGroupId19)
def get_ScalingGroupName3(self):
return self.get_query_params().get('ScalingGroupName.3')
def set_ScalingGroupName3(self,ScalingGroupName3):
self.add_query_param('ScalingGroupName.3',ScalingGroupName3)
def get_ScalingGroupName15(self):
return self.get_query_params().get('ScalingGroupName.15')
def set_ScalingGroupName15(self,ScalingGroupName15):
self.add_query_param('ScalingGroupName.15',ScalingGroupName15)
def get_ScalingGroupId9(self):
return self.get_query_params().get('ScalingGroupId.9')
def set_ScalingGroupId9(self,ScalingGroupId9):
self.add_query_param('ScalingGroupId.9',ScalingGroupId9)
def get_ScalingGroupName4(self):
return self.get_query_params().get('ScalingGroupName.4')
def set_ScalingGroupName4(self,ScalingGroupName4):
self.add_query_param('ScalingGroupName.4',ScalingGroupName4)
def get_ScalingGroupName14(self):
return self.get_query_params().get('ScalingGroupName.14')
def set_ScalingGroupName14(self,ScalingGroupName14):
self.add_query_param('ScalingGroupName.14',ScalingGroupName14)
def get_ScalingGroupId8(self):
return self.get_query_params().get('ScalingGroupId.8')
def set_ScalingGroupId8(self,ScalingGroupId8):
self.add_query_param('ScalingGroupId.8',ScalingGroupId8)
def get_ScalingGroupName5(self):
return self.get_query_params().get('ScalingGroupName.5')
def set_ScalingGroupName5(self,ScalingGroupName5):
self.add_query_param('ScalingGroupName.5',ScalingGroupName5)
def get_ScalingGroupName13(self):
return self.get_query_params().get('ScalingGroupName.13')
def set_ScalingGroupName13(self,ScalingGroupName13):
self.add_query_param('ScalingGroupName.13',ScalingGroupName13)
def get_ScalingGroupId7(self):
return self.get_query_params().get('ScalingGroupId.7')
def set_ScalingGroupId7(self,ScalingGroupId7):
self.add_query_param('ScalingGroupId.7',ScalingGroupId7)
def get_ScalingGroupName6(self):
return self.get_query_params().get('ScalingGroupName.6')
def set_ScalingGroupName6(self,ScalingGroupName6):
self.add_query_param('ScalingGroupName.6',ScalingGroupName6)
def get_ScalingGroupName12(self):
return self.get_query_params().get('ScalingGroupName.12')
def set_ScalingGroupName12(self,ScalingGroupName12):
self.add_query_param('ScalingGroupName.12',ScalingGroupName12)
| true
| true
|
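A minimal usage sketch for the DescribeScalingGroupsRequest class above, assuming the standard aliyun-python-sdk-core client; the credentials, region and group name below are placeholders, not values from this record:

from aliyunsdkcore.client import AcsClient

# Placeholder credentials and region, substitute real values.
client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

request = DescribeScalingGroupsRequest()
request.set_PageNumber(1)
request.set_PageSize(50)
# Repeated filters are flattened into indexed query keys such as ScalingGroupName.1:
request.set_ScalingGroupName1('web-workers')

# do_action_with_exception() sends the RPC call and returns the raw JSON body.
print(client.do_action_with_exception(request))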
1c47d132a6791395267f3791dfb59ca1076cee0c
| 360
|
py
|
Python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/models.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2021-06-02T08:01:35.000Z
|
2021-06-02T08:01:35.000Z
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/models.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2019-06-04T18:12:16.000Z
|
2019-06-04T18:12:16.000Z
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/models.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .v2018_06_01.models import *
| 45
| 76
| 0.444444
|
from .v2018_06_01.models import *
| true
| true
|
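The models.py above is a version-pinning shim: the star import re-exports the 2018-06-01 model surface under a version-agnostic path. A hedged illustration (the Subscription model name is an assumption about what the versioned module exports, as in upstream azure-mgmt-resource):

# Both names resolve to the same class because of the star re-export above.
from azure.mgmt.resource.subscriptions.models import Subscription
from azure.mgmt.resource.subscriptions.v2018_06_01.models import (
    Subscription as VersionedSubscription,
)

assert Subscription is VersionedSubscription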
1c47d191e4ced18e1fb9d2ca1bfe78d40d28d1ae
| 2,572
|
py
|
Python
|
tests/Modules/Indexer/test_DIALS_indexer.py
|
xia2/xia2
|
18554e9b4d442e7c23a0c4ce93f51b491f77d4b7
|
[
"BSD-3-Clause"
] | 10
|
2015-10-30T06:36:55.000Z
|
2021-12-10T20:06:22.000Z
|
tests/Modules/Indexer/test_DIALS_indexer.py
|
xia2/xia2
|
18554e9b4d442e7c23a0c4ce93f51b491f77d4b7
|
[
"BSD-3-Clause"
] | 528
|
2015-11-24T08:20:12.000Z
|
2022-03-21T21:47:29.000Z
|
tests/Modules/Indexer/test_DIALS_indexer.py
|
xia2/xia2
|
18554e9b4d442e7c23a0c4ce93f51b491f77d4b7
|
[
"BSD-3-Clause"
] | 14
|
2016-03-15T22:07:03.000Z
|
2020-12-14T07:13:35.000Z
|
from unittest import mock
import os
import pytest
import sys
from dxtbx.model import ExperimentList
from xia2.Handlers.Phil import PhilIndex
from xia2.Modules.Indexer.DialsIndexer import DialsIndexer
from xia2.Schema.XCrystal import XCrystal
from xia2.Schema.XWavelength import XWavelength
from xia2.Schema.XSweep import XSweep
from xia2.Schema.XSample import XSample
def exercise_dials_indexer(dials_data, tmp_dir, nproc=None):
if nproc is not None:
PhilIndex.params.xia2.settings.multiprocessing.nproc = nproc
template = dials_data("insulin").join("insulin_1_###.img").strpath
indexer = DialsIndexer()
indexer.set_working_directory(tmp_dir)
experiments = ExperimentList.from_templates([template])
imageset = experiments.imagesets()[0]
indexer.add_indexer_imageset(imageset)
cryst = XCrystal("CRYST1", None)
wav = XWavelength("WAVE1", cryst, imageset.get_beam().get_wavelength())
samp = XSample("X1", cryst)
directory, image = os.path.split(imageset.get_path(1))
sweep = XSweep("SWEEP1", wav, samp, directory=directory, image=image)
indexer.set_indexer_sweep(sweep)
indexer.index()
assert indexer.get_indexer_cell() == pytest.approx(
(78.14, 78.14, 78.14, 90, 90, 90), rel=1e-3
)
solution = indexer.get_solution()
assert solution["rmsd"] == pytest.approx(0.03545, abs=1e-3)
assert solution["metric"] == pytest.approx(0.02517, abs=5e-3)
assert solution["number"] == 22
assert solution["lattice"] == "cI"
beam_centre = indexer.get_indexer_beam_centre()
assert beam_centre == pytest.approx(
(94.41567208118963, 94.51337522659865), abs=1e-3
)
print(indexer.get_indexer_experiment_list()[0].crystal)
print(indexer.get_indexer_experiment_list()[0].detector)
# test serialization of indexer
json_str = indexer.as_json()
indexer2 = DialsIndexer.from_json(string=json_str)
indexer2.index()
assert indexer.get_indexer_cell() == pytest.approx(indexer2.get_indexer_cell())
assert indexer.get_indexer_beam_centre() == pytest.approx(
indexer2.get_indexer_beam_centre()
)
indexer.eliminate()
indexer2.eliminate()
assert indexer.get_indexer_cell() == pytest.approx(indexer2.get_indexer_cell())
assert indexer.get_indexer_lattice() == "hR"
assert indexer2.get_indexer_lattice() == "hR"
def test_dials_indexer_serial(regression_test, ccp4, dials_data, run_in_tmpdir):
with mock.patch.object(sys, "argv", []):
exercise_dials_indexer(dials_data, run_in_tmpdir.strpath, nproc=1)
| 34.293333
| 83
| 0.728616
|
from unittest import mock
import os
import pytest
import sys
from dxtbx.model import ExperimentList
from xia2.Handlers.Phil import PhilIndex
from xia2.Modules.Indexer.DialsIndexer import DialsIndexer
from xia2.Schema.XCrystal import XCrystal
from xia2.Schema.XWavelength import XWavelength
from xia2.Schema.XSweep import XSweep
from xia2.Schema.XSample import XSample
def exercise_dials_indexer(dials_data, tmp_dir, nproc=None):
if nproc is not None:
PhilIndex.params.xia2.settings.multiprocessing.nproc = nproc
template = dials_data("insulin").join("insulin_1_###.img").strpath
indexer = DialsIndexer()
indexer.set_working_directory(tmp_dir)
experiments = ExperimentList.from_templates([template])
imageset = experiments.imagesets()[0]
indexer.add_indexer_imageset(imageset)
cryst = XCrystal("CRYST1", None)
wav = XWavelength("WAVE1", cryst, imageset.get_beam().get_wavelength())
samp = XSample("X1", cryst)
directory, image = os.path.split(imageset.get_path(1))
sweep = XSweep("SWEEP1", wav, samp, directory=directory, image=image)
indexer.set_indexer_sweep(sweep)
indexer.index()
assert indexer.get_indexer_cell() == pytest.approx(
(78.14, 78.14, 78.14, 90, 90, 90), rel=1e-3
)
solution = indexer.get_solution()
assert solution["rmsd"] == pytest.approx(0.03545, abs=1e-3)
assert solution["metric"] == pytest.approx(0.02517, abs=5e-3)
assert solution["number"] == 22
assert solution["lattice"] == "cI"
beam_centre = indexer.get_indexer_beam_centre()
assert beam_centre == pytest.approx(
(94.41567208118963, 94.51337522659865), abs=1e-3
)
print(indexer.get_indexer_experiment_list()[0].crystal)
print(indexer.get_indexer_experiment_list()[0].detector)
json_str = indexer.as_json()
indexer2 = DialsIndexer.from_json(string=json_str)
indexer2.index()
assert indexer.get_indexer_cell() == pytest.approx(indexer2.get_indexer_cell())
assert indexer.get_indexer_beam_centre() == pytest.approx(
indexer2.get_indexer_beam_centre()
)
indexer.eliminate()
indexer2.eliminate()
assert indexer.get_indexer_cell() == pytest.approx(indexer2.get_indexer_cell())
assert indexer.get_indexer_lattice() == "hR"
assert indexer2.get_indexer_lattice() == "hR"
def test_dials_indexer_serial(regression_test, ccp4, dials_data, run_in_tmpdir):
with mock.patch.object(sys, "argv", []):
exercise_dials_indexer(dials_data, run_in_tmpdir.strpath, nproc=1)
| true
| true
|
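The suite above only pins the serial path (nproc=1); a parallel companion, sketched here under the same fixtures with an illustrative worker count, would exercise the multiprocessing branch of exercise_dials_indexer:

def test_dials_indexer_parallel(regression_test, ccp4, dials_data, run_in_tmpdir):
    # Same harness as the serial test, but fan the indexing out over two processes.
    with mock.patch.object(sys, "argv", []):
        exercise_dials_indexer(dials_data, run_in_tmpdir.strpath, nproc=2)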
1c47d2457497fd988ef9644f3fcee1f778042ce5
| 1,002
|
py
|
Python
|
mayan/apps/common/tests/runner.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 2,743
|
2017-12-18T07:12:30.000Z
|
2022-03-27T17:21:25.000Z
|
mayan/apps/common/tests/runner.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 15
|
2017-12-18T14:58:07.000Z
|
2021-03-01T20:05:05.000Z
|
mayan/apps/common/tests/runner.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 257
|
2017-12-18T03:12:58.000Z
|
2022-03-25T08:59:10.000Z
|
from __future__ import unicode_literals
from django import apps
from django.test.runner import DiscoverRunner
class MayanTestRunner(DiscoverRunner):
@classmethod
def add_arguments(cls, parser):
DiscoverRunner.add_arguments(parser)
parser.add_argument(
'--mayan-apps', action='store_true', default=False,
dest='mayan_apps',
help='Test all Mayan apps that report to have tests.'
)
def __init__(self, *args, **kwargs):
self.mayan_apps = kwargs.pop('mayan_apps')
super(MayanTestRunner, self).__init__(*args, **kwargs)
def build_suite(self, *args, **kwargs):
# Apps that report they have tests
if self.mayan_apps:
args = list(args)
args[0] = [
app.name for app in apps.apps.get_app_configs() if getattr(
app, 'has_tests', False
)
]
return super(MayanTestRunner, self).build_suite(*args, **kwargs)
| 31.3125
| 75
| 0.610778
|
from __future__ import unicode_literals
from django import apps
from django.test.runner import DiscoverRunner
class MayanTestRunner(DiscoverRunner):
@classmethod
def add_arguments(cls, parser):
DiscoverRunner.add_arguments(parser)
parser.add_argument(
'--mayan-apps', action='store_true', default=False,
dest='mayan_apps',
help='Test all Mayan apps that report to have tests.'
)
def __init__(self, *args, **kwargs):
self.mayan_apps = kwargs.pop('mayan_apps')
super(MayanTestRunner, self).__init__(*args, **kwargs)
def build_suite(self, *args, **kwargs):
if self.mayan_apps:
args = list(args)
args[0] = [
app.name for app in apps.apps.get_app_configs() if getattr(
app, 'has_tests', False
)
]
return super(MayanTestRunner, self).build_suite(*args, **kwargs)
| true
| true
|
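To activate the runner above, Django settings point TEST_RUNNER at it and the extra flag then flows through manage.py; the dotted path below is inferred from this record's file path rather than taken from the project's settings:

# settings.py (path inferred from mayan/apps/common/tests/runner.py)
TEST_RUNNER = 'mayan.apps.common.tests.runner.MayanTestRunner'

# The runner's add_arguments() hook then makes this work on the command line:
#   python manage.py test --mayan-apps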
1c47d4df07c1c10285d70b8e964f1a6a01f4327e
| 6,932
|
py
|
Python
|
kf_d3m_primitives/natural_language_processing/sent2vec/sent2vec.py
|
Zac-hills/d3m-primitives
|
1829fc98042dddfcbee3cfbbb8cb75dd452f1e8d
|
[
"Apache-2.0"
] | 1
|
2020-05-22T14:00:09.000Z
|
2020-05-22T14:00:09.000Z
|
kf_d3m_primitives/natural_language_processing/sent2vec/sent2vec.py
|
Zac-hills/d3m-primitives
|
1829fc98042dddfcbee3cfbbb8cb75dd452f1e8d
|
[
"Apache-2.0"
] | 18
|
2020-07-20T07:00:45.000Z
|
2022-03-12T00:37:57.000Z
|
kf_d3m_primitives/natural_language_processing/sent2vec/sent2vec.py
|
Zac-hills/d3m-primitives
|
1829fc98042dddfcbee3cfbbb8cb75dd452f1e8d
|
[
"Apache-2.0"
] | 6
|
2020-06-03T20:13:24.000Z
|
2021-12-06T18:21:32.000Z
|
import os.path
from typing import Sequence, Optional, Dict
import numpy as np
import pandas as pd
from nk_sent2vec import Sent2Vec as _Sent2Vec
from d3m import container, utils
from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase
from d3m.primitive_interfaces.base import CallResult
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import hyperparams, base as metadata_base, params
__author__ = "Distil"
__version__ = "1.3.0"
__contact__ = "mailto:cbethune@uncharted.software"
Inputs = container.pandas.DataFrame
Outputs = container.pandas.DataFrame
class Hyperparams(hyperparams.Hyperparams):
use_columns = hyperparams.Set(
elements=hyperparams.Hyperparameter[int](-1),
default=(),
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="A set of column indices to force primitive to operate on. If any specified \
column cannot be parsed, it is skipped.",
)
class Sent2VecPrimitive(TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
"""
This primitive produces numerical representations of text data using a model
that was pre-trained on English Twitter bi-grams.
"""
metadata = metadata_base.PrimitiveMetadata(
{
"id": "cf450079-9333-4a3f-aed4-b77a4e8c7be7",
"version": __version__,
"name": "sent2vec_wrapper",
"keywords": ["Sent2Vec", "Embedding", "NLP", "Natural Language Processing"],
"source": {
"name": __author__,
"contact": __contact__,
"uris": ["https://github.com/kungfuai/d3m-primitives"],
},
"installation": [
{"type": "PIP", "package": "cython", "version": "0.29.16"},
{
"type": metadata_base.PrimitiveInstallationType.PIP,
"package_uri": "git+https://github.com/kungfuai/d3m-primitives.git@{git_commit}#egg=kf-d3m-primitives".format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),
),
},
{
"type": "FILE",
"key": "sent2vec_model",
"file_uri": "http://public.datadrivendiscovery.org/twitter_bigrams.bin",
"file_digest": "9e8ccfea2aaa4435ca61b05b11b60e1a096648d56fff76df984709339f423dd6",
},
],
"python_path": "d3m.primitives.feature_extraction.nk_sent2vec.Sent2Vec",
"algorithm_types": [metadata_base.PrimitiveAlgorithmType.VECTORIZATION],
"primitive_family": metadata_base.PrimitiveFamily.FEATURE_EXTRACTION,
}
)
    # class-level cache of the vectorizer so subsequent produce() calls skip re-initialization
_vectorizer: Optional[_Sent2Vec] = None
def __init__(
self,
*,
hyperparams: Hyperparams,
random_seed: int = 0,
volumes: Dict[str, str] = None
) -> None:
super().__init__(
hyperparams=hyperparams, random_seed=random_seed, volumes=volumes
)
self.volumes = volumes
def produce(
self, *, inputs: Inputs, timeout: float = None, iterations: int = None
) -> CallResult[Outputs]:
"""
Produce numerical representations (features) for short texts or sentences.
Parameters
----------
inputs: D3M dataframe
Returns
-------
Outputs: Input D3M dataframe with vector components appended as additional columns
"""
# figure out columns to operate on
cols = self._get_operating_columns(
inputs, self.hyperparams["use_columns"], ("http://schema.org/Text",)
)
frame = inputs.iloc[:, cols]
outputs = inputs.copy()
try:
# lazy load the model and keep it around for subsequent produce calls
if Sent2VecPrimitive._vectorizer is None:
Sent2VecPrimitive._vectorizer = _Sent2Vec(
path=self.volumes["sent2vec_model"]
)
output_vectors = []
for col in range(frame.shape[1]):
text = frame.iloc[:, col].tolist()
embedded_sentences = Sent2VecPrimitive._vectorizer.embed_sentences(
sentences=text
)
output_vectors.append(embedded_sentences)
embedded_df = pd.DataFrame(
np.array(output_vectors).reshape(len(embedded_sentences), -1)
)
except ValueError:
            # vectorization failed; fall back to returning the unmodified copy of the inputs
return CallResult(outputs)
# create df with vectorized columns and append to input df
embedded_df = d3m_DataFrame(embedded_df)
for col in range(embedded_df.shape[1]):
col_dict = dict(
embedded_df.metadata.query((metadata_base.ALL_ELEMENTS, col))
)
col_dict["structural_type"] = type(1.0)
col_dict["name"] = "vector_" + str(col)
col_dict["semantic_types"] = (
"http://schema.org/Float",
"https://metadata.datadrivendiscovery.org/types/Attribute",
)
embedded_df.metadata = embedded_df.metadata.update(
(metadata_base.ALL_ELEMENTS, col), col_dict
)
df_dict = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS,)))
df_dict_1 = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS,)))
df_dict["dimension"] = df_dict_1
df_dict_1["name"] = "columns"
df_dict_1["semantic_types"] = (
"https://metadata.datadrivendiscovery.org/types/TabularColumn",
)
df_dict_1["length"] = embedded_df.shape[1]
embedded_df.metadata = embedded_df.metadata.update(
(metadata_base.ALL_ELEMENTS,), df_dict
)
return CallResult(outputs.append_columns(embedded_df))
@classmethod
def _get_operating_columns(
cls,
inputs: container.DataFrame,
use_columns: Sequence[int],
semantic_types: Sequence[str],
require_attribute: bool = True,
) -> Sequence[int]:
# use caller supplied columns if supplied
cols = set(use_columns)
type_cols = set(
inputs.metadata.list_columns_with_semantic_types(semantic_types)
)
if require_attribute:
attributes = set(
inputs.metadata.list_columns_with_semantic_types(
("https://metadata.datadrivendiscovery.org/types/Attribute",)
)
)
type_cols = type_cols & attributes
if len(cols) > 0:
cols = type_cols & cols
else:
cols = type_cols
return list(cols)
| 37.879781
| 130
| 0.604443
|
import os.path
from typing import Sequence, Optional, Dict
import numpy as np
import pandas as pd
from nk_sent2vec import Sent2Vec as _Sent2Vec
from d3m import container, utils
from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase
from d3m.primitive_interfaces.base import CallResult
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import hyperparams, base as metadata_base, params
__author__ = "Distil"
__version__ = "1.3.0"
__contact__ = "mailto:cbethune@uncharted.software"
Inputs = container.pandas.DataFrame
Outputs = container.pandas.DataFrame
class Hyperparams(hyperparams.Hyperparams):
use_columns = hyperparams.Set(
elements=hyperparams.Hyperparameter[int](-1),
default=(),
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="A set of column indices to force primitive to operate on. If any specified \
column cannot be parsed, it is skipped.",
)
class Sent2VecPrimitive(TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
metadata = metadata_base.PrimitiveMetadata(
{
"id": "cf450079-9333-4a3f-aed4-b77a4e8c7be7",
"version": __version__,
"name": "sent2vec_wrapper",
"keywords": ["Sent2Vec", "Embedding", "NLP", "Natural Language Processing"],
"source": {
"name": __author__,
"contact": __contact__,
"uris": ["https://github.com/kungfuai/d3m-primitives"],
},
"installation": [
{"type": "PIP", "package": "cython", "version": "0.29.16"},
{
"type": metadata_base.PrimitiveInstallationType.PIP,
"package_uri": "git+https://github.com/kungfuai/d3m-primitives.git@{git_commit}#egg=kf-d3m-primitives".format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),
),
},
{
"type": "FILE",
"key": "sent2vec_model",
"file_uri": "http://public.datadrivendiscovery.org/twitter_bigrams.bin",
"file_digest": "9e8ccfea2aaa4435ca61b05b11b60e1a096648d56fff76df984709339f423dd6",
},
],
"python_path": "d3m.primitives.feature_extraction.nk_sent2vec.Sent2Vec",
"algorithm_types": [metadata_base.PrimitiveAlgorithmType.VECTORIZATION],
"primitive_family": metadata_base.PrimitiveFamily.FEATURE_EXTRACTION,
}
)
_vectorizer: Optional[_Sent2Vec] = None
def __init__(
self,
*,
hyperparams: Hyperparams,
random_seed: int = 0,
volumes: Dict[str, str] = None
) -> None:
super().__init__(
hyperparams=hyperparams, random_seed=random_seed, volumes=volumes
)
self.volumes = volumes
def produce(
self, *, inputs: Inputs, timeout: float = None, iterations: int = None
) -> CallResult[Outputs]:
cols = self._get_operating_columns(
inputs, self.hyperparams["use_columns"], ("http://schema.org/Text",)
)
frame = inputs.iloc[:, cols]
outputs = inputs.copy()
try:
if Sent2VecPrimitive._vectorizer is None:
Sent2VecPrimitive._vectorizer = _Sent2Vec(
path=self.volumes["sent2vec_model"]
)
output_vectors = []
for col in range(frame.shape[1]):
text = frame.iloc[:, col].tolist()
embedded_sentences = Sent2VecPrimitive._vectorizer.embed_sentences(
sentences=text
)
output_vectors.append(embedded_sentences)
embedded_df = pd.DataFrame(
np.array(output_vectors).reshape(len(embedded_sentences), -1)
)
except ValueError:
return CallResult(outputs)
embedded_df = d3m_DataFrame(embedded_df)
for col in range(embedded_df.shape[1]):
col_dict = dict(
embedded_df.metadata.query((metadata_base.ALL_ELEMENTS, col))
)
col_dict["structural_type"] = type(1.0)
col_dict["name"] = "vector_" + str(col)
col_dict["semantic_types"] = (
"http://schema.org/Float",
"https://metadata.datadrivendiscovery.org/types/Attribute",
)
embedded_df.metadata = embedded_df.metadata.update(
(metadata_base.ALL_ELEMENTS, col), col_dict
)
df_dict = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS,)))
df_dict_1 = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS,)))
df_dict["dimension"] = df_dict_1
df_dict_1["name"] = "columns"
df_dict_1["semantic_types"] = (
"https://metadata.datadrivendiscovery.org/types/TabularColumn",
)
df_dict_1["length"] = embedded_df.shape[1]
embedded_df.metadata = embedded_df.metadata.update(
(metadata_base.ALL_ELEMENTS,), df_dict
)
return CallResult(outputs.append_columns(embedded_df))
@classmethod
def _get_operating_columns(
cls,
inputs: container.DataFrame,
use_columns: Sequence[int],
semantic_types: Sequence[str],
require_attribute: bool = True,
) -> Sequence[int]:
cols = set(use_columns)
type_cols = set(
inputs.metadata.list_columns_with_semantic_types(semantic_types)
)
if require_attribute:
attributes = set(
inputs.metadata.list_columns_with_semantic_types(
("https://metadata.datadrivendiscovery.org/types/Attribute",)
)
)
type_cols = type_cols & attributes
if len(cols) > 0:
cols = type_cols & cols
else:
cols = type_cols
return list(cols)
| true
| true
|
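A hedged construction-and-produce sketch for the primitive above, using the Hyperparams class defined alongside it; the model path is a placeholder for wherever a D3M runtime materialized the sent2vec_model volume, and the input must carry the Text and Attribute semantic types that _get_operating_columns() filters on:

import pandas as pd
from d3m import container
from d3m.metadata import base as metadata_base

# Placeholder volume path, normally mounted by the D3M runtime.
primitive = Sent2VecPrimitive(
    hyperparams=Hyperparams.defaults(),
    volumes={"sent2vec_model": "/static/twitter_bigrams.bin"},
)

inputs = container.DataFrame(
    pd.DataFrame({"text": ["hello world", "a short sentence"]}),
    generate_metadata=True,
)
# Tag column 0 so the primitive selects it for embedding.
for semantic_type in (
    "http://schema.org/Text",
    "https://metadata.datadrivendiscovery.org/types/Attribute",
):
    inputs.metadata = inputs.metadata.add_semantic_type(
        (metadata_base.ALL_ELEMENTS, 0), semantic_type
    )

result = primitive.produce(inputs=inputs).value  # original column plus vector_0..vector_N
print(result.shape)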
1c47d526b70baa1b7149d593ed8aec9074118df1
| 1,016
|
py
|
Python
|
setup.py
|
PrabhuLoganathan/pro.developers.PySelFame-6
|
3ee45e672f84965f0b8b3ccf7f8daf0c7d871261
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
PrabhuLoganathan/pro.developers.PySelFame-6
|
3ee45e672f84965f0b8b3ccf7f8daf0c7d871261
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
PrabhuLoganathan/pro.developers.PySelFame-6
|
3ee45e672f84965f0b8b3ccf7f8daf0c7d871261
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup
setup(
name='knitter',
version='1.0.2',
author='Henry Wang',
author_email='skymatrix@126.com',
maintainer='Henry Wang',
maintainer_email='skymatrix@126.com',
url='https://github.com/hw712/Knitter',
description='A Web Automation Test Framework Based On Selenium WebDriver',
long_description="Knitter['nitə] is a web automation test framework, with which you can develop "
"the web ui automation with good maintainability and extendability.",
# https://pypi.org/classifiers/
classifiers=['License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Libraries :: Application Frameworks'],
platforms=['linux', 'windows'],
license='BSD License',
packages=['knitter'],
install_requires=['selenium', 'xlrd'],
)
| 22.577778
| 101
| 0.626969
|
from setuptools import setup
setup(
name='knitter',
version='1.0.2',
author='Henry Wang',
author_email='skymatrix@126.com',
maintainer='Henry Wang',
maintainer_email='skymatrix@126.com',
url='https://github.com/hw712/Knitter',
description='A Web Automation Test Framework Based On Selenium WebDriver',
long_description="Knitter['nitə] is a web automation test framework, with which you can develop "
"the web ui automation with good maintainability and extendability.",
# https://pypi.org/classifiers/
classifiers=['License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Libraries :: Application Frameworks'],
platforms=['linux', 'windows'],
license='BSD License',
packages=['knitter'],
install_requires=['selenium', 'xlrd'],
)
| true
| true
|
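Given the single packages=['knitter'] entry in the setup() call above, an installed distribution exposes exactly one importable top-level package; a trivial smoke check, assuming the package was installed with pip:

# After "pip install knitter":
import knitter
print(knitter.__name__)  # -> knitter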
1c47d61e29b517b660c0f0ee0e55960b22da7061
| 202
|
py
|
Python
|
mywebsite/users/tests/test_models.py
|
NyntoFive/mywebsite
|
07af16c564f8a7c77763187cc4cd8742c91c6534
|
[
"MIT"
] | null | null | null |
mywebsite/users/tests/test_models.py
|
NyntoFive/mywebsite
|
07af16c564f8a7c77763187cc4cd8742c91c6534
|
[
"MIT"
] | null | null | null |
mywebsite/users/tests/test_models.py
|
NyntoFive/mywebsite
|
07af16c564f8a7c77763187cc4cd8742c91c6534
|
[
"MIT"
] | null | null | null |
import pytest
from mywebsite.users.models import User
pytestmark = pytest.mark.django_db
def test_user_get_absolute_url(user: User):
assert user.get_absolute_url() == f"/users/{user.username}/"
| 20.2
| 64
| 0.772277
|
import pytest
from mywebsite.users.models import User
pytestmark = pytest.mark.django_db
def test_user_get_absolute_url(user: User):
assert user.get_absolute_url() == f"/users/{user.username}/"
| true
| true
|
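The test above leans on a user fixture defined elsewhere (typically a conftest.py); a hedged sketch of what such a fixture could look like, with illustrative field values:

# Hypothetical conftest.py; the real project defines this fixture elsewhere.
import pytest

from mywebsite.users.models import User


@pytest.fixture
def user(db) -> User:
    return User.objects.create_user(username="alice", password="s3cr3t-pass")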
1c47d67efdc69d1364d3f7859468a66ce98d53af
| 6,336
|
py
|
Python
|
tests/integration/test_es.py
|
roguesupport/localstack
|
087abb05fcb360297431ad8e5790c8014e0a80d7
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_es.py
|
roguesupport/localstack
|
087abb05fcb360297431ad8e5790c8014e0a80d7
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_es.py
|
roguesupport/localstack
|
087abb05fcb360297431ad8e5790c8014e0a80d7
|
[
"Apache-2.0"
] | null | null | null |
import logging
import threading
import botocore.exceptions
import pytest
from localstack import config
from localstack.constants import ELASTICSEARCH_DEFAULT_VERSION, OPENSEARCH_DEFAULT_VERSION
from localstack.services.install import install_elasticsearch, install_opensearch
from localstack.utils.common import safe_requests as requests
from localstack.utils.common import short_uid, start_worker_thread
LOG = logging.getLogger(__name__)
# Common headers used when sending requests to OpenSearch
COMMON_HEADERS = {"content-type": "application/json", "Accept-encoding": "identity"}
# Lock and event to ensure that the installation is executed before the tests
INIT_LOCK = threading.Lock()
installed = threading.Event()
def install_async():
"""
    Installs the default Elasticsearch and OpenSearch versions in a worker thread. Used by
    conftest.py to make sure both are already downloaded by the time the tests run here.
"""
if installed.is_set():
return
def run_install(*args):
with INIT_LOCK:
if installed.is_set():
return
LOG.info("installing elasticsearch default version")
install_elasticsearch()
LOG.info("done installing elasticsearch default version")
LOG.info("installing opensearch default version")
install_opensearch()
LOG.info("done installing opensearch default version")
installed.set()
start_worker_thread(run_install)
@pytest.fixture(autouse=True)
def elasticsearch():
if not installed.is_set():
install_async()
assert installed.wait(timeout=5 * 60), "gave up waiting for elasticsearch to install"
yield
def try_cluster_health(cluster_url: str):
response = requests.get(cluster_url)
assert response.ok, f"cluster endpoint returned an error: {response.text}"
response = requests.get(f"{cluster_url}/_cluster/health")
assert response.ok, f"cluster health endpoint returned an error: {response.text}"
assert response.json()["status"] in [
"orange",
"yellow",
"green",
], "expected cluster state to be in a valid state"
class TestElasticsearchProvider:
def test_list_versions(self, es_client):
response = es_client.list_elasticsearch_versions()
assert "ElasticsearchVersions" in response
versions = response["ElasticsearchVersions"]
assert "OpenSearch_1.0" in versions
assert "OpenSearch_1.1" in versions
assert "7.10" in versions
def test_get_compatible_versions(self, es_client):
response = es_client.get_compatible_elasticsearch_versions()
assert "CompatibleElasticsearchVersions" in response
versions = response["CompatibleElasticsearchVersions"]
assert len(versions) == 18
assert {"SourceVersion": "OpenSearch_1.0", "TargetVersions": ["OpenSearch_1.1"]} in versions
assert {
"SourceVersion": "7.10",
"TargetVersions": ["OpenSearch_1.0", "OpenSearch_1.1"],
} in versions
assert {
"SourceVersion": "7.7",
"TargetVersions": ["7.8", "7.9", "7.10", "OpenSearch_1.0", "OpenSearch_1.1"],
} in versions
@pytest.mark.skip_offline
def test_get_compatible_version_for_domain(self, es_client, opensearch_domain):
response = es_client.get_compatible_elasticsearch_versions(DomainName=opensearch_domain)
assert "CompatibleElasticsearchVersions" in response
versions = response["CompatibleElasticsearchVersions"]
        # The domain runs the default (latest) version, so there are no newer versions to upgrade to
assert len(versions) == 0
@pytest.mark.skip_offline
def test_create_domain(self, es_client, opensearch_create_domain):
es_domain = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
response = es_client.list_domain_names(EngineType="Elasticsearch")
domain_names = [domain["DomainName"] for domain in response["DomainNames"]]
assert es_domain in domain_names
@pytest.mark.skip_offline
def test_create_existing_domain_causes_exception(self, es_client, opensearch_create_domain):
domain_name = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
with pytest.raises(botocore.exceptions.ClientError) as exc_info:
es_client.create_elasticsearch_domain(DomainName=domain_name)
assert exc_info.type.__name__ == "ResourceAlreadyExistsException"
@pytest.mark.skip_offline
def test_describe_domains(self, es_client, opensearch_create_domain):
opensearch_domain = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
response = es_client.describe_elasticsearch_domains(DomainNames=[opensearch_domain])
assert len(response["DomainStatusList"]) == 1
assert response["DomainStatusList"][0]["DomainName"] == opensearch_domain
@pytest.mark.skip_offline
def test_domain_version(self, es_client, opensearch_domain, opensearch_create_domain):
response = es_client.describe_elasticsearch_domain(DomainName=opensearch_domain)
assert "DomainStatus" in response
status = response["DomainStatus"]
assert "ElasticsearchVersion" in status
assert status["ElasticsearchVersion"] == OPENSEARCH_DEFAULT_VERSION
domain_name = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
response = es_client.describe_elasticsearch_domain(DomainName=domain_name)
assert "DomainStatus" in response
status = response["DomainStatus"]
assert "ElasticsearchVersion" in status
assert status["ElasticsearchVersion"] == "7.10"
@pytest.mark.skip_offline
def test_path_endpoint_strategy(self, monkeypatch, opensearch_create_domain, es_client):
monkeypatch.setattr(config, "OPENSEARCH_ENDPOINT_STRATEGY", "path")
monkeypatch.setattr(config, "OPENSEARCH_MULTI_CLUSTER", True)
domain_name = f"es-domain-{short_uid()}"
opensearch_create_domain(DomainName=domain_name)
status = es_client.describe_elasticsearch_domain(DomainName=domain_name)["DomainStatus"]
assert "Endpoint" in status
endpoint = status["Endpoint"]
assert endpoint.endswith(f"/{domain_name}")
| 40.877419
| 103
| 0.72601
|
import logging
import threading
import botocore.exceptions
import pytest
from localstack import config
from localstack.constants import ELASTICSEARCH_DEFAULT_VERSION, OPENSEARCH_DEFAULT_VERSION
from localstack.services.install import install_elasticsearch, install_opensearch
from localstack.utils.common import safe_requests as requests
from localstack.utils.common import short_uid, start_worker_thread
LOG = logging.getLogger(__name__)
COMMON_HEADERS = {"content-type": "application/json", "Accept-encoding": "identity"}
INIT_LOCK = threading.Lock()
installed = threading.Event()
def install_async():
if installed.is_set():
return
def run_install(*args):
with INIT_LOCK:
if installed.is_set():
return
LOG.info("installing elasticsearch default version")
install_elasticsearch()
LOG.info("done installing elasticsearch default version")
LOG.info("installing opensearch default version")
install_opensearch()
LOG.info("done installing opensearch default version")
installed.set()
start_worker_thread(run_install)
@pytest.fixture(autouse=True)
def elasticsearch():
if not installed.is_set():
install_async()
assert installed.wait(timeout=5 * 60), "gave up waiting for elasticsearch to install"
yield
def try_cluster_health(cluster_url: str):
response = requests.get(cluster_url)
assert response.ok, f"cluster endpoint returned an error: {response.text}"
response = requests.get(f"{cluster_url}/_cluster/health")
assert response.ok, f"cluster health endpoint returned an error: {response.text}"
assert response.json()["status"] in [
"orange",
"yellow",
"green",
], "expected cluster state to be in a valid state"
class TestElasticsearchProvider:
def test_list_versions(self, es_client):
response = es_client.list_elasticsearch_versions()
assert "ElasticsearchVersions" in response
versions = response["ElasticsearchVersions"]
assert "OpenSearch_1.0" in versions
assert "OpenSearch_1.1" in versions
assert "7.10" in versions
def test_get_compatible_versions(self, es_client):
response = es_client.get_compatible_elasticsearch_versions()
assert "CompatibleElasticsearchVersions" in response
versions = response["CompatibleElasticsearchVersions"]
assert len(versions) == 18
assert {"SourceVersion": "OpenSearch_1.0", "TargetVersions": ["OpenSearch_1.1"]} in versions
assert {
"SourceVersion": "7.10",
"TargetVersions": ["OpenSearch_1.0", "OpenSearch_1.1"],
} in versions
assert {
"SourceVersion": "7.7",
"TargetVersions": ["7.8", "7.9", "7.10", "OpenSearch_1.0", "OpenSearch_1.1"],
} in versions
@pytest.mark.skip_offline
def test_get_compatible_version_for_domain(self, es_client, opensearch_domain):
response = es_client.get_compatible_elasticsearch_versions(DomainName=opensearch_domain)
assert "CompatibleElasticsearchVersions" in response
versions = response["CompatibleElasticsearchVersions"]
assert len(versions) == 0
@pytest.mark.skip_offline
def test_create_domain(self, es_client, opensearch_create_domain):
es_domain = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
response = es_client.list_domain_names(EngineType="Elasticsearch")
domain_names = [domain["DomainName"] for domain in response["DomainNames"]]
assert es_domain in domain_names
@pytest.mark.skip_offline
def test_create_existing_domain_causes_exception(self, es_client, opensearch_create_domain):
domain_name = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
with pytest.raises(botocore.exceptions.ClientError) as exc_info:
es_client.create_elasticsearch_domain(DomainName=domain_name)
assert exc_info.type.__name__ == "ResourceAlreadyExistsException"
@pytest.mark.skip_offline
def test_describe_domains(self, es_client, opensearch_create_domain):
opensearch_domain = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
response = es_client.describe_elasticsearch_domains(DomainNames=[opensearch_domain])
assert len(response["DomainStatusList"]) == 1
assert response["DomainStatusList"][0]["DomainName"] == opensearch_domain
@pytest.mark.skip_offline
def test_domain_version(self, es_client, opensearch_domain, opensearch_create_domain):
response = es_client.describe_elasticsearch_domain(DomainName=opensearch_domain)
assert "DomainStatus" in response
status = response["DomainStatus"]
assert "ElasticsearchVersion" in status
assert status["ElasticsearchVersion"] == OPENSEARCH_DEFAULT_VERSION
domain_name = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
response = es_client.describe_elasticsearch_domain(DomainName=domain_name)
assert "DomainStatus" in response
status = response["DomainStatus"]
assert "ElasticsearchVersion" in status
assert status["ElasticsearchVersion"] == "7.10"
@pytest.mark.skip_offline
def test_path_endpoint_strategy(self, monkeypatch, opensearch_create_domain, es_client):
monkeypatch.setattr(config, "OPENSEARCH_ENDPOINT_STRATEGY", "path")
monkeypatch.setattr(config, "OPENSEARCH_MULTI_CLUSTER", True)
domain_name = f"es-domain-{short_uid()}"
opensearch_create_domain(DomainName=domain_name)
status = es_client.describe_elasticsearch_domain(DomainName=domain_name)["DomainStatus"]
assert "Endpoint" in status
endpoint = status["Endpoint"]
assert endpoint.endswith(f"/{domain_name}")
| true
| true
|
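try_cluster_health() is defined above but never called in this excerpt; a hedged sketch of how it would plug into the same fixtures (prepending the http:// scheme is an assumption about how the Endpoint field is reported):

@pytest.mark.skip_offline
def test_domain_cluster_health(es_client, opensearch_domain):
    status = es_client.describe_elasticsearch_domain(DomainName=opensearch_domain)["DomainStatus"]
    # Assumption: Endpoint carries no scheme, as in AWS responses.
    try_cluster_health(f"http://{status['Endpoint']}")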
1c47d7142612605ef5ca8a8c2d042e3d2166f135
| 5,609
|
py
|
Python
|
aio_pika/robust_channel.py
|
askabelin/aio-pika
|
38fd5897c556dd41624b8571b061f486e8e7508e
|
[
"Apache-2.0"
] | null | null | null |
aio_pika/robust_channel.py
|
askabelin/aio-pika
|
38fd5897c556dd41624b8571b061f486e8e7508e
|
[
"Apache-2.0"
] | null | null | null |
aio_pika/robust_channel.py
|
askabelin/aio-pika
|
38fd5897c556dd41624b8571b061f486e8e7508e
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
from typing import Callable, Any, Generator, Union
from logging import getLogger
from aio_pika.tools import create_future
from .compat import Awaitable
from .exchange import Exchange, ExchangeType
from .message import IncomingMessage
from .queue import Queue
from .common import BaseChannel, FutureStore
from .channel import Channel
from .robust_queue import RobustQueue
from .robust_exchange import RobustExchange
log = getLogger(__name__)
FunctionOrCoroutine = Union[Callable[[IncomingMessage], Any], Awaitable[IncomingMessage]]
class RobustChannel(Channel):
""" Channel abstraction """
QUEUE_CLASS = RobustQueue
EXCHANGE_CLASS = RobustExchange
def __init__(self, connection, loop: asyncio.AbstractEventLoop,
future_store: FutureStore, channel_number: int=None,
publisher_confirms: bool=True, on_return_raises=False):
"""
:param connection: :class:`aio_pika.adapter.AsyncioConnection` instance
:param loop: Event loop (:func:`asyncio.get_event_loop()` when :class:`None`)
:param future_store: :class:`aio_pika.common.FutureStore` instance
:param publisher_confirms: False if you don't need delivery confirmations (in pursuit of performance)
"""
super().__init__(
loop=loop,
future_store=future_store.get_child(),
connection=connection,
channel_number=channel_number,
publisher_confirms=publisher_confirms,
on_return_raises=on_return_raises,
)
self._closed = False
self._exchanges = dict()
self._queues = dict()
self._qos = 0, 0
@asyncio.coroutine
def on_reconnect(self, connection, channel_number):
exc = ConnectionError('Auto Reconnect Error')
if not self._closing.done():
self._closing.set_exception(exc)
self._closing = create_future(loop=self.loop)
self._futures.reject_all(exc)
self._connection = connection
self._channel_number = channel_number
yield from self.initialize()
for exchange in self._exchanges.values():
yield from exchange.on_reconnect(self)
for queue in self._queues.values():
yield from queue.on_reconnect(self)
@asyncio.coroutine
def initialize(self, timeout=None):
result = yield from super().initialize()
prefetch_count, prefetch_size = self._qos
yield from self.set_qos(
prefetch_count=prefetch_count,
prefetch_size=prefetch_size
)
return result
@asyncio.coroutine
def set_qos(self, prefetch_count: int = 0, prefetch_size: int = 0, all_channels=False, timeout: int = None):
if all_channels:
raise NotImplementedError("Not available to RobustConnection")
self._qos = prefetch_count, prefetch_size
return (yield from super().set_qos(
prefetch_count=prefetch_count,
prefetch_size=prefetch_size,
timeout=timeout,
))
@BaseChannel._ensure_channel_is_open
@asyncio.coroutine
def close(self) -> None:
if self._closed:
return
with (yield from self._write_lock):
self._closed = True
self._channel.close()
yield from self.closing
self._channel = None
@asyncio.coroutine
def declare_exchange(self, name: str, type: ExchangeType = ExchangeType.DIRECT,
durable: bool = None, auto_delete: bool = False,
internal: bool = False, passive: bool = False,
arguments: dict = None, timeout: int = None,
robust: bool = True) -> Generator[Any, None, Exchange]:
exchange = yield from super().declare_exchange(
name=name, type=type, durable=durable, auto_delete=auto_delete,
internal=internal, passive=passive, arguments=arguments,
timeout=timeout,
)
if not internal and robust:
self._exchanges[name] = exchange
return exchange
@asyncio.coroutine
def exchange_delete(self, exchange_name: str, timeout: int = None, if_unused=False, nowait=False):
result = yield from super().exchange_delete(
exchange_name=exchange_name, timeout=timeout,
if_unused=if_unused, nowait=nowait
)
self._exchanges.pop(exchange_name, None)
return result
@asyncio.coroutine
def declare_queue(self, name: str = None, *, durable: bool = None, exclusive: bool = False,
passive: bool = False, auto_delete: bool = False,
arguments: dict = None, timeout: int = None,
robust: bool = True) -> Generator[Any, None, Queue]:
queue = yield from super().declare_queue(
name=name, durable=durable, exclusive=exclusive,
passive=passive, auto_delete=auto_delete,
arguments=arguments, timeout=timeout,
)
if robust:
self._queues[name] = queue
return queue
@asyncio.coroutine
def queue_delete(self, queue_name: str, timeout: int = None,
if_unused: bool = False, if_empty: bool = False, nowait: bool = False):
result = yield from super().queue_delete(
queue_name=queue_name, timeout=timeout,
if_unused=if_unused, if_empty=if_empty, nowait=nowait
)
self._queues.pop(queue_name, None)
return result
__all__ = ('RobustChannel',)
| 33.189349
| 112
| 0.63826
|
import asyncio
from typing import Callable, Any, Generator, Union
from logging import getLogger
from aio_pika.tools import create_future
from .compat import Awaitable
from .exchange import Exchange, ExchangeType
from .message import IncomingMessage
from .queue import Queue
from .common import BaseChannel, FutureStore
from .channel import Channel
from .robust_queue import RobustQueue
from .robust_exchange import RobustExchange
log = getLogger(__name__)
FunctionOrCoroutine = Union[Callable[[IncomingMessage], Any], Awaitable[IncomingMessage]]
class RobustChannel(Channel):
QUEUE_CLASS = RobustQueue
EXCHANGE_CLASS = RobustExchange
def __init__(self, connection, loop: asyncio.AbstractEventLoop,
future_store: FutureStore, channel_number: int=None,
publisher_confirms: bool=True, on_return_raises=False):
super().__init__(
loop=loop,
future_store=future_store.get_child(),
connection=connection,
channel_number=channel_number,
publisher_confirms=publisher_confirms,
on_return_raises=on_return_raises,
)
self._closed = False
self._exchanges = dict()
self._queues = dict()
self._qos = 0, 0
@asyncio.coroutine
def on_reconnect(self, connection, channel_number):
exc = ConnectionError('Auto Reconnect Error')
if not self._closing.done():
self._closing.set_exception(exc)
self._closing = create_future(loop=self.loop)
self._futures.reject_all(exc)
self._connection = connection
self._channel_number = channel_number
yield from self.initialize()
for exchange in self._exchanges.values():
yield from exchange.on_reconnect(self)
for queue in self._queues.values():
yield from queue.on_reconnect(self)
@asyncio.coroutine
def initialize(self, timeout=None):
result = yield from super().initialize()
prefetch_count, prefetch_size = self._qos
yield from self.set_qos(
prefetch_count=prefetch_count,
prefetch_size=prefetch_size
)
return result
@asyncio.coroutine
def set_qos(self, prefetch_count: int = 0, prefetch_size: int = 0, all_channels=False, timeout: int = None):
if all_channels:
raise NotImplementedError("Not available to RobustConnection")
self._qos = prefetch_count, prefetch_size
return (yield from super().set_qos(
prefetch_count=prefetch_count,
prefetch_size=prefetch_size,
timeout=timeout,
))
@BaseChannel._ensure_channel_is_open
@asyncio.coroutine
def close(self) -> None:
if self._closed:
return
with (yield from self._write_lock):
self._closed = True
self._channel.close()
yield from self.closing
self._channel = None
@asyncio.coroutine
def declare_exchange(self, name: str, type: ExchangeType = ExchangeType.DIRECT,
durable: bool = None, auto_delete: bool = False,
internal: bool = False, passive: bool = False,
arguments: dict = None, timeout: int = None,
robust: bool = True) -> Generator[Any, None, Exchange]:
exchange = yield from super().declare_exchange(
name=name, type=type, durable=durable, auto_delete=auto_delete,
internal=internal, passive=passive, arguments=arguments,
timeout=timeout,
)
if not internal and robust:
self._exchanges[name] = exchange
return exchange
@asyncio.coroutine
def exchange_delete(self, exchange_name: str, timeout: int = None, if_unused=False, nowait=False):
result = yield from super().exchange_delete(
exchange_name=exchange_name, timeout=timeout,
if_unused=if_unused, nowait=nowait
)
self._exchanges.pop(exchange_name, None)
return result
@asyncio.coroutine
def declare_queue(self, name: str = None, *, durable: bool = None, exclusive: bool = False,
passive: bool = False, auto_delete: bool = False,
arguments: dict = None, timeout: int = None,
robust: bool = True) -> Generator[Any, None, Queue]:
queue = yield from super().declare_queue(
name=name, durable=durable, exclusive=exclusive,
passive=passive, auto_delete=auto_delete,
arguments=arguments, timeout=timeout,
)
if robust:
self._queues[name] = queue
return queue
@asyncio.coroutine
def queue_delete(self, queue_name: str, timeout: int = None,
if_unused: bool = False, if_empty: bool = False, nowait: bool = False):
result = yield from super().queue_delete(
queue_name=queue_name, timeout=timeout,
if_unused=if_unused, if_empty=if_empty, nowait=nowait
)
self._queues.pop(queue_name, None)
return result
__all__ = ('RobustChannel',)
| true
| true
|
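A pre-async/await usage sketch in the same @asyncio.coroutine style as the module above; connect_robust() and the AMQP URL are assumptions drawn from the upstream aio_pika API of the same era, not from this record:

import asyncio
import aio_pika


@asyncio.coroutine
def main(loop):
    # connect_robust() hands back a robust connection whose channel() yields
    # the RobustChannel above; the URL is a placeholder.
    connection = yield from aio_pika.connect_robust(
        "amqp://guest:guest@localhost/", loop=loop)
    channel = yield from connection.channel()
    yield from channel.set_qos(prefetch_count=10)  # replayed by initialize() after a reconnect
    yield from channel.declare_queue("tasks", durable=True)  # tracked for re-declaration


loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))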
1c47d7c374e86f2955d404bda2c09808e815f342
| 4,040
|
py
|
Python
|
recipes/b2/portable/conanfile.py
|
Aypahyo/conan-center-index
|
c41d64960c66d3d81274d4189534f6fcb7bc4a36
|
[
"MIT"
] | null | null | null |
recipes/b2/portable/conanfile.py
|
Aypahyo/conan-center-index
|
c41d64960c66d3d81274d4189534f6fcb7bc4a36
|
[
"MIT"
] | 1
|
2021-11-22T13:54:48.000Z
|
2021-11-22T14:09:45.000Z
|
recipes/b2/portable/conanfile.py
|
Aypahyo/conan-center-index
|
c41d64960c66d3d81274d4189534f6fcb7bc4a36
|
[
"MIT"
] | null | null | null |
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"
class B2Conan(ConanFile):
name = "b2"
homepage = "https://www.bfgroup.xyz/b2/"
description = "B2 makes it easy to build C++ projects, everywhere."
topics = ("b2", "installer", "builder", "build", "build-system")
license = "BSL-1.0"
settings = "os", "arch"
url = "https://github.com/conan-io/conan-center-index"
'''
    * use_cxx_env: False, True
    Indicates whether the build will use the CXX and CXXFLAGS environment
    variables. Commonly used to add extra flags for building on specific
    platforms or to pass additional optimization options.
    * toolset: 'auto', 'cxx', 'cross-cxx',
    'acc', 'borland', 'clang', 'como', 'gcc-nocygwin', 'gcc',
    'intel-darwin', 'intel-linux', 'intel-win32', 'kcc', 'kylix',
    'mingw', 'mipspro', 'pathscale', 'pgi', 'qcc', 'sun', 'sunpro',
    'tru64cxx', 'vacpp', 'vc12', 'vc14', 'vc141', 'vc142', 'vc143'
    Specifies the toolset to use for building. The default, 'auto', detects
    a usable compiler and should be preferred. The 'cxx' toolset builds using
    only the 'CXX' and 'CXXFLAGS' environment variables; both 'cxx' and
    'cross-cxx' require 'use_cxx_env=True' (enforced in validate()). The
    'cross-cxx' toolset uses the 'BUILD_CXX' and 'BUILD_CXXFLAGS' variables
    instead, which frees 'CXX' and 'CXXFLAGS' for use in subprocesses.
'''
options = {
'use_cxx_env': [False, True],
'toolset': [
'auto', 'cxx', 'cross-cxx',
'acc', 'borland', 'clang', 'como', 'gcc-nocygwin', 'gcc',
'intel-darwin', 'intel-linux', 'intel-win32', 'kcc', 'kylix',
'mingw', 'mipspro', 'pathscale', 'pgi', 'qcc', 'sun', 'sunpro',
'tru64cxx', 'vacpp', 'vc12', 'vc14', 'vc141', 'vc142', 'vc143']
}
default_options = {
'use_cxx_env': False,
'toolset': 'auto'
}
def validate(self):
if (self.options.toolset == 'cxx' or self.options.toolset == 'cross-cxx') and not self.options.use_cxx_env:
            raise ConanInvalidConfiguration(
                "Toolsets 'cxx' and 'cross-cxx' require 'use_cxx_env=True'")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
strip_root=True, destination="source")
def build(self):
use_windows_commands = os.name == 'nt'
command = "build" if use_windows_commands else "./build.sh"
if self.options.toolset != 'auto':
command += " "+str(self.options.toolset)
build_dir = os.path.join(self.source_folder, "source")
engine_dir = os.path.join(build_dir, "src", "engine")
os.chdir(engine_dir)
with tools.environment_append({"VSCMD_START_DIR": os.curdir}):
if self.options.use_cxx_env:
# Allow use of CXX env vars.
self.run(command)
else:
# To avoid using the CXX env vars we clear them out for the build.
with tools.environment_append({"CXX": "", "CXXFLAGS": ""}):
self.run(command)
os.chdir(build_dir)
command = os.path.join(
engine_dir, "b2.exe" if use_windows_commands else "b2")
full_command = \
"{0} --ignore-site-config --prefix=../output --abbreviate-paths install b2-install-layout=portable".format(
command)
self.run(full_command)
def package(self):
self.copy("LICENSE.txt", dst="licenses", src="source")
self.copy(pattern="*b2", dst="bin", src="output")
self.copy(pattern="*b2.exe", dst="bin", src="output")
self.copy(pattern="*.jam", dst="bin", src="output")
def package_info(self):
self.cpp_info.bindirs = ["bin"]
self.env_info.path = [os.path.join(
self.package_folder, "bin")]
def package_id(self):
del self.info.options.use_cxx_env
del self.info.options.toolset
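A hedged consumer-side sketch (the version string and recipe name below are illustrative, not taken from this recipe): because package_info() puts the packaged bin/ directory on the PATH via env_info, a dependent Conan 1.x recipe can invoke the b2 binary directly during its build.

from conans import ConanFile

class ConsumerConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"
    build_requires = "b2/4.9.2"  # hypothetical version reference

    def build(self):
        # env_info.path from the b2 build requirement is applied here,
        # so the packaged executable resolves as plain "b2".
        self.run("b2 --version")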
| 41.22449
| 119
| 0.602723
|
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"
class B2Conan(ConanFile):
name = "b2"
homepage = "https://www.bfgroup.xyz/b2/"
description = "B2 makes it easy to build C++ projects, everywhere."
topics = ("b2", "installer", "builder", "build", "build-system")
license = "BSL-1.0"
settings = "os", "arch"
url = "https://github.com/conan-io/conan-center-index"
options = {
'use_cxx_env': [False, True],
'toolset': [
'auto', 'cxx', 'cross-cxx',
'acc', 'borland', 'clang', 'como', 'gcc-nocygwin', 'gcc',
'intel-darwin', 'intel-linux', 'intel-win32', 'kcc', 'kylix',
'mingw', 'mipspro', 'pathscale', 'pgi', 'qcc', 'sun', 'sunpro',
'tru64cxx', 'vacpp', 'vc12', 'vc14', 'vc141', 'vc142', 'vc143']
}
default_options = {
'use_cxx_env': False,
'toolset': 'auto'
}
def validate(self):
if (self.options.toolset == 'cxx' or self.options.toolset == 'cross-cxx') and not self.options.use_cxx_env:
            raise ConanInvalidConfiguration(
                "Toolsets 'cxx' and 'cross-cxx' require 'use_cxx_env=True'")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
strip_root=True, destination="source")
def build(self):
use_windows_commands = os.name == 'nt'
command = "build" if use_windows_commands else "./build.sh"
if self.options.toolset != 'auto':
command += " "+str(self.options.toolset)
build_dir = os.path.join(self.source_folder, "source")
engine_dir = os.path.join(build_dir, "src", "engine")
os.chdir(engine_dir)
with tools.environment_append({"VSCMD_START_DIR": os.curdir}):
if self.options.use_cxx_env:
self.run(command)
else:
with tools.environment_append({"CXX": "", "CXXFLAGS": ""}):
self.run(command)
os.chdir(build_dir)
command = os.path.join(
engine_dir, "b2.exe" if use_windows_commands else "b2")
full_command = \
"{0} --ignore-site-config --prefix=../output --abbreviate-paths install b2-install-layout=portable".format(
command)
self.run(full_command)
def package(self):
self.copy("LICENSE.txt", dst="licenses", src="source")
self.copy(pattern="*b2", dst="bin", src="output")
self.copy(pattern="*b2.exe", dst="bin", src="output")
self.copy(pattern="*.jam", dst="bin", src="output")
def package_info(self):
self.cpp_info.bindirs = ["bin"]
self.env_info.path = [os.path.join(
self.package_folder, "bin")]
def package_id(self):
del self.info.options.use_cxx_env
del self.info.options.toolset
| true
| true
|
1c47d83c488b457f490f24ffef2a609a22042fe3
| 2,173
|
py
|
Python
|
tests/importer/onnx/basic/test_gemm.py
|
louareg/nncase
|
0125654eb57b7ff753fe9c396c84b264c01f34d3
|
[
"Apache-2.0"
] | null | null | null |
tests/importer/onnx/basic/test_gemm.py
|
louareg/nncase
|
0125654eb57b7ff753fe9c396c84b264c01f34d3
|
[
"Apache-2.0"
] | null | null | null |
tests/importer/onnx/basic/test_gemm.py
|
louareg/nncase
|
0125654eb57b7ff753fe9c396c84b264c01f34d3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019-2021 Canaan Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
import pytest
import onnx
from onnx import helper
from onnx import AttributeProto, TensorProto, GraphProto
import numpy as np
from onnx_test_runner import OnnxTestRunner
def _make_module():
input_A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [112, 224])
input_B = helper.make_tensor("B", TensorProto.FLOAT,
dims=(56, 224),
vals=np.random.randn(56, 224).astype(np.float32).flatten().tolist())
input_C = helper.make_tensor("C", TensorProto.FLOAT,
dims=(56,),
vals=np.random.randn(56,).astype(np.float32).flatten().tolist())
initializers = []
initializers.append(input_B)
initializers.append(input_C)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [112, 56])
node_def = helper.make_node(
'Gemm',
['A', 'B', 'C'],
['output'],
alpha=2.0,
beta=3.0,
transA=0,
transB=1
)
graph_def = helper.make_graph(
[node_def],
'test-model',
[input_A],
[output],
initializer=initializers
)
model_def = helper.make_model(graph_def, producer_name='kendryte')
return model_def
def test_gemm(request):
model_def = _make_module()
runner = OnnxTestRunner(request.node.name)
model_file = runner.from_onnx_helper(model_def)
runner.run(model_file)
if __name__ == "__main__":
pytest.main(['-vv', 'test_gemm.py'])
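As a cross-check on the attributes used above, the expected Gemm result can be reproduced directly with numpy. ONNX Gemm computes alpha * A' @ B' + beta * C, where the primes apply the transA/transB flags; this sketch is independent of the test runner.

import numpy as np

A = np.random.randn(112, 224).astype(np.float32)
B = np.random.randn(56, 224).astype(np.float32)
C = np.random.randn(56).astype(np.float32)
# With transA=0, transB=1, alpha=2.0, beta=3.0, as in the node above:
expected = 2.0 * (A @ B.T) + 3.0 * C  # C broadcasts across the 112 rows
assert expected.shape == (112, 56)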
| 32.432836
| 100
| 0.655315
|
import pytest
import onnx
from onnx import helper
from onnx import AttributeProto, TensorProto, GraphProto
import numpy as np
from onnx_test_runner import OnnxTestRunner
def _make_module():
input_A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [112, 224])
input_B = helper.make_tensor("B", TensorProto.FLOAT,
dims=(56, 224),
vals=np.random.randn(56, 224).astype(np.float32).flatten().tolist())
input_C = helper.make_tensor("C", TensorProto.FLOAT,
dims=(56,),
vals=np.random.randn(56,).astype(np.float32).flatten().tolist())
initializers = []
initializers.append(input_B)
initializers.append(input_C)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [112, 56])
node_def = helper.make_node(
'Gemm',
['A', 'B', 'C'],
['output'],
alpha=2.0,
beta=3.0,
transA=0,
transB=1
)
graph_def = helper.make_graph(
[node_def],
'test-model',
[input_A],
[output],
initializer=initializers
)
model_def = helper.make_model(graph_def, producer_name='kendryte')
return model_def
def test_gemm(request):
model_def = _make_module()
runner = OnnxTestRunner(request.node.name)
model_file = runner.from_onnx_helper(model_def)
runner.run(model_file)
if __name__ == "__main__":
pytest.main(['-vv', 'test_gemm.py'])
| true
| true
|
1c47d8bd9f5b530094b55d25e5a8c3f6233d8908
| 140
|
py
|
Python
|
src/pyggui/defaults/structures/__init__.py
|
15minutOdmora/python-pyggui
|
6675aeecfc7c47dac54a475dfb87d9e6b641041c
|
[
"MIT"
] | null | null | null |
src/pyggui/defaults/structures/__init__.py
|
15minutOdmora/python-pyggui
|
6675aeecfc7c47dac54a475dfb87d9e6b641041c
|
[
"MIT"
] | null | null | null |
src/pyggui/defaults/structures/__init__.py
|
15minutOdmora/python-pyggui
|
6675aeecfc7c47dac54a475dfb87d9e6b641041c
|
[
"MIT"
] | null | null | null |
from pathlib import Path
# Define path constant at import time
PATH = Path(__file__).parent  # .parent yields this file's parent package directory
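A small hypothetical usage sketch (the resource filename is illustrative): the PATH constant lets callers build paths to files shipped inside this package.

from pyggui.defaults.structures import PATH

template_file = PATH / "example.json"  # hypothetical bundled resource
print(template_file)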
| 28
| 75
| 0.785714
|
from pathlib import Path
PATH = Path(__file__).parent
| true
| true
|
1c47dad798962eed2a8ddb76b9b3510f811c3e95
| 1,682
|
py
|
Python
|
sensors/tfmini_ros/scripts/ros_tfmini_laser_scanner.py
|
mascaaj/rosdonkeycar
|
2e98b837d9ad3a7dd73a3083f0866476501a73e7
|
[
"MIT"
] | null | null | null |
sensors/tfmini_ros/scripts/ros_tfmini_laser_scanner.py
|
mascaaj/rosdonkeycar
|
2e98b837d9ad3a7dd73a3083f0866476501a73e7
|
[
"MIT"
] | null | null | null |
sensors/tfmini_ros/scripts/ros_tfmini_laser_scanner.py
|
mascaaj/rosdonkeycar
|
2e98b837d9ad3a7dd73a3083f0866476501a73e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
from tfmini_servo_scanner import *
import math
import time  # time.sleep() below previously relied on the wildcard import above
SERVO_GPIO = 18
SRV_ANGLE_MIN = math.radians(-85)
SRV_ANGLE_MAX = math.radians(85)
SRV_DUTY_ANGLE_MIN = 2350
SRV_DUTY_ANGLE_MAX = 700
SRV_TIME_MIN_MAX = 0.7
LASER_ANGLE_SAMPLES = 50
def tfmini_laserscan_publisher():
    scan_pub = rospy.Publisher('tfmini_laser', LaserScan, queue_size=0)
    scan = LaserScan()
    #-- Convention: counter-clockwise is positive (left positive, right negative)
tfminiscanner = TfminiServoScanner(SERVO_GPIO, SRV_ANGLE_MIN, SRV_ANGLE_MAX,
SRV_DUTY_ANGLE_MIN, SRV_DUTY_ANGLE_MAX, LASER_ANGLE_SAMPLES,
SRV_TIME_MIN_MAX)
frame_id = rospy.get_param('~frame_id', '/map')
#-- Initialize the message
scan.header.frame_id = frame_id
scan.range_min = tfminiscanner.laser.distance_min*0.01
scan.range_max = tfminiscanner.laser.distance_max*0.01
tfminiscanner.reset_servo()
time.sleep(1)
while not rospy.is_shutdown():
ini_angle, end_angle, time_increment, angle_increment, ranges = tfminiscanner.scan(scale_factor=0.01, reset=True)
scan.angle_min = ini_angle
scan.angle_max = end_angle
scan.angle_increment = angle_increment
scan.time_increment = time_increment
scan.ranges = ranges
        scan_pub.publish(scan)
if __name__ == "__main__":
rospy.init_node("tfmini_laserscan")
tfmini_laserscan_publisher()
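One hedged refinement, not part of the original node: LaserScan consumers such as rviz generally expect every message to carry a timestamp, so the publish step could stamp the header first, e.g.:

import rospy

def publish_stamped(pub, scan):
    scan.header.stamp = rospy.Time.now()  # timestamp each scan before publishing
    pub.publish(scan)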
| 30.035714
| 121
| 0.648038
|
import rospy
from sensor_msgs.msg import LaserScan
from tfmini_servo_scanner import *
import math
import time
SERVO_GPIO = 18
SRV_ANGLE_MIN = math.radians(-85)
SRV_ANGLE_MAX = math.radians(85)
SRV_DUTY_ANGLE_MIN = 2350
SRV_DUTY_ANGLE_MAX = 700
SRV_TIME_MIN_MAX = 0.7
LASER_ANGLE_SAMPLES = 50
def tfmini_laserscan_publisher():
    scan_pub = rospy.Publisher('tfmini_laser', LaserScan, queue_size=0)
scan = LaserScan()
tfminiscanner = TfminiServoScanner(SERVO_GPIO, SRV_ANGLE_MIN, SRV_ANGLE_MAX,
SRV_DUTY_ANGLE_MIN, SRV_DUTY_ANGLE_MAX, LASER_ANGLE_SAMPLES,
SRV_TIME_MIN_MAX)
frame_id = rospy.get_param('~frame_id', '/map')
scan.header.frame_id = frame_id
scan.range_min = tfminiscanner.laser.distance_min*0.01
scan.range_max = tfminiscanner.laser.distance_max*0.01
tfminiscanner.reset_servo()
time.sleep(1)
while not rospy.is_shutdown():
ini_angle, end_angle, time_increment, angle_increment, ranges = tfminiscanner.scan(scale_factor=0.01, reset=True)
scan.angle_min = ini_angle
scan.angle_max = end_angle
scan.angle_increment = angle_increment
scan.time_increment = time_increment
scan.ranges = ranges
        scan_pub.publish(scan)
if __name__ == "__main__":
rospy.init_node("tfmini_laserscan")
tfmini_laserscan_publisher()
| true
| true
|
1c47dbc173fc346ee1f5f5043ff56d7fb45daca5
| 633
|
py
|
Python
|
backend/manage.py
|
crowdbotics-apps/apptest-33096
|
ab08576d017c0ba776394073ffaeeac46d72b8d2
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/manage.py
|
crowdbotics-apps/apptest-33096
|
ab08576d017c0ba776394073ffaeeac46d72b8d2
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/manage.py
|
crowdbotics-apps/apptest-33096
|
ab08576d017c0ba776394073ffaeeac46d72b8d2
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apptest_33096.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.772727
| 77
| 0.685624
|
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apptest_33096.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true
| true
|
1c47dbeb5a981b28bb4113e5889393e507765b42
| 8,223
|
py
|
Python
|
discord/ext/flags/_command.py
|
CircuitsBots/Flag-Parsing
|
e5e997ef4a4642d15066df1ee9b62de05e2c2bc2
|
[
"MIT"
] | 3
|
2021-03-16T20:54:37.000Z
|
2021-11-11T11:01:20.000Z
|
discord/ext/flags/_command.py
|
CircuitsBots/Flag-Parsing
|
e5e997ef4a4642d15066df1ee9b62de05e2c2bc2
|
[
"MIT"
] | null | null | null |
discord/ext/flags/_command.py
|
CircuitsBots/Flag-Parsing
|
e5e997ef4a4642d15066df1ee9b62de05e2c2bc2
|
[
"MIT"
] | 2
|
2021-09-17T04:24:57.000Z
|
2022-02-05T17:11:25.000Z
|
import shlex
from collections import namedtuple
import argparse
import sys
import discord
from discord.ext import commands
from discord.ext.commands import converter
from . import _parser
__all__ = ["add_flag", "command", "group", "FlagCommand", "FlagGroup"]
argument = namedtuple("argument", "args kwargs")
def command(**kwargs):
def inner(func):
cls = kwargs.pop('cls', FlagCommand)
return cls(func, **kwargs)
return inner
def group(**kwargs):
def inner(func):
cls = kwargs.pop('cls', FlagGroup)
return cls(func, **kwargs)
return inner
def add_flag(*flag_names, **kwargs):
def inner(func):
if isinstance(func, commands.Command):
nfunc = func.callback
else:
nfunc = func
if not hasattr(nfunc, '_def_parser'):
nfunc._def_parser = _parser.DontExitArgumentParser()
nfunc._def_parser.add_argument(*flag_names, **kwargs)
return func
return inner
class FlagCommand(commands.Command):
async def _parse_flag_arguments(self, ctx):
if not hasattr(self.callback, '_def_parser'):
return
arg = ctx.view.read_rest()
try:
namespace = self.callback._def_parser.parse_args(shlex.split(arg), ctx=ctx)
except ValueError:
raise commands.ExpectedClosingQuoteError("quote")
flags = vars(namespace)
        async def do_conversion(value):
            # Only convert deferred results produced via the parser's _get_value;
            # anything else is already a final value.
if type(value) is _parser.ParserResult:
try:
value = await discord.utils.maybe_coroutine(value.result)
# ArgumentTypeErrors indicate errors
except argparse.ArgumentTypeError:
msg = str(sys.exc_info()[1])
raise argparse.ArgumentError(value.action, msg)
# TypeErrors or ValueErrors also indicate errors
except (TypeError, ValueError):
name = getattr(value.action.type, '__name__', repr(value.action.type))
args = {'type': name, 'value': value.arg_string}
msg = 'invalid %(type)s value: %(value)r'
raise argparse.ArgumentError(value.action, msg % args)
return value
for flag, value in flags.items():
            # Iterate when the value is a list (this happens when nargs='+').
            if type(value) is list:
                value = [await do_conversion(v) for v in value]
            else:
                value = await do_conversion(value)
ctx.kwargs.update({flag: value})
@property
def old_signature(self):
if self.usage is not None:
return self.usage
params = self.clean_params
if not params:
return ''
result = []
for name, param in params.items():
greedy = isinstance(param.annotation, converter._Greedy)
if param.default is not param.empty:
                # We don't want None or '' to trigger the [name=value] case; it
                # should render as [name], since [name=None] or [name=] are not
                # useful to the user.
should_print = param.default if isinstance(param.default, str) else param.default is not None
if should_print:
result.append('[%s=%s]' % (name, param.default) if not greedy else
'[%s=%s]...' % (name, param.default))
continue
else:
result.append('[%s]' % name)
elif param.kind == param.VAR_POSITIONAL:
result.append('[%s...]' % name)
elif greedy:
result.append('[%s]...' % name)
elif self._is_typing_optional(param.annotation):
result.append('[%s]' % name)
elif param.kind == param.VAR_KEYWORD:
pass
else:
result.append('<%s>' % name)
return ' '.join(result)
@property
def signature(self):
result = self.old_signature
to_append = [result]
parser = self.callback._def_parser # type: _parser.DontExitArgumentParser
for action in parser._actions:
# in argparse, options are done before positionals
# so we need to loop over it twice unfortunately
if action.option_strings:
name = action.dest.upper()
flag = action.option_strings[0].lstrip('-').replace('-', '_')
k = '-' if len(flag) == 1 else '--'
should_print = action.default is not None and action.default != ''
if action.required:
if should_print:
to_append.append('<%s%s %s=%s>' % (k, flag, name, action.default))
else:
to_append.append('<%s%s %s>' % (k, flag, name))
else:
if should_print:
to_append.append('[%s%s %s=%s]' % (k, flag, name, action.default))
else:
to_append.append('[%s%s %s]' % (k, flag, name))
for action in parser._actions:
# here we do the positionals
if not action.option_strings:
name = action.dest
should_print = action.default is not None and action.default != ''
if action.nargs in ('*', '?'): # optional narg types
if should_print:
to_append.append('[%s=%s]' % (name, action.default))
else:
to_append.append('[%s]' % name)
else:
if should_print:
to_append.append('<%s=%s>' % (name, action.default))
else:
to_append.append('<%s>' % name)
return ' '.join(to_append)
async def _parse_arguments(self, ctx):
ctx.args = [ctx] if self.cog is None else [self.cog, ctx]
ctx.kwargs = {}
args = ctx.args
kwargs = ctx.kwargs
view = ctx.view
iterator = iter(self.params.items())
if self.cog is not None:
# we have 'self' as the first parameter so just advance
# the iterator and resume parsing
try:
next(iterator)
except StopIteration:
fmt = 'Callback for {0.name} command is missing "self" parameter.'
raise discord.ClientException(fmt.format(self))
# next we have the 'ctx' as the next parameter
try:
next(iterator)
except StopIteration:
fmt = 'Callback for {0.name} command is missing "ctx" parameter.'
raise discord.ClientException(fmt.format(self))
for name, param in iterator:
if param.kind == param.POSITIONAL_OR_KEYWORD:
transformed = await self.transform(ctx, param)
args.append(transformed)
elif param.kind == param.KEYWORD_ONLY:
# kwarg only param denotes "consume rest" semantics
if self.rest_is_raw:
converter = self._get_converter(param)
argument = view.read_rest()
kwargs[name] = await self.do_conversion(ctx, converter, argument, param)
else:
kwargs[name] = await self.transform(ctx, param)
break
elif param.kind == param.VAR_POSITIONAL:
while not view.eof:
try:
transformed = await self.transform(ctx, param)
args.append(transformed)
except RuntimeError:
break
elif param.kind == param.VAR_KEYWORD:
await self._parse_flag_arguments(ctx)
break
if not self.ignore_extra:
if not view.eof:
raise commands.TooManyArguments('Too many arguments passed to ' + self.qualified_name)
class FlagGroup(FlagCommand, commands.Group):
pass
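A minimal usage sketch (the bot wiring, command name, and import path are illustrative assumptions): flags declared with @add_flag are parsed out of the message text and delivered through the **flags var-keyword parameter, which is exactly the branch _parse_arguments handles above.

from discord.ext import commands
from discord.ext.flags import FlagCommand, add_flag

bot = commands.Bot(command_prefix="!")

@bot.command(cls=FlagCommand)
@add_flag("--count", type=int, default=1)
async def say(ctx, message, **flags):
    # Invoked as e.g.: !say hello --count 3
    for _ in range(flags["count"]):
        await ctx.send(message)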
| 37.377273
| 109
| 0.538246
|
import shlex
from collections import namedtuple
import argparse
import sys
import discord
from discord.ext import commands
from discord.ext.commands import converter
from . import _parser
__all__ = ["add_flag", "command", "group", "FlagCommand", "FlagGroup"]
argument = namedtuple("argument", "args kwargs")
def command(**kwargs):
def inner(func):
cls = kwargs.pop('cls', FlagCommand)
return cls(func, **kwargs)
return inner
def group(**kwargs):
def inner(func):
cls = kwargs.pop('cls', FlagGroup)
return cls(func, **kwargs)
return inner
def add_flag(*flag_names, **kwargs):
def inner(func):
if isinstance(func, commands.Command):
nfunc = func.callback
else:
nfunc = func
if not hasattr(nfunc, '_def_parser'):
nfunc._def_parser = _parser.DontExitArgumentParser()
nfunc._def_parser.add_argument(*flag_names, **kwargs)
return func
return inner
class FlagCommand(commands.Command):
async def _parse_flag_arguments(self, ctx):
if not hasattr(self.callback, '_def_parser'):
return
arg = ctx.view.read_rest()
try:
namespace = self.callback._def_parser.parse_args(shlex.split(arg), ctx=ctx)
except ValueError:
raise commands.ExpectedClosingQuoteError("quote")
flags = vars(namespace)
        async def do_conversion(value):
if type(value) is _parser.ParserResult:
try:
value = await discord.utils.maybe_coroutine(value.result)
except argparse.ArgumentTypeError:
msg = str(sys.exc_info()[1])
raise argparse.ArgumentError(value.action, msg)
except (TypeError, ValueError):
name = getattr(value.action.type, '__name__', repr(value.action.type))
args = {'type': name, 'value': value.arg_string}
msg = 'invalid %(type)s value: %(value)r'
raise argparse.ArgumentError(value.action, msg % args)
return value
for flag, value in flags.items():
if type(value) is list:
                value = [await do_conversion(v) for v in value]
else:
                value = await do_conversion(value)
ctx.kwargs.update({flag: value})
@property
def old_signature(self):
if self.usage is not None:
return self.usage
params = self.clean_params
if not params:
return ''
result = []
for name, param in params.items():
greedy = isinstance(param.annotation, converter._Greedy)
if param.default is not param.empty:
                # Render as [name]; [name=None] or [name=] are not useful to the user.
should_print = param.default if isinstance(param.default, str) else param.default is not None
if should_print:
result.append('[%s=%s]' % (name, param.default) if not greedy else
'[%s=%s]...' % (name, param.default))
continue
else:
result.append('[%s]' % name)
elif param.kind == param.VAR_POSITIONAL:
result.append('[%s...]' % name)
elif greedy:
result.append('[%s]...' % name)
elif self._is_typing_optional(param.annotation):
result.append('[%s]' % name)
elif param.kind == param.VAR_KEYWORD:
pass
else:
result.append('<%s>' % name)
return ' '.join(result)
@property
def signature(self):
result = self.old_signature
to_append = [result]
parser = self.callback._def_parser # type: _parser.DontExitArgumentParser
for action in parser._actions:
# in argparse, options are done before positionals
# so we need to loop over it twice unfortunately
if action.option_strings:
name = action.dest.upper()
flag = action.option_strings[0].lstrip('-').replace('-', '_')
k = '-' if len(flag) == 1 else '--'
should_print = action.default is not None and action.default != ''
if action.required:
if should_print:
to_append.append('<%s%s %s=%s>' % (k, flag, name, action.default))
else:
to_append.append('<%s%s %s>' % (k, flag, name))
else:
if should_print:
to_append.append('[%s%s %s=%s]' % (k, flag, name, action.default))
else:
to_append.append('[%s%s %s]' % (k, flag, name))
for action in parser._actions:
# here we do the positionals
if not action.option_strings:
name = action.dest
should_print = action.default is not None and action.default != ''
if action.nargs in ('*', '?'): # optional narg types
if should_print:
to_append.append('[%s=%s]' % (name, action.default))
else:
to_append.append('[%s]' % name)
else:
if should_print:
to_append.append('<%s=%s>' % (name, action.default))
else:
to_append.append('<%s>' % name)
return ' '.join(to_append)
async def _parse_arguments(self, ctx):
ctx.args = [ctx] if self.cog is None else [self.cog, ctx]
ctx.kwargs = {}
args = ctx.args
kwargs = ctx.kwargs
view = ctx.view
iterator = iter(self.params.items())
if self.cog is not None:
# we have 'self' as the first parameter so just advance
# the iterator and resume parsing
try:
next(iterator)
except StopIteration:
fmt = 'Callback for {0.name} command is missing "self" parameter.'
raise discord.ClientException(fmt.format(self))
# next we have the 'ctx' as the next parameter
try:
next(iterator)
except StopIteration:
fmt = 'Callback for {0.name} command is missing "ctx" parameter.'
raise discord.ClientException(fmt.format(self))
for name, param in iterator:
if param.kind == param.POSITIONAL_OR_KEYWORD:
transformed = await self.transform(ctx, param)
args.append(transformed)
elif param.kind == param.KEYWORD_ONLY:
# kwarg only param denotes "consume rest" semantics
if self.rest_is_raw:
converter = self._get_converter(param)
argument = view.read_rest()
kwargs[name] = await self.do_conversion(ctx, converter, argument, param)
else:
kwargs[name] = await self.transform(ctx, param)
break
elif param.kind == param.VAR_POSITIONAL:
while not view.eof:
try:
transformed = await self.transform(ctx, param)
args.append(transformed)
except RuntimeError:
break
elif param.kind == param.VAR_KEYWORD:
await self._parse_flag_arguments(ctx)
break
if not self.ignore_extra:
if not view.eof:
raise commands.TooManyArguments('Too many arguments passed to ' + self.qualified_name)
class FlagGroup(FlagCommand, commands.Group):
pass
| true
| true
|