hexsha
stringlengths 40
40
| size
int64 2
1.02M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 2
1.02M
| avg_line_length
float64 1
417k
| max_line_length
int64 1
987k
| alphanum_fraction
float64 0
1
| content_no_comment
stringlengths 0
1.01M
| is_comment_constant_removed
bool 1
class | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f707d1d9378240dadfe0c5fce252cd9de45207d3
| 3,701
|
py
|
Python
|
demo/app.py
|
Mariatta/arq
|
7d44d1db21cc4c8eb0c6e24f510f6ba054a72f25
|
[
"MIT"
] | 1
|
2021-01-03T00:57:38.000Z
|
2021-01-03T00:57:38.000Z
|
demo/app.py
|
Mariatta/arq
|
7d44d1db21cc4c8eb0c6e24f510f6ba054a72f25
|
[
"MIT"
] | null | null | null |
demo/app.py
|
Mariatta/arq
|
7d44d1db21cc4c8eb0c6e24f510f6ba054a72f25
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.6
import os
import asyncio
from time import time
import chevron
import uvloop
from aiohttp import web, ClientError, ClientSession
from aiohttp_session import SimpleCookieStorage, get_session
from aiohttp_session import setup as session_setup
from arq import Actor, BaseWorker, RedisSettings, concurrent
R_OUTPUT = 'output'
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class Downloader(Actor):
re_enqueue_jobs = True
async def startup(self):
self.session = ClientSession(loop=self.loop)
@concurrent
async def download_content(self, url, count):
total_size = 0
errors = []
start = time()
for _ in range(count):
try:
async with self.session.get(url) as r:
content = await r.read()
total_size += len(content)
if r.status != 200:
errors.append(f'{r.status} length: {len(content)}')
except ClientError as e:
errors.append(f'{e.__class__.__name__}: {e}')
output = f'{time() - start:0.2f}s, {count} downloads, total size: {total_size}'
if errors:
output += ', errors: ' + ', '.join(errors)
await self.redis.rpush(R_OUTPUT, output.encode())
return total_size
async def shutdown(self):
self.session.close()
html_template = """
<h1>arq demo</h1>
{{#message}}
<div>{{ message }}</div>
{{/message}}
<form method="post" action="/start-job/">
<p>
<label for="url">Url to download</label>
<input type="url" name="url" id="url" value="https://httpbin.org/get" required/>
</p>
<p>
<label for="count">Download count</label>
<input type="number" step="1" name="count" id="count" value="10" required/>
</p>
<p>
<input type="submit" value="Download"/>
</p>
</form>
<h2>Results:</h2>
{{#results}}
<p>{{ . }}</p>
{{/results}}
"""
async def index(request):
redis = await request.app['downloader'].get_redis()
data = await redis.lrange(R_OUTPUT, 0, -1)
results = [r.decode() for r in data]
session = await get_session(request)
html = chevron.render(html_template, {'message': session.get('message'), 'results': results})
session.invalidate()
return web.Response(text=html, content_type='text/html')
async def start_job(request):
data = await request.post()
session = await get_session(request)
try:
url = data['url']
count = int(data['count'])
except (KeyError, ValueError) as e:
session['message'] = f'Invalid input, {e.__class__.__name__}: {e}'
else:
await request.app['downloader'].download_content(url, count)
session['message'] = f'Downloading "{url}" ' + (f'{count} times.' if count > 1 else 'once.')
raise web.HTTPFound(location='/')
redis_settings = RedisSettings(host=os.getenv('REDIS_HOST', 'localhost'))
async def shutdown(app):
await app['downloader'].close()
def create_app():
app = web.Application()
app.router.add_get('/', index)
app.router.add_post('/start-job/', start_job)
app['downloader'] = Downloader(redis_settings=redis_settings)
app.on_shutdown.append(shutdown)
session_setup(app, SimpleCookieStorage())
return app
class Worker(BaseWorker):
# used by `arq app.py` command
shadows = [Downloader]
# set to small value so we can play with timeouts
timeout_seconds = 10
def __init__(self, *args, **kwargs):
kwargs['redis_settings'] = redis_settings
super().__init__(*args, **kwargs)
if __name__ == '__main__':
# when called directly run the webserver
app = create_app()
web.run_app(app, port=8000)
| 27.827068
| 100
| 0.634153
|
import os
import asyncio
from time import time
import chevron
import uvloop
from aiohttp import web, ClientError, ClientSession
from aiohttp_session import SimpleCookieStorage, get_session
from aiohttp_session import setup as session_setup
from arq import Actor, BaseWorker, RedisSettings, concurrent
R_OUTPUT = 'output'
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class Downloader(Actor):
re_enqueue_jobs = True
async def startup(self):
self.session = ClientSession(loop=self.loop)
@concurrent
async def download_content(self, url, count):
total_size = 0
errors = []
start = time()
for _ in range(count):
try:
async with self.session.get(url) as r:
content = await r.read()
total_size += len(content)
if r.status != 200:
errors.append(f'{r.status} length: {len(content)}')
except ClientError as e:
errors.append(f'{e.__class__.__name__}: {e}')
output = f'{time() - start:0.2f}s, {count} downloads, total size: {total_size}'
if errors:
output += ', errors: ' + ', '.join(errors)
await self.redis.rpush(R_OUTPUT, output.encode())
return total_size
async def shutdown(self):
self.session.close()
html_template = """
<h1>arq demo</h1>
{{#message}}
<div>{{ message }}</div>
{{/message}}
<form method="post" action="/start-job/">
<p>
<label for="url">Url to download</label>
<input type="url" name="url" id="url" value="https://httpbin.org/get" required/>
</p>
<p>
<label for="count">Download count</label>
<input type="number" step="1" name="count" id="count" value="10" required/>
</p>
<p>
<input type="submit" value="Download"/>
</p>
</form>
<h2>Results:</h2>
{{#results}}
<p>{{ . }}</p>
{{/results}}
"""
async def index(request):
redis = await request.app['downloader'].get_redis()
data = await redis.lrange(R_OUTPUT, 0, -1)
results = [r.decode() for r in data]
session = await get_session(request)
html = chevron.render(html_template, {'message': session.get('message'), 'results': results})
session.invalidate()
return web.Response(text=html, content_type='text/html')
async def start_job(request):
data = await request.post()
session = await get_session(request)
try:
url = data['url']
count = int(data['count'])
except (KeyError, ValueError) as e:
session['message'] = f'Invalid input, {e.__class__.__name__}: {e}'
else:
await request.app['downloader'].download_content(url, count)
session['message'] = f'Downloading "{url}" ' + (f'{count} times.' if count > 1 else 'once.')
raise web.HTTPFound(location='/')
redis_settings = RedisSettings(host=os.getenv('REDIS_HOST', 'localhost'))
async def shutdown(app):
await app['downloader'].close()
def create_app():
app = web.Application()
app.router.add_get('/', index)
app.router.add_post('/start-job/', start_job)
app['downloader'] = Downloader(redis_settings=redis_settings)
app.on_shutdown.append(shutdown)
session_setup(app, SimpleCookieStorage())
return app
class Worker(BaseWorker):
shadows = [Downloader]
timeout_seconds = 10
def __init__(self, *args, **kwargs):
kwargs['redis_settings'] = redis_settings
super().__init__(*args, **kwargs)
if __name__ == '__main__':
app = create_app()
web.run_app(app, port=8000)
| true
| true
|
f707d253c63d9914028eb228f855eea4ce59bc50
| 248
|
py
|
Python
|
simple_fem/__init__.py
|
IgorBaratta/simple_fem
|
292294fdcef263b3ddebdc79dfaa05cb2cefe60f
|
[
"MIT"
] | null | null | null |
simple_fem/__init__.py
|
IgorBaratta/simple_fem
|
292294fdcef263b3ddebdc79dfaa05cb2cefe60f
|
[
"MIT"
] | null | null | null |
simple_fem/__init__.py
|
IgorBaratta/simple_fem
|
292294fdcef263b3ddebdc79dfaa05cb2cefe60f
|
[
"MIT"
] | null | null | null |
from .fem import DofMap, Q1Element
from .function_space import FunctionSpace
from .mesh import Mesh, ReferenceQuadrilateral
from .plot import plot
from .quadrature import Quadrature
from .assemble import assemble_vector, assemble_matrix, apply_bc
| 31
| 64
| 0.842742
|
from .fem import DofMap, Q1Element
from .function_space import FunctionSpace
from .mesh import Mesh, ReferenceQuadrilateral
from .plot import plot
from .quadrature import Quadrature
from .assemble import assemble_vector, assemble_matrix, apply_bc
| true
| true
|
f707d39dbc4cb9a4b69e990dc483c17469e34d51
| 536
|
py
|
Python
|
vmm/__main__.py
|
kmohrf/vmm
|
5e0dc8c9502d07681bfaca8634ed5b083deae77b
|
[
"BSD-3-Clause"
] | 4
|
2020-03-08T08:45:35.000Z
|
2021-10-17T11:05:17.000Z
|
vmm/__main__.py
|
kmohrf/vmm
|
5e0dc8c9502d07681bfaca8634ed5b083deae77b
|
[
"BSD-3-Clause"
] | null | null | null |
vmm/__main__.py
|
kmohrf/vmm
|
5e0dc8c9502d07681bfaca8634ed5b083deae77b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2007 - 2014, Pascal Volk
# See COPYING for distribution information.
"""This is the vmm main script."""
import sys
if __name__ == "__main__":
# replace the script's cwd (/usr/local/sbin) with our module dir
# (the location of the vmm directory) - if it is not in sys.path
# sys.path[0] = '/usr/local/lib/vmm'
# Otherwise just remove /usr/local/sbin from sys.path
sys.path.remove(sys.path[0])
from vmm.cli.main import run
sys.exit(run(sys.argv))
| 28.210526
| 68
| 0.666045
|
import sys
if __name__ == "__main__":
# (the location of the vmm directory) - if it is not in sys.path
# sys.path[0] = '/usr/local/lib/vmm'
# Otherwise just remove /usr/local/sbin from sys.path
sys.path.remove(sys.path[0])
from vmm.cli.main import run
sys.exit(run(sys.argv))
| true
| true
|
f707d51f70d5c76439265b879aeec29bd4a6d006
| 2,533
|
py
|
Python
|
tests/api/routes/test_routes_init.py
|
VizierDB/web-api-async
|
e99f43df3df80ad5647f57d805c339257336ac73
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2019-10-21T03:01:39.000Z
|
2020-06-05T01:43:00.000Z
|
tests/api/routes/test_routes_init.py
|
VizierDB/web-api-async
|
e99f43df3df80ad5647f57d805c339257336ac73
|
[
"ECL-2.0",
"Apache-2.0"
] | 56
|
2019-07-12T21:16:03.000Z
|
2020-11-06T23:29:22.000Z
|
tests/api/routes/test_routes_init.py
|
VizierDB/web-api-async
|
e99f43df3df80ad5647f57d805c339257336ac73
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-02-07T19:56:55.000Z
|
2020-08-07T11:17:51.000Z
|
"""Test initialization of the url factory classes"""
import unittest
from vizier.api.routes.base import UrlFactory
from vizier.api.routes.base import PROPERTIES_BASEURL, PROPERTIES_APIDOCURL
from vizier.api.routes.task import TaskUrlFactory
class TestUrlFactoryInit(unittest.TestCase):
def test_init_url_factory(self):
"""Test initializing the main url factory."""
urls = UrlFactory(base_url='http://abc.com/////')
self.assertEqual(urls.base_url, 'http://abc.com')
self.assertIsNone(urls.api_doc_url)
urls = UrlFactory(base_url='http://abc.com/////', api_doc_url='ABC')
self.assertEqual(urls.base_url, 'http://abc.com')
self.assertEqual(urls.api_doc_url, 'ABC')
# Override API doc url via properties
urls = UrlFactory(
base_url='http://abc.com/////',
api_doc_url='ABC',
properties={PROPERTIES_APIDOCURL: 'XYZ'}
)
self.assertEqual(urls.base_url, 'http://abc.com')
self.assertEqual(urls.api_doc_url, 'XYZ')
# Override base url via properties
urls = UrlFactory(
base_url='http://abc.com/////',
api_doc_url='ABC',
properties={PROPERTIES_BASEURL: 'XYZ'}
)
self.assertEqual(urls.base_url, 'XYZ')
self.assertEqual(urls.api_doc_url, 'ABC')
# Initialize only via properties
urls = UrlFactory(properties={
PROPERTIES_BASEURL: 'XYZ',
PROPERTIES_APIDOCURL: 'ABC'
})
self.assertEqual(urls.base_url, 'XYZ')
self.assertEqual(urls.api_doc_url, 'ABC')
# Value error if base url is not set
with self.assertRaises(AssertionError):
urls = UrlFactory(
api_doc_url='ABC',
properties={PROPERTIES_APIDOCURL: 'XYZ'}
)
def test_tasks_url_factory(self):
"""Initialize the task url factory."""
fact = TaskUrlFactory(base_url='http://abc.com/////')
self.assertEqual(fact.base_url, 'http://abc.com')
self.assertEqual(fact.set_task_state(task_id='TID'), 'http://abc.com/tasks/TID')
# Initialize from class loader
fact = TaskUrlFactory(
base_url='http://abc.com/////',
properties={PROPERTIES_BASEURL: 'XYZ'}
)
self.assertEqual(fact.base_url, 'XYZ')
# Value error is no url factory is given
with self.assertRaises(ValueError):
TaskUrlFactory()
if __name__ == '__main__':
unittest.main()
| 37.80597
| 88
| 0.617055
|
import unittest
from vizier.api.routes.base import UrlFactory
from vizier.api.routes.base import PROPERTIES_BASEURL, PROPERTIES_APIDOCURL
from vizier.api.routes.task import TaskUrlFactory
class TestUrlFactoryInit(unittest.TestCase):
def test_init_url_factory(self):
urls = UrlFactory(base_url='http://abc.com/////')
self.assertEqual(urls.base_url, 'http://abc.com')
self.assertIsNone(urls.api_doc_url)
urls = UrlFactory(base_url='http://abc.com/////', api_doc_url='ABC')
self.assertEqual(urls.base_url, 'http://abc.com')
self.assertEqual(urls.api_doc_url, 'ABC')
urls = UrlFactory(
base_url='http://abc.com/////',
api_doc_url='ABC',
properties={PROPERTIES_APIDOCURL: 'XYZ'}
)
self.assertEqual(urls.base_url, 'http://abc.com')
self.assertEqual(urls.api_doc_url, 'XYZ')
urls = UrlFactory(
base_url='http://abc.com/////',
api_doc_url='ABC',
properties={PROPERTIES_BASEURL: 'XYZ'}
)
self.assertEqual(urls.base_url, 'XYZ')
self.assertEqual(urls.api_doc_url, 'ABC')
urls = UrlFactory(properties={
PROPERTIES_BASEURL: 'XYZ',
PROPERTIES_APIDOCURL: 'ABC'
})
self.assertEqual(urls.base_url, 'XYZ')
self.assertEqual(urls.api_doc_url, 'ABC')
with self.assertRaises(AssertionError):
urls = UrlFactory(
api_doc_url='ABC',
properties={PROPERTIES_APIDOCURL: 'XYZ'}
)
def test_tasks_url_factory(self):
fact = TaskUrlFactory(base_url='http://abc.com/////')
self.assertEqual(fact.base_url, 'http://abc.com')
self.assertEqual(fact.set_task_state(task_id='TID'), 'http://abc.com/tasks/TID')
fact = TaskUrlFactory(
base_url='http://abc.com/////',
properties={PROPERTIES_BASEURL: 'XYZ'}
)
self.assertEqual(fact.base_url, 'XYZ')
with self.assertRaises(ValueError):
TaskUrlFactory()
if __name__ == '__main__':
unittest.main()
| true
| true
|
f707d527fa31fabb43356f22df07e5fa96b0e200
| 3,790
|
py
|
Python
|
lib/protorpc-1.0/protorpc/message_types.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 76
|
2015-01-04T13:45:16.000Z
|
2022-02-12T11:06:49.000Z
|
lib/protorpc-1.0/protorpc/message_types.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 27
|
2015-02-12T20:04:37.000Z
|
2020-04-28T07:51:39.000Z
|
lib/protorpc-1.0/protorpc/message_types.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 42
|
2015-01-24T09:49:07.000Z
|
2020-10-13T16:59:31.000Z
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple protocol message types.
Includes new message and field types that are outside what is defined by the
protocol buffers standard.
"""
__author__ = 'rafek@google.com (Rafe Kaplan)'
import datetime
from . import messages
from . import util
__all__ = [
'DateTimeField',
'DateTimeMessage',
'VoidMessage',
]
class VoidMessage(messages.Message):
"""Empty message."""
class DateTimeMessage(messages.Message):
"""Message to store/transmit a DateTime.
Fields:
milliseconds: Milliseconds since Jan 1st 1970 local time.
time_zone_offset: Optional time zone offset, in minutes from UTC.
"""
milliseconds = messages.IntegerField(1, required=True)
time_zone_offset = messages.IntegerField(2)
class DateTimeField(messages.MessageField):
"""Field definition for datetime values.
Stores a python datetime object as a field. If time zone information is
included in the datetime object, it will be included in
the encoded data when this is encoded/decoded.
"""
type = datetime.datetime
message_type = DateTimeMessage
@util.positional(3)
def __init__(self,
number,
**kwargs):
super(DateTimeField, self).__init__(self.message_type,
number,
**kwargs)
def value_from_message(self, message):
"""Convert DateTimeMessage to a datetime.
Args:
A DateTimeMessage instance.
Returns:
A datetime instance.
"""
message = super(DateTimeField, self).value_from_message(message)
if message.time_zone_offset is None:
return datetime.datetime.utcfromtimestamp(message.milliseconds / 1000.0)
# Need to subtract the time zone offset, because when we call
# datetime.fromtimestamp, it will add the time zone offset to the
# value we pass.
milliseconds = (message.milliseconds -
60000 * message.time_zone_offset)
timezone = util.TimeZoneOffset(message.time_zone_offset)
return datetime.datetime.fromtimestamp(milliseconds / 1000.0,
tz=timezone)
def value_to_message(self, value):
value = super(DateTimeField, self).value_to_message(value)
# First, determine the delta from the epoch, so we can fill in
# DateTimeMessage's milliseconds field.
if value.tzinfo is None:
time_zone_offset = 0
local_epoch = datetime.datetime.utcfromtimestamp(0)
else:
time_zone_offset = util.total_seconds(value.tzinfo.utcoffset(value))
# Determine Jan 1, 1970 local time.
local_epoch = datetime.datetime.fromtimestamp(-time_zone_offset,
tz=value.tzinfo)
delta = value - local_epoch
# Create and fill in the DateTimeMessage, including time zone if
# one was specified.
message = DateTimeMessage()
message.milliseconds = int(util.total_seconds(delta) * 1000)
if value.tzinfo is not None:
utc_offset = value.tzinfo.utcoffset(value)
if utc_offset is not None:
message.time_zone_offset = int(
util.total_seconds(value.tzinfo.utcoffset(value)) / 60)
return message
| 31.583333
| 78
| 0.687335
|
__author__ = 'rafek@google.com (Rafe Kaplan)'
import datetime
from . import messages
from . import util
__all__ = [
'DateTimeField',
'DateTimeMessage',
'VoidMessage',
]
class VoidMessage(messages.Message):
class DateTimeMessage(messages.Message):
milliseconds = messages.IntegerField(1, required=True)
time_zone_offset = messages.IntegerField(2)
class DateTimeField(messages.MessageField):
type = datetime.datetime
message_type = DateTimeMessage
@util.positional(3)
def __init__(self,
number,
**kwargs):
super(DateTimeField, self).__init__(self.message_type,
number,
**kwargs)
def value_from_message(self, message):
message = super(DateTimeField, self).value_from_message(message)
if message.time_zone_offset is None:
return datetime.datetime.utcfromtimestamp(message.milliseconds / 1000.0)
milliseconds = (message.milliseconds -
60000 * message.time_zone_offset)
timezone = util.TimeZoneOffset(message.time_zone_offset)
return datetime.datetime.fromtimestamp(milliseconds / 1000.0,
tz=timezone)
def value_to_message(self, value):
value = super(DateTimeField, self).value_to_message(value)
if value.tzinfo is None:
time_zone_offset = 0
local_epoch = datetime.datetime.utcfromtimestamp(0)
else:
time_zone_offset = util.total_seconds(value.tzinfo.utcoffset(value))
# Determine Jan 1, 1970 local time.
local_epoch = datetime.datetime.fromtimestamp(-time_zone_offset,
tz=value.tzinfo)
delta = value - local_epoch
# Create and fill in the DateTimeMessage, including time zone if
# one was specified.
message = DateTimeMessage()
message.milliseconds = int(util.total_seconds(delta) * 1000)
if value.tzinfo is not None:
utc_offset = value.tzinfo.utcoffset(value)
if utc_offset is not None:
message.time_zone_offset = int(
util.total_seconds(value.tzinfo.utcoffset(value)) / 60)
return message
| true
| true
|
f707d5397bf7e46d6b3b59811c31a1684ec0baed
| 11,809
|
py
|
Python
|
tests/test-cluster.py
|
tripolkaandrey/microk8s
|
6270ca450fab6bf98b25f0df473461210694775d
|
[
"Apache-2.0"
] | 1
|
2021-02-08T09:14:44.000Z
|
2021-02-08T09:14:44.000Z
|
tests/test-cluster.py
|
tripolkaandrey/microk8s
|
6270ca450fab6bf98b25f0df473461210694775d
|
[
"Apache-2.0"
] | 15
|
2021-05-02T05:10:59.000Z
|
2022-03-09T22:27:19.000Z
|
tests/test-cluster.py
|
thanhtung22-cloud/microk8s
|
d1d02633ca245af9724094447be2e3c144526b35
|
[
"Apache-2.0"
] | null | null | null |
import string
import random
import time
import pytest
import os
import subprocess
from os import path
# Provide a list of VMs you want to reuse. VMs should have already microk8s installed.
# the test will attempt a refresh to the channel requested for testing
# reuse_vms = ['vm-ldzcjb', 'vm-nfpgea', 'vm-pkgbtw']
reuse_vms = None
channel_to_test = os.environ.get("CHANNEL_TO_TEST", "edge/ha-preview")
backend = os.environ.get("BACKEND", None)
class VM:
"""
This class abstracts the backend we are using. It could be either multipass or lxc.
"""
def __init__(self, attach_vm=None):
"""Detect the available backends and instantiate a VM.
If `attach_vm` is provided we just make sure the right MicroK8s is deployed.
:param attach_vm: the name of the VM we want to reuse
"""
rnd_letters = "".join(random.choice(string.ascii_lowercase) for i in range(6))
self.backend = "none"
self.vm_name = "vm-{}".format(rnd_letters)
if attach_vm:
self.vm_name = attach_vm
if path.exists("/snap/bin/multipass") or backend == "multipass":
print("Creating mulitpass VM")
self.backend = "multipass"
if not attach_vm:
subprocess.check_call(
"/snap/bin/multipass launch 18.04 -n {} -m 2G".format(self.vm_name).split()
)
subprocess.check_call(
"/snap/bin/multipass exec {} -- sudo "
"snap install microk8s --classic --channel {}".format(
self.vm_name, channel_to_test
).split()
)
else:
subprocess.check_call(
"/snap/bin/multipass exec {} -- sudo "
"snap refresh microk8s --channel {}".format(
self.vm_name, channel_to_test
).split()
)
elif path.exists("/snap/bin/lxc") or backend == "lxc":
self.backend = "lxc"
if not attach_vm:
profiles = subprocess.check_output("/snap/bin/lxc profile list".split())
if "microk8s" not in profiles.decode():
subprocess.check_call("/snap/bin/lxc profile copy default microk8s".split())
with open("lxc/microk8s-zfs.profile", "r+") as fp:
profile_string = fp.read()
process = subprocess.Popen(
"/snap/bin/lxc profile edit microk8s".split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
process.stdin.write(profile_string.encode())
process.stdin.close()
subprocess.check_call(
"/snap/bin/lxc launch -p default -p microk8s ubuntu:18.04 {}".format(
self.vm_name
).split()
)
cmd_prefix = "/snap/bin/lxc exec {} -- script -e -c".format(self.vm_name).split()
cmd = ["snap install microk8s --classic --channel {}".format(channel_to_test)]
time.sleep(20)
subprocess.check_output(cmd_prefix + cmd)
else:
cmd = "/snap/bin/lxc exec {} -- ".format(self.vm_name).split()
cmd.append("sudo snap refresh microk8s --channel {}".format(channel_to_test))
subprocess.check_call(cmd)
else:
raise Exception("Need to install multipass of lxc")
def run(self, cmd):
"""
Run a command
:param cmd: the command we are running.
:return: the output of the command
"""
if self.backend == "multipass":
output = subprocess.check_output(
"/snap/bin/multipass exec {} -- sudo " "{}".format(self.vm_name, cmd).split()
)
return output
elif self.backend == "lxc":
cmd_prefix = "/snap/bin/lxc exec {} -- script -e -c ".format(self.vm_name).split()
output = subprocess.check_output(cmd_prefix + [cmd])
return output
else:
raise Exception("Not implemented for backend {}".format(self.backend))
def release(self):
"""
Release a VM.
"""
print("Destroying VM in {}".format(self.backend))
if self.backend == "multipass":
subprocess.check_call("/snap/bin/multipass stop {}".format(self.vm_name).split())
subprocess.check_call("/snap/bin/multipass delete {}".format(self.vm_name).split())
elif self.backend == "lxc":
subprocess.check_call("/snap/bin/lxc stop {}".format(self.vm_name).split())
subprocess.check_call("/snap/bin/lxc delete {}".format(self.vm_name).split())
class TestCluster(object):
@pytest.fixture(autouse=True, scope="module")
def setup_cluster(self):
"""
Provision VMs and for a cluster.
:return:
"""
try:
print("Setting up cluster")
type(self).VM = []
if not reuse_vms:
size = 3
for i in range(0, size):
print("Creating machine {}".format(i))
vm = VM()
print("Waiting for machine {}".format(i))
vm.run("/snap/bin/microk8s.status --wait-ready --timeout 120")
self.VM.append(vm)
else:
for vm_name in reuse_vms:
self.VM.append(VM(vm_name))
# Form cluster
vm_master = self.VM[0]
connected_nodes = vm_master.run("/snap/bin/microk8s.kubectl get no")
for vm in self.VM:
if vm.vm_name in connected_nodes.decode():
continue
else:
print("Adding machine {} to cluster".format(vm.vm_name))
add_node = vm_master.run("/snap/bin/microk8s.add-node")
endpoint = [ep for ep in add_node.decode().split() if ":25000/" in ep]
vm.run("/snap/bin/microk8s.join {}".format(endpoint[0]))
# Wait for nodes to be ready
print("Waiting for nodes to register")
connected_nodes = vm_master.run("/snap/bin/microk8s.kubectl get no")
while "NotReady" in connected_nodes.decode():
time.sleep(5)
connected_nodes = vm_master.run("/snap/bin/microk8s.kubectl get no")
print(connected_nodes.decode())
# Wait for CNI pods
print("Waiting for cni")
while True:
ready_pods = 0
pods = vm_master.run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
for line in pods.decode().splitlines():
if "calico" in line and "Running" in line:
ready_pods += 1
if ready_pods == (len(self.VM) + 1):
print(pods.decode())
break
time.sleep(5)
yield
finally:
print("Cleanup up cluster")
if not reuse_vms:
for vm in self.VM:
print("Releasing machine {} in {}".format(vm.vm_name, vm.backend))
vm.release()
def test_calico_in_nodes(self):
"""
Test each node has a calico pod.
"""
print("Checking calico is in all nodes")
pods = self.VM[0].run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
for vm in self.VM:
if vm.vm_name not in pods.decode():
assert False
print("Calico found in node {}".format(vm.vm_name))
def test_nodes_in_ha(self):
"""
Test all nodes are seeing the database while removing nodes
"""
# All nodes see the same pods
for vm in self.VM:
pods = vm.run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
for other_vm in self.VM:
if other_vm.vm_name not in pods.decode():
assert False
print("All nodes see the same pods")
attempt = 100
while True:
assert attempt > 0
for vm in self.VM:
status = vm.run("/snap/bin/microk8s.status")
if "high-availability: yes" not in status.decode():
attempt += 1
continue
break
# remove a node
print("Removing machine {}".format(self.VM[0].vm_name))
self.VM[0].run("/snap/bin/microk8s.leave")
self.VM[1].run("/snap/bin/microk8s.remove-node {}".format(self.VM[0].vm_name))
# allow for some time for the leader to hand over leadership
time.sleep(10)
attempt = 100
while True:
ready_pods = 0
pods = self.VM[1].run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
for line in pods.decode().splitlines():
if "calico" in line and "Running" in line:
ready_pods += 1
if ready_pods == (len(self.VM)):
print(pods.decode())
break
attempt -= 1
if attempt <= 0:
assert False
time.sleep(5)
print("Checking calico is on the nodes running")
leftVMs = [self.VM[1], self.VM[2]]
attempt = 100
while True:
assert attempt > 0
for vm in leftVMs:
status = vm.run("/snap/bin/microk8s.status")
if "high-availability: no" not in status.decode():
attempt += 1
time.sleep(2)
continue
break
for vm in leftVMs:
pods = vm.run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
for other_vm in leftVMs:
if other_vm.vm_name not in pods.decode():
time.sleep(2)
assert False
print("Remaining nodes see the same pods")
print("Waiting for two ingress to appear")
self.VM[1].run("/snap/bin/microk8s.enable ingress")
# wait for two ingress to appear
time.sleep(10)
attempt = 100
while True:
ready_pods = 0
pods = self.VM[1].run("/snap/bin/microk8s.kubectl get po -A -o wide")
for line in pods.decode().splitlines():
if "ingress" in line and "Running" in line:
ready_pods += 1
if ready_pods == (len(self.VM) - 1):
print(pods.decode())
break
attempt -= 1
if attempt <= 0:
assert False
time.sleep(5)
print("Rejoin the node")
add_node = self.VM[1].run("/snap/bin/microk8s.add-node")
endpoint = [ep for ep in add_node.decode().split() if ":25000/" in ep]
self.VM[0].run("/snap/bin/microk8s.join {}".format(endpoint[0]))
print("Waiting for nodes to be ready")
connected_nodes = self.VM[0].run("/snap/bin/microk8s.kubectl get no")
while "NotReady" in connected_nodes.decode():
time.sleep(5)
connected_nodes = self.VM[0].run("/snap/bin/microk8s.kubectl get no")
attempt = 100
while True:
assert attempt > 0
for vm in self.VM:
status = vm.run("/snap/bin/microk8s.status")
if "high-availability: yes" not in status.decode():
attempt += 1
time.sleep(2)
continue
break
| 39.494983
| 98
| 0.519519
|
import string
import random
import time
import pytest
import os
import subprocess
from os import path
reuse_vms = None
channel_to_test = os.environ.get("CHANNEL_TO_TEST", "edge/ha-preview")
backend = os.environ.get("BACKEND", None)
class VM:
    """A single test machine backed by either multipass or LXD.

    On construction the VM is created (or an existing one attached via
    ``attach_vm``) and microk8s is installed/refreshed from the module-level
    ``channel_to_test``.  The backend is auto-detected from the installed
    snap binaries unless forced through the ``BACKEND`` environment variable.
    """

    def __init__(self, attach_vm=None):
        rnd_letters = "".join(random.choice(string.ascii_lowercase) for i in range(6))
        self.backend = "none"
        self.vm_name = "vm-{}".format(rnd_letters)
        if attach_vm:
            # Reuse an existing machine instead of provisioning a new one.
            self.vm_name = attach_vm

        if path.exists("/snap/bin/multipass") or backend == "multipass":
            print("Creating mulitpass VM")
            self.backend = "multipass"
            if not attach_vm:
                subprocess.check_call(
                    "/snap/bin/multipass launch 18.04 -n {} -m 2G".format(self.vm_name).split()
                )
                subprocess.check_call(
                    "/snap/bin/multipass exec {} -- sudo "
                    "snap install microk8s --classic --channel {}".format(
                        self.vm_name, channel_to_test
                    ).split()
                )
            else:
                # Machine already exists: just move microk8s to the channel under test.
                subprocess.check_call(
                    "/snap/bin/multipass exec {} -- sudo "
                    "snap refresh microk8s --channel {}".format(
                        self.vm_name, channel_to_test
                    ).split()
                )
        elif path.exists("/snap/bin/lxc") or backend == "lxc":
            self.backend = "lxc"
            if not attach_vm:
                # Make sure a microk8s-friendly LXD profile exists before launching.
                profiles = subprocess.check_output("/snap/bin/lxc profile list".split())
                if "microk8s" not in profiles.decode():
                    subprocess.check_call("/snap/bin/lxc profile copy default microk8s".split())
                    with open("lxc/microk8s-zfs.profile", "r+") as fp:
                        profile_string = fp.read()
                        process = subprocess.Popen(
                            "/snap/bin/lxc profile edit microk8s".split(),
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                        )
                        process.stdin.write(profile_string.encode())
                        process.stdin.close()
                subprocess.check_call(
                    "/snap/bin/lxc launch -p default -p microk8s ubuntu:18.04 {}".format(
                        self.vm_name
                    ).split()
                )
                cmd_prefix = "/snap/bin/lxc exec {} -- script -e -c".format(self.vm_name).split()
                cmd = ["snap install microk8s --classic --channel {}".format(channel_to_test)]
                # Give the container time to finish booting before installing the snap.
                time.sleep(20)
                subprocess.check_output(cmd_prefix + cmd)
            else:
                cmd = "/snap/bin/lxc exec {} -- ".format(self.vm_name).split()
                cmd.append("sudo snap refresh microk8s --channel {}".format(channel_to_test))
                subprocess.check_call(cmd)
        else:
            # Fixed message typo: was "multipass of lxc".
            raise Exception("Need to install multipass or lxc")

    def run(self, cmd):
        """Run ``cmd`` inside the VM as root and return its raw bytes output.

        Raises subprocess.CalledProcessError when the command fails.
        """
        if self.backend == "multipass":
            output = subprocess.check_output(
                "/snap/bin/multipass exec {} -- sudo " "{}".format(self.vm_name, cmd).split()
            )
            return output
        elif self.backend == "lxc":
            cmd_prefix = "/snap/bin/lxc exec {} -- script -e -c ".format(self.vm_name).split()
            output = subprocess.check_output(cmd_prefix + [cmd])
            return output
        else:
            raise Exception("Not implemented for backend {}".format(self.backend))

    def release(self):
        """Stop and delete the VM on whichever backend owns it."""
        print("Destroying VM in {}".format(self.backend))
        if self.backend == "multipass":
            subprocess.check_call("/snap/bin/multipass stop {}".format(self.vm_name).split())
            subprocess.check_call("/snap/bin/multipass delete {}".format(self.vm_name).split())
        elif self.backend == "lxc":
            subprocess.check_call("/snap/bin/lxc stop {}".format(self.vm_name).split())
            subprocess.check_call("/snap/bin/lxc delete {}".format(self.vm_name).split())
class TestCluster(object):
    """End-to-end HA test: builds a 3-node microk8s cluster, then exercises
    node removal and rejoin while checking HA status and calico pods."""

    @pytest.fixture(autouse=True, scope="module")
    def setup_cluster(self):
        """Provision (or attach to) the VMs, join them into one cluster and
        wait for CNI to be running everywhere; release the VMs afterwards."""
        try:
            print("Setting up cluster")
            # Store the VM list on the class so the module-scoped fixture
            # state is visible to every test method.
            type(self).VM = []
            if not reuse_vms:
                size = 3
                for i in range(0, size):
                    print("Creating machine {}".format(i))
                    vm = VM()
                    print("Waiting for machine {}".format(i))
                    vm.run("/snap/bin/microk8s.status --wait-ready --timeout 120")
                    self.VM.append(vm)
            else:
                # Attach to pre-existing machines instead of provisioning.
                for vm_name in reuse_vms:
                    self.VM.append(VM(vm_name))
            vm_master = self.VM[0]
            # Join every machine that is not already part of the cluster.
            connected_nodes = vm_master.run("/snap/bin/microk8s.kubectl get no")
            for vm in self.VM:
                if vm.vm_name in connected_nodes.decode():
                    continue
                else:
                    print("Adding machine {} to cluster".format(vm.vm_name))
                    add_node = vm_master.run("/snap/bin/microk8s.add-node")
                    # add-node prints several candidate join URLs; pick the
                    # one on the cluster-agent port (25000).
                    endpoint = [ep for ep in add_node.decode().split() if ":25000/" in ep]
                    vm.run("/snap/bin/microk8s.join {}".format(endpoint[0]))
            print("Waiting for nodes to register")
            connected_nodes = vm_master.run("/snap/bin/microk8s.kubectl get no")
            while "NotReady" in connected_nodes.decode():
                time.sleep(5)
                connected_nodes = vm_master.run("/snap/bin/microk8s.kubectl get no")
            print(connected_nodes.decode())
            print("Waiting for cni")
            # Expect one running calico pod per node plus one extra
            # (len(self.VM) + 1 matching lines).
            while True:
                ready_pods = 0
                pods = vm_master.run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
                for line in pods.decode().splitlines():
                    if "calico" in line and "Running" in line:
                        ready_pods += 1
                if ready_pods == (len(self.VM) + 1):
                    print(pods.decode())
                    break
                time.sleep(5)
            yield
        finally:
            print("Cleanup up cluster")
            if not reuse_vms:
                for vm in self.VM:
                    print("Releasing machine {} in {}".format(vm.vm_name, vm.backend))
                    vm.release()

    def test_calico_in_nodes(self):
        """Every node name must appear in the kube-system pod listing."""
        print("Checking calico is in all nodes")
        pods = self.VM[0].run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
        for vm in self.VM:
            if vm.vm_name not in pods.decode():
                assert False
            print("Calico found in node {}".format(vm.vm_name))

    def test_nodes_in_ha(self):
        """Check HA forms, survives a node leaving, and reforms on rejoin."""
        # Every node must report the same set of kube-system pods.
        for vm in self.VM:
            pods = vm.run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
            for other_vm in self.VM:
                if other_vm.vm_name not in pods.decode():
                    assert False
        print("All nodes see the same pods")
        # Wait for every node to report HA enabled.
        attempt = 100
        while True:
            assert attempt > 0
            for vm in self.VM:
                status = vm.run("/snap/bin/microk8s.status")
                if "high-availability: yes" not in status.decode():
                    # NOTE(review): this INCREMENTS the retry budget, and the
                    # `continue` only advances the inner for-loop; the outer
                    # while exits after one pass via the break below.
                    # Presumably `attempt -= 1` plus re-polling was intended —
                    # confirm against upstream.
                    attempt += 1
                    continue
            break
        # Drop the first node out of the cluster.
        print("Removing machine {}".format(self.VM[0].vm_name))
        self.VM[0].run("/snap/bin/microk8s.leave")
        self.VM[1].run("/snap/bin/microk8s.remove-node {}".format(self.VM[0].vm_name))
        time.sleep(10)
        # Wait until only len(self.VM) calico pods remain running.
        attempt = 100
        while True:
            ready_pods = 0
            pods = self.VM[1].run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
            for line in pods.decode().splitlines():
                if "calico" in line and "Running" in line:
                    ready_pods += 1
            if ready_pods == (len(self.VM)):
                print(pods.decode())
                break
            attempt -= 1
            if attempt <= 0:
                assert False
            time.sleep(5)
        print("Checking calico is on the nodes running")
        # With only two nodes left, HA must be reported as lost.
        leftVMs = [self.VM[1], self.VM[2]]
        attempt = 100
        while True:
            assert attempt > 0
            for vm in leftVMs:
                status = vm.run("/snap/bin/microk8s.status")
                if "high-availability: no" not in status.decode():
                    # NOTE(review): same suspicious `attempt += 1` pattern as
                    # above — verify the intended retry semantics.
                    attempt += 1
                    time.sleep(2)
                    continue
            break
        for vm in leftVMs:
            pods = vm.run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
            for other_vm in leftVMs:
                if other_vm.vm_name not in pods.decode():
                    time.sleep(2)
                    assert False
        print("Remaining nodes see the same pods")
        print("Waiting for two ingress to appear")
        self.VM[1].run("/snap/bin/microk8s.enable ingress")
        time.sleep(10)
        # One ingress pod per remaining node (len(self.VM) - 1 after removal).
        attempt = 100
        while True:
            ready_pods = 0
            pods = self.VM[1].run("/snap/bin/microk8s.kubectl get po -A -o wide")
            for line in pods.decode().splitlines():
                if "ingress" in line and "Running" in line:
                    ready_pods += 1
            if ready_pods == (len(self.VM) - 1):
                print(pods.decode())
                break
            attempt -= 1
            if attempt <= 0:
                assert False
            time.sleep(5)
        print("Rejoin the node")
        add_node = self.VM[1].run("/snap/bin/microk8s.add-node")
        endpoint = [ep for ep in add_node.decode().split() if ":25000/" in ep]
        self.VM[0].run("/snap/bin/microk8s.join {}".format(endpoint[0]))
        print("Waiting for nodes to be ready")
        connected_nodes = self.VM[0].run("/snap/bin/microk8s.kubectl get no")
        while "NotReady" in connected_nodes.decode():
            time.sleep(5)
            connected_nodes = self.VM[0].run("/snap/bin/microk8s.kubectl get no")
        # HA should reform once all three nodes are present again.
        attempt = 100
        while True:
            assert attempt > 0
            for vm in self.VM:
                status = vm.run("/snap/bin/microk8s.status")
                if "high-availability: yes" not in status.decode():
                    # NOTE(review): same suspicious `attempt += 1` pattern.
                    attempt += 1
                    time.sleep(2)
                    continue
            break
| true
| true
|
f707d5fe8383434cfbe843f45b38fefdbbbbb7fb
| 526
|
py
|
Python
|
python_toolbox/binary_search/__init__.py
|
hboshnak/python_toolbox
|
cb9ef64b48f1d03275484d707dc5079b6701ad0c
|
[
"MIT"
] | 119
|
2015-02-05T17:59:47.000Z
|
2022-02-21T22:43:40.000Z
|
python_toolbox/binary_search/__init__.py
|
hboshnak/python_toolbox
|
cb9ef64b48f1d03275484d707dc5079b6701ad0c
|
[
"MIT"
] | 4
|
2019-04-24T14:01:14.000Z
|
2020-05-21T12:03:29.000Z
|
python_toolbox/binary_search/__init__.py
|
hboshnak/python_toolbox
|
cb9ef64b48f1d03275484d707dc5079b6701ad0c
|
[
"MIT"
] | 14
|
2015-03-30T06:30:42.000Z
|
2021-12-24T23:45:11.000Z
|
# Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
'''A package for doing a binary search in a sequence.'''
from .roundings import (Rounding, LOW, LOW_IF_BOTH, LOW_OTHERWISE_HIGH, HIGH,
HIGH_IF_BOTH, HIGH_OTHERWISE_LOW, EXACT, CLOSEST,
CLOSEST_IF_BOTH, BOTH)
from .functions import (binary_search, binary_search_by_index,
make_both_data_into_preferred_rounding)
from .binary_search_profile import BinarySearchProfile
| 47.818182
| 77
| 0.711027
|
from .roundings import (Rounding, LOW, LOW_IF_BOTH, LOW_OTHERWISE_HIGH, HIGH,
HIGH_IF_BOTH, HIGH_OTHERWISE_LOW, EXACT, CLOSEST,
CLOSEST_IF_BOTH, BOTH)
from .functions import (binary_search, binary_search_by_index,
make_both_data_into_preferred_rounding)
from .binary_search_profile import BinarySearchProfile
| true
| true
|
f707d613ff7ebc27f3707beca316b702b82b34b7
| 2,503
|
py
|
Python
|
setup.py
|
haroldham/hive-sbi
|
2d99703ee9e4675dbae986c1e656bf714b72d0b5
|
[
"MIT"
] | 4
|
2018-09-23T07:56:18.000Z
|
2020-04-08T03:29:19.000Z
|
setup.py
|
haroldham/hive-sbi
|
2d99703ee9e4675dbae986c1e656bf714b72d0b5
|
[
"MIT"
] | 10
|
2018-07-31T06:30:28.000Z
|
2020-03-23T16:31:03.000Z
|
setup.py
|
haroldham/hive-sbi
|
2d99703ee9e4675dbae986c1e656bf714b72d0b5
|
[
"MIT"
] | 9
|
2018-07-16T20:19:29.000Z
|
2021-04-14T23:58:42.000Z
|
# -*- coding: utf-8 -*-
"""Packaging logic for beem."""
import codecs
import io
import os
import sys
from setuptools import setup
# Work around mbcs bug in distutils.
# http://bugs.python.org/issue10945
# Registering a dummy codec search function makes the 'mbcs' codec name
# resolvable on non-Windows platforms, where distutils would otherwise fail
# its lookup.  Note: `ascii` here shadows the builtin of the same name.
try:
    codecs.lookup('mbcs')
except LookupError:
    ascii = codecs.lookup('ascii')
    codecs.register(lambda name, enc=ascii: {True: enc}.get(name == 'mbcs'))

VERSION = '0.1.2'  # single source of truth; also written into steembi/version.py
tests_require = ['mock >= 2.0.0', 'pytest', 'pytest-mock', 'parameterized']
requires = [
    "beem",
    "dataset",
    "mysqlclient"
]
def write_version_py(filename):
    """Generate the version module that the package reads at runtime."""
    template = """\"""THIS FILE IS GENERATED FROM beem SETUP.PY.\"""
version = '%(version)s'
"""
    contents = template % {'version': VERSION}
    with open(filename, 'w') as handle:
        handle.write(contents)
def get_long_description():
    """Concatenate the project README file(s) into one long-description string."""
    sections = []
    for readme in ('README.md',):
        with io.open(readme, encoding='utf-8') as handle:
            sections.append(handle.read())
    return '\n\n'.join(sections)
if __name__ == '__main__':
    # Rewrite the version file everytime so steembi/version.py always
    # matches the VERSION constant above.
    write_version_py('steembi/version.py')
    setup(
        name='steembi',
        version=VERSION,
        description='Steem basic income library',
        long_description=get_long_description(),
        download_url='https://github.com/holgern/steembasicincome/tarball/' + VERSION,
        author='Holger Nahrstaedt',
        author_email='holger@nahrstaedt.de',
        maintainer='Holger Nahrstaedt',
        maintainer_email='holger@nahrstaedt.de',
        url='http://www.github.com/holgern/steembasicincome',
        keywords=['steem', 'library', 'ubi', 'steembasicincome'],
        packages=[
            "steembi"
        ],
        classifiers=[
            'License :: OSI Approved :: MIT License',
            'Operating System :: OS Independent',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: 3.6',
            'Development Status :: 4 - Beta',
            'Intended Audience :: Developers',
            'Topic :: Office/Business :: Financial',
        ],
        install_requires=requires,
        # No console scripts are exposed; kept for future entry points.
        entry_points={
        },
        setup_requires=['pytest-runner'],
        tests_require=tests_require,
        include_package_data=True,
    )
| 28.443182
| 86
| 0.596884
|
import codecs
import io
import os
import sys
from setuptools import setup
try:
codecs.lookup('mbcs')
except LookupError:
ascii = codecs.lookup('ascii')
codecs.register(lambda name, enc=ascii: {True: enc}.get(name == 'mbcs'))
VERSION = '0.1.2'
tests_require = ['mock >= 2.0.0', 'pytest', 'pytest-mock', 'parameterized']
requires = [
"beem",
"dataset",
"mysqlclient"
]
def write_version_py(filename):
cnt = """\"""THIS FILE IS GENERATED FROM beem SETUP.PY.\"""
version = '%(version)s'
"""
with open(filename, 'w') as a:
a.write(cnt % {'version': VERSION})
def get_long_description():
descr = []
for fname in ('README.md',):
with io.open(fname, encoding='utf-8') as f:
descr.append(f.read())
return '\n\n'.join(descr)
if __name__ == '__main__':
write_version_py('steembi/version.py')
setup(
name='steembi',
version=VERSION,
description='Steem basic income library',
long_description=get_long_description(),
download_url='https://github.com/holgern/steembasicincome/tarball/' + VERSION,
author='Holger Nahrstaedt',
author_email='holger@nahrstaedt.de',
maintainer='Holger Nahrstaedt',
maintainer_email='holger@nahrstaedt.de',
url='http://www.github.com/holgern/steembasicincome',
keywords=['steem', 'library', 'ubi', 'steembasicincome'],
packages=[
"steembi"
],
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Office/Business :: Financial',
],
install_requires=requires,
entry_points={
},
setup_requires=['pytest-runner'],
tests_require=tests_require,
include_package_data=True,
)
| true
| true
|
f707d72b8bccb6192906d8fac11ec9b1ba13c7c0
| 369
|
py
|
Python
|
tools/c7n_mailer/c7n_mailer/utils_email.py
|
sstarcher/cloud-custodian
|
fc5b51019e9c15d0582089133d080bceee489a94
|
[
"Apache-2.0"
] | null | null | null |
tools/c7n_mailer/c7n_mailer/utils_email.py
|
sstarcher/cloud-custodian
|
fc5b51019e9c15d0582089133d080bceee489a94
|
[
"Apache-2.0"
] | null | null | null |
tools/c7n_mailer/c7n_mailer/utils_email.py
|
sstarcher/cloud-custodian
|
fc5b51019e9c15d0582089133d080bceee489a94
|
[
"Apache-2.0"
] | null | null | null |
import logging
from email.utils import parseaddr
logger = logging.getLogger('c7n_mailer.utils.email')
def is_email(target):
    """Return True when *target* looks like an email address.

    Slack URIs are rejected outright.  The check is a cheap heuristic
    (parseaddr extracts an addr-spec, plus '@' and '.' must be present),
    not full RFC 5322 validation.
    """
    if target.startswith('slack://'):
        logger.debug("Slack payload, not an email.")
        return False
    has_addr = bool(parseaddr(target)[1])
    return has_addr and '@' in target and '.' in target
| 24.6
| 65
| 0.631436
|
import logging
from email.utils import parseaddr
logger = logging.getLogger('c7n_mailer.utils.email')
def is_email(target):
if target.startswith('slack://'):
logger.debug("Slack payload, not an email.")
return False
if parseaddr(target)[1] and '@' in target and '.' in target:
return True
else:
return False
| true
| true
|
f707d8d338f577afbfffde813e38b1c37c20c5c9
| 1,525
|
py
|
Python
|
spin/models/smpl.py
|
krumo/SPIN
|
0e2f17e70f06de46e062683ea6d5b233eeaa73c1
|
[
"BSD-3-Clause"
] | null | null | null |
spin/models/smpl.py
|
krumo/SPIN
|
0e2f17e70f06de46e062683ea6d5b233eeaa73c1
|
[
"BSD-3-Clause"
] | null | null | null |
spin/models/smpl.py
|
krumo/SPIN
|
0e2f17e70f06de46e062683ea6d5b233eeaa73c1
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import numpy as np
import smplx
from smplx import SMPL as _SMPL
from smplx.body_models import ModelOutput
from smplx.lbs import vertices2joints
import spin.config as config
import spin.constants as constants
class SMPL(_SMPL):
    """ Extension of the official SMPL implementation to support more joints """

    def __init__(self, *args, **kwargs):
        super(SMPL, self).__init__(*args, **kwargs)
        # Indices of the named joints in the SMPL joint ordering.
        joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES]
        # Extra regressor producing additional joints from mesh vertices.
        J_regressor_extra = np.load(config.JOINT_REGRESSOR_TRAIN_EXTRA)
        self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))
        self.joint_map = torch.tensor(joints, dtype=torch.long)

    def forward(self, *args, **kwargs):
        # Always request skinned vertices from the base model.
        kwargs['get_skin'] = True
        smpl_output = super(SMPL, self).forward(*args, **kwargs)
        extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices)
        joints = torch.cat([smpl_output.joints, extra_joints], dim=1)
        # NOTE(review): the concatenated joints above are immediately
        # discarded by the next assignment, and the joint_map re-indexing is
        # commented out — only the stock SMPL joints are returned.  Looks
        # like debug leftovers; confirm which behavior is intended before
        # relying on the extra joints.
        joints = smpl_output.joints
        # print(smpl_output.joints.shape)
        # joints = joints[:, self.joint_map, :]
        output = ModelOutput(vertices=smpl_output.vertices,
                             global_orient=smpl_output.global_orient,
                             body_pose=smpl_output.body_pose,
                             joints=joints,
                             betas=smpl_output.betas,
                             full_pose=smpl_output.full_pose)
        return output
| 42.361111
| 103
| 0.658361
|
import torch
import numpy as np
import smplx
from smplx import SMPL as _SMPL
from smplx.body_models import ModelOutput
from smplx.lbs import vertices2joints
import spin.config as config
import spin.constants as constants
class SMPL(_SMPL):
def __init__(self, *args, **kwargs):
super(SMPL, self).__init__(*args, **kwargs)
joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES]
J_regressor_extra = np.load(config.JOINT_REGRESSOR_TRAIN_EXTRA)
self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))
self.joint_map = torch.tensor(joints, dtype=torch.long)
def forward(self, *args, **kwargs):
kwargs['get_skin'] = True
smpl_output = super(SMPL, self).forward(*args, **kwargs)
extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices)
joints = torch.cat([smpl_output.joints, extra_joints], dim=1)
joints = smpl_output.joints
output = ModelOutput(vertices=smpl_output.vertices,
global_orient=smpl_output.global_orient,
body_pose=smpl_output.body_pose,
joints=joints,
betas=smpl_output.betas,
full_pose=smpl_output.full_pose)
return output
| true
| true
|
f707d9f5ae195709bc3343f36776ae832f905010
| 522
|
py
|
Python
|
Python_Implementation/Test_Codes/Test1.py
|
ishtiaqniloy/CSE_406_TCP_Reset_Attack_Video_Streaming
|
849e3595a75cedaa8142a025eb22e1bb8871be36
|
[
"MIT"
] | 2
|
2019-09-08T15:12:38.000Z
|
2019-09-23T04:36:18.000Z
|
Python_Implementation/Test_Codes/Test1.py
|
ishtiaqniloy/CSE_406_TCP_Reset_Attack_Video_Streaming
|
849e3595a75cedaa8142a025eb22e1bb8871be36
|
[
"MIT"
] | null | null | null |
Python_Implementation/Test_Codes/Test1.py
|
ishtiaqniloy/CSE_406_TCP_Reset_Attack_Video_Streaming
|
849e3595a75cedaa8142a025eb22e1bb8871be36
|
[
"MIT"
] | 1
|
2021-05-25T09:58:46.000Z
|
2021-05-25T09:58:46.000Z
|
from scapy.all import *
from scapy.all import send
from scapy.layers.inet import *
srcIP = "192.168.0.103"
destIP = "192.168.0.108"
def spoof_tcp(pkt):
    """Send a spoofed TCP RST toward destIP for the sniffed packet *pkt*.

    The source/destination ports are swapped and the RST's seq is set to the
    peer's last ACK so the receiver matches it to the live connection.
    """
    IPLayer = IP(dst=destIP, src=pkt[IP].dst)
    # flags="R" crafts a TCP reset segment.
    TCPLayer = TCP(flags="R", seq=pkt[TCP].ack, dport=pkt[TCP].sport, sport=pkt[TCP].dport)
    spoofpkt = IPLayer/TCPLayer
    send(spoofpkt, verbose=1)
    print("Spoofed Packet Sent...")
# Continuously sniff packets from the target host; the actual reset
# injection (spoof_tcp) is left disabled.
while True:  # idiomatic form of the original `while 1 > 0`
    # NOTE(review): sniff() without count= blocks until interrupted and
    # returns a packet *list*, not a single packet — confirm whether
    # spoof_tcp was meant to receive individual packets via prn=.
    pkt = sniff(filter="tcp and src host " + destIP)
    print("Found Packet")
    print(pkt)
    # spoof_tcp(pkt)
| 22.695652
| 91
| 0.655172
|
from scapy.all import *
from scapy.all import send
from scapy.layers.inet import *
srcIP = "192.168.0.103"
destIP = "192.168.0.108"
def spoof_tcp(pkt):
IPLayer = IP(dst=destIP, src=pkt[IP].dst)
TCPLayer = TCP(flags="R", seq=pkt[TCP].ack, dport=pkt[TCP].sport, sport=pkt[TCP].dport)
spoofpkt = IPLayer/TCPLayer
send(spoofpkt, verbose=1)
print("Spoofed Packet Sent...")
while 1 > 0:
pkt = sniff(filter="tcp and src host " + destIP)
print("Found Packet")
print(pkt)
| true
| true
|
f707da107d989b56c26b40432ff8a4818a50ba27
| 3,748
|
py
|
Python
|
joswig_dijkstra.py
|
MrCopprHead/joswig
|
addd43e29270320e69d038ef5b24b717c2740061
|
[
"MIT"
] | null | null | null |
joswig_dijkstra.py
|
MrCopprHead/joswig
|
addd43e29270320e69d038ef5b24b717c2740061
|
[
"MIT"
] | null | null | null |
joswig_dijkstra.py
|
MrCopprHead/joswig
|
addd43e29270320e69d038ef5b24b717c2740061
|
[
"MIT"
] | null | null | null |
#Attempt to route using the Joswig Algorithm described here: https://arxiv.org/pdf/1904.01082.pdf
#using object oriented programming.
class Vertex:
    """Graph vertex for Dijkstra-style shortest paths.

    Every created vertex is tracked in the class-level ``vertices`` registry,
    which shortestpath() walks to pick the next closest unvisited vertex.
    Idiom fixes vs. original: locals no longer shadow builtins ``min``/``next``,
    boolean logic uses short-circuit ``or``/``not`` instead of bitwise ``|``
    and ``== True/False`` comparisons.  Behavior is unchanged.
    """

    num_vert = 0   # total vertices created since the last clear()
    vertices = []  # registry of every vertex; shared class-level state

    def __init__(self, lab=""):
        self.label = lab
        self.adj = []       # adjacent vertices
        self.weight = []    # edge weights, parallel to self.adj
        self.known = False  # True once the shortest path to self is final
        self.pv = None      # predecessor on the best known path
        self.dv = 0         # best known distance from the source
        self.help = False   # True once dv has been set by a relaxation
        Vertex.num_vert += 1
        Vertex.vertices.append(self)

    def link(self, vert, cost):
        """Add a directed edge self -> vert with the given cost (no duplicates)."""
        if vert not in self.adj:
            self.adj.append(vert)
            self.weight.append(cost)

    def editlink(self, vert, cost):
        """Update the weight of an existing edge self -> vert; no-op otherwise."""
        if vert in self.adj:
            self.weight[self.adj.index(vert)] = cost

    def clear(self):
        """Reset the class-level vertex registry (affects every vertex)."""
        Vertex.num_vert = 0
        Vertex.vertices = []

    def printadj(self, lab):
        """Print each neighbour's label and distance; include predecessor if lab."""
        for v in self.adj:
            result = v.label, v.dv
            if lab:
                result = v.label, v.pv.label, v.dv
            print(result)

    def vert_false(self):
        """Reset the search state of every vertex before a new shortest-path run."""
        for v in Vertex.vertices:
            v.known = False
            v.dv = 0
            v.pv = None
            v.help = False

    def shortestpath(self):
        """Finalize self, relax its edges, then recurse into the closest
        unvisited vertex until every reachable vertex is known."""
        num_edge = 0
        if self.adj != []:
            num_edge = len(self.adj)
        self.known = True
        if num_edge > 0:
            for i in range(0, num_edge):
                edge_cost = self.weight[i]
                # A weight of -1 is treated as 0 (sentinel value — presumably
                # a "free" edge; confirm intent with the author).
                if edge_cost == -1:
                    edge_cost = 0
                neighbour = self.adj[i]
                # Relax: first touch, or a strictly shorter route found.
                if not neighbour.help or neighbour.dv > edge_cost + self.dv:
                    neighbour.dv = edge_cost + self.dv
                    neighbour.pv = self
                    neighbour.help = True
        # Select the unvisited vertex with the smallest tentative distance.
        best_dist = -1
        closest = None
        done = True
        for v in Vertex.vertices:
            if not v.known:
                done = False
                if v.help:
                    if best_dist == -1 or best_dist > v.dv:
                        best_dist = v.dv
                        closest = v
        if not done and closest is not None:
            closest.shortestpath()
class Tree:
    """Thin 1-indexed facade over a collection of Vertex objects."""

    num_trees = 0  # total trees ever created
    trees = []     # registry of every tree

    def __init__(self, numvert):
        # Vertices are labelled "v1".."vN" and addressed 1-indexed by the API.
        self.vertices = [Vertex("v" + str(n + 1)) for n in range(numvert)]
        Tree.num_trees += 1
        Tree.trees.append(self)

    def link(self, init, final, weight):
        """Create edge init -> final (1-indexed) when both indices are valid."""
        count = len(self.vertices)
        init = init - 1
        final = final - 1
        if init < count and final < count:
            self.vertices[init].link(self.vertices[final], weight)

    def editlink(self, init, final, newweight):
        """Change the weight of an existing edge init -> final (1-indexed)."""
        count = len(self.vertices)
        init = init - 1
        final = final - 1
        if init < count and final < count:
            self.vertices[init].editlink(self.vertices[final], newweight)

    def shortestpath(self, vert):
        """Run shortest paths from the 1-indexed vertex; return
        [label, predecessor label, distance] for each of its neighbours."""
        source = self.vertices[vert - 1]
        source.shortestpath()
        return [[n.label, n.pv.label, n.dv] for n in source.adj]

    def add_vertex(self, vert):
        """Append an externally created Vertex to this tree."""
        self.vertices.append(vert)

    def vert_false(self):
        """Reset search state for all vertices (delegates to Vertex)."""
        self.vertices[0].vert_false()

    def printadj(self, vert, lab):
        """Print the adjacency of the 1-indexed vertex."""
        self.vertices[vert - 1].printadj(lab)
| 32.310345
| 98
| 0.509872
|
class Vertex:
num_vert = 0
vertices = []
def __init__(self, lab=""):
self.label = lab
self.adj = [] self.weight = [] self.known = False self.pv = None self.dv = 0 self.help = False Vertex.num_vert += 1
Vertex.vertices.append(self)
def link(self,vert,cost):
if((vert in self.adj) == False):
self.adj.append(vert)
self.weight.append(cost)
def editlink(self,vert,cost):
if((vert in self.adj) == True):
self.weight[self.adj.index(vert)] = cost
def clear(self):
Vertex.num_vert = 0
Vertex.vertices = []
def printadj(self,lab):
for v in self.adj:
result = v.label,v.dv
if(lab == True):
result = v.label,v.pv.label,v.dv
print(result)
def vert_false(self):
for v in Vertex.vertices:
v.known = False
v.dv = 0
v.pv = None
v.help = False
def shortestpath(self):
num_edge = 0
if(self.adj != []):
num_edge = len(self.adj)
self.known = True
if(num_edge > 0):
for i in range(0,num_edge):
weight = self.weight[i]
if(weight == -1):
weight = 0
if((self.adj[i].help == False) | (self.adj[i].dv > weight + self.dv)):
self.adj[i].dv = weight + self.dv
self.adj[i].pv = self
self.adj[i].help = True
min = -1
next = None
done = True
for v in Vertex.vertices:
if(v.known == False):
done = False
if(v.help == True):
if((min == -1) | (min > v.dv)):
min = v.dv
next = v
if(done == False):
if(next != None):
next.shortestpath()
class Tree:
num_trees = 0
trees = []
def __init__(self,numvert):
self.vertices = []
for i in range(0,numvert):
self.vertices.append(Vertex("v"+str(i+1)))
Tree.num_trees += 1
Tree.trees.append(self)
def link(self,init,final,weight):
numvert = len(self.vertices)
init = init-1
final = final-1
if((init < numvert) & (final < numvert)):
self.vertices[init].link(self.vertices[final],weight)
def editlink(self,init,final,newweight):
numvert = len(self.vertices)
init = init - 1
final = final - 1
if ((init < numvert) & (final < numvert)):
self.vertices[init].editlink(self.vertices[final],newweight)
def shortestpath(self,vert):
self.vertices[vert-1].shortestpath()
result = []
for x in self.vertices[vert-1].adj:
result.append([x.label,x.pv.label,x.dv])
return result
def add_vertex(self,vert):
self.vertices.append(vert)
def vert_false(self):
self.vertices[0].vert_false()
def printadj(self,vert,lab):
self.vertices[vert-1].printadj(lab)
| true
| true
|
f707dc3bea0d922a73defd241268b992e7ca7392
| 971
|
py
|
Python
|
creational/singleton.py
|
rcavaz/Design-Patterns
|
d40718926be14a5da7e7deb3c6d31d5259eee1dd
|
[
"MIT"
] | 3
|
2018-01-12T01:45:00.000Z
|
2019-03-07T20:12:23.000Z
|
creational/singleton.py
|
rcavaz/Design-Patterns
|
d40718926be14a5da7e7deb3c6d31d5259eee1dd
|
[
"MIT"
] | null | null | null |
creational/singleton.py
|
rcavaz/Design-Patterns
|
d40718926be14a5da7e7deb3c6d31d5259eee1dd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
SINGLETON
Use the Singleton pattern when:
1. there must be exactly one instance of a class, and it must be
accessible to clients from a well-known access point.
2. the sole instance should be extensible by subclassing, and clients
should be able to use an extended instance without modifying their code.
"""
import logging
class Connection(object):
    """
    Singleton
    1. Defines an Instance operation that lets clients access its unique
       instance — instantiating the class always returns the same object.
    2. Is responsible for creating its own unique instance on first use.
    """
    def __new__(cls):
        # Renamed the parameter from `type` (shadowed the builtin) to the
        # conventional `cls`.  Checking cls.__dict__ (not hasattr) means a
        # subclass gets its own instance rather than inheriting the parent's.
        if '_connection' not in cls.__dict__:
            cls._connection = object.__new__(cls)
            logging.basicConfig(level=logging.INFO)
            logging.info('New database connection created!')
        logging.info('Connection established.')
        return cls._connection
if __name__ == "__main__":
c = Connection()
d = Connection()
| 28.558824
| 79
| 0.677652
|
import logging
class Connection(object):
def __new__(type):
if not '_connection' in type.__dict__:
type._connection = object.__new__(type)
logging.basicConfig(level=logging.INFO)
logging.info('New database connection created!')
logging.info('Connection established.')
return type._connection
if __name__ == "__main__":
c = Connection()
d = Connection()
| true
| true
|
f707dca08d914b63cf7af7cdfd1b67909756c739
| 1,597
|
py
|
Python
|
nacos/base.py
|
hubertshelley/nacos_client_python
|
38f9b4fe26096626328d5f2149b144b9b5933a11
|
[
"Apache-2.0"
] | null | null | null |
nacos/base.py
|
hubertshelley/nacos_client_python
|
38f9b4fe26096626328d5f2149b144b9b5933a11
|
[
"Apache-2.0"
] | null | null | null |
nacos/base.py
|
hubertshelley/nacos_client_python
|
38f9b4fe26096626328d5f2149b144b9b5933a11
|
[
"Apache-2.0"
] | null | null | null |
import socket
from http import HTTPStatus
from typing import Optional
from urllib.error import HTTPError, URLError
from urllib.parse import urlencode, unquote_plus, quote, quote_plus
from urllib.request import Request, urlopen, ProxyHandler, build_opener
class ClientBase:
    """Minimal HTTP client for the Nacos open API."""

    def __init__(self, nacos_host: str, api_level: str = 'v1'):
        self.host = nacos_host
        self.level = api_level
        self.base_url = f'{nacos_host}/nacos/{api_level}'

    def handle(self, url: str, headers: Optional[dict] = None, params: Optional[dict] = None,
               data: Optional[dict] = None, method: str = 'GET'):
        """Issue an HTTP request against ``base_url + url``.

        Args:
            url: path relative to the Nacos API root.
            headers: extra request headers.
            params: query-string parameters; non-string values are str()-ed
                and URL-quoted, None values are skipped.
            data: form body, URL-encoded.
            method: HTTP verb.

        Returns:
            The raw response body as bytes.

        Raises:
            Exception: on HTTP 403 (insufficient privilege), request timeout,
                or any other URL error.
        """
        # Fixed mutable default arguments: the original used `dict = {}`
        # defaults, which share one dict object across every call.
        if headers is None:
            headers = {}
        if params is None:
            params = {}
        if data is None:
            data = {}

        def _get_params_str():
            # Build the query string, quoting values and dropping Nones.
            params_list = []
            for key in params.keys():
                value = params.get(key, None)
                if value is not None:
                    if not isinstance(value, str):
                        value = str(value)
                    params_list.append(f'{key}={quote_plus(value)}')
            return '&'.join(params_list)

        try:
            # Note: a '?' is appended even when there are no parameters
            # (behavior kept from the original).
            url += '?' + _get_params_str()
            req = Request(self.base_url + url, headers=headers, data=urlencode(data).encode(), method=method)
            resp = urlopen(req)
            response = resp.read()
            resp.close()
            return response
        except HTTPError as e:
            if e.code == HTTPStatus.FORBIDDEN:
                raise Exception("Insufficient privilege.")
            else:
                raise Exception(e)
        except socket.timeout:
            raise Exception(f"{self.host} request timeout")
        except URLError as e:
            raise Exception(f"{self.host} connection error:{e.reason}")
| 38.95122
| 109
| 0.577959
|
import socket
from http import HTTPStatus
from urllib.request import Request, urlopen, ProxyHandler, build_opener
from urllib.parse import urlencode, unquote_plus, quote, quote_plus
from urllib.error import HTTPError, URLError
class ClientBase:
def __init__(self, nacos_host: str, api_level: str = 'v1'):
self.host = nacos_host
self.level = api_level
self.base_url = f'{nacos_host}/nacos/{api_level}'
def handle(self, url: str, headers: dict = {}, params: dict = {}, data: dict = {}, method: str = 'GET'):
def _get_params_str():
params_list = []
for key in params.keys():
value = params.get(key, None)
if value is not None:
if not isinstance(value, str):
value = str(value)
params_list.append(f'{key}={quote_plus(value)}')
return '&'.join(params_list)
try:
url += '?' + _get_params_str()
req = Request(self.base_url + url, headers=headers, data=urlencode(data).encode(), method=method)
resp = urlopen(req)
response = resp.read()
resp.close()
return response
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise Exception("Insufficient privilege.")
else:
raise Exception(e)
except socket.timeout:
raise Exception(f"{self.host} request timeout")
except URLError as e:
raise Exception(f"{self.host} connection error:{e.reason}")
| true
| true
|
f707dcc5768824ef0e014152f92bc6655d9355f0
| 657
|
py
|
Python
|
tridentstream/notifiers/multinotifier/handler.py
|
tridentstream/mediaserver
|
5d47d766df2e8dca076e41348062567a569019fd
|
[
"MIT"
] | 6
|
2020-01-03T14:50:09.000Z
|
2021-09-13T01:44:31.000Z
|
tridentstream/notifiers/multinotifier/handler.py
|
tidalstream/mediaserver
|
5d47d766df2e8dca076e41348062567a569019fd
|
[
"MIT"
] | null | null | null |
tridentstream/notifiers/multinotifier/handler.py
|
tidalstream/mediaserver
|
5d47d766df2e8dca076e41348062567a569019fd
|
[
"MIT"
] | null | null | null |
import logging
from django.contrib.auth.models import User
from unplugged import RelatedPluginField, Schema, fields
from wampyre.realm import realm_manager
from ...plugins import NotifierPlugin
logger = logging.getLogger(__name__)
class MultiNotifierSchema(Schema):
    """Config schema: the list of notifier plugins to fan notifications out to."""
    notifiers = fields.List(
        RelatedPluginField(plugin_type=NotifierPlugin), many=True, default=list
    )
class MultiNotifierNotifierHandlerPlugin(NotifierPlugin):
    """Notifier that fans each notification out to every configured notifier."""

    plugin_name = "multinotifier"
    config_schema = MultiNotifierSchema

    def notify(self, notification):
        """Forward *notification* to each notifier listed in the plugin config."""
        targets = self.config.get("notifiers", [])
        for target in targets:
            target.notify(notification)
| 26.28
| 79
| 0.762557
|
import logging
from django.contrib.auth.models import User
from unplugged import RelatedPluginField, Schema, fields
from wampyre.realm import realm_manager
from ...plugins import NotifierPlugin
logger = logging.getLogger(__name__)
class MultiNotifierSchema(Schema):
notifiers = fields.List(
RelatedPluginField(plugin_type=NotifierPlugin), many=True, default=list
)
class MultiNotifierNotifierHandlerPlugin(NotifierPlugin):
plugin_name = "multinotifier"
config_schema = MultiNotifierSchema
def notify(self, notification):
for notifier in self.config.get("notifiers", []):
notifier.notify(notification)
| true
| true
|
f707dcef9260b20e07d25e398446967df4b622c2
| 1,410
|
py
|
Python
|
elit/datasets/sts/stsb.py
|
emorynlp/stem-cell-hypothesis
|
48a628093d93d653865fbac6409d179cddd99293
|
[
"Apache-2.0"
] | 4
|
2021-09-17T15:23:31.000Z
|
2022-02-28T10:18:04.000Z
|
elit/datasets/sts/stsb.py
|
emorynlp/stem-cell-hypothesis
|
48a628093d93d653865fbac6409d179cddd99293
|
[
"Apache-2.0"
] | null | null | null |
elit/datasets/sts/stsb.py
|
emorynlp/stem-cell-hypothesis
|
48a628093d93d653865fbac6409d179cddd99293
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2021-05-20 16:25
from typing import Union, List, Callable
from elit.common.dataset import TransformableDataset
from elit.utils.io_util import read_cells
STS_B_TRAIN = 'http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz#sts-train.csv'
STS_B_DEV = 'http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz#sts-dev.csv'
STS_B_TEST = 'http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz#sts-test.csv'
class SemanticTextualSimilarityDataset(TransformableDataset):
    """Sentence-pair similarity dataset (e.g. STS-B) read from delimited files."""

    def __init__(self,
                 data: Union[str, List],
                 sent_a_col,
                 sent_b_col,
                 similarity_col,
                 delimiter='auto',
                 transform: Union[Callable, List] = None,
                 cache=None,
                 generate_idx=None) -> None:
        """
        Args:
            data: file path/URL or a pre-loaded list of samples.
            sent_a_col: column index of the first sentence.
            sent_b_col: column index of the second sentence.
            similarity_col: column index of the gold similarity score.
            delimiter: cell delimiter; 'auto' lets read_cells guess.
            transform: transform(s) applied to each sample.
            cache: passed through to TransformableDataset.
            generate_idx: passed through to TransformableDataset.
        """
        # Set the column configuration BEFORE calling super().__init__,
        # which may load the data and therefore call load_file(), relying
        # on these attributes being present.
        self.delimiter = delimiter
        self.similarity_col = similarity_col
        self.sent_b_col = sent_b_col
        self.sent_a_col = sent_a_col
        super().__init__(data, transform, cache, generate_idx)

    def load_file(self, filepath: str):
        """Yield dicts with 'sent_a', 'sent_b' and float 'similarity' per row."""
        for i, cells in enumerate(read_cells(filepath, strip=True, delimiter=self.delimiter)):
            yield {
                'sent_a': cells[self.sent_a_col],
                'sent_b': cells[self.sent_b_col],
                'similarity': float(cells[self.similarity_col])
            }
| 38.108108
| 94
| 0.628369
|
from typing import Union, List, Callable
from elit.common.dataset import TransformableDataset
from elit.utils.io_util import read_cells
STS_B_TRAIN = 'http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz#sts-train.csv'
STS_B_DEV = 'http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz#sts-dev.csv'
STS_B_TEST = 'http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz#sts-test.csv'
class SemanticTextualSimilarityDataset(TransformableDataset):
def __init__(self,
data: Union[str, List],
sent_a_col,
sent_b_col,
similarity_col,
delimiter='auto',
transform: Union[Callable, List] = None,
cache=None,
generate_idx=None) -> None:
self.delimiter = delimiter
self.similarity_col = similarity_col
self.sent_b_col = sent_b_col
self.sent_a_col = sent_a_col
super().__init__(data, transform, cache, generate_idx)
def load_file(self, filepath: str):
for i, cells in enumerate(read_cells(filepath, strip=True, delimiter=self.delimiter)):
yield {
'sent_a': cells[self.sent_a_col],
'sent_b': cells[self.sent_b_col],
'similarity': float(cells[self.similarity_col])
}
| true
| true
|
f707ddcb48b0d243878fade85e7923d452e6caaf
| 771
|
py
|
Python
|
utils/censor.py
|
GlobalChatDev/DeveloperGlobalChat
|
8f52e778e3730add307cfafbdb0e3b71a562462f
|
[
"MIT"
] | 4
|
2021-11-22T00:31:19.000Z
|
2022-03-05T13:38:36.000Z
|
utils/censor.py
|
GlobalChatDev/DeveloperGlobalChat
|
8f52e778e3730add307cfafbdb0e3b71a562462f
|
[
"MIT"
] | null | null | null |
utils/censor.py
|
GlobalChatDev/DeveloperGlobalChat
|
8f52e778e3730add307cfafbdb0e3b71a562462f
|
[
"MIT"
] | 1
|
2022-02-27T21:26:20.000Z
|
2022-02-27T21:26:20.000Z
|
from typing import *
import re
class Censorship:
    """Masks a fixed list of banned words with '#' runs and strips URLs from text."""

    def __init__(self, content: Union[Any, str, None] = None) -> None:
        # Text to be censored; may also be supplied later via update_content().
        self.content: str = content

    def update_content(self, content: Any):
        """Replace the text held by this instance."""
        self.content = content

    def censor(self):
        """Mask banned words, replace URLs with '[url omitted]', return the result.

        Note: matching is plain substring search, so banned words embedded in
        longer words (e.g. 'ball' in 'football') are masked as well.
        """
        text = self.content
        for word in ["fuck", "shit", "lmao", "lmfao", "porn", "sex", "cock", "ball"]:
            if word in text:
                text = text.replace(word, "#" * len(word))
        self.content = re.sub(
            "http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+",
            "[url omitted]",
            text,
        )
        return self.content
| 30.84
| 91
| 0.509728
|
from typing import *
import re
class Censorship:
    """Text filter: hashes out banned words and removes URLs."""

    def __init__(self, content: Union[Any, str, None] = None) -> None:
        self.content: str = content  # current text under censorship

    def update_content(self, content: Any):
        """Swap in new text to censor."""
        self.content = content

    def censor(self):
        """Return the content with banned words replaced by '#' and URLs omitted."""
        for word in ("fuck", "shit", "lmao", "lmfao", "porn", "sex", "cock", "ball"):
            if word in self.content:
                self.content = self.content.replace(word, "#" * len(word))
        self.content = re.sub(
            "http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+",
            "[url omitted]",
            self.content,
        )
        return self.content
| true
| true
|
f707de3705af2a4dae494700ab92a8eb9604d0f0
| 389
|
py
|
Python
|
opinions/united_states/state/ohioctapp_2.py
|
brianwc/juriscraper
|
f95221c3ab38ef89e642a3f5ed5569f73866d636
|
[
"BSD-2-Clause"
] | null | null | null |
opinions/united_states/state/ohioctapp_2.py
|
brianwc/juriscraper
|
f95221c3ab38ef89e642a3f5ed5569f73866d636
|
[
"BSD-2-Clause"
] | null | null | null |
opinions/united_states/state/ohioctapp_2.py
|
brianwc/juriscraper
|
f95221c3ab38ef89e642a3f5ed5569f73866d636
|
[
"BSD-2-Clause"
] | null | null | null |
"""Scraper for the 1st District Court of Appeals
CourtID: ohio
Court Short Name: Ohio
Author: Andrei Chelaru
"""
from juriscraper.opinions.united_states.state import ohio
class Site(ohio.Site):
    # Specialises the shared Ohio appellate scraper for one district court.
    def __init__(self):
        super(Site, self).__init__()
        # court_id becomes this module's dotted path (identifies the scraper).
        self.court_id = self.__module__
        # Index 2 — presumably the 2nd appellate district; confirm against
        # ohio.Site.make_url, which builds the opinion-list URL from it.
        self.court_index = 2
        self.url = self.make_url(self.court_index, self.year)
| 24.3125
| 61
| 0.70437
|
from juriscraper.opinions.united_states.state import ohio
class Site(ohio.Site):
    # District-specific subclass of the shared Ohio appellate scraper.
    def __init__(self):
        super(Site, self).__init__()
        # Scraper identity is the module's dotted path.
        self.court_id = self.__module__
        # Index 2 — presumably the 2nd appellate district; confirm against
        # ohio.Site.make_url, which derives the listing URL from it.
        self.court_index = 2
        self.url = self.make_url(self.court_index, self.year)
| true
| true
|
f707de5d7baf6c48523186021b7d2ca01cdbe868
| 220
|
py
|
Python
|
tflib/__init__.py
|
nguyenquangduc2000/AttGAN
|
2ba96d1a1f80b39cc785c594ad8e1d800c06dd52
|
[
"MIT"
] | 405
|
2019-04-17T03:02:18.000Z
|
2022-03-11T06:36:00.000Z
|
tflib/__init__.py
|
nguyenquangduc2000/AttGAN
|
2ba96d1a1f80b39cc785c594ad8e1d800c06dd52
|
[
"MIT"
] | 58
|
2019-05-13T09:34:57.000Z
|
2021-12-07T08:40:58.000Z
|
tflib/__init__.py
|
nguyenquangduc2000/AttGAN
|
2ba96d1a1f80b39cc785c594ad8e1d800c06dd52
|
[
"MIT"
] | 95
|
2019-04-20T02:32:32.000Z
|
2022-03-07T03:58:24.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tflib.checkpoint import *
from tflib.ops import *
from tflib.utils import *
from tflib.variable import *
| 24.444444
| 38
| 0.831818
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tflib.checkpoint import *
from tflib.ops import *
from tflib.utils import *
from tflib.variable import *
| true
| true
|
f707de66a1a318b60effa45ff1790f7b9c557bb8
| 116
|
py
|
Python
|
holygun/bot.py
|
TitaniumHocker/Holygun
|
b1860ca7586c222e68ab79d1408126f757cc2057
|
[
"MIT"
] | null | null | null |
holygun/bot.py
|
TitaniumHocker/Holygun
|
b1860ca7586c222e68ab79d1408126f757cc2057
|
[
"MIT"
] | null | null | null |
holygun/bot.py
|
TitaniumHocker/Holygun
|
b1860ca7586c222e68ab79d1408126f757cc2057
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from discord.ext import commands
import discord
client = commands.Bot(command_prefix='.')
| 16.571429
| 41
| 0.706897
|
from discord.ext import commands
import discord
client = commands.Bot(command_prefix='.')
| true
| true
|
f707dec50aebb6bb333e897656608d4ecdba6528
| 5,601
|
py
|
Python
|
portfolio/Python/scrapy/gadgetpanda/sellusyourgadget_spider.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | null | null | null |
portfolio/Python/scrapy/gadgetpanda/sellusyourgadget_spider.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | null | null | null |
portfolio/Python/scrapy/gadgetpanda/sellusyourgadget_spider.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | 5
|
2016-03-22T07:40:46.000Z
|
2021-05-30T16:12:21.000Z
|
import os
import csv
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest
from product_spiders.fuzzywuzzy import process
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class SellUsYourGadgetSpider(BaseSpider):
    """Spider for sellusyourgadget.co.uk trade-in quotes.

    Flow: product drop-down -> per-product sub-products (capacity variants)
    -> condition grades -> POST of the quote form.  Only products whose name
    fuzzy-matches the bundled whitelist CSV (score >= 98) are followed.
    """
    name = 'sellusyourgadget.co.uk'
    allowed_domains = ['sellusyourgadget.co.uk']
    start_urls = ['http://sellusyourgadget.co.uk/index.php/home/myProduct']
    def __init__(self, *args, **kwargs):
        super(SellUsYourGadgetSpider, self).__init__(*args, **kwargs)
        # Whitelist of product names: first column of the bundled CSV.
        csv_file = csv.reader(open(os.path.join(HERE, 'sellusyourgadget_products.csv')))
        self.products =[row[0] for row in csv_file]
    def parse(self, response):
        # Each <option> in the #product select carries a product id.
        hxs = HtmlXPathSelector(response)
        product_ids = hxs.select('//*[@id="product"]/option/@value').extract()
        for id in product_ids:
            url = 'http://sellusyourgadget.co.uk/index.php/home/getSubProducts/%s'
            yield Request(url % id, callback=self.parse_subproducts, meta={'id': id})
    def parse_subproducts(self, response):
        hxs = HtmlXPathSelector(response)
        # Fix for the HTML code: the fragment served by the site is malformed,
        # so rewrite it into nested, well-formed <div> markup before re-parsing.
        html = hxs.extract().replace('<br></h3>','').\
            replace('<h3','<div class="item"').\
            replace('</p>\n <div','</p></div>\n <div').\
            replace('<input type="radio"', '<div class="hd" ').\
            replace('checked>','>').\
            replace('</p></div>','</div></p></div>').\
            replace('</p>\n', '</div></p>\n')
        products_hxs = HtmlXPathSelector(text=html)
        products = products_hxs.select('//div[@class="item"]')
        for product in products:
            # A product may expose several capacity variants ("hd" radios).
            sub_products = product.select('div[@class="hd"]')
            if sub_products:
                for sub_product in sub_products:
                    value = sub_product.select('./@value').extract()[0]
                    hd = sub_product.select('./text()').extract()[0]
                    name = ' '.join((product.select('p/text()').extract()[0], hd))
                    extracted = process.extractOne(name, self.products)
                    try:
                        # extractOne may return None; indexing then raises
                        # TypeError and aborts this page (best-effort scrape).
                        if extracted[1]>=98:
                            url = 'http://sellusyourgadget.co.uk/index.php/home/getConditions/%s'
                            yield Request(url % value.split(':')[0], callback=self.parse_options,
                                          meta={'id':response.meta['id'],
                                                'name': name,
                                                'memoryR':value,
                                                'memory':value})
                    except TypeError:
                        return
            else:
                name = product.select('p/text()').extract()[0]
                extracted = process.extractOne(name, self.products)
                try:
                    if extracted[1]>=98:
                        value = product.select('p/input/@value').extract()[0]
                        url = 'http://sellusyourgadget.co.uk/index.php/home/getConditions/%s'
                        yield Request(url % value.split(':')[0], callback=self.parse_options,
                                      meta={'id':response.meta['id'],
                                            'name':name,
                                            'memoryR':value,
                                            'memory':value})
                except TypeError:
                    return
    def parse_options(self, response):
        '''Gets the percentages to be subtracted to the initial price.
        '''
        try:
            hxs = HtmlXPathSelector(response)
            percentages = hxs.select('//input[@name="conditionR"]/@value').extract()
            # Pair each condition grade with its price-reduction percentage.
            grade_values = dict(zip(['Grade A','Grade B', 'Grade C',
                                     'Grade D', 'Grade E'], percentages))
            # NOTE(review): dict.iteritems() is Python 2 only.
            for grade, percentage in grade_values.iteritems():
                yield FormRequest('http://sellusyourgadget.co.uk/index.php/home/getQuote',
                                  method='POST',
                                  formdata={'product':response.meta['id'],
                                            'memoryR':response.meta['memoryR'],
                                            'conditionR':percentage,
                                            'condition':percentage,
                                            'memory':response.meta['memory'],
                                            'tick1':'0',
                                            'tick2':'0',
                                            'tick3':'0',
                                            'tick4':'0',
                                            'price':''},
                                  callback=self.parse_product,
                                  meta={'name': ' '.join((response.meta['name'], grade))})
        except TypeError:
            return
    def parse_product(self, response):
        # Final quote page: emit one item carrying the quoted price.
        hxs = HtmlXPathSelector(response)
        loader = ProductLoader(item=Product(), response=response)
        loader.add_value('name', response.meta['name'])
        loader.add_xpath('price', '//*[@id="price-text"]/span/text()')
        yield loader.load_item()
| 51.861111
| 119
| 0.466702
|
import os
import csv
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest
from product_spiders.fuzzywuzzy import process
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class SellUsYourGadgetSpider(BaseSpider):
    """Spider for sellusyourgadget.co.uk trade-in quotes.

    Flow: product drop-down -> per-product sub-products (capacity variants)
    -> condition grades -> POST of the quote form.  Only products whose name
    fuzzy-matches the bundled whitelist CSV (score >= 98) are followed.
    """
    name = 'sellusyourgadget.co.uk'
    allowed_domains = ['sellusyourgadget.co.uk']
    start_urls = ['http://sellusyourgadget.co.uk/index.php/home/myProduct']
    def __init__(self, *args, **kwargs):
        super(SellUsYourGadgetSpider, self).__init__(*args, **kwargs)
        # Whitelist of product names: first column of the bundled CSV.
        csv_file = csv.reader(open(os.path.join(HERE, 'sellusyourgadget_products.csv')))
        self.products =[row[0] for row in csv_file]
    def parse(self, response):
        # Each <option> in the #product select carries a product id.
        hxs = HtmlXPathSelector(response)
        product_ids = hxs.select('//*[@id="product"]/option/@value').extract()
        for id in product_ids:
            url = 'http://sellusyourgadget.co.uk/index.php/home/getSubProducts/%s'
            yield Request(url % id, callback=self.parse_subproducts, meta={'id': id})
    def parse_subproducts(self, response):
        hxs = HtmlXPathSelector(response)
        # The fragment served by the site is malformed HTML, so rewrite it
        # into nested, well-formed <div> markup before re-parsing.
        html = hxs.extract().replace('<br></h3>','').\
            replace('<h3','<div class="item"').\
            replace('</p>\n <div','</p></div>\n <div').\
            replace('<input type="radio"', '<div class="hd" ').\
            replace('checked>','>').\
            replace('</p></div>','</div></p></div>').\
            replace('</p>\n', '</div></p>\n')
        products_hxs = HtmlXPathSelector(text=html)
        products = products_hxs.select('//div[@class="item"]')
        for product in products:
            # A product may expose several capacity variants ("hd" radios).
            sub_products = product.select('div[@class="hd"]')
            if sub_products:
                for sub_product in sub_products:
                    value = sub_product.select('./@value').extract()[0]
                    hd = sub_product.select('./text()').extract()[0]
                    name = ' '.join((product.select('p/text()').extract()[0], hd))
                    extracted = process.extractOne(name, self.products)
                    try:
                        # extractOne may return None; indexing then raises
                        # TypeError and aborts this page (best-effort scrape).
                        if extracted[1]>=98:
                            url = 'http://sellusyourgadget.co.uk/index.php/home/getConditions/%s'
                            yield Request(url % value.split(':')[0], callback=self.parse_options,
                                          meta={'id':response.meta['id'],
                                                'name': name,
                                                'memoryR':value,
                                                'memory':value})
                    except TypeError:
                        return
            else:
                name = product.select('p/text()').extract()[0]
                extracted = process.extractOne(name, self.products)
                try:
                    if extracted[1]>=98:
                        value = product.select('p/input/@value').extract()[0]
                        url = 'http://sellusyourgadget.co.uk/index.php/home/getConditions/%s'
                        yield Request(url % value.split(':')[0], callback=self.parse_options,
                                      meta={'id':response.meta['id'],
                                            'name':name,
                                            'memoryR':value,
                                            'memory':value})
                except TypeError:
                    return
    def parse_options(self, response):
        """Pair condition grades with their price percentages and post quotes."""
        try:
            hxs = HtmlXPathSelector(response)
            percentages = hxs.select('//input[@name="conditionR"]/@value').extract()
            grade_values = dict(zip(['Grade A','Grade B', 'Grade C',
                                     'Grade D', 'Grade E'], percentages))
            # NOTE(review): dict.iteritems() is Python 2 only.
            for grade, percentage in grade_values.iteritems():
                yield FormRequest('http://sellusyourgadget.co.uk/index.php/home/getQuote',
                                  method='POST',
                                  formdata={'product':response.meta['id'],
                                            'memoryR':response.meta['memoryR'],
                                            'conditionR':percentage,
                                            'condition':percentage,
                                            'memory':response.meta['memory'],
                                            'tick1':'0',
                                            'tick2':'0',
                                            'tick3':'0',
                                            'tick4':'0',
                                            'price':''},
                                  callback=self.parse_product,
                                  meta={'name': ' '.join((response.meta['name'], grade))})
        except TypeError:
            return
    def parse_product(self, response):
        # Final quote page: emit one item carrying the quoted price.
        hxs = HtmlXPathSelector(response)
        loader = ProductLoader(item=Product(), response=response)
        loader.add_value('name', response.meta['name'])
        loader.add_xpath('price', '//*[@id="price-text"]/span/text()')
        yield loader.load_item()
| true
| true
|
f707dfbc174261fab6b852262aa102e8664820a1
| 1,335
|
py
|
Python
|
src/python/WMCore/BossAir/MySQL/LoadForMonitoring.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 21
|
2015-11-19T16:18:45.000Z
|
2021-12-02T18:20:39.000Z
|
src/python/WMCore/BossAir/MySQL/LoadForMonitoring.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 5,671
|
2015-01-06T14:38:52.000Z
|
2022-03-31T22:11:14.000Z
|
src/python/WMCore/BossAir/MySQL/LoadForMonitoring.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 67
|
2015-01-21T15:55:38.000Z
|
2022-02-03T19:53:13.000Z
|
#!/usr/bin/env python
"""
_LoadForMonitoring_
MySQL implementation for loading a job by scheduler status
"""
from WMCore.Database.DBFormatter import DBFormatter
class LoadForMonitoring(DBFormatter):
    """
    _LoadForMonitoring_

    Load all jobs with a certain scheduler status including
    all the joined information (status name, owner DN, site plugin).
    """
    sql = """SELECT rj.wmbs_id AS jobid, rj.grid_id AS gridid, rj.bulk_id AS bulkid,
               st.name AS status, rj.retry_count as retry_count, rj.id AS id,
               rj.status_time as status_time, wl.plugin AS plugin, wu.cert_dn AS owner
               FROM bl_runjob rj
               INNER JOIN bl_status st ON rj.sched_status = st.id
               LEFT OUTER JOIN wmbs_users wu ON wu.id = rj.user_id
               INNER JOIN wmbs_job wj ON wj.id = rj.wmbs_id
               LEFT OUTER JOIN wmbs_location wl ON wl.id = wj.location
               WHERE rj.status = :complete
               """
    def execute(self, complete = '1', conn = None, transaction = False):
        """
        _execute_

        Load all jobs either running or not (running by default).

        :param complete: value bound to :complete in the WHERE clause.
        :param conn: optional existing DB connection to reuse.
        :param transaction: whether to run inside an open transaction.
        :returns: list of dicts, one per matching bl_runjob row.
        """
        binds = {'complete': complete}
        result = self.dbi.processData(self.sql, binds, conn = conn,
                                      transaction = transaction)
        return self.formatDict(result)
| 28.404255
| 86
| 0.617978
|
from WMCore.Database.DBFormatter import DBFormatter
class LoadForMonitoring(DBFormatter):
    """Load all jobs with a given scheduler status, joined with status name,
    owner DN and site plugin for monitoring."""
    sql = """SELECT rj.wmbs_id AS jobid, rj.grid_id AS gridid, rj.bulk_id AS bulkid,
               st.name AS status, rj.retry_count as retry_count, rj.id AS id,
               rj.status_time as status_time, wl.plugin AS plugin, wu.cert_dn AS owner
               FROM bl_runjob rj
               INNER JOIN bl_status st ON rj.sched_status = st.id
               LEFT OUTER JOIN wmbs_users wu ON wu.id = rj.user_id
               INNER JOIN wmbs_job wj ON wj.id = rj.wmbs_id
               LEFT OUTER JOIN wmbs_location wl ON wl.id = wj.location
               WHERE rj.status = :complete
               """
    def execute(self, complete = '1', conn = None, transaction = False):
        """Run the query with :complete bound to *complete*; return a list of
        dicts (one per matching bl_runjob row)."""
        binds = {'complete': complete}
        result = self.dbi.processData(self.sql, binds, conn = conn,
                                      transaction = transaction)
        return self.formatDict(result)
| true
| true
|
f707e00a97a99d4a6c312d59ee4e46abeccbb04f
| 407
|
py
|
Python
|
myscraper/spiders/myscraper.py
|
melki/scrapeSlate
|
f1583e49e29cef6670a1f390f918207fa63646a5
|
[
"MIT"
] | null | null | null |
myscraper/spiders/myscraper.py
|
melki/scrapeSlate
|
f1583e49e29cef6670a1f390f918207fa63646a5
|
[
"MIT"
] | null | null | null |
myscraper/spiders/myscraper.py
|
melki/scrapeSlate
|
f1583e49e29cef6670a1f390f918207fa63646a5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from scrapy import Spider, Request
from ..items import Article
from ..items import Lien
class MyScraper(Spider):
    """Minimal spider: issues one request and re-emits whatever parse iterates."""
    name = u'myscraper'

    def start_requests(self):
        """Seed the crawl with a single request.

        Bug fix: the original constructed the Request but neither yielded
        nor returned it (and kept an unused ``urlToVisit`` dict), so Scrapy
        received no start requests and the spider did nothing.
        """
        yield Request(
            url='http://www.google.fr/',
            callback=self.parse,
        )

    def parse(self, response):
        """Yield every element produced by iterating the response object."""
        for i in response:
            yield i
| 16.958333
| 40
| 0.55774
|
from scrapy import Spider, Request
from ..items import Article
from ..items import Lien
class MyScraper(Spider):
    """Minimal spider: issues one request and re-emits whatever parse iterates."""
    name = u'myscraper'

    def start_requests(self):
        """Seed the crawl with a single request.

        Bug fix: the original constructed the Request but neither yielded
        nor returned it (and kept an unused ``urlToVisit`` dict), so Scrapy
        received no start requests and the spider did nothing.
        """
        yield Request(
            url='http://www.google.fr/',
            callback=self.parse,
        )

    def parse(self, response):
        """Yield every element produced by iterating the response object."""
        for i in response:
            yield i
| true
| true
|
f707e1277f32bfebfdc9ac2cf05c0ae86aa33b5f
| 8,465
|
py
|
Python
|
docs/conf.py
|
liuzh91/gluon-nlp
|
189bbdcc56d8e58aa908963949687b99ff9a3cff
|
[
"Apache-2.0"
] | 1
|
2021-06-17T12:59:25.000Z
|
2021-06-17T12:59:25.000Z
|
docs/conf.py
|
liuzh91/gluon-nlp
|
189bbdcc56d8e58aa908963949687b99ff9a3cff
|
[
"Apache-2.0"
] | 3
|
2020-09-01T05:45:57.000Z
|
2020-10-22T23:14:20.000Z
|
docs/conf.py
|
ZiyueHuang/gluon-nlp
|
0d5c61992180f41eab590e74c7b679980f429292
|
[
"Apache-2.0"
] | 1
|
2020-09-04T22:28:31.000Z
|
2020-09-04T22:28:31.000Z
|
# documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 23 19:40:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os, subprocess
import shlex
import recommonmark
import sphinx_gallery
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '..'))
# -- General configuration ------------------------------------------------
# Version information.
import gluonnlp as nlp
version = nlp.__version__
release = nlp.__version__
# General information about the project.
project = 'gluonnlp'
author = '%s developers' % project
copyright = '2019, %s' % author
github_doc_root = 'http://gluon-nlp.mxnet.io/{}/'.format(str(version))
# add markdown parser
CommonMarkParser.github_doc_root = github_doc_root
extensions = ['recommonmark']
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx_autodoc_typehints',
'sphinx.ext.mathjax',
'sphinx_gallery.gen_gallery',
'nbsphinx',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
nbsphinx_kernel_name = 'python3'
nbsphinx_allow_errors = True
nbsphinx_timeout = 1200
nbsphinx_execute = 'never'
html_sourcelink_suffix = ''
html_context = {
'display_github': True,
'github_user': 'dmlc',
'github_repo': 'gluon-nlp',
'github_version': 'master',
'conf_py_path': '/docs/',
'last_updated': False,
'commit': True
}
nbsphinx_prolog = """
{% set paths = env.docname.split('/') %}
.. only:: html
:download:`Download this tutorial <{{ "../%s.zip"|format(paths[1]) }}>`
"""
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.ipynb', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# generate autosummary even if no references
autosummary_generate = True
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/gluon-logo.svg'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/gluon.ico'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints', 'examples/*/*/**.rst', 'model_zoo/*/*/**.rst',
'model_zoo/word_embeddings/tools/extern/*/**.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme is set by the make target
# html_theme = os.environ.get('GLUONNLP_THEME', 'rtd')
# on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# only import rtd theme and set it if want to build docs locally
# if not on_rtd and html_theme == 'rtd':
# import sphinx_rtd_theme
# html_theme = 'sphinx_rtd_theme'
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = 'mxtheme'
html_theme_path = ['mxtheme']
html_theme_options = {
'primary_color': 'blue',
'accent_color': 'deep_orange',
'header_links' : [
('Install', 'install/install-more', False, ''),
('API', 'api/index', False, ''),
('Community', 'website/index', False, ''),
('Contribute', 'website/contribute', False, ''),
('GitHub', 'https://github.com/dmlc/gluon-nlp/', True, 'fab fa-github'),
],
# custom layout
'fixed_drawer' : True,
'fixed_header' : True,
'header_waterfall' : True,
'header_scroll': True,
# Render footer (Default: True)
'show_footer': False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output ---------------------------------------------
# latex_elements = {
# }
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# latex_documents = [
# (master_doc, '%s.tex' % project, project,
# author, 'manual'),
# ]
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None),
'mxnet': ('https://mxnet.apache.org/api/python/docs/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('http://matplotlib.org/', None),
'nltk': ('http://www.nltk.org/', None),
}
from sphinx_gallery.sorting import ExplicitOrder
# examples_dirs = []
# gallery_dirs = []
# subsection_order = ExplicitOrder([])
def setup(app):
    """Sphinx extension hook: register the theme's card directive,
    recommonmark's config/transform, and the site's static JS/CSS assets."""
    import mxtheme
    app.add_directive('card', mxtheme.CardDirective)
    # Resolve relative markdown links against the versioned docs root.
    app.add_config_value('recommonmark_config', {
        'url_resolver': lambda url: github_doc_root + url,
        'auto_doc_ref': True
    }, True)
    app.add_transform(AutoStructify)
    # NOTE(review): add_javascript/add_stylesheet were deprecated in Sphinx 1.8
    # (removed in 4.0) in favour of add_js_file/add_css_file — confirm the
    # pinned Sphinx version before modernising.
    app.add_javascript('google_analytics.js')
    app.add_javascript('hidebib.js')
    app.add_javascript('install-options.js')
    app.add_stylesheet('custom.css')
sphinx_gallery_conf = {
'backreferences_dir': 'gen_modules/backreferences',
'doc_module': ('gluonnlp', 'mxnet', 'numpy'),
'reference_url': {
'gluonnlp': None,
'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1'},
'examples_dirs': [],
'gallery_dirs': [],
'subsection_order': ExplicitOrder([]),
'find_mayavi_figures': False,
'filename_pattern': '.py',
'expected_failing_examples': []
}
# Napoleon settings
napoleon_use_ivar = True
napoleon_use_param = True # Required for compatibility with sphinx-autodoc-typehints
# linkcheck settings
import multiprocessing
linkcheck_ignore = [r'http[s]://apache-mxnet.s3*']
linkcheck_retries = 3
linkcheck_workers = int(multiprocessing.cpu_count() / 2)
| 31.823308
| 100
| 0.695334
|
import sys
import os, subprocess
import shlex
import recommonmark
import sphinx_gallery
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '..'))
import gluonnlp as nlp
version = nlp.__version__
release = nlp.__version__
project = 'gluonnlp'
author = '%s developers' % project
copyright = '2019, %s' % author
github_doc_root = 'http://gluon-nlp.mxnet.io/{}/'.format(str(version))
CommonMarkParser.github_doc_root = github_doc_root
extensions = ['recommonmark']
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx_autodoc_typehints',
'sphinx.ext.mathjax',
'sphinx_gallery.gen_gallery',
'nbsphinx',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
]
templates_path = ['_templates']
nbsphinx_kernel_name = 'python3'
nbsphinx_allow_errors = True
nbsphinx_timeout = 1200
nbsphinx_execute = 'never'
html_sourcelink_suffix = ''
html_context = {
'display_github': True,
'github_user': 'dmlc',
'github_repo': 'gluon-nlp',
'github_version': 'master',
'conf_py_path': '/docs/',
'last_updated': False,
'commit': True
}
nbsphinx_prolog = """
{% set paths = env.docname.split('/') %}
.. only:: html
:download:`Download this tutorial <{{ "../%s.zip"|format(paths[1]) }}>`
"""
source_suffix = ['.rst', '.ipynb', '.md']
autosummary_generate = True
master_doc = 'index'
language = None
html_logo = '_static/gluon-logo.svg'
html_favicon = '_static/gluon.ico'
exclude_patterns = ['_build', '**.ipynb_checkpoints', 'examples/*/*/**.rst', 'model_zoo/*/*/**.rst',
'model_zoo/word_embeddings/tools/extern/*/**.md']
pygments_style = 'sphinx'
todo_include_todos = False
html_theme = 'mxtheme'
html_theme_path = ['mxtheme']
html_theme_options = {
'primary_color': 'blue',
'accent_color': 'deep_orange',
'header_links' : [
('Install', 'install/install-more', False, ''),
('API', 'api/index', False, ''),
('Community', 'website/index', False, ''),
('Contribute', 'website/contribute', False, ''),
('GitHub', 'https://github.com/dmlc/gluon-nlp/', True, 'fab fa-github'),
],
'fixed_drawer' : True,
'fixed_header' : True,
'header_waterfall' : True,
'header_scroll': True,
'show_footer': False
}
html_static_path = ['_static']
htmlhelp_basename = project + 'doc'
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None),
'mxnet': ('https://mxnet.apache.org/api/python/docs/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('http://matplotlib.org/', None),
'nltk': ('http://www.nltk.org/', None),
}
from sphinx_gallery.sorting import ExplicitOrder
def setup(app):
    """Sphinx extension hook: register the theme's card directive,
    recommonmark's config/transform, and the site's static JS/CSS assets."""
    import mxtheme
    app.add_directive('card', mxtheme.CardDirective)
    # Resolve relative markdown links against the versioned docs root.
    app.add_config_value('recommonmark_config', {
        'url_resolver': lambda url: github_doc_root + url,
        'auto_doc_ref': True
    }, True)
    app.add_transform(AutoStructify)
    # NOTE(review): add_javascript/add_stylesheet were deprecated in Sphinx 1.8
    # (removed in 4.0) in favour of add_js_file/add_css_file — confirm the
    # pinned Sphinx version before modernising.
    app.add_javascript('google_analytics.js')
    app.add_javascript('hidebib.js')
    app.add_javascript('install-options.js')
    app.add_stylesheet('custom.css')
sphinx_gallery_conf = {
'backreferences_dir': 'gen_modules/backreferences',
'doc_module': ('gluonnlp', 'mxnet', 'numpy'),
'reference_url': {
'gluonnlp': None,
'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1'},
'examples_dirs': [],
'gallery_dirs': [],
'subsection_order': ExplicitOrder([]),
'find_mayavi_figures': False,
'filename_pattern': '.py',
'expected_failing_examples': []
}
napoleon_use_ivar = True
napoleon_use_param = True
import multiprocessing
linkcheck_ignore = [r'http[s]://apache-mxnet.s3*']
linkcheck_retries = 3
linkcheck_workers = int(multiprocessing.cpu_count() / 2)
| true
| true
|
f707e18686892825bbfde8338065f25653edad29
| 4,210
|
py
|
Python
|
analysis/coarse_graining.py
|
yketa/UBC---Spring-2018---code
|
b065544639a483dda48cda89bcbb11c1772232aa
|
[
"MIT"
] | 1
|
2021-12-15T13:38:13.000Z
|
2021-12-15T13:38:13.000Z
|
analysis/coarse_graining.py
|
yketa/UBC---Spring-2018---code
|
b065544639a483dda48cda89bcbb11c1772232aa
|
[
"MIT"
] | 1
|
2019-05-25T20:00:17.000Z
|
2019-05-25T20:00:17.000Z
|
analysis/coarse_graining.py
|
yketa/UBC---Spring-2018---code
|
b065544639a483dda48cda89bcbb11c1772232aa
|
[
"MIT"
] | 1
|
2020-01-22T17:05:18.000Z
|
2020-01-22T17:05:18.000Z
|
"""
Module coarse_graining implements a Gaussian coarse-graining adapted from
Illing et al., Phys. Rev. Lett. 117, 208002 (2016) following Goldhirsch and
Goldenberg, Eur. Phys. J. E 9, 245–251 (2002).
"""
import numpy as np
class GaussianCG:
    """Gaussian coarse-graining kernel with a hard radial cut-off.

    Weights follow Goldhirsch & Goldenberg's normalised Gaussian, set to
    zero beyond ``r_cut``.
    """

    def __init__(self, sigma, r_cut):
        """
        Parameters
        ----------
        sigma : float
            Length scale of the Gaussian kernel.
        r_cut : float
            Cut-off radius beyond which the weight is zero.
        """
        self.sigma = sigma
        self.r_cut = r_cut

    def function(self, r):
        """Return the normalised Gaussian weight at radius ``r``.

        Zero past the cut-off; otherwise exp(-r²/2σ²) divided by the
        normalisation over the cut-off disc.
        """
        if r > self.r_cut:
            return 0
        norm = 2*np.pi*(self.sigma**2)*(
            1 - np.exp(-0.5*((self.r_cut/self.sigma)**2)))
        return np.exp(-0.5*((r/self.sigma)**2))/norm

    def factors(self, positions):
        """Return an array of weights, one per displacement vector in
        ``positions`` (coordinates along the last axis)."""
        radii = np.sqrt(np.sum(positions**2, axis=-1))
        return np.array([self.function(r) for r in radii])
class SquareUniformCG:
    """Uniform (top-hat) coarse-graining over a square box of side ``dL``."""

    def __init__(self, dL):
        """
        Parameters
        ----------
        dL : float
            Edge length of the averaging square.
        """
        self.dL = dL

    def function(self, position):
        """Indicator weight: 1 inside the origin-centred square of side
        ``dL``, 0 outside."""
        inside = (np.abs(np.array(position)) <= self.dL/2).all()
        return 1 if inside else 0

    def factors(self, positions):
        """Normalised weights at ``positions``; scalar 0 when no position
        falls inside the square."""
        weights = np.array([self.function(p) for p in positions])
        total = np.sum(weights)
        if total == 0:
            return 0
        return weights/total
class CoarseGraining:
    """Computes coarse-graining weights once, then averages variables with them."""

    def __init__(self, factors_function, positions):
        """
        Parameters
        ----------
        factors_function : function
            Maps an array of coordinates to coarse-graining weights.
        positions : float array
            Coordinates at which coarse-graining is desired.
        """
        # Weights are evaluated a single time at the sampling positions.
        self.CGfactors = np.array(factors_function(positions))

    def average(self, var):
        """Return the weighted sum of ``var`` over positions.

        Parameters
        ----------
        var : float array
            Per-position values of the variable to coarse-grain.

        Returns
        -------
        average : float
            Coarse-grained variable.
        """
        values = np.array(var)
        weights = np.transpose(np.array(self.CGfactors, ndmin=len(values.shape)))
        return np.sum(weights*values, axis=0)
| 26.987179
| 117
| 0.539905
|
import numpy as np
class GaussianCG:
    """Gaussian coarse-graining kernel with cut-off radius ``r_cut``.

    Fix: in this copy multiple statements had been fused onto single lines
    (e.g. ``self.sigma = sigma self.r_cut = r_cut``), which is a
    SyntaxError; they are split back into separate statements here.
    """

    def __init__(self, sigma, r_cut):
        """sigma: Gaussian length scale; r_cut: cut-off radius."""
        self.sigma = sigma
        self.r_cut = r_cut

    def function(self, r):
        """Return the normalised Gaussian weight at radius ``r`` (0 past r_cut)."""
        if r > self.r_cut:
            return 0
        # Normalisation over the disc of radius r_cut.
        Dg = 2*np.pi*(self.sigma**2)*(1 -
            np.exp(-0.5*((self.r_cut/self.sigma)**2)))
        return np.exp(-0.5*((r/self.sigma)**2))/Dg

    def factors(self, positions):
        """Weights for an array of displacement vectors (coords on last axis)."""
        return np.array(list(map(
            lambda r: self.function(r),
            np.sqrt(np.sum(positions**2, axis=-1))
        )))
class SquareUniformCG:
def __init__(self, dL):
self.dL = dL
def function(self, position):
if (np.abs(np.array(position)) > self.dL/2).any(): return 0 return 1
def factors(self, positions):
CGfactors = np.array(list(map(
lambda position:
self.function(position),
positions
)))
sumCGfactors = np.sum(CGfactors)
if np.sum(CGfactors) == 0: return 0
return CGfactors/sumCGfactors
class CoarseGraining:
def __init__(self, factors_function, positions):
self.CGfactors = np.array(factors_function(positions))
def average(self, var):
return np.sum(
np.transpose(np.array(self.CGfactors,
ndmin=len(np.array(var).shape)))
*np.array(var), axis=0)
| true
| true
|
f707e18b95fefa6dc45d4e780606b33415022b2a
| 1,715
|
py
|
Python
|
test/functional/feature_uacomment.py
|
dogxteam/dogxwallet-master
|
346189354bdec9a80c20bdc429ddec15c3b17b73
|
[
"MIT"
] | 5
|
2019-03-18T02:14:20.000Z
|
2019-03-21T17:08:27.000Z
|
test/functional/feature_uacomment.py
|
dogxteam/dogxwallet-master
|
346189354bdec9a80c20bdc429ddec15c3b17b73
|
[
"MIT"
] | null | null | null |
test/functional/feature_uacomment.py
|
dogxteam/dogxwallet-master
|
346189354bdec9a80c20bdc429ddec15c3b17b73
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The dogxcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -uacomment option."""
import re
from test_framework.test_framework import dogxcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import assert_equal
class UacommentTest(dogxcoinTestFramework):
    """Functional test for the -uacomment command line option."""

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True

    def run_test(self):
        self.log.info("test multiple -uacomment")
        # Slicing strips surrounding subversion chars to leave "(testnode0)".
        test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1]
        assert_equal(test_uacomment, "(testnode0)")

        self.restart_node(0, ["-uacomment=foo"])
        foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1]
        assert_equal(foo_uacomment, "(testnode0; foo)")

        self.log.info("test -uacomment max length")
        self.stop_node(0)
        # Raw string: "\(" in a plain literal is an invalid escape sequence
        # (SyntaxWarning on modern Python); bytes are identical.
        expected = r"Error: Total length of network version string \([0-9]+\) exceeds maximum length \(256\). Reduce the number or size of uacomments."
        self.nodes[0].assert_start_raises_init_error(["-uacomment=" + 'a' * 256], expected, match=ErrorMatch.FULL_REGEX)

        self.log.info("test -uacomment unsafe characters")
        for unsafe_char in ['/', ':', '(', ')', '₿', '🏃']:
            expected = r"Error: User Agent comment \(" + re.escape(unsafe_char) + r"\) contains unsafe characters."
            self.nodes[0].assert_start_raises_init_error(["-uacomment=" + unsafe_char], expected, match=ErrorMatch.FULL_REGEX)
# Script entry point: run the functional test when executed directly.
if __name__ == '__main__':
    UacommentTest().main()
| 41.829268
| 150
| 0.689213
|
import re
from test_framework.test_framework import dogxcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import assert_equal
class UacommentTest(dogxcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self.log.info("test multiple -uacomment")
test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1]
assert_equal(test_uacomment, "(testnode0)")
self.restart_node(0, ["-uacomment=foo"])
foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1]
assert_equal(foo_uacomment, "(testnode0; foo)")
self.log.info("test -uacomment max length")
self.stop_node(0)
expected = "Error: Total length of network version string \([0-9]+\) exceeds maximum length \(256\). Reduce the number or size of uacomments."
self.nodes[0].assert_start_raises_init_error(["-uacomment=" + 'a' * 256], expected, match=ErrorMatch.FULL_REGEX)
self.log.info("test -uacomment unsafe characters")
for unsafe_char in ['/', ':', '(', ')', '₿', '🏃']:
expected = "Error: User Agent comment \(" + re.escape(unsafe_char) + "\) contains unsafe characters."
self.nodes[0].assert_start_raises_init_error(["-uacomment=" + unsafe_char], expected, match=ErrorMatch.FULL_REGEX)
if __name__ == '__main__':
UacommentTest().main()
| true
| true
|
f707e23b55d940cd1262516340bb49b3d5e18399
| 2,673
|
py
|
Python
|
tests/services/schemas/test_access.py
|
inveniosoftware/invenio-datacite
|
d25e3670b74f132390fc42e5647765ae5c605ef3
|
[
"MIT"
] | 10
|
2020-01-17T10:13:09.000Z
|
2022-03-17T10:14:41.000Z
|
tests/services/schemas/test_access.py
|
inveniosoftware/invenio-datacite
|
d25e3670b74f132390fc42e5647765ae5c605ef3
|
[
"MIT"
] | 570
|
2019-08-15T16:35:25.000Z
|
2022-03-31T13:46:17.000Z
|
tests/services/schemas/test_access.py
|
inveniosoftware/invenio-datacite
|
d25e3670b74f132390fc42e5647765ae5c605ef3
|
[
"MIT"
] | 57
|
2019-09-04T09:25:29.000Z
|
2022-03-30T19:32:55.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020-2021 CERN.
# Copyright (C) 2020-2021 Northwestern University.
# Copyright (C) 2021 TU Wien.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Test metadata access schema."""
import pytest
from marshmallow.exceptions import ValidationError
from invenio_rdm_records.services.schemas.access import AccessSchema, \
EmbargoSchema
def test_embargo_load_no_until_is_valid():
    """Loading an embargo without an "until" date yields explicit None fields."""
    expected = {
        "active": False,
        "until": None,
        "reason": None
    }

    # "until" omitted entirely.
    assert EmbargoSchema().load({"active": False}) == expected

    # "until" present but explicitly None.
    assert EmbargoSchema().load({"active": False, "until": None}) == expected
def test_embargo_dump_no_until_is_valid():
    """Dumping an embargo drops a missing or None "until" date."""
    # "until" omitted: dump returns the input unchanged.
    data = {
        "active": False,
    }
    assert EmbargoSchema().dump(data) == data

    # "until" explicitly None: dump drops the field entirely.
    with_none = {
        "active": False,
        "until": None,
    }
    assert EmbargoSchema().dump(with_none) == {
        "active": False,
    }
def test_valid_full():
    """A fully specified access dict loads unchanged."""
    data = {
        "record": "public",
        "files": "restricted",
        "embargo": {
            "active": True,
            "until": "2120-10-06",
            "reason": "espionage"
        },
    }
    assert AccessSchema().load(data) == data
# Each case supplies an access dict with exactly one defect, paired with the
# schema field expected to carry the resulting validation error.
@pytest.mark.parametrize("invalid_access,invalid_attr", [
    # missing "record"
    ({"files": "restricted",
      "embargo": {"active": True, "until": "2131-01-01", "reason": "secret!"}},
     "record"),
    # missing "files"
    ({"record": "public",
      "embargo": {"active": True, "until": "2131-01-01", "reason": "secret!"}},
     "files"),
    # inactive embargo must not carry an "until" date
    ({"record": "public", "files": "restricted",
      "embargo": {"active": False, "until": "2131-01-01", "reason": "secret!"}},
     "embargo"),
    # active embargo with an "until" date in the past
    ({"record": "public", "files": "restricted",
      "embargo": {"active": True, "until": "1999-01-01", "reason": "secret!"}},
     "embargo"),
    # unknown protection level for "record"
    ({"record": "invalid", "files": "restricted",
      "embargo": {"active": False, "until": "1999-01-01", "reason": "secret!"}},
     "record"),
    # unknown protection level for "files"
    ({"record": "public", "files": "invalid",
      "embargo": {"active": False, "until": "1999-01-01", "reason": "secret!"}},
     "files"),
])
def test_invalid(invalid_access, invalid_attr):
    """Each invalid access dict raises exactly one error, on the expected field."""
    with pytest.raises(ValidationError) as e:
        AccessSchema().load(invalid_access)

    error_fields = e.value.messages.keys()
    assert len(error_fields) == 1
    assert invalid_attr in error_fields
| 28.136842
| 79
| 0.601571
|
import pytest
from marshmallow.exceptions import ValidationError
from invenio_rdm_records.services.schemas.access import AccessSchema, \
EmbargoSchema
def test_embargo_load_no_until_is_valid():
expected = {
"active": False,
"until": None,
"reason": None
}
valid_no_until = {
"active": False,
}
assert expected == EmbargoSchema().load(valid_no_until)
valid_no_until = {
"active": False,
"until": None,
}
assert expected == EmbargoSchema().load(valid_no_until)
def test_embargo_dump_no_until_is_valid():
valid_no_until = {
"active": False,
}
assert valid_no_until == EmbargoSchema().dump(valid_no_until)
expected = {
"active": False,
}
valid_no_until = {
"active": False,
"until": None,
}
assert expected == EmbargoSchema().dump(valid_no_until)
def test_valid_full():
valid_full = {
"record": "public",
"files": "restricted",
"embargo": {
"active": True,
"until": "2120-10-06",
"reason": "espionage"
},
}
assert valid_full == AccessSchema().load(valid_full)
@pytest.mark.parametrize("invalid_access,invalid_attr", [
({"files": "restricted",
"embargo": {"active": True, "until": "2131-01-01", "reason": "secret!"}},
"record"),
({"record": "public",
"embargo": {"active": True, "until": "2131-01-01", "reason": "secret!"}},
"files"),
({"record": "public", "files": "restricted",
"embargo": {"active": False, "until": "2131-01-01", "reason": "secret!"}},
"embargo"),
({"record": "public", "files": "restricted",
"embargo": {"active": True, "until": "1999-01-01", "reason": "secret!"}},
"embargo"),
({"record": "invalid", "files": "restricted",
"embargo": {"active": False, "until": "1999-01-01", "reason": "secret!"}},
"record"),
({"record": "public", "files": "invalid",
"embargo": {"active": False, "until": "1999-01-01", "reason": "secret!"}},
"files"),
])
def test_invalid(invalid_access, invalid_attr):
with pytest.raises(ValidationError) as e:
AccessSchema().load(invalid_access)
error_fields = e.value.messages.keys()
assert len(error_fields) == 1
assert invalid_attr in error_fields
| true
| true
|
f707e2b06dbe69bf58625f3d2057eaab1323eae7
| 2,467
|
py
|
Python
|
src/movement/dc_with_ultra.py
|
Quanta-Robotics/Robot-Blueberry
|
7b7e77e09ac5e9ec5afd947e0db1ecc8773e56da
|
[
"MIT"
] | 25
|
2021-06-08T07:09:30.000Z
|
2021-12-30T06:28:35.000Z
|
src/movement/dc_with_ultra.py
|
ICT-CoU/Robot-Blueberry
|
d19fd1be037df9d67de64df57a87006d74cd6c43
|
[
"MIT"
] | 2
|
2021-05-23T12:54:51.000Z
|
2021-06-07T17:47:56.000Z
|
src/movement/dc_with_ultra.py
|
ICT-CoU/Robot-Blueberry
|
d19fd1be037df9d67de64df57a87006d74cd6c43
|
[
"MIT"
] | 14
|
2021-06-08T13:02:28.000Z
|
2021-12-30T20:07:18.000Z
|
from expression import *
# programming the GPIO by BCM pin numbers
#TRIG = servo['Sensor']['ultrasonic']['trigger']
#ECHO = servo['Sensor']['ultrasonic']['echo']
# Ultrasonic sensor pins (BCM numbering).
TRIG = 24
ECHO = 23
GPIO.setup(TRIG,GPIO.OUT) # initialize trigger GPIO pin as output
GPIO.setup(ECHO,GPIO.IN) # initialize echo GPIO pin as input
def forward():
    # Drive forward. Run()'s flag semantics come from the expression
    # module — presumably per-motor direction bits plus duty cycle (80).
    Run(1,0,1,0,80)
def back():
    # Drive backward.
    Run(0,1,0,1,80)
def left():
    # Turn left.
    Run(0,1,1,0,80)
def right():
    # Turn right.
    Run(1,0,0,1,80)
# Ensure the motors start halted.
Stop()
# Obstacle-encounter counter; the main loop uses it to alternate turns.
count=0
def Distance():
    """
    Sample the ultrasonic sensor twice and return the summed distances.

    Returns
    -------
    avgDistance : float
        Sum of the two sampled distances, in cm.
        NOTE(review): every caller divides this by 5, which matches neither
        the 2 samples taken here nor a plain average — confirm the intended
        sample count.
    """
    avgDistance = 0
    for _ in range(2):
        GPIO.output(TRIG, False)  # ensure trigger starts LOW
        time.sleep(0.1)           # settle delay
        GPIO.output(TRIG, True)   # 10 us trigger pulse
        time.sleep(0.00001)
        GPIO.output(TRIG, False)
        # Wait for the echo pulse and time its duration.
        # NOTE(review): these busy-waits have no timeout and can hang
        # forever if the sensor never answers.
        while GPIO.input(ECHO) == 0:
            pass
        pulse_start = time.time()
        while GPIO.input(ECHO) == 1:
            pass
        pulse_end = time.time()
        pulse_duration = pulse_end - pulse_start
        # Speed of sound 34300 cm/s, halved for the round trip -> 17150.
        distance = pulse_duration * 17150
        distance = round(distance, 2)
        avgDistance = avgDistance + distance
    return avgDistance
# Main obstacle-avoidance loop: roam until something is closer than the
# threshold, then scan right/left with the sensor servo and steer away.
while True:
    avgDistance = Distance()/5
    time.sleep(1)
    flag = 0
    if avgDistance < 100:
        # Obstacle in range: stop and look around.
        count += 1
        Stop()
        time.sleep(2)

        # Swing the sensor servo to one side (0 deg) and sample.
        changeDegreeGpio([0],[0],5,0.05)
        dist = Distance()/5
        print("right dist ",dist)
        time.sleep(8)
        if dist>=5:
            right()
            continue

        # Swing to the other side (180 deg) and sample.
        changeDegreeGpio([0],[180],5,0.05)
        dist = Distance()/5
        print("left dist ",dist)
        time.sleep(8)
        if dist>=5:
            left()
            continue

        # Both sides blocked: recentre the servo and back away.
        changeDegreeGpio([0],[90],5,0.05)
        time.sleep(1)
        back()
        time.sleep(1.5)
        # Alternate the escape turn on every third encounter.
        if (count%3 ==1) & (flag==0):
            right()
            flag=1
        else:
            left()
            flag=0
        time.sleep(1.5)
        # BUG FIX: was stop() — undefined name; the helper is Stop().
        Stop()
        time.sleep(1)
    else:
        print("go forward")
        flag=0
| 28.034091
| 108
| 0.511147
|
from expression import *
TRIG = 24
ECHO = 23
GPIO.setup(TRIG,GPIO.OUT) GPIO.setup(ECHO,GPIO.IN)
def forward():
Run(1,0,1,0,80)
def back():
Run(0,1,0,1,80)
def left():
Run(0,1,1,0,80)
def right():
Run(1,0,0,1,80)
Stop()
count=0
def Distance():
avgDistance=0
for i in range(2):
GPIO.output(TRIG, False) time.sleep(0.1)
GPIO.output(TRIG, True) time.sleep(0.00001) GPIO.output(TRIG, False) off=1
while GPIO.input(ECHO)==0: pass
pulse_start = time.time()
off=0
while GPIO.input(ECHO)==1: pass
pulse_end = time.time()
pulse_duration = pulse_end - pulse_start distance = pulse_duration * 17150 distance = round(distance,2) avgDistance=avgDistance+distance
return avgDistance
while True:
i=0
avgDistance=Distance()/5
time.sleep(1)
flag=0
if avgDistance < 100:
count += 1 Stop()
time.sleep(2)
changeDegreeGpio([0],[0],5,0.05)
dist = Distance()/5
print("right dist ",dist)
time.sleep(8)
if dist>=5:
right()
continue
changeDegreeGpio([0],[180],5,0.05)
dist = Distance()/5
print("left dist ",dist)
time.sleep(8)
if dist>=5:
left()
continue
changeDegreeGpio([0],[90],5,0.05)
time.sleep(1)
back()
time.sleep(1.5)
if (count%3 ==1) & (flag==0):
right()
flag=1
else:
left()
flag=0
time.sleep(1.5)
stop()
time.sleep(1)
else:
print("go forward")
flag=0
| true
| true
|
f707e323b5012ccf55f0e28d9d20cc066ceefb85
| 1,916
|
py
|
Python
|
tamusers/management/commands/sync_tamusers.py
|
Tampere/django-tamusers
|
5b3d82332c435990dad98f64bb9094cc1502b7ef
|
[
"BSD-2-Clause"
] | null | null | null |
tamusers/management/commands/sync_tamusers.py
|
Tampere/django-tamusers
|
5b3d82332c435990dad98f64bb9094cc1502b7ef
|
[
"BSD-2-Clause"
] | 1
|
2020-04-20T14:05:28.000Z
|
2020-04-20T14:05:28.000Z
|
tamusers/management/commands/sync_tamusers.py
|
Tampere/django-tamusers
|
5b3d82332c435990dad98f64bb9094cc1502b7ef
|
[
"BSD-2-Clause"
] | null | null | null |
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from allauth.socialaccount.models import SocialApp
from tamusers.providers.tampere.provider import TampereProvider
class Command(BaseCommand):
    """Management command that creates or updates the tamusers allauth SocialApp."""

    help = 'Create or update tamusers allauth SocialApp'

    def handle(self, *args, **options):
        """Idempotently sync the Tampere SocialApp from JWT settings.

        Raises ImproperlyConfigured when JWT_AUTH is missing the audience
        (client ID) or secret key.
        """
        changed = False
        try:
            app = SocialApp.objects.get(provider=TampereProvider.id)
        except SocialApp.DoesNotExist:
            app = SocialApp(provider=TampereProvider.id)
            self.stdout.write(self.style.SUCCESS('Creating new SocialApp'))

        if not app.name:
            app.name = 'Tampereen kaupungin työntekijät'
            changed = True

        client_id = secret_key = None
        # Default to None so an absent JWT_AUTH setting produces the explicit
        # ImproperlyConfigured errors below instead of an AttributeError.
        jwt_settings = getattr(settings, 'JWT_AUTH', None)
        if jwt_settings:
            client_id = jwt_settings.get('JWT_AUDIENCE')
            secret_key = jwt_settings.get('JWT_SECRET_KEY')
        if not client_id:
            raise ImproperlyConfigured("You must set JWT_AUTH['JWT_AUDIENCE'] to correspond to your client ID")
        if not secret_key:
            raise ImproperlyConfigured("You must set JWT_AUTH['JWT_SECRET_KEY'] to correspond to your secret key")

        if app.client_id != client_id:
            changed = True
            app.client_id = client_id
        if app.secret != secret_key:
            changed = True
            app.secret = secret_key
        if changed:
            app.save()

        # Attach the app to the current site on first creation.
        if not app.sites.exists():
            app.sites.add(Site.objects.get(id=settings.SITE_ID))
            changed = True

        if changed:
            self.stdout.write(self.style.SUCCESS('SocialApp successfully updated'))
        else:
            self.stdout.write(self.style.NOTICE('Already synced -- no changes needed'))
| 36.150943
| 114
| 0.653967
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from allauth.socialaccount.models import SocialApp
from tamusers.providers.tampere.provider import TampereProvider
class Command(BaseCommand):
help = 'Create or update tamusers allauth SocialApp'
def handle(self, *args, **options):
changed = False
try:
app = SocialApp.objects.get(provider=TampereProvider.id)
except SocialApp.DoesNotExist:
app = SocialApp(provider=TampereProvider.id)
self.stdout.write(self.style.SUCCESS('Creating new SocialApp'))
if not app.name:
app.name = 'Tampereen kaupungin työntekijät'
changed = True
client_id = secret_key = None
jwt_settings = getattr(settings, 'JWT_AUTH')
if jwt_settings:
client_id = jwt_settings.get('JWT_AUDIENCE')
secret_key = jwt_settings.get('JWT_SECRET_KEY')
if not client_id:
raise ImproperlyConfigured("You must set JWT_AUTH['JWT_AUDIENCE'] to correspond to your client ID")
if not secret_key:
raise ImproperlyConfigured("You must set JWT_AUTH['JWT_SECRET_KEY'] to correspond to your secret key")
if app.client_id != client_id:
changed = True
app.client_id = client_id
if app.secret != secret_key:
changed = True
app.secret = secret_key
if changed:
app.save()
if not app.sites.exists():
app.sites.add(Site.objects.get(id=settings.SITE_ID))
changed = True
if changed:
self.stdout.write(self.style.SUCCESS('SocialApp successfully updated'))
else:
self.stdout.write(self.style.NOTICE('Already synced -- no changes needed'))
| true
| true
|
f707e4491d0edf77917437d57d826e828a2fb974
| 19,684
|
py
|
Python
|
cirq-core/cirq/work/observable_measurement_data_test.py
|
Nexuscompute/Cirq
|
640ef8f82d6a56ec95361388ce7976e096cca906
|
[
"Apache-2.0"
] | 1
|
2022-02-05T22:17:39.000Z
|
2022-02-05T22:17:39.000Z
|
cirq-core/cirq/work/observable_measurement_data_test.py
|
pavoljuhas/Cirq
|
b6d6577be61d216ce2f29f8c64ae5879cf3087d5
|
[
"Apache-2.0"
] | 4
|
2022-01-16T14:12:15.000Z
|
2022-02-24T03:58:46.000Z
|
cirq-core/cirq/work/observable_measurement_data_test.py
|
Nexuscompute/Cirq
|
640ef8f82d6a56ec95361388ce7976e096cca906
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import datetime
import time
import numpy as np
import pytest
import cirq
import cirq.work as cw
from cirq.work.observable_measurement_data import (
_check_and_get_real_coef,
_obs_vals_from_measurements,
_stats_from_measurements,
)
from cirq.work.observable_settings import _MeasurementSpec
def test_get_real_coef():
    """Real (or real-valued complex) coefficients pass; imaginary ones raise."""
    q0 = cirq.LineQubit(0)
    for coefficient in (2, complex(2.0)):
        assert _check_and_get_real_coef(cirq.Z(q0) * coefficient, atol=1e-8) == 2
    with pytest.raises(ValueError):
        _check_and_get_real_coef(cirq.Z(q0) * 2.0j, atol=1e-8)
def test_obs_vals_from_measurements():
    """Observable values follow bitstring parity, scaled by the coefficient."""
    bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    qa = cirq.NamedQubit('a')
    qb = cirq.NamedQubit('b')
    observable = cirq.Z(qa) * cirq.Z(qb) * 10
    vals = _obs_vals_from_measurements(bitstrings, {qa: 0, qb: 1}, observable, atol=1e-8)
    # Even parity -> +10, odd parity -> -10.
    np.testing.assert_equal(vals, [10, -10, -10, 10])
def test_stats_from_measurements():
    """Mean and variance of a scaled Z*Z observable over balanced bitstrings."""
    bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    qa = cirq.NamedQubit('a')
    qb = cirq.NamedQubit('b')
    observable = cirq.Z(qa) * cirq.Z(qb) * 10
    mean, err = _stats_from_measurements(bitstrings, {qa: 0, qb: 1}, observable, atol=1e-8)

    # Balanced even- and odd-parity cases -> mean of zero.
    assert mean == 0

    # Each observed value is +-10, so the variance is 10**2; the squared
    # standard error of the mean divides by (samples - 1).
    assert err == 10**2 / (4 - 1)
def test_observable_measured_result():
    """ObservableMeasuredResult properties, repr round-trip, and as_dict flattening."""
    a = cirq.NamedQubit('a')
    b = cirq.NamedQubit('b')
    omr = cw.ObservableMeasuredResult(
        setting=cw.InitObsSetting(
            init_state=cirq.Z(a) * cirq.Z(b), observable=cirq.Y(a) * cirq.Y(b)
        ),
        mean=0,
        variance=5**2,
        repetitions=4,
        circuit_params={'phi': 52},
    )
    # stddev is the square root of the stored variance.
    assert omr.stddev == 5
    assert omr.observable == cirq.Y(a) * cirq.Y(b)
    assert omr.init_state == cirq.Z(a) * cirq.Z(b)

    cirq.testing.assert_equivalent_repr(omr)

    # as_dict flattens the setting and prefixes circuit params with "param.".
    assert omr.as_dict() == {
        'init_state': cirq.Z(a) * cirq.Z(b),
        'observable': cirq.Y(a) * cirq.Y(b),
        'mean': 0,
        'variance': 25,
        'repetitions': 4,
        'param.phi': 52,
    }
    # Param names colliding with the fixed keys get prefixed repeatedly
    # rather than overwriting them.
    omr2 = dataclasses.replace(
        omr,
        circuit_params={
            'phi': 52,
            'observable': 3.14,  # this would be a bad but legal parameter name
            'param.phi': -1,
        },
    )
    assert omr2.as_dict() == {
        'init_state': cirq.Z(a) * cirq.Z(b),
        'observable': cirq.Y(a) * cirq.Y(b),
        'mean': 0,
        'variance': 25,
        'repetitions': 4,
        'param.phi': 52,
        'param.observable': 3.14,
        'param.param.phi': -1,
    }
@pytest.fixture()
def example_bsa() -> 'cw.BitstringAccumulator':
    """Test fixture to create an (empty) example BitstringAccumulator"""
    q0, q1 = cirq.LineQubit.range(2)
    max_setting = cw.InitObsSetting(
        init_state=cirq.KET_ZERO(q0) * cirq.KET_ZERO(q1), observable=cirq.X(q0) * cirq.Y(q1)
    )
    spec = _MeasurementSpec(
        max_setting=max_setting, circuit_params={'beta': 0.123, 'gamma': 0.456}
    )
    # The max setting plus its two single-qubit sub-observables.
    single_settings = [
        cw.InitObsSetting(init_state=max_setting.init_state, observable=cirq.X(q0)),
        cw.InitObsSetting(init_state=max_setting.init_state, observable=cirq.Y(q1)),
    ]
    return cw.BitstringAccumulator(
        meas_spec=spec,
        simul_settings=[max_setting] + single_settings,
        qubit_to_index={q0: 0, q1: 1},
    )
def test_bitstring_accumulator(example_bsa):
    """BitstringAccumulator construction, consumption, and result views."""
    # test initialization: the fixture accumulator starts empty.
    assert example_bsa.bitstrings.shape == (0, 2)
    assert example_bsa.chunksizes.shape == (0,)
    assert example_bsa.timestamps.shape == (0,)

    # test consume_results: four 2-bit strings arrive as one chunk.
    bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)
    example_bsa.consume_results(bitstrings)
    assert example_bsa.bitstrings.shape == (4, 2)
    assert example_bsa.chunksizes.shape == (1,)
    assert example_bsa.timestamps.shape == (1,)
    assert example_bsa.n_repetitions == 4

    # Non-uint8 bitstrings are rejected.
    with pytest.raises(ValueError):
        example_bsa.consume_results(bitstrings.astype(int))

    # test results: one result per simultaneous setting in the fixture.
    results = list(example_bsa.results)
    assert len(results) == 3
    for r in results:
        assert r.repetitions == 4

    # test records: dict form of the same results.
    for r in example_bsa.records:
        assert isinstance(r, dict)
        assert 'repetitions' in r
        assert r['repetitions'] == 4
def test_bitstring_accumulator_strings(example_bsa):
    """summary_string / __str__ rendering of per-setting statistics."""
    bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)
    example_bsa.consume_results(bitstrings)

    q0, q1 = cirq.LineQubit.range(2)
    settings = cw.observables_to_settings(
        [cirq.X(q0), cirq.Y(q1), cirq.X(q0) * cirq.Y(q1)], qubits=[q0, q1]
    )
    # Balanced bitstrings give mean 0; stderr is sqrt(1/(4-1)) ~= 0.577.
    strings_should_be = [
        '+Z(q(0)) * +Z(q(1)) → X(q(0)): 0.000 +- 0.577',
        '+Z(q(0)) * +Z(q(1)) → Y(q(1)): 0.000 +- 0.577',
        '+Z(q(0)) * +Z(q(1)) → X(q(0))*Y(q(1)): 0.000 +- 0.577',
    ]
    for setting, ssb in zip(settings, strings_should_be):
        assert example_bsa.summary_string(setting) == ssb, ssb

    # __str__ leads with the max setting, then indents each sub-setting.
    assert (
        str(example_bsa)
        == """Accumulator +Z(q(0)) * +Z(q(1)) → X(q(0))*Y(q(1)); 4 repetitions
  +Z(q(0)) * +Z(q(1)) → X(q(0))*Y(q(1)): 0.000 +- 0.577
  +Z(q(0)) * +Z(q(1)) → X(q(0)): 0.000 +- 0.577
  +Z(q(0)) * +Z(q(1)) → Y(q(1)): 0.000 +- 0.577"""
    )
def test_bitstring_accumulator_equality():
    """Equality depends on spec, settings, bitstrings, chunking and timestamps."""
    et = cirq.testing.EqualsTester()
    bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)
    chunksizes = np.asarray([4])
    timestamps = np.asarray([datetime.datetime.now()])
    a = cirq.NamedQubit('a')
    b = cirq.NamedQubit('b')
    qubit_to_index = {a: 0, b: 1}
    obs = cirq.Z(a) * cirq.Z(b) * 10
    setting = cw.InitObsSetting(init_state=cirq.Z(a) * cirq.Z(b), observable=obs)
    meas_spec = _MeasurementSpec(setting, {})

    cirq.testing.assert_equivalent_repr(
        cw.BitstringAccumulator(
            meas_spec=meas_spec,
            simul_settings=[setting],
            qubit_to_index=qubit_to_index,
            bitstrings=bitstrings.copy(),
            chunksizes=chunksizes.copy(),
            timestamps=timestamps.copy(),
        )
    )

    # Independent copies of the same data compare equal.
    et.add_equality_group(
        cw.BitstringAccumulator(
            meas_spec=meas_spec,
            simul_settings=[setting],
            qubit_to_index=qubit_to_index,
            bitstrings=bitstrings.copy(),
            chunksizes=chunksizes.copy(),
            timestamps=timestamps.copy(),
        ),
        cw.BitstringAccumulator(
            meas_spec=meas_spec,
            simul_settings=[setting],
            qubit_to_index=qubit_to_index,
            bitstrings=bitstrings.copy(),
            chunksizes=chunksizes.copy(),
            timestamps=timestamps.copy(),
        ),
    )

    # A different timestamp distinguishes accumulators. Build a strictly
    # later timestamp directly instead of time.sleep(1): equality only needs
    # the timestamps to differ, and the sleep cost a full second per run.
    timestamps = np.asarray([datetime.datetime.now() + datetime.timedelta(seconds=1)])
    et.add_equality_group(
        cw.BitstringAccumulator(
            meas_spec=meas_spec,
            simul_settings=[setting],
            qubit_to_index=qubit_to_index,
            bitstrings=bitstrings,
            chunksizes=chunksizes,
            timestamps=timestamps,
        )
    )

    # Different circuit params in the measurement spec.
    et.add_equality_group(
        cw.BitstringAccumulator(
            meas_spec=_MeasurementSpec(setting, {'a': 2}),
            simul_settings=[setting],
            qubit_to_index=qubit_to_index,
            bitstrings=bitstrings,
            chunksizes=chunksizes,
            timestamps=timestamps,
        )
    )

    # Different bitstring data.
    bitstrings = bitstrings.copy()
    bitstrings[0] = [1, 1]
    et.add_equality_group(
        cw.BitstringAccumulator(
            meas_spec=meas_spec,
            simul_settings=[setting],
            qubit_to_index=qubit_to_index,
            bitstrings=bitstrings,
            chunksizes=chunksizes,
            timestamps=timestamps,
        )
    )

    # Same data, different chunking.
    chunksizes = np.asarray([2, 2])
    timestamps = np.asarray(list(timestamps) * 2)
    et.add_equality_group(
        cw.BitstringAccumulator(
            meas_spec=meas_spec,
            simul_settings=[setting],
            qubit_to_index=qubit_to_index,
            bitstrings=bitstrings,
            chunksizes=chunksizes,
            timestamps=timestamps,
        )
    )
def _get_ZZ_Z_Z_bsa_constructor_args():
    """Kwargs for a BitstringAccumulator measuring 7*ZZ, 5*Z(a) and 3*Z(b)."""
    qa = cirq.NamedQubit('a')
    qb = cirq.NamedQubit('b')
    settings = list(
        cw.observables_to_settings(
            [cirq.Z(qa) * cirq.Z(qb) * 7, cirq.Z(qa) * 5, cirq.Z(qb) * 3], qubits=[qa, qb]
        )
    )
    return {
        'meas_spec': _MeasurementSpec(settings[0], {}),
        'simul_settings': settings,
        'qubit_to_index': {qa: 0, qb: 1},
        # All four 2-bit strings, once each: balanced data with mean zero.
        'bitstrings': np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8),
        'chunksizes': np.asarray([4]),
        'timestamps': np.asarray([datetime.datetime.now()]),
    }
def test_bitstring_accumulator_stats():
    """Means, covariance and per-setting stats for the ZZ/Z/Z accumulator."""
    kwargs = _get_ZZ_Z_Z_bsa_constructor_args()
    settings = kwargs['simul_settings']
    # Unpacking the qubit_to_index dict yields its two qubit keys in
    # insertion order: qubit a, then qubit b.
    a, b = kwargs['qubit_to_index']
    bsa = cw.BitstringAccumulator(**kwargs)

    # There are three observables, each with mean 0 because
    # the four 2-bit strings have even numbers of a) ones in the
    # first position b) ones in the second position c) even parity
    # pairs.
    np.testing.assert_allclose([0, 0, 0], bsa.means())

    # Covariance: Sum[(x - xbar)(y - ybar)] / (N-1)
    # where xbar and ybar are 0, per above. Each individual observed
    # value is +-1, so (x-xbar)(y-bar) is +-1 (neglecting observable coefficients)
    # For off-diagonal elements, there are two +1 and two -1 terms for each entry
    # so the total contribution is zero, and the matrix is diagonal
    should_be = np.array([[4 * 7**2, 0, 0], [0, 4 * 5**2, 0], [0, 0, 4 * 3**2]])
    should_be = should_be / (4 - 1)  # covariance formula
    should_be = should_be / 4  # cov of the distribution of sample mean
    np.testing.assert_allclose(should_be, bsa.covariance())

    for setting, var in zip(settings, [4 * 7**2, 4 * 5**2, 4 * 3**2]):
        np.testing.assert_allclose(0, bsa.mean(setting))
        np.testing.assert_allclose(var / 4 / (4 - 1), bsa.variance(setting))
        np.testing.assert_allclose(np.sqrt(var / 4 / (4 - 1)), bsa.stderr(setting))

    # Settings this accumulator never measured are rejected.
    bad_obs = [cirq.X(a) * cirq.X(b)]
    bad_setting = list(cw.observables_to_settings(bad_obs, qubits=[a, b]))[0]
    with pytest.raises(ValueError):
        bsa.mean(bad_setting)
def test_bitstring_accumulator_stats_2():
    """Covariance of two perfectly correlated single-qubit Z observables."""
    # Both bits are always equal -> the two observables are fully correlated.
    bitstrings = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], np.uint8)
    chunksizes = np.asarray([4])
    timestamps = np.asarray([datetime.datetime.now()])
    a = cirq.NamedQubit('a')
    b = cirq.NamedQubit('b')
    qubit_to_index = {a: 0, b: 1}
    settings = list(cw.observables_to_settings([cirq.Z(a) * 5, cirq.Z(b) * 3], qubits=[a, b]))
    meas_spec = _MeasurementSpec(settings[0], {})

    bsa = cw.BitstringAccumulator(
        meas_spec=meas_spec,
        simul_settings=settings,
        qubit_to_index=qubit_to_index,
        bitstrings=bitstrings,
        chunksizes=chunksizes,
        timestamps=timestamps,
    )

    # There are two observables, each with mean 0 because
    # the four 2-bit strings have even numbers of a) ones in the
    # first position b) ones in the second position.
    np.testing.assert_allclose([0, 0], bsa.means())

    # Covariance: Sum[(x - xbar)(y - ybar)] / (N-1)
    # where xbar and ybar are 0, per above. Each individual observed
    # value is +-1, so (x-xbar)(y-bar) is +-1 (neglecting observable coefficients)
    # In this case, the measurements are perfectly correlated.
    should_be = 4 * np.array([[5 * 5, 5 * 3], [3 * 5, 3 * 3]])
    should_be = should_be / (4 - 1)  # covariance formula
    should_be = should_be / 4  # cov of the distribution of sample mean
    np.testing.assert_allclose(should_be, bsa.covariance())

    for setting, var in zip(settings, [4 * 5**2, 4 * 3**2]):
        np.testing.assert_allclose(0, bsa.mean(setting))
        np.testing.assert_allclose(var / 4 / (4 - 1), bsa.variance(setting))
        np.testing.assert_allclose(np.sqrt(var / 4 / (4 - 1)), bsa.stderr(setting))
def test_bitstring_accumulator_errors():
    """Constructor and stats methods reject inconsistent or missing data."""
    q0, q1 = cirq.LineQubit.range(2)
    settings = cw.observables_to_settings(
        [cirq.X(q0), cirq.Y(q0), cirq.Z(q0), cirq.Z(q0) * cirq.Z(q1)], qubits=[q0, q1]
    )
    grouped_settings = cw.group_settings_greedy(settings)
    max_setting = list(grouped_settings.keys())[0]
    simul_settings = grouped_settings[max_setting]

    # Bitstrings and chunksizes supplied without timestamps — presumably the
    # rejected inconsistency here; the call is otherwise well-formed.
    with pytest.raises(ValueError):
        bsa = cw.BitstringAccumulator(
            meas_spec=_MeasurementSpec(max_setting, {}),
            simul_settings=simul_settings,
            qubit_to_index={q0: 0, q1: 1},
            bitstrings=np.array([[0, 1], [0, 1]]),
            chunksizes=np.array([2]),
        )

    # Chunksizes that do not sum to the number of bitstrings.
    with pytest.raises(ValueError):
        bsa = cw.BitstringAccumulator(
            meas_spec=_MeasurementSpec(max_setting, {}),
            simul_settings=simul_settings,
            qubit_to_index={q0: 0, q1: 1},
            bitstrings=np.array([[0, 1], [0, 1]]),
            chunksizes=np.array([3]),
            timestamps=[datetime.datetime.now()],
        )
    # A valid but empty accumulator: statistics are undefined until data arrive.
    bsa = cw.BitstringAccumulator(
        meas_spec=_MeasurementSpec(max_setting, {}),
        simul_settings=simul_settings[:1],
        qubit_to_index={q0: 0, q1: 1},
    )

    with pytest.raises(ValueError):
        bsa.covariance()

    with pytest.raises(ValueError):
        bsa.variance(simul_settings[0])

    with pytest.raises(ValueError):
        bsa.mean(simul_settings[0])

    # After consuming one repetition, the 1x1 covariance matrix exists.
    bsa.consume_results(np.array([[0, 0]], dtype=np.uint8))
    assert bsa.covariance().shape == (1, 1)
def test_flatten_grouped_results():
    """Flattening grouped accumulators yields one result per input setting."""
    q0, q1 = cirq.LineQubit.range(2)
    observables = [cirq.X(q0), cirq.Y(q0), cirq.Z(q0), cirq.Z(q0) * cirq.Z(q1)]
    grouped = cw.group_settings_greedy(
        cw.observables_to_settings(observables, qubits=[q0, q1])
    )

    accumulators = []
    for max_setting, simul_settings in grouped.items():
        acc = cw.BitstringAccumulator(
            meas_spec=_MeasurementSpec(max_setting, {}),
            simul_settings=simul_settings,
            qubit_to_index={q0: 0, q1: 1},
        )
        # Three repetitions of the all-zeros bitstring.
        acc.consume_results(np.zeros((3, 2), dtype=np.uint8))
        accumulators.append(acc)

    results = cw.flatten_grouped_results(accumulators)
    assert len(results) == 4
    for res in results:
        # All-zeros input -> every observable is exactly 1 +- 0.
        assert res.mean == 1
        assert res.variance == 0
        assert res.repetitions == 3
def _get_mock_readout_calibration(qa_0=90, qa_1=10, qb_0=91, qb_1=9):
    """Build a mock readout-calibration BitstringAccumulator.

    The four parameters are the counts of 0 and 1 readouts for qubits a and
    b respectively (each pair must total 100, per the shape assert below).
    Returns (ro_bsa, ro_settings, ro_meas_spec_setting).
    """
    # Mock readout correction results by constructing a BitstringAccumulator
    # with two <Z> measurements
    q1_ro = np.array([0] * qa_0 + [1] * qa_1)
    q2_ro = np.array([0] * qb_0 + [1] * qb_1)
    # Fixed seed keeps the shuffled readout order deterministic across runs.
    rs = np.random.RandomState(52)
    rs.shuffle(q1_ro)
    rs.shuffle(q2_ro)
    ro_bitstrings = np.vstack((q1_ro, q2_ro)).T
    assert ro_bitstrings.shape == (100, 2)
    chunksizes = np.asarray([100])
    timestamps = np.asarray([datetime.datetime.now()])
    a = cirq.NamedQubit('a')
    b = cirq.NamedQubit('b')
    qubit_to_index = {a: 0, b: 1}
    # Simultaneous settings <Z(a)> and <Z(b)>; the max setting is Z(a)*Z(b).
    ro_settings = list(cw.observables_to_settings([cirq.Z(a), cirq.Z(b)], qubits=[a, b]))
    (ro_meas_spec_setting,) = list(
        cw.observables_to_settings([cirq.Z(a) * cirq.Z(b)], qubits=[a, b])
    )
    ro_meas_spec = _MeasurementSpec(ro_meas_spec_setting, {})
    ro_bsa = cw.BitstringAccumulator(
        meas_spec=ro_meas_spec,
        simul_settings=ro_settings,
        qubit_to_index=qubit_to_index,
        bitstrings=ro_bitstrings,
        chunksizes=chunksizes,
        timestamps=timestamps,
    )
    return ro_bsa, ro_settings, ro_meas_spec_setting
def test_readout_correction():
    """Readout calibration rescales means, inflates variance, and diverges at 50/50 error."""
    a = cirq.NamedQubit('a')
    b = cirq.NamedQubit('b')
    ro_bsa, ro_settings, ro_meas_spec_setting = _get_mock_readout_calibration()
    # observables range from 1 to -1 while bitstrings range from 0 to 1
    assert ro_bsa.mean(ro_settings[0]) == 0.8
    assert ro_bsa.mean(ro_settings[1]) == 0.82
    assert np.isclose(ro_bsa.mean(ro_meas_spec_setting), 0.8 * 0.82, atol=0.05)
    bitstrings = np.array(
        [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 1], [1, 1]], dtype=np.uint8
    )
    chunksizes = np.asarray([len(bitstrings)])
    timestamps = np.asarray([datetime.datetime.now()])
    qubit_to_index = {a: 0, b: 1}
    settings = list(
        cw.observables_to_settings([cirq.X(a) * cirq.Y(b), cirq.X(a), cirq.Y(b)], qubits=[a, b])
    )
    meas_spec = _MeasurementSpec(settings[0], {})
    # First, make one with no readout correction
    bsa1 = cw.BitstringAccumulator(
        meas_spec=meas_spec,
        simul_settings=settings,
        qubit_to_index=qubit_to_index,
        bitstrings=bitstrings,
        chunksizes=chunksizes,
        timestamps=timestamps,
    )
    # [XY: one excitation, X: one excitation, Y: two excitations]
    np.testing.assert_allclose([1 - 1 / 4, 1 - 1 / 4, 1 - 2 / 4], bsa1.means())
    np.testing.assert_allclose([0.75, 0.75, 0.5], bsa1.means())
    # Turn on readout correction
    bsa2 = cw.BitstringAccumulator(
        meas_spec=meas_spec,
        simul_settings=settings,
        qubit_to_index=qubit_to_index,
        bitstrings=bitstrings,
        chunksizes=chunksizes,
        timestamps=timestamps,
        readout_calibration=ro_bsa,
    )
    # Readout correction increases variance
    for setting in settings:
        assert bsa2.variance(setting) > bsa1.variance(setting)
    # Corrected means are the raw means divided by the calibration factors.
    np.testing.assert_allclose(
        [0.75 / (0.8 * 0.82), 0.75 / 0.8, 0.5 / 0.82], bsa2.means(), atol=0.01
    )
    # Variance becomes singular when readout error is 50/50
    ro_bsa_50_50, _, _ = _get_mock_readout_calibration(qa_0=50, qa_1=50)
    bsa3 = cw.BitstringAccumulator(
        meas_spec=meas_spec,
        simul_settings=settings,
        qubit_to_index=qubit_to_index,
        bitstrings=bitstrings,
        chunksizes=chunksizes,
        timestamps=timestamps,
        readout_calibration=ro_bsa_50_50,
    )
    with pytest.raises(ZeroDivisionError):
        bsa3.means()
    assert bsa3.variance(settings[1]) == np.inf
def test_readout_correction_errors():
    """Corrected variance diverges when the estimated calibration value is zero."""
    kwargs = _get_ZZ_Z_Z_bsa_constructor_args()
    settings = kwargs['simul_settings']
    ro_bsa, _, _ = _get_mock_readout_calibration()
    kwargs['readout_calibration'] = ro_bsa
    bsa = cw.BitstringAccumulator(**kwargs)
    # Variance becomes singular as the estimated value approaches zero
    np.testing.assert_allclose(bsa.means(), [0, 0, 0])
    assert bsa.variance(settings[0]) == np.inf
| 35.024911
| 96
| 0.624416
|
import dataclasses
import datetime
import time
import numpy as np
import pytest
import cirq
import cirq.work as cw
from cirq.work.observable_measurement_data import (
_check_and_get_real_coef,
_obs_vals_from_measurements,
_stats_from_measurements,
)
from cirq.work.observable_settings import _MeasurementSpec
def test_get_real_coef():
    """A real (or real-valued complex) coefficient is extracted; imaginary raises."""
    qubit = cirq.LineQubit(0)
    real_op = cirq.Z(qubit) * 2
    complex_but_real_op = cirq.Z(qubit) * complex(2.0)
    assert _check_and_get_real_coef(real_op, atol=1e-8) == 2
    assert _check_and_get_real_coef(complex_but_real_op, atol=1e-8) == 2
    with pytest.raises(ValueError):
        _check_and_get_real_coef(cirq.Z(qubit) * 2.0j, atol=1e-8)
def test_obs_vals_from_measurements():
    """Values of ZZ*10 follow bitstring parity: even parity -> +10, odd -> -10."""
    measurements = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    qubit_a = cirq.NamedQubit('a')
    qubit_b = cirq.NamedQubit('b')
    observable = cirq.Z(qubit_a) * cirq.Z(qubit_b) * 10
    computed = _obs_vals_from_measurements(
        measurements, {qubit_a: 0, qubit_b: 1}, observable, atol=1e-8
    )
    np.testing.assert_equal(computed, [10, -10, -10, 10])
def test_stats_from_measurements():
    """Mean over the four parity-balanced bitstrings is 0; err is coef**2 / (N - 1)."""
    measurements = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    qubit_a = cirq.NamedQubit('a')
    qubit_b = cirq.NamedQubit('b')
    observable = cirq.Z(qubit_a) * cirq.Z(qubit_b) * 10
    mean, err = _stats_from_measurements(
        measurements, {qubit_a: 0, qubit_b: 1}, observable, atol=1e-8
    )
    assert mean == 0
    assert err == 10**2 / (4 - 1)
def test_observable_measured_result():
    """ObservableMeasuredResult exposes derived properties and a flat as_dict().

    Circuit-parameter keys are namespaced with a 'param.' prefix in as_dict();
    keys that would collide with the fixed fields are still prefixed, so they
    never overwrite 'observable', 'mean', etc.
    """
    a = cirq.NamedQubit('a')
    b = cirq.NamedQubit('b')
    omr = cw.ObservableMeasuredResult(
        setting=cw.InitObsSetting(
            init_state=cirq.Z(a) * cirq.Z(b), observable=cirq.Y(a) * cirq.Y(b)
        ),
        mean=0,
        variance=5**2,
        repetitions=4,
        circuit_params={'phi': 52},
    )
    # stddev is derived from variance; observable/init_state proxy the setting.
    assert omr.stddev == 5
    assert omr.observable == cirq.Y(a) * cirq.Y(b)
    assert omr.init_state == cirq.Z(a) * cirq.Z(b)
    cirq.testing.assert_equivalent_repr(omr)
    assert omr.as_dict() == {
        'init_state': cirq.Z(a) * cirq.Z(b),
        'observable': cirq.Y(a) * cirq.Y(b),
        'mean': 0,
        'variance': 25,
        'repetitions': 4,
        'param.phi': 52,
    }
    # Params named like existing keys ('observable', even 'param.phi') must not
    # clobber the fixed fields -- they just get the 'param.' prefix again.
    omr2 = dataclasses.replace(
        omr,
        circuit_params={
            'phi': 52,
            'observable': 3.14, 'param.phi': -1,
        },
    )
    assert omr2.as_dict() == {
        'init_state': cirq.Z(a) * cirq.Z(b),
        'observable': cirq.Y(a) * cirq.Y(b),
        'mean': 0,
        'variance': 25,
        'repetitions': 4,
        'param.phi': 52,
        'param.observable': 3.14,
        'param.param.phi': -1,
    }
@pytest.fixture()
def example_bsa() -> 'cw.BitstringAccumulator':
    """Pytest fixture: an empty two-qubit accumulator for X0*Y1 plus its sub-settings."""
    q0, q1 = cirq.LineQubit.range(2)
    setting = cw.InitObsSetting(
        init_state=cirq.KET_ZERO(q0) * cirq.KET_ZERO(q1), observable=cirq.X(q0) * cirq.Y(q1)
    )
    meas_spec = _MeasurementSpec(
        max_setting=setting, circuit_params={'beta': 0.123, 'gamma': 0.456}
    )
    bsa = cw.BitstringAccumulator(
        meas_spec=meas_spec,
        # The maximal setting plus the two single-qubit settings it covers.
        simul_settings=[
            setting,
            cw.InitObsSetting(init_state=setting.init_state, observable=cirq.X(q0)),
            cw.InitObsSetting(init_state=setting.init_state, observable=cirq.Y(q1)),
        ],
        qubit_to_index={q0: 0, q1: 1},
    )
    return bsa
def test_bitstring_accumulator(example_bsa):
    """consume_results() grows the accumulator and populates results/records."""
    # Fresh fixture: no data yet.
    assert example_bsa.bitstrings.shape == (0, 2)
    assert example_bsa.chunksizes.shape == (0,)
    assert example_bsa.timestamps.shape == (0,)
    bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)
    example_bsa.consume_results(bitstrings)
    # One chunk of four repetitions was appended.
    assert example_bsa.bitstrings.shape == (4, 2)
    assert example_bsa.chunksizes.shape == (1,)
    assert example_bsa.timestamps.shape == (1,)
    assert example_bsa.n_repetitions == 4
    # Only uint8 bitstrings are accepted.
    with pytest.raises(ValueError):
        example_bsa.consume_results(bitstrings.astype(int))
    results = list(example_bsa.results)
    assert len(results) == 3
    for r in results:
        assert r.repetitions == 4
    for r in example_bsa.records:
        assert isinstance(r, dict)
        assert 'repetitions' in r
        assert r['repetitions'] == 4
def test_bitstring_accumulator_strings(example_bsa):
    """summary_string() and str() report 'init → observable: mean +- stderr' lines."""
    bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)
    example_bsa.consume_results(bitstrings)
    q0, q1 = cirq.LineQubit.range(2)
    settings = cw.observables_to_settings(
        [cirq.X(q0), cirq.Y(q1), cirq.X(q0) * cirq.Y(q1)], qubits=[q0, q1]
    )
    strings_should_be = [
        '+Z(q(0)) * +Z(q(1)) → X(q(0)): 0.000 +- 0.577',
        '+Z(q(0)) * +Z(q(1)) → Y(q(1)): 0.000 +- 0.577',
        '+Z(q(0)) * +Z(q(1)) → X(q(0))*Y(q(1)): 0.000 +- 0.577',
    ]
    for setting, ssb in zip(settings, strings_should_be):
        assert example_bsa.summary_string(setting) == ssb, ssb
    assert (
        str(example_bsa)
        == """Accumulator +Z(q(0)) * +Z(q(1)) → X(q(0))*Y(q(1)); 4 repetitions
  +Z(q(0)) * +Z(q(1)) → X(q(0))*Y(q(1)): 0.000 +- 0.577
  +Z(q(0)) * +Z(q(1)) → X(q(0)): 0.000 +- 0.577
  +Z(q(0)) * +Z(q(1)) → Y(q(1)): 0.000 +- 0.577"""
    )
def test_bitstring_accumulator_equality():
    """Accumulators are equal iff spec, settings, bitstrings, chunksizes and timestamps match."""
    et = cirq.testing.EqualsTester()
    bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)
    chunksizes = np.asarray([4])
    timestamps = np.asarray([datetime.datetime.now()])
    a = cirq.NamedQubit('a')
    b = cirq.NamedQubit('b')
    qubit_to_index = {a: 0, b: 1}
    obs = cirq.Z(a) * cirq.Z(b) * 10
    setting = cw.InitObsSetting(init_state=cirq.Z(a) * cirq.Z(b), observable=obs)
    meas_spec = _MeasurementSpec(setting, {})
    cirq.testing.assert_equivalent_repr(
        cw.BitstringAccumulator(
            meas_spec=meas_spec,
            simul_settings=[setting],
            qubit_to_index=qubit_to_index,
            bitstrings=bitstrings.copy(),
            chunksizes=chunksizes.copy(),
            timestamps=timestamps.copy(),
        )
    )
    # Identical data, even through distinct copies, compares equal.
    et.add_equality_group(
        cw.BitstringAccumulator(
            meas_spec=meas_spec,
            simul_settings=[setting],
            qubit_to_index=qubit_to_index,
            bitstrings=bitstrings.copy(),
            chunksizes=chunksizes.copy(),
            timestamps=timestamps.copy(),
        ),
        cw.BitstringAccumulator(
            meas_spec=meas_spec,
            simul_settings=[setting],
            qubit_to_index=qubit_to_index,
            bitstrings=bitstrings.copy(),
            chunksizes=chunksizes.copy(),
            timestamps=timestamps.copy(),
        ),
    )
    # A strictly later timestamp breaks equality. Construct the offset directly
    # rather than time.sleep(1), which used to stall the test for a full second.
    timestamps = np.asarray([timestamps[0] + datetime.timedelta(seconds=1)])
    et.add_equality_group(
        cw.BitstringAccumulator(
            meas_spec=meas_spec,
            simul_settings=[setting],
            qubit_to_index=qubit_to_index,
            bitstrings=bitstrings,
            chunksizes=chunksizes,
            timestamps=timestamps,
        )
    )
    # Different circuit params in the measurement spec.
    et.add_equality_group(
        cw.BitstringAccumulator(
            meas_spec=_MeasurementSpec(setting, {'a': 2}),
            simul_settings=[setting],
            qubit_to_index=qubit_to_index,
            bitstrings=bitstrings,
            chunksizes=chunksizes,
            timestamps=timestamps,
        )
    )
    # Different bitstring data.
    bitstrings = bitstrings.copy()
    bitstrings[0] = [1, 1]
    et.add_equality_group(
        cw.BitstringAccumulator(
            meas_spec=meas_spec,
            simul_settings=[setting],
            qubit_to_index=qubit_to_index,
            bitstrings=bitstrings,
            chunksizes=chunksizes,
            timestamps=timestamps,
        )
    )
    # Same data, different chunking.
    chunksizes = np.asarray([2, 2])
    timestamps = np.asarray(list(timestamps) * 2)
    et.add_equality_group(
        cw.BitstringAccumulator(
            meas_spec=meas_spec,
            simul_settings=[setting],
            qubit_to_index=qubit_to_index,
            bitstrings=bitstrings,
            chunksizes=chunksizes,
            timestamps=timestamps,
        )
    )
def _get_ZZ_Z_Z_bsa_constructor_args():
    """Return kwargs for a BitstringAccumulator measuring 7*ZZ, 5*Z(a), 3*Z(b).

    The four bitstrings cover all parities, so every observable's mean is zero.
    """
    bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)
    chunksizes = np.asarray([4])
    timestamps = np.asarray([datetime.datetime.now()])
    a = cirq.NamedQubit('a')
    b = cirq.NamedQubit('b')
    qubit_to_index = {a: 0, b: 1}
    settings = list(
        cw.observables_to_settings(
            [cirq.Z(a) * cirq.Z(b) * 7, cirq.Z(a) * 5, cirq.Z(b) * 3], qubits=[a, b]
        )
    )
    meas_spec = _MeasurementSpec(settings[0], {})
    return {
        'meas_spec': meas_spec,
        'simul_settings': settings,
        'qubit_to_index': qubit_to_index,
        'bitstrings': bitstrings,
        'chunksizes': chunksizes,
        'timestamps': timestamps,
    }
def test_bitstring_accumulator_stats():
    """Check means, covariance, variance and stderr for three simultaneous settings.

    Fixes a corrupted line where three statements had been fused onto one line
    (a syntax error): the two normalizations and the covariance assertion are
    restored as separate statements.
    """
    kwargs = _get_ZZ_Z_Z_bsa_constructor_args()
    settings = kwargs['simul_settings']
    a, b = kwargs['qubit_to_index']
    bsa = cw.BitstringAccumulator(**kwargs)
    # All four parities are present, so every observable averages to zero.
    np.testing.assert_allclose([0, 0, 0], bsa.means())
    # Diagonal entries are the sums of squared observable values (coefs 7, 5, 3).
    should_be = np.array([[4 * 7**2, 0, 0], [0, 4 * 5**2, 0], [0, 0, 4 * 3**2]])
    should_be = should_be / (4 - 1)  # sample covariance uses N - 1 normalization
    should_be = should_be / 4  # covariance of the sample mean divides by N
    np.testing.assert_allclose(should_be, bsa.covariance())
    for setting, var in zip(settings, [4 * 7**2, 4 * 5**2, 4 * 3**2]):
        np.testing.assert_allclose(0, bsa.mean(setting))
        np.testing.assert_allclose(var / 4 / (4 - 1), bsa.variance(setting))
        np.testing.assert_allclose(np.sqrt(var / 4 / (4 - 1)), bsa.stderr(setting))
    # Settings the accumulator never measured are rejected.
    bad_obs = [cirq.X(a) * cirq.X(b)]
    bad_setting = list(cw.observables_to_settings(bad_obs, qubits=[a, b]))[0]
    with pytest.raises(ValueError):
        bsa.mean(bad_setting)
def test_bitstring_accumulator_stats_2():
    """Perfectly correlated bitstrings yield a fully-populated covariance matrix.

    Fixes a corrupted line where three statements had been fused onto one line
    (a syntax error): the two normalizations and the covariance assertion are
    restored as separate statements.
    """
    bitstrings = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], np.uint8)
    chunksizes = np.asarray([4])
    timestamps = np.asarray([datetime.datetime.now()])
    a = cirq.NamedQubit('a')
    b = cirq.NamedQubit('b')
    qubit_to_index = {a: 0, b: 1}
    settings = list(cw.observables_to_settings([cirq.Z(a) * 5, cirq.Z(b) * 3], qubits=[a, b]))
    meas_spec = _MeasurementSpec(settings[0], {})
    bsa = cw.BitstringAccumulator(
        meas_spec=meas_spec,
        simul_settings=settings,
        qubit_to_index=qubit_to_index,
        bitstrings=bitstrings,
        chunksizes=chunksizes,
        timestamps=timestamps,
    )
    np.testing.assert_allclose([0, 0], bsa.means())
    # The qubits always agree, so off-diagonal terms are products of the coefs.
    should_be = 4 * np.array([[5 * 5, 5 * 3], [3 * 5, 3 * 3]])
    should_be = should_be / (4 - 1)  # sample covariance uses N - 1 normalization
    should_be = should_be / 4  # covariance of the sample mean divides by N
    np.testing.assert_allclose(should_be, bsa.covariance())
    for setting, var in zip(settings, [4 * 5**2, 4 * 3**2]):
        np.testing.assert_allclose(0, bsa.mean(setting))
        np.testing.assert_allclose(var / 4 / (4 - 1), bsa.variance(setting))
        np.testing.assert_allclose(np.sqrt(var / 4 / (4 - 1)), bsa.stderr(setting))
def test_bitstring_accumulator_errors():
    """Constructor and statistics validate their inputs.

    Covers: bitstrings without chunksize bookkeeping, chunksize/bitstring count
    mismatch, and statistics requested before any data has been consumed.
    """
    q0, q1 = cirq.LineQubit.range(2)
    settings = cw.observables_to_settings(
        [cirq.X(q0), cirq.Y(q0), cirq.Z(q0), cirq.Z(q0) * cirq.Z(q1)], qubits=[q0, q1]
    )
    grouped_settings = cw.group_settings_greedy(settings)
    max_setting = list(grouped_settings.keys())[0]
    simul_settings = grouped_settings[max_setting]
    # Bitstrings + chunksizes supplied without matching timestamps.
    with pytest.raises(ValueError):
        bsa = cw.BitstringAccumulator(
            meas_spec=_MeasurementSpec(max_setting, {}),
            simul_settings=simul_settings,
            qubit_to_index={q0: 0, q1: 1},
            bitstrings=np.array([[0, 1], [0, 1]]),
            chunksizes=np.array([2]),
        )
    # Chunksize (3) disagrees with the number of bitstrings (2).
    with pytest.raises(ValueError):
        bsa = cw.BitstringAccumulator(
            meas_spec=_MeasurementSpec(max_setting, {}),
            simul_settings=simul_settings,
            qubit_to_index={q0: 0, q1: 1},
            bitstrings=np.array([[0, 1], [0, 1]]),
            chunksizes=np.array([3]),
            timestamps=[datetime.datetime.now()],
        )
    # An empty accumulator cannot produce statistics...
    bsa = cw.BitstringAccumulator(
        meas_spec=_MeasurementSpec(max_setting, {}),
        simul_settings=simul_settings[:1],
        qubit_to_index={q0: 0, q1: 1},
    )
    with pytest.raises(ValueError):
        bsa.covariance()
    with pytest.raises(ValueError):
        bsa.variance(simul_settings[0])
    with pytest.raises(ValueError):
        bsa.mean(simul_settings[0])
    # ...but works once a single repetition has been consumed.
    bsa.consume_results(np.array([[0, 0]], dtype=np.uint8))
    assert bsa.covariance().shape == (1, 1)
def test_flatten_grouped_results():
    """Greedily-grouped settings flatten back into one result per input observable."""
    q0, q1 = cirq.LineQubit.range(2)
    settings = cw.observables_to_settings(
        [cirq.X(q0), cirq.Y(q0), cirq.Z(q0), cirq.Z(q0) * cirq.Z(q1)], qubits=[q0, q1]
    )
    grouped_settings = cw.group_settings_greedy(settings)
    bsas = []
    for max_setting, simul_settings in grouped_settings.items():
        bsa = cw.BitstringAccumulator(
            meas_spec=_MeasurementSpec(max_setting, {}),
            simul_settings=simul_settings,
            qubit_to_index={q0: 0, q1: 1},
        )
        bsa.consume_results(np.array([[0, 0], [0, 0], [0, 0]], dtype=np.uint8))
        bsas.append(bsa)
    results = cw.flatten_grouped_results(bsas)
    # Four observables in; four flat results out, regardless of grouping.
    assert len(results) == 4
    for res in results:
        # All-zero bitstrings were consumed, so every result is exactly 1 +- 0.
        assert res.mean == 1
        assert res.variance == 0
        assert res.repetitions == 3
def _get_mock_readout_calibration(qa_0=90, qa_1=10, qb_0=91, qb_1=9):
    """Build a BitstringAccumulator that mimics a readout-calibration run.

    qa_0/qa_1 (and qb_0/qb_1) are the counts of 0 and 1 outcomes for qubit a
    (and b) across 100 shots; the defaults give <Z_a> = 0.8 and <Z_b> = 0.82.
    """
    # Mock readout correction results by constructing a BitstringAccumulator
    # with two <Z> measurements
    q1_ro = np.array([0] * qa_0 + [1] * qa_1)
    q2_ro = np.array([0] * qb_0 + [1] * qb_1)
    # Fixed seed so the shuffle (and hence the accumulator) is deterministic.
    rs = np.random.RandomState(52)
    rs.shuffle(q1_ro)
    rs.shuffle(q2_ro)
    ro_bitstrings = np.vstack((q1_ro, q2_ro)).T
    assert ro_bitstrings.shape == (100, 2)
    chunksizes = np.asarray([100])
    timestamps = np.asarray([datetime.datetime.now()])
    a = cirq.NamedQubit('a')
    b = cirq.NamedQubit('b')
    qubit_to_index = {a: 0, b: 1}
    ro_settings = list(cw.observables_to_settings([cirq.Z(a), cirq.Z(b)], qubits=[a, b]))
    (ro_meas_spec_setting,) = list(
        cw.observables_to_settings([cirq.Z(a) * cirq.Z(b)], qubits=[a, b])
    )
    ro_meas_spec = _MeasurementSpec(ro_meas_spec_setting, {})
    ro_bsa = cw.BitstringAccumulator(
        meas_spec=ro_meas_spec,
        simul_settings=ro_settings,
        qubit_to_index=qubit_to_index,
        bitstrings=ro_bitstrings,
        chunksizes=chunksizes,
        timestamps=timestamps,
    )
    return ro_bsa, ro_settings, ro_meas_spec_setting
def test_readout_correction():
    """Readout calibration rescales means, inflates variance, and diverges at 50/50 error."""
    a = cirq.NamedQubit('a')
    b = cirq.NamedQubit('b')
    ro_bsa, ro_settings, ro_meas_spec_setting = _get_mock_readout_calibration()
    # observables range from 1 to -1 while bitstrings range from 0 to 1
    assert ro_bsa.mean(ro_settings[0]) == 0.8
    assert ro_bsa.mean(ro_settings[1]) == 0.82
    assert np.isclose(ro_bsa.mean(ro_meas_spec_setting), 0.8 * 0.82, atol=0.05)
    bitstrings = np.array(
        [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 1], [1, 1]], dtype=np.uint8
    )
    chunksizes = np.asarray([len(bitstrings)])
    timestamps = np.asarray([datetime.datetime.now()])
    qubit_to_index = {a: 0, b: 1}
    settings = list(
        cw.observables_to_settings([cirq.X(a) * cirq.Y(b), cirq.X(a), cirq.Y(b)], qubits=[a, b])
    )
    meas_spec = _MeasurementSpec(settings[0], {})
    # First, make an accumulator with no readout correction.
    bsa1 = cw.BitstringAccumulator(
        meas_spec=meas_spec,
        simul_settings=settings,
        qubit_to_index=qubit_to_index,
        bitstrings=bitstrings,
        chunksizes=chunksizes,
        timestamps=timestamps,
    )
    # [XY: one excitation, X: one excitation, Y: two excitations]
    np.testing.assert_allclose([1 - 1 / 4, 1 - 1 / 4, 1 - 2 / 4], bsa1.means())
    np.testing.assert_allclose([0.75, 0.75, 0.5], bsa1.means())
    # Now turn on readout correction.
    bsa2 = cw.BitstringAccumulator(
        meas_spec=meas_spec,
        simul_settings=settings,
        qubit_to_index=qubit_to_index,
        bitstrings=bitstrings,
        chunksizes=chunksizes,
        timestamps=timestamps,
        readout_calibration=ro_bsa,
    )
    # Readout correction increases variance.
    for setting in settings:
        assert bsa2.variance(setting) > bsa1.variance(setting)
    # Corrected means are the raw means divided by the calibration factors.
    np.testing.assert_allclose(
        [0.75 / (0.8 * 0.82), 0.75 / 0.8, 0.5 / 0.82], bsa2.means(), atol=0.01
    )
    # Variance becomes singular when readout error is 50/50.
    ro_bsa_50_50, _, _ = _get_mock_readout_calibration(qa_0=50, qa_1=50)
    bsa3 = cw.BitstringAccumulator(
        meas_spec=meas_spec,
        simul_settings=settings,
        qubit_to_index=qubit_to_index,
        bitstrings=bitstrings,
        chunksizes=chunksizes,
        timestamps=timestamps,
        readout_calibration=ro_bsa_50_50,
    )
    with pytest.raises(ZeroDivisionError):
        bsa3.means()
    assert bsa3.variance(settings[1]) == np.inf
def test_readout_correction_errors():
    """Corrected variance diverges when the estimated calibration value is zero."""
    kwargs = _get_ZZ_Z_Z_bsa_constructor_args()
    settings = kwargs['simul_settings']
    ro_bsa, _, _ = _get_mock_readout_calibration()
    kwargs['readout_calibration'] = ro_bsa
    bsa = cw.BitstringAccumulator(**kwargs)
    # Variance becomes singular as the estimated value approaches zero
    np.testing.assert_allclose(bsa.means(), [0, 0, 0])
    assert bsa.variance(settings[0]) == np.inf
| true
| true
|
f707e5ac07ee0b8d1e609c00662e283e6331f036
| 1,500
|
py
|
Python
|
vnpy_huobi/__init__.py
|
noranhe/vnpy_huobi
|
9f4a8abe9c6716492fccec6a58bb1e054490b8c2
|
[
"MIT"
] | null | null | null |
vnpy_huobi/__init__.py
|
noranhe/vnpy_huobi
|
9f4a8abe9c6716492fccec6a58bb1e054490b8c2
|
[
"MIT"
] | null | null | null |
vnpy_huobi/__init__.py
|
noranhe/vnpy_huobi
|
9f4a8abe9c6716492fccec6a58bb1e054490b8c2
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
#
# Copyright (c) 2015-present, Xiaoyou Chen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import importlib_metadata
# Re-export the four Huobi gateway flavors as this package's public API.
from .huobi_spot_gateway import HuobiSpotGateway
from .huobi_futures_gateway import HuobiFuturesGateway
from .huobi_usdt_gateway import HuobiUsdtGateway
from .huobi_inverse_gateway import HuobiInverseGateway
# Resolve the installed distribution's version; fall back to "dev" when the
# package is imported from a source checkout that was never pip-installed.
try:
    __version__ = importlib_metadata.version("vnpy_huobi")
except importlib_metadata.PackageNotFoundError:
    __version__ = "dev"
| 42.857143
| 80
| 0.795333
|
import importlib_metadata
# Re-export the four Huobi gateway flavors as this package's public API.
from .huobi_spot_gateway import HuobiSpotGateway
from .huobi_futures_gateway import HuobiFuturesGateway
from .huobi_usdt_gateway import HuobiUsdtGateway
from .huobi_inverse_gateway import HuobiInverseGateway
# Resolve the installed distribution's version; fall back to "dev" when the
# package is imported from a source checkout that was never pip-installed.
try:
    __version__ = importlib_metadata.version("vnpy_huobi")
except importlib_metadata.PackageNotFoundError:
    __version__ = "dev"
| true
| true
|
f707e851434ea098122462fc160ade2093d6c2f0
| 35,468
|
py
|
Python
|
frontends/etiquette_cli.py
|
voussoir/etiquette
|
e982858c28335b11528c52af181abd1bbc71673f
|
[
"BSD-3-Clause"
] | 20
|
2018-03-20T01:40:13.000Z
|
2022-02-11T20:23:41.000Z
|
frontends/etiquette_cli.py
|
voussoir/etiquette
|
e982858c28335b11528c52af181abd1bbc71673f
|
[
"BSD-3-Clause"
] | null | null | null |
frontends/etiquette_cli.py
|
voussoir/etiquette
|
e982858c28335b11528c52af181abd1bbc71673f
|
[
"BSD-3-Clause"
] | 1
|
2018-03-20T13:10:31.000Z
|
2018-03-20T13:10:31.000Z
|
import argparse
import os
import re
import sys
from voussoirkit import betterhelp
from voussoirkit import interactive
from voussoirkit import pathclass
from voussoirkit import pipeable
from voussoirkit import spinal
from voussoirkit import stringtools
from voussoirkit import vlogging
import etiquette
# HELPERS ##########################################################################################
def export_symlinks_albums(albums, destination, dry_run):
    '''
    Export each album as a directory symlink under `destination`, yielding the
    symlink paths that belong to this export (the caller uses them for pruning).

    Outside of dry runs, albums are skipped unless they have exactly one
    associated directory to serve as the symlink target.
    '''
    album_directory_names = etiquette.helpers.decollide_names(albums, lambda a: a.display_name)
    for (album, directory_name) in album_directory_names.items():
        directory_name = etiquette.helpers.remove_path_badchars(directory_name)
        symlink_dir = destination.with_child(directory_name)
        if dry_run:
            yield symlink_dir
            continue
        associated_directories = album.get_associated_directories()
        # Bugfix: previously album_dir was only assigned when there was exactly
        # one associated directory, but it was used unconditionally afterwards,
        # raising NameError (or reusing a stale path from the prior iteration)
        # for any album with zero or multiple associated directories.
        if len(associated_directories) != 1:
            continue
        album_dir = associated_directories.pop()
        if not album_dir.exists:
            continue
        if symlink_dir.exists:
            yield symlink_dir
            continue
        print(album, symlink_dir)
        os.symlink(src=album_dir, dst=symlink_dir)
        yield symlink_dir
def export_symlinks_photos(photos, destination, dry_run):
    '''
    Create a symlink under `destination` for each photo's file, yielding the
    symlink paths that belong to this export (the caller uses them for pruning).
    '''
    photo_filenames = etiquette.helpers.decollide_names(photos, lambda p: p.basename)
    for (photo, filename) in photo_filenames.items():
        symlink_path = destination.with_child(filename)
        # In a dry run, report the path that would be created without touching disk.
        if dry_run:
            yield symlink_path
            continue
        if not photo.real_path.exists:
            continue
        # An existing symlink still counts as part of this export.
        if symlink_path.exists:
            yield symlink_path
            continue
        print(symlink_path.absolute_path)
        os.symlink(src=photo.real_path, dst=symlink_path)
        yield symlink_path
def get_photos_by_glob(pattern):
    '''
    Yield Photo objects for database photos whose files match the glob pattern,
    relative to the cwd. The special pattern '**' means every photo under the
    cwd, recursively. Files not present in the database are silently skipped.
    '''
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    pattern = pathclass.normalize_sep(pattern)
    if pattern == '**':
        # Bugfix: this function is a generator, so a plain `return <value>`
        # discarded the search results (they became the StopIteration value
        # and iterating callers saw nothing). Delegate with yield from.
        yield from search_in_cwd(yield_photos=True, yield_albums=False)
        return
    cwd = pathclass.cwd()
    (folder, pattern) = os.path.split(pattern)
    if folder:
        folder = cwd.join(folder)
    else:
        folder = cwd
    files = [f for f in folder.glob(pattern) if f.is_file]
    for file in files:
        try:
            photo = photodb.get_photo_by_path(file)
            yield photo
        except etiquette.exceptions.NoSuchPhoto:
            pass
def get_photos_by_globs(patterns):
    '''Yield the photos matching any of the given glob patterns, in order.'''
    for glob_pattern in patterns:
        for photo in get_photos_by_glob(glob_pattern):
            yield photo
def get_photos_from_args(args):
    '''Collect the photos selected by explicit ids and/or photo search args.'''
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    photos = []
    if args.photo_id_args:
        photos.extend(photodb.get_photos_by_id(args.photo_id_args))
    if args.photo_search_args:
        photos.extend(search_by_argparse(args.photo_search_args, yield_photos=True))
    return photos
def get_albums_from_args(args):
    '''Collect the albums selected by explicit ids and/or album search args.'''
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    albums = []
    if args.album_id_args:
        albums.extend(photodb.get_albums_by_id(args.album_id_args))
    if args.album_search_args:
        albums.extend(search_by_argparse(args.album_search_args, yield_albums=True))
    return albums
def search_in_cwd(**kwargs):
    '''Run a PhotoDB search scoped to the current working directory.'''
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    return photodb.search(within_directory=pathclass.cwd(), **kwargs)
def search_by_argparse(args, yield_albums=False, yield_photos=False):
    '''Translate the argparse namespace 1:1 into a cwd-scoped PhotoDB search.'''
    return search_in_cwd(
        area=args.area,
        width=args.width,
        height=args.height,
        ratio=args.ratio,
        bytes=args.bytes,
        duration=args.duration,
        author=args.author,
        created=args.created,
        extension=args.extension,
        extension_not=args.extension_not,
        filename=args.filename,
        has_tags=args.has_tags,
        has_thumbnail=args.has_thumbnail,
        is_searchhidden=args.is_searchhidden,
        sha256=args.sha256,
        mimetype=args.mimetype,
        tag_musts=args.tag_musts,
        tag_mays=args.tag_mays,
        tag_forbids=args.tag_forbids,
        tag_expression=args.tag_expression,
        limit=args.limit,
        offset=args.offset,
        orderby=args.orderby,
        yield_albums=yield_albums,
        yield_photos=yield_photos,
    )
# ARGPARSE #########################################################################################
def add_remove_tag_argparse(args, action):
    '''
    Shared worker for the add_tag / remove_tag commands: resolve the tag, pick
    the target photos (ids/search args, globs, or everything under the cwd),
    and apply the action to each, then offer to commit.
    '''
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    tag = photodb.get_tag(name=args.tag_name)
    if args.any_id_args:
        photos = get_photos_from_args(args)
    elif args.globs:
        photos = get_photos_by_globs(args.globs)
    else:
        # No explicit selection: operate on every photo under the cwd.
        photos = search_in_cwd(yield_photos=True, yield_albums=False)
    for photo in photos:
        if action == 'add':
            photo.add_tag(tag)
        elif action == 'remove':
            photo.remove_tag(tag)
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def delete_argparse(args):
    '''
    Delete the selected photos and/or albums from the database, optionally
    removing the photo files from disk too, then offer to commit.
    '''
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    need_commit = False
    if args.photo_id_args or args.photo_search_args:
        photos = get_photos_from_args(args)
        for photo in photos:
            photo.delete(delete_file=args.delete_file)
            need_commit = True
    if args.album_id_args or args.album_search_args:
        albums = get_albums_from_args(args)
        for album in albums:
            album.delete()
            need_commit = True
    # Skip the commit prompt entirely when nothing changed.
    if not need_commit:
        return 0
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def digest_directory_argparse(args):
    '''
    Ingest the given directories into the database (photos and, optionally,
    albums), honoring the include/exclude, ratelimit and recursion options,
    then offer to commit.
    '''
    directories = pipeable.input(args.directory, strip=True, skip_blank=True)
    directories = [pathclass.Path(d) for d in directories]
    # Validate all inputs before ingesting anything.
    for directory in directories:
        directory.assert_is_directory()
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    need_commit = False
    for directory in directories:
        digest = photodb.digest_directory(
            directory,
            exclude_directories=args.exclude_directories,
            exclude_filenames=args.exclude_filenames,
            glob_directories=args.glob_directories,
            glob_filenames=args.glob_filenames,
            hash_kwargs={'bytes_per_second': args.hash_bytes_per_second},
            make_albums=args.make_albums,
            new_photo_ratelimit=args.ratelimit,
            recurse=args.recurse,
            yield_albums=True,
            yield_photos=True,
        )
        # Drain the generator; any yielded item means the db changed.
        for result in digest:
            # print(result)
            need_commit = True
    if not need_commit:
        return 0
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def easybake_argparse(args):
    '''Run the easybake tag-string processor on each input, then offer to commit.'''
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    for eb_string in args.eb_strings:
        for (action, tagname) in photodb.easybake(eb_string):
            print(action, tagname)
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def export_symlinks_argparse(args):
    '''
    Export the selected albums/photos as symlinks under args.destination.
    With --prune, afterwards remove symlinks (and then empty directories)
    that were not part of this export.
    '''
    destination = pathclass.Path(args.destination)
    destination.makedirs(exist_ok=True)
    # Every path yielded by the exporters belongs to the current export and
    # must survive pruning.
    total_paths = set()
    if args.album_id_args or args.album_search_args:
        albums = get_albums_from_args(args)
        export = export_symlinks_albums(
            albums,
            destination,
            dry_run=args.dry_run,
        )
        total_paths.update(export)
    if args.photo_id_args or args.photo_search_args:
        photos = get_photos_from_args(args)
        export = export_symlinks_photos(
            photos,
            destination,
            dry_run=args.dry_run,
        )
        total_paths.update(export)
    if not args.prune or args.dry_run:
        return 0
    # Prune: delete any symlink under destination that this export did not touch.
    symlinks = spinal.walk(destination, yield_directories=True, yield_files=True)
    symlinks = set(path for path in symlinks if path.is_link)
    symlinks = symlinks.difference(total_paths)
    for old_symlink in symlinks:
        print(f'Pruning {old_symlink}.')
        os.remove(old_symlink)
        if not old_symlink.parent.listdir():
            os.rmdir(old_symlink.parent)
    # Remove directories left empty by the pruning, working upwards.
    checkdirs = set(spinal.walk(destination, yield_directories=True, yield_files=False))
    while checkdirs:
        check = checkdirs.pop()
        if check not in destination:
            continue
        if len(check.listdir()) == 0:
            os.rmdir(check)
            checkdirs.add(check.parent)
    return 0
def generate_thumbnail_argparse(args):
    '''
    Generate thumbnails for the selected photos (or all photos under the cwd),
    then offer to commit. Ctrl-C stops generation but keeps finished work.
    '''
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    if args.photo_id_args or args.photo_search_args:
        photos = get_photos_from_args(args)
    else:
        photos = search_in_cwd(yield_photos=True, yield_albums=False)
    need_commit = False
    try:
        for photo in photos:
            photo.generate_thumbnail()
            need_commit = True
    except KeyboardInterrupt:
        # Allow an interrupted run to still commit what it finished.
        pass
    if not need_commit:
        return 0
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def init_argparse(args):
    '''Create and commit a brand-new Etiquette database in the current directory.'''
    etiquette.photodb.PhotoDB(create=True).commit()
    return 0
def purge_deleted_files_argparse(args):
    '''
    Remove database entries for photos whose files no longer exist on disk,
    limited to the selected photos (or everything under the cwd).
    '''
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    if args.photo_id_args or args.photo_search_args:
        photos = get_photos_from_args(args)
    else:
        photos = search_in_cwd(yield_photos=True, yield_albums=False)
    need_commit = False
    for deleted in photodb.purge_deleted_files(photos):
        need_commit = True
        print(deleted)
    if not need_commit:
        return 0
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def purge_empty_albums_argparse(args):
    '''
    Delete albums that contain no photos, limited to the selected albums
    (or all albums associated with directories under the cwd).
    '''
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    # We do not check args.album_search_args because currently it is not
    # possible for search results to find empty albums on account of the fact
    # that albums are only yielded when they contain some result photo.
    if args.album_id_args:
        albums = get_albums_from_args(args)
    else:
        albums = photodb.get_albums_within_directory(pathclass.cwd())
    need_commit = False
    for deleted in photodb.purge_empty_albums(albums):
        need_commit = True
        print(deleted)
    if not need_commit:
        return 0
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def reload_metadata_argparse(args):
    '''
    Re-read file metadata (size, hash, etc.) for the selected photos. Without
    --force, only photos whose mtime or size changed on disk are reloaded.
    Ctrl-C stops the scan but keeps finished work.
    '''
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    if args.photo_id_args or args.photo_search_args:
        photos = get_photos_from_args(args)
    else:
        photos = search_in_cwd(yield_photos=True, yield_albums=False)
    hash_kwargs = {
        'bytes_per_second': args.hash_bytes_per_second,
        'callback_progress': spinal.callback_progress_v1,
    }
    need_commit = False
    try:
        for photo in photos:
            # Files missing from disk can't be reloaded; skip them.
            if not photo.real_path.is_file:
                continue
            need_reload = (
                args.force or
                photo.mtime != photo.real_path.stat.st_mtime or
                photo.bytes != photo.real_path.stat.st_size
            )
            if not need_reload:
                continue
            photo.reload_metadata(hash_kwargs=hash_kwargs)
            need_commit = True
    except KeyboardInterrupt:
        pass
    if not need_commit:
        return 0
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def relocate_argparse(args):
    '''Point an existing photo record at a new filepath, then offer to commit.'''
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    target_photo = photodb.get_photo(args.photo_id)
    target_photo.relocate(args.filepath)
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def search_argparse(args):
    '''Print the absolute path of every photo matching the search arguments.'''
    for matched_photo in search_by_argparse(args, yield_photos=True):
        print(matched_photo.real_path.absolute_path)
    return 0
def show_associated_directories_argparse(args):
    '''
    Print 'album | "dir1" "dir2" ...' for each selected album (or every album
    under the cwd) that has at least one associated directory.
    '''
    if args.album_id_args or args.album_search_args:
        albums = get_albums_from_args(args)
    else:
        albums = search_in_cwd(yield_photos=False, yield_albums=True)
    for album in albums:
        directories = album.get_associated_directories()
        if not directories:
            continue
        directories = [f'"{d.absolute_path}"' for d in directories]
        directories = ' '.join(directories)
        print(f'{album} | {directories}')
    return 0
def set_unset_searchhidden_argparse(args, searchhidden):
    '''
    Shared worker for set_searchhidden / unset_searchhidden: flip the flag on
    the selected photos (including photos inside selected albums), then offer
    to commit.
    '''
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    # Only target photos whose flag is currently the opposite of the new value.
    if args.photo_search_args:
        args.photo_search_args.is_searchhidden = not searchhidden
    if args.album_search_args:
        args.album_search_args.is_searchhidden = not searchhidden
    if args.any_id_args:
        photos = get_photos_from_args(args)
        albums = get_albums_from_args(args)
        photos.extend(photo for album in albums for photo in album.walk_photos())
    else:
        photos = search_in_cwd(yield_photos=True, yield_albums=False)
    for photo in photos:
        print(photo)
        photo.set_searchhidden(searchhidden)
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def tag_breplace_argparse(args):
    '''
    Batch-rename tags by substring (or regex with --regex) replacement.
    All renames are computed and shown for confirmation before any is applied;
    with --set_synonym, the old name is kept as a synonym of the new one.
    '''
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    renames = []
    tag_names = photodb.get_all_tag_names()
    # Collisions are checked against tag names AND existing synonyms.
    all_names = tag_names.union(photodb.get_all_synonyms())
    for tag_name in tag_names:
        if args.regex:
            new_name = re.sub(args.replace_from, args.replace_to, tag_name)
        else:
            new_name = tag_name.replace(args.replace_from, args.replace_to)
        new_name = photodb.normalize_tagname(new_name)
        if new_name == tag_name:
            continue
        if new_name in all_names:
            raise etiquette.exceptions.TagExists(new_name)
        if args.set_synonym:
            printline = f'{tag_name} -> {new_name}+{tag_name}'
        else:
            printline = f'{tag_name} -> {new_name}'
        renames.append((tag_name, new_name, printline))
    # Preview the whole batch and get permission before touching anything.
    if not args.autoyes:
        for (tag_name, new_name, printline) in renames:
            print(printline)
        if not interactive.getpermission('Ok?', must_pick=True):
            return 0
    for (tag_name, new_name, printline) in renames:
        print(printline)
        tag = photodb.get_tag(tag_name)
        tag.rename(new_name)
        if args.set_synonym:
            tag.add_synonym(tag_name)
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def tag_list_argparse(args):
    # Print every tag name, one per line; synonyms print as synonym=mastertag.
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    tags = photodb.get_all_tag_names()
    synonyms = photodb.get_all_synonyms()
    # Sort tags and synonyms together into one alphabetical listing.
    keys = sorted(tags.union(synonyms.keys()))
    for key in keys:
        if key in synonyms:
            print(f'{key}={synonyms[key]}')
        else:
            print(key)
    return 0
DOCSTRING = '''
Etiquette CLI
=============
This is the command-line interface for Etiquette, so that you can automate your
database and integrate it into other scripts.
The following commands are available:
{add_tag}
{remove_tag}
{delete}
{digest}
{easybake}
{export_symlinks}
{generate_thumbnail}
{init}
{purge_deleted_files}
{purge_empty_albums}
{reload_metadata}
{relocate}
{search}
{show_associated_directories}
{set_searchhidden}
{unset_searchhidden}
{tag_breplace}
{tag_list}
You can add --yes to avoid the "Commit?" prompt on commands that modify the db.
TO SEE DETAILS ON EACH COMMAND, RUN
> etiquette_cli.py <command> --help
'''
SUB_DOCSTRINGS = dict(
add_tag='''
add_tag:
Add a tag to photos by a filename glob or by search results.
> etiquette_cli.py add_tag tag_name glob_patterns
> etiquette_cli.py add_tag tag_name --search searchargs
Examples:
> etiquette_cli.py add_tag wallpaper wall*.jpg wall*.png
> etiquette_cli.py add_tag author.author_voussoir --search --tag-forbids author
See etiquette_cli.py search --help for more info about searchargs.
''',
remove_tag='''
remove_tag:
Remove a tag from photos by a filename glob or by search results.
> etiquette_cli.py remove_tag tag_name glob_patterns
> etiquette_cli.py remove_tag tag_name --search searchargs
Examples:
> etiquette_cli.py remove_tag watchlist spongebob*.mp4
> etiquette_cli.py remove_tag watchlist --search --tag-musts directed_by_michael_bay
See etiquette_cli.py search --help for more info about searchargs.
''',
delete='''
delete:
Remove photos or albums from the database.
flags:
--delete_file:
Delete the file from disk after committing.
Your config.json file's recycle_instead_of_delete will influence this.
Without this flag, photos are removed from the db but remain on disk.
> etiquette_cli.py delete --photos id id id
> etiquette_cli.py delete --search searchargs
> etiquette_cli.py delete --albums id id id
> etiquette_cli.py delete --album-search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
digest='''
digest:
Digest a directory, adding new files as Photos into the database.
> etiquette_cli.py digest directory <flags>
flags:
--exclude_directories A B C:
Any directories matching any pattern of A, B, C... will be skipped.
These patterns may be absolute paths like 'D:\\temp', plain names like
'thumbnails' or glob patterns like 'build_*'.
--exclude_filenames A B C:
Any filenames matching any pattern of A, B, C... will be skipped.
These patterns may be absolute paths like 'D:\\somewhere\\config.json',
plain names like 'thumbs.db' or glob patterns like '*.temp'.
--glob_directories A B C:
Only directories matching any pattern of A, B, C... will be digested.
These patterns may be plain names or glob patterns like '2021*'
--glob_filenames A B C:
Only filenames matching any pattern of A, B, C... will be digested.
These patterns may be plain names or glob patterns like '*.jpg'
--no_albums:
Do not create any albums. By default, albums are created and nested to
match the directory structure.
--ratelimit X:
Limit the ingest of new Photos to only one per X seconds. This can be
used to reduce system load or to make sure that two photos don't get the
same `created` timestamp.
--no_recurse:
Do not recurse into subdirectories. Only create Photos from files in
the current directory.
Examples:
> etiquette_cli.py digest media --ratelimit 1
> etiquette_cli.py digest photos --no-recurse --no-albums --ratelimit 0.25
> etiquette_cli.py digest . --glob-filenames *.jpg --exclude-filenames thumb*
''',
easybake='''
easybake:
Create and manipulate tags by easybake strings.
> etiquette_cli.py easybake eb_string
''',
export_symlinks='''
export_symlinks:
Search for photos or albums, then create symlinks pointing to the results.
THIS IS STILL A BIT EXPERIMENTAL.
This can be used to gather up search results for the purpose of further
uploading, transfering, etc. with other applications.
Symlinks point to files (if result is a photo) or directories (if result is
an album with an associated directory).
Albums are limited to only one associated directory since the output
symlink can't point to two places at once.
> etiquette_cli.py export_symlinks --destination directory --search searchargs
> etiquette_cli.py export_symlinks --destination directory --album-search searchargs
flags:
--destination X:
A path to a directory into which the symlinks will be placed.
--dry:
Print the results without actually creating the symlinks.
--prune:
In the destination directory, any existing symlinks whose target no
longer exists will be deleted.
See etiquette_cli.py search --help for more info about searchargs.
''',
generate_thumbnail='''
generate_thumbnail:
Generate thumbnails for photos.
With no args, all files under the cwd will be thumbnailed.
Or, you can pass specific photo ids or searchargs.
> etiquette_cli.py generate_thumbnail
> etiquette_cli.py generate_thumbnail --photos id id id
> etiquette_cli.py generate_thumbnail --search searchargs
Examples:
> etiquette_cli.py generate_thumbnail --search --has-thumbnail no
See etiquette_cli.py search --help for more info about searchargs.
''',
init='''
init:
Create a new Etiquette database in the current directory.
> etiquette_cli.py init
''',
purge_deleted_files='''
purge_deleted_files:
Delete any Photo objects whose file no longer exists on disk.
> etiquette_cli.py purge_deleted_files
> etiquette_cli.py purge_deleted_files --photos id id id
> etiquette_cli.py purge_deleted_files --search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
purge_empty_albums='''
purge_empty_albums:
Delete any albums which have no child albums or photos.
Consider running purge_deleted_files first, so that albums containing
deleted files will get cleared out and then caught by this function.
With no args, all albums will be checked.
Or you can pass specific album ids. (searchargs is not available since
albums only appear in search results when a matching photo is found, and
we're looking for albums with no photos!)
> etiquette_cli.py purge_empty_albums
> etiquette_cli.py purge_empty_albums --albums id id id
''',
reload_metadata='''
reload_metadata:
Reload photos' metadata by reading the files from disk.
With no args, all files under the cwd will be reloaded.
Or, you can pass specific photo ids or searchargs.
> etiquette_cli.py reload_metadata
> etiquette_cli.py reload_metadata --photos id id id
> etiquette_cli.py reload_metadata --search searchargs
flags:
--force:
By default, we wil skip any files that have the same mtime and byte
size as before. You can pass --force to always reload.
--hash_bytes_per_second X:
A string like "10mb" to limit the speed of file hashing for the purpose
of reducing system load.
See etiquette_cli.py search --help for more info about searchargs.
''',
relocate='''
relocate:
Change a photo's filepath. Used for updating photos that have been changed
by external tools.
> etiquette_cli.py relocate photo_id filepath
''',
search='''
search:
Search for photos and albums with complex operators.
> etiquette_cli.py search searchargs
> etiquette_cli.py search --album-search searchargs
Searchargs:
--area X-Y:
Photo/video width*height between X and Y.
--width X-Y:
Photo/video width between X and Y.
--height X-Y:
Photo/video height between X and Y.
--ratio X-Y:
Photo/video aspect ratio between X and Y.
--bytes X-Y:
File size in bytes between X and Y.
--duration X-Y:
Media duration between X and Y seconds.
--author X:
Photo authored by user with username X.
--created X-Y:
Photo creation date between X and Y unix timestamp.
--extension A,B,C:
Photo with any extension of A, B, C...
--extension_not A,B,C:
Photo without any extension of A, B, C...
--filename X:
Search terms for Photo's filename.
--has_tags yes/no/null:
If yes, Photo must have at least one tag.
If no, Photo must have no tags.
If null, doesn't matter.
--has_thumbnail yes/no/null:
--is_searchhidden yes/no/null:
--mimetype A,B,C:
Photo with any mimetype of A, B, C...
--sha256 A,B,C:
Photo with any sha256 of A, B, C...
--tag_musts A,B,C:
Photo must have all tags A and B and C...
--tag_mays A,B,C:
Photo must have at least one tag of A, B, C...
--tag_forbids A,B,C:
Photo must not have any tags of A, B, C...
--tag_expression X:
Complex expression string to match tags.
--limit X:
Limit results to first X items.
--offset X:
Skip the first X items.
--orderby X-Y:
Order the results by property X in direction Y. E.g. created-desc or
bytes-asc.
''',
show_associated_directories='''
show_associated_directories:
Show the associated directories for albums.
> etiquette_cli.py show_associated_directories
> etiquette_cli.py show_associated_directories --albums id id id
> etiquette_cli.py show_associated_directories --album-search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
set_searchhidden='''
set_searchhidden:
Mark photos as searchhidden.
> etiquette_cli.py set_searchhidden --photos id id id
> etiquette_cli.py set_searchhidden --search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
unset_searchhidden='''
unset_searchhidden:
Unmark photos as searchhidden.
> etiquette_cli.py unset_searchhidden --photos id id id
> etiquette_cli.py unset_searchhidden --search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
tag_breplace='''
tag_breplace:
For all tags in the database, use find-and-replace to rename the tags.
> etiquette_cli.py tag_breplace replace_from replace_to
''',
tag_list='''
tag_list:
Show all tags in the database.
> etiquette_cli.py tag_list
''',
)
# Interpolate a one-line preview of each subcommand's docstring into the
# {placeholders} of the main DOCSTRING.
DOCSTRING = betterhelp.add_previews(DOCSTRING, SUB_DOCSTRINGS)
@vlogging.main_decorator
def main(argv):
    # CLI entry point. Before argparse ever runs, argv is manually split into
    # five buckets: the primary subcommand args, photo ids (--photos), photo
    # search args (--search), album ids (--albums), and album search args
    # (--album-search). The search buckets are later re-parsed by the
    # `search` subparser inside postprocessor, which is how every subcommand
    # reuses the full search syntax.
    parser = argparse.ArgumentParser(description=__doc__)
    subparsers = parser.add_subparsers()
    primary_args = []
    photo_id_args = []
    photo_search_args = []
    album_id_args = []
    album_search_args = []
    # `mode` points at whichever bucket subsequent args should fall into.
    mode = primary_args
    for arg in argv:
        if 0:
            pass
        elif arg in {'--search', '--photo_search', '--photo-search'}:
            mode = photo_search_args
        elif arg in {'--album_search', '--album-search'}:
            mode = album_search_args
        elif arg == '--photos':
            mode = photo_id_args
        elif arg == '--albums':
            mode = album_id_args
        else:
            mode.append(arg)
    # One subparser per subcommand; each sets args.func to its handler.
    p_add_tag = subparsers.add_parser('add_tag', aliases=['add-tag'])
    p_add_tag.add_argument('tag_name')
    p_add_tag.add_argument('globs', nargs='*')
    p_add_tag.add_argument('--yes', dest='autoyes', action='store_true')
    p_add_tag.set_defaults(func=lambda args: add_remove_tag_argparse(args, action='add'))
    p_remove_tag = subparsers.add_parser('remove_tag', aliases=['remove-tag'])
    p_remove_tag.add_argument('tag_name')
    p_remove_tag.add_argument('globs', nargs='*')
    p_remove_tag.add_argument('--yes', dest='autoyes', action='store_true')
    p_remove_tag.set_defaults(func=lambda args: add_remove_tag_argparse(args, action='remove'))
    p_delete = subparsers.add_parser('delete')
    p_delete.add_argument('--delete_file', '--delete-file', action='store_true')
    p_delete.add_argument('--yes', dest='autoyes', action='store_true')
    p_delete.set_defaults(func=delete_argparse)
    p_digest = subparsers.add_parser('digest', aliases=['digest_directory', 'digest-directory'])
    p_digest.add_argument('directory')
    p_digest.add_argument('--exclude_directories', '--exclude-directories', nargs='+', default=None)
    p_digest.add_argument('--exclude_filenames', '--exclude-filenames', nargs='+', default=None)
    p_digest.add_argument('--glob_directories', '--glob-directories', nargs='+', default=None)
    p_digest.add_argument('--glob_filenames', '--glob-filenames', nargs='+', default=None)
    p_digest.add_argument('--no_albums', '--no-albums', dest='make_albums', action='store_false', default=True)
    p_digest.add_argument('--ratelimit', dest='ratelimit', type=float, default=0.2)
    p_digest.add_argument('--no_recurse', '--no-recurse', dest='recurse', action='store_false', default=True)
    p_digest.add_argument('--hash_bytes_per_second', '--hash-bytes-per-second', default=None)
    p_digest.add_argument('--yes', dest='autoyes', action='store_true')
    p_digest.set_defaults(func=digest_directory_argparse)
    p_easybake = subparsers.add_parser('easybake')
    p_easybake.add_argument('eb_strings', nargs='+')
    p_easybake.add_argument('--yes', dest='autoyes', action='store_true')
    p_easybake.set_defaults(func=easybake_argparse)
    p_export_symlinks = subparsers.add_parser('export_symlinks', aliases=['export-symlinks'])
    p_export_symlinks.add_argument('--destination', dest='destination', required=True)
    p_export_symlinks.add_argument('--dry', dest='dry_run', action='store_true')
    p_export_symlinks.add_argument('--prune', dest='prune', action='store_true')
    p_export_symlinks.set_defaults(func=export_symlinks_argparse)
    p_generate_thumbnail = subparsers.add_parser('generate_thumbnail', aliases=['generate-thumbnail'])
    p_generate_thumbnail.add_argument('--yes', dest='autoyes', action='store_true')
    p_generate_thumbnail.set_defaults(func=generate_thumbnail_argparse)
    p_init = subparsers.add_parser('init', aliases=['create'])
    p_init.set_defaults(func=init_argparse)
    p_purge_deleted_files = subparsers.add_parser('purge_deleted_files', aliases=['purge-deleted-files'])
    p_purge_deleted_files.add_argument('--yes', dest='autoyes', action='store_true')
    p_purge_deleted_files.set_defaults(func=purge_deleted_files_argparse)
    p_purge_empty_albums = subparsers.add_parser('purge_empty_albums', aliases=['purge-empty-albums'])
    p_purge_empty_albums.add_argument('--yes', dest='autoyes', action='store_true')
    p_purge_empty_albums.set_defaults(func=purge_empty_albums_argparse)
    p_reload_metadata = subparsers.add_parser('reload_metadata', aliases=['reload-metadata'])
    p_reload_metadata.add_argument('--hash_bytes_per_second', '--hash-bytes-per-second', default=None)
    p_reload_metadata.add_argument('--force', action='store_true')
    p_reload_metadata.add_argument('--yes', dest='autoyes', action='store_true')
    p_reload_metadata.set_defaults(func=reload_metadata_argparse)
    p_relocate = subparsers.add_parser('relocate')
    p_relocate.add_argument('photo_id')
    p_relocate.add_argument('filepath')
    p_relocate.add_argument('--yes', dest='autoyes', action='store_true')
    p_relocate.set_defaults(func=relocate_argparse)
    # The `search` subparser doubles as the grammar for the --search and
    # --album-search buckets (see postprocessor below).
    p_search = subparsers.add_parser('search')
    p_search.add_argument('--area', dest='area', default=None)
    p_search.add_argument('--width', dest='width', default=None)
    p_search.add_argument('--height', dest='height', default=None)
    p_search.add_argument('--ratio', dest='ratio', default=None)
    p_search.add_argument('--bytes', dest='bytes', default=None)
    p_search.add_argument('--duration', dest='duration', default=None)
    p_search.add_argument('--author', dest='author', default=None)
    p_search.add_argument('--created', dest='created', default=None)
    p_search.add_argument('--extension', dest='extension', default=None)
    p_search.add_argument('--extension_not', '--extension-not', dest='extension_not', default=None)
    p_search.add_argument('--filename', dest='filename', default=None)
    p_search.add_argument('--has_tags', '--has-tags', dest='has_tags', default=None)
    p_search.add_argument('--has_thumbnail', '--has-thumbnail', dest='has_thumbnail', default=None)
    p_search.add_argument('--is_searchhidden', '--is-searchhidden', dest='is_searchhidden', default=False)
    p_search.add_argument('--sha256', default=None)
    p_search.add_argument('--mimetype', dest='mimetype', default=None)
    p_search.add_argument('--tag_musts', '--tag-musts', dest='tag_musts', default=None)
    p_search.add_argument('--tag_mays', '--tag-mays', dest='tag_mays', default=None)
    p_search.add_argument('--tag_forbids', '--tag-forbids', dest='tag_forbids', default=None)
    p_search.add_argument('--tag_expression', '--tag-expression', dest='tag_expression', default=None)
    p_search.add_argument('--limit', dest='limit', default=None)
    p_search.add_argument('--offset', dest='offset', default=None)
    p_search.add_argument('--orderby', dest='orderby', default='basename-ASC')
    # p_search.add_argument('--yield_albums', '--yield-albums', dest='yield_albums', default=None)
    p_search.set_defaults(func=search_argparse)
    p_show_associated_directories = subparsers.add_parser('show_associated_directories', aliases=['show-associated-directories'])
    p_show_associated_directories.set_defaults(func=show_associated_directories_argparse)
    p_set_searchhidden = subparsers.add_parser('set_searchhidden', aliases=['set-searchhidden'])
    p_set_searchhidden.add_argument('--yes', dest='autoyes', action='store_true')
    p_set_searchhidden.set_defaults(func=lambda args: set_unset_searchhidden_argparse(args, searchhidden=True))
    p_unset_searchhidden = subparsers.add_parser('unset_searchhidden', aliases=['unset-searchhidden'])
    p_unset_searchhidden.add_argument('--yes', dest='autoyes', action='store_true')
    p_unset_searchhidden.set_defaults(func=lambda args: set_unset_searchhidden_argparse(args, searchhidden=False))
    p_tag_breplace = subparsers.add_parser('tag_breplace', aliases=['tag-breplace'])
    p_tag_breplace.add_argument('replace_from')
    p_tag_breplace.add_argument('replace_to')
    p_tag_breplace.add_argument('--set_synonym', '--set-synonym', dest='set_synonym', action='store_true')
    p_tag_breplace.add_argument('--regex', dest='regex', action='store_true')
    p_tag_breplace.add_argument('--yes', dest='autoyes', action='store_true')
    p_tag_breplace.set_defaults(func=tag_breplace_argparse)
    p_tag_list = subparsers.add_parser('tag_list', aliases=['tag-list'])
    p_tag_list.set_defaults(func=tag_list_argparse)
    ##
    def postprocessor(args):
        # Attach the pre-split buckets to the parsed namespace: search
        # buckets are parsed with the `search` grammar, id buckets are split
        # on commas/spaces into flat id lists.
        args.photo_search_args = p_search.parse_args(photo_search_args) if photo_search_args else None
        args.album_search_args = p_search.parse_args(album_search_args) if album_search_args else None
        args.photo_id_args = [id for arg in photo_id_args for id in stringtools.comma_space_split(arg)]
        args.album_id_args = [id for arg in album_id_args for id in stringtools.comma_space_split(arg)]
        args.any_id_args = bool(
            args.photo_search_args or
            args.album_search_args or
            args.photo_id_args or
            args.album_id_args
        )
        return args
    try:
        return betterhelp.subparser_main(
            primary_args,
            parser,
            main_docstring=DOCSTRING,
            sub_docstrings=SUB_DOCSTRINGS,
            args_postprocessor=postprocessor,
        )
    except etiquette.exceptions.NoClosestPhotoDB as exc:
        # No database was found in the cwd or any ancestor directory.
        pipeable.stderr(exc.error_message)
        pipeable.stderr('Try `etiquette_cli.py init` to create the database.')
        return 1
# Script entry point; the process exit status is main's return value.
if __name__ == '__main__':
    raise SystemExit(main(sys.argv[1:]))
| 33.054986
| 129
| 0.687662
|
import argparse
import os
import re
import sys
from voussoirkit import betterhelp
from voussoirkit import interactive
from voussoirkit import pathclass
from voussoirkit import pipeable
from voussoirkit import spinal
from voussoirkit import stringtools
from voussoirkit import vlogging
import etiquette
def export_symlinks_albums(albums, destination, dry_run):
    """
    Create a symlink in `destination` for each album's associated directory,
    yielding the symlink paths (existing and newly created alike, so the
    caller can prune anything that was not yielded). Albums must have exactly
    one associated directory, since one symlink cannot point to two targets.

    With dry_run, the would-be paths are yielded without touching the disk.
    """
    album_directory_names = etiquette.helpers.decollide_names(albums, lambda a: a.display_name)
    for (album, directory_name) in album_directory_names.items():
        associated_directories = album.get_associated_directories()
        # Bugfix: previously album_dir was only assigned when exactly one
        # directory existed, so albums with zero or multiple associated
        # directories silently reused the previous iteration's album_dir
        # (or raised NameError on the first iteration). Skip them instead.
        if len(associated_directories) != 1:
            continue
        album_dir = associated_directories.pop()
        directory_name = etiquette.helpers.remove_path_badchars(directory_name)
        symlink_dir = destination.with_child(directory_name)
        if dry_run:
            yield symlink_dir
            continue
        if not album_dir.exists:
            # Source directory has disappeared; nothing to link to.
            continue
        if symlink_dir.exists:
            # Already exported by a previous run.
            yield symlink_dir
            continue
        print(album, symlink_dir)
        os.symlink(src=album_dir, dst=symlink_dir)
        yield symlink_dir
def export_symlinks_photos(photos, destination, dry_run):
    """
    Create a symlink in `destination` for each photo's file, yielding the
    symlink paths (existing and new alike). Filenames are de-collided so
    photos with identical basenames do not fight over one link name.

    With dry_run, the would-be paths are yielded without touching the disk.
    """
    names_by_photo = etiquette.helpers.decollide_names(photos, lambda photo: photo.basename)
    for (photo, name) in names_by_photo.items():
        link = destination.with_child(name)
        if dry_run:
            # Preview only; report without creating anything.
            yield link
        elif not photo.real_path.exists:
            # Source file has disappeared; nothing to link to.
            pass
        elif link.exists:
            # Already exported by a previous run.
            yield link
        else:
            print(link.absolute_path)
            os.symlink(src=photo.real_path, dst=link)
            yield link
def get_photos_by_glob(pattern):
    """
    Yield the Photo objects whose files match the glob pattern, resolved
    relative to the cwd. Matching files not present in the database are
    silently skipped.
    """
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    pattern = pathclass.normalize_sep(pattern)
    if pattern == '**':
        # Special case: '**' means everything under the cwd recursively,
        # which the search facility already handles.
        # Bugfix: this used `return search_in_cwd(...)`, but inside a
        # generator function `return value` only stops iteration (the value
        # is stashed on StopIteration), so the '**' case yielded nothing.
        yield from search_in_cwd(yield_photos=True, yield_albums=False)
        return
    cwd = pathclass.cwd()
    # Split into the containing folder and the filename portion of the glob.
    (folder, pattern) = os.path.split(pattern)
    if folder:
        folder = cwd.join(folder)
    else:
        folder = cwd
    files = [f for f in folder.glob(pattern) if f.is_file]
    for file in files:
        try:
            photo = photodb.get_photo_by_path(file)
            yield photo
        except etiquette.exceptions.NoSuchPhoto:
            # File exists on disk but is not tracked by the database.
            pass
def get_photos_by_globs(patterns):
    """Chain the results of get_photos_by_glob for every given pattern."""
    for glob_pattern in patterns:
        for photo in get_photos_by_glob(glob_pattern):
            yield photo
def get_photos_from_args(args):
    """Collect the Photos selected by --photos ids and/or --search args."""
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    results = []
    if args.photo_id_args:
        results += photodb.get_photos_by_id(args.photo_id_args)
    if args.photo_search_args:
        results += search_by_argparse(args.photo_search_args, yield_photos=True)
    return results
def get_albums_from_args(args):
    """Collect the Albums selected by --albums ids and/or --album-search args."""
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    results = []
    if args.album_id_args:
        results += photodb.get_albums_by_id(args.album_id_args)
    if args.album_search_args:
        results += search_by_argparse(args.album_search_args, yield_albums=True)
    return results
def search_in_cwd(**kwargs):
    """Run a PhotoDB search constrained to the current working directory."""
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    return photodb.search(within_directory=pathclass.cwd(), **kwargs)
def search_by_argparse(args, yield_albums=False, yield_photos=False):
    # Translate a namespace parsed by the `search` subparser into a PhotoDB
    # search restricted to the current working directory. The attribute names
    # mirror the search parser's dests one-for-one.
    return search_in_cwd(
        area=args.area,
        width=args.width,
        height=args.height,
        ratio=args.ratio,
        bytes=args.bytes,
        duration=args.duration,
        author=args.author,
        created=args.created,
        extension=args.extension,
        extension_not=args.extension_not,
        filename=args.filename,
        has_tags=args.has_tags,
        has_thumbnail=args.has_thumbnail,
        is_searchhidden=args.is_searchhidden,
        sha256=args.sha256,
        mimetype=args.mimetype,
        tag_musts=args.tag_musts,
        tag_mays=args.tag_mays,
        tag_forbids=args.tag_forbids,
        tag_expression=args.tag_expression,
        limit=args.limit,
        offset=args.offset,
        orderby=args.orderby,
        yield_albums=yield_albums,
        yield_photos=yield_photos,
    )
def add_remove_tag_argparse(args, action):
    """
    Add or remove one tag on a batch of photos, chosen by ids/search args,
    by filename globs, or (by default) everything under the cwd.
    """
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    tag = photodb.get_tag(name=args.tag_name)
    # Pick the photo source in priority order: explicit selection, globs, cwd.
    if args.any_id_args:
        photos = get_photos_from_args(args)
    elif args.globs:
        photos = get_photos_by_globs(args.globs)
    else:
        photos = search_in_cwd(yield_photos=True, yield_albums=False)
    # Dispatch table keeps the loop body free of branching.
    appliers = {
        'add': lambda photo: photo.add_tag(tag),
        'remove': lambda photo: photo.remove_tag(tag),
    }
    apply_tag = appliers.get(action)
    for photo in photos:
        if apply_tag is not None:
            apply_tag(photo)
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def delete_argparse(args):
    """
    Remove the selected photos and/or albums from the database, optionally
    deleting photo files from disk, then offer to commit.
    """
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    dirty = False
    if args.photo_id_args or args.photo_search_args:
        for photo in get_photos_from_args(args):
            photo.delete(delete_file=args.delete_file)
            dirty = True
    if args.album_id_args or args.album_search_args:
        for album in get_albums_from_args(args):
            album.delete()
            dirty = True
    # Only prompt when something actually changed.
    if dirty and (args.autoyes or interactive.getpermission('Commit?')):
        photodb.commit()
    return 0
def digest_directory_argparse(args):
    """
    Digest one or more directories into the database, adding new files as
    Photos and (unless disabled) mirroring the folder tree as Albums.
    """
    lines = pipeable.input(args.directory, strip=True, skip_blank=True)
    paths = [pathclass.Path(line) for line in lines]
    # Validate every path up front so we fail before touching the database.
    for path in paths:
        path.assert_is_directory()
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    need_commit = False
    for path in paths:
        results = photodb.digest_directory(
            path,
            exclude_directories=args.exclude_directories,
            exclude_filenames=args.exclude_filenames,
            glob_directories=args.glob_directories,
            glob_filenames=args.glob_filenames,
            hash_kwargs={'bytes_per_second': args.hash_bytes_per_second},
            make_albums=args.make_albums,
            new_photo_ratelimit=args.ratelimit,
            recurse=args.recurse,
            yield_albums=True,
            yield_photos=True,
        )
        # The digest is a generator; it must be drained for the work to run.
        for _ in results:
            need_commit = True
    if not need_commit:
        return 0
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def easybake_argparse(args):
    """Apply each easybake string to the tag hierarchy and print the changes."""
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    for bake_string in args.eb_strings:
        for (verb, tagname) in photodb.easybake(bake_string):
            print(f'{verb} {tagname}')
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def export_symlinks_argparse(args):
    # Export search results as symlinks under --destination, and with --prune
    # remove any pre-existing symlinks there that are no longer part of the
    # result set, cleaning up directories emptied by the pruning.
    destination = pathclass.Path(args.destination)
    destination.makedirs(exist_ok=True)
    # Every path yielded by the exporters, whether new or pre-existing.
    # Anything in the destination that is NOT in this set is prune-eligible.
    total_paths = set()
    if args.album_id_args or args.album_search_args:
        albums = get_albums_from_args(args)
        export = export_symlinks_albums(
            albums,
            destination,
            dry_run=args.dry_run,
        )
        total_paths.update(export)
    if args.photo_id_args or args.photo_search_args:
        photos = get_photos_from_args(args)
        export = export_symlinks_photos(
            photos,
            destination,
            dry_run=args.dry_run,
        )
        total_paths.update(export)
    # Pruning is skipped entirely for dry runs.
    if not args.prune or args.dry_run:
        return 0
    # Delete stale symlinks: links in the destination tree that were not part
    # of this export.
    symlinks = spinal.walk(destination, yield_directories=True, yield_files=True)
    symlinks = set(path for path in symlinks if path.is_link)
    symlinks = symlinks.difference(total_paths)
    for old_symlink in symlinks:
        print(f'Pruning {old_symlink}.')
        os.remove(old_symlink)
        if not old_symlink.parent.listdir():
            os.rmdir(old_symlink.parent)
    # Sweep upward removing directories emptied by the pruning: when a
    # directory is removed, its parent is re-queued for another check.
    checkdirs = set(spinal.walk(destination, yield_directories=True, yield_files=False))
    while checkdirs:
        check = checkdirs.pop()
        if check not in destination:
            # Never remove anything at or above the destination root.
            continue
        if len(check.listdir()) == 0:
            os.rmdir(check)
            checkdirs.add(check.parent)
    return 0
def generate_thumbnail_argparse(args):
    """
    Generate thumbnails for the selected photos, or for everything under the
    cwd. Ctrl+C stops the work but still offers to commit the progress.
    """
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    if args.photo_id_args or args.photo_search_args:
        photos = get_photos_from_args(args)
    else:
        photos = search_in_cwd(yield_photos=True, yield_albums=False)
    did_work = False
    try:
        for photo in photos:
            photo.generate_thumbnail()
            did_work = True
    except KeyboardInterrupt:
        # Partial progress is still worth committing.
        pass
    if did_work and (args.autoyes or interactive.getpermission('Commit?')):
        photodb.commit()
    return 0
def init_argparse(args):
    """Create a brand-new Etiquette database in the current directory."""
    etiquette.photodb.PhotoDB(create=True).commit()
    return 0
def purge_deleted_files_argparse(args):
    """
    Remove Photo objects whose backing file no longer exists on disk,
    printing each one, then offer to commit.
    """
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    if args.photo_id_args or args.photo_search_args:
        candidates = get_photos_from_args(args)
    else:
        candidates = search_in_cwd(yield_photos=True, yield_albums=False)
    dirty = False
    for purged in photodb.purge_deleted_files(candidates):
        dirty = True
        print(purged)
    if dirty and (args.autoyes or interactive.getpermission('Commit?')):
        photodb.commit()
    return 0
def purge_empty_albums_argparse(args):
    """
    Delete albums that contain no child albums or photos, printing each one,
    then offer to commit.
    """
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    if args.album_id_args:
        candidates = get_albums_from_args(args)
    else:
        candidates = photodb.get_albums_within_directory(pathclass.cwd())
    dirty = False
    for purged in photodb.purge_empty_albums(candidates):
        dirty = True
        print(purged)
    if dirty and (args.autoyes or interactive.getpermission('Commit?')):
        photodb.commit()
    return 0
def reload_metadata_argparse(args):
    """
    Re-read metadata from disk for the selected photos (or everything under
    the cwd). Unless --force is given, photos whose mtime and byte size are
    unchanged are skipped. Ctrl+C stops the work but still offers to commit.
    """
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    if args.photo_id_args or args.photo_search_args:
        photos = get_photos_from_args(args)
    else:
        photos = search_in_cwd(yield_photos=True, yield_albums=False)
    hash_kwargs = {
        'bytes_per_second': args.hash_bytes_per_second,
        'callback_progress': spinal.callback_progress_v1,
    }
    need_commit = False
    try:
        for photo in photos:
            if not photo.real_path.is_file:
                continue
            # Stat once so mtime and size come from the same snapshot. The
            # original read photo.real_path.stat twice, which costs an extra
            # filesystem stat and can race with concurrent writes, comparing
            # the mtime of one version against the size of another.
            stat = photo.real_path.stat
            need_reload = (
                args.force or
                photo.mtime != stat.st_mtime or
                photo.bytes != stat.st_size
            )
            if not need_reload:
                continue
            photo.reload_metadata(hash_kwargs=hash_kwargs)
            need_commit = True
    except KeyboardInterrupt:
        # Partial progress is still worth committing.
        pass
    if not need_commit:
        return 0
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def relocate_argparse(args):
    """Point an existing Photo at a new filepath, then offer to commit."""
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    photodb.get_photo(args.photo_id).relocate(args.filepath)
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def search_argparse(args):
    """Print the absolute filepath of every photo matching the search args."""
    for result in search_by_argparse(args, yield_photos=True):
        print(result.real_path.absolute_path)
    return 0
def show_associated_directories_argparse(args):
    """
    Print each selected album alongside its associated directories, quoted
    and space-separated. Albums with no directories are omitted.
    """
    if args.album_id_args or args.album_search_args:
        albums = get_albums_from_args(args)
    else:
        albums = search_in_cwd(yield_photos=False, yield_albums=True)
    for album in albums:
        associated = album.get_associated_directories()
        if not associated:
            continue
        quoted = ' '.join(f'"{directory.absolute_path}"' for directory in associated)
        print(f'{album} | {quoted}')
    return 0
def set_unset_searchhidden_argparse(args, searchhidden):
    """
    Mark (searchhidden=True) or unmark photos as searchhidden. Given search
    args are inverted to look for photos in the opposite state, since those
    are the ones that need flipping.
    """
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    for search_args in (args.photo_search_args, args.album_search_args):
        if search_args:
            search_args.is_searchhidden = not searchhidden
    if args.any_id_args:
        photos = get_photos_from_args(args)
        # Album selections contribute every photo in the album tree.
        for album in get_albums_from_args(args):
            photos.extend(album.walk_photos())
    else:
        photos = search_in_cwd(yield_photos=True, yield_albums=False)
    for photo in photos:
        print(photo)
        photo.set_searchhidden(searchhidden)
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def tag_breplace_argparse(args):
    # Batch-rename tags by find-and-replace over every tag name: plain string
    # replacement by default, re.sub when --regex is given.
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    renames = []
    tag_names = photodb.get_all_tag_names()
    # Include synonyms so a rename cannot collide with an existing synonym.
    all_names = tag_names.union(photodb.get_all_synonyms())
    for tag_name in tag_names:
        if args.regex:
            new_name = re.sub(args.replace_from, args.replace_to, tag_name)
        else:
            new_name = tag_name.replace(args.replace_from, args.replace_to)
        new_name = photodb.normalize_tagname(new_name)
        if new_name == tag_name:
            continue
        if new_name in all_names:
            raise etiquette.exceptions.TagExists(new_name)
        if args.set_synonym:
            printline = f'{tag_name} -> {new_name}+{tag_name}'
        else:
            printline = f'{tag_name} -> {new_name}'
        renames.append((tag_name, new_name, printline))
    # Preview the whole batch and get one confirmation before touching the db.
    if not args.autoyes:
        for (tag_name, new_name, printline) in renames:
            print(printline)
        if not interactive.getpermission('Ok?', must_pick=True):
            return 0
    for (tag_name, new_name, printline) in renames:
        print(printline)
        tag = photodb.get_tag(tag_name)
        tag.rename(new_name)
        if args.set_synonym:
            # Keep the old name reachable as a synonym of the renamed tag.
            tag.add_synonym(tag_name)
    if args.autoyes or interactive.getpermission('Commit?'):
        photodb.commit()
    return 0
def tag_list_argparse(args):
    '''
    Print every tag name in sorted order; synonyms print as synonym=master.
    '''
    photodb = etiquette.photodb.PhotoDB.closest_photodb()
    tag_names = photodb.get_all_tag_names()
    synonyms = photodb.get_all_synonyms()
    for name in sorted(tag_names.union(synonyms.keys())):
        line = f'{name}={synonyms[name]}' if name in synonyms else name
        print(line)
    return 0
# Top-level help text for the CLI. The {command} placeholders are filled
# with one-line previews of each command's docstring by
# betterhelp.add_previews (applied after SUB_DOCSTRINGS is defined).
DOCSTRING = '''
Etiquette CLI
=============
This is the command-line interface for Etiquette, so that you can automate your
database and integrate it into other scripts.
The following commands are available:
{add_tag}
{remove_tag}
{delete}
{digest}
{easybake}
{export_symlinks}
{generate_thumbnail}
{init}
{purge_deleted_files}
{purge_empty_albums}
{reload_metadata}
{relocate}
{search}
{show_associated_directories}
{set_searchhidden}
{unset_searchhidden}
{tag_breplace}
{tag_list}
You can add --yes to avoid the "Commit?" prompt on commands that modify the db.
TO SEE DETAILS ON EACH COMMAND, RUN
> etiquette_cli.py <command> --help
'''
SUB_DOCSTRINGS = dict(
add_tag='''
add_tag:
Add a tag to photos by a filename glob or by search results.
> etiquette_cli.py add_tag tag_name glob_patterns
> etiquette_cli.py add_tag tag_name --search searchargs
Examples:
> etiquette_cli.py add_tag wallpaper wall*.jpg wall*.png
> etiquette_cli.py add_tag author.author_voussoir --search --tag-forbids author
See etiquette_cli.py search --help for more info about searchargs.
''',
remove_tag='''
remove_tag:
Remove a tag from photos by a filename glob or by search results.
> etiquette_cli.py remove_tag tag_name glob_patterns
> etiquette_cli.py remove_tag tag_name --search searchargs
Examples:
> etiquette_cli.py remove_tag watchlist spongebob*.mp4
> etiquette_cli.py remove_tag watchlist --search --tag-musts directed_by_michael_bay
See etiquette_cli.py search --help for more info about searchargs.
''',
delete='''
delete:
Remove photos or albums from the database.
flags:
--delete_file:
Delete the file from disk after committing.
Your config.json file's recycle_instead_of_delete will influence this.
Without this flag, photos are removed from the db but remain on disk.
> etiquette_cli.py delete --photos id id id
> etiquette_cli.py delete --search searchargs
> etiquette_cli.py delete --albums id id id
> etiquette_cli.py delete --album-search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
digest='''
digest:
Digest a directory, adding new files as Photos into the database.
> etiquette_cli.py digest directory <flags>
flags:
--exclude_directories A B C:
Any directories matching any pattern of A, B, C... will be skipped.
These patterns may be absolute paths like 'D:\\temp', plain names like
'thumbnails' or glob patterns like 'build_*'.
--exclude_filenames A B C:
Any filenames matching any pattern of A, B, C... will be skipped.
These patterns may be absolute paths like 'D:\\somewhere\\config.json',
plain names like 'thumbs.db' or glob patterns like '*.temp'.
--glob_directories A B C:
Only directories matching any pattern of A, B, C... will be digested.
These patterns may be plain names or glob patterns like '2021*'
--glob_filenames A B C:
Only filenames matching any pattern of A, B, C... will be digested.
These patterns may be plain names or glob patterns like '*.jpg'
--no_albums:
Do not create any albums. By default, albums are created and nested to
match the directory structure.
--ratelimit X:
Limit the ingest of new Photos to only one per X seconds. This can be
used to reduce system load or to make sure that two photos don't get the
same `created` timestamp.
--no_recurse:
Do not recurse into subdirectories. Only create Photos from files in
the current directory.
Examples:
> etiquette_cli.py digest media --ratelimit 1
> etiquette_cli.py digest photos --no-recurse --no-albums --ratelimit 0.25
> etiquette_cli.py digest . --glob-filenames *.jpg --exclude-filenames thumb*
''',
easybake='''
easybake:
Create and manipulate tags by easybake strings.
> etiquette_cli.py easybake eb_string
''',
export_symlinks='''
export_symlinks:
Search for photos or albums, then create symlinks pointing to the results.
THIS IS STILL A BIT EXPERIMENTAL.
This can be used to gather up search results for the purpose of further
uploading, transfering, etc. with other applications.
Symlinks point to files (if result is a photo) or directories (if result is
an album with an associated directory).
Albums are limited to only one associated directory since the output
symlink can't point to two places at once.
> etiquette_cli.py export_symlinks --destination directory --search searchargs
> etiquette_cli.py export_symlinks --destination directory --album-search searchargs
flags:
--destination X:
A path to a directory into which the symlinks will be placed.
--dry:
Print the results without actually creating the symlinks.
--prune:
In the destination directory, any existing symlinks whose target no
longer exists will be deleted.
See etiquette_cli.py search --help for more info about searchargs.
''',
generate_thumbnail='''
generate_thumbnail:
Generate thumbnails for photos.
With no args, all files under the cwd will be thumbnailed.
Or, you can pass specific photo ids or searchargs.
> etiquette_cli.py generate_thumbnail
> etiquette_cli.py generate_thumbnail --photos id id id
> etiquette_cli.py generate_thumbnail --search searchargs
Examples:
> etiquette_cli.py generate_thumbnail --search --has-thumbnail no
See etiquette_cli.py search --help for more info about searchargs.
''',
init='''
init:
Create a new Etiquette database in the current directory.
> etiquette_cli.py init
''',
purge_deleted_files='''
purge_deleted_files:
Delete any Photo objects whose file no longer exists on disk.
> etiquette_cli.py purge_deleted_files
> etiquette_cli.py purge_deleted_files --photos id id id
> etiquette_cli.py purge_deleted_files --search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
purge_empty_albums='''
purge_empty_albums:
Delete any albums which have no child albums or photos.
Consider running purge_deleted_files first, so that albums containing
deleted files will get cleared out and then caught by this function.
With no args, all albums will be checked.
Or you can pass specific album ids. (searchargs is not available since
albums only appear in search results when a matching photo is found, and
we're looking for albums with no photos!)
> etiquette_cli.py purge_empty_albums
> etiquette_cli.py purge_empty_albums --albums id id id
''',
reload_metadata='''
reload_metadata:
Reload photos' metadata by reading the files from disk.
With no args, all files under the cwd will be reloaded.
Or, you can pass specific photo ids or searchargs.
> etiquette_cli.py reload_metadata
> etiquette_cli.py reload_metadata --photos id id id
> etiquette_cli.py reload_metadata --search searchargs
flags:
--force:
By default, we wil skip any files that have the same mtime and byte
size as before. You can pass --force to always reload.
--hash_bytes_per_second X:
A string like "10mb" to limit the speed of file hashing for the purpose
of reducing system load.
See etiquette_cli.py search --help for more info about searchargs.
''',
relocate='''
relocate:
Change a photo's filepath. Used for updating photos that have been changed
by external tools.
> etiquette_cli.py relocate photo_id filepath
''',
search='''
search:
Search for photos and albums with complex operators.
> etiquette_cli.py search searchargs
> etiquette_cli.py search --album-search searchargs
Searchargs:
--area X-Y:
Photo/video width*height between X and Y.
--width X-Y:
Photo/video width between X and Y.
--height X-Y:
Photo/video height between X and Y.
--ratio X-Y:
Photo/video aspect ratio between X and Y.
--bytes X-Y:
File size in bytes between X and Y.
--duration X-Y:
Media duration between X and Y seconds.
--author X:
Photo authored by user with username X.
--created X-Y:
Photo creation date between X and Y unix timestamp.
--extension A,B,C:
Photo with any extension of A, B, C...
--extension_not A,B,C:
Photo without any extension of A, B, C...
--filename X:
Search terms for Photo's filename.
--has_tags yes/no/null:
If yes, Photo must have at least one tag.
If no, Photo must have no tags.
If null, doesn't matter.
--has_thumbnail yes/no/null:
--is_searchhidden yes/no/null:
--mimetype A,B,C:
Photo with any mimetype of A, B, C...
--sha256 A,B,C:
Photo with any sha256 of A, B, C...
--tag_musts A,B,C:
Photo must have all tags A and B and C...
--tag_mays A,B,C:
Photo must have at least one tag of A, B, C...
--tag_forbids A,B,C:
Photo must not have any tags of A, B, C...
--tag_expression X:
Complex expression string to match tags.
--limit X:
Limit results to first X items.
--offset X:
Skip the first X items.
--orderby X-Y:
Order the results by property X in direction Y. E.g. created-desc or
bytes-asc.
''',
show_associated_directories='''
show_associated_directories:
Show the associated directories for albums.
> etiquette_cli.py show_associated_directories
> etiquette_cli.py show_associated_directories --albums id id id
> etiquette_cli.py show_associated_directories --album-search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
set_searchhidden='''
set_searchhidden:
Mark photos as searchhidden.
> etiquette_cli.py set_searchhidden --photos id id id
> etiquette_cli.py set_searchhidden --search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
unset_searchhidden='''
unset_searchhidden:
Unmark photos as searchhidden.
> etiquette_cli.py unset_searchhidden --photos id id id
> etiquette_cli.py unset_searchhidden --search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
tag_breplace='''
tag_breplace:
For all tags in the database, use find-and-replace to rename the tags.
> etiquette_cli.py tag_breplace replace_from replace_to
''',
tag_list='''
tag_list:
Show all tags in the database.
> etiquette_cli.py tag_list
''',
)
# Substitute the per-command previews into the main --help text.
DOCSTRING = betterhelp.add_previews(DOCSTRING, SUB_DOCSTRINGS)
@vlogging.main_decorator
def main(argv):
    '''
    Entry point: build the argparse CLI and dispatch to one sub-command.

    argv is pre-scanned first: every token after --search/--photo-search,
    --album-search, --photos, or --albums is diverted into its own bucket,
    and only the remaining tokens (primary_args) are handed to the regular
    subparser machinery. The buckets are parsed/normalized later inside
    postprocessor(), so every command shares the same photo/album
    selection syntax without each subparser redeclaring it.

    Returns the sub-command's exit status (0 on success, 1 when no
    database could be located).
    '''
    parser = argparse.ArgumentParser(description=__doc__)
    subparsers = parser.add_subparsers()
    # Buckets for the argv pre-scan; `mode` points at whichever bucket
    # subsequent tokens should be appended to.
    primary_args = []
    photo_id_args = []
    photo_search_args = []
    album_id_args = []
    album_search_args = []
    mode = primary_args
    for arg in argv:
        # `if 0: pass` is a placeholder so each real case below is a
        # uniformly-shaped elif.
        if 0:
            pass
        elif arg in {'--search', '--photo_search', '--photo-search'}:
            mode = photo_search_args
        elif arg in {'--album_search', '--album-search'}:
            mode = album_search_args
        elif arg == '--photos':
            mode = photo_id_args
        elif arg == '--albums':
            mode = album_id_args
        else:
            mode.append(arg)
    # One subparser per command; each sets `func` which betterhelp calls
    # with the parsed namespace.
    p_add_tag = subparsers.add_parser('add_tag', aliases=['add-tag'])
    p_add_tag.add_argument('tag_name')
    p_add_tag.add_argument('globs', nargs='*')
    p_add_tag.add_argument('--yes', dest='autoyes', action='store_true')
    p_add_tag.set_defaults(func=lambda args: add_remove_tag_argparse(args, action='add'))
    p_remove_tag = subparsers.add_parser('remove_tag', aliases=['remove-tag'])
    p_remove_tag.add_argument('tag_name')
    p_remove_tag.add_argument('globs', nargs='*')
    p_remove_tag.add_argument('--yes', dest='autoyes', action='store_true')
    p_remove_tag.set_defaults(func=lambda args: add_remove_tag_argparse(args, action='remove'))
    p_delete = subparsers.add_parser('delete')
    p_delete.add_argument('--delete_file', '--delete-file', action='store_true')
    p_delete.add_argument('--yes', dest='autoyes', action='store_true')
    p_delete.set_defaults(func=delete_argparse)
    p_digest = subparsers.add_parser('digest', aliases=['digest_directory', 'digest-directory'])
    p_digest.add_argument('directory')
    p_digest.add_argument('--exclude_directories', '--exclude-directories', nargs='+', default=None)
    p_digest.add_argument('--exclude_filenames', '--exclude-filenames', nargs='+', default=None)
    p_digest.add_argument('--glob_directories', '--glob-directories', nargs='+', default=None)
    p_digest.add_argument('--glob_filenames', '--glob-filenames', nargs='+', default=None)
    p_digest.add_argument('--no_albums', '--no-albums', dest='make_albums', action='store_false', default=True)
    p_digest.add_argument('--ratelimit', dest='ratelimit', type=float, default=0.2)
    p_digest.add_argument('--no_recurse', '--no-recurse', dest='recurse', action='store_false', default=True)
    p_digest.add_argument('--hash_bytes_per_second', '--hash-bytes-per-second', default=None)
    p_digest.add_argument('--yes', dest='autoyes', action='store_true')
    p_digest.set_defaults(func=digest_directory_argparse)
    p_easybake = subparsers.add_parser('easybake')
    p_easybake.add_argument('eb_strings', nargs='+')
    p_easybake.add_argument('--yes', dest='autoyes', action='store_true')
    p_easybake.set_defaults(func=easybake_argparse)
    p_export_symlinks = subparsers.add_parser('export_symlinks', aliases=['export-symlinks'])
    p_export_symlinks.add_argument('--destination', dest='destination', required=True)
    p_export_symlinks.add_argument('--dry', dest='dry_run', action='store_true')
    p_export_symlinks.add_argument('--prune', dest='prune', action='store_true')
    p_export_symlinks.set_defaults(func=export_symlinks_argparse)
    p_generate_thumbnail = subparsers.add_parser('generate_thumbnail', aliases=['generate-thumbnail'])
    p_generate_thumbnail.add_argument('--yes', dest='autoyes', action='store_true')
    p_generate_thumbnail.set_defaults(func=generate_thumbnail_argparse)
    p_init = subparsers.add_parser('init', aliases=['create'])
    p_init.set_defaults(func=init_argparse)
    p_purge_deleted_files = subparsers.add_parser('purge_deleted_files', aliases=['purge-deleted-files'])
    p_purge_deleted_files.add_argument('--yes', dest='autoyes', action='store_true')
    p_purge_deleted_files.set_defaults(func=purge_deleted_files_argparse)
    p_purge_empty_albums = subparsers.add_parser('purge_empty_albums', aliases=['purge-empty-albums'])
    p_purge_empty_albums.add_argument('--yes', dest='autoyes', action='store_true')
    p_purge_empty_albums.set_defaults(func=purge_empty_albums_argparse)
    p_reload_metadata = subparsers.add_parser('reload_metadata', aliases=['reload-metadata'])
    p_reload_metadata.add_argument('--hash_bytes_per_second', '--hash-bytes-per-second', default=None)
    p_reload_metadata.add_argument('--force', action='store_true')
    p_reload_metadata.add_argument('--yes', dest='autoyes', action='store_true')
    p_reload_metadata.set_defaults(func=reload_metadata_argparse)
    p_relocate = subparsers.add_parser('relocate')
    p_relocate.add_argument('photo_id')
    p_relocate.add_argument('filepath')
    p_relocate.add_argument('--yes', dest='autoyes', action='store_true')
    p_relocate.set_defaults(func=relocate_argparse)
    # p_search doubles as the parser for the --search / --album-search
    # buckets (see postprocessor below).
    p_search = subparsers.add_parser('search')
    p_search.add_argument('--area', dest='area', default=None)
    p_search.add_argument('--width', dest='width', default=None)
    p_search.add_argument('--height', dest='height', default=None)
    p_search.add_argument('--ratio', dest='ratio', default=None)
    p_search.add_argument('--bytes', dest='bytes', default=None)
    p_search.add_argument('--duration', dest='duration', default=None)
    p_search.add_argument('--author', dest='author', default=None)
    p_search.add_argument('--created', dest='created', default=None)
    p_search.add_argument('--extension', dest='extension', default=None)
    p_search.add_argument('--extension_not', '--extension-not', dest='extension_not', default=None)
    p_search.add_argument('--filename', dest='filename', default=None)
    p_search.add_argument('--has_tags', '--has-tags', dest='has_tags', default=None)
    p_search.add_argument('--has_thumbnail', '--has-thumbnail', dest='has_thumbnail', default=None)
    p_search.add_argument('--is_searchhidden', '--is-searchhidden', dest='is_searchhidden', default=False)
    p_search.add_argument('--sha256', default=None)
    p_search.add_argument('--mimetype', dest='mimetype', default=None)
    p_search.add_argument('--tag_musts', '--tag-musts', dest='tag_musts', default=None)
    p_search.add_argument('--tag_mays', '--tag-mays', dest='tag_mays', default=None)
    p_search.add_argument('--tag_forbids', '--tag-forbids', dest='tag_forbids', default=None)
    p_search.add_argument('--tag_expression', '--tag-expression', dest='tag_expression', default=None)
    p_search.add_argument('--limit', dest='limit', default=None)
    p_search.add_argument('--offset', dest='offset', default=None)
    p_search.add_argument('--orderby', dest='orderby', default='basename-ASC')
    p_search.set_defaults(func=search_argparse)
    p_show_associated_directories = subparsers.add_parser('show_associated_directories', aliases=['show-associated-directories'])
    p_show_associated_directories.set_defaults(func=show_associated_directories_argparse)
    p_set_searchhidden = subparsers.add_parser('set_searchhidden', aliases=['set-searchhidden'])
    p_set_searchhidden.add_argument('--yes', dest='autoyes', action='store_true')
    p_set_searchhidden.set_defaults(func=lambda args: set_unset_searchhidden_argparse(args, searchhidden=True))
    p_unset_searchhidden = subparsers.add_parser('unset_searchhidden', aliases=['unset-searchhidden'])
    p_unset_searchhidden.add_argument('--yes', dest='autoyes', action='store_true')
    p_unset_searchhidden.set_defaults(func=lambda args: set_unset_searchhidden_argparse(args, searchhidden=False))
    p_tag_breplace = subparsers.add_parser('tag_breplace', aliases=['tag-breplace'])
    p_tag_breplace.add_argument('replace_from')
    p_tag_breplace.add_argument('replace_to')
    p_tag_breplace.add_argument('--set_synonym', '--set-synonym', dest='set_synonym', action='store_true')
    p_tag_breplace.add_argument('--regex', dest='regex', action='store_true')
    p_tag_breplace.add_argument('--yes', dest='autoyes', action='store_true')
    p_tag_breplace.set_defaults(func=tag_breplace_argparse)
    p_tag_list = subparsers.add_parser('tag_list', aliases=['tag-list'])
    p_tag_list.set_defaults(func=tag_list_argparse)
    def postprocessor(args):
        '''
        Parse the diverted argv buckets onto the final namespace: search
        buckets through p_search, id buckets split on commas/spaces, plus
        an any_id_args convenience flag for the command handlers.
        '''
        args.photo_search_args = p_search.parse_args(photo_search_args) if photo_search_args else None
        args.album_search_args = p_search.parse_args(album_search_args) if album_search_args else None
        args.photo_id_args = [id for arg in photo_id_args for id in stringtools.comma_space_split(arg)]
        args.album_id_args = [id for arg in album_id_args for id in stringtools.comma_space_split(arg)]
        args.any_id_args = bool(
            args.photo_search_args or
            args.album_search_args or
            args.photo_id_args or
            args.album_id_args
        )
        return args
    try:
        return betterhelp.subparser_main(
            primary_args,
            parser,
            main_docstring=DOCSTRING,
            sub_docstrings=SUB_DOCSTRINGS,
            args_postprocessor=postprocessor,
        )
    except etiquette.exceptions.NoClosestPhotoDB as exc:
        # No database found in cwd or any parent; tell the user how to
        # create one rather than dumping a traceback.
        pipeable.stderr(exc.error_message)
        pipeable.stderr('Try `etiquette_cli.py init` to create the database.')
        return 1
# Script entry point; main's return value becomes the process exit code.
if __name__ == '__main__':
    raise SystemExit(main(sys.argv[1:]))
| true
| true
|
f707e8b56ef15989602ef9fa6ae6524c8a27ce3b
| 7,549
|
py
|
Python
|
python/tests/wrapper/test_05_multimodal.py
|
menshikh-iv/bigartm
|
9be401f740402814fe79ff4a6cebfd3db0bf992f
|
[
"BSD-3-Clause"
] | 638
|
2015-02-03T22:17:00.000Z
|
2022-03-23T18:47:50.000Z
|
python/tests/wrapper/test_05_multimodal.py
|
menshikh-iv/bigartm
|
9be401f740402814fe79ff4a6cebfd3db0bf992f
|
[
"BSD-3-Clause"
] | 566
|
2015-01-01T21:49:00.000Z
|
2022-02-14T09:14:35.000Z
|
python/tests/wrapper/test_05_multimodal.py
|
bt2901/bigartm
|
92c9d5746c122d0124bab700469d8a2a7f58ff40
|
[
"BSD-3-Clause"
] | 148
|
2015-01-06T15:30:07.000Z
|
2022-02-12T18:40:17.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017, Additive Regularization of Topic Models.
from __future__ import print_function
import os
import uuid
import string
import itertools
import tempfile
import shutil
import pytest
from six.moves import range, zip
import artm.wrapper
import artm.wrapper.messages_pb2 as messages
import artm.master_component as mc
def _print_top_tokens(top_tokens_score, expected_values_topic, tolerance):
    # Print the per-topic top tokens of a TopTokens score and assert that
    # every token's weight matches the expected value within `tolerance`.
    # assumes topic_index entries are grouped consecutively per topic —
    # groupby does not sort.
    triplets = zip(top_tokens_score.topic_index,
                   zip(top_tokens_score.token,
                       top_tokens_score.weight))
    for topic_index, group in itertools.groupby(triplets, key=lambda t: t[0]):
        pieces = [u'Topic#{0} : '.format(topic_index)]
        for _, (token, weight) in group:
            pieces.append(u' {0}({1:.3f})'.format(token, weight))
            assert abs(expected_values_topic[topic_index][token] - weight) < tolerance
        print(u''.join(pieces))
def test_func():
    '''
    End-to-end multimodal (russian + english) topic-model test: build a tiny
    two-language collection in-memory, train a 2-topic model for 10 outer
    iterations, and check top tokens and Phi sparsity per modality against
    hard-coded expected values.
    '''
    # Set some constants
    dictionary_name = 'dictionary'
    pwt = 'pwt'
    nwt = 'nwt'
    num_topics = 2
    num_document_passes = 10
    num_outer_iterations = 10
    russian_class_weight = 1.0
    english_class_weight = 1.0
    russian_class = '@russian'
    english_class = '@english'
    tolerance = 0.001
    # Expected token weights per topic after convergence (checked to
    # within `tolerance` by _print_top_tokens).
    expected_values_rus_topic = {
        0: {
            u'документ': 0.125,
            u'текст': 0.125,
            u'анализ': 0.125,
            u'статистический': 0.125,
            u'модель': 0.125,
            u'коллекция': 0.083,
            u'тематическая': 0.083,
            'model': 0.042,
            'topic': 0.042,
            'artm': 0.042
        },
        1: {
            u'ногие': 0.115,
            u'отряд': 0.115,
            u'млекопитающие': 0.115,
            u'семейство': 0.115,
            u'хищный': 0.077,
            u'ласто': 0.077,
            u'моржовых': 0.077,
            u'тюлень': 0.077,
            u'ушастый': 0.077,
            u'коротко': 0.038
        }
    }
    expected_values_eng_topic = {
        0: {
            'model': 0.167,
            'text': 0.125,
            'analysis': 0.125,
            'statistical': 0.125,
            'topic': 0.125,
            'artm': 0.083,
            'plsa': 0.083,
            'lda': 0.083,
            'collection': 0.083,
            'not': 0.000
        },
        1: {
            'mammal': 0.188,
            'predatory': 0.125,
            'eared': 0.125,
            'marine': 0.125,
            'seal': 0.125,
            'not': 0.062,
            'reptile': 0.062,
            'crocodilia': 0.062,
            'order': 0.062,
            'pinnipeds': 0.062
        }
    }
    expected_sparsity_values = {'russian': 0.5, 'english': 0.5}
    # Prepare multimodal data: parallel english/russian documents.
    ens = []
    rus = []
    ens.append(u'Topic model statistical analysis text collection LDA PLSA ARTM')
    rus.append(u'Тематическая модель статистический анализ текст коллекция')
    ens.append(u'LDA statistical topic model text collection')
    rus.append(u'LDA статистический тематическая модель текст документ коллекция')
    ens.append(u'PLSA statistical analysis text model')
    rus.append(u'PLSA статистический анализ документ текст модель')
    ens.append(u'ARTM analysis topic model')
    rus.append(u'ARTM анализ документ topic model')
    ens.append(u'Pinnipeds seal marine mammal order')
    rus.append(u'Тюлень семейство млекопитающие моржовых отряд ласто ногие')
    ens.append(u'Eared seal marine predatory mammal')
    rus.append(u'Ушастый тюлень семейство млекопитающие отряд хищный семейство моржовых ласто ногие')
    ens.append(u'Eared Crocodilia predatory reptile not mammal')
    rus.append(u'Ушастый крокодил гена отряд хищный не млекопитающие коротко ногие')
    ru_dic = {}  # mapping from russian token to its index in batch.token list
    en_dic = {}  # mapping from english token to its index in batch.token list
    batch = messages.Batch()  # batch representing the entire collection
    batch.id = str(uuid.uuid1())
    dict_data = messages.DictionaryData()  # BigARTM dictionary to initialize model
    dict_data.name = dictionary_name
    def append(tokens, dic, item, class_id):
        # Register each token (once per modality) and add it to the item.
        for token in tokens:
            if token not in dic:  # New token discovered:
                dic[token] = len(batch.token)  # 1. update ru_dic or en_dic
                batch.token.append(token)  # 2. update batch.token and batch.class_id
                batch.class_id.append(class_id)
                dict_data.token.append(token)
                dict_data.class_id.append(class_id)
            # Add token to the item.
            item.token_id.append(dic[token])
            # replace '1' with the actual number of token occupancies in the item
            item.token_weight.append(1)
    # Iterate through all items and populate the batch
    for (en, ru) in zip(ens, rus):
        next_item = batch.item.add()
        next_item.id = len(batch.item) - 1
        append(ru.lower().split(), ru_dic, next_item, russian_class)
        append(en.lower().split(), en_dic, next_item, english_class)
    batches_folder = tempfile.mkdtemp()
    try:
        # Create the instance of low-level API and master object
        lib = artm.wrapper.LibArtm()
        # Save batch and dictionary on the disk
        lib.ArtmSaveBatch(batches_folder, batch)
        # Create master component and scores
        scores = {'SparsityPhiRus': messages.SparsityPhiScoreConfig(class_id=russian_class),
                  'SparsityPhiEng': messages.SparsityPhiScoreConfig(class_id=english_class),
                  'TopTokensRus': messages.TopTokensScoreConfig(class_id=russian_class),
                  'TopTokensEng': messages.TopTokensScoreConfig(class_id=english_class)}
        master = mc.MasterComponent(lib, scores=scores)
        # Create the collection dictionary
        lib.ArtmCreateDictionary(master.master_id, dict_data)
        # Initialize model
        master.initialize_model(model_name=pwt,
                                topic_names=['topic_{}'.format(i) for i in range(num_topics)],
                                dictionary_name=dictionary_name)
        for iter in range(num_outer_iterations):
            # Invoke one scan of the collection, regularize and normalize Phi
            master.clear_score_cache()
            master.process_batches(pwt, nwt, num_document_passes, batches_folder,
                                   class_ids=[russian_class, english_class],
                                   class_weights=[russian_class_weight, english_class_weight])
            master.normalize_model(pwt, nwt)
        # Retrieve and print scores
        top_tokens_rus = master.get_score('TopTokensRus')
        top_tokens_eng = master.get_score('TopTokensEng')
        sp_phi_rus = master.get_score('SparsityPhiRus')
        sp_phi_eng = master.get_score('SparsityPhiEng')
        print('Top tokens per russian topic:')
        _print_top_tokens(top_tokens_rus, expected_values_rus_topic, tolerance)
        print('Top tokens per english topic:')
        _print_top_tokens(top_tokens_eng, expected_values_eng_topic, tolerance)
        print('\nSparsity Phi: russian {0:.3f}, english {1:.3f}'.format(sp_phi_rus.value, sp_phi_eng.value))
        assert abs(expected_sparsity_values['russian'] - sp_phi_rus.value) < tolerance
        assert abs(expected_sparsity_values['english'] - sp_phi_eng.value) < tolerance
    finally:
        # Always clean up the temp batch folder, even on assertion failure.
        shutil.rmtree(batches_folder)
| 37.371287
| 108
| 0.616903
|
from __future__ import print_function
import os
import uuid
import string
import itertools
import tempfile
import shutil
import pytest
from six.moves import range, zip
import artm.wrapper
import artm.wrapper.messages_pb2 as messages
import artm.master_component as mc
def _print_top_tokens(top_tokens_score, expected_values_topic, tolerance):
top_tokens_triplets = zip(top_tokens_score.topic_index,
zip(top_tokens_score.token,
top_tokens_score.weight))
for topic_index, group in itertools.groupby(top_tokens_triplets, key=lambda triplet: triplet[0]):
print_string = u'Topic#{0} : '.format(topic_index)
for _, (token, weight) in group:
print_string += u' {0}({1:.3f})'.format(token, weight)
assert abs(expected_values_topic[topic_index][token] - weight) < tolerance
print(print_string)
def test_func():
dictionary_name = 'dictionary'
pwt = 'pwt'
nwt = 'nwt'
num_topics = 2
num_document_passes = 10
num_outer_iterations = 10
russian_class_weight = 1.0
english_class_weight = 1.0
russian_class = '@russian'
english_class = '@english'
tolerance = 0.001
expected_values_rus_topic = {
0: {
u'документ': 0.125,
u'текст': 0.125,
u'анализ': 0.125,
u'статистический': 0.125,
u'модель': 0.125,
u'коллекция': 0.083,
u'тематическая': 0.083,
'model': 0.042,
'topic': 0.042,
'artm': 0.042
},
1: {
u'ногие': 0.115,
u'отряд': 0.115,
u'млекопитающие': 0.115,
u'семейство': 0.115,
u'хищный': 0.077,
u'ласто': 0.077,
u'моржовых': 0.077,
u'тюлень': 0.077,
u'ушастый': 0.077,
u'коротко': 0.038
}
}
expected_values_eng_topic = {
0: {
'model': 0.167,
'text': 0.125,
'analysis': 0.125,
'statistical': 0.125,
'topic': 0.125,
'artm': 0.083,
'plsa': 0.083,
'lda': 0.083,
'collection': 0.083,
'not': 0.000
},
1: {
'mammal': 0.188,
'predatory': 0.125,
'eared': 0.125,
'marine': 0.125,
'seal': 0.125,
'not': 0.062,
'reptile': 0.062,
'crocodilia': 0.062,
'order': 0.062,
'pinnipeds': 0.062
}
}
expected_sparsity_values = {'russian': 0.5, 'english': 0.5}
ens = []
rus = []
ens.append(u'Topic model statistical analysis text collection LDA PLSA ARTM')
rus.append(u'Тематическая модель статистический анализ текст коллекция')
ens.append(u'LDA statistical topic model text collection')
rus.append(u'LDA статистический тематическая модель текст документ коллекция')
ens.append(u'PLSA statistical analysis text model')
rus.append(u'PLSA статистический анализ документ текст модель')
ens.append(u'ARTM analysis topic model')
rus.append(u'ARTM анализ документ topic model')
ens.append(u'Pinnipeds seal marine mammal order')
rus.append(u'Тюлень семейство млекопитающие моржовых отряд ласто ногие')
ens.append(u'Eared seal marine predatory mammal')
rus.append(u'Ушастый тюлень семейство млекопитающие отряд хищный семейство моржовых ласто ногие')
ens.append(u'Eared Crocodilia predatory reptile not mammal')
rus.append(u'Ушастый крокодил гена отряд хищный не млекопитающие коротко ногие')
ru_dic = {} en_dic = {} batch = messages.Batch() batch.id = str(uuid.uuid1())
dict_data = messages.DictionaryData() dict_data.name = dictionary_name
def append(tokens, dic, item, class_id):
for token in tokens:
if token not in dic: dic[token] = len(batch.token) batch.token.append(token) batch.class_id.append(class_id)
dict_data.token.append(token)
dict_data.class_id.append(class_id)
item.token_id.append(dic[token])
item.token_weight.append(1)
for (en, ru) in zip(ens, rus):
next_item = batch.item.add()
next_item.id = len(batch.item) - 1
append(ru.lower().split(), ru_dic, next_item, russian_class)
append(en.lower().split(), en_dic, next_item, english_class)
batches_folder = tempfile.mkdtemp()
try:
lib = artm.wrapper.LibArtm()
lib.ArtmSaveBatch(batches_folder, batch)
scores = {'SparsityPhiRus': messages.SparsityPhiScoreConfig(class_id=russian_class),
'SparsityPhiEng': messages.SparsityPhiScoreConfig(class_id=english_class),
'TopTokensRus': messages.TopTokensScoreConfig(class_id=russian_class),
'TopTokensEng': messages.TopTokensScoreConfig(class_id=english_class)}
master = mc.MasterComponent(lib, scores=scores)
lib.ArtmCreateDictionary(master.master_id, dict_data)
master.initialize_model(model_name=pwt,
topic_names=['topic_{}'.format(i) for i in range(num_topics)],
dictionary_name=dictionary_name)
for iter in range(num_outer_iterations):
master.clear_score_cache()
master.process_batches(pwt, nwt, num_document_passes, batches_folder,
class_ids=[russian_class, english_class],
class_weights=[russian_class_weight, english_class_weight])
master.normalize_model(pwt, nwt)
top_tokens_rus = master.get_score('TopTokensRus')
top_tokens_eng = master.get_score('TopTokensEng')
sp_phi_rus = master.get_score('SparsityPhiRus')
sp_phi_eng = master.get_score('SparsityPhiEng')
print('Top tokens per russian topic:')
_print_top_tokens(top_tokens_rus, expected_values_rus_topic, tolerance)
print('Top tokens per english topic:')
_print_top_tokens(top_tokens_eng, expected_values_eng_topic, tolerance)
print('\nSparsity Phi: russian {0:.3f}, english {1:.3f}'.format(sp_phi_rus.value, sp_phi_eng.value))
assert abs(expected_sparsity_values['russian'] - sp_phi_rus.value) < tolerance
assert abs(expected_sparsity_values['english'] - sp_phi_eng.value) < tolerance
finally:
shutil.rmtree(batches_folder)
| true
| true
|
f707e8d92417a3709eee2909a385d1cc48e469bf
| 213
|
py
|
Python
|
src/accounts/models.py
|
rafaellcoellho/pergunteme
|
df60823c311657a85d54eedbef4c95997c17e9b7
|
[
"MIT"
] | null | null | null |
src/accounts/models.py
|
rafaellcoellho/pergunteme
|
df60823c311657a85d54eedbef4c95997c17e9b7
|
[
"MIT"
] | null | null | null |
src/accounts/models.py
|
rafaellcoellho/pergunteme
|
df60823c311657a85d54eedbef4c95997c17e9b7
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractUser
class CustomUser(AbstractUser):
    """Project user model; currently identical to Django's AbstractUser."""
    pass
    # add additional fields in here
    def __str__(self):
        # Represent users by their username.
        return self.username
| 19.363636
| 51
| 0.737089
|
from django.db import models
from django.contrib.auth.models import AbstractUser
class CustomUser(AbstractUser):
pass
def __str__(self):
return self.username
| true
| true
|
f707e9b1b9ea3839eac0a69112f41e93fa9c360a
| 1,447
|
py
|
Python
|
general_modules/postprocess_decoded_seq.py
|
johntsi/preast_qa
|
e95fb167f4fdeb8868fed935f88493450ea10ddd
|
[
"MIT"
] | null | null | null |
general_modules/postprocess_decoded_seq.py
|
johntsi/preast_qa
|
e95fb167f4fdeb8868fed935f88493450ea10ddd
|
[
"MIT"
] | null | null | null |
general_modules/postprocess_decoded_seq.py
|
johntsi/preast_qa
|
e95fb167f4fdeb8868fed935f88493450ea10ddd
|
[
"MIT"
] | null | null | null |
def postprocess_decoded_seq(answers):
    """Undo spurious spaces that tokenizer decoding inserts into strings.

    Three separator patterns are repaired, in order:
      * ", "  between digit groups (e.g. "1, 000, 000" -> "1,000,000";
        a 4+ digit group on the right is treated as a real number list,
        not thousands grouping, and keeps its space)
      * ". "  between digits (e.g. "3. 14" -> "3.14")
      * " : " between digits (e.g. "10 : 30" -> "10:30")

    Args:
        answers: list[str]

    Returns:
        list[str]: the repaired strings, in input order.
    """
    cleaned = []
    for raw in answers:
        # --- Pass 1: ", " between digit groups (thousands separators) ---
        pieces = raw.split(", ")
        if len(pieces) > 1:
            try:
                merged = pieces[0]
                for piece in pieces[1:]:
                    if merged[-1].isnumeric() and piece[0].isnumeric():
                        # Right-hand group of 4+ digits: not thousands
                        # grouping, keep the original ", ".
                        if len(piece) > 3 and piece[3].isnumeric():
                            merged = merged + ", " + piece
                        else:
                            merged = merged + "," + piece
                    else:
                        merged = merged + ", " + piece
            except IndexError:
                # Empty fragment (e.g. the answer starts with ", "):
                # give up on this pass and keep the answer untouched.
                print("--> IndexError:", raw)
                merged = raw
        else:
            merged = raw

        # --- Pass 2: ". " between digits (decimal points) ---
        pieces = merged.split(". ")
        if len(pieces) > 1:
            joined = pieces[0]
            for piece in pieces[1:]:
                try:
                    if joined[-1].isnumeric() and piece[0].isnumeric():
                        joined = joined + "." + piece
                    else:
                        joined = joined + ". " + piece
                except IndexError:
                    joined = pieces[1]
        else:
            joined = merged

        # --- Pass 3: " : " between digits (times, ratios) ---
        pieces = joined.split(" : ")
        if len(pieces) > 1:
            result = pieces[0]
            for piece in pieces[1:]:
                if result[-1].isnumeric() and piece[0].isnumeric():
                    result = result + ":" + piece
                else:
                    result = result + " : " + piece
        else:
            result = joined

        cleaned.append(result)
    return cleaned
| 22.968254
| 69
| 0.565999
|
def postprocess_decoded_seq(answers):
new_answers = []
for answer in answers:
parts = answer.split(", ")
if len(parts) > 1:
try:
new0 = parts[0]
for i in range(1, len(parts)):
if new0[-1].isnumeric() and parts[i][0].isnumeric():
if len(parts[i]) > 3 and parts[i][3].isnumeric():
new0 = ", ".join([new0, parts[i]])
else:
new0 = ",".join([new0, parts[i]])
else:
new0 = ", ".join([new0, parts[i]])
except IndexError:
print("--> IndexError:", answer)
new0 = answer
else:
new0 = answer
parts = new0.split(". ")
if len(parts) > 1:
new1 = parts[0]
for i in range(1, len(parts)):
try:
if new1[-1].isnumeric() and parts[i][0].isnumeric():
new1 = ".".join([new1, parts[i]])
else:
new1 = ". ".join([new1, parts[i]])
except IndexError:
new1 = parts[1]
else:
new1 = new0
parts = new1.split(" : ")
if len(parts) > 1:
new2 = parts[0]
for i in range(1, len(parts)):
if new2[-1].isnumeric() and parts[i][0].isnumeric():
new2 = ":".join([new2, parts[i]])
else:
new2 = " : ".join([new2, parts[i]])
else:
new2 = new1
new_answers.append(new2)
return new_answers
| true
| true
|
f707ec62b70359ffbd3ab48f3f38674053d85189
| 10,193
|
py
|
Python
|
omop_harvest/conf/base.py
|
chop-dbhi/omop_harvest
|
ed2f5101941b48ea1a76761ae05162b89ead5bb7
|
[
"BSD-2-Clause"
] | 4
|
2015-01-21T13:48:33.000Z
|
2017-02-22T15:22:39.000Z
|
omop_harvest/conf/base.py
|
chop-dbhi/omop_harvest
|
ed2f5101941b48ea1a76761ae05162b89ead5bb7
|
[
"BSD-2-Clause"
] | 2
|
2015-01-29T19:24:24.000Z
|
2015-04-10T21:04:53.000Z
|
omop_harvest/conf/base.py
|
chop-dbhi/omop_harvest
|
ed2f5101941b48ea1a76761ae05162b89ead5bb7
|
[
"BSD-2-Clause"
] | 2
|
2015-01-28T01:07:01.000Z
|
2015-07-22T17:36:21.000Z
|
import os
# Import global settings to make it easier to extend settings.
from django.conf.global_settings import *
from django.core.exceptions import ImproperlyConfigured
def get_env_variable(var_name):
    """Return the value of the environment variable *var_name*.

    Raises:
        ImproperlyConfigured: when the variable is not set, with a
            message telling the operator which variable to define.
    """
    try:
        value = os.environ[var_name]
    except KeyError:
        raise ImproperlyConfigured("Set the %s environment variable" % var_name)
    return value
# Import the project module to calculate directories relative to the module
# location.
PROJECT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../..')
# List all Django apps here. Note that standard Python libraries should not
# be added to this list since Django will not recognize them as apps anyway.
# An app is really only an "app" if a `models` module or package is defined.
# Read more about projects vs. apps here:
# https://docs.djangoproject.com/en/1.3/intro/tutorial01/#creating-models
INSTALLED_APPS = (
'omop_harvest',
'south',
'serrano',
'avocado',
'modeltree',
'haystack',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.markup',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'chopauth',
'registration'
)
#
# ADMINISTRATIVE
#
# TODO: Add admins here.
# Admins receive any error messages by email if DEBUG is False
ADMINS = ()
# Managers receive broken link emails if SEND_BROKEN_LINK_EMAILS is True
MANAGERS = ADMINS
# List of IP addresses which will show debug comments
INTERNAL_IPS = ('127.0.0.1', '::1')
DEBUG = True
TEMPLATE_DEBUG = DEBUG
#
# DATABASES
# Each database can be specified here, but passwords should be in a separate
# file that is not versioned. Use ``local_settings.py``.
#
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_PATH, 'harvest.db')
},
'omop': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_PATH, 'omop.db')
}
}
DATABASE_ROUTERS = ('omop_harvest.routers.OmopRouter',)
#
# LOCALITY
#
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = None
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = False
#
# STATIC AND MEDIA
# The application's static files should be placed in the STATIC_ROOT in
# addition to other static files found in third-party apps. The MEDIA_ROOT
# is intended for user uploaded files.
#
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_PATH, '_site/media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_PATH, '_site/static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# TODO: Remove this. Shouldn't the files at the below location
# be collected under '_site/static'?
# Additional locations of static files
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# project level static files
STATICFILES_DIRS = (
os.path.join(PROJECT_PATH, 'omop_harvest', 'static'),
)
#
# TEMPLATES
#
# Project level templates and template directories that override
# third-party app templates.
TEMPLATE_DIRS = ()
# Context processors are simply functions that return a dict which augments the
# template context.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
'omop_harvest.context_processors.static',
)
#
# URLS
#
# FORCE_SCRIPT_NAME overrides the interpreted 'SCRIPT_NAME' provided by the
# web server. since the URLs below are used for various purposes outside of
# the WSGI application (static and media files), these need to be updated to
# reflect this discrepancy.
FORCE_SCRIPT_NAME = ''
LOGIN_URL = FORCE_SCRIPT_NAME + '/login/'
LOGIN_REDIRECT_URL = FORCE_SCRIPT_NAME + '/query/'
LOGOUT_URL = '/logout/'
ROOT_URLCONF = 'omop_harvest.conf.urls'
# For non-publicly accessible applications, the siteauth app can be used to
# restrict access site-wide.
# SITEAUTH_ACCESS_ORDER = 'allow/deny'
#
SITEAUTH_ALLOW_URLS = (
r'^log(in|out)/',
r'^password/reset/',
r'^(register|verify)/',
)
SITEAUTH_DENY_URLS = (
r'^workspace/',
r'^workspace/discover/',
r'^query/',
r'^results/+',
r'^api/+',
r'^details/\d+/',
r'^moderate/+',
r'^verify/+',
)
#
# MIDDLEWARE
#
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'siteauth.middleware.SiteAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'serrano.middleware.SessionMiddleware',
)
#
# EMAIL
#
SUPPORT_EMAIL = 'cbmisupport@email.chop.edu'
DEFAULT_FROM_EMAIL = 'cbmisupport@email.chop.edu'
EMAIL_SUBJECT_PREFIX = '[omop_harvest] '
SEND_BROKEN_LINK_EMAILS = False
#
# LOGGING
#
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
}
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/omop_harvest.log',
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter': 'standard',
},
'request_handler': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/omop_harvest_requests.log',
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter': 'standard',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
}
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'django.request': {
'handlers': ['request_handler'],
'level': 'DEBUG',
'propagate': False
},
'avocado': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'serrano': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
}
#
# CACHE
#
# For production environments, the memcached backend is highly recommended
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique',
'KEY_PREFIX': 'omop_harvest',
'VERSION': 1,
}
}
CACHE_MIDDLEWARE_SECONDS = 0
# This is not necessary to set if the above `KEY_PREFIX` value is set since
# the `KEY_PREFIX` namespaces all cache set by this application
CACHE_MIDDLEWARE_KEY_PREFIX = 'omop_harvest'
#
# SESSIONS AND COOKIES
#
CSRF_COOKIE_NAME = 'omop_harvest_csrftoken'
# SESSION_COOKIE_AGE = 60 * 20
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
SESSION_COOKIE_NAME = 'omop_harvest_sessionid'
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_SAVE_EVERY_REQUEST = False
#
# OTHER PROJECT SETTINGS
#
# USE_ETAGS = True
IGNORABLE_404_PATHS = (
r'robots.txt$',
r'favicon.ico$',
)
#
# VARIOUS APP SETTINGS
#
# The primary key of the ``Site`` object for the Sites Framework
SITE_ID = 1
#
# ModelTrees Configuration
#
MODELTREES = {
'default': {
'model': 'omop_harvest.Person',
}
}
#
# Haystack Configuration
#
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': os.path.join(os.path.dirname(__file__), '../../whoosh.index'),
}
}
#
# Avocado Configuration
#
# TODO: Should data_cache_enabled be set to True?
AVOCADO = {
'DATA_CACHE_ENABLED': False,
'METADATA_MIGRATION_APP': 'omop_harvest',
}
| 26.613577
| 81
| 0.672913
|
import os
from django.conf.global_settings import *
from django.core.exceptions import ImproperlyConfigured
def get_env_variable(var_name):
try:
return os.environ[var_name]
except KeyError:
error_msg = "Set the %s environment variable" % var_name
raise ImproperlyConfigured(error_msg)
PROJECT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../..')
INSTALLED_APPS = (
'omop_harvest',
'south',
'serrano',
'avocado',
'modeltree',
'haystack',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.markup',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'chopauth',
'registration'
)
ADMINS = ()
MANAGERS = ADMINS
INTERNAL_IPS = ('127.0.0.1', '::1')
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_PATH, 'harvest.db')
},
'omop': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_PATH, 'omop.db')
}
}
DATABASE_ROUTERS = ('omop_harvest.routers.OmopRouter',)
TIME_ZONE = None
LANGUAGE_CODE = 'en-us'
USE_I18N = False
USE_L10N = False
# addition to other static files found in third-party apps. The MEDIA_ROOT
# is intended for user uploaded files.
#
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_PATH, '_site/media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_PATH, '_site/static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# TODO: Remove this. Shouldn't the files at the below location
# project level static files
STATICFILES_DIRS = (
os.path.join(PROJECT_PATH, 'omop_harvest', 'static'),
)
#
# TEMPLATES
#
# Project level templates and template directories that override
# third-party app templates.
TEMPLATE_DIRS = ()
# Context processors are simply functions that return a dict which augments the
# template context.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
'omop_harvest.context_processors.static',
)
#
# URLS
#
# FORCE_SCRIPT_NAME overrides the interpreted 'SCRIPT_NAME' provided by the
# web server. since the URLs below are used for various purposes outside of
# the WSGI application (static and media files), these need to be updated to
# reflect this discrepancy.
FORCE_SCRIPT_NAME = ''
LOGIN_URL = FORCE_SCRIPT_NAME + '/login/'
LOGIN_REDIRECT_URL = FORCE_SCRIPT_NAME + '/query/'
LOGOUT_URL = '/logout/'
ROOT_URLCONF = 'omop_harvest.conf.urls'
# For non-publicly accessible applications, the siteauth app can be used to
# restrict access site-wide.
# SITEAUTH_ACCESS_ORDER = 'allow/deny'
#
SITEAUTH_ALLOW_URLS = (
r'^log(in|out)/',
r'^password/reset/',
r'^(register|verify)/',
)
SITEAUTH_DENY_URLS = (
r'^workspace/',
r'^workspace/discover/',
r'^query/',
r'^results/+',
r'^api/+',
r'^details/\d+/',
r'^moderate/+',
r'^verify/+',
)
#
# MIDDLEWARE
#
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'siteauth.middleware.SiteAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'serrano.middleware.SessionMiddleware',
)
#
# EMAIL
#
SUPPORT_EMAIL = 'cbmisupport@email.chop.edu'
DEFAULT_FROM_EMAIL = 'cbmisupport@email.chop.edu'
EMAIL_SUBJECT_PREFIX = '[omop_harvest] '
SEND_BROKEN_LINK_EMAILS = False
#
# LOGGING
#
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
}
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/omop_harvest.log',
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter': 'standard',
},
'request_handler': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/omop_harvest_requests.log',
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter': 'standard',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
}
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'django.request': {
'handlers': ['request_handler'],
'level': 'DEBUG',
'propagate': False
},
'avocado': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'serrano': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
}
#
# CACHE
#
# For production environments, the memcached backend is highly recommended
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique',
'KEY_PREFIX': 'omop_harvest',
'VERSION': 1,
}
}
CACHE_MIDDLEWARE_SECONDS = 0
# This is not necessary to set if the above `KEY_PREFIX` value is set since
# the `KEY_PREFIX` namespaces all cache set by this application
CACHE_MIDDLEWARE_KEY_PREFIX = 'omop_harvest'
#
# SESSIONS AND COOKIES
#
CSRF_COOKIE_NAME = 'omop_harvest_csrftoken'
# SESSION_COOKIE_AGE = 60 * 20
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
SESSION_COOKIE_NAME = 'omop_harvest_sessionid'
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_SAVE_EVERY_REQUEST = False
#
# OTHER PROJECT SETTINGS
#
# USE_ETAGS = True
IGNORABLE_404_PATHS = (
r'robots.txt$',
r'favicon.ico$',
)
#
# VARIOUS APP SETTINGS
#
# The primary key of the ``Site`` object for the Sites Framework
SITE_ID = 1
#
# ModelTrees Configuration
#
MODELTREES = {
'default': {
'model': 'omop_harvest.Person',
}
}
#
# Haystack Configuration
#
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': os.path.join(os.path.dirname(__file__), '../../whoosh.index'),
}
}
#
# Avocado Configuration
#
# TODO: Should data_cache_enabled be set to True?
AVOCADO = {
'DATA_CACHE_ENABLED': False,
'METADATA_MIGRATION_APP': 'omop_harvest',
}
| true
| true
|
f707ed0c7e8ee188d0347cf515666d3fbef96b5e
| 1,580
|
py
|
Python
|
wsltools/similar.py
|
Symbo1/wsltools
|
0b6e536fc85c707a1c81f0296c4e91ca835396a1
|
[
"MIT"
] | 412
|
2020-04-16T08:11:58.000Z
|
2022-02-02T19:49:53.000Z
|
wsltools/similar.py
|
Symbo1/wsltools
|
0b6e536fc85c707a1c81f0296c4e91ca835396a1
|
[
"MIT"
] | 1
|
2020-04-16T14:03:46.000Z
|
2020-04-17T03:41:18.000Z
|
wsltools/similar.py
|
Symbo1/wsltools
|
0b6e536fc85c707a1c81f0296c4e91ca835396a1
|
[
"MIT"
] | 33
|
2020-04-16T08:48:53.000Z
|
2021-10-20T04:39:29.000Z
|
# -*- coding: utf-8 -*-
__author__ = 'CongRong <tr3jer@gmail.com>'
import difflib
from .utils.compat import bytes_decode, xrange
hashbits = 128
difflib_threshold = 0.95
simhash_threshold = 0.95
def simhash(tokens):
    """Compute a ``hashbits``-bit SimHash fingerprint of *tokens*.

    Each token's hash casts a +1/-1 vote per bit position; bit *i* of
    the fingerprint is set when the net vote at position *i* is
    non-negative.
    """
    votes = [0] * hashbits
    for token_hash in (string_hash(token) for token in tokens):
        for bit in xrange(hashbits):
            if token_hash & (1 << bit):
                votes[bit] += 1
            else:
                votes[bit] -= 1
    fingerprint = 0
    for bit in xrange(hashbits):
        if votes[bit] >= 0:
            fingerprint |= 1 << bit
    return fingerprint
def string_hash(source):
    """Map *source* to a ``hashbits``-bit integer.

    Port of CPython 2's classic string hash, truncated to ``hashbits``
    bits; the empty string hashes to 0.
    """
    if source == "":
        return 0
    mask = 2 ** hashbits - 1
    acc = ord(source[0]) << 7
    for ch in source:
        acc = ((acc * 1000003) ^ ord(ch)) & mask
    acc ^= len(source)
    # Mirror CPython's convention of never returning -1 from a hash.
    return -2 if acc == -1 else acc
def hamming_distance(hash1, hash2):
    """Return the number of differing bits between two fingerprints.

    Only the low ``hashbits`` bits of each hash are compared.
    """
    diff = (hash1 ^ hash2) & ((1 << hashbits) - 1)
    # Popcount via bin(): one C-level pass instead of the manual
    # Kernighan bit-clearing loop; result is identical.
    return bin(diff).count("1")
def similar(content1, content2, engine='difflib'):
    """Decide whether two contents are near-duplicates.

    :param content1: first content (str or bytes)
    :param content2: second content (str or bytes)
    :param engine: [optional] 'difflib' / 'simhash', Default By difflib
    :return: Bool
    """
    content1, content2 = map(bytes_decode, [content1, content2])
    sim = False
    if engine == 'difflib':
        sim = difflib.SequenceMatcher(None, content1, content2).quick_ratio() > difflib_threshold
    elif engine == 'simhash':
        hash1 = simhash(content1.split())
        hash2 = simhash(content2.split())
        hamming = hamming_distance(hash1, hash2)
        res = float(hashbits - hamming) / hashbits
        # Bug fix: the original assigned to `simhash_threshold`, which made
        # the name function-local, so the comparison below raised
        # UnboundLocalError whenever the hamming distance was 0 (identical
        # fingerprints). Use a local threshold instead: relax to 0.90 when
        # the fingerprints differ at all, otherwise use the module default.
        threshold = 0.90 if hamming else simhash_threshold
        sim = res >= threshold
    return sim
| 19.268293
| 100
| 0.643038
|
__author__ = 'CongRong <tr3jer@gmail.com>'
import difflib
from .utils.compat import bytes_decode, xrange
hashbits = 128
difflib_threshold = 0.95
simhash_threshold = 0.95
def simhash(tokens):
v = [0] * hashbits
for t in [string_hash(x) for x in tokens]:
for i in xrange(hashbits):
bitmask = 1 << i
if t & bitmask:
v[i] += 1
else:
v[i] -= 1
fingerprint = 0
for i in xrange(hashbits):
if v[i] >= 0:
fingerprint += 1 << i
return fingerprint
def string_hash(source):
if source == "":
return 0
else:
x = ord(source[0]) << 7
m = 1000003
mask = 2 ** hashbits - 1
for c in source:
x = ((x * m) ^ ord(c)) & mask
x ^= len(source)
if x == -1:
x = -2
return x
def hamming_distance(hash1, hash2):
x = (hash1 ^ hash2) & ((1 << hashbits) - 1)
tot = 0
while x:
tot += 1
x &= x - 1
return tot
def similar(content1, content2, engine='difflib'):
content1, content2 = map(lambda x: bytes_decode(x), [content1, content2])
sim = False
if engine == 'difflib':
if difflib.SequenceMatcher(None, content1, content2).quick_ratio() > difflib_threshold: sim = True
elif engine == 'simhash':
hash1 = simhash(content1.split())
hash2 = simhash(content2.split())
hamming = hamming_distance(hash1, hash2)
res = float(hashbits - hamming) / hashbits
if hamming: simhash_threshold = 0.90
sim = True if res >= simhash_threshold else False
return sim
| true
| true
|
f707edd2b040f0802a1354f8b3a12e88d8aee693
| 58
|
py
|
Python
|
essentials/hello.py
|
ariannasg/python3-essential-training
|
9b52645f5ccb57d2bda5d5f4a3053681a026450a
|
[
"MIT"
] | 1
|
2020-06-02T08:37:41.000Z
|
2020-06-02T08:37:41.000Z
|
essentials/hello.py
|
ariannasg/python3-training
|
9b52645f5ccb57d2bda5d5f4a3053681a026450a
|
[
"MIT"
] | null | null | null |
essentials/hello.py
|
ariannasg/python3-training
|
9b52645f5ccb57d2bda5d5f4a3053681a026450a
|
[
"MIT"
] | null | null | null |
print('Hello, World.')
# CONSOLE OUTPUT:
# Hello, World.
| 11.6
| 22
| 0.655172
|
print('Hello, World.')
| true
| true
|
f707ef2ead5ec54c7eb2427bced3cc02f631c7dd
| 31,651
|
py
|
Python
|
request.py
|
alexcornier/INSEE
|
a5dc6e1267834754ac1cd1331203b5e835828946
|
[
"MIT"
] | 1
|
2020-08-25T16:20:03.000Z
|
2020-08-25T16:20:03.000Z
|
request.py
|
alexcornier/INSEE
|
a5dc6e1267834754ac1cd1331203b5e835828946
|
[
"MIT"
] | null | null | null |
request.py
|
alexcornier/INSEE
|
a5dc6e1267834754ac1cd1331203b5e835828946
|
[
"MIT"
] | null | null | null |
#================================================================
# Ensemble de requêtes SQL sur une base de données SQL
# hébergées sur un serveur local postgresql
#
# Modules pythons nécessaires
# psycopg2 (SQL connection)
# pandas (DataFrame et HTML)
# matplotlib
# jinja2 (styles HTML)
#
# Alexandre Cornier - 2020
#================================================================
import psycopg2
import pandas as pd
import webbrowser
import pathlib
# Console-output switch
bconsole = False  # no console display by default

# ---------------------------- Database connection ------------------------------------
# NOTE(review): credentials are hard-coded in the DSN below — move them to
# environment variables or a config file before sharing/deploying this script.
connection = psycopg2.connect("host=localhost port=5432 dbname=cremi user=postgres password=Audierne")
cur = connection.cursor()
#-------------------------------------- Fonctions ------------------------------------------------
# HTML display of the results in the browser
def affiche_html(titre_question, question, fichier, resultat_html):
    """Write an HTML report page to *fichier* and open it in the browser.

    :param titre_question: page title (also rendered as the <h1> heading)
    :param question: descriptive text rendered under the heading
    :param fichier: output file name, relative to this script's directory
    :param resultat_html: pre-rendered HTML fragment (e.g. a styled table)
    """
    # Prepare the HTML file header
    header = """<!DOCTYPE html>
<html>
<head>
<title>""" + titre_question + """</title>
</head>
<body>
<h1>""" + titre_question + """</h1>
<p>""" + question + """</p>
"""
    footer = """
</body>
</html>"""

    # write html to file
    text_file = open(fichier, "w")
    text_file.write(header)
    text_file.write(resultat_html)
    text_file.write(footer)
    text_file.close()

    # open report.html in browser
    # (build an absolute file:// URL so webbrowser resolves it reliably)
    current_path = pathlib.Path(__file__).parent.absolute()
    fichier = "file://" + str(current_path) + "/" + fichier
    webbrowser.open(fichier)
# Question 1
def listeRegions():
    """List all regions in the database as an HTML table in the browser.

    Uses the module-level cursor ``cur``; also echoes the table to the
    console when the ``bconsole`` flag is set.
    """
    cur.execute("""SELECT reg, libelle FROM regions ORDER BY reg""")
    query_result = cur.fetchall()
    df = pd.DataFrame(query_result, columns=['Code région', 'Région'])

    # Zebra-striped table styling; highlight the region-name column.
    html = (df.style
            .set_table_styles([
                {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
                {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
                {'selector': 'th', 'props': [
                    ('background', '#606060'),
                    ('color', 'white'),
                    ('font-family', 'verdana')]},
                {'selector': 'td', 'props': [('font-family', 'verdana')]}])
            .apply(lambda x: ['background: lightblue' if x.name == "Région" else '' for i in x])
            .hide_index()
            .render())

    affiche_html("Question 1", "Régions présentes dans la base de données",
                 "question_01.html", html)

    if (bconsole):
        print("les régions présentes dans la base de données sont : ")
        print(df)
        print("Appuyez sur entrée pour revenir au menu")
        input()
# Question 2
def listeDepartement():
    """List all departments in the database as an HTML table in the browser.

    Uses the module-level cursor ``cur``; also echoes the table to the
    console when the ``bconsole`` flag is set.
    """
    cur.execute("""SELECT dep, libelle FROM departements ORDER BY dep""")
    query_result = cur.fetchall()
    df = pd.DataFrame(query_result, columns=['Code département', 'Département'])

    # Zebra-striped table styling; highlight the department-name column.
    html = (df.style
            .set_table_styles([
                {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
                {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
                {'selector': 'th', 'props': [
                    ('background', '#606060'),
                    ('color', 'white'),
                    ('font-family', 'verdana')]},
                {'selector': 'td', 'props': [('font-family', 'verdana')]}])
            .apply(lambda x: ['background: lightblue' if x.name == "Département" else '' for i in x])
            .hide_index()
            .render())

    affiche_html("Question 2", "Départements présents dans la base de données",
                 "question_02.html", html)

    if (bconsole):
        print("les départements présents dans la base de données sont : ")
        print(df)
        print("Appuyez sur entrée pour revenir au menu")
        input()
# Question 3
def choixRegions():
    """Prompt for a region name and display its social indicators.

    Reads the region name interactively, queries the ``regionsocial``
    table through the module-level cursor ``cur`` and renders the
    indicators as an HTML table in the browser (plus console output when
    ``bconsole`` is set).
    """
    print("Donnez le nom de la région :")
    choix = input().capitalize()

    # Parameterized query: the previous string interpolation
    # ("... = '%s'" % choix) was vulnerable to SQL injection through the
    # interactive input; let the driver quote the value instead.
    cur.execute("""SELECT * FROM regionsocial WHERE region = %s """, (choix,))

    lst = []
    for info in cur.fetchall():
        lst = [["Numéro", info[0]],
               ["Taux de pauvreté (%)", info[2]],
               ["Part des jeunes non insérés (%) en 2014", info[3]],
               ["Part des jeunes non insérés (%) en 2009", info[4]],
               ["Poids de l'économie sociale dans les emplois salariés du territoire (%)", info[5]]]
    df = pd.DataFrame(lst, columns=['Information', 'Valeur'])

    # Zebra-striped table styling; right-align the value column.
    html = (df.style
            .set_table_styles([
                {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
                {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
                {'selector': 'th', 'props': [
                    ('background', '#606060'),
                    ('color', 'white'),
                    ('font-family', 'verdana')]},
                {'selector': 'td', 'props': [('font-family', 'verdana')]}])
            .set_properties(subset=["Valeur"], **{'text-align': 'right'})
            .hide_index()
            .render())

    # Typo fix in the user-facing text: "la régione" -> "la région".
    affiche_html("Question 3", "Informations concernant la région " + choix,
                 "question_03.html", html)

    if (bconsole):
        print("-------------- Informations concernant", choix, "--------------")
        print(df)
        print("Appuyez sur entrée pour revenir au menu")
        input()
# Question 4
def choix_departement_theme():
    """Prompt for a department and a theme, then display its indicators.

    Theme "1"/"social" shows social indicators from ``departementsocial``;
    anything else (the default) shows environmental indicators from
    ``departementenvironnement``. Results are rendered as an HTML table
    in the browser, with optional console echo when ``bconsole`` is set.

    NOTE(review): both queries build SQL with '%' string interpolation on
    interactive input — vulnerable to SQL injection; switch to a
    parameterized ``cur.execute(query, params)``.
    """
    print("Donnez le nom du département :")
    choix1 = input().capitalize()
    print("Choisissez un thème : 1.Social ou 2.Environnement (par défaut)")
    choix2 = input()
    lst = []
    if choix2 == "1" or choix2.lower() == "social":
        # Social theme: life expectancy, access to health services, flood risk.
        cur.execute("""SELECT * FROM departementsocial WHERE departements = '%s' """ % choix1)
        for info in cur.fetchall():
            lst = [["Numéro", info[0]],
                   ["Espérance de vie des hommes à la naissance en 2015 (années)", info[2]],
                   ["Espérance de vie des hommes à la naissance en 2010 (années)", info[3]],
                   ["Espérance de vie des femmes à la naissance en 2015 (années)", info[4]],
                   ["Espérance de vie des femmes à la naissance en 2010 (années)", info[5]],
                   ["Part de la population éloignée de plus de 7 mn des services de santé de proximité (%) en 2016", info[6]],
                   ["Part de la population estimée en zone inondable (%)", info[7]]]
        df = pd.DataFrame(lst, columns=['Information', 'Valeur'])
        # Coerce values to numbers so they can be formatted uniformly.
        df["Valeur"] = pd.to_numeric(df["Valeur"], errors='coerce')
        html = (df.style
                .set_table_styles([
                    {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
                    {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
                    {'selector': 'th', 'props': [
                        ('background', '#606060'),
                        ('color', 'white'),
                        ('font-family', 'verdana')]},
                    {'selector': 'td', 'props': [('font-family', 'verdana')]}])
                .format({"Valeur": "{:.1f}"})
                .set_properties(subset=["Valeur"], **{'text-align': 'right'})
                .hide_index()
                .render())
        affiche_html("Question 4a",
                     "Informations sociales concernant le département " + choix1,
                     "question_04a.html", html)
        if (bconsole):
            df["Valeur"] = df["Valeur"].map("{:.1f}".format)
            print("-------------- Informations concernant", choix1, "--------------")
            print(df)
    else:
        # Environment theme (default): waste recovery, land use, renewables.
        cur.execute("""SELECT * FROM departementenvironnement WHERE departements = '%s' """ % choix1)
        for info in cur.fetchall():
            lst = [["Numéro", info[0]],
                   ["Taux de valorisation matière et organique (%) en 2013", info[2]],
                   ["Taux de valorisation matière et organique (%) en 2009", info[3]],
                   ["Part de surfaces artificialisées (%) en 2012", info[4]],
                   ["Part de surfaces artificialisées (%) en 2006", info[5]],
                   ["Part de l'agriculture biologique dans la surface agricole totale (%) en 2016", info[6]],
                   ["Part de l'agriculture biologique dans la surface agricole totale (%) en 2010", info[7]],
                   ["Production de granulats (tonnes) en 2014", info[8]],
                   ["Production de granulats (tonnes) en 2009", info[9]],
                   ["Eolien (%) en 2015", info[10]],
                   ["Eolien (%) en 2010", info[11]],
                   ["Photovoltaïque (%) en 2015", info[12]],
                   ["Photovoltaïque (%) en 2010", info[13]],
                   ["Autre (biogaz, biomasse, géothermie, incinération de déchets, petite hydraulique) (%) en 2015",info[14]],
                   ["Autre (biogaz, biomasse, géothermie, incinération de déchets, petite hydraulique) (%) en 2010",info[15]]]
        df = pd.DataFrame(lst, columns=['Information', 'Valeur'])
        # Coerce values to numbers so they can be formatted uniformly.
        df["Valeur"] = pd.to_numeric(df["Valeur"], errors='coerce')
        html = (df.style
                .set_table_styles([
                    {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
                    {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
                    {'selector': 'th', 'props': [
                        ('background', '#606060'),
                        ('color', 'white'),
                        ('font-family', 'verdana')]},
                    {'selector': 'td', 'props': [('font-family', 'verdana')]}])
                .format({"Valeur": "{:.1f}"})
                .set_properties(subset=["Valeur"], **{'text-align': 'right'})
                .hide_index()
                .render())
        affiche_html("Question 4b",
                     "Informations environnementales concernant le département " + choix1,
                     "question_04b.html", html)
        if (bconsole):
            df["Valeur"] = df["Valeur"].map("{:.1f}".format)
            print("-------------- Informations concernant", choix1, "--------------")
            print(df)
    if (bconsole):
        print("Appuyez sur entrée pour revenir au menu")
        input()
# Question 5
def typeEnergie():
    """Demande à l'utilisateur un type d'énergie (éolien, photovoltaïque, autre)
    et affiche la liste des départements où la part de cette énergie a augmenté
    entre les deux années de référence (2010 -> 2015), classés de la plus forte
    augmentation à la plus faible.

    Sortie : un fichier HTML (question_05a/b/c.html) via affiche_html, et un
    affichage console si bconsole est vrai.
    """
    print("Choisissez un type d'energie : 1.Eolien, 2.Photovoltaique ou 3.Autre")
    choix = input()
    if choix == "1" or choix.lower() == "eolien":
        _croissance_energie("eolien", "Question 5a",
                            "Départements où la part de l'énergie éolienne a augmenté entre les deux années de référence",
                            "question_05a.html")
    if choix == "2" or choix.lower() == "photovoltaique":
        _croissance_energie("photovoltaique", "Question 5b",
                            "Départements où la part de l'énergie photovoltaïque a augmenté entre les deux années de référence",
                            "question_05b.html")
    if choix == "3" or choix.lower() == "autre":
        _croissance_energie("autre", "Question 5c",
                            "Départements où la part des énergies renouvelables autres a augmenté entre les deux années de référence",
                            "question_05c.html")
    if (bconsole):
        print("Appuyez sur entrée pour revenir au menu")
        input()

def _croissance_energie(colonne, titre_question, titre, fichier):
    """Affiche (HTML + console en option) les départements où la part de
    l'énergie `colonne` a augmenté entre 2010 et 2015.

    :param colonne: préfixe de colonne SQL ('eolien', 'photovoltaique' ou
        'autre'). Toujours issu d'une liste fermée codée en dur dans
        typeEnergie, jamais d'une saisie utilisateur : pas d'injection SQL.
    :param titre_question: titre de la page HTML (ex. "Question 5a").
    :param titre: sous-titre / question affichée sur la page.
    :param fichier: nom du fichier HTML généré.
    """
    cur.execute("""SELECT nb, departements, {0}2015 - {0}2010 AS croissance FROM departementenvironnement
    WHERE {0}2015 > {0}2010
    ORDER BY {0}2015 - {0}2010 DESC""".format(colonne))
    df = pd.DataFrame(cur.fetchall(), columns=['Code', 'Département', 'Croissance'])
    html = (df.style
            .set_table_styles([
                {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
                {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
                {'selector': 'th', 'props': [
                    ('background', '#606060'),
                    ('color', 'white'),
                    ('font-family', 'verdana')]},
                {'selector': 'td', 'props': [('font-family', 'verdana')]}])
            .apply(lambda x: ['background: lightblue' if x.name == "Département" else '' for i in x])
            .background_gradient(cmap='Blues', subset=["Croissance"])
            .format({"Croissance": "{:.1f}pts"})
            .set_properties(subset=["Croissance"], **{'text-align': 'right'})
            .hide_index()
            .render())
    affiche_html(titre_question, titre, fichier, html)
    if (bconsole):
        # Formatage console appliqué après le rendu HTML (le Styler a le sien)
        df["Croissance"] = df["Croissance"].map("{:.1f}pts".format)
        print("Voici la liste des départements où la part de cette énergie a augmenté entre les deux années de référence : ")
        print(df)
# Question 6
def tonnes():
    """Liste les départements appartenant à une région dont la production de
    granulats a dépassé 25 000 000 tonnes en 2014 (HTML + console en option)."""
    cur.execute("""SELECT departements.reg, regions.libelle AS region, departements.libelle AS departement
    FROM departements, regions
    WHERE departements.reg
    IN (SELECT departements.reg from departements
    INNER JOIN departementenvironnement
    ON departements.dep = departementenvironnement.nb
    INNER JOIN regions
    ON departements.reg = regions.reg
    GROUP BY departements.reg
    HAVING SUM(prodgranulat2014) > 25000000
    AND SUM(prodgranulat2014) <> 'NaN')
    ORDER BY region, departement""")
    tableau = pd.DataFrame(cur.fetchall(), columns=['Code région', 'Région', 'Département'])
    # Mise en forme commune des tableaux HTML du projet
    styles = [
        {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
        {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
        {'selector': 'th', 'props': [('background', '#606060'),
                                     ('color', 'white'),
                                     ('font-family', 'verdana')]},
        {'selector': 'td', 'props': [('font-family', 'verdana')]}]
    rendu = tableau.style.set_table_styles(styles)
    rendu = rendu.apply(lambda col: ['background: lightblue' if col.name == "Département" else '' for _ in col])
    html = rendu.hide_index().render()
    affiche_html("Question 6",
                 "Départements dont la région a eu une production de granulats supérieure à 25 000 000 tonnes en 2014",
                 "question_06.html", html)
    if (bconsole):
        print("les départements dont la région a eu une production de granulats supérieure à 25 000 000 tonnes en 2014 sont :")
        print(tableau)
        print("Appuyez sur entrée pour revenir au menu")
        input()
# Question 7
def topFive():
    """Affiche les 5 départements au plus fort taux d'énergie éolienne comme
    source de la puissance électrique en 2015 (HTML + console en option)."""
    cur.execute("""SELECT nb, departements, eolien2015 FROM departementenvironnement
    ORDER BY nullif(eolien2015, 'NaN')
    DESC nulls last LIMIT 5""")
    col_part = "Part de l'énergie éolienne en 2015"
    tableau = pd.DataFrame(cur.fetchall(),
                           columns=['Code département', 'Département', col_part])
    styles = [
        {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
        {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
        {'selector': 'th', 'props': [('background', '#606060'),
                                     ('color', 'white'),
                                     ('font-family', 'verdana')]},
        {'selector': 'td', 'props': [('font-family', 'verdana')]}]
    html = (tableau.style
            .set_table_styles(styles)
            .apply(lambda col: ['background: lightblue' if col.name == "Département" else '' for _ in col])
            .background_gradient(cmap='Blues', subset=[col_part])
            .format({col_part: "{:.1f}%"})
            .set_properties(subset=[col_part], **{'text-align': 'right'})
            .hide_index()
            .render())
    affiche_html("Question 7",
                 "Les 5 départements avec le plus grand taux d’énergie éolienne comme source de la puissance électrique en 2015",
                 "question_07.html", html)
    if (bconsole):
        tableau[col_part] = tableau[col_part].map("{:.1f}%".format)
        print("Les 5 départements avec le plus grand taux d’énergie éolienne comme source de la puissance électrique en 2015 sont :")
        print(tableau)
        print("Appuyez sur entrée pour revenir au menu")
        input()
# Question 8
def weak():
    """Affiche la région du département ayant le plus faible taux de
    valorisation matière et organique en 2013 (HTML + console en option)."""
    cur.execute("""SELECT regions.reg, regions.libelle AS region,
    departements.libelle AS departement, departementenvironnement.valorisationorga2013
    FROM departements
    INNER JOIN regions
    ON departements.reg = regions.reg
    INNER JOIN departementenvironnement
    ON departements.dep = departementenvironnement.nb
    ORDER BY nullif(valorisationorga2013, 'NaN') nulls last LIMIT 1""")
    tableau = pd.DataFrame(cur.fetchall(),
                           columns=['Code région', 'Région', 'Département', 'Valorisation en 2013'])
    # Formattage des valeurs (une seule ligne attendue grâce au LIMIT 1)
    tableau["Valorisation en 2013"] = tableau["Valorisation en 2013"].map("{:.1f}".format)
    styles = [
        {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
        {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
        {'selector': 'th', 'props': [('background', '#606060'),
                                     ('color', 'white'),
                                     ('font-family', 'verdana')]},
        {'selector': 'td', 'props': [('font-family', 'verdana')]}]
    html = (tableau.style
            .set_table_styles(styles)
            .apply(lambda col: ['background: lightblue' if col.name == "Région" else '' for _ in col])
            .set_properties(subset=["Valorisation en 2013"], **{'text-align': 'right'})
            .hide_index()
            .render())
    affiche_html("Question 8",
                 "Région où se trouve le département ayant le plus faible taux de valorisation matière et organique en 2013",
                 "question_08.html", html)
    if (bconsole):
        print("La région où se trouve le département ayant le plus faible taux de valorisation matière et organique en 2013 est :")
        print("Reg, Région, Département, Valorisation2013")
        print(tableau)
        print("Appuyez sur entrée pour revenir au menu")
        input()
# Question 9
def bestPopMin():
    """Part (2016) de l'agriculture biologique dans la surface agricole du
    département dont la population est la plus éloignée (plus de 7 minutes)
    des services de santé de proximité (HTML + console en option)."""
    cur.execute("""SELECT departementenvironnement.departements, departementenvironnement.agriculturebio2016
    FROM departementenvironnement
    INNER JOIN departementsocial
    ON departementenvironnement.departements = departementsocial.departements
    ORDER BY nullif(popeloignee7min, 'NaN') DESC nulls last LIMIT 1""")
    col_bio = "Part de l'agriculture biologique"
    tableau = pd.DataFrame(cur.fetchall(), columns=['Département', col_bio])
    # Formattage des valeurs avant rendu
    tableau[col_bio] = tableau[col_bio].map("{:.1f}%".format)
    titre_html = "Part en 2016 (en %) de l’agriculture biologique dans la surface agricole totale du département<br>" +\
    "contenant le plus grand pourcentage de population éloignée de plus de 7 minutes des services de santé de proximité"
    styles = [
        {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
        {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
        {'selector': 'th', 'props': [('background', '#606060'),
                                     ('color', 'white'),
                                     ('font-family', 'verdana')]},
        {'selector': 'td', 'props': [('font-family', 'verdana')]}]
    html = (tableau.style
            .set_table_styles(styles)
            .apply(lambda col: ['background: lightblue' if col.name == col_bio else '' for _ in col])
            .set_properties(subset=[col_bio], **{'text-align': 'right'})
            .hide_index()
            .render())
    affiche_html("Question 9", titre_html, "question_09.html", html)
    if (bconsole):
        print("En 2016, la part (en %) de l’agriculture biologique dans la surface agricole totale du département")
        print("contenant le plus grand pourcentage de population éloignée de plus de 7 minutes des services de santé de proximité est : ")
        print(tableau)
        print("Appuyez sur entrée pour revenir au menu")
        input()
# Question 10
def pauvrete():
    """Taux de pauvreté (2014) des régions dont la part des jeunes non insérés
    est supérieure à 30% en 2014, trié par taux décroissant (HTML + console)."""
    cur.execute("""SELECT pauvrete,region
    FROM regionsocial
    WHERE jeunesnoninseres2014 > 30
    AND pauvrete <> 'NaN'
    ORDER BY nullif(pauvrete, 'NaN') DESC nulls last""")
    tableau = pd.DataFrame(cur.fetchall(), columns=['Pauvreté', 'Région'])
    styles = [
        {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
        {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
        {'selector': 'th', 'props': [('background', '#606060'),
                                     ('color', 'white'),
                                     ('font-family', 'verdana')]},
        {'selector': 'td', 'props': [('font-family', 'verdana')]}]
    html = (tableau.style
            .set_table_styles(styles)
            .apply(lambda col: ['background: lightblue' if col.name == "Pauvreté" else '' for _ in col])
            .format({"Pauvreté": "{:.2f}%"})
            .set_properties(subset=["Pauvreté"], **{'text-align': 'right'})
            .hide_index()
            .render())
    affiche_html("Question 10",
                 "Taux de pauvreté connu en 2014 des régions dont la part des jeunes non insérés est supérieure à 30% en 2014",
                 "question_10.html", html)
    if (bconsole):
        tableau["Pauvreté"] = tableau["Pauvreté"].map("{:.2f}%".format)
        print("Le taux de pauvreté connu en 2014 des régions dont la part des jeunes non insérés est supérieure à 30% en 2014 sont : ")
        print(tableau)
        print("Appuyez sur entrée pour revenir au menu")
        input()
# Question 11
def poids_eco():
    """Poids de l'économie sociale (2015) dans les emplois salariés des régions
    dont la puissance électrique renouvelable provenait à au moins 10% du
    photovoltaïque et dont l'agriculture biologique couvrait au moins 5% de la
    surface agricole totale (moyennes calculées sur les départements).

    Sortie : question_11.html via affiche_html, et console si bconsole.
    """
    cur.execute("""SELECT regions.reg, regions.libelle, poidseco,
    AVG(photovoltaique2015) AS photovoltaique2015,
    AVG(agriculturebio2016) AS agriculturebio2016
    FROM departements
    INNER JOIN departementenvironnement
    ON departements.dep = departementenvironnement.nb
    INNER JOIN regionsocial
    ON departements.reg = regionsocial.nb
    INNER JOIN regions
    ON departements.reg = regions.reg
    GROUP BY poidseco, regions.reg
    HAVING AVG(photovoltaique2015) >= 10
    AND AVG(photovoltaique2015) <> 'NaN'
    AND AVG(agriculturebio2016) >= 5
    AND AVG(agriculturebio2016) <> 'NaN'
    ORDER BY poidseco""")
    query_result = cur.fetchall()
    df = pd.DataFrame(query_result, columns=['Code région', 'Région', "Poids de l'économie sociale",
                                             "Part moyenne du photovoltaïque", "Part moyenne de l'agriculture Bio"])
    # Conversion string vers float pour le formattage
    df["Part moyenne du photovoltaïque"] = pd.to_numeric(df["Part moyenne du photovoltaïque"], errors='coerce').fillna(0)
    df["Part moyenne de l'agriculture Bio"] = pd.to_numeric(df["Part moyenne de l'agriculture Bio"], errors="coerce").fillna(0)
    titre_html = "Poids de l'économie sociale en 2015 dans les emplois salariés de la région<br>" +\
    "dont la source de la puissance électrique en énergies renouvelables provenait à au moins 10% de l'énergie photovoltaïque<br>" +\
    "et dont la part de l'agriculture biologique dans la surface agricole totale était d'au moins 5%"
    html = (df.style
            .set_table_styles([
                {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
                {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
                {'selector': 'th', 'props': [
                    ('background', '#606060'),
                    ('color', 'white'),
                    ('font-family', 'verdana')]},
                {'selector': 'td', 'props': [('font-family', 'verdana')]}])
            .set_properties(subset=["Poids de l'économie sociale", "Part moyenne du photovoltaïque",
                                    "Part moyenne de l'agriculture Bio"], **{'text-align': 'right'})
            .hide_index()
            .background_gradient(cmap='Blues', subset=["Poids de l'économie sociale"])
            # un seul appel .format pour les trois colonnes, comme dans les
            # autres fonctions du fichier (au lieu de trois appels chaînés)
            .format({"Poids de l'économie sociale": "{:.1f}%",
                     "Part moyenne du photovoltaïque": "{:.1f}%",
                     "Part moyenne de l'agriculture Bio": "{:.1f}%"})
            .render())
    affiche_html("Question 11", titre_html, "question_11.html", html)
    if (bconsole):
        df["Poids de l'économie sociale"] = df["Poids de l'économie sociale"].map("{:.1f}%".format)
        df["Part moyenne du photovoltaïque"] = df["Part moyenne du photovoltaïque"].map("{:.1f}%".format)
        df["Part moyenne de l'agriculture Bio"] = df["Part moyenne de l'agriculture Bio"].map("{:.1f}%".format)
        print("Poids de l'économie sociale en 2015 dans les emplois salariés de la région")
        print("dont la source de la puissance électrique en énergies renouvelables provenait à au moins 10% de l'énergie photovoltaïque")
        print("et dont la part de l'agriculture biologique dans la surface agricole totale était d'au moins 5%")
        print(df)
        print("Appuyez sur entrée pour revenir au menu")
        input()
def menu():
    """Affiche le menu principal des questions du projet INSEE."""
    lignes = (
        "",
        "------------------------------------ Projet INSEE -----------------------------------",
        "",
        "1...Afficher la liste des régions",
        "2...Afficher la liste des départements",
        "3...Demander à l’utilisateur de choisir une région et afficher les données de la region choisie",
        "4...Demander à l’utilisateur de choisir un département et un thème : social ou environnemental,",
        "    | et afficher les données demandées pour le departement choisi",
        "5...demander à l’utilisateur de choisir un type d’énergie (éolien, photovoltaïque, autre)",
        "    | et en fonction de ce choix retourner la liste des départements où la part de cette énergie a augmenté",
        "    | entre les deux années de référence, classés de la plus forte augmentation à la plus faible.",
        "6...les départements dont la région a eu une production de granulats supérieure à 25 000 000 tonnes en 2014",
        "7...les 5 départements avec le plus grand taux d’énergie éolienne comme source de la puissance électrique en 2015",
        "8...La région où se trouve le département ayant le plus faible taux de valorisation matière et organique en 2013",
        "9...La part (en %) de l’agriculture biologique dans la surface agricole totale du département contenant",
        "    | le plus grand pourcentage de population éloignée de plus de 7 minutes des services de santé de proximité en 2016",
        "10..Le taux de pauvreté en 2014 des régions dont la part des jeunes non insérés est supérieure à 30% en 2014 ",
        "11..Le poids de l'économie sociale dans les emplois salariés de la région dont la source de la puissance électrique",
        "    | en énergies renouvelables provenait à au moins 10% de l’énergie photovoltaïque et dont la part",
        "    | de l’agriculture biologique dans la surface agricole totale était d’au moins 5% en 2015",
        "",
        "0...Quitter",
        "-------------------------------------------------------------------------------------",
    )
    for ligne in lignes:
        print(ligne)
#----------------------------------------- MAIN --------------------------------------------------
# Demande d'affichage console ou non, HTML seul par défaut
print("Souhaitez-vous afficher les résultats dans la console,")
print("en plus de la création des fichiers HTML ?")
print(" (O Oui / N Non)")
choix = input()
# startswith sur la saisie nettoyée : l'ancien choix[0] levait IndexError
# quand l'utilisateur validait sans rien saisir
if choix.strip().lower().startswith("o"):
    bconsole = True
# Table de correspondance numéro de question -> fonction de réponse
reponses = {"1": listeRegions,
            "2": listeDepartement,
            "3": choixRegions,
            "4": choix_departement_theme,
            "5": typeEnergie,
            "6": tonnes,
            "7": topFive,
            "8": weak,
            "9": bestPopMin,
            "10": pauvrete,
            "11": poids_eco}
# Menu principal
while True:
    menu()
    print("Choisissez un numéro de question pour avoir la réponse :")
    choix = input()
    if choix == "0":
        break
    if choix in reponses:
        reponses[choix]()
    else:
        print("Choix invalide")
# fermeture "propre" du curseur et de la connection
cur.close()
connection.close()
| 45.02276
| 138
| 0.563616
|
import psycopg2
import pandas as pd
import webbrowser
import pathlib
bconsole = False # pas d'affichage console par défaut
connection = psycopg2.connect("host=localhost port=5432 dbname=cremi user=postgres password=Audierne")
cur = connection.cursor()
def affiche_html(titre_question, question, fichier, resultat_html):
header = """<!DOCTYPE html>
<html>
<head>
<title>""" + titre_question + """</title>
</head>
<body>
<h1>""" + titre_question + """</h1>
<p>""" + question + """</p>
"""
footer = """
</body>
</html>"""
# write html to file
text_file = open(fichier, "w")
text_file.write(header)
text_file.write(resultat_html)
text_file.write(footer)
text_file.close()
# open report.html in browser
current_path = pathlib.Path(__file__).parent.absolute()
fichier = "file://" + str(current_path) + "/" + fichier
webbrowser.open(fichier)
# Question 1
def listeRegions():
cur.execute("""SELECT reg, libelle FROM regions ORDER BY reg""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code région', 'Région'])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', ' {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', ' ('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "Région" else '' for i in x])
.hide_index()
.render())
affiche_html("Question 1", "Régions présentes dans la base de données",\
"question_01.html", html)
if (bconsole):
print("les régions présentes dans la base de données sont : ")
print(df)
print("Appuyez sur entrée pour revenir au menu")
input()
# Question 2
def listeDepartement():
cur.execute("""SELECT dep, libelle FROM departements ORDER BY dep""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code département', 'Département'])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', ' {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', ' ('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "Département" else '' for i in x])
.hide_index()
.render())
affiche_html("Question 2", "Départements présents dans la base de données",\
"question_02.html", html)
if (bconsole):
print("les départements présents dans la base de données sont : ")
print(df)
print("Appuyez sur entrée pour revenir au menu")
input()
# Question 3
def choixRegions():
print("Donnez le nom de la région :")
choix = input().capitalize()
cur.execute("""SELECT * FROM regionsocial WHERE region = '%s' """ % choix)
lst = []
for info in cur.fetchall():
lst=[["Numéro", info[0]],
["Taux de pauvreté (%)", info[2]],
["Part des jeunes non insérés (%) en 2014", info[3]],
["Part des jeunes non insérés (%) en 2009", info[4]],
["Poids de l'économie sociale dans les emplois salariés du territoire (%)", info[5]]]
df = pd.DataFrame(lst, columns=['Information', 'Valeur'])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.set_properties(subset=["Valeur"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 3", "Informations concernant la régione " + choix,\
"question_03.html", html)
if (bconsole):
print("-------------- Informations concernant", choix, "--------------")
print(df)
print("Appuyez sur entrée pour revenir au menu")
input()
def choix_departement_theme():
print("Donnez le nom du département :")
choix1 = input().capitalize()
print("Choisissez un thème : 1.Social ou 2.Environnement (par défaut)")
choix2 = input()
lst = []
if choix2 == "1" or choix2.lower() == "social":
cur.execute("""SELECT * FROM departementsocial WHERE departements = '%s' """ % choix1)
for info in cur.fetchall():
lst = [["Numéro", info[0]],
["Espérance de vie des hommes à la naissance en 2015 (années)", info[2]],
["Espérance de vie des hommes à la naissance en 2010 (années)", info[3]],
["Espérance de vie des femmes à la naissance en 2015 (années)", info[4]],
["Espérance de vie des femmes à la naissance en 2010 (années)", info[5]],
["Part de la population éloignée de plus de 7 mn des services de santé de proximité (%) en 2016", info[6]],
["Part de la population estimée en zone inondable (%)", info[7]]]
df = pd.DataFrame(lst, columns=['Information', 'Valeur'])
df["Valeur"] = pd.to_numeric(df["Valeur"], errors='coerce')
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.format({"Valeur": "{:.1f}"})
.set_properties(subset=["Valeur"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 4a",\
"Informations sociales concernant le département " + choix1,\
"question_04a.html", html)
if (bconsole):
df["Valeur"] = df["Valeur"].map("{:.1f}".format)
print("-------------- Informations concernant", choix1, "--------------")
print(df)
else :
cur.execute("""SELECT * FROM departementenvironnement WHERE departements = '%s' """ % choix1)
for info in cur.fetchall():
lst = [["Numéro", info[0]],
["Taux de valorisation matière et organique (%) en 2013", info[2]],
["Taux de valorisation matière et organique (%) en 2009", info[3]],
["Part de surfaces artificialisées (%) en 2012", info[4]],
["Part de surfaces artificialisées (%) en 2006", info[5]],
["Part de l'agriculture biologique dans la surface agricole totale (%) en 2016", info[6]],
["Part de l'agriculture biologique dans la surface agricole totale (%) en 2010", info[7]],
["Production de granulats (tonnes) en 2014", info[8]],
["Production de granulats (tonnes) en 2009", info[9]],
["Eolien (%) en 2015", info[10]],
["Eolien (%) en 2010", info[11]],
["Photovoltaïque (%) en 2015", info[12]],
["Photovoltaïque (%) en 2010", info[13]],
["Autre (biogaz, biomasse, géothermie, incinération de déchets, petite hydraulique) (%) en 2015",info[14]],
["Autre (biogaz, biomasse, géothermie, incinération de déchets, petite hydraulique) (%) en 2010",info[15]]]
df = pd.DataFrame(lst, columns=['Information', 'Valeur'])
df["Valeur"] = pd.to_numeric(df["Valeur"], errors='coerce')
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.format({"Valeur": "{:.1f}"})
.set_properties(subset=["Valeur"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 4b",\
"Informations environnementales concernant le département " + choix1,\
"question_04b.html", html)
if (bconsole):
df["Valeur"] = df["Valeur"].map("{:.1f}".format)
print("-------------- Informations concernant", choix1, "--------------")
print(df)
if (bconsole):
print("Appuyez sur entrée pour revenir au menu")
input()
def typeEnergie():
print("Choisissez un type d'energie : 1.Eolien, 2.Photovoltaique ou 3.Autre")
choix = input()
if choix == "1" or choix.lower() == "eolien":
cur.execute("""SELECT nb, departements, eolien2015 - eolien2010 AS croissance FROM departementenvironnement
WHERE eolien2015 > eolien2010
ORDER BY eolien2015 - eolien2010 DESC""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code', 'Département', 'Croissance'])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', ' {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', ' ('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "Département" else '' for i in x])
.background_gradient(cmap='Blues', subset=["Croissance"])
.format({"Croissance": "{:.1f}pts"})
.set_properties(subset=["Croissance"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 5a",\
"Départements où la part de l'énergie éolienne a augmenté entre les deux années de référence",\
"question_05a.html", html)
if (bconsole):
df["Croissance"] = df["Croissance"].map("{:.1f}pts".format)
print(
"Voici la liste des départements où la part de cette énergie a augmenté entre les deux années de référence : ")
print(df)
if choix == "2" or choix.lower() == "photovoltaique":
cur.execute("""SELECT nb, departements, photovoltaique2015 - photovoltaique2010 AS croissance FROM departementenvironnement
WHERE photovoltaique2015 > photovoltaique2010
ORDER BY photovoltaique2015 - photovoltaique2010 DESC""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code', 'Département', 'Croissance'])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "Département" else '' for i in x])
.background_gradient(cmap='Blues', subset=["Croissance"])
.format({"Croissance": "{:.1f}pts"})
.set_properties(subset=["Croissance"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 5b",\
"Départements où la part de l'énergie photovoltaïque a augmenté entre les deux années de référence",\
"question_05b.html", html)
if (bconsole):
df["Croissance"] = df["Croissance"].map("{:.1f}pts".format)
print("Voici la liste des départements où la part de cette énergie a augmenté entre les deux années de référence : ")
print(df)
if choix == "3" or choix.lower() == "autre":
cur.execute("""SELECT nb, departements, autre2015 - autre2010 AS croissance FROM departementenvironnement
WHERE autre2015 > autre2010
ORDER BY autre2015 - autre2010 DESC""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code', 'Département', 'Croissance'])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', ' {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', ' ('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "Département" else '' for i in x])
.background_gradient(cmap='Blues', subset=["Croissance"])
.format({"Croissance": "{:.1f}pts"})
.set_properties(subset=["Croissance"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 5c",\
"Départements où la part des énergies renouvelables autres a augmenté entre les deux années de référence",\
"question_05c.html", html)
if (bconsole):
df["Croissance"] = df["Croissance"].map("{:.1f}pts".format)
print("Voici la liste des départements où la part de cette énergie a augmenté entre les deux années de référence : ")
print(df)
if (bconsole):
print("Appuyez sur entrée pour revenir au menu")
input()
# Question 6
def tonnes():
cur.execute("""SELECT departements.reg, regions.libelle AS region, departements.libelle AS departement
FROM departements, regions
WHERE departements.reg
IN (SELECT departements.reg from departements
INNER JOIN departementenvironnement
ON departements.dep = departementenvironnement.nb
INNER JOIN regions
ON departements.reg = regions.reg
GROUP BY departements.reg
HAVING SUM(prodgranulat2014) > 25000000
AND SUM(prodgranulat2014) <> 'NaN')
ORDER BY region, departement""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code région', 'Région', 'Département'])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', ' {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', ' ('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "Département" else '' for i in x])
.hide_index()
.render())
affiche_html("Question 6",\
"Départements dont la région a eu une production de granulats supérieure à 25 000 000 tonnes en 2014",\
"question_06.html", html)
if (bconsole):
print("les départements dont la région a eu une production de granulats supérieure à 25 000 000 tonnes en 2014 sont :")
print(df)
print("Appuyez sur entrée pour revenir au menu")
input()
# Question 7
def topFive():
cur.execute("""SELECT nb, departements, eolien2015 FROM departementenvironnement
ORDER BY nullif(eolien2015, 'NaN')
DESC nulls last LIMIT 5""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code département', 'Département', "Part de l'énergie éolienne en 2015"])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "Département" else '' for i in x])
.background_gradient(cmap='Blues', subset=["Part de l'énergie éolienne en 2015"])
.format({"Part de l'énergie éolienne en 2015": "{:.1f}%"})
.set_properties(subset=["Part de l'énergie éolienne en 2015"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 7",\
"Les 5 départements avec le plus grand taux d’énergie éolienne comme source de la puissance électrique en 2015",\
"question_07.html", html)
if (bconsole):
df["Part de l'énergie éolienne en 2015"] = df["Part de l'énergie éolienne en 2015"].map("{:.1f}%".format)
print("Les 5 départements avec le plus grand taux d’énergie éolienne comme source de la puissance électrique en 2015 sont :")
print(df)
print("Appuyez sur entrée pour revenir au menu")
input()
# Question 8
def weak():
    """Question 8: the region containing the departement with the lowest
    material/organic recovery rate ("valorisation") in 2013.

    Reads from the module-level cursor ``cur``, writes an HTML table via
    ``affiche_html`` and echoes to the console when ``bconsole`` is set.
    """
    cur.execute("""SELECT regions.reg, regions.libelle AS region,
                departements.libelle AS departement, departementenvironnement.valorisationorga2013
                FROM departements
                INNER JOIN regions
                ON departements.reg = regions.reg
                INNER JOIN departementenvironnement
                ON departements.dep = departementenvironnement.nb
                ORDER BY nullif(valorisationorga2013, 'NaN') nulls last LIMIT 1""")
    query_result = cur.fetchall()
    df = pd.DataFrame(query_result, columns=['Code région', 'Région', 'Département', 'Valorisation en 2013'])
    # Formattage des valeurs (one decimal place)
    df["Valorisation en 2013"] = df["Valorisation en 2013"].map("{:.1f}".format)
    html = (df.style
            # BUG FIX: the '#eee' and '#606060' colour literals had been
            # truncated (everything after '#' was lost, merging two lines and
            # breaking the dict literals); restored to match the identical
            # table theme used by every other question in this file.
            .set_table_styles([
                {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
                {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
                {'selector': 'th', 'props': [
                    ('background', '#606060'),
                    ('color', 'white'),
                    ('font-family', 'verdana')]},
                {'selector': 'td', 'props': [('font-family', 'verdana')]}])
            .apply(lambda x: ['background: lightblue' if x.name == "Région" else '' for i in x])
            .set_properties(subset=["Valorisation en 2013"], **{'text-align': 'right'})
            .hide_index()
            .render())
    affiche_html("Question 8",
                 "Région où se trouve le département ayant le plus faible taux de valorisation matière et organique en 2013",
                 "question_08.html", html)
    if (bconsole):
        print("La région où se trouve le département ayant le plus faible taux de valorisation matière et organique en 2013 est :")
        print("Reg, Région, Département, Valorisation2013")
        print(df)
        print("Appuyez sur entrée pour revenir au menu")
        input()
# Question 9
def bestPopMin():
    """Question 9: 2016 organic-farming share (% of total farmland) of the
    departement whose population is the most remote (>7 min) from local
    health services.

    Reads from the module-level cursor ``cur``, writes an HTML table via
    ``affiche_html`` and echoes to the console when ``bconsole`` is set.
    """
    cur.execute("""SELECT departementenvironnement.departements, departementenvironnement.agriculturebio2016
                FROM departementenvironnement
                INNER JOIN departementsocial
                ON departementenvironnement.departements = departementsocial.departements
                ORDER BY nullif(popeloignee7min, 'NaN') DESC nulls last LIMIT 1""")
    rows = cur.fetchall()
    col_bio = "Part de l'agriculture biologique"
    df = pd.DataFrame(rows, columns=['Département', col_bio])
    df[col_bio] = df[col_bio].map("{:.1f}%".format)
    titre_html = ("Part en 2016 (en %) de l’agriculture biologique dans la surface agricole totale du département<br>"
                  "contenant le plus grand pourcentage de population éloignée de plus de 7 minutes des services de santé de proximité")
    # Shared look: zebra rows, grey header, verdana everywhere.
    theme = [
        {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
        {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
        {'selector': 'th', 'props': [('background', '#606060'),
                                     ('color', 'white'),
                                     ('font-family', 'verdana')]},
        {'selector': 'td', 'props': [('font-family', 'verdana')]},
    ]
    rendered = (df.style
                .set_table_styles(theme)
                .apply(lambda col: ['background: lightblue' if col.name == col_bio else '' for _ in col])
                .set_properties(subset=[col_bio], **{'text-align': 'right'})
                .hide_index()
                .render())
    affiche_html("Question 9", titre_html, "question_09.html", rendered)
    if bconsole:
        print("En 2016, la part (en %) de l’agriculture biologique dans la surface agricole totale du département")
        print("contenant le plus grand pourcentage de population éloignée de plus de 7 minutes des services de santé de proximité est : ")
        print(df)
        print("Appuyez sur entrée pour revenir au menu")
        input()
def pauvrete():
    """Question 10: 2014 poverty rate of the regions where the share of
    non-integrated young people exceeded 30% in 2014.

    Reads from the module-level cursor ``cur``, writes an HTML table via
    ``affiche_html`` and echoes to the console when ``bconsole`` is set.
    """
    cur.execute("""SELECT pauvrete,region
                FROM regionsocial
                WHERE jeunesnoninseres2014 > 30
                AND pauvrete <> 'NaN'
                ORDER BY nullif(pauvrete, 'NaN') DESC nulls last""")
    rows = cur.fetchall()
    df = pd.DataFrame(rows, columns=['Pauvreté', 'Région'])
    # Shared look: zebra rows, grey header, verdana everywhere.
    theme = [
        {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
        {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
        {'selector': 'th', 'props': [('background', '#606060'),
                                     ('color', 'white'),
                                     ('font-family', 'verdana')]},
        {'selector': 'td', 'props': [('font-family', 'verdana')]},
    ]
    rendered = (df.style
                .set_table_styles(theme)
                .apply(lambda col: ['background: lightblue' if col.name == "Pauvreté" else '' for _ in col])
                .format({"Pauvreté": "{:.2f}%"})
                .set_properties(subset=["Pauvreté"], **{'text-align': 'right'})
                .hide_index()
                .render())
    affiche_html("Question 10",
                 "Taux de pauvreté connu en 2014 des régions dont la part des jeunes non insérés est supérieure à 30% en 2014",
                 "question_10.html", rendered)
    if bconsole:
        df["Pauvreté"] = df["Pauvreté"].map("{:.2f}%".format)
        print("Le taux de pauvreté connu en 2014 des régions dont la part des jeunes non insérés est supérieure à 30% en 2014 sont : ")
        print(df)
        print("Appuyez sur entrée pour revenir au menu")
        input()
def poids_eco():
    """Question 11: weight of the social economy (2015) in salaried jobs for
    regions whose renewable electric power came at least 10% from
    photovoltaics AND whose organic-farming share was at least 5%.

    Reads from the module-level cursor ``cur``, writes an HTML table via
    ``affiche_html`` and echoes to the console when ``bconsole`` is set.
    """
    # Join departements -> environment / social / regions, average the
    # departement-level indicators per region, then filter on the two
    # thresholds. '<> NaN' guards drop regions with no usable average.
    cur.execute("""SELECT regions.reg, regions.libelle, poidseco,
                AVG(photovoltaique2015) AS photovoltaique2015,
                AVG(agriculturebio2016) AS agriculturebio2016
                FROM departements
                INNER JOIN departementenvironnement
                ON departements.dep = departementenvironnement.nb
                INNER JOIN regionsocial
                ON departements.reg = regionsocial.nb
                INNER JOIN regions
                ON departements.reg = regions.reg
                GROUP BY poidseco, regions.reg
                HAVING AVG(photovoltaique2015) >= 10
                AND AVG(photovoltaique2015) <> 'NaN'
                AND AVG(agriculturebio2016) >= 5
                AND AVG(agriculturebio2016) <> 'NaN'
                ORDER BY poidseco""")
    query_result = cur.fetchall()
    df = pd.DataFrame(query_result, columns=['Code région', 'Région', "Poids de l'économie sociale",
                                             "Part moyenne du photovoltaïque", "Part moyenne de l'agriculture Bio"])
    # The averages may come back non-numeric; coerce so the "{:.1f}%"
    # formatters below cannot fail, mapping missing values to 0.
    df["Part moyenne du photovoltaïque"] = pd.to_numeric(df["Part moyenne du photovoltaïque"], errors='coerce').fillna(0)
    df["Part moyenne de l'agriculture Bio"] = pd.to_numeric(df["Part moyenne de l'agriculture Bio"], errors="coerce").fillna(0)
    titre_html = "Poids de l'économie sociale en 2015 dans les emplois salariés de la région<br>" +\
        "dont la source de la puissance électrique en énergies renouvelables provenait à au moins 10% de l'énergie photovoltaïque<br>" +\
        "et dont la part de l'agriculture biologique dans la surface agricole totale était d'au moins 5%"
    html = (df.style
            # Same table theme as the other questions: zebra rows, grey header.
            .set_table_styles([
                {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
                {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
                {'selector': 'th', 'props': [
                    ('background', '#606060'),
                    ('color', 'white'),
                    ('font-family', 'verdana')]},
                {'selector': 'td', 'props': [('font-family', 'verdana')]}])
            .set_properties(subset=["Poids de l'économie sociale", "Part moyenne du photovoltaïque",
                                    "Part moyenne de l'agriculture Bio"], **{'text-align': 'right'})
            .hide_index()
            .background_gradient(cmap='Blues', subset=["Poids de l'économie sociale"])
            # Successive .format() calls accumulate: each one registers the
            # formatter for its own column.
            .format({"Poids de l'économie sociale": "{:.1f}%"})
            .format({"Part moyenne du photovoltaïque": "{:.1f}%"})
            .format({"Part moyenne de l'agriculture Bio": "{:.1f}%"})
            .render())
    affiche_html("Question 11", titre_html, "question_11.html", html)
    if (bconsole):
        # Console output gets the same percentage formatting as the HTML.
        df["Poids de l'économie sociale"] = df["Poids de l'économie sociale"].map("{:.1f}%".format)
        df["Part moyenne du photovoltaïque"] = df["Part moyenne du photovoltaïque"].map("{:.1f}%".format)
        df["Part moyenne de l'agriculture Bio"] = df["Part moyenne de l'agriculture Bio"].map("{:.1f}%".format)
        print("Poids de l'économie sociale en 2015 dans les emplois salariés de la région")
        print("dont la source de la puissance électrique en énergies renouvelables provenait à au moins 10% de l'énergie photovoltaïque")
        print("et dont la part de l'agriculture biologique dans la surface agricole totale était d'au moins 5%")
        print(df)
        print("Appuyez sur entrée pour revenir au menu")
        input()
def menu():
    """Print the main menu and ask whether results should also be echoed
    to the console (in addition to the generated HTML files).

    Bug fixes:
    - ``global bconsole``: without it the assignment created a dead local
      variable, so answering "O" never reached the question functions that
      read the module-level ``bconsole`` flag.
    - ``choix[:1]`` instead of ``choix[0]``: avoids an IndexError when the
      user just presses Enter.
    """
    global bconsole
    print("")
    print("------------------------------------ Projet INSEE -----------------------------------")
    print("")
    print("1...Afficher la liste des régions")
    print("2...Afficher la liste des départements")
    print("3...Demander à l’utilisateur de choisir une région et afficher les données de la region choisie")
    print("4...Demander à l’utilisateur de choisir un département et un thème : social ou environnemental,")
    print(" | et afficher les données demandées pour le departement choisi")
    print("5...demander à l’utilisateur de choisir un type d’énergie (éolien, photovoltaïque, autre)")
    print(" | et en fonction de ce choix retourner la liste des départements où la part de cette énergie a augmenté")
    print(" | entre les deux années de référence, classés de la plus forte augmentation à la plus faible.")
    print("6...les départements dont la région a eu une production de granulats supérieure à 25 000 000 tonnes en 2014")
    print("7...les 5 départements avec le plus grand taux d’énergie éolienne comme source de la puissance électrique en 2015")
    print("8...La région où se trouve le département ayant le plus faible taux de valorisation matière et organique en 2013")
    print("9...La part (en %) de l’agriculture biologique dans la surface agricole totale du département contenant")
    print(" | le plus grand pourcentage de population éloignée de plus de 7 minutes des services de santé de proximité en 2016")
    print("10..Le taux de pauvreté en 2014 des régions dont la part des jeunes non insérés est supérieure à 30% en 2014 ")
    print("11..Le poids de l'économie sociale dans les emplois salariés de la région dont la source de la puissance électrique")
    print(" | en énergies renouvelables provenait à au moins 10% de l’énergie photovoltaïque et dont la part")
    print(" | de l’agriculture biologique dans la surface agricole totale était d’au moins 5% en 2015")
    print("")
    print("0...Quitter")
    print("-------------------------------------------------------------------------------------")
    print("Souhaitez-vous afficher les résultats dans la console,")
    print("en plus de la création des fichiers HTML ?")
    print(" (O Oui / N Non)")
    choix = input()
    if choix[:1].lower() == "o":
        bconsole = True
# Menu principal
# Dispatch table: menu choice -> handler. Same behaviour as the original
# if/elif chain; "0" leaves the loop, anything unknown prints an error.
question_handlers = {
    "1": listeRegions,
    "2": listeDepartement,
    "3": choixRegions,
    "4": choix_departement_theme,
    "5": typeEnergie,
    "6": tonnes,
    "7": topFive,
    "8": weak,
    "9": bestPopMin,
    "10": pauvrete,
    "11": poids_eco,
}
while True:
    menu()
    print("Chosissez un numéro de question pour avoir la réponse :")
    choix = input()
    if choix == "0":
        break
    handler = question_handlers.get(choix)
    if handler is None:
        print("Choix invalide")
    else:
        handler()
# fermeture "propre" du curseur et de la connection
cur.close()
connection.close()
| true
| true
|
f707ef923ffadab284cc2684037cfd93b6bc232f
| 18,285
|
py
|
Python
|
lib/doFormatCheckDIF.py
|
joser1945/cmr-metadata-review
|
df0bb24dd06f981af907569f1a97966753053a99
|
[
"Apache-2.0"
] | 15
|
2018-06-26T19:58:44.000Z
|
2022-03-01T21:19:34.000Z
|
lib/doFormatCheckDIF.py
|
joser1945/cmr-metadata-review
|
df0bb24dd06f981af907569f1a97966753053a99
|
[
"Apache-2.0"
] | 61
|
2018-06-27T15:15:41.000Z
|
2022-03-08T15:39:32.000Z
|
lib/doFormatCheckDIF.py
|
vbjayanti/cmr-metadata-review
|
1c7ac12ef26f144289e3004588a2e2b305d4f940
|
[
"Apache-2.0"
] | 9
|
2019-01-22T15:48:48.000Z
|
2021-10-01T18:38:30.000Z
|
'''
Copyright 2016, United States Government, as represented by the Administrator of
the National Aeronautics and Space Administration. All rights reserved.
The "pyCMR" platform is licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. You may obtain a
copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied. See the License for the specific language
governing permissions and limitations under the License.
'''
#import sys
import os, errno
import json
import CollectionCheckerDIF
#import GranuleChecker
from cmr import searchCollection
#from cmr import searchGranule
from xml.etree import ElementTree
from xmlParser import XmlDictConfigDIF
from xmlParser import XmlDictConfig
collection_output_header = 'DIF10 Collection Elements,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,\n\
"* = GCMD controlled: http://gcmd.nasa.gov/learn/keyword_list.html\nThese datasets were reviewed in comparison to GCMD Keyword Version: 8.4.1",\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,\n\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,Platform\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,Spatial_Coverage\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,Organization\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,\n\
,,,,,,,,,,,,,,,,,,,,,,Personnel\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,Platform/ Instrument\
,,,,,,,,,,,,,,Platform/ Instrument/ Sensor\
,,,,,,,,,,,,"Temporal_Coverage (Must include a choice of 1, 2, 3 or 4)"\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,"Spatial_Coverage/ Geometry (must have a choice of (1), (2), (3) or (4))"\
,,,,,,,,,,,Spatial_Coverage/ Spatial_Info\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,Organization/ Personnel\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,\n\
,,,,,Dataset_Citation,,,,,,,,,,,,,,,Personnel/ Contact_Person,,,,,,,,,,,Personnel/ Contact_Group,,,,,,,,,Science_Keywords,,,,,,,,,,,,Platform/ Characteristics,,,,,,,,,Platform/ Instrument/ Characteristics,,,,,,,,\
Platform/ Instrument/ Sensor/ Characteristics,,,,,,,,,,,Temporal_Coverage/ Range_DateTime (1),,,Temporal_Coverage/ Periodic_DateTime (3),,,,,,,Temporal_Coverage/ Paleo_DateTime (4),,,,,,,,,,,,,,Spatial_Coverage/ Geometry/ Bounding_Rectangle (1),,,,,,,,,,Spatial_Coverage/ Geometry/ Point (2),,Spatial_Coverage/ Geometry/ Line (3),,,,Spatial_Coverage/ Geometry/ Polygon (4),,,,Spatial_Coverage/ Orbit_Parameters,,,,,Spatial_Coverage/ Vertical_Spatial_Info,,,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geodetic_Model,,,,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geographic_Coordinate_System,,,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Local_Coordinate_System,,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System,,,,,Location,,,,,,Data_Resolution,,,,,,,Project,,,,,,,,,,,,,,,,,,Organization/ Personnel/ Contact_Person ,,,,,,,,,,,Organization/ Personnel/ Contact_Group,,,,,,,,,Distribution ,,,,Multimedia_Sample,,,,,Reference,,,,,,,,,,,,,,,,,Summary ,,Related_URL ,,,,,,,Metadata_Association,,,,Additional_Attributes,,,,,,,,,,,,,,,,,Metadata_Dates,,,,,,,,,Extended_Metadata,,,,,,,,,,,,,,,\n\
Dataset Id (short name) - umm-json link,Entry_ID/ Short_Name,Entry_ID/ Version,Version_Description,Entry_Title,Dataset_Citation/ Dataset_Creator,Dataset_Citation/ Dataset_Editor,Dataset_Citation/ Dataset_Title,Dataset_Citation/ Dataset_Series_Name,Dataset_Citation/ Dataset_Release_Date,Dataset_Citation/ Dataset_Release_Place,Dataset_Citation/ Dataset_Publisher,Dataset_Citation/ Version,Dataset_Citation/ Issue_Identification,Dataset_Citation/ Data_Presentation_Form,Dataset_Citation/ Other_Citation_Details,* where type must = \"DOI\" Dataset_Citation/ Persistent_Identifier/ Type,* DOI should be entered here Dataset_Citation/ Persistent_Identifier/ Identifier,Dataset_Citation/ Online_Resource,Personnel/ Role ,Personnel/ Contact_Person/ First_Name,Personnel/ Contact_Person/ Middle_Name,Personnel/ Contact_Person/ Last_Name,Personnel/ Contact_Person/ Address/ Street_Address,Personnel/ Contact_Person/ Address/ City,Personnel/ Contact_Person/ Address/ State_Province,Personnel/ Contact_Person/ Address/ Postal_Code,Personnel/ Contact_Person/ Address/ Country,Personnel/ Contact_Person/ Email,Personnel/ Contact_Person/ Phone/ Number,Personnel/ Contact_Person/ Phone/ Type ,Personnel/ Contact_Group/ Name,Personnel/ Contact_Group/ Address/ Street_Address,Personnel/ Contact_Group/ Address/ City,Personnel/ Contact_Group/ Address/ State_Province,Personnel/ Contact_Group/ Address/ Postal_Code,Personnel/ Contact_Group/ Address/ Country,Personnel/ Contact_Group/ Email,Personnel/ Contact_Group/ Phone/ Number,Personnel/ Contact_Group/ Phone/ Type,Science_Keywords/ Category *,Science_Keywords/ Topic *,Science_Keywords/ Term *,Science_Keywords/ Variable_Level_1 *,Science_Keywords/ Variable_Level_1/ Variable_Level_2 *,Science_Keywords/ Variable_Level_1/ Variable_Level_2/ Variable_Level_3 *,Science_Keywords/ Variable_Level_1/ Variable_Level_2/ Variable_Level_3/ Detailed_Variable,ISO_Topic_Category,Ancillary_Keyword,Platform/ Type *,Platform/ Short_Name *,Platform/ Long_Name*,Platform/ 
Characteristics/ Name ,Platform/ Characteristics/ Description,Platform/ Characteristics/ DataType,Platform/ Characteristics/ Unit,Platform/ Characteristics/ Value,Platform/ Instrument/ Short_Name *,Platform/ Instrument/ Long_Name *,Platform/ Instrument/ Technique,Platform/ Instrument/ NumberOfSensors,Platform/ Instrument/ Characteristics/ Name,Platform/ Instrument/ Characteristics/ Description,Platform/ Instrument/ Characteristics/ DataType,Platform/ Instrument/ Characteristics/ Unit ,Platform/ Instrument/ Characteristics/ Value,Platform/ Instrument/ OperationalMode,Platform/ Instrument/ Sensor/ Short_Name *,Platform/ Instrument/ Sensor/ Long_Name *,Platform/ Instrument/ Sensor/ Technique,Platform/ Instrument/ Sensor/ Characteristics/ Name ,Platform/ Instrument/ Sensor/ Characteristics/ Description ,Platform/ Instrument/ Sensor/ Characteristics/ DataType ,Platform/ Instrument/ Sensor/ Characteristics/ Unit ,Platform/ Instrument/ Sensor/ Characteristics/ Value ,Temporal_Coverage/ Time_Type,Temporal_Coverage/ Date_Type,Temporal_Coverage/ Temporal_Range_Type,Temporal_Coverage/ Precision_Of_Seconds,Temporal_Coverage/ Ends_At_Present_Flag,Temporal_Coverage/ Range_DateTime/ Beginning_Date_Time ,Temporal_Coverage/ Range_DateTime/ Ending_Date_Time ,Temporal_Coverage/ Single_Date_Time (2),Temporal_Coverage/ Periodic_DateTime/ Name,Temporal_Coverage/ Periodic_DateTime/ Start_Date,Temporal_Coverage/ Periodic_DateTime/ End_Date,Temporal_Coverage/ Periodic_DateTime/ Duration_Unit,Temporal_Coverage/ Periodic_DateTime/ Duration_Value,Temporal_Coverage/ Periodic_DateTime/ Period_Cycle_Duration_Unit,Temporal_Coverage/ Periodic_DateTime/ Period_Cycle_Duration_Value,Temporal_Coverage/ Paleo_DateTime/ Paleo_Start_Date,Temporal_Coverage/ Paleo_DateTime/ Paleo_Stop_Date,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Eon,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Era,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Period,Temporal_Coverage/ 
Paleo_DateTime/ Chronostratigraphic_Unit/ Epoch,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Stage,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Detailed_Classification ,Temporal_Coverage/ Temporal_Info/ Ancillary_Temporal_Keyword ,DataSet_Progress,Spatial_Coverage/ Spatial_Coverage_Type,Spatial_Coverage/ Granule_Spatial_Representation,Spatial_Coverage/ Zone_Identifier,Spatial_Coverage/ Geometry/ Coordinate_System ,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Southernmost_Latitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Northernmost_Latitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Westernmost_Longitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Easternmost_Longitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Minimum_Altitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Maximum_Altitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Altitude_Unit,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Minimum_Depth,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Maximum_Depth,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Depth_Unit,Spatial_Coverage/ Geometry/ Point/ Point_Longitude,Spatial_Coverage/ Geometry/ Point/ Point_Latitude,Spatial_Coverage/ Geometry/ Line/ Point/ Point_Longitude,Spatial_Coverage/ Geometry/ Line/ Point/ Point_Latitude,Spatial_Coverage/ Geometry/ Line/ Center_Point/ Point_Latitude,Spatial_Coverage/ Geometry/ Line/ Center_Point/ Point_Latitude,Spatial_Coverage/ Geometry/ Polygon/ Boundary/ Point/ Point_Longitude,Spatial_Coverage/ Geometry/ Polygon/ Boundary/ Point/ Point_Latitude,Spatial_Coverage/ Geometry/ Polygon/ Exclusion_Zone/ Boundary/ Point/ Point_Longitude,Spatial_Coverage/ Geometry/ Polygon/ Exclusion_Zone/ Boundary/ Point/ Point_Latitude,Spatial_Coverage/ Orbit_Parameters/ Swath_Width,Spatial_Coverage/ Orbit_Parameters/ Period,Spatial_Coverage/ Orbit_Parameters/ Inclination_Angle,Spatial_Coverage/ Orbit_Parameters/ Number_of_Orbits,Spatial_Coverage/ Orbit_Parameters/ 
Start_Circular_Latitude,Spatial_Coverage/ Vertical_Spatial_Info/ Type,Spatial_Coverage/ Vertical_Spatial_Info/ Value,Spatial_Coverage/ Spatial_Info/ Spatial_Coverage_Type,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geodetic_Model/ Horizontal_DatumName,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geodetic_Model/ Ellipsoid_Name,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geodetic_Model/ Semi_Major_Axis,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geodetic_Model/ Denominator_Of_Flattening_Ratio,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geographic_Coordinate_System/ GeographicCoordinateUnits,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geographic_Coordinate_System/ LatitudeResolution,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geographic_Coordinate_System/ LongitudeResolution,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Local_Coordinate_System/ Description,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Local_Coordinate_System/ GeoReferenceInformation,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System/ TwoD_Coordinate_System_Name,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System/ Coordinate1/ Minimum_Value ,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System/ Coordinate1/ Maximum_Value,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System/ Coordinate2/ Minimum_Value,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System/ Coordinate2/ Maximum_Value,Location/ Location_Category *,Location/ Location_Type *,Location/ Location_Subregion1 *,Location/ Location_Subregion2 *,Location/ Location_Subregion3 *,Location/ Detailed_Location,Data_Resolution/ Latitude_Resolution,Data_Resolution/ Longitude_Resolution,Data_Resolution/ Horizontal_Resolution_Range,Data_Resolution/ Vertical_Resolution,Data_Resolution/ Vertical_Resolution_Range,Data_Resolution/ Temporal_Resolution,Data_Resolution/ 
Temporal_Resolution_Range,Project/ Short_Name *,Project/ Long_Name *,Project/ Campaign,Project/ Start_Date,Project/ End_Date,Quality,Access_Constraints,Use_Constraints,DataSet_Language,Originating_Center,Organization/ Organization_Type,Organization/ Organization_Name/ Short_Name *,Organization/ Organization_Name/ Long_Name *,Organization/ Hours_Of_Service,Organization/Instructions,Organization/Organization_URL,Organization/Data_Set_ID,Organization/ Personnel/ Role ,Organization/ Personnel/ Contact_Person/ First_Name,Organization/ Personnel/ Contact_Person/ Middle_Name,Organization/ Personnel/ Contact_Person/ Last_Name,Organization/ Personnel/ Contact_Person/ Address/ Street_Address,Organization/ Personnel/ Contact_Person/ Address/ City,Organization/ Personnel/ Contact_Person/ Address/ State_Province,Organization/ Personnel/ Contact_Person/ Address/ Postal_Code,Organization/ Personnel/ Contact_Person/ Address/ Country,Organization/ Personnel/ Contact_Person/ Email,Organization/ Personnel/ Contact_Person/ Phone/ Number,Organization/ Personnel/ Contact_Person/ Phone/ Type ,Organization/ Personnel/ Contact_Group/ Name,Organization/ Personnel/ Contact_Group/ Address/ Street_Address,Organization/ Personnel/ Contact_Group/ Address/ City,Organization/ Personnel/ Contact_Group/ Address/ State_Province,Organization/ Personnel/ Contact_Group/ Address/ Postal_Code,Organization/ Personnel/ Contact_Group/ Address/ Country,Organization/ Personnel/ Contact_Group/ Email,Organization/ Personnel/ Contact_Group/ Phone/ Number,Organization/ Personnel/ Contact_Group/ Phone/ Type,Distribution/ Distribution_Media,Distribution/ Distribution_Size,Distribution/ Distribution_Format,Distribution/ Fees,Multimedia_Sample/ File,Multimedia_Sample/ URL,Multimedia_Sample/Format,Multimedia_Sample/Caption,Multimedia_Sample/Description,Reference/ Citation,Reference/ Author,Reference/ Publication Date,Reference/ Title,Reference/ Series,Reference/ Edition,Reference/ Volume,Reference/ Issue,Reference/\
Report_Number,Reference/Publication_Place,Reference/ Publisher,Reference/ Pages,Reference/ ISBN,Reference/Persistent_Identifier/ Type,Reference/Persistent_Identifier/ Identifier,Reference/Online_Resource,Reference/Other_Reference_Details,Summary/ Abstract,Summary/ Purpose,Related_URL/ URL_Content_Type/ Type *,Related_URL/ URL_Content_Type/ Subtype *,Related_URL/ Protocol,Related_URL/ URL,Related_URL/ Title,Related_URL/ Description,Related_URL/ Mime_Type,Metadata_Association/ Entry_Id,Metadata_Association/ Type,Metadata_Association/ Description,IDN_Node/ Short_Name,Additional_Attributes/ Name,Additional_Attributes/ DataType,Additional_Attributes/ Description,Additional_Attributes/ MeasurementResolution,Additional_Attributes/ ParameterRangeBegin,Additional_Attributes/ ParameterRangeEnd,Additional_Attributes/ ParameterUnitsOfMeasure,Additional_Attributes/ ParameterValueAccuracy,Additional_Attributes/ ValueAccuracyExplanation,Additional_Attributes/ Value,Product_Level_ID,Product_Flag,Collection_Data_Type,Originating_Metadata_Node,Metadata_Name,Metadata_Version,DIF_Revision_History,Metadata_Dates/ Metadata_Creation,Metadata_Dates/ Metadata_Last_Revision,Metadata_Dates/ Metadata_Future_Review,Metadata_Dates/ Metadata_Delete,Metadata_Dates/ Data_Creation,Metadata_Dates/ Data_Last_Revision,Metadata_Dates/ Data_Future_Review,Metadata_Dates/ Data_Delete,Private,Extended_Metadata/ Metadata/ Group,Extended_Metadata/ Metadata/ Name,Extended_Metadata/ Metadata/ Description,Extended_Metadata/ Metadata/ Type,Extended_Metadata/ Metadata/ Update_Date,Extended_Metadata/ Value,Checked by:,Comments:,# Red fields (absolute errors):,# Yellow fields (recommended fixes),# Green fields (observations/ may or may not need to be fixed),# np fields (not in the metadata, and not marked by any color),# fields checked (265 - #np fields),% red fields,% yellow fields,% green fields\n'
def silentremove(filename):
    """Delete *filename*, treating a missing file as a no-op.

    Any other OS-level failure (permission denied, is-a-directory, ...)
    is re-raised untouched.
    """
    try:
        os.remove(filename)
    except OSError as err:
        # ENOENT means the file was already gone -- exactly the case we
        # want to silence; everything else propagates.
        if err.errno != errno.ENOENT:
            raise
def replace_file(filename):
    """Rewrite *filename* in place, collapsing any line that starts with
    '<DIF' to a bare '<DIF>'.

    This strips xmlns/attribute clutter from the root tag so the
    ElementTree lookups elsewhere ('root.iter("DIF")', no namespace)
    keep working. Idempotent: a plain '<DIF>' line is rewritten to itself.
    """
    # 'with' guarantees the handles are closed even if an I/O error occurs
    # (the original leaked the handle on exception).
    with open(filename) as f:
        lines = f.readlines()
    with open(filename, 'w') as f:
        for line in lines:
            if line.startswith("<DIF"):
                f.write('<DIF>\n')
            else:
                f.write(line)
def doCollectionCheckwithRecordsDIF(filename, outputform = 'CSV', outfilename = "result.csv"):
    """Run the DIF collection checker over every <DIF> record in *filename*.

    :param filename: path to a DIF XML file (modified in place by
        ``replace_file`` to strip the root-tag namespace).
    :param outputform: 'JSON' returns the JSON check result of the FIRST
        record only (original behaviour preserved); anything else writes
        one CSV row per record to *outfilename*.
    :param outfilename: CSV output path for the non-JSON branch.
    """
    replace_file(filename)
    xml = ElementTree.parse(filename)
    root_element = xml.getroot()
    ck = CollectionCheckerDIF.Checker()
    if (outputform == 'JSON'):
        for collection in root_element.iter('DIF'):
            metadata = XmlDictConfigDIF(collection)
            # NOTE: returns inside the loop -- only the first record is
            # checked in JSON mode, exactly as before.
            return ck.checkAllJSON(metadata)
    else:
        # 'with' closes the CSV even if a record check raises
        # (the original leaked the handle in that case).
        with open(outfilename, 'w') as out_fp:
            out_fp.write(collection_output_header)
            for collection in root_element.iter('DIF'):
                metadata = XmlDictConfigDIF(collection)
                result = ck.checkAll(metadata)
                out_fp.write(filename + ", " + result + "\n")
def doCollectionCheckwithShortNameListDIF(filename, outfilename = "result.csv", tmp_path = "./"):
    """For each collection short name listed (one per line) in *filename*:
    download its DIF record from CMR, run the collection checker, and
    append one CSV row to *outfilename*. The downloaded temp file is
    removed after each check.

    :param filename: text file of collection short names, one per line.
    :param outfilename: CSV output path (header written first).
    :param tmp_path: directory where downloaded records are staged.
    """
    ck = CollectionCheckerDIF.Checker()
    # 'with' closes both handles even if a download or check raises.
    with open(filename, 'r') as in_fp, open(outfilename, 'w') as out_fp:
        out_fp.write(collection_output_header)
        # BUG FIX: the original used iter(in_fp.readline, b''), which never
        # terminates under Python 3 -- a text-mode readline() returns the
        # str '' at EOF, which is never equal to the bytes sentinel b''.
        # Iterating the file object stops correctly at EOF.
        for line in in_fp:
            shortName = line.rstrip()
            if not shortName:
                continue
            result = searchCollection(limit=100, short_name=shortName)
            result[0].download(tmp_path)
            fileNameTemp = tmp_path + shortName.replace('/', '')
            replace_file(fileNameTemp)
            xml = ElementTree.parse(fileNameTemp)
            root_element = xml.getroot()
            for collection in root_element.iter('DIF'):
                metadata = XmlDictConfigDIF(collection)
                result = ck.checkAll(metadata)
                out_fp.write(shortName + ", " + result + '\n')
            silentremove(fileNameTemp)
| 122.718121
| 9,943
| 0.762811
|
import os, errno
import json
import CollectionCheckerDIF
from cmr import searchCollection
from xml.etree import ElementTree
from xmlParser import XmlDictConfigDIF
from xmlParser import XmlDictConfig
collection_output_header = 'DIF10 Collection Elements,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,\n\
"* = GCMD controlled: http://gcmd.nasa.gov/learn/keyword_list.html\nThese datasets were reviewed in comparison to GCMD Keyword Version: 8.4.1",\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,\n\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,Platform\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,Spatial_Coverage\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,Organization\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,\n\
,,,,,,,,,,,,,,,,,,,,,,Personnel\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,Platform/ Instrument\
,,,,,,,,,,,,,,Platform/ Instrument/ Sensor\
,,,,,,,,,,,,"Temporal_Coverage (Must include a choice of 1, 2, 3 or 4)"\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,"Spatial_Coverage/ Geometry (must have a choice of (1), (2), (3) or (4))"\
,,,,,,,,,,,Spatial_Coverage/ Spatial_Info\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,Organization/ Personnel\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,\n\
,,,,,Dataset_Citation,,,,,,,,,,,,,,,Personnel/ Contact_Person,,,,,,,,,,,Personnel/ Contact_Group,,,,,,,,,Science_Keywords,,,,,,,,,,,,Platform/ Characteristics,,,,,,,,,Platform/ Instrument/ Characteristics,,,,,,,,\
Platform/ Instrument/ Sensor/ Characteristics,,,,,,,,,,,Temporal_Coverage/ Range_DateTime (1),,,Temporal_Coverage/ Periodic_DateTime (3),,,,,,,Temporal_Coverage/ Paleo_DateTime (4),,,,,,,,,,,,,,Spatial_Coverage/ Geometry/ Bounding_Rectangle (1),,,,,,,,,,Spatial_Coverage/ Geometry/ Point (2),,Spatial_Coverage/ Geometry/ Line (3),,,,Spatial_Coverage/ Geometry/ Polygon (4),,,,Spatial_Coverage/ Orbit_Parameters,,,,,Spatial_Coverage/ Vertical_Spatial_Info,,,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geodetic_Model,,,,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geographic_Coordinate_System,,,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Local_Coordinate_System,,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System,,,,,Location,,,,,,Data_Resolution,,,,,,,Project,,,,,,,,,,,,,,,,,,Organization/ Personnel/ Contact_Person ,,,,,,,,,,,Organization/ Personnel/ Contact_Group,,,,,,,,,Distribution ,,,,Multimedia_Sample,,,,,Reference,,,,,,,,,,,,,,,,,Summary ,,Related_URL ,,,,,,,Metadata_Association,,,,Additional_Attributes,,,,,,,,,,,,,,,,,Metadata_Dates,,,,,,,,,Extended_Metadata,,,,,,,,,,,,,,,\n\
Dataset Id (short name) - umm-json link,Entry_ID/ Short_Name,Entry_ID/ Version,Version_Description,Entry_Title,Dataset_Citation/ Dataset_Creator,Dataset_Citation/ Dataset_Editor,Dataset_Citation/ Dataset_Title,Dataset_Citation/ Dataset_Series_Name,Dataset_Citation/ Dataset_Release_Date,Dataset_Citation/ Dataset_Release_Place,Dataset_Citation/ Dataset_Publisher,Dataset_Citation/ Version,Dataset_Citation/ Issue_Identification,Dataset_Citation/ Data_Presentation_Form,Dataset_Citation/ Other_Citation_Details,* where type must = \"DOI\" Dataset_Citation/ Persistent_Identifier/ Type,* DOI should be entered here Dataset_Citation/ Persistent_Identifier/ Identifier,Dataset_Citation/ Online_Resource,Personnel/ Role ,Personnel/ Contact_Person/ First_Name,Personnel/ Contact_Person/ Middle_Name,Personnel/ Contact_Person/ Last_Name,Personnel/ Contact_Person/ Address/ Street_Address,Personnel/ Contact_Person/ Address/ City,Personnel/ Contact_Person/ Address/ State_Province,Personnel/ Contact_Person/ Address/ Postal_Code,Personnel/ Contact_Person/ Address/ Country,Personnel/ Contact_Person/ Email,Personnel/ Contact_Person/ Phone/ Number,Personnel/ Contact_Person/ Phone/ Type ,Personnel/ Contact_Group/ Name,Personnel/ Contact_Group/ Address/ Street_Address,Personnel/ Contact_Group/ Address/ City,Personnel/ Contact_Group/ Address/ State_Province,Personnel/ Contact_Group/ Address/ Postal_Code,Personnel/ Contact_Group/ Address/ Country,Personnel/ Contact_Group/ Email,Personnel/ Contact_Group/ Phone/ Number,Personnel/ Contact_Group/ Phone/ Type,Science_Keywords/ Category *,Science_Keywords/ Topic *,Science_Keywords/ Term *,Science_Keywords/ Variable_Level_1 *,Science_Keywords/ Variable_Level_1/ Variable_Level_2 *,Science_Keywords/ Variable_Level_1/ Variable_Level_2/ Variable_Level_3 *,Science_Keywords/ Variable_Level_1/ Variable_Level_2/ Variable_Level_3/ Detailed_Variable,ISO_Topic_Category,Ancillary_Keyword,Platform/ Type *,Platform/ Short_Name *,Platform/ Long_Name*,Platform/ 
Characteristics/ Name ,Platform/ Characteristics/ Description,Platform/ Characteristics/ DataType,Platform/ Characteristics/ Unit,Platform/ Characteristics/ Value,Platform/ Instrument/ Short_Name *,Platform/ Instrument/ Long_Name *,Platform/ Instrument/ Technique,Platform/ Instrument/ NumberOfSensors,Platform/ Instrument/ Characteristics/ Name,Platform/ Instrument/ Characteristics/ Description,Platform/ Instrument/ Characteristics/ DataType,Platform/ Instrument/ Characteristics/ Unit ,Platform/ Instrument/ Characteristics/ Value,Platform/ Instrument/ OperationalMode,Platform/ Instrument/ Sensor/ Short_Name *,Platform/ Instrument/ Sensor/ Long_Name *,Platform/ Instrument/ Sensor/ Technique,Platform/ Instrument/ Sensor/ Characteristics/ Name ,Platform/ Instrument/ Sensor/ Characteristics/ Description ,Platform/ Instrument/ Sensor/ Characteristics/ DataType ,Platform/ Instrument/ Sensor/ Characteristics/ Unit ,Platform/ Instrument/ Sensor/ Characteristics/ Value ,Temporal_Coverage/ Time_Type,Temporal_Coverage/ Date_Type,Temporal_Coverage/ Temporal_Range_Type,Temporal_Coverage/ Precision_Of_Seconds,Temporal_Coverage/ Ends_At_Present_Flag,Temporal_Coverage/ Range_DateTime/ Beginning_Date_Time ,Temporal_Coverage/ Range_DateTime/ Ending_Date_Time ,Temporal_Coverage/ Single_Date_Time (2),Temporal_Coverage/ Periodic_DateTime/ Name,Temporal_Coverage/ Periodic_DateTime/ Start_Date,Temporal_Coverage/ Periodic_DateTime/ End_Date,Temporal_Coverage/ Periodic_DateTime/ Duration_Unit,Temporal_Coverage/ Periodic_DateTime/ Duration_Value,Temporal_Coverage/ Periodic_DateTime/ Period_Cycle_Duration_Unit,Temporal_Coverage/ Periodic_DateTime/ Period_Cycle_Duration_Value,Temporal_Coverage/ Paleo_DateTime/ Paleo_Start_Date,Temporal_Coverage/ Paleo_DateTime/ Paleo_Stop_Date,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Eon,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Era,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Period,Temporal_Coverage/ 
Paleo_DateTime/ Chronostratigraphic_Unit/ Epoch,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Stage,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Detailed_Classification ,Temporal_Coverage/ Temporal_Info/ Ancillary_Temporal_Keyword ,DataSet_Progress,Spatial_Coverage/ Spatial_Coverage_Type,Spatial_Coverage/ Granule_Spatial_Representation,Spatial_Coverage/ Zone_Identifier,Spatial_Coverage/ Geometry/ Coordinate_System ,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Southernmost_Latitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Northernmost_Latitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Westernmost_Longitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Easternmost_Longitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Minimum_Altitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Maximum_Altitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Altitude_Unit,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Minimum_Depth,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Maximum_Depth,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Depth_Unit,Spatial_Coverage/ Geometry/ Point/ Point_Longitude,Spatial_Coverage/ Geometry/ Point/ Point_Latitude,Spatial_Coverage/ Geometry/ Line/ Point/ Point_Longitude,Spatial_Coverage/ Geometry/ Line/ Point/ Point_Latitude,Spatial_Coverage/ Geometry/ Line/ Center_Point/ Point_Latitude,Spatial_Coverage/ Geometry/ Line/ Center_Point/ Point_Latitude,Spatial_Coverage/ Geometry/ Polygon/ Boundary/ Point/ Point_Longitude,Spatial_Coverage/ Geometry/ Polygon/ Boundary/ Point/ Point_Latitude,Spatial_Coverage/ Geometry/ Polygon/ Exclusion_Zone/ Boundary/ Point/ Point_Longitude,Spatial_Coverage/ Geometry/ Polygon/ Exclusion_Zone/ Boundary/ Point/ Point_Latitude,Spatial_Coverage/ Orbit_Parameters/ Swath_Width,Spatial_Coverage/ Orbit_Parameters/ Period,Spatial_Coverage/ Orbit_Parameters/ Inclination_Angle,Spatial_Coverage/ Orbit_Parameters/ Number_of_Orbits,Spatial_Coverage/ Orbit_Parameters/ 
Start_Circular_Latitude,Spatial_Coverage/ Vertical_Spatial_Info/ Type,Spatial_Coverage/ Vertical_Spatial_Info/ Value,Spatial_Coverage/ Spatial_Info/ Spatial_Coverage_Type,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geodetic_Model/ Horizontal_DatumName,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geodetic_Model/ Ellipsoid_Name,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geodetic_Model/ Semi_Major_Axis,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geodetic_Model/ Denominator_Of_Flattening_Ratio,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geographic_Coordinate_System/ GeographicCoordinateUnits,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geographic_Coordinate_System/ LatitudeResolution,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geographic_Coordinate_System/ LongitudeResolution,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Local_Coordinate_System/ Description,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Local_Coordinate_System/ GeoReferenceInformation,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System/ TwoD_Coordinate_System_Name,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System/ Coordinate1/ Minimum_Value ,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System/ Coordinate1/ Maximum_Value,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System/ Coordinate2/ Minimum_Value,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System/ Coordinate2/ Maximum_Value,Location/ Location_Category *,Location/ Location_Type *,Location/ Location_Subregion1 *,Location/ Location_Subregion2 *,Location/ Location_Subregion3 *,Location/ Detailed_Location,Data_Resolution/ Latitude_Resolution,Data_Resolution/ Longitude_Resolution,Data_Resolution/ Horizontal_Resolution_Range,Data_Resolution/ Vertical_Resolution,Data_Resolution/ Vertical_Resolution_Range,Data_Resolution/ Temporal_Resolution,Data_Resolution/ 
Temporal_Resolution_Range,Project/ Short_Name *,Project/ Long_Name *,Project/ Campaign,Project/ Start_Date,Project/ End_Date,Quality,Access_Constraints,Use_Constraints,DataSet_Language,Originating_Center,Organization/ Organization_Type,Organization/ Organization_Name/ Short_Name *,Organization/ Organization_Name/ Long_Name *,Organization/ Hours_Of_Service,Organization/Instructions,Organization/Organization_URL,Organization/Data_Set_ID,Organization/ Personnel/ Role ,Organization/ Personnel/ Contact_Person/ First_Name,Organization/ Personnel/ Contact_Person/ Middle_Name,Organization/ Personnel/ Contact_Person/ Last_Name,Organization/ Personnel/ Contact_Person/ Address/ Street_Address,Organization/ Personnel/ Contact_Person/ Address/ City,Organization/ Personnel/ Contact_Person/ Address/ State_Province,Organization/ Personnel/ Contact_Person/ Address/ Postal_Code,Organization/ Personnel/ Contact_Person/ Address/ Country,Organization/ Personnel/ Contact_Person/ Email,Organization/ Personnel/ Contact_Person/ Phone/ Number,Organization/ Personnel/ Contact_Person/ Phone/ Type ,Organization/ Personnel/ Contact_Group/ Name,Organization/ Personnel/ Contact_Group/ Address/ Street_Address,Organization/ Personnel/ Contact_Group/ Address/ City,Organization/ Personnel/ Contact_Group/ Address/ State_Province,Organization/ Personnel/ Contact_Group/ Address/ Postal_Code,Organization/ Personnel/ Contact_Group/ Address/ Country,Organization/ Personnel/ Contact_Group/ Email,Organization/ Personnel/ Contact_Group/ Phone/ Number,Organization/ Personnel/ Contact_Group/ Phone/ Type,Distribution/ Distribution_Media,Distribution/ Distribution_Size,Distribution/ Distribution_Format,Distribution/ Fees,Multimedia_Sample/ File,Multimedia_Sample/ URL,Multimedia_Sample/Format,Multimedia_Sample/Caption,Multimedia_Sample/Description,Reference/ Citation,Reference/ Author,Reference/ Publication Date,Reference/ Title,Reference/ Series,Reference/ Edition,Reference/ Volume,Reference/ Issue,Reference/\
Report_Number,Reference/Publication_Place,Reference/ Publisher,Reference/ Pages,Reference/ ISBN,Reference/Persistent_Identifier/ Type,Reference/Persistent_Identifier/ Identifier,Reference/Online_Resource,Reference/Other_Reference_Details,Summary/ Abstract,Summary/ Purpose,Related_URL/ URL_Content_Type/ Type *,Related_URL/ URL_Content_Type/ Subtype *,Related_URL/ Protocol,Related_URL/ URL,Related_URL/ Title,Related_URL/ Description,Related_URL/ Mime_Type,Metadata_Association/ Entry_Id,Metadata_Association/ Type,Metadata_Association/ Description,IDN_Node/ Short_Name,Additional_Attributes/ Name,Additional_Attributes/ DataType,Additional_Attributes/ Description,Additional_Attributes/ MeasurementResolution,Additional_Attributes/ ParameterRangeBegin,Additional_Attributes/ ParameterRangeEnd,Additional_Attributes/ ParameterUnitsOfMeasure,Additional_Attributes/ ParameterValueAccuracy,Additional_Attributes/ ValueAccuracyExplanation,Additional_Attributes/ Value,Product_Level_ID,Product_Flag,Collection_Data_Type,Originating_Metadata_Node,Metadata_Name,Metadata_Version,DIF_Revision_History,Metadata_Dates/ Metadata_Creation,Metadata_Dates/ Metadata_Last_Revision,Metadata_Dates/ Metadata_Future_Review,Metadata_Dates/ Metadata_Delete,Metadata_Dates/ Data_Creation,Metadata_Dates/ Data_Last_Revision,Metadata_Dates/ Data_Future_Review,Metadata_Dates/ Data_Delete,Private,Extended_Metadata/ Metadata/ Group,Extended_Metadata/ Metadata/ Name,Extended_Metadata/ Metadata/ Description,Extended_Metadata/ Metadata/ Type,Extended_Metadata/ Metadata/ Update_Date,Extended_Metadata/ Value,Checked by:,Comments:,# Red fields (absolute errors):,# Yellow fields (recommended fixes),# Green fields (observations/ may or may not need to be fixed),# np fields (not in the metadata, and not marked by any color),# fields checked (265 - #np fields),% red fields,% yellow fields,% green fields\n'
def silentremove(filename):
    """Remove *filename* from disk, silently ignoring a missing file.

    Any other OSError (e.g. permission denied, directory) is re-raised.
    """
    try:
        os.remove(filename)
    except FileNotFoundError:
        # Equivalent to the errno.ENOENT check of the original, but without
        # depending on the `errno` module being imported at file level.
        pass
def replace_file(filename):
    """Rewrite *filename* in place, collapsing any line whose first four
    characters are ``<DIF`` into a bare ``<DIF>`` tag.

    # NOTE(review): presumably this strips namespace attributes from the
    # opening DIF tag so that ElementTree tag names are not
    # namespace-qualified — confirm against the XML inputs.
    """
    with open(filename) as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        if line[:4] == "<DIF":
            lines[i] = '<DIF>\n'
    # Context managers guarantee the handles are closed even on error;
    # writelines replaces the manual per-line write loop.
    with open(filename, 'w') as f:
        f.writelines(lines)
def doCollectionCheckwithRecordsDIF(filename, outputform='CSV', outfilename="result.csv"):
    """Run the DIF collection checker over every <DIF> record in *filename*.

    :param filename: path to a DIF XML file (normalized in place first).
    :param outputform: 'JSON' to return the check result of the first
        record; anything else writes one CSV row per record to *outfilename*.
    :param outfilename: CSV output path used in the non-JSON branch.
    """
    replace_file(filename)
    xml = ElementTree.parse(filename)
    root_element = xml.getroot()
    ck = CollectionCheckerDIF.Checker()
    if outputform == 'JSON':
        # NOTE(review): returns after the FIRST DIF record, as in the
        # original flattened code — confirm files only ever hold one record.
        for collection in root_element.iter('DIF'):
            metadata = XmlDictConfigDIF(collection)
            return ck.checkAllJSON(metadata)
    else:
        # `with` ensures the output file is closed even if a check raises;
        # the original leaked the handle on any exception.
        with open(outfilename, 'w') as out_fp:
            out_fp.write(collection_output_header)
            for collection in root_element.iter('DIF'):
                metadata = XmlDictConfigDIF(collection)
                result = ck.checkAll(metadata)
                out_fp.write(filename + ", " + result + "\n")
def doCollectionCheckwithShortNameListDIF(filename, outfilename="result.csv", tmp_path="./"):
    """For each collection short name listed in *filename* (one per line),
    download its DIF record, run all checks, and append one CSV row per
    <DIF> record to *outfilename*.

    :param filename: text file of short names, one per line (blank lines skipped).
    :param outfilename: CSV output path.
    :param tmp_path: directory used for the temporary downloaded records.
    """
    ck = CollectionCheckerDIF.Checker()
    with open(filename, 'r') as in_fp, open(outfilename, 'w') as out_fp:
        out_fp.write(collection_output_header)
        # BUG FIX: the original iterated with `iter(in_fp.readline, b'')` on a
        # text-mode file; readline() returns str, so the bytes sentinel b''
        # never matched and the loop never terminated at EOF. Iterating the
        # file object directly is the correct (and idiomatic) form.
        for line in in_fp:
            shortName = line.rstrip()
            if not shortName:
                continue
            result = searchCollection(limit=100, short_name=shortName)
            result[0].download(tmp_path)
            fileNameTemp = tmp_path + shortName.replace('/', '')
            replace_file(fileNameTemp)
            xml = ElementTree.parse(fileNameTemp)
            root_element = xml.getroot()
            for collection in root_element.iter('DIF'):
                metadata = XmlDictConfigDIF(collection)
                result = ck.checkAll(metadata)
                out_fp.write(shortName + ", " + result + '\n')
            # Best-effort cleanup of the downloaded temporary record.
            silentremove(fileNameTemp)
| true
| true
|
f707eff3e3f76431a210c5b3441c3dfca87b5471
| 3,463
|
py
|
Python
|
code/storyboard.py
|
athna/cytrone
|
b37c8f249d27c786028595493887f9f0ecbeae39
|
[
"BSD-3-Clause"
] | 1
|
2020-04-23T02:09:45.000Z
|
2020-04-23T02:09:45.000Z
|
code/storyboard.py
|
LiteraryProgrammer/cytrone
|
93577d13de6102a61fbc306639253250847de537
|
[
"BSD-3-Clause"
] | null | null | null |
code/storyboard.py
|
LiteraryProgrammer/cytrone
|
93577d13de6102a61fbc306639253250847de537
|
[
"BSD-3-Clause"
] | null | null | null |
#############################################################################
# Classes related to the CyTrONE storyboard
#############################################################################
class Storyboard:
    """Shared string constants for the CyTrONE training framework:
    feature flags, log separators, server response keys, and the
    user-facing status/error messages returned by the servers."""
    # Global configuration flags
    ENABLE_HTTPS = True
    ENABLE_PASSWORD = True
    # Separator constants (used for log output formatting)
    SEPARATOR1 = "-------------------------------------------------------------------------"
    SEPARATOR2 = "========================================================================="
    SEPARATOR3 = "#########################################################################"
    # Server status keys (JSON field names in server responses)
    SERVER_STATUS_KEY = "status"
    SERVER_STATUS_SUCCESS = "SUCCESS"
    SERVER_STATUS_ERROR = "ERROR"
    SERVER_ACTIVITY_ID_KEY = "activity_id"
    SERVER_MESSAGE_KEY = "message"
    # Server status messages
    USER_SETTINGS_LOADING_ERROR = "Server could not load the user information database"
    USER_ID_MISSING_ERROR = "User id is missing"
    USER_ID_INVALID_ERROR = "User id is invalid"
    USER_PASSWORD_MISSING_ERROR = "User password is missing"
    USER_PASSWORD_NOT_IN_DATABASE_ERROR = "User password not in database"
    USER_ID_PASSWORD_INVALID_ERROR = "User id and/or password are invalid"
    ACTION_MISSING_ERROR = "Action is missing"
    ACTION_INVALID_ERROR = "Action is invalid"
    LANGUAGE_MISSING_ERROR = "Language is missing"
    LANGUAGE_INVALID_ERROR = "Language is invalid"
    TRAINING_SETTINGS_LOADING_ERROR = "Server could not load the training settings database"
    INSTANCE_COUNT_MISSING_ERROR = "Instance count is missing"
    INSTANCE_COUNT_INVALID_ERROR = "Instance count is invalid"
    TRAINING_TYPE_MISSING_ERROR = "Training type is invalid or missing"
    SCENARIO_NAME_MISSING_ERROR = "Scenario name is missing"
    LEVEL_NAME_MISSING_ERROR = "Level name is missing"
    SESSION_ALLOCATION_ERROR = "Server could not allocate a new session (maximum number reached)"
    CONTENT_IDENTIFICATION_ERROR = "Server could not determine the training content for the specified scenario and level"
    CONTENT_LOADING_ERROR = "Server could not load the training content"
    CONTENT_UPLOAD_ERROR = "LMS content manager could not upload the training content"
    CONTENT_REMOVAL_ERROR = "LMS content manager could not remove the training activity"
    CONTENT_SERVER_ERROR = "Server could not communicate with the LMS content manager"
    TEMPLATE_IDENTIFICATION_ERROR = "Server could not determine the cyber range template for the specified scenario and level"
    TEMPLATE_LOADING_ERROR = "Server could not load the cyber range template"
    INSTANTIATION_SERVER_ERROR = "Server could not communicate with the cyber range manager"
    INSTANTIATION_ERROR = "Cyber range manager could not instantiate the cyber range"
    INSTANTIATION_STATUS_FILE_NOT_FOUND = "Instantiation status file could not be found"
    INSTANTIATION_CYRIS_IO_ERROR = "CyRIS execution I/O error"
    INSTANTIATION_SIMULATED_ERROR = "Simulated range instantiation error"
    DESTRUCTION_ERROR = "Cyber range manager could not destroy the cyber range"
    DESTRUCTION_SIMULATED_ERROR = "Simulated range destruction error"
    DESTRUCTION_SCRIPT_NOT_FOUND = "Destruction script could not be found"
    SESSION_ID_MISSING_ERROR = "Session id is missing"
    SESSION_ID_INVALID_ERROR = "Session id is invalid"
    SESSION_INFO_CONSISTENCY_ERROR = "Server encountered a session information consistency issue"
| 47.438356
| 127
| 0.689287
|
class Storyboard:
    """Shared string constants for the CyTrONE training framework
    (comment-stripped duplicate copy kept as-is in this dump)."""
    # Global configuration flags
    ENABLE_HTTPS = True
    ENABLE_PASSWORD = True
    # Separator constants for log output
    SEPARATOR1 = "-------------------------------------------------------------------------"
    SEPARATOR2 = "========================================================================="
    SEPARATOR3 = "#########################################################################"
    # JSON field names used in server responses
    SERVER_STATUS_KEY = "status"
    SERVER_STATUS_SUCCESS = "SUCCESS"
    SERVER_STATUS_ERROR = "ERROR"
    SERVER_ACTIVITY_ID_KEY = "activity_id"
    SERVER_MESSAGE_KEY = "message"
    # User-facing status/error messages
    USER_SETTINGS_LOADING_ERROR = "Server could not load the user information database"
    USER_ID_MISSING_ERROR = "User id is missing"
    USER_ID_INVALID_ERROR = "User id is invalid"
    USER_PASSWORD_MISSING_ERROR = "User password is missing"
    USER_PASSWORD_NOT_IN_DATABASE_ERROR = "User password not in database"
    USER_ID_PASSWORD_INVALID_ERROR = "User id and/or password are invalid"
    ACTION_MISSING_ERROR = "Action is missing"
    ACTION_INVALID_ERROR = "Action is invalid"
    LANGUAGE_MISSING_ERROR = "Language is missing"
    LANGUAGE_INVALID_ERROR = "Language is invalid"
    TRAINING_SETTINGS_LOADING_ERROR = "Server could not load the training settings database"
    INSTANCE_COUNT_MISSING_ERROR = "Instance count is missing"
    INSTANCE_COUNT_INVALID_ERROR = "Instance count is invalid"
    TRAINING_TYPE_MISSING_ERROR = "Training type is invalid or missing"
    SCENARIO_NAME_MISSING_ERROR = "Scenario name is missing"
    LEVEL_NAME_MISSING_ERROR = "Level name is missing"
    SESSION_ALLOCATION_ERROR = "Server could not allocate a new session (maximum number reached)"
    CONTENT_IDENTIFICATION_ERROR = "Server could not determine the training content for the specified scenario and level"
    CONTENT_LOADING_ERROR = "Server could not load the training content"
    CONTENT_UPLOAD_ERROR = "LMS content manager could not upload the training content"
    CONTENT_REMOVAL_ERROR = "LMS content manager could not remove the training activity"
    CONTENT_SERVER_ERROR = "Server could not communicate with the LMS content manager"
    TEMPLATE_IDENTIFICATION_ERROR = "Server could not determine the cyber range template for the specified scenario and level"
    TEMPLATE_LOADING_ERROR = "Server could not load the cyber range template"
    INSTANTIATION_SERVER_ERROR = "Server could not communicate with the cyber range manager"
    INSTANTIATION_ERROR = "Cyber range manager could not instantiate the cyber range"
    INSTANTIATION_STATUS_FILE_NOT_FOUND = "Instantiation status file could not be found"
    INSTANTIATION_CYRIS_IO_ERROR = "CyRIS execution I/O error"
    INSTANTIATION_SIMULATED_ERROR = "Simulated range instantiation error"
    DESTRUCTION_ERROR = "Cyber range manager could not destroy the cyber range"
    DESTRUCTION_SIMULATED_ERROR = "Simulated range destruction error"
    DESTRUCTION_SCRIPT_NOT_FOUND = "Destruction script could not be found"
    SESSION_ID_MISSING_ERROR = "Session id is missing"
    SESSION_ID_INVALID_ERROR = "Session id is invalid"
    SESSION_INFO_CONSISTENCY_ERROR = "Server encountered a session information consistency issue"
| true
| true
|
f707f0123f7df4e31a7504a3e66c201f5f811ebd
| 3,574
|
py
|
Python
|
pgAdmin/browser/server_groups/servers/databases/foreign_data_wrappers/tests/test_fdw_delete_multiple.py
|
WeilerWebServices/PostgreSQL
|
ae594ed077bebbad1be3c1d95c38b7c2c2683e8c
|
[
"PostgreSQL"
] | null | null | null |
pgAdmin/browser/server_groups/servers/databases/foreign_data_wrappers/tests/test_fdw_delete_multiple.py
|
WeilerWebServices/PostgreSQL
|
ae594ed077bebbad1be3c1d95c38b7c2c2683e8c
|
[
"PostgreSQL"
] | null | null | null |
pgAdmin/browser/server_groups/servers/databases/foreign_data_wrappers/tests/test_fdw_delete_multiple.py
|
WeilerWebServices/PostgreSQL
|
ae594ed077bebbad1be3c1d95c38b7c2c2683e8c
|
[
"PostgreSQL"
] | null | null | null |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
from __future__ import print_function
import uuid
import json
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as fdw_utils
class FDWDDeleteMultipleTestCase(BaseTestGenerator):
    """This class will delete foreign data wrappers under test database."""
    skip_on_database = ['gpdb']
    # Fetching default URL for foreign_data_wrapper node.
    scenarios = [
        ('Check FDW Node',
         dict(url='/browser/foreign_data_wrapper/obj/'))
    ]

    def setUp(self):
        """Create two foreign data wrappers to be deleted by the test."""
        super(FDWDDeleteMultipleTestCase, self).setUp()
        self.schema_data = parent_node_dict['schema'][-1]
        self.server_id = self.schema_data['server_id']
        self.db_id = self.schema_data['db_id']
        self.db_name = parent_node_dict["database"][-1]["db_name"]
        self.schema_name = self.schema_data['schema_name']
        # Unique FDW names to avoid collisions between test runs.
        self.fdw_names = ["fdw_{0}".format(str(uuid.uuid4())[1:8]),
                          "fdw_{0}".format(str(uuid.uuid4())[1:8])]
        self.fdw_ids = [fdw_utils.create_fdw(self.server, self.db_name,
                                             self.fdw_names[0]),
                        fdw_utils.create_fdw(self.server, self.db_name,
                                             self.fdw_names[1])]

    def runTest(self):
        """Verify both FDWs exist, then delete them in one request (HTTP 200)."""
        db_con = database_utils.connect_database(self,
                                                 utils.SERVER_GROUP,
                                                 self.server_id,
                                                 self.db_id)
        if not db_con["info"] == "Database connected.":
            raise Exception("Could not connect to database.")
        fdw_response = fdw_utils.verify_fdw(self.server, self.db_name,
                                            self.fdw_names[0])
        if not fdw_response:
            raise Exception("Could not find FDW.")
        fdw_response = fdw_utils.verify_fdw(self.server, self.db_name,
                                            self.fdw_names[1])
        if not fdw_response:
            raise Exception("Could not find FDW.")
        data = {'ids': self.fdw_ids}
        delete_response = self.tester.delete(self.url +
                                             str(utils.SERVER_GROUP) +
                                             '/' + str(self.server_id) + '/' +
                                             str(self.db_id) + '/',
                                             follow_redirects=True,
                                             data=json.dumps(data),
                                             content_type='html/json')
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use the canonical assertEqual instead.
        self.assertEqual(delete_response.status_code, 200)

    def tearDown(self):
        """This function disconnect the test database and drop added extension
        and dependant objects."""
        database_utils.disconnect_database(self, self.server_id,
                                           self.db_id)
| 46.415584
| 78
| 0.535534
|
from __future__ import print_function
import uuid
import json
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as fdw_utils
class FDWDDeleteMultipleTestCase(BaseTestGenerator):
    """This class will delete foreign data wrappers under test database."""
    skip_on_database = ['gpdb']
    # Default URL for the foreign_data_wrapper node.
    scenarios = [ ('Check FDW Node',
         dict(url='/browser/foreign_data_wrapper/obj/'))]
    def setUp(self):
        """Create two foreign data wrappers to be deleted by the test."""
        super(FDWDDeleteMultipleTestCase, self).setUp()
        self.schema_data = parent_node_dict['schema'][-1]
        self.server_id = self.schema_data['server_id']
        self.db_id = self.schema_data['db_id']
        self.db_name = parent_node_dict["database"][-1]["db_name"]
        self.schema_name = self.schema_data['schema_name']
        # Unique FDW names to avoid collisions between test runs.
        self.fdw_names = ["fdw_{0}".format(str(uuid.uuid4())[1:8]),
                          "fdw_{0}".format(str(uuid.uuid4())[1:8])]
        self.fdw_ids = [fdw_utils.create_fdw(self.server, self.db_name,
                                             self.fdw_names[0]),
                        fdw_utils.create_fdw(self.server, self.db_name,
                                             self.fdw_names[1])]
    def runTest(self):
        """Verify both FDWs exist, then delete them in one request (HTTP 200)."""
        db_con = database_utils.connect_database(self,
                                                 utils.SERVER_GROUP,
                                                 self.server_id,
                                                 self.db_id)
        if not db_con["info"] == "Database connected.":
            raise Exception("Could not connect to database.")
        fdw_response = fdw_utils.verify_fdw(self.server, self.db_name,
                                            self.fdw_names[0])
        if not fdw_response:
            raise Exception("Could not find FDW.")
        fdw_response = fdw_utils.verify_fdw(self.server, self.db_name,
                                            self.fdw_names[1])
        if not fdw_response:
            raise Exception("Could not find FDW.")
        data = {'ids': self.fdw_ids}
        delete_response = self.tester.delete(self.url +
                                             str(utils.SERVER_GROUP) +
                                             '/' + str(self.server_id) + '/' +
                                             str(self.db_id) + '/',
                                             follow_redirects=True,
                                             data=json.dumps(data),
                                             content_type='html/json')
        self.assertEquals(delete_response.status_code, 200)
    def tearDown(self):
        """Disconnect the test database."""
        database_utils.disconnect_database(self, self.server_id,
                                           self.db_id)
| true
| true
|
f707f1254d79d55178ea62e6f1135f1d5e0e002b
| 30,174
|
py
|
Python
|
discord_slash/context.py
|
RedstoneZockt/discord-py-interactions
|
5ddbd4f303e14787e16a2c906bccd1a4e828ef6c
|
[
"MIT"
] | null | null | null |
discord_slash/context.py
|
RedstoneZockt/discord-py-interactions
|
5ddbd4f303e14787e16a2c906bccd1a4e828ef6c
|
[
"MIT"
] | null | null | null |
discord_slash/context.py
|
RedstoneZockt/discord-py-interactions
|
5ddbd4f303e14787e16a2c906bccd1a4e828ef6c
|
[
"MIT"
] | null | null | null |
import datetime
import typing
from typing import TYPE_CHECKING
from warnings import warn
import discord
from discord.ext import commands
from discord.utils import snowflake_time
from . import error, http, model
from .dpy_overrides import ComponentMessage
if TYPE_CHECKING: # circular import sucks for typehinting
from . import client
class InteractionContext:
"""
Base context for interactions.\n
In some ways similar with discord.ext.commands.Context.
.. warning::
Do not manually init this model.
:ivar message: Message that invoked the slash command.
:ivar interaction_id: Interaction ID of the command message.
:ivar bot: discord.py client.
:ivar _http: :class:`.http.SlashCommandRequest` of the client.
:ivar _logger: Logger instance.
:ivar data: The raw data of the interaction.
:ivar values: The values sent with the interaction. Currently for selects.
:ivar deferred: Whether the command is currently deferred (loading state)
:ivar _deferred_hidden: Internal var to check that state stays the same
:ivar responded: Whether you have responded with a message to the interaction.
:ivar guild_id: Guild ID of the command message. If the command was invoked in DM, then it is ``None``
:ivar author_id: User ID representing author of the command message.
:ivar channel_id: Channel ID representing channel of the command message.
:ivar author: User or Member instance of the command invoke.
"""
    def __init__(
        self,
        _http: http.SlashCommandRequest,
        _json: dict,
        _discord: typing.Union[discord.Client, commands.Bot],
        logger,
    ):
        # Raw interaction payload fields. `_json` is the gateway/webhook
        # interaction dict; keys mirror Discord's interaction schema.
        self._token = _json["token"]
        self.message = None
        self.menu_messages = None
        self.data = _json["data"]
        self.interaction_id = _json["id"]
        self._http = _http
        self.bot = _discord
        self._logger = logger
        # Response-state flags: set once defer()/send() has been called.
        self.deferred = False
        self.responded = False
        # "values" is only present for select-menu interactions.
        self.values = _json["data"]["values"] if "values" in _json["data"] else None
        self._deferred_hidden = False  # To check if the patch to the deferred response matches
        # guild_id is absent for DM invocations.
        self.guild_id = int(_json["guild_id"]) if "guild_id" in _json.keys() else None
        # In a guild the author arrives as "member" (with nested "user");
        # in a DM it arrives as a bare "user" object.
        self.author_id = int(
            _json["member"]["user"]["id"] if "member" in _json.keys() else _json["user"]["id"]
        )
        self.channel_id = int(_json["channel_id"])
        if self.guild:
            # Cached guild available: build a full Member.
            self.author = discord.Member(
                data=_json["member"], state=self.bot._connection, guild=self.guild
            )
        elif self.guild_id:
            # Guild known but not cached: fall back to a plain User.
            self.author = discord.User(data=_json["member"]["user"], state=self.bot._connection)
        else:
            # DM invocation.
            self.author = discord.User(data=_json["user"], state=self.bot._connection)
        # Creation time derived from the interaction snowflake.
        self.created_at: datetime.datetime = snowflake_time(int(self.interaction_id))
@property
def _deffered_hidden(self):
warn(
"`_deffered_hidden` as been renamed to `_deferred_hidden`.",
DeprecationWarning,
stacklevel=2,
)
return self._deferred_hidden
@_deffered_hidden.setter
def _deffered_hidden(self, value):
warn(
"`_deffered_hidden` as been renamed to `_deferred_hidden`.",
DeprecationWarning,
stacklevel=2,
)
self._deferred_hidden = value
@property
def deffered(self):
warn("`deffered` as been renamed to `deferred`.", DeprecationWarning, stacklevel=2)
return self.deferred
@deffered.setter
def deffered(self, value):
warn("`deffered` as been renamed to `deferred`.", DeprecationWarning, stacklevel=2)
self.deferred = value
@property
def guild(self) -> typing.Optional[discord.Guild]:
"""
Guild instance of the command invoke. If the command was invoked in DM, then it is ``None``
:return: Optional[discord.Guild]
"""
return self.bot.get_guild(self.guild_id) if self.guild_id else None
@property
def channel(self) -> typing.Optional[typing.Union[discord.TextChannel, discord.DMChannel]]:
"""
Channel instance of the command invoke.
:return: Optional[Union[discord.abc.GuildChannel, discord.abc.PrivateChannel]]
"""
return self.bot.get_channel(self.channel_id)
@property
def voice_client(self) -> typing.Optional[discord.VoiceProtocol]:
"""
VoiceClient instance of the command invoke. If the command was invoked in DM, then it is ``None``.
If the bot is not connected to any Voice/Stage channels, then it is ``None``.
:return: Optional[discord.VoiceProtocol]
"""
return self.guild.voice_client if self.guild else None
@property
def me(self) -> typing.Union[discord.Member, discord.ClientUser]:
"""
Bot member instance of the command invoke. If the command was invoked in DM, then it is ``discord.ClientUser``.
:return: Union[discord.Member, discord.ClientUser]
"""
return self.guild.me if self.guild is not None else self.bot.user
async def defer(self, hidden: bool = False):
"""
'Defers' the response, showing a loading state to the user
:param hidden: Whether the deferred response should be ephemeral . Default ``False``.
"""
if self.deferred or self.responded:
raise error.AlreadyResponded("You have already responded to this command!")
base = {"type": 5}
if hidden:
base["data"] = {"flags": 64}
self._deferred_hidden = True
await self._http.post_initial_response(base, self.interaction_id, self._token)
self.deferred = True
    async def send(
        self,
        content: str = "",
        *,
        embed: discord.Embed = None,
        embeds: typing.List[discord.Embed] = None,
        tts: bool = False,
        file: discord.File = None,
        files: typing.List[discord.File] = None,
        allowed_mentions: discord.AllowedMentions = None,
        hidden: bool = False,
        delete_after: float = None,
        components: typing.List[dict] = None,
    ) -> model.SlashMessage:
        """
        Sends response of the interaction.
        .. warning::
            - Since Release 1.0.9, this is completely changed. If you are migrating from older version, please make sure to fix the usage.
            - You can't use both ``embed`` and ``embeds`` at the same time, also applies to ``file`` and ``files``.
            - If you send files in the initial response, this will defer if it's not been deferred, and then PATCH with the message
        :param content: Content of the response.
        :type content: str
        :param embed: Embed of the response.
        :type embed: discord.Embed
        :param embeds: Embeds of the response. Maximum 10.
        :type embeds: List[discord.Embed]
        :param tts: Whether to speak message using tts. Default ``False``.
        :type tts: bool
        :param file: File to send.
        :type file: discord.File
        :param files: Files to send.
        :type files: List[discord.File]
        :param allowed_mentions: AllowedMentions of the message.
        :type allowed_mentions: discord.AllowedMentions
        :param hidden: Whether the message is hidden, which means message content will only be seen to the author.
        :type hidden: bool
        :param delete_after: If provided, the number of seconds to wait in the background before deleting the message we just sent. If the deletion fails, then it is silently ignored.
        :type delete_after: float
        :param components: Message components in the response. The top level must be made of ActionRows.
        :type components: List[dict]
        :return: Union[discord.Message, dict]
        """
        # --- argument validation: normalise singular embed/file into list forms ---
        if embed and embeds:
            raise error.IncorrectFormat("You can't use both `embed` and `embeds`!")
        if embed:
            embeds = [embed]
        if embeds:
            if not isinstance(embeds, list):
                raise error.IncorrectFormat("Provide a list of embeds.")
            elif len(embeds) > 10:
                raise error.IncorrectFormat("Do not provide more than 10 embeds.")
        if file and files:
            raise error.IncorrectFormat("You can't use both `file` and `files`!")
        if file:
            files = [file]
        if delete_after and hidden:
            raise error.IncorrectFormat("You can't delete a hidden message!")
        # Top-level component entries must be ActionRows (component type 1).
        if components and not all(comp.get("type") == 1 for comp in components):
            raise error.IncorrectFormat(
                "The top level of the components list must be made of ActionRows!"
            )
        # Merge the per-call allowed_mentions with the bot-level default, if either exists.
        if allowed_mentions is not None:
            if self.bot.allowed_mentions is not None:
                allowed_mentions = self.bot.allowed_mentions.merge(allowed_mentions).to_dict()
            else:
                allowed_mentions = allowed_mentions.to_dict()
        else:
            if self.bot.allowed_mentions is not None:
                allowed_mentions = self.bot.allowed_mentions.to_dict()
            else:
                allowed_mentions = {}
        base = {
            "content": content,
            "tts": tts,
            "embeds": [x.to_dict() for x in embeds] if embeds else [],
            "allowed_mentions": allowed_mentions,
            "components": components or [],
        }
        if hidden:
            # Flag 64 == ephemeral: only the invoking user sees the message.
            base["flags"] = 64
        initial_message = False
        if not self.responded:
            initial_message = True
            # Per the warning above: files cannot ride on the initial response, so
            # defer first and deliver them via the edit (PATCH) below.
            if files and not self.deferred:
                await self.defer(hidden=hidden)
            if self.deferred:
                if self._deferred_hidden != hidden:
                    self._logger.warning(
                        "Deferred response might not be what you set it to! (hidden / visible) "
                        "This is because it was deferred in a different state."
                    )
                resp = await self._http.edit(base, self._token, files=files)
                self.deferred = False
            else:
                # Type 4 sends the message payload as the immediate initial response.
                json_data = {"type": 4, "data": base}
                await self._http.post_initial_response(json_data, self.interaction_id, self._token)
                if not hidden:
                    # Initial response returns no message body; fetch it with an
                    # empty edit so a SlashMessage can be built below.
                    resp = await self._http.edit({}, self._token)
                else:
                    resp = {}
            self.responded = True
        else:
            # Already responded once; anything further goes out as a follow-up.
            resp = await self._http.post_followup(base, self._token, files=files)
        if files:
            for file in files:
                file.close()
        if not hidden:
            smsg = model.SlashMessage(
                state=self.bot._connection,
                data=resp,
                channel=self.channel or discord.Object(id=self.channel_id),
                _http=self._http,
                interaction_token=self._token,
            )
            if delete_after:
                self.bot.loop.create_task(smsg.delete(delay=delete_after))
            if initial_message:
                self.message = smsg
            return smsg
        else:
            # Hidden responses return no usable message object, just the raw dict.
            return resp
async def reply(
self,
content: str = "",
*,
embed: discord.Embed = None,
embeds: typing.List[discord.Embed] = None,
tts: bool = False,
file: discord.File = None,
files: typing.List[discord.File] = None,
allowed_mentions: discord.AllowedMentions = None,
hidden: bool = False,
delete_after: float = None,
components: typing.List[dict] = None,
) -> model.SlashMessage:
"""
Sends response of the interaction. This is currently an alias of the ``.send()`` method.
.. warning::
- Since Release 1.0.9, this is completely changed. If you are migrating from older version, please make sure to fix the usage.
- You can't use both ``embed`` and ``embeds`` at the same time, also applies to ``file`` and ``files``.
- If you send files in the initial response, this will defer if it's not been deferred, and then PATCH with the message
:param content: Content of the response.
:type content: str
:param embed: Embed of the response.
:type embed: discord.Embed
:param embeds: Embeds of the response. Maximum 10.
:type embeds: List[discord.Embed]
:param tts: Whether to speak message using tts. Default ``False``.
:type tts: bool
:param file: File to send.
:type file: discord.File
:param files: Files to send.
:type files: List[discord.File]
:param allowed_mentions: AllowedMentions of the message.
:type allowed_mentions: discord.AllowedMentions
:param hidden: Whether the message is hidden, which means message content will only be seen to the author.
:type hidden: bool
:param delete_after: If provided, the number of seconds to wait in the background before deleting the message we just sent. If the deletion fails, then it is silently ignored.
:type delete_after: float
:param components: Message components in the response. The top level must be made of ActionRows.
:type components: List[dict]
:return: Union[discord.Message, dict]
"""
return await self.send(
content=content,
embed=embed,
embeds=embeds,
tts=tts,
file=file,
files=files,
allowed_mentions=allowed_mentions,
hidden=hidden,
delete_after=delete_after,
components=components,
)
class SlashContext(InteractionContext):
    """
    Context of a slash command. Has all attributes from :class:`InteractionContext`, plus the slash-command-specific ones below.
    :ivar name: Name of the command.
    :ivar args: List of processed arguments invoked with the command.
    :ivar kwargs: Dictionary of processed arguments invoked with the command.
    :ivar subcommand_name: Subcommand of the command.
    :ivar subcommand_group: Subcommand group of the command.
    :ivar command_id: ID of the command.
    """

    def __init__(
        self,
        _http: http.SlashCommandRequest,
        _json: dict,
        _discord: typing.Union[discord.Client, commands.Bot],
        logger,
    ):
        command_data = _json["data"]
        # d.py-style aliases so cogs written for commands.Context keep working.
        self.name = self.command = self.invoked_with = command_data["name"]
        self.args = []
        self.kwargs = {}
        self.subcommand_name = self.invoked_subcommand = self.subcommand_passed = None
        self.subcommand_group = self.invoked_subcommand_group = self.subcommand_group_passed = None
        self.command_id = command_data["id"]
        super().__init__(_http=_http, _json=_json, _discord=_discord, logger=logger)

    @property
    def slash(self) -> "client.SlashCommand":
        """
        The SlashCommand extension instance attached to the bot at runtime.
        :return: client.SlashCommand
        """
        return self.bot.slash  # noqa

    @property
    def cog(self) -> typing.Optional[commands.Cog]:
        """
        Cog that owns the invoked command, if it was registered from a cog.
        :return: Optional[commands.Cog]
        """
        cmd_obj = self.slash.commands[self.command]
        from_cog = isinstance(cmd_obj, (model.CogBaseCommandObject, model.CogSubcommandObject))
        return cmd_obj.cog if from_cog else None

    async def invoke(self, *args, **kwargs):
        """
        Invokes a command with the arguments given.\n
        Similar to d.py's `ctx.invoke` function and documentation.\n
        .. note::
            This does not handle converters, checks, cooldowns, pre-invoke,
            or after-invoke hooks in any matter. It calls the internal callback
            directly as-if it was a regular function.
            You must take care in passing the proper arguments when
            using this function.
        .. warning::
            The first parameter passed **must** be the command being invoked.
            While using `ctx.defer`, if the command invoked includes usage of that command, do not invoke
            `ctx.defer` before calling this function. It can not defer twice.
        :param args: Args for the command.
        :param kwargs: Keyword args for the command.
        :raises: :exc:`TypeError`
        """
        if not args:
            raise TypeError("Missing command to invoke.")
        command = args[0]
        return await self.slash.invoke_command(func=command, ctx=self, args=kwargs)
class ComponentContext(InteractionContext):
    """
    Context of a component interaction. Has all attributes from :class:`InteractionContext`, plus the component-specific ones below.
    :ivar custom_id: The custom ID of the component (has alias component_id).
    :ivar component_type: The type of the component.
    :ivar component: Component data retrieved from the message. Not available if the origin message was ephemeral.
    :ivar origin_message: The origin message of the component. Not available if the origin message was ephemeral.
    :ivar origin_message_id: The ID of the origin message.
    :ivar selected_options: The options selected (only for selects)
    """

    def __init__(
        self,
        _http: http.SlashCommandRequest,
        _json: dict,
        _discord: typing.Union[discord.Client, commands.Bot],
        logger,
    ):
        self.custom_id = self.component_id = _json["data"]["custom_id"]
        self.component_type = _json["data"]["component_type"]
        super().__init__(_http=_http, _json=_json, _discord=_discord, logger=logger)
        self.origin_message = None
        self.origin_message_id = int(_json["message"]["id"]) if "message" in _json.keys() else None
        self.component = None
        self._deferred_edit_origin = False
        # Flag 64 marks the origin message as ephemeral; per the class docs the
        # message/component data is unavailable in that case, so skip building it.
        if self.origin_message_id and (_json["message"]["flags"] & 64) != 64:
            self.origin_message = ComponentMessage(
                state=self.bot._connection, channel=self.channel, data=_json["message"]
            )
            self.component = self.origin_message.get_component(self.custom_id)
        self.selected_options = None
        # Only selects (component type 3) submit chosen values.
        if self.component_type == 3:
            self.selected_options = _json["data"].get("values", [])

    async def defer(self, hidden: bool = False, edit_origin: bool = False, ignore: bool = False):
        """
        'Defers' the response, showing a loading state to the user
        :param hidden: Whether the deferred response should be ephemeral. Default ``False``.
        :param edit_origin: Whether the type is editing the origin message. If ``False``, the deferred response will be for a follow up message. Defaults ``False``.
        :param ignore: Whether to just ignore and not edit or send response. Using this can avoid showing interaction loading state. Default ``False``.
        """
        if self.deferred or self.responded:
            raise error.AlreadyResponded("You have already responded to this command!")
        # Type 6 acknowledges with a deferred origin-message update; type 5 defers
        # a brand-new response message.
        base = {"type": 6 if edit_origin or ignore else 5}
        if edit_origin and ignore:
            raise error.IncorrectFormat("'edit_origin' and 'ignore' are mutually exclusive")
        if hidden:
            if edit_origin:
                raise error.IncorrectFormat(
                    "'hidden' and 'edit_origin' flags are mutually exclusive"
                )
            elif ignore:
                self._deferred_hidden = True
            else:
                base["data"] = {"flags": 64}
                self._deferred_hidden = True
        self._deferred_edit_origin = edit_origin
        await self._http.post_initial_response(base, self.interaction_id, self._token)
        self.deferred = not ignore
        if ignore:
            # Acknowledged with nothing further planned: mark as responded so later
            # sends become follow-ups.
            self.responded = True

    async def send(
        self,
        content: str = "",
        *,
        embed: discord.Embed = None,
        embeds: typing.List[discord.Embed] = None,
        tts: bool = False,
        file: discord.File = None,
        files: typing.List[discord.File] = None,
        allowed_mentions: discord.AllowedMentions = None,
        hidden: bool = False,
        delete_after: float = None,
        components: typing.List[dict] = None,
    ) -> model.SlashMessage:
        # Same contract as InteractionContext.send; warns when the pending deferral
        # was an edit-origin ack, since this sends a new message instead.
        if self.deferred and self._deferred_edit_origin:
            self._logger.warning(
                "Deferred response might not be what you set it to! (edit origin / send response message) "
                "This is because it was deferred with different response type."
            )
        return await super().send(
            content,
            embed=embed,
            embeds=embeds,
            tts=tts,
            file=file,
            files=files,
            allowed_mentions=allowed_mentions,
            hidden=hidden,
            delete_after=delete_after,
            components=components,
        )

    async def edit_origin(self, **fields):
        """
        Edits the origin message of the component.
        Refer to :meth:`discord.Message.edit` and :meth:`InteractionContext.send` for fields.
        """
        _resp = {}
        # Each field is optional; EAFP lookups distinguish "absent" from "None"
        # (None explicitly clears the field).
        try:
            content = fields["content"]
        except KeyError:
            pass
        else:
            if content is not None:
                content = str(content)
            _resp["content"] = content
        try:
            components = fields["components"]
        except KeyError:
            pass
        else:
            if components is None:
                _resp["components"] = []
            else:
                _resp["components"] = components
        try:
            embeds = fields["embeds"]
        except KeyError:
            # embeds not supplied; leave untouched
            pass
        else:
            if not isinstance(embeds, list):
                raise error.IncorrectFormat("Provide a list of embeds.")
            if len(embeds) > 10:
                raise error.IncorrectFormat("Do not provide more than 10 embeds.")
            _resp["embeds"] = [e.to_dict() for e in embeds]
        try:
            embed = fields["embed"]
        except KeyError:
            pass
        else:
            if "embeds" in _resp:
                raise error.IncorrectFormat("You can't use both `embed` and `embeds`!")
            if embed is None:
                _resp["embeds"] = []
            else:
                _resp["embeds"] = [embed.to_dict()]
        file = fields.get("file")
        files = fields.get("files")
        if files is not None and file is not None:
            raise error.IncorrectFormat("You can't use both `file` and `files`!")
        if file:
            files = [file]
        allowed_mentions = fields.get("allowed_mentions")
        if allowed_mentions is not None:
            if self.bot.allowed_mentions is not None:
                _resp["allowed_mentions"] = self.bot.allowed_mentions.merge(
                    allowed_mentions
                ).to_dict()
            else:
                _resp["allowed_mentions"] = allowed_mentions.to_dict()
        else:
            if self.bot.allowed_mentions is not None:
                _resp["allowed_mentions"] = self.bot.allowed_mentions.to_dict()
            else:
                _resp["allowed_mentions"] = {}
        if not self.responded:
            # Files can't ride on the initial response; defer (edit-origin ack)
            # and deliver them via the edit below.
            if files and not self.deferred:
                await self.defer(edit_origin=True)
            if self.deferred:
                if not self._deferred_edit_origin:
                    self._logger.warning(
                        "Deferred response might not be what you set it to! (edit origin / send response message) "
                        "This is because it was deferred with different response type."
                    )
                _json = await self._http.edit(_resp, self._token, files=files)
                self.deferred = False
            else:  # noqa: F841
                # Type 7 updates the origin message as the initial response.
                json_data = {"type": 7, "data": _resp}
                _json = await self._http.post_initial_response(  # noqa: F841
                    json_data, self.interaction_id, self._token
                )
            self.responded = True
        else:
            raise error.IncorrectFormat("Already responded")
        if files:
            for file in files:
                file.close()
        # Commented out for now as sometimes (or at least, when not deferred) _json is an empty string?
        # self.origin_message = ComponentMessage(state=self.bot._connection, channel=self.channel,
        #                                        data=_json)
class MenuContext(InteractionContext):
    """
    Context of a context menu interaction. Has all attributes from :class:`InteractionContext`, plus the context-specific ones below.
    :ivar context_type: The type of context menu command.
    :ivar _resolved: The data set for the context menu.
    :ivar target_message: The targeted message of the context menu command if present. Defaults to ``None``.
    :ivar target_id: The target ID of the context menu command.
    :ivar target_author: The author targeted from the context menu command.
    """

    def __init__(
        self,
        _http: http.SlashCommandRequest,
        _json: dict,
        _discord: typing.Union[discord.Client, commands.Bot],
        logger,
    ):
        super().__init__(_http=_http, _json=_json, _discord=_discord, logger=logger)
        self.context_type = _json["type"]
        self._resolved = self.data["resolved"] if "resolved" in self.data.keys() else None
        self.target_message = None
        self.target_author = None
        self.target_id = self.data["target_id"]
        if self._resolved is not None:
            try:
                # Message context menu: the resolved payload maps a single message
                # ID to its data; take that one entry.
                if self._resolved["messages"]:
                    _msg = [msg for msg in self._resolved["messages"]][0]
                    self.target_message = model.SlashMessage(
                        state=self.bot._connection,
                        channel=_discord.get_channel(self.channel_id),
                        data=self._resolved["messages"][_msg],
                        _http=_http,
                        interaction_token=self._token,
                    )
            except KeyError:  # noqa
                pass
            try:
                # User context menu: prefer a full Member when invoked in a guild;
                # otherwise fall back to the plain User payload.
                if self.guild and self._resolved["members"]:
                    _auth = [auth for auth in self._resolved["members"]][0]
                    self.target_author = discord.Member(
                        data=self._resolved["members"][_auth],
                        state=self.bot._connection,
                        guild=self.guild,
                    )
                else:
                    _auth = [auth for auth in self._resolved["users"]][0]
                    self.target_author = discord.User(
                        data=self._resolved["users"][_auth], state=self.bot._connection
                    )
            except KeyError:  # noqa
                pass

    @property
    def cog(self) -> typing.Optional[commands.Cog]:
        """
        Returns the cog associated with the command invoked, if any.
        :return: Optional[commands.Cog]
        """
        cmd_obj = self.slash.commands[self.command]
        if isinstance(cmd_obj, (model.CogBaseCommandObject, model.CogSubcommandObject)):
            return cmd_obj.cog
        else:
            return None

    async def defer(self, hidden: bool = False, edit_origin: bool = False, ignore: bool = False):
        """
        'Defers' the response, showing a loading state to the user
        :param hidden: Whether the deferred response should be ephemeral. Default ``False``.
        :param edit_origin: Whether the type is editing the origin message. If ``False``, the deferred response will be for a follow up message. Defaults ``False``.
        :param ignore: Whether to just ignore and not edit or send response. Using this can avoid showing interaction loading state. Default ``False``.
        """
        if self.deferred or self.responded:
            raise error.AlreadyResponded("You have already responded to this command!")
        # Type 6 acknowledges with a deferred origin-message update; type 5 defers
        # a brand-new response message.
        base = {"type": 6 if edit_origin or ignore else 5}
        if edit_origin and ignore:
            raise error.IncorrectFormat("'edit_origin' and 'ignore' are mutually exclusive")
        if hidden:
            if edit_origin:
                raise error.IncorrectFormat(
                    "'hidden' and 'edit_origin' flags are mutually exclusive"
                )
            elif ignore:
                self._deferred_hidden = True
            else:
                base["data"] = {"flags": 64}
                self._deferred_hidden = True
        self._deferred_edit_origin = edit_origin
        await self._http.post_initial_response(base, self.interaction_id, self._token)
        self.deferred = not ignore
        if ignore:
            # Acknowledged with nothing further planned: mark as responded so later
            # sends become follow-ups.
            self.responded = True

    async def send(
        self,
        content: str = "",
        *,
        embed: discord.Embed = None,
        embeds: typing.List[discord.Embed] = None,
        tts: bool = False,
        file: discord.File = None,
        files: typing.List[discord.File] = None,
        allowed_mentions: discord.AllowedMentions = None,
        hidden: bool = False,
        delete_after: float = None,
        components: typing.List[dict] = None,
    ) -> model.SlashMessage:
        # Same contract as InteractionContext.send; warns when the pending deferral
        # was an edit-origin ack, since this sends a new message instead.
        if self.deferred and self._deferred_edit_origin:
            self._logger.warning(
                "Deferred response might not be what you set it to! (edit origin / send response message) "
                "This is because it was deferred with different response type."
            )
        return await super().send(
            content,
            embed=embed,
            embeds=embeds,
            tts=tts,
            file=file,
            files=files,
            allowed_mentions=allowed_mentions,
            hidden=hidden,
            delete_after=delete_after,
            components=components,
        )
| 39.034929
| 183
| 0.598562
|
import datetime
import typing
from typing import TYPE_CHECKING
from warnings import warn
import discord
from discord.ext import commands
from discord.utils import snowflake_time
from . import error, http, model
from .dpy_overrides import ComponentMessage
if TYPE_CHECKING: from . import client
class InteractionContext:
    """
    Base context for an incoming Discord interaction (slash command, component, or menu).
    :ivar message: The message object sent as the initial response, set by :meth:`send`.
    :ivar bot: discord.py client or Bot instance handling the interaction.
    :ivar data: The raw ``data`` object of the interaction payload.
    :ivar interaction_id: Interaction ID of the command message.
    :ivar deferred: Whether the command response was deferred.
    :ivar responded: Whether a response was already sent.
    :ivar values: Values selected (select components), else ``None``.
    :ivar guild_id: Guild ID of the invocation, or ``None`` in DMs.
    :ivar author_id: User ID of the command invoker.
    :ivar channel_id: Channel ID of the invocation.
    :ivar author: Member (guild) or User (DM) object of the invoker.
    :ivar created_at: Creation time of the interaction, derived from its snowflake ID.
    """

    def __init__(
        self,
        _http: http.SlashCommandRequest,
        _json: dict,
        _discord: typing.Union[discord.Client, commands.Bot],
        logger,
    ):
        self._token = _json["token"]
        self.message = None
        self.menu_messages = None
        self.data = _json["data"]
        self.interaction_id = _json["id"]
        self._http = _http
        self.bot = _discord
        self._logger = logger
        self.deferred = False
        self.responded = False
        self.values = _json["data"]["values"] if "values" in _json["data"] else None
        # BUG FIX: these two assignments were fused onto a single source line
        # (a SyntaxError in Python); they must be separate statements.
        self._deferred_hidden = False
        self.guild_id = int(_json["guild_id"]) if "guild_id" in _json.keys() else None
        self.author_id = int(
            _json["member"]["user"]["id"] if "member" in _json.keys() else _json["user"]["id"]
        )
        self.channel_id = int(_json["channel_id"])
        if self.guild:
            self.author = discord.Member(
                data=_json["member"], state=self.bot._connection, guild=self.guild
            )
        elif self.guild_id:
            # Guild not cached: fall back to a plain User built from the member payload.
            self.author = discord.User(data=_json["member"]["user"], state=self.bot._connection)
        else:
            self.author = discord.User(data=_json["user"], state=self.bot._connection)
        self.created_at: datetime.datetime = snowflake_time(int(self.interaction_id))

    @property
    def _deffered_hidden(self):
        """Deprecated misspelled alias of ``_deferred_hidden``."""
        # Fixed typo in the warning text: "as been" -> "has been".
        warn(
            "`_deffered_hidden` has been renamed to `_deferred_hidden`.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self._deferred_hidden

    @_deffered_hidden.setter
    def _deffered_hidden(self, value):
        warn(
            "`_deffered_hidden` has been renamed to `_deferred_hidden`.",
            DeprecationWarning,
            stacklevel=2,
        )
        self._deferred_hidden = value

    @property
    def deffered(self):
        """Deprecated misspelled alias of :attr:`deferred`."""
        warn("`deffered` has been renamed to `deferred`.", DeprecationWarning, stacklevel=2)
        return self.deferred

    @deffered.setter
    def deffered(self, value):
        warn("`deffered` has been renamed to `deferred`.", DeprecationWarning, stacklevel=2)
        self.deferred = value

    @property
    def guild(self) -> typing.Optional[discord.Guild]:
        """Guild of the invocation, or ``None`` in DMs / when uncached."""
        return self.bot.get_guild(self.guild_id) if self.guild_id else None

    @property
    def channel(self) -> typing.Optional[typing.Union[discord.TextChannel, discord.DMChannel]]:
        """Channel of the invocation, resolved from the bot cache."""
        return self.bot.get_channel(self.channel_id)

    @property
    def voice_client(self) -> typing.Optional[discord.VoiceProtocol]:
        """Voice client of the invoking guild, or ``None`` (DM / not connected)."""
        return self.guild.voice_client if self.guild else None

    @property
    def me(self) -> typing.Union[discord.Member, discord.ClientUser]:
        """The bot as a guild Member, or its ClientUser when invoked in DM."""
        return self.guild.me if self.guild is not None else self.bot.user

    async def defer(self, hidden: bool = False):
        """
        'Defers' the response, showing a loading state to the user.
        :param hidden: Whether the deferred response should be ephemeral. Default ``False``.
        :raises error.AlreadyResponded: if already deferred or responded.
        """
        if self.deferred or self.responded:
            raise error.AlreadyResponded("You have already responded to this command!")
        base = {"type": 5}
        if hidden:
            base["data"] = {"flags": 64}  # ephemeral flag
            self._deferred_hidden = True
        await self._http.post_initial_response(base, self.interaction_id, self._token)
        self.deferred = True

    async def send(
        self,
        content: str = "",
        *,
        embed: discord.Embed = None,
        embeds: typing.List[discord.Embed] = None,
        tts: bool = False,
        file: discord.File = None,
        files: typing.List[discord.File] = None,
        allowed_mentions: discord.AllowedMentions = None,
        hidden: bool = False,
        delete_after: float = None,
        components: typing.List[dict] = None,
    ) -> model.SlashMessage:
        """
        Sends the response of the interaction (initial response, deferred edit, or follow-up).
        :param content: Content of the response.
        :param embed: Embed of the response (mutually exclusive with ``embeds``).
        :param embeds: Embeds of the response. Maximum 10.
        :param tts: Whether to speak message using tts. Default ``False``.
        :param file: File to send (mutually exclusive with ``files``).
        :param files: Files to send.
        :param allowed_mentions: AllowedMentions of the message.
        :param hidden: Whether the message is ephemeral (seen only by the author).
        :param delete_after: Seconds to wait before deleting the sent message; failures ignored.
        :param components: Message components; the top level must be made of ActionRows.
        :return: Union[discord.Message, dict]
        """
        # Normalise singular embed/file into list forms and validate limits.
        if embed and embeds:
            raise error.IncorrectFormat("You can't use both `embed` and `embeds`!")
        if embed:
            embeds = [embed]
        if embeds:
            if not isinstance(embeds, list):
                raise error.IncorrectFormat("Provide a list of embeds.")
            elif len(embeds) > 10:
                raise error.IncorrectFormat("Do not provide more than 10 embeds.")
        if file and files:
            raise error.IncorrectFormat("You can't use both `file` and `files`!")
        if file:
            files = [file]
        if delete_after and hidden:
            raise error.IncorrectFormat("You can't delete a hidden message!")
        if components and not all(comp.get("type") == 1 for comp in components):
            raise error.IncorrectFormat(
                "The top level of the components list must be made of ActionRows!"
            )
        # Merge per-call allowed_mentions with the bot-level default, if either exists.
        if allowed_mentions is not None:
            if self.bot.allowed_mentions is not None:
                allowed_mentions = self.bot.allowed_mentions.merge(allowed_mentions).to_dict()
            else:
                allowed_mentions = allowed_mentions.to_dict()
        else:
            if self.bot.allowed_mentions is not None:
                allowed_mentions = self.bot.allowed_mentions.to_dict()
            else:
                allowed_mentions = {}
        base = {
            "content": content,
            "tts": tts,
            "embeds": [x.to_dict() for x in embeds] if embeds else [],
            "allowed_mentions": allowed_mentions,
            "components": components or [],
        }
        if hidden:
            base["flags"] = 64  # ephemeral flag
        initial_message = False
        if not self.responded:
            initial_message = True
            # Files cannot ride on the initial response; defer first and deliver
            # them via the edit below.
            if files and not self.deferred:
                await self.defer(hidden=hidden)
            if self.deferred:
                if self._deferred_hidden != hidden:
                    self._logger.warning(
                        "Deferred response might not be what you set it to! (hidden / visible) "
                        "This is because it was deferred in a different state."
                    )
                resp = await self._http.edit(base, self._token, files=files)
                self.deferred = False
            else:
                # Type 4: respond immediately with the message payload.
                json_data = {"type": 4, "data": base}
                await self._http.post_initial_response(json_data, self.interaction_id, self._token)
                if not hidden:
                    # Initial response returns no message body; fetch it via an empty edit.
                    resp = await self._http.edit({}, self._token)
                else:
                    resp = {}
            self.responded = True
        else:
            resp = await self._http.post_followup(base, self._token, files=files)
        if files:
            for file in files:
                file.close()
        if not hidden:
            smsg = model.SlashMessage(
                state=self.bot._connection,
                data=resp,
                channel=self.channel or discord.Object(id=self.channel_id),
                _http=self._http,
                interaction_token=self._token,
            )
            if delete_after:
                self.bot.loop.create_task(smsg.delete(delay=delete_after))
            if initial_message:
                self.message = smsg
            return smsg
        else:
            return resp

    async def reply(
        self,
        content: str = "",
        *,
        embed: discord.Embed = None,
        embeds: typing.List[discord.Embed] = None,
        tts: bool = False,
        file: discord.File = None,
        files: typing.List[discord.File] = None,
        allowed_mentions: discord.AllowedMentions = None,
        hidden: bool = False,
        delete_after: float = None,
        components: typing.List[dict] = None,
    ) -> model.SlashMessage:
        """Alias of :meth:`send`; forwards every argument unchanged."""
        return await self.send(
            content=content,
            embed=embed,
            embeds=embeds,
            tts=tts,
            file=file,
            files=files,
            allowed_mentions=allowed_mentions,
            hidden=hidden,
            delete_after=delete_after,
            components=components,
        )
class SlashContext(InteractionContext):
    """
    Context of a slash command. Has all attributes from :class:`InteractionContext`,
    plus slash-command-specific ones: name/args/kwargs, subcommand info and command_id.
    """

    def __init__(
        self,
        _http: http.SlashCommandRequest,
        _json: dict,
        _discord: typing.Union[discord.Client, commands.Bot],
        logger,
    ):
        command_data = _json["data"]
        # d.py-style aliases so code written for commands.Context keeps working.
        self.name = self.command = self.invoked_with = command_data["name"]
        self.args = []
        self.kwargs = {}
        self.subcommand_name = self.invoked_subcommand = self.subcommand_passed = None
        self.subcommand_group = self.invoked_subcommand_group = self.subcommand_group_passed = None
        self.command_id = command_data["id"]
        super().__init__(_http=_http, _json=_json, _discord=_discord, logger=logger)

    @property
    def slash(self) -> "client.SlashCommand":
        """The SlashCommand extension instance attached to the bot at runtime."""
        return self.bot.slash  # noqa

    @property
    def cog(self) -> typing.Optional[commands.Cog]:
        """Cog that owns the invoked command, or ``None`` if not cog-registered."""
        cmd_obj = self.slash.commands[self.command]
        from_cog = isinstance(cmd_obj, (model.CogBaseCommandObject, model.CogSubcommandObject))
        return cmd_obj.cog if from_cog else None

    async def invoke(self, *args, **kwargs):
        """
        Invoke a command directly, bypassing converters/checks/cooldowns/hooks.
        The first positional argument must be the command being invoked.
        :raises TypeError: if no command was passed.
        """
        if not args:
            raise TypeError("Missing command to invoke.")
        command = args[0]
        return await self.slash.invoke_command(func=command, ctx=self, args=kwargs)
class ComponentContext(InteractionContext):
    """
    Context of a component interaction. Has all attributes from :class:`InteractionContext`, plus the component-specific ones below.
    :ivar custom_id: The custom ID of the component (has alias component_id).
    :ivar component_type: The type of the component.
    :ivar component: Component data retrieved from the message. Not available if the origin message was ephemeral.
    :ivar origin_message: The origin message of the component. Not available if the origin message was ephemeral.
    :ivar origin_message_id: The ID of the origin message.
    :ivar selected_options: The options selected (only for selects)
    """

    def __init__(
        self,
        _http: http.SlashCommandRequest,
        _json: dict,
        _discord: typing.Union[discord.Client, commands.Bot],
        logger,
    ):
        self.custom_id = self.component_id = _json["data"]["custom_id"]
        self.component_type = _json["data"]["component_type"]
        super().__init__(_http=_http, _json=_json, _discord=_discord, logger=logger)
        self.origin_message = None
        self.origin_message_id = int(_json["message"]["id"]) if "message" in _json.keys() else None
        self.component = None
        self._deferred_edit_origin = False
        # Flag 64 marks the origin message as ephemeral; its data is unusable then.
        if self.origin_message_id and (_json["message"]["flags"] & 64) != 64:
            self.origin_message = ComponentMessage(
                state=self.bot._connection, channel=self.channel, data=_json["message"]
            )
            self.component = self.origin_message.get_component(self.custom_id)
        self.selected_options = None
        # Only selects (component type 3) submit chosen values.
        if self.component_type == 3:
            self.selected_options = _json["data"].get("values", [])

    async def defer(self, hidden: bool = False, edit_origin: bool = False, ignore: bool = False):
        """
        'Defers' the response, showing a loading state to the user.
        :param hidden: Whether the deferred response should be ephemeral. Default ``False``.
        :param edit_origin: Whether the type is editing the origin message; if ``False`` the deferral is for a follow-up message. Default ``False``.
        :param ignore: Whether to acknowledge without editing or responding at all. Default ``False``.
        """
        if self.deferred or self.responded:
            raise error.AlreadyResponded("You have already responded to this command!")
        # Type 6 = deferred origin-message update; type 5 = deferred new response.
        base = {"type": 6 if edit_origin or ignore else 5}
        if edit_origin and ignore:
            raise error.IncorrectFormat("'edit_origin' and 'ignore' are mutually exclusive")
        if hidden:
            if edit_origin:
                raise error.IncorrectFormat(
                    "'hidden' and 'edit_origin' flags are mutually exclusive"
                )
            elif ignore:
                self._deferred_hidden = True
            else:
                base["data"] = {"flags": 64}
                self._deferred_hidden = True
        self._deferred_edit_origin = edit_origin
        await self._http.post_initial_response(base, self.interaction_id, self._token)
        self.deferred = not ignore
        if ignore:
            # Ack with nothing further planned: later sends become follow-ups.
            self.responded = True

    async def send(
        self,
        content: str = "",
        *,
        embed: discord.Embed = None,
        embeds: typing.List[discord.Embed] = None,
        tts: bool = False,
        file: discord.File = None,
        files: typing.List[discord.File] = None,
        allowed_mentions: discord.AllowedMentions = None,
        hidden: bool = False,
        delete_after: float = None,
        components: typing.List[dict] = None,
    ) -> model.SlashMessage:
        # Same contract as InteractionContext.send; warns if the pending deferral
        # was an edit-origin ack, since this sends a new message instead.
        if self.deferred and self._deferred_edit_origin:
            self._logger.warning(
                "Deferred response might not be what you set it to! (edit origin / send response message) "
                "This is because it was deferred with different response type."
            )
        return await super().send(
            content,
            embed=embed,
            embeds=embeds,
            tts=tts,
            file=file,
            files=files,
            allowed_mentions=allowed_mentions,
            hidden=hidden,
            delete_after=delete_after,
            components=components,
        )

    async def edit_origin(self, **fields):
        """
        Edits the origin message of the component.
        Refer to :meth:`discord.Message.edit` and :meth:`InteractionContext.send` for fields.
        """
        _resp = {}
        # EAFP lookups distinguish "absent" from an explicit None (which clears).
        try:
            content = fields["content"]
        except KeyError:
            pass
        else:
            if content is not None:
                content = str(content)
            _resp["content"] = content
        try:
            components = fields["components"]
        except KeyError:
            pass
        else:
            if components is None:
                _resp["components"] = []
            else:
                _resp["components"] = components
        try:
            embeds = fields["embeds"]
        except KeyError:
            # embeds not supplied; leave untouched
            pass
        else:
            if not isinstance(embeds, list):
                raise error.IncorrectFormat("Provide a list of embeds.")
            if len(embeds) > 10:
                raise error.IncorrectFormat("Do not provide more than 10 embeds.")
            _resp["embeds"] = [e.to_dict() for e in embeds]
        try:
            embed = fields["embed"]
        except KeyError:
            pass
        else:
            if "embeds" in _resp:
                raise error.IncorrectFormat("You can't use both `embed` and `embeds`!")
            if embed is None:
                _resp["embeds"] = []
            else:
                _resp["embeds"] = [embed.to_dict()]
        file = fields.get("file")
        files = fields.get("files")
        if files is not None and file is not None:
            raise error.IncorrectFormat("You can't use both `file` and `files`!")
        if file:
            files = [file]
        allowed_mentions = fields.get("allowed_mentions")
        if allowed_mentions is not None:
            if self.bot.allowed_mentions is not None:
                _resp["allowed_mentions"] = self.bot.allowed_mentions.merge(
                    allowed_mentions
                ).to_dict()
            else:
                _resp["allowed_mentions"] = allowed_mentions.to_dict()
        else:
            if self.bot.allowed_mentions is not None:
                _resp["allowed_mentions"] = self.bot.allowed_mentions.to_dict()
            else:
                _resp["allowed_mentions"] = {}
        if not self.responded:
            # Files can't ride on the initial response; defer (edit-origin ack)
            # and deliver them via the edit below.
            if files and not self.deferred:
                await self.defer(edit_origin=True)
            if self.deferred:
                if not self._deferred_edit_origin:
                    self._logger.warning(
                        "Deferred response might not be what you set it to! (edit origin / send response message) "
                        "This is because it was deferred with different response type."
                    )
                _json = await self._http.edit(_resp, self._token, files=files)
                self.deferred = False
            else:  # noqa: F841
                # Type 7 updates the origin message as the initial response.
                json_data = {"type": 7, "data": _resp}
                _json = await self._http.post_initial_response(  # noqa: F841
                    json_data, self.interaction_id, self._token
                )
            self.responded = True
        else:
            raise error.IncorrectFormat("Already responded")
        if files:
            for file in files:
                file.close()
        # Commented out for now as sometimes (or at least, when not deferred) _json is an empty string?
        # self.origin_message = ComponentMessage(state=self.bot._connection, channel=self.channel,
        #                                        data=_json)
class MenuContext(InteractionContext):
    """
    Context of a context menu interaction. Has all attributes from :class:`InteractionContext`, plus the context-specific ones below.
    :ivar context_type: The type of context menu command.
    :ivar _resolved: The resolved data set for the context menu.
    :ivar target_message: The targeted message of the context menu command if present. Defaults to ``None``.
    :ivar target_id: The target ID of the context menu command.
    :ivar target_author: The author targeted from the context menu command.
    """

    def __init__(
        self,
        _http: http.SlashCommandRequest,
        _json: dict,
        _discord: typing.Union[discord.Client, commands.Bot],
        logger,
    ):
        super().__init__(_http=_http, _json=_json, _discord=_discord, logger=logger)
        self.context_type = _json["type"]
        self._resolved = self.data["resolved"] if "resolved" in self.data.keys() else None
        self.target_message = None
        self.target_author = None
        self.target_id = self.data["target_id"]
        if self._resolved is not None:
            try:
                # Message context menu: the resolved payload maps a single message
                # ID to its data; take that one entry.
                if self._resolved["messages"]:
                    _msg = [msg for msg in self._resolved["messages"]][0]
                    self.target_message = model.SlashMessage(
                        state=self.bot._connection,
                        channel=_discord.get_channel(self.channel_id),
                        data=self._resolved["messages"][_msg],
                        _http=_http,
                        interaction_token=self._token,
                    )
            except KeyError:  # noqa
                pass
            try:
                # User context menu: prefer a full Member when invoked in a guild;
                # otherwise fall back to the plain User payload.
                if self.guild and self._resolved["members"]:
                    _auth = [auth for auth in self._resolved["members"]][0]
                    self.target_author = discord.Member(
                        data=self._resolved["members"][_auth],
                        state=self.bot._connection,
                        guild=self.guild,
                    )
                else:
                    _auth = [auth for auth in self._resolved["users"]][0]
                    self.target_author = discord.User(
                        data=self._resolved["users"][_auth], state=self.bot._connection
                    )
            except KeyError:  # noqa
                pass

    @property
    def cog(self) -> typing.Optional[commands.Cog]:
        """
        Returns the cog associated with the command invoked, if any.
        :return: Optional[commands.Cog]
        """
        cmd_obj = self.slash.commands[self.command]
        if isinstance(cmd_obj, (model.CogBaseCommandObject, model.CogSubcommandObject)):
            return cmd_obj.cog
        else:
            return None

    async def defer(self, hidden: bool = False, edit_origin: bool = False, ignore: bool = False):
        """
        'Defers' the response, showing a loading state to the user.
        :param hidden: Whether the deferred response should be ephemeral. Default ``False``.
        :param edit_origin: Whether the type is editing the origin message; if ``False`` the deferral is for a follow-up message. Default ``False``.
        :param ignore: Whether to acknowledge without editing or responding at all. Default ``False``.
        """
        if self.deferred or self.responded:
            raise error.AlreadyResponded("You have already responded to this command!")
        # Type 6 = deferred origin-message update; type 5 = deferred new response.
        base = {"type": 6 if edit_origin or ignore else 5}
        if edit_origin and ignore:
            raise error.IncorrectFormat("'edit_origin' and 'ignore' are mutually exclusive")
        if hidden:
            if edit_origin:
                raise error.IncorrectFormat(
                    "'hidden' and 'edit_origin' flags are mutually exclusive"
                )
            elif ignore:
                self._deferred_hidden = True
            else:
                base["data"] = {"flags": 64}
                self._deferred_hidden = True
        self._deferred_edit_origin = edit_origin
        await self._http.post_initial_response(base, self.interaction_id, self._token)
        self.deferred = not ignore
        if ignore:
            # Ack with nothing further planned: later sends become follow-ups.
            self.responded = True

    async def send(
        self,
        content: str = "",
        *,
        embed: discord.Embed = None,
        embeds: typing.List[discord.Embed] = None,
        tts: bool = False,
        file: discord.File = None,
        files: typing.List[discord.File] = None,
        allowed_mentions: discord.AllowedMentions = None,
        hidden: bool = False,
        delete_after: float = None,
        components: typing.List[dict] = None,
    ) -> model.SlashMessage:
        # Same contract as InteractionContext.send; warns if the pending deferral
        # was an edit-origin ack, since this sends a new message instead.
        if self.deferred and self._deferred_edit_origin:
            self._logger.warning(
                "Deferred response might not be what you set it to! (edit origin / send response message) "
                "This is because it was deferred with different response type."
            )
        return await super().send(
            content,
            embed=embed,
            embeds=embeds,
            tts=tts,
            file=file,
            files=files,
            allowed_mentions=allowed_mentions,
            hidden=hidden,
            delete_after=delete_after,
            components=components,
        )
| true
| true
|
f707f24af08406bad7adef31a302134e4dc66c93
| 6,102
|
py
|
Python
|
v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/utils/metrics_hook_test.py
|
myelintek/results
|
11c38436a158c453e3011f8684570f7a55c03330
|
[
"Apache-2.0"
] | 44
|
2018-11-07T18:52:33.000Z
|
2019-07-06T12:48:18.000Z
|
v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/utils/metrics_hook_test.py
|
myelintek/results
|
11c38436a158c453e3011f8684570f7a55c03330
|
[
"Apache-2.0"
] | 63
|
2017-12-19T20:29:10.000Z
|
2021-08-04T21:49:36.000Z
|
v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/utils/metrics_hook_test.py
|
myelintek/results
|
11c38436a158c453e3011f8684570f7a55c03330
|
[
"Apache-2.0"
] | 44
|
2018-11-09T21:04:52.000Z
|
2019-06-24T07:40:28.000Z
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metrics_hook."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import shutil
from tensor2tensor.utils import metrics_hook
import tensorflow as tf
class DummyHook(metrics_hook.MetricsBasedHook):
  """Test hook that records received metrics and requests a stop at step 40."""

  def _process_metrics(self, global_step, metrics):
    # When metrics are present, sanity-check their expected structure:
    # a "" group mapping metric names to values.
    if metrics:
      assert "" in metrics
      group = metrics[""]
      assert isinstance(group, dict)
      if group:
        assert "global_step_1" in group
    # Stash whatever we saw so the test can inspect it afterwards.
    self.test_metrics = metrics
    # Returning True asks the framework to stop; otherwise return None.
    return True if global_step >= 40 else None
class MetricsHookTest(tf.test.TestCase):
  """Tests for metrics-based hooks (uses TF1 graph-mode training APIs)."""

  @classmethod
  def setUpClass(cls):
    # Share one temp dir across tests; wipe any leftovers from prior runs.
    cls.base_checkpoint_dir = tf.test.get_temp_dir()
    shutil.rmtree(cls.base_checkpoint_dir, ignore_errors=True)

  def ckpt_dir(self, name):
    """Return a per-test checkpoint directory under the shared base dir."""
    return os.path.join(self.base_checkpoint_dir, name)

  @contextlib.contextmanager
  def sess(self, hook, ckpt_dir):
    """Yield a MonitoredTrainingSession wired with `hook` and summary saving."""
    with tf.train.MonitoredTrainingSession(
        checkpoint_dir=ckpt_dir,
        save_checkpoint_secs=0,
        save_summaries_steps=10,
        hooks=[hook]) as sess:
      # Keep a handle so flush() can reach the session's summary writer.
      self._sess = sess
      yield sess

  def flush(self):
    # Flush the summary writer (hook index 1 inside the monitored session)
    # so the metrics hook sees up-to-date event files on its next trigger.
    self._sess._hooks[1]._summary_writer.flush()

  def testStop(self):
    """DummyHook collects metrics and stops the session at step >= 40."""
    global_step = tf.train.create_global_step()
    tf.summary.scalar("global_step", global_step)
    incr_global_step = tf.assign_add(global_step, 1)

    ckpt_dir = self.ckpt_dir("stop")
    dummy = DummyHook(ckpt_dir, every_n_steps=10)
    with self.sess(dummy, ckpt_dir) as sess:
      for _ in range(20):
        sess.run(incr_global_step)

      # Summary files should now have 2 global step values in them
      self.flush()

      # Run for 10 more so that the hook gets triggered again
      for _ in range(10):
        sess.run(incr_global_step)

      # Check that the metrics have actually been collected.
      self.assertTrue("" in dummy.test_metrics)
      metrics = dummy.test_metrics[""]
      self.assertTrue("global_step_1" in metrics)
      steps, vals = metrics["global_step_1"]
      self.assertTrue(len(steps) == len(vals))
      self.assertTrue(len(steps) >= 2)

      # Run for 10 more so that the hook triggers stoppage
      for _ in range(10):
        sess.run(incr_global_step)

      with self.assertRaisesRegexp(RuntimeError, "after should_stop requested"):
        sess.run(incr_global_step)

  def testEarlyStoppingHook(self):
    """EarlyStoppingHook stops when the watched metric plateaus."""
    global_step = tf.train.create_global_step()
    counter = tf.get_variable("count", initializer=0, dtype=tf.int32)
    tf.summary.scalar("count", counter)
    incr_global_step = tf.assign_add(global_step, 1)
    incr_counter = tf.assign_add(counter, 1)

    # Stop if the "count_1" metric has not gone up by more than 1 in 20 steps.
    ckpt_dir = self.ckpt_dir("early")
    stop_hook = metrics_hook.EarlyStoppingHook(
        ckpt_dir,
        "count_1",
        num_plateau_steps=20,
        plateau_delta=1.,
        plateau_decrease=False,
        every_n_steps=10)
    with self.sess(stop_hook, ckpt_dir) as sess:
      for _ in range(20):
        sess.run((incr_global_step, incr_counter))
      # Summary files should now have 2 values in them
      self.flush()

      # Run for more steps so that the hook gets triggered and we verify that we
      # don't stop.
      for _ in range(30):
        sess.run((incr_global_step, incr_counter))
      self.flush()

      # Run without incrementing the counter
      for _ in range(40):
        sess.run(incr_global_step)
      # Metrics should be written such that now the counter has gone >20 steps
      # without being incremented.
      self.flush()

      # Check that we ask for stop
      with self.assertRaisesRegexp(RuntimeError, "after should_stop requested"):
        for _ in range(30):
          sess.run(incr_global_step)

  def testPlateauOpHook(self):
    """PlateauOpHook runs the given op (repeatedly) once the metric plateaus."""
    global_step = tf.train.create_global_step()
    counter = tf.get_variable("count", initializer=0, dtype=tf.int32)
    indicator = tf.get_variable("indicator", initializer=0, dtype=tf.int32)
    tf.summary.scalar("count", counter)
    incr_global_step = tf.assign_add(global_step, 1)
    incr_counter = tf.assign_add(counter, 1)
    incr_indicator = tf.assign_add(indicator, 1)

    # Run incr_indicator if "count_1" has not gone up by more than 1 in 20 steps.
    ckpt_dir = self.ckpt_dir("plateauop")
    stop_hook = metrics_hook.PlateauOpHook(
        ckpt_dir,
        "count_1",
        incr_indicator,
        num_plateau_steps=20,
        plateau_delta=1.,
        plateau_decrease=False,
        every_n_steps=10)
    with self.sess(stop_hook, ckpt_dir) as sess:
      for _ in range(20):
        sess.run((incr_global_step, incr_counter))
      # Summary files should now have 2 values in them
      self.flush()

      # Run for more steps so that the hook gets triggered and we verify that we
      # don't stop.
      for _ in range(30):
        sess.run((incr_global_step, incr_counter))
      self.flush()

      # Run without incrementing the counter
      for _ in range(30):
        sess.run(incr_global_step)
      self.flush()
      # No plateau detected yet: the op must not have run.
      self.assertTrue(sess.run(indicator) < 1)

      # Metrics should be written such that now the counter has gone >20 steps
      # without being incremented.
      # Check that we run the incr_indicator op several times
      for _ in range(3):
        for _ in range(10):
          sess.run(incr_global_step)
        self.flush()

      self.assertTrue(sess.run(indicator) > 1)
# Run the test suite when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| 31.132653
| 80
| 0.683219
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import shutil
from tensor2tensor.utils import metrics_hook
import tensorflow as tf
class DummyHook(metrics_hook.MetricsBasedHook):
  """Test hook that records received metrics and requests a stop at step 40."""

  def _process_metrics(self, global_step, metrics):
    # When metrics are present, sanity-check their expected structure:
    # a "" group mapping metric names to values.
    if metrics:
      assert "" in metrics
      assert isinstance(metrics[""], dict)
      if metrics[""]:
        assert "global_step_1" in metrics[""]
    # Stash whatever we saw so the test can inspect it afterwards.
    self.test_metrics = metrics
    # Returning True asks the framework to stop (implicitly None otherwise).
    if global_step >= 40:
      return True
class MetricsHookTest(tf.test.TestCase):
  """Tests for metrics-based hooks (uses TF1 graph-mode training APIs)."""

  @classmethod
  def setUpClass(cls):
    # Share one temp dir across tests; wipe any leftovers from prior runs.
    cls.base_checkpoint_dir = tf.test.get_temp_dir()
    shutil.rmtree(cls.base_checkpoint_dir, ignore_errors=True)

  def ckpt_dir(self, name):
    """Return a per-test checkpoint directory under the shared base dir."""
    return os.path.join(self.base_checkpoint_dir, name)

  @contextlib.contextmanager
  def sess(self, hook, ckpt_dir):
    """Yield a MonitoredTrainingSession wired with `hook` and summary saving."""
    with tf.train.MonitoredTrainingSession(
        checkpoint_dir=ckpt_dir,
        save_checkpoint_secs=0,
        save_summaries_steps=10,
        hooks=[hook]) as sess:
      # Keep a handle so flush() can reach the session's summary writer.
      self._sess = sess
      yield sess

  def flush(self):
    # Flush the summary writer (hook index 1 inside the monitored session)
    # so the metrics hook sees up-to-date event files on its next trigger.
    self._sess._hooks[1]._summary_writer.flush()

  def testStop(self):
    """DummyHook collects metrics and stops the session at step >= 40."""
    global_step = tf.train.create_global_step()
    tf.summary.scalar("global_step", global_step)
    incr_global_step = tf.assign_add(global_step, 1)
    ckpt_dir = self.ckpt_dir("stop")
    dummy = DummyHook(ckpt_dir, every_n_steps=10)
    with self.sess(dummy, ckpt_dir) as sess:
      for _ in range(20):
        sess.run(incr_global_step)
      # Summary files should now contain values for the first 20 steps.
      self.flush()
      # Run 10 more steps so the hook gets triggered again.
      for _ in range(10):
        sess.run(incr_global_step)
      # Check that the metrics have actually been collected.
      self.assertTrue("" in dummy.test_metrics)
      metrics = dummy.test_metrics[""]
      self.assertTrue("global_step_1" in metrics)
      steps, vals = metrics["global_step_1"]
      self.assertTrue(len(steps) == len(vals))
      self.assertTrue(len(steps) >= 2)
      # Run 10 more steps so the hook requests stoppage (step >= 40).
      for _ in range(10):
        sess.run(incr_global_step)
      with self.assertRaisesRegexp(RuntimeError, "after should_stop requested"):
        sess.run(incr_global_step)

  def testEarlyStoppingHook(self):
    """EarlyStoppingHook stops when the watched metric plateaus."""
    global_step = tf.train.create_global_step()
    counter = tf.get_variable("count", initializer=0, dtype=tf.int32)
    tf.summary.scalar("count", counter)
    incr_global_step = tf.assign_add(global_step, 1)
    incr_counter = tf.assign_add(counter, 1)
    # Stop if "count_1" has not gone up by more than 1 in 20 steps.
    ckpt_dir = self.ckpt_dir("early")
    stop_hook = metrics_hook.EarlyStoppingHook(
        ckpt_dir,
        "count_1",
        num_plateau_steps=20,
        plateau_delta=1.,
        plateau_decrease=False,
        every_n_steps=10)
    with self.sess(stop_hook, ckpt_dir) as sess:
      for _ in range(20):
        sess.run((incr_global_step, incr_counter))
      # Summary files should now have values for the first 20 steps.
      self.flush()
      # Run more steps while still incrementing the counter and verify that
      # the hook does not stop the session.
      for _ in range(30):
        sess.run((incr_global_step, incr_counter))
      self.flush()
      # Run without incrementing the counter
      for _ in range(40):
        sess.run(incr_global_step)
      # Metrics should be written such that now the counter has gone >20 steps
      # without being incremented.
      self.flush()
      # Check that we ask for stop
      with self.assertRaisesRegexp(RuntimeError, "after should_stop requested"):
        for _ in range(30):
          sess.run(incr_global_step)

  def testPlateauOpHook(self):
    """PlateauOpHook runs the given op (repeatedly) once the metric plateaus."""
    global_step = tf.train.create_global_step()
    counter = tf.get_variable("count", initializer=0, dtype=tf.int32)
    indicator = tf.get_variable("indicator", initializer=0, dtype=tf.int32)
    tf.summary.scalar("count", counter)
    incr_global_step = tf.assign_add(global_step, 1)
    incr_counter = tf.assign_add(counter, 1)
    incr_indicator = tf.assign_add(indicator, 1)
    # Stop if the global step has not gone up by more than 1 in 20 steps.
    ckpt_dir = self.ckpt_dir("plateauop")
    stop_hook = metrics_hook.PlateauOpHook(
        ckpt_dir,
        "count_1",
        incr_indicator,
        num_plateau_steps=20,
        plateau_delta=1.,
        plateau_decrease=False,
        every_n_steps=10)
    with self.sess(stop_hook, ckpt_dir) as sess:
      for _ in range(20):
        sess.run((incr_global_step, incr_counter))
      # Summary files should now have 2 values in them
      self.flush()
      # Run for more steps so that the hook gets triggered and we verify that we
      # don't stop.
      for _ in range(30):
        sess.run((incr_global_step, incr_counter))
      self.flush()
      # Run without incrementing the counter; no plateau yet, so the op
      # must not have executed.
      for _ in range(30):
        sess.run(incr_global_step)
      self.flush()
      self.assertTrue(sess.run(indicator) < 1)
      # After >20 stagnant steps the plateau is detected: verify the
      # incr_indicator op runs several times.
      for _ in range(3):
        for _ in range(10):
          sess.run(incr_global_step)
        self.flush()
      self.assertTrue(sess.run(indicator) > 1)
# Run the test suite when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| true
| true
|
f707f2d1609dcbab7ff45a0d3a1b347d509c7995
| 41,314
|
py
|
Python
|
torchbiggraph/train_cpu.py
|
stillmatic/PyTorch-BigGraph
|
d7d6576281faa54ec5850e204ffc07b1268fdb04
|
[
"BSD-3-Clause"
] | 3
|
2020-09-10T15:03:20.000Z
|
2020-09-13T17:38:31.000Z
|
torchbiggraph/train_cpu.py
|
stillmatic/PyTorch-BigGraph
|
d7d6576281faa54ec5850e204ffc07b1268fdb04
|
[
"BSD-3-Clause"
] | null | null | null |
torchbiggraph/train_cpu.py
|
stillmatic/PyTorch-BigGraph
|
d7d6576281faa54ec5850e204ffc07b1268fdb04
|
[
"BSD-3-Clause"
] | 1
|
2022-01-11T03:27:48.000Z
|
2022-01-11T03:27:48.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE.txt file in the root directory of this source tree.
import logging
import math
import time
from collections import defaultdict
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple
import torch
import torch.distributed as td
from torch.optim import Optimizer
from torchbiggraph.async_adagrad import AsyncAdagrad
from torchbiggraph.batching import AbstractBatchProcessor, call, process_in_batches
from torchbiggraph.bucket_scheduling import (
BucketStats,
DistributedBucketScheduler,
LockServer,
SingleMachineBucketScheduler,
)
from torchbiggraph.checkpoint_manager import (
CheckpointManager,
ConfigMetadataProvider,
MetadataProvider,
PartitionClient,
)
from torchbiggraph.config import ConfigSchema
from torchbiggraph.distributed import ProcessRanks, init_process_group, start_server
from torchbiggraph.edgelist import EdgeList
from torchbiggraph.eval import RankingEvaluator
from torchbiggraph.graph_storages import EDGE_STORAGES, ENTITY_STORAGES
from torchbiggraph.losses import LOSS_FUNCTIONS, AbstractLossFunction
from torchbiggraph.model import MultiRelationEmbedder, make_model
from torchbiggraph.parameter_sharing import ParameterServer, ParameterSharer
from torchbiggraph.row_adagrad import RowAdagrad
from torchbiggraph.stats import Stats, StatsHandler
from torchbiggraph.types import (
SINGLE_TRAINER,
UNPARTITIONED,
Bucket,
EntityName,
FloatTensorType,
ModuleStateDict,
Partition,
Rank,
)
from torchbiggraph.util import (
BucketLogger,
DummyOptimizer,
EmbeddingHolder,
allocate_shared_tensor,
create_pool,
fast_approx_rand,
get_async_result,
get_num_workers,
hide_distributed_logging,
round_up_to_nearest_multiple,
split_almost_equally,
tag_logs_with_process_name,
)
# Package-level logger; `dist_logger` wraps it with extra context
# ({"distributed": True}) so distributed-coordination messages can be
# distinguished by formatters/filters.
logger = logging.getLogger("torchbiggraph")
dist_logger = logging.LoggerAdapter(logger, {"distributed": True})
class Trainer(AbstractBatchProcessor):
    """Batch processor that runs forward/backward passes and steps all
    registered optimizers (model, unpartitioned and partitioned embeddings).
    """

    def __init__(
        self,
        model_optimizer: Optimizer,
        loss_fn: AbstractLossFunction,
        relation_weights: List[float],
    ) -> None:
        super().__init__(loss_fn, relation_weights)
        self.model_optimizer = model_optimizer
        # Embedding optimizers are registered externally, as embedding
        # tables get loaded/swapped during training.
        self.unpartitioned_optimizers: Dict[EntityName, Optimizer] = {}
        self.partitioned_optimizers: Dict[Tuple[EntityName, Partition], Optimizer] = {}

    def _process_one_batch(
        self, model: MultiRelationEmbedder, batch_edges: EdgeList
    ) -> Stats:
        """Run one training step on `batch_edges` and return its stats."""
        model.zero_grad()
        scores, reg = model(batch_edges)
        loss = self.calc_loss(scores, batch_edges)

        # A "violator" is a negative scored above its positive counterpart.
        lhs_violations = scores.lhs_neg > scores.lhs_pos.unsqueeze(1)
        rhs_violations = scores.rhs_neg > scores.rhs_pos.unsqueeze(1)
        batch_stats = Stats(
            loss=float(loss),
            reg=0.0 if reg is None else float(reg),
            violators_lhs=int(lhs_violations.sum()),
            violators_rhs=int(rhs_violations.sum()),
            count=len(batch_edges),
        )

        # Include the regularization term in the backward pass when present.
        objective = loss if reg is None else loss + reg
        objective.backward()

        # Step the model optimizer and every registered embedding optimizer.
        self.model_optimizer.step(closure=None)
        for emb_optimizer in self.unpartitioned_optimizers.values():
            emb_optimizer.step(closure=None)
        for emb_optimizer in self.partitioned_optimizers.values():
            emb_optimizer.step(closure=None)

        return batch_stats
class IterationManager(MetadataProvider):
    """Tracks training progress as a single flat `iteration_idx` and exposes
    the derived (epoch, edge path, edge chunk) coordinates.

    Iteration order is: edge chunks innermost, then edge paths, then epochs.
    """

    def __init__(
        self,
        num_epochs: int,
        edge_paths: List[str],
        num_edge_chunks: int,
        *,
        iteration_idx: int = 0,
    ) -> None:
        self.num_epochs = num_epochs
        self.edge_paths = edge_paths
        self.num_edge_chunks = num_edge_chunks
        self.iteration_idx = iteration_idx

    @property
    def num_edge_paths(self) -> int:
        return len(self.edge_paths)

    @property
    def epoch_idx(self) -> int:
        # One epoch spans num_edge_paths * num_edge_chunks iterations.
        return self.iteration_idx // (self.num_edge_chunks * self.num_edge_paths)

    @property
    def edge_path_idx(self) -> int:
        return (self.iteration_idx // self.num_edge_chunks) % self.num_edge_paths

    @property
    def edge_path(self) -> str:
        return self.edge_paths[self.edge_path_idx]

    @property
    def edge_chunk_idx(self) -> int:
        return self.iteration_idx % self.num_edge_chunks

    def __iter__(self) -> Iterable[Tuple[int, int, int]]:
        # Resume from the current iteration_idx; advance by one per yield.
        while self.epoch_idx < self.num_epochs:
            yield self.epoch_idx, self.edge_path_idx, self.edge_chunk_idx
            self.iteration_idx += 1

    def get_checkpoint_metadata(self) -> Dict[str, Any]:
        """Expose the current iteration state for checkpoint metadata."""
        return {
            "iteration/num_epochs": self.num_epochs,
            "iteration/epoch_idx": self.epoch_idx,
            "iteration/num_edge_paths": self.num_edge_paths,
            "iteration/edge_path_idx": self.edge_path_idx,
            "iteration/edge_path": self.edge_path,
            "iteration/num_edge_chunks": self.num_edge_chunks,
            "iteration/edge_chunk_idx": self.edge_chunk_idx,
        }

    def __add__(self, delta: int) -> "IterationManager":
        """Return a new manager advanced by `delta` iterations."""
        return IterationManager(
            self.num_epochs,
            self.edge_paths,
            self.num_edge_chunks,
            iteration_idx=self.iteration_idx + delta,
        )
def should_preserve_old_checkpoint(
    iteration_manager: "IterationManager", interval: Optional[int]
) -> bool:
    """Whether the checkpoint consumed by the current iteration should be kept.

    Given the period, in number of epochs, at which to snapshot checkpoints,
    determine whether the checkpoint that is used as input by the current
    iteration (as determined by the given manager) should be preserved rather
    than getting cleaned up.

    Args:
        iteration_manager: Tracks the current epoch/edge-path/edge-chunk.
        interval: Snapshot period in epochs; None disables preservation.

    Returns:
        True iff this is the very first iteration (first edge path, first
        edge chunk) of an epoch that falls on the snapshot interval, so the
        preserved checkpoint captures the state at the start of that epoch.
    """
    if interval is None:
        return False
    is_checkpoint_epoch = iteration_manager.epoch_idx % interval == 0
    is_first_edge_path = iteration_manager.edge_path_idx == 0
    is_first_edge_chunk = iteration_manager.edge_chunk_idx == 0
    return is_checkpoint_epoch and is_first_edge_path and is_first_edge_chunk
def get_num_edge_chunks(config: ConfigSchema) -> int:
    """Return how many chunks each bucket's edge list is split into.

    Uses the explicit config value when given; otherwise estimates it from
    the size of bucket (0, 0) and the configured max edges per chunk.
    """
    if config.num_edge_chunks is not None:
        # An explicit setting always wins.
        return config.num_edge_chunks

    # We should check all edge paths, all lhs partitions and all rhs
    # partitions, but the combinatorial explosion could lead to thousands of
    # checks. Assume edges are uniformly distributed among buckets (not
    # exactly true — entities are uniformly distributed among partitions, and
    # bucket assignment follows from that, so e.g. very-high-degree entities
    # could skew this) and use bucket (0, 0) as a proxy for the average
    # bucket size. We still scan every edge path, as different paths may have
    # genuinely different sizes.
    biggest_bucket = 0
    for path in config.edge_paths:
        storage = EDGE_STORAGES.make_instance(path)
        bucket_size = storage.get_number_of_edges(UNPARTITIONED, UNPARTITIONED)
        if bucket_size > biggest_bucket:
            biggest_bucket = bucket_size

    # At least one chunk, even if the proxy bucket is empty.
    return max(1, math.ceil(biggest_bucket / config.max_edges_per_chunk))
def make_optimizer(
    config: ConfigSchema, params: Iterable[torch.nn.Parameter], is_emb: bool
) -> Optimizer:
    """Build a shared-memory optimizer for a set of parameters.

    Args:
        config: Source of learning rates (`lr`, optional `relation_lr`).
        params: Parameters to optimize; may be empty.
        is_emb: True for embedding tables (row-wise Adagrad), False for
            model/relation parameters (async Adagrad).

    Returns:
        An optimizer whose state lives in shared memory (a no-op
        DummyOptimizer when there is nothing to optimize).
    """
    param_list = list(params)
    if not param_list:
        opt: Optimizer = DummyOptimizer()
    elif is_emb:
        opt = RowAdagrad(param_list, lr=config.lr)
    else:
        # Relations may carry a dedicated learning rate; fall back to the
        # global one otherwise.
        lr = config.lr if config.relation_lr is None else config.relation_lr
        opt = AsyncAdagrad(param_list, lr=lr)
    # State must be in shared memory so HOGWILD workers can update it.
    opt.share_memory()
    return opt
# Shared default StatsHandler instance, used when callers don't supply one
# (see TrainingCoordinator's `stats_handler` default).
NOOP_STATS_HANDLER = StatsHandler()
class TrainingCoordinator:
def __init__( # noqa
self,
config: ConfigSchema,
model: Optional[MultiRelationEmbedder] = None,
trainer: Optional[AbstractBatchProcessor] = None,
evaluator: Optional[AbstractBatchProcessor] = None,
rank: Rank = SINGLE_TRAINER,
subprocess_init: Optional[Callable[[], None]] = None,
stats_handler: StatsHandler = NOOP_STATS_HANDLER,
):
"""Each epoch/pass, for each partition pair, loads in embeddings and edgelist
from disk, runs HOGWILD training on them, and writes partitions back to disk.
"""
tag_logs_with_process_name(f"Trainer-{rank}")
self.config = config
if config.verbose > 0:
import pprint
pprint.PrettyPrinter().pprint(config.to_dict())
logger.info("Loading entity counts...")
entity_storage = ENTITY_STORAGES.make_instance(config.entity_path)
entity_counts: Dict[str, List[int]] = {}
for entity, econf in config.entities.items():
entity_counts[entity] = []
for part in range(econf.num_partitions):
entity_counts[entity].append(entity_storage.load_count(entity, part))
# Figure out how many lhs and rhs partitions we need
holder = self.holder = EmbeddingHolder(config)
logger.debug(
f"nparts {holder.nparts_lhs} {holder.nparts_rhs} "
f"types {holder.lhs_partitioned_types} {holder.rhs_partitioned_types}"
)
# We know ahead of time that we wil need 1-2 storages for each embedding type,
# as well as the max size of this storage (num_entities x D).
# We allocate these storages n advance in `embedding_storage_freelist`.
# When we need storage for an entity type, we pop it from this free list,
# and then add it back when we 'delete' the embedding table.
embedding_storage_freelist: Dict[
EntityName, Set[torch.FloatStorage]
] = defaultdict(set)
for entity_type, counts in entity_counts.items():
max_count = max(counts)
num_sides = (
(1 if entity_type in holder.lhs_partitioned_types else 0)
+ (1 if entity_type in holder.rhs_partitioned_types else 0)
+ (
1
if entity_type
in (holder.lhs_unpartitioned_types | holder.rhs_unpartitioned_types)
else 0
)
)
for _ in range(num_sides):
embedding_storage_freelist[entity_type].add(
allocate_shared_tensor(
(max_count, config.entity_dimension(entity_type)),
dtype=torch.float,
).storage()
)
# create the handlers, threads, etc. for distributed training
if config.num_machines > 1 or config.num_partition_servers > 0:
if not 0 <= rank < config.num_machines:
raise RuntimeError("Invalid rank for trainer")
if not td.is_available():
raise RuntimeError(
"The installed PyTorch version doesn't provide "
"distributed training capabilities."
)
ranks = ProcessRanks.from_num_invocations(
config.num_machines, config.num_partition_servers
)
num_ps_groups = config.num_groups_for_partition_server
groups: List[List[int]] = [ranks.trainers] # barrier group
groups += [
ranks.trainers + ranks.partition_servers
] * num_ps_groups # ps groups
group_idxs_for_partition_servers = range(1, len(groups))
if rank == SINGLE_TRAINER:
logger.info("Setup lock server...")
start_server(
LockServer(
num_clients=len(ranks.trainers),
nparts_lhs=holder.nparts_lhs,
nparts_rhs=holder.nparts_rhs,
entities_lhs=holder.lhs_partitioned_types,
entities_rhs=holder.rhs_partitioned_types,
entity_counts=entity_counts,
init_tree=config.distributed_tree_init_order,
stats_handler=stats_handler,
),
process_name="LockServer",
init_method=config.distributed_init_method,
world_size=ranks.world_size,
server_rank=ranks.lock_server,
groups=groups,
subprocess_init=subprocess_init,
)
self.bucket_scheduler = DistributedBucketScheduler(
server_rank=ranks.lock_server, client_rank=ranks.trainers[rank]
)
logger.info("Setup param server...")
start_server(
ParameterServer(num_clients=len(ranks.trainers)),
process_name=f"ParamS-{rank}",
init_method=config.distributed_init_method,
world_size=ranks.world_size,
server_rank=ranks.parameter_servers[rank],
groups=groups,
subprocess_init=subprocess_init,
)
parameter_sharer = ParameterSharer(
process_name=f"ParamC-{rank}",
client_rank=ranks.parameter_clients[rank],
all_server_ranks=ranks.parameter_servers,
init_method=config.distributed_init_method,
world_size=ranks.world_size,
groups=groups,
subprocess_init=subprocess_init,
)
if config.num_partition_servers == -1:
start_server(
ParameterServer(
num_clients=len(ranks.trainers),
group_idxs=group_idxs_for_partition_servers,
log_stats=True,
),
process_name=f"PartS-{rank}",
init_method=config.distributed_init_method,
world_size=ranks.world_size,
server_rank=ranks.partition_servers[rank],
groups=groups,
subprocess_init=subprocess_init,
)
groups = init_process_group(
rank=ranks.trainers[rank],
world_size=ranks.world_size,
init_method=config.distributed_init_method,
groups=groups,
)
trainer_group, *groups_for_partition_servers = groups
self.barrier_group = trainer_group
if len(ranks.partition_servers) > 0:
partition_client = PartitionClient(
ranks.partition_servers,
groups=groups_for_partition_servers,
log_stats=True,
)
else:
partition_client = None
else:
self.barrier_group = None
self.bucket_scheduler = SingleMachineBucketScheduler(
holder.nparts_lhs, holder.nparts_rhs, config.bucket_order, stats_handler
)
parameter_sharer = None
partition_client = None
hide_distributed_logging()
# fork early for HOGWILD threads
logger.info("Creating workers...")
self.num_workers = get_num_workers(config.workers)
self.pool = create_pool(
self.num_workers,
subprocess_name=f"TWorker-{rank}",
subprocess_init=subprocess_init,
)
checkpoint_manager = CheckpointManager(
config.checkpoint_path,
rank=rank,
num_machines=config.num_machines,
partition_client=partition_client,
subprocess_name=f"BackgRW-{rank}",
subprocess_init=subprocess_init,
)
self.checkpoint_manager = checkpoint_manager
checkpoint_manager.register_metadata_provider(ConfigMetadataProvider(config))
if rank == 0:
checkpoint_manager.write_config(config)
num_edge_chunks = get_num_edge_chunks(config)
self.iteration_manager = IterationManager(
config.num_epochs,
config.edge_paths,
num_edge_chunks,
iteration_idx=checkpoint_manager.checkpoint_version,
)
checkpoint_manager.register_metadata_provider(self.iteration_manager)
logger.info("Initializing global model...")
if model is None:
model = make_model(config)
model.share_memory()
loss_fn = LOSS_FUNCTIONS.get_class(config.loss_fn)(margin=config.margin)
relation_weights = [relation.weight for relation in config.relations]
if trainer is None:
trainer = Trainer(
model_optimizer=make_optimizer(config, model.parameters(), False),
loss_fn=loss_fn,
relation_weights=relation_weights,
)
if evaluator is None:
eval_overrides = {}
if config.eval_num_batch_negs is not None:
eval_overrides["num_batch_negs"] = config.eval_num_batch_negs
if config.eval_num_uniform_negs is not None:
eval_overrides["num_uniform_negs"] = config.eval_num_uniform_negs
evaluator = RankingEvaluator(
loss_fn=loss_fn,
relation_weights=relation_weights,
overrides=eval_overrides,
)
if config.init_path is not None:
self.loadpath_manager = CheckpointManager(config.init_path)
else:
self.loadpath_manager = None
# load model from checkpoint or loadpath, if available
state_dict, optim_state = checkpoint_manager.maybe_read_model()
if state_dict is None and self.loadpath_manager is not None:
state_dict, optim_state = self.loadpath_manager.maybe_read_model()
if state_dict is not None:
model.load_state_dict(state_dict, strict=False)
if optim_state is not None:
trainer.model_optimizer.load_state_dict(optim_state)
logger.debug("Loading unpartitioned entities...")
for entity in holder.lhs_unpartitioned_types | holder.rhs_unpartitioned_types:
count = entity_counts[entity][0]
s = embedding_storage_freelist[entity].pop()
dimension = config.entity_dimension(entity)
embs = torch.FloatTensor(s).view(-1, dimension)[:count]
embs, optimizer = self._load_embeddings(entity, UNPARTITIONED, out=embs)
holder.unpartitioned_embeddings[entity] = embs
trainer.unpartitioned_optimizers[entity] = optimizer
# start communicating shared parameters with the parameter server
if parameter_sharer is not None:
shared_parameters: Set[int] = set()
for name, param in model.named_parameters():
if id(param) in shared_parameters:
continue
shared_parameters.add(id(param))
key = f"model.{name}"
logger.info(
f"Adding {key} ({param.numel()} params) to parameter server"
)
parameter_sharer.set_param(key, param.data)
for entity, embs in holder.unpartitioned_embeddings.items():
key = f"entity.{entity}"
logger.info(f"Adding {key} ({embs.numel()} params) to parameter server")
parameter_sharer.set_param(key, embs.data)
# store everything in self
self.model = model
self.trainer = trainer
self.evaluator = evaluator
self.rank = rank
self.entity_counts = entity_counts
self.embedding_storage_freelist = embedding_storage_freelist
self.stats_handler = stats_handler
self.strict = False
def train(self) -> None:
holder = self.holder
config = self.config
iteration_manager = self.iteration_manager
total_buckets = holder.nparts_lhs * holder.nparts_rhs
# yield stats from checkpoint, to reconstruct
# saved part of the learning curve
if self.rank == SINGLE_TRAINER:
for stats_dict in self.checkpoint_manager.maybe_read_stats():
index: int = stats_dict["index"]
stats: Optional[Stats] = None
if "stats" in stats_dict:
stats: Stats = Stats.from_dict(stats_dict["stats"])
eval_stats_before: Optional[Stats] = None
if "eval_stats_before" in stats_dict:
eval_stats_before = Stats.from_dict(stats_dict["eval_stats_before"])
eval_stats_after: Optional[Stats] = None
if "eval_stats_after" in stats_dict:
eval_stats_after = Stats.from_dict(stats_dict["eval_stats_after"])
eval_stats_chunk_avg: Optional[Stats] = None
if "eval_stats_chunk_avg" in stats_dict:
eval_stats_chunk_avg = Stats.from_dict(
stats_dict["eval_stats_chunk_avg"]
)
self.stats_handler.on_stats(
index,
eval_stats_before,
stats,
eval_stats_after,
eval_stats_chunk_avg,
)
for epoch_idx, edge_path_idx, edge_chunk_idx in iteration_manager:
logger.info(
f"Starting epoch {epoch_idx + 1} / {iteration_manager.num_epochs}, "
f"edge path {edge_path_idx + 1} / {iteration_manager.num_edge_paths}, "
f"edge chunk {edge_chunk_idx + 1} / {iteration_manager.num_edge_chunks}"
)
edge_storage = EDGE_STORAGES.make_instance(iteration_manager.edge_path)
logger.info(f"Edge path: {iteration_manager.edge_path}")
self._barrier()
dist_logger.info("Lock client new epoch...")
self.bucket_scheduler.new_pass(
is_first=iteration_manager.iteration_idx == 0
)
self._barrier()
remaining = total_buckets
cur_b: Optional[Bucket] = None
cur_stats: Optional[BucketStats] = None
while remaining > 0:
old_b: Optional[Bucket] = cur_b
old_stats: Optional[BucketStats] = cur_stats
cur_b, remaining = self.bucket_scheduler.acquire_bucket()
logger.info(f"still in queue: {remaining}")
if cur_b is None:
cur_stats = None
if old_b is not None:
# if you couldn't get a new pair, release the lock
# to prevent a deadlock!
tic = time.perf_counter()
release_bytes = self._swap_partitioned_embeddings(
old_b, None, old_stats
)
release_time = time.perf_counter() - tic
logger.info(
f"Swapping old embeddings to release lock. io: {release_time:.2f} s for {release_bytes:,} bytes "
f"( {release_bytes / release_time / 1e6:.2f} MB/sec )"
)
time.sleep(1) # don't hammer td
continue
tic = time.perf_counter()
self.cur_b = cur_b
bucket_logger = BucketLogger(logger, bucket=cur_b)
self.bucket_logger = bucket_logger
io_bytes = self._swap_partitioned_embeddings(old_b, cur_b, old_stats)
self.model.set_all_embeddings(holder, cur_b)
current_index = (
iteration_manager.iteration_idx + 1
) * total_buckets - remaining
bucket_logger.debug("Loading edges")
edges = edge_storage.load_chunk_of_edges(
cur_b.lhs,
cur_b.rhs,
edge_chunk_idx,
iteration_manager.num_edge_chunks,
shared=True,
)
num_edges = len(edges)
# this might be off in the case of tensorlist or extra edge fields
io_bytes += edges.lhs.tensor.numel() * edges.lhs.tensor.element_size()
io_bytes += edges.rhs.tensor.numel() * edges.rhs.tensor.element_size()
io_bytes += edges.rel.numel() * edges.rel.element_size()
io_time = time.perf_counter() - tic
tic = time.perf_counter()
bucket_logger.debug("Shuffling edges")
# Fix a seed to get the same permutation every time; have it
# depend on all and only what affects the set of edges.
# Note: for the sake of efficiency, we sample eval edge idxs
# from the edge set *with replacement*, meaning that there may
# be duplicates of the same edge in the eval set. When we swap
# edges into the eval set, if there are duplicates then all
# but one will be clobbered. These collisions are unlikely
# if eval_fraction is small.
#
# Importantly, this eval sampling strategy is theoretically
# sound:
# * Training and eval sets are (exactly) disjoint
# * Eval set may have (rare) duplicates, but they are
# uniformly sampled so it's still an unbiased estimator
# of the out-of-sample statistics
num_eval_edges = int(num_edges * config.eval_fraction)
num_train_edges = num_edges - num_eval_edges
if num_eval_edges > 0:
g = torch.Generator()
g.manual_seed(
hash((edge_path_idx, edge_chunk_idx, cur_b.lhs, cur_b.rhs))
)
eval_edge_idxs = torch.randint(
num_edges, (num_eval_edges,), dtype=torch.long, generator=g
)
else:
eval_edge_idxs = None
# HOGWILD evaluation before training
eval_stats_before = self._coordinate_eval(edges, eval_edge_idxs)
if eval_stats_before is not None:
bucket_logger.info(f"Stats before training: {eval_stats_before}")
eval_time = time.perf_counter() - tic
tic = time.perf_counter()
# HOGWILD training
bucket_logger.debug("Waiting for workers to perform training")
stats = self._coordinate_train(edges, eval_edge_idxs, epoch_idx)
if stats is not None:
bucket_logger.info(f"Training stats: {stats}")
train_time = time.perf_counter() - tic
tic = time.perf_counter()
# HOGWILD evaluation after training
eval_stats_after = self._coordinate_eval(edges, eval_edge_idxs)
if eval_stats_after is not None:
bucket_logger.info(f"Stats after training: {eval_stats_after}")
eval_time += time.perf_counter() - tic
bucket_logger.info(
f"bucket {total_buckets - remaining} / {total_buckets} : "
f"Trained {num_train_edges} edges in {train_time:.2f} s "
f"( {num_train_edges / train_time / 1e6:.2g} M/sec ); "
f"Eval 2*{num_eval_edges} edges in {eval_time:.2f} s "
f"( {2 * num_eval_edges / eval_time / 1e6:.2g} M/sec ); "
f"io: {io_time:.2f} s for {io_bytes:,} bytes ( {io_bytes / io_time / 1e6:.2f} MB/sec )"
)
self.model.clear_all_embeddings()
cur_stats = BucketStats(
lhs_partition=cur_b.lhs,
rhs_partition=cur_b.rhs,
index=current_index,
train=stats,
eval_before=eval_stats_before,
eval_after=eval_stats_after,
)
# release the final bucket
self._swap_partitioned_embeddings(cur_b, None, cur_stats)
# Distributed Processing: all machines can leave the barrier now.
self._barrier()
current_index = (iteration_manager.iteration_idx + 1) * total_buckets - 1
self._maybe_write_checkpoint(
epoch_idx, edge_path_idx, edge_chunk_idx, current_index
)
# now we're sure that all partition files exist,
# so be strict about loading them
self.strict = True
    def close(self):
        """Shut down the worker pool, sync with peers, and close all checkpoint managers."""
        # cleanup
        self.pool.close()
        self.pool.join()
        # Wait for all trainers before tearing down shared checkpoint state.
        self._barrier()
        self.checkpoint_manager.close()
        if self.loadpath_manager is not None:
            self.loadpath_manager.close()
        # FIXME join distributed workers (not really necessary)
        logger.info("Exiting")
###########################################################################
# private functions
###########################################################################
def _barrier(self) -> None:
if self.barrier_group is not None:
td.barrier(group=self.barrier_group)
    def _load_embeddings(
        self,
        entity: EntityName,
        part: Partition,
        out: FloatTensorType,
        strict: bool = False,
        force_dirty: bool = False,
    ) -> Tuple[torch.nn.Parameter, Optimizer]:
        """Load one (entity, partition) embedding table and its optimizer state.

        Lookup order: checkpoint, then init/load path, then random
        initialization. `out` is a preallocated (shared) tensor that receives
        the data in place; the returned Parameter wraps it.
        """
        if strict:
            embs, optim_state = self.checkpoint_manager.read(
                entity, part, out=out, force_dirty=force_dirty
            )
        else:
            # Strict is only false during the first iteration, because in that
            # case the checkpoint may not contain any data (unless a previous
            # run was resumed) so we fall back on initial values.
            embs, optim_state = self.checkpoint_manager.maybe_read(
                entity, part, out=out, force_dirty=force_dirty
            )
            if embs is None and self.loadpath_manager is not None:
                embs, optim_state = self.loadpath_manager.maybe_read(
                    entity, part, out=out
                )
            if embs is None:
                # Nothing found on disk anywhere: initialize randomly in place.
                embs = out
                fast_approx_rand(embs)
                embs.mul_(self.config.init_scale)
                optim_state = None
        embs = torch.nn.Parameter(embs)
        # Embedding tables always get the embedding-specific optimizer.
        optimizer = make_optimizer(self.config, [embs], True)
        if optim_state is not None:
            optimizer.load_state_dict(optim_state)
        return embs, optimizer
    def _swap_partitioned_embeddings(
        self,
        old_b: Optional[Bucket],
        new_b: Optional[Bucket],
        old_stats: Optional[BucketStats],
    ) -> int:
        """Check in the old bucket's embeddings and check out the new bucket's.

        Partitions shared by both buckets stay resident; the rest are written
        to (resp. read from) the checkpoint, recycling the shared storage
        freelist. Returns the number of bytes of embedding data moved.
        """
        io_bytes = 0
        logger.info(f"Swapping partitioned embeddings {old_b} {new_b}")
        holder = self.holder
        old_parts: Set[Tuple[EntityName, Partition]] = set()
        if old_b is not None:
            old_parts.update((e, old_b.lhs) for e in holder.lhs_partitioned_types)
            old_parts.update((e, old_b.rhs) for e in holder.rhs_partitioned_types)
        new_parts: Set[Tuple[EntityName, Partition]] = set()
        if new_b is not None:
            new_parts.update((e, new_b.lhs) for e in holder.lhs_partitioned_types)
            new_parts.update((e, new_b.rhs) for e in holder.rhs_partitioned_types)
        assert old_parts == holder.partitioned_embeddings.keys()
        if old_b is not None:
            if old_stats is None:
                raise TypeError("Got old bucket but not its stats")
            logger.info("Saving partitioned embeddings to checkpoint")
            # Only evict partitions the new bucket does not also need.
            for entity, part in old_parts - new_parts:
                logger.debug(f"Saving ({entity} {part})")
                embs = holder.partitioned_embeddings.pop((entity, part))
                optimizer = self.trainer.partitioned_optimizers.pop((entity, part))
                self.checkpoint_manager.write(
                    entity, part, embs.detach(), optimizer.state_dict()
                )
                # Return the shared storage to the freelist for reuse.
                self.embedding_storage_freelist[entity].add(embs.storage())
                io_bytes += embs.numel() * embs.element_size()  # ignore optim state
                # these variables are holding large objects; let them be freed
                del embs
                del optimizer
            self.bucket_scheduler.release_bucket(old_b, old_stats)
        if new_b is not None:
            logger.info("Loading partitioned embeddings from checkpoint")
            for entity, part in new_parts - old_parts:
                logger.debug(f"Loading ({entity} {part})")
                force_dirty = self.bucket_scheduler.check_and_set_dirty(entity, part)
                count = self.entity_counts[entity][part]
                # Reuse a free shared storage, viewed at this entity's dimension.
                s = self.embedding_storage_freelist[entity].pop()
                dimension = self.config.entity_dimension(entity)
                embs = torch.FloatTensor(s).view(-1, dimension)[:count]
                embs, optimizer = self._load_embeddings(
                    entity, part, out=embs, strict=self.strict, force_dirty=force_dirty
                )
                holder.partitioned_embeddings[entity, part] = embs
                self.trainer.partitioned_optimizers[entity, part] = optimizer
                io_bytes += embs.numel() * embs.element_size()  # ignore optim state
        assert new_parts == holder.partitioned_embeddings.keys()
        return io_bytes
    def _coordinate_train(self, edges, eval_edge_idxs, epoch_idx) -> Stats:
        """Run HOGWILD training over `edges` across the worker pool.

        Eval edges (if any) are swapped out of the permutation so training and
        eval sets stay disjoint. Returns averaged per-worker stats.
        """
        assert self.config.num_gpus == 0, "GPU training not supported"
        if eval_edge_idxs is not None:
            num_train_edges = len(edges) - len(eval_edge_idxs)
            # Overwrite the eval positions with indices from the tail, then
            # drop the tail: what remains indexes only non-eval edges.
            train_edge_idxs = torch.arange(len(edges))
            train_edge_idxs[eval_edge_idxs] = torch.arange(num_train_edges, len(edges))
            train_edge_idxs = train_edge_idxs[:num_train_edges]
            edge_perm = train_edge_idxs[torch.randperm(num_train_edges)]
        else:
            edge_perm = torch.randperm(len(edges))
        # Fan out one contiguous slice of the permutation per worker.
        future_all_stats = self.pool.map_async(
            call,
            [
                partial(
                    process_in_batches,
                    batch_size=self.config.batch_size,
                    model=self.model,
                    batch_processor=self.trainer,
                    edges=edges,
                    indices=edge_perm[s],
                    # FIXME should we only delay if iteration_idx == 0?
                    delay=self.config.hogwild_delay
                    if epoch_idx == 0 and self.rank > 0
                    else 0,
                )
                # NOTE: the loop variable `rank` is unused; workers are
                # distinguished only by their slice `s`.
                for rank, s in enumerate(
                    split_almost_equally(edge_perm.size(0), num_parts=self.num_workers)
                )
            ],
        )
        all_stats = get_async_result(future_all_stats, self.pool)
        return Stats.sum(all_stats).average()
    def _coordinate_eval(self, edges, eval_edge_idxs) -> Optional[Stats]:
        """Run HOGWILD evaluation on the held-out edge indices, if any.

        Returns averaged per-worker stats, or None when there is no eval set.
        """
        # Batch size is rounded up so each batch holds a whole number of
        # negative-sampling groups.
        eval_batch_size = round_up_to_nearest_multiple(
            self.config.batch_size, self.config.eval_num_batch_negs
        )
        if eval_edge_idxs is not None:
            self.bucket_logger.debug("Waiting for workers to perform evaluation")
            future_all_eval_stats = self.pool.map_async(
                call,
                [
                    partial(
                        process_in_batches,
                        batch_size=eval_batch_size,
                        model=self.model,
                        batch_processor=self.evaluator,
                        edges=edges,
                        indices=eval_edge_idxs[s],
                    )
                    for s in split_almost_equally(
                        eval_edge_idxs.size(0), num_parts=self.num_workers
                    )
                ],
            )
            all_eval_stats = get_async_result(future_all_eval_stats, self.pool)
            return Stats.sum(all_eval_stats).average()
        else:
            return None
    def _maybe_write_checkpoint(
        self,
        epoch_idx: int,
        edge_path_idx: int,
        edge_chunk_idx: int,
        current_index: int,
    ) -> None:
        """Commit a new checkpoint version and manage old-version retention.

        Rank 0 writes the shared pieces (unpartitioned embeddings, model,
        stats); then all ranks write their own parts and switch versions in
        lock-step, separated by barriers.
        """
        config = self.config
        # Preserving a checkpoint requires two steps:
        # - create a snapshot (w/ symlinks) after it's first written;
        # - don't delete it once the following one is written.
        # These two happen in two successive iterations of the main loop: the
        # one just before and the one just after the epoch boundary.
        preserve_old_checkpoint = should_preserve_old_checkpoint(
            self.iteration_manager, config.checkpoint_preservation_interval
        )
        preserve_new_checkpoint = should_preserve_old_checkpoint(
            self.iteration_manager + 1, config.checkpoint_preservation_interval
        )
        # Write metadata: for multiple machines, write from rank-0
        logger.info(
            f"Finished epoch {epoch_idx + 1} / {self.iteration_manager.num_epochs}, "
            f"edge path {edge_path_idx + 1} / {self.iteration_manager.num_edge_paths}, "
            f"edge chunk {edge_chunk_idx + 1} / "
            f"{self.iteration_manager.num_edge_chunks}"
        )
        if self.rank == 0:
            for entity, embs in self.holder.unpartitioned_embeddings.items():
                logger.info(f"Writing {entity} embeddings")
                optimizer = self.trainer.unpartitioned_optimizers[entity]
                self.checkpoint_manager.write(
                    entity,
                    UNPARTITIONED,
                    embs.detach(),
                    optimizer.state_dict(),
                    unpartitioned=True,
                )
            logger.info("Writing the metadata")
            state_dict: ModuleStateDict = self.model.state_dict()
            self.checkpoint_manager.write_model(
                state_dict, self.trainer.model_optimizer.state_dict()
            )
            logger.info("Writing the training stats")
            all_stats_dicts: List[Dict[str, Any]] = []
            bucket_eval_stats_list = []
            # Common fields shared by every per-bucket stats record.
            chunk_stats_dict = {
                "epoch_idx": epoch_idx,
                "edge_path_idx": edge_path_idx,
                "edge_chunk_idx": edge_chunk_idx,
            }
            for stats in self.bucket_scheduler.get_stats_for_pass():
                stats_dict = {
                    "lhs_partition": stats.lhs_partition,
                    "rhs_partition": stats.rhs_partition,
                    "index": stats.index,
                    "stats": stats.train.to_dict(),
                }
                if stats.eval_before is not None:
                    stats_dict["eval_stats_before"] = stats.eval_before.to_dict()
                    bucket_eval_stats_list.append(stats.eval_before)
                if stats.eval_after is not None:
                    stats_dict["eval_stats_after"] = stats.eval_after.to_dict()
                stats_dict.update(chunk_stats_dict)
                all_stats_dicts.append(stats_dict)
            if len(bucket_eval_stats_list) != 0:
                # Append one extra record with the chunk-wide eval average.
                eval_stats_chunk_avg = Stats.average_list(bucket_eval_stats_list)
                self.stats_handler.on_stats(
                    index=current_index, eval_stats_chunk_avg=eval_stats_chunk_avg
                )
                chunk_stats_dict["index"] = current_index
                chunk_stats_dict[
                    "eval_stats_chunk_avg"
                ] = eval_stats_chunk_avg.to_dict()
                all_stats_dicts.append(chunk_stats_dict)
            self.checkpoint_manager.append_stats(all_stats_dicts)
        logger.info("Writing the checkpoint")
        self.checkpoint_manager.write_new_version(
            config, self.entity_counts, self.embedding_storage_freelist
        )
        dist_logger.info(
            "Waiting for other workers to write their parts of the checkpoint"
        )
        self._barrier()
        dist_logger.info("All parts of the checkpoint have been written")
        logger.info("Switching to the new checkpoint version")
        self.checkpoint_manager.switch_to_new_version()
        dist_logger.info(
            "Waiting for other workers to switch to the new checkpoint version"
        )
        self._barrier()
        dist_logger.info("All workers have switched to the new checkpoint version")
        # After all the machines have finished committing
        # checkpoints, we either remove the old checkpoints
        # or we preserve it
        if preserve_new_checkpoint:
            # Add 1 so the index is a multiple of the interval, it looks nicer.
            self.checkpoint_manager.preserve_current_version(config, epoch_idx + 1)
        if not preserve_old_checkpoint:
            self.checkpoint_manager.remove_old_version(config)
| 41.314
| 125
| 0.596819
|
import logging
import math
import time
from collections import defaultdict
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple
import torch
import torch.distributed as td
from torch.optim import Optimizer
from torchbiggraph.async_adagrad import AsyncAdagrad
from torchbiggraph.batching import AbstractBatchProcessor, call, process_in_batches
from torchbiggraph.bucket_scheduling import (
BucketStats,
DistributedBucketScheduler,
LockServer,
SingleMachineBucketScheduler,
)
from torchbiggraph.checkpoint_manager import (
CheckpointManager,
ConfigMetadataProvider,
MetadataProvider,
PartitionClient,
)
from torchbiggraph.config import ConfigSchema
from torchbiggraph.distributed import ProcessRanks, init_process_group, start_server
from torchbiggraph.edgelist import EdgeList
from torchbiggraph.eval import RankingEvaluator
from torchbiggraph.graph_storages import EDGE_STORAGES, ENTITY_STORAGES
from torchbiggraph.losses import LOSS_FUNCTIONS, AbstractLossFunction
from torchbiggraph.model import MultiRelationEmbedder, make_model
from torchbiggraph.parameter_sharing import ParameterServer, ParameterSharer
from torchbiggraph.row_adagrad import RowAdagrad
from torchbiggraph.stats import Stats, StatsHandler
from torchbiggraph.types import (
SINGLE_TRAINER,
UNPARTITIONED,
Bucket,
EntityName,
FloatTensorType,
ModuleStateDict,
Partition,
Rank,
)
from torchbiggraph.util import (
BucketLogger,
DummyOptimizer,
EmbeddingHolder,
allocate_shared_tensor,
create_pool,
fast_approx_rand,
get_async_result,
get_num_workers,
hide_distributed_logging,
round_up_to_nearest_multiple,
split_almost_equally,
tag_logs_with_process_name,
)
logger = logging.getLogger("torchbiggraph")
dist_logger = logging.LoggerAdapter(logger, {"distributed": True})
class Trainer(AbstractBatchProcessor):
    """Batch processor that performs one optimization step per edge batch."""

    def __init__(
        self,
        model_optimizer: Optimizer,
        loss_fn: AbstractLossFunction,
        relation_weights: List[float],
    ) -> None:
        super().__init__(loss_fn, relation_weights)
        self.model_optimizer = model_optimizer
        # Optimizers for embedding tables, registered as tables get loaded.
        self.unpartitioned_optimizers: Dict[EntityName, Optimizer] = {}
        self.partitioned_optimizers: Dict[Tuple[EntityName, Partition], Optimizer] = {}

    def _process_one_batch(
        self, model: MultiRelationEmbedder, batch_edges: EdgeList
    ) -> Stats:
        # Forward pass.
        model.zero_grad()
        scores, reg = model(batch_edges)
        loss = self.calc_loss(scores, batch_edges)
        # A negative sample scoring above its positive counts as a violator.
        lhs_violators = (scores.lhs_neg > scores.lhs_pos.unsqueeze(1)).sum()
        rhs_violators = (scores.rhs_neg > scores.rhs_pos.unsqueeze(1)).sum()
        batch_stats = Stats(
            loss=float(loss),
            reg=0.0 if reg is None else float(reg),
            violators_lhs=int(lhs_violators),
            violators_rhs=int(rhs_violators),
            count=len(batch_edges),
        )
        # Backward pass on loss plus regularizer (when present).
        objective = loss if reg is None else loss + reg
        objective.backward()
        # Step the model optimizer and every registered embedding optimizer.
        self.model_optimizer.step(closure=None)
        for opt in self.unpartitioned_optimizers.values():
            opt.step(closure=None)
        for opt in self.partitioned_optimizers.values():
            opt.step(closure=None)
        return batch_stats
class IterationManager(MetadataProvider):
    """Decodes a flat iteration counter into (epoch, edge path, edge chunk).

    The chunk index varies fastest and the epoch index slowest. Iterating
    yields one (epoch_idx, edge_path_idx, edge_chunk_idx) triple at a time
    and then advances the counter.
    """

    def __init__(
        self,
        num_epochs: int,
        edge_paths: List[str],
        num_edge_chunks: int,
        *,
        iteration_idx: int = 0,
    ) -> None:
        self.num_epochs = num_epochs
        self.edge_paths = edge_paths
        self.num_edge_chunks = num_edge_chunks
        self.iteration_idx = iteration_idx

    @property
    def num_edge_paths(self) -> int:
        return len(self.edge_paths)

    @property
    def epoch_idx(self) -> int:
        # Slowest-varying dimension.
        return self.iteration_idx // self.num_edge_chunks // self.num_edge_paths

    @property
    def edge_path_idx(self) -> int:
        return (self.iteration_idx // self.num_edge_chunks) % self.num_edge_paths

    @property
    def edge_path(self) -> str:
        return self.edge_paths[self.edge_path_idx]

    @property
    def edge_chunk_idx(self) -> int:
        # Fastest-varying dimension.
        return self.iteration_idx % self.num_edge_chunks

    def __iter__(self) -> Iterable[Tuple[int, int, int]]:
        while self.epoch_idx < self.num_epochs:
            yield self.epoch_idx, self.edge_path_idx, self.edge_chunk_idx
            self.iteration_idx += 1

    def get_checkpoint_metadata(self) -> Dict[str, Any]:
        return {
            "iteration/num_epochs": self.num_epochs,
            "iteration/epoch_idx": self.epoch_idx,
            "iteration/num_edge_paths": self.num_edge_paths,
            "iteration/edge_path_idx": self.edge_path_idx,
            "iteration/edge_path": self.edge_path,
            "iteration/num_edge_chunks": self.num_edge_chunks,
            "iteration/edge_chunk_idx": self.edge_chunk_idx,
        }

    def __add__(self, delta: int) -> "IterationManager":
        # A shifted view of the same schedule; used to peek ahead.
        return IterationManager(
            self.num_epochs,
            self.edge_paths,
            self.num_edge_chunks,
            iteration_idx=self.iteration_idx + delta,
        )
def should_preserve_old_checkpoint(
    iteration_manager: IterationManager, interval: Optional[int]
) -> bool:
    """Tell whether the checkpoint at the current iteration should be kept.

    Checkpoints are preserved at the start of every `interval`-th epoch, i.e.
    only on the first edge path and first edge chunk. With no interval
    configured, nothing is preserved.
    """
    if interval is None:
        return False
    return (
        iteration_manager.epoch_idx % interval == 0
        and iteration_manager.edge_path_idx == 0
        and iteration_manager.edge_chunk_idx == 0
    )
def get_num_edge_chunks(config: ConfigSchema) -> int:
    """Return how many chunks each bucket's edge list is split into.

    An explicit config override wins; otherwise chunks are sized so that the
    fullest bucket stays under `max_edges_per_chunk` edges.
    """
    if config.num_edge_chunks is not None:
        return config.num_edge_chunks
    # Assume edges are uniformly distributed among buckets (this is not
    # exactly the case, as it's the entities that are uniformly distributed).
    biggest_bucket = max(
        (
            EDGE_STORAGES.make_instance(path).get_number_of_edges(
                UNPARTITIONED, UNPARTITIONED
            )
            for path in config.edge_paths
        ),
        default=0,
    )
    return max(1, math.ceil(biggest_bucket / config.max_edges_per_chunk))
def make_optimizer(
    config: ConfigSchema, params: Iterable[torch.nn.Parameter], is_emb: bool
) -> Optimizer:
    """Build the optimizer for a parameter group.

    Embeddings get RowAdagrad at the base learning rate; other model
    parameters get AsyncAdagrad at `relation_lr` (falling back to `lr`).
    An empty parameter list yields a no-op optimizer. The optimizer's state
    is moved to shared memory for the HOGWILD workers.
    """
    param_list = list(params)
    if not param_list:
        optimizer = DummyOptimizer()
    elif is_emb:
        optimizer = RowAdagrad(param_list, lr=config.lr)
    else:
        lr = config.lr if config.relation_lr is None else config.relation_lr
        optimizer = AsyncAdagrad(param_list, lr=lr)
    optimizer.share_memory()
    return optimizer
NOOP_STATS_HANDLER = StatsHandler()
class TrainingCoordinator:
    def __init__( self,
        config: ConfigSchema,
        model: Optional[MultiRelationEmbedder] = None,
        trainer: Optional[AbstractBatchProcessor] = None,
        evaluator: Optional[AbstractBatchProcessor] = None,
        rank: Rank = SINGLE_TRAINER,
        subprocess_init: Optional[Callable[[], None]] = None,
        stats_handler: StatsHandler = NOOP_STATS_HANDLER,
    ):
        """Set up a single- or multi-machine training run.

        Loads entity counts, pre-allocates shared embedding storage, spins up
        the distributed servers (lock/parameter/partition) when configured,
        creates the HOGWILD worker pool and checkpoint managers, builds (or
        accepts) the model/trainer/evaluator, and restores state from an
        existing checkpoint or init path when available.
        """
        tag_logs_with_process_name(f"Trainer-{rank}")
        self.config = config
        if config.verbose > 0:
            import pprint
            pprint.PrettyPrinter().pprint(config.to_dict())
        logger.info("Loading entity counts...")
        entity_storage = ENTITY_STORAGES.make_instance(config.entity_path)
        entity_counts: Dict[str, List[int]] = {}
        for entity, econf in config.entities.items():
            entity_counts[entity] = []
            for part in range(econf.num_partitions):
                entity_counts[entity].append(entity_storage.load_count(entity, part))
        holder = self.holder = EmbeddingHolder(config)
        logger.debug(
            f"nparts {holder.nparts_lhs} {holder.nparts_rhs} "
            f"types {holder.lhs_partitioned_types} {holder.rhs_partitioned_types}"
        )
        # Pre-allocate one shared storage per "side" an entity type appears on,
        # each big enough for that type's largest partition, so embedding
        # tensors can be recycled as buckets are swapped.
        embedding_storage_freelist: Dict[
            EntityName, Set[torch.FloatStorage]
        ] = defaultdict(set)
        for entity_type, counts in entity_counts.items():
            max_count = max(counts)
            num_sides = (
                (1 if entity_type in holder.lhs_partitioned_types else 0)
                + (1 if entity_type in holder.rhs_partitioned_types else 0)
                + (
                    1
                    if entity_type
                    in (holder.lhs_unpartitioned_types | holder.rhs_unpartitioned_types)
                    else 0
                )
            )
            for _ in range(num_sides):
                embedding_storage_freelist[entity_type].add(
                    allocate_shared_tensor(
                        (max_count, config.entity_dimension(entity_type)),
                        dtype=torch.float,
                    ).storage()
                )
        # Distributed mode: set up lock/parameter/partition servers and groups.
        if config.num_machines > 1 or config.num_partition_servers > 0:
            if not 0 <= rank < config.num_machines:
                raise RuntimeError("Invalid rank for trainer")
            if not td.is_available():
                raise RuntimeError(
                    "The installed PyTorch version doesn't provide "
                    "distributed training capabilities."
                )
            ranks = ProcessRanks.from_num_invocations(
                config.num_machines, config.num_partition_servers
            )
            num_ps_groups = config.num_groups_for_partition_server
            groups: List[List[int]] = [ranks.trainers]  # barrier group
            groups += [
                ranks.trainers + ranks.partition_servers
            ] * num_ps_groups  # ps groups
            group_idxs_for_partition_servers = range(1, len(groups))
            # Only the first trainer hosts the lock server.
            if rank == SINGLE_TRAINER:
                logger.info("Setup lock server...")
                start_server(
                    LockServer(
                        num_clients=len(ranks.trainers),
                        nparts_lhs=holder.nparts_lhs,
                        nparts_rhs=holder.nparts_rhs,
                        entities_lhs=holder.lhs_partitioned_types,
                        entities_rhs=holder.rhs_partitioned_types,
                        entity_counts=entity_counts,
                        init_tree=config.distributed_tree_init_order,
                        stats_handler=stats_handler,
                    ),
                    process_name="LockServer",
                    init_method=config.distributed_init_method,
                    world_size=ranks.world_size,
                    server_rank=ranks.lock_server,
                    groups=groups,
                    subprocess_init=subprocess_init,
                )
            self.bucket_scheduler = DistributedBucketScheduler(
                server_rank=ranks.lock_server, client_rank=ranks.trainers[rank]
            )
            logger.info("Setup param server...")
            start_server(
                ParameterServer(num_clients=len(ranks.trainers)),
                process_name=f"ParamS-{rank}",
                init_method=config.distributed_init_method,
                world_size=ranks.world_size,
                server_rank=ranks.parameter_servers[rank],
                groups=groups,
                subprocess_init=subprocess_init,
            )
            parameter_sharer = ParameterSharer(
                process_name=f"ParamC-{rank}",
                client_rank=ranks.parameter_clients[rank],
                all_server_ranks=ranks.parameter_servers,
                init_method=config.distributed_init_method,
                world_size=ranks.world_size,
                groups=groups,
                subprocess_init=subprocess_init,
            )
            # -1 means each trainer machine also hosts a partition server.
            if config.num_partition_servers == -1:
                start_server(
                    ParameterServer(
                        num_clients=len(ranks.trainers),
                        group_idxs=group_idxs_for_partition_servers,
                        log_stats=True,
                    ),
                    process_name=f"PartS-{rank}",
                    init_method=config.distributed_init_method,
                    world_size=ranks.world_size,
                    server_rank=ranks.partition_servers[rank],
                    groups=groups,
                    subprocess_init=subprocess_init,
                )
            groups = init_process_group(
                rank=ranks.trainers[rank],
                world_size=ranks.world_size,
                init_method=config.distributed_init_method,
                groups=groups,
            )
            trainer_group, *groups_for_partition_servers = groups
            self.barrier_group = trainer_group
            if len(ranks.partition_servers) > 0:
                partition_client = PartitionClient(
                    ranks.partition_servers,
                    groups=groups_for_partition_servers,
                    log_stats=True,
                )
            else:
                partition_client = None
        else:
            # Single-machine mode: no barriers, no servers.
            self.barrier_group = None
            self.bucket_scheduler = SingleMachineBucketScheduler(
                holder.nparts_lhs, holder.nparts_rhs, config.bucket_order, stats_handler
            )
            parameter_sharer = None
            partition_client = None
            hide_distributed_logging()
        # fork early for HOGWILD threads
        logger.info("Creating workers...")
        self.num_workers = get_num_workers(config.workers)
        self.pool = create_pool(
            self.num_workers,
            subprocess_name=f"TWorker-{rank}",
            subprocess_init=subprocess_init,
        )
        checkpoint_manager = CheckpointManager(
            config.checkpoint_path,
            rank=rank,
            num_machines=config.num_machines,
            partition_client=partition_client,
            subprocess_name=f"BackgRW-{rank}",
            subprocess_init=subprocess_init,
        )
        self.checkpoint_manager = checkpoint_manager
        checkpoint_manager.register_metadata_provider(ConfigMetadataProvider(config))
        if rank == 0:
            checkpoint_manager.write_config(config)
        num_edge_chunks = get_num_edge_chunks(config)
        # Resume from wherever the last checkpoint left off.
        self.iteration_manager = IterationManager(
            config.num_epochs,
            config.edge_paths,
            num_edge_chunks,
            iteration_idx=checkpoint_manager.checkpoint_version,
        )
        checkpoint_manager.register_metadata_provider(self.iteration_manager)
        logger.info("Initializing global model...")
        if model is None:
            model = make_model(config)
        model.share_memory()
        loss_fn = LOSS_FUNCTIONS.get_class(config.loss_fn)(margin=config.margin)
        relation_weights = [relation.weight for relation in config.relations]
        if trainer is None:
            trainer = Trainer(
                model_optimizer=make_optimizer(config, model.parameters(), False),
                loss_fn=loss_fn,
                relation_weights=relation_weights,
            )
        if evaluator is None:
            eval_overrides = {}
            if config.eval_num_batch_negs is not None:
                eval_overrides["num_batch_negs"] = config.eval_num_batch_negs
            if config.eval_num_uniform_negs is not None:
                eval_overrides["num_uniform_negs"] = config.eval_num_uniform_negs
            evaluator = RankingEvaluator(
                loss_fn=loss_fn,
                relation_weights=relation_weights,
                overrides=eval_overrides,
            )
        if config.init_path is not None:
            self.loadpath_manager = CheckpointManager(config.init_path)
        else:
            self.loadpath_manager = None
        # load model from checkpoint or loadpath, if available
        state_dict, optim_state = checkpoint_manager.maybe_read_model()
        if state_dict is None and self.loadpath_manager is not None:
            state_dict, optim_state = self.loadpath_manager.maybe_read_model()
        if state_dict is not None:
            model.load_state_dict(state_dict, strict=False)
        if optim_state is not None:
            trainer.model_optimizer.load_state_dict(optim_state)
        logger.debug("Loading unpartitioned entities...")
        for entity in holder.lhs_unpartitioned_types | holder.rhs_unpartitioned_types:
            count = entity_counts[entity][0]
            s = embedding_storage_freelist[entity].pop()
            dimension = config.entity_dimension(entity)
            embs = torch.FloatTensor(s).view(-1, dimension)[:count]
            embs, optimizer = self._load_embeddings(entity, UNPARTITIONED, out=embs)
            holder.unpartitioned_embeddings[entity] = embs
            trainer.unpartitioned_optimizers[entity] = optimizer
        # start communicating shared parameters with the parameter server
        if parameter_sharer is not None:
            shared_parameters: Set[int] = set()
            for name, param in model.named_parameters():
                # Deduplicate by object identity: tied parameters appear
                # under several names but must be shared only once.
                if id(param) in shared_parameters:
                    continue
                shared_parameters.add(id(param))
                key = f"model.{name}"
                logger.info(
                    f"Adding {key} ({param.numel()} params) to parameter server"
                )
                parameter_sharer.set_param(key, param.data)
            for entity, embs in holder.unpartitioned_embeddings.items():
                key = f"entity.{entity}"
                logger.info(f"Adding {key} ({embs.numel()} params) to parameter server")
                parameter_sharer.set_param(key, embs.data)
        # store everything in self
        self.model = model
        self.trainer = trainer
        self.evaluator = evaluator
        self.rank = rank
        self.entity_counts = entity_counts
        self.embedding_storage_freelist = embedding_storage_freelist
        self.stats_handler = stats_handler
        # Strict checkpoint loading is turned on after the first full pass,
        # once all partition files are guaranteed to exist.
        self.strict = False
def train(self) -> None:
holder = self.holder
config = self.config
iteration_manager = self.iteration_manager
total_buckets = holder.nparts_lhs * holder.nparts_rhs
# yield stats from checkpoint, to reconstruct
# saved part of the learning curve
if self.rank == SINGLE_TRAINER:
for stats_dict in self.checkpoint_manager.maybe_read_stats():
index: int = stats_dict["index"]
stats: Optional[Stats] = None
if "stats" in stats_dict:
stats: Stats = Stats.from_dict(stats_dict["stats"])
eval_stats_before: Optional[Stats] = None
if "eval_stats_before" in stats_dict:
eval_stats_before = Stats.from_dict(stats_dict["eval_stats_before"])
eval_stats_after: Optional[Stats] = None
if "eval_stats_after" in stats_dict:
eval_stats_after = Stats.from_dict(stats_dict["eval_stats_after"])
eval_stats_chunk_avg: Optional[Stats] = None
if "eval_stats_chunk_avg" in stats_dict:
eval_stats_chunk_avg = Stats.from_dict(
stats_dict["eval_stats_chunk_avg"]
)
self.stats_handler.on_stats(
index,
eval_stats_before,
stats,
eval_stats_after,
eval_stats_chunk_avg,
)
for epoch_idx, edge_path_idx, edge_chunk_idx in iteration_manager:
logger.info(
f"Starting epoch {epoch_idx + 1} / {iteration_manager.num_epochs}, "
f"edge path {edge_path_idx + 1} / {iteration_manager.num_edge_paths}, "
f"edge chunk {edge_chunk_idx + 1} / {iteration_manager.num_edge_chunks}"
)
edge_storage = EDGE_STORAGES.make_instance(iteration_manager.edge_path)
logger.info(f"Edge path: {iteration_manager.edge_path}")
self._barrier()
dist_logger.info("Lock client new epoch...")
self.bucket_scheduler.new_pass(
is_first=iteration_manager.iteration_idx == 0
)
self._barrier()
remaining = total_buckets
cur_b: Optional[Bucket] = None
cur_stats: Optional[BucketStats] = None
while remaining > 0:
old_b: Optional[Bucket] = cur_b
old_stats: Optional[BucketStats] = cur_stats
cur_b, remaining = self.bucket_scheduler.acquire_bucket()
logger.info(f"still in queue: {remaining}")
if cur_b is None:
cur_stats = None
if old_b is not None:
# if you couldn't get a new pair, release the lock
tic = time.perf_counter()
release_bytes = self._swap_partitioned_embeddings(
old_b, None, old_stats
)
release_time = time.perf_counter() - tic
logger.info(
f"Swapping old embeddings to release lock. io: {release_time:.2f} s for {release_bytes:,} bytes "
f"( {release_bytes / release_time / 1e6:.2f} MB/sec )"
)
time.sleep(1) continue
tic = time.perf_counter()
self.cur_b = cur_b
bucket_logger = BucketLogger(logger, bucket=cur_b)
self.bucket_logger = bucket_logger
io_bytes = self._swap_partitioned_embeddings(old_b, cur_b, old_stats)
self.model.set_all_embeddings(holder, cur_b)
current_index = (
iteration_manager.iteration_idx + 1
) * total_buckets - remaining
bucket_logger.debug("Loading edges")
edges = edge_storage.load_chunk_of_edges(
cur_b.lhs,
cur_b.rhs,
edge_chunk_idx,
iteration_manager.num_edge_chunks,
shared=True,
)
num_edges = len(edges)
# this might be off in the case of tensorlist or extra edge fields
io_bytes += edges.lhs.tensor.numel() * edges.lhs.tensor.element_size()
io_bytes += edges.rhs.tensor.numel() * edges.rhs.tensor.element_size()
io_bytes += edges.rel.numel() * edges.rel.element_size()
io_time = time.perf_counter() - tic
tic = time.perf_counter()
bucket_logger.debug("Shuffling edges")
# Fix a seed to get the same permutation every time; have it
# depend on all and only what affects the set of edges.
# Note: for the sake of efficiency, we sample eval edge idxs
# from the edge set *with replacement*, meaning that there may
# be duplicates of the same edge in the eval set. When we swap
# edges into the eval set, if there are duplicates then all
# but one will be clobbered. These collisions are unlikely
# if eval_fraction is small.
#
# Importantly, this eval sampling strategy is theoretically
# sound:
# * Training and eval sets are (exactly) disjoint
# * Eval set may have (rare) duplicates, but they are
# uniformly sampled so it's still an unbiased estimator
num_eval_edges = int(num_edges * config.eval_fraction)
num_train_edges = num_edges - num_eval_edges
if num_eval_edges > 0:
g = torch.Generator()
g.manual_seed(
hash((edge_path_idx, edge_chunk_idx, cur_b.lhs, cur_b.rhs))
)
eval_edge_idxs = torch.randint(
num_edges, (num_eval_edges,), dtype=torch.long, generator=g
)
else:
eval_edge_idxs = None
eval_stats_before = self._coordinate_eval(edges, eval_edge_idxs)
if eval_stats_before is not None:
bucket_logger.info(f"Stats before training: {eval_stats_before}")
eval_time = time.perf_counter() - tic
tic = time.perf_counter()
bucket_logger.debug("Waiting for workers to perform training")
stats = self._coordinate_train(edges, eval_edge_idxs, epoch_idx)
if stats is not None:
bucket_logger.info(f"Training stats: {stats}")
train_time = time.perf_counter() - tic
tic = time.perf_counter()
eval_stats_after = self._coordinate_eval(edges, eval_edge_idxs)
if eval_stats_after is not None:
bucket_logger.info(f"Stats after training: {eval_stats_after}")
eval_time += time.perf_counter() - tic
bucket_logger.info(
f"bucket {total_buckets - remaining} / {total_buckets} : "
f"Trained {num_train_edges} edges in {train_time:.2f} s "
f"( {num_train_edges / train_time / 1e6:.2g} M/sec ); "
f"Eval 2*{num_eval_edges} edges in {eval_time:.2f} s "
f"( {2 * num_eval_edges / eval_time / 1e6:.2g} M/sec ); "
f"io: {io_time:.2f} s for {io_bytes:,} bytes ( {io_bytes / io_time / 1e6:.2f} MB/sec )"
)
self.model.clear_all_embeddings()
cur_stats = BucketStats(
lhs_partition=cur_b.lhs,
rhs_partition=cur_b.rhs,
index=current_index,
train=stats,
eval_before=eval_stats_before,
eval_after=eval_stats_after,
)
self._swap_partitioned_embeddings(cur_b, None, cur_stats)
self._barrier()
current_index = (iteration_manager.iteration_idx + 1) * total_buckets - 1
self._maybe_write_checkpoint(
epoch_idx, edge_path_idx, edge_chunk_idx, current_index
)
# so be strict about loading them
self.strict = True
    def close(self):
        """Shut down the worker pool, sync with peers, and close all checkpoint managers."""
        # cleanup
        self.pool.close()
        self.pool.join()
        # Wait for all trainers before tearing down shared checkpoint state.
        self._barrier()
        self.checkpoint_manager.close()
        if self.loadpath_manager is not None:
            self.loadpath_manager.close()
        # FIXME join distributed workers (not really necessary)
        logger.info("Exiting")
###########################################################################
# private functions
###########################################################################
def _barrier(self) -> None:
if self.barrier_group is not None:
td.barrier(group=self.barrier_group)
    def _load_embeddings(
        self,
        entity: EntityName,
        part: Partition,
        out: FloatTensorType,
        strict: bool = False,
        force_dirty: bool = False,
    ) -> Tuple[torch.nn.Parameter, Optimizer]:
        """Load one (entity, partition) embedding table and its optimizer state.

        Lookup order: checkpoint, then init/load path, then random
        initialization. `out` is a preallocated (shared) tensor that receives
        the data in place; the returned Parameter wraps it.
        """
        if strict:
            embs, optim_state = self.checkpoint_manager.read(
                entity, part, out=out, force_dirty=force_dirty
            )
        else:
            # Strict is only false during the first iteration, because in that
            # case the checkpoint may not contain any data (unless a previous
            # run was resumed) so we fall back on initial values.
            embs, optim_state = self.checkpoint_manager.maybe_read(
                entity, part, out=out, force_dirty=force_dirty
            )
            if embs is None and self.loadpath_manager is not None:
                embs, optim_state = self.loadpath_manager.maybe_read(
                    entity, part, out=out
                )
            if embs is None:
                # Nothing found on disk anywhere: initialize randomly in place.
                embs = out
                fast_approx_rand(embs)
                embs.mul_(self.config.init_scale)
                optim_state = None
        embs = torch.nn.Parameter(embs)
        # Embedding tables always get the embedding-specific optimizer.
        optimizer = make_optimizer(self.config, [embs], True)
        if optim_state is not None:
            optimizer.load_state_dict(optim_state)
        return embs, optimizer
    def _swap_partitioned_embeddings(
        self,
        old_b: Optional[Bucket],
        new_b: Optional[Bucket],
        old_stats: Optional[BucketStats],
    ) -> int:
        """Check in the old bucket's embeddings and check out the new bucket's.

        Partitions shared by both buckets stay resident; the rest are written
        to (resp. read from) the checkpoint, recycling the shared storage
        freelist. Returns the number of bytes of embedding data moved.
        """
        io_bytes = 0
        logger.info(f"Swapping partitioned embeddings {old_b} {new_b}")
        holder = self.holder
        old_parts: Set[Tuple[EntityName, Partition]] = set()
        if old_b is not None:
            old_parts.update((e, old_b.lhs) for e in holder.lhs_partitioned_types)
            old_parts.update((e, old_b.rhs) for e in holder.rhs_partitioned_types)
        new_parts: Set[Tuple[EntityName, Partition]] = set()
        if new_b is not None:
            new_parts.update((e, new_b.lhs) for e in holder.lhs_partitioned_types)
            new_parts.update((e, new_b.rhs) for e in holder.rhs_partitioned_types)
        assert old_parts == holder.partitioned_embeddings.keys()
        if old_b is not None:
            if old_stats is None:
                raise TypeError("Got old bucket but not its stats")
            logger.info("Saving partitioned embeddings to checkpoint")
            # Only evict partitions the new bucket does not also need.
            for entity, part in old_parts - new_parts:
                logger.debug(f"Saving ({entity} {part})")
                embs = holder.partitioned_embeddings.pop((entity, part))
                optimizer = self.trainer.partitioned_optimizers.pop((entity, part))
                self.checkpoint_manager.write(
                    entity, part, embs.detach(), optimizer.state_dict()
                )
                # Return the shared storage to the freelist for reuse.
                self.embedding_storage_freelist[entity].add(embs.storage())
                io_bytes += embs.numel() * embs.element_size()  # ignore optim state
                # these variables are holding large objects; let them be freed
                del embs
                del optimizer
            self.bucket_scheduler.release_bucket(old_b, old_stats)
        if new_b is not None:
            logger.info("Loading partitioned embeddings from checkpoint")
            for entity, part in new_parts - old_parts:
                logger.debug(f"Loading ({entity} {part})")
                force_dirty = self.bucket_scheduler.check_and_set_dirty(entity, part)
                count = self.entity_counts[entity][part]
                # Reuse a free shared storage, viewed at this entity's dimension.
                s = self.embedding_storage_freelist[entity].pop()
                dimension = self.config.entity_dimension(entity)
                embs = torch.FloatTensor(s).view(-1, dimension)[:count]
                embs, optimizer = self._load_embeddings(
                    entity, part, out=embs, strict=self.strict, force_dirty=force_dirty
                )
                holder.partitioned_embeddings[entity, part] = embs
                self.trainer.partitioned_optimizers[entity, part] = optimizer
                io_bytes += embs.numel() * embs.element_size()  # ignore optim state
        assert new_parts == holder.partitioned_embeddings.keys()
        return io_bytes
    def _coordinate_train(self, edges, eval_edge_idxs, epoch_idx) -> Stats:
        """Run one training pass over `edges` across the local worker pool.

        Builds a shuffled permutation of the training edge indices (excluding
        any held-out eval edges), shards it near-equally across workers, and
        dispatches one `process_in_batches` task per worker on the shared
        process pool. Returns the per-worker `Stats` summed and averaged.
        """
        assert self.config.num_gpus == 0, "GPU training not supported"
        if eval_edge_idxs is not None:
            # Exclude the eval edges from training: start from the identity
            # permutation of [0, len(edges)), overwrite the entries at the
            # eval positions with the tail values [num_train_edges, len(edges)),
            # then drop the tail, leaving exactly the non-eval indices.
            # NOTE(review): this assumes the substituted tail values do not
            # themselves collide with other eval indices -- presumably
            # eval_edge_idxs are sampled so this holds; confirm with caller.
            num_train_edges = len(edges) - len(eval_edge_idxs)
            train_edge_idxs = torch.arange(len(edges))
            train_edge_idxs[eval_edge_idxs] = torch.arange(num_train_edges, len(edges))
            train_edge_idxs = train_edge_idxs[:num_train_edges]
            # Shuffle the surviving indices for SGD.
            edge_perm = train_edge_idxs[torch.randperm(num_train_edges)]
        else:
            edge_perm = torch.randperm(len(edges))
        # Fan out one task per worker, each over a near-equal slice of the
        # permutation. `call` unwraps the partial inside the worker process.
        future_all_stats = self.pool.map_async(
            call,
            [
                partial(
                    process_in_batches,
                    batch_size=self.config.batch_size,
                    model=self.model,
                    batch_processor=self.trainer,
                    edges=edges,
                    indices=edge_perm[s],
                    # Stagger non-rank-0 workers at the start of training
                    # (Hogwild warm-up).
                    # FIXME should we only delay if iteration_idx == 0?
                    delay=self.config.hogwild_delay
                    if epoch_idx == 0 and self.rank > 0
                    else 0,
                )
                # NOTE(review): `rank` is unused in the loop body.
                for rank, s in enumerate(
                    split_almost_equally(edge_perm.size(0), num_parts=self.num_workers)
                )
            ],
        )
        all_stats = get_async_result(future_all_stats, self.pool)
        return Stats.sum(all_stats).average()
def _coordinate_eval(self, edges, eval_edge_idxs) -> Optional[Stats]:
eval_batch_size = round_up_to_nearest_multiple(
self.config.batch_size, self.config.eval_num_batch_negs
)
if eval_edge_idxs is not None:
self.bucket_logger.debug("Waiting for workers to perform evaluation")
future_all_eval_stats = self.pool.map_async(
call,
[
partial(
process_in_batches,
batch_size=eval_batch_size,
model=self.model,
batch_processor=self.evaluator,
edges=edges,
indices=eval_edge_idxs[s],
)
for s in split_almost_equally(
eval_edge_idxs.size(0), num_parts=self.num_workers
)
],
)
all_eval_stats = get_async_result(future_all_eval_stats, self.pool)
return Stats.sum(all_eval_stats).average()
else:
return None
    def _maybe_write_checkpoint(
        self,
        epoch_idx: int,
        edge_path_idx: int,
        edge_chunk_idx: int,
        current_index: int,
    ) -> None:
        """Commit the current training state as a new checkpoint version.

        Rank 0 writes the shared pieces (unpartitioned embeddings, model
        metadata, training stats); every rank writes its own partitioned
        data, then all ranks synchronize on barriers before atomically
        switching to the new version and cleaning up (or preserving) the
        old one.
        """
        config = self.config
        # Preserving a checkpoint requires two steps, which happen in two
        # successive iterations of the main loop (the one just before and
        # the one just after the epoch boundary): first keep the previous
        # version from being deleted, then create a snapshot (w/ symlinks)
        # after the new one is first written.
        preserve_old_checkpoint = should_preserve_old_checkpoint(
            self.iteration_manager, config.checkpoint_preservation_interval
        )
        preserve_new_checkpoint = should_preserve_old_checkpoint(
            self.iteration_manager + 1, config.checkpoint_preservation_interval
        )
        # Write metadata: for multiple machines, write from rank-0
        logger.info(
            f"Finished epoch {epoch_idx + 1} / {self.iteration_manager.num_epochs}, "
            f"edge path {edge_path_idx + 1} / {self.iteration_manager.num_edge_paths}, "
            f"edge chunk {edge_chunk_idx + 1} / "
            f"{self.iteration_manager.num_edge_chunks}"
        )
        if self.rank == 0:
            # Unpartitioned embeddings are shared state: only rank 0 owns them.
            for entity, embs in self.holder.unpartitioned_embeddings.items():
                logger.info(f"Writing {entity} embeddings")
                optimizer = self.trainer.unpartitioned_optimizers[entity]
                self.checkpoint_manager.write(
                    entity,
                    UNPARTITIONED,
                    embs.detach(),
                    optimizer.state_dict(),
                    unpartitioned=True,
                )
            logger.info("Writing the metadata")
            state_dict: ModuleStateDict = self.model.state_dict()
            self.checkpoint_manager.write_model(
                state_dict, self.trainer.model_optimizer.state_dict()
            )
            logger.info("Writing the training stats")
            all_stats_dicts: List[Dict[str, Any]] = []
            bucket_eval_stats_list = []
            # Fields shared by every stats record of this chunk.
            chunk_stats_dict = {
                "epoch_idx": epoch_idx,
                "edge_path_idx": edge_path_idx,
                "edge_chunk_idx": edge_chunk_idx,
            }
            # One record per trained bucket, tagged with the chunk fields.
            for stats in self.bucket_scheduler.get_stats_for_pass():
                stats_dict = {
                    "lhs_partition": stats.lhs_partition,
                    "rhs_partition": stats.rhs_partition,
                    "index": stats.index,
                    "stats": stats.train.to_dict(),
                }
                if stats.eval_before is not None:
                    stats_dict["eval_stats_before"] = stats.eval_before.to_dict()
                    bucket_eval_stats_list.append(stats.eval_before)
                if stats.eval_after is not None:
                    stats_dict["eval_stats_after"] = stats.eval_after.to_dict()
                stats_dict.update(chunk_stats_dict)
                all_stats_dicts.append(stats_dict)
            # Append one chunk-level record averaging the pre-training evals.
            if len(bucket_eval_stats_list) != 0:
                eval_stats_chunk_avg = Stats.average_list(bucket_eval_stats_list)
                self.stats_handler.on_stats(
                    index=current_index, eval_stats_chunk_avg=eval_stats_chunk_avg
                )
                chunk_stats_dict["index"] = current_index
                chunk_stats_dict[
                    "eval_stats_chunk_avg"
                ] = eval_stats_chunk_avg.to_dict()
                all_stats_dicts.append(chunk_stats_dict)
            self.checkpoint_manager.append_stats(all_stats_dicts)
        # Every rank writes its own part of the new checkpoint version.
        logger.info("Writing the checkpoint")
        self.checkpoint_manager.write_new_version(
            config, self.entity_counts, self.embedding_storage_freelist
        )
        dist_logger.info(
            "Waiting for other workers to write their parts of the checkpoint"
        )
        # Barrier 1: the new version must be fully written everywhere
        # before anyone switches to it.
        self._barrier()
        dist_logger.info("All parts of the checkpoint have been written")
        logger.info("Switching to the new checkpoint version")
        self.checkpoint_manager.switch_to_new_version()
        dist_logger.info(
            "Waiting for other workers to switch to the new checkpoint version"
        )
        # Barrier 2: nobody deletes the old version until everyone has
        # switched off of it.
        self._barrier()
        dist_logger.info("All workers have switched to the new checkpoint version")
        # After all the machines have finished committing
        # checkpoints, we either remove the old checkpoints
        # or we preserve it
        if preserve_new_checkpoint:
            # Add 1 so the index is a multiple of the interval, it looks nicer.
            self.checkpoint_manager.preserve_current_version(config, epoch_idx + 1)
        if not preserve_old_checkpoint:
            self.checkpoint_manager.remove_old_version(config)
| true
| true
|
f707f2e3b19fb24668146e651f30715302cff2ea
| 886
|
py
|
Python
|
setup.py
|
nurikk/icloud_photos_downloader
|
d159b13e43a7ad216e88e7a6aa7641cd2af5615f
|
[
"MIT"
] | null | null | null |
setup.py
|
nurikk/icloud_photos_downloader
|
d159b13e43a7ad216e88e7a6aa7641cd2af5615f
|
[
"MIT"
] | null | null | null |
setup.py
|
nurikk/icloud_photos_downloader
|
d159b13e43a7ad216e88e7a6aa7641cd2af5615f
|
[
"MIT"
] | 1
|
2019-12-22T06:14:20.000Z
|
2019-12-22T06:14:20.000Z
|
from setuptools import setup, find_packages

# Runtime dependencies are kept in requirements.txt (one requirement per
# line) so pip installs and packaging stay in sync.
with open("requirements.txt") as f:
    required = f.read().splitlines()

# Package metadata for icloudpd, a CLI tool for downloading iCloud media.
setup(
    name="icloudpd",
    version="1.4.3",
    url="https://github.com/ndbroadbent/icloud_photos_downloader",
    description=(
        "icloudpd is a command-line tool to download photos and videos from iCloud."
    ),
    maintainer="Nathan Broadbent",
    maintainer_email="icloudpd@ndbroadbent.com",
    license="MIT",
    packages=find_packages(),
    install_requires=required,
    classifiers=[
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
    ],
    # Installs the `icloudpd` console command pointing at icloudpd.base:main.
    entry_points={"console_scripts": ["icloudpd = icloudpd.base:main"]},
)
| 31.642857
| 84
| 0.65237
|
from setuptools import setup, find_packages
with open("requirements.txt") as f:
required = f.read().splitlines()
setup(
name="icloudpd",
version="1.4.3",
url="https://github.com/ndbroadbent/icloud_photos_downloader",
description=(
"icloudpd is a command-line tool to download photos and videos from iCloud."
),
maintainer="Nathan Broadbent",
maintainer_email="icloudpd@ndbroadbent.com",
license="MIT",
packages=find_packages(),
install_requires=required,
classifiers=[
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
],
entry_points={"console_scripts": ["icloudpd = icloudpd.base:main"]},
)
| true
| true
|
f707f2fa90d4f7a0f5673b62cd8e5c20008c595e
| 1,648
|
py
|
Python
|
setup.py
|
jeffshurtliff/khorosjx
|
1530fad25eb8ccefcbb5a9ae63c09a6858cb033d
|
[
"MIT"
] | 2
|
2019-11-18T03:52:51.000Z
|
2020-12-30T04:08:06.000Z
|
setup.py
|
jeffshurtliff/khorosjx
|
1530fad25eb8ccefcbb5a9ae63c09a6858cb033d
|
[
"MIT"
] | 2
|
2019-12-17T17:02:30.000Z
|
2021-06-02T13:56:02.000Z
|
setup.py
|
jeffshurtliff/khorosjx
|
1530fad25eb8ccefcbb5a9ae63c09a6858cb033d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import setuptools
import khorosjx.utils.version

# The README doubles as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Single-source the version from the package itself.
version = khorosjx.utils.version.__version__

setuptools.setup(
    name="khorosjx",
    version=version,
    author="Jeff Shurtliff",
    author_email="jeff.shurtliff@rsa.com",
    description="Useful tools and utilities to assist in managing a Khoros JX (formerly Jive-x) or Jive-n community.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/jeffshurtliff/khorosjx",
    packages=setuptools.find_packages(),
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Intended Audience :: Information Technology",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Topic :: Communications",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Content Management System",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards",
        "Topic :: Internet :: WWW/HTTP :: Site Management"
    ],
    python_requires='>=3.6',
    install_requires=[
        "PyYAML>=5.4.1",
        "urllib3>=1.26.6",
        "requests>=2.26.0",
        "pandas>=1.3.3",
        "python-dateutil>=2.8.2",
    ],
)
| 34.333333
| 118
| 0.62682
|
import setuptools
import khorosjx.utils.version
with open("README.md", "r") as fh:
long_description = fh.read()
version = khorosjx.utils.version.__version__
setuptools.setup(
name="khorosjx",
version=version,
author="Jeff Shurtliff",
author_email="jeff.shurtliff@rsa.com",
description="Useful tools and utilities to assist in managing a Khoros JX (formerly Jive-x) or Jive-n community.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jeffshurtliff/khorosjx",
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Communications",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Content Management System",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards",
"Topic :: Internet :: WWW/HTTP :: Site Management"
],
python_requires='>=3.6',
install_requires=[
"PyYAML>=5.4.1",
"urllib3>=1.26.6",
"requests>=2.26.0",
"pandas>=1.3.3",
"python-dateutil>=2.8.2",
],
)
| true
| true
|
f707f3249684f30c3709d6e9af33fad130dcc728
| 3,698
|
py
|
Python
|
allennlp/data/dataset_readers/babi.py
|
justindujardin/allennlp
|
c4559f3751775aa8bc018db417edc119d29d8051
|
[
"Apache-2.0"
] | 2
|
2021-04-27T19:56:28.000Z
|
2021-08-19T05:34:37.000Z
|
allennlp/data/dataset_readers/babi.py
|
justindujardin/allennlp
|
c4559f3751775aa8bc018db417edc119d29d8051
|
[
"Apache-2.0"
] | 5
|
2021-05-03T14:40:33.000Z
|
2021-05-03T14:40:34.000Z
|
allennlp/data/dataset_readers/babi.py
|
justindujardin/allennlp
|
c4559f3751775aa8bc018db417edc119d29d8051
|
[
"Apache-2.0"
] | 2
|
2019-12-21T05:58:44.000Z
|
2021-08-16T07:41:21.000Z
|
import logging
from typing import Dict, List
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.fields import Field, TextField, ListField, IndexField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
logger = logging.getLogger(__name__)
@DatasetReader.register("babi")
class BabiReader(DatasetReader):
"""
Reads one single task in the bAbI tasks format as formulated in
Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks
(https://arxiv.org/abs/1502.05698). Since this class handle a single file,
if one wants to load multiple tasks together it has to merge them into a
single file and use this reader.
# Parameters
keep_sentences : `bool`, optional, (default = `False`)
Whether to keep each sentence in the context or to concatenate them.
Default is `False` that corresponds to concatenation.
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
"""
def __init__(
self,
keep_sentences: bool = False,
token_indexers: Dict[str, TokenIndexer] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._keep_sentences = keep_sentences
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset = dataset_file.readlines()
logger.info("Reading the dataset")
context: List[List[str]] = [[]]
for line in dataset:
if "?" in line:
question_str, answer, supports_str = line.replace("?", " ?").split("\t")
question = question_str.split()[1:]
supports = [int(support) - 1 for support in supports_str.split()]
yield self.text_to_instance(context, question, answer, supports)
else:
new_entry = line.replace(".", " .").split()[1:]
if line[0] == "1":
context = [new_entry]
else:
context.append(new_entry)
@overrides
def text_to_instance(
self, # type: ignore
context: List[List[str]],
question: List[str],
answer: str,
supports: List[int],
) -> Instance:
fields: Dict[str, Field] = {}
if self._keep_sentences:
context_field_ks = ListField(
[
TextField([Token(word) for word in line], self._token_indexers)
for line in context
]
)
fields["supports"] = ListField(
[IndexField(support, context_field_ks) for support in supports]
)
else:
context_field = TextField(
[Token(word) for line in context for word in line], self._token_indexers
)
fields["context"] = context_field_ks if self._keep_sentences else context_field
fields["question"] = TextField([Token(word) for word in question], self._token_indexers)
fields["answer"] = TextField([Token(answer)], self._token_indexers)
return Instance(fields)
| 35.219048
| 103
| 0.626555
|
import logging
from typing import Dict, List
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.fields import Field, TextField, ListField, IndexField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
logger = logging.getLogger(__name__)
@DatasetReader.register("babi")
class BabiReader(DatasetReader):
def __init__(
self,
keep_sentences: bool = False,
token_indexers: Dict[str, TokenIndexer] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._keep_sentences = keep_sentences
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str):
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset = dataset_file.readlines()
logger.info("Reading the dataset")
context: List[List[str]] = [[]]
for line in dataset:
if "?" in line:
question_str, answer, supports_str = line.replace("?", " ?").split("\t")
question = question_str.split()[1:]
supports = [int(support) - 1 for support in supports_str.split()]
yield self.text_to_instance(context, question, answer, supports)
else:
new_entry = line.replace(".", " .").split()[1:]
if line[0] == "1":
context = [new_entry]
else:
context.append(new_entry)
@overrides
def text_to_instance(
self, context: List[List[str]],
question: List[str],
answer: str,
supports: List[int],
) -> Instance:
fields: Dict[str, Field] = {}
if self._keep_sentences:
context_field_ks = ListField(
[
TextField([Token(word) for word in line], self._token_indexers)
for line in context
]
)
fields["supports"] = ListField(
[IndexField(support, context_field_ks) for support in supports]
)
else:
context_field = TextField(
[Token(word) for line in context for word in line], self._token_indexers
)
fields["context"] = context_field_ks if self._keep_sentences else context_field
fields["question"] = TextField([Token(word) for word in question], self._token_indexers)
fields["answer"] = TextField([Token(answer)], self._token_indexers)
return Instance(fields)
| true
| true
|
f707f42d22b005e6e216b1e9374ce2cbe6b88023
| 3,685
|
py
|
Python
|
lib/scrapy/http/response/text.py
|
langzeyu/book-crawler
|
e2d96648384658c7775bd02d94eab086c9ece677
|
[
"MIT"
] | 5
|
2019-04-02T05:00:03.000Z
|
2021-04-21T11:03:50.000Z
|
lib/scrapy/http/response/text.py
|
langzeyu/book-crawler
|
e2d96648384658c7775bd02d94eab086c9ece677
|
[
"MIT"
] | null | null | null |
lib/scrapy/http/response/text.py
|
langzeyu/book-crawler
|
e2d96648384658c7775bd02d94eab086c9ece677
|
[
"MIT"
] | null | null | null |
"""
This module implements the TextResponse class which adds encoding handling and
discovering (through HTTP headers) to base Response class.
See documentation in docs/topics/request-response.rst
"""
import re
import codecs
from scrapy.xlib.BeautifulSoup import UnicodeDammit
from scrapy.http.response import Response
from scrapy.utils.python import memoizemethod_noargs
from scrapy.utils.encoding import encoding_exists, resolve_encoding
from scrapy.conf import settings
# Register a lenient decode-error handler: Python's decoder doesn't follow
# the Unicode standard when handling bad utf-8 encoded strings (see
# http://bugs.python.org/issue8271), so replace each offending byte with
# U+FFFD and resume one position later.
codecs.register_error('scrapy_replace', lambda exc: (u'\ufffd', exc.start+1))
class TextResponse(Response):
    """Response subclass that adds encoding handling and discovery.

    The effective encoding is resolved in priority order from: the explicit
    `encoding` constructor argument, the Content-Type header, a declaration
    inside the body (subclass hook), inference over the raw bytes
    (UnicodeDammit), and finally the configured default.
    """

    _DEFAULT_ENCODING = settings['DEFAULT_RESPONSE_ENCODING']
    _ENCODING_RE = re.compile(r'charset=([\w-]+)', re.I)

    __slots__ = ['_encoding', '_cached_benc', '_cached_ubody']

    def __init__(self, *args, **kwargs):
        self._encoding = kwargs.pop('encoding', None)
        # Lazily-computed caches: inferred body encoding and unicode body.
        self._cached_benc = None
        self._cached_ubody = None
        super(TextResponse, self).__init__(*args, **kwargs)

    def _set_url(self, url):
        if isinstance(url, unicode):
            if self.encoding is None:
                raise TypeError('Cannot convert unicode url - %s has no encoding' %
                    type(self).__name__)
            self._url = url.encode(self.encoding)
        else:
            super(TextResponse, self)._set_url(url)

    def _set_body(self, body):
        # Start with an empty body so encoding inference on `self.body`
        # during resolution below sees a defined value.
        self._body = ''
        if isinstance(body, unicode):
            if self.encoding is None:
                raise TypeError('Cannot convert unicode body - %s has no encoding' %
                    type(self).__name__)
            # BUGFIX: encode with the resolved `self.encoding` (the value
            # checked just above, mirroring `_set_url`), not the raw
            # `self._encoding` attribute, which may still be None when the
            # encoding came from headers or body declaration.
            self._body = body.encode(self.encoding)
        else:
            super(TextResponse, self)._set_body(body)

    def replace(self, *args, **kwargs):
        # Preserve the resolved encoding on copies unless overridden.
        kwargs.setdefault('encoding', self.encoding)
        return Response.replace(self, *args, **kwargs)

    @property
    def encoding(self):
        return self._get_encoding(infer=True)

    def _get_encoding(self, infer=False):
        """Resolve the declared encoding, optionally falling back to
        inference from the body bytes and then to the default."""
        enc = self._declared_encoding()
        if enc and not encoding_exists(enc):
            enc = None
        if not enc and infer:
            enc = self._body_inferred_encoding()
        if not enc:
            enc = self._DEFAULT_ENCODING
        return resolve_encoding(enc)

    def _declared_encoding(self):
        return self._encoding or self._headers_encoding() \
            or self._body_declared_encoding()

    def body_as_unicode(self):
        """Return body as unicode"""
        if self._cached_ubody is None:
            self._cached_ubody = self.body.decode(self.encoding, 'scrapy_replace')
        return self._cached_ubody

    @memoizemethod_noargs
    def _headers_encoding(self):
        # Extract the charset parameter from the Content-Type header, if any.
        content_type = self.headers.get('Content-Type')
        if content_type:
            m = self._ENCODING_RE.search(content_type)
            if m:
                encoding = m.group(1)
                if encoding_exists(encoding):
                    return encoding

    def _body_inferred_encoding(self):
        if self._cached_benc is None:
            enc = self._get_encoding()
            dammit = UnicodeDammit(self.body, [enc])
            benc = dammit.originalEncoding
            self._cached_benc = benc
            # UnicodeDammit is buggy decoding utf-16
            if self._cached_ubody is None and benc != 'utf-16':
                self._cached_ubody = dammit.unicode
        return self._cached_benc

    def _body_declared_encoding(self):
        # implemented in subclasses (XmlResponse, HtmlResponse)
        return None
| 34.764151
| 84
| 0.646404
|
import re
import codecs
from scrapy.xlib.BeautifulSoup import UnicodeDammit
from scrapy.http.response import Response
from scrapy.utils.python import memoizemethod_noargs
from scrapy.utils.encoding import encoding_exists, resolve_encoding
from scrapy.conf import settings
# bad utf-8 encoded strings. see http://bugs.python.org/issue8271
codecs.register_error('scrapy_replace', lambda exc: (u'\ufffd', exc.start+1))
class TextResponse(Response):
_DEFAULT_ENCODING = settings['DEFAULT_RESPONSE_ENCODING']
_ENCODING_RE = re.compile(r'charset=([\w-]+)', re.I)
__slots__ = ['_encoding', '_cached_benc', '_cached_ubody']
def __init__(self, *args, **kwargs):
self._encoding = kwargs.pop('encoding', None)
self._cached_benc = None
self._cached_ubody = None
super(TextResponse, self).__init__(*args, **kwargs)
def _set_url(self, url):
if isinstance(url, unicode):
if self.encoding is None:
raise TypeError('Cannot convert unicode url - %s has no encoding' %
type(self).__name__)
self._url = url.encode(self.encoding)
else:
super(TextResponse, self)._set_url(url)
def _set_body(self, body):
self._body = ''
if isinstance(body, unicode):
if self.encoding is None:
raise TypeError('Cannot convert unicode body - %s has no encoding' %
type(self).__name__)
self._body = body.encode(self._encoding)
else:
super(TextResponse, self)._set_body(body)
def replace(self, *args, **kwargs):
kwargs.setdefault('encoding', self.encoding)
return Response.replace(self, *args, **kwargs)
@property
def encoding(self):
return self._get_encoding(infer=True)
def _get_encoding(self, infer=False):
enc = self._declared_encoding()
if enc and not encoding_exists(enc):
enc = None
if not enc and infer:
enc = self._body_inferred_encoding()
if not enc:
enc = self._DEFAULT_ENCODING
return resolve_encoding(enc)
def _declared_encoding(self):
return self._encoding or self._headers_encoding() \
or self._body_declared_encoding()
def body_as_unicode(self):
if self._cached_ubody is None:
self._cached_ubody = self.body.decode(self.encoding, 'scrapy_replace')
return self._cached_ubody
@memoizemethod_noargs
def _headers_encoding(self):
content_type = self.headers.get('Content-Type')
if content_type:
m = self._ENCODING_RE.search(content_type)
if m:
encoding = m.group(1)
if encoding_exists(encoding):
return encoding
def _body_inferred_encoding(self):
if self._cached_benc is None:
enc = self._get_encoding()
dammit = UnicodeDammit(self.body, [enc])
benc = dammit.originalEncoding
self._cached_benc = benc
# UnicodeDammit is buggy decoding utf-16
if self._cached_ubody is None and benc != 'utf-16':
self._cached_ubody = dammit.unicode
return self._cached_benc
def _body_declared_encoding(self):
# implemented in subclasses (XmlResponse, HtmlResponse)
return None
| true
| true
|
f707f7ff5b358bb0c46615922c3a1238ed20d144
| 11,394
|
py
|
Python
|
qlib/strategy/base.py
|
goodchinas/qlib
|
d01de411a879dc98dfa7eab4da41d52b903f466c
|
[
"MIT"
] | null | null | null |
qlib/strategy/base.py
|
goodchinas/qlib
|
d01de411a879dc98dfa7eab4da41d52b903f466c
|
[
"MIT"
] | null | null | null |
qlib/strategy/base.py
|
goodchinas/qlib
|
d01de411a879dc98dfa7eab4da41d52b903f466c
|
[
"MIT"
] | 1
|
2021-11-04T09:24:45.000Z
|
2021-11-04T09:24:45.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from qlib.backtest.exchange import Exchange
from qlib.backtest.position import BasePosition
from typing import List, Tuple, Union
from ..model.base import BaseModel
from ..data.dataset import DatasetH
from ..data.dataset.utils import convert_index_format
from ..rl.interpreter import ActionInterpreter, StateInterpreter
from ..utils import init_instance_by_config
from ..backtest.utils import CommonInfrastructure, LevelInfrastructure, TradeCalendarManager
from ..backtest.decision import BaseTradeDecision
__all__ = ["BaseStrategy", "ModelStrategy", "RLStrategy", "RLIntStrategy"]
class BaseStrategy:
    """Base strategy for trading"""

    def __init__(
        self,
        outer_trade_decision: BaseTradeDecision = None,
        level_infra: LevelInfrastructure = None,
        common_infra: CommonInfrastructure = None,
        trade_exchange: Exchange = None,
    ):
        """
        Parameters
        ----------
        outer_trade_decision : BaseTradeDecision, optional
            the trade decision of the outer strategy which this strategy relies on, and it will be traded in [start_time, end_time], by default None

            - If the strategy is used to split trade decision, it will be used
            - If the strategy is used for portfolio management, it can be ignored
        level_infra : LevelInfrastructure, optional
            level shared infrastructure for backtesting, including trade calendar
        common_infra : CommonInfrastructure, optional
            common infrastructure for backtesting, including trade_account, trade_exchange, etc.
        trade_exchange : Exchange
            exchange that provides market info, used to deal orders and generate reports

            - If `trade_exchange` is None, self.trade_exchange will be set with common_infra
            - It allows different trade_exchanges to be used in different executions.
            - For example:

                - In daily execution, both the daily exchange and the minutely one are usable, but the daily exchange is recommended because it runs faster.
                - In minutely execution, the daily exchange is not usable, only the minutely exchange is recommended.
        """
        self._reset(level_infra=level_infra, common_infra=common_infra, outer_trade_decision=outer_trade_decision)
        self._trade_exchange = trade_exchange

    @property
    def trade_calendar(self) -> TradeCalendarManager:
        # Trade calendar of this strategy's execution level.
        return self.level_infra.get("trade_calendar")

    @property
    def trade_position(self) -> BasePosition:
        # Current position of the shared trade account.
        return self.common_infra.get("trade_account").current_position

    @property
    def trade_exchange(self) -> Exchange:
        """get trade exchange in a prioritized order: the instance-specific
        exchange first, then the one shared via common_infra"""
        return getattr(self, "_trade_exchange", None) or self.common_infra.get("trade_exchange")

    def reset_level_infra(self, level_infra: LevelInfrastructure):
        # First call installs the infra; later calls merge into it.
        if not hasattr(self, "level_infra"):
            self.level_infra = level_infra
        else:
            self.level_infra.update(level_infra)

    def reset_common_infra(self, common_infra: CommonInfrastructure):
        # First call installs the infra; later calls merge into it.
        if not hasattr(self, "common_infra"):
            self.common_infra: CommonInfrastructure = common_infra
        else:
            self.common_infra.update(common_infra)

    def reset(
        self,
        level_infra: LevelInfrastructure = None,
        common_infra: CommonInfrastructure = None,
        outer_trade_decision=None,
        **kwargs,
    ):
        """
        - reset `level_infra`, used to reset trade calendar, etc.
        - reset `common_infra`, used to reset `trade_account`, `trade_exchange`, etc.
        - reset `outer_trade_decision`, used to make split decision

        **NOTE**:
        splitting this function into `reset` and `_reset` makes the following case more convenient:
        1. Users want to initialize their strategy by overriding `reset`, but they don't want to affect the `_reset` called
        at initialization
        """
        self._reset(
            level_infra=level_infra, common_infra=common_infra, outer_trade_decision=outer_trade_decision, **kwargs
        )

    def _reset(
        self,
        level_infra: LevelInfrastructure = None,
        common_infra: CommonInfrastructure = None,
        outer_trade_decision=None,
    ):
        """
        Please refer to the docs of `reset`
        """
        # Only non-None arguments are applied; passing None leaves the
        # corresponding state untouched.
        if level_infra is not None:
            self.reset_level_infra(level_infra)
        if common_infra is not None:
            self.reset_common_infra(common_infra)
        if outer_trade_decision is not None:
            self.outer_trade_decision = outer_trade_decision

    def generate_trade_decision(self, execute_result=None):
        """Generate trade decision in each trading bar

        Parameters
        ----------
        execute_result : List[object], optional
            the executed result for trade decision, by default None

            - When calling generate_trade_decision for the first time, `execute_result` could be None
        """
        raise NotImplementedError("generate_trade_decision is not implemented!")

    def update_trade_decision(
        self, trade_decision: BaseTradeDecision, trade_calendar: TradeCalendarManager
    ) -> Union[BaseTradeDecision, None]:
        """
        update trade decision in each step of inner execution, this method enables all orders

        Parameters
        ----------
        trade_decision : BaseTradeDecision
            the trade decision that will be updated
        trade_calendar : TradeCalendarManager
            The calendar of the **inner strategy**!!!!!

        Returns
        -------
        BaseTradeDecision:
        """
        # default to return None, which indicates that the trade decision is not changed
        return None

    def alter_outer_trade_decision(self, outer_trade_decision: BaseTradeDecision):
        """
        A method for updating the outer_trade_decision.
        The outer strategy may change its decision during updating.

        Parameters
        ----------
        outer_trade_decision : BaseTradeDecision
            the decision updated by the outer strategy
        """
        # default to reset the decision directly
        # NOTE: normally, user should do something to the strategy due to the change of outer decision
        raise NotImplementedError(f"Please implement the `alter_outer_trade_decision` method")

    # helper methods: not necessary but for convenience
    def get_data_cal_avail_range(self, rtype: str = "full") -> Tuple[int, int]:
        """
        return data calendar's available decision range for `self` strategy
        the range considers the following factors

        - data calendar in the charge of `self` strategy
        - trading range limitation from the decision of the outer strategy

        related methods

        - TradeCalendarManager.get_data_cal_range
        - BaseTradeDecision.get_data_cal_range_limit

        Parameters
        ----------
        rtype: str
            - "full": return the available data index range of the strategy from `start_time` to `end_time`
            - "step": return the available data index range of the strategy of the current step

        Returns
        -------
        Tuple[int, int]:
            the available range, both sides are closed
        """
        cal_range = self.trade_calendar.get_data_cal_range(rtype=rtype)
        if self.outer_trade_decision is None:
            raise ValueError(f"There is not limitation for strategy {self}")
        range_limit = self.outer_trade_decision.get_data_cal_range_limit(rtype=rtype)
        # Intersect this level's calendar range with the outer decision's limit.
        return max(cal_range[0], range_limit[0]), min(cal_range[1], range_limit[1])
class ModelStrategy(BaseStrategy):
    """Model-based trading strategy, use model to make predictions for trading"""

    def __init__(
        self,
        model: BaseModel,
        dataset: DatasetH,
        outer_trade_decision: BaseTradeDecision = None,
        level_infra: LevelInfrastructure = None,
        common_infra: CommonInfrastructure = None,
        **kwargs,
    ):
        """
        Parameters
        ----------
        model : BaseModel
            the model used when making predictions
        dataset : DatasetH
            provide test data for the model
        kwargs : dict
            arguments that will be passed into the `reset` method
        """
        super(ModelStrategy, self).__init__(outer_trade_decision, level_infra, common_infra, **kwargs)
        self.model = model
        self.dataset = dataset
        # Predict once up-front; scores are indexed by datetime for lookup
        # during backtesting.
        self.pred_scores = convert_index_format(self.model.predict(dataset), level="datetime")

    def _update_model(self):
        """
        When using online data, update the model in each bar with the following steps:
        - update the dataset with online data; the dataset should support online update
        - make the latest prediction scores for the new bar
        - update the pred score into the latest prediction
        """
        raise NotImplementedError("_update_model is not implemented!")
class RLStrategy(BaseStrategy):
    """RL-based strategy"""

    def __init__(
        self,
        policy,
        outer_trade_decision: BaseTradeDecision = None,
        level_infra: LevelInfrastructure = None,
        common_infra: CommonInfrastructure = None,
        **kwargs,
    ):
        """
        Parameters
        ----------
        policy :
            RL policy used to generate actions
        """
        # Initialize the common strategy infrastructure, then attach the policy.
        super().__init__(outer_trade_decision, level_infra, common_infra, **kwargs)
        self.policy = policy
class RLIntStrategy(RLStrategy):
    """(RL)-based (Strategy) with (Int)erpreter"""

    def __init__(
        self,
        policy,
        state_interpreter: Union[dict, StateInterpreter],
        action_interpreter: Union[dict, ActionInterpreter],
        outer_trade_decision: BaseTradeDecision = None,
        level_infra: LevelInfrastructure = None,
        common_infra: CommonInfrastructure = None,
        **kwargs,
    ):
        """
        Parameters
        ----------
        state_interpreter : Union[dict, StateInterpreter]
            interpreter that interprets the qlib execute result into an rl env state
        action_interpreter : Union[dict, ActionInterpreter]
            interpreter that interprets the rl agent action into a qlib order list
        start_time : Union[str, pd.Timestamp], optional
            start time of trading, by default None
        end_time : Union[str, pd.Timestamp], optional
            end time of trading, by default None
        """
        super(RLIntStrategy, self).__init__(policy, outer_trade_decision, level_infra, common_infra, **kwargs)
        # NOTE: `self.policy` is already set by RLStrategy.__init__ above;
        # the redundant re-assignment was removed.
        # Interpreters may be passed as config dicts and are instantiated here.
        self.state_interpreter = init_instance_by_config(state_interpreter, accept_types=StateInterpreter)
        self.action_interpreter = init_instance_by_config(action_interpreter, accept_types=ActionInterpreter)

    def generate_trade_decision(self, execute_result=None):
        """Map the latest execute result to an env state, query the policy
        for an action, and interpret that action into a trade decision."""
        _interpret_state = self.state_interpreter.interpret(execute_result=execute_result)
        _action = self.policy.step(_interpret_state)
        _trade_decision = self.action_interpreter.interpret(action=_action)
        return _trade_decision
| 39.289655
| 143
| 0.669124
|
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from qlib.backtest.exchange import Exchange
from qlib.backtest.position import BasePosition
from typing import List, Tuple, Union
from ..model.base import BaseModel
from ..data.dataset import DatasetH
from ..data.dataset.utils import convert_index_format
from ..rl.interpreter import ActionInterpreter, StateInterpreter
from ..utils import init_instance_by_config
from ..backtest.utils import CommonInfrastructure, LevelInfrastructure, TradeCalendarManager
from ..backtest.decision import BaseTradeDecision
__all__ = ["BaseStrategy", "ModelStrategy", "RLStrategy", "RLIntStrategy"]
class BaseStrategy:
    """Base class for trading strategies.

    A strategy holds two infrastructure containers: ``level_infra`` (objects
    scoped to one trading level, e.g. the trade calendar) and ``common_infra``
    (objects shared across levels, e.g. the trade account and exchange).
    Subclasses implement ``generate_trade_decision``.
    """

    def __init__(
        self,
        outer_trade_decision: BaseTradeDecision = None,
        level_infra: LevelInfrastructure = None,
        common_infra: CommonInfrastructure = None,
        trade_exchange: Exchange = None,
    ):
        # Wire up whichever infra/decision pieces were provided; an explicit
        # trade_exchange takes precedence over the one in common_infra.
        self._reset(level_infra=level_infra, common_infra=common_infra, outer_trade_decision=outer_trade_decision)
        self._trade_exchange = trade_exchange

    @property
    def trade_calendar(self) -> TradeCalendarManager:
        """Trade calendar of the current trading level (from level_infra)."""
        return self.level_infra.get("trade_calendar")

    @property
    def trade_position(self) -> BasePosition:
        """Current position of the trade account held in common_infra."""
        return self.common_infra.get("trade_account").current_position

    @property
    def trade_exchange(self) -> Exchange:
        # Fall back to the common-infra exchange when no explicit exchange was
        # set (NOTE: a falsy explicit exchange also falls through to the infra one).
        return getattr(self, "_trade_exchange", None) or self.common_infra.get("trade_exchange")

    def reset_level_infra(self, level_infra: LevelInfrastructure):
        """Set level_infra on first call; merge via ``update`` afterwards."""
        if not hasattr(self, "level_infra"):
            self.level_infra = level_infra
        else:
            self.level_infra.update(level_infra)

    def reset_common_infra(self, common_infra: CommonInfrastructure):
        """Set common_infra on first call; merge via ``update`` afterwards."""
        if not hasattr(self, "common_infra"):
            self.common_infra: CommonInfrastructure = common_infra
        else:
            self.common_infra.update(common_infra)

    def reset(
        self,
        level_infra: LevelInfrastructure = None,
        common_infra: CommonInfrastructure = None,
        outer_trade_decision=None,
        **kwargs,
    ):
        """Public reset hook; delegates to ``_reset`` (subclasses may extend)."""
        self._reset(
            level_infra=level_infra, common_infra=common_infra, outer_trade_decision=outer_trade_decision, **kwargs
        )

    def _reset(
        self,
        level_infra: LevelInfrastructure = None,
        common_infra: CommonInfrastructure = None,
        outer_trade_decision=None,
    ):
        # Only non-None arguments are applied, so a partial reset leaves the
        # other pieces untouched.
        if level_infra is not None:
            self.reset_level_infra(level_infra)
        if common_infra is not None:
            self.reset_common_infra(common_infra)
        if outer_trade_decision is not None:
            self.outer_trade_decision = outer_trade_decision

    def generate_trade_decision(self, execute_result=None):
        """Produce a trade decision for the current bar; must be overridden."""
        raise NotImplementedError("generate_trade_decision is not implemented!")

    def update_trade_decision(
        self, trade_decision: BaseTradeDecision, trade_calendar: TradeCalendarManager
    ) -> Union[BaseTradeDecision, None]:
        # Default: no update — returning None means "keep the decision as-is".
        return None

    def alter_outer_trade_decision(self, outer_trade_decision: BaseTradeDecision):
        """Hook for adjusting the outer decision before use; must be overridden."""
        raise NotImplementedError(f"Please implement the `alter_outer_trade_decision` method")

    def get_data_cal_avail_range(self, rtype: str = "full") -> Tuple[int, int]:
        """Intersect this level's data-calendar range with the outer decision's limit.

        Raises
        ------
        ValueError
            If there is no outer trade decision to provide a range limit.
        """
        cal_range = self.trade_calendar.get_data_cal_range(rtype=rtype)
        if self.outer_trade_decision is None:
            raise ValueError(f"There is not limitation for strategy {self}")
        range_limit = self.outer_trade_decision.get_data_cal_range_limit(rtype=rtype)
        return max(cal_range[0], range_limit[0]), min(cal_range[1], range_limit[1])
class ModelStrategy(BaseStrategy):
    """Strategy driven by a prediction model over a qlib dataset."""

    def __init__(
        self,
        model: BaseModel,
        dataset: DatasetH,
        outer_trade_decision: BaseTradeDecision = None,
        level_infra: LevelInfrastructure = None,
        common_infra: CommonInfrastructure = None,
        **kwargs,
    ):
        """
        Parameters
        ----------
        model : BaseModel
            the model used to score instruments
        dataset : DatasetH
            the dataset the model predicts on
        """
        super().__init__(outer_trade_decision, level_infra, common_infra, **kwargs)
        self.model = model
        self.dataset = dataset
        # Precompute prediction scores once, indexed by datetime.
        raw_pred = self.model.predict(dataset)
        self.pred_scores = convert_index_format(raw_pred, level="datetime")

    def _update_model(self):
        """Online-update hook; subclasses supporting online data must override."""
        raise NotImplementedError("_update_model is not implemented!")
class RLStrategy(BaseStrategy):
    """Strategy whose actions are produced by a reinforcement-learning policy."""

    def __init__(
        self,
        policy,
        outer_trade_decision: BaseTradeDecision = None,
        level_infra: LevelInfrastructure = None,
        common_infra: CommonInfrastructure = None,
        **kwargs,
    ):
        # `policy` is the RL policy used to generate actions.
        super().__init__(outer_trade_decision, level_infra, common_infra, **kwargs)
        self.policy = policy
class RLIntStrategy(RLStrategy):
    """RL strategy that talks to its policy through state/action interpreters."""

    def __init__(
        self,
        policy,
        state_interpreter: Union[dict, StateInterpreter],
        action_interpreter: Union[dict, ActionInterpreter],
        outer_trade_decision: BaseTradeDecision = None,
        level_infra: LevelInfrastructure = None,
        common_infra: CommonInfrastructure = None,
        **kwargs,
    ):
        # Interpreters may be passed as instances or as config dicts;
        # init_instance_by_config accepts both forms.
        super().__init__(policy, outer_trade_decision, level_infra, common_infra, **kwargs)
        self.policy = policy
        self.state_interpreter = init_instance_by_config(state_interpreter, accept_types=StateInterpreter)
        self.action_interpreter = init_instance_by_config(action_interpreter, accept_types=ActionInterpreter)

    def generate_trade_decision(self, execute_result=None):
        # execution result -> env state -> policy action -> trade decision
        env_state = self.state_interpreter.interpret(execute_result=execute_result)
        agent_action = self.policy.step(env_state)
        return self.action_interpreter.interpret(action=agent_action)
| true
| true
|
f707f9f5620e2e46442586433401d453c0cdd2a0
| 26,593
|
py
|
Python
|
tensorflow/python/ops/ragged/ragged_getitem_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 7
|
2022-03-04T21:14:47.000Z
|
2022-03-22T23:07:39.000Z
|
tensorflow/python/ops/ragged/ragged_getitem_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 19
|
2021-12-28T12:44:55.000Z
|
2022-01-13T08:11:28.000Z
|
tensorflow/python/ops/ragged/ragged_getitem_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 1
|
2021-11-21T02:32:27.000Z
|
2021-11-21T02:32:27.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for third_party.tensorflow.python.ops.ragged_tensor."""
import re
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
from tensorflow.python.platform import googletest
class _SliceBuilder:
"""Helper to construct arguments for __getitem__.
Usage: _SliceBuilder()[<expr>] slice_spec Python generates for <expr>.
"""
def __getitem__(self, slice_spec):
return slice_spec
# Module-level singleton: lets tests write slice specs as SLICE_BUILDER[...].
SLICE_BUILDER = _SliceBuilder()
def _make_tensor_slice_spec(slice_spec, use_constant=True):
"""Wraps all integers in an extended slice spec w/ a tensor.
This function is used to help test slicing when the slice spec contains
tensors, rather than integers.
Args:
slice_spec: The extended slice spec.
use_constant: If true, then wrap each integer with a tf.constant. If false,
then wrap each integer with a tf.placeholder.
Returns:
A copy of slice_spec, but with each integer i replaced with tf.constant(i).
"""
def make_piece_scalar(piece):
if isinstance(piece, int):
scalar = constant_op.constant(piece)
if use_constant:
return scalar
else:
return array_ops.placeholder_with_default(scalar, [])
elif isinstance(piece, slice):
return slice(
make_piece_scalar(piece.start), make_piece_scalar(piece.stop),
make_piece_scalar(piece.step))
else:
return piece
if isinstance(slice_spec, tuple):
return tuple(make_piece_scalar(piece) for piece in slice_spec)
else:
return make_piece_scalar(slice_spec)
# Example 2D ragged tensor value with one ragged dimension and with scalar
# values, expressed as nested python lists and as splits+values.
# Note: splits are cumulative row boundaries, so splits[-1] == len(values).
EXAMPLE_RAGGED_TENSOR_2D = [[b'a', b'b'], [b'c', b'd', b'e'], [b'f'], [],
                            [b'g']]
EXAMPLE_RAGGED_TENSOR_2D_SPLITS = [0, 2, 5, 6, 6, 7]
EXAMPLE_RAGGED_TENSOR_2D_VALUES = ['a', 'b', 'c', 'd', 'e', 'f', 'g']

# Example 4D ragged tensor value, with two ragged dimensions and with values
# whose shape is [2], expressed as nested python lists and as splits+values.
EXAMPLE_RAGGED_TENSOR_4D = [
    [                                     # rt[0]
        [[1, 2], [3, 4], [5, 6]],         # rt[0][0]
        [[7, 8], [9, 10], [11, 12]]],     # rt[0][1]
    [],                                   # rt[1]
    [                                     # rt[2]
        [[13, 14], [15, 16], [17, 18]]],  # rt[2][0]
    [                                     # rt[3]
        [[19, 20]]]                       # rt[3][0]
]  # pyformat: disable
EXAMPLE_RAGGED_TENSOR_4D_SPLITS1 = [0, 2, 2, 3, 4]
EXAMPLE_RAGGED_TENSOR_4D_SPLITS2 = [0, 3, 6, 9, 10]
EXAMPLE_RAGGED_TENSOR_4D_VALUES = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
                                   [11, 12], [13, 14], [15, 16], [17, 18],
                                   [19, 20]]

# Example 3D ragged tensor with uniform_row_lengths: every outer row holds
# exactly ROWLEN (= 3) inner rows; only the innermost dimension is ragged.
EXAMPLE_RAGGED_TENSOR_3D = [[[1, 2, 3], [4], [5, 6]], [[], [7, 8, 9], []]]
EXAMPLE_RAGGED_TENSOR_3D_ROWLEN = 3
EXAMPLE_RAGGED_TENSOR_3D_SPLITS = [0, 3, 4, 6, 6, 9, 9]
EXAMPLE_RAGGED_TENSOR_3D_VALUES = [1, 2, 3, 4, 5, 6, 7, 8, 9]
@test_util.run_all_in_graph_and_eager_modes
class RaggedGetItemTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  """Parameterized tests for `RaggedTensor.__getitem__`.

  Each test builds a RaggedTensor from the EXAMPLE_RAGGED_TENSOR_* constants
  and checks that slicing it agrees with slicing the equivalent nested Python
  lists (or that the expected error is raised).
  """

  longMessage = True  # Property in unittest.Testcase. pylint: disable=invalid-name

  #=============================================================================
  # RaggedTensor.__getitem__
  #=============================================================================

  def _TestGetItem(self, rt, slice_spec, expected, expected_shape=None):
    """Helper function for testing RaggedTensor.__getitem__.

    Checks that calling `rt.__getitem__(slice_spec)` returns the expected
    value.  Checks three different configurations for each slice spec:

      * Call __getitem__ with the slice spec as-is (with int values)
      * Call __getitem__ with int values in the slice spec wrapped in
        `tf.constant()`.
      * Call __getitem__ with int values in the slice spec wrapped in
        `tf.compat.v1.placeholder()` (so value is not known at graph
        construction time).

    Args:
      rt: The RaggedTensor to test.
      slice_spec: The slice spec.
      expected: The expected value of rt.__getitem__(slice_spec), as a python
        list; or an exception class.
      expected_shape: The expected shape for `rt.__getitem__(slice_spec)`.
    """
    tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)
    tensor_slice_spec2 = _make_tensor_slice_spec(slice_spec, False)
    value1 = rt.__getitem__(slice_spec)
    value2 = rt.__getitem__(tensor_slice_spec1)
    value3 = rt.__getitem__(tensor_slice_spec2)
    self.assertAllEqual(value1, expected, 'slice_spec=%s' % (slice_spec,))
    self.assertAllEqual(value2, expected, 'slice_spec=%s' % (slice_spec,))
    self.assertAllEqual(value3, expected, 'slice_spec=%s' % (slice_spec,))
    if expected_shape is not None:
      value1.shape.assert_is_compatible_with(expected_shape)
      value2.shape.assert_is_compatible_with(expected_shape)
      value3.shape.assert_is_compatible_with(expected_shape)

  def _TestGetItemException(self, rt, slice_spec, expected, message):
    """Helper function for testing RaggedTensor.__getitem__ exceptions."""
    tensor_slice_spec = _make_tensor_slice_spec(slice_spec, True)
    with self.assertRaisesRegex(expected, message):
      self.evaluate(rt.__getitem__(slice_spec))
    with self.assertRaisesRegex(expected, message):
      self.evaluate(rt.__getitem__(tensor_slice_spec))

  @parameterized.parameters(
      # Tests for rt[i]
      (SLICE_BUILDER[-5], EXAMPLE_RAGGED_TENSOR_2D[-5]),
      (SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]),
      (SLICE_BUILDER[-1], EXAMPLE_RAGGED_TENSOR_2D[-1]),
      (SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]),
      (SLICE_BUILDER[1], EXAMPLE_RAGGED_TENSOR_2D[1]),
      (SLICE_BUILDER[4], EXAMPLE_RAGGED_TENSOR_2D[4]),
      # Tests for rt[i:]
      (SLICE_BUILDER[-6:], EXAMPLE_RAGGED_TENSOR_2D[-6:]),
      (SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]),
      (SLICE_BUILDER[-1:], EXAMPLE_RAGGED_TENSOR_2D[-1:]),
      (SLICE_BUILDER[0:], EXAMPLE_RAGGED_TENSOR_2D[0:]),
      (SLICE_BUILDER[3:], EXAMPLE_RAGGED_TENSOR_2D[3:]),
      (SLICE_BUILDER[5:], EXAMPLE_RAGGED_TENSOR_2D[5:]),
      # Tests for rt[:j]
      (SLICE_BUILDER[:-6], EXAMPLE_RAGGED_TENSOR_2D[:-6]),
      (SLICE_BUILDER[:-3], EXAMPLE_RAGGED_TENSOR_2D[:-3]),
      (SLICE_BUILDER[:-1], EXAMPLE_RAGGED_TENSOR_2D[:-1]),
      (SLICE_BUILDER[:0], EXAMPLE_RAGGED_TENSOR_2D[:0]),
      (SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]),
      (SLICE_BUILDER[:5], EXAMPLE_RAGGED_TENSOR_2D[:5]),
      # Tests for rt[i:j]
      (SLICE_BUILDER[0:3], EXAMPLE_RAGGED_TENSOR_2D[0:3]),
      (SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]),
      (SLICE_BUILDER[-5:3], EXAMPLE_RAGGED_TENSOR_2D[-5:3]),
      (SLICE_BUILDER[3:1], EXAMPLE_RAGGED_TENSOR_2D[3:1]),
      (SLICE_BUILDER[-1:1], EXAMPLE_RAGGED_TENSOR_2D[-1:1]),
      (SLICE_BUILDER[1:-1], EXAMPLE_RAGGED_TENSOR_2D[1:-1]),
      # Tests for rt[i, j]
      (SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]),
      (SLICE_BUILDER[1, 2], EXAMPLE_RAGGED_TENSOR_2D[1][2]),
      (SLICE_BUILDER[-1, 0], EXAMPLE_RAGGED_TENSOR_2D[-1][0]),
      (SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]),
      (SLICE_BUILDER[:], EXAMPLE_RAGGED_TENSOR_2D),
      (SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_2D),
      # Empty slice spec.
      ([], EXAMPLE_RAGGED_TENSOR_2D),
      # Test for ellipsis
      (SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_2D),
      (SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_2D[2]),
      (SLICE_BUILDER[..., :], EXAMPLE_RAGGED_TENSOR_2D),
      (SLICE_BUILDER[..., 2, 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]),
      (SLICE_BUILDER[2, ..., 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]),
      (SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_2D[2][0]),
      # Test for array_ops.newaxis
      (SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_2D]),
      (SLICE_BUILDER[:, array_ops.newaxis],
       [[row] for row in EXAMPLE_RAGGED_TENSOR_2D]),
      # Slicing inner ragged dimensions.
      (SLICE_BUILDER[-1:,
                     1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D[-1:]]),
      (SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D]),
      (SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_2D]),
      # Strided slices
      (SLICE_BUILDER[::2], EXAMPLE_RAGGED_TENSOR_2D[::2]),
      (SLICE_BUILDER[::-1], EXAMPLE_RAGGED_TENSOR_2D[::-1]),
      (SLICE_BUILDER[::-2], EXAMPLE_RAGGED_TENSOR_2D[::-2]),
      (SLICE_BUILDER[::-3], EXAMPLE_RAGGED_TENSOR_2D[::-3]),
      (SLICE_BUILDER[:, ::2], [row[::2] for row in EXAMPLE_RAGGED_TENSOR_2D]),
      (SLICE_BUILDER[:, ::-1], [row[::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
      (SLICE_BUILDER[:, ::-2], [row[::-2] for row in EXAMPLE_RAGGED_TENSOR_2D]),
      (SLICE_BUILDER[:, ::-3], [row[::-3] for row in EXAMPLE_RAGGED_TENSOR_2D]),
      (SLICE_BUILDER[:, 2::-1],
       [row[2::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
      (SLICE_BUILDER[:, -1::-1],
       [row[-1::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
      (SLICE_BUILDER[..., -1::-1],
       [row[-1::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
      (SLICE_BUILDER[:, 2::-2],
       [row[2::-2] for row in EXAMPLE_RAGGED_TENSOR_2D]),
      (SLICE_BUILDER[::-1, ::-1],
       [row[::-1] for row in EXAMPLE_RAGGED_TENSOR_2D[::-1]]),
  )  # pyformat: disable
  def testWithRaggedRank1(self, slice_spec, expected):
    """Test that rt.__getitem__(slice_spec) == expected."""
    # Ragged tensor
    rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES,
                                      EXAMPLE_RAGGED_TENSOR_2D_SPLITS)
    self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)
    self._TestGetItem(rt, slice_spec, expected)

  # pylint: disable=g-complex-comprehension
  @parameterized.parameters([(start, stop)
                             for start in [-2, -1, None, 0, 1, 2]
                             for stop in [-2, -1, None, 0, 1, 2]])
  def testWithStridedSlices(self, start, stop):
    test_value = [[1, 2, 3, 4, 5], [6, 7], [8, 9, 10], [], [9],
                  [1, 2, 3, 4, 5, 6, 7, 8]]
    rt = ragged_factory_ops.constant(test_value)
    for step in [-3, -2, -1, 1, 2, 3]:
      # Slice outer dimension
      self.assertAllEqual(rt[start:stop:step], test_value[start:stop:step],
                          'slice=%s:%s:%s' % (start, stop, step))
      # Slice inner dimension
      self.assertAllEqual(rt[:, start:stop:step],
                          [row[start:stop:step] for row in test_value],
                          'slice=%s:%s:%s' % (start, stop, step))

  # pylint: disable=invalid-slice-index
  @parameterized.parameters(
      # Tests for out-of-bound errors
      (SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError),
       '.*out of bounds.*'),
      (SLICE_BUILDER[-6], (IndexError, ValueError, errors.InvalidArgumentError),
       '.*out of bounds.*'),
      (SLICE_BUILDER[0, 2], (IndexError, ValueError,
                             errors.InvalidArgumentError), '.*out of bounds.*'),
      (SLICE_BUILDER[3, 0], (IndexError, ValueError,
                             errors.InvalidArgumentError), '.*out of bounds.*'),
      # Indexing into an inner ragged dimension
      (SLICE_BUILDER[:, 3], ValueError,
       'Cannot index into an inner ragged dimension'),
      (SLICE_BUILDER[:1, 3], ValueError,
       'Cannot index into an inner ragged dimension'),
      (SLICE_BUILDER[..., 3], ValueError,
       'Cannot index into an inner ragged dimension'),
      # Tests for type errors
      (SLICE_BUILDER[0.5], TypeError, re.escape(array_ops._SLICE_TYPE_ERROR)),
      (SLICE_BUILDER[1:3:0.5], TypeError, re.escape(
          array_ops._SLICE_TYPE_ERROR)),
      (SLICE_BUILDER[:, 1:3:0.5], TypeError,
       'slice strides must be integers or None'),
      (SLICE_BUILDER[:, 0.5:1.5], TypeError,
       'slice offsets must be integers or None'),
      (SLICE_BUILDER['foo'], TypeError, re.escape(array_ops._SLICE_TYPE_ERROR)),
      (SLICE_BUILDER[:, 'foo':'foo'], TypeError,
       'slice offsets must be integers or None'),
      # Tests for other errors
      (SLICE_BUILDER[..., 0, 0,
                     0], IndexError, 'Too many indices for RaggedTensor'),
  )
  def testErrorsWithRaggedRank1(self, slice_spec, expected, message):
    """Test that rt.__getitem__(slice_spec) == expected."""
    # Ragged tensor
    rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES,
                                      EXAMPLE_RAGGED_TENSOR_2D_SPLITS)
    self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)
    self._TestGetItemException(rt, slice_spec, expected, message)

  @parameterized.parameters(
      # Tests for rt[index, index, ...]
      (SLICE_BUILDER[2, 0], EXAMPLE_RAGGED_TENSOR_4D[2][0]),
      (SLICE_BUILDER[2, 0, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]),
      (SLICE_BUILDER[2, 0, 1, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1][1]),
      (SLICE_BUILDER[2, 0, 1:], EXAMPLE_RAGGED_TENSOR_4D[2][0][1:]),
      (SLICE_BUILDER[2, 0, 1:, 1:], [[16], [18]]),
      (SLICE_BUILDER[2, 0, :, 1], [14, 16, 18]),
      (SLICE_BUILDER[2, 0, 1, :], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]),
      # Tests for rt[index, slice, ...]
      (SLICE_BUILDER[0, :], EXAMPLE_RAGGED_TENSOR_4D[0]),
      (SLICE_BUILDER[1, :], EXAMPLE_RAGGED_TENSOR_4D[1]),
      (SLICE_BUILDER[0, :, :, 1], [[2, 4, 6], [8, 10, 12]]),
      (SLICE_BUILDER[1, :, :, 1], []),
      (SLICE_BUILDER[2, :, :, 1], [[14, 16, 18]]),
      (SLICE_BUILDER[3, :, :, 1], [[20]]),
      # Tests for rt[slice, slice, ...]
      (SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_4D),
      (SLICE_BUILDER[:, :, :, 1], [[[2, 4, 6], [8, 10, 12]], [], [[14, 16, 18]],
                                   [[20]]]),
      (SLICE_BUILDER[1:, :, :, 1], [[], [[14, 16, 18]], [[20]]]),
      (SLICE_BUILDER[-3:, :, :, 1], [[], [[14, 16, 18]], [[20]]]),
      # Test for ellipsis
      (SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_4D),
      (SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_4D[2]),
      (SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_4D[2][0]),
      (SLICE_BUILDER[..., 0], [[[1, 3, 5], [7, 9, 11]], [], [[13, 15, 17]],
                               [[19]]]),
      (SLICE_BUILDER[2, ..., 0], [[13, 15, 17]]),
      (SLICE_BUILDER[2, 0, ..., 0], [13, 15, 17]),
      # Test for array_ops.newaxis
      (SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_4D]),
      (SLICE_BUILDER[:, array_ops.newaxis],
       [[row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
      # Empty slice spec.
      ([], EXAMPLE_RAGGED_TENSOR_4D),
      # Slicing inner ragged dimensions.
      (SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_4D]),
      (SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_4D]),
      (SLICE_BUILDER[:, :, :-1],
       [[v[:-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
      (SLICE_BUILDER[:, :, 1:2],
       [[v[1:2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
      (SLICE_BUILDER[1:, 1:3, 1:2],
       [[v[1:2] for v in row[1:3]] for row in EXAMPLE_RAGGED_TENSOR_4D[1:]]),
      # Strided slices
      (SLICE_BUILDER[::2], EXAMPLE_RAGGED_TENSOR_4D[::2]),
      (SLICE_BUILDER[::-1], EXAMPLE_RAGGED_TENSOR_4D[::-1]),
      (SLICE_BUILDER[::-2], EXAMPLE_RAGGED_TENSOR_4D[::-2]),
      (SLICE_BUILDER[1::2], EXAMPLE_RAGGED_TENSOR_4D[1::2]),
      (SLICE_BUILDER[:, ::2], [row[::2] for row in EXAMPLE_RAGGED_TENSOR_4D]),
      (SLICE_BUILDER[:, 1::2], [row[1::2] for row in EXAMPLE_RAGGED_TENSOR_4D]),
      (SLICE_BUILDER[:, :, ::2],
       [[v[::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
      (SLICE_BUILDER[:, :, 1::2],
       [[v[1::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
      (SLICE_BUILDER[:, :, ::-1],
       [[v[::-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
      (SLICE_BUILDER[:, :, ::-2],
       [[v[::-2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
      (SLICE_BUILDER[..., ::-1, :],
       [[v[::-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
      (SLICE_BUILDER[..., ::-1], [[[v[::-1] for v in col] for col in row]
                                  for row in EXAMPLE_RAGGED_TENSOR_4D]),
  )  # pyformat: disable
  def testWithRaggedRank2(self, slice_spec, expected):
    """Test that rt.__getitem__(slice_spec) == expected."""
    rt = RaggedTensor.from_nested_row_splits(
        EXAMPLE_RAGGED_TENSOR_4D_VALUES,
        [EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])
    self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_4D)
    self._TestGetItem(rt, slice_spec, expected)

  @parameterized.parameters(
      # Test for errors in unsupported cases
      (SLICE_BUILDER[:, 0], ValueError,
       'Cannot index into an inner ragged dimension.'),
      (SLICE_BUILDER[:, :, 0], ValueError,
       'Cannot index into an inner ragged dimension.'),
      # Test for out-of-bounds errors.
      (SLICE_BUILDER[1, 0], (IndexError, ValueError,
                             errors.InvalidArgumentError), '.*out of bounds.*'),
      (SLICE_BUILDER[0, 0, 3],
       (IndexError, ValueError,
        errors.InvalidArgumentError), '.*out of bounds.*'),
      (SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError),
       '.*out of bounds.*'),
      (SLICE_BUILDER[0, 5], (IndexError, ValueError,
                             errors.InvalidArgumentError), '.*out of bounds.*'),
  )
  def testErrorsWithRaggedRank2(self, slice_spec, expected, message):
    """Test that rt.__getitem__(slice_spec) == expected."""
    rt = RaggedTensor.from_nested_row_splits(
        EXAMPLE_RAGGED_TENSOR_4D_VALUES,
        [EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])
    self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_4D)
    self._TestGetItemException(rt, slice_spec, expected, message)

  @parameterized.parameters(
      (SLICE_BUILDER[:], []),
      (SLICE_BUILDER[2:], []),
      (SLICE_BUILDER[:-3], []),
  )
  def testWithEmptyTensor(self, slice_spec, expected):
    """Test that rt.__getitem__(slice_spec) == expected."""
    rt = RaggedTensor.from_row_splits([], [0])
    self._TestGetItem(rt, slice_spec, expected)

  @parameterized.parameters(
      (SLICE_BUILDER[0], (IndexError, ValueError, errors.InvalidArgumentError),
       '.*out of bounds.*'),
      (SLICE_BUILDER[-1], (IndexError, ValueError, errors.InvalidArgumentError),
       '.*out of bounds.*'),
  )
  def testErrorsWithEmptyTensor(self, slice_spec, expected, message):
    """Test that rt.__getitem__(slice_spec) == expected."""
    rt = RaggedTensor.from_row_splits([], [0])
    self._TestGetItemException(rt, slice_spec, expected, message)

  @parameterized.parameters(
      (SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]),
      (SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]),
      (SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]),
      (SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]),
      (SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]),
      (SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]),
      (SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]),
  )
  def testWithPlaceholderShapes(self, slice_spec, expected):
    """Test that rt.__getitem__(slice_spec) == expected."""
    # Intentionally use an unknown shape for `splits`, to force the code path
    # that deals with having nrows unknown at graph construction time.
    splits = constant_op.constant(
        EXAMPLE_RAGGED_TENSOR_2D_SPLITS, dtype=dtypes.int64)
    splits = array_ops.placeholder_with_default(splits, None)
    rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES, splits)
    self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)
    self._TestGetItem(rt, slice_spec, expected)

  @parameterized.parameters(
      (SLICE_BUILDER[..., 2], ValueError,
       'Ellipsis not supported for unknown shape RaggedTensors'),)
  def testErrorsWithPlaceholderShapes(self, slice_spec, expected, message):
    """Test that rt.__getitem__(slice_spec) == expected."""
    if not context.executing_eagerly():
      # Intentionally use an unknown shape for `values`.
      values = array_ops.placeholder_with_default([0], None)
      rt = RaggedTensor.from_row_splits(values, [0, 1])
      self._TestGetItemException(rt, slice_spec, expected, message)

  def testNewAxis(self):
    # rt: [[[['a', 'b'], ['c', 'd']], [], [['e', 'f']]], []]
    splits1 = [0, 3, 3]
    splits2 = [0, 2, 2, 3]
    values = constant_op.constant([['a', 'b'], ['c', 'd'], ['e', 'f']])
    rt = RaggedTensor.from_nested_row_splits(values, [splits1, splits2])
    # Insert a new axis at each possible position and check both the values
    # and how the ragged rank / static shape change.
    rt_newaxis0 = rt[array_ops.newaxis]
    rt_newaxis1 = rt[:, array_ops.newaxis]
    rt_newaxis2 = rt[:, :, array_ops.newaxis]
    rt_newaxis3 = rt[:, :, :, array_ops.newaxis]
    rt_newaxis4 = rt[:, :, :, :, array_ops.newaxis]
    self.assertAllEqual(
        rt, [[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []])
    self.assertAllEqual(
        rt_newaxis0, [[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []]])
    self.assertAllEqual(
        rt_newaxis1,
        [[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]]], [[]]])
    self.assertAllEqual(
        rt_newaxis2,
        [[[[[b'a', b'b'], [b'c', b'd']]], [[]], [[[b'e', b'f']]]], []])
    self.assertAllEqual(
        rt_newaxis3,
        [[[[[b'a', b'b']], [[b'c', b'd']]], [], [[[b'e', b'f']]]], []])
    self.assertAllEqual(
        rt_newaxis4,
        [[[[[b'a'], [b'b']], [[b'c'], [b'd']]], [], [[[b'e'], [b'f']]]], []])
    self.assertEqual(rt.ragged_rank, 2)
    self.assertEqual(rt_newaxis0.ragged_rank, 3)
    self.assertEqual(rt_newaxis1.ragged_rank, 3)
    self.assertEqual(rt_newaxis2.ragged_rank, 3)
    self.assertEqual(rt_newaxis3.ragged_rank, 2)
    self.assertEqual(rt_newaxis4.ragged_rank, 2)
    self.assertEqual(rt_newaxis0.shape.as_list(), [1, 2, None, None, 2])
    self.assertEqual(rt_newaxis1.shape.as_list(), [2, 1, None, None, 2])
    self.assertEqual(rt_newaxis2.shape.as_list(), [2, None, 1, None, 2])
    self.assertEqual(rt_newaxis3.shape.as_list(), [2, None, None, 1, 2])
    self.assertEqual(rt_newaxis4.shape.as_list(), [2, None, None, 2, 1])

  @parameterized.parameters(
      # EXAMPLE_RAGGED_TENSOR_3D.shape = [2, 3, None]
      # Indexing into uniform_row_splits dimension:
      (SLICE_BUILDER[:, 1], [r[1] for r in EXAMPLE_RAGGED_TENSOR_3D],
       [2, None]),
      (SLICE_BUILDER[:, 2], [r[2] for r in EXAMPLE_RAGGED_TENSOR_3D],
       [2, None]),
      (SLICE_BUILDER[:, -2], [r[-2] for r in EXAMPLE_RAGGED_TENSOR_3D],
       [2, None]),
      (SLICE_BUILDER[:, -3], [r[-3] for r in EXAMPLE_RAGGED_TENSOR_3D],
       [2, None]),
      (SLICE_BUILDER[1:, 2], [r[2] for r in EXAMPLE_RAGGED_TENSOR_3D[1:]],
       [1, None]),
      (SLICE_BUILDER[:, 1, 1:], [r[1][1:] for r in EXAMPLE_RAGGED_TENSOR_3D],
       [2, None]),
      (SLICE_BUILDER[1:, 1, 1:],
       [r[1][1:] for r in EXAMPLE_RAGGED_TENSOR_3D[1:]],
       [1, None]),
      # Slicing uniform_row_splits dimension:
      (SLICE_BUILDER[:, 2:], [r[2:] for r in EXAMPLE_RAGGED_TENSOR_3D],
       [2, 1, None]),
      (SLICE_BUILDER[:, -2:], [r[-2:] for r in EXAMPLE_RAGGED_TENSOR_3D],
       [2, 2, None]),
      (SLICE_BUILDER[:, :, 1:],
       [[c[1:] for c in r] for r in EXAMPLE_RAGGED_TENSOR_3D],
       [2, 3, None]),
      (SLICE_BUILDER[:, 5:], [r[5:] for r in EXAMPLE_RAGGED_TENSOR_3D],
       [2, 0, None]),
      # Slicing uniform_row_splits dimension with a non-default step size:
      (SLICE_BUILDER[:, ::2], [r[::2] for r in EXAMPLE_RAGGED_TENSOR_3D],
       [2, 2, None]),
      (SLICE_BUILDER[:, ::-1], [r[::-1] for r in EXAMPLE_RAGGED_TENSOR_3D],
       [2, 3, None]),
  )  # pyformat: disable
  def testWithUniformRowLength(self, slice_spec, expected, expected_shape):
    """Test that rt.__getitem__(slice_spec) == expected."""
    rt = RaggedTensor.from_uniform_row_length(
        RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_3D_VALUES,
                                     EXAMPLE_RAGGED_TENSOR_3D_SPLITS),
        EXAMPLE_RAGGED_TENSOR_3D_ROWLEN)
    self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_3D)
    self.assertIsNot(rt.uniform_row_length, None)
    self._TestGetItem(rt, slice_spec, expected, expected_shape)
    # If the result is 3D, then check that it still has a uniform row length:
    actual = rt.__getitem__(slice_spec)  # pylint: disable=assignment-from-no-return
    if actual.shape.rank == 3:
      self.assertIsNot(actual.uniform_row_length, None)
      self.assertAllEqual(actual.uniform_row_length, expected_shape[1])

  @parameterized.parameters(
      (SLICE_BUILDER[:, 3], errors.InvalidArgumentError, 'out of bounds'),
      (SLICE_BUILDER[:, -4], errors.InvalidArgumentError, 'out of bounds'),
      (SLICE_BUILDER[:, 10], errors.InvalidArgumentError, 'out of bounds'),
      (SLICE_BUILDER[:, -10], errors.InvalidArgumentError, 'out of bounds'),
  )
  def testErrorsWithUniformRowLength(self, slice_spec, expected, message):
    """Test that rt.__getitem__(slice_spec) == expected."""
    rt = RaggedTensor.from_uniform_row_length(
        RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_3D_VALUES,
                                     EXAMPLE_RAGGED_TENSOR_3D_SPLITS),
        EXAMPLE_RAGGED_TENSOR_3D_ROWLEN)
    self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_3D)
    self._TestGetItemException(rt, slice_spec, expected, message)
if __name__ == '__main__':
  googletest.main()  # Standard TensorFlow test entry point.
| 45.303237
| 84
| 0.63167
|
import re
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
from tensorflow.python.platform import googletest
class _SliceBuilder:
def __getitem__(self, slice_spec):
return slice_spec
# Module-level singleton used by the tests to spell slice specs literally.
SLICE_BUILDER = _SliceBuilder()
def _make_tensor_slice_spec(slice_spec, use_constant=True):
  """Wraps all integers in an extended slice spec with a scalar tensor.

  Helps test slicing when the slice spec contains tensors rather than
  plain Python integers.

  Args:
    slice_spec: The extended slice spec (int, slice, or tuple of pieces).
    use_constant: If True, wrap each integer with `tf.constant`; otherwise
      wrap it with a placeholder-with-default (so the value is unknown at
      graph construction time).

  Returns:
    A copy of `slice_spec` with each integer i replaced by a scalar tensor.
  """
  def make_piece_scalar(piece):
    # Ints become scalar tensors; slices are rebuilt piecewise; anything
    # else (Ellipsis, newaxis, None, strings) passes through unchanged.
    if isinstance(piece, int):
      scalar = constant_op.constant(piece)
      if use_constant:
        return scalar
      else:
        return array_ops.placeholder_with_default(scalar, [])
    elif isinstance(piece, slice):
      return slice(
          make_piece_scalar(piece.start), make_piece_scalar(piece.stop),
          make_piece_scalar(piece.step))
    else:
      return piece
  if isinstance(slice_spec, tuple):
    return tuple(make_piece_scalar(piece) for piece in slice_spec)
  else:
    return make_piece_scalar(slice_spec)
# Example 2D ragged tensor (one ragged dimension, scalar values), expressed
# both as nested python lists and as splits+values.
EXAMPLE_RAGGED_TENSOR_2D = [[b'a', b'b'], [b'c', b'd', b'e'], [b'f'], [],
                            [b'g']]
EXAMPLE_RAGGED_TENSOR_2D_SPLITS = [0, 2, 5, 6, 6, 7]
EXAMPLE_RAGGED_TENSOR_2D_VALUES = ['a', 'b', 'c', 'd', 'e', 'f', 'g']

# Example 4D ragged tensor (two ragged dimensions, values of shape [2]).
# BUG FIX: in the extracted text the 4D literal and the SPLITS1 assignment
# were fused onto a single line (a syntax error); they are separate
# statements, reconstructed here.
EXAMPLE_RAGGED_TENSOR_4D = [
    [[[1, 2], [3, 4], [5, 6]],          # rt[0]
     [[7, 8], [9, 10], [11, 12]]],
    [],                                 # rt[1]
    [[[13, 14], [15, 16], [17, 18]]],   # rt[2]
    [[[19, 20]]],                       # rt[3]
]
EXAMPLE_RAGGED_TENSOR_4D_SPLITS1 = [0, 2, 2, 3, 4]
EXAMPLE_RAGGED_TENSOR_4D_SPLITS2 = [0, 3, 6, 9, 10]
EXAMPLE_RAGGED_TENSOR_4D_VALUES = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
                                   [11, 12], [13, 14], [15, 16], [17, 18],
                                   [19, 20]]

# Example 3D ragged tensor with uniform row length 3 (only the innermost
# dimension is ragged).
EXAMPLE_RAGGED_TENSOR_3D = [[[1, 2, 3], [4], [5, 6]], [[], [7, 8, 9], []]]
EXAMPLE_RAGGED_TENSOR_3D_ROWLEN = 3
EXAMPLE_RAGGED_TENSOR_3D_SPLITS = [0, 3, 4, 6, 6, 9, 9]
EXAMPLE_RAGGED_TENSOR_3D_VALUES = [1, 2, 3, 4, 5, 6, 7, 8, 9]
@test_util.run_all_in_graph_and_eager_modes
class RaggedGetItemTest(test_util.TensorFlowTestCase, parameterized.TestCase):
longMessage = True
def _TestGetItem(self, rt, slice_spec, expected, expected_shape=None):
tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)
tensor_slice_spec2 = _make_tensor_slice_spec(slice_spec, False)
value1 = rt.__getitem__(slice_spec)
value2 = rt.__getitem__(tensor_slice_spec1)
value3 = rt.__getitem__(tensor_slice_spec2)
self.assertAllEqual(value1, expected, 'slice_spec=%s' % (slice_spec,))
self.assertAllEqual(value2, expected, 'slice_spec=%s' % (slice_spec,))
self.assertAllEqual(value3, expected, 'slice_spec=%s' % (slice_spec,))
if expected_shape is not None:
value1.shape.assert_is_compatible_with(expected_shape)
value2.shape.assert_is_compatible_with(expected_shape)
value3.shape.assert_is_compatible_with(expected_shape)
  def _TestGetItemException(self, rt, slice_spec, expected, message):
    """Asserts ``rt[slice_spec]`` raises `expected` with regex `message`.

    Checked both with the spec as given and with its tensor-converted form
    from `_make_tensor_slice_spec`.
    """
    tensor_slice_spec = _make_tensor_slice_spec(slice_spec, True)
    with self.assertRaisesRegex(expected, message):
      self.evaluate(rt.__getitem__(slice_spec))
    with self.assertRaisesRegex(expected, message):
      self.evaluate(rt.__getitem__(tensor_slice_spec))
@parameterized.parameters(
(SLICE_BUILDER[-5], EXAMPLE_RAGGED_TENSOR_2D[-5]),
(SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]),
(SLICE_BUILDER[-1], EXAMPLE_RAGGED_TENSOR_2D[-1]),
(SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]),
(SLICE_BUILDER[1], EXAMPLE_RAGGED_TENSOR_2D[1]),
(SLICE_BUILDER[4], EXAMPLE_RAGGED_TENSOR_2D[4]),
(SLICE_BUILDER[-6:], EXAMPLE_RAGGED_TENSOR_2D[-6:]),
(SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]),
(SLICE_BUILDER[-1:], EXAMPLE_RAGGED_TENSOR_2D[-1:]),
(SLICE_BUILDER[0:], EXAMPLE_RAGGED_TENSOR_2D[0:]),
(SLICE_BUILDER[3:], EXAMPLE_RAGGED_TENSOR_2D[3:]),
(SLICE_BUILDER[5:], EXAMPLE_RAGGED_TENSOR_2D[5:]),
(SLICE_BUILDER[:-6], EXAMPLE_RAGGED_TENSOR_2D[:-6]),
(SLICE_BUILDER[:-3], EXAMPLE_RAGGED_TENSOR_2D[:-3]),
(SLICE_BUILDER[:-1], EXAMPLE_RAGGED_TENSOR_2D[:-1]),
(SLICE_BUILDER[:0], EXAMPLE_RAGGED_TENSOR_2D[:0]),
(SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]),
(SLICE_BUILDER[:5], EXAMPLE_RAGGED_TENSOR_2D[:5]),
(SLICE_BUILDER[0:3], EXAMPLE_RAGGED_TENSOR_2D[0:3]),
(SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]),
(SLICE_BUILDER[-5:3], EXAMPLE_RAGGED_TENSOR_2D[-5:3]),
(SLICE_BUILDER[3:1], EXAMPLE_RAGGED_TENSOR_2D[3:1]),
(SLICE_BUILDER[-1:1], EXAMPLE_RAGGED_TENSOR_2D[-1:1]),
(SLICE_BUILDER[1:-1], EXAMPLE_RAGGED_TENSOR_2D[1:-1]),
(SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]),
(SLICE_BUILDER[1, 2], EXAMPLE_RAGGED_TENSOR_2D[1][2]),
(SLICE_BUILDER[-1, 0], EXAMPLE_RAGGED_TENSOR_2D[-1][0]),
(SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]),
(SLICE_BUILDER[:], EXAMPLE_RAGGED_TENSOR_2D),
(SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_2D),
([], EXAMPLE_RAGGED_TENSOR_2D),
(SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_2D),
(SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_2D[2]),
(SLICE_BUILDER[..., :], EXAMPLE_RAGGED_TENSOR_2D),
(SLICE_BUILDER[..., 2, 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]),
(SLICE_BUILDER[2, ..., 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]),
(SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_2D[2][0]),
(SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, array_ops.newaxis],
[[row] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[-1:,
1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D[-1:]]),
(SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[::2], EXAMPLE_RAGGED_TENSOR_2D[::2]),
(SLICE_BUILDER[::-1], EXAMPLE_RAGGED_TENSOR_2D[::-1]),
(SLICE_BUILDER[::-2], EXAMPLE_RAGGED_TENSOR_2D[::-2]),
(SLICE_BUILDER[::-3], EXAMPLE_RAGGED_TENSOR_2D[::-3]),
(SLICE_BUILDER[:, ::2], [row[::2] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, ::-1], [row[::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, ::-2], [row[::-2] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, ::-3], [row[::-3] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, 2::-1],
[row[2::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, -1::-1],
[row[-1::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[..., -1::-1],
[row[-1::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, 2::-2],
[row[2::-2] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[::-1, ::-1],
[row[::-1] for row in EXAMPLE_RAGGED_TENSOR_2D[::-1]]),
) def testWithRaggedRank1(self, slice_spec, expected):
rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES,
EXAMPLE_RAGGED_TENSOR_2D_SPLITS)
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)
self._TestGetItem(rt, slice_spec, expected)
  @parameterized.parameters([(start, stop)
                             for start in [-2, -1, None, 0, 1, 2]
                             for stop in [-2, -1, None, 0, 1, 2]])
  def testWithStridedSlices(self, start, stop):
    """Strided slices on both axes match Python list slicing."""
    test_value = [[1, 2, 3, 4, 5], [6, 7], [8, 9, 10], [], [9],
                  [1, 2, 3, 4, 5, 6, 7, 8]]
    rt = ragged_factory_ops.constant(test_value)
    # Steps are looped here (rather than parameterized) to keep the
    # parameter-product size down.
    for step in [-3, -2, -1, 1, 2, 3]:
      self.assertAllEqual(rt[start:stop:step], test_value[start:stop:step],
                          'slice=%s:%s:%s' % (start, stop, step))
      self.assertAllEqual(rt[:, start:stop:step],
                          [row[start:stop:step] for row in test_value],
                          'slice=%s:%s:%s' % (start, stop, step))
  @parameterized.parameters(
      # Out-of-bounds row / value indices.
      (SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError),
       '.*out of bounds.*'),
      (SLICE_BUILDER[-6], (IndexError, ValueError, errors.InvalidArgumentError),
       '.*out of bounds.*'),
      (SLICE_BUILDER[0, 2], (IndexError, ValueError,
                             errors.InvalidArgumentError), '.*out of bounds.*'),
      (SLICE_BUILDER[3, 0], (IndexError, ValueError,
                             errors.InvalidArgumentError), '.*out of bounds.*'),
      # Direct indexing into a ragged dimension is unsupported.
      (SLICE_BUILDER[:, 3], ValueError,
       'Cannot index into an inner ragged dimension'),
      (SLICE_BUILDER[:1, 3], ValueError,
       'Cannot index into an inner ragged dimension'),
      (SLICE_BUILDER[..., 3], ValueError,
       'Cannot index into an inner ragged dimension'),
      # Non-integer indices / offsets / strides.
      (SLICE_BUILDER[0.5], TypeError, re.escape(array_ops._SLICE_TYPE_ERROR)),
      (SLICE_BUILDER[1:3:0.5], TypeError, re.escape(
          array_ops._SLICE_TYPE_ERROR)),
      (SLICE_BUILDER[:, 1:3:0.5], TypeError,
       'slice strides must be integers or None'),
      (SLICE_BUILDER[:, 0.5:1.5], TypeError,
       'slice offsets must be integers or None'),
      (SLICE_BUILDER['foo'], TypeError, re.escape(array_ops._SLICE_TYPE_ERROR)),
      (SLICE_BUILDER[:, 'foo':'foo'], TypeError,
       'slice offsets must be integers or None'),
      # More indices than dimensions.
      (SLICE_BUILDER[..., 0, 0,
                     0], IndexError, 'Too many indices for RaggedTensor'),
  )
  def testErrorsWithRaggedRank1(self, slice_spec, expected, message):
    """Invalid specs on a ragged-rank-1 tensor raise the expected error."""
    rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES,
                                      EXAMPLE_RAGGED_TENSOR_2D_SPLITS)
    self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)
    self._TestGetItemException(rt, slice_spec, expected, message)
@parameterized.parameters(
(SLICE_BUILDER[2, 0], EXAMPLE_RAGGED_TENSOR_4D[2][0]),
(SLICE_BUILDER[2, 0, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]),
(SLICE_BUILDER[2, 0, 1, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1][1]),
(SLICE_BUILDER[2, 0, 1:], EXAMPLE_RAGGED_TENSOR_4D[2][0][1:]),
(SLICE_BUILDER[2, 0, 1:, 1:], [[16], [18]]),
(SLICE_BUILDER[2, 0, :, 1], [14, 16, 18]),
(SLICE_BUILDER[2, 0, 1, :], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]),
(SLICE_BUILDER[0, :], EXAMPLE_RAGGED_TENSOR_4D[0]),
(SLICE_BUILDER[1, :], EXAMPLE_RAGGED_TENSOR_4D[1]),
(SLICE_BUILDER[0, :, :, 1], [[2, 4, 6], [8, 10, 12]]),
(SLICE_BUILDER[1, :, :, 1], []),
(SLICE_BUILDER[2, :, :, 1], [[14, 16, 18]]),
(SLICE_BUILDER[3, :, :, 1], [[20]]),
(SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_4D),
(SLICE_BUILDER[:, :, :, 1], [[[2, 4, 6], [8, 10, 12]], [], [[14, 16, 18]],
[[20]]]),
(SLICE_BUILDER[1:, :, :, 1], [[], [[14, 16, 18]], [[20]]]),
(SLICE_BUILDER[-3:, :, :, 1], [[], [[14, 16, 18]], [[20]]]),
(SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_4D),
(SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_4D[2]),
(SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_4D[2][0]),
(SLICE_BUILDER[..., 0], [[[1, 3, 5], [7, 9, 11]], [], [[13, 15, 17]],
[[19]]]),
(SLICE_BUILDER[2, ..., 0], [[13, 15, 17]]),
(SLICE_BUILDER[2, 0, ..., 0], [13, 15, 17]),
(SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, array_ops.newaxis],
[[row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
([], EXAMPLE_RAGGED_TENSOR_4D),
(SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, :-1],
[[v[:-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, 1:2],
[[v[1:2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[1:, 1:3, 1:2],
[[v[1:2] for v in row[1:3]] for row in EXAMPLE_RAGGED_TENSOR_4D[1:]]),
(SLICE_BUILDER[::2], EXAMPLE_RAGGED_TENSOR_4D[::2]),
(SLICE_BUILDER[::-1], EXAMPLE_RAGGED_TENSOR_4D[::-1]),
(SLICE_BUILDER[::-2], EXAMPLE_RAGGED_TENSOR_4D[::-2]),
(SLICE_BUILDER[1::2], EXAMPLE_RAGGED_TENSOR_4D[1::2]),
(SLICE_BUILDER[:, ::2], [row[::2] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, 1::2], [row[1::2] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, ::2],
[[v[::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, 1::2],
[[v[1::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, ::-1],
[[v[::-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, ::-2],
[[v[::-2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[..., ::-1, :],
[[v[::-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[..., ::-1], [[[v[::-1] for v in col] for col in row]
for row in EXAMPLE_RAGGED_TENSOR_4D]),
) def testWithRaggedRank2(self, slice_spec, expected):
rt = RaggedTensor.from_nested_row_splits(
EXAMPLE_RAGGED_TENSOR_4D_VALUES,
[EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_4D)
self._TestGetItem(rt, slice_spec, expected)
  @parameterized.parameters(
      # Direct indexing into a ragged dimension is unsupported.
      (SLICE_BUILDER[:, 0], ValueError,
       'Cannot index into an inner ragged dimension.'),
      (SLICE_BUILDER[:, :, 0], ValueError,
       'Cannot index into an inner ragged dimension.'),
      # Out-of-bounds indices at each nesting level.
      (SLICE_BUILDER[1, 0], (IndexError, ValueError,
                             errors.InvalidArgumentError), '.*out of bounds.*'),
      (SLICE_BUILDER[0, 0, 3],
       (IndexError, ValueError,
        errors.InvalidArgumentError), '.*out of bounds.*'),
      (SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError),
       '.*out of bounds.*'),
      (SLICE_BUILDER[0, 5], (IndexError, ValueError,
                             errors.InvalidArgumentError), '.*out of bounds.*'),
  )
  def testErrorsWithRaggedRank2(self, slice_spec, expected, message):
    """Invalid specs on a ragged-rank-2 tensor raise the expected error."""
    rt = RaggedTensor.from_nested_row_splits(
        EXAMPLE_RAGGED_TENSOR_4D_VALUES,
        [EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])
    self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_4D)
    self._TestGetItemException(rt, slice_spec, expected, message)
  @parameterized.parameters(
      (SLICE_BUILDER[:], []),
      (SLICE_BUILDER[2:], []),
      (SLICE_BUILDER[:-3], []),
  )
  def testWithEmptyTensor(self, slice_spec, expected):
    """Any row slice of an empty ragged tensor is empty."""
    # from_row_splits([], [0]) builds a ragged tensor with zero rows.
    rt = RaggedTensor.from_row_splits([], [0])
    self._TestGetItem(rt, slice_spec, expected)
  @parameterized.parameters(
      (SLICE_BUILDER[0], (IndexError, ValueError, errors.InvalidArgumentError),
       '.*out of bounds.*'),
      (SLICE_BUILDER[-1], (IndexError, ValueError, errors.InvalidArgumentError),
       '.*out of bounds.*'),
  )
  def testErrorsWithEmptyTensor(self, slice_spec, expected, message):
    """Any scalar index into a zero-row ragged tensor is out of bounds."""
    rt = RaggedTensor.from_row_splits([], [0])
    self._TestGetItemException(rt, slice_spec, expected, message)
  @parameterized.parameters(
      (SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]),
      (SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]),
      (SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]),
      (SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]),
      (SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]),
      (SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]),
      (SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]),
  )
  def testWithPlaceholderShapes(self, slice_spec, expected):
    """Getitem works when the row_splits shape is not statically known."""
    splits = constant_op.constant(
        EXAMPLE_RAGGED_TENSOR_2D_SPLITS, dtype=dtypes.int64)
    # Erase the static shape so the slicing code cannot rely on it.
    splits = array_ops.placeholder_with_default(splits, None)
    rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES, splits)
    self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)
    self._TestGetItem(rt, slice_spec, expected)
  @parameterized.parameters(
      (SLICE_BUILDER[..., 2], ValueError,
       'Ellipsis not supported for unknown shape RaggedTensors'),)
  def testErrorsWithPlaceholderShapes(self, slice_spec, expected, message):
    """Ellipsis fails when the values' rank is not statically known."""
    # Unknown-rank values can only be produced in graph mode, so this
    # check is skipped when executing eagerly.
    if not context.executing_eagerly():
      values = array_ops.placeholder_with_default([0], None)
      rt = RaggedTensor.from_row_splits(values, [0, 1])
      self._TestGetItemException(rt, slice_spec, expected, message)
  def testNewAxis(self):
    """tf.newaxis inserts a size-1 dimension at each possible position."""
    splits1 = [0, 3, 3]
    splits2 = [0, 2, 2, 3]
    values = constant_op.constant([['a', 'b'], ['c', 'd'], ['e', 'f']])
    rt = RaggedTensor.from_nested_row_splits(values, [splits1, splits2])
    # One result per insertion position, axis 0 through axis 4.
    rt_newaxis0 = rt[array_ops.newaxis]
    rt_newaxis1 = rt[:, array_ops.newaxis]
    rt_newaxis2 = rt[:, :, array_ops.newaxis]
    rt_newaxis3 = rt[:, :, :, array_ops.newaxis]
    rt_newaxis4 = rt[:, :, :, :, array_ops.newaxis]
    self.assertAllEqual(
        rt, [[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []])
    self.assertAllEqual(
        rt_newaxis0, [[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []]])
    self.assertAllEqual(
        rt_newaxis1,
        [[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]]], [[]]])
    self.assertAllEqual(
        rt_newaxis2,
        [[[[[b'a', b'b'], [b'c', b'd']]], [[]], [[[b'e', b'f']]]], []])
    self.assertAllEqual(
        rt_newaxis3,
        [[[[[b'a', b'b']], [[b'c', b'd']]], [], [[[b'e', b'f']]]], []])
    self.assertAllEqual(
        rt_newaxis4,
        [[[[[b'a'], [b'b']], [[b'c'], [b'd']]], [], [[[b'e'], [b'f']]]], []])
    # Inserting an axis at/above the ragged dimensions (axes 0-2) bumps
    # ragged_rank; inserting inside the uniform inner dims (3-4) does not.
    self.assertEqual(rt.ragged_rank, 2)
    self.assertEqual(rt_newaxis0.ragged_rank, 3)
    self.assertEqual(rt_newaxis1.ragged_rank, 3)
    self.assertEqual(rt_newaxis2.ragged_rank, 3)
    self.assertEqual(rt_newaxis3.ragged_rank, 2)
    self.assertEqual(rt_newaxis4.ragged_rank, 2)
    self.assertEqual(rt_newaxis0.shape.as_list(), [1, 2, None, None, 2])
    self.assertEqual(rt_newaxis1.shape.as_list(), [2, 1, None, None, 2])
    self.assertEqual(rt_newaxis2.shape.as_list(), [2, None, 1, None, 2])
    self.assertEqual(rt_newaxis3.shape.as_list(), [2, None, None, 1, 2])
    self.assertEqual(rt_newaxis4.shape.as_list(), [2, None, None, 2, 1])
@parameterized.parameters(
(SLICE_BUILDER[:, 1], [r[1] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, None]),
(SLICE_BUILDER[:, 2], [r[2] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, None]),
(SLICE_BUILDER[:, -2], [r[-2] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, None]),
(SLICE_BUILDER[:, -3], [r[-3] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, None]),
(SLICE_BUILDER[1:, 2], [r[2] for r in EXAMPLE_RAGGED_TENSOR_3D[1:]],
[1, None]),
(SLICE_BUILDER[:, 1, 1:], [r[1][1:] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, None]),
(SLICE_BUILDER[1:, 1, 1:],
[r[1][1:] for r in EXAMPLE_RAGGED_TENSOR_3D[1:]],
[1, None]),
(SLICE_BUILDER[:, 2:], [r[2:] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 1, None]),
(SLICE_BUILDER[:, -2:], [r[-2:] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 2, None]),
(SLICE_BUILDER[:, :, 1:],
[[c[1:] for c in r] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 3, None]),
(SLICE_BUILDER[:, 5:], [r[5:] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 0, None]),
(SLICE_BUILDER[:, ::2], [r[::2] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 2, None]),
(SLICE_BUILDER[:, ::-1], [r[::-1] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 3, None]),
) def testWithUniformRowLength(self, slice_spec, expected, expected_shape):
rt = RaggedTensor.from_uniform_row_length(
RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_3D_VALUES,
EXAMPLE_RAGGED_TENSOR_3D_SPLITS),
EXAMPLE_RAGGED_TENSOR_3D_ROWLEN)
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_3D)
self.assertIsNot(rt.uniform_row_length, None)
self._TestGetItem(rt, slice_spec, expected, expected_shape)
actual = rt.__getitem__(slice_spec) if actual.shape.rank == 3:
self.assertIsNot(actual.uniform_row_length, None)
self.assertAllEqual(actual.uniform_row_length, expected_shape[1])
  @parameterized.parameters(
      (SLICE_BUILDER[:, 3], errors.InvalidArgumentError, 'out of bounds'),
      (SLICE_BUILDER[:, -4], errors.InvalidArgumentError, 'out of bounds'),
      (SLICE_BUILDER[:, 10], errors.InvalidArgumentError, 'out of bounds'),
      (SLICE_BUILDER[:, -10], errors.InvalidArgumentError, 'out of bounds'),
  )
  def testErrorsWithUniformRowLength(self, slice_spec, expected, message):
    """Out-of-range indices into the uniform dimension raise at runtime."""
    rt = RaggedTensor.from_uniform_row_length(
        RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_3D_VALUES,
                                     EXAMPLE_RAGGED_TENSOR_3D_SPLITS),
        EXAMPLE_RAGGED_TENSOR_3D_ROWLEN)
    self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_3D)
    self._TestGetItemException(rt, slice_spec, expected, message)
# Allow running this test module directly as a script.
if __name__ == '__main__':
  googletest.main()
| true
| true
|
f707fb38fc701fe6ce1b8c40b3ffe14b403435a4
| 3,901
|
py
|
Python
|
tests/test_ucuenca.py
|
stsewd/ucuenca.py
|
1c74dbc9ec133dbcaa2967004e7fc9e4cf12feb5
|
[
"MIT"
] | 3
|
2017-06-13T03:23:59.000Z
|
2019-02-22T05:18:57.000Z
|
tests/test_ucuenca.py
|
stsewd/ucuenca.py
|
1c74dbc9ec133dbcaa2967004e7fc9e4cf12feb5
|
[
"MIT"
] | 6
|
2017-01-09T20:00:51.000Z
|
2017-03-23T05:25:44.000Z
|
tests/test_ucuenca.py
|
stsewd/ucuenca.py
|
1c74dbc9ec133dbcaa2967004e7fc9e4cf12feb5
|
[
"MIT"
] | 2
|
2018-03-06T03:25:57.000Z
|
2019-03-14T21:53:44.000Z
|
import unittest
import json
import os
from ucuenca.ucuenca import Ucuenca
# Directory holding the JSON fixtures with expected API payloads,
# resolved relative to this test module's location.
TEST_RESOURCES = os.path.join(
    os.path.dirname(__file__),
    "..", "tests_resources"
)
class GetCareersTests(unittest.TestCase):
    """Tests for ``Ucuenca.careers``."""

    def setUp(self):
        self.ucuenca = Ucuenca()

    def test_careers(self):
        """Check 0104926787's careers."""
        # The fixture holds the payload expected for this student id.
        expected_result = self._get_careers()
        self.assertEqual(expected_result, self.ucuenca.careers('0104926787'))

    def test_careers_invalid_student(self):
        """Check invalid student's careers."""
        self.assertFalse(self.ucuenca.careers('1234567890'))

    def _get_careers(self):
        """Load the expected careers payload from the test resources."""
        fixture = os.path.join(TEST_RESOURCES, "careers.json")
        with open(fixture) as fp:
            return json.load(fp)
class GetNotesTests(unittest.TestCase):
    """Tests for ``Ucuenca.notes``."""

    def setUp(self):
        self.ucuenca = Ucuenca()

    @unittest.expectedFailure
    def test_notes(self):
        """Check 0302068309's notes."""
        # Marked expectedFailure: the real expected payload has not been
        # captured yet (placeholder below), so the comparison cannot pass.
        student_id = '0302068309'
        career_id = 16
        perdiod_id = 115
        expected_result = {}  # TODO: capture the real expected notes payload
        actual_result = self.ucuenca.notes(student_id, career_id, perdiod_id)
        self.assertEqual(actual_result, expected_result)

    def test_notes_invalid_student(self):
        """Check invalid student's notes."""
        student_id = '1234567890'
        career_id = 34
        perdiod_id = 115
        result = self.ucuenca.notes(student_id, career_id, perdiod_id)
        self.assertFalse(result)
class GetScheduleTests(unittest.TestCase):
    """Tests for ``Ucuenca.schedule``."""

    def setUp(self):
        self.ucuenca = Ucuenca()

    def test_schedule(self):
        """Check 0104378690's schedule."""
        student_id = '0104378690'
        expected_result = self._get_schedule()
        actual_result = self.ucuenca.schedule(student_id)
        self.assertEqual(actual_result, expected_result)

    # Renamed from test_careers_invalid_student: the old name was a
    # copy-paste leftover from GetCareersTests and misdescribed this test.
    def test_schedule_invalid_student(self):
        """Check invalid student's schedule."""
        student_id = '1234567890'
        result = self.ucuenca.schedule(student_id)
        self.assertFalse(result)

    def _get_schedule(self):
        """Load the expected schedule payload from the test resources."""
        path = os.path.join(TEST_RESOURCES, "schedule.json")
        with open(path) as f:
            json_file = json.load(f)
        return json_file
class GetCurriculumProgressTests(unittest.TestCase):
    """Tests for ``Ucuenca.curriculum_progress``."""

    def setUp(self):
        self.ucuenca = Ucuenca()

    def test_curriculum_progress(self):
        """Check 0104926787's curriculum progress."""
        student_id = '0104926787'
        career_id = 44
        curriculum_id = 1
        career_plan = 4
        expected_result = self._get_curriculum_progress()
        actual_result = self.ucuenca.curriculum_progress(
            student_id, career_id, curriculum_id, career_plan
        )
        self.assertEqual(actual_result, expected_result)

    def test_curriculum_progress_invalid_student(self):
        """Check invalid student's curriculum progress."""
        student_id = '1234567890'
        career_id = 44
        curriculum_id = 1
        career_plan = 4
        result = self.ucuenca.curriculum_progress(
            student_id, career_id, curriculum_id, career_plan
        )
        self.assertFalse(result)

    def _get_curriculum_progress(self):
        # Expected payload fixture (presumably captured from the live API —
        # confirm against tests_resources).
        path = os.path.join(TEST_RESOURCES, "curriculum_progress.json")
        with open(path) as f:
            json_file = json.load(f)
        return json_file
class AuthenticationTests(unittest.TestCase):
    """Tests for ``Ucuenca.authentication``."""

    def setUp(self):
        self.ucuenca = Ucuenca()

    def test_bad_password(self):
        """Check authentication with a bad password."""
        # Deliberately wrong credentials: the 'autenticacion' flag in the
        # response must be falsy.
        user = 'santos.gallegos'
        passw = '1234'
        result = self.ucuenca.authentication(user, passw)
        self.assertFalse(result['autenticacion'])
| 30.24031
| 77
| 0.651115
|
import unittest
import json
import os
from ucuenca.ucuenca import Ucuenca
TEST_RESOURCES = os.path.join(
os.path.dirname(__file__),
"..", "tests_resources"
)
class GetCareersTests(unittest.TestCase):
def setUp(self):
self.ucuenca = Ucuenca()
def test_careers(self):
student_id = '0104926787'
expected_result = self._get_careers()
actual_result = self.ucuenca.careers(student_id)
self.assertEqual(expected_result, actual_result)
def test_careers_invalid_student(self):
student_id = '1234567890'
result = self.ucuenca.careers(student_id)
self.assertFalse(result)
def _get_careers(self):
path = os.path.join(TEST_RESOURCES, "careers.json")
with open(path) as f:
json_file = json.load(f)
return json_file
class GetNotesTests(unittest.TestCase):
def setUp(self):
self.ucuenca = Ucuenca()
@unittest.expectedFailure
def test_notes(self):
student_id = '0302068309'
career_id = 16
perdiod_id = 115
expected_result = {} actual_result = self.ucuenca.notes(student_id, career_id, perdiod_id)
self.assertEqual(actual_result, expected_result)
def test_notes_invalid_student(self):
student_id = '1234567890'
career_id = 34
perdiod_id = 115
result = self.ucuenca.notes(student_id, career_id, perdiod_id)
self.assertFalse(result)
class GetScheduleTests(unittest.TestCase):
def setUp(self):
self.ucuenca = Ucuenca()
def test_schedule(self):
student_id = '0104378690'
expected_result = self._get_schedule()
actual_result = self.ucuenca.schedule(student_id)
self.assertEqual(actual_result, expected_result)
def test_careers_invalid_student(self):
student_id = '1234567890'
result = self.ucuenca.schedule(student_id)
self.assertFalse(result)
def _get_schedule(self):
path = os.path.join(TEST_RESOURCES, "schedule.json")
with open(path) as f:
json_file = json.load(f)
return json_file
class GetCurriculumProgressTests(unittest.TestCase):
def setUp(self):
self.ucuenca = Ucuenca()
def test_curriculum_progress(self):
student_id = '0104926787'
career_id = 44
curriculum_id = 1
career_plan = 4
expected_result = self._get_curriculum_progress()
actual_result = self.ucuenca.curriculum_progress(
student_id, career_id, curriculum_id, career_plan
)
self.assertEqual(actual_result, expected_result)
def test_curriculum_progress_invalid_student(self):
student_id = '1234567890'
career_id = 44
curriculum_id = 1
career_plan = 4
result = self.ucuenca.curriculum_progress(
student_id, career_id, curriculum_id, career_plan
)
self.assertFalse(result)
def _get_curriculum_progress(self):
path = os.path.join(TEST_RESOURCES, "curriculum_progress.json")
with open(path) as f:
json_file = json.load(f)
return json_file
class AuthenticationTests(unittest.TestCase):
def setUp(self):
self.ucuenca = Ucuenca()
def test_bad_password(self):
user = 'santos.gallegos'
passw = '1234'
result = self.ucuenca.authentication(user, passw)
self.assertFalse(result['autenticacion'])
| true
| true
|
f707fb42cf5a2edc45e9a23382387f7ca9663d6c
| 577
|
py
|
Python
|
oops_fhir/r4/value_set/common_ucumcodes_for_age.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/value_set/common_ucumcodes_for_age.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/value_set/common_ucumcodes_for_age.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
__all__ = ["CommonUCUMCodesForAge"]

# The FHIR ValueSet definition ships alongside this module as JSON (same
# basename, ".json" suffix) and is parsed once at import time.
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class CommonUCUMCodesForAge(ValueSet):
    """
    Common UCUM Codes for Age

    Unified Code for Units of Measure (UCUM). This value set includes all
    UCUM codes

    Status: draft - Version: 4.0.1

    http://hl7.org/fhir/ValueSet/age-units
    """

    # TODO: generated template emits a stray `pass` before the inner
    # class; clean up the code-generation template.
    pass

    class Meta:
        # The parsed FHIR ValueSet resource backing this class.
        resource = _resource
| 19.233333
| 73
| 0.712305
|
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
__all__ = ["CommonUCUMCodesForAge"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class CommonUCUMCodesForAge(ValueSet):
pass
class Meta:
resource = _resource
| true
| true
|
f707fc86a94333cce79cba21f503b205a669be39
| 4,171
|
py
|
Python
|
Incident-Response/Tools/grr/grr/core/grr_response_core/lib/parsers/__init__.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 1
|
2021-07-24T17:22:50.000Z
|
2021-07-24T17:22:50.000Z
|
Incident-Response/Tools/grr/grr/core/grr_response_core/lib/parsers/__init__.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-28T03:40:31.000Z
|
2022-02-28T03:40:52.000Z
|
Incident-Response/Tools/grr/grr/core/grr_response_core/lib/parsers/__init__.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-25T08:34:51.000Z
|
2022-03-16T17:29:44.000Z
|
#!/usr/bin/env python
"""Generic parsers (for GRR server and client code)."""
from typing import Iterator
from typing import Text
from typing import Type
from typing import TypeVar
from grr_response_core.lib import factory
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.parsers import abstract
from grr_response_core.lib.util import collection
from grr_response_core.lib.util import precondition
# Re-exported abstract parser interfaces (canonical definitions live in
# grr_response_core.lib.parsers.abstract).
ParseError = abstract.ParseError
Parser = abstract.Parser
SingleResponseParser = abstract.SingleResponseParser
SingleFileParser = abstract.SingleFileParser
MultiResponseParser = abstract.MultiResponseParser
MultiFileParser = abstract.MultiFileParser

# Short local aliases to keep the factory annotations below readable.
_Factory = factory.Factory
_RDFValue = rdfvalue.RDFValue

# Global parser registries, one per parser flavor. ArtifactParserFactory
# looks parsers up here by artifact name; registration presumably happens
# elsewhere at import time — confirm against the parser definitions.
SINGLE_RESPONSE_PARSER_FACTORY: _Factory[SingleResponseParser[_RDFValue]] = (
    _Factory(SingleResponseParser[_RDFValue]))
MULTI_RESPONSE_PARSER_FACTORY: _Factory[MultiResponseParser[_RDFValue]] = (
    _Factory(MultiResponseParser[_RDFValue]))
SINGLE_FILE_PARSER_FACTORY: _Factory[SingleFileParser[_RDFValue]] = (
    _Factory(SingleFileParser[_RDFValue]))
MULTI_FILE_PARSER_FACTORY: _Factory[MultiFileParser[_RDFValue]] = (
    _Factory(MultiFileParser[_RDFValue]))

# Type variable for helpers that are generic over the parser flavor.
_P = TypeVar("_P", bound=Parser)
class ArtifactParserFactory(object):
  """A factory wrapper class that yields parsers for specific artifact."""

  def __init__(self, artifact_name: Text) -> None:
    """Initializes the artifact parser factory.

    Args:
      artifact_name: A name of the artifact this factory is supposed to provide
        parser instances for.
    """
    precondition.AssertType(artifact_name, Text)
    self._artifact_name = artifact_name

  def HasParsers(self) -> bool:
    """Returns True iff any parser flavor supports this artifact."""
    return (self.HasSingleResponseParsers() or self.HasMultiResponseParsers() or
            self.HasSingleFileParsers() or self.HasMultiFileParsers())

  def HasSingleResponseParsers(self) -> bool:
    """Returns True iff a single-response parser supports this artifact."""
    return any(self.SingleResponseParserTypes())

  def SingleResponseParsers(self) -> Iterator[SingleResponseParser[_RDFValue]]:
    """Yields instances of the supported single-response parsers."""
    return self._CreateSupportedParsers(SINGLE_RESPONSE_PARSER_FACTORY)

  def SingleResponseParserTypes(
      self) -> Iterator[Type[SingleResponseParser[_RDFValue]]]:
    """Yields classes of the supported single-response parsers."""
    return self._SupportedTypes(SINGLE_RESPONSE_PARSER_FACTORY)

  def HasMultiResponseParsers(self) -> bool:
    """Returns True iff a multi-response parser supports this artifact."""
    return any(self.MultiResponseParserTypes())

  def MultiResponseParsers(self) -> Iterator[MultiResponseParser[_RDFValue]]:
    """Yields instances of the supported multi-response parsers."""
    return self._CreateSupportedParsers(MULTI_RESPONSE_PARSER_FACTORY)

  def MultiResponseParserTypes(
      self) -> Iterator[Type[MultiResponseParser[_RDFValue]]]:
    """Yields classes of the supported multi-response parsers."""
    return self._SupportedTypes(MULTI_RESPONSE_PARSER_FACTORY)

  def HasSingleFileParsers(self) -> bool:
    """Returns True iff a single-file parser supports this artifact."""
    return any(self.SingleFileParserTypes())

  def SingleFileParsers(self) -> Iterator[SingleFileParser[_RDFValue]]:
    """Yields instances of the supported single-file parsers."""
    return self._CreateSupportedParsers(SINGLE_FILE_PARSER_FACTORY)

  def SingleFileParserTypes(
      self) -> Iterator[Type[SingleFileParser[_RDFValue]]]:
    """Yields classes of the supported single-file parsers."""
    return self._SupportedTypes(SINGLE_FILE_PARSER_FACTORY)

  def HasMultiFileParsers(self) -> bool:
    """Returns True iff a multi-file parser supports this artifact."""
    return any(self.MultiFileParserTypes())

  def MultiFileParsers(self) -> Iterator[MultiFileParser[_RDFValue]]:
    """Yields instances of the supported multi-file parsers."""
    return self._CreateSupportedParsers(MULTI_FILE_PARSER_FACTORY)

  def MultiFileParserTypes(self) -> Iterator[Type[MultiFileParser[_RDFValue]]]:
    """Yields classes of the supported multi-file parsers."""
    return self._SupportedTypes(MULTI_FILE_PARSER_FACTORY)

  def AllParserTypes(self) -> Iterator[Type[Parser[_RDFValue]]]:
    """Returns all known parser types applicable for the artifact."""
    return collection.Flatten([
        self.SingleResponseParserTypes(),
        self.MultiResponseParserTypes(),
        self.SingleFileParserTypes(),
        self.MultiFileParserTypes(),
    ])

  def _CreateSupportedParsers(self, fac: _Factory[_P]) -> Iterator[_P]:
    """Instantiates every parser in `fac` that supports this artifact."""
    for name in fac.Names():
      cls = fac.GetType(name)
      if self._artifact_name in cls.supported_artifacts:
        yield fac.Create(name)

  def _SupportedTypes(self, fac: _Factory[_P]) -> Iterator[Type[_P]]:
    """Yields every parser class in `fac` that supports this artifact."""
    for name in fac.Names():
      cls = fac.GetType(name)
      if self._artifact_name in cls.supported_artifacts:
        yield cls
| 35.649573
| 80
| 0.771997
|
from typing import Iterator
from typing import Text
from typing import Type
from typing import TypeVar
from grr_response_core.lib import factory
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.parsers import abstract
from grr_response_core.lib.util import collection
from grr_response_core.lib.util import precondition
ParseError = abstract.ParseError
Parser = abstract.Parser
SingleResponseParser = abstract.SingleResponseParser
SingleFileParser = abstract.SingleFileParser
MultiResponseParser = abstract.MultiResponseParser
MultiFileParser = abstract.MultiFileParser
_Factory = factory.Factory
_RDFValue = rdfvalue.RDFValue
SINGLE_RESPONSE_PARSER_FACTORY: _Factory[SingleResponseParser[_RDFValue]] = (
_Factory(SingleResponseParser[_RDFValue]))
MULTI_RESPONSE_PARSER_FACTORY: _Factory[MultiResponseParser[_RDFValue]] = (
_Factory(MultiResponseParser[_RDFValue]))
SINGLE_FILE_PARSER_FACTORY: _Factory[SingleFileParser[_RDFValue]] = (
_Factory(SingleFileParser[_RDFValue]))
MULTI_FILE_PARSER_FACTORY: _Factory[MultiFileParser[_RDFValue]] = (
_Factory(MultiFileParser[_RDFValue]))
_P = TypeVar("_P", bound=Parser)
class ArtifactParserFactory(object):
def __init__(self, artifact_name: Text) -> None:
precondition.AssertType(artifact_name, Text)
self._artifact_name = artifact_name
def HasParsers(self) -> bool:
return (self.HasSingleResponseParsers() or self.HasMultiResponseParsers() or
self.HasSingleFileParsers() or self.HasMultiFileParsers())
def HasSingleResponseParsers(self) -> bool:
return any(self.SingleResponseParserTypes())
def SingleResponseParsers(self) -> Iterator[SingleResponseParser[_RDFValue]]:
return self._CreateSupportedParsers(SINGLE_RESPONSE_PARSER_FACTORY)
def SingleResponseParserTypes(
self) -> Iterator[Type[SingleResponseParser[_RDFValue]]]:
return self._SupportedTypes(SINGLE_RESPONSE_PARSER_FACTORY)
def HasMultiResponseParsers(self) -> bool:
return any(self.MultiResponseParserTypes())
def MultiResponseParsers(self) -> Iterator[MultiResponseParser[_RDFValue]]:
return self._CreateSupportedParsers(MULTI_RESPONSE_PARSER_FACTORY)
def MultiResponseParserTypes(
self) -> Iterator[Type[MultiResponseParser[_RDFValue]]]:
return self._SupportedTypes(MULTI_RESPONSE_PARSER_FACTORY)
def HasSingleFileParsers(self) -> bool:
return any(self.SingleFileParserTypes())
def SingleFileParsers(self) -> Iterator[SingleFileParser[_RDFValue]]:
return self._CreateSupportedParsers(SINGLE_FILE_PARSER_FACTORY)
def SingleFileParserTypes(
self) -> Iterator[Type[SingleFileParser[_RDFValue]]]:
return self._SupportedTypes(SINGLE_FILE_PARSER_FACTORY)
def HasMultiFileParsers(self) -> bool:
return any(self.MultiFileParserTypes())
def MultiFileParsers(self) -> Iterator[MultiFileParser[_RDFValue]]:
return self._CreateSupportedParsers(MULTI_FILE_PARSER_FACTORY)
def MultiFileParserTypes(self) -> Iterator[Type[MultiFileParser[_RDFValue]]]:
return self._SupportedTypes(MULTI_FILE_PARSER_FACTORY)
def AllParserTypes(self) -> Iterator[Type[Parser[_RDFValue]]]:
return collection.Flatten([
self.SingleResponseParserTypes(),
self.MultiResponseParserTypes(),
self.SingleFileParserTypes(),
self.MultiFileParserTypes(),
])
def _CreateSupportedParsers(self, fac: _Factory[_P]) -> Iterator[_P]:
for name in fac.Names():
cls = fac.GetType(name)
if self._artifact_name in cls.supported_artifacts:
yield fac.Create(name)
def _SupportedTypes(self, fac: _Factory[_P]) -> Iterator[Type[_P]]:
  """Yields registered parser classes that support this artifact."""
  candidates = (fac.GetType(name) for name in fac.Names())
  for candidate in candidates:
    if self._artifact_name in candidate.supported_artifacts:
      yield candidate
| true
| true
|
f707fcc55aa082ff4a710a1c92b8613d1ec4a4b8
| 464
|
py
|
Python
|
utils/message_cooldown.py
|
Chr1sDev/Bloo
|
3e580c06c415f949997a1b6417308aa93543d64b
|
[
"MIT"
] | 34
|
2021-10-30T16:48:28.000Z
|
2022-03-25T03:22:12.000Z
|
utils/message_cooldown.py
|
Chr1sDev/Bloo
|
3e580c06c415f949997a1b6417308aa93543d64b
|
[
"MIT"
] | 9
|
2021-11-19T04:25:29.000Z
|
2022-03-09T22:35:46.000Z
|
utils/message_cooldown.py
|
Chr1sDev/Bloo
|
3e580c06c415f949997a1b6417308aa93543d64b
|
[
"MIT"
] | 20
|
2021-11-05T21:14:59.000Z
|
2022-03-30T21:15:40.000Z
|
from discord.ext import commands
"""
A custom Cooldown type subclassing built in cooldowns from discord.ext commands.
This is a bucket type that allows cooldowns to work based on some text, allowing
things like cooldown on individual `Tags`, or message spam detection.
"""
class MessageTextBucket(commands.BucketType):
    """Cooldown bucket keyed on arbitrary text rather than author/channel/guild."""

    # Distinct marker value for this custom bucket kind — presumably chosen to
    # avoid clashing with the built-in BucketType values; confirm against
    # the discord.ext.commands version in use.
    custom = 7

    def get_key(self, text):
        # The text itself is the cooldown key: identical strings share a bucket.
        return text

    def __call__(self, msg):
        return self.get_key(msg)
| 27.294118
| 80
| 0.711207
|
from discord.ext import commands
class MessageTextBucket(commands.BucketType):
custom = 7
def get_key(self, text):
return text
def __call__(self, msg):
return self.get_key(msg)
| true
| true
|
f707fcd87d09d2e1a64f0ee2d3b03762ada6274b
| 243
|
py
|
Python
|
leetCode/algorithms/easy/find_greatest_common_divisor_of_array.py
|
ferhatelmas/algo
|
a7149c7a605708bc01a5cd30bf5455644cefd04d
|
[
"WTFPL"
] | 25
|
2015-01-21T16:39:18.000Z
|
2021-05-24T07:01:24.000Z
|
leetCode/algorithms/easy/find_greatest_common_divisor_of_array.py
|
ferhatelmas/algo
|
a7149c7a605708bc01a5cd30bf5455644cefd04d
|
[
"WTFPL"
] | 2
|
2020-09-30T19:39:36.000Z
|
2020-10-01T17:15:16.000Z
|
leetCode/algorithms/easy/find_greatest_common_divisor_of_array.py
|
ferhatelmas/algo
|
a7149c7a605708bc01a5cd30bf5455644cefd04d
|
[
"WTFPL"
] | 15
|
2015-01-21T16:39:27.000Z
|
2020-10-01T17:00:22.000Z
|
from typing import List
class Solution:
    def findGCD(self, nums: List[int]) -> int:
        """Return the greatest common divisor of the smallest and largest values in nums.

        Any common divisor of all elements divides both min and max, and the
        problem only asks for gcd(min, max). Uses Euclid's algorithm via
        math.gcd — O(log(min(a, b))) instead of the original O(min(a, b))
        countdown loop.
        """
        from math import gcd  # local import keeps the module surface unchanged
        return gcd(min(nums), max(nums))
| 22.090909
| 46
| 0.477366
|
from typing import List
class Solution:
def findGCD(self, nums: List[int]) -> int:
a, b = min(nums), max(nums)
for i in range(a, 1, -1):
if b % i == 0 and a % i == 0:
return i
return 1
| true
| true
|
f707fcf120b0aa566709b4090ad0439dfca5c5d7
| 3,940
|
py
|
Python
|
cs15211/InsertintoaBinarySearchTree.py
|
JulyKikuAkita/PythonPrac
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
[
"Apache-2.0"
] | 1
|
2021-07-05T01:53:30.000Z
|
2021-07-05T01:53:30.000Z
|
cs15211/InsertintoaBinarySearchTree.py
|
JulyKikuAkita/PythonPrac
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
[
"Apache-2.0"
] | null | null | null |
cs15211/InsertintoaBinarySearchTree.py
|
JulyKikuAkita/PythonPrac
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
[
"Apache-2.0"
] | 1
|
2018-01-08T07:14:08.000Z
|
2018-01-08T07:14:08.000Z
|
__source__ = 'https://leetcode.com/problems/insert-into-a-binary-search-tree/'
# Time: O(h) h: height of the tree
# Space: O(h)
#
# Description: Leetcode # 701. Insert into a Binary Search Tree
#
# Given the root node of a binary search tree (BST) and a value to be inserted into the tree,
# insert the value into the BST. Return the root node of the BST after the insertion.
# It is guaranteed that the new value does not exist in the original BST.
#
# Note that there may exist multiple valid ways for the insertion,
# as long as the tree remains a BST after insertion.
# You can return any of them.
#
# For example,
#
# Given the tree:
# 4
# / \
# 2 7
# / \
# 1 3
# And the value to insert: 5
# You can return this binary search tree:
#
# 4
# / \
# 2 7
# / \ /
# 1 3 5
# This tree is also valid:
#
# 5
# / \
# 2 7
# / \
# 1 3
# \
# 4
#
import unittest
# Definition for a binary tree node.
class TreeNode(object):
    """A single node of a binary tree (used here as a BST node)."""

    def __init__(self, x):
        self.val = x       # node payload
        self.left = None   # left child; insertIntoBST puts values <= val here
        self.right = None  # right child; insertIntoBST puts values > val here
#108ms 55.06%
class Solution(object):
    def insertIntoBST(self, root, val):
        """Insert val into the BST rooted at root; return the (possibly new) root.

        :type root: TreeNode
        :type val: int
        :rtype: TreeNode
        """
        # Empty subtree: the new value becomes the root of this subtree.
        if root is None:
            return TreeNode(val)
        # Recurse into the side that keeps the BST ordering invariant.
        if val > root.val:
            root.right = self.insertIntoBST(root.right, val)
        else:
            root.left = self.insertIntoBST(root.left, val)
        return root
#100ms 98.14%
class Solution2(object):
    def insertIntoBST(self, root, val):
        """Insert val into the BST rooted at root; return the (possibly new) root.

        :type root: TreeNode
        :type val: int
        :rtype: TreeNode
        """
        return Solution2.BST_insert(root, val)

    @staticmethod
    def BST_insert(root, val):
        """Recursively insert val and return the root of this subtree.

        Bug fix: the original recursed via ``Solution.BST_insert``, but the
        ``Solution`` class in this module has no ``BST_insert`` attribute, so
        any insertion deeper than the root raised AttributeError.
        """
        if root is None:
            root = TreeNode(val)
        elif root.val < val:
            root.right = Solution2.BST_insert(root.right, val)
        else:
            root.left = Solution2.BST_insert(root.left, val)
        return root
class TestMethods(unittest.TestCase):
    """Placeholder test case so the module is runnable under unittest."""

    def test_Local(self):
        # Trivial sanity check only; no behavioral tests are included here.
        self.assertEqual(1, 1)
# Run the placeholder unit tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
Java = '''
# Thought:
/**
* Definition for a binary tree node.
* public class TreeNode {
* int val;
* TreeNode left;
* TreeNode right;
* TreeNode(int x) { val = x; }
* }
*/
time complexity of the insertion operation is the same with search operation which is O(h).
Or O(N) in the worst case and O(logN) ideally if the tree is well organized.
The space complexity of the recursion soultion is O(h) as well.
In other word, O(N) in the worst case and O(logN) ideally.
If you implement the algorithm iteratively, the space complexity can be O(1).
# Recursion
# 1ms 100%
class Solution {
public TreeNode insertIntoBST(TreeNode root, int val) {
if (root == null) return new TreeNode(val);
if (val < root.val) {
root.left = insertIntoBST(root.left, val);
} else {
root.right = insertIntoBST(root.right, val);
}
return root;
}
}
Ex: [7,3,9,2,5], insert 4,
the new BST will be : [7,3,9,2,5,null,null,null,null,4]. no need to balance
# Iteration
# 1ms 100%
class Solution {
public TreeNode insertIntoBST(TreeNode root, int val) {
if(root == null) return new TreeNode(val);
TreeNode cur = root;
while(true) {
if(cur.val <= val) {
if(cur.right != null) cur = cur.right;
else {
cur.right = new TreeNode(val);
break;
}
} else {
if(cur.left != null) cur = cur.left;
else {
cur.left = new TreeNode(val);
break;
}
}
}
return root;
}
}
'''
| 25.584416
| 93
| 0.555838
|
__source__ = 'https://leetcode.com/problems/insert-into-a-binary-search-tree/'
import unittest
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def insertIntoBST(self, root, val):
if not root:
root = TreeNode(val)
return root
if val > root.val:
root.right = self.insertIntoBST(root.right, val)
else:
root.left = self.insertIntoBST(root.left, val)
return root
class Solution2(object):
def insertIntoBST(self, root, val):
return Solution2.BST_insert(root, val)
@staticmethod
def BST_insert(root, val):
if root == None:
root = TreeNode(val)
elif root.val < val:
root.right = Solution.BST_insert(root.right, val)
else:
root.left = Solution.BST_insert(root.left, val)
return root
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
/**
* Definition for a binary tree node.
* public class TreeNode {
* int val;
* TreeNode left;
* TreeNode right;
* TreeNode(int x) { val = x; }
* }
*/
time complexity of the insertion operation is the same with search operation which is O(h).
Or O(N) in the worst case and O(logN) ideally if the tree is well organized.
The space complexity of the recursion soultion is O(h) as well.
In other word, O(N) in the worst case and O(logN) ideally.
If you implement the algorithm iteratively, the space complexity can be O(1).
# Recursion
# 1ms 100%
class Solution {
public TreeNode insertIntoBST(TreeNode root, int val) {
if (root == null) return new TreeNode(val);
if (val < root.val) {
root.left = insertIntoBST(root.left, val);
} else {
root.right = insertIntoBST(root.right, val);
}
return root;
}
}
Ex: [7,3,9,2,5], insert 4,
the new BST will be : [7,3,9,2,5,null,null,null,null,4]. no need to balance
# Iteration
# 1ms 100%
class Solution {
public TreeNode insertIntoBST(TreeNode root, int val) {
if(root == null) return new TreeNode(val);
TreeNode cur = root;
while(true) {
if(cur.val <= val) {
if(cur.right != null) cur = cur.right;
else {
cur.right = new TreeNode(val);
break;
}
} else {
if(cur.left != null) cur = cur.left;
else {
cur.left = new TreeNode(val);
break;
}
}
}
return root;
}
}
'''
| true
| true
|
f707fdcef8fbf5d9da0777b3f6d831fb0715ffb7
| 909
|
py
|
Python
|
run.py
|
dipans/case-service
|
65c8514c344dff895daf41298f0225837e6c3207
|
[
"MIT"
] | null | null | null |
run.py
|
dipans/case-service
|
65c8514c344dff895daf41298f0225837e6c3207
|
[
"MIT"
] | null | null | null |
run.py
|
dipans/case-service
|
65c8514c344dff895daf41298f0225837e6c3207
|
[
"MIT"
] | null | null | null |
from flask import Flask, json
import logging
def log_exception(sender, exception, **extra):
    """Debug-log an exception raised while `sender` (a Flask app) handles a request.

    NOTE(review): shaped like a Flask `got_request_exception` signal handler,
    but it is never connected to a signal in this module — confirm wiring.
    """
    sender.logger.debug('Got exception during processing: %s', exception)
def create_app(config_file):
    """Create and configure the Case Service Flask application.

    Args:
        config_file: import path (or object) holding the Flask configuration,
            passed to ``app.config.from_object``.

    Returns:
        The configured Flask app with the API blueprint registered under
        ``/api`` and the database bound.
    """
    # Instantiate Flask and apply the configuration.
    app = Flask(__name__)
    app.config.from_object(config_file)

    from app import api_bp
    app.register_blueprint(api_bp, url_prefix='/api')

    # Or db = SQLAlchemy(app) then use this db ref in model definition
    from model import db
    db.init_app(app)

    @app.route('/')
    def index():
        # Bug fix: this view previously ended with `pass` (returned None),
        # which makes Flask raise "view function did not return a valid
        # response" — i.e. a 500 — on every GET /.
        return app.response_class(
            response=json.dumps('Case Service API'),
            mimetype='application/json'
        )

    @app.route('/isAlive')
    def is_alive():
        # Simple health-check endpoint.
        res = app.response_class(
            response=json.dumps('Case Service API is healthy'),
            mimetype='application/json'
        )
        return res

    return app
if __name__ == "__main__":
app = create_app("config")
app.run(debug=True, host='0.0.0.0', port='8000')
| 24.567568
| 73
| 0.645765
|
from flask import Flask, json
import logging
def log_exception(sender, exception, **extra):
sender.logger.debug('Got exception during processing: %s', exception)
def create_app(config_file):
app = Flask(__name__)
app.config.from_object(config_file)
from app import api_bp
app.register_blueprint(api_bp, url_prefix='/api')
from model import db
db.init_app(app)
@app.route('/')
def index():
pass
@app.route('/isAlive')
def is_alive():
res = app.response_class(
response=json.dumps('Case Service API is healthy'),
mimetype='application/json'
)
return res
return app
if __name__ == "__main__":
app = create_app("config")
app.run(debug=True, host='0.0.0.0', port='8000')
| true
| true
|
f707ff9314e132e4d7436906b76792c00a7e668a
| 2,245
|
py
|
Python
|
examples/tf/trpo_cubecrash.py
|
neurips2020submission11699/metarl
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
[
"MIT"
] | 2
|
2021-02-07T12:14:52.000Z
|
2021-07-29T08:07:22.000Z
|
examples/tf/trpo_cubecrash.py
|
neurips2020submission11699/metarl
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
[
"MIT"
] | null | null | null |
examples/tf/trpo_cubecrash.py
|
neurips2020submission11699/metarl
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""This is an example to train a task with TRPO algorithm.
Here it runs CubeCrash-v0 environment with 100 iterations.
"""
import click
import gym
from metarl import wrap_experiment
from metarl.envs import MetaRLEnv, normalize
from metarl.experiment import LocalTFRunner
from metarl.experiment.deterministic import set_seed
from metarl.tf.algos import TRPO
from metarl.tf.baselines import GaussianCNNBaseline
from metarl.tf.policies import CategoricalCNNPolicy
@click.command()
@click.option('--batch_size', type=int, default=4000)
@wrap_experiment
def trpo_cubecrash(ctxt=None, seed=1, batch_size=4000):
    """Train TRPO on the CubeCrash-v0 environment for 100 epochs.

    Args:
        ctxt (metarl.experiment.ExperimentContext): The experiment
            configuration used by LocalRunner to create the snapshotter.
        seed (int): Seed for the random number generator, for determinism.
        batch_size (int): Number of timesteps to use in each training step.
    """
    set_seed(seed)

    # Shared CNN topology used by both the policy and the baseline regressor.
    conv_filters = ((32, (8, 8)), (64, (4, 4)))
    conv_strides = (4, 2)
    dense_sizes = (32, 32)

    with LocalTFRunner(ctxt) as runner:
        env = MetaRLEnv(normalize(gym.make('CubeCrash-v0')))

        policy = CategoricalCNNPolicy(env_spec=env.spec,
                                      filters=conv_filters,
                                      strides=conv_strides,
                                      padding='VALID',
                                      hidden_sizes=dense_sizes)

        baseline = GaussianCNNBaseline(
            env_spec=env.spec,
            regressor_args=dict(filters=conv_filters,
                                strides=conv_strides,
                                padding='VALID',
                                hidden_sizes=dense_sizes,
                                use_trust_region=True))

        algo = TRPO(env_spec=env.spec,
                    policy=policy,
                    baseline=baseline,
                    max_path_length=100,
                    discount=0.99,
                    gae_lambda=0.95,
                    lr_clip_range=0.2,
                    policy_ent_coeff=0.0,
                    flatten_input=False)

        runner.setup(algo, env)
        runner.train(n_epochs=100, batch_size=batch_size)
trpo_cubecrash()
| 35.078125
| 75
| 0.575056
|
import click
import gym
from metarl import wrap_experiment
from metarl.envs import MetaRLEnv, normalize
from metarl.experiment import LocalTFRunner
from metarl.experiment.deterministic import set_seed
from metarl.tf.algos import TRPO
from metarl.tf.baselines import GaussianCNNBaseline
from metarl.tf.policies import CategoricalCNNPolicy
@click.command()
@click.option('--batch_size', type=int, default=4000)
@wrap_experiment
def trpo_cubecrash(ctxt=None, seed=1, batch_size=4000):
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = MetaRLEnv(normalize(gym.make('CubeCrash-v0')))
policy = CategoricalCNNPolicy(env_spec=env.spec,
filters=((32, (8, 8)), (64, (4, 4))),
strides=(4, 2),
padding='VALID',
hidden_sizes=(32, 32))
baseline = GaussianCNNBaseline(
env_spec=env.spec,
regressor_args=dict(filters=((32, (8, 8)), (64, (4, 4))),
strides=(4, 2),
padding='VALID',
hidden_sizes=(32, 32),
use_trust_region=True))
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
flatten_input=False)
runner.setup(algo, env)
runner.train(n_epochs=100, batch_size=batch_size)
trpo_cubecrash()
| true
| true
|
f708019880f311a66cf3983cc6ca3b09d0e8303c
| 4,562
|
py
|
Python
|
mpld3/utils.py
|
fdeheeger/mpld3
|
c1b7fb0e3a2b0e3fd061f976c0fd19435028e611
|
[
"BSD-3-Clause"
] | 2
|
2021-08-04T08:00:34.000Z
|
2021-08-04T08:00:35.000Z
|
mpld3/utils.py
|
boris-arzur/mpld3
|
e28a87210f974ca0659aaafa38af9f01596daa22
|
[
"BSD-3-Clause"
] | null | null | null |
mpld3/utils.py
|
boris-arzur/mpld3
|
e28a87210f974ca0659aaafa38af9f01596daa22
|
[
"BSD-3-Clause"
] | 1
|
2020-06-15T12:53:39.000Z
|
2020-06-15T12:53:39.000Z
|
"""
mpld3 Utilities
===============
Utility routines for the mpld3 package
"""
import os
import re
import shutil
import warnings
from functools import wraps
from . import urls
# Make sure that DeprecationWarning gets printed
warnings.simplefilter("always", DeprecationWarning)
def html_id_ok(objid, html5=False):
    """Check whether objid is valid as an HTML id attribute.

    If html5 == True, then use the more liberal html5 rules
    (only whitespace is forbidden).
    """
    if html5:
        # Raw strings: '\s' in a plain literal is an invalid escape sequence
        # (DeprecationWarning, SyntaxWarning on newer Pythons).
        return not re.search(r'\s', objid)
    else:
        # HTML4: must start with a letter, then letters/digits/'-' '.' ':' '_'.
        return bool(re.match(r"^[a-zA-Z][a-zA-Z0-9\-\.\:\_]*$", objid))
def get_id(obj, suffix="", prefix="el", warn_on_invalid=True):
    """Get a unique id string for the object.

    The id is ``prefix + pid + id(obj) + suffix``; None/falsy suffix or
    prefix are normalized to the empty string. If ``warn_on_invalid`` the
    result is checked with :func:`html_id_ok` and a warning is emitted when
    it is not a valid HTML4 id.
    """
    if not suffix:
        suffix = ""
    if not prefix:
        prefix = ""
    objid = prefix + str(os.getpid()) + str(id(obj)) + suffix
    if warn_on_invalid and not html_id_ok(objid):
        # Bug fix: the message previously shipped with a literal "{0}"
        # because .format(objid) was never applied.
        warnings.warn('"{0}" is not a valid html ID. This may cause problems'
                      .format(objid))
    return objid
def deprecated(func, old_name, new_name):
    """Decorator to mark functions as deprecated.

    Wraps ``func`` so every call emits a DeprecationWarning naming
    ``old_name`` and ``new_name``, and prepends a deprecation note to the
    wrapper's docstring.
    """
    @wraps(func)
    def new_func(*args, **kwargs):
        warnings.warn(("{0} is deprecated and will be removed. "
                       "Use {1} instead".format(old_name, new_name)),
                      category=DeprecationWarning)
        return func(*args, **kwargs)
    # Bug fix: a function without a docstring has __doc__ == None (copied by
    # @wraps), and str + None raised TypeError here.
    new_func.__doc__ = ("*%s is deprecated: use %s instead*\n\n    "
                        % (old_name, new_name)) + (new_func.__doc__ or "")
    return new_func
def write_ipynb_local_js(location=None, d3_src=None, mpld3_src=None):
    """
    Write the mpld3 and d3 javascript libraries to the given file location.

    This utility is used by the IPython notebook tools to enable easy use
    of mpld3 with no web connection.

    Parameters
    ----------
    location : string (optioal)
        the directory in which the d3 and mpld3 javascript libraries will be
        written. If not specified, the IPython nbextensions directory will be
        used. If IPython doesn't support nbextensions (< 2.0),
        the current working directory will be used.
    d3_src : string (optional)
        the source location of the d3 library. If not specified, the standard
        path in mpld3.urls.D3_LOCAL will be used.
    mpld3_src : string (optional)
        the source location of the mpld3 library. If not specified, the
        standard path in mpld3.urls.MPLD3_LOCAL will be used.

    Returns
    -------
    d3_url, mpld3_url : string
        The URLs to be used for loading these js files.
    """
    # Decide the install mechanism: prefer IPython >= 2.0 nbextensions when no
    # explicit location was given; otherwise copy files into `location`.
    if location is None:
        try:
            from IPython.html import install_nbextension
        except ImportError:
            location = os.getcwd()
            nbextension = False
        else:
            nbextension = True
    else:
        nbextension = False

    # Fall back to the bundled local copies of the libraries.
    if d3_src is None:
        d3_src = urls.D3_LOCAL
    if mpld3_src is None:
        mpld3_src = urls.MPLD3_LOCAL

    d3js = os.path.basename(d3_src)
    mpld3js = os.path.basename(mpld3_src)

    # Fail early if either source file is missing.
    if not os.path.exists(d3_src):
        raise ValueError("d3 src not found at '{0}'".format(d3_src))
    if not os.path.exists(mpld3_src):
        raise ValueError("mpld3 src not found at '{0}'".format(mpld3_src))

    if nbextension:
        # IPython 2.0+.
        # This will not work if a url prefix is added
        prefix = '/nbextensions/'

        try:
            install_nbextension([d3_src, mpld3_src])
        except IOError:
            # files may be read only. We'll try deleting them and re-installing
            from IPython.utils.path import get_ipython_dir
            nbext = os.path.join(get_ipython_dir(), "nbextensions")

            for src in [d3_src, mpld3_src]:
                dest = os.path.join(nbext, os.path.basename(src))
                if os.path.exists(dest):
                    os.remove(dest)
            install_nbextension([d3_src, mpld3_src])

    else:
        # IPython < 2.0 or explicit path.
        # This won't work if users have changed the kernel directory.
        prefix = '/files/'

        d3_dest = os.path.join(location, d3js)
        mpld3_dest = os.path.join(location, mpld3js)

        for src, dest in [(d3_src, d3_dest), (mpld3_src, mpld3_dest)]:
            try:
                shutil.copyfile(src, dest)
            except IOError:
                # file may be read only. We'll try deleting it first
                if os.path.exists(dest):
                    os.remove(dest)
                shutil.copyfile(src, dest)

    # Return notebook-server-relative URLs for the two libraries.
    return prefix + d3js, prefix + mpld3js
| 31.680556
| 79
| 0.612889
|
import os
import re
import shutil
import warnings
from functools import wraps
from . import urls
warnings.simplefilter("always", DeprecationWarning)
def html_id_ok(objid, html5=False):
if html5:
return not re.search('\s', objid)
else:
return bool(re.match("^[a-zA-Z][a-zA-Z0-9\-\.\:\_]*$", objid))
def get_id(obj, suffix="", prefix="el", warn_on_invalid=True):
if not suffix:
suffix = ""
if not prefix:
prefix = ""
objid = prefix + str(os.getpid()) + str(id(obj)) + suffix
if warn_on_invalid and not html_id_ok(objid):
warnings.warn('"{0}" is not a valid html ID. This may cause problems')
return objid
def deprecated(func, old_name, new_name):
@wraps(func)
def new_func(*args, **kwargs):
warnings.warn(("{0} is deprecated and will be removed. "
"Use {1} instead".format(old_name, new_name)),
category=DeprecationWarning)
return func(*args, **kwargs)
new_func.__doc__ = ("*%s is deprecated: use %s instead*\n\n "
% (old_name, new_name)) + new_func.__doc__
return new_func
def write_ipynb_local_js(location=None, d3_src=None, mpld3_src=None):
if location is None:
try:
from IPython.html import install_nbextension
except ImportError:
location = os.getcwd()
nbextension = False
else:
nbextension = True
else:
nbextension = False
if d3_src is None:
d3_src = urls.D3_LOCAL
if mpld3_src is None:
mpld3_src = urls.MPLD3_LOCAL
d3js = os.path.basename(d3_src)
mpld3js = os.path.basename(mpld3_src)
if not os.path.exists(d3_src):
raise ValueError("d3 src not found at '{0}'".format(d3_src))
if not os.path.exists(mpld3_src):
raise ValueError("mpld3 src not found at '{0}'".format(mpld3_src))
if nbextension:
prefix = '/nbextensions/'
try:
install_nbextension([d3_src, mpld3_src])
except IOError:
from IPython.utils.path import get_ipython_dir
nbext = os.path.join(get_ipython_dir(), "nbextensions")
for src in [d3_src, mpld3_src]:
dest = os.path.join(nbext, os.path.basename(src))
if os.path.exists(dest):
os.remove(dest)
install_nbextension([d3_src, mpld3_src])
else:
# IPython < 2.0 or explicit path.
# This won't work if users have changed the kernel directory.
prefix = '/files/'
d3_dest = os.path.join(location, d3js)
mpld3_dest = os.path.join(location, mpld3js)
for src, dest in [(d3_src, d3_dest), (mpld3_src, mpld3_dest)]:
try:
shutil.copyfile(src, dest)
except IOError:
if os.path.exists(dest):
os.remove(dest)
shutil.copyfile(src, dest)
return prefix + d3js, prefix + mpld3js
| true
| true
|
f7080296db42b6afe1d58bb05fa41a87097d1b7a
| 6,495
|
py
|
Python
|
examples/v1alpha3/nas/darts-cnn-cifar10/model.py
|
ChenjunZou/katib
|
6a07daae796c29d24f63375cce71b75c4eee8d9c
|
[
"Apache-2.0"
] | null | null | null |
examples/v1alpha3/nas/darts-cnn-cifar10/model.py
|
ChenjunZou/katib
|
6a07daae796c29d24f63375cce71b75c4eee8d9c
|
[
"Apache-2.0"
] | null | null | null |
examples/v1alpha3/nas/darts-cnn-cifar10/model.py
|
ChenjunZou/katib
|
6a07daae796c29d24f63375cce71b75c4eee8d9c
|
[
"Apache-2.0"
] | 2
|
2020-03-03T06:15:14.000Z
|
2020-03-31T05:39:05.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from operations import FactorizedReduce, StdConv, MixedOp
class Cell(nn.Module):
    """ Cell for search
    Each edge is mixed and continuous relaxed.
    """

    def __init__(self, num_nodes, c_prev_prev, c_prev, c_cur, reduction_prev, reduction_cur, search_space):
        """
        Args:
            num_nodes: Number of intermediate cell nodes
            c_prev_prev: channels_out[k-2]
            c_prev : Channels_out[k-1]
            c_cur : Channels_in[k] (current)
            reduction_prev: flag for whether the previous cell is reduction cell or not
            reduction_cur: flag for whether the current cell is reduction cell or not
            search_space: provides the candidate operation set for MixedOp
        """
        super(Cell, self).__init__()
        self.reduction_cur = reduction_cur
        self.num_nodes = num_nodes

        # If previous cell is reduction cell, current input size does not match with
        # output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing
        if reduction_prev:
            self.preprocess0 = FactorizedReduce(c_prev_prev, c_cur)
        else:
            self.preprocess0 = StdConv(c_prev_prev, c_cur, kernel_size=1, stride=1, padding=0)
        self.preprocess1 = StdConv(c_prev, c_cur, kernel_size=1, stride=1, padding=0)

        # Generate dag from mixed operations: dag_ops[i][j] is the mixed op on
        # the edge from node j to intermediate node i.
        self.dag_ops = nn.ModuleList()

        for i in range(self.num_nodes):
            self.dag_ops.append(nn.ModuleList())
            # Include 2 input nodes
            for j in range(2+i):
                # Reduction with stride = 2 must be only for the input node
                stride = 2 if reduction_cur and j < 2 else 1
                op = MixedOp(c_cur, stride, search_space)
                self.dag_ops[i].append(op)

    def forward(self, s0, s1, w_dag):
        """Run the cell on the two input states, weighting edges by w_dag.

        w_dag holds one weight list per intermediate node (one weight per
        incoming edge); the cell output concatenates all intermediate node
        states along the channel dimension.
        """
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)

        states = [s0, s1]
        for edges, w_list in zip(self.dag_ops, w_dag):
            # Each intermediate state is the weighted sum over all incoming edges.
            state_cur = sum(edges[i](s, w) for i, (s, w) in enumerate((zip(states, w_list))))
            states.append(state_cur)

        # Concatenate intermediate node outputs only (skip the two inputs).
        state_out = torch.cat(states[2:], dim=1)
        return state_out
class NetworkCNN(nn.Module):
    """DARTS search network: a stem, a stack of mixed-op Cells, and a classifier,
    with learnable architecture parameters (alphas) per edge."""

    def __init__(self, init_channels, input_channels, num_classes,
                 num_layers, criterion, search_space, num_nodes, stem_multiplier):
        """
        Args:
            init_channels: base channel count for the first cell.
            input_channels: channels of the input images.
            num_classes: classifier output size.
            num_layers: number of stacked cells.
            criterion: loss function used by loss().
            search_space: provides primitives, parse() and genotype().
            num_nodes: intermediate nodes per cell.
            stem_multiplier: stem output channels = multiplier * init_channels.
        """
        super(NetworkCNN, self).__init__()

        self.init_channels = init_channels
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.criterion = criterion

        # TODO: Algorithm settings?
        self.num_nodes = num_nodes
        self.stem_multiplier = stem_multiplier

        c_cur = self.stem_multiplier*self.init_channels

        self.stem = nn.Sequential(
            nn.Conv2d(input_channels, c_cur, 3, padding=1, bias=False),
            nn.BatchNorm2d(c_cur)
        )

        # In first Cell stem is used for s0 and s1
        # c_prev_prev and c_prev - output channels size
        # c_cur - init channels size
        c_prev_prev, c_prev, c_cur = c_cur, c_cur, self.init_channels

        self.cells = nn.ModuleList()

        reduction_prev = False
        for i in range(self.num_layers):
            # For Network with 1 layer: Only Normal Cell
            if self.num_layers == 1:
                reduction_cur = False
            else:
                # For Network with two layers: First layer - Normal, Second - Reduction
                # For Other Networks: [1/3, 2/3] Layers - Reduction cell with double channels
                # Others - Normal cell
                if ((self.num_layers == 2 and i == 1) or
                        (self.num_layers > 2 and i in [self.num_layers//3, 2*self.num_layers//3])):
                    c_cur *= 2
                    reduction_cur = True
                else:
                    reduction_cur = False

            cell = Cell(self.num_nodes, c_prev_prev, c_prev, c_cur, reduction_prev, reduction_cur, search_space)
            reduction_prev = reduction_cur
            self.cells.append(cell)

            c_cur_out = c_cur * self.num_nodes
            c_prev_prev, c_prev = c_prev, c_cur_out

        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(c_prev, self.num_classes)

        # Initialize alphas parameters: one (num_edges x num_ops) tensor per
        # intermediate node, for normal and (if present) reduction cells.
        num_ops = len(search_space.primitives)
        self.alpha_normal = nn.ParameterList()
        self.alpha_reduce = nn.ParameterList()

        for i in range(self.num_nodes):
            self.alpha_normal.append(nn.Parameter(1e-3*torch.randn(i+2, num_ops)))
            if self.num_layers > 1:
                self.alpha_reduce.append(nn.Parameter(1e-3*torch.randn(i+2, num_ops)))

        # Setup alphas list: (name, parameter) pairs for all alpha parameters,
        # found by name so getAlphas() can yield only architecture weights.
        self.alphas = []
        for name, parameter in self.named_parameters():
            if "alpha" in name:
                self.alphas.append((name, parameter))

    def forward(self, x):
        """Classify a batch: softmax the alphas into edge weights, run the cells."""
        weights_normal = [F.softmax(alpha, dim=-1) for alpha in self.alpha_normal]
        weights_reduce = [F.softmax(alpha, dim=-1) for alpha in self.alpha_reduce]

        s0 = s1 = self.stem(x)

        for cell in self.cells:
            weights = weights_reduce if cell.reduction_cur else weights_normal
            s0, s1 = s1, cell(s0, s1, weights)

        out = self.global_pooling(s1)

        # Make out flatten
        out = out.view(out.size(0), -1)

        logits = self.classifier(out)
        return logits

    def print_alphas(self):
        """Print the softmaxed alpha distributions for inspection."""
        print("\n>>> Alphas Normal <<<")
        for alpha in self.alpha_normal:
            print(F.softmax(alpha, dim=-1))

        if self.num_layers > 1:
            print("\n>>> Alpha Reduce <<<")
            for alpha in self.alpha_reduce:
                print(F.softmax(alpha, dim=-1))
        print("\n")

    def getWeights(self):
        """Return all network parameters (including alphas)."""
        return self.parameters()

    def getAlphas(self):
        """Yield only the architecture (alpha) parameters."""
        for _, parameter in self.alphas:
            yield parameter

    def loss(self, x, y):
        """Compute criterion(forward(x), y)."""
        logits = self.forward(x)
        return self.criterion(logits, y)

    def genotype(self, search_space):
        """Derive the discrete architecture by keeping the top-2 edges per node."""
        gene_normal = search_space.parse(self.alpha_normal, k=2)
        gene_reduce = search_space.parse(self.alpha_reduce, k=2)

        # concat all intermediate nodes
        concat = range(2, 2 + self.num_nodes)

        return search_space.genotype(normal=gene_normal, normal_concat=concat,
                                     reduce=gene_reduce, reduce_concat=concat)
| 35.686813
| 112
| 0.602771
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from operations import FactorizedReduce, StdConv, MixedOp
class Cell(nn.Module):
def __init__(self, num_nodes, c_prev_prev, c_prev, c_cur, reduction_prev, reduction_cur, search_space):
super(Cell, self).__init__()
self.reduction_cur = reduction_cur
self.num_nodes = num_nodes
if reduction_prev:
self.preprocess0 = FactorizedReduce(c_prev_prev, c_cur)
else:
self.preprocess0 = StdConv(c_prev_prev, c_cur, kernel_size=1, stride=1, padding=0)
self.preprocess1 = StdConv(c_prev, c_cur, kernel_size=1, stride=1, padding=0)
self.dag_ops = nn.ModuleList()
for i in range(self.num_nodes):
self.dag_ops.append(nn.ModuleList())
for j in range(2+i):
stride = 2 if reduction_cur and j < 2 else 1
op = MixedOp(c_cur, stride, search_space)
self.dag_ops[i].append(op)
def forward(self, s0, s1, w_dag):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
for edges, w_list in zip(self.dag_ops, w_dag):
state_cur = sum(edges[i](s, w) for i, (s, w) in enumerate((zip(states, w_list))))
states.append(state_cur)
state_out = torch.cat(states[2:], dim=1)
return state_out
class NetworkCNN(nn.Module):
def __init__(self, init_channels, input_channels, num_classes,
num_layers, criterion, search_space, num_nodes, stem_multiplier):
super(NetworkCNN, self).__init__()
self.init_channels = init_channels
self.num_classes = num_classes
self.num_layers = num_layers
self.criterion = criterion
self.num_nodes = num_nodes
self.stem_multiplier = stem_multiplier
c_cur = self.stem_multiplier*self.init_channels
self.stem = nn.Sequential(
nn.Conv2d(input_channels, c_cur, 3, padding=1, bias=False),
nn.BatchNorm2d(c_cur)
)
c_prev_prev, c_prev, c_cur = c_cur, c_cur, self.init_channels
self.cells = nn.ModuleList()
reduction_prev = False
for i in range(self.num_layers):
if self.num_layers == 1:
reduction_cur = False
else:
if ((self.num_layers == 2 and i == 1) or
(self.num_layers > 2 and i in [self.num_layers//3, 2*self.num_layers//3])):
c_cur *= 2
reduction_cur = True
else:
reduction_cur = False
cell = Cell(self.num_nodes, c_prev_prev, c_prev, c_cur, reduction_prev, reduction_cur, search_space)
reduction_prev = reduction_cur
self.cells.append(cell)
c_cur_out = c_cur * self.num_nodes
c_prev_prev, c_prev = c_prev, c_cur_out
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(c_prev, self.num_classes)
num_ops = len(search_space.primitives)
self.alpha_normal = nn.ParameterList()
self.alpha_reduce = nn.ParameterList()
for i in range(self.num_nodes):
self.alpha_normal.append(nn.Parameter(1e-3*torch.randn(i+2, num_ops)))
if self.num_layers > 1:
self.alpha_reduce.append(nn.Parameter(1e-3*torch.randn(i+2, num_ops)))
self.alphas = []
for name, parameter in self.named_parameters():
if "alpha" in name:
self.alphas.append((name, parameter))
def forward(self, x):
weights_normal = [F.softmax(alpha, dim=-1) for alpha in self.alpha_normal]
weights_reduce = [F.softmax(alpha, dim=-1) for alpha in self.alpha_reduce]
s0 = s1 = self.stem(x)
for cell in self.cells:
weights = weights_reduce if cell.reduction_cur else weights_normal
s0, s1 = s1, cell(s0, s1, weights)
out = self.global_pooling(s1)
out = out.view(out.size(0), -1)
logits = self.classifier(out)
return logits
def print_alphas(self):
print("\n>>> Alphas Normal <<<")
for alpha in self.alpha_normal:
print(F.softmax(alpha, dim=-1))
if self.num_layers > 1:
print("\n>>> Alpha Reduce <<<")
for alpha in self.alpha_reduce:
print(F.softmax(alpha, dim=-1))
print("\n")
def getWeights(self):
return self.parameters()
def getAlphas(self):
for _, parameter in self.alphas:
yield parameter
def loss(self, x, y):
logits = self.forward(x)
return self.criterion(logits, y)
def genotype(self, search_space):
gene_normal = search_space.parse(self.alpha_normal, k=2)
gene_reduce = search_space.parse(self.alpha_reduce, k=2)
concat = range(2, 2 + self.num_nodes)
return search_space.genotype(normal=gene_normal, normal_concat=concat,
reduce=gene_reduce, reduce_concat=concat)
| true
| true
|
f70802eb0367315e5902b750a11bf191194df1bf
| 41
|
py
|
Python
|
tools/uniqprimer/uniqprimer/test.py
|
InternationalRiceResearchInstitute/RiceGalaxy
|
35083ed17d59ae91e622613587228d3f7ae7d794
|
[
"CC-BY-3.0"
] | 4
|
2018-10-29T18:34:38.000Z
|
2021-09-29T23:30:42.000Z
|
tools/uniqprimer/uniqprimer/test.py
|
InternationalRiceResearchInstitute/RiceGalaxy
|
35083ed17d59ae91e622613587228d3f7ae7d794
|
[
"CC-BY-3.0"
] | null | null | null |
tools/uniqprimer/uniqprimer/test.py
|
InternationalRiceResearchInstitute/RiceGalaxy
|
35083ed17d59ae91e622613587228d3f7ae7d794
|
[
"CC-BY-3.0"
] | 3
|
2020-02-12T15:22:24.000Z
|
2021-08-19T10:27:39.000Z
|
#!/usr/bin/python
from Bio import SeqIO
| 10.25
| 21
| 0.731707
|
from Bio import SeqIO
| true
| true
|
f708035a79355897d18599363f3ee63ca5175dac
| 6,103
|
py
|
Python
|
tests/test_networkx.py
|
enourbakhsh/skylink
|
83270f3351ff637abeb0af25786412d4dd09134a
|
[
"MIT"
] | null | null | null |
tests/test_networkx.py
|
enourbakhsh/skylink
|
83270f3351ff637abeb0af25786412d4dd09134a
|
[
"MIT"
] | null | null | null |
tests/test_networkx.py
|
enourbakhsh/skylink
|
83270f3351ff637abeb0af25786412d4dd09134a
|
[
"MIT"
] | null | null | null |
import os
import skylink
from skylink import testing
import numpy as np
from astropy.table import Table
import FoFCatalogMatching
import pytest # noqa
# TODO: test the matching with more than two catalogs
# TODO: test N-way matching with `linking_lengths` as a dictionary
# TODO: test if we catch illegal footprints that are not gnomonic-projectable
# TODO: test MPI implementation
# TODO: test a wide range of linking lengths
graph_lib = "networkx"
ncpus_max = os.cpu_count() # maximum number of cpus
linking_lengths_default = 0.75 # arcsec
n = 2_000 # number of objects for the mock-up data
def make_mockup():
    """Build two overlapping mock catalogs of sky positions.

    Returns `cat_a` with `n` sources and `cat_b` with `2n` sources scattered
    around them, so the pair exercises both one-to-one and one-to-many links.
    """

    def _clipped_normal(mu=None, sigma=None, n=None, lower=-0.5, upper=0.5):
        # Normal scatter around `mu`, truncated to [lower, upper].
        return np.clip(np.random.normal(np.repeat(mu, n), sigma), lower, upper)

    np.random.seed(2)
    ra = np.random.uniform(4, 6, n)
    dec = np.random.uniform(-1, 1, n)
    cat_a = Table({"ra": ra, "dec": dec})
    cat_b = Table(
        {
            "ra": np.append(ra + _clipped_normal(0, 0.0004, n),
                            ra + _clipped_normal(0, 0.0001, n)),
            "dec": np.append(dec + _clipped_normal(0, 0.0002, n),
                             dec + _clipped_normal(0, 0.0002, n)),
        }
    )
    return cat_a, cat_b
def run_FoFCatalogMatching(cat_a, cat_b, return_pandas=False):
    """Run the reference FoFCatalogMatching matcher on the two catalogs."""
    matched = FoFCatalogMatching.match(
        {"a": cat_a, "b": cat_b}, linking_lengths_default
    )
    return matched.to_pandas() if return_pandas else matched
def test_graph_lib():
    """skylink with the networkx backend must agree with FoFCatalogMatching."""
    cat_a, cat_b = make_mockup()
    expected = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
    actual = skylink.match(
        {"a": cat_a, "b": cat_b},
        linking_lengths=linking_lengths_default,
        graph_lib=graph_lib,
        nprocs=ncpus_max,
        silent=True,
        return_pandas=True,
        use_linked_mask=False,
    )
    testing.assert_equal(expected, actual)
def run_with_ncpus(cat_a, cat_b, ncpus):
    """Match the two catalogs with a specific worker-process count."""
    options = dict(
        linking_lengths=linking_lengths_default,
        graph_lib=graph_lib,
        nprocs=ncpus,
        silent=True,
        return_pandas=True,
        use_linked_mask=False,
    )
    return skylink.match({"a": cat_a, "b": cat_b}, **options)
def test_nprocs():
    """The match outcome must be independent of the process count."""
    # TODO: test equality with more than 2 catalogs
    cat_a, cat_b = make_mockup()
    previous = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
    for workers in (1, 2, ncpus_max):
        current = run_with_ncpus(cat_a, cat_b, workers)
        testing.assert_equal(previous, current)
        previous = current
def run_with_overlap(cat_a, cat_b, overlap):
    """Match the two catalogs with a specific chunk-overlap factor."""
    options = dict(
        linking_lengths=linking_lengths_default,
        graph_lib=graph_lib,
        overlap=overlap,
        nprocs=ncpus_max,
        silent=True,
        return_pandas=True,
        use_linked_mask=False,
    )
    return skylink.match({"a": cat_a, "b": cat_b}, **options)
def test_overlap():
    """Results must be insensitive to the overlap factor."""
    cat_a, cat_b = make_mockup()
    previous = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
    for overlap in (1.0, 1.1, 1.2):
        current = run_with_overlap(cat_a, cat_b, overlap)
        testing.assert_equal(previous, current)
        previous = current
def run_with_linked_mask(cat_a, cat_b, use_linked_mask):
    """Match the two catalogs with the linked-mask optimisation on or off."""
    options = dict(
        linking_lengths=linking_lengths_default,
        graph_lib=graph_lib,
        use_linked_mask=use_linked_mask,
        nprocs=ncpus_max,
        silent=True,
        return_pandas=True,
    )
    return skylink.match({"a": cat_a, "b": cat_b}, **options)
@pytest.mark.skip(
    reason="FIXME: The `networkx` graph library does not give the right results with use_linked_mask=True"
)
def test_linked_mask():
    """Verify results agree with and without the linked-mask optimisation.

    Currently skipped: the use_linked_mask=True path is known-broken for the
    networkx backend (see the FIXME in the skip reason).
    """
    cat_a, cat_b = make_mockup()
    res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
    res_sl1 = run_with_linked_mask(cat_a, cat_b, True)
    res_sl2 = run_with_linked_mask(cat_a, cat_b, False)
    testing.assert_equal(res_fcm, res_sl1)
    testing.assert_equal(res_sl1, res_sl2)
def run_with_order(cat_a, cat_b, reverse=False):
    """Match with the catalogs registered in forward or reversed dict order."""
    if reverse:
        catalogs = {"b": cat_b, "a": cat_a}
    else:
        catalogs = {"a": cat_a, "b": cat_b}
    options = dict(
        linking_lengths=linking_lengths_default,
        graph_lib=graph_lib,
        nprocs=ncpus_max,
        silent=True,
        return_pandas=True,
        use_linked_mask=False,
    )
    return skylink.match(catalogs, **options)
def test_cat_orders():
    """Catalog insertion order must not change the match result."""
    cat_a, cat_b = make_mockup()
    baseline = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
    forward = run_with_order(cat_a, cat_b, False)
    backward = run_with_order(cat_a, cat_b, True)
    testing.assert_equal(baseline, forward)
    testing.assert_equal(forward, backward)
def run_with_sort(cat_a, cat_b, sort):
    """Match the two catalogs with input sorting enabled or disabled."""
    options = dict(
        linking_lengths=linking_lengths_default,
        graph_lib=graph_lib,
        sort=sort,
        nprocs=ncpus_max,
        silent=True,
        return_pandas=True,
        use_linked_mask=False,
    )
    return skylink.match({"a": cat_a, "b": cat_b}, **options)
def test_sort():
    """Sorting the inputs must not change the match result."""
    cat_a, cat_b = make_mockup()
    baseline = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
    with_sort = run_with_sort(cat_a, cat_b, True)
    without_sort = run_with_sort(cat_a, cat_b, False)
    testing.assert_equal(baseline, with_sort)
    testing.assert_equal(with_sort, without_sort)
def run_with_storekdtree(cat_a, cat_b, storekdtree):
    """Match the two catalogs with KD-tree caching enabled or disabled."""
    options = dict(
        linking_lengths=linking_lengths_default,
        graph_lib=graph_lib,
        storekdtree=storekdtree,
        nprocs=ncpus_max,
        silent=True,
        return_pandas=True,
        use_linked_mask=False,
    )
    return skylink.match({"a": cat_a, "b": cat_b}, **options)
def test_storekdtree():
    """KD-tree caching must not change the match result."""
    cat_a, cat_b = make_mockup()
    baseline = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
    # Same call order as before: the cache-off run happens first.
    cached_off = run_with_storekdtree(cat_a, cat_b, False)
    cached_on = run_with_storekdtree(cat_a, cat_b, True)
    testing.assert_equal(baseline, cached_on)
    testing.assert_equal(cached_on, cached_off)
| 30.212871
| 106
| 0.677372
|
import os
import skylink
from skylink import testing
import numpy as np
from astropy.table import Table
import FoFCatalogMatching
import pytest
graph_lib = "networkx"
ncpus_max = os.cpu_count() linking_lengths_default = 0.75 n = 2_000
def make_mockup():
def tnormal(mu=None, sigma=None, n=None, lower=-0.5, upper=0.5):
return np.clip(np.random.normal(np.repeat(mu, n), sigma), lower, upper)
np.random.seed(2)
ra = np.random.uniform(4, 6, n)
dec = np.random.uniform(-1, 1, n)
cat_a = Table({"ra": ra, "dec": dec})
cat_b = Table(
{
"ra": np.append(ra + tnormal(0, 0.0004, n), ra + tnormal(0, 0.0001, n)),
"dec": np.append(dec + tnormal(0, 0.0002, n), dec + tnormal(0, 0.0002, n)),
}
)
return cat_a, cat_b
def run_FoFCatalogMatching(cat_a, cat_b, return_pandas=False):
res_fcm = FoFCatalogMatching.match(
{"a": cat_a, "b": cat_b}, linking_lengths_default
)
if return_pandas:
return res_fcm.to_pandas()
else:
return res_fcm
def test_graph_lib():
cat_a, cat_b = make_mockup()
res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
res_sl = skylink.match(
{"a": cat_a, "b": cat_b},
linking_lengths=linking_lengths_default,
graph_lib=graph_lib,
nprocs=ncpus_max,
silent=True,
return_pandas=True,
use_linked_mask=False,
)
testing.assert_equal(res_fcm, res_sl)
def run_with_ncpus(cat_a, cat_b, ncpus):
return skylink.match(
{"a": cat_a, "b": cat_b},
linking_lengths=linking_lengths_default,
graph_lib=graph_lib,
nprocs=ncpus,
silent=True,
return_pandas=True,
use_linked_mask=False,
)
def test_nprocs():
cat_a, cat_b = make_mockup()
res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
res_sl1 = run_with_ncpus(cat_a, cat_b, 1)
res_sl2 = run_with_ncpus(cat_a, cat_b, 2)
res_sl3 = run_with_ncpus(cat_a, cat_b, ncpus_max)
testing.assert_equal(res_fcm, res_sl1)
testing.assert_equal(res_sl1, res_sl2)
testing.assert_equal(res_sl2, res_sl3)
def run_with_overlap(cat_a, cat_b, overlap):
return skylink.match(
{"a": cat_a, "b": cat_b},
linking_lengths=linking_lengths_default,
graph_lib=graph_lib,
overlap=overlap,
nprocs=ncpus_max,
silent=True,
return_pandas=True,
use_linked_mask=False,
)
def test_overlap():
cat_a, cat_b = make_mockup()
res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
res_sl1 = run_with_overlap(cat_a, cat_b, 1.0)
res_sl2 = run_with_overlap(cat_a, cat_b, 1.1)
res_sl3 = run_with_overlap(cat_a, cat_b, 1.2)
testing.assert_equal(res_fcm, res_sl1)
testing.assert_equal(res_sl1, res_sl2)
testing.assert_equal(res_sl2, res_sl3)
def run_with_linked_mask(cat_a, cat_b, use_linked_mask):
return skylink.match(
{"a": cat_a, "b": cat_b},
linking_lengths=linking_lengths_default,
graph_lib=graph_lib,
use_linked_mask=use_linked_mask,
nprocs=ncpus_max,
silent=True,
return_pandas=True,
)
@pytest.mark.skip(
reason="FIXME: The `networkx` graph library does not give the right results with use_linked_mask=True"
)
def test_linked_mask():
cat_a, cat_b = make_mockup()
res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
res_sl1 = run_with_linked_mask(cat_a, cat_b, True)
res_sl2 = run_with_linked_mask(cat_a, cat_b, False)
testing.assert_equal(res_fcm, res_sl1)
testing.assert_equal(res_sl1, res_sl2)
def run_with_order(cat_a, cat_b, reverse=False):
cats = {"b": cat_b, "a": cat_a} if reverse else {"a": cat_a, "b": cat_b}
return skylink.match(
cats,
linking_lengths=linking_lengths_default,
graph_lib=graph_lib,
nprocs=ncpus_max,
silent=True,
return_pandas=True,
use_linked_mask=False,
)
def test_cat_orders():
cat_a, cat_b = make_mockup()
res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
res_sl1 = run_with_order(cat_a, cat_b, False)
res_sl2 = run_with_order(cat_a, cat_b, True)
testing.assert_equal(res_fcm, res_sl1)
testing.assert_equal(res_sl1, res_sl2)
def run_with_sort(cat_a, cat_b, sort):
return skylink.match(
{"a": cat_a, "b": cat_b},
linking_lengths=linking_lengths_default,
graph_lib=graph_lib,
sort=sort,
nprocs=ncpus_max,
silent=True,
return_pandas=True,
use_linked_mask=False,
)
def test_sort():
cat_a, cat_b = make_mockup()
res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
res_sl1 = run_with_sort(cat_a, cat_b, True)
res_sl2 = run_with_sort(cat_a, cat_b, False)
testing.assert_equal(res_fcm, res_sl1)
testing.assert_equal(res_sl1, res_sl2)
def run_with_storekdtree(cat_a, cat_b, storekdtree):
return skylink.match(
{"a": cat_a, "b": cat_b},
linking_lengths=linking_lengths_default,
graph_lib=graph_lib,
storekdtree=storekdtree,
nprocs=ncpus_max,
silent=True,
return_pandas=True,
use_linked_mask=False,
)
def test_storekdtree():
cat_a, cat_b = make_mockup()
res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
res_sl2 = run_with_storekdtree(cat_a, cat_b, False)
res_sl1 = run_with_storekdtree(cat_a, cat_b, True)
testing.assert_equal(res_fcm, res_sl1)
testing.assert_equal(res_sl1, res_sl2)
| true
| true
|
f70803ffb59c53fef789cee13dc4890da0d7e084
| 2,527
|
py
|
Python
|
common_parser.py
|
jackalhan/tf_autoencoder_local
|
57212dd64e53ac51dc0d14a9a9480905ebf537c0
|
[
"Apache-2.0"
] | null | null | null |
common_parser.py
|
jackalhan/tf_autoencoder_local
|
57212dd64e53ac51dc0d14a9a9480905ebf537c0
|
[
"Apache-2.0"
] | null | null | null |
common_parser.py
|
jackalhan/tf_autoencoder_local
|
57212dd64e53ac51dc0d14a9a9480905ebf537c0
|
[
"Apache-2.0"
] | null | null | null |
import argparse
def get_parser():
    """Build the command-line parser for autoencoder training options.

    Returns:
        argparse.ArgumentParser: parser exposing model, data, and training
        hyper-parameter options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_dir', default='./text_training',
        help='Output directory for model and training stats.')
    parser.add_argument(
        '--data_dir', default='./text_data',
        help='Directory to download the data to.')
    parser.add_argument('--model', default='convolutional')
    parser.add_argument('--number_of_filters', default="16,8,8")
    parser.add_argument('--dense_layers', default="1:1024,2:512", type=str)
    parser.add_argument('--number_of_tokens', default=144, type=int)
    parser.add_argument('--is_l2_normed', default=True, type=str2bool)
    parser.add_argument(
        '--batch_size', type=int, default=64,
        # BUG FIX: help text previously claimed "default: 256".
        help='Batch size (default: 64)')
    parser.add_argument(
        '--noise_factor', type=float, default=0.5,
        # BUG FIX: help text previously claimed "default: 0".
        help='Amount of noise to add to input (default: 0.5)')
    parser.add_argument(
        '--dropout', type=float, default=0.5,
        # BUG FIX: help text previously claimed "default: 1".
        help='The probability that each element is kept in dropout layers (default: 0.5)')
    parser.add_argument(
        '--loss', type=str, default="custom_distance_loss")
    parser.add_argument(
        '--learning_rate', type=float, default=0.001,
        help='Learning rate (default: 0.001)')
    parser.add_argument(
        '--epochs', type=int, default=50,
        help='Number of epochs to perform for training (default: 50)')
    parser.add_argument(
        '--weight_decay', type=float, default=1e-5,
        help='Amount of weight decay to apply (default: 1e-5)')
    parser.add_argument(
        '--save_images',
        help='Path to directory to store intermediate reconstructed images (default: disabled)')
    parser.add_argument(
        '--images', type=int, default=10,
        help='Number of test images to reconstruct (default: 10)')
    parser.add_argument(
        '--what', choices=['reconstruction', 'embedding'],
        default='embedding',
        help='Whether to display reconstructed images or '
             'create checkpoint with encoder output to visualize '
             'in TensorBoard.')
    return parser


def str2bool(v):
    """Parse a human-readable boolean word into a bool.

    Accepts yes/no, true/false, t/f, y/n, 1/0 (case-insensitive).

    Raises:
        argparse.ArgumentTypeError: if *v* is not a recognised boolean word.
    """
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')
def extract_number_of_filters(number_of_filters_as_arg):
    """Parse a comma-separated string such as "16,8,8" into a list of ints."""
    pieces = number_of_filters_as_arg.split(',')
    return [int(piece.strip()) for piece in pieces]
| 42.830508
| 96
| 0.647408
|
import argparse
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir', default='./text_training',
help='Output directory for model and training stats.')
parser.add_argument(
'--data_dir', default='./text_data',
help='Directory to download the data to.')
parser.add_argument('--model', default='convolutional')
parser.add_argument('--number_of_filters', default="16,8,8")
parser.add_argument('--dense_layers', default="1:1024,2:512", type=str)
parser.add_argument('--number_of_tokens', default=144, type=int)
parser.add_argument('--is_l2_normed', default=True, type=str2bool)
parser.add_argument(
'--batch_size', type=int, default=64,
help='Batch size (default: 256)')
parser.add_argument(
'--noise_factor', type=float, default=0.5,
help='Amount of noise to add to input (default: 0)')
parser.add_argument(
'--dropout', type=float, default=0.5,
help='The probability that each element is kept in dropout layers (default: 1)')
parser.add_argument(
'--loss', type=str, default="custom_distance_loss")
parser.add_argument(
'--learning_rate', type=float, default=0.001,
help='Learning rate (default: 0.001)')
parser.add_argument(
'--epochs', type=int, default=50,
help='Number of epochs to perform for training (default: 50)')
parser.add_argument(
'--weight_decay', type=float, default=1e-5,
help='Amount of weight decay to apply (default: 1e-5)')
parser.add_argument(
'--save_images',
help='Path to directory to store intermediate reconstructed images (default: disabled)')
parser.add_argument(
'--images', type=int, default=10,
help='Number of test images to reconstruct (default: 10)')
parser.add_argument(
'--what', choices=['reconstruction', 'embedding'],
default='embedding',
help='Whether to display reconstructed images or '
'create checkpoint with encoder output to visualize '
'in TensorBoard.')
return parser
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def extract_number_of_filters(number_of_filters_as_arg):
return [int(filter.strip()) for filter in number_of_filters_as_arg.split(',')]
| true
| true
|
f708049ab33a4320070c1cc6272ae9571f5d8f8f
| 2,269
|
py
|
Python
|
tests/dualtor/conftest.py
|
saravanansv/sonic-mgmt
|
677fc0cb2722dba43606957fae75d1cdf852bed7
|
[
"Apache-2.0"
] | null | null | null |
tests/dualtor/conftest.py
|
saravanansv/sonic-mgmt
|
677fc0cb2722dba43606957fae75d1cdf852bed7
|
[
"Apache-2.0"
] | null | null | null |
tests/dualtor/conftest.py
|
saravanansv/sonic-mgmt
|
677fc0cb2722dba43606957fae75d1cdf852bed7
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import logging
import time
from tests.common.dualtor.dual_tor_utils import get_crm_nexthop_counter # lgtm[py/unused-import]
from tests.common.helpers.assertions import pytest_assert as py_assert
from tests.common.fixtures.ptfhost_utils import change_mac_addresses, run_garp_service
CRM_POLL_INTERVAL = 1
CRM_DEFAULT_POLL_INTERVAL = 300
@pytest.fixture
def set_crm_polling_interval(rand_selected_dut):
    """Function-level fixture: drop the CRM polling interval to 1 second for
    the duration of the test, then restore the default interval."""
    wait_time = 2

    def apply_interval(seconds):
        # Push the interval to the DUT and log it.
        logging.info("Setting crm polling interval to {} seconds".format(seconds))
        rand_selected_dut.command("crm config polling interval {}".format(seconds))

    apply_interval(CRM_POLL_INTERVAL)
    logging.info("Waiting {} sec for CRM counters to become updated".format(wait_time))
    time.sleep(wait_time)
    yield
    apply_interval(CRM_DEFAULT_POLL_INTERVAL)
@pytest.fixture
def verify_crm_nexthop_counter_not_increased(rand_selected_dut, set_crm_polling_interval):
    """
    A function level fixture to verify crm nexthop counter not increased.

    Snapshots the counter before the test, re-reads it after one polling
    cycle, and asserts the delta is not positive.
    """
    original_counter = get_crm_nexthop_counter(rand_selected_dut)
    logging.info("Before test: crm nexthop counter = {}".format(original_counter))
    yield
    # Allow one CRM polling cycle so the counter reflects the test's effects.
    time.sleep(CRM_POLL_INTERVAL)
    diff = get_crm_nexthop_counter(rand_selected_dut) - original_counter
    # BUG FIX: this log runs after the test body, but was labelled "Before test".
    logging.info("After test: crm nexthop counter = {}".format(original_counter + diff))
    py_assert(diff <= 0, "crm nexthop counter is increased by {}.".format(diff))
def pytest_addoption(parser):
    """
    Register the dual ToR suite's command line options with pytest.
    """
    group = parser.getgroup("Dual ToR test suite options")
    group.addoption(
        "--mux-stress-count",
        action="store",
        type=int,
        default=2,
        help="The number of iterations for mux stress test",
    )
@pytest.fixture(scope="module", autouse=True)
def common_setup_teardown(request, tbinfo, vmhost):
    """On dualtor topologies, activate the GARP service fixture and restart
    the mux simulator on the VM host; a no-op elsewhere."""
    if 'dualtor' not in tbinfo['topo']['name']:
        return
    request.getfixturevalue('run_garp_service')
    vmhost.shell('systemctl restart mux-simulator')
| 35.453125
| 97
| 0.74394
|
import pytest
import logging
import time
from tests.common.dualtor.dual_tor_utils import get_crm_nexthop_counter from tests.common.helpers.assertions import pytest_assert as py_assert
from tests.common.fixtures.ptfhost_utils import change_mac_addresses, run_garp_service
CRM_POLL_INTERVAL = 1
CRM_DEFAULT_POLL_INTERVAL = 300
@pytest.fixture
def set_crm_polling_interval(rand_selected_dut):
wait_time = 2
logging.info("Setting crm polling interval to {} seconds".format(CRM_POLL_INTERVAL))
rand_selected_dut.command("crm config polling interval {}".format(CRM_POLL_INTERVAL))
logging.info("Waiting {} sec for CRM counters to become updated".format(wait_time))
time.sleep(wait_time)
yield
logging.info("Setting crm polling interval to {} seconds".format(CRM_DEFAULT_POLL_INTERVAL))
rand_selected_dut.command("crm config polling interval {}".format(CRM_DEFAULT_POLL_INTERVAL))
@pytest.fixture
def verify_crm_nexthop_counter_not_increased(rand_selected_dut, set_crm_polling_interval):
original_counter = get_crm_nexthop_counter(rand_selected_dut)
logging.info("Before test: crm nexthop counter = {}".format(original_counter))
yield
time.sleep(CRM_POLL_INTERVAL)
diff = get_crm_nexthop_counter(rand_selected_dut) - original_counter
logging.info("Before test: crm nexthop counter = {}".format(original_counter + diff))
py_assert(diff <= 0, "crm nexthop counter is increased by {}.".format(diff))
def pytest_addoption(parser):
dual_tor_group = parser.getgroup("Dual ToR test suite options")
dual_tor_group.addoption(
"--mux-stress-count",
action="store",
default=2,
type=int,
help="The number of iterations for mux stress test"
)
@pytest.fixture(scope="module", autouse=True)
def common_setup_teardown(request, tbinfo, vmhost):
if 'dualtor' in tbinfo['topo']['name']:
request.getfixturevalue('run_garp_service')
vmhost.shell('systemctl restart mux-simulator')
| true
| true
|
f708051a44eab9b8ed607d9c993ed299aa963e52
| 351
|
py
|
Python
|
fable/fable_sources/libtbx/command_line/clear_paths.py
|
hickerson/bbn
|
17ef63ad1717553ab2abb50592f8de79228c8523
|
[
"MIT"
] | 4
|
2016-09-30T15:03:39.000Z
|
2021-03-25T13:27:08.000Z
|
fable/fable_sources/libtbx/command_line/clear_paths.py
|
hickerson/bbn
|
17ef63ad1717553ab2abb50592f8de79228c8523
|
[
"MIT"
] | 1
|
2018-04-18T14:41:18.000Z
|
2018-04-20T19:33:52.000Z
|
fable/fable_sources/libtbx/command_line/clear_paths.py
|
hickerson/bbn
|
17ef63ad1717553ab2abb50592f8de79228c8523
|
[
"MIT"
] | 3
|
2016-04-19T18:20:30.000Z
|
2019-04-03T14:54:29.000Z
|
from __future__ import division
from libtbx.clear_paths \
import remove_or_rename_files_and_directories_if_possible
import sys
def run(args):
    """Remove or rename each path in *args*; warn about any that survive."""
    remaining = remove_or_rename_files_and_directories_if_possible(paths=args)
    for path in remaining:
        # BUG FIX: this line was a bare tuple expression (a Python-2 print
        # statement that lost its `print`), so the warning was never emitted.
        print("WARNING: unable to remove or rename:", path)
if (__name__ == "__main__"):
run(args=sys.argv[1:])
| 27
| 76
| 0.786325
|
from __future__ import division
from libtbx.clear_paths \
import remove_or_rename_files_and_directories_if_possible
import sys
def run(args):
remaining = remove_or_rename_files_and_directories_if_possible(paths=args)
for path in remaining:
"WARNING: unable to remove or rename:", path
if (__name__ == "__main__"):
run(args=sys.argv[1:])
| true
| true
|
f70807754f550c96b6d126376377ea2da89993a5
| 678
|
py
|
Python
|
app/bank/urls.py
|
salmanAndroidDev/digify-task
|
f80f1707f973b92efe6fc34d0f5bcf73b41eda36
|
[
"MIT"
] | null | null | null |
app/bank/urls.py
|
salmanAndroidDev/digify-task
|
f80f1707f973b92efe6fc34d0f5bcf73b41eda36
|
[
"MIT"
] | null | null | null |
app/bank/urls.py
|
salmanAndroidDev/digify-task
|
f80f1707f973b92efe6fc34d0f5bcf73b41eda36
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
# Bank API routes. Views that operate on a specific account take its primary
# key via the <pk> URL segment.
urlpatterns = [
    # Open a new bank account.
    path('open-account/',
         views.OpenAccountAPIView.as_view(),
         name='open_account'),
    # Delete the account identified by <pk>.
    path('delete-account/<pk>/',
         views.DeleteAccountAPIView.as_view(),
         name='delete_account'),
    # Deposit funds into account <pk>.
    path('deposit/<pk>/',
         views.DepositAPIView.as_view(),
         name='deposit'),
    # Withdraw funds from account <pk>.
    path('withdraw/<pk>/',
         views.WithdrawAPIView.as_view(),
         name='withdraw'),
    # Transfer funds out of account <pk>.
    path('transfer/<pk>/',
         views.TransferAPIView.as_view(),
         name='transfer'),
    # Create a new bank branch.
    path('create-branch/',
         views.CreateBranchAPIView.as_view(),
         name='create_branch')
]
| 22.6
| 46
| 0.589971
|
from django.urls import path
from . import views
urlpatterns = [
path('open-account/',
views.OpenAccountAPIView.as_view(),
name='open_account'),
path('delete-account/<pk>/',
views.DeleteAccountAPIView.as_view(),
name='delete_account'),
path('deposit/<pk>/',
views.DepositAPIView.as_view(),
name='deposit'),
path('withdraw/<pk>/',
views.WithdrawAPIView.as_view(),
name='withdraw'),
path('transfer/<pk>/',
views.TransferAPIView.as_view(),
name='transfer'),
path('create-branch/',
views.CreateBranchAPIView.as_view(),
name='create_branch')
]
| true
| true
|
f70808e06acf6eb0d1977a072b76ca3f1f62fd15
| 4,797
|
py
|
Python
|
walk_though(Run this one).py
|
GoreNetwork/What-s-attached-to-switch-walk-though
|
e592b0f467b0a3d05bc1df9c5f884bde31c1c8de
|
[
"Apache-2.0"
] | 1
|
2018-03-28T21:13:40.000Z
|
2018-03-28T21:13:40.000Z
|
walk_though(Run this one).py
|
GoreNetwork/What-s-attached-to-switch-walk-though
|
e592b0f467b0a3d05bc1df9c5f884bde31c1c8de
|
[
"Apache-2.0"
] | null | null | null |
walk_though(Run this one).py
|
GoreNetwork/What-s-attached-to-switch-walk-though
|
e592b0f467b0a3d05bc1df9c5f884bde31c1c8de
|
[
"Apache-2.0"
] | null | null | null |
#from excel_work import*
from common_functions import *
from pull import *
#from mac_and_arp_work import *
from napalm import get_network_driver
from getpass import getpass
from pprint import pprint
from name_work import *
import openpyxl
from openpyxl import Workbook
from openpyxl.compat import range
from openpyxl.utils import get_column_letter
def write_excel_data(row, column, value, sheet):
    """Write *value* into cell (row, column) of *sheet*; return the next column."""
    cell = "{}{}".format(get_column_letter(column), row)
    sheet[cell] = value
    return column + 1
username = input("Username: ")
password = getpass()
def normalize_mac(mac):
    """Canonicalise a MAC address: '0011.2233.4455' -> '00:11:22:33:44:55'."""
    digits = mac.strip(" ").replace('.', '').upper()
    # Join the hex digits in pairs; a lone trailing digit is dropped,
    # matching the original zip-pairing behaviour.
    return ':'.join(digits[i:i + 2] for i in range(0, len(digits) - 1, 2))
def pull_mac_table(netconnect):
    """Parse 'sh mac address-table' output into [{'mac': ..., 'interface': ...}].

    BUG FIX: the original read the module-level ``net_connect`` global and
    silently ignored its ``netconnect`` parameter; the parameter is now used.
    Broadcast (FF:FF:FF:FF:FF:FF) entries and MAC-less lines are skipped.
    """
    mac_table_list = []
    output = netconnect.send_command_expect('sh mac address-table')
    for line in output.split('\n'):
        line = line.strip(" ")
        macs = get_mac(line)
        if len(macs) == 0:
            continue  # header/separator line with no MAC on it
        mac = normalize_mac(macs[0])
        if mac == 'FF:FF:FF:FF:FF:FF':
            continue  # skip the broadcast entry
        interface = normalize_interface_names(line.split(" ")[-1])
        mac_table_list.append({'mac': mac, 'interface': interface})
    return mac_table_list
# NOTE(review): dead code — this definition is immediately shadowed by the
# second `ouicorrect` below (which additionally normalizes each prefix).
# Kept byte-identical here; consider deleting it.
def ouicorrect(list):
    # Truncate each entry to its first 7 characters (the raw OUI prefix).
    templist = []
    for oui in list:
        templist.append(oui[0:7])
    return (templist)
def ouicorrect(list):
    """Return the normalized 7-character OUI prefix of every entry in *list*."""
    return [normalize_mac(entry[0:7]) for entry in list]
def check_ouis(folder_name):
    # Build a {normalized OUI prefix: file name} lookup from every file in
    # *folder_name*; each file lists one OUI per line and the file's name
    # serves as the vendor/type label.
    # NOTE(review): temporarily chdir()s into the folder and back — an
    # exception mid-loop would leave the process in the wrong directory.
    os.chdir(folder_name)
    files = os.listdir()
    #print (files)
    OUIs = {}
    for file in files:
        q =open(file).readlines()
        fixed_oui = ouicorrect (q)
        for each_oui in fixed_oui:
            OUIs [each_oui]= file
    os.chdir(os.pardir)
    return OUIs
def pull_oui_type(mac_address, OUIs):
    """Look up the vendor/type label for a MAC's OUI prefix, or "Unknown"."""
    # First 8 chars of a normalized MAC, e.g. 'AA:BB:CC'.
    return OUIs.get(mac_address[0:8], "Unknown")
# --- Main walkthrough -------------------------------------------------------
# For every "switch,arp_device" line in pull_these.csv: pull the ARP table
# from the ARP device (via napalm) and the MAC table from the switch, join
# them on MAC address, tag each entry with an OUI vendor type and a
# reverse-DNS hostname, then write one worksheet per switch (endpoints
# followed by CDP neighbours) into output.xlsx.
driver = get_network_driver('ios')
to_check = read_doc ('pull_these.csv')
cdp_file_name= "temp_cdp_info.txt"
##device = driver("10.9.106.238", username,password)
#device.open()
#pprint(device.get_mac_address_table())
interfaces =[]
wb = openpyxl.Workbook()
folder_name = "OUIs"
# OUI prefix -> vendor/type file name, loaded once up front.
OUIs = check_ouis(folder_name)
for device_and_arp in to_check:
    print (device_and_arp)
    # try:
    interfaces = []
    switch_to_check = device_and_arp.split(',')[0]
    arp_device = device_and_arp.split(',')[1]
    arp_device = arp_device.rstrip("\n")
    print ("working on "+switch_to_check )
    driver = get_network_driver('ios')
    device = driver(arp_device, username,password)
    device.open()
    arp_table = device.get_arp_table()
    net_connect = make_connection (switch_to_check,username,password)
    mac_table = pull_mac_table (net_connect)
    #pprint (mac_table )
    #pprint(arp_table)
    # Join the MAC table (interface) with the ARP table (ip) on MAC address.
    for mac_entry in mac_table:
        for arp_entry in arp_table:
            if mac_entry['mac'] == arp_entry['mac']:
                tmp = {}
                tmp['mac'] = mac_entry['mac']
                tmp['interface'] = mac_entry['interface']
                tmp['ip'] = arp_entry['ip']
                tmp['type'] = pull_oui_type(tmp['mac'],OUIs)
                try:
                    tmp['hostname'] = socket.gethostbyaddr(tmp['ip'])[0]
                    #print (tmp['hostname'])
                except:
                    tmp['hostname'] = "Unknown"
                interfaces.append(tmp)
    #pprint (interfaces)
    # One worksheet per switch; trunk ("Switch") entries are skipped.
    sheet = wb.create_sheet(switch_to_check)
    row = 1
    for interface in interfaces:
        column = 1
        if interface['interface'] == "Switch":
            continue
        column = write_excel_data(row,column,interface['interface'],sheet)
        column = write_excel_data(row,column,interface['ip'],sheet)
        column = write_excel_data(row,column,interface['type'],sheet)
        column = write_excel_data(row,column,interface['hostname'],sheet)
        column = write_excel_data(row,column,interface['mac'],sheet)
        row = row+1
    row = row+1
    # Append this switch's CDP neighbours below the endpoint rows.
    file_name = switch_to_check+ " show cdp"
    pull_cdp_output(switch_to_check,username,password,file_name)
    #cdp_info = cdp_info.split('\n')
    for each in parse_cdp_out(file_name):
        column = 1
        column = write_excel_data(row,column,each['remote_id'],sheet)
        column = write_excel_data(row,column,each['remote_ip'],sheet)
        column = write_excel_data(row,column,each['local_int'],sheet)
        column = write_excel_data(row,column,each['remote_int'],sheet)
        column = write_excel_data(row,column,each['platform'],sheet)
        row = row+1
    # except:
    #     print(switch_to_check+ " Didn't work"+switch_to_check+ " Didn't work"+switch_to_check+ " Didn't work"+switch_to_check+ " Didn't work")
wb.save('output.xlsx')
| 27.411429
| 166
| 0.691057
|
from common_functions import *
from pull import *
from napalm import get_network_driver
from getpass import getpass
from pprint import pprint
from name_work import *
import openpyxl
from openpyxl import Workbook
from openpyxl.compat import range
from openpyxl.utils import get_column_letter
def write_excel_data(row,column,value,sheet):
tmp = str(get_column_letter(column))+str(row)
sheet[tmp] = value
return column+1
username = input("Username: ")
password = getpass()
def normalize_mac (mac):
mac = mac.strip(" ")
mac = mac.replace('.','')
mac = mac.upper()
t = iter(mac)
mac = ':'.join(a+b for a,b in zip(t, t))
return mac
def pull_mac_table (netconnect):
mac_table_list = []
mac_table = net_connect.send_command_expect('sh mac address-table')
for line in mac_table.split('\n'):
mac_int = {}
line = line.lstrip(" ")
line = line.rstrip(" ")
if len(get_mac (line)) == 0:
continue
mac = normalize_mac (get_mac (line)[0])
if mac =='FF:FF:FF:FF:FF:FF':
continue
mac_int['mac']= mac
interface = normalize_interface_names(line.split(" ")[-1])
mac_int['interface'] = interface
mac_table_list.append(mac_int)
return mac_table_list
def ouicorrect(list):
templist = []
for oui in list:
templist.append(oui[0:7])
return (templist)
def ouicorrect(list):
templist = []
for oui in list:
templist.append(normalize_mac (oui[0:7]))
return (templist)
def check_ouis(folder_name):
os.chdir(folder_name)
files = os.listdir()
OUIs = {}
for file in files:
q =open(file).readlines()
fixed_oui = ouicorrect (q)
for each_oui in fixed_oui:
OUIs [each_oui]= file
os.chdir(os.pardir)
return OUIs
def pull_oui_type(mac_address,OUIs):
mac_oui = mac_address[0:8]
if mac_oui in OUIs:
return OUIs[mac_oui]
else:
return "Unknown"
driver = get_network_driver('ios')
to_check = read_doc ('pull_these.csv')
cdp_file_name= "temp_cdp_info.txt"
interfaces =[]
wb = openpyxl.Workbook()
folder_name = "OUIs"
OUIs = check_ouis(folder_name)
for device_and_arp in to_check:
print (device_and_arp)
interfaces = []
switch_to_check = device_and_arp.split(',')[0]
arp_device = device_and_arp.split(',')[1]
arp_device = arp_device.rstrip("\n")
print ("working on "+switch_to_check )
driver = get_network_driver('ios')
device = driver(arp_device, username,password)
device.open()
arp_table = device.get_arp_table()
net_connect = make_connection (switch_to_check,username,password)
mac_table = pull_mac_table (net_connect)
for mac_entry in mac_table:
for arp_entry in arp_table:
if mac_entry['mac'] == arp_entry['mac']:
tmp = {}
tmp['mac'] = mac_entry['mac']
tmp['interface'] = mac_entry['interface']
tmp['ip'] = arp_entry['ip']
tmp['type'] = pull_oui_type(tmp['mac'],OUIs)
try:
tmp['hostname'] = socket.gethostbyaddr(tmp['ip'])[0]
except:
tmp['hostname'] = "Unknown"
interfaces.append(tmp)
sheet = wb.create_sheet(switch_to_check)
row = 1
for interface in interfaces:
column = 1
if interface['interface'] == "Switch":
continue
column = write_excel_data(row,column,interface['interface'],sheet)
column = write_excel_data(row,column,interface['ip'],sheet)
column = write_excel_data(row,column,interface['type'],sheet)
column = write_excel_data(row,column,interface['hostname'],sheet)
column = write_excel_data(row,column,interface['mac'],sheet)
row = row+1
row = row+1
file_name = switch_to_check+ " show cdp"
pull_cdp_output(switch_to_check,username,password,file_name)
for each in parse_cdp_out(file_name):
column = 1
column = write_excel_data(row,column,each['remote_id'],sheet)
column = write_excel_data(row,column,each['remote_ip'],sheet)
column = write_excel_data(row,column,each['local_int'],sheet)
column = write_excel_data(row,column,each['remote_int'],sheet)
column = write_excel_data(row,column,each['platform'],sheet)
row = row+1
wb.save('output.xlsx')
| true
| true
|
f70809961ec15ad12717baef40cb3cd51be1420e
| 18,949
|
py
|
Python
|
tastypie/test.py
|
mthornhill/django-tastypie
|
a3783a584e3853513746c65c012fa2d1369b8fd3
|
[
"BSD-3-Clause"
] | null | null | null |
tastypie/test.py
|
mthornhill/django-tastypie
|
a3783a584e3853513746c65c012fa2d1369b8fd3
|
[
"BSD-3-Clause"
] | null | null | null |
tastypie/test.py
|
mthornhill/django-tastypie
|
a3783a584e3853513746c65c012fa2d1369b8fd3
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
import time
from django.conf import settings
from django.test import TestCase
from django.test.client import FakePayload, Client
from django.utils.encoding import force_text
from tastypie.serializers import Serializer
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class TestApiClient(object):
    """
    A thin wrapper around Django's test ``Client`` that serializes request
    bodies with a Tastypie ``Serializer`` & sets API-appropriate headers.
    """
    def __init__(self, serializer=None):
        """
        Sets up a fresh ``TestApiClient`` instance.

        If you are employing a custom serializer, you can pass the class to the
        ``serializer=`` kwarg.
        """
        self.client = Client()
        self.serializer = serializer

        if not self.serializer:
            self.serializer = Serializer()

    def get_content_type(self, short_format):
        """
        Given a short name (such as ``json`` or ``xml``), returns the full content-type
        for it (``application/json`` or ``application/xml`` in this case).
        """
        return self.serializer.content_types.get(short_format, 'json')

    def get(self, uri, format='json', data=None, authentication=None, **kwargs):
        """
        Performs a simulated ``GET`` request to the provided URI.

        Optionally accepts a ``data`` kwarg, which in the case of ``GET``, lets you
        send along ``GET`` parameters. This is useful when testing filtering or other
        things that read off the ``GET`` params. Example::

            from tastypie.test import TestApiClient
            client = TestApiClient()

            response = client.get('/api/v1/entry/1/', data={'format': 'json', 'title__startswith': 'a', 'limit': 20, 'offset': 60})

        Optionally accepts an ``authentication`` kwarg, which should be an HTTP header
        with the correct authentication data already setup.

        All other ``**kwargs`` passed in get passed through to the Django
        ``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client
        for details.
        """
        content_type = self.get_content_type(format)
        kwargs['HTTP_ACCEPT'] = content_type

        # GET & DELETE are the only times we don't serialize the data.
        if data is not None:
            kwargs['data'] = data

        if authentication is not None:
            kwargs['HTTP_AUTHORIZATION'] = authentication

        return self.client.get(uri, **kwargs)

    def post(self, uri, format='json', data=None, authentication=None, **kwargs):
        """
        Performs a simulated ``POST`` request to the provided URI.

        Optionally accepts a ``data`` kwarg. **Unlike** ``GET``, in ``POST`` the
        ``data`` gets serialized & sent as the body instead of becoming part of the URI.
        Example::

            from tastypie.test import TestApiClient
            client = TestApiClient()

            response = client.post('/api/v1/entry/', data={
                'created': '2012-05-01T20:02:36',
                'slug': 'another-post',
                'title': 'Another Post',
                'user': '/api/v1/user/1/',
            })

        Optionally accepts an ``authentication`` kwarg, which should be an HTTP header
        with the correct authentication data already setup.

        All other ``**kwargs`` passed in get passed through to the Django
        ``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client
        for details.
        """
        content_type = self.get_content_type(format)
        kwargs['content_type'] = content_type

        if data is not None:
            kwargs['data'] = self.serializer.serialize(data, format=content_type)

        if authentication is not None:
            kwargs['HTTP_AUTHORIZATION'] = authentication

        return self.client.post(uri, **kwargs)

    def put(self, uri, format='json', data=None, authentication=None, **kwargs):
        """
        Performs a simulated ``PUT`` request to the provided URI.

        Optionally accepts a ``data`` kwarg. **Unlike** ``GET``, in ``PUT`` the
        ``data`` gets serialized & sent as the body instead of becoming part of the URI.

        Optionally accepts an ``authentication`` kwarg, which should be an HTTP header
        with the correct authentication data already setup.

        All other ``**kwargs`` passed in get passed through to the Django
        ``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client
        for details.
        """
        content_type = self.get_content_type(format)
        kwargs['content_type'] = content_type

        if data is not None:
            kwargs['data'] = self.serializer.serialize(data, format=content_type)

        if authentication is not None:
            kwargs['HTTP_AUTHORIZATION'] = authentication

        return self.client.put(uri, **kwargs)

    def patch(self, uri, format='json', data=None, authentication=None, **kwargs):
        """
        Performs a simulated ``PATCH`` request to the provided URI.

        Optionally accepts a ``data`` kwarg. **Unlike** ``GET``, in ``PATCH`` the
        ``data`` gets serialized & sent as the body instead of becoming part of the URI.
        When ``data`` is ``None``, an empty body is sent.

        Optionally accepts an ``authentication`` kwarg, which should be an HTTP header
        with the correct authentication data already setup.

        All other ``**kwargs`` passed in get passed through to the Django
        ``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client
        for details.
        """
        content_type = self.get_content_type(format)
        kwargs['content_type'] = content_type

        if data is not None:
            kwargs['data'] = self.serializer.serialize(data, format=content_type)
        else:
            # BUGFIX: the hand-rolled environ below reads ``kwargs['data']``
            # unconditionally, so a ``None`` body used to raise ``KeyError``.
            # Send an explicit empty body instead.
            kwargs['data'] = ''

        if authentication is not None:
            kwargs['HTTP_AUTHORIZATION'] = authentication

        # This hurts because Django doesn't support PATCH natively.
        parsed = urlparse(uri)
        r = {
            'CONTENT_LENGTH': len(kwargs['data']),
            'CONTENT_TYPE': content_type,
            'PATH_INFO': self.client._get_path(parsed),
            'QUERY_STRING': parsed[4],
            'REQUEST_METHOD': 'PATCH',
            'wsgi.input': FakePayload(kwargs['data']),
        }
        r.update(kwargs)
        return self.client.request(**r)

    def delete(self, uri, format='json', data=None, authentication=None, **kwargs):
        """
        Performs a simulated ``DELETE`` request to the provided URI.

        Optionally accepts a ``data`` kwarg, which in the case of ``DELETE``, lets you
        send along ``DELETE`` parameters. This is useful when testing filtering or other
        things that read off the ``DELETE`` params. Example::

            from tastypie.test import TestApiClient
            client = TestApiClient()

            response = client.delete('/api/v1/entry/1/', data={'format': 'json'})

        Optionally accepts an ``authentication`` kwarg, which should be an HTTP header
        with the correct authentication data already setup.

        All other ``**kwargs`` passed in get passed through to the Django
        ``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client
        for details.
        """
        content_type = self.get_content_type(format)
        kwargs['content_type'] = content_type

        # GET & DELETE are the only times we don't serialize the data.
        if data is not None:
            kwargs['data'] = data

        if authentication is not None:
            kwargs['HTTP_AUTHORIZATION'] = authentication

        return self.client.delete(uri, **kwargs)
class ResourceTestCase(TestCase):
    """
    A useful base class for the start of testing Tastypie APIs.

    Provides a configured ``TestApiClient`` (``self.api_client``), helpers for
    building ``Authorization`` headers & a battery of HTTP/format assertions.
    """
    def setUp(self):
        super(ResourceTestCase, self).setUp()
        self.serializer = Serializer()
        self.api_client = TestApiClient()

    def get_credentials(self):
        """
        A convenience method for the user as a way to shorten up the
        often repetitious calls to create the same authentication.

        Raises ``NotImplementedError`` by default.

        Usage::

            class MyResourceTestCase(ResourceTestCase):
                def get_credentials(self):
                    return self.create_basic('daniel', 'pass')

            # Then the usual tests...
        """
        raise NotImplementedError("You must return the class for your Resource to test.")

    def create_basic(self, username, password):
        """
        Creates & returns the HTTP ``Authorization`` header for use with BASIC
        Auth.
        """
        import base64
        return 'Basic %s' % base64.b64encode(':'.join([username, password]).encode('utf-8')).decode('utf-8')

    def create_apikey(self, username, api_key):
        """
        Creates & returns the HTTP ``Authorization`` header for use with
        ``ApiKeyAuthentication``.
        """
        return 'ApiKey %s:%s' % (username, api_key)

    def create_digest(self, username, api_key, method, uri):
        """
        Creates & returns the HTTP ``Authorization`` header for use with Digest
        Auth.
        """
        from tastypie.authentication import hmac, sha1, uuid, python_digest

        new_uuid = uuid.uuid4()
        # BUGFIX: ``hexdigest()`` already returns text on both Python 2 & 3.
        # The previous ``.hexdigest().decode('utf-8')`` raised
        # ``AttributeError`` under Python 3, where ``str`` has no ``decode``.
        opaque = hmac.new(str(new_uuid).encode('utf-8'), digestmod=sha1).hexdigest()
        return python_digest.build_authorization_request(
            username,
            method.upper(),
            uri,
            1,  # nonce_count
            digest_challenge=python_digest.build_digest_challenge(time.time(), getattr(settings, 'SECRET_KEY', ''), 'django-tastypie', opaque, False),
            password=api_key
        )

    def create_oauth(self, user):
        """
        Creates & returns the HTTP ``Authorization`` header for use with Oauth.
        """
        from oauth_provider.models import Consumer, Token, Resource

        # Necessary setup for ``oauth_provider``.
        resource, _ = Resource.objects.get_or_create(url='test', defaults={
            'name': 'Test Resource'
        })
        consumer, _ = Consumer.objects.get_or_create(key='123', defaults={
            'name': 'Test',
            'description': 'Testing...'
        })
        token, _ = Token.objects.get_or_create(key='foo', token_type=Token.ACCESS, defaults={
            'consumer': consumer,
            'resource': resource,
            'secret': '',
            'user': user,
        })

        # Then generate the header.
        oauth_data = {
            'oauth_consumer_key': '123',
            'oauth_nonce': 'abc',
            'oauth_signature': '&',
            'oauth_signature_method': 'PLAINTEXT',
            'oauth_timestamp': str(int(time.time())),
            'oauth_token': 'foo',
        }
        return 'OAuth %s' % ','.join([key+'='+value for key, value in oauth_data.items()])

    def assertHttpOK(self, resp):
        """Ensures the response is returning a HTTP 200."""
        return self.assertEqual(resp.status_code, 200)

    def assertHttpCreated(self, resp):
        """Ensures the response is returning a HTTP 201."""
        return self.assertEqual(resp.status_code, 201)

    def assertHttpAccepted(self, resp):
        """Ensures the response is returning either a HTTP 202 or a HTTP 204."""
        self.assertIn(resp.status_code, [202, 204])
        # A 202/204 response carries no body, hence no ``Content-Type``.
        self.assertNotIn('Content-Type', resp)

    def assertHttpMultipleChoices(self, resp):
        """Ensures the response is returning a HTTP 300."""
        return self.assertEqual(resp.status_code, 300)

    def assertHttpSeeOther(self, resp):
        """Ensures the response is returning a HTTP 303."""
        return self.assertEqual(resp.status_code, 303)

    def assertHttpNotModified(self, resp):
        """Ensures the response is returning a HTTP 304."""
        return self.assertEqual(resp.status_code, 304)

    def assertHttpBadRequest(self, resp):
        """Ensures the response is returning a HTTP 400."""
        return self.assertEqual(resp.status_code, 400)

    def assertHttpUnauthorized(self, resp):
        """Ensures the response is returning a HTTP 401."""
        return self.assertEqual(resp.status_code, 401)

    def assertHttpForbidden(self, resp):
        """Ensures the response is returning a HTTP 403."""
        return self.assertEqual(resp.status_code, 403)

    def assertHttpNotFound(self, resp):
        """Ensures the response is returning a HTTP 404."""
        return self.assertEqual(resp.status_code, 404)

    def assertHttpMethodNotAllowed(self, resp):
        """Ensures the response is returning a HTTP 405."""
        return self.assertEqual(resp.status_code, 405)

    def assertHttpConflict(self, resp):
        """Ensures the response is returning a HTTP 409."""
        return self.assertEqual(resp.status_code, 409)

    def assertHttpGone(self, resp):
        """Ensures the response is returning a HTTP 410."""
        return self.assertEqual(resp.status_code, 410)

    def assertHttpUnprocessableEntity(self, resp):
        """Ensures the response is returning a HTTP 422."""
        return self.assertEqual(resp.status_code, 422)

    def assertHttpTooManyRequests(self, resp):
        """Ensures the response is returning a HTTP 429."""
        return self.assertEqual(resp.status_code, 429)

    def assertHttpApplicationError(self, resp):
        """Ensures the response is returning a HTTP 500."""
        return self.assertEqual(resp.status_code, 500)

    def assertHttpNotImplemented(self, resp):
        """Ensures the response is returning a HTTP 501."""
        return self.assertEqual(resp.status_code, 501)

    def assertValidJSON(self, data):
        """
        Given the provided ``data`` as a string, ensures that it is valid JSON &
        can be loaded properly.
        """
        # Just try the load. If it throws an exception, the test case will fail.
        self.serializer.from_json(data)

    def assertValidXML(self, data):
        """
        Given the provided ``data`` as a string, ensures that it is valid XML &
        can be loaded properly.
        """
        # Just try the load. If it throws an exception, the test case will fail.
        self.serializer.from_xml(data)

    def assertValidYAML(self, data):
        """
        Given the provided ``data`` as a string, ensures that it is valid YAML &
        can be loaded properly.
        """
        # Just try the load. If it throws an exception, the test case will fail.
        self.serializer.from_yaml(data)

    def assertValidPlist(self, data):
        """
        Given the provided ``data`` as a string, ensures that it is valid
        binary plist & can be loaded properly.
        """
        # Just try the load. If it throws an exception, the test case will fail.
        self.serializer.from_plist(data)

    def assertValidJSONResponse(self, resp):
        """
        Given a ``HttpResponse`` coming back from using the ``client``, assert that
        you get back:

        * An HTTP 200
        * The correct content-type (``application/json``)
        * The content is valid JSON
        """
        self.assertHttpOK(resp)
        self.assertTrue(resp['Content-Type'].startswith('application/json'))
        self.assertValidJSON(force_text(resp.content))

    def assertValidXMLResponse(self, resp):
        """
        Given a ``HttpResponse`` coming back from using the ``client``, assert that
        you get back:

        * An HTTP 200
        * The correct content-type (``application/xml``)
        * The content is valid XML
        """
        self.assertHttpOK(resp)
        self.assertTrue(resp['Content-Type'].startswith('application/xml'))
        self.assertValidXML(force_text(resp.content))

    def assertValidYAMLResponse(self, resp):
        """
        Given a ``HttpResponse`` coming back from using the ``client``, assert that
        you get back:

        * An HTTP 200
        * The correct content-type (``text/yaml``)
        * The content is valid YAML
        """
        self.assertHttpOK(resp)
        self.assertTrue(resp['Content-Type'].startswith('text/yaml'))
        self.assertValidYAML(force_text(resp.content))

    def assertValidPlistResponse(self, resp):
        """
        Given a ``HttpResponse`` coming back from using the ``client``, assert that
        you get back:

        * An HTTP 200
        * The correct content-type (``application/x-plist``)
        * The content is valid binary plist data
        """
        self.assertHttpOK(resp)
        self.assertTrue(resp['Content-Type'].startswith('application/x-plist'))
        self.assertValidPlist(force_text(resp.content))

    def deserialize(self, resp):
        """
        Given a ``HttpResponse`` coming back from using the ``client``, this method
        checks the ``Content-Type`` header & attempts to deserialize the data based on
        that.

        It returns a Python datastructure (typically a ``dict``) of the serialized data.
        """
        return self.serializer.deserialize(resp.content, format=resp['Content-Type'])

    def serialize(self, data, format='application/json'):
        """
        Given a Python datastructure (typically a ``dict``) & a desired content-type,
        this method will return a serialized string of that data.
        """
        return self.serializer.serialize(data, format=format)

    def assertKeys(self, data, expected):
        """
        This method ensures that the keys of the ``data`` match up to the keys of
        ``expected``.

        It covers the (extremely) common case where you want to make sure the keys of
        a response match up to what is expected. This is typically less fragile than
        testing the full structure, which can be prone to data changes.
        """
        self.assertEqual(sorted(data.keys()), sorted(expected))
| 35.888258
| 150
| 0.608845
|
from __future__ import unicode_literals
import time
from django.conf import settings
from django.test import TestCase
from django.test.client import FakePayload, Client
from django.utils.encoding import force_text
from tastypie.serializers import Serializer
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class TestApiClient(object):
    """Thin wrapper around Django's test ``Client`` for API-style requests."""
    def __init__(self, serializer=None):
        """Set up a fresh client; pass ``serializer=`` to use a custom one."""
        self.client = Client()
        self.serializer = serializer
        if not self.serializer:
            self.serializer = Serializer()
    def get_content_type(self, short_format):
        """Map a short format name (e.g. ``json``) to its full content-type."""
        return self.serializer.content_types.get(short_format, 'json')
    def get(self, uri, format='json', data=None, authentication=None, **kwargs):
        """Perform a simulated ``GET``; ``data`` becomes query parameters."""
        content_type = self.get_content_type(format)
        kwargs['HTTP_ACCEPT'] = content_type
        # GET & DELETE are the only methods whose data is NOT serialized.
        if data is not None:
            kwargs['data'] = data
        if authentication is not None:
            kwargs['HTTP_AUTHORIZATION'] = authentication
        return self.client.get(uri, **kwargs)
    def post(self, uri, format='json', data=None, authentication=None, **kwargs):
        """Perform a simulated ``POST``; ``data`` is serialized as the body."""
        content_type = self.get_content_type(format)
        kwargs['content_type'] = content_type
        if data is not None:
            kwargs['data'] = self.serializer.serialize(data, format=content_type)
        if authentication is not None:
            kwargs['HTTP_AUTHORIZATION'] = authentication
        return self.client.post(uri, **kwargs)
    def put(self, uri, format='json', data=None, authentication=None, **kwargs):
        """Perform a simulated ``PUT``; ``data`` is serialized as the body."""
        content_type = self.get_content_type(format)
        kwargs['content_type'] = content_type
        if data is not None:
            kwargs['data'] = self.serializer.serialize(data, format=content_type)
        if authentication is not None:
            kwargs['HTTP_AUTHORIZATION'] = authentication
        return self.client.put(uri, **kwargs)
    def patch(self, uri, format='json', data=None, authentication=None, **kwargs):
        """Perform a simulated ``PATCH``; ``data`` is serialized as the body."""
        content_type = self.get_content_type(format)
        kwargs['content_type'] = content_type
        if data is not None:
            kwargs['data'] = self.serializer.serialize(data, format=content_type)
        if authentication is not None:
            kwargs['HTTP_AUTHORIZATION'] = authentication
        # This hurts because Django doesn't support PATCH natively.
        # NOTE(review): if ``data`` is None, ``kwargs['data']`` below raises
        # ``KeyError`` — callers must always pass a body.
        parsed = urlparse(uri)
        r = {
            'CONTENT_LENGTH': len(kwargs['data']),
            'CONTENT_TYPE': content_type,
            'PATH_INFO': self.client._get_path(parsed),
            'QUERY_STRING': parsed[4],
            'REQUEST_METHOD': 'PATCH',
            'wsgi.input': FakePayload(kwargs['data']),
        }
        r.update(kwargs)
        return self.client.request(**r)
    def delete(self, uri, format='json', data=None, authentication=None, **kwargs):
        """Perform a simulated ``DELETE``; ``data`` becomes query parameters."""
        content_type = self.get_content_type(format)
        kwargs['content_type'] = content_type
        # GET & DELETE are the only methods whose data is NOT serialized.
        if data is not None:
            kwargs['data'] = data
        if authentication is not None:
            kwargs['HTTP_AUTHORIZATION'] = authentication
        return self.client.delete(uri, **kwargs)
class ResourceTestCase(TestCase):
    """
    A base class for testing Tastypie APIs: provides ``self.api_client``,
    authentication-header builders & HTTP/format assertion helpers.
    """
    def setUp(self):
        super(ResourceTestCase, self).setUp()
        self.serializer = Serializer()
        self.api_client = TestApiClient()
    def get_credentials(self):
        """Override to return the ``Authorization`` header used by your tests."""
        raise NotImplementedError("You must return the class for your Resource to test.")
    def create_basic(self, username, password):
        """Build an HTTP ``Authorization`` header for BASIC auth."""
        import base64
        return 'Basic %s' % base64.b64encode(':'.join([username, password]).encode('utf-8')).decode('utf-8')
    def create_apikey(self, username, api_key):
        """Build an HTTP ``Authorization`` header for ``ApiKeyAuthentication``."""
        return 'ApiKey %s:%s' % (username, api_key)
    def create_digest(self, username, api_key, method, uri):
        """Build an HTTP ``Authorization`` header for Digest auth."""
        from tastypie.authentication import hmac, sha1, uuid, python_digest
        new_uuid = uuid.uuid4()
        # NOTE(review): ``hexdigest()`` returns ``str`` on Python 3, so the
        # trailing ``.decode('utf-8')`` raises ``AttributeError`` there.
        opaque = hmac.new(str(new_uuid).encode('utf-8'), digestmod=sha1).hexdigest().decode('utf-8')
        return python_digest.build_authorization_request(
            username,
            method.upper(),
            uri,
            1, # nonce_count
            digest_challenge=python_digest.build_digest_challenge(time.time(), getattr(settings, 'SECRET_KEY', ''), 'django-tastypie', opaque, False),
            password=api_key
        )
    def create_oauth(self, user):
        """Build an HTTP ``Authorization`` header for OAuth (PLAINTEXT)."""
        from oauth_provider.models import Consumer, Token, Resource
        # Necessary setup for ``oauth_provider``.
        resource, _ = Resource.objects.get_or_create(url='test', defaults={
            'name': 'Test Resource'
        })
        consumer, _ = Consumer.objects.get_or_create(key='123', defaults={
            'name': 'Test',
            'description': 'Testing...'
        })
        token, _ = Token.objects.get_or_create(key='foo', token_type=Token.ACCESS, defaults={
            'consumer': consumer,
            'resource': resource,
            'secret': '',
            'user': user,
        })
        # Then generate the header.
        oauth_data = {
            'oauth_consumer_key': '123',
            'oauth_nonce': 'abc',
            'oauth_signature': '&',
            'oauth_signature_method': 'PLAINTEXT',
            'oauth_timestamp': str(int(time.time())),
            'oauth_token': 'foo',
        }
        return 'OAuth %s' % ','.join([key+'='+value for key, value in oauth_data.items()])
    # --- Status-code assertion helpers: each checks ``resp.status_code``
    # against the HTTP status named in the method. ---
    def assertHttpOK(self, resp):
        return self.assertEqual(resp.status_code, 200)
    def assertHttpCreated(self, resp):
        return self.assertEqual(resp.status_code, 201)
    def assertHttpAccepted(self, resp):
        # 202/204 responses carry no body, hence no ``Content-Type``.
        self.assertIn(resp.status_code, [202, 204])
        self.assertNotIn('Content-Type', resp)
    def assertHttpMultipleChoices(self, resp):
        return self.assertEqual(resp.status_code, 300)
    def assertHttpSeeOther(self, resp):
        return self.assertEqual(resp.status_code, 303)
    def assertHttpNotModified(self, resp):
        return self.assertEqual(resp.status_code, 304)
    def assertHttpBadRequest(self, resp):
        return self.assertEqual(resp.status_code, 400)
    def assertHttpUnauthorized(self, resp):
        return self.assertEqual(resp.status_code, 401)
    def assertHttpForbidden(self, resp):
        return self.assertEqual(resp.status_code, 403)
    def assertHttpNotFound(self, resp):
        return self.assertEqual(resp.status_code, 404)
    def assertHttpMethodNotAllowed(self, resp):
        return self.assertEqual(resp.status_code, 405)
    def assertHttpConflict(self, resp):
        return self.assertEqual(resp.status_code, 409)
    def assertHttpGone(self, resp):
        return self.assertEqual(resp.status_code, 410)
    def assertHttpUnprocessableEntity(self, resp):
        return self.assertEqual(resp.status_code, 422)
    def assertHttpTooManyRequests(self, resp):
        return self.assertEqual(resp.status_code, 429)
    def assertHttpApplicationError(self, resp):
        return self.assertEqual(resp.status_code, 500)
    def assertHttpNotImplemented(self, resp):
        return self.assertEqual(resp.status_code, 501)
    # --- Format-validity helpers: each deserializes ``data`` and lets any
    # parse exception fail the test. ---
    def assertValidJSON(self, data):
        # Just try the load. If it throws an exception, the test case will fail.
        self.serializer.from_json(data)
    def assertValidXML(self, data):
        # Just try the load. If it throws an exception, the test case will fail.
        self.serializer.from_xml(data)
    def assertValidYAML(self, data):
        # Just try the load. If it throws an exception, the test case will fail.
        self.serializer.from_yaml(data)
    def assertValidPlist(self, data):
        # Just try the load. If it throws an exception, the test case will fail.
        self.serializer.from_plist(data)
    # --- Response helpers: HTTP 200 + expected Content-Type + parseable body. ---
    def assertValidJSONResponse(self, resp):
        self.assertHttpOK(resp)
        self.assertTrue(resp['Content-Type'].startswith('application/json'))
        self.assertValidJSON(force_text(resp.content))
    def assertValidXMLResponse(self, resp):
        self.assertHttpOK(resp)
        self.assertTrue(resp['Content-Type'].startswith('application/xml'))
        self.assertValidXML(force_text(resp.content))
    def assertValidYAMLResponse(self, resp):
        self.assertHttpOK(resp)
        self.assertTrue(resp['Content-Type'].startswith('text/yaml'))
        self.assertValidYAML(force_text(resp.content))
    def assertValidPlistResponse(self, resp):
        self.assertHttpOK(resp)
        self.assertTrue(resp['Content-Type'].startswith('application/x-plist'))
        self.assertValidPlist(force_text(resp.content))
    def deserialize(self, resp):
        """Deserialize a response body according to its ``Content-Type``."""
        return self.serializer.deserialize(resp.content, format=resp['Content-Type'])
    def serialize(self, data, format='application/json'):
        """Serialize a Python datastructure to the given content-type."""
        return self.serializer.serialize(data, format=format)
    def assertKeys(self, data, expected):
        """Assert the keys of ``data`` equal ``expected`` (order-insensitive)."""
        self.assertEqual(sorted(data.keys()), sorted(expected))
| true
| true
|
f7080b1b05bc9b4df0a3a79e8b0eb0dbc73a0cf1
| 78
|
py
|
Python
|
fedml_core/distributed/communication/mqtt/__init__.py
|
Alex-Roudjiat/Federated-ML-AI-Federated-ML-
|
8ccc24cf2c01b868988f5d5bd65f1666cf5526bc
|
[
"Apache-2.0"
] | 1,120
|
2020-07-22T02:30:52.000Z
|
2022-03-31T08:10:44.000Z
|
fedml_core/distributed/communication/mqtt/__init__.py
|
Alex-Roudjiat/Federated-ML-AI-Federated-ML-
|
8ccc24cf2c01b868988f5d5bd65f1666cf5526bc
|
[
"Apache-2.0"
] | 113
|
2020-07-27T03:48:09.000Z
|
2022-03-30T03:25:56.000Z
|
fedml_core/distributed/communication/mqtt/__init__.py
|
Alex-Roudjiat/Federated-ML-AI-Federated-ML-
|
8ccc24cf2c01b868988f5d5bd65f1666cf5526bc
|
[
"Apache-2.0"
] | 381
|
2020-07-22T06:12:57.000Z
|
2022-03-30T18:38:35.000Z
|
"""MQTT communication backend: re-export the manager class at package level."""
from .mqtt_comm_manager import MqttCommManager

# Explicit public API of this package.
__all__ = ['MqttCommManager']
| 19.5
| 46
| 0.820513
|
__all__ = ['MqttCommManager']
from .mqtt_comm_manager import MqttCommManager
| true
| true
|
f7080bda87962f392c237b1d02e4098153d22ac8
| 17,856
|
py
|
Python
|
tests/rest/admin/test_device.py
|
mlakkadshaw/synapse
|
74a2365bd5066955567cc551e72632d6cece94b9
|
[
"Apache-2.0"
] | 9,945
|
2015-01-02T07:41:06.000Z
|
2022-03-31T23:22:42.000Z
|
tests/rest/admin/test_device.py
|
t2bot/synapse
|
62ca554ef09330cb88d46fca8296a859d0adc143
|
[
"Apache-2.0"
] | 9,320
|
2015-01-08T14:09:03.000Z
|
2022-03-31T21:11:24.000Z
|
tests/rest/admin/test_device.py
|
t2bot/synapse
|
62ca554ef09330cb88d46fca8296a859d0adc143
|
[
"Apache-2.0"
] | 2,299
|
2015-01-31T22:16:29.000Z
|
2022-03-31T06:08:26.000Z
|
# Copyright 2020 Dirk Klimpel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.parse
from http import HTTPStatus
from parameterized import parameterized
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.errors import Codes
from synapse.rest.client import login
from synapse.server import HomeServer
from synapse.util import Clock
from tests import unittest
class DeviceRestTestCase(unittest.HomeserverTestCase):
    """Tests for the admin API endpoint managing a single device of a user
    (``/_synapse/admin/v2/users/<user_id>/devices/<device_id>``)."""
    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
    ]
    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.handler = hs.get_device_handler()
        self.admin_user = self.register_user("admin", "pass", admin=True)
        self.admin_user_tok = self.login("admin", "pass")
        self.other_user = self.register_user("user", "pass")
        self.other_user_token = self.login("user", "pass")
        # Logging in created a device for ``other_user``; remember its ID
        # so each test can target an existing device.
        res = self.get_success(self.handler.get_devices_by_user(self.other_user))
        self.other_user_device_id = res[0]["device_id"]
        self.url = "/_synapse/admin/v2/users/%s/devices/%s" % (
            urllib.parse.quote(self.other_user),
            self.other_user_device_id,
        )
    @parameterized.expand(["GET", "PUT", "DELETE"])
    def test_no_auth(self, method: str) -> None:
        """
        Try to access a device of a user without authentication.
        """
        channel = self.make_request(method, self.url, b"{}")
        self.assertEqual(
            HTTPStatus.UNAUTHORIZED,
            channel.code,
            msg=channel.json_body,
        )
        self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
    @parameterized.expand(["GET", "PUT", "DELETE"])
    def test_requester_is_no_admin(self, method: str) -> None:
        """
        If the user is not a server admin, an error is returned.
        """
        channel = self.make_request(
            method,
            self.url,
            access_token=self.other_user_token,
        )
        self.assertEqual(
            HTTPStatus.FORBIDDEN,
            channel.code,
            msg=channel.json_body,
        )
        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
    @parameterized.expand(["GET", "PUT", "DELETE"])
    def test_user_does_not_exist(self, method: str) -> None:
        """
        Tests that a lookup for a user that does not exist returns HTTPStatus.NOT_FOUND.
        """
        url = (
            "/_synapse/admin/v2/users/@unknown_person:test/devices/%s"
            % self.other_user_device_id
        )
        channel = self.make_request(
            method,
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
    @parameterized.expand(["GET", "PUT", "DELETE"])
    def test_user_is_not_local(self, method: str) -> None:
        """
        Tests that a lookup for a user that is not local returns HTTPStatus.BAD_REQUEST.
        """
        url = (
            "/_synapse/admin/v2/users/@unknown_person:unknown_domain/devices/%s"
            % self.other_user_device_id
        )
        channel = self.make_request(
            method,
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
        self.assertEqual("Can only lookup local users", channel.json_body["error"])
    def test_unknown_device(self) -> None:
        """
        Tests that a lookup for a device that does not exist returns
        HTTPStatus.NOT_FOUND for GET but HTTPStatus.OK for PUT & DELETE.
        """
        url = "/_synapse/admin/v2/users/%s/devices/unknown_device" % urllib.parse.quote(
            self.other_user
        )
        channel = self.make_request(
            "GET",
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
        channel = self.make_request(
            "PUT",
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        channel = self.make_request(
            "DELETE",
            url,
            access_token=self.admin_user_tok,
        )
        # Delete unknown device returns status HTTPStatus.OK
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
    def test_update_device_too_long_display_name(self) -> None:
        """
        Update a device with a display name that is invalid (too long).
        """
        # Set initial display name.
        update = {"display_name": "new display"}
        self.get_success(
            self.handler.update_device(
                self.other_user, self.other_user_device_id, update
            )
        )
        # Request to update a device display name with a new value that is longer than allowed.
        update = {
            "display_name": "a"
            * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1)
        }
        channel = self.make_request(
            "PUT",
            self.url,
            access_token=self.admin_user_tok,
            content=update,
        )
        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
        self.assertEqual(Codes.TOO_LARGE, channel.json_body["errcode"])
        # Ensure the display name was not updated.
        channel = self.make_request(
            "GET",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        self.assertEqual("new display", channel.json_body["display_name"])
    def test_update_no_display_name(self) -> None:
        """
        Tests that an update for a device without a JSON body returns HTTPStatus.OK.
        """
        # Set initial display name.
        update = {"display_name": "new display"}
        self.get_success(
            self.handler.update_device(
                self.other_user, self.other_user_device_id, update
            )
        )
        channel = self.make_request(
            "PUT",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        # Ensure the display name was not updated.
        channel = self.make_request(
            "GET",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        self.assertEqual("new display", channel.json_body["display_name"])
    def test_update_display_name(self) -> None:
        """
        Tests a normal successful update of display name.
        """
        # Set new display_name
        channel = self.make_request(
            "PUT",
            self.url,
            access_token=self.admin_user_tok,
            content={"display_name": "new displayname"},
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        # Check new display_name
        channel = self.make_request(
            "GET",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        self.assertEqual("new displayname", channel.json_body["display_name"])
    def test_get_device(self) -> None:
        """
        Tests that a normal lookup for a device succeeds.
        """
        channel = self.make_request(
            "GET",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        self.assertEqual(self.other_user, channel.json_body["user_id"])
        # Check that all fields are available
        self.assertIn("user_id", channel.json_body)
        self.assertIn("device_id", channel.json_body)
        self.assertIn("display_name", channel.json_body)
        self.assertIn("last_seen_ip", channel.json_body)
        self.assertIn("last_seen_ts", channel.json_body)
    def test_delete_device(self) -> None:
        """
        Tests that removing a device succeeds.
        """
        # Count number of devices of a user.
        res = self.get_success(self.handler.get_devices_by_user(self.other_user))
        number_devices = len(res)
        self.assertEqual(1, number_devices)
        # Delete device
        channel = self.make_request(
            "DELETE",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        # Ensure that the number of devices is decreased
        res = self.get_success(self.handler.get_devices_by_user(self.other_user))
        self.assertEqual(number_devices - 1, len(res))
class DevicesRestTestCase(unittest.HomeserverTestCase):
    """Tests the admin API for listing all devices of a user:
    GET /_synapse/admin/v2/users/<user_id>/devices
    """

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        # The admin drives the requests; "other_user" is the lookup target.
        self.admin_user = self.register_user("admin", "pass", admin=True)
        self.admin_user_tok = self.login("admin", "pass")
        self.other_user = self.register_user("user", "pass")
        self.url = "/_synapse/admin/v2/users/%s/devices" % urllib.parse.quote(
            self.other_user
        )

    def test_no_auth(self) -> None:
        """
        Try to list devices of an user without authentication.
        """
        channel = self.make_request("GET", self.url, b"{}")
        self.assertEqual(
            HTTPStatus.UNAUTHORIZED,
            channel.code,
            msg=channel.json_body,
        )
        self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])

    def test_requester_is_no_admin(self) -> None:
        """
        If the user is not a server admin, an error is returned.
        """
        other_user_token = self.login("user", "pass")
        channel = self.make_request(
            "GET",
            self.url,
            access_token=other_user_token,
        )
        self.assertEqual(
            HTTPStatus.FORBIDDEN,
            channel.code,
            msg=channel.json_body,
        )
        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])

    def test_user_does_not_exist(self) -> None:
        """
        Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND
        """
        url = "/_synapse/admin/v2/users/@unknown_person:test/devices"
        channel = self.make_request(
            "GET",
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])

    def test_user_is_not_local(self) -> None:
        """
        Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST
        """
        url = "/_synapse/admin/v2/users/@unknown_person:unknown_domain/devices"
        channel = self.make_request(
            "GET",
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
        self.assertEqual("Can only lookup local users", channel.json_body["error"])

    def test_user_has_no_devices(self) -> None:
        """
        Tests that a normal lookup for devices is successfully
        if user has no devices
        """
        # Get devices
        channel = self.make_request(
            "GET",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        self.assertEqual(0, channel.json_body["total"])
        self.assertEqual(0, len(channel.json_body["devices"]))

    def test_get_devices(self) -> None:
        """
        Tests that a normal lookup for devices is successfully
        """
        # Create devices
        number_devices = 5
        for _ in range(number_devices):
            self.login("user", "pass")
        # Get devices
        channel = self.make_request(
            "GET",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        self.assertEqual(number_devices, channel.json_body["total"])
        self.assertEqual(number_devices, len(channel.json_body["devices"]))
        self.assertEqual(self.other_user, channel.json_body["devices"][0]["user_id"])
        # Check that all fields are available
        for d in channel.json_body["devices"]:
            self.assertIn("user_id", d)
            self.assertIn("device_id", d)
            self.assertIn("display_name", d)
            self.assertIn("last_seen_ip", d)
            self.assertIn("last_seen_ts", d)
class DeleteDevicesRestTestCase(unittest.HomeserverTestCase):
    """Tests the admin API for bulk-deleting devices of a user:
    POST /_synapse/admin/v2/users/<user_id>/delete_devices
    """

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        # Device handler is used to verify store state independently of the API.
        self.handler = hs.get_device_handler()
        self.admin_user = self.register_user("admin", "pass", admin=True)
        self.admin_user_tok = self.login("admin", "pass")
        self.other_user = self.register_user("user", "pass")
        self.url = "/_synapse/admin/v2/users/%s/delete_devices" % urllib.parse.quote(
            self.other_user
        )

    def test_no_auth(self) -> None:
        """
        Try to delete devices of an user without authentication.
        """
        channel = self.make_request("POST", self.url, b"{}")
        self.assertEqual(
            HTTPStatus.UNAUTHORIZED,
            channel.code,
            msg=channel.json_body,
        )
        self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])

    def test_requester_is_no_admin(self) -> None:
        """
        If the user is not a server admin, an error is returned.
        """
        other_user_token = self.login("user", "pass")
        channel = self.make_request(
            "POST",
            self.url,
            access_token=other_user_token,
        )
        self.assertEqual(
            HTTPStatus.FORBIDDEN,
            channel.code,
            msg=channel.json_body,
        )
        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])

    def test_user_does_not_exist(self) -> None:
        """
        Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND
        """
        url = "/_synapse/admin/v2/users/@unknown_person:test/delete_devices"
        channel = self.make_request(
            "POST",
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])

    def test_user_is_not_local(self) -> None:
        """
        Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST
        """
        url = "/_synapse/admin/v2/users/@unknown_person:unknown_domain/delete_devices"
        channel = self.make_request(
            "POST",
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
        self.assertEqual("Can only lookup local users", channel.json_body["error"])

    def test_unknown_devices(self) -> None:
        """
        Tests that a remove of a device that does not exist returns HTTPStatus.OK.
        """
        channel = self.make_request(
            "POST",
            self.url,
            access_token=self.admin_user_tok,
            content={"devices": ["unknown_device1", "unknown_device2"]},
        )
        # Delete unknown devices returns status HTTPStatus.OK
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)

    def test_delete_devices(self) -> None:
        """
        Tests that a remove of devices is successfully
        """
        # Create devices
        number_devices = 5
        for _ in range(number_devices):
            self.login("user", "pass")
        # Get devices
        res = self.get_success(self.handler.get_devices_by_user(self.other_user))
        self.assertEqual(number_devices, len(res))
        # Create list of device IDs
        device_ids = []
        for d in res:
            device_ids.append(str(d["device_id"]))
        # Delete devices
        channel = self.make_request(
            "POST",
            self.url,
            access_token=self.admin_user_tok,
            content={"devices": device_ids},
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        res = self.get_success(self.handler.get_devices_by_user(self.other_user))
        self.assertEqual(0, len(res))
| 33.066667
| 114
| 0.613295
|
import urllib.parse
from http import HTTPStatus
from parameterized import parameterized
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.errors import Codes
from synapse.rest.client import login
from synapse.server import HomeServer
from synapse.util import Clock
from tests import unittest
class DeviceRestTestCase(unittest.HomeserverTestCase):
    """Tests the single-device admin API:
    GET/PUT/DELETE /_synapse/admin/v2/users/<user_id>/devices/<device_id>
    """

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.handler = hs.get_device_handler()
        # Admin performs the requests; other_user owns the device under test.
        self.admin_user = self.register_user("admin", "pass", admin=True)
        self.admin_user_tok = self.login("admin", "pass")
        self.other_user = self.register_user("user", "pass")
        self.other_user_token = self.login("user", "pass")
        # The login above created exactly one device for other_user.
        res = self.get_success(self.handler.get_devices_by_user(self.other_user))
        self.other_user_device_id = res[0]["device_id"]
        self.url = "/_synapse/admin/v2/users/%s/devices/%s" % (
            urllib.parse.quote(self.other_user),
            self.other_user_device_id,
        )

    @parameterized.expand(["GET", "PUT", "DELETE"])
    def test_no_auth(self, method: str) -> None:
        """Requests without an access token are rejected for every method."""
        channel = self.make_request(method, self.url, b"{}")
        self.assertEqual(
            HTTPStatus.UNAUTHORIZED,
            channel.code,
            msg=channel.json_body,
        )
        self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])

    @parameterized.expand(["GET", "PUT", "DELETE"])
    def test_requester_is_no_admin(self, method: str) -> None:
        """A non-admin requester gets a FORBIDDEN error for every method."""
        channel = self.make_request(
            method,
            self.url,
            access_token=self.other_user_token,
        )
        self.assertEqual(
            HTTPStatus.FORBIDDEN,
            channel.code,
            msg=channel.json_body,
        )
        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])

    @parameterized.expand(["GET", "PUT", "DELETE"])
    def test_user_does_not_exist(self, method: str) -> None:
        """A lookup for an unknown local user returns NOT_FOUND."""
        url = (
            "/_synapse/admin/v2/users/@unknown_person:test/devices/%s"
            % self.other_user_device_id
        )
        channel = self.make_request(
            method,
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])

    @parameterized.expand(["GET", "PUT", "DELETE"])
    def test_user_is_not_local(self, method: str) -> None:
        """A lookup for a remote user returns BAD_REQUEST."""
        url = (
            "/_synapse/admin/v2/users/@unknown_person:unknown_domain/devices/%s"
            % self.other_user_device_id
        )
        channel = self.make_request(
            method,
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
        self.assertEqual("Can only lookup local users", channel.json_body["error"])

    def test_unknown_device(self) -> None:
        """GET of an unknown device is NOT_FOUND; PUT and DELETE succeed."""
        url = "/_synapse/admin/v2/users/%s/devices/unknown_device" % urllib.parse.quote(
            self.other_user
        )
        channel = self.make_request(
            "GET",
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
        # PUT on an unknown device is accepted.
        channel = self.make_request(
            "PUT",
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        # DELETE of an unknown device is accepted as well.
        channel = self.make_request(
            "DELETE",
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)

    def test_update_device_too_long_display_name(self) -> None:
        """An over-long display name is rejected and the old name is kept."""
        # Set an initial display name directly via the handler.
        update = {"display_name": "new display"}
        self.get_success(
            self.handler.update_device(
                self.other_user, self.other_user_device_id, update
            )
        )
        # One character over MAX_DEVICE_DISPLAY_NAME_LEN.
        update = {
            "display_name": "a"
            * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1)
        }
        channel = self.make_request(
            "PUT",
            self.url,
            access_token=self.admin_user_tok,
            content=update,
        )
        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
        self.assertEqual(Codes.TOO_LARGE, channel.json_body["errcode"])
        # The display name must be unchanged.
        channel = self.make_request(
            "GET",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        self.assertEqual("new display", channel.json_body["display_name"])

    def test_update_no_display_name(self) -> None:
        """A PUT with no body succeeds and leaves the display name unchanged."""
        update = {"display_name": "new display"}
        self.get_success(
            self.handler.update_device(
                self.other_user, self.other_user_device_id, update
            )
        )
        channel = self.make_request(
            "PUT",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        # The display name must be unchanged.
        channel = self.make_request(
            "GET",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        self.assertEqual("new display", channel.json_body["display_name"])

    def test_update_display_name(self) -> None:
        """A PUT with a display_name updates the device's display name."""
        channel = self.make_request(
            "PUT",
            self.url,
            access_token=self.admin_user_tok,
            content={"display_name": "new displayname"},
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        # Verify the new name via GET.
        channel = self.make_request(
            "GET",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        self.assertEqual("new displayname", channel.json_body["display_name"])

    def test_get_device(self) -> None:
        """A normal device lookup succeeds and returns all fields."""
        channel = self.make_request(
            "GET",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        self.assertEqual(self.other_user, channel.json_body["user_id"])
        # Check that all fields are available.
        self.assertIn("user_id", channel.json_body)
        self.assertIn("device_id", channel.json_body)
        self.assertIn("display_name", channel.json_body)
        self.assertIn("last_seen_ip", channel.json_body)
        self.assertIn("last_seen_ts", channel.json_body)

    def test_delete_device(self) -> None:
        """Deleting a device removes it from the device store."""
        # other_user has exactly one device (created in prepare()).
        res = self.get_success(self.handler.get_devices_by_user(self.other_user))
        number_devices = len(res)
        self.assertEqual(1, number_devices)
        channel = self.make_request(
            "DELETE",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        # The device count must have decreased by one.
        res = self.get_success(self.handler.get_devices_by_user(self.other_user))
        self.assertEqual(number_devices - 1, len(res))
class DevicesRestTestCase(unittest.HomeserverTestCase):
    """Tests the admin API for listing all devices of a user:
    GET /_synapse/admin/v2/users/<user_id>/devices
    """

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        # The admin drives the requests; "other_user" is the lookup target.
        self.admin_user = self.register_user("admin", "pass", admin=True)
        self.admin_user_tok = self.login("admin", "pass")
        self.other_user = self.register_user("user", "pass")
        self.url = "/_synapse/admin/v2/users/%s/devices" % urllib.parse.quote(
            self.other_user
        )

    def test_no_auth(self) -> None:
        """Listing devices without authentication is rejected."""
        channel = self.make_request("GET", self.url, b"{}")
        self.assertEqual(
            HTTPStatus.UNAUTHORIZED,
            channel.code,
            msg=channel.json_body,
        )
        self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])

    def test_requester_is_no_admin(self) -> None:
        """A non-admin requester gets a FORBIDDEN error."""
        other_user_token = self.login("user", "pass")
        channel = self.make_request(
            "GET",
            self.url,
            access_token=other_user_token,
        )
        self.assertEqual(
            HTTPStatus.FORBIDDEN,
            channel.code,
            msg=channel.json_body,
        )
        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])

    def test_user_does_not_exist(self) -> None:
        """A lookup for an unknown local user returns NOT_FOUND."""
        url = "/_synapse/admin/v2/users/@unknown_person:test/devices"
        channel = self.make_request(
            "GET",
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])

    def test_user_is_not_local(self) -> None:
        """A lookup for a remote user returns BAD_REQUEST."""
        url = "/_synapse/admin/v2/users/@unknown_person:unknown_domain/devices"
        channel = self.make_request(
            "GET",
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
        self.assertEqual("Can only lookup local users", channel.json_body["error"])

    def test_user_has_no_devices(self) -> None:
        """A user with no devices yields an empty list and zero total."""
        channel = self.make_request(
            "GET",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        self.assertEqual(0, channel.json_body["total"])
        self.assertEqual(0, len(channel.json_body["devices"]))

    def test_get_devices(self) -> None:
        """A normal lookup returns all devices with all expected fields."""
        # Each login creates one device.
        number_devices = 5
        for _ in range(number_devices):
            self.login("user", "pass")
        channel = self.make_request(
            "GET",
            self.url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        self.assertEqual(number_devices, channel.json_body["total"])
        self.assertEqual(number_devices, len(channel.json_body["devices"]))
        self.assertEqual(self.other_user, channel.json_body["devices"][0]["user_id"])
        # Check that all fields are available on every entry.
        for d in channel.json_body["devices"]:
            self.assertIn("user_id", d)
            self.assertIn("device_id", d)
            self.assertIn("display_name", d)
            self.assertIn("last_seen_ip", d)
            self.assertIn("last_seen_ts", d)
class DeleteDevicesRestTestCase(unittest.HomeserverTestCase):
    """Tests the admin API for bulk-deleting devices of a user:
    POST /_synapse/admin/v2/users/<user_id>/delete_devices
    """

    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
    ]

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        # Device handler is used to verify store state independently of the API.
        self.handler = hs.get_device_handler()
        self.admin_user = self.register_user("admin", "pass", admin=True)
        self.admin_user_tok = self.login("admin", "pass")
        self.other_user = self.register_user("user", "pass")
        self.url = "/_synapse/admin/v2/users/%s/delete_devices" % urllib.parse.quote(
            self.other_user
        )

    def test_no_auth(self) -> None:
        """Deleting devices without authentication is rejected."""
        channel = self.make_request("POST", self.url, b"{}")
        self.assertEqual(
            HTTPStatus.UNAUTHORIZED,
            channel.code,
            msg=channel.json_body,
        )
        self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])

    def test_requester_is_no_admin(self) -> None:
        """A non-admin requester gets a FORBIDDEN error."""
        other_user_token = self.login("user", "pass")
        channel = self.make_request(
            "POST",
            self.url,
            access_token=other_user_token,
        )
        self.assertEqual(
            HTTPStatus.FORBIDDEN,
            channel.code,
            msg=channel.json_body,
        )
        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])

    def test_user_does_not_exist(self) -> None:
        """A lookup for an unknown local user returns NOT_FOUND."""
        url = "/_synapse/admin/v2/users/@unknown_person:test/delete_devices"
        channel = self.make_request(
            "POST",
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])

    def test_user_is_not_local(self) -> None:
        """A lookup for a remote user returns BAD_REQUEST."""
        url = "/_synapse/admin/v2/users/@unknown_person:unknown_domain/delete_devices"
        channel = self.make_request(
            "POST",
            url,
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
        self.assertEqual("Can only lookup local users", channel.json_body["error"])

    def test_unknown_devices(self) -> None:
        """Deleting devices that do not exist still returns OK."""
        channel = self.make_request(
            "POST",
            self.url,
            access_token=self.admin_user_tok,
            content={"devices": ["unknown_device1", "unknown_device2"]},
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)

    def test_delete_devices(self) -> None:
        """Deleting all of a user's devices empties the device store."""
        # Each login creates one device.
        number_devices = 5
        for _ in range(number_devices):
            self.login("user", "pass")
        res = self.get_success(self.handler.get_devices_by_user(self.other_user))
        self.assertEqual(number_devices, len(res))
        # Collect the device IDs to delete.
        device_ids = []
        for d in res:
            device_ids.append(str(d["device_id"]))
        channel = self.make_request(
            "POST",
            self.url,
            access_token=self.admin_user_tok,
            content={"devices": device_ids},
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        # No devices must remain.
        res = self.get_success(self.handler.get_devices_by_user(self.other_user))
        self.assertEqual(0, len(res))
| true
| true
|
f7080bfceabcc1942419df8b4bafac017707b78d
| 21,935
|
py
|
Python
|
pysc2/lib/features_test.py
|
radiantprism/StarCraft-2
|
1f159ae84feaed17c5e0bd70e272c06992ae0c48
|
[
"Apache-2.0"
] | 8,095
|
2017-08-09T17:28:48.000Z
|
2022-03-31T00:19:45.000Z
|
pysc2/lib/features_test.py
|
radiantprism/StarCraft-2
|
1f159ae84feaed17c5e0bd70e272c06992ae0c48
|
[
"Apache-2.0"
] | 328
|
2017-08-09T20:10:20.000Z
|
2022-03-29T05:04:37.000Z
|
pysc2/lib/features_test.py
|
radiantprism/StarCraft-2
|
1f159ae84feaed17c5e0bd70e272c06992ae0c48
|
[
"Apache-2.0"
] | 1,391
|
2017-08-09T18:46:20.000Z
|
2022-03-18T07:46:23.000Z
|
#!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import pickle
from absl.testing import absltest
from absl.testing import parameterized
from future.builtins import range # pylint: disable=redefined-builtin
import numpy
import six
from pysc2.lib import actions
from pysc2.lib import features
from pysc2.lib import point
from google.protobuf import text_format
from s2clientprotocol import sc2api_pb2 as sc_pb
# Heavily trimmed, so this is useful for testing actions, but not observations.
observation_text_proto = """
player_common {
player_id: 1
minerals: 0
vespene: 0
food_cap: 10
food_used: 0
food_army: 0
food_workers: 0
idle_worker_count: 0
army_count: 0
warp_gate_count: 0
larva_count: 0
}
game_loop: 20
"""
# Non-square sizes help catch transposed width/height bugs; square is the
# simple baseline case.
RECTANGULAR_DIMENSIONS = features.Dimensions(screen=(84, 80), minimap=(64, 67))
SQUARE_DIMENSIONS = features.Dimensions(screen=84, minimap=64)
class AvailableActionsTest(absltest.TestCase):
  """Checks which action functions `Features.available_actions` reports."""

  # Functions that are available regardless of the observation content.
  always_expected = {
      "no_op", "move_camera", "select_point", "select_rect",
      "select_control_group"
  }

  def setUp(self):
    super(AvailableActionsTest, self).setUp()
    self.obs = text_format.Parse(observation_text_proto, sc_pb.Observation())
    self.hideSpecificActions(True)

  def hideSpecificActions(self, hide_specific_actions):  # pylint: disable=invalid-name
    """Rebuild self.features with the given `hide_specific_actions` setting."""
    self.features = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS,
        hide_specific_actions=hide_specific_actions))

  def assertAvail(self, expected):
    """Assert the available actions equal `expected` + the always-on set."""
    actual = self.features.available_actions(self.obs)
    actual_names = {actions.FUNCTIONS[i].name for i in actual}
    self.assertEqual(actual_names, set(expected) | self.always_expected)

  def testAlways(self):
    self.assertAvail([])

  def testSelectUnit(self):
    self.obs.ui_data.multi.units.add(unit_type=1)
    self.assertAvail(["select_unit"])

  # NOTE(review): "Workder" is a typo in this test name (should be "Worker");
  # renaming would be a (harmless) interface change, so it is only flagged here.
  def testSelectIdleWorkder(self):
    self.obs.player_common.idle_worker_count = 1
    self.assertAvail(["select_idle_worker"])

  def testSelectArmy(self):
    self.obs.player_common.army_count = 3
    self.assertAvail(["select_army"])

  def testSelectWarpGates(self):
    self.obs.player_common.warp_gate_count = 1
    self.assertAvail(["select_warp_gates"])

  def testSelectLarva(self):
    self.obs.player_common.larva_count = 2
    self.assertAvail(["select_larva"])

  def testQuick(self):
    self.obs.abilities.add(ability_id=32)
    self.assertAvail(["Effect_Salvage_quick"])

  def testScreen(self):
    self.obs.abilities.add(ability_id=326, requires_point=True)
    self.assertAvail(["Build_SensorTower_screen"])

  def testScreenMinimap(self):
    self.obs.abilities.add(ability_id=17, requires_point=True)
    self.assertAvail(["Patrol_screen", "Patrol_minimap"])

  def testScreenAutocast(self):
    self.obs.abilities.add(ability_id=386, requires_point=True)
    self.assertAvail(["Effect_Heal_screen", "Effect_Heal_autocast"])

  def testScreenQuick(self):
    # requires_point toggles between the _screen and _quick variants.
    a = self.obs.abilities.add(ability_id=421)
    self.hideSpecificActions(True)
    a.requires_point = False
    self.assertAvail(["Build_TechLab_quick"])
    a.requires_point = True
    self.assertAvail(["Build_TechLab_screen"])
    self.hideSpecificActions(False)
    a.requires_point = False
    self.assertAvail(["Build_TechLab_Barracks_quick", "Build_TechLab_quick"])
    a.requires_point = True
    self.assertAvail(["Build_TechLab_Barracks_screen", "Build_TechLab_screen"])

  def testGeneral(self):
    # hide_specific_actions collapses unit-specific variants into the general.
    self.obs.abilities.add(ability_id=1374)
    self.hideSpecificActions(False)
    self.assertAvail(["BurrowDown_quick", "BurrowDown_Baneling_quick"])
    self.hideSpecificActions(True)
    self.assertAvail(["BurrowDown_quick"])

  def testGeneralType(self):
    a = self.obs.abilities.add(ability_id=1376)
    self.hideSpecificActions(False)
    self.assertAvail(["BurrowUp_quick", "BurrowUp_Baneling_quick",
                      "BurrowUp_autocast", "BurrowUp_Baneling_autocast"])
    self.hideSpecificActions(True)
    self.assertAvail(["BurrowUp_quick", "BurrowUp_autocast"])
    a.ability_id = 2110
    self.hideSpecificActions(False)
    self.assertAvail(["BurrowUp_quick", "BurrowUp_Lurker_quick"])
    self.hideSpecificActions(True)
    self.assertAvail(["BurrowUp_quick"])

  def testMany(self):
    add = [
        (23, True),  # Attack
        (318, True),  # Build_CommandCenter
        (320, True),  # Build_Refinery
        (319, True),  # Build_SupplyDepot
        (316, True),  # Effect_Repair_SCV
        (295, True),  # Harvest_Gather_SCV
        (16, True),  # Move
        (17, True),  # Patrol
        (4, False),  # Stop
    ]
    for a, r in add:
      self.obs.abilities.add(ability_id=a, requires_point=r)
    self.hideSpecificActions(False)
    self.assertAvail([
        "Attack_Attack_minimap",
        "Attack_Attack_screen",
        "Attack_minimap",
        "Attack_screen",
        "Build_CommandCenter_screen",
        "Build_Refinery_screen",
        "Build_SupplyDepot_screen",
        "Effect_Repair_screen",
        "Effect_Repair_autocast",
        "Effect_Repair_SCV_autocast",
        "Effect_Repair_SCV_screen",
        "Harvest_Gather_screen",
        "Harvest_Gather_SCV_screen",
        "Move_minimap",
        "Move_screen",
        "Move_Move_minimap",
        "Move_Move_screen",
        "Patrol_minimap",
        "Patrol_screen",
        "Patrol_Patrol_minimap",
        "Patrol_Patrol_screen",
        "Stop_quick",
        "Stop_Stop_quick"
    ])
    self.hideSpecificActions(True)
    self.assertAvail([
        "Attack_minimap",
        "Attack_screen",
        "Build_CommandCenter_screen",
        "Build_Refinery_screen",
        "Build_SupplyDepot_screen",
        "Effect_Repair_screen",
        "Effect_Repair_autocast",
        "Harvest_Gather_screen",
        "Move_minimap",
        "Move_screen",
        "Patrol_minimap",
        "Patrol_screen",
        "Stop_quick",
    ])
class ToPointTest(absltest.TestCase):
  """Covers the input handling of features._to_point."""

  def testIntAsString(self):
    result = features._to_point("32")
    self.assertEqual(result, point.Point(32, 32))

  def testIntStringTwoTuple(self):
    result = features._to_point(("32", 64))
    self.assertEqual(result, point.Point(32, 64))

  def testNoneInputReturnsNoneOutput(self):
    # None is rejected outright (despite what the legacy test name suggests).
    with self.assertRaises(AssertionError):
      features._to_point(None)

  def testNoneAsFirstElementOfTupleRaises(self):
    with self.assertRaises(TypeError):
      features._to_point((None, 32))

  def testNoneAsSecondElementOfTupleRaises(self):
    with self.assertRaises(TypeError):
      features._to_point((32, None))

  def testSingletonTupleRaises(self):
    with self.assertRaises(ValueError):
      features._to_point((32,))

  def testThreeTupleRaises(self):
    with self.assertRaises(ValueError):
      features._to_point((32, 32, 32))
class DimensionsTest(absltest.TestCase):
  """Validates the argument checking of features.Dimensions."""

  def testScreenSizeWithoutMinimapRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=84)

  def testScreenWidthWithoutHeightRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=(84, 0), minimap=64)

  def testScreenWidthHeightWithoutMinimapRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=(84, 80))

  def testMinimapWidthAndHeightWithoutScreenRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(minimap=(64, 67))

  def testNoneNoneRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=None, minimap=None)

  def testSingularZeroesRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=0, minimap=0)

  def testTwoZeroesRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=(0, 0), minimap=(0, 0))

  def testThreeTupleScreenRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=(1, 2, 3), minimap=32)

  def testThreeTupleMinimapRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=64, minimap=(1, 2, 3))

  def testNegativeScreenRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=-64, minimap=32)

  def testNegativeMinimapRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=64, minimap=-32)

  def testNegativeScreenTupleRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=(-64, -64), minimap=32)

  def testNegativeMinimapTupleRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=64, minimap=(-32, -32))

  def testEquality(self):
    # Equal only when both screen and minimap match; never equal to None.
    self.assertEqual(features.Dimensions(screen=64, minimap=64),
                     features.Dimensions(screen=64, minimap=64))
    self.assertNotEqual(features.Dimensions(screen=64, minimap=64),
                        features.Dimensions(screen=64, minimap=32))
    self.assertNotEqual(features.Dimensions(screen=64, minimap=64), None)
class TestParseAgentInterfaceFormat(parameterized.TestCase):
  """Tests features.parse_agent_interface_format argument parsing."""

  def test_no_arguments_raises(self):
    with self.assertRaises(ValueError):
      features.parse_agent_interface_format()

  @parameterized.parameters((32, None), (None, 32))
  def test_invalid_feature_combinations_raise(self, screen, minimap):
    # Feature screen and minimap must be specified together.
    with self.assertRaises(ValueError):
      features.parse_agent_interface_format(
          feature_screen=screen,
          feature_minimap=minimap)

  def test_valid_feature_specification_is_parsed(self):
    # Scalars expand to squares; tuples are taken as (width, height).
    agent_interface_format = features.parse_agent_interface_format(
        feature_screen=32,
        feature_minimap=(24, 24))
    self.assertEqual(
        agent_interface_format.feature_dimensions.screen,
        point.Point(32, 32))
    self.assertEqual(
        agent_interface_format.feature_dimensions.minimap,
        point.Point(24, 24))

  @parameterized.parameters((32, None), (None, 32), (32, 64))
  def test_invalid_minimap_combinations_raise(self, screen, minimap):
    # RGB minimap may not be larger than the RGB screen (and both required).
    with self.assertRaises(ValueError):
      features.parse_agent_interface_format(
          rgb_screen=screen,
          rgb_minimap=minimap)

  def test_valid_minimap_specification_is_parsed(self):
    agent_interface_format = features.parse_agent_interface_format(
        rgb_screen=32,
        rgb_minimap=(24, 24))
    self.assertEqual(
        agent_interface_format.rgb_dimensions.screen,
        point.Point(32, 32))
    self.assertEqual(
        agent_interface_format.rgb_dimensions.minimap,
        point.Point(24, 24))

  def test_invalid_action_space_raises(self):
    # Action spaces are looked up by enum member name.
    with self.assertRaises(KeyError):
      features.parse_agent_interface_format(
          feature_screen=64,
          feature_minimap=64,
          action_space="UNKNOWN_ACTION_SPACE")

  @parameterized.parameters(actions.ActionSpace.__members__.keys())
  def test_valid_action_space_is_parsed(self, action_space):
    agent_interface_format = features.parse_agent_interface_format(
        feature_screen=32,
        feature_minimap=(24, 24),
        rgb_screen=64,
        rgb_minimap=(48, 48),
        use_raw_units=True,
        action_space=action_space)
    self.assertEqual(
        agent_interface_format.action_space,
        actions.ActionSpace[action_space])

  def test_camera_width_world_units_are_parsed(self):
    agent_interface_format = features.parse_agent_interface_format(
        feature_screen=32,
        feature_minimap=(24, 24),
        camera_width_world_units=77)
    self.assertEqual(agent_interface_format.camera_width_world_units, 77)

  def test_use_feature_units_is_parsed(self):
    agent_interface_format = features.parse_agent_interface_format(
        feature_screen=32,
        feature_minimap=(24, 24),
        use_feature_units=True)
    self.assertEqual(agent_interface_format.use_feature_units, True)
class FeaturesTest(absltest.TestCase):
def testFunctionsIdsAreConsistent(self):
for i, f in enumerate(actions.FUNCTIONS):
self.assertEqual(i, f.id, "id doesn't match for %s" % f.id)
def testAllVersionsOfAnAbilityHaveTheSameGeneral(self):
for ability_id, funcs in six.iteritems(actions.ABILITY_IDS):
self.assertLen({f.general_id for f in funcs}, 1,
"Multiple generals for %s" % ability_id)
def testValidFunctionsAreConsistent(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS))
valid_funcs = feats.action_spec()
for func_def in valid_funcs.functions:
func = actions.FUNCTIONS[func_def.id]
self.assertEqual(func_def.id, func.id)
self.assertEqual(func_def.name, func.name)
self.assertEqual(len(func_def.args), len(func.args)) # pylint: disable=g-generic-assert
def gen_random_function_call(self, action_spec, func_id):
args = [[numpy.random.randint(0, size) for size in arg.sizes] # pylint: disable=g-complex-comprehension
for arg in action_spec.functions[func_id].args]
return actions.FunctionCall(func_id, args)
def testIdsMatchIndex(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS))
action_spec = feats.action_spec()
for func_index, func_def in enumerate(action_spec.functions):
self.assertEqual(func_index, func_def.id)
for type_index, type_def in enumerate(action_spec.types):
self.assertEqual(type_index, type_def.id)
  def testReversingUnknownAction(self):
    """An SC2 action whose ability has no exposed function maps to no_op."""
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS,
        hide_specific_actions=False))
    sc2_action = sc_pb.Action()
    sc2_action.action_feature_layer.unit_command.ability_id = 6  # Cheer
    func_call = feats.reverse_action(sc2_action)
    self.assertEqual(func_call.function, 0)  # No-op
  def testSpecificActionsAreReversible(self):
    """Test that the `transform_action` and `reverse_action` are inverses."""
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS,
        hide_specific_actions=False))
    action_spec = feats.action_spec()
    for func_def in action_spec.functions:
      # 10 random argument samples per function.
      for _ in range(10):
        func_call = self.gen_random_function_call(action_spec, func_def.id)
        # func_call -> sc2 proto -> func_call2 -> sc2 proto again.
        sc2_action = feats.transform_action(
            None, func_call, skip_available=True)
        func_call2 = feats.reverse_action(sc2_action)
        sc2_action2 = feats.transform_action(
            None, func_call2, skip_available=True)
        if func_def.id == actions.FUNCTIONS.select_rect.id:
          # Need to check this one manually since the same rect can be
          # defined in multiple ways.
          def rect(a):
            return point.Rect(point.Point(*a[1]).floor(),
                              point.Point(*a[2]).floor())
          self.assertEqual(func_call.function, func_call2.function)
          self.assertEqual(len(func_call.arguments), len(func_call2.arguments))  # pylint: disable=g-generic-assert
          self.assertEqual(func_call.arguments[0], func_call2.arguments[0])
          self.assertEqual(rect(func_call.arguments),
                           rect(func_call2.arguments))
        else:
          self.assertEqual(func_call, func_call2, msg=sc2_action)
        # The proto must be stable across the round trip in every case.
        self.assertEqual(sc2_action, sc2_action2)
  def testRawActionUnitTags(self):
    """Unit tags are accepted as ints, numpy ints, lists and numpy arrays."""
    feats = features.Features(
        features.AgentInterfaceFormat(
            use_raw_units=True,
            action_space=actions.ActionSpace.RAW),
        map_size=point.Point(100, 100))
    # One scalar tag and a list of ten, each in plain-Python and numpy form.
    tags = [numpy.random.randint(2**20, 2**24) for _ in range(10)]
    ntags = numpy.array(tags, dtype=numpy.int64)
    tag = tags[0]
    ntag = numpy.array(tag, dtype=numpy.int64)
    def transform(fn, *args):
      # Build a raw FunctionCall and return the resulting unit_command proto.
      func_call = actions.RAW_FUNCTIONS[fn]("now", *args)
      proto = feats.transform_action(None, func_call, skip_available=True)
      return proto.action_raw.unit_command
    self.assertEqual(transform("Attack_pt", tag, [15, 20]).unit_tags, [tag])
    self.assertEqual(transform("Attack_pt", ntag, [15, 20]).unit_tags, [tag])
    self.assertEqual(transform("Attack_pt", [tag], [15, 20]).unit_tags, [tag])
    self.assertEqual(transform("Attack_pt", [ntag], [15, 20]).unit_tags, [tag])
    self.assertEqual(transform("Attack_pt", tags, [15, 20]).unit_tags, tags)
    self.assertEqual(transform("Attack_pt", ntags, [15, 20]).unit_tags, tags)
    # Weird, but needed for backwards compatibility
    self.assertEqual(transform("Attack_pt", [tags], [15, 20]).unit_tags, tags)
    self.assertEqual(transform("Attack_pt", [ntags], [15, 20]).unit_tags, tags)
    self.assertEqual(transform("Attack_unit", tag, tag).target_unit_tag, tag)
    self.assertEqual(transform("Attack_unit", tag, ntag).target_unit_tag, tag)
    self.assertEqual(transform("Attack_unit", tag, [tag]).target_unit_tag, tag)
    self.assertEqual(transform("Attack_unit", tag, [ntag]).target_unit_tag, tag)
def testCanPickleSpecs(self):
    """Action and observation specs survive a pickle round trip."""
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=SQUARE_DIMENSIONS))
    for spec in (feats.action_spec(), feats.observation_spec()):
        self.assertEqual(spec, pickle.loads(pickle.dumps(spec)))
def testCanPickleFunctionCall(self):
    """A FunctionCall survives a pickle round trip."""
    original = actions.FUNCTIONS.select_point("select", [1, 2])
    restored = pickle.loads(pickle.dumps(original))
    self.assertEqual(original, restored)
def testCanDeepcopyNumpyFunctionCall(self):
    """A FunctionCall holding numpy types can be deep-copied."""
    num_args = len(actions.Arguments._fields)
    call_with_numpy = actions.FunctionCall(
        function=numpy.float32,
        arguments=actions.Arguments(*([numpy.float32] * num_args)))
    self.assertEqual(call_with_numpy, copy.deepcopy(call_with_numpy))
def testSizeConstructors(self):
    """Dimensions propagate into the action spec; invalid combos raise.

    Fix: the original repeated an identical RECTANGULAR_DIMENSIONS
    construction-and-assert block twice verbatim; the duplicate is removed.
    """
    # Square dimensions: screen=84 -> (84, 84), minimap=64 -> (64, 64).
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=SQUARE_DIMENSIONS))
    spec = feats.action_spec()
    self.assertEqual(spec.types.screen.sizes, (84, 84))
    self.assertEqual(spec.types.screen2.sizes, (84, 84))
    self.assertEqual(spec.types.minimap.sizes, (64, 64))
    # Rectangular dimensions keep their (width, height) ordering.
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS))
    spec = feats.action_spec()
    self.assertEqual(spec.types.screen.sizes, (84, 80))
    self.assertEqual(spec.types.screen2.sizes, (84, 80))
    self.assertEqual(spec.types.minimap.sizes, (64, 67))
    # Missing one or the other of game_info and dimensions.
    with self.assertRaises(ValueError):
        features.Features()
    # Resolution/action space mismatch.
    with self.assertRaises(ValueError):
        features.Features(features.AgentInterfaceFormat(
            feature_dimensions=RECTANGULAR_DIMENSIONS,
            action_space=actions.ActionSpace.RGB))
    with self.assertRaises(ValueError):
        features.Features(features.AgentInterfaceFormat(
            rgb_dimensions=RECTANGULAR_DIMENSIONS,
            action_space=actions.ActionSpace.FEATURES))
    with self.assertRaises(ValueError):
        features.Features(features.AgentInterfaceFormat(
            feature_dimensions=RECTANGULAR_DIMENSIONS,
            rgb_dimensions=RECTANGULAR_DIMENSIONS))
def testFlRgbActionSpec(self):
    """Action spec resolutions follow the chosen action space."""
    # FEATURES action space: the spec uses the feature-layer sizes.
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS,
        rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),
        action_space=actions.ActionSpace.FEATURES))
    feature_spec = feats.action_spec()
    self.assertEqual(feature_spec.types.screen.sizes, (84, 80))
    self.assertEqual(feature_spec.types.screen2.sizes, (84, 80))
    self.assertEqual(feature_spec.types.minimap.sizes, (64, 67))
    # RGB action space: the spec uses the RGB sizes instead.
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS,
        rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),
        action_space=actions.ActionSpace.RGB))
    rgb_spec = feats.action_spec()
    self.assertEqual(rgb_spec.types.screen.sizes, (128, 132))
    self.assertEqual(rgb_spec.types.screen2.sizes, (128, 132))
    self.assertEqual(rgb_spec.types.minimap.sizes, (74, 77))
def testFlRgbObservationSpec(self):
    """Observation spec exposes both feature-layer and RGB resolutions."""
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS,
        rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),
        action_space=actions.ActionSpace.FEATURES))
    obs_spec = feats.observation_spec()
    # Feature layers are (channels, height, width); RGB is (height, width, 3).
    self.assertEqual(obs_spec["feature_screen"],  # pylint: disable=g-generic-assert
                     (len(features.SCREEN_FEATURES), 80, 84))
    self.assertEqual(obs_spec["feature_minimap"],  # pylint: disable=g-generic-assert
                     (len(features.MINIMAP_FEATURES), 67, 64))
    self.assertEqual(obs_spec["rgb_screen"], (132, 128, 3))
    self.assertEqual(obs_spec["rgb_minimap"], (77, 74, 3))
if __name__ == "__main__":
absltest.main()
| 36.742044
| 115
| 0.716389
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import pickle
from absl.testing import absltest
from absl.testing import parameterized
from future.builtins import range import numpy
import six
from pysc2.lib import actions
from pysc2.lib import features
from pysc2.lib import point
from google.protobuf import text_format
from s2clientprotocol import sc2api_pb2 as sc_pb
observation_text_proto = """
player_common {
player_id: 1
minerals: 0
vespene: 0
food_cap: 10
food_used: 0
food_army: 0
food_workers: 0
idle_worker_count: 0
army_count: 0
warp_gate_count: 0
larva_count: 0
}
game_loop: 20
"""
RECTANGULAR_DIMENSIONS = features.Dimensions(screen=(84, 80), minimap=(64, 67))
SQUARE_DIMENSIONS = features.Dimensions(screen=84, minimap=64)
class AvailableActionsTest(absltest.TestCase):
always_expected = {
"no_op", "move_camera", "select_point", "select_rect",
"select_control_group"
}
def setUp(self):
super(AvailableActionsTest, self).setUp()
self.obs = text_format.Parse(observation_text_proto, sc_pb.Observation())
self.hideSpecificActions(True)
def hideSpecificActions(self, hide_specific_actions): self.features = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
hide_specific_actions=hide_specific_actions))
def assertAvail(self, expected):
actual = self.features.available_actions(self.obs)
actual_names = {actions.FUNCTIONS[i].name for i in actual}
self.assertEqual(actual_names, set(expected) | self.always_expected)
def testAlways(self):
self.assertAvail([])
def testSelectUnit(self):
self.obs.ui_data.multi.units.add(unit_type=1)
self.assertAvail(["select_unit"])
def testSelectIdleWorkder(self):
self.obs.player_common.idle_worker_count = 1
self.assertAvail(["select_idle_worker"])
def testSelectArmy(self):
self.obs.player_common.army_count = 3
self.assertAvail(["select_army"])
def testSelectWarpGates(self):
self.obs.player_common.warp_gate_count = 1
self.assertAvail(["select_warp_gates"])
def testSelectLarva(self):
self.obs.player_common.larva_count = 2
self.assertAvail(["select_larva"])
def testQuick(self):
self.obs.abilities.add(ability_id=32)
self.assertAvail(["Effect_Salvage_quick"])
def testScreen(self):
self.obs.abilities.add(ability_id=326, requires_point=True)
self.assertAvail(["Build_SensorTower_screen"])
def testScreenMinimap(self):
self.obs.abilities.add(ability_id=17, requires_point=True)
self.assertAvail(["Patrol_screen", "Patrol_minimap"])
def testScreenAutocast(self):
self.obs.abilities.add(ability_id=386, requires_point=True)
self.assertAvail(["Effect_Heal_screen", "Effect_Heal_autocast"])
def testScreenQuick(self):
a = self.obs.abilities.add(ability_id=421)
self.hideSpecificActions(True)
a.requires_point = False
self.assertAvail(["Build_TechLab_quick"])
a.requires_point = True
self.assertAvail(["Build_TechLab_screen"])
self.hideSpecificActions(False)
a.requires_point = False
self.assertAvail(["Build_TechLab_Barracks_quick", "Build_TechLab_quick"])
a.requires_point = True
self.assertAvail(["Build_TechLab_Barracks_screen", "Build_TechLab_screen"])
def testGeneral(self):
self.obs.abilities.add(ability_id=1374)
self.hideSpecificActions(False)
self.assertAvail(["BurrowDown_quick", "BurrowDown_Baneling_quick"])
self.hideSpecificActions(True)
self.assertAvail(["BurrowDown_quick"])
def testGeneralType(self):
a = self.obs.abilities.add(ability_id=1376)
self.hideSpecificActions(False)
self.assertAvail(["BurrowUp_quick", "BurrowUp_Baneling_quick",
"BurrowUp_autocast", "BurrowUp_Baneling_autocast"])
self.hideSpecificActions(True)
self.assertAvail(["BurrowUp_quick", "BurrowUp_autocast"])
a.ability_id = 2110
self.hideSpecificActions(False)
self.assertAvail(["BurrowUp_quick", "BurrowUp_Lurker_quick"])
self.hideSpecificActions(True)
self.assertAvail(["BurrowUp_quick"])
def testMany(self):
add = [
(23, True), (318, True), (320, True), (319, True), (316, True), (295, True), (16, True), (17, True), (4, False), ]
for a, r in add:
self.obs.abilities.add(ability_id=a, requires_point=r)
self.hideSpecificActions(False)
self.assertAvail([
"Attack_Attack_minimap",
"Attack_Attack_screen",
"Attack_minimap",
"Attack_screen",
"Build_CommandCenter_screen",
"Build_Refinery_screen",
"Build_SupplyDepot_screen",
"Effect_Repair_screen",
"Effect_Repair_autocast",
"Effect_Repair_SCV_autocast",
"Effect_Repair_SCV_screen",
"Harvest_Gather_screen",
"Harvest_Gather_SCV_screen",
"Move_minimap",
"Move_screen",
"Move_Move_minimap",
"Move_Move_screen",
"Patrol_minimap",
"Patrol_screen",
"Patrol_Patrol_minimap",
"Patrol_Patrol_screen",
"Stop_quick",
"Stop_Stop_quick"
])
self.hideSpecificActions(True)
self.assertAvail([
"Attack_minimap",
"Attack_screen",
"Build_CommandCenter_screen",
"Build_Refinery_screen",
"Build_SupplyDepot_screen",
"Effect_Repair_screen",
"Effect_Repair_autocast",
"Harvest_Gather_screen",
"Move_minimap",
"Move_screen",
"Patrol_minimap",
"Patrol_screen",
"Stop_quick",
])
class ToPointTest(absltest.TestCase):
def testIntAsString(self):
value = features._to_point("32")
self.assertEqual(value, point.Point(32, 32))
def testIntStringTwoTuple(self):
value = features._to_point(("32", 64))
self.assertEqual(value, point.Point(32, 64))
def testNoneInputReturnsNoneOutput(self):
with self.assertRaises(AssertionError):
features._to_point(None)
def testNoneAsFirstElementOfTupleRaises(self):
with self.assertRaises(TypeError):
features._to_point((None, 32))
def testNoneAsSecondElementOfTupleRaises(self):
with self.assertRaises(TypeError):
features._to_point((32, None))
def testSingletonTupleRaises(self):
with self.assertRaises(ValueError):
features._to_point((32,))
def testThreeTupleRaises(self):
with self.assertRaises(ValueError):
features._to_point((32, 32, 32))
class DimensionsTest(absltest.TestCase):
def testScreenSizeWithoutMinimapRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=84)
def testScreenWidthWithoutHeightRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=(84, 0), minimap=64)
def testScreenWidthHeightWithoutMinimapRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=(84, 80))
def testMinimapWidthAndHeightWithoutScreenRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(minimap=(64, 67))
def testNoneNoneRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=None, minimap=None)
def testSingularZeroesRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=0, minimap=0)
def testTwoZeroesRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=(0, 0), minimap=(0, 0))
def testThreeTupleScreenRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=(1, 2, 3), minimap=32)
def testThreeTupleMinimapRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=64, minimap=(1, 2, 3))
def testNegativeScreenRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=-64, minimap=32)
def testNegativeMinimapRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=64, minimap=-32)
def testNegativeScreenTupleRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=(-64, -64), minimap=32)
def testNegativeMinimapTupleRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=64, minimap=(-32, -32))
def testEquality(self):
self.assertEqual(features.Dimensions(screen=64, minimap=64),
features.Dimensions(screen=64, minimap=64))
self.assertNotEqual(features.Dimensions(screen=64, minimap=64),
features.Dimensions(screen=64, minimap=32))
self.assertNotEqual(features.Dimensions(screen=64, minimap=64), None)
class TestParseAgentInterfaceFormat(parameterized.TestCase):
def test_no_arguments_raises(self):
with self.assertRaises(ValueError):
features.parse_agent_interface_format()
@parameterized.parameters((32, None), (None, 32))
def test_invalid_feature_combinations_raise(self, screen, minimap):
with self.assertRaises(ValueError):
features.parse_agent_interface_format(
feature_screen=screen,
feature_minimap=minimap)
def test_valid_feature_specification_is_parsed(self):
agent_interface_format = features.parse_agent_interface_format(
feature_screen=32,
feature_minimap=(24, 24))
self.assertEqual(
agent_interface_format.feature_dimensions.screen,
point.Point(32, 32))
self.assertEqual(
agent_interface_format.feature_dimensions.minimap,
point.Point(24, 24))
@parameterized.parameters((32, None), (None, 32), (32, 64))
def test_invalid_minimap_combinations_raise(self, screen, minimap):
with self.assertRaises(ValueError):
features.parse_agent_interface_format(
rgb_screen=screen,
rgb_minimap=minimap)
def test_valid_minimap_specification_is_parsed(self):
agent_interface_format = features.parse_agent_interface_format(
rgb_screen=32,
rgb_minimap=(24, 24))
self.assertEqual(
agent_interface_format.rgb_dimensions.screen,
point.Point(32, 32))
self.assertEqual(
agent_interface_format.rgb_dimensions.minimap,
point.Point(24, 24))
def test_invalid_action_space_raises(self):
with self.assertRaises(KeyError):
features.parse_agent_interface_format(
feature_screen=64,
feature_minimap=64,
action_space="UNKNOWN_ACTION_SPACE")
@parameterized.parameters(actions.ActionSpace.__members__.keys())
def test_valid_action_space_is_parsed(self, action_space):
agent_interface_format = features.parse_agent_interface_format(
feature_screen=32,
feature_minimap=(24, 24),
rgb_screen=64,
rgb_minimap=(48, 48),
use_raw_units=True,
action_space=action_space)
self.assertEqual(
agent_interface_format.action_space,
actions.ActionSpace[action_space])
def test_camera_width_world_units_are_parsed(self):
agent_interface_format = features.parse_agent_interface_format(
feature_screen=32,
feature_minimap=(24, 24),
camera_width_world_units=77)
self.assertEqual(agent_interface_format.camera_width_world_units, 77)
def test_use_feature_units_is_parsed(self):
agent_interface_format = features.parse_agent_interface_format(
feature_screen=32,
feature_minimap=(24, 24),
use_feature_units=True)
self.assertEqual(agent_interface_format.use_feature_units, True)
class FeaturesTest(absltest.TestCase):
def testFunctionsIdsAreConsistent(self):
for i, f in enumerate(actions.FUNCTIONS):
self.assertEqual(i, f.id, "id doesn't match for %s" % f.id)
def testAllVersionsOfAnAbilityHaveTheSameGeneral(self):
for ability_id, funcs in six.iteritems(actions.ABILITY_IDS):
self.assertLen({f.general_id for f in funcs}, 1,
"Multiple generals for %s" % ability_id)
def testValidFunctionsAreConsistent(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS))
valid_funcs = feats.action_spec()
for func_def in valid_funcs.functions:
func = actions.FUNCTIONS[func_def.id]
self.assertEqual(func_def.id, func.id)
self.assertEqual(func_def.name, func.name)
self.assertEqual(len(func_def.args), len(func.args)) # pylint: disable=g-generic-assert
def gen_random_function_call(self, action_spec, func_id):
args = [[numpy.random.randint(0, size) for size in arg.sizes] # pylint: disable=g-complex-comprehension
for arg in action_spec.functions[func_id].args]
return actions.FunctionCall(func_id, args)
def testIdsMatchIndex(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS))
action_spec = feats.action_spec()
for func_index, func_def in enumerate(action_spec.functions):
self.assertEqual(func_index, func_def.id)
for type_index, type_def in enumerate(action_spec.types):
self.assertEqual(type_index, type_def.id)
def testReversingUnknownAction(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
hide_specific_actions=False))
sc2_action = sc_pb.Action()
sc2_action.action_feature_layer.unit_command.ability_id = 6 # Cheer
func_call = feats.reverse_action(sc2_action)
self.assertEqual(func_call.function, 0) # No-op
def testSpecificActionsAreReversible(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
hide_specific_actions=False))
action_spec = feats.action_spec()
for func_def in action_spec.functions:
for _ in range(10):
func_call = self.gen_random_function_call(action_spec, func_def.id)
sc2_action = feats.transform_action(
None, func_call, skip_available=True)
func_call2 = feats.reverse_action(sc2_action)
sc2_action2 = feats.transform_action(
None, func_call2, skip_available=True)
if func_def.id == actions.FUNCTIONS.select_rect.id:
# Need to check this one manually since the same rect can be
# defined in multiple ways.
def rect(a):
return point.Rect(point.Point(*a[1]).floor(),
point.Point(*a[2]).floor())
self.assertEqual(func_call.function, func_call2.function)
self.assertEqual(len(func_call.arguments), len(func_call2.arguments)) # pylint: disable=g-generic-assert
self.assertEqual(func_call.arguments[0], func_call2.arguments[0])
self.assertEqual(rect(func_call.arguments),
rect(func_call2.arguments))
else:
self.assertEqual(func_call, func_call2, msg=sc2_action)
self.assertEqual(sc2_action, sc2_action2)
def testRawActionUnitTags(self):
feats = features.Features(
features.AgentInterfaceFormat(
use_raw_units=True,
action_space=actions.ActionSpace.RAW),
map_size=point.Point(100, 100))
tags = [numpy.random.randint(2**20, 2**24) for _ in range(10)]
ntags = numpy.array(tags, dtype=numpy.int64)
tag = tags[0]
ntag = numpy.array(tag, dtype=numpy.int64)
def transform(fn, *args):
func_call = actions.RAW_FUNCTIONS[fn]("now", *args)
proto = feats.transform_action(None, func_call, skip_available=True)
return proto.action_raw.unit_command
self.assertEqual(transform("Attack_pt", tag, [15, 20]).unit_tags, [tag])
self.assertEqual(transform("Attack_pt", ntag, [15, 20]).unit_tags, [tag])
self.assertEqual(transform("Attack_pt", [tag], [15, 20]).unit_tags, [tag])
self.assertEqual(transform("Attack_pt", [ntag], [15, 20]).unit_tags, [tag])
self.assertEqual(transform("Attack_pt", tags, [15, 20]).unit_tags, tags)
self.assertEqual(transform("Attack_pt", ntags, [15, 20]).unit_tags, tags)
# Weird, but needed for backwards compatibility
self.assertEqual(transform("Attack_pt", [tags], [15, 20]).unit_tags, tags)
self.assertEqual(transform("Attack_pt", [ntags], [15, 20]).unit_tags, tags)
self.assertEqual(transform("Attack_unit", tag, tag).target_unit_tag, tag)
self.assertEqual(transform("Attack_unit", tag, ntag).target_unit_tag, tag)
self.assertEqual(transform("Attack_unit", tag, [tag]).target_unit_tag, tag)
self.assertEqual(transform("Attack_unit", tag, [ntag]).target_unit_tag, tag)
def testCanPickleSpecs(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=SQUARE_DIMENSIONS))
action_spec = feats.action_spec()
observation_spec = feats.observation_spec()
self.assertEqual(action_spec, pickle.loads(pickle.dumps(action_spec)))
self.assertEqual(observation_spec,
pickle.loads(pickle.dumps(observation_spec)))
def testCanPickleFunctionCall(self):
func = actions.FUNCTIONS.select_point("select", [1, 2])
self.assertEqual(func, pickle.loads(pickle.dumps(func)))
def testCanDeepcopyNumpyFunctionCall(self):
arguments = [numpy.float32] * len(actions.Arguments._fields)
dtypes = actions.FunctionCall(
function=numpy.float32,
arguments=actions.Arguments(*arguments))
self.assertEqual(dtypes, copy.deepcopy(dtypes))
def testSizeConstructors(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=SQUARE_DIMENSIONS))
spec = feats.action_spec()
self.assertEqual(spec.types.screen.sizes, (84, 84))
self.assertEqual(spec.types.screen2.sizes, (84, 84))
self.assertEqual(spec.types.minimap.sizes, (64, 64))
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS))
spec = feats.action_spec()
self.assertEqual(spec.types.screen.sizes, (84, 80))
self.assertEqual(spec.types.screen2.sizes, (84, 80))
self.assertEqual(spec.types.minimap.sizes, (64, 67))
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS))
spec = feats.action_spec()
self.assertEqual(spec.types.screen.sizes, (84, 80))
self.assertEqual(spec.types.screen2.sizes, (84, 80))
self.assertEqual(spec.types.minimap.sizes, (64, 67))
# Missing one or the other of game_info and dimensions.
with self.assertRaises(ValueError):
features.Features()
# Resolution/action space mismatch.
with self.assertRaises(ValueError):
features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
action_space=actions.ActionSpace.RGB))
with self.assertRaises(ValueError):
features.Features(features.AgentInterfaceFormat(
rgb_dimensions=RECTANGULAR_DIMENSIONS,
action_space=actions.ActionSpace.FEATURES))
with self.assertRaises(ValueError):
features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
rgb_dimensions=RECTANGULAR_DIMENSIONS))
def testFlRgbActionSpec(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),
action_space=actions.ActionSpace.FEATURES))
spec = feats.action_spec()
self.assertEqual(spec.types.screen.sizes, (84, 80))
self.assertEqual(spec.types.screen2.sizes, (84, 80))
self.assertEqual(spec.types.minimap.sizes, (64, 67))
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),
action_space=actions.ActionSpace.RGB))
spec = feats.action_spec()
self.assertEqual(spec.types.screen.sizes, (128, 132))
self.assertEqual(spec.types.screen2.sizes, (128, 132))
self.assertEqual(spec.types.minimap.sizes, (74, 77))
def testFlRgbObservationSpec(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),
action_space=actions.ActionSpace.FEATURES))
obs_spec = feats.observation_spec()
self.assertEqual(obs_spec["feature_screen"], # pylint: disable=g-generic-assert
(len(features.SCREEN_FEATURES), 80, 84))
self.assertEqual(obs_spec["feature_minimap"], # pylint: disable=g-generic-assert
(len(features.MINIMAP_FEATURES), 67, 64))
self.assertEqual(obs_spec["rgb_screen"], (132, 128, 3))
self.assertEqual(obs_spec["rgb_minimap"], (77, 74, 3))
if __name__ == "__main__":
absltest.main()
| true
| true
|
f7080c518ae1fc47e5711644e4f224ee3c1b2c7f
| 1,116
|
py
|
Python
|
setup.py
|
timgates42/kivy-ios
|
132eb2739279c479556e74575b97ba9df8faf046
|
[
"MIT"
] | null | null | null |
setup.py
|
timgates42/kivy-ios
|
132eb2739279c479556e74575b97ba9df8faf046
|
[
"MIT"
] | null | null | null |
setup.py
|
timgates42/kivy-ios
|
132eb2739279c479556e74575b97ba9df8faf046
|
[
"MIT"
] | null | null | null |
import os
from glob import glob
from setuptools import setup, find_packages
def read(fname):
    """Return the text of *fname* resolved relative to this file.

    Used to load README.md as the long description. Reads as UTF-8
    explicitly so the result does not depend on the platform's default
    locale encoding (the original relied on the implicit default, which
    breaks on non-UTF-8 systems when the README contains non-ASCII text).
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, encoding="utf-8") as f:
        return f.read()
def recursive_include(module):
    """List every path under *module*'s directory, relative to that directory.

    The dotted module name is turned into a filesystem prefix, globbed
    recursively, and the prefix is stripped from each match.
    """
    root = module.replace(".", "/") + "/"
    return [match.replace(root, "") for match in glob(f"{root}**", recursive=True)]
# Distribution metadata for the kivy-ios package (executed at import time
# by setuptools when this setup.py is run).
setup(
    name="kivy-ios",
    version="1.3.0.dev0",
    description="Kivy for iOS",
    # README.md is shipped verbatim as the PyPI long description.
    long_description=read("README.md"),
    long_description_content_type="text/markdown",
    author="The Kivy team",
    author_email="kivy-dev@googlegroups.com",
    url="https://github.com/kivy/kivy-ios",
    python_requires=">=3.6.0",
    install_requires=["cookiecutter", "pbxproj", "Pillow", "requests", "sh"],
    packages=find_packages(),
    package_data={
        # note this method is a bit excessive as it includes absolutely everything
        # make sure you run with from a clean directory
        "kivy_ios": recursive_include("kivy_ios"),
    },
    # Installs the `toolchain` console command.
    entry_points={"console_scripts": ["toolchain = kivy_ios.toolchain:main"]},
)
| 31
| 82
| 0.671147
|
import os
from glob import glob
from setuptools import setup, find_packages
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
def recursive_include(module):
module_path = module.replace(".", "/") + "/"
files = glob(f"{module_path}**", recursive=True)
return [file.replace(module_path, "") for file in files]
setup(
name="kivy-ios",
version="1.3.0.dev0",
description="Kivy for iOS",
long_description=read("README.md"),
long_description_content_type="text/markdown",
author="The Kivy team",
author_email="kivy-dev@googlegroups.com",
url="https://github.com/kivy/kivy-ios",
python_requires=">=3.6.0",
install_requires=["cookiecutter", "pbxproj", "Pillow", "requests", "sh"],
packages=find_packages(),
package_data={
"kivy_ios": recursive_include("kivy_ios"),
},
entry_points={"console_scripts": ["toolchain = kivy_ios.toolchain:main"]},
)
| true
| true
|
f7080d396c4f1a78d80c63ef4098a0c25063085e
| 114
|
py
|
Python
|
srptools/exceptions.py
|
idlesign/srptools
|
35dd4a355e0723cdc1252eb346cff69b9073711d
|
[
"BSD-3-Clause"
] | 20
|
2017-02-13T15:16:18.000Z
|
2022-01-27T15:49:06.000Z
|
srptools/exceptions.py
|
idlesign/srptools
|
35dd4a355e0723cdc1252eb346cff69b9073711d
|
[
"BSD-3-Clause"
] | 10
|
2017-03-05T15:09:49.000Z
|
2022-01-28T05:44:45.000Z
|
srptools/exceptions.py
|
idlesign/srptools
|
35dd4a355e0723cdc1252eb346cff69b9073711d
|
[
"BSD-3-Clause"
] | 7
|
2017-03-11T07:50:09.000Z
|
2021-12-16T11:59:00.000Z
|
from __future__ import unicode_literals
class SRPException(Exception):
    """Base class for all exceptions raised by srptools."""
| 19
| 40
| 0.77193
|
from __future__ import unicode_literals
class SRPException(Exception):
| true
| true
|
f7080e2cbbea07d976c6f91d6794ea06cc485769
| 817
|
py
|
Python
|
examples/wrap_pytorch.py
|
ccoulombe/thinc
|
8d891b61ddef3ca00266ca0ec7c47e2d063a3a83
|
[
"MIT"
] | null | null | null |
examples/wrap_pytorch.py
|
ccoulombe/thinc
|
8d891b61ddef3ca00266ca0ec7c47e2d063a3a83
|
[
"MIT"
] | null | null | null |
examples/wrap_pytorch.py
|
ccoulombe/thinc
|
8d891b61ddef3ca00266ca0ec7c47e2d063a3a83
|
[
"MIT"
] | null | null | null |
import plac
import numpy
import torch
from torch import autograd
from torch import nn
import torch.optim
import torch.cuda
from thinc.neural.ops import CupyOps
from thinc.extra.wrappers import PyTorchWrapper
from thinc.v2v import Model
def main(length=1000, nO=32, nI=32):
    """Wrap a PyTorch linear layer with thinc and run a few update steps.

    Fixes: `CupyOps.xp != None` is replaced by the idiomatic
    `is not None` identity check, and the unused loop variable is `_`.

    Args:
        length: number of rows in the dummy input batch.
        nO: output width of the linear layer.
        nI: input width of the linear layer.
    """
    if CupyOps.xp is not None:  # cupy importable -> run on the GPU.
        print("Use GPU")
        Model.ops = CupyOps()
        Model.Ops = CupyOps
        torch.set_default_tensor_type('torch.cuda.FloatTensor')

    pt_model = nn.Linear(nI, nO)
    optimizer = torch.optim.Adam(pt_model.parameters())  # kept for example parity; not stepped below
    model = PyTorchWrapper(pt_model)

    # Dummy regression target: elementwise reciprocal of the all-ones input.
    X = Model.ops.xp.ones((length, nI), dtype='f')
    y = 1. / X
    for _ in range(10):
        yh, get_dX = model.begin_update(X)
        dY = (yh - y) / len(y)
        get_dX(dY)  # backprop through the wrapped torch layer
if __name__ == '__main__':
plac.call(main)
| 22.081081
| 63
| 0.656059
|
import plac
import numpy
import torch
from torch import autograd
from torch import nn
import torch.optim
import torch.cuda
from thinc.neural.ops import CupyOps
from thinc.extra.wrappers import PyTorchWrapper
from thinc.v2v import Model
def main(length=1000, nO=32, nI=32):
if CupyOps.xp != None:
print("Use GPU")
Model.ops = CupyOps()
Model.Ops = CupyOps
torch.set_default_tensor_type('torch.cuda.FloatTensor')
pt_model = nn.Linear(nI, nO)
optimizer = torch.optim.Adam(pt_model.parameters())
model = PyTorchWrapper(pt_model)
X = Model.ops.xp.ones((length, nI), dtype='f')
y = 1. / X
for i in range(10):
yh, get_dX = model.begin_update(X)
dY = (yh - y) / len(y)
dX = get_dX(dY)
if __name__ == '__main__':
plac.call(main)
| true
| true
|
f7080e67524a51871c0f56f0d55f2da7a6eaf532
| 4,542
|
py
|
Python
|
app/common/views.py
|
vigov5/oshougatsu2015
|
38cbf325675ee2c08a6965b8689fad8308eb84eb
|
[
"MIT"
] | null | null | null |
app/common/views.py
|
vigov5/oshougatsu2015
|
38cbf325675ee2c08a6965b8689fad8308eb84eb
|
[
"MIT"
] | null | null | null |
app/common/views.py
|
vigov5/oshougatsu2015
|
38cbf325675ee2c08a6965b8689fad8308eb84eb
|
[
"MIT"
] | null | null | null |
from flask import render_template, g, request, url_for, jsonify, redirect
from flask_login import current_user, login_required
import flask_menu as menu
from sqlalchemy import desc, asc
from app import app, lm
from app.user.models import User, UserJoin
from app.contest.models import Contest
from app.submission.models import Submission
from app.problem.models import Problem
from app.common.tasks import run_code
from app.common.utils import generate_random_string
from app.problem import constants as PROBLEM
@app.before_request
def before_request():
    # Expose the flask-login current user on the request globals so views
    # and templates can reach it uniformly as g.user.
    g.user = current_user
@lm.user_loader
def load_user(user_id):
    """flask-login user-loader callback: fetch a User by its primary key."""
    pk = int(user_id)
    return User.query.get(pk)
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page together with the 404 status code."""
    body = render_template('404.html')
    return body, 404
@app.route('/')
@app.route('/index')
@login_required
@menu.register_menu(app, '.index', 'Home', order=0)
def index():
    """Home page: the most recent contest and its problems ordered by rank."""
    contest = Contest.query.order_by(Contest.id.desc()).first()
    if contest:
        problems = contest.problems.order_by(Problem.rank.asc()).all()
    else:
        problems = []
    return render_template('index.html', contest=contest, problems=problems)
@app.route('/')
@app.route('/scoreboard')
def scoreboard():
    # NOTE(review): this handler also registers '/', which index() above
    # registers too — presumably intentional, but confirm which wins.
    contest = Contest.query.order_by(Contest.id.desc()).first()
    if not contest:
        # No contest yet: nothing to score, bounce to the home page.
        return redirect(url_for('index'))
    # Full submission feed, newest first, shown as the activity column.
    activities = Submission.query.order_by(desc(Submission.created_at)).all()
    joins = UserJoin.query.filter_by(contest_id=contest.id).all()
    # (user, total score) pairs for everyone joined to this contest.
    raw = []
    for join in joins:
        raw.append((join.user, join.user.get_total_score()))
    return render_template(
        'scoreboard.html',
        activities=activities,
        raw=raw
    )
@app.route('/howto')
@menu.register_menu(app, '.howto', 'How to', order=2)
def howto():
    # Static instructions page; no dynamic data.
    return render_template('howto.html')
@app.route('/admin')
def admin():
    """Admin page listing every contest, newest first."""
    all_contests = Contest.query.order_by(Contest.id.desc()).all()
    return render_template('admin.html', contests=all_contests)
@app.route('/activities/more', methods=['POST'])
def more_activities():
    # AJAX endpoint: return activity-feed entries at/after the client's last
    # seen submission id, plus current scores for every contest participant.
    # 'side' alternates left/right for the zig-zag timeline layout.
    last_side = request.form.get('side', 'left')
    last_id = request.form.get('id')
    activities = Submission.query.filter(Submission.id >= last_id).order_by(desc(Submission.created_at)).limit(2).all()
    resp = []
    for activity in activities:
        # Alternate the display side for each new entry.
        last_side = 'right' if last_side == 'left' else 'left'
        element = {
            'class': 'pos-%s clearfix' % last_side,
            'id': activity.id,
            'time': activity.created_at.strftime('%b %d %H:%M'),
            'header': u"%s" % activity.user.email
        }
        # Default icon: judging still in progress.
        element['result'] = u'/static/images/running.gif'
        if activity.problem.category == PROBLEM.CATEGORY_CODE:
            # Code problems: the entry the client already has becomes an
            # 'update' (refresh its status), newer ones are 'new'.
            if int(activity.id) == int(last_id):
                element['type'] = 'update'
            else:
                element['type'] = 'new'
            if activity.is_finished():
                if activity.is_accepted():
                    element['footer'] = u' solved <a href="{0:s}">{1:s}</a> and scored <strong>{2:s} points</strong>'.format(url_for('problem.show', problem_id=activity.problem.id), activity.problem.name_en, str(activity.received_point))
                    element['result'] = u'/static/images/true.png'
                else:
                    element['footer'] = u' failed to solve <a href="{0:s}">{1:s}</a>'.format(url_for('problem.show', problem_id=activity.problem.id), activity.problem.name_en)
                    element['result'] = u'/static/images/false.png'
            else:
                element['footer'] = u' submitted solution for <a href="{0:s}">{1:s}</a>'.format(url_for('problem.show', problem_id=activity.problem.id), activity.problem.name_en)
        else:
            # Non-code problems are graded instantly, so they always show as
            # solved; only truly new ids are marked 'new'.
            element['type'] = 'update'
            element['result'] = u'/static/images/true.png'
            if int(activity.id) != int(last_id):
                element['type'] = 'new'
            element['footer'] = u' solved <a href="{0:s}">{1:s}</a> and scored <strong>{2:s} points</strong>'.format(url_for('problem.show', problem_id=activity.problem.id), activity.problem.name_en, str(activity.received_point))
        resp.append(element)
    # Append a score refresh for every participant of the latest contest.
    contest = Contest.query.order_by(Contest.id.desc()).first()
    joins = UserJoin.query.filter_by(contest_id=contest.id).all()
    for join in joins:
        resp.append({
            'type': 'point',
            'user_id': join.user.id,
            'point': join.user.get_total_score()
        })
    return jsonify(result=resp)
| 34.671756
| 237
| 0.6321
|
from flask import render_template, g, request, url_for, jsonify, redirect
from flask_login import current_user, login_required
import flask_menu as menu
from sqlalchemy import desc, asc
from app import app, lm
from app.user.models import User, UserJoin
from app.contest.models import Contest
from app.submission.models import Submission
from app.problem.models import Problem
from app.common.tasks import run_code
from app.common.utils import generate_random_string
from app.problem import constants as PROBLEM
@app.before_request
def before_request():
    """Run before every request: mirror flask_login's current_user onto g.user.

    Lets views and templates reach the authenticated user via ``g.user``
    without importing flask_login themselves.
    """
    g.user = current_user
@lm.user_loader
def load_user(user_id):
    """Flask-Login user loader: resolve a session-stored id to a User row.

    Flask-Login hands the id back as a string, so it is converted to int
    before the primary-key lookup.
    """
    uid = int(user_id)
    return User.query.get(uid)
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 template together with the 404 status code."""
    body = render_template('404.html')
    return body, 404
@app.route('/')
@app.route('/index')
@login_required
@menu.register_menu(app, '.index', 'Home', order=0)
def index():
    """Home page: show the most recent contest and its problems.

    Problems are ordered by their configured rank; when no contest exists
    yet, an empty problem list is rendered.
    """
    contest = Contest.query.order_by(Contest.id.desc()).first()
    if contest:
        problems = contest.problems.order_by(Problem.rank.asc()).all()
    else:
        problems = []
    return render_template('index.html', contest=contest, problems=problems)
@app.route('/scoreboard')
def scoreboard():
    """Scoreboard for the latest contest.

    Redirects home when no contest exists yet; otherwise renders every
    submission (most recent first) plus ``raw`` — a list of
    (user, total_score) pairs for each participant of that contest.

    FIX: the original also registered this view at '/', duplicating the
    rule already owned by index(); since index() is registered first, that
    second '/' registration was shadowed/ambiguous in the URL map and is
    dropped here.
    """
    contest = Contest.query.order_by(Contest.id.desc()).first()
    if not contest:
        return redirect(url_for('index'))
    activities = Submission.query.order_by(desc(Submission.created_at)).all()
    joins = UserJoin.query.filter_by(contest_id=contest.id).all()
    raw = [(join.user, join.user.get_total_score()) for join in joins]
    return render_template(
        'scoreboard.html',
        activities=activities,
        raw=raw
    )
@app.route('/howto')
@menu.register_menu(app, '.howto', 'How to', order=2)
def howto():
    """Static 'How to' help page, registered in the nav menu at position 2."""
    return render_template('howto.html')
@app.route('/admin')
def admin():
    """Admin dashboard listing every contest, newest first.

    NOTE(review): no @login_required or role check is visible on this
    endpoint — confirm access control is enforced elsewhere.
    """
    all_contests = Contest.query.order_by(Contest.id.desc()).all()
    return render_template(
        'admin.html',
        contests=all_contests
    )
@app.route('/activities/more', methods=['POST'])
def more_activities():
    """AJAX activity-feed endpoint.

    Form params:
        side: 'left' or 'right' — the side of the last entry the client
              rendered; successive entries alternate sides.
        id:   id of the newest submission the client already has; entries
              with that id are flagged 'update', newer ones 'new'.

    Returns JSON ``{'result': [...]}`` with up to two submission entries
    (newest first) followed by one 'point' entry per participant of the
    latest contest.

    FIX: the original crashed with ``int(None)`` when the 'id' field was
    missing, and with ``AttributeError`` on ``contest.id`` when no contest
    existed; both are now guarded.
    """
    last_side = request.form.get('side', 'left')
    try:
        last_id = int(request.form.get('id', 0))
    except (TypeError, ValueError):
        last_id = 0
    activities = Submission.query.filter(Submission.id >= last_id).order_by(desc(Submission.created_at)).limit(2).all()
    resp = []
    for activity in activities:
        # Alternate rendering side for each entry.
        last_side = 'right' if last_side == 'left' else 'left'
        element = {
            'class': 'pos-%s clearfix' % last_side,
            'id': activity.id,
            'time': activity.created_at.strftime('%b %d %H:%M'),
            'header': u"%s" % activity.user.email,
            # Spinner until a definitive result icon is chosen below.
            'result': u'/static/images/running.gif',
            # Entries the client already saw are updates, newer ones are new.
            'type': 'update' if int(activity.id) == last_id else 'new',
        }
        # Shared problem link used by every footer variant (byte-identical
        # output to the original's inline format strings).
        problem_link = u'<a href="{0:s}">{1:s}</a>'.format(
            url_for('problem.show', problem_id=activity.problem.id),
            activity.problem.name_en)
        if activity.problem.category == PROBLEM.CATEGORY_CODE:
            # Code problems are judged asynchronously: reflect judge state.
            if activity.is_finished():
                if activity.is_accepted():
                    element['footer'] = u' solved %s and scored <strong>%s points</strong>' % (problem_link, str(activity.received_point))
                    element['result'] = u'/static/images/true.png'
                else:
                    element['footer'] = u' failed to solve %s' % problem_link
                    element['result'] = u'/static/images/false.png'
            else:
                element['footer'] = u' submitted solution for %s' % problem_link
        else:
            # Non-code problems are accepted immediately on submission.
            element['result'] = u'/static/images/true.png'
            element['footer'] = u' solved %s and scored <strong>%s points</strong>' % (problem_link, str(activity.received_point))
        resp.append(element)
    contest = Contest.query.order_by(Contest.id.desc()).first()
    if contest:
        joins = UserJoin.query.filter_by(contest_id=contest.id).all()
        for join in joins:
            resp.append({
                'type': 'point',
                'user_id': join.user.id,
                'point': join.user.get_total_score()
            })
    return jsonify(result=resp)
| true
| true
|
f7080ea0135fa78c83425edc26e83fafaf4e91c6
| 2,579
|
py
|
Python
|
tests/test_day_05.py
|
jrcoutinho/aoc-2019-python
|
307ecddc0f7feeb31bc5d95a0479d5498bbf2679
|
[
"MIT"
] | null | null | null |
tests/test_day_05.py
|
jrcoutinho/aoc-2019-python
|
307ecddc0f7feeb31bc5d95a0479d5498bbf2679
|
[
"MIT"
] | null | null | null |
tests/test_day_05.py
|
jrcoutinho/aoc-2019-python
|
307ecddc0f7feeb31bc5d95a0479d5498bbf2679
|
[
"MIT"
] | null | null | null |
# Tests for Advent of Code 2019 day 5 (Intcode: I/O, parameter modes, jumps),
# driven through the day-2 IntcodeComputer implementation.
from aoc.day_02 import IntcodeComputer
def _run_test(program, expected):
    """Run *program* and compare the final memory image.

    `expected` is the comma-joined string form of the whole memory.
    """
    computer = IntcodeComputer(program).execute()
    assert ",".join(str(x) for x in computer.memory) == expected
def test_input(monkeypatch):
    """Opcode 3: reads '1' from stdin and stores it at address 0."""
    monkeypatch.setattr("builtins.input", lambda: "1")
    program = "3,0,99"
    expected = "1,0,99"
    _run_test(program, expected)
def test_output(capfd):
    """Opcode 4: prints the value at address 0 ('4') followed by a newline."""
    program = "4,0,99"
    IntcodeComputer(program).execute()
    captured = capfd.readouterr()
    assert captured.out == "4\n"
def test_ex01():
    """Immediate/position mixed multiply: 33 * 3 written back as 99."""
    program = "1002,4,3,4,33"
    expected = "1002,4,3,4,99"
    _run_test(program, expected)
def _test_in_out(comp, in_val, out_val, monkeypatch, capfd):
    """Feed *in_val* on stdin, run *comp*, assert it prints *out_val*.

    NOTE(review): called twice on the same computer in the tests below —
    assumes execute() re-runs from the original program; confirm in day_02.
    """
    monkeypatch.setattr("builtins.input", lambda: in_val)
    comp.execute()
    captured = capfd.readouterr()
    assert captured.out == f"{out_val}\n"
def test_ex02(monkeypatch, capfd):
    """Equals-8 comparison: outputs 1 for input 8, 0 otherwise."""
    program = "3,9,8,9,10,9,4,9,99,-1,8"
    comp = IntcodeComputer(program)
    _test_in_out(comp, 8, 1, monkeypatch, capfd)
    _test_in_out(comp, 1, 0, monkeypatch, capfd)
def test_ex03(monkeypatch, capfd):
    """Less-than-8 comparison: outputs 1 for input 7, 0 for input 9."""
    program = "3,9,7,9,10,9,4,9,99,-1,8"
    comp = IntcodeComputer(program)
    _test_in_out(comp, 7, 1, monkeypatch, capfd)
    _test_in_out(comp, 9, 0, monkeypatch, capfd)
def test_ex04(monkeypatch, capfd):
    """Equals-8 comparison (opcode 1108 variant): 1 for input 8, else 0."""
    program = "3,3,1108,-1,8,3,4,3,99"
    comp = IntcodeComputer(program)
    _test_in_out(comp, 8, 1, monkeypatch, capfd)
    _test_in_out(comp, 1, 0, monkeypatch, capfd)
def test_ex05(monkeypatch, capfd):
    """Less-than-8 comparison (opcode 1107 variant): 1 for 7, 0 for 8."""
    program = "3,3,1107,-1,8,3,4,3,99"
    comp = IntcodeComputer(program)
    _test_in_out(comp, 7, 1, monkeypatch, capfd)
    _test_in_out(comp, 8, 0, monkeypatch, capfd)
def test_ex06(monkeypatch, capfd):
    """Jump test: outputs 0 when the input is 0, 1 otherwise."""
    program = "3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9"
    comp = IntcodeComputer(program)
    _test_in_out(comp, 0, 0, monkeypatch, capfd)
    _test_in_out(comp, 1, 1, monkeypatch, capfd)
def test_ex07(monkeypatch, capfd):
    """Jump test (opcode 1105 variant): 0 for input 0, 1 otherwise."""
    program = "3,3,1105,-1,9,1101,0,0,12,4,12,99,1"
    comp = IntcodeComputer(program)
    _test_in_out(comp, 0, 0, monkeypatch, capfd)
    _test_in_out(comp, 1, 1, monkeypatch, capfd)
def test_ex08(monkeypatch, capfd):
    """Combined program: prints 999, 1000 or 1001 for input <8, ==8, >8."""
    program = (
        "3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,"
        "1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,"
        "999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99"
    )
    comp = IntcodeComputer(program)
    _test_in_out(comp, 7, 999, monkeypatch, capfd)
    _test_in_out(comp, 8, 1000, monkeypatch, capfd)
    _test_in_out(comp, 9, 1001, monkeypatch, capfd)
| 26.587629
| 64
| 0.666537
|
from aoc.day_02 import IntcodeComputer
def _run_test(program, expected):
computer = IntcodeComputer(program).execute()
assert ",".join(str(x) for x in computer.memory) == expected
def test_input(monkeypatch):
monkeypatch.setattr("builtins.input", lambda: "1")
program = "3,0,99"
expected = "1,0,99"
_run_test(program, expected)
def test_output(capfd):
program = "4,0,99"
IntcodeComputer(program).execute()
captured = capfd.readouterr()
assert captured.out == "4\n"
def test_ex01():
program = "1002,4,3,4,33"
expected = "1002,4,3,4,99"
_run_test(program, expected)
def _test_in_out(comp, in_val, out_val, monkeypatch, capfd):
monkeypatch.setattr("builtins.input", lambda: in_val)
comp.execute()
captured = capfd.readouterr()
assert captured.out == f"{out_val}\n"
def test_ex02(monkeypatch, capfd):
program = "3,9,8,9,10,9,4,9,99,-1,8"
comp = IntcodeComputer(program)
_test_in_out(comp, 8, 1, monkeypatch, capfd)
_test_in_out(comp, 1, 0, monkeypatch, capfd)
def test_ex03(monkeypatch, capfd):
program = "3,9,7,9,10,9,4,9,99,-1,8"
comp = IntcodeComputer(program)
_test_in_out(comp, 7, 1, monkeypatch, capfd)
_test_in_out(comp, 9, 0, monkeypatch, capfd)
def test_ex04(monkeypatch, capfd):
program = "3,3,1108,-1,8,3,4,3,99"
comp = IntcodeComputer(program)
_test_in_out(comp, 8, 1, monkeypatch, capfd)
_test_in_out(comp, 1, 0, monkeypatch, capfd)
def test_ex05(monkeypatch, capfd):
program = "3,3,1107,-1,8,3,4,3,99"
comp = IntcodeComputer(program)
_test_in_out(comp, 7, 1, monkeypatch, capfd)
_test_in_out(comp, 8, 0, monkeypatch, capfd)
def test_ex06(monkeypatch, capfd):
program = "3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9"
comp = IntcodeComputer(program)
_test_in_out(comp, 0, 0, monkeypatch, capfd)
_test_in_out(comp, 1, 1, monkeypatch, capfd)
def test_ex07(monkeypatch, capfd):
program = "3,3,1105,-1,9,1101,0,0,12,4,12,99,1"
comp = IntcodeComputer(program)
_test_in_out(comp, 0, 0, monkeypatch, capfd)
_test_in_out(comp, 1, 1, monkeypatch, capfd)
def test_ex08(monkeypatch, capfd):
program = (
"3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,"
"1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,"
"999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99"
)
comp = IntcodeComputer(program)
_test_in_out(comp, 7, 999, monkeypatch, capfd)
_test_in_out(comp, 8, 1000, monkeypatch, capfd)
_test_in_out(comp, 9, 1001, monkeypatch, capfd)
| true
| true
|
f7080ea22b594eb170cc9fc284bca87deed83679
| 18,309
|
py
|
Python
|
nipyapi/nifi/models/controller_status_dto.py
|
oneextrafact/nipyapi
|
4c184d69002a8ee3ac528fda63b2ffcc6cedbae5
|
[
"Apache-2.0"
] | null | null | null |
nipyapi/nifi/models/controller_status_dto.py
|
oneextrafact/nipyapi
|
4c184d69002a8ee3ac528fda63b2ffcc6cedbae5
|
[
"Apache-2.0"
] | null | null | null |
nipyapi/nifi/models/controller_status_dto.py
|
oneextrafact/nipyapi
|
4c184d69002a8ee3ac528fda63b2ffcc6cedbae5
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.10.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
def _dto_property(name):
    """Build a plain read/write property backed by the '_<name>' slot.

    Every ControllerStatusDTO field is a validation-free pass-through
    accessor, so the sixteen identical getter/setter pairs are generated
    here instead of being hand-written.
    """
    attr = '_' + name

    def fget(self):
        return getattr(self, attr)

    def fset(self, value):
        setattr(self, attr, value)

    return property(fget, fset,
                    doc="The %s of this ControllerStatusDTO." % name)


class ControllerStatusDTO(object):
    """Swagger model for NiFi controller status counters.

    NOTE: originally auto-generated by swagger-codegen. This version keeps
    the exact public interface (constructor signature and positional order,
    all sixteen properties, to_dict/to_str/__repr__/__eq__/__ne__,
    swagger_types/attribute_map) but derives the identical property pairs
    from ``swagger_types`` and drops the six.iteritems dependency.
    """

    # Attribute name -> declared Swagger type.
    swagger_types = {
        'active_thread_count': 'int',
        'terminated_thread_count': 'int',
        'queued': 'str',
        'flow_files_queued': 'int',
        'bytes_queued': 'int',
        'running_count': 'int',
        'stopped_count': 'int',
        'invalid_count': 'int',
        'disabled_count': 'int',
        'active_remote_port_count': 'int',
        'inactive_remote_port_count': 'int',
        'up_to_date_count': 'int',
        'locally_modified_count': 'int',
        'stale_count': 'int',
        'locally_modified_and_stale_count': 'int',
        'sync_failure_count': 'int'
    }

    # Attribute name -> JSON key in the REST payload.
    attribute_map = {
        'active_thread_count': 'activeThreadCount',
        'terminated_thread_count': 'terminatedThreadCount',
        'queued': 'queued',
        'flow_files_queued': 'flowFilesQueued',
        'bytes_queued': 'bytesQueued',
        'running_count': 'runningCount',
        'stopped_count': 'stoppedCount',
        'invalid_count': 'invalidCount',
        'disabled_count': 'disabledCount',
        'active_remote_port_count': 'activeRemotePortCount',
        'inactive_remote_port_count': 'inactiveRemotePortCount',
        'up_to_date_count': 'upToDateCount',
        'locally_modified_count': 'locallyModifiedCount',
        'stale_count': 'staleCount',
        'locally_modified_and_stale_count': 'locallyModifiedAndStaleCount',
        'sync_failure_count': 'syncFailureCount'
    }

    def __init__(self, active_thread_count=None, terminated_thread_count=None, queued=None, flow_files_queued=None, bytes_queued=None, running_count=None, stopped_count=None, invalid_count=None, disabled_count=None, active_remote_port_count=None, inactive_remote_port_count=None, up_to_date_count=None, locally_modified_count=None, stale_count=None, locally_modified_and_stale_count=None, sync_failure_count=None):
        """ControllerStatusDTO - a model defined in Swagger.

        Every counter defaults to None, meaning 'not reported'.
        """
        # Capture the keyword values before binding anything new; 'args' is
        # not yet in locals() at this point.
        args = locals()
        for name in self.swagger_types:
            # Backing slot first, then the property setter only for supplied
            # values — mirrors the generated code's `if x is not None` guards.
            setattr(self, '_' + name, None)
            if args[name] is not None:
                setattr(self, name, args[name])

    def to_dict(self):
        """Return the model's properties as a dict.

        Nested models (anything with to_dict), lists and dicts are
        converted recursively, exactly as in the generated code.
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True iff *other* is the same model type with identical fields."""
        if not isinstance(other, ControllerStatusDTO):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other


# Attach the sixteen pass-through properties declared in swagger_types.
for _field in ControllerStatusDTO.swagger_types:
    setattr(ControllerStatusDTO, _field, _dto_property(_field))
del _field
| 33.532967
| 479
| 0.655579
|
from pprint import pformat
from six import iteritems
import re
class ControllerStatusDTO(object):
swagger_types = {
'active_thread_count': 'int',
'terminated_thread_count': 'int',
'queued': 'str',
'flow_files_queued': 'int',
'bytes_queued': 'int',
'running_count': 'int',
'stopped_count': 'int',
'invalid_count': 'int',
'disabled_count': 'int',
'active_remote_port_count': 'int',
'inactive_remote_port_count': 'int',
'up_to_date_count': 'int',
'locally_modified_count': 'int',
'stale_count': 'int',
'locally_modified_and_stale_count': 'int',
'sync_failure_count': 'int'
}
attribute_map = {
'active_thread_count': 'activeThreadCount',
'terminated_thread_count': 'terminatedThreadCount',
'queued': 'queued',
'flow_files_queued': 'flowFilesQueued',
'bytes_queued': 'bytesQueued',
'running_count': 'runningCount',
'stopped_count': 'stoppedCount',
'invalid_count': 'invalidCount',
'disabled_count': 'disabledCount',
'active_remote_port_count': 'activeRemotePortCount',
'inactive_remote_port_count': 'inactiveRemotePortCount',
'up_to_date_count': 'upToDateCount',
'locally_modified_count': 'locallyModifiedCount',
'stale_count': 'staleCount',
'locally_modified_and_stale_count': 'locallyModifiedAndStaleCount',
'sync_failure_count': 'syncFailureCount'
}
def __init__(self, active_thread_count=None, terminated_thread_count=None, queued=None, flow_files_queued=None, bytes_queued=None, running_count=None, stopped_count=None, invalid_count=None, disabled_count=None, active_remote_port_count=None, inactive_remote_port_count=None, up_to_date_count=None, locally_modified_count=None, stale_count=None, locally_modified_and_stale_count=None, sync_failure_count=None):
self._active_thread_count = None
self._terminated_thread_count = None
self._queued = None
self._flow_files_queued = None
self._bytes_queued = None
self._running_count = None
self._stopped_count = None
self._invalid_count = None
self._disabled_count = None
self._active_remote_port_count = None
self._inactive_remote_port_count = None
self._up_to_date_count = None
self._locally_modified_count = None
self._stale_count = None
self._locally_modified_and_stale_count = None
self._sync_failure_count = None
if active_thread_count is not None:
self.active_thread_count = active_thread_count
if terminated_thread_count is not None:
self.terminated_thread_count = terminated_thread_count
if queued is not None:
self.queued = queued
if flow_files_queued is not None:
self.flow_files_queued = flow_files_queued
if bytes_queued is not None:
self.bytes_queued = bytes_queued
if running_count is not None:
self.running_count = running_count
if stopped_count is not None:
self.stopped_count = stopped_count
if invalid_count is not None:
self.invalid_count = invalid_count
if disabled_count is not None:
self.disabled_count = disabled_count
if active_remote_port_count is not None:
self.active_remote_port_count = active_remote_port_count
if inactive_remote_port_count is not None:
self.inactive_remote_port_count = inactive_remote_port_count
if up_to_date_count is not None:
self.up_to_date_count = up_to_date_count
if locally_modified_count is not None:
self.locally_modified_count = locally_modified_count
if stale_count is not None:
self.stale_count = stale_count
if locally_modified_and_stale_count is not None:
self.locally_modified_and_stale_count = locally_modified_and_stale_count
if sync_failure_count is not None:
self.sync_failure_count = sync_failure_count
@property
def active_thread_count(self):
return self._active_thread_count
@active_thread_count.setter
def active_thread_count(self, active_thread_count):
self._active_thread_count = active_thread_count
@property
def terminated_thread_count(self):
return self._terminated_thread_count
@terminated_thread_count.setter
def terminated_thread_count(self, terminated_thread_count):
self._terminated_thread_count = terminated_thread_count
@property
def queued(self):
return self._queued
@queued.setter
def queued(self, queued):
self._queued = queued
@property
def flow_files_queued(self):
return self._flow_files_queued
@flow_files_queued.setter
def flow_files_queued(self, flow_files_queued):
self._flow_files_queued = flow_files_queued
@property
def bytes_queued(self):
return self._bytes_queued
@bytes_queued.setter
def bytes_queued(self, bytes_queued):
self._bytes_queued = bytes_queued
@property
def running_count(self):
return self._running_count
@running_count.setter
def running_count(self, running_count):
self._running_count = running_count
@property
def stopped_count(self):
return self._stopped_count
@stopped_count.setter
def stopped_count(self, stopped_count):
self._stopped_count = stopped_count
@property
def invalid_count(self):
return self._invalid_count
@invalid_count.setter
def invalid_count(self, invalid_count):
self._invalid_count = invalid_count
@property
def disabled_count(self):
return self._disabled_count
@disabled_count.setter
def disabled_count(self, disabled_count):
self._disabled_count = disabled_count
@property
def active_remote_port_count(self):
return self._active_remote_port_count
@active_remote_port_count.setter
def active_remote_port_count(self, active_remote_port_count):
self._active_remote_port_count = active_remote_port_count
@property
def inactive_remote_port_count(self):
return self._inactive_remote_port_count
@inactive_remote_port_count.setter
def inactive_remote_port_count(self, inactive_remote_port_count):
self._inactive_remote_port_count = inactive_remote_port_count
@property
def up_to_date_count(self):
return self._up_to_date_count
@up_to_date_count.setter
def up_to_date_count(self, up_to_date_count):
self._up_to_date_count = up_to_date_count
@property
def locally_modified_count(self):
return self._locally_modified_count
@locally_modified_count.setter
def locally_modified_count(self, locally_modified_count):
self._locally_modified_count = locally_modified_count
@property
def stale_count(self):
return self._stale_count
@stale_count.setter
def stale_count(self, stale_count):
self._stale_count = stale_count
@property
def locally_modified_and_stale_count(self):
return self._locally_modified_and_stale_count
@locally_modified_and_stale_count.setter
def locally_modified_and_stale_count(self, locally_modified_and_stale_count):
self._locally_modified_and_stale_count = locally_modified_and_stale_count
@property
def sync_failure_count(self):
return self._sync_failure_count
@sync_failure_count.setter
def sync_failure_count(self, sync_failure_count):
self._sync_failure_count = sync_failure_count
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ControllerStatusDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f7080eaefa65d3a729296006984ef9636d09f1a7
| 4,838
|
py
|
Python
|
repo/plugin.video.exodus/resources/lib/sources/mfree_mv.py
|
Hades01/Addons
|
710da97ac850197498a3cd64be1811c593610add
|
[
"Apache-2.0"
] | null | null | null |
repo/plugin.video.exodus/resources/lib/sources/mfree_mv.py
|
Hades01/Addons
|
710da97ac850197498a3cd64be1811c593610add
|
[
"Apache-2.0"
] | null | null | null |
repo/plugin.video.exodus/resources/lib/sources/mfree_mv.py
|
Hades01/Addons
|
710da97ac850197498a3cd64be1811c593610add
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import directstream
class source:
    """Exodus scraper for m4ufree.info (Python 2 code).

    Screen-scrapes the site's search/tag pages to resolve a movie title to a
    page URL (`movie`), then extracts playable stream URLs (`sources`).
    All network and parsing failures are deliberately swallowed (best-effort
    provider contract: return None / partial results instead of raising).
    """
    def __init__(self):
        self.domains = ['m4ufree.info']
        self.base_link = 'http://m4ufree.info'
        self.include_link = '/include/autocomplete.php?q='
        self.search_link = '/tag/%s'
    def movie(self, imdb, title, year):
        """Return the site-relative URL for *title*/*year*, or None."""
        try:
            t = cleantitle.get(title)
            #r = cache.get(self.mfree_mvcache, 170)
            #r = [i for i in r if t == i[0] and year == i[1]][0]
            # Slugify the title the way the site's /tag/ URLs expect.
            # (py2 str.translate(None, chars) deletes the listed characters)
            q = (title.translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower()
            q = urlparse.urljoin(self.base_link, self.search_link % q)
            r = client.request(q)
            # Pair each result's href with its visible (cleaned) title text.
            r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'top-item'}), client.parseDOM(r, 'a', attrs = {'class': 'top-item'}))
            r = [(i[0], re.sub('^Watch\s*|<.+?>|</.+?>', '', i[1])) for i in r]
            # Split "Title (YYYY)" / "Title YYYY" into (title, year).
            r = [(i[0], re.findall('(.+?) (?:\(|)(\d{4})(?:\)|)$', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            # First entry whose normalized title and year both match.
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
            # Strip scheme/host, keep the site-relative path.
            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
    def mfree_mvcache(self):
        """Fetch the site's autocomplete index as [(clean_title, year)]."""
        try:
            u = urlparse.urljoin(self.base_link, self.include_link)
            r = client.request(u).splitlines()
            r = [re.findall('(.+?) (?:\(|)(\d{4})(?:\)|)$', i.strip()) for i in r]
            r = [(cleantitle.get(i[0][0]), i[0][1]) for i in r if len(i) > 0]
            return r
        except:
            return
    def sources(self, url, hostDict, hostprDict):
        """Return a list of stream-source dicts for the page at *url*."""
        try:
            sources = []
            if url == None: return sources
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
            quality = client.parseDOM(r, 'h3', attrs = {'title': 'Quality.+?'})[0]
            quality = client.parseDOM(quality, 'span')[0]
            # Skip low-quality cam/telesync rips entirely.
            if quality.lower() in ['ts', 'tc', 'cam']: raise Exception()
            url = client.parseDOM(r, 'a', ret='href')
            url = [i for i in url if '-full-movie-' in i][0]
            r = client.request(url)
            headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
            # Each episode/server button links an embed id; fetch each embed
            # page and accumulate its HTML for link extraction below.
            servers = client.parseDOM(r, 'span', ret='link', attrs = {'class': '[^"]*btn-eps(?:\s+|)'})
            for server in servers:
                try:
                    url = '/demo.php?v=%s' % server
                    url = urlparse.urljoin(self.base_link, url)
                    r += str(client.request(url, headers=headers))
                except:
                    pass
            links = client.parseDOM(r, 'source', ret='src', attrs = {'type': 'video/mp4'})
            links += client.parseDOM(r, 'iframe', ret='src')
            for link in links:
                try:
                    if not link.startswith('http'): link = urlparse.urljoin(self.base_link, link)
                    # Follow redirects to the final (google video) URL.
                    url = client.request(link, output='geturl')
                    quality = directstream.googletag(url)[0]['quality']
                    sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'MFree', 'url': url, 'direct': True, 'debridonly': False})
                except:
                    pass
            return sources
        except:
            return sources
    def resolve(self, url):
        """Follow redirects and normalize the scheme of the final URL."""
        try:
            url = client.request(url, output='geturl')
            if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
            else: url = url.replace('https://', 'http://')
            return url
        except:
            return
| 34.312057
| 147
| 0.509921
|
import re,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import directstream
class source:
    """Exodus scraper for m4ufree.info (Python 2; comment-stripped variant).

    Resolves a movie title to a site URL (`movie`), scrapes playable stream
    URLs (`sources`), and normalizes redirect targets (`resolve`). Errors
    are swallowed by design: methods return None / partial results.
    """
    def __init__(self):
        self.domains = ['m4ufree.info']
        self.base_link = 'http://m4ufree.info'
        self.include_link = '/include/autocomplete.php?q='
        self.search_link = '/tag/%s'
    def movie(self, imdb, title, year):
        """Return the site-relative URL for *title*/*year*, or None."""
        try:
            t = cleantitle.get(title)
            # Slugify the title for the site's /tag/ search URL.
            q = (title.translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower()
            q = urlparse.urljoin(self.base_link, self.search_link % q)
            r = client.request(q)
            r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'top-item'}), client.parseDOM(r, 'a', attrs = {'class': 'top-item'}))
            r = [(i[0], re.sub('^Watch\s*|<.+?>|</.+?>', '', i[1])) for i in r]
            r = [(i[0], re.findall('(.+?) (?:\(|)(\d{4})(?:\)|)$', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            # Keep the first result whose normalized title and year match.
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
    def mfree_mvcache(self):
        """Fetch the site's autocomplete index as [(clean_title, year)]."""
        try:
            u = urlparse.urljoin(self.base_link, self.include_link)
            r = client.request(u).splitlines()
            r = [re.findall('(.+?) (?:\(|)(\d{4})(?:\)|)$', i.strip()) for i in r]
            r = [(cleantitle.get(i[0][0]), i[0][1]) for i in r if len(i) > 0]
            return r
        except:
            return
    def sources(self, url, hostDict, hostprDict):
        """Return a list of stream-source dicts for the page at *url*."""
        try:
            sources = []
            if url == None: return sources
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
            quality = client.parseDOM(r, 'h3', attrs = {'title': 'Quality.+?'})[0]
            quality = client.parseDOM(quality, 'span')[0]
            # Reject cam/telesync quality outright.
            if quality.lower() in ['ts', 'tc', 'cam']: raise Exception()
            url = client.parseDOM(r, 'a', ret='href')
            url = [i for i in url if '-full-movie-' in i][0]
            r = client.request(url)
            headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
            servers = client.parseDOM(r, 'span', ret='link', attrs = {'class': '[^"]*btn-eps(?:\s+|)'})
            for server in servers:
                try:
                    url = '/demo.php?v=%s' % server
                    url = urlparse.urljoin(self.base_link, url)
                    # Accumulate each embed page's HTML for extraction below.
                    r += str(client.request(url, headers=headers))
                except:
                    pass
            links = client.parseDOM(r, 'source', ret='src', attrs = {'type': 'video/mp4'})
            links += client.parseDOM(r, 'iframe', ret='src')
            for link in links:
                try:
                    if not link.startswith('http'): link = urlparse.urljoin(self.base_link, link)
                    url = client.request(link, output='geturl')
                    quality = directstream.googletag(url)[0]['quality']
                    sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'MFree', 'url': url, 'direct': True, 'debridonly': False})
                except:
                    pass
            return sources
        except:
            return sources
    def resolve(self, url):
        """Follow redirects and normalize the scheme of the final URL."""
        try:
            url = client.request(url, output='geturl')
            if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
            else: url = url.replace('https://', 'http://')
            return url
        except:
            return
| true
| true
|
f7080f0c7f3c31c0348f24f044d9060fdc789815
| 4,232
|
py
|
Python
|
video_demo.py
|
mdraw/AlphaPose
|
bed8e0798f6deed4789b9ae2646f72b9fd138c5b
|
[
"Apache-2.0"
] | null | null | null |
video_demo.py
|
mdraw/AlphaPose
|
bed8e0798f6deed4789b9ae2646f72b9fd138c5b
|
[
"Apache-2.0"
] | null | null | null |
video_demo.py
|
mdraw/AlphaPose
|
bed8e0798f6deed4789b9ae2646f72b9fd138c5b
|
[
"Apache-2.0"
] | null | null | null |
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.nn as nn
import torch.utils.data
import numpy as np
from opt import opt
from dataloader import VideoLoader, DetectionLoader, DetectionProcessor, DataWriter, Mscoco
from yolo.util import write_results, dynamic_write_results
from SPPE.src.main_fast_inference import *
import ntpath
import os
import sys
from tqdm import tqdm
import time
from fn import getTime
import cv2
from pPose_nms import pose_nms, write_json
args = opt
args.dataset = 'coco'
# Multiprocess data loading needs forkserver + file_system sharing to avoid
# fd exhaustion when tensors are passed between workers.
if not args.sp:
    torch.multiprocessing.set_start_method('forkserver', force=True)
    torch.multiprocessing.set_sharing_strategy('file_system')
if __name__ == "__main__":
    videofile = args.video
    mode = args.mode
    if not os.path.exists(args.outputpath):
        os.mkdir(args.outputpath)
    if not len(videofile):
        raise IOError('Error: must contain --video')
    # Load input video
    data_loader = VideoLoader(videofile, batchSize=args.detbatch).start()
    (fourcc,fps,frameSize) = data_loader.videoinfo()
    # Load detection loader
    print('Loading YOLO model..')
    sys.stdout.flush()
    det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()
    det_processor = DetectionProcessor(det_loader).start()
    # Load pose model
    pose_dataset = Mscoco()
    if args.fast_inference:
        print('Using fast inference...')
        pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
    else:
        print('Using slow, more accurate inference...')
        pose_model = InferenNet(4 * 1 + 1, pose_dataset)
    # NOTE(review): bare statement below has no effect — looks like a dropped
    # `.cuda()` (or `.to(device)`) call; confirm against upstream AlphaPose.
    pose_model
    pose_model.eval()
    # Per-frame timing buckets: detection / pose / post-processing.
    runtime_profile = {
        'dt': [],
        'pt': [],
        'pn': []
    }
    # Data writer
    save_path = os.path.join(args.outputpath, 'AlphaPose_'+ntpath.basename(videofile).split('.')[0]+'.avi')
    writer = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps, frameSize).start()
    im_names_desc = tqdm(range(data_loader.length()))
    batchSize = args.posebatch
    for i in im_names_desc:
        start_time = getTime()
        with torch.no_grad():
            (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
            if orig_img is None:
                break
            # No detections: emit the raw frame and move on.
            if boxes is None or boxes.nelement() == 0:
                writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
                continue
            ckpt_time, det_time = getTime(start_time)
            runtime_profile['dt'].append(det_time)
            # Pose Estimation
            datalen = inps.size(0)
            leftover = 0
            if (datalen) % batchSize:
                leftover = 1
            # Ceil-divide the crops into pose batches.
            num_batches = datalen // batchSize + leftover
            hm = []
            for j in range(num_batches):
                inps_j = inps[j*batchSize:min((j + 1)*batchSize, datalen)]
                hm_j = pose_model(inps_j)
                hm.append(hm_j)
            hm = torch.cat(hm)
            ckpt_time, pose_time = getTime(ckpt_time)
            runtime_profile['pt'].append(pose_time)
            hm = hm.cpu().data
            writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])
            ckpt_time, post_time = getTime(ckpt_time)
            runtime_profile['pn'].append(post_time)
        if args.profile:
            # TQDM
            im_names_desc.set_description(
            'det time: {dt:.3f} | pose time: {pt:.2f} | post processing: {pn:.4f}'.format(
                dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))
            )
    print('===========================> Finish Model Running.')
    if (args.save_img or args.save_video) and not args.vis_fast:
        print('===========================> Rendering remaining images in the queue...')
        print('===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')
        while(writer.running()):
            pass
    writer.stop()
    final_result = writer.results()
    write_json(final_result, args.outputpath)
| 34.406504
| 144
| 0.618384
|
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.nn as nn
import torch.utils.data
import numpy as np
from opt import opt
from dataloader import VideoLoader, DetectionLoader, DetectionProcessor, DataWriter, Mscoco
from yolo.util import write_results, dynamic_write_results
from SPPE.src.main_fast_inference import *
import ntpath
import os
import sys
from tqdm import tqdm
import time
from fn import getTime
import cv2
from pPose_nms import pose_nms, write_json
args = opt
args.dataset = 'coco'
# forkserver + file_system sharing avoids fd exhaustion in worker loaders.
if not args.sp:
    torch.multiprocessing.set_start_method('forkserver', force=True)
    torch.multiprocessing.set_sharing_strategy('file_system')
if __name__ == "__main__":
    videofile = args.video
    mode = args.mode
    if not os.path.exists(args.outputpath):
        os.mkdir(args.outputpath)
    if not len(videofile):
        raise IOError('Error: must contain --video')
    # Video reader -> YOLO detector -> detection post-processor pipeline.
    data_loader = VideoLoader(videofile, batchSize=args.detbatch).start()
    (fourcc,fps,frameSize) = data_loader.videoinfo()
    print('Loading YOLO model..')
    sys.stdout.flush()
    det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()
    det_processor = DetectionProcessor(det_loader).start()
    pose_dataset = Mscoco()
    if args.fast_inference:
        print('Using fast inference...')
        pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
    else:
        print('Using slow, more accurate inference...')
        pose_model = InferenNet(4 * 1 + 1, pose_dataset)
    # NOTE(review): bare statement has no effect — presumably a dropped
    # `.cuda()` call; confirm against upstream.
    pose_model
    pose_model.eval()
    # Per-frame timing buckets: detection / pose / post-processing.
    runtime_profile = {
        'dt': [],
        'pt': [],
        'pn': []
    }
    save_path = os.path.join(args.outputpath, 'AlphaPose_'+ntpath.basename(videofile).split('.')[0]+'.avi')
    writer = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps, frameSize).start()
    im_names_desc = tqdm(range(data_loader.length()))
    batchSize = args.posebatch
    for i in im_names_desc:
        start_time = getTime()
        with torch.no_grad():
            (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
            if orig_img is None:
                break
            # No detections: emit the raw frame and continue.
            if boxes is None or boxes.nelement() == 0:
                writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
                continue
            ckpt_time, det_time = getTime(start_time)
            runtime_profile['dt'].append(det_time)
            datalen = inps.size(0)
            leftover = 0
            if (datalen) % batchSize:
                leftover = 1
            # Ceil-divide the person crops into pose-estimation batches.
            num_batches = datalen // batchSize + leftover
            hm = []
            for j in range(num_batches):
                inps_j = inps[j*batchSize:min((j + 1)*batchSize, datalen)]
                hm_j = pose_model(inps_j)
                hm.append(hm_j)
            hm = torch.cat(hm)
            ckpt_time, pose_time = getTime(ckpt_time)
            runtime_profile['pt'].append(pose_time)
            hm = hm.cpu().data
            writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])
            ckpt_time, post_time = getTime(ckpt_time)
            runtime_profile['pn'].append(post_time)
        if args.profile:
            im_names_desc.set_description(
            'det time: {dt:.3f} | pose time: {pt:.2f} | post processing: {pn:.4f}'.format(
                dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))
            )
    print('===========================> Finish Model Running.')
    if (args.save_img or args.save_video) and not args.vis_fast:
        print('===========================> Rendering remaining images in the queue...')
        print('===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')
        while(writer.running()):
            pass
    writer.stop()
    final_result = writer.results()
    write_json(final_result, args.outputpath)
write_json(final_result, args.outputpath)
| true
| true
|
f7080f3228b0020368d246c9844f3b75276a7981
| 2,368
|
py
|
Python
|
tools/telemetry/telemetry/page/actions/loop_unittest.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2019-11-28T10:46:52.000Z
|
2019-11-28T10:46:52.000Z
|
tools/telemetry/telemetry/page/actions/loop_unittest.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
tools/telemetry/telemetry/page/actions/loop_unittest.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2015-03-27T11:15:39.000Z
|
2016-08-17T14:19:56.000Z
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import decorators
from telemetry.core import util
from telemetry.page.actions import loop
from telemetry.unittest_util import tab_test_case
# JS snippets evaluated in the page: true once the element fired 'loop'.
AUDIO_1_LOOP_CHECK = 'window.__hasEventCompleted("#audio_1", "loop");'
VIDEO_1_LOOP_CHECK = 'window.__hasEventCompleted("#video_1", "loop");'
class LoopActionTest(tab_test_case.TabTestCase):
  """Browser tests for the telemetry Loop page action on media elements."""
  def setUp(self):
    tab_test_case.TabTestCase.setUp(self)
    self.Navigate('video_test.html')
  @decorators.Disabled('android', 'linux')  # crbug.com/418577
  def testLoopWithNoSelector(self):
    """Tests that with no selector Loop action loops first media element."""
    # NOTE(review): despite the docstring, this passes selector='#video_1'
    # explicitly — confirm which behavior is intended.
    action = loop.LoopAction(loop_count=2, selector='#video_1',
                             timeout_in_seconds=10)
    action.WillRunAction(self._tab)
    action.RunAction(self._tab)
    # Assert only first video has played.
    self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
    self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
  @decorators.Disabled('android', 'linux')  # crbug.com/418577
  def testLoopWithAllSelector(self):
    """Tests that Loop action loops all video elements with selector='all'."""
    action = loop.LoopAction(loop_count=2, selector='all',
                             timeout_in_seconds=10)
    action.WillRunAction(self._tab)
    # Both videos not playing before running action.
    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
    self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
    action.RunAction(self._tab)
    # Assert all media elements played.
    self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
    self.assertTrue(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
  @decorators.Disabled('android', 'linux')  # crbug.com/418577
  def testLoopWaitForLoopTimeout(self):
    """Tests that wait_for_loop timeout_in_secondss if video does not loop."""
    action = loop.LoopAction(loop_count=2, selector='#video_1',
                             timeout_in_seconds=1)
    action.WillRunAction(self._tab)
    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
    self.assertRaises(util.TimeoutException, action.RunAction, self._tab)
| 44.679245
| 78
| 0.743243
|
from telemetry import decorators
from telemetry.core import util
from telemetry.page.actions import loop
from telemetry.unittest_util import tab_test_case
# JS snippets evaluated in the page: true once the element fired 'loop'.
AUDIO_1_LOOP_CHECK = 'window.__hasEventCompleted("#audio_1", "loop");'
VIDEO_1_LOOP_CHECK = 'window.__hasEventCompleted("#video_1", "loop");'
class LoopActionTest(tab_test_case.TabTestCase):
  """Browser tests for the telemetry Loop page action on media elements.

  Fix: the decorator and the `def` had been fused onto single lines
  (`@decorators.Disabled(...) def test...`), which is a SyntaxError in
  Python. Each decorator is restored to its own line; the statements
  themselves are unchanged.
  """
  def setUp(self):
    tab_test_case.TabTestCase.setUp(self)
    self.Navigate('video_test.html')
  @decorators.Disabled('android', 'linux')
  def testLoopWithNoSelector(self):
    """Loops '#video_1' and checks only that element completed a loop."""
    action = loop.LoopAction(loop_count=2, selector='#video_1',
                             timeout_in_seconds=10)
    action.WillRunAction(self._tab)
    action.RunAction(self._tab)
    self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
    self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
  @decorators.Disabled('android', 'linux')
  def testLoopWithAllSelector(self):
    """Loops every media element when selector='all'."""
    action = loop.LoopAction(loop_count=2, selector='all',
                             timeout_in_seconds=10)
    action.WillRunAction(self._tab)
    # Neither element has looped before the action runs.
    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
    self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
    action.RunAction(self._tab)
    self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
    self.assertTrue(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
  @decorators.Disabled('android', 'linux')
  def testLoopWaitForLoopTimeout(self):
    """RunAction raises TimeoutException when the video cannot loop in time."""
    action = loop.LoopAction(loop_count=2, selector='#video_1',
                             timeout_in_seconds=1)
    action.WillRunAction(self._tab)
    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
    self.assertRaises(util.TimeoutException, action.RunAction, self._tab)
| true
| true
|
f7080fb80e9aa983997b0fbf7e6f27f3d4fc213f
| 3,686
|
py
|
Python
|
syntax_highlighting/en/pygments/catala_en.py
|
xbonnetain/catala
|
04604ad9d01f99fe19ffca354e2cd70ef8ab34d3
|
[
"Apache-2.0"
] | null | null | null |
syntax_highlighting/en/pygments/catala_en.py
|
xbonnetain/catala
|
04604ad9d01f99fe19ffca354e2cd70ef8ab34d3
|
[
"Apache-2.0"
] | null | null | null |
syntax_highlighting/en/pygments/catala_en.py
|
xbonnetain/catala
|
04604ad9d01f99fe19ffca354e2cd70ef8ab34d3
|
[
"Apache-2.0"
] | null | null | null |
from pygments.lexer import RegexLexer, bygroups
from pygments.token import *
import re
__all__ = ['CatalaEnLexer']
class CatalaEnLexer(RegexLexer):
    """Pygments lexer for the English surface syntax of Catala.

    Literate Catala is mostly prose ('root' state); code lives between
    ``/*`` and ``*/`` ('code' state), and ``@@``/``@`` fence headings
    ('main__1'/'main__2' states).
    """
    name = 'CatalaEn'
    aliases = ['catala_en']
    filenames = ['*.catala_en']
    flags = re.MULTILINE | re.UNICODE
    tokens = {
        'root': [
            # Heading fences and the code-block opener.
            (u'(@@)', bygroups(Generic.Heading), 'main__1'),
            (u'(@)', bygroups(Generic.Heading), 'main__2'),
            (u'([^\\/\\n\\r])', bygroups(Text)),
            (u'(\\/\\*)', bygroups(Text), 'code'),
            ('(\n|\r|\r\n)', Text),
            ('.', Text),
        ],
        'code': [
            # '*/' returns to prose; '#' starts a line comment.
            (u'(\\*\\/)', bygroups(Text), 'root'),
            (u'(\\s*\\#.*$)', bygroups(Comment.Single)),
            # 'context' followed by a (possibly accented) lowercase ident.
            (u'(context)(\\s+)([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)',
             bygroups(Keyword.Declaration, Text, Name.Variable)),
            # Expression-level keywords, then declaration keywords.
            (u'\\b(match|with\\s+pattern|fixed|by|decreasing|increasing|varies|with|we\\s+have|in|such\\s+that|exists|for|all|of|if|then|else|initial)\\b', bygroups(Keyword.Reserved)),
            (u'\\b(scope|depends\\s+on|declaration|includes|collection|content|optional|structure|enumeration|context|rule|under\\s+condition|condition|data|consequence|fulfilled|equals|assertion|definition|label|exception)\\b', bygroups(Keyword.Declaration)),
            # |d/m/y| date literals, booleans, numbers.
            (u'(\\|[0-9]+/[0-9]+/[0-9]+\\|)', bygroups(Number.Integer)),
            (u'\\b(true|false)\\b', bygroups(Keyword.Constant)),
            (u'\\b([0-9]+(,[0.9]*|))\\b', bygroups(Number.Integer)),
            (u'(\\-\\-|\\;|\\.|\\,|\\:|\\(|\\)|\\[|\\]|\\{|\\})', bygroups(
                Operator)),
            # Operators, including the typed variants (+. +@ +^ +$ etc.).
            (u'(\\-\\>|\\+\\.|\\+\\@|\\+\\^|\\+\\$|\\+|\\-\\.|\\-\\@|\\-\\^|\\-\\$|\\-|\\*\\.|\\*\\@|\\*\\^|\\*\\$|\\*|/\\.|/\\@|/\\^|/\\$|/|\\!|>\\.|>=\\.|<=\\.|<\\.|>\\@|>=\\@|<=\\@|<\\@|>\\$|>=\\$|<=\\$|<\\$|>\\^|>=\\^|<=\\^|<\\^|>|>=|<=|<|=|not|or|and|\\$|%|year|month|day)', bygroups(Operator)),
            (u'\\b(integer|boolean|date|money|text|decimal|number|sum)\\b',
             bygroups(Keyword.Type)),
            # Qualified names: Class.field, var.path, then bare idents.
            (u'\\b([A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)(\\.)([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)\\b', bygroups(Name.Class, Operator, Name.Variable)),
            (u'\\b([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)(\\.)([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\'\\.]*)\\b', bygroups(Name.Variable, Operator, Text)),
            (u'\\b([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)\\b', bygroups(Name.Variable)),
            (u'\\b([A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)\\b', bygroups(Name.Class)),
            ('(\n|\r|\r\n)', Text),
            ('.', Text),
        ],
        'main__1': [
            # Heading text until the closing '@@'.
            (u'(@@)', bygroups(Generic.Heading), 'root'),
            (u'(.)', bygroups(Generic.Heading)),
            ('(\n|\r|\r\n)', Text),
            ('.', Text),
        ],
        'main__2': [
            # Heading text until the closing '@'.
            (u'(@)', bygroups(Generic.Heading), 'root'),
            (u'(.)', bygroups(Generic.Heading)),
            ('(\n|\r|\r\n)', Text),
            ('.', Text),
        ]
    }
| 62.474576
| 352
| 0.511394
|
from pygments.lexer import RegexLexer, bygroups
from pygments.token import *
import re
__all__ = ['CatalaEnLexer']
class CatalaEnLexer(RegexLexer):
    """Pygments lexer for English-syntax Catala: prose in 'root', code
    between /* and */ in 'code', @@/@ fenced headings in 'main__1'/'main__2'.
    """
    name = 'CatalaEn'
    aliases = ['catala_en']
    filenames = ['*.catala_en']
    flags = re.MULTILINE | re.UNICODE
    tokens = {
        'root': [
            (u'(@@)', bygroups(Generic.Heading), 'main__1'),
            (u'(@)', bygroups(Generic.Heading), 'main__2'),
            (u'([^\\/\\n\\r])', bygroups(Text)),
            (u'(\\/\\*)', bygroups(Text), 'code'),
            ('(\n|\r|\r\n)', Text),
            ('.', Text),
        ],
        'code': [
            (u'(\\*\\/)', bygroups(Text), 'root'),
            (u'(\\s*\\#.*$)', bygroups(Comment.Single)),
            (u'(context)(\\s+)([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)',
             bygroups(Keyword.Declaration, Text, Name.Variable)),
            (u'\\b(match|with\\s+pattern|fixed|by|decreasing|increasing|varies|with|we\\s+have|in|such\\s+that|exists|for|all|of|if|then|else|initial)\\b', bygroups(Keyword.Reserved)),
            (u'\\b(scope|depends\\s+on|declaration|includes|collection|content|optional|structure|enumeration|context|rule|under\\s+condition|condition|data|consequence|fulfilled|equals|assertion|definition|label|exception)\\b', bygroups(Keyword.Declaration)),
            (u'(\\|[0-9]+/[0-9]+/[0-9]+\\|)', bygroups(Number.Integer)),
            (u'\\b(true|false)\\b', bygroups(Keyword.Constant)),
            (u'\\b([0-9]+(,[0.9]*|))\\b', bygroups(Number.Integer)),
            (u'(\\-\\-|\\;|\\.|\\,|\\:|\\(|\\)|\\[|\\]|\\{|\\})', bygroups(
                Operator)),
            (u'(\\-\\>|\\+\\.|\\+\\@|\\+\\^|\\+\\$|\\+|\\-\\.|\\-\\@|\\-\\^|\\-\\$|\\-|\\*\\.|\\*\\@|\\*\\^|\\*\\$|\\*|/\\.|/\\@|/\\^|/\\$|/|\\!|>\\.|>=\\.|<=\\.|<\\.|>\\@|>=\\@|<=\\@|<\\@|>\\$|>=\\$|<=\\$|<\\$|>\\^|>=\\^|<=\\^|<\\^|>|>=|<=|<|=|not|or|and|\\$|%|year|month|day)', bygroups(Operator)),
            (u'\\b(integer|boolean|date|money|text|decimal|number|sum)\\b',
             bygroups(Keyword.Type)),
            (u'\\b([A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)(\\.)([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)\\b', bygroups(Name.Class, Operator, Name.Variable)),
            (u'\\b([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)(\\.)([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\'\\.]*)\\b', bygroups(Name.Variable, Operator, Text)),
            (u'\\b([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)\\b', bygroups(Name.Variable)),
            (u'\\b([A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)\\b', bygroups(Name.Class)),
            ('(\n|\r|\r\n)', Text),
            ('.', Text),
        ],
        'main__1': [
            (u'(@@)', bygroups(Generic.Heading), 'root'),
            (u'(.)', bygroups(Generic.Heading)),
            ('(\n|\r|\r\n)', Text),
            ('.', Text),
        ],
        'main__2': [
            (u'(@)', bygroups(Generic.Heading), 'root'),
            (u'(.)', bygroups(Generic.Heading)),
            ('(\n|\r|\r\n)', Text),
            ('.', Text),
        ]
    }
| true
| true
|
f7080fd2eda7217128ad61bbc427baee50aee4c9
| 292
|
py
|
Python
|
ctpn/show_model.py
|
aiedward/OCR-1
|
82ce764fb0071917360ea8b1ec5372035d0897b5
|
[
"Apache-2.0"
] | 1
|
2020-04-20T05:10:40.000Z
|
2020-04-20T05:10:40.000Z
|
ctpn/show_model.py
|
aiedward/OCR-1
|
82ce764fb0071917360ea8b1ec5372035d0897b5
|
[
"Apache-2.0"
] | null | null | null |
ctpn/show_model.py
|
aiedward/OCR-1
|
82ce764fb0071917360ea8b1ec5372035d0897b5
|
[
"Apache-2.0"
] | null | null | null |
from tensorflow.python import pywrap_tensorflow

# Dump the names of all variables stored in the CTPN checkpoint.
checkpoint_path = 'checkpoints/VGGnet_fast_rcnn_iter_50000.ckpt'
ckpt_reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
for tensor_name in ckpt_reader.get_variable_to_shape_map():
    print("tensor_name: ", tensor_name)
| 41.714286
| 64
| 0.84589
|
from tensorflow.python import pywrap_tensorflow
# Dump the names of all variables stored in the CTPN checkpoint.
checkpoint_path = 'checkpoints/VGGnet_fast_rcnn_iter_50000.ckpt'
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
    print("tensor_name: ", key)
| true
| true
|
f70811e4e7d8c3ad97aa34bee3b40802deccfec3
| 9,161
|
py
|
Python
|
paper_material.py
|
hakyimlab/multixcan-paper
|
bafad243f42298a0973ba4d4009b2b2f783743a3
|
[
"MIT"
] | null | null | null |
paper_material.py
|
hakyimlab/multixcan-paper
|
bafad243f42298a0973ba4d4009b2b2f783743a3
|
[
"MIT"
] | null | null | null |
paper_material.py
|
hakyimlab/multixcan-paper
|
bafad243f42298a0973ba4d4009b2b2f783743a3
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
import os
import subprocess
import svgwrite
import math
import shutil
########################################################################################################################
def ensure_requisite_folders(path):
    """Create the parent directory of *path* if it does not already exist.

    A bare filename (no directory component) is a no-op. Uses
    ``exist_ok=True`` so the exists-then-create race of the previous
    ``os.path.exists`` check cannot raise if the folder appears in between.
    """
    folder = os.path.split(path)[0]
    if folder:
        os.makedirs(folder, exist_ok=True)
def _png_name(p):
return p.split(".svg")[0]+".png"
def to_png(from_path, to_path):
    """Convert *from_path* (SVG) to *to_path* (PNG) via ImageMagick `convert`.

    Creates the destination folder first. The command is passed as an
    argument list: the previous string-build + ``.split()`` broke on
    paths containing whitespace.
    """
    ensure_requisite_folders(to_path)
    subprocess.call(["convert", from_path, to_path])
def _advance_cursor(c, x, y):
return (c[0]+x, c[1]+y)
def _kot(dwg, _c, text, ox=-40, oy=50, style="font-size:40;font-family:Arial;font-weight:bold;stroke:black;stroke-width:1;fill:black"):
    """Draw a bold panel label *text* at cursor *_c* offset by (ox, oy)."""
    dwg.add(dwg.text(text, insert=(_c[0]+ox, _c[1]+oy), fill='black', style=style))
def figure_1(args):
    """Compose the MulTiXcan presentation figure (panels a, b) and save PNG."""
    _p = "_fig1.svg"#os.path.join(args.output_folder, "fig1.svg")
    # diagram is (341,972); plots are 600,600
    _1_size = (467, 986)
    _2_size = (341, 972)
    _size = (_1_size[0]+140+_2_size[0], _1_size[1])
    dwg = svgwrite.Drawing(_p, size=_size)
    # White background canvas.
    dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
    _c = (40,0) # conceptual cursor
    dwg.add(dwg.image(os.path.join(args.input_folder_2, "multixcan_illustration.png"), _c, _1_size))
    _kot(dwg, _c, "a", ox=-20, oy=30)
    _c = _advance_cursor(_c, _1_size[0] + 90 , 0)
    dwg.add(dwg.image(os.path.join(args.input_folder_2, "S-Predixcan-MT-diagram_2.png"), _c, _1_size))
    _kot(dwg, _c, "b", ox=-20, oy=30)
    dwg.save()
    # Rasterize the temporary SVG and discard it.
    t = os.path.join(args.output_folder, "fig-multi-tissue-presentation.png")
    to_png(_p, t)
    os.remove(_p)
def figure_2(args):
    """Compose the UKB cholesterol figure (3 panels in a row) and save PNG."""
    _p = "_fig2.svg"#os.path.join(args.output_folder, "fig1.svg")
    # diagram is (341,972); plots are 600,600
    _1_size = (600, 600)
    _size = (_1_size[0]*3+140, _1_size[1])
    dwg = svgwrite.Drawing(_p, size=_size)
    # White background canvas.
    dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
    _c = (20,0) # conceptual cursor
    dwg.add(dwg.image(os.path.join(args.plots_folder, "ukb", "ukb_mt_vs_p_number_significant.png"), _c, _1_size))
    _kot(dwg, _c, "a", ox=0, oy=30)
    _c = _advance_cursor(_c, _1_size[0] + 50, 0)
    dwg.add(dwg.image(os.path.join(args.plots_folder, "ukb", "UKB_Cholesterol_significant_bars.png"), _c, _1_size))
    _kot(dwg, _c, "b", ox=0, oy=30)
    _c =_advance_cursor (_c, _1_size[0]+50, 0)
    dwg.add(dwg.image(os.path.join(args.plots_folder, "ukb", "UKB_Cholesterol_qq.png"), _c, _1_size))
    _kot(dwg, _c, "c", ox=0, oy=30)
    dwg.save()
    # Rasterize the temporary SVG and discard it.
    t = os.path.join(args.output_folder, "fig-multi-tissue-ukb-cholesterol.png")
    to_png(_p, t)
    os.remove(_p)
def figure_3(args):
    """Compose the S-MulTiXcan presentation figure (2x2 panels) and save PNG."""
    _p = "_fig3.svg"#os.path.join(args.output_folder, "fig1.svg")
    # diagram is (341,972); plots are 600,600; illustration is 455,571
    _1_size = (600, 600)
    _2_size = (526*600.0/552, 600)
    _size = (_1_size[0]*2+80, _1_size[1]*2+40)
    dwg = svgwrite.Drawing(_p, size=_size)
    # White background canvas.
    dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
    # Center the narrower illustration within the panel-a cell.
    _c = (40+math.ceil(_1_size[0]-_2_size[0])/2.0, 0) # conceptual cursor
    dwg.add(dwg.image(os.path.join(args.input_folder_2, "smultixcan_illustration.png"), _c, _1_size))
    _kot(dwg, _c, "a", ox=-20, oy=30)
    _c =_advance_cursor (_c, _2_size[0]+40, 0)
    dwg.add(dwg.image(os.path.join(args.plots_folder, "gwas", "smt_vs_sp_number_significant.png"), _c, _1_size))
    _kot(dwg, _c, "b", ox=-20, oy=30)
    _c = (40, _1_size[1]+40) # conceptual cursor
    dwg.add(dwg.image(os.path.join(args.plots_folder, "gwas", "PGC_scz2_qq.png"), _c, _1_size))
    _kot(dwg, _c, "c", ox=-20, oy=30)
    _c =_advance_cursor (_c, _1_size[1]+40, 0)
    dwg.add(dwg.image(os.path.join(args.plots_folder, "gwas", "PGC_scz2_significant_bars.png"), _c, _1_size))
    _kot(dwg, _c, "d", ox=-20, oy=30)
    dwg.save()
    # Rasterize the temporary SVG and discard it.
    t = os.path.join(args.output_folder, "fig-s-multi-tissue-presentation.png")
    to_png(_p, t)
    os.remove(_p)
def figure_5(args):
    """Compose the null-simulation supplementary QQ figure and save PNG."""
    _p = "_fig5.svg"#os.path.join(args.output_folder, "fig1.svg")
    _1_size = (800, 800)
    _size = (_1_size[0]*2+80, _1_size[1]+40)
    dwg = svgwrite.Drawing(_p, size=_size)
    # White background canvas.
    dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
    _c = (40, 0) # conceptual cursor
    dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "null_30_qq.png"), _c, _1_size))
    _kot(dwg, _c, "a", ox=-20, oy=30)
    _c =_advance_cursor (_c, _1_size[0]+40, 0)
    dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "null_0_qq.png"), _c, _1_size))
    _kot(dwg, _c, "b", ox=-20, oy=30)
    dwg.save()
    # Rasterize the temporary SVG and discard it.
    t = os.path.join(args.output_folder, "supp-fig-simulations-null.png")
    to_png(_p, t)
    os.remove(_p)
def figure_6_d(args):
    """Compose the 4-panel (2x2) simulations supplementary figure and save PNG.

    NOTE(review): writes the same output name as figure_6 — presumably an
    alternate layout where only one of the two is used; confirm which.
    """
    _p = "_fig6.svg" # os.path.join(args.output_folder, "fig1.svg")
    _1_size = (800, 800)
    _size = (_1_size[0] * 2 + 80, _1_size[1] *2 + 80)
    dwg = svgwrite.Drawing(_p, size=_size)
    # White background canvas.
    dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
    _c = (40.0, 40) # conceptual cursor
    dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "single_tissue_bp.png"), _c, _1_size))
    _kot(dwg, _c, "a", ox=-20, oy=30)
    _c =_advance_cursor (_c, _1_size[0]+40, 0)
    dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "correlated_tissues_bp.png"), _c, _1_size))
    _kot(dwg, _c, "b", ox=-20, oy=30)
    # Second row.
    _c = (40, _1_size[1]*1+80)
    dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "combination_brain_bp.png"), _c, _1_size))
    _kot(dwg, _c, "c", ox=-20, oy=30)
    _c =_advance_cursor (_c, _1_size[0]+40, 0)
    dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "combination_all_bp.png"), _c, _1_size))
    _kot(dwg, _c, "d", ox=-20, oy=30)
    dwg.save()
    # Rasterize the temporary SVG and discard it.
    t = os.path.join(args.output_folder, "supp-fig-simulations-misc.png")
    to_png(_p, t)
    os.remove(_p)
def figure_6(args):
    """Assemble a 1x3 supplementary figure of simulation box plots (panels a-c).

    NOTE(review): writes the same output name as figure_6_d
    ("supp-fig-simulations-misc.png"); only this variant is invoked by run().
    """
    scratch_svg = "_fig6.svg"
    panel_size = (800, 800)
    canvas_size = (panel_size[0] * 3 + 80, panel_size[1])
    drawing = svgwrite.Drawing(scratch_svg, size=canvas_size)
    drawing.add(drawing.rect(insert=(0, 0), size=canvas_size, fill="rgb(255,255,255)"))
    cursor = (40.0, 0)
    panels = [("single_tissue_bp.png", "a"),
              ("combination_brain_bp.png", "b"),
              ("combination_all_bp.png", "c")]
    for index, (image_name, label) in enumerate(panels):
        if index:
            cursor = _advance_cursor(cursor, panel_size[0] + 40, 0)
        image_path = os.path.join(args.plots_folder, "simulations", image_name)
        drawing.add(drawing.image(image_path, cursor, panel_size))
        _kot(drawing, cursor, label, ox=-20, oy=30)
    drawing.save()
    target = os.path.join(args.output_folder, "supp-fig-simulations-misc.png")
    to_png(scratch_svg, target)
    os.remove(scratch_svg)
def shove(args):
    """Copy pre-rendered figures and supplementary data into the output folder,
    renaming them with the paper's prefix conventions ('_' becomes '-')."""

    def _copy_all(src_root, dst_root, relative_files, file_prefix=""):
        # Each entry is a tuple of path components under src_root; the last
        # component names the destination file (prefixed, underscores dashed).
        for components in relative_files:
            destination = file_prefix + components[-1].replace("_", "-")
            shutil.copy(os.path.join(src_root, *components),
                        os.path.join(dst_root, destination))

    figures = [("ukb", "smt_vs_mt_ukb.png",)]
    _copy_all(args.plots_folder, args.output_folder, figures, file_prefix="fig-")
    supp_figures = [("ukb", "smt_vs_mt_ukb_supp.png",),
                    ("ukb", "proportion_underestimated_ukb.png",),
                    ("ukb", "UKB_Cholesterol_significant_bars_fdr.png",),
                    ("simulations", "combination_all_tendency.png",),
                    ("simulations", "pc.png"),
                    ("wtccc", "t1d_snp_intersection.png")]
    _copy_all(args.plots_folder, args.output_folder, supp_figures, "supp-fig-")
    supp_data = [("gwas_traits.txt",),
                 ("gwas_smultixcan_stats.txt",),
                 ("gwas_smultixcan_significant.txt",),
                 ("gwas_sp_significant.txt",),
                 ("ukb_multixcan_stats.txt",),
                 ("ukb_p_significant.txt",),
                 ("ukb_multixcan_significant.txt",),
                 ("ukb_individual_pm.txt",),
                 ("wtccc_t1d.txt",)]
    _copy_all(args.input_folder, args.output_folder, supp_data, "supp-data-")
    images = [("corrplot_pearson_SLC5A6.png",)]
    _copy_all(args.input_folder_2, args.output_folder, images, "supp-fig-")
########################################################################################################################
def run(args):
    """Create the output folder if needed, then build every paper figure."""
    if not os.path.exists(args.output_folder):
        os.makedirs(args.output_folder)
    # figure_4 is deliberately omitted, matching the original pipeline.
    for build_step in (shove, figure_1, figure_2, figure_3, figure_5, figure_6):
        build_step(args)
if __name__ == "__main__":
    # Hard-coded stand-in for a parsed-arguments namespace (no CLI parsing).
    class Dummy(object):
        def __init__(self):
            self.output_folder = "results/paper_material"
            self.plots_folder = "results/plots"
            self.input_folder = "results"
            self.input_folder_2 = "images"
            self.input_folder_3 = "external_data"  # NOTE(review): not referenced by the builders shown here
    args = Dummy()
    run(args)
| 36.353175
| 135
| 0.613688
|
import os
import subprocess
import svgwrite
import math
import shutil
def ensure_requisite_folders(path):
folder = os.path.split(path)[0]
if len(folder) and not os.path.exists(folder):
os.makedirs(folder)
def _png_name(p):
return p.split(".svg")[0]+".png"
def to_png(from_path, to_path):
ensure_requisite_folders(to_path)
cmd = \
"""
convert {} {}
""".format(from_path, to_path)
subprocess.call(cmd.split())
def _advance_cursor(c, x, y):
return (c[0]+x, c[1]+y)
def _kot(dwg, _c, text, ox=-40, oy=50, style="font-size:40;font-family:Arial;font-weight:bold;stroke:black;stroke-width:1;fill:black"):
dwg.add(dwg.text(text, insert=(_c[0]+ox, _c[1]+oy), fill='black', style=style))
def figure_1(args):
_p = "_fig1.svg"
_1_size = (467, 986)
_2_size = (341, 972)
_size = (_1_size[0]+140+_2_size[0], _1_size[1])
dwg = svgwrite.Drawing(_p, size=_size)
dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
_c = (40,0) dwg.add(dwg.image(os.path.join(args.input_folder_2, "multixcan_illustration.png"), _c, _1_size))
_kot(dwg, _c, "a", ox=-20, oy=30)
_c = _advance_cursor(_c, _1_size[0] + 90 , 0)
dwg.add(dwg.image(os.path.join(args.input_folder_2, "S-Predixcan-MT-diagram_2.png"), _c, _1_size))
_kot(dwg, _c, "b", ox=-20, oy=30)
dwg.save()
t = os.path.join(args.output_folder, "fig-multi-tissue-presentation.png")
to_png(_p, t)
os.remove(_p)
def figure_2(args):
_p = "_fig2.svg"
_1_size = (600, 600)
_size = (_1_size[0]*3+140, _1_size[1])
dwg = svgwrite.Drawing(_p, size=_size)
dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
_c = (20,0) dwg.add(dwg.image(os.path.join(args.plots_folder, "ukb", "ukb_mt_vs_p_number_significant.png"), _c, _1_size))
_kot(dwg, _c, "a", ox=0, oy=30)
_c = _advance_cursor(_c, _1_size[0] + 50, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "ukb", "UKB_Cholesterol_significant_bars.png"), _c, _1_size))
_kot(dwg, _c, "b", ox=0, oy=30)
_c =_advance_cursor (_c, _1_size[0]+50, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "ukb", "UKB_Cholesterol_qq.png"), _c, _1_size))
_kot(dwg, _c, "c", ox=0, oy=30)
dwg.save()
t = os.path.join(args.output_folder, "fig-multi-tissue-ukb-cholesterol.png")
to_png(_p, t)
os.remove(_p)
def figure_3(args):
_p = "_fig3.svg"
_1_size = (600, 600)
_2_size = (526*600.0/552, 600)
_size = (_1_size[0]*2+80, _1_size[1]*2+40)
dwg = svgwrite.Drawing(_p, size=_size)
dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
_c = (40+math.ceil(_1_size[0]-_2_size[0])/2.0, 0) dwg.add(dwg.image(os.path.join(args.input_folder_2, "smultixcan_illustration.png"), _c, _1_size))
_kot(dwg, _c, "a", ox=-20, oy=30)
_c =_advance_cursor (_c, _2_size[0]+40, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "gwas", "smt_vs_sp_number_significant.png"), _c, _1_size))
_kot(dwg, _c, "b", ox=-20, oy=30)
_c = (40, _1_size[1]+40) dwg.add(dwg.image(os.path.join(args.plots_folder, "gwas", "PGC_scz2_qq.png"), _c, _1_size))
_kot(dwg, _c, "c", ox=-20, oy=30)
_c =_advance_cursor (_c, _1_size[1]+40, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "gwas", "PGC_scz2_significant_bars.png"), _c, _1_size))
_kot(dwg, _c, "d", ox=-20, oy=30)
dwg.save()
t = os.path.join(args.output_folder, "fig-s-multi-tissue-presentation.png")
to_png(_p, t)
os.remove(_p)
def figure_5(args):
_p = "_fig5.svg"
_1_size = (800, 800)
_size = (_1_size[0]*2+80, _1_size[1]+40)
dwg = svgwrite.Drawing(_p, size=_size)
dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
_c = (40, 0) dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "null_30_qq.png"), _c, _1_size))
_kot(dwg, _c, "a", ox=-20, oy=30)
_c =_advance_cursor (_c, _1_size[0]+40, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "null_0_qq.png"), _c, _1_size))
_kot(dwg, _c, "b", ox=-20, oy=30)
dwg.save()
t = os.path.join(args.output_folder, "supp-fig-simulations-null.png")
to_png(_p, t)
os.remove(_p)
def figure_6_d(args):
_p = "_fig6.svg"
_1_size = (800, 800)
_size = (_1_size[0] * 2 + 80, _1_size[1] *2 + 80)
dwg = svgwrite.Drawing(_p, size=_size)
dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
_c = (40.0, 40) dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "single_tissue_bp.png"), _c, _1_size))
_kot(dwg, _c, "a", ox=-20, oy=30)
_c =_advance_cursor (_c, _1_size[0]+40, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "correlated_tissues_bp.png"), _c, _1_size))
_kot(dwg, _c, "b", ox=-20, oy=30)
_c = (40, _1_size[1]*1+80)
dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "combination_brain_bp.png"), _c, _1_size))
_kot(dwg, _c, "c", ox=-20, oy=30)
_c =_advance_cursor (_c, _1_size[0]+40, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "combination_all_bp.png"), _c, _1_size))
_kot(dwg, _c, "d", ox=-20, oy=30)
dwg.save()
t = os.path.join(args.output_folder, "supp-fig-simulations-misc.png")
to_png(_p, t)
os.remove(_p)
def figure_6(args):
_p = "_fig6.svg"
_1_size = (800, 800)
_size = (_1_size[0] * 3 + 80, _1_size[1])
dwg = svgwrite.Drawing(_p, size=_size)
dwg.add(dwg.rect(insert=(0, 0), size=_size, fill="rgb(255,255,255)"))
_c = (40.0, 0) dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "single_tissue_bp.png"), _c, _1_size))
_kot(dwg, _c, "a", ox=-20, oy=30)
_c = _advance_cursor(_c, _1_size[0] + 40, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "combination_brain_bp.png"), _c, _1_size))
_kot(dwg, _c, "b", ox=-20, oy=30)
_c =_advance_cursor (_c, _1_size[0]+40, 0)
dwg.add(dwg.image(os.path.join(args.plots_folder, "simulations", "combination_all_bp.png"), _c, _1_size))
_kot(dwg, _c, "c", ox=-20, oy=30)
dwg.save()
t = os.path.join(args.output_folder, "supp-fig-simulations-misc.png")
to_png(_p, t)
os.remove(_p)
def shove(args):
def _shove(input_folder, output_folder, files, file_prefix=""):
for sf in files:
shutil.copy(os.path.join(input_folder, *sf),
os.path.join(output_folder, file_prefix + sf[len(sf) - 1].replace("_", "-")))
figures = [("ukb","smt_vs_mt_ukb.png",)]
_shove(args.plots_folder, args.output_folder, figures, file_prefix="fig-")
supp_figures = [("ukb", "smt_vs_mt_ukb_supp.png",),
("ukb", "proportion_underestimated_ukb.png",),
("ukb", "UKB_Cholesterol_significant_bars_fdr.png",),
("simulations", "combination_all_tendency.png",),
("simulations", "pc.png"),
("wtccc", "t1d_snp_intersection.png")]
_shove(args.plots_folder, args.output_folder, supp_figures, "supp-fig-")
supp_data =[("gwas_traits.txt",),
("gwas_smultixcan_stats.txt",),
("gwas_smultixcan_significant.txt",),
("gwas_sp_significant.txt",),
("ukb_multixcan_stats.txt",),
("ukb_p_significant.txt",),
("ukb_multixcan_significant.txt",),
("ukb_individual_pm.txt",),
("wtccc_t1d.txt",)]
_shove(args.input_folder, args.output_folder, supp_data, "supp-data-")
images = [("corrplot_pearson_SLC5A6.png",)]
_shove(args.input_folder_2, args.output_folder, images, "supp-fig-")
def run(args):
if not os.path.exists(args.output_folder):
os.makedirs(args.output_folder)
shove(args)
figure_1(args)
figure_2(args)
figure_3(args)
figure_5(args)
figure_6(args)
if __name__ == "__main__":
class Dummy(object):
def __init__(self):
self.output_folder = "results/paper_material"
self.plots_folder = "results/plots"
self.input_folder = "results"
self.input_folder_2 = "images"
self.input_folder_3 = "external_data"
args = Dummy()
run(args)
| true
| true
|
f70812d2689291dc7dd4dc7a7b5eb1aacc45c5d5
| 2,322
|
py
|
Python
|
inktime/rgbkm.py
|
fligt/inktime
|
45f20602ef07cc8f62e0192318913cf910eb925b
|
[
"Apache-2.0"
] | null | null | null |
inktime/rgbkm.py
|
fligt/inktime
|
45f20602ef07cc8f62e0192318913cf910eb925b
|
[
"Apache-2.0"
] | 3
|
2021-07-13T15:09:03.000Z
|
2021-07-13T15:09:08.000Z
|
inktime/rgbkm.py
|
fligt/inktime
|
45f20602ef07cc8f62e0192318913cf910eb925b
|
[
"Apache-2.0"
] | 1
|
2022-03-08T14:13:30.000Z
|
2022-03-08T14:13:30.000Z
|
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/00_rgbkm.ipynb (unless otherwise specified).
__all__ = ['reflectance']
# Cell
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cv2
import scipy.optimize as optimize
def reflectance(K, S, D, Rg):
    """Reflectance of a single-colorant Kubelka-Munk layer over a background.

    Nobbs (1997) formulation with a modified Saunderson expression for the
    infinite-thickness reflectance. Computation is per-channel, so it handles
    single-channel, RGB, and multi-wavelength spectral data alike.

    Parameters
    ----------
    K : tuple-like (n channels)
        Colorant absorption coefficients per channel.
    S : tuple-like (n channels)
        Colorant scattering coefficients per channel.
    D : array (height x width)
        Colorant thickness image.
    Rg : array (height x width x n) or rgb tuple with shape (3,)
        Background reflectance image or a single background color.

    Returns
    -------
    refl : array (height x width x n)
        n-channel reflectance image.
    """
    background = np.array(Rg)
    out_shape = background.shape
    if len(out_shape) == 1:
        # A bare rgb tuple: broadcast it into a uniform background image.
        height, width = D.shape
        uniform = np.ones([height, width, 3])
        uniform[:, :] = background
        background = uniform
        out_shape = background.shape
    n_channels = out_shape[-1]
    absorption = np.array(K).reshape(1, n_channels)
    scattering = np.array(S).reshape(1, n_channels)
    thickness = np.array(D).reshape(-1, 1)
    background = background.reshape(-1, n_channels)
    ratio = scattering / absorption
    r_inf = ratio / (ratio + 1 + np.sqrt(1 + 2 * ratio))
    z = thickness * np.sqrt(absorption * (absorption + 2 * scattering))
    z = np.clip(z, a_min=0, a_max=50)  # cap to avoid overflow in exp(2*z)
    beta = np.exp(2 * z) - 1
    alpha = (1 - r_inf**2) / (1 - background * r_inf)
    refl = (alpha * background + beta * r_inf) / (alpha + beta)
    return refl.reshape(out_shape)
| 27
| 113
| 0.599914
|
__all__ = ['reflectance']
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cv2
import scipy.optimize as optimize
def reflectance(K, S, D, Rg):
Rg = np.array(Rg)
shape = Rg.shape
if len(shape) == 1:
h, w = D.shape
Rg_img = np.ones([h, w, 3])
Rg_img[:,:] = Rg
Rg = Rg_img
shape = Rg.shape
n_channels = shape[-1]
K = np.array(K).reshape(1, n_channels)
S = np.array(S).reshape(1, n_channels)
D = np.array(D).reshape(-1, 1)
Rg = Rg.reshape(-1, n_channels)
Rinf = (S/K) / ((S/K) + 1 + np.sqrt(1 + 2 * (S/K)))
Z = D * np.sqrt(K * (K + 2 * S))
Z = np.clip(Z, a_min=0, a_max=50)
beta = np.exp(2 * Z) - 1
alpha = (1 - Rinf**2) / (1 - Rg * Rinf)
refl = (alpha * Rg + beta * Rinf) / (alpha + beta)
refl = refl.reshape(shape)
return refl
| true
| true
|
f708134fe19c7e41a5320a642137a1f4d9644d9a
| 2,766
|
py
|
Python
|
assets/code/python_code/python_socket/tcp/client.py
|
zanghu/gitbook_notebook
|
6df70f912ef16617456bd773b69240f42ee768c5
|
[
"MIT"
] | 4
|
2019-01-23T02:58:56.000Z
|
2019-07-19T12:40:59.000Z
|
assets/code/python_code/python_socket/tcp/client.py
|
zanghu/gitbook_notebook
|
6df70f912ef16617456bd773b69240f42ee768c5
|
[
"MIT"
] | null | null | null |
assets/code/python_code/python_socket/tcp/client.py
|
zanghu/gitbook_notebook
|
6df70f912ef16617456bd773b69240f42ee768c5
|
[
"MIT"
] | null | null | null |
#coding=utf-8
#!/bin/env python
import os
import base64
import socket
import numpy
import time
m_serv_ip = '10.230.147.31'
m_serv_port = 9999
def init_socket(serv_ip, serv_port):
    """Open a TCP connection to (serv_ip, serv_port) and set a 50 s timeout."""
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    conn.connect((serv_ip, serv_port))
    conn.settimeout(50)
    return conn
def send_socket(sk, b64, name):
    """Send one framed message over sk, then close it.

    Wire format: a 16-byte zero-padded ASCII length header (len(name) +
    len(b64)), followed by the UTF-8 encoded name, then the base64 payload.
    """
    header = str(len(b64) + len(name)).zfill(16)
    for chunk in (header.encode('utf-8'), name.encode('utf-8'), b64):
        sk.sendall(chunk)
    sk.close()
def img_to_b64(img_path):
    """Read an image file and return its contents base64-encoded (bytes)."""
    assert os.path.isfile(img_path)  # fail fast on a bad path
    with open(img_path, 'rb') as handle:
        raw = handle.read()
    return base64.b64encode(raw)
def get_img_names(img_dir):
    """List the '.jpg' file names directly under img_dir (order as listed by the OS)."""
    assert os.path.isdir(img_dir)
    entries = os.listdir(img_dir)
    jpg_names = [entry for entry in entries if entry.endswith('.jpg')]
    # Log line kept verbatim (Chinese): "directory {} total files: {}, image count: {}".
    print('目录 {0} 下文件总数: {1}, 图片总数: {2}'.format(img_dir, len(entries), len(jpg_names)))
    return jpg_names
def send_batch(img_dir, img_names, start_idx, batch_num=10):
    """Send one batch of images from img_dir to the configured server.

    Opens a fresh connection per image and sends its base64 content tagged
    with the file name minus the '.jpg' extension.

    Parameters
    ----------
    img_dir : str - directory containing the images
    img_names : list - all image file names
    start_idx : int - index of the first image in this batch
    batch_num : int - number of images to send (default 10)
    """
    global m_serv_ip
    global m_serv_port
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended replacement for elapsed-time measurement.
    t0 = time.perf_counter()
    t1 = time.perf_counter()
    for cnt, img_name in enumerate(img_names[start_idx: start_idx + batch_num]):
        img_path = os.path.join(img_dir, img_name)
        b64 = img_to_b64(img_path)  # base64 payload
        sk = init_socket(m_serv_ip, m_serv_port)
        # BUGFIX: rstrip('.jpg') stripped any trailing '.', 'j', 'p', 'g'
        # characters (corrupting e.g. "dog.jpg" -> "do"); drop the extension
        # properly instead.
        send_socket(sk, b64, os.path.splitext(img_name)[0])
        t2 = time.perf_counter()
        print('cnt {0} finish, time elapsed: {1}, total elapsed: {2}'.format(cnt, t2 - t1, t2 - t0))
        t1 = t2
    print('all finished, num send: {0}, time elapsed: {1}'.format(len(img_names), time.perf_counter() - t0))
def client(img_dir, batch_size, max_batch):
    """Send all '.jpg' images under img_dir to the server in batches.

    Parameters
    ----------
    img_dir : str - directory containing the images
    batch_size : int - maximum images per batch
    max_batch : int - stop after this many batches even if images remain
    """
    assert os.path.isdir(img_dir)
    # time.clock() was removed in Python 3.8; use perf_counter() instead.
    t0 = time.perf_counter()
    img_names = get_img_names(img_dir)
    num_img = len(img_names)
    num_finish = 0  # images sent so far
    start_idx = 0   # start index of the next batch
    num_batch = 0
    while num_finish < num_img:
        # The final batch may be shorter than batch_size.
        max_num = min(batch_size, num_img - num_finish)
        send_batch(img_dir, img_names, start_idx, max_num)
        start_idx += max_num
        num_finish += max_num
        num_batch += 1
        if num_batch >= max_batch:
            break
    print('client finish, time elapsed: {0}'.format(time.perf_counter() - t0))
if __name__ == '__main__':
    # Send the training-set images; max_batch=10000 is effectively "no limit".
    client('../data/problem3/train', batch_size=20, max_batch=10000)
| 30.065217
| 101
| 0.626537
|
import os
import base64
import socket
import numpy
import time
m_serv_ip = '10.230.147.31'
m_serv_port = 9999
def init_socket(serv_ip, serv_port):
ip_port = (serv_ip, serv_port)
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) sk.connect(ip_port)
sk.settimeout(50)
return sk
def send_socket(sk, b64, name):
len_content = len(b64) + len(name)
sk.sendall(bytes(str(len_content).zfill(16), encoding='utf-8')) sk.sendall(bytes(name, encoding='utf-8'))
sk.sendall(b64) sk.close()
def img_to_b64(img_path):
assert os.path.isfile(img_path)
with open(img_path, 'rb') as f:
img = f.read()
b64 = base64.b64encode(img)
return b64
def get_img_names(img_dir):
assert os.path.isdir(img_dir)
names_all = os.listdir(img_dir)
names = [name for name in names_all if name.endswith('.jpg')]
print('目录 {0} 下文件总数: {1}, 图片总数: {2}'.format(img_dir, len(names_all), len(names)))
return names
def send_batch(img_dir, img_names, start_idx, batch_num=10):
global m_serv_ip
global m_serv_port
t0 = time.clock()
t1 = time.clock()
for cnt, img_name in enumerate(img_names[start_idx: start_idx + batch_num]):
img_path = os.path.join(img_dir, img_name)
b64 = img_to_b64(img_path) sk = init_socket(m_serv_ip, m_serv_port)
send_socket(sk, b64, img_name.rstrip('.jpg')) t2 = time.clock()
print('cnt {0} finish, time elapsed: {1}, total elapsed: {2}'.format(cnt, t2 - t1, t2 - t0))
t1 = t2
print('all finished, num send: {0}, time elapsed: {1}'.format(len(img_names), time.clock() - t0))
def client(img_dir, batch_size, max_batch):
assert os.path.isdir(img_dir)
t0 = time.clock()
img_names = get_img_names(img_dir)
num_img = len(img_names)
num_finish = 0 start_idx = 0 num_batch = 0
while num_finish < num_img:
max_num = 0
num_left = num_img - num_finish
if num_left < batch_size:
max_num = num_left
else:
max_num = batch_size
send_batch(img_dir, img_names, start_idx, max_num)
start_idx += max_num
num_finish += max_num
num_batch += 1
if num_batch >= max_batch:
break
print('client finish, time elapsed: {0}'.format(time.clock() - t0))
if __name__ == '__main__':
client('../data/problem3/train', batch_size=20, max_batch=10000)
| true
| true
|
f708138891556136d31fa1d4e8dbc351bd793c32
| 17,911
|
py
|
Python
|
gpkit/constraints/sgp.py
|
beldonl/gpkit
|
4c422d3f3b65b85f5baacc36305064aee4341ebe
|
[
"MIT"
] | null | null | null |
gpkit/constraints/sgp.py
|
beldonl/gpkit
|
4c422d3f3b65b85f5baacc36305064aee4341ebe
|
[
"MIT"
] | null | null | null |
gpkit/constraints/sgp.py
|
beldonl/gpkit
|
4c422d3f3b65b85f5baacc36305064aee4341ebe
|
[
"MIT"
] | null | null | null |
"""Implement the SequentialGeometricProgram class"""
from time import time
from collections import OrderedDict
import numpy as np
from ..exceptions import InvalidGPConstraint, Infeasible, UnnecessarySGP
from ..keydict import KeyDict
from ..nomials import Variable
from .gp import GeometricProgram
from ..nomials import PosynomialInequality
from .. import NamedVariables
from .costed import CostedConstraintSet
EPS = 1e-6 # 1 +/- this is used in a few relative differences
# pylint: disable=too-many-instance-attributes
class SequentialGeometricProgram(CostedConstraintSet):
    """Prepares a collection of signomials for a SP solve.
    Arguments
    ---------
    cost : Posynomial
        Objective to minimize when solving
    constraints : list of Constraint or SignomialConstraint objects
        Constraints to maintain when solving (implicitly Signomials <= 1)
    verbosity : int (optional)
        Currently has no effect: SequentialGeometricPrograms don't know
        anything new after being created, unlike GeometricPrograms.
    Attributes with side effects
    ----------------------------
    `gps` is set during a solve
    `result` is set at the end of a solve
    Examples
    --------
    >>> gp = gpkit.geometric_program.SequentialGeometricProgram(
                        # minimize
                        x,
                        [   # subject to
                            1/x - y/x,  # <= 1, implicitly
                            y/10  # <= 1
                        ])
    >>> gp.solve()
    """
    # Class-level defaults; populated during __init__ and localsolve().
    gps = solver_outs = _results = result = model = None
    _gp = _spvars = _lt_approxs = pccp_penalty = None
    # The shared PCCP slack variable, created once in the "SGP" namespace.
    with NamedVariables("SGP"):
        slack = Variable("PCCPslack")
    def __init__(self, cost, model, substitutions, *,
                 use_pccp=True, pccp_penalty=2e2, **initgpargs):
        # pylint: disable=super-init-not-called,non-parent-init-called
        if cost.any_nonpositive_cs:
            raise UnnecessarySGP("""Sequential GPs need Posynomial objectives.
    The equivalent of a Signomial objective can be constructed by constraining
    a dummy variable `z` to be greater than the desired Signomial objective `s`
    (z >= s) and then minimizing that dummy variable.""")
        self.model = model
        self._original_cost = cost
        self.externalfn_vars = \
            frozenset(Variable(v) for v in self.model.varkeys if v.externalfn)
        if not self.externalfn_vars:
            try:
                # Partition constraints: those representable as GP constraints
                # vs. true signomial (SP) constraints that need local approximation.
                sgpconstraints = {"SP constraints": [], "GP constraints": []}
                self._lt_approxs = []
                for cs in model.flat():
                    try:
                        if not isinstance(cs, PosynomialInequality):
                            cs.as_hmapslt1(substitutions)  # gp-compatible?
                        sgpconstraints["GP constraints"].append(cs)
                    except InvalidGPConstraint:
                        sgpconstraints["SP constraints"].append(cs)
                        # PCCP: divide each less-than side by the shared slack
                        # so infeasible iterates can still be approximated.
                        if use_pccp:
                            lts = [lt/self.slack for lt in cs.as_approxlts()]
                        else:
                            lts = cs.as_approxlts()
                        self._lt_approxs.append(lts)
                if not sgpconstraints["SP constraints"]:
                    raise UnnecessarySGP("""Model valid as a Geometric Program.
    SequentialGeometricPrograms should only be created with Models containing
    Signomial Constraints, since Models without Signomials have global
    solutions and can be solved with 'Model.solve()'.""")
                if use_pccp:
                    # Penalize slack in the cost so it is driven back to 1.
                    self.pccp_penalty = pccp_penalty
                    self.cost = cost * self.slack**pccp_penalty
                    sgpconstraints["GP constraints"].append(self.slack >= 1)
                else:
                    self.cost = cost
                self.idxlookup = {k: i for i, k in enumerate(sgpconstraints)}
                list.__init__(self, sgpconstraints.values())
                self.substitutions = substitutions
                self._gp = self.init_gp(**initgpargs)
                self.blackboxconstraints = False
                return
            except AttributeError:
                # Some constraint lacked the as_approxlts/as_approxgts
                # interface; fall through to black-box (regenerate-GP) mode.
                pass
        self.blackboxconstraints = True
        self.__bare_init__(cost, model, substitutions)
    # pylint: disable=too-many-locals,too-many-branches
    # pylint: disable=too-many-arguments
    # pylint: disable=too-many-statements
    def localsolve(self, solver=None, *, verbosity=1, x0=None, reltol=1e-4,
                   iteration_limit=50, mutategp=True, **solveargs):
        """Locally solves a SequentialGeometricProgram and returns the solution.
        Arguments
        ---------
        solver : str or function (optional)
            By default uses one of the solvers found during installation.
            If set to "mosek", "mosek_cli", or "cvxopt", uses that solver.
            If set to a function, passes that function cs, A, p_idxs, and k.
        verbosity : int (optional)
            If greater than 0, prints solve time and number of iterations.
            Each GP is created and solved with verbosity one less than this, so
            if greater than 1, prints solver name and time for each GP.
        x0 : dict (optional)
            Initial location to approximate signomials about.
        reltol : float
            Iteration ends when this is greater than the distance between two
            consecutive solve's objective values.
        iteration_limit : int
            Maximum GP iterations allowed.
        mutategp: boolean
            Prescribes whether to mutate the previously generated GP
            or to create a new GP with every solve.
        **solveargs :
            Passed to solver function.
        Returns
        -------
        result : dict
            A dictionary containing the translated solver result.
        """
        self.gps, self.solver_outs, self._results = [], [], []
        # if there's external functions we can't mutate the GP
        mutategp = mutategp and not self.blackboxconstraints
        if not mutategp and not x0:
            raise ValueError("Solves with arbitrary constraint generators"
                             " must specify an initial starting point x0.")
        if mutategp:
            if x0:
                self._gp = self.init_gp(x0)
            gp = self._gp
        starttime = time()
        if verbosity > 0:
            print("Starting a sequence of GP solves")
            if self.externalfn_vars:
                print(" for %i variables defined by externalfns"
                      % len(self.externalfn_vars))
            elif mutategp:
                print(" for %i free variables" % len(self._spvars))
                print(" in %i signomial constraints"
                      % len(self["SP constraints"]))
            print(" and for %i free variables" % len(gp.varlocs))
            print(" in %i posynomial inequalities." % len(gp.k))
        prevcost, cost, rel_improvement = None, None, None
        # Iterate GP approximations until the relative cost improvement
        # drops below reltol (rel_improvement is None on the first pass).
        while rel_improvement is None or rel_improvement > reltol:
            prevcost = cost
            if len(self.gps) > iteration_limit:
                raise Infeasible(
                    "Unsolved after %s iterations. Check `m.program.results`;"
                    " if they're converging, try `.localsolve(...,"
                    " iteration_limit=NEWLIMIT)`." % len(self.gps))
            if mutategp:
                self.update_gp(x0)
            else:
                gp = self.gp(x0)
            gp.model = self.model
            self.gps.append(gp)  # NOTE: SIDE EFFECTS
            if verbosity > 1:
                print("\nGP Solve %i" % len(self.gps))
            if verbosity > 2:
                print("===============")
            solver_out = gp.solve(solver, verbosity=verbosity-1,
                                  gen_result=False, **solveargs)
            self.solver_outs.append(solver_out)
            cost = float(solver_out["objective"])
            # Next iterate: exponentiate the primal (log-space) solution.
            x0 = dict(zip(gp.varlocs, np.exp(solver_out["primal"])))
            if verbosity > 2 and self._spvars:
                result = gp.generate_result(solver_out, verbosity=verbosity-3)
                self._results.append(result)
                print(result.table(self._spvars))
            elif verbosity > 1:
                print("Solved cost was %.4g." % cost)
            if prevcost is None:
                continue
            rel_improvement = (prevcost - cost)/(prevcost + cost)
            if cost*(1 - EPS) > prevcost + EPS and verbosity > -1:
                print("SGP not convergent: Cost rose by %.2g%% on GP solve %i."
                      " Details can be found in `m.program.results` or by"
                      " solving at a higher verbosity. Note that convergence is"
                      " not guaranteed for models with SignomialEqualities.\n"
                      % (100*(cost - prevcost)/prevcost, len(self.gps)))
                rel_improvement = cost = None
        # solved successfully!
        self.result = gp.generate_result(solver_out, verbosity=verbosity-3)
        self.result["soltime"] = time() - starttime
        if verbosity > 1:
            print()
        if verbosity > 0:
            print("Solving took %.3g seconds and %i GP solves."
                  % (self.result["soltime"], len(self.gps)))
        self.model.process_result(self.result)
        if self.externalfn_vars:
            for v in self.externalfn_vars:
                self[0].insert(0, v.key.externalfn)  # for constraint senss
        # If PCCP slack ended up (numerically) at 1, scrub it from the result;
        # otherwise warn that the signomial constraints were relaxed.
        if self.slack.key in self.result["variables"]:
            excess_slack = self.result["variables"][self.slack.key] - 1
            if excess_slack <= EPS:
                del self.result["freevariables"][self.slack.key]
                del self.result["variables"][self.slack.key]
                del self.result["sensitivities"]["variables"][self.slack.key]
                slackconstraint = self["GP constraints"][-1]
                del self.result["sensitivities"]["constraints"][slackconstraint]
            elif verbosity > -1:
                print("Final solution let signomial constraints slacken by"
                      " %.2g%%. Calling .localsolve with a higher"
                      " `pccp_penalty` (it was %.3g this time) will reduce"
                      " final slack if the model is solvable with less. If"
                      " you think it might not be, check by solving with "
                      "`use_pccp=False, x0=(this model's final solution)`.\n"
                      % (100*excess_slack, self.pccp_penalty))
        return self.result
    # pylint: disable=too-many-locals
    def localsolveonce(self, solver=None, verbosity=1, x0=None, reltol=1e-4,
                       iteration_limit=50, mutategp=True, **kwargs):
        """Locally solves a SequentialGeometricProgram ONCE and returns the solution.
        Arguments
        ---------
        solver : str or function (optional)
            By default uses one of the solvers found during installation.
            If set to "mosek", "mosek_cli", or "cvxopt", uses that solver.
            If set to a function, passes that function cs, A, p_idxs, and k.
        verbosity : int (optional)
            If greater than 0, prints solve time and number of iterations.
            Each GP is created and solved with verbosity one less than this, so
            if greater than 1, prints solver name and time for each GP.
        x0 : dict (optional)
            Initial location to approximate signomials about.
        reltol : float
            Iteration ends when this is greater than the distance between two
            consecutive solve's objective values.
        iteration_limit : int
            Maximum GP iterations allowed.
        *args, **kwargs :
            Passed to solver function.
        Returns
        -------
        result : dict
            A dictionary containing the translated solver result.
        """
        starttime = time()
        if verbosity > 0:
            print("Beginning signomial solve.")
        self.gps = []  # NOTE: SIDE EFFECTS
        self.results = []
        if x0 and mutategp:
            self._gp = self.init_gp(self.substitutions, x0)
        slackvar = Variable()
        prevcost, cost, rel_improvement = None, None, None
        while (rel_improvement is None or rel_improvement > reltol) and len(self.gps) < iteration_limit:
            if len(self.gps) > iteration_limit:
                raise RuntimeWarning("""problem unsolved after %s iterations.
    The last result is available in Model.program.gps[-1].result. If the gps
    appear to be converging, you may wish to increase the iteration limit by
    calling .localsolve(..., iteration_limit=NEWLIMIT).""" % len(self.gps))
            gp = self.gp(x0, mutategp)
            self.gps.append(gp)  # NOTE: SIDE EFFECTS
            try:
                result = gp.solve(solver, verbosity-1,
                                  warn_on_check=True, **kwargs)
                self.results.append(result)
            except (RuntimeWarning, ValueError):
                # Solve failed: fall back to an elastic feasibility program
                # (slack every posynomial, heavily penalize the slack).
                feas_constrs = ([slackvar >= 1] +
                                [posy <= slackvar
                                 for posy in gp.posynomials[1:]])
                primal_feas = GeometricProgram(slackvar**100 * gp.cost,
                                               feas_constrs, None)
                self.gps.append(primal_feas)
                result = primal_feas.solve(solver, verbosity-1, **kwargs)
                result["cost"] = None  # reset the cost-counting
            x0 = result["freevariables"]
            prevcost, cost = cost, result["cost"]
            if prevcost is None or cost is None:
                rel_improvement = None
            elif prevcost < (1-reltol)*cost:
                print("SP is not converging! Last GP iteration had a higher"
                      " cost (%.2g) than the previous one (%.2g). Results for"
                      " each iteration are in (Model).program.results. If your"
                      " model contains SignomialEqualities, note that"
                      " convergence is not guaranteed: try replacing any"
                      " SigEqs you can and solving again." % (cost, prevcost))
            else:
                rel_improvement = abs(prevcost-cost)/(prevcost + cost)
        # solved successfully!
        soltime = time() - starttime
        if verbosity > 0:
            print("Solving took %i GP solves" % len(self.gps)
                  + " and %.3g seconds." % soltime)
        self.process_result(result)
        # NOTE(review): SolutionArray is not imported in this module's visible
        # imports — this legacy path would raise NameError if reached; confirm.
        self.result = SolutionArray(result.copy())  # NOTE: SIDE EFFECTS
        self.result["soltime"] = soltime
        if self.externalfn_vars:
            for v in self.externalfn_vars:
                self[0].insert(0, v.key.externalfn)  # for constraint senss
        return self.result
    @property
    def results(self):
        "Creates and caches results from the raw solver_outs"
        if not self._results:
            self._results = [o["generate_result"]() for o in self.solver_outs]
        return self._results
    def _fill_x0(self, x0):
        "Returns a copy of x0 with subsitutions added."
        x0kd = KeyDict()
        x0kd.varkeys = self.varkeys
        if x0:
            x0kd.update(x0)  # has to occur after the setting of varkeys
        x0kd.update(self.substitutions)
        return x0kd
    def init_gp(self, x0=None, **initgpargs):
        "Generates a simplified GP representation for later modification"
        x0 = self._fill_x0(x0)
        constraints = OrderedDict({"SP approximations": []})
        constraints["GP constraints"] = self["GP constraints"]
        # _spvars: free variables appearing in the signomial approximations.
        self._spvars = set([self.slack])
        for cs, lts in zip(self["SP constraints"], self._lt_approxs):
            for lt, gt in zip(lts, cs.as_approxgts(x0)):
                constraint = (lt <= gt)
                constraint.generated_by = cs
                constraints["SP approximations"].append(constraint)
                self._spvars.update({vk for vk in gt.varkeys
                                     if vk not in self.substitutions})
        gp = GeometricProgram(self.cost, constraints, self.substitutions,
                              **initgpargs)
        gp.x0 = x0
        return gp
    def update_gp(self, x0):
        "Update self._gp for x0."
        if not self.gps:
            return  # we've already generated the first gp
        gp = self._gp
        gp.x0.update({k: v for (k, v) in x0.items() if k in self._spvars})
        hmap_idx = 0
        # Re-approximate each SP constraint about the new x0 and splice the
        # resulting monomial maps back into the existing GP in place.
        for sp_constraint, lts in zip(self["SP constraints"], self._lt_approxs):
            for lt, gt in zip(lts, sp_constraint.as_approxgts(gp.x0)):
                unsubbed = lt/gt
                gp["SP approximations"][hmap_idx].unsubbed = [unsubbed]
                hmap = unsubbed.hmap.sub(self.substitutions, unsubbed.varkeys)
                hmap.parent = gp["SP approximations"][hmap_idx]
                hmap_idx += 1  # here because gp.hmaps[0] is the cost hmap
                gp.hmaps[hmap_idx] = hmap
        gp.gen()
    def gp(self, x0=None, **gpinitargs):
        "The GP approximation of this SP at x0."
        x0 = self._fill_x0(x0)
        constraints = OrderedDict(
            {"SP constraints": [c.as_gpconstr(x0) for c in self.model.flat()]})
        if self.externalfn_vars:
            constraints["Generated by externalfns"] = []
            for v in self.externalfn_vars:
                constraint = v.key.externalfn(v, x0)
                constraint.generated_by = v.key.externalfn
                constraints["Generated by externalfns"].append(constraint)
        gp = GeometricProgram(self._original_cost,
                              constraints, self.substitutions, **gpinitargs)
        gp.x0 = x0
        return gp
| 46.643229
| 104
| 0.570711
|
from time import time
from collections import OrderedDict
import numpy as np
from ..exceptions import InvalidGPConstraint, Infeasible, UnnecessarySGP
from ..keydict import KeyDict
from ..nomials import Variable
from .gp import GeometricProgram
from ..nomials import PosynomialInequality
from .. import NamedVariables
from .costed import CostedConstraintSet
EPS = 1e-6
class SequentialGeometricProgram(CostedConstraintSet):
gps = solver_outs = _results = result = model = None
_gp = _spvars = _lt_approxs = pccp_penalty = None
with NamedVariables("SGP"):
slack = Variable("PCCPslack")
def __init__(self, cost, model, substitutions, *,
use_pccp=True, pccp_penalty=2e2, **initgpargs):
if cost.any_nonpositive_cs:
raise UnnecessarySGP("""Sequential GPs need Posynomial objectives.
The equivalent of a Signomial objective can be constructed by constraining
a dummy variable `z` to be greater than the desired Signomial objective `s`
(z >= s) and then minimizing that dummy variable.""")
self.model = model
self._original_cost = cost
self.externalfn_vars = \
frozenset(Variable(v) for v in self.model.varkeys if v.externalfn)
if not self.externalfn_vars:
try:
sgpconstraints = {"SP constraints": [], "GP constraints": []}
self._lt_approxs = []
for cs in model.flat():
try:
if not isinstance(cs, PosynomialInequality):
cs.as_hmapslt1(substitutions) sgpconstraints["GP constraints"].append(cs)
except InvalidGPConstraint:
sgpconstraints["SP constraints"].append(cs)
if use_pccp:
lts = [lt/self.slack for lt in cs.as_approxlts()]
else:
lts = cs.as_approxlts()
self._lt_approxs.append(lts)
if not sgpconstraints["SP constraints"]:
raise UnnecessarySGP("""Model valid as a Geometric Program.
SequentialGeometricPrograms should only be created with Models containing
Signomial Constraints, since Models without Signomials have global
solutions and can be solved with 'Model.solve()'.""")
if use_pccp:
self.pccp_penalty = pccp_penalty
self.cost = cost * self.slack**pccp_penalty
sgpconstraints["GP constraints"].append(self.slack >= 1)
else:
self.cost = cost
self.idxlookup = {k: i for i, k in enumerate(sgpconstraints)}
list.__init__(self, sgpconstraints.values())
self.substitutions = substitutions
self._gp = self.init_gp(**initgpargs)
self.blackboxconstraints = False
return
except AttributeError:
pass self.blackboxconstraints = True
self.__bare_init__(cost, model, substitutions)
def localsolve(self, solver=None, *, verbosity=1, x0=None, reltol=1e-4,
iteration_limit=50, mutategp=True, **solveargs):
self.gps, self.solver_outs, self._results = [], [], []
mutategp = mutategp and not self.blackboxconstraints
if not mutategp and not x0:
raise ValueError("Solves with arbitrary constraint generators"
" must specify an initial starting point x0.")
if mutategp:
if x0:
self._gp = self.init_gp(x0)
gp = self._gp
starttime = time()
if verbosity > 0:
print("Starting a sequence of GP solves")
if self.externalfn_vars:
print(" for %i variables defined by externalfns"
% len(self.externalfn_vars))
elif mutategp:
print(" for %i free variables" % len(self._spvars))
print(" in %i signomial constraints"
% len(self["SP constraints"]))
print(" and for %i free variables" % len(gp.varlocs))
print(" in %i posynomial inequalities." % len(gp.k))
prevcost, cost, rel_improvement = None, None, None
while rel_improvement is None or rel_improvement > reltol:
prevcost = cost
if len(self.gps) > iteration_limit:
raise Infeasible(
"Unsolved after %s iterations. Check `m.program.results`;"
" if they're converging, try `.localsolve(...,"
" iteration_limit=NEWLIMIT)`." % len(self.gps))
if mutategp:
self.update_gp(x0)
else:
gp = self.gp(x0)
gp.model = self.model
self.gps.append(gp) # NOTE: SIDE EFFECTS
if verbosity > 1:
print("\nGP Solve %i" % len(self.gps))
if verbosity > 2:
print("===============")
solver_out = gp.solve(solver, verbosity=verbosity-1,
gen_result=False, **solveargs)
self.solver_outs.append(solver_out)
cost = float(solver_out["objective"])
x0 = dict(zip(gp.varlocs, np.exp(solver_out["primal"])))
if verbosity > 2 and self._spvars:
result = gp.generate_result(solver_out, verbosity=verbosity-3)
self._results.append(result)
print(result.table(self._spvars))
elif verbosity > 1:
print("Solved cost was %.4g." % cost)
if prevcost is None:
continue
rel_improvement = (prevcost - cost)/(prevcost + cost)
if cost*(1 - EPS) > prevcost + EPS and verbosity > -1:
print("SGP not convergent: Cost rose by %.2g%% on GP solve %i."
" Details can be found in `m.program.results` or by"
" solving at a higher verbosity. Note that convergence is"
" not guaranteed for models with SignomialEqualities.\n"
% (100*(cost - prevcost)/prevcost, len(self.gps)))
rel_improvement = cost = None
# solved successfully!
self.result = gp.generate_result(solver_out, verbosity=verbosity-3)
self.result["soltime"] = time() - starttime
if verbosity > 1:
print()
if verbosity > 0:
print("Solving took %.3g seconds and %i GP solves."
% (self.result["soltime"], len(self.gps)))
self.model.process_result(self.result)
if self.externalfn_vars:
for v in self.externalfn_vars:
self[0].insert(0, v.key.externalfn) # for constraint senss
if self.slack.key in self.result["variables"]:
excess_slack = self.result["variables"][self.slack.key] - 1
if excess_slack <= EPS:
del self.result["freevariables"][self.slack.key]
del self.result["variables"][self.slack.key]
del self.result["sensitivities"]["variables"][self.slack.key]
slackconstraint = self["GP constraints"][-1]
del self.result["sensitivities"]["constraints"][slackconstraint]
elif verbosity > -1:
print("Final solution let signomial constraints slacken by"
" %.2g%%. Calling .localsolve with a higher"
" `pccp_penalty` (it was %.3g this time) will reduce"
" final slack if the model is solvable with less. If"
" you think it might not be, check by solving with "
"`use_pccp=False, x0=(this model's final solution)`.\n"
% (100*excess_slack, self.pccp_penalty))
return self.result
def localsolveonce(self, solver=None, verbosity=1, x0=None, reltol=1e-4,
iteration_limit=50, mutategp=True, **kwargs):
starttime = time()
if verbosity > 0:
print("Beginning signomial solve.")
self.gps = [] self.results = []
if x0 and mutategp:
self._gp = self.init_gp(self.substitutions, x0)
slackvar = Variable()
prevcost, cost, rel_improvement = None, None, None
while (rel_improvement is None or rel_improvement > reltol) and len(self.gps) < iteration_limit:
if len(self.gps) > iteration_limit:
raise RuntimeWarning("""problem unsolved after %s iterations.
The last result is available in Model.program.gps[-1].result. If the gps
appear to be converging, you may wish to increase the iteration limit by
calling .localsolve(..., iteration_limit=NEWLIMIT).""" % len(self.gps))
gp = self.gp(x0, mutategp)
self.gps.append(gp) try:
result = gp.solve(solver, verbosity-1,
warn_on_check=True, **kwargs)
self.results.append(result)
except (RuntimeWarning, ValueError):
feas_constrs = ([slackvar >= 1] +
[posy <= slackvar
for posy in gp.posynomials[1:]])
primal_feas = GeometricProgram(slackvar**100 * gp.cost,
feas_constrs, None)
self.gps.append(primal_feas)
result = primal_feas.solve(solver, verbosity-1, **kwargs)
result["cost"] = None x0 = result["freevariables"]
prevcost, cost = cost, result["cost"]
if prevcost is None or cost is None:
rel_improvement = None
elif prevcost < (1-reltol)*cost:
print("SP is not converging! Last GP iteration had a higher"
" cost (%.2g) than the previous one (%.2g). Results for"
" each iteration are in (Model).program.results. If your"
" model contains SignomialEqualities, note that"
" convergence is not guaranteed: try replacing any"
" SigEqs you can and solving again." % (cost, prevcost))
else:
rel_improvement = abs(prevcost-cost)/(prevcost + cost)
soltime = time() - starttime
if verbosity > 0:
print("Solving took %i GP solves" % len(self.gps)
+ " and %.3g seconds." % soltime)
self.process_result(result)
self.result = SolutionArray(result.copy()) self.result["soltime"] = soltime
if self.externalfn_vars:
for v in self.externalfn_vars:
self[0].insert(0, v.key.externalfn) return self.result
@property
def results(self):
if not self._results:
self._results = [o["generate_result"]() for o in self.solver_outs]
return self._results
def _fill_x0(self, x0):
x0kd = KeyDict()
x0kd.varkeys = self.varkeys
if x0:
x0kd.update(x0) x0kd.update(self.substitutions)
return x0kd
def init_gp(self, x0=None, **initgpargs):
x0 = self._fill_x0(x0)
constraints = OrderedDict({"SP approximations": []})
constraints["GP constraints"] = self["GP constraints"]
self._spvars = set([self.slack])
for cs, lts in zip(self["SP constraints"], self._lt_approxs):
for lt, gt in zip(lts, cs.as_approxgts(x0)):
constraint = (lt <= gt)
constraint.generated_by = cs
constraints["SP approximations"].append(constraint)
self._spvars.update({vk for vk in gt.varkeys
if vk not in self.substitutions})
gp = GeometricProgram(self.cost, constraints, self.substitutions,
**initgpargs)
gp.x0 = x0
return gp
def update_gp(self, x0):
if not self.gps:
return gp = self._gp
gp.x0.update({k: v for (k, v) in x0.items() if k in self._spvars})
hmap_idx = 0
for sp_constraint, lts in zip(self["SP constraints"], self._lt_approxs):
for lt, gt in zip(lts, sp_constraint.as_approxgts(gp.x0)):
unsubbed = lt/gt
gp["SP approximations"][hmap_idx].unsubbed = [unsubbed]
hmap = unsubbed.hmap.sub(self.substitutions, unsubbed.varkeys)
hmap.parent = gp["SP approximations"][hmap_idx]
hmap_idx += 1 # here because gp.hmaps[0] is the cost hmap
gp.hmaps[hmap_idx] = hmap
gp.gen()
def gp(self, x0=None, **gpinitargs):
x0 = self._fill_x0(x0)
constraints = OrderedDict(
{"SP constraints": [c.as_gpconstr(x0) for c in self.model.flat()]})
if self.externalfn_vars:
constraints["Generated by externalfns"] = []
for v in self.externalfn_vars:
constraint = v.key.externalfn(v, x0)
constraint.generated_by = v.key.externalfn
constraints["Generated by externalfns"].append(constraint)
gp = GeometricProgram(self._original_cost,
constraints, self.substitutions, **gpinitargs)
gp.x0 = x0
return gp
| true
| true
|
f70813ab6b0a29667673f0e105ff0efc293c2861
| 7,934
|
py
|
Python
|
nova/api/openstack/compute/plugins/v3/hypervisors.py
|
alvarolopez/nova
|
97a97205a980459bae1f61aec3d4c7e0bec1e9c2
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/plugins/v3/hypervisors.py
|
alvarolopez/nova
|
97a97205a980459bae1f61aec3d4c7e0bec1e9c2
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/plugins/v3/hypervisors.py
|
alvarolopez/nova
|
97a97205a980459bae1f61aec3d4c7e0bec1e9c2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hypervisors admin extension."""
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
from nova import servicegroup
ALIAS = "os-hypervisors"
authorize = extensions.os_compute_authorizer(ALIAS)
class HypervisorsController(wsgi.Controller):
    """The Hypervisors API controller for the OpenStack API."""

    def __init__(self):
        self.host_api = compute.HostAPI()
        self.servicegroup_api = servicegroup.API()
        super(HypervisorsController, self).__init__()

    def _view_hypervisor(self, hypervisor, service, detail, servers=None,
                         **kwargs):
        """Build the serializable view of a single hypervisor.

        Extra keyword arguments (e.g. uptime) are folded into the result.
        """
        alive = self.servicegroup_api.service_is_up(service)
        hyp_dict = {
            'id': hypervisor.id,
            'hypervisor_hostname': hypervisor.hypervisor_hostname,
            'state': 'up' if alive else 'down',
            'status': 'disabled' if service.disabled else 'enabled',
        }
        if detail and not servers:
            # Full resource/usage breakdown, only for detail views.
            fields = ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
                      'memory_mb_used', 'local_gb_used',
                      'hypervisor_type', 'hypervisor_version',
                      'free_ram_mb', 'free_disk_gb', 'current_workload',
                      'running_vms', 'cpu_info', 'disk_available_least',
                      'host_ip')
            for field in fields:
                hyp_dict[field] = hypervisor[field]
            hyp_dict['service'] = {
                'id': service.id,
                'host': hypervisor.host,
                'disabled_reason': service.disabled_reason,
            }
        if servers:
            hyp_dict['servers'] = [{'name': serv['name'],
                                    'uuid': serv['uuid']}
                                   for serv in servers]
        # Fold in any caller-supplied extras (e.g. uptime).
        if kwargs:
            hyp_dict.update(kwargs)
        return hyp_dict

    def _hypervisor_views(self, context, compute_nodes, detail):
        """Render each compute node through _view_hypervisor."""
        views = []
        for node in compute_nodes:
            service = self.host_api.service_get_by_compute_host(
                context, node.host)
            views.append(self._view_hypervisor(node, service, detail))
        return views

    @extensions.expected_errors(())
    def index(self, req):
        """List all hypervisors with summary information."""
        context = req.environ['nova.context']
        authorize(context)
        compute_nodes = self.host_api.compute_node_get_all(context)
        req.cache_db_compute_nodes(compute_nodes)
        return dict(hypervisors=self._hypervisor_views(
            context, compute_nodes, False))

    @extensions.expected_errors(())
    def detail(self, req):
        """List all hypervisors with full resource details."""
        context = req.environ['nova.context']
        authorize(context)
        compute_nodes = self.host_api.compute_node_get_all(context)
        req.cache_db_compute_nodes(compute_nodes)
        return dict(hypervisors=self._hypervisor_views(
            context, compute_nodes, True))

    @extensions.expected_errors(404)
    def show(self, req, id):
        """Show details for a single hypervisor by id."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            hyp = self.host_api.compute_node_get(context, id)
            req.cache_db_compute_node(hyp)
        except (ValueError, exception.ComputeHostNotFound):
            # ValueError covers non-numeric ids passed to the DB layer.
            msg = _("Hypervisor with ID '%s' could not be found.") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
        service = self.host_api.service_get_by_compute_host(
            context, hyp.host)
        return dict(hypervisor=self._view_hypervisor(hyp, service, True))

    @extensions.expected_errors((404, 501))
    def uptime(self, req, id):
        """Report host uptime for a single hypervisor."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            hyp = self.host_api.compute_node_get(context, id)
            req.cache_db_compute_node(hyp)
        except (ValueError, exception.ComputeHostNotFound):
            msg = _("Hypervisor with ID '%s' could not be found.") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
        host = hyp.host
        try:
            uptime = self.host_api.get_host_uptime(context, host)
        except NotImplementedError:
            # Not every virt driver supports uptime reporting.
            msg = _("Virt driver does not implement uptime function.")
            raise webob.exc.HTTPNotImplemented(explanation=msg)
        service = self.host_api.service_get_by_compute_host(context, host)
        return dict(hypervisor=self._view_hypervisor(hyp, service, False,
                                                     uptime=uptime))

    @extensions.expected_errors(404)
    def search(self, req, id):
        """List hypervisors whose hostname matches the given substring."""
        context = req.environ['nova.context']
        authorize(context)
        hypervisors = self.host_api.compute_node_search_by_hypervisor(
            context, id)
        if not hypervisors:
            msg = _("No hypervisor matching '%s' could be found.") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
        return dict(hypervisors=self._hypervisor_views(
            context, hypervisors, False))

    @extensions.expected_errors(404)
    def servers(self, req, id):
        """List matching hypervisors along with the instances they host."""
        context = req.environ['nova.context']
        authorize(context)
        compute_nodes = self.host_api.compute_node_search_by_hypervisor(
            context, id)
        if not compute_nodes:
            msg = _("No hypervisor matching '%s' could be found.") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
        hypervisors = []
        for node in compute_nodes:
            instances = self.host_api.instance_get_all_by_host(context,
                                                               node.host)
            service = self.host_api.service_get_by_compute_host(
                context, node.host)
            hypervisors.append(self._view_hypervisor(node, service, False,
                                                     instances))
        return dict(hypervisors=hypervisors)

    @extensions.expected_errors(())
    def statistics(self, req):
        """Return aggregate statistics across all compute nodes."""
        context = req.environ['nova.context']
        authorize(context)
        return dict(hypervisor_statistics=self.host_api
                    .compute_node_statistics(context))
class Hypervisors(extensions.V3APIExtensionBase):
    """Admin-only hypervisor administration."""

    name = "Hypervisors"
    alias = ALIAS
    version = 1

    def get_resources(self):
        """Expose the os-hypervisors resource and its extra actions."""
        collection = {'detail': 'GET', 'statistics': 'GET'}
        member = {'uptime': 'GET', 'search': 'GET', 'servers': 'GET'}
        return [extensions.ResourceExtension(ALIAS,
                                             HypervisorsController(),
                                             collection_actions=collection,
                                             member_actions=member)]

    def get_controller_extensions(self):
        """This plugin adds no extensions to other controllers."""
        return []
| 38.892157
| 79
| 0.580161
|
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
from nova import servicegroup
ALIAS = "os-hypervisors"
authorize = extensions.os_compute_authorizer(ALIAS)
class HypervisorsController(wsgi.Controller):
def __init__(self):
self.host_api = compute.HostAPI()
self.servicegroup_api = servicegroup.API()
super(HypervisorsController, self).__init__()
def _view_hypervisor(self, hypervisor, service, detail, servers=None,
**kwargs):
alive = self.servicegroup_api.service_is_up(service)
hyp_dict = {
'id': hypervisor.id,
'hypervisor_hostname': hypervisor.hypervisor_hostname,
'state': 'up' if alive else 'down',
'status': ('disabled' if service.disabled
else 'enabled'),
}
if detail and not servers:
for field in ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used',
'hypervisor_type', 'hypervisor_version',
'free_ram_mb', 'free_disk_gb', 'current_workload',
'running_vms', 'cpu_info', 'disk_available_least',
'host_ip'):
hyp_dict[field] = hypervisor[field]
hyp_dict['service'] = {
'id': service.id,
'host': hypervisor.host,
'disabled_reason': service.disabled_reason,
}
if servers:
hyp_dict['servers'] = [dict(name=serv['name'], uuid=serv['uuid'])
for serv in servers]
if kwargs:
hyp_dict.update(kwargs)
return hyp_dict
@extensions.expected_errors(())
def index(self, req):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_get_all(context)
req.cache_db_compute_nodes(compute_nodes)
return dict(hypervisors=[self._view_hypervisor(
hyp,
self.host_api.service_get_by_compute_host(
context, hyp.host),
False)
for hyp in compute_nodes])
@extensions.expected_errors(())
def detail(self, req):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_get_all(context)
req.cache_db_compute_nodes(compute_nodes)
return dict(hypervisors=[self._view_hypervisor(
hyp,
self.host_api.service_get_by_compute_host(
context, hyp.host),
True)
for hyp in compute_nodes])
@extensions.expected_errors(404)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
hyp = self.host_api.compute_node_get(context, id)
req.cache_db_compute_node(hyp)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
service = self.host_api.service_get_by_compute_host(
context, hyp.host)
return dict(hypervisor=self._view_hypervisor(hyp, service, True))
@extensions.expected_errors((404, 501))
def uptime(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
hyp = self.host_api.compute_node_get(context, id)
req.cache_db_compute_node(hyp)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
try:
host = hyp.host
uptime = self.host_api.get_host_uptime(context, host)
except NotImplementedError:
msg = _("Virt driver does not implement uptime function.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
service = self.host_api.service_get_by_compute_host(context, host)
return dict(hypervisor=self._view_hypervisor(hyp, service, False,
uptime=uptime))
@extensions.expected_errors(404)
def search(self, req, id):
context = req.environ['nova.context']
authorize(context)
hypervisors = self.host_api.compute_node_search_by_hypervisor(
context, id)
if hypervisors:
return dict(hypervisors=[self._view_hypervisor(
hyp,
self.host_api.service_get_by_compute_host(
context, hyp.host),
False)
for hyp in hypervisors])
else:
msg = _("No hypervisor matching '%s' could be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
@extensions.expected_errors(404)
def servers(self, req, id):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_search_by_hypervisor(
context, id)
if not compute_nodes:
msg = _("No hypervisor matching '%s' could be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
hypervisors = []
for compute_node in compute_nodes:
instances = self.host_api.instance_get_all_by_host(context,
compute_node.host)
service = self.host_api.service_get_by_compute_host(
context, compute_node.host)
hyp = self._view_hypervisor(compute_node, service, False,
instances)
hypervisors.append(hyp)
return dict(hypervisors=hypervisors)
@extensions.expected_errors(())
def statistics(self, req):
context = req.environ['nova.context']
authorize(context)
stats = self.host_api.compute_node_statistics(context)
return dict(hypervisor_statistics=stats)
class Hypervisors(extensions.V3APIExtensionBase):
name = "Hypervisors"
alias = ALIAS
version = 1
def get_resources(self):
resources = [extensions.ResourceExtension(ALIAS,
HypervisorsController(),
collection_actions={'detail': 'GET',
'statistics': 'GET'},
member_actions={'uptime': 'GET',
'search': 'GET',
'servers': 'GET'})]
return resources
def get_controller_extensions(self):
return []
| true
| true
|
f70814361a55cd2096a3f189320262ab475a1a5f
| 400
|
py
|
Python
|
bili_kits/api/__init__.py
|
LonelySteve/Bili-Kits
|
42e536400b2f35d57e5871de34303b6f2fc901ed
|
[
"MIT"
] | null | null | null |
bili_kits/api/__init__.py
|
LonelySteve/Bili-Kits
|
42e536400b2f35d57e5871de34303b6f2fc901ed
|
[
"MIT"
] | null | null | null |
bili_kits/api/__init__.py
|
LonelySteve/Bili-Kits
|
42e536400b2f35d57e5871de34303b6f2fc901ed
|
[
"MIT"
] | null | null | null |
# Base URLs for Bilibili's public web and API endpoints. The leading
# underscore marks these as internal to the api package; concrete endpoint
# paths are built on top of them with %-formatting.
_BASE_WWW_BILIBILI_COM="https://www.bilibili.com"
_BASE_API_BILIBILI_COM="https://api.bilibili.com"
_BASE_API_BILIBILI_COM_X="https://api.bilibili.com/x"
_BASE_API_BILIBILI_COM_X_V2="%s/v2" % _BASE_API_BILIBILI_COM_X
_BASE_WEB_INTERFACE="%s/web-interface" % _BASE_API_BILIBILI_COM_X
# NOTE(review): this endpoint is plain http while the others are https —
# presumably intentional for the vc API; confirm before changing.
_BASE_API_VC_BILIBILI_COM="http://api.vc.bilibili.com"
_BASE_INTERFACE_BILIBILI_COM="https://interface.bilibili.com"
| 50
| 65
| 0.835
|
_BASE_WWW_BILIBILI_COM="https://www.bilibili.com"
_BASE_API_BILIBILI_COM="https://api.bilibili.com"
_BASE_API_BILIBILI_COM_X="https://api.bilibili.com/x"
_BASE_API_BILIBILI_COM_X_V2="%s/v2" % _BASE_API_BILIBILI_COM_X
_BASE_WEB_INTERFACE="%s/web-interface" % _BASE_API_BILIBILI_COM_X
_BASE_API_VC_BILIBILI_COM="http://api.vc.bilibili.com"
_BASE_INTERFACE_BILIBILI_COM="https://interface.bilibili.com"
| true
| true
|
f70814e7ac2b17c9709ae4005bbc3581dcf38d66
| 3,008
|
py
|
Python
|
Server_API/licenciement-auto.py
|
crozes/CST_Project
|
0269be2c17da59f5b8c05671e24f7bf64c9fdea9
|
[
"MIT"
] | null | null | null |
Server_API/licenciement-auto.py
|
crozes/CST_Project
|
0269be2c17da59f5b8c05671e24f7bf64c9fdea9
|
[
"MIT"
] | null | null | null |
Server_API/licenciement-auto.py
|
crozes/CST_Project
|
0269be2c17da59f5b8c05671e24f7bf64c9fdea9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*
from __future__ import print_function
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
import json
import os
from flask import Flask
from flask import jsonify
from flask_cors import CORS
# Flask application serving the Google-Sheet-reading API.
app = Flask(__name__)
# Allow cross-origin requests so a browser front-end can call this API.
cors = CORS(app)
#--------------------_Function_--------------
# Google Sheets API setup: read-only scope; credentials are cached on disk
# and refreshed via an interactive OAuth flow when missing or invalid.
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
store = file.Storage('/home/pi/Serveur/credentials.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('/home/pi/Serveur/client_secret.json', SCOPES)
    creds = tools.run_flow(flow, store)
# Shared Sheets service client used by the route handlers below.
service = build('sheets', 'v4', http=creds.authorize(Http()))
@app.route('/')
def Welcome():
    """Root endpoint: simple liveness check for the API server."""
    return "Hello World !"
@app.route('/sheetID/<post_id>')
def getSheetInfo(post_id):
    """Read registration rows from a Google Sheet and return them as JSON.

    post_id is the spreadsheet ID (the long token in the sheet's URL).
    Each row of 'Réponses au formulaire 1'!A2:P becomes one JSON object.
    Returns an empty body when the sheet has no data rows, and a JSON
    error with status 502 when the Sheets API call fails.
    """
    SPREADSHEET_ID = post_id
    # Sheet name + A2:P range: row 1 holds the form headers.
    RANGE_NAME = 'Réponses au formulaire 1!A2:P'
    try:
        result = service.spreadsheets().values().get(
            spreadsheetId=SPREADSHEET_ID, range=RANGE_NAME).execute()
    except Exception as e:
        # Bug fix: previously `result` stayed undefined after a failure,
        # so execution fell through and crashed with a NameError below.
        # Report the failure to the caller instead.
        print("Erreur")
        print(e)
        return jsonify({'error': str(e)}), 502
    values = result.get('values', [])
    if not values:
        print('No data found.')
        return ''
    resultFin = []
    for row in values:
        # Robustness: the Sheets API omits trailing empty cells, so pad
        # each row to the full A:P width (16 columns) before indexing.
        row = row + [''] * (16 - len(row))
        entry = {
            'Horodateur': row[0],
            'Nom': row[1].upper(),
            'Prenom': row[2].upper(),
            'Nationalite': row[3],
            'DateNaissance': row[4],
            'LieuNaissance': row[5],
            'Departement': row[6],
            'Sexe': row[7],
            'Adresse': row[8],
            'Adresse2': row[9],
            'CodePostal': row[10],
            'Ville': row[11],
            'Pays': row[12],
            # Sheets drops the leading zero of French phone numbers stored
            # as numbers; restore it when a value is present.
            'Telephone': '0' + row[13] if row[13] != "" else '',
            'Portable': '0' + row[14] if row[14] != "" else '',
            'Mail': row[15],
            'Activite': 'secourisme',
        }
        resultFin.append(entry)
    return jsonify(resultFin)
if __name__ == '__main__':
    # Listen on all interfaces so the API is reachable from the LAN.
    app.run(debug=True, host='0.0.0.0')
| 31.333333
| 358
| 0.56117
|
from __future__ import print_function
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
import json
import os
from flask import Flask
from flask import jsonify
from flask_cors import CORS
app = Flask(__name__)
cors = CORS(app)
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
store = file.Storage('/home/pi/Serveur/credentials.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('/home/pi/Serveur/client_secret.json', SCOPES)
creds = tools.run_flow(flow, store)
service = build('sheets', 'v4', http=creds.authorize(Http()))
@app.route('/')
def Welcome():
return "Hello World !"
@app.route('/sheetID/<post_id>')
def getSheetInfo(post_id):
SPREADSHEET_ID = post_id
RANGE_NAME = 'Réponses au formulaire 1!A2:P'
try :
result = service.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID,
range=RANGE_NAME).execute()
except Exception as e:
print("Erreur")
print(e)
values = result.get('values', [])
resultFin = []
if not values:
print('No data found.')
return ''
else:
print(values)
print("------")
for row in values:
print(row)
horodateur = row[0]
nom = row[1].upper()
prenom = row[2].upper()
nationalite = row[3]
dateNaissance = row[4]
lieuNaissance = row[5]
departement = row[6]
sexe = row[7]
adresse = row[8]
adresse2 = ''
if row[9] != "" :
adresse2 = row[9]
codePostal = row[10]
ville = row[11]
pays = row[12]
telephone = ''
if row[13] != "" :
telephone = '0'+row[13]
portable = ''
if row[14] != "" :
portable = '0'+row[14]
mail = row[15]
commentaire = ''
activite = 'secourisme'
result = {'Horodateur':horodateur,'Nom':nom,'Prenom':prenom,'Nationalite':nationalite,'Portable':portable,'Departement':departement,'Pays':pays,'Sexe':sexe,'Mail':mail,'DateNaissance':dateNaissance,'LieuNaissance':lieuNaissance,'Adresse':adresse,'Adresse2':adresse2,'CodePostal':codePostal,'Ville':ville,'Telephone':telephone,'Activite':activite}
resultFin.append(result)
print(resultFin)
print("-----")
return jsonify(resultFin)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
| true
| true
|
f70815582f4c0043457ae574255b906a4b41d2d3
| 4,143
|
py
|
Python
|
auto_editor/utils/progressbar.py
|
chancat87/auto-editor
|
2dbf230ed172602e958e3b727cf5183eb48e50b3
|
[
"Unlicense"
] | null | null | null |
auto_editor/utils/progressbar.py
|
chancat87/auto-editor
|
2dbf230ed172602e958e3b727cf5183eb48e50b3
|
[
"Unlicense"
] | null | null | null |
auto_editor/utils/progressbar.py
|
chancat87/auto-editor
|
2dbf230ed172602e958e3b727cf5183eb48e50b3
|
[
"Unlicense"
] | null | null | null |
import sys
from math import floor
from time import time, localtime
from shutil import get_terminal_size
from platform import system
from typing import Union
from .func import get_stdout
class ProgressBar:
    """Terminal progress bar with several render styles.

    bar_type selects the renderer: 'default' (unicode eighth-block chars),
    'classic' (solid blocks), 'ascii' (#/-), 'machine' (parseable '~'-joined
    output), or 'none' (entirely hidden).
    """

    def __init__(self, bar_type: str) -> None:
        self.machine = bar_type == 'machine'
        self.hide = bar_type == 'none'
        # Per-style (icon, fill chars from empty to full, end caps).
        presets = {
            'classic': ('⏳', ['░', '█'], ('[', ']')),
            'ascii': ('& ', ['-', '#'], ('[', ']')),
        }
        fallback = ('⏳',
                    [' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█'],
                    ('|', '|'))
        self.icon, self.chars, self.brackets = presets.get(bar_type, fallback)
        self.part_width = len(self.chars) - 1

        # Follow the macOS menu-bar clock's 12/24-hour preference when it
        # can be read; otherwise default to 12-hour (AM/PM) display.
        self.ampm = True
        if system() == 'Darwin' and bar_type in ('default', 'classic'):
            try:
                clock_format = get_stdout(
                    ['defaults', 'read', 'com.apple.menuextra.clock',
                     'DateFormat']
                )
                self.ampm = 'a' in clock_format
            except FileNotFoundError:
                pass

    @staticmethod
    def pretty_time(my_time: float, ampm: bool) -> str:
        """Format a unix timestamp as a local HH:MM (12h or 24h) string."""
        clock = localtime(my_time)
        hours = clock.tm_hour
        minutes = clock.tm_min
        if not ampm:
            return '{:02}:{:02}'.format(hours, minutes)
        ampm_marker = 'PM' if clock.tm_hour >= 12 else 'AM'
        if hours == 0:
            hours = 12
        if hours > 12:
            hours -= 12
        return '{:02}:{:02} {}'.format(hours, minutes, ampm_marker)

    def tick(self, index: Union[int, float]) -> None:
        """Redraw the bar (or emit machine output) for the given position."""
        if self.hide:
            return
        progress = min(1, max(0, index / self.total))
        # Projected total runtime, extrapolated from progress so far.
        if progress == 0:
            progress_rate = 0.0
        else:
            progress_rate = (time() - self.begin_time) / progress

        if self.machine:
            index = min(index, self.total)
            raw = int(self.begin_time + progress_rate)
            print('{}~{}~{}~{}~{}'.format(
                self.title, index, self.total, self.begin_time, raw),
                end='\r', flush=True)
            return

        eta = self.pretty_time(self.begin_time + progress_rate, self.ampm)
        percent = round(progress * 100, 1)
        p_pad = " " * (4 - len(str(percent)))
        columns = get_terminal_size().columns
        # Leave room for icon, title, percentage, and ETA text.
        bar_len = max(1, columns - (self.len_title + 32))
        bar_body = self.progress_bar_str(progress, bar_len)

        line = f' {self.icon}{self.title} {bar_body} {p_pad}{percent}% ETA {eta}'
        # Clip or pad to the terminal width so the carriage-return redraw
        # leaves no stale characters behind.
        if len(line) > columns - 2:
            line = line[:columns - 2]
        else:
            line += ' ' * (columns - len(line) - 4)

        sys.stdout.write(line + '\r')
        try:
            sys.stdout.flush()
        except AttributeError:
            pass

    def start(self, total: Union[int, float], title: str='Please wait') -> None:
        """Begin a bar run: record total work, title, and the start time."""
        self.title = title
        self.len_title = len(title)
        self.total = total
        self.begin_time = time()
        try:
            self.tick(0)
        except UnicodeEncodeError:
            # Terminal can't render the unicode glyphs; fall back to ASCII.
            self.icon = '& '
            self.chars = ['-', '#']
            self.brackets = ('[', ']')
            self.part_width = 1

    def progress_bar_str(self, progress: float, width: int) -> str:
        """Render the bracketed bar at `progress` (0..1) across `width` cells."""
        whole = floor(progress * width)
        fraction = (progress * width) % 1
        part_char = self.chars[floor(fraction * self.part_width)]
        if width - whole - 1 < 0:
            # Bar is completely full: no partial cell fits.
            part_char = ''
        return (self.brackets[0]
                + self.chars[-1] * whole
                + part_char
                + self.chars[0] * (width - whole - 1)
                + self.brackets[1])

    @staticmethod
    def end() -> None:
        """Blank the bar's line so following output starts clean."""
        sys.stdout.write(' ' * (get_terminal_size().columns - 2) + '\r')
| 28.972028
| 95
| 0.506396
|
import sys
from math import floor
from time import time, localtime
from shutil import get_terminal_size
from platform import system
from typing import Union
from .func import get_stdout
class ProgressBar:
    """Draws a self-updating, single-line terminal progress bar with an ETA.

    Supported bar types: default (unicode block glyphs), 'classic',
    'ascii', 'machine' (parseable output) and 'none' (hidden).
    """

    def __init__(self, bar_type: str) -> None:
        self.machine = False
        self.hide = False
        # Default unicode theme; may be replaced by bar_type below.
        self.icon = '⏳'
        self.chars = [' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█']
        self.brackets = ('|', '|')
        if bar_type == 'classic':
            self.icon = '⏳'
            self.chars = ['░', '█']
            self.brackets = ('[', ']')
        if bar_type == 'ascii':
            self.icon = '& '
            self.chars = ['-', '#']
            self.brackets = ('[', ']')
        if bar_type == 'machine':
            self.machine = True
        if bar_type == 'none':
            self.hide = True
        # Number of distinct partial-fill glyphs (full block excluded).
        self.part_width = len(self.chars) - 1
        self.ampm = True
        if system() == 'Darwin' and bar_type in ('default', 'classic'):
            # Follow the macOS menu-bar clock: use 12-hour time if its
            # date format contains the am/pm marker 'a'.
            try:
                date_format = get_stdout(
                    ['defaults', 'read', 'com.apple.menuextra.clock', 'DateFormat']
                )
                self.ampm = 'a' in date_format
            except FileNotFoundError:
                pass

    @staticmethod
    def pretty_time(my_time: float, ampm: bool) -> str:
        """Format unix timestamp *my_time* as HH:MM, 12- or 24-hour style."""
        new_time = localtime(my_time)
        hours = new_time.tm_hour
        minutes = new_time.tm_min
        if ampm:
            # Convert the 0-23 hour to a 1-12 clock face value.
            if hours == 0:
                hours = 12
            if hours > 12:
                hours -= 12
            ampm_marker = 'PM' if new_time.tm_hour >= 12 else 'AM'
            return '{:02}:{:02} {}'.format(hours, minutes, ampm_marker)
        return '{:02}:{:02}'.format(hours, minutes)

    def tick(self, index: Union[int, float]) -> None:
        """Redraw the progress line for *index* completed out of self.total."""
        if self.hide:
            return
        # Clamp to [0, 1] so an out-of-range index can't break the math.
        progress = min(1, max(0, index / self.total))
        if progress == 0:
            progress_rate = 0.0
        else:
            # Estimated total duration: elapsed time scaled up by progress.
            progress_rate = (time() - self.begin_time) / progress
        if self.machine:
            index = min(index, self.total)
            # Predicted completion time as a unix timestamp.
            raw = int(self.begin_time + progress_rate)
            print('{}~{}~{}~{}~{}'.format(
                self.title, index, self.total, self.begin_time, raw),
                end='\r', flush=True)
            return
        # ETA = start time + estimated total duration.
        new_time = self.pretty_time(self.begin_time + progress_rate, self.ampm)
        percent = round(progress * 100, 1)
        # Left-pad the percent so the bar doesn't jitter as digits change.
        p_pad = " " * (4 - len(str(percent)))
        columns = get_terminal_size().columns
        # Reserve room for icon, title, percentage and the ETA text.
        bar_len = max(1, columns - (self.len_title + 32))
        progress_bar_str = self.progress_bar_str(progress, bar_len)
        bar = f' {self.icon}{self.title} {progress_bar_str} {p_pad}{percent}% ETA {new_time}'
        # Truncate or pad so the line exactly overwrites the previous draw.
        if len(bar) > columns - 2:
            bar = bar[:columns - 2]
        else:
            bar += ' ' * (columns - len(bar) - 4)
        sys.stdout.write(bar + '\r')
        try:
            sys.stdout.flush()
        except AttributeError:
            # stdout may have been replaced by an object without flush().
            pass

    def start(self, total: Union[int, float], title: str='Please wait') -> None:
        """Begin a new progress run of *total* steps labelled *title*."""
        self.title = title
        self.len_title = len(title)
        self.total = total
        self.begin_time = time()
        try:
            self.tick(0)
        except UnicodeEncodeError:
            # Terminal can't render the unicode glyphs; fall back to ASCII.
            self.icon = '& '
            self.chars = ['-', '#']
            self.brackets = ('[', ']')
            self.part_width = 1

    def progress_bar_str(self, progress: float, width: int) -> str:
        """Render *progress* (0..1) as a *width*-cell bracketed bar string."""
        whole_width = floor(progress * width)
        remainder_width = (progress * width) % 1
        # Choose the partial-fill glyph for the fractional cell.
        part_width = floor(remainder_width * self.part_width)
        part_char = self.chars[part_width]
        if width - whole_width - 1 < 0:
            # Bar is completely full; there is no room for a partial cell.
            part_char = ''
        line = (
            self.brackets[0]
            + self.chars[-1] * whole_width
            + part_char
            + self.chars[0] * (width - whole_width - 1)
            + self.brackets[1]
        )
        return line

    @staticmethod
    def end() -> None:
        """Blank the progress line so the next output starts on clean text."""
        sys.stdout.write(' ' * (get_terminal_size().columns - 2) + '\r')
| true
| true
|
f7081627885de99317f297375e9e2fe146b35626
| 4,125
|
py
|
Python
|
lib/tests/test_config.py
|
mikewalch/muchos
|
6e786a0f43e4be01ce15fe1bf9fc7aeafd46739f
|
[
"Apache-2.0"
] | null | null | null |
lib/tests/test_config.py
|
mikewalch/muchos
|
6e786a0f43e4be01ce15fe1bf9fc7aeafd46739f
|
[
"Apache-2.0"
] | null | null | null |
lib/tests/test_config.py
|
mikewalch/muchos
|
6e786a0f43e4be01ce15fe1bf9fc7aeafd46739f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Muchos authors (see AUTHORS)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from muchos.config import DeployConfig
def test_defaults():
    """Verify DeployConfig values loaded from the shipped example
    muchos.props / hosts / checksums files.
    """
    c = DeployConfig("muchos", '../conf/muchos.props.example', '../conf/hosts/example/example_cluster',
                     '../conf/checksums', 'mycluster')
    # Checksums resolved both by (product, version) and by product default.
    assert c.checksum_ver('accumulo', '1.9.0') == 'f68a6145029a9ea843b0305c90a7f5f0334d8a8ceeea94734267ec36421fe7fe'
    assert c.checksum('accumulo') == 'baa5e0929248ff0d96355bc7fb42a5b75d183a83364519296e07b0adbb089180'
    assert c.get('ec2', 'default_instance_type') == 'm5d.large'
    assert c.get('ec2', 'worker_instance_type') == 'm5d.large'
    assert c.get('ec2', 'aws_ami') == 'ami-9887c6e7'
    assert c.max_ephemeral() == 1
    assert c.mounts(2) == ['/media/ephemeral0', '/media/ephemeral1']
    assert c.node_type_map() == {'default': {'mounts': ['/media/ephemeral0', ], 'devices': ['/dev/nvme1n1', ]},
                                 'worker': {'mounts': ['/media/ephemeral0', ], 'devices': ['/dev/nvme1n1', ]}}
    assert c.node_type('worker1') == 'worker'
    assert c.node_type('leader1') == 'default'
    assert not c.has_option('ec2', 'vpc_id')
    assert not c.has_option('ec2', 'subnet_id')
    assert c.get('ec2', 'key_name') == 'my_aws_key'
    assert c.instance_tags() == {}
    assert len(c.nodes()) == 6
    assert c.get_node('leader1') == ['namenode', 'resourcemanager', 'accumulomaster', 'zookeeper']
    assert c.get_node('worker1') == ['worker']
    assert c.get_node('worker2') == ['worker']
    assert c.get_node('worker3') == ['worker']
    assert c.has_service('accumulomaster')
    assert not c.has_service('fluo')
    assert c.get_service_hostnames('worker') == ['worker1', 'worker2', 'worker3', 'worker4']
    assert c.get_service_hostnames('zookeeper') == ['leader1']
    # Hosts map to (private_ip, public_ip-or-None) pairs.
    assert c.get_hosts() == {'leader2': ('10.0.0.1', None), 'leader1': ('10.0.0.0', '23.0.0.0'), 'worker1': ('10.0.0.2', None), 'worker3': ('10.0.0.4', None), 'worker2': ('10.0.0.3', None), 'worker4': ('10.0.0.5', None)}
    assert c.get_public_ip('leader1') == '23.0.0.0'
    assert c.get_private_ip('leader1') == '10.0.0.0'
    assert c.cluster_name == 'mycluster'
    # Only major version prefixes are pinned so minor bumps don't break tests.
    assert c.version("accumulo").startswith('2.')
    assert c.version("fluo").startswith('1.')
    assert c.version("hadoop").startswith('3.')
    assert c.version("zookeeper").startswith('3.')
    assert c.get_service_private_ips("worker") == ['10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5']
    assert c.get('general', 'proxy_hostname') == "leader1"
    assert c.proxy_public_ip() == "23.0.0.0"
    assert c.proxy_private_ip() == "10.0.0.0"
    assert c.get('general', 'cluster_basedir') == "/home/centos"
    assert c.get('general', 'cluster_user') == "centos"
    assert c.get_non_proxy() == [('10.0.0.1', 'leader2'), ('10.0.0.2', 'worker1'), ('10.0.0.3', 'worker2'),
                                 ('10.0.0.4', 'worker3'), ('10.0.0.5', 'worker4')]
    assert c.get_host_services() == [('leader1', 'namenode resourcemanager accumulomaster zookeeper'), ('leader2', 'metrics'),
                                     ('worker1', 'worker'), ('worker2', 'worker'), ('worker3', 'worker'), ('worker4', 'worker')]
def test_case_sensitive():
    """Option names are case sensitive; node names keep their case."""
    c = DeployConfig("muchos", '../conf/muchos.props.example', '../conf/hosts/example/example_cluster',
                     '../conf/checksums', 'mycluster')
    # Assert truthiness directly instead of comparing to True/False (E712).
    assert c.has_option('ec2', 'default_instance_type')
    assert not c.has_option('ec2', 'Default_instance_type')
    c.set('nodes', 'CamelCaseWorker', 'worker,fluo')
    c.init_nodes()
    assert c.get_node('CamelCaseWorker') == ['worker', 'fluo']
| 57.291667
| 220
| 0.640242
|
from muchos.config import DeployConfig
def test_defaults():
c = DeployConfig("muchos", '../conf/muchos.props.example', '../conf/hosts/example/example_cluster',
'../conf/checksums', 'mycluster')
assert c.checksum_ver('accumulo', '1.9.0') == 'f68a6145029a9ea843b0305c90a7f5f0334d8a8ceeea94734267ec36421fe7fe'
assert c.checksum('accumulo') == 'baa5e0929248ff0d96355bc7fb42a5b75d183a83364519296e07b0adbb089180'
assert c.get('ec2', 'default_instance_type') == 'm5d.large'
assert c.get('ec2', 'worker_instance_type') == 'm5d.large'
assert c.get('ec2', 'aws_ami') == 'ami-9887c6e7'
assert c.max_ephemeral() == 1
assert c.mounts(2) == ['/media/ephemeral0', '/media/ephemeral1']
assert c.node_type_map() == {'default': {'mounts': ['/media/ephemeral0', ], 'devices': ['/dev/nvme1n1', ]},
'worker': {'mounts': ['/media/ephemeral0', ], 'devices': ['/dev/nvme1n1', ]}}
assert c.node_type('worker1') == 'worker'
assert c.node_type('leader1') == 'default'
assert not c.has_option('ec2', 'vpc_id')
assert not c.has_option('ec2', 'subnet_id')
assert c.get('ec2', 'key_name') == 'my_aws_key'
assert c.instance_tags() == {}
assert len(c.nodes()) == 6
assert c.get_node('leader1') == ['namenode', 'resourcemanager', 'accumulomaster', 'zookeeper']
assert c.get_node('worker1') == ['worker']
assert c.get_node('worker2') == ['worker']
assert c.get_node('worker3') == ['worker']
assert c.has_service('accumulomaster')
assert not c.has_service('fluo')
assert c.get_service_hostnames('worker') == ['worker1', 'worker2', 'worker3', 'worker4']
assert c.get_service_hostnames('zookeeper') == ['leader1']
assert c.get_hosts() == {'leader2': ('10.0.0.1', None), 'leader1': ('10.0.0.0', '23.0.0.0'), 'worker1': ('10.0.0.2', None), 'worker3': ('10.0.0.4', None), 'worker2': ('10.0.0.3', None), 'worker4': ('10.0.0.5', None)}
assert c.get_public_ip('leader1') == '23.0.0.0'
assert c.get_private_ip('leader1') == '10.0.0.0'
assert c.cluster_name == 'mycluster'
assert c.version("accumulo").startswith('2.')
assert c.version("fluo").startswith('1.')
assert c.version("hadoop").startswith('3.')
assert c.version("zookeeper").startswith('3.')
assert c.get_service_private_ips("worker") == ['10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5']
assert c.get('general', 'proxy_hostname') == "leader1"
assert c.proxy_public_ip() == "23.0.0.0"
assert c.proxy_private_ip() == "10.0.0.0"
assert c.get('general', 'cluster_basedir') == "/home/centos"
assert c.get('general', 'cluster_user') == "centos"
assert c.get_non_proxy() == [('10.0.0.1', 'leader2'), ('10.0.0.2', 'worker1'), ('10.0.0.3', 'worker2'),
('10.0.0.4', 'worker3'), ('10.0.0.5', 'worker4')]
assert c.get_host_services() == [('leader1', 'namenode resourcemanager accumulomaster zookeeper'), ('leader2', 'metrics'),
('worker1', 'worker'), ('worker2', 'worker'), ('worker3', 'worker'), ('worker4', 'worker')]
def test_case_sensitive():
c = DeployConfig("muchos", '../conf/muchos.props.example', '../conf/hosts/example/example_cluster',
'../conf/checksums', 'mycluster')
assert c.has_option('ec2', 'default_instance_type') == True
assert c.has_option('ec2', 'Default_instance_type') == False
c.set('nodes', 'CamelCaseWorker', 'worker,fluo')
c.init_nodes()
assert c.get_node('CamelCaseWorker') == ['worker', 'fluo']
| true
| true
|
f70818765316c2ec03e839fa7ce2c7c658eaa0fd
| 3,946
|
py
|
Python
|
src/clikit/ui/components/exception_trace.py
|
kevinastone/clikit
|
2d9f4b4ac8cffa3229df1bb5bdd199c58ad72d5d
|
[
"MIT"
] | 1
|
2021-03-21T10:44:22.000Z
|
2021-03-21T10:44:22.000Z
|
src/clikit/ui/components/exception_trace.py
|
kevinastone/clikit
|
2d9f4b4ac8cffa3229df1bb5bdd199c58ad72d5d
|
[
"MIT"
] | null | null | null |
src/clikit/ui/components/exception_trace.py
|
kevinastone/clikit
|
2d9f4b4ac8cffa3229df1bb5bdd199c58ad72d5d
|
[
"MIT"
] | null | null | null |
import ast
import inspect
import keyword
import sys
import traceback
from clikit.api.io import IO
class ExceptionTrace(object):
    """
    Renders the trace of an exception, with syntax-highlighted source lines.
    """

    # Markup tags used to colorize each syntactic category.
    THEME = {
        "comment": "<fg=black;options=bold>",
        "keyword": "<fg=yellow>",
        "builtin": "<fg=blue>",
        "literal": "<fg=magenta>",
    }

    # Builtin names and the ast node classes that correspond to keywords;
    # consulted when deciding how to highlight each node of a source line.
    AST_ELEMENTS = {
        "builtins": __builtins__.keys()
        if type(__builtins__) is dict
        else dir(__builtins__),
        "keywords": [
            getattr(ast, cls)
            for cls in dir(ast)
            if keyword.iskeyword(cls.lower())
            and inspect.isclass(getattr(ast, cls))
            and issubclass(getattr(ast, cls), ast.AST)
        ],
    }

    def __init__(self, exception):  # type: (Exception) -> None
        self._exception = exception
        # Captured eagerly: on python 2 the traceback is only reachable
        # through sys.exc_info() (exceptions have no __traceback__).
        self._exc_info = sys.exc_info()

    def render(self, io, simple=False):  # type: (IO, bool) -> None
        """Write the exception title to *io*; add the traceback if verbose."""
        if hasattr(self._exception, "__traceback__"):
            tb = self._exception.__traceback__
        else:
            tb = self._exc_info[2]
        title = ""
        if not simple:
            title += "\n[<error>{}</error>]\n".format(
                self._exception.__class__.__name__
            )
        title += "<error>{}</error>".format(str(self._exception))
        io.write_line(title)
        if not simple and io.is_verbose():
            io.write_line("")
            self._render_traceback(io, tb)

    def _render_traceback(self, io, tb):  # type: (IO, ...) -> None
        """Format every frame of *tb* and write the whole trace to *io*."""
        frames = []
        while tb:
            frames.append(self._format_traceback_frame(io, tb))
            tb = tb.tb_next
        io.write_line("<b>Traceback (most recent call last):</b>")
        io.write_line("".join(traceback.format_list(frames)))

    def _format_traceback_frame(self, io, tb):  # type: (IO, ...) -> Tuple[Any]
        """Return a (filename, lineno, function, line) tuple with markup."""
        frame_info = inspect.getframeinfo(tb)
        filename = frame_info.filename
        lineno = frame_info.lineno
        function = frame_info.function
        line = frame_info.code_context[0]
        stripped_line = line.lstrip(" ")
        try:
            # Re-parse the lone source line so it can be highlighted; the
            # original indentation is re-applied to the formatted result.
            tree = ast.parse(stripped_line, mode="exec")
            formatted = self._format_tree(tree, stripped_line, io)
            formatted = (len(line) - len(stripped_line)) * " " + formatted
        except SyntaxError:
            # A single line is often unparseable (e.g. mid-statement).
            formatted = line
        return (
            io.format("<c1>{}</c1>".format(filename)),
            "<fg=blue;options=bold>{}</>".format(lineno),
            "<b>{}</b>".format(function),
            formatted,
        )

    def _format_tree(self, tree, source, io):
        """Colorize *source* using the nodes of its parsed AST *tree*."""
        offset = 0
        chunks = []
        nodes = [n for n in ast.walk(tree)]
        displayed_nodes = []
        for node in nodes:
            nodecls = node.__class__
            nodename = nodecls.__name__
            # Nodes without a column can't be mapped back to the source.
            if "col_offset" not in dir(node):
                continue
            if nodecls in self.AST_ELEMENTS["keywords"]:
                displayed_nodes.append((node, nodename.lower(), "keyword"))
            elif nodecls == ast.Name and node.id in self.AST_ELEMENTS["builtins"]:
                displayed_nodes.append((node, node.id, "builtin"))
            elif nodecls == ast.Str:
                displayed_nodes.append((node, "'{}'".format(node.s), "literal"))
            elif nodecls == ast.Num:
                displayed_nodes.append((node, str(node.n), "literal"))
        # Emit plain source between highlighted spans, left to right.
        displayed_nodes.sort(key=lambda elem: elem[0].col_offset)
        for dn in displayed_nodes:
            node = dn[0]
            s = dn[1]
            theme = dn[2]
            begin_col = node.col_offset
            src_chunk = source[offset:begin_col]
            chunks.append(src_chunk)
            chunks.append(io.format("{}{}</>".format(self.THEME[theme], s)))
            offset = begin_col + len(s)
        chunks.append(source[offset:])
        return "".join(chunks)
| 29.893939
| 82
| 0.547643
|
import ast
import inspect
import keyword
import sys
import traceback
from clikit.api.io import IO
class ExceptionTrace(object):
THEME = {
"comment": "<fg=black;options=bold>",
"keyword": "<fg=yellow>",
"builtin": "<fg=blue>",
"literal": "<fg=magenta>",
}
AST_ELEMENTS = {
"builtins": __builtins__.keys()
if type(__builtins__) is dict
else dir(__builtins__),
"keywords": [
getattr(ast, cls)
for cls in dir(ast)
if keyword.iskeyword(cls.lower())
and inspect.isclass(getattr(ast, cls))
and issubclass(getattr(ast, cls), ast.AST)
],
}
def __init__(self, exception): self._exception = exception
self._exc_info = sys.exc_info()
def render(self, io, simple=False): if hasattr(self._exception, "__traceback__"):
tb = self._exception.__traceback__
else:
tb = self._exc_info[2]
title = ""
if not simple:
title += "\n[<error>{}</error>]\n".format(
self._exception.__class__.__name__
)
title += "<error>{}</error>".format(str(self._exception))
io.write_line(title)
if not simple and io.is_verbose():
io.write_line("")
self._render_traceback(io, tb)
def _render_traceback(self, io, tb): frames = []
while tb:
frames.append(self._format_traceback_frame(io, tb))
tb = tb.tb_next
io.write_line("<b>Traceback (most recent call last):</b>")
io.write_line("".join(traceback.format_list(frames)))
def _format_traceback_frame(self, io, tb): frame_info = inspect.getframeinfo(tb)
filename = frame_info.filename
lineno = frame_info.lineno
function = frame_info.function
line = frame_info.code_context[0]
stripped_line = line.lstrip(" ")
try:
tree = ast.parse(stripped_line, mode="exec")
formatted = self._format_tree(tree, stripped_line, io)
formatted = (len(line) - len(stripped_line)) * " " + formatted
except SyntaxError:
formatted = line
return (
io.format("<c1>{}</c1>".format(filename)),
"<fg=blue;options=bold>{}</>".format(lineno),
"<b>{}</b>".format(function),
formatted,
)
def _format_tree(self, tree, source, io):
offset = 0
chunks = []
nodes = [n for n in ast.walk(tree)]
displayed_nodes = []
for node in nodes:
nodecls = node.__class__
nodename = nodecls.__name__
if "col_offset" not in dir(node):
continue
if nodecls in self.AST_ELEMENTS["keywords"]:
displayed_nodes.append((node, nodename.lower(), "keyword"))
elif nodecls == ast.Name and node.id in self.AST_ELEMENTS["builtins"]:
displayed_nodes.append((node, node.id, "builtin"))
elif nodecls == ast.Str:
displayed_nodes.append((node, "'{}'".format(node.s), "literal"))
elif nodecls == ast.Num:
displayed_nodes.append((node, str(node.n), "literal"))
displayed_nodes.sort(key=lambda elem: elem[0].col_offset)
for dn in displayed_nodes:
node = dn[0]
s = dn[1]
theme = dn[2]
begin_col = node.col_offset
src_chunk = source[offset:begin_col]
chunks.append(src_chunk)
chunks.append(io.format("{}{}</>".format(self.THEME[theme], s)))
offset = begin_col + len(s)
chunks.append(source[offset:])
return "".join(chunks)
| true
| true
|
f7081b69da473f736356c8c9de8a9c57a7b4d90e
| 399
|
py
|
Python
|
lithography/wsgi.py
|
lwbe/lithography
|
0a640a58b563647d34528c7544e0ac200cd8dd97
|
[
"MIT"
] | null | null | null |
lithography/wsgi.py
|
lwbe/lithography
|
0a640a58b563647d34528c7544e0ac200cd8dd97
|
[
"MIT"
] | 12
|
2020-06-05T17:26:19.000Z
|
2022-03-11T23:16:21.000Z
|
lithography/wsgi.py
|
lwbe/lithography
|
0a640a58b563647d34528c7544e0ac200cd8dd97
|
[
"MIT"
] | null | null | null |
"""
WSGI config for lithography project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Must be set before the application is built so Django can load settings.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lithography.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
| 23.470588
| 78
| 0.789474
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lithography.settings')
application = get_wsgi_application()
| true
| true
|
f7081cbeb9857272c5f131a50f03e9c958dae3c6
| 814
|
py
|
Python
|
setup.py
|
jondot/dotrunner
|
d80a72a291a1de86626b7bfb65892f5898d8d057
|
[
"BSD-2-Clause"
] | 2
|
2018-09-02T15:44:01.000Z
|
2020-06-15T08:56:47.000Z
|
setup.py
|
jondot/dotrunner
|
d80a72a291a1de86626b7bfb65892f5898d8d057
|
[
"BSD-2-Clause"
] | 3
|
2020-09-05T11:46:55.000Z
|
2021-05-09T16:17:59.000Z
|
setup.py
|
jondot/dotrunner
|
d80a72a291a1de86626b7bfb65892f5898d8d057
|
[
"BSD-2-Clause"
] | 1
|
2019-10-15T14:45:27.000Z
|
2019-10-15T14:45:27.000Z
|
# -*- coding: utf-8 -*-
from dotrunner.version import VERSION
from setuptools import setup, find_packages
# The reStructuredText README doubles as the PyPI long description.
with open('README.rst') as f:
    readme = f.read()
# NOTE(review): 'license' shadows the builtin of the same name; harmless
# here since it is only used once, immediately below.
with open('LICENSE') as f:
    license = f.read()
setup(
    name='dotrunner',
    version=VERSION,
    description='Links dotfiles',
    long_description=readme,
    author='Dotan Nahum',
    # Ship the license and readme files alongside the package.
    data_files=[('', ['LICENSE', 'README.md', 'README.rst'])],
    author_email='jondotan@gmail.com',
    url='https://github.com/jondot/dotrunner',
    license=license,
    packages=find_packages(exclude=('tests', 'docs', 'jest-pytest')),
    # Installs the `dotrunner` console command.
    entry_points='''
        [console_scripts]
        dotrunner=dotrunner.dotrunner:main
    ''',
    install_requires=[
        'toolz', 'docopt', 'networkx', 'pyyaml', 'delegator.py', 'colorama',
        'pyspin'
    ])
| 26.258065
| 76
| 0.632678
|
from dotrunner.version import VERSION
from setuptools import setup, find_packages
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='dotrunner',
version=VERSION,
description='Links dotfiles',
long_description=readme,
author='Dotan Nahum',
data_files=[('', ['LICENSE', 'README.md', 'README.rst'])],
author_email='jondotan@gmail.com',
url='https://github.com/jondot/dotrunner',
license=license,
packages=find_packages(exclude=('tests', 'docs', 'jest-pytest')),
entry_points='''
[console_scripts]
dotrunner=dotrunner.dotrunner:main
''',
install_requires=[
'toolz', 'docopt', 'networkx', 'pyyaml', 'delegator.py', 'colorama',
'pyspin'
])
| true
| true
|
f7081d04f130b03fc952042146402d7f447fa934
| 604
|
py
|
Python
|
cuor/harvester/general.py
|
tocororo/cuor
|
0202970f19b927562f34eb4367ea4f91e08e6706
|
[
"MIT"
] | null | null | null |
cuor/harvester/general.py
|
tocororo/cuor
|
0202970f19b927562f34eb4367ea4f91e08e6706
|
[
"MIT"
] | null | null | null |
cuor/harvester/general.py
|
tocororo/cuor
|
0202970f19b927562f34eb4367ea4f91e08e6706
|
[
"MIT"
] | null | null | null |
from cuor.organizations.api import OrganizationRecord
import traceback
def remove_nulls(d):
    """Return a copy of mapping *d* without the keys whose value is None."""
    filtered = {}
    for key, value in d.items():
        if value is not None:
            filtered[key] = value
    return filtered
def _assing_if_exist(data, record, field):
if field in record:
data[field] = record[field]
def insert_in_cuor(data, inst):
    """Create or update *data* as an OrganizationRecord in CUOR.

    The record is committed to the database and reindexed immediately.
    *inst* is the raw source institution record; it is currently unused
    by this function (kept for the callers' signature).
    """
    # Dead commented-out debug/try-except scaffolding removed; any
    # exception now propagates to the caller instead of being printed.
    OrganizationRecord.create_or_update(None, data, dbcommit=True, reindex=True)
| 25.166667
| 80
| 0.587748
|
from cuor.organizations.api import OrganizationRecord
import traceback
def remove_nulls(d):
return {k: v for k, v in d.items() if v is not None}
def _assing_if_exist(data, record, field):
if field in record:
data[field] = record[field]
def insert_in_cuor(data, inst):
OrganizationRecord.create_or_update(None, data, dbcommit=True, reindex=True)
| true
| true
|
f7081d5dad49cf3395526fbe0ef9fa8dc20e9ddf
| 6,037
|
py
|
Python
|
supyr_struct/field_type_methods/encoders.py
|
forksnd/supyr_struct
|
7ce60c406c0755e13658ba5d19b8437ff601cf43
|
[
"MIT"
] | 5
|
2020-02-15T06:19:45.000Z
|
2022-01-15T20:01:02.000Z
|
supyr_struct/field_type_methods/encoders.py
|
forksnd/supyr_struct
|
7ce60c406c0755e13658ba5d19b8437ff601cf43
|
[
"MIT"
] | 5
|
2019-11-25T20:39:33.000Z
|
2020-01-16T08:50:15.000Z
|
supyr_struct/field_type_methods/encoders.py
|
forksnd/supyr_struct
|
7ce60c406c0755e13658ba5d19b8437ff601cf43
|
[
"MIT"
] | 1
|
2019-09-02T06:10:40.000Z
|
2019-09-02T06:10:40.000Z
|
'''
Encoder functions for all standard FieldTypes.
Encoders are responsible for converting a python object into bytes*
*Not all encoders return bytes objects.
FieldTypes that operate on the bit level cant be expected to return
even byte sized amounts of bits, so they operate differently.
A FieldTypes serializer and encoder simply need to
be working with the same parameter and return data types.
'''
__all__ = [
# basic encoders
'encode_numeric', 'encode_string', 'no_encode',
'encode_big_int', 'encode_bit_int',
# specialized encoders
'encode_24bit_numeric', 'encode_decimal', 'encode_bit', 'encode_raw_string',
'encode_int_timestamp', 'encode_float_timestamp', 'encode_string_hex',
# wrapper functions
'encoder_wrapper',
]
from decimal import Decimal
from struct import pack
from time import mktime, strptime
from supyr_struct.defs.constants import ATTR_OFFS
def encoder_wrapper(en):
    """Wrap encoder *en* for FieldTypes that are both is_block and is_data.

    Such nodes are Blocks that keep their payload on a ``data`` attribute;
    the returned encoder unwraps that attribute before delegating to *en*.
    """
    def wrapped_encoder(self, node, parent=None, attr_index=None, _encode=en):
        # Hand the node's stored value to the wrapped encoder.
        unwrapped = node.data
        return _encode(self, unwrapped, parent, attr_index)
    return wrapped_encoder
def no_encode(self, node, parent=None, attr_index=None):
    """Identity encoder: hand *node* back untouched."""
    return node
def encode_numeric(self, node, parent=None, attr_index=None):
    """Encode the python value *node* to bytes via this field's struct packer."""
    packer = self.struct_packer
    return packer(node)
def encode_decimal(self, node, parent=None, attr_index=None):
    """Encode a decimal.Decimal to bytes.

    Raises:
        NotImplementedError: always; Decimal encoding is unsupported.
    """
    raise NotImplementedError('Encoding Decimal objects is not supported yet.')
def encode_24bit_numeric(self, node, parent=None, attr_index=None):
    """Encode *node* as a 3-byte (24-bit) signed or unsigned integer.

    A 't' as the second character of the encoding string marks the int
    as signed.  The value is range-checked, packed as 32 bits and the
    unused byte is sliced off according to the field's endianness.
    """
    is_signed = self.enc[1] == 't'
    if is_signed:
        assert node >= -0x800000 and node <= 0x7fffff, (
            '%s is too large to pack as a 24bit signed int.' % node)
        if node < 0:
            # Convert to the unsigned twos-complement representation.
            node += 0x1000000
    else:
        assert node >= 0 and node <= 0xffffff, (
            '%s is too large to pack as a 24bit unsigned int.' % node)
    # Pack as 32 bits, then drop the unused byte on the high end.
    if self.endian == '<':
        return pack('<I', node)[0:3]
    return pack('>I', node)[1:4]
def encode_int_timestamp(self, node, parent=None, attr_index=None):
    """Pack a ctime-style timestamp string *node* as an integer epoch value."""
    epoch_seconds = int(mktime(strptime(node)))
    return self.struct_packer(epoch_seconds)
def encode_float_timestamp(self, node, parent=None, attr_index=None):
    """Pack a ctime-style timestamp string *node* as a float epoch value."""
    epoch_seconds = float(mktime(strptime(node)))
    return self.struct_packer(epoch_seconds)
def encode_string(self, node, parent=None, attr_index=None):
    """Encode *node* with this field's encoding, ensuring it ends with
    the delimiter character.
    """
    if node.endswith(self.str_delimiter):
        terminated = node
    else:
        terminated = node + self.str_delimiter
    return terminated.encode(self.enc)
def encode_raw_string(self, node, parent=None, attr_index=None):
    """Encode *node* with this field's encoding; no delimiter is appended."""
    encoded = node.encode(self.enc)
    return encoded
def encode_string_hex(self, node, parent=None, attr_index=None):
    """Convert a string of hex digits into its big-endian bytes form.

    Odd-length strings are treated as having a leading zero nibble.
    """
    byte_length = (len(node) + 1) // 2
    return int(node, 16).to_bytes(byte_length, 'big')
def encode_big_int(self, node, parent=None, attr_index=None):
    """Encode an arbitrarily sized integer into *bytecount* bytes.

    The byte count comes from the parent's size for this field.  The last
    character of the encoding string selects twos ('S') or ones ('s')
    compliment for signed values; anything else encodes as unsigned.
    """
    bytecount = parent.get_size(attr_index)
    if not bytecount:
        return b''
    endian = 'little' if self.endian == '<' else 'big'
    sign_mode = self.enc[-1]
    if sign_mode == 'S':
        # twos compliment
        return node.to_bytes(bytecount, endian, signed=True)
    if sign_mode == 's':
        # ones compliment: negative values are shifted down by one
        if node < 0:
            return (node - 1).to_bytes(bytecount, endian, signed=True)
        return node.to_bytes(bytecount, endian, signed=False)
    return node.to_bytes(bytecount, endian)
def encode_bit(self, node, parent=None, attr_index=None):
    """Encode a single bit as (value, bit offset within parent, mask of 1)."""
    bit_offset = parent.ATTR_OFFS[attr_index]
    return (node, bit_offset, 1)
def encode_bit_int(self, node, parent=None, attr_index=None):
    """Encode an int at the bit level as (value, bit offset, coverage mask).

    Negative values are converted within *bitcount* bits using twos
    compliment when the encoding is 'S', ones compliment otherwise.
    """
    bitcount = parent.get_size(attr_index)
    offset = parent.ATTR_OFFS[attr_index]
    mask = (1 << bitcount) - 1
    if node >= 0:
        return (node, offset, mask)
    signmask = 1 << (bitcount - 1)
    if self.enc == 'S':
        # twos compliment
        return (2 * signmask + node, offset, mask)
    # ones compliment
    return (2 * signmask + (node - 1), offset, mask)
| 30.034826
| 80
| 0.673845
|
__all__ = [
'encode_numeric', 'encode_string', 'no_encode',
'encode_big_int', 'encode_bit_int',
'encode_24bit_numeric', 'encode_decimal', 'encode_bit', 'encode_raw_string',
'encode_int_timestamp', 'encode_float_timestamp', 'encode_string_hex',
'encoder_wrapper',
]
from decimal import Decimal
from struct import pack
from time import mktime, strptime
from supyr_struct.defs.constants import ATTR_OFFS
def encoder_wrapper(en):
def wrapped_encoder(
self, node, parent=None, attr_index=None, _encode=en):
return _encode(self, node.data, parent, attr_index)
return wrapped_encoder
def no_encode(self, node, parent=None, attr_index=None):
return node
def encode_numeric(self, node, parent=None, attr_index=None):
return self.struct_packer(node)
def encode_decimal(self, node, parent=None, attr_index=None):
raise NotImplementedError('Encoding Decimal objects is not supported yet.')
def encode_24bit_numeric(self, node, parent=None, attr_index=None):
if self.enc[1] == 't':
assert node >= -0x800000 and node <= 0x7fffff, (
'%s is too large to pack as a 24bit signed int.' % node)
if node < 0:
node += 0x1000000
else:
assert node >= 0 and node <= 0xffffff, (
'%s is too large to pack as a 24bit unsigned int.' % node)
if self.endian == '<':
return pack('<I', node)[0:3]
return pack('>I', node)[1:4]
def encode_int_timestamp(self, node, parent=None, attr_index=None):
return self.struct_packer(int(mktime(strptime(node))))
def encode_float_timestamp(self, node, parent=None, attr_index=None):
return self.struct_packer(float(mktime(strptime(node))))
def encode_string(self, node, parent=None, attr_index=None):
if not node.endswith(self.str_delimiter):
return (node + self.str_delimiter).encode(self.enc)
return node.encode(self.enc)
def encode_raw_string(self, node, parent=None, attr_index=None):
return node.encode(self.enc)
def encode_string_hex(self, node, parent=None, attr_index=None):
return int(node, 16).to_bytes((len(node) + 1)//2, 'big')
def encode_big_int(self, node, parent=None, attr_index=None):
bytecount = parent.get_size(attr_index)
if not bytecount:
return b''
if self.endian == '<':
endian = 'little'
else:
endian = 'big'
if self.enc[-1] == 'S':
return node.to_bytes(bytecount, endian, signed=True)
elif self.enc[-1] == 's':
if node < 0:
return (node-1).to_bytes(bytecount, endian, signed=True)
return node.to_bytes(bytecount, endian, signed=False)
return node.to_bytes(bytecount, endian)
def encode_bit(self, node, parent=None, attr_index=None):
return(node, parent.ATTR_OFFS[attr_index], 1)
def encode_bit_int(self, node, parent=None, attr_index=None):
bitcount = parent.get_size(attr_index)
offset = parent.ATTR_OFFS[attr_index]
mask = (1 << bitcount) - 1
if node < 0:
signmask = 1 << (bitcount - 1)
if self.enc == 'S':
return(2*signmask + node, offset, mask)
return(2*signmask + (node-1), offset, mask)
return(node, offset, mask)
| true
| true
|
f7081eaa121f76966dad478c0bd156dc401f5372
| 3,678
|
py
|
Python
|
actions/actions.py
|
coreycb/charm-swift-storage
|
c31991ab198d7b51b9a4f5744a1fcc1fef0bc1ef
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
actions/actions.py
|
coreycb/charm-swift-storage
|
c31991ab198d7b51b9a4f5744a1fcc1fef0bc1ef
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
actions/actions.py
|
coreycb/charm-swift-storage
|
c31991ab198d7b51b9a4f5744a1fcc1fef0bc1ef
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import yaml
from charmhelpers.core.host import service_pause, service_resume
from charmhelpers.core.hookenv import action_fail
from charmhelpers.core.unitdata import HookData, kv
from charmhelpers.contrib.openstack.utils import (
get_os_codename_package,
set_os_workload_status,
)
from lib.swift_storage_utils import (
assess_status,
REQUIRED_INTERFACES,
SWIFT_SVCS,
)
from hooks.swift_storage_hooks import (
CONFIGS,
)
def _get_services():
    """Return a list of services that need to be (un)paused."""
    services = list(SWIFT_SVCS)
    # swift-container-sync first appeared in Icehouse; OpenStack codenames
    # are alphabetical by release, so a plain string compare works here.
    if get_os_codename_package("swift-container") < "icehouse":
        services.remove("swift-container-sync")
    return services
def get_action_parser(actions_yaml_path, action_name,
                      get_services=_get_services):
    """Make an argparse.ArgumentParser seeded from actions.yaml definitions.

    :param actions_yaml_path: path to the charm's actions.yaml file.
    :param action_name: action whose description seeds the parser.
    :param get_services: callable returning the default --services list.
    :returns: an argparse.ArgumentParser for the named action.
    """
    with open(actions_yaml_path) as fh:
        # safe_load: actions.yaml is plain data, and yaml.load() without an
        # explicit Loader can construct arbitrary Python objects (unsafe) and
        # is deprecated/removed in newer PyYAML releases.
        doc = yaml.safe_load(fh)[action_name]["description"]
    parser = argparse.ArgumentParser(description=doc)
    parser.add_argument("--services", default=get_services())
    # TODO: Add arguments for params defined in the actions.yaml
    return parser
def pause(args):
    """Pause all the swift services.

    @raises Exception if any services fail to stop
    """
    for svc in args.services:
        if not service_pause(svc):
            raise Exception("{} didn't stop cleanly.".format(svc))
    # Record the paused flag in the unit's key/value store.
    with HookData()():
        kv().set('unit-paused', True)
    set_os_workload_status(CONFIGS, REQUIRED_INTERFACES,
                           charm_func=assess_status)
def resume(args):
    """Resume all the swift services.

    @raises Exception if any services fail to start
    """
    for svc in args.services:
        if not service_resume(svc):
            raise Exception("{} didn't start cleanly.".format(svc))
    # Clear the paused flag in the unit's key/value store.
    with HookData()():
        kv().set('unit-paused', False)
    set_os_workload_status(CONFIGS, REQUIRED_INTERFACES,
                           charm_func=assess_status)
# A dictionary of all the defined actions to callables (which take
# parsed arguments). Keys must match the invoked script's basename,
# since main() dispatches on _get_action_name().
ACTIONS = {"pause": pause, "resume": resume}
def main(argv):
    """Look up the invoked action in ACTIONS and run it.

    :param argv: command-line arguments (without the program name).
    :returns: None on success, or an error string for unknown actions.
    """
    action_name = _get_action_name()
    actions_yaml_path = _get_actions_yaml_path()
    parser = get_action_parser(actions_yaml_path, action_name)
    args = parser.parse_args(argv)
    action = ACTIONS.get(action_name)
    if action is None:
        return "Action %s undefined" % action_name
    try:
        action(args)
    except Exception as e:
        # Report the failure to juju rather than crashing the hook.
        action_fail(str(e))
def _get_action_name():
    """Return the name of the action (the basename this file was run as)."""
    invoked_as = __file__
    return os.path.basename(invoked_as)
def _get_actions_yaml_path():
    """Return the path to actions.yaml (one directory above this file)."""
    return os.path.join(os.path.dirname(__file__), "..", "actions.yaml")
if __name__ == "__main__":
    # main() returns None on success or an error string for unknown actions;
    # sys.exit() prints a string argument and exits non-zero.
    sys.exit(main(sys.argv[1:]))
| 29.902439
| 79
| 0.693855
|
import argparse
import os
import sys
import yaml
from charmhelpers.core.host import service_pause, service_resume
from charmhelpers.core.hookenv import action_fail
from charmhelpers.core.unitdata import HookData, kv
from charmhelpers.contrib.openstack.utils import (
get_os_codename_package,
set_os_workload_status,
)
from lib.swift_storage_utils import (
assess_status,
REQUIRED_INTERFACES,
SWIFT_SVCS,
)
from hooks.swift_storage_hooks import (
CONFIGS,
)
def _get_services():
services = SWIFT_SVCS[:]
if get_os_codename_package("swift-container") < "icehouse":
services.remove("swift-container-sync")
return services
def get_action_parser(actions_yaml_path, action_name,
get_services=_get_services):
with open(actions_yaml_path) as fh:
doc = yaml.load(fh)[action_name]["description"]
parser = argparse.ArgumentParser(description=doc)
parser.add_argument("--services", default=get_services())
return parser
def pause(args):
for service in args.services:
stopped = service_pause(service)
if not stopped:
raise Exception("{} didn't stop cleanly.".format(service))
with HookData()():
kv().set('unit-paused', True)
set_os_workload_status(CONFIGS, REQUIRED_INTERFACES,
charm_func=assess_status)
def resume(args):
for service in args.services:
started = service_resume(service)
if not started:
raise Exception("{} didn't start cleanly.".format(service))
with HookData()():
kv().set('unit-paused', False)
set_os_workload_status(CONFIGS, REQUIRED_INTERFACES,
charm_func=assess_status)
ACTIONS = {"pause": pause, "resume": resume}
def main(argv):
action_name = _get_action_name()
actions_yaml_path = _get_actions_yaml_path()
parser = get_action_parser(actions_yaml_path, action_name)
args = parser.parse_args(argv)
try:
action = ACTIONS[action_name]
except KeyError:
return "Action %s undefined" % action_name
else:
try:
action(args)
except Exception as e:
action_fail(str(e))
def _get_action_name():
return os.path.basename(__file__)
def _get_actions_yaml_path():
cwd = os.path.dirname(__file__)
return os.path.join(cwd, "..", "actions.yaml")
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| true
| true
|
f7081f1ab5736a9a2db040d3dadfb73cde6fea07
| 14,065
|
py
|
Python
|
Youtube Bot Client/database.py
|
zackmawaldi/Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader
|
3681babb6a46ec2e957cd57888dd13ac22b56cab
|
[
"MIT"
] | 279
|
2019-09-05T19:56:04.000Z
|
2022-03-29T01:02:33.000Z
|
Youtube Bot Client/database.py
|
zackmawaldi/Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader
|
3681babb6a46ec2e957cd57888dd13ac22b56cab
|
[
"MIT"
] | 74
|
2019-12-18T04:23:45.000Z
|
2022-03-21T19:33:51.000Z
|
Youtube Bot Client/database.py
|
zackmawaldi/Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader
|
3681babb6a46ec2e957cd57888dd13ac22b56cab
|
[
"MIT"
] | 153
|
2019-09-06T17:43:56.000Z
|
2022-03-27T13:20:05.000Z
|
import mysql
import pickle
import hashlib
import mysql.connector
from mysql.connector import pooling
import settings
import datetime
from time import sleep
def initDatabase():
    """Create the youtubebot database and its tables if they do not exist.

    Statement order matters: the database must exist and be selected (USE)
    before any table DDL runs. sql_notes is disabled around the
    IF NOT EXISTS statements to suppress "already exists" notes.

    NOTE(review): the pooled connection and cursor are never closed here,
    so one pool slot leaks per call — confirm whether that is intentional.
    """
    global connection_pool
    connection_object = connection_pool.get_connection()
    cursor = connection_object.cursor()
    cursor.execute("SET sql_notes = 0; ")
    cursor.execute("create database IF NOT EXISTS youtubebot")
    cursor.execute("USE youtubebot;")
    cursor.execute("SET sql_notes = 0; ")
    # Raise the packet limit so large pickled script blobs can be stored.
    cursor.execute("set global max_allowed_packet=67108864;")
    cursor.execute("create table IF NOT EXISTS users (username varchar(70),password varchar(80), status varchar(80));")
    cursor.execute("create table IF NOT EXISTS videogenerators (generatorname varchar(70),password varchar(80), status varchar(80));")
    # youtube account, estimated length, actual length
    cursor.execute("create table IF NOT EXISTS scripts (scriptno int NOT NULL AUTO_INCREMENT, PRIMARY KEY (scriptno), submission_id varchar(70), subredditid varchar(70), subreddit varchar(70), url varchar(2083), timecreated DATETIME,"
                   "status varchar(70), editedby varchar(70), scripttitle varchar(2083), scriptauthor varchar(70), ups int, downs int, num_comments int, timegathered DATETIME, timeuploaded DATETIME, sceduledupload DATETIME, esttime time, actualtime time, rawscript MEDIUMBLOB, "
                   "finalscript MEDIUMBLOB);")
    cursor.execute("SET sql_notes = 1; ")
connection_pool = None
def login(username, password):
    """Check a username/password pair against the users table.

    :param username: account name to look up.
    :param password: password value as stored in the table.
        NOTE(review): addUser() stores an md5 hexdigest, so callers are
        presumably expected to pass the digest here — confirm at call sites.
    :returns: True if a matching row exists, else False.
    """
    global connection_pool
    connection_object = connection_pool.get_connection()
    cursor = connection_object.cursor()
    cursor.execute("USE youtubebot;")
    # Parameterized query: the original interpolated repr() of the
    # user-supplied values straight into the SQL text, which is injectable.
    query = "SELECT count(*) FROM users WHERE username = %s AND password = %s;"
    cursor.execute(query, (username, password))
    result = cursor.fetchall()
    cursor.close()
    connection_object.close()
    return result[0][0] != 0
def getScriptEditInformation():
    """Return (scriptno, status, editedby) rows for scripts being edited."""
    conn = connection_pool.get_connection()
    cur = conn.cursor()
    cur.execute("USE youtubebot;")
    cur.execute("SELECT scriptno, status, editedby FROM scripts WHERE status = 'EDITING' AND editedby IS NOT NULL;")
    rows = list(cur.fetchall())
    cur.close()
    conn.close()
    return rows
def completeUpload(scriptno, timeuploaded, scedualedrelease):
    """Mark a script as successfully uploaded and record its timing."""
    conn = connection_pool.get_connection()
    cur = conn.cursor()
    cur.execute("USE youtubebot;")
    cur.execute(
        "UPDATE scripts "
        "SET status = 'SUCCESSUPLOAD', timeuploaded = %s, sceduledupload = %s WHERE scriptno = %s;",
        (timeuploaded, scedualedrelease, scriptno))
    conn.commit()
    cur.close()
    conn.close()
def getLastUploadedScripts():
    """Return the 6 most recent upload timestamps at or before now.

    :returns: list of single-element (timeuploaded,) tuples, newest first.
    """
    connection_object = connection_pool.get_connection()
    cursor = connection_object.cursor()
    now = datetime.datetime.now()
    cursor.execute("USE youtubebot;")
    # Bind the datetime as a parameter and let the driver serialize it,
    # instead of interpolating a strftime() string into the SQL text.
    query = ("SELECT timeuploaded "
             "from scripts "
             "WHERE timeuploaded <= %s "
             "ORDER BY timeuploaded DESC "
             "LIMIT 6;")
    cursor.execute(query, (now,))
    results = list(cursor.fetchall())
    cursor.close()
    connection_object.close()
    return results
def getCompletedScripts():
    """Return (scriptno, status, editedby) rows for completed scripts.

    NOTE(review): a later definition of getCompletedScripts(back) in this
    module rebinds the name at import time, shadowing this zero-argument
    version — confirm which one callers expect.
    """
    conn = connection_pool.get_connection()
    cur = conn.cursor()
    cur.execute("USE youtubebot;")
    cur.execute("SELECT scriptno, status, editedby FROM scripts WHERE status = 'COMPLETE' AND editedby IS NOT NULL;")
    rows = list(cur.fetchall())
    cur.close()
    conn.close()
    return rows
def getOnlineUsers():
    """Return the usernames of all users currently marked ONLINE."""
    conn = connection_pool.get_connection()
    cur = conn.cursor()
    cur.execute("USE youtubebot;")
    cur.execute("SELECT username FROM users WHERE status = 'ONLINE';")
    names = [row[0] for row in cur.fetchall()]
    cur.close()
    conn.close()
    return names
def updateScriptStatus(status, user, scriptid):
    """Set status/editedby for the script with primary key *scriptid*.

    :param status: new status string.
    :param user: editing user, or None to clear the editedby column.
    :param scriptid: scripts.scriptno primary key.
    """
    global connection_pool
    connection_object = connection_pool.get_connection()
    cursor = connection_object.cursor()
    # Bugfix: the original substituted the string "NULL" when user was None,
    # which a parameterized query stores as the literal text 'NULL'.
    # Passing None makes the driver write a real SQL NULL.
    cursor.execute("USE youtubebot;")
    query = "UPDATE scripts " \
            "SET status = %s, editedby = %s WHERE scriptno = %s;"
    cursor.execute(query, (status, user, scriptid))
    connection_object.commit()
    cursor.close()
    connection_object.close()
def updateScriptStatusById(status, user, scriptid):
    """Set status/editedby for the script with reddit submission_id *scriptid*.

    :param status: new status string.
    :param user: editing user, or None to clear the editedby column.
    :param scriptid: scripts.submission_id value.
    """
    global connection_pool
    connection_object = connection_pool.get_connection()
    cursor = connection_object.cursor()
    # Bugfix: as in updateScriptStatus, passing the string "NULL" to a
    # parameterized query stores the literal text 'NULL'; None yields SQL NULL.
    cursor.execute("USE youtubebot;")
    query = "UPDATE scripts " \
            "SET status = %s, editedby = %s WHERE submission_id = %s;"
    cursor.execute(query, (status, user, scriptid))
    connection_object.commit()
    cursor.close()
    connection_object.close()
def updateUserStatus(user, status):
    """Set the status column for *user*.

    :param user: username to update.
    :param status: new status string, or None to store SQL NULL.
    """
    global connection_pool
    connection_object = connection_pool.get_connection()
    cursor = connection_object.cursor()
    # Bugfix: the original bound repr(status) — baking quote characters into
    # the stored value — and the literal string "NULL" for None. With a
    # parameterized query the bare value (or None) is what must be bound.
    cursor.execute("USE youtubebot;")
    query = "UPDATE users " \
            "SET status = %s WHERE username = %s;"
    cursor.execute(query, (status, user))
    connection_object.commit()
    cursor.close()
    connection_object.close()
def getScriptStatus(scriptno):
    """Return the status string of the script with primary key *scriptno*.

    Raises IndexError when no such row exists (unchanged from the original).
    """
    global connection_pool
    connection_object = connection_pool.get_connection()
    cursor = connection_object.cursor()
    cursor.execute("USE youtubebot;")
    # Parameterized instead of %-interpolating scriptno into the SQL text.
    query = "SELECT status " \
            "FROM scripts WHERE scriptno = %s;"
    cursor.execute(query, (scriptno,))
    result = cursor.fetchall()
    cursor.close()
    connection_object.close()
    return result[0][0]
def getScriptIds():
    """Return (scriptno, submission_id, status) for every script row."""
    conn = connection_pool.get_connection()
    cur = conn.cursor()
    cur.execute("USE youtubebot;")
    cur.execute("SELECT scriptno, submission_id, status "
                "FROM scripts;")
    rows = cur.fetchall()
    cur.close()
    conn.close()
    return rows
def getCompletedScripts(back):
    """Return up to *back* completed scripts, highest-upvoted first.

    Each element is (scriptno, scripttitle, author, ups, payload), where
    payload is the unpickled finalscript blob.

    NOTE(review): this rebinds the earlier zero-argument
    getCompletedScripts(); confirm the shadowing is intentional.

    :returns: list of tuples; an empty list on query failure (the original
        implicitly returned None on error, which crashed iterating callers).
    """
    global connection_pool
    try:
        connection_object = connection_pool.get_connection()
        cursor = connection_object.cursor()
        cursor.execute("USE youtubebot;")
        # LIMIT is bound as a parameter instead of %-interpolated.
        query = ("SELECT scriptno, scripttitle, scriptauthor, ups, finalscript "
                 "FROM scripts WHERE status = 'COMPLETE' AND finalscript IS NOT NULL ORDER BY ups DESC "
                 "LIMIT %s;")
        cursor.execute(query, (back,))
        results = []
        for scriptno, scripttitle, author, ups, blob in cursor.fetchall():
            # SECURITY: pickle.loads runs arbitrary code on unpickling; this
            # is only safe while the database contents are fully trusted.
            results.append((scriptno, scripttitle, author, ups,
                            pickle.loads(blob)))
        cursor.close()
        connection_object.close()
        return results
    except Exception as e:
        print("Mysql Error with downloading completed scripts")
        print(e)
        return []
def getScripts(back, filter):
    """Return up to *back* RAW/EDITING scripts ordered by column *filter* DESC.

    Each element is (scriptno, subreddit, title, author, ups, downs,
    rawscript, submission_id, status, editedby, num_comments), with
    rawscript unpickled.

    :param back: maximum number of rows to return (bound as a parameter).
    :param filter: sort column name. ORDER BY identifiers cannot be bound as
        parameters, so the name is validated against a whitelist before being
        interpolated — this closes the SQL-injection hole in the original.
    :raises ValueError: if *filter* is not a known sortable column.
    """
    global connection_pool
    allowed = ("scriptno", "subreddit", "scripttitle", "scriptauthor",
               "ups", "downs", "num_comments", "timecreated", "timegathered")
    if filter not in allowed:
        raise ValueError("unknown sort column: %r" % (filter,))
    connection_object = connection_pool.get_connection()
    cursor = connection_object.cursor()
    cursor.execute("USE youtubebot;")
    query = ("SELECT scriptno, subreddit, scripttitle, scriptauthor, ups, downs, rawscript, submission_id, status, editedby, num_comments "
             "FROM scripts WHERE status = 'RAW' or status = 'EDITING' ORDER BY %s DESC "
             "LIMIT %%s;" % filter)
    cursor.execute(query, (back,))
    results = []
    for res in cursor.fetchall():
        (scriptno, subreddit, title, author, ups, downs, rawblob,
         sub_id, status, editedby, num_comments) = res
        # SECURITY: pickle.loads runs arbitrary code; trusted DB only.
        results.append((scriptno, subreddit, title, author, ups, downs,
                        pickle.loads(rawblob), sub_id, status, editedby,
                        num_comments))
    cursor.close()
    connection_object.close()
    return results
def addUser(username, password):
    """Insert a new user row, storing the md5 hexdigest of the password.

    SECURITY NOTE(review): md5 is not a suitable password hash; a salted KDF
    (bcrypt/scrypt/PBKDF2) should replace it, but changing it would break all
    stored credentials, so the scheme is deliberately left unchanged here.
    """
    global connection_pool
    conn = connection_pool.get_connection()
    cur = conn.cursor()
    cur.execute("USE youtubebot;")
    digest = hashlib.md5(password.encode()).hexdigest()
    cur.execute("INSERT INTO users(username, password) "
                "VALUES(%s, %s)", (username, digest))
    conn.commit()
    cur.close()
    conn.close()
def addVideoGenerator(name, password):
    """Insert a new video-generator row with an md5-hashed password.

    SECURITY NOTE(review): same weak-md5 concern as addUser(); left unchanged
    to stay compatible with existing rows.
    """
    global connection_pool
    conn = connection_pool.get_connection()
    cur = conn.cursor()
    cur.execute("USE youtubebot;")
    digest = hashlib.md5(password.encode()).hexdigest()
    cur.execute("INSERT INTO videogenerators(generatorname, password) "
                "VALUES(%s, %s)", (name, digest))
    conn.commit()
    cur.close()
    conn.close()
def beginDataBaseConnection():
    """Create the module-wide MySQL connection pool from settings."""
    global connection_pool
    pool_config = dict(
        pool_size=32,
        pool_reset_session=True,
        host=settings.database_host,
        user=settings.database_user,
        passwd=settings.database_password,
        auth_plugin='mysql_native_password',
    )
    connection_pool = pooling.MySQLConnectionPool(**pool_config)
    print("Started database connection")
def uploadVid(payload, scriptno):
    """Pickle *payload* and store it as the finalscript blob for *scriptno*.

    :returns: True on success, False if the upload failed.
    """
    global connection_pool
    # Bugfix: if get_connection() itself raised, the original's finally
    # block hit a NameError (connection_object never bound), masking the
    # real error. Pre-bind both names and guard in finally.
    connection_object = None
    cursor = None
    try:
        connection_object = connection_pool.get_connection()
        cursor = connection_object.cursor()
        cursor.execute("USE youtubebot;")
        cursor.execute("set global max_allowed_packet=67108864;")
        connection_object.commit()
        load = pickle.dumps(payload)
        print("%s SERVER attempting to upload script no %s (%s) to database" % (datetime.datetime.now(), scriptno, str((len(load) / 1000000)) + "MB"))
        query = "UPDATE scripts SET finalscript = %s WHERE scriptno = %s " \
                ""
        cursor.execute(query, (load, scriptno))
        connection_object.commit()
    except Exception as e:
        print("Error while connecting to MySQL using Connection pool ", e)
        return False
    finally:
        if connection_object is not None and connection_object.is_connected():
            if cursor is not None:
                cursor.close()
            connection_object.close()
    return True
def updateSubmission(submission):
    """Refresh the stored row for *submission* with its latest reddit data."""
    global connection_pool
    conn = connection_pool.get_connection()
    cur = conn.cursor()
    cur.execute("USE youtubebot;")
    # Comments are pickled into the rawscript blob.
    blob = pickle.dumps(submission.comments)
    params = (submission.title, blob, submission.upvotes,
              submission.downvotes, submission.amountcomments,
              submission.timecreated, submission.timegathered,
              submission.submission_id)
    cur.execute("UPDATE scripts "
                "SET scripttitle = %s, rawscript = %s, ups = %s, downs = %s, num_comments = %s, timecreated = %s, timegathered = %s WHERE submission_id = %s",
                params)
    conn.commit()
    cur.close()
    conn.close()
def addSubmission(submission):
    """Insert a newly gathered reddit submission as a RAW script row.

    The submission's comment tree is pickled into the rawscript blob;
    the row starts in status 'RAW' for later editing.
    """
    global connection_pool
    connection_object = connection_pool.get_connection()
    cursor = connection_object.cursor()
    cursor.execute("USE youtubebot;")
    rawscript = pickle.dumps(submission.comments)
    query = "INSERT INTO scripts(subredditid, submission_id, subreddit, url, timecreated, status, scripttitle, scriptauthor, timegathered, rawscript, ups, downs, num_comments) " \
            "VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    args = ((submission.subredditid), (submission.submission_id),
            (submission.subreddit), (submission.link), (submission.timecreated),
            ("RAW"), submission.title, (submission.author), (submission.timegathered), rawscript,
            submission.upvotes, submission.downvotes, submission.amountcomments)
    cursor.execute(query, args)
    connection_object.commit()
    cursor.close()
    connection_object.close()
def checkValueExists(column, value):
    """Return True if any scripts row has *column* equal to *value*.

    :param column: column name — interpolated into the SQL (identifiers
        cannot be bound as parameters), so it must come from trusted code,
        never from user input.
    :param value: value to match (bound as a query parameter).
    """
    # Bugfix: the original referenced an undefined global `database`
    # (NameError at runtime); use the module connection pool like every
    # other query helper in this file.
    global connection_pool
    connection_object = connection_pool.get_connection()
    cursor = connection_object.cursor()
    cursor.execute("USE youtubebot;")
    query = "SELECT count(*) FROM scripts WHERE %s = %%s;" % column
    cursor.execute(query, (value,))
    result = cursor.fetchall()
    cursor.close()
    connection_object.close()
    return result[0][0] != 0
def getVideoCountFromStatus(status):
    """Return the number of scripts rows with the given status."""
    # Bugfix: the original used the undefined global `database` (NameError);
    # route through connection_pool and bind status as a parameter instead
    # of repr()-interpolating it into the SQL text.
    global connection_pool
    connection_object = connection_pool.get_connection()
    cursor = connection_object.cursor()
    cursor.execute("USE youtubebot;")
    cursor.execute("SELECT count(*) FROM scripts WHERE status=%s;", (status,))
    result = cursor.fetchall()
    cursor.close()
    connection_object.close()
    return result[0][0]
def getRowCount(tablename):
    """Return the row count of *tablename*.

    :param tablename: table identifier — interpolated into the SQL (it
        cannot be bound as a parameter), so it must come from trusted code.
    """
    # Bugfix: the original used the undefined global `database` (NameError);
    # use the module connection pool like the rest of this file.
    global connection_pool
    connection_object = connection_pool.get_connection()
    cursor = connection_object.cursor()
    cursor.execute("USE youtubebot;")
    cursor.execute("select count(*) from %s" % tablename)
    result = cursor.fetchall()
    cursor.close()
    connection_object.close()
    return result[0][0]
| 36.437824
| 279
| 0.653608
|
import mysql
import pickle
import hashlib
import mysql.connector
from mysql.connector import pooling
import settings
import datetime
from time import sleep
def initDatabase():
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("SET sql_notes = 0; ")
cursor.execute("create database IF NOT EXISTS youtubebot")
cursor.execute("USE youtubebot;")
cursor.execute("SET sql_notes = 0; ")
cursor.execute("set global max_allowed_packet=67108864;")
cursor.execute("create table IF NOT EXISTS users (username varchar(70),password varchar(80), status varchar(80));")
cursor.execute("create table IF NOT EXISTS videogenerators (generatorname varchar(70),password varchar(80), status varchar(80));")
cursor.execute("create table IF NOT EXISTS scripts (scriptno int NOT NULL AUTO_INCREMENT, PRIMARY KEY (scriptno), submission_id varchar(70), subredditid varchar(70), subreddit varchar(70), url varchar(2083), timecreated DATETIME,"
"status varchar(70), editedby varchar(70), scripttitle varchar(2083), scriptauthor varchar(70), ups int, downs int, num_comments int, timegathered DATETIME, timeuploaded DATETIME, sceduledupload DATETIME, esttime time, actualtime time, rawscript MEDIUMBLOB, "
"finalscript MEDIUMBLOB);")
cursor.execute("SET sql_notes = 1; ")
connection_pool = None
def login(username, password):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT count(*) FROM users WHERE username = %s AND password = %s;"%(repr(username), repr(password))
cursor.execute(query)
result = cursor.fetchall()
cursor.close()
connection_object.close()
flag = (result[0][0])
if flag == 0:
return False
else:
return True
def getScriptEditInformation():
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT scriptno, status, editedby FROM scripts WHERE status = 'EDITING' AND editedby IS NOT NULL;"
cursor.execute(query)
result = cursor.fetchall()
results = []
for res in result:
results.append(res)
cursor.close()
connection_object.close()
return results
def completeUpload(scriptno, timeuploaded, scedualedrelease):
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "UPDATE scripts " \
"SET status = 'SUCCESSUPLOAD', timeuploaded = %s, sceduledupload = %s WHERE scriptno = %s;"
args = (timeuploaded, scedualedrelease, scriptno)
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def getLastUploadedScripts():
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
now = datetime.datetime.now()
cursor.execute("USE youtubebot;")
query = "SELECT timeuploaded "\
"from scripts "\
"WHERE timeuploaded <= '%s' "\
"ORDER BY timeuploaded DESC "\
"LIMIT 6;" % (now.strftime('%Y-%m-%d %H:%M:%S'))
cursor.execute(query)
result = cursor.fetchall()
results = []
for res in result:
results.append(res)
cursor.close()
connection_object.close()
return results
def getCompletedScripts():
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT scriptno, status, editedby FROM scripts WHERE status = 'COMPLETE' AND editedby IS NOT NULL;"
cursor.execute(query)
result = cursor.fetchall()
results = []
for res in result:
results.append(res)
cursor.close()
connection_object.close()
return results
def getOnlineUsers():
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT username FROM users WHERE status = 'ONLINE';"
cursor.execute(query)
result = cursor.fetchall()
results = []
for res in result:
results.append(res[0])
cursor.close()
connection_object.close()
return results
def updateScriptStatus(status, user, scriptid):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
if user is None:
user = "NULL"
else:
user = user
cursor.execute("USE youtubebot;")
query = "UPDATE scripts " \
"SET status = %s, editedby = %s WHERE scriptno = %s;"
args = (status, user, scriptid)
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def updateScriptStatusById(status, user, scriptid):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
if user is None:
user = "NULL"
else:
user = user
cursor.execute("USE youtubebot;")
query = "UPDATE scripts " \
"SET status = %s, editedby = %s WHERE submission_id = %s;"
args = (status, user, scriptid)
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def updateUserStatus(user, status):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
if status is None:
status = "NULL"
else:
status = repr(status)
cursor.execute("USE youtubebot;")
query = "UPDATE users " \
"SET status = %s WHERE username = %s;"
args = (status, user)
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def getScriptStatus(scriptno):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT status " \
"FROM scripts WHERE scriptno = %s;"%(scriptno)
cursor.execute(query)
result = cursor.fetchall()
cursor.close()
connection_object.close()
return result[0][0]
def getScriptIds():
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT scriptno, submission_id, status " \
"FROM scripts;"
cursor.execute(query)
result = cursor.fetchall()
cursor.close()
connection_object.close()
return result
def getCompletedScripts(back):
global connection_pool
try:
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT scriptno, scripttitle, scriptauthor, ups, finalscript " \
"FROM scripts WHERE status = 'COMPLETE' AND finalscript IS NOT NULL ORDER BY ups DESC " \
"LIMIT %s;"%back
cursor.execute(query)
result = cursor.fetchall()
results = []
for res in result:
scriptno = res[0]
scripttitle = res[1]
author = res[2]
ups = res[3]
scriptpayload = pickle.loads(res[4])
load = (scriptno, scripttitle, author, ups, scriptpayload)
results.append(load)
cursor.close()
connection_object.close()
return results
except Exception as e:
print("Mysql Error with downloading completed scripts")
print(e)
pass
def getScripts(back, filter):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT scriptno, subreddit, scripttitle, scriptauthor, ups, downs, rawscript, submission_id, status, editedby, num_comments " \
"FROM scripts WHERE status = 'RAW' or status = 'EDITING' ORDER BY %s DESC " \
"LIMIT %s;"%(filter, back)
cursor.execute(query)
result = cursor.fetchall()
results = []
for res in result:
scriptno = res[0]
subreddit = res[1]
title = res[2]
author = res[3]
ups = res[4]
downs = res[5]
rawscript = pickle.loads(res[6])
sub_id = res[7]
status = res[8]
editedby = res[9]
num_comments = res[10]
load = (scriptno, subreddit, title, author, ups, downs, rawscript, sub_id, status, editedby, num_comments)
results.append(load)
cursor.close()
connection_object.close()
return results
def addUser(username, password):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "INSERT INTO users(username, password) " \
"VALUES(%s, %s)"
args = (username, hashlib.md5(password.encode()).hexdigest())
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def addVideoGenerator(name, password):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
query = "INSERT INTO videogenerators(generatorname, password) " \
"VALUES(%s, %s)"
args = (name, hashlib.md5(password.encode()).hexdigest())
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def beginDataBaseConnection():
global connection_pool
connection_pool = pooling.MySQLConnectionPool(
pool_size=32,
pool_reset_session=True,
host=settings.database_host,
user=settings.database_user,
passwd=settings.database_password,
auth_plugin='mysql_native_password'
)
print("Started database connection")
def uploadVid(payload, scriptno):
global connection_pool
try:
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
cursor.execute("set global max_allowed_packet=67108864;")
connection_object.commit()
load = pickle.dumps(payload)
print("%s SERVER attempting to upload script no %s (%s) to database" % (datetime.datetime.now(), scriptno, str((len(load) / 1000000)) + "MB"))
query = "UPDATE scripts SET finalscript = %s WHERE scriptno = %s " \
""
args = (load, scriptno)
cursor.execute(query, args)
connection_object.commit()
except Exception as e:
print("Error while connecting to MySQL using Connection pool ", e)
return False
finally:
if (connection_object.is_connected()):
cursor.close()
connection_object.close()
return True
def updateSubmission(submission):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
rawscript = pickle.dumps(submission.comments)
query = "UPDATE scripts " \
"SET scripttitle = %s, rawscript = %s, ups = %s, downs = %s, num_comments = %s, timecreated = %s, timegathered = %s WHERE submission_id = %s"
args = (submission.title, (rawscript), submission.upvotes, submission.downvotes, submission.amountcomments,
submission.timecreated, submission.timegathered, submission.submission_id)
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def addSubmission(submission):
global connection_pool
connection_object = connection_pool.get_connection()
cursor = connection_object.cursor()
cursor.execute("USE youtubebot;")
rawscript = pickle.dumps(submission.comments)
query = "INSERT INTO scripts(subredditid, submission_id, subreddit, url, timecreated, status, scripttitle, scriptauthor, timegathered, rawscript, ups, downs, num_comments) " \
"VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
args = ((submission.subredditid), (submission.submission_id),
(submission.subreddit), (submission.link), (submission.timecreated),
("RAW"), submission.title, (submission.author), (submission.timegathered), rawscript,
submission.upvotes, submission.downvotes, submission.amountcomments)
cursor.execute(query, args)
connection_object.commit()
cursor.close()
connection_object.close()
def checkValueExists(column, value):
global database
cursor = database.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT count(*) FROM scripts WHERE %s = %s;"%(column, repr(value))
cursor.execute(query)
result = cursor.fetchall()
flag = (result[0][0])
if flag == 0:
return False
else:
return True
def getVideoCountFromStatus(status):
global database
cursor = database.cursor()
cursor.execute("USE youtubebot;")
query = "SELECT count(*) FROM scripts WHERE status=%s;"%(repr(status))
cursor.execute(query)
result = cursor.fetchall()
return (result[0][0])
def getRowCount(tablename):
global database
cursor = database.cursor()
cursor.execute("USE youtubebot;")
cursor.execute("select count(*) from %s"%tablename)
result = cursor.fetchall()
return (result[0][0])
| true
| true
|
f7081f1b7f9f4fdf577a65a182a30a34529ab7d2
| 1,918
|
py
|
Python
|
resdiffcheck/diffcheck.py
|
bayotop/resdiffcheck
|
ff8004d424006a19c9fd75d45a621c8a63c4cd54
|
[
"MIT"
] | 4
|
2018-04-30T18:33:24.000Z
|
2020-04-06T14:54:55.000Z
|
resdiffcheck/diffcheck.py
|
codingo/resdiffcheck
|
ff8004d424006a19c9fd75d45a621c8a63c4cd54
|
[
"MIT"
] | null | null | null |
resdiffcheck/diffcheck.py
|
codingo/resdiffcheck
|
ff8004d424006a19c9fd75d45a621c8a63c4cd54
|
[
"MIT"
] | 4
|
2017-12-11T22:09:01.000Z
|
2020-05-24T21:02:19.000Z
|
#!/usr/bin/env python
import argparse
from datetime import date
import hashlib
import logging
import sys
import textwrap
from classes.resource import Resource
from classes.dbmanager import ResourceStorage
from classes.reporter import HtmlReport
import helpers
def get_reports_path(path):
    """Return "<path>/<month>/<day>/" for today's date."""
    now = date.today()
    return "{0}/{1}/{2}/".format(path, now.month, now.day)
def check_differences(resources, report):
    """Fetch each resource and record content changes in *report*.

    A resource counts as changed when the sha256 of its freshly fetched
    content differs from the stored hash; changed resources are updated in
    place via resource.update(). The report is saved once at the end.

    :returns: the list of resources whose content changed.
    """
    report.add_urls(resources)
    changed = []
    for res in resources:
        content = helpers.fetch_resource(res.url)
        if not content:
            continue
        if hashlib.sha256(content).hexdigest() != res.content.hash:
            report.add(res, content)
            res.update(content)
            changed.append(res)
    report.save()
    return changed
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        prog="diffcheck.py",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent("""\
            Resource Difference Checker

            See https://github.com/bayotop/resdiffcheck for more information.
            """))
    parser.add_argument("db", help="database with resources to check")
    parser.add_argument("report_dir", help="target directory for reports (without trailing /)")
    parser.add_argument("-l", "--logfile", default="process.log", help="default ./process.log")
    args = parser.parse_args()

    logging.basicConfig(filename=args.logfile,level=logging.DEBUG)

    # Load the resource database; bail out silently if it cannot be loaded.
    storage = ResourceStorage(args.db)
    if not storage.load():
        sys.exit()

    # Diff every stored resource against its live content and persist any
    # changed entries back to the database.
    report = HtmlReport(get_reports_path(args.report_dir), "diff.html")
    changed_resources = check_differences(storage.getall(), report)
    if changed_resources:
        storage.add_multiple(changed_resources)
| 30.935484
| 96
| 0.690824
|
import argparse
from datetime import date
import hashlib
import logging
import sys
import textwrap
from classes.resource import Resource
from classes.dbmanager import ResourceStorage
from classes.reporter import HtmlReport
import helpers
def get_reports_path(path):
today = date.today()
return "{0}/{1}/{2}/".format(path, today.month, today.day)
def check_differences(resources, report):
report.add_urls(resources)
changed_resources = []
for resource in resources:
actual_content = helpers.fetch_resource(resource.url)
if actual_content:
if (hashlib.sha256(actual_content).hexdigest() != resource.content.hash):
report.add(resource, actual_content)
resource.update(actual_content)
changed_resources.append(resource)
report.save()
return changed_resources
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog="diffcheck.py",
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
Resource Difference Checker
See https://github.com/bayotop/resdiffcheck for more information.
"""))
parser.add_argument("db", help="database with resources to check")
parser.add_argument("report_dir", help="target directory for reports (without trailing /)")
parser.add_argument("-l", "--logfile", default="process.log", help="default ./process.log")
args = parser.parse_args()
logging.basicConfig(filename=args.logfile,level=logging.DEBUG)
storage = ResourceStorage(args.db)
if not storage.load():
sys.exit()
report = HtmlReport(get_reports_path(args.report_dir), "diff.html")
changed_resources = check_differences(storage.getall(), report)
if changed_resources:
storage.add_multiple(changed_resources)
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.