hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f715d167d46ffec6d0102269f517067c5bcb0733 | 821 | py | Python | ch08_dash_standard_components/table_handmade_stylecell.py | Ethan0621/plotly-dash-dev | abe478824db1ee511a2d92f88e5dad49f5d6e27e | [
"MIT"
] | 21 | 2020-10-02T08:17:33.000Z | 2022-03-22T06:10:17.000Z | ch08_dash_standard_components/table_handmade_stylecell.py | Ethan0621/plotly-dash-dev | abe478824db1ee511a2d92f88e5dad49f5d6e27e | [
"MIT"
] | 4 | 2019-07-18T04:43:31.000Z | 2021-10-31T10:30:25.000Z | ch08_dash_standard_components/table_handmade_stylecell.py | Ethan0621/plotly-dash-dev | abe478824db1ee511a2d92f88e5dad49f5d6e27e | [
"MIT"
] | 12 | 2019-07-23T05:36:57.000Z | 2021-07-11T08:57:47.000Z | import dash
import dash_table
app = dash.Dash(__name__)
app.layout = dash_table.DataTable(
fill_width=False,
columns=[
{"name": "number", "id": "number"},
{"name": "region", "id": "area"},
{"name": "tsuyu-iri", "id": "tsuyu-iri"},
],
data=[
{"number": 0, "area": "okinawa", "tsuyu-iri": "5/16"},
{"number": 1, "area": "kyusyu-south", "tsuyu-iri": "5/31"},
{"number": 2, "area": "kyusyu-north", "tsuyu-iri": "6/26"},
{"number": 3, "area": "shikoku", "tsuyu-iri": "6/26"},
{"number": 4, "area": "chugoku", "tsuyu-iri": "6/26"},
{"number": 5, "area": "kinki", "tsuyu-iri": "6/26"},
],
# ➊ テーブル全体のセルのスタイルを定義(横幅、文字の大きさ、文字の揃え位置)
style_cell={"width": 160, "fontSize": 24, "textAlign": "center"},
)
app.run_server(debug=True)
| 31.576923 | 69 | 0.527406 | import dash
import dash_table
app = dash.Dash(__name__)
app.layout = dash_table.DataTable(
fill_width=False,
columns=[
{"name": "number", "id": "number"},
{"name": "region", "id": "area"},
{"name": "tsuyu-iri", "id": "tsuyu-iri"},
],
data=[
{"number": 0, "area": "okinawa", "tsuyu-iri": "5/16"},
{"number": 1, "area": "kyusyu-south", "tsuyu-iri": "5/31"},
{"number": 2, "area": "kyusyu-north", "tsuyu-iri": "6/26"},
{"number": 3, "area": "shikoku", "tsuyu-iri": "6/26"},
{"number": 4, "area": "chugoku", "tsuyu-iri": "6/26"},
{"number": 5, "area": "kinki", "tsuyu-iri": "6/26"},
],
style_cell={"width": 160, "fontSize": 24, "textAlign": "center"},
)
app.run_server(debug=True)
| true | true |
f715d1f15c0f57ec6888a0eaa987c5954b2e0137 | 12,603 | py | Python | ForgeActivity/forgeactivity/main.py | isabella232/allura | 04f14f15a9a9364e18c61f68acdaa241a470186b | [
"Apache-2.0"
] | 113 | 2015-03-25T10:33:37.000Z | 2022-02-16T20:55:06.000Z | ForgeActivity/forgeactivity/main.py | apache/allura | 6184203235ac6f83c943fae7fd3fef54678f9ed7 | [
"Apache-2.0"
] | 4 | 2017-08-04T16:19:07.000Z | 2020-06-08T19:01:33.000Z | ForgeActivity/forgeactivity/main.py | isabella232/allura | 04f14f15a9a9364e18c61f68acdaa241a470186b | [
"Apache-2.0"
] | 36 | 2015-08-14T16:27:39.000Z | 2022-02-16T20:54:35.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import calendar
from datetime import timedelta
from itertools import islice
from bson import ObjectId
from ming.orm import session
from tg import tmpl_context as c, app_globals as g
from tg import request, response
from tg import expose, validate, config
from tg.decorators import with_trailing_slash, without_trailing_slash
from paste.deploy.converters import asbool, asint
from webob import exc
import feedgenerator as FG
from activitystream.storage.mingstorage import Activity
from allura.app import Application
from allura import version
from allura import model as M
from allura.controllers import BaseController
from allura.controllers.rest import AppRestControllerMixin
from allura.lib.security import require_authenticated, require_access
from allura.model.timeline import perm_check, get_activity_object
from allura.lib import helpers as h
from allura.lib.decorators import require_post
from allura.lib.widgets.form_fields import PageList
from allura.ext.user_profile import ProfileSectionBase
from .widgets.follow import FollowToggle
from six.moves import filter
import re
log = logging.getLogger(__name__)
class ForgeActivityApp(Application):
"""Project Activity page for projects."""
__version__ = version.__version__
default_mount_point = 'activity'
max_instances = 0
searchable = False
has_notifications = False
def __init__(self, project, config):
Application.__init__(self, project, config)
self.root = ForgeActivityController(self)
self.api_root = ForgeActivityRestController(self)
def admin_menu(self): # pragma no cover
return []
def install(self, project):
role_anon = M.ProjectRole.by_name('*anonymous')._id
self.config.acl = [
M.ACE.allow(role_anon, 'read'),
]
def uninstall(self, project):
pass # pragma no cover
class W:
follow_toggle = FollowToggle()
page_list = PageList()
class ForgeActivityController(BaseController):
def __init__(self, app, *args, **kw):
super(ForgeActivityController, self).__init__(*args, **kw)
self.app = app
setattr(self, 'feed.atom', self.feed)
setattr(self, 'feed.rss', self.feed)
def _check_security(self):
require_access(c.app, 'read')
def _before(self, *args, **kw):
"""Runs before each request to this controller.
"""
# register the custom css for our tool
g.register_app_css('css/activity.css', app=self.app)
def _get_activities_data(self, **kw):
activity_enabled = asbool(config.get('activitystream.enabled', False))
if not activity_enabled:
raise exc.HTTPNotFound()
c.follow_toggle = W.follow_toggle
c.page_list = W.page_list
if c.project.is_user_project:
followee = c.project.user_project_of
actor_only = followee != c.user
else:
followee = c.project
actor_only = False
following = g.director.is_connected(c.user, followee)
limit, page = h.paging_sanitizer(kw.get('limit', 100), kw.get('page', 0))
extra_limit = limit
# get more in case perm check filters some out
if page == 0 and limit <= 10:
extra_limit = limit * 20
timeline = g.director.get_timeline(followee, page,
limit=extra_limit,
actor_only=actor_only)
filtered_timeline = list(islice(filter(perm_check(c.user), timeline),
0, limit))
if config.get("default_avatar_image"):
for t in filtered_timeline:
if not t.actor.activity_extras.get('icon_url'):
t.actor.activity_extras.icon_url = config['default_avatar_image']
else:
t.actor.activity_extras.icon_url = re.sub(r'([&?])d=[^&]*',
r'\1d={}'.format(config["default_avatar_image"]),
t.actor.activity_extras.icon_url)
session(t).expunge(t) # don't save back this change
if extra_limit == limit:
# if we didn't ask for extra, then we expect there's more if we got all we asked for
has_more = len(timeline) == limit
else:
# if we did ask for extra, check filtered result
has_more = len(filtered_timeline) == limit
return dict(
followee=followee,
following=following,
timeline=filtered_timeline,
noindex=False if filtered_timeline else True,
page=page,
limit=limit,
has_more=has_more,
actor_only=actor_only)
@expose('jinja:forgeactivity:templates/index.html')
@with_trailing_slash
def index(self, **kw):
return self._get_activities_data(**kw)
@expose('jinja:forgeactivity:templates/timeline.html')
def pjax(self, **kw):
return self._get_activities_data(**kw)
@without_trailing_slash
@expose()
def feed(self, **kw):
data = self._get_activities_data(**kw)
response.headers['Content-Type'] = str('')
response.content_type = str('application/xml')
d = {
'title': 'Activity for %s' % data['followee'].activity_name,
'link': h.absurl(self.app.url),
'description': 'Recent activity for %s' % (
data['followee'].activity_name),
'language': 'en',
}
if request.environ['PATH_INFO'].endswith(str('.atom')):
feed = FG.Atom1Feed(**d)
else:
feed = FG.Rss201rev2Feed(**d)
for t in data['timeline']:
url_id = h.absurl(t.obj.activity_url) # try to keep this consistent over time (not url-quoted)
url = h.absurl(h.urlquote_path_only(t.obj.activity_url))
feed.add_item(title='%s %s %s%s' % (
t.actor.activity_name,
t.verb,
t.obj.activity_name,
' on %s' % t.target.activity_name if t.target.activity_name else '',
),
link=url,
pubdate=t.published,
description=h.strip_bad_unicode(t.obj.activity_extras.get('summary', '')),
unique_id=url_id,
author_name=t.actor.activity_name,
author_link=h.absurl(t.actor.activity_url))
return feed.writeString('utf-8')
@require_post()
@expose('json:')
@validate(W.follow_toggle)
def follow(self, follow, **kw):
activity_enabled = asbool(config.get('activitystream.enabled', False))
if not activity_enabled:
raise exc.HTTPNotFound()
require_authenticated()
followee = c.project
if c.project.is_user_project:
followee = c.project.user_project_of
if c.user == followee:
return dict(
success=False,
message='Cannot follow yourself')
try:
if follow:
g.director.connect(c.user, followee)
else:
g.director.disconnect(c.user, followee)
except Exception as e:
log.exception('Unexpected error following user')
return dict(
success=False,
message='Unexpected error: %s' % e)
return dict(
success=True,
message=W.follow_toggle.success_message(follow),
following=follow)
@require_post()
@expose('json:')
def delete_item(self, activity_id, **kwargs):
require_access(c.project.neighborhood, 'admin')
activity = Activity.query.get(_id=ObjectId(activity_id))
if not activity:
raise exc.HTTPGone
# find other copies of this activity on other user/projects timelines
# but only within a small time window, so we can do efficient searching
activity_ts = activity._id.generation_time
time_window = timedelta(hours=1)
all_copies = Activity.query.find({
'_id': {
'$gt': ObjectId.from_datetime(activity_ts - time_window),
'$lt': ObjectId.from_datetime(activity_ts + time_window),
},
'obj': activity.obj,
'target': activity.target,
'actor': activity.actor,
'verb': activity.verb,
'tags': activity.tags,
}).all()
log.info('Deleting %s copies of activity record: %s %s %s', len(all_copies),
activity.actor.activity_url, activity.verb, activity.obj.activity_url)
for activity in all_copies:
activity.query.delete()
return {'success': True}
class ForgeActivityRestController(BaseController, AppRestControllerMixin):
def __init__(self, app, *args, **kw):
super(ForgeActivityRestController, self).__init__(*args, **kw)
self.app = app
def _check_security(self):
require_access(c.app, 'read')
@expose('json:')
def index(self, **kw):
data = self.app.root._get_activities_data(**kw)
return {
'following': data['following'],
'followee': {
'activity_name': data['followee'].activity_name,
'activity_url': data['followee'].url(),
'activity_extras': {},
},
'timeline': [{
'published': calendar.timegm(a.published.timetuple()) * 1000,
'actor': a.actor._deinstrument(),
'verb': a.verb,
'obj': a.obj._deinstrument(),
'target': a.target._deinstrument(),
'tags': a.tags._deinstrument(),
} for a in data['timeline']],
}
class ForgeActivityProfileSection(ProfileSectionBase):
template = 'forgeactivity:templates/widgets/profile_section.html'
def __init__(self, *a, **kw):
super(ForgeActivityProfileSection, self).__init__(*a, **kw)
self.activity_app = self.project.app_instance('activity')
def check_display(self):
app_installed = self.activity_app is not None
activity_enabled = asbool(config.get('activitystream.enabled', False))
return app_installed and activity_enabled
def prepare_context(self, context):
full_timeline = g.director.get_timeline(
self.user, page=0, limit=100,
actor_only=True,
)
filtered_timeline = list(islice(filter(perm_check(c.user), full_timeline),
0, 8))
for activity in filtered_timeline:
# Get the project for the activity.obj so we can use it in the
# template. Expunge first so Ming doesn't try to flush the attr
# we create to temporarily store the project.
#
# The get_activity_object() calls are cheap, pulling from
# the session identity map instead of mongo since identical
# calls are made by perm_check() above.
session(activity).expunge(activity)
activity_obj = get_activity_object(activity.obj)
activity.obj.project = getattr(activity_obj, 'project', None)
context.update({
'follow_toggle': W.follow_toggle,
'following': g.director.is_connected(c.user, self.user),
'timeline': filtered_timeline,
'activity_app': self.activity_app,
})
g.register_js('activity_js/follow.js')
return context
| 38.42378 | 111 | 0.611521 |
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import calendar
from datetime import timedelta
from itertools import islice
from bson import ObjectId
from ming.orm import session
from tg import tmpl_context as c, app_globals as g
from tg import request, response
from tg import expose, validate, config
from tg.decorators import with_trailing_slash, without_trailing_slash
from paste.deploy.converters import asbool, asint
from webob import exc
import feedgenerator as FG
from activitystream.storage.mingstorage import Activity
from allura.app import Application
from allura import version
from allura import model as M
from allura.controllers import BaseController
from allura.controllers.rest import AppRestControllerMixin
from allura.lib.security import require_authenticated, require_access
from allura.model.timeline import perm_check, get_activity_object
from allura.lib import helpers as h
from allura.lib.decorators import require_post
from allura.lib.widgets.form_fields import PageList
from allura.ext.user_profile import ProfileSectionBase
from .widgets.follow import FollowToggle
from six.moves import filter
import re
log = logging.getLogger(__name__)
class ForgeActivityApp(Application):
__version__ = version.__version__
default_mount_point = 'activity'
max_instances = 0
searchable = False
has_notifications = False
def __init__(self, project, config):
Application.__init__(self, project, config)
self.root = ForgeActivityController(self)
self.api_root = ForgeActivityRestController(self)
def admin_menu(self):
return []
def install(self, project):
role_anon = M.ProjectRole.by_name('*anonymous')._id
self.config.acl = [
M.ACE.allow(role_anon, 'read'),
]
def uninstall(self, project):
pass
class W:
follow_toggle = FollowToggle()
page_list = PageList()
class ForgeActivityController(BaseController):
def __init__(self, app, *args, **kw):
super(ForgeActivityController, self).__init__(*args, **kw)
self.app = app
setattr(self, 'feed.atom', self.feed)
setattr(self, 'feed.rss', self.feed)
def _check_security(self):
require_access(c.app, 'read')
def _before(self, *args, **kw):
g.register_app_css('css/activity.css', app=self.app)
def _get_activities_data(self, **kw):
activity_enabled = asbool(config.get('activitystream.enabled', False))
if not activity_enabled:
raise exc.HTTPNotFound()
c.follow_toggle = W.follow_toggle
c.page_list = W.page_list
if c.project.is_user_project:
followee = c.project.user_project_of
actor_only = followee != c.user
else:
followee = c.project
actor_only = False
following = g.director.is_connected(c.user, followee)
limit, page = h.paging_sanitizer(kw.get('limit', 100), kw.get('page', 0))
extra_limit = limit
if page == 0 and limit <= 10:
extra_limit = limit * 20
timeline = g.director.get_timeline(followee, page,
limit=extra_limit,
actor_only=actor_only)
filtered_timeline = list(islice(filter(perm_check(c.user), timeline),
0, limit))
if config.get("default_avatar_image"):
for t in filtered_timeline:
if not t.actor.activity_extras.get('icon_url'):
t.actor.activity_extras.icon_url = config['default_avatar_image']
else:
t.actor.activity_extras.icon_url = re.sub(r'([&?])d=[^&]*',
r'\1d={}'.format(config["default_avatar_image"]),
t.actor.activity_extras.icon_url)
session(t).expunge(t)
if extra_limit == limit:
# if we didn't ask for extra, then we expect there's more if we got all we asked for
has_more = len(timeline) == limit
else:
# if we did ask for extra, check filtered result
has_more = len(filtered_timeline) == limit
return dict(
followee=followee,
following=following,
timeline=filtered_timeline,
noindex=False if filtered_timeline else True,
page=page,
limit=limit,
has_more=has_more,
actor_only=actor_only)
@expose('jinja:forgeactivity:templates/index.html')
@with_trailing_slash
def index(self, **kw):
return self._get_activities_data(**kw)
@expose('jinja:forgeactivity:templates/timeline.html')
def pjax(self, **kw):
return self._get_activities_data(**kw)
@without_trailing_slash
@expose()
def feed(self, **kw):
data = self._get_activities_data(**kw)
response.headers['Content-Type'] = str('')
response.content_type = str('application/xml')
d = {
'title': 'Activity for %s' % data['followee'].activity_name,
'link': h.absurl(self.app.url),
'description': 'Recent activity for %s' % (
data['followee'].activity_name),
'language': 'en',
}
if request.environ['PATH_INFO'].endswith(str('.atom')):
feed = FG.Atom1Feed(**d)
else:
feed = FG.Rss201rev2Feed(**d)
for t in data['timeline']:
url_id = h.absurl(t.obj.activity_url) # try to keep this consistent over time (not url-quoted)
url = h.absurl(h.urlquote_path_only(t.obj.activity_url))
feed.add_item(title='%s %s %s%s' % (
t.actor.activity_name,
t.verb,
t.obj.activity_name,
' on %s' % t.target.activity_name if t.target.activity_name else '',
),
link=url,
pubdate=t.published,
description=h.strip_bad_unicode(t.obj.activity_extras.get('summary', '')),
unique_id=url_id,
author_name=t.actor.activity_name,
author_link=h.absurl(t.actor.activity_url))
return feed.writeString('utf-8')
@require_post()
@expose('json:')
@validate(W.follow_toggle)
def follow(self, follow, **kw):
activity_enabled = asbool(config.get('activitystream.enabled', False))
if not activity_enabled:
raise exc.HTTPNotFound()
require_authenticated()
followee = c.project
if c.project.is_user_project:
followee = c.project.user_project_of
if c.user == followee:
return dict(
success=False,
message='Cannot follow yourself')
try:
if follow:
g.director.connect(c.user, followee)
else:
g.director.disconnect(c.user, followee)
except Exception as e:
log.exception('Unexpected error following user')
return dict(
success=False,
message='Unexpected error: %s' % e)
return dict(
success=True,
message=W.follow_toggle.success_message(follow),
following=follow)
@require_post()
@expose('json:')
def delete_item(self, activity_id, **kwargs):
require_access(c.project.neighborhood, 'admin')
activity = Activity.query.get(_id=ObjectId(activity_id))
if not activity:
raise exc.HTTPGone
# find other copies of this activity on other user/projects timelines
# but only within a small time window, so we can do efficient searching
activity_ts = activity._id.generation_time
time_window = timedelta(hours=1)
all_copies = Activity.query.find({
'_id': {
'$gt': ObjectId.from_datetime(activity_ts - time_window),
'$lt': ObjectId.from_datetime(activity_ts + time_window),
},
'obj': activity.obj,
'target': activity.target,
'actor': activity.actor,
'verb': activity.verb,
'tags': activity.tags,
}).all()
log.info('Deleting %s copies of activity record: %s %s %s', len(all_copies),
activity.actor.activity_url, activity.verb, activity.obj.activity_url)
for activity in all_copies:
activity.query.delete()
return {'success': True}
class ForgeActivityRestController(BaseController, AppRestControllerMixin):
def __init__(self, app, *args, **kw):
super(ForgeActivityRestController, self).__init__(*args, **kw)
self.app = app
def _check_security(self):
require_access(c.app, 'read')
@expose('json:')
def index(self, **kw):
data = self.app.root._get_activities_data(**kw)
return {
'following': data['following'],
'followee': {
'activity_name': data['followee'].activity_name,
'activity_url': data['followee'].url(),
'activity_extras': {},
},
'timeline': [{
'published': calendar.timegm(a.published.timetuple()) * 1000,
'actor': a.actor._deinstrument(),
'verb': a.verb,
'obj': a.obj._deinstrument(),
'target': a.target._deinstrument(),
'tags': a.tags._deinstrument(),
} for a in data['timeline']],
}
class ForgeActivityProfileSection(ProfileSectionBase):
template = 'forgeactivity:templates/widgets/profile_section.html'
def __init__(self, *a, **kw):
super(ForgeActivityProfileSection, self).__init__(*a, **kw)
self.activity_app = self.project.app_instance('activity')
def check_display(self):
app_installed = self.activity_app is not None
activity_enabled = asbool(config.get('activitystream.enabled', False))
return app_installed and activity_enabled
def prepare_context(self, context):
full_timeline = g.director.get_timeline(
self.user, page=0, limit=100,
actor_only=True,
)
filtered_timeline = list(islice(filter(perm_check(c.user), full_timeline),
0, 8))
for activity in filtered_timeline:
# Get the project for the activity.obj so we can use it in the
# template. Expunge first so Ming doesn't try to flush the attr
session(activity).expunge(activity)
activity_obj = get_activity_object(activity.obj)
activity.obj.project = getattr(activity_obj, 'project', None)
context.update({
'follow_toggle': W.follow_toggle,
'following': g.director.is_connected(c.user, self.user),
'timeline': filtered_timeline,
'activity_app': self.activity_app,
})
g.register_js('activity_js/follow.js')
return context
| true | true |
f715d278f72818778ed7a4eac29242b89126d982 | 1,827 | py | Python | setup/authorization.py | LenoxFro/spotify-save-discover-weekly | 1fecd101ad21a96dbb8fef6b402358386e0e5687 | [
"MIT"
] | 8 | 2021-03-31T22:05:56.000Z | 2022-01-01T22:42:59.000Z | setup/authorization.py | LenoxFro/spotify-save-discover-weekly | 1fecd101ad21a96dbb8fef6b402358386e0e5687 | [
"MIT"
] | null | null | null | setup/authorization.py | LenoxFro/spotify-save-discover-weekly | 1fecd101ad21a96dbb8fef6b402358386e0e5687 | [
"MIT"
] | 10 | 2021-03-31T22:11:58.000Z | 2022-03-31T10:55:36.000Z | import urllib.parse
from urllib.parse import parse_qs
from dotenv import load_dotenv, find_dotenv
import requests
import base64
import os
load_dotenv(find_dotenv())
CLIENT_ID = os.environ.get("CLIENT_ID")
CLIENT_SECRET = os.environ.get("CLIENT_SECRET")
REDIRECT_URI = os.environ.get("REDIRECT_URI")
OAUTH_AUTHORIZE_URL = "https://accounts.spotify.com/authorize"
OAUTH_TOKEN_URL = "https://accounts.spotify.com/api/token"
SCOPE = "user-library-read playlist-modify-public playlist-modify-private playlist-read-private playlist-read-collaborative"
def get_auth_url():
payload = {
"client_id": CLIENT_ID,
"response_type": "code",
"redirect_uri": REDIRECT_URI,
"scope": SCOPE
}
urlparams = urllib.parse.urlencode(payload)
return ("%s?%s" % (OAUTH_AUTHORIZE_URL, urlparams))
def get_refresh_token(code):
payload = {
"grant_type": "authorization_code",
"code": code,
"redirect_uri": REDIRECT_URI
}
encoded_client = base64.b64encode((CLIENT_ID + ":" + CLIENT_SECRET).encode('ascii'))
headers = {"Authorization": "Basic %s" % encoded_client.decode('ascii')}
response = requests.post(OAUTH_TOKEN_URL, data=payload, headers=headers)
return response.json()['refresh_token']
def authorization():
if CLIENT_ID is None or CLIENT_SECRET is None or REDIRECT_URI is None:
print("Environment variables have not been loaded!")
return
print("Open this link in your browser: %s \n" % get_auth_url() )
redirected_url = input("Enter URL you was redirected to (after accepting authorization): ")
parsed_url = urllib.parse.urlparse(redirected_url)
code = parse_qs(parsed_url.query)['code'][0]
refresh_token = get_refresh_token(code)
print("\n Your refresh token is: %s" % refresh_token)
authorization()
| 34.471698 | 124 | 0.712644 | import urllib.parse
from urllib.parse import parse_qs
from dotenv import load_dotenv, find_dotenv
import requests
import base64
import os
load_dotenv(find_dotenv())
CLIENT_ID = os.environ.get("CLIENT_ID")
CLIENT_SECRET = os.environ.get("CLIENT_SECRET")
REDIRECT_URI = os.environ.get("REDIRECT_URI")
OAUTH_AUTHORIZE_URL = "https://accounts.spotify.com/authorize"
OAUTH_TOKEN_URL = "https://accounts.spotify.com/api/token"
SCOPE = "user-library-read playlist-modify-public playlist-modify-private playlist-read-private playlist-read-collaborative"
def get_auth_url():
payload = {
"client_id": CLIENT_ID,
"response_type": "code",
"redirect_uri": REDIRECT_URI,
"scope": SCOPE
}
urlparams = urllib.parse.urlencode(payload)
return ("%s?%s" % (OAUTH_AUTHORIZE_URL, urlparams))
def get_refresh_token(code):
payload = {
"grant_type": "authorization_code",
"code": code,
"redirect_uri": REDIRECT_URI
}
encoded_client = base64.b64encode((CLIENT_ID + ":" + CLIENT_SECRET).encode('ascii'))
headers = {"Authorization": "Basic %s" % encoded_client.decode('ascii')}
response = requests.post(OAUTH_TOKEN_URL, data=payload, headers=headers)
return response.json()['refresh_token']
def authorization():
if CLIENT_ID is None or CLIENT_SECRET is None or REDIRECT_URI is None:
print("Environment variables have not been loaded!")
return
print("Open this link in your browser: %s \n" % get_auth_url() )
redirected_url = input("Enter URL you was redirected to (after accepting authorization): ")
parsed_url = urllib.parse.urlparse(redirected_url)
code = parse_qs(parsed_url.query)['code'][0]
refresh_token = get_refresh_token(code)
print("\n Your refresh token is: %s" % refresh_token)
authorization()
| true | true |
f715d41a7338077da2b9062cb81f2988564db3d0 | 4,328 | py | Python | gouda/bot.py | fxcqz/gouda3 | 079bcb52f6357dc7704ec845d916f961a18d59cd | [
"MIT"
] | null | null | null | gouda/bot.py | fxcqz/gouda3 | 079bcb52f6357dc7704ec845d916f961a18d59cd | [
"MIT"
] | 10 | 2016-03-22T11:52:19.000Z | 2016-03-27T16:19:06.000Z | gouda/bot.py | fxcqz/gouda3 | 079bcb52f6357dc7704ec845d916f961a18d59cd | [
"MIT"
] | null | null | null | from collections import OrderedDict
import importlib
from peewee import SqliteDatabase
from .settings import Settings
DATABASE = SqliteDatabase("gouda.db")
class Gouda(object):
def __init__(self):
self.settings = Settings("config/config.json")
self.name = self.settings.core['nick']
# use ordered dict for definite evaluation
# i.e. we know logs will always be saved first since core is loaded
# first
self.modules = OrderedDict()
self.mains = OrderedDict()
self.load_modules()
self.commands = self.load_commands()
self.db = DATABASE
self.db.connect()
def load_module(self, module):
try:
self.modules[module] = importlib.import_module('gouda.modules.%s.main' % module)
self.commands = self.load_commands()
if hasattr(self.modules[module], "run_schema"):
self.modules[module].run_schema()
if hasattr(self.modules[module], "main"):
self.mains[module] = getattr(self.modules[module], "main")
except ImportError as e:
print("Oh no, an import error:", e)
def load_modules(self):
""" only run on init, innit """
module_list = self.settings['modules']
for module in module_list:
self.load_module(module)
def manage_modules(self, loads, unloads, reloads):
for load in loads:
if load in self.modules:
reloads.append(load)
else:
self.load_module(load)
for unload in unloads:
if unload in self.modules:
self.modules.pop(unload, None)
for reload_ in reloads:
if reload_ in self.modules:
importlib.reload(self.modules[reload_])
if reload_ in self.mains:
self.mains[reload_] = self.modules[reload_].main
self.commands = self.load_commands()
def load_commands(self):
cmds = {}
for name, module in self.modules.items():
try:
commands = getattr(module, "commands")
for command in commands:
if command.lower() != 'none':
cmds[command] = name
except AttributeError:
# no command list implemented in module
pass
return cmds
def get_loads(self, kind, mod):
loads, unloads, reloads = [], [], []
if kind == "load":
loads.append(mod)
elif kind == "unload":
unloads.append(mod)
elif kind == "reload":
reloads.append(mod)
return loads, unloads, reloads
def run(self, conn):
kwargs = {'writer': conn.message, 'db': self.db, 'log': True, 'name': self.name}
while True:
nick, line = conn.read()
loads, unloads, reloads = [], [], []
if line and ''.join(line) != '':
offset = 0
if line[0][:-1] == self.name and len(line) > 1:
offset = 1
# addressed to the bot
kwargs['log'] = False
if len(line) > 2:
loads, unloads, reloads = self.get_loads(line[offset], line[offset+1])
if not (loads or unloads or reloads):
# nothing *loaded, try commands
try:
module = self.commands[line[1].lower()]
func = getattr(self.modules[module], line[1])
msg = line[offset:]
func(
message=msg,
commands=self.commands.keys(),
modules=self.modules.keys(),
**kwargs
)
except Exception as e:
# pretty much anything can fuck it up
print(e)
# run anything else...
for func in self.mains.values():
func(line=line, nick=nick, **kwargs)
# load/unload/reload
self.manage_modules(loads, unloads, reloads)
kwargs['log'] = True
| 37.634783 | 94 | 0.501848 | from collections import OrderedDict
import importlib
from peewee import SqliteDatabase
from .settings import Settings
DATABASE = SqliteDatabase("gouda.db")
class Gouda(object):
    """Bot core: loads plug-in modules, maps chat commands to module
    functions, and drives the main read/dispatch loop over a connection."""
    def __init__(self):
        # Per-install settings; core['nick'] is the bot's own name.
        self.settings = Settings("config/config.json")
        self.name = self.settings.core['nick']
        # module name -> imported module object (insertion order preserved)
        self.modules = OrderedDict()
        # module name -> that module's main() hook, called on every line
        self.mains = OrderedDict()
        self.load_modules()
        self.commands = self.load_commands()
        self.db = DATABASE
        self.db.connect()
    def load_module(self, module):
        """Import gouda.modules.<module>.main and register its hooks.

        Runs the module's run_schema() (DB table setup) if present and
        records its main() entry point.  Import failures are reported to
        stdout instead of raised so one bad plug-in cannot kill the bot.
        """
        try:
            self.modules[module] = importlib.import_module('gouda.modules.%s.main' % module)
            self.commands = self.load_commands()
            if hasattr(self.modules[module], "run_schema"):
                self.modules[module].run_schema()
            if hasattr(self.modules[module], "main"):
                self.mains[module] = getattr(self.modules[module], "main")
        except ImportError as e:
            print("Oh no, an import error:", e)
    def load_modules(self):
        """Load every module listed in the settings file."""
        module_list = self.settings['modules']
        for module in module_list:
            self.load_module(module)
    def manage_modules(self, loads, unloads, reloads):
        """Apply load/unload/reload requests gathered during a run() pass.

        Loading an already-loaded module is treated as a reload.
        NOTE(review): this mutates the caller's `reloads` list in that
        case — confirm callers do not reuse the list afterwards.
        """
        for load in loads:
            if load in self.modules:
                reloads.append(load)
            else:
                self.load_module(load)
        for unload in unloads:
            if unload in self.modules:
                self.modules.pop(unload, None)
        for reload_ in reloads:
            if reload_ in self.modules:
                importlib.reload(self.modules[reload_])
                if reload_ in self.mains:
                    self.mains[reload_] = self.modules[reload_].main
        self.commands = self.load_commands()
    def load_commands(self):
        """Build the command table: command name -> owning module name.

        Modules expose a `commands` iterable; a literal 'none' entry
        (any case) is skipped, and modules without `commands` are ignored.
        """
        cmds = {}
        for name, module in self.modules.items():
            try:
                commands = getattr(module, "commands")
                for command in commands:
                    if command.lower() != 'none':
                        cmds[command] = name
            except AttributeError:
                pass
        return cmds
    def get_loads(self, kind, mod):
        """Translate a ("load"|"unload"|"reload", module) pair into the
        three request lists consumed by manage_modules()."""
        loads, unloads, reloads = [], [], []
        if kind == "load":
            loads.append(mod)
        elif kind == "unload":
            unloads.append(mod)
        elif kind == "reload":
            reloads.append(mod)
        return loads, unloads, reloads
    def run(self, conn):
        """Main loop: read (nick, line) token lists from *conn* forever.

        Lines addressed to the bot ("<nick>: ...") may be module
        management commands or registered commands; every non-empty line
        is also fed to each loaded module's main() hook.
        """
        kwargs = {'writer': conn.message, 'db': self.db, 'log': True, 'name': self.name}
        while True:
            nick, line = conn.read()
            loads, unloads, reloads = [], [], []
            if line and ''.join(line) != '':
                offset = 0
                # line[0] ends with a separator (e.g. "gouda:"); strip the
                # last char and check whether the bot is being addressed.
                if line[0][:-1] == self.name and len(line) > 1:
                    offset = 1
                    kwargs['log'] = False
                    if len(line) > 2:
                        loads, unloads, reloads = self.get_loads(line[offset], line[offset+1])
                    if not (loads or unloads or reloads):
                        try:
                            # NOTE(review): indexes line[1] here but slices
                            # line[offset:] below — equivalent only while
                            # offset == 1; confirm intent.
                            module = self.commands[line[1].lower()]
                            func = getattr(self.modules[module], line[1])
                            msg = line[offset:]
                            func(
                                message=msg,
                                commands=self.commands.keys(),
                                modules=self.modules.keys(),
                                **kwargs
                            )
                        except Exception as e:
                            # plug-in code can raise anything; keep the bot
                            # alive and report the error to stdout.
                            print(e)
                # run every module's main() hook on the raw line
                for func in self.mains.values():
                    func(line=line, nick=nick, **kwargs)
                # apply any load/unload/reload requests from this pass
                self.manage_modules(loads, unloads, reloads)
                kwargs['log'] = True
| true | true |
f715d44ead41064419d591910c9d8e9251cb95b1 | 4,635 | py | Python | 3 experiments_confidence/batch/e2 (experiment and chance scores) (cpj).py | nmningmei/metacognition | 734082e247cc7fc9d277563e2676e10692617a3f | [
"MIT"
] | 3 | 2019-07-09T15:37:46.000Z | 2019-07-17T16:28:02.000Z | 3 experiments_confidence/batch/e2 (experiment and chance scores) (cpj).py | nmningmei/metacognition | 734082e247cc7fc9d277563e2676e10692617a3f | [
"MIT"
] | null | null | null | 3 experiments_confidence/batch/e2 (experiment and chance scores) (cpj).py | nmningmei/metacognition | 734082e247cc7fc9d277563e2676e10692617a3f | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 16:07:58 2018
@author: nmei
in exp2 (e2) there were 3 possible awareness ratings ( (e.g. 1- no experience, 2 brief glimpse 3 almost clear or clear perception)
BUT if can make a binary classification by focussing on 1 and 2 which are the majority of the trials.
"""
if __name__ == '__main__':
    import os
    import pandas as pd
    import numpy as np
    import utils
    # define result saving directory
    dir_saving = 'results_e2'
    if not os.path.exists(dir_saving):
        os.mkdir(dir_saving)
    # load the subject-level data; the relative path differs depending on
    # whether the script runs from the batch folder or from its parent.
    try:
        df1 = pd.read_csv('e2.csv').iloc[:, 1:]
    except OSError:  # narrowed from a bare except: only catch missing-file errors
        df1 = pd.read_csv('../e2.csv').iloc[:, 1:]
    df = df1.copy()
    # select the columns that I need
    df = df[['blocks.thisN',
             'trials.thisN',
             'key_resp_2.keys',
             'resp.corr',
             'resp_mrating.keys',
             'participant', ]]
    # rename the columns
    df.columns = ['blocks',
                  'trials',
                  'awareness',
                  'correctness',
                  'confidence',
                  'participant', ]
    # preallocate the result structure (one list per output column)
    results = dict(sub=[],
                   model=[],
                   score=[],
                   window=[],
                   correctness=[],
                   awareness=[],
                   confidence=[],
                   chance=[],
                   )
    # fix the random seed so the chance-level estimates are reproducible
    np.random.seed(12345)
    # use correctness, awareness and confidence judgements as features
    feature_names = [
        'correctness',
        'awareness',
        'confidence',
    ]
    target_name = 'confidence'
    experiment = 'e2'
    # some variables must be rescaled to a preferable 0-1 range
    name_for_scale = ['awareness']
    # available participants: ack, cc, ck, cpj, em, es, fd, jmac, lidia,
    # ls, mimi, pr, pss, sva, tj
    participant = 'cpj'
    df_sub = df[df['participant'] == participant]
    # keep only 1 (no experience) and 2 (brief glimpse) so awareness is
    # binary; rating 3 (clear perception) is rare in e2
    df_sub = df_sub[df_sub['awareness'] != 3]
    # for 1-back to 4-back windows
    for n_back in np.arange(1, 5):
        # empirical decoding score
        results = utils.classification(
            df_sub.dropna(),                  # drop rows with missing responses
            feature_names,                    # feature columns
            target_name,                      # target column
            results,                          # the accumulating result structure
            participant,                      # participant's name
            experiment,                       # experiment name
            window=n_back,                    # N-back window size
            chance=False,                     # empirical score, not chance level
            name_for_scale=name_for_scale,    # variables to rescale
        )
        # empirical chance level (label-shuffled baseline)
        results = utils.classification(
            df_sub.dropna(),
            feature_names,
            target_name,
            results,
            participant,
            experiment,
            window=n_back,
            chance=True,                      # estimate the empirical chance level
            name_for_scale=name_for_scale,
        )
    results_to_save = pd.DataFrame(results)
    results_to_save.to_csv(os.path.join(dir_saving, '{}.csv'.format(participant)))
| 31.965517 | 159 | 0.408846 |
if __name__ == '__main__':
import os
import pandas as pd
import numpy as np
import utils
dir_saving = 'results_e2'
if not os.path.exists(dir_saving):
os.mkdir(dir_saving)
try:
df1 = pd.read_csv('e2.csv').iloc[:,1:]
except:
df1 = pd.read_csv('../e2.csv').iloc[:,1:]
df = df1.copy()
df = df[['blocks.thisN',
'trials.thisN',
'key_resp_2.keys',
'resp.corr',
'resp_mrating.keys',
'participant',]]
df.columns = ['blocks',
'trials',
'awareness',
'correctness',
'confidence',
'participant',]
results = dict(sub = [],
model = [],
score = [],
window = [],
correctness = [],
awareness = [],
confidence = [],
chance = [],
)
np.random.seed(12345)
feature_names = [
'correctness',
'awareness',
'confidence',
]
target_name = 'confidence'
experiment = 'e2'
name_for_scale = ['awareness']
participant = 'cpj'
df_sub = df[df['participant'] == participant]
# pick 1- no experience, 2 brief glimpse for binary classification
df_sub = df_sub[df_sub['awareness'] != 3]
# for 1-back to 4-back
for n_back in np.arange(1,5):
# experiment score
results = utils.classification(
df_sub.dropna(), # take out nan rows
feature_names, # feature columns
target_name, # target column
results, # the saving structure
participant, # participant's name
experiment,
window = n_back,
chance = False,
name_for_scale = name_for_scale
)
results = utils.classification(
df_sub.dropna(),
feature_names,
target_name,
results,
participant,
experiment,
window = n_back,
chance = True,
name_for_scale = name_for_scale
)
results_to_save = pd.DataFrame(results)
results_to_save.to_csv(os.path.join(dir_saving,'{}.csv'.format(participant)))
| true | true |
f715d5acbe3a069259390dee428b7666dca26c08 | 9,706 | py | Python | src/intermediate_representation/sem_utils.py | ckosten/ValueNet4SPARQL | de320a2f0e1a4c5a6c0e5cc79057dda9901046e8 | [
"Apache-2.0"
] | null | null | null | src/intermediate_representation/sem_utils.py | ckosten/ValueNet4SPARQL | de320a2f0e1a4c5a6c0e5cc79057dda9901046e8 | [
"Apache-2.0"
] | null | null | null | src/intermediate_representation/sem_utils.py | ckosten/ValueNet4SPARQL | de320a2f0e1a4c5a6c0e5cc79057dda9901046e8 | [
"Apache-2.0"
] | 1 | 2021-09-23T13:02:45.000Z | 2021-09-23T13:02:45.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# -*- coding: utf-8 -*-
"""
# @Time : 2019/5/27
# @Author : Jiaqi&Zecheng
# @File : sem_utils.py
# @Software: PyCharm
"""
import os
import json
import re as regex
import spacy
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
def partial_match(query, table_name):
    """Lemma-level containment test between *query* and *table_name*.

    NOTE(review): after the rebinds, `query` is a *list* of lemmas while
    `table_name` is a list of lemma *strings*, so `query in table_name`
    compares a list against strings and appears to always be False —
    possibly `query[0] in table_name` was intended; confirm against the
    upstream IRNet implementation before changing.
    """
    query = [token.lemma_ for token in nlp(query)]
    table_name = [nlp(token)[0].lemma_ for token in table_name]
    if query in table_name:
        return True
    return False
def is_partial_match(query, table_names):
    """Return the lemmatised table name that uniquely contains the lemma
    of *query*; False when zero or more than one name matches."""
    lemma = nlp(query)[0].lemma_
    lemmatised = [[token.lemma_ for token in nlp(names)] for names in table_names]
    matches = [names for names in lemmatised if lemma in names]
    if len(matches) == 1:
        return matches[0]
    return False
def multi_option(question, q_ind, names, N):
    """Scan the N tokens following position *q_ind* and return the first
    unique partial table-name match, or False if none is found."""
    upper = min(q_ind + N + 1, len(question))
    for i in range(q_ind + 1, upper):
        match = is_partial_match(question[i][0], names)
        if match is not False:
            return match
    return False
def multi_equal(question, q_ind, names, N):
    """Return the index of the first occurrence of *names* among the N
    entries following position *q_ind*, or False when absent."""
    stop = min(q_ind + N + 1, len(question))
    return next((i for i in range(q_ind + 1, stop) if question[i] == names), False)
def random_choice(question_arg, question_arg_type, names, ground_col_labels, q_ind, N, origin_name):
    """Fallback table pick when no direct match was found.

    Preference order: (1) any question token explicitly typed as a
    table, (2) a partial name match within the next N tokens (restricted
    to the tables in *ground_col_labels* when that list is non-empty),
    (3) the first grounded table, (4) the first table overall.
    """
    # first try: a token explicitly typed as a table elsewhere in the question
    for t_ind, t_val in enumerate(question_arg_type):
        if t_val == ['table']:
            return names[origin_name.index(question_arg[t_ind])]
    # otherwise look ahead N tokens for a partial table-name match
    for i in range(q_ind + 1, q_ind + N + 1):
        if i < len(question_arg):
            if len(ground_col_labels) == 0:
                for n in names:
                    if partial_match(question_arg[i][0], n) is True:
                        return n
            else:
                # only consider tables already grounded by other C(i) T(j) pairs
                for n_id, n in enumerate(names):
                    if n_id in ground_col_labels and partial_match(question_arg[i][0], n) is True:
                        return n
    # last resort: first grounded table, else the first table
    if len(ground_col_labels) > 0:
        return names[ground_col_labels[0]]
    else:
        return names[0]
def alter_column0(datas):
    """Resolve every `C(0) T(?)` (column '*') production in each model
    output to a concrete table, writing the final SemQL string into
    d['model_result_replace'].

    Heuristics, in order: tables grounded by the other C(i) T(j) pairs,
    "how many / number of / count of [table]" phrasings, superlative or
    comparative (M_OP) contexts, then random_choice() as a fallback.
    :return: None; mutates each dict in *datas* in place.
    """
    zero_count = 0
    count = 0
    result = []
    for d in datas:
        if 'C(0)' in d['model_result']:
            # collect the tables already grounded by non-C(0) pairs
            pattern = regex.compile('C\(.*?\) T\(.*?\)')
            result_pattern = list(set(pattern.findall(d['model_result'])))
            ground_col_labels = []
            for pa in result_pattern:
                pa = pa.split(' ')
                if pa[0] != 'C(0)':
                    index = int(pa[1][2:-1])
                    ground_col_labels.append(index)
            ground_col_labels = list(set(ground_col_labels))
            question_arg_type = d['question_arg_type']
            question_arg = d['question_arg']
            # spacy lemmas vs. wordnet lemmas: both spellings of each table name
            table_names = [[token.lemma_ for token in nlp(names)] for names in d['table_names']]
            origin_table_names = [[wordnet_lemmatizer.lemmatize(x.lower()) for x in names.split(' ')] for names in
                                  d['table_names']]
            count += 1
            # "easy" questions are counting questions
            easy_flag = False
            for q_ind, q in enumerate(d['question_arg']):
                q_str = " ".join(" ".join(x) for x in d['question_arg'])
                if 'how many' in q_str or 'number of' in q_str or 'count of' in q_str:
                    easy_flag = True
            if easy_flag:
                # check for the last one is a table word
                for q_ind, q in enumerate(d['question_arg']):
                    if (q_ind > 0 and q == ['many'] and d['question_arg'][q_ind - 1] == ['how']) or (
                            q_ind > 0 and q == ['of'] and d['question_arg'][q_ind - 1] == ['number']) or (
                            q_ind > 0 and q == ['of'] and d['question_arg'][q_ind - 1] == ['count']):
                        re = multi_equal(question_arg_type, q_ind, ['table'], 2)
                        if re is not False:
                            # This step work for the number of [table] example
                            table_result = table_names[origin_table_names.index(question_arg[re])]
                            result.append((d['query'], d['question'], table_result, d))
                            break
                        else:
                            re = multi_option(question_arg, q_ind, d['table_names'], 2)
                            if re is not False:
                                table_result = re
                                result.append((d['query'], d['question'], table_result, d))
                                pass
                            else:
                                re = multi_equal(question_arg_type, q_ind, ['table'], len(question_arg_type))
                                if re is not False:
                                    # This step work for the number of [table] example
                                    table_result = table_names[origin_table_names.index(question_arg[re])]
                                    result.append((d['query'], d['question'], table_result, d))
                                    break
                                pass
                            # no table phrase found: fall back to random_choice
                            table_result = random_choice(question_arg=question_arg,
                                                         question_arg_type=question_arg_type,
                                                         names=table_names,
                                                         ground_col_labels=ground_col_labels, q_ind=q_ind, N=2,
                                                         origin_name=origin_table_names)
                            result.append((d['query'], d['question'], table_result, d))
                            zero_count += 1
                        break
            else:
                # NOTE(review): 'msot' below looks like a typo for 'most' —
                # possibly intentional to match noisy data; confirm upstream.
                M_OP = False
                for q_ind, q in enumerate(d['question_arg']):
                    if M_OP is False and q in [['than'], ['least'], ['most'], ['msot'], ['fewest']] or \
                            question_arg_type[q_ind] == ['M_OP']:
                        M_OP = True
                        re = multi_equal(question_arg_type, q_ind, ['table'], 3)
                        if re is not False:
                            # This step work for the number of [table] example
                            table_result = table_names[origin_table_names.index(question_arg[re])]
                            result.append((d['query'], d['question'], table_result, d))
                            break
                        else:
                            re = multi_option(question_arg, q_ind, d['table_names'], 3)
                            if re is not False:
                                table_result = re
                                # print(table_result)
                                result.append((d['query'], d['question'], table_result, d))
                                pass
                            else:
                                # zero_count += 1
                                re = multi_equal(question_arg_type, q_ind, ['table'], len(question_arg_type))
                                if re is not False:
                                    # This step work for the number of [table] example
                                    table_result = table_names[origin_table_names.index(question_arg[re])]
                                    result.append((d['query'], d['question'], table_result, d))
                                    break
                                table_result = random_choice(question_arg=question_arg,
                                                             question_arg_type=question_arg_type,
                                                             names=table_names,
                                                             ground_col_labels=ground_col_labels, q_ind=q_ind, N=2,
                                                             origin_name=origin_table_names)
                                result.append((d['query'], d['question'], table_result, d))
                            pass
                if M_OP is False:
                    # NOTE(review): q_ind here is the last value from the loop
                    # above and is undefined for an empty question — confirm.
                    table_result = random_choice(question_arg=question_arg,
                                                 question_arg_type=question_arg_type,
                                                 names=table_names, ground_col_labels=ground_col_labels, q_ind=q_ind,
                                                 N=2,
                                                 origin_name=origin_table_names)
                    result.append((d['query'], d['question'], table_result, d))
    # translate each chosen table name back into its index
    for re in result:
        table_names = [[token.lemma_ for token in nlp(names)] for names in re[3]['table_names']]
        origin_table_names = [[x for x in names.split(' ')] for names in re[3]['table_names']]
        if re[2] in table_names:
            re[3]['rule_count'] = table_names.index(re[2])
        else:
            re[3]['rule_count'] = origin_table_names.index(re[2])
    # rewrite 'C(0) T(?)' with the resolved table index
    for data in datas:
        if 'rule_count' in data:
            str_replace = 'C(0) T(' + str(data['rule_count']) + ')'
            replace_result = regex.sub('C\(0\) T\(.\)', str_replace, data['model_result'])
            data['model_result_replace'] = replace_result
        else:
            data['model_result_replace'] = data['model_result']
| 46.888889 | 117 | 0.474861 |
import os
import json
import re as regex
import spacy
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
def partial_match(query, table_name):
query = [token.lemma_ for token in nlp(query)]
table_name = [nlp(token)[0].lemma_ for token in table_name]
if query in table_name:
return True
return False
def is_partial_match(query, table_names):
query = nlp(query)[0].lemma_
table_names = [[token.lemma_ for token in nlp(names)] for names in table_names]
same_count = 0
result = None
for names in table_names:
if query in names:
same_count += 1
result = names
return result if same_count == 1 else False
def multi_option(question, q_ind, names, N):
for i in range(q_ind + 1, q_ind + N + 1):
if i < len(question):
re = is_partial_match(question[i][0], names)
if re is not False:
return re
return False
def multi_equal(question, q_ind, names, N):
for i in range(q_ind + 1, q_ind + N + 1):
if i < len(question):
if question[i] == names:
return i
return False
def random_choice(question_arg, question_arg_type, names, ground_col_labels, q_ind, N, origin_name):
for t_ind, t_val in enumerate(question_arg_type):
if t_val == ['table']:
return names[origin_name.index(question_arg[t_ind])]
for i in range(q_ind + 1, q_ind + N + 1):
if i < len(question_arg):
if len(ground_col_labels) == 0:
for n in names:
if partial_match(question_arg[i][0], n) is True:
return n
else:
for n_id, n in enumerate(names):
if n_id in ground_col_labels and partial_match(question_arg[i][0], n) is True:
return n
if len(ground_col_labels) > 0:
return names[ground_col_labels[0]]
else:
return names[0]
def alter_column0(datas):
zero_count = 0
count = 0
result = []
for d in datas:
if 'C(0)' in d['model_result']:
pattern = regex.compile('C\(.*?\) T\(.*?\)')
result_pattern = list(set(pattern.findall(d['model_result'])))
ground_col_labels = []
for pa in result_pattern:
pa = pa.split(' ')
if pa[0] != 'C(0)':
index = int(pa[1][2:-1])
ground_col_labels.append(index)
ground_col_labels = list(set(ground_col_labels))
question_arg_type = d['question_arg_type']
question_arg = d['question_arg']
table_names = [[token.lemma_ for token in nlp(names)] for names in d['table_names']]
origin_table_names = [[wordnet_lemmatizer.lemmatize(x.lower()) for x in names.split(' ')] for names in
d['table_names']]
count += 1
easy_flag = False
for q_ind, q in enumerate(d['question_arg']):
q_str = " ".join(" ".join(x) for x in d['question_arg'])
if 'how many' in q_str or 'number of' in q_str or 'count of' in q_str:
easy_flag = True
if easy_flag:
for q_ind, q in enumerate(d['question_arg']):
if (q_ind > 0 and q == ['many'] and d['question_arg'][q_ind - 1] == ['how']) or (
q_ind > 0 and q == ['of'] and d['question_arg'][q_ind - 1] == ['number']) or (
q_ind > 0 and q == ['of'] and d['question_arg'][q_ind - 1] == ['count']):
re = multi_equal(question_arg_type, q_ind, ['table'], 2)
if re is not False:
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
else:
re = multi_option(question_arg, q_ind, d['table_names'], 2)
if re is not False:
table_result = re
result.append((d['query'], d['question'], table_result, d))
pass
else:
re = multi_equal(question_arg_type, q_ind, ['table'], len(question_arg_type))
if re is not False:
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
pass
table_result = random_choice(question_arg=question_arg,
question_arg_type=question_arg_type,
names=table_names,
ground_col_labels=ground_col_labels, q_ind=q_ind, N=2,
origin_name=origin_table_names)
result.append((d['query'], d['question'], table_result, d))
zero_count += 1
break
else:
M_OP = False
for q_ind, q in enumerate(d['question_arg']):
if M_OP is False and q in [['than'], ['least'], ['most'], ['msot'], ['fewest']] or \
question_arg_type[q_ind] == ['M_OP']:
M_OP = True
re = multi_equal(question_arg_type, q_ind, ['table'], 3)
if re is not False:
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
else:
re = multi_option(question_arg, q_ind, d['table_names'], 3)
if re is not False:
table_result = re
result.append((d['query'], d['question'], table_result, d))
pass
else:
re = multi_equal(question_arg_type, q_ind, ['table'], len(question_arg_type))
if re is not False:
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
table_result = random_choice(question_arg=question_arg,
question_arg_type=question_arg_type,
names=table_names,
ground_col_labels=ground_col_labels, q_ind=q_ind, N=2,
origin_name=origin_table_names)
result.append((d['query'], d['question'], table_result, d))
pass
if M_OP is False:
table_result = random_choice(question_arg=question_arg,
question_arg_type=question_arg_type,
names=table_names, ground_col_labels=ground_col_labels, q_ind=q_ind,
N=2,
origin_name=origin_table_names)
result.append((d['query'], d['question'], table_result, d))
for re in result:
table_names = [[token.lemma_ for token in nlp(names)] for names in re[3]['table_names']]
origin_table_names = [[x for x in names.split(' ')] for names in re[3]['table_names']]
if re[2] in table_names:
re[3]['rule_count'] = table_names.index(re[2])
else:
re[3]['rule_count'] = origin_table_names.index(re[2])
for data in datas:
if 'rule_count' in data:
str_replace = 'C(0) T(' + str(data['rule_count']) + ')'
replace_result = regex.sub('C\(0\) T\(.\)', str_replace, data['model_result'])
data['model_result_replace'] = replace_result
else:
data['model_result_replace'] = data['model_result']
| true | true |
f715d67eef0245ded35fcb508560db29166544bc | 518 | py | Python | components/driver/test_apps/i2s_test_apps/i2s/pytest_i2s.py | fbucafusco/esp-idf | c2ccc383dae2a47c2c2dc8c7ad78175a3fd11361 | [
"Apache-2.0"
] | null | null | null | components/driver/test_apps/i2s_test_apps/i2s/pytest_i2s.py | fbucafusco/esp-idf | c2ccc383dae2a47c2c2dc8c7ad78175a3fd11361 | [
"Apache-2.0"
] | null | null | null | components/driver/test_apps/i2s_test_apps/i2s/pytest_i2s.py | fbucafusco/esp-idf | c2ccc383dae2a47c2c2dc8c7ad78175a3fd11361 | [
"Apache-2.0"
] | null | null | null | # SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import pytest
from pytest_embedded import Dut
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.esp32c3
@pytest.mark.esp32s3
@pytest.mark.generic
@pytest.mark.parametrize(
    'config',
    [
        'iram_safe',
        'release',
    ],
    indirect=True,
)
def test_i2s(dut: Dut) -> None:
    """Run all I2S unit tests on the target and verify the Unity output."""
    dut.expect_exact('Press ENTER to see the list of tests')
    dut.write('*')  # '*' selects every registered test in the Unity menu
    dut.expect_unity_test_output()
| 20.72 | 66 | 0.696911 |
import pytest
from pytest_embedded import Dut
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.esp32c3
@pytest.mark.esp32s3
@pytest.mark.generic
@pytest.mark.parametrize(
'config',
[
'iram_safe',
'release',
],
indirect=True,
)
def test_i2s(dut: Dut) -> None:
dut.expect_exact('Press ENTER to see the list of tests')
dut.write('*')
dut.expect_unity_test_output()
| true | true |
f715d6d5b4734d75244b4bcd84df7da47ab5fd20 | 5,939 | py | Python | coresupdate.py | danitxu79/Retroarch_Cores_Update_from_Retropie_Menu | 2841b12b0d29b08e71e0ddbbd148e5cf84cad3ce | [
"MIT"
] | null | null | null | coresupdate.py | danitxu79/Retroarch_Cores_Update_from_Retropie_Menu | 2841b12b0d29b08e71e0ddbbd148e5cf84cad3ce | [
"MIT"
] | null | null | null | coresupdate.py | danitxu79/Retroarch_Cores_Update_from_Retropie_Menu | 2841b12b0d29b08e71e0ddbbd148e5cf84cad3ce | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# user's retroarch configuration file
retroconfig = '/opt/retropie/configs/all/retroarch.cfg'
# current buildbot url
retrourl = 'https://buildbot.libretro.com'
import argparse
import configparser
import os
import os.path as pth
import platform
import shutil
import sys
import tempfile
import time
import urllib.request
import zipfile
# parse arguments
pars = argparse.ArgumentParser()
pars.add_argument('-c', '--cores', action="store_true",
help='download and extract cores')
pars.add_argument('-s', '--assets', action="store_true",
help='download and extract asset files')
pars.add_argument('-a', '--all', action="store_true",
help='download and extract both')
pars.add_argument('-v', '--verbose', action="store_true",
help='display target urls and directories')
pars.add_argument('-d', '--dry', action="store_true",
help='dry run; do not download anything')
pars.add_argument('-g', '--config', type=str,
help='specify the retroarch config file')
args = pars.parse_args(args=None if sys.argv[1:] else ['-h'])
# if args.all:
# args.assets = True
# args.cores = True
# (echo raspberry | sudo -S apt-get install dialog -y)
# funcheck=( dialog --separate-output --menu "Retroarch Cores Actualizer" 0 0 0 1 "Update installed cores" 2 "I" 3 "Install all cores"
# opciones=(1 "opción 1" on
# 2 "opción 2" off
# selecciones=$("${funcheck[@]}" "${opciones[@]}" 2>&1 >/dev/tty)
# clear
# do
# case $seleccion in
# 1)
# echo "Escogiste la opción 1"
# ;;
# 2)
# echo "Escogiste la opción 2"
# ;;
# asset names used in the buildbot and config file
itemlist = {
'assets' : 'assets_directory',
'autoconfig' : 'joypad_autoconfig_dir',
'cheats' : 'cheat_database_path',
'database-cursors' : 'cursor_directory',
'database-rdb' : 'content_database_path',
'info' : 'libretro_info_path',
'overlays' : 'overlay_directory',
'shaders_cg' : 'video_shader_dir',
'shaders_glsl' : 'video_shader_dir',
'shaders_slang' : 'video_shader_dir',
}
# get platform
if sys.platform == 'win32':
osname = 'windows'
time.timezone = 0
elif sys.platform == 'darwin':
osname = 'apple/osx'
else:
osname = sys.platform
# check architecture
if platform.machine().endswith('64'):
osarch = 'x86_64'
else:
osarch = 'x86'
# get partial download urls
urlcores = pth.join(retrourl, 'nightly', osname, osarch, 'latest')
urlassets = pth.join(retrourl, 'assets/frontend')
# get config path; expand unix home folders
if args.config:
retroconfig = args.config
retroconfig = pth.normcase(pth.expanduser(retroconfig))
retrodir = pth.dirname(retroconfig)
# retrieve paths from retroarch user config
with open(retroconfig, 'r') as tmpconf:
conf = configparser.ConfigParser()
conf.read_string('[A]\n' + tmpconf.read())
# get asset paths; strip quotes and expand any ~'s
for item in itemlist:
itemlist[item] = pth.expanduser(conf['A'][itemlist[item]].strip('"'))
# get whole path of portable folders
if itemlist[item].startswith(':'):
itemlist[item] = pth.join(retrodir, itemlist[item].lstrip(':\\'))
# add subdirs to shaders' paths
for shdr in ['shaders_cg', 'shaders_glsl', 'shaders_slang']:
itemlist[shdr] = pth.join(itemlist[shdr], shdr)
# and also get the cores path
coredir = pth.expanduser(conf['A']['libretro_directory'].strip('"'))
if coredir.startswith(':'):
coredir = pth.join(retrodir, coredir.lstrip(':\\'))
corelist = sorted(os.listdir(coredir))
conf.clear()
# download and extract archive to destination
def fetch_archive(url, dest):
    """Download the zip archive at *url* and extract it into *dest*.

    The archive is spooled to a temporary file because ZipFile needs a
    seekable object.  Extracted files keep their original modification
    timestamps (zip stores local time; ``time.timezone`` compensates).
    Context managers guarantee the temp file and response are closed and
    the temp file deleted even on error (the original leaked it until GC).
    """
    with urllib.request.urlopen(url) as tmpdata:
        with tempfile.NamedTemporaryFile(suffix='.zip') as tmpfile:
            shutil.copyfileobj(tmpdata, tmpfile)
            tmpfile.flush()
            # extract
            with zipfile.ZipFile(tmpfile, 'r') as tmpzip:
                for member in tmpzip.infolist():
                    tmpzip.extract(member, dest)
                    # use original modification timestamp
                    origdate = time.mktime(member.date_time + (0, 0, -1)) - time.timezone
                    os.utime(pth.join(dest, member.filename), (origdate, origdate))
# download and extract each core currently in retroarch's core directory
if args.cores:
print('updating cores...')
for core in corelist:
coreurl = pth.join(urlcores, core+'.zip').replace('\\', '/')
print('[%2d/%2d] fetching: %s' % (corelist.index(core)+1,
len(corelist),
core+'.zip'))
if args.verbose:
print(' '*7, 'from url: %s' % coreurl)
print(' '*7, 'into dir: %s' % coredir)
if not args.dry:
try:
fetch_archive(coreurl, coredir)
except Exception as excp:
print(' '*7, 'could not fetch file: %s' % core+'.zip')
print(' '*7, excp)
# download and extract each asset archive into their respective directories
if args.assets:
print('updating assets...')
for item in itemlist:
itemurl = pth.join(urlassets, item+'.zip').replace('\\', '/')
itempath = itemlist[item]
print('[%2d/%2d] fetching: %s' % (list(itemlist).index(item)+1,
len(itemlist),
item+'.zip'))
if args.verbose:
print(' '*7, 'from url: %s' % itemurl)
print(' '*7, 'into dir: %s' % itempath)
if not args.dry:
try:
os.makedirs(itempath, exist_ok=True)
fetch_archive(itemurl, itempath)
except Exception as excp:
print(' '*7, 'could not fetch file: %s' % item+'.zip')
print(' '*7, excp)
| 30.613402 | 135 | 0.602795 |
retroconfig = '/opt/retropie/configs/all/retroarch.cfg'
# current buildbot url
retrourl = 'https://buildbot.libretro.com'
import argparse
import configparser
import os
import os.path as pth
import platform
import shutil
import sys
import tempfile
import time
import urllib.request
import zipfile
# parse arguments
pars = argparse.ArgumentParser()
pars.add_argument('-c', '--cores', action="store_true",
help='download and extract cores')
pars.add_argument('-s', '--assets', action="store_true",
help='download and extract asset files')
pars.add_argument('-a', '--all', action="store_true",
help='download and extract both')
pars.add_argument('-v', '--verbose', action="store_true",
help='display target urls and directories')
pars.add_argument('-d', '--dry', action="store_true",
help='dry run; do not download anything')
pars.add_argument('-g', '--config', type=str,
help='specify the retroarch config file')
args = pars.parse_args(args=None if sys.argv[1:] else ['-h'])
# if args.all:
# args.assets = True
# args.cores = True
# (echo raspberry | sudo -S apt-get install dialog -y)
# funcheck=( dialog --separate-output --menu "Retroarch Cores Actualizer" 0 0 0 1 "Update installed cores" 2 "I" 3 "Install all cores"
# opciones=(1 "opción 1" on
# 2 "opción 2" off
# selecciones=$("${funcheck[@]}" "${opciones[@]}" 2>&1 >/dev/tty)
# clear
# do
# case $seleccion in
# 1)
# echo "Escogiste la opción 1"
# ;;
# 2)
# echo "Escogiste la opción 2"
# ;;
# asset names used in the buildbot and config file
itemlist = {
'assets' : 'assets_directory',
'autoconfig' : 'joypad_autoconfig_dir',
'cheats' : 'cheat_database_path',
'database-cursors' : 'cursor_directory',
'database-rdb' : 'content_database_path',
'info' : 'libretro_info_path',
'overlays' : 'overlay_directory',
'shaders_cg' : 'video_shader_dir',
'shaders_glsl' : 'video_shader_dir',
'shaders_slang' : 'video_shader_dir',
}
# get platform
if sys.platform == 'win32':
osname = 'windows'
time.timezone = 0
elif sys.platform == 'darwin':
osname = 'apple/osx'
else:
osname = sys.platform
# check architecture
if platform.machine().endswith('64'):
osarch = 'x86_64'
else:
osarch = 'x86'
# get partial download urls
urlcores = pth.join(retrourl, 'nightly', osname, osarch, 'latest')
urlassets = pth.join(retrourl, 'assets/frontend')
# get config path; expand unix home folders
if args.config:
retroconfig = args.config
retroconfig = pth.normcase(pth.expanduser(retroconfig))
retrodir = pth.dirname(retroconfig)
# retrieve paths from retroarch user config
with open(retroconfig, 'r') as tmpconf:
conf = configparser.ConfigParser()
conf.read_string('[A]\n' + tmpconf.read())
# get asset paths; strip quotes and expand any ~'s
for item in itemlist:
itemlist[item] = pth.expanduser(conf['A'][itemlist[item]].strip('"'))
# get whole path of portable folders
if itemlist[item].startswith(':'):
itemlist[item] = pth.join(retrodir, itemlist[item].lstrip(':\\'))
# add subdirs to shaders' paths
for shdr in ['shaders_cg', 'shaders_glsl', 'shaders_slang']:
itemlist[shdr] = pth.join(itemlist[shdr], shdr)
# and also get the cores path
coredir = pth.expanduser(conf['A']['libretro_directory'].strip('"'))
if coredir.startswith(':'):
coredir = pth.join(retrodir, coredir.lstrip(':\\'))
corelist = sorted(os.listdir(coredir))
conf.clear()
# download and extract archive to destination
def fetch_archive(url, dest):
# download
with urllib.request.urlopen(url) as tmpdata:
tmpfile = tempfile.NamedTemporaryFile(suffix='.zip')
shutil.copyfileobj(tmpdata, tmpfile)
# extract
with zipfile.ZipFile(tmpfile, 'r') as tmpzip:
for member in tmpzip.infolist():
tmpzip.extract(member, dest)
# use original modification timestamp
origdate = time.mktime(member.date_time + (0, 0, -1)) - time.timezone
os.utime(pth.join(dest, member.filename), (origdate, origdate))
# download and extract each core currently in retroarch's core directory
if args.cores:
print('updating cores...')
for core in corelist:
coreurl = pth.join(urlcores, core+'.zip').replace('\\', '/')
print('[%2d/%2d] fetching: %s' % (corelist.index(core)+1,
len(corelist),
core+'.zip'))
if args.verbose:
print(' '*7, 'from url: %s' % coreurl)
print(' '*7, 'into dir: %s' % coredir)
if not args.dry:
try:
fetch_archive(coreurl, coredir)
except Exception as excp:
print(' '*7, 'could not fetch file: %s' % core+'.zip')
print(' '*7, excp)
if args.assets:
print('updating assets...')
for item in itemlist:
itemurl = pth.join(urlassets, item+'.zip').replace('\\', '/')
itempath = itemlist[item]
print('[%2d/%2d] fetching: %s' % (list(itemlist).index(item)+1,
len(itemlist),
item+'.zip'))
if args.verbose:
print(' '*7, 'from url: %s' % itemurl)
print(' '*7, 'into dir: %s' % itempath)
if not args.dry:
try:
os.makedirs(itempath, exist_ok=True)
fetch_archive(itemurl, itempath)
except Exception as excp:
print(' '*7, 'could not fetch file: %s' % item+'.zip')
print(' '*7, excp)
| true | true |
f715d7a31a5df246928567994bae099da3cac6a5 | 7,559 | py | Python | webStorm-APICloud/python_tools/Lib/SimpleHTTPServer.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | 81 | 2017-03-13T08:24:01.000Z | 2021-04-02T09:48:38.000Z | Macros/Python/SimpleHTTPServer.py | rec/DMXIS | 540baa59df6f4ae39990e5888f90b95caa362279 | [
"Artistic-2.0"
] | 6 | 2017-04-30T08:36:55.000Z | 2017-09-22T01:37:28.000Z | Macros/Python/SimpleHTTPServer.py | rec/DMXIS | 540baa59df6f4ae39990e5888f90b95caa362279 | [
"Artistic-2.0"
] | 41 | 2017-03-18T14:11:58.000Z | 2021-04-14T05:06:09.000Z | """Simple HTTP Server.
This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.
"""
__version__ = "0.6"
__all__ = ["SimpleHTTPRequestHandler"]
import os
import posixpath
import BaseHTTPServer
import urllib
import cgi
import shutil
import mimetypes
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Simple HTTP request handler with GET and HEAD commands.
    This serves files from the current directory and any of its
    subdirectories. The MIME type for files is determined by
    calling the .guess_type() method.
    The GET and HEAD requests are identical except that the HEAD
    request omits the actual contents of the file.
    """
    server_version = "SimpleHTTP/" + __version__
    def do_GET(self):
        """Serve a GET request."""
        f = self.send_head()
        if f:
            # send_head() already emitted status and headers; stream the body.
            self.copyfile(f, self.wfile)
            f.close()
    def do_HEAD(self):
        """Serve a HEAD request."""
        f = self.send_head()
        if f:
            # HEAD sends headers only; just release the file object.
            f.close()
    def send_head(self):
        """Common code for GET and HEAD commands.
        This sends the response code and MIME headers.
        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.
        """
        path = self.translate_path(self.path)
        f = None
        if os.path.isdir(path):
            if not self.path.endswith('/'):
                # redirect browser - doing basically what apache does
                self.send_response(301)
                self.send_header("Location", self.path + "/")
                self.end_headers()
                return None
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                # No index file found: serve a generated directory listing.
                return self.list_directory(path)
        ctype = self.guess_type(path)
        try:
            # Always read in binary mode. Opening files in text mode may cause
            # newline translations, making the actual size of the content
            # transmitted *less* than the content-length!
            f = open(path, 'rb')
        except IOError:
            self.send_error(404, "File not found")
            return None
        self.send_response(200)
        self.send_header("Content-type", ctype)
        fs = os.fstat(f.fileno())
        # fs[6] is st_size: advertise the exact on-disk size of the file.
        self.send_header("Content-Length", str(fs[6]))
        self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
        self.end_headers()
        return f
    def list_directory(self, path):
        """Helper to produce a directory listing (absent index.html).
        Return value is either a file object, or None (indicating an
        error). In either case, the headers are sent, making the
        interface the same as for send_head().
        """
        try:
            list = os.listdir(path)
        except os.error:
            self.send_error(404, "No permission to list directory")
            return None
        list.sort(key=lambda a: a.lower())
        f = StringIO()
        # HTML-escape the (URL-decoded) request path before embedding it.
        displaypath = cgi.escape(urllib.unquote(self.path))
        f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
        f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
        f.write("<hr>\n<ul>\n")
        for name in list:
            fullname = os.path.join(path, name)
            displayname = linkname = name
            # Append / for directories or @ for symbolic links
            if os.path.isdir(fullname):
                displayname = name + "/"
                linkname = name + "/"
            if os.path.islink(fullname):
                displayname = name + "@"
                # Note: a link to a directory displays with @ and links with /
            f.write('<li><a href="%s">%s</a>\n'
                    % (urllib.quote(linkname), cgi.escape(displayname)))
        f.write("</ul>\n<hr>\n</body>\n</html>\n")
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return f
    def translate_path(self, path):
        """Translate a /-separated PATH to the local filename syntax.
        Components that mean special things to the local file system
        (e.g. drive or directory names) are ignored. (XXX They should
        probably be diagnosed.)
        """
        # abandon query parameters
        path = path.split('?',1)[0]
        path = path.split('#',1)[0]
        path = posixpath.normpath(urllib.unquote(path))
        words = path.split('/')
        words = filter(None, words)
        path = os.getcwd()
        for word in words:
            # Strip drive letters and directory prefixes from each component
            # so a request cannot name paths outside the served tree.
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            if word in (os.curdir, os.pardir): continue
            path = os.path.join(path, word)
        return path
    def copyfile(self, source, outputfile):
        """Copy all data between two file objects.
        The SOURCE argument is a file object open for reading
        (or anything with a read() method) and the DESTINATION
        argument is a file object open for writing (or
        anything with a write() method).
        The only reason for overriding this would be to change
        the block size or perhaps to replace newlines by CRLF
        -- note however that this the default server uses this
        to copy binary data as well.
        """
        shutil.copyfileobj(source, outputfile)
    def guess_type(self, path):
        """Guess the type of a file.
        Argument is a PATH (a filename).
        Return value is a string of the form type/subtype,
        usable for a MIME Content-type header.
        The default implementation looks the file's extension
        up in the table self.extensions_map, using application/octet-stream
        as a default; however it would be permissible (if
        slow) to look inside the data to make a better guess.
        """
        base, ext = posixpath.splitext(path)
        # Try the extension as-is, then lower-cased, then the '' catch-all.
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        ext = ext.lower()
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        else:
            return self.extensions_map['']
    if not mimetypes.inited:
        mimetypes.init() # try to read system mime.types
    extensions_map = mimetypes.types_map.copy()
    extensions_map.update({
        '': 'application/octet-stream', # Default
        '.py': 'text/plain',
        '.c': 'text/plain',
        '.h': 'text/plain',
        })
def test(HandlerClass=SimpleHTTPRequestHandler,
         ServerClass=BaseHTTPServer.HTTPServer):
    """Run BaseHTTPServer's demo harness using this module's handler."""
    BaseHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
    # Start the demo server when this module is executed directly.
    test()
| 34.515982 | 83 | 0.582484 |
__version__ = "0.6"
__all__ = ["SimpleHTTPRequestHandler"]
import os
import posixpath
import BaseHTTPServer
import urllib
import cgi
import shutil
import mimetypes
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP request handler serving files below the current directory (GET/HEAD only)."""
    server_version = "SimpleHTTP/" + __version__
    def do_GET(self):
        """Serve a GET request: headers followed by the file body."""
        f = self.send_head()
        if f:
            self.copyfile(f, self.wfile)
            f.close()
    def do_HEAD(self):
        """Serve a HEAD request: headers only."""
        f = self.send_head()
        if f:
            f.close()
    def send_head(self):
        """Send status and MIME headers; return an open file object or None."""
        path = self.translate_path(self.path)
        f = None
        if os.path.isdir(path):
            if not self.path.endswith('/'):
                # Redirect directory requests that lack a trailing slash.
                self.send_response(301)
                self.send_header("Location", self.path + "/")
                self.end_headers()
                return None
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                return self.list_directory(path)
        ctype = self.guess_type(path)
        try:
            # Binary mode keeps the transmitted size equal to Content-Length.
            f = open(path, 'rb')
        except IOError:
            self.send_error(404, "File not found")
            return None
        self.send_response(200)
        self.send_header("Content-type", ctype)
        fs = os.fstat(f.fileno())
        # fs[6] is st_size.
        self.send_header("Content-Length", str(fs[6]))
        self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
        self.end_headers()
        return f
    def list_directory(self, path):
        """Produce an HTML directory listing; returns a StringIO, or None on error."""
        try:
            list = os.listdir(path)
        except os.error:
            self.send_error(404, "No permission to list directory")
            return None
        list.sort(key=lambda a: a.lower())
        f = StringIO()
        displaypath = cgi.escape(urllib.unquote(self.path))
        f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
        f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
        f.write("<hr>\n<ul>\n")
        for name in list:
            fullname = os.path.join(path, name)
            displayname = linkname = name
            # Mark directories with '/' and symbolic links with '@'.
            if os.path.isdir(fullname):
                displayname = name + "/"
                linkname = name + "/"
            if os.path.islink(fullname):
                displayname = name + "@"
            f.write('<li><a href="%s">%s</a>\n'
                    % (urllib.quote(linkname), cgi.escape(displayname)))
        f.write("</ul>\n<hr>\n</body>\n</html>\n")
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return f
    def translate_path(self, path):
        """Map a URL path onto a filesystem path below the current directory."""
        # Drop query string and fragment before decoding.
        path = path.split('?',1)[0]
        path = path.split('#',1)[0]
        path = posixpath.normpath(urllib.unquote(path))
        words = path.split('/')
        words = filter(None, words)
        path = os.getcwd()
        for word in words:
            # Discard drive/directory prefixes from each path component.
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            if word in (os.curdir, os.pardir): continue
            path = os.path.join(path, word)
        return path
    def copyfile(self, source, outputfile):
        """Copy all data from `source` to `outputfile` in blocks."""
        shutil.copyfileobj(source, outputfile)
    def guess_type(self, path):
        """Return a MIME type for `path`, defaulting to application/octet-stream."""
        base, ext = posixpath.splitext(path)
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        ext = ext.lower()
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        else:
            return self.extensions_map['']
    if not mimetypes.inited:
        mimetypes.init()
    extensions_map = mimetypes.types_map.copy()
    extensions_map.update({
        '': 'application/octet-stream',
        '.py': 'text/plain',
        '.c': 'text/plain',
        '.h': 'text/plain',
        })
def test(HandlerClass = SimpleHTTPRequestHandler,
         ServerClass = BaseHTTPServer.HTTPServer):
    """Run BaseHTTPServer's demo harness using this module's handler."""
    BaseHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
    # Start the demo server when this module is executed directly.
    test()
| true | true |
f715d813ac5ceeef5aa1cbcaa572ba4bc2637be9 | 182 | py | Python | reduction/test/plot_algol_h_alpha_line.py | christianwbrock/algol-reduction | 5e85734d9e9e31985ead3ce40e67535418351010 | [
"BSD-3-Clause"
] | null | null | null | reduction/test/plot_algol_h_alpha_line.py | christianwbrock/algol-reduction | 5e85734d9e9e31985ead3ce40e67535418351010 | [
"BSD-3-Clause"
] | null | null | null | reduction/test/plot_algol_h_alpha_line.py | christianwbrock/algol-reduction | 5e85734d9e9e31985ead3ce40e67535418351010 | [
"BSD-3-Clause"
] | null | null | null |
import matplotlib.pyplot as plt
from reduction.algol_h_alpha_line_model import AlgolHAlphaModel
if __name__ == '__main__':
    # Draw the Algol H-alpha line model into a fresh axes and display it.
    axes = plt.axes()
    AlgolHAlphaModel().plot(axes)
    plt.show()
| 18.2 | 63 | 0.758242 |
import matplotlib.pyplot as plt
from reduction.algol_h_alpha_line_model import AlgolHAlphaModel
if __name__ == '__main__':
    # Plot the H-alpha line model and show the figure interactively.
    AlgolHAlphaModel().plot(plt.axes())
    plt.show()
| true | true |
f715d8a0e2bc4f9037250784399021a44f9b5b67 | 2,019 | py | Python | h2o-py/tests/testdir_algos/glm/pyunit_link_correct_default_largeGLM.py | ChristosChristofidis/h2o-3 | 2a926c0950a98eff5a4c06aeaf0373e17176ecd8 | [
"Apache-2.0"
] | null | null | null | h2o-py/tests/testdir_algos/glm/pyunit_link_correct_default_largeGLM.py | ChristosChristofidis/h2o-3 | 2a926c0950a98eff5a4c06aeaf0373e17176ecd8 | [
"Apache-2.0"
] | null | null | null | h2o-py/tests/testdir_algos/glm/pyunit_link_correct_default_largeGLM.py | ChristosChristofidis/h2o-3 | 2a926c0950a98eff5a4c06aeaf0373e17176ecd8 | [
"Apache-2.0"
] | 1 | 2020-12-18T19:20:02.000Z | 2020-12-18T19:20:02.000Z | import sys
sys.path.insert(1, "../../../")
import h2o
def link_correct_default(ip,port):
    """Check that a GLM fitted without an explicit link matches the canonical link.

    For each family, fits one model with ``link`` unspecified and one with the
    canonical link named explicitly, then asserts that the coefficient tables
    are identical.
    """
    # Connect to h2o
    h2o.init(ip,port)
    print("Reading in original prostate data.")
    h2o_data = h2o.upload_file(path=h2o.locate("smalldata/prostate/prostate.csv.zip"))
    print("Compare models with link unspecified and canonical link specified.")
    # (label, predictor columns, response column, family, canonical link)
    cases = [("GAUSSIAN: ", slice(1, 8), 8, "gaussian", "identity"),
             ("BINOMIAL: ", slice(2, 9), 1, "binomial", "logit"),
             ("POISSON: ", slice(2, 9), 1, "poisson", "log"),
             ("GAMMA: ", slice(3, 9), 2, "gamma", "inverse")]
    for label, xcols, ycol, family, link in cases:
        print(label)
        model_default = h2o.glm(x=h2o_data[xcols], y=h2o_data[ycol], family=family)
        model_linked = h2o.glm(x=h2o_data[xcols], y=h2o_data[ycol], family=family, link=link)
        assert model_linked._model_json['output']['coefficients_table'].cell_values == \
               model_default._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
if __name__ == "__main__":
    # Entry point: run the check through h2o's pyunit test harness.
    h2o.run_test(sys.argv, link_correct_default)
| 51.769231 | 113 | 0.752353 | import sys
sys.path.insert(1, "../../../")
import h2o
def link_correct_default(ip,port):
    """Verify that omitting ``link`` yields the canonical link for each GLM family."""
    h2o.init(ip,port)
    print("Reading in original prostate data.")
    h2o_data = h2o.upload_file(path=h2o.locate("smalldata/prostate/prostate.csv.zip"))
    print("Compare models with link unspecified and canonical link specified.")
    print("GAUSSIAN: ")
    h2o_model_unspecified = h2o.glm(x=h2o_data[1:8], y=h2o_data[8], family="gaussian")
    h2o_model_specified = h2o.glm(x=h2o_data[1:8], y=h2o_data[8], family="gaussian", link="identity")
    assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
           h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
    print("BINOMIAL: ")
    h2o_model_unspecified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="binomial")
    h2o_model_specified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="binomial", link="logit")
    assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
           h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
    print("POISSON: ")
    h2o_model_unspecified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="poisson")
    h2o_model_specified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="poisson", link="log")
    assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
           h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
    print("GAMMA: ")
    h2o_model_unspecified = h2o.glm(x=h2o_data[3:9], y=h2o_data[2], family="gamma")
    h2o_model_specified = h2o.glm(x=h2o_data[3:9], y=h2o_data[2], family="gamma", link="inverse")
    assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
           h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
if __name__ == "__main__":
    # Entry point: run the check through h2o's pyunit test harness.
    h2o.run_test(sys.argv, link_correct_default)
| true | true |
f715d969b2e39092279936585118b9960ebb2227 | 204 | py | Python | sigbox/__init__.py | ok65/sigbox | eacec88ccdc3929e19d92d54ef3c52dda54e5856 | [
"WTFPL"
] | null | null | null | sigbox/__init__.py | ok65/sigbox | eacec88ccdc3929e19d92d54ef3c52dda54e5856 | [
"WTFPL"
] | null | null | null | sigbox/__init__.py | ok65/sigbox | eacec88ccdc3929e19d92d54ef3c52dda54e5856 | [
"WTFPL"
] | null | null | null | from sigbox.signal_decorator import SignalDecorator
from sigbox.signal_box import SignalBox, SignalBoxClass
from sigbox.sigbox import SigBox
__all__ = [SignalBox, SignalDecorator, SignalBoxClass, SigBox] | 40.8 | 62 | 0.857843 | from sigbox.signal_decorator import SignalDecorator
from sigbox.signal_box import SignalBox, SignalBoxClass
from sigbox.sigbox import SigBox
__all__ = [SignalBox, SignalDecorator, SignalBoxClass, SigBox] | true | true |
f715d9a77e0c016a8984da9e96656403d08f49b6 | 3,270 | py | Python | aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/UpdateTransitRouterVbrAttachmentAttributeRequest.py | leafcoder/aliyun-openapi-python-sdk | 26b441ab37a5cda804de475fd5284bab699443f1 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/UpdateTransitRouterVbrAttachmentAttributeRequest.py | leafcoder/aliyun-openapi-python-sdk | 26b441ab37a5cda804de475fd5284bab699443f1 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/UpdateTransitRouterVbrAttachmentAttributeRequest.py | leafcoder/aliyun-openapi-python-sdk | 26b441ab37a5cda804de475fd5284bab699443f1 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class UpdateTransitRouterVbrAttachmentAttributeRequest(RpcRequest):
	"""CBN ``UpdateTransitRouterVbrAttachmentAttribute`` RPC request (API version 2017-09-12)."""
	def __init__(self):
		RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'UpdateTransitRouterVbrAttachmentAttribute','cbn')
		self.set_method('POST')
		# Populate endpoint data only when the installed SDK core exposes
		# these attributes.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	# Plain getter/setter pairs; the values are stored in the request's
	# query-parameter dict.
	def get_ResourceOwnerId(self):
		return self.get_query_params().get('ResourceOwnerId')
	def set_ResourceOwnerId(self,ResourceOwnerId):
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)
	def get_ClientToken(self):
		return self.get_query_params().get('ClientToken')
	def set_ClientToken(self,ClientToken):
		self.add_query_param('ClientToken',ClientToken)
	def get_TransitRouterAttachmentName(self):
		return self.get_query_params().get('TransitRouterAttachmentName')
	def set_TransitRouterAttachmentName(self,TransitRouterAttachmentName):
		self.add_query_param('TransitRouterAttachmentName',TransitRouterAttachmentName)
	def get_DryRun(self):
		return self.get_query_params().get('DryRun')
	def set_DryRun(self,DryRun):
		self.add_query_param('DryRun',DryRun)
	def get_ResourceOwnerAccount(self):
		return self.get_query_params().get('ResourceOwnerAccount')
	def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
		self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
	def get_OwnerAccount(self):
		return self.get_query_params().get('OwnerAccount')
	def set_OwnerAccount(self,OwnerAccount):
		self.add_query_param('OwnerAccount',OwnerAccount)
	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')
	def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)
	def get_TransitRouterAttachmentId(self):
		return self.get_query_params().get('TransitRouterAttachmentId')
	def set_TransitRouterAttachmentId(self,TransitRouterAttachmentId):
		self.add_query_param('TransitRouterAttachmentId',TransitRouterAttachmentId)
	def get_TransitRouterAttachmentDescription(self):
		return self.get_query_params().get('TransitRouterAttachmentDescription')
	def set_TransitRouterAttachmentDescription(self,TransitRouterAttachmentDescription):
self.add_query_param('TransitRouterAttachmentDescription',TransitRouterAttachmentDescription) | 38.023256 | 100 | 0.795107 |
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class UpdateTransitRouterVbrAttachmentAttributeRequest(RpcRequest):
	"""CBN ``UpdateTransitRouterVbrAttachmentAttribute`` RPC request (API version 2017-09-12)."""
	def __init__(self):
		RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'UpdateTransitRouterVbrAttachmentAttribute','cbn')
		self.set_method('POST')
		# Populate endpoint data only when the installed SDK core exposes
		# these attributes.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	# Plain getter/setter pairs over the request's query-parameter dict.
	def get_ResourceOwnerId(self):
		return self.get_query_params().get('ResourceOwnerId')
	def set_ResourceOwnerId(self,ResourceOwnerId):
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)
	def get_ClientToken(self):
		return self.get_query_params().get('ClientToken')
	def set_ClientToken(self,ClientToken):
		self.add_query_param('ClientToken',ClientToken)
	def get_TransitRouterAttachmentName(self):
		return self.get_query_params().get('TransitRouterAttachmentName')
	def set_TransitRouterAttachmentName(self,TransitRouterAttachmentName):
		self.add_query_param('TransitRouterAttachmentName',TransitRouterAttachmentName)
	def get_DryRun(self):
		return self.get_query_params().get('DryRun')
	def set_DryRun(self,DryRun):
		self.add_query_param('DryRun',DryRun)
	def get_ResourceOwnerAccount(self):
		return self.get_query_params().get('ResourceOwnerAccount')
	def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
		self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
	def get_OwnerAccount(self):
		return self.get_query_params().get('OwnerAccount')
	def set_OwnerAccount(self,OwnerAccount):
		self.add_query_param('OwnerAccount',OwnerAccount)
	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')
	def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)
	def get_TransitRouterAttachmentId(self):
		return self.get_query_params().get('TransitRouterAttachmentId')
	def set_TransitRouterAttachmentId(self,TransitRouterAttachmentId):
		self.add_query_param('TransitRouterAttachmentId',TransitRouterAttachmentId)
	def get_TransitRouterAttachmentDescription(self):
		return self.get_query_params().get('TransitRouterAttachmentDescription')
	def set_TransitRouterAttachmentDescription(self,TransitRouterAttachmentDescription):
self.add_query_param('TransitRouterAttachmentDescription',TransitRouterAttachmentDescription) | true | true |
f715d9aaf4f384ba12cc7069add8806d2f40e71b | 552 | py | Python | 446.py | wilbertgeng/LeetCode_exercise | f00c08e0d28ffa88d61d4262c6d1f49f1fa91ebc | [
"MIT"
] | null | null | null | 446.py | wilbertgeng/LeetCode_exercise | f00c08e0d28ffa88d61d4262c6d1f49f1fa91ebc | [
"MIT"
] | null | null | null | 446.py | wilbertgeng/LeetCode_exercise | f00c08e0d28ffa88d61d4262c6d1f49f1fa91ebc | [
"MIT"
] | null | null | null | """446. Arithmetic Slices II - Subsequence"""
class Solution(object):
    def numberOfArithmeticSlices(self, A):
        """
        Count arithmetic subsequences of A with length >= 3.

        :type A: List[int]
        :rtype: int

        dp[i][k] counts "weak" arithmetic subsequences (length >= 2) that
        end at index i with common difference k.  Extending any of the
        dp[j][k] subsequences ending at j < i by A[i] yields one of
        length >= 3, so those extensions are accumulated into `total`.
        """
        # The original file never imported `collections`, which raises
        # NameError at runtime; import it locally to stay self-contained.
        import collections
        dp = [collections.defaultdict(int) for _ in range(len(A))]
        total = 0
        for i in range(len(A)):
            for j in range(i):
                k = A[i] - A[j]
                dp[i][k] += 1
                if k in dp[j]:
                    dp[i][k] += dp[j][k]
                    total += dp[j][k]
        return total
######
| 14.153846 | 66 | 0.405797 |
class Solution(object):
def numberOfArithmeticSlices(self, A):
dp = [collections.defaultdict(int) for _ in range(len(A))]
total = 0
for i in range(len(A)):
for j in range(i):
k = A[i] - A[j]
dp[i][k] += 1
if k in dp[j]:
dp[i][k] += dp[j][k]
total += dp[j][k]
return total
| true | true |
f715d9f86be99e54b40e4f20ec32ccf74e3c5ae7 | 1,497 | py | Python | ci/fireci/fireci/gradle.py | Elke26/firebase-android-sdk | 47d41b9dc17cd95a7799f672f5cc14f1747642ec | [
"Apache-2.0"
] | 1 | 2021-01-30T19:52:32.000Z | 2021-01-30T19:52:32.000Z | ci/fireci/fireci/gradle.py | Elke26/firebase-android-sdk | 47d41b9dc17cd95a7799f672f5cc14f1747642ec | [
"Apache-2.0"
] | 1 | 2019-03-01T19:54:34.000Z | 2019-03-01T19:58:03.000Z | ci/fireci/fireci/gradle.py | samtstern/firebase-android-sdk | ef399052f99019feb294746447e2bd8a5a6e81a4 | [
"Apache-2.0"
] | 1 | 2021-01-02T20:23:09.000Z | 2021-01-02T20:23:09.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import subprocess
import sys
from . import stats
_logger = logging.getLogger('fireci.gradle')
# adb install timeout, in minutes (applied via ADB_INSTALL_TIMEOUT in run()).
ADB_INSTALL_TIMEOUT = '5'
def P(name, value):
  """Returns name and value in the format of gradle's project property cli argument."""
  return f'-P{name}={value}'
@stats.measure_call('gradle')
def run(*args, gradle_opts='', workdir=None):
  """Invokes gradle with specified args and gradle_opts."""
  env = os.environ.copy()
  if gradle_opts:
    env['GRADLE_OPTS'] = gradle_opts
  # 5 minutes, rather than 2 minutes
  env['ADB_INSTALL_TIMEOUT'] = ADB_INSTALL_TIMEOUT
  stats.propagate_context_into(env)
  command = ['./gradlew', *args]
  where = workdir if workdir else '.'
  _logger.info('Executing gradle command: "%s" in directory: "%s"',
               ' '.join(command), where)
  return subprocess.check_call(command, cwd=workdir, env=env)
| 29.94 | 87 | 0.716099 |
import logging
import os
import subprocess
import sys
from . import stats
_logger = logging.getLogger('fireci.gradle')
ADB_INSTALL_TIMEOUT = '5'
def P(name, value):
return '-P{}={}'.format(name, value)
@stats.measure_call('gradle')
def run(*args, gradle_opts='', workdir=None):
new_env = dict(os.environ)
if gradle_opts:
new_env['GRADLE_OPTS'] = gradle_opts
new_env[
'ADB_INSTALL_TIMEOUT'] = ADB_INSTALL_TIMEOUT
stats.propagate_context_into(new_env)
command = ['./gradlew'] + list(args)
_logger.info('Executing gradle command: "%s" in directory: "%s"',
" ".join(command), workdir if workdir else '.')
return subprocess.check_call(
command,
cwd=workdir,
env=new_env,
)
| true | true |
f715da382fe2b6fe6c5bc405301456843f05fbef | 34,975 | py | Python | cmf/models/cmfsm.py | lidongyv/Explicit-Context-Mapping-for-Stereo-Matching | 9b2e63982daf5629045de0bf0694d8ccb111b2f1 | [
"Apache-2.0"
] | 1 | 2020-12-31T02:40:49.000Z | 2020-12-31T02:40:49.000Z | cmf/models/cmfsm.py | lidongyv/Explicit-Context-Mapping-for-Stereo-Matching | 9b2e63982daf5629045de0bf0694d8ccb111b2f1 | [
"Apache-2.0"
] | null | null | null | cmf/models/cmfsm.py | lidongyv/Explicit-Context-Mapping-for-Stereo-Matching | 9b2e63982daf5629045de0bf0694d8ccb111b2f1 | [
"Apache-2.0"
] | 1 | 2020-12-31T02:40:49.000Z | 2020-12-31T02:40:49.000Z | # -*- coding: utf-8 -*-
# @Author: yulidong
# @Date: 2018-07-17 10:44:43
# @Last Modified by: yulidong
# @Last Modified time: 2019-03-01 14:12:35
# -*- coding: utf-8 -*-
# @Author: lidong
# @Date: 2018-03-20 18:01:52
# @Last Modified by: yulidong
# @Last Modified time: 2018-07-16 22:16:14
import time
import torch
import numpy as np
import torch.nn as nn
import math
from math import ceil
from torch.autograd import Variable
from torch.nn.functional import cosine_similarity as cosine_s
from cmf import caffe_pb2
from cmf.models.utils import *
# Per-dataset architecture specification (only the 'scene' preset is defined).
rsn_specs = {
    'scene':
    {
        'n_classes': 9,
        'input_size': (540, 960),
        'block_config': [3, 4, 23, 3],
    },
}
# Group counts for the GroupNorm layers used throughout this file.
group_dim=32
pramid_dim=8  # (sic: "pyramid")
group_norm_group_num = 32
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
    """2-D convolution (no bias) followed by GroupNorm, with no activation.

    When dilation > 1 the dilation value itself is used as the padding
    instead of `pad`.
    """
    padding = dilation if dilation > 1 else pad
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                     stride=stride, padding=padding, dilation=dilation,
                     bias=False)
    norm = nn.GroupNorm(group_norm_group_num, out_planes)
    return nn.Sequential(conv, norm)
def convbn_3d(in_planes, out_planes, kernel_size, stride, pad):
    """3-D convolution (no bias) followed by GroupNorm, with no activation."""
    conv = nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size,
                     padding=pad, stride=stride, bias=False)
    norm = nn.GroupNorm(group_norm_group_num, out_planes)
    return nn.Sequential(conv, norm)
class BasicBlock(nn.Module):
    """Residual block: conv-GN-ReLU -> conv-GN plus an (optionally projected) skip.

    Note: unlike the canonical ResNet block, no ReLU is applied after the
    residual addition.
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride, downsample, pad, dilation):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Sequential(
            convbn(inplanes, planes, 3, stride, pad, dilation),
            nn.ReLU(inplace=True))
        self.conv2 = convbn(planes, planes, 3, 1, pad, dilation)
        # Projection applied to the identity path when shape changes;
        # None means a plain identity skip.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        if self.downsample is not None:
            x = self.downsample(x)
        out += x
        return out
class matchshifted(nn.Module):
    """Build one disparity plane of a cost volume.

    Zeroes the leftmost `shift` columns of `left`, translates `right` to the
    right by `shift` pixels (zero-padding its left edge), then concatenates
    the two along channels into a (B, 2*C, 1, H, W) slice.
    NOTE(review): the index tensors are hard-wired to .cuda(), so this module
    only runs on GPU inputs.
    """
    def __init__(self):
        super(matchshifted, self).__init__()
    def forward(self, left, right, shift):
        batch, filters, height, width = left.size()
        # Keep left's columns [shift, width) at their original positions;
        # the first `shift` columns become zeros.
        shifted_left = F.pad(
            torch.index_select(
                left, 3,
                Variable(torch.LongTensor(
                    [i for i in range(shift, width)])).cuda()),
            (shift, 0, 0, 0))
        # Shift right's columns [0, width-shift) rightwards by `shift`.
        shifted_right = F.pad(
            torch.index_select(
                right, 3,
                Variable(torch.LongTensor(
                    [i for i in range(width - shift)])).cuda()),
            (shift, 0, 0, 0))
        out = torch.cat((shifted_left, shifted_right), 1).view(
            batch, filters * 2, 1, height, width)
        return out
class disparityregression(nn.Module):
    """Soft-argmin disparity regression.

    Given a (B, maxdisp, H, W) volume of per-pixel probabilities over the
    disparity hypotheses 0..maxdisp-1, returns the expected disparity
    sum_d d * p(d) with shape (B, H, W).
    """

    def __init__(self, maxdisp):
        super().__init__()
        # Disparity indices shaped (1, D, 1, 1) so they broadcast against the
        # probability volume.  Kept as a plain tensor (not a Parameter) so it
        # stays out of state_dict, matching the original; the original also
        # hard-coded .cuda() here, which broke CPU execution, so the tensor is
        # instead moved to the input's device in forward().
        self.disp = torch.arange(maxdisp, dtype=torch.float32).view(
            1, maxdisp, 1, 1)

    def forward(self, x):
        # Broadcasting replaces the original explicit repeat() — numerically
        # identical, without materialising the repeated index volume.
        disp = self.disp.to(x.device)
        return torch.sum(x * disp, 1)
class feature_extraction(nn.Module):
    """Shared 2-D feature extractor with residual layers and pyramid pooling.

    Returns three tensors from forward(): the fused pyramid feature map,
    the output of layer1, and the raw first-conv features.
    """
    def __init__(self):
        super(feature_extraction, self).__init__()
        self.inplanes = 32
        # Full-resolution stem; note there is no norm/activation after the
        # final Conv2d here (secondconv starts with GroupNorm + ReLU).
        self.firstconv = nn.Sequential(
            convbn(3, 32, 3, 1, 1, 1),
            # nn.GroupNorm(group_dim, 32),
            nn.ReLU(inplace=True),
            convbn(32, 32, 3, 1, 1, 1),
            nn.ReLU(inplace=True),
            convbn(32, 32, 3, 1, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1, bias=False))
        # Downsamples by 2 (stride-2 convbn).
        self.secondconv = nn.Sequential(
            nn.GroupNorm(group_dim, 32),
            nn.ReLU(inplace=True),
            convbn(32, 32, 3, 2, 1, 1),
            nn.ReLU(inplace=True),
            convbn(32, 32, 3, 1, 1, 1),
            nn.ReLU(inplace=True))
        # Residual stages; layer2 downsamples again, layer4 uses dilation 2.
        self.layer1 = self._make_layer(BasicBlock, 32, 3, 1, 1, 1)
        self.layer2 = self._make_layer(BasicBlock, 64, 16, 2, 1, 1)
        self.layer3 = self._make_layer(BasicBlock, 128, 3, 1, 1, 1)
        self.layer4 = self._make_layer(BasicBlock, 128, 3, 1, 1, 2)
        # Pyramid pooling branches: average-pool at four scales, reduce to
        # 32 channels, then (in forward) upsample back to layer4's size.
        self.branch1 = nn.Sequential(
            nn.AvgPool2d((64, 64), stride=(64, 64)),
            convbn(128, 32, 1, 1, 0, 1),
            nn.ReLU(inplace=True))
        self.branch2 = nn.Sequential(
            nn.AvgPool2d((32, 32), stride=(32, 32)),
            convbn(128, 32, 1, 1, 0, 1),
            nn.ReLU(inplace=True))
        self.branch3 = nn.Sequential(
            nn.AvgPool2d((16, 16), stride=(16, 16)),
            convbn(128, 32, 1, 1, 0, 1),
            nn.ReLU(inplace=True))
        self.branch4 = nn.Sequential(
            nn.AvgPool2d((8, 8), stride=(8, 8)),
            convbn(128, 32, 1, 1, 0, 1),
            nn.ReLU(inplace=True))
        # Fuse the concatenated 320-channel stack down to 32 channels.
        self.lastconv = nn.Sequential(
            convbn(320, 128, 3, 1, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))
    def _make_layer(self, block, planes, blocks, stride, pad, dilation):
        """Stack `blocks` BasicBlocks; only the first may stride/project."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection so the skip connection matches shape.
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False),
                nn.GroupNorm(group_norm_group_num, planes * block.expansion),
            )
        layers = []
        layers.append(
            block(self.inplanes, planes, stride, downsample, pad, dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, 1, None, pad, dilation))
        return nn.Sequential(*layers)
    def forward(self, x):
        output_all = self.firstconv(x)
        output=self.secondconv(output_all)
        output_rt = self.layer1(output)
        output_raw = self.layer2(output_rt)
        output = self.layer3(output_raw)
        output_skip = self.layer4(output)
        # Each pooled branch is upsampled back to output_skip's spatial size.
        output_branch1 = self.branch1(output_skip)
        output_branch1 = F.interpolate(
            output_branch1, (output_skip.size()[2], output_skip.size()[3]),
            mode='bilinear',
            align_corners=False)
        output_branch2 = self.branch2(output_skip)
        output_branch2 = F.interpolate(
            output_branch2, (output_skip.size()[2], output_skip.size()[3]),
            mode='bilinear',
            align_corners=False)
        output_branch3 = self.branch3(output_skip)
        output_branch3 = F.interpolate(
            output_branch3, (output_skip.size()[2], output_skip.size()[3]),
            mode='bilinear',
            align_corners=False)
        output_branch4 = self.branch4(output_skip)
        output_branch4 = F.interpolate(
            output_branch4, (output_skip.size()[2], output_skip.size()[3]),
            mode='bilinear',
            align_corners=False)
        # 64 + 128 + 4*32 = 320 channels before the fusing convolutions.
        output_feature = torch.cat(
            (output_raw, output_skip, output_branch4, output_branch3,
             output_branch2, output_branch1), 1)
        output_feature = self.lastconv(output_feature)
        return output_feature, output_rt,output_all
class hourglass(nn.Module):
    """3-D encoder/decoder ("hourglass") over a cost volume.

    Two stride-2 3-D convolutions downsample, two transposed convolutions
    upsample, with additive skips: conv5 output is added to `presqu` (or the
    internal `pre`), and callers can feed `pre`/`post` of one hourglass into
    the next.  forward() returns (out, pre, post).
    """
    def __init__(self, inplanes):
        super().__init__()
        self.conv1 = nn.Sequential(
            convbn_3d(inplanes, inplanes * 2, kernel_size=3, stride=2, pad=1),
            nn.ReLU(inplace=True))
        self.conv2 = convbn_3d(
            inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1)
        self.conv3 = nn.Sequential(
            convbn_3d(
                inplanes * 2, inplanes * 2, kernel_size=3, stride=2, pad=1),
            nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(
            convbn_3d(
                inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1),
            nn.ReLU(inplace=True))
        self.conv5 = nn.Sequential(
            nn.ConvTranspose3d(
                inplanes * 2,
                inplanes * 2,
                kernel_size=3,
                padding=1,
                output_padding=1,
                stride=2,
                bias=False), nn.GroupNorm(group_norm_group_num,
                                          inplanes * 2))  # +conv2
        self.conv6 = nn.Sequential(
            nn.ConvTranspose3d(
                inplanes * 2,
                inplanes,
                kernel_size=3,
                padding=1,
                output_padding=(1,1,1),
                stride=2,
                bias=False), nn.GroupNorm(group_norm_group_num,
                                          inplanes))  # +x
    def forward(self, x, presqu, postsqu):
        out = self.conv1(x)  # in:1/4 out:1/8
        pre = self.conv2(out)  # in:1/8 out:1/8
        if postsqu is not None:
            pre = F.relu(pre + postsqu, inplace=True)
        else:
            pre = F.relu(pre, inplace=True)
        out = self.conv3(pre)  # in:1/8 out:1/16
        out = self.conv4(out)  # in:1/16 out:1/16
        if presqu is not None:
            post = F.relu(
                self.conv5(out) + presqu, inplace=True)  # in:1/16 out:1/8
        else:
            post = F.relu(self.conv5(out) + pre, inplace=True)
        out = self.conv6(post)  # in:1/8 out:1/4
        return out, pre, post
class similarity_measure1(nn.Module):
    """Per-pixel similarity scorer.

    A stack of 1x1 convolutions acts as a small per-pixel MLP that maps a
    66-channel input (low-res feature + high-res feature + 2-channel offset
    encoding) down to a single unbounded score: 66 -> 32 -> 16 -> 8 -> 1,
    with LeakyReLU between stages and no activation after the last conv.
    """

    def __init__(self):
        super(similarity_measure1, self).__init__()
        self.inplanes = 32
        self.conv0 = nn.Conv2d(66, 32, kernel_size=1, stride=1, padding=0,
                               bias=False, dilation=1)
        self.relu0 = nn.LeakyReLU(inplace=True)
        self.conv1 = nn.Conv2d(32, 16, kernel_size=1, stride=1, padding=0,
                               bias=False, dilation=1)
        self.relu1 = nn.LeakyReLU(inplace=True)
        self.conv2 = nn.Conv2d(16, 8, kernel_size=1, stride=1, padding=0,
                               bias=False, dilation=1)
        self.relu2 = nn.LeakyReLU(inplace=True)
        self.conv3 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0,
                               bias=False, dilation=1)
        # Kaiming init for all convs; unit/zero init for any group norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(module, nn.GroupNorm):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Map (B, 66, H, W) input to a (B, 1, H, W) score map."""
        for stage in (self.conv0, self.relu0,
                      self.conv1, self.relu1,
                      self.conv2, self.relu2,
                      self.conv3):
            x = stage(x)
        return x
class similarity_measure2(nn.Module):
    """Lightweight per-pixel scorer: 3 -> 3 -> 2 -> 1 channels via 1x1
    convolutions, with a LeakyReLU after every conv (including the last)."""

    def __init__(self):
        super(similarity_measure2, self).__init__()
        self.inplanes = 32
        self.conv0 = nn.Conv2d(3, 3, kernel_size=1, stride=1, padding=0,
                               bias=False, dilation=1)
        self.relu0 = nn.LeakyReLU(inplace=True)
        self.conv1 = nn.Conv2d(3, 2, kernel_size=1, stride=1, padding=0,
                               bias=False, dilation=1)
        self.relu1 = nn.LeakyReLU(inplace=True)
        self.conv2 = nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,
                               bias=False, dilation=1)
        self.relu2 = nn.LeakyReLU(inplace=True)
        # Same init scheme as similarity_measure1.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(module, nn.GroupNorm):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Map (B, 3, H, W) input to a (B, 1, H, W) score map."""
        for stage in (self.conv0, self.relu0,
                      self.conv1, self.relu1,
                      self.conv2, self.relu2):
            x = stage(x)
        return x
def matrix_generation():
    """Precompute relative-offset encodings for a fixed 4x4 upsampling tile.

    Returns nine (1, 2, 4, 4) float tensors on the GPU: a symmetric center
    encoding plus one per neighbour direction (left, right, top, bottom and
    the four diagonals). Channel 0 encodes horizontal offsets, channel 1
    vertical offsets.

    NOTE(review): the hard-coded ``scale=4`` must match the low-res/high-res
    ratio used by eight_related_context_mapping — confirm before reuse.
    """
    scale = 4
    # Offsets around the tile center with the zero entry removed,
    # e.g. for scale=4: [-2, -1, 1, 2].
    x = torch.arange(-scale//2, scale//2+1).float()
    x = torch.cat([x[:x.shape[0]//2], x[x.shape[0]//2+1:]]).unsqueeze(0)
    distance_matrix = x.expand(scale, scale).unsqueeze(0)
    # Two channels: horizontal offsets and their transpose (vertical).
    distance_matrix = torch.cat([distance_matrix, distance_matrix.transpose(2, 1)], 0)
    distance_matrix = distance_matrix.unsqueeze(0)
    # Independent copies whose direction-specific channels are overwritten below.
    distance_matrix1 = distance_matrix + 0
    distance_matrix2 = distance_matrix + 0
    distance_matrix3 = distance_matrix + 0
    distance_matrix4 = distance_matrix + 0
    distance_matrix5 = distance_matrix + 0
    distance_matrix6 = distance_matrix + 0
    distance_matrix7 = distance_matrix + 0
    distance_matrix8 = distance_matrix + 0
    # Horizontal ramp 1..scale, broadcast over rows.
    x = torch.arange(1, scale+1).float()
    x = x.expand(scale, scale).unsqueeze(0)
    distance_matrix1[:, 0, :, :] = scale-x+1  # distance to left neighbour
    distance_matrix2[:, 0, :, :] = x          # distance to right neighbour
    distance_matrix5[:, 0, :, :] = distance_matrix2[:, 0, :, :]
    distance_matrix6[:, 0, :, :] = distance_matrix1[:, 0, :, :]
    distance_matrix7[:, 0, :, :] = distance_matrix2[:, 0, :, :]
    distance_matrix8[:, 0, :, :] = distance_matrix1[:, 0, :, :]
    # Vertical ramp 1..scale, broadcast over columns.
    x = torch.arange(1, scale+1).float()
    x = x.expand(scale, scale).unsqueeze(0).transpose(2, 1)
    distance_matrix3[:, 1, :, :] = (scale-x+1)  # distance to top neighbour
    distance_matrix4[:, 1, :, :] = x            # distance to bottom neighbour
    distance_matrix5[:, 1, :, :] = distance_matrix3[:, 1, :, :]
    distance_matrix6[:, 1, :, :] = distance_matrix3[:, 1, :, :]
    distance_matrix7[:, 1, :, :] = distance_matrix4[:, 1, :, :]
    distance_matrix8[:, 1, :, :] = distance_matrix4[:, 1, :, :]
    return distance_matrix.cuda(), distance_matrix1.cuda(), distance_matrix2.cuda(), distance_matrix3.cuda(), distance_matrix4.cuda(), \
        distance_matrix5.cuda(), distance_matrix6.cuda(), distance_matrix7.cuda(), distance_matrix8.cuda()
class eight_related_context_mapping(nn.Module):
    """Predicts, for every high-res pixel, softmax weights over its own
    low-res cell and the eight neighbouring cells (8-connected context).

    Each direction's score is produced by one shared ``similarity_measure1``
    head applied to [upsampled low-res feature, high-res feature, offset
    encoding]; the nine shifted score maps are stacked and normalised with a
    softmax over the direction dimension.
    """

    def __init__(self):
        super(eight_related_context_mapping, self).__init__()
        self.similarity1 = similarity_measure1()
        # NOTE(review): sigmoid is registered but not used in forward below;
        # kept for checkpoint compatibility.
        self.sigmoid = nn.Sigmoid()
        self.distance_matrix, self.distance_matrix1, self.distance_matrix2, self.distance_matrix3, self.distance_matrix4, \
            self.distance_matrix5, self.distance_matrix6, self.distance_matrix7, self.distance_matrix8 = matrix_generation()

    def forward(self, lr_feature, hr_feature, lr_feature_r, hr_feature_r):
        """Return nine (B, 1, H, W) weight maps (center, l, r, t, b, lt, rt,
        lb, rb), summing to 1 per pixel.

        The target-view inputs ``lr_feature_r``/``hr_feature_r`` are accepted
        but unused in the current implementation.
        """
        # Offset-matrix preparation carries no gradient.
        with torch.no_grad():
            scale = hr_feature.shape[-1]//lr_feature.shape[-1]
            # Only even upsampling factors are supported.
            if scale % 2 != 0:
                exit()
            # Score of -100 on padded borders: effectively zero after softmax.
            padding1 = hr_feature[:, :1, :, :scale]*0-100  # one tile-column wide
            padding2 = hr_feature[:, :1, :scale, :]*0-100  # one tile-row high
            # Tile the per-cell offset encodings over the whole image.
            distance_matrix = self.distance_matrix.repeat(hr_feature.shape[0], 1, hr_feature.shape[-2]//scale, hr_feature.shape[-1]//scale).float()
            distance_matrix1 = self.distance_matrix1.repeat(hr_feature.shape[0], 1, hr_feature.shape[-2]//scale, hr_feature.shape[-1]//scale).float()
            distance_matrix2 = self.distance_matrix2.repeat(hr_feature.shape[0], 1, hr_feature.shape[-2]//scale, hr_feature.shape[-1]//scale).float()
            distance_matrix3 = self.distance_matrix3.repeat(hr_feature.shape[0], 1, hr_feature.shape[-2]//scale, hr_feature.shape[-1]//scale).float()
            distance_matrix4 = self.distance_matrix4.repeat(hr_feature.shape[0], 1, hr_feature.shape[-2]//scale, hr_feature.shape[-1]//scale).float()
            # NOTE(review): the four diagonal matrices below are rebuilt from
            # self.distance_matrix1-4, NOT self.distance_matrix5-8 produced by
            # matrix_generation(); this looks like a copy-paste slip — confirm
            # against the intended diagonal offset encodings.
            distance_matrix5 = self.distance_matrix1.repeat(hr_feature.shape[0], 1, hr_feature.shape[-2]//scale, hr_feature.shape[-1]//scale).float()
            distance_matrix6 = self.distance_matrix2.repeat(hr_feature.shape[0], 1, hr_feature.shape[-2]//scale, hr_feature.shape[-1]//scale).float()
            distance_matrix7 = self.distance_matrix3.repeat(hr_feature.shape[0], 1, hr_feature.shape[-2]//scale, hr_feature.shape[-1]//scale).float()
            distance_matrix8 = self.distance_matrix4.repeat(hr_feature.shape[0], 1, hr_feature.shape[-2]//scale, hr_feature.shape[-1]//scale).float()
        # Nearest-neighbour upsample the low-res features to high-res size
        # (each low-res cell repeated over its scale x scale tile).
        # NOTE(review): indentation reconstructed — placed outside no_grad so
        # gradients reach the feature extractor; confirm against upstream repo.
        lr_feature = lr_feature.unsqueeze(-1).expand(lr_feature.shape[0], lr_feature.shape[1], lr_feature.shape[2], lr_feature.shape[3], scale) \
            .contiguous().view(lr_feature.shape[0], lr_feature.shape[1], lr_feature.shape[2], lr_feature.shape[3]*scale) \
            .unsqueeze(-2).expand(lr_feature.shape[0], lr_feature.shape[1], lr_feature.shape[2], scale, lr_feature.shape[3]*scale) \
            .contiguous().view(lr_feature.shape[0], lr_feature.shape[1], lr_feature.shape[2]*scale, lr_feature.shape[3]*scale)
        # Center cell.
        representation = torch.cat([lr_feature, hr_feature, distance_matrix], 1)
        weight = self.similarity1(representation)
        # Left neighbour: shift by one tile, score, re-pad to full width.
        representation_l = torch.cat([lr_feature[:, :, :, :-scale], hr_feature[:, :, :, scale:], distance_matrix1[:, :, :, :-scale]], 1)
        weight_l = self.similarity1(representation_l)
        weight_l = torch.cat([padding1, weight_l], -1)
        # Right neighbour.
        representation_r = torch.cat([lr_feature[:, :, :, scale:], hr_feature[:, :, :, :-scale], distance_matrix2[:, :, :, scale:]], 1)
        weight_r = self.similarity1(representation_r)
        weight_r = torch.cat([weight_r, padding1], -1)
        # Top neighbour.
        representation_t = torch.cat([lr_feature[:, :, :-scale, :], hr_feature[:, :, scale:, :], distance_matrix3[:, :, :-scale, :]], 1)
        weight_t = self.similarity1(representation_t)
        weight_t = torch.cat([padding2, weight_t], -2)
        # Bottom neighbour.
        representation_b = torch.cat([lr_feature[:, :, scale:, :], hr_feature[:, :, :-scale, :], distance_matrix4[:, :, scale:, :]], 1)
        weight_b = self.similarity1(representation_b)
        weight_b = torch.cat([weight_b, padding2], -2)
        # Left-top diagonal.
        representation_lt = torch.cat([lr_feature[:, :, :-scale, :-scale], hr_feature[:, :, scale:, scale:], distance_matrix5[:, :, :-scale, :-scale]], 1)
        weight_lt = self.similarity1(representation_lt)
        weight_lt = torch.cat([padding2, torch.cat([padding1[..., scale:, :], weight_lt], -1)], -2)
        # Right-top diagonal.
        representation_rt = torch.cat([lr_feature[:, :, :-scale, scale:], hr_feature[:, :, scale:, :-scale], distance_matrix6[:, :, :-scale, scale:]], 1)
        weight_rt = self.similarity1(representation_rt)
        weight_rt = torch.cat([padding2, torch.cat([weight_rt, padding1[..., scale:, :]], -1)], -2)
        # Left-bottom diagonal.
        representation_lb = torch.cat([lr_feature[:, :, scale:, :-scale], hr_feature[:, :, :-scale:, scale:], distance_matrix7[:, :, scale:, :-scale]], 1)
        weight_lb = self.similarity1(representation_lb)
        weight_lb = torch.cat([torch.cat([padding1[..., scale:, :], weight_lb], -1), padding2], -2)
        # Right-bottom diagonal.
        representation_rb = torch.cat([lr_feature[:, :, scale:, scale:], hr_feature[:, :, :-scale, :-scale], distance_matrix8[:, :, scale:, scale:]], 1)
        weight_rb = self.similarity1(representation_rb)
        weight_rb = torch.cat([torch.cat([weight_rb, padding1[..., :-scale, :]], -1), padding2], -2)
        # Normalise the nine direction scores per pixel.
        weight_all = torch.cat([weight, weight_l, weight_r, weight_t, weight_b, weight_lt, weight_rt, weight_lb, weight_rb], dim=1)
        weight_norm = F.softmax(weight_all, dim=1)
        # Debug guard: all-negative top scores indicate a degenerate head.
        if torch.mean(torch.topk(weight_all, 1, dim=1)[0].float()) < 0:
            print(torch.mean(torch.topk(weight_all, 3, dim=1)[0].float()), torch.mean(torch.topk(weight_all, 3, dim=1)[1].float()))
            print(torch.mean(torch.topk(weight_all, 1, dim=1)[0].float()), torch.mean(torch.topk(weight_all, 1, dim=1)[1].float()))
        return weight_norm[:, 0:1, ...], \
            weight_norm[:, 1:2, ...], \
            weight_norm[:, 2:3, ...], \
            weight_norm[:, 3:4, ...], \
            weight_norm[:, 4:5, ...], \
            weight_norm[:, 5:6, ...], \
            weight_norm[:, 6:7, ...], \
            weight_norm[:, 7:8, ...], \
            weight_norm[:, 8:9, ...]
class cmfsm(nn.Module):
    """Stereo disparity network (PSMNet-style) with context-mapping upsampling.

    Pipeline: shared feature extraction on both views -> concatenation cost
    volume over ``maxdisp // scale`` disparities -> three stacked 3-D
    hourglasses -> soft-argmin disparity regression -> upsampling of each
    intermediate prediction with the nine context-mapping weight maps.
    Returns three disparity maps (deep supervision during training).
    """

    def __init__(self,
                 maxdisp=192):
        super(cmfsm, self).__init__()
        self.maxdisp = maxdisp  # maximum disparity at full resolution
        self.feature_extraction = feature_extraction()
        # Cost-volume pre-processing (3-D convs on the 64-channel concat volume).
        self.dres0 = nn.Sequential(
            convbn_3d(64, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            convbn_3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True))
        self.dres1 = nn.Sequential(
            convbn_3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            convbn_3d(32, 32, 3, 1, 1))
        # Three stacked hourglasses for cost regularisation.
        self.dres2 = hourglass(32)
        self.dres3 = hourglass(32)
        self.dres4 = hourglass(32)
        # One classification head per hourglass output (deep supervision).
        self.classif1 = nn.Sequential(
            convbn_3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
        self.classif2 = nn.Sequential(
            convbn_3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
        self.classif3 = nn.Sequential(
            convbn_3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
        self.mapping_matrix = eight_related_context_mapping()
        # He-style init; BatchNorm branches are kept although this model
        # uses GroupNorm (they simply never match).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1] * \
                    m.kernel_size[2] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, left, right):
        """Compute three full-resolution disparity maps for a stereo pair."""
        start = time.time()
        # Shared-weight feature extraction; `all_feature` is full resolution,
        # `refimg_fea` is the downsampled matching feature.
        refimg_fea, half, all_feature = self.feature_extraction(left)
        targetimg_fea, _, all_feature_right = self.feature_extraction(right)
        scale = all_feature.shape[-1]//refimg_fea.shape[-1]
        # Nine per-pixel upsampling weights from the context mapping module.
        weight, weight_l, weight_r, weight_t, weight_b, weight_lt, weight_rt, weight_lb, weight_rb = self.mapping_matrix(refimg_fea, all_feature, targetimg_fea, all_feature_right)
        # Build the concatenation cost volume: for each candidate disparity i,
        # pair left features with right features shifted i columns.
        cost = Variable(
            torch.FloatTensor(refimg_fea.size()[0],
                              refimg_fea.size()[1] * 2, self.maxdisp // scale,
                              refimg_fea.size()[2],
                              refimg_fea.size()[3]).zero_()).cuda()
        for i in range(self.maxdisp // scale):
            if i > 0:
                cost[:, :refimg_fea.size()[1], i, :, i:] = refimg_fea[:, :, :,
                                                                      i:]
                cost[:, refimg_fea.size()[1]:, i, :,
                     i:] = targetimg_fea[:, :, :, :-i]
            else:
                cost[:, :refimg_fea.size()[1], i, :, :] = refimg_fea
                cost[:, refimg_fea.size()[1]:, i, :, :] = targetimg_fea
        cost = cost.contiguous()
        # Regularisation: residual pre-processing + three hourglasses, each
        # with a residual connection back to cost0 (PSMNet stacking scheme).
        cost0 = self.dres0(cost)
        cost0 = self.dres1(cost0) + cost0
        out1, pre1, post1 = self.dres2(cost0, None, None)
        out1 = out1 + cost0
        out2, pre2, post2 = self.dres3(out1, pre1, post1)
        out2 = out2 + cost0
        out3, pre3, post3 = self.dres4(out2, pre1, post2)
        out3 = out3 + cost0
        cost1 = self.classif1(out1)
        # --- prediction 1: soft-argmin then context-weighted upsampling ---
        cost1 = torch.squeeze(cost1, 1)
        pred1 = F.softmax(cost1, dim=1)
        pred1 = disparityregression(self.maxdisp//scale)(pred1)
        # Nearest-neighbour upsample by `scale` and rescale disparity units.
        pred1 = scale*pred1.unsqueeze(-1).expand(pred1.shape[0], pred1.shape[1], pred1.shape[2], scale) \
            .contiguous().view(pred1.shape[0], pred1.shape[1], pred1.shape[2]*scale) \
            .unsqueeze(-2).expand(pred1.shape[0], pred1.shape[1], scale, pred1.shape[2]*scale) \
            .contiguous().view(pred1.shape[0], pred1.shape[1]*scale, pred1.shape[2]*scale)
        # Blend each pixel with its 8 shifted neighbours using mapping weights.
        pred1_map = pred1*weight
        pred1_map[..., scale:] += pred1[..., :-scale]*weight_l[..., scale:]
        pred1_map[..., :-scale] += pred1[..., scale:]*weight_r[..., :-scale]
        pred1_map[..., scale:, :] += pred1[..., :-scale, :]*weight_t[..., scale:, :]
        pred1_map[..., :-scale, :] += pred1[..., scale:, :]*weight_b[..., :-scale, :]
        pred1_map[..., scale:, scale:] += pred1[..., :-scale, :-scale]*weight_lt[..., scale:, scale:]
        pred1_map[..., scale:, :-scale] += pred1[..., :-scale, scale:]*weight_rt[..., scale:, :-scale]
        pred1_map[..., :-scale, scale:] += pred1[..., scale:, :-scale]*weight_lb[..., :-scale, scale:]
        pred1_map[..., :-scale, :-scale] += pred1[..., scale:, scale:]*weight_rb[..., :-scale, :-scale]
        # --- prediction 2 (costs accumulate across heads) ---
        cost2 = self.classif2(out2)
        cost2 = torch.squeeze(cost2, 1)+cost1
        pred2 = F.softmax(cost2, dim=1)
        pred2 = disparityregression(self.maxdisp//scale)(pred2)
        pred2 = scale*pred2.unsqueeze(-1).expand(pred2.shape[0], pred2.shape[1], pred2.shape[2], scale) \
            .contiguous().view(pred2.shape[0], pred2.shape[1], pred2.shape[2]*scale) \
            .unsqueeze(-2).expand(pred2.shape[0], pred2.shape[1], scale, pred2.shape[2]*scale) \
            .contiguous().view(pred2.shape[0], pred2.shape[1]*scale, pred2.shape[2]*scale)
        pred2_map = pred2*weight
        pred2_map[..., scale:] += pred2[..., :-scale]*weight_l[..., scale:]
        pred2_map[..., :-scale] += pred2[..., scale:]*weight_r[..., :-scale]
        pred2_map[..., scale:, :] += pred2[..., :-scale, :]*weight_t[..., scale:, :]
        pred2_map[..., :-scale, :] += pred2[..., scale:, :]*weight_b[..., :-scale, :]
        pred2_map[..., scale:, scale:] += pred2[..., :-scale, :-scale]*weight_lt[..., scale:, scale:]
        pred2_map[..., scale:, :-scale] += pred2[..., :-scale, scale:]*weight_rt[..., scale:, :-scale]
        pred2_map[..., :-scale, scale:] += pred2[..., scale:, :-scale]*weight_lb[..., :-scale, scale:]
        pred2_map[..., :-scale, :-scale] += pred2[..., scale:, scale:]*weight_rb[..., :-scale, :-scale]
        # --- prediction 3 ---
        cost3 = self.classif3(out3)
        cost3 = torch.squeeze(cost3, 1)+cost2
        pred3 = F.softmax(cost3, dim=1)
        pred3 = disparityregression(self.maxdisp//scale)(pred3)
        pred3 = scale*pred3.unsqueeze(-1).expand(pred3.shape[0], pred3.shape[1], pred3.shape[2], scale) \
            .contiguous().view(pred3.shape[0], pred3.shape[1], pred3.shape[2]*scale) \
            .unsqueeze(-2).expand(pred3.shape[0], pred3.shape[1], scale, pred3.shape[2]*scale) \
            .contiguous().view(pred3.shape[0], pred3.shape[1]*scale, pred3.shape[2]*scale)
        pred3_map = pred3*weight
        pred3_map[..., scale:] += pred3[..., :-scale]*weight_l[..., scale:]
        pred3_map[..., :-scale] += pred3[..., scale:]*weight_r[..., :-scale]
        pred3_map[..., scale:, :] += pred3[..., :-scale, :]*weight_t[..., scale:, :]
        pred3_map[..., :-scale, :] += pred3[..., scale:, :]*weight_b[..., :-scale, :]
        pred3_map[..., scale:, scale:] += pred3[..., :-scale, :-scale]*weight_lt[..., scale:, scale:]
        pred3_map[..., scale:, :-scale] += pred3[..., :-scale, scale:]*weight_rt[..., scale:, :-scale]
        pred3_map[..., :-scale, scale:] += pred3[..., scale:, :-scale]*weight_lb[..., :-scale, scale:]
        pred3_map[..., :-scale, :-scale] += pred3[..., scale:, scale:]*weight_rb[..., :-scale, :-scale]
        return pred1_map, pred2_map, pred3_map
| 44.897304 | 185 | 0.587277 |
import time
import torch
import numpy as np
import torch.nn as nn
import math
from math import ceil
from torch.autograd import Variable
from torch.nn.functional import cosine_similarity as cosine_s
from cmf import caffe_pb2
from cmf.models.utils import *
# Per-dataset architecture specs (only 'scene' defined here; appears unused
# by the classes below — NOTE(review): confirm before removing).
rsn_specs = {
    'scene':
    {
        'n_classes': 9,
        'input_size': (540, 960),
        'block_config': [3, 4, 23, 3],
    },
}
# GroupNorm group counts used throughout the model.
group_dim = 32
pramid_dim = 8  # NOTE(review): 'pramid' spelling kept — the name is referenced as-is.
group_norm_group_num = 32
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
    """2-D convolution (no bias) followed by GroupNorm.

    For dilation > 1 the padding is set equal to the dilation so a 3x3
    kernel keeps the same spatial size as in the undilated case.
    """
    padding = dilation if dilation > 1 else pad
    conv = nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=kernel_size,
                     stride=stride,
                     padding=padding,
                     dilation=dilation,
                     bias=False)
    norm = nn.GroupNorm(group_norm_group_num, out_planes)
    return nn.Sequential(conv, norm)
def convbn_3d(in_planes, out_planes, kernel_size, stride, pad):
    """3-D convolution (no bias) followed by GroupNorm."""
    conv = nn.Conv3d(in_planes,
                     out_planes,
                     kernel_size=kernel_size,
                     padding=pad,
                     stride=stride,
                     bias=False)
    norm = nn.GroupNorm(group_norm_group_num, out_planes)
    return nn.Sequential(conv, norm)
class BasicBlock(nn.Module):
    """Residual block: two convbn units plus an optional downsample shortcut."""

    expansion = 1

    def __init__(self, inplanes, planes, stride, downsample, pad, dilation):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Sequential(
            convbn(inplanes, planes, 3, stride, pad, dilation),
            nn.ReLU(inplace=True))
        self.conv2 = convbn(planes, planes, 3, 1, pad, dilation)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut path: identity, or the provided downsample module.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.conv2(self.conv1(x))
        out += shortcut
        return out
class matchshifted(nn.Module):
    """Builds one disparity slice of a matching volume.

    For a disparity ``shift``, keeps columns [shift, width) of the left map
    and [0, width - shift) of the right map, left-pads both back to the
    original width, and concatenates them along the channel axis with a
    singleton disparity dimension inserted.
    """

    def __init__(self):
        super(matchshifted, self).__init__()

    def forward(self, left, right, shift):
        batch, filters, height, width = left.size()
        # Left view: drop the first `shift` columns, then left-pad by `shift`.
        shifted_left = F.pad(
            torch.index_select(
                left, 3,
                Variable(torch.LongTensor(
                    [i for i in range(shift, width)])).cuda()),
            (shift, 0, 0, 0))
        # Right view: drop the last `shift` columns, then left-pad as well.
        shifted_right = F.pad(
            torch.index_select(
                right, 3,
                Variable(torch.LongTensor(
                    [i for i in range(width - shift)])).cuda()),
            (shift, 0, 0, 0))
        # (B, 2C, 1, H, W): channel-concat with a unit disparity dimension.
        out = torch.cat((shifted_left, shifted_right), 1).view(
            batch, filters * 2, 1, height, width)
        return out
class disparityregression(nn.Module):
    """Soft-argmin disparity regression.

    Given per-pixel softmax probabilities over ``maxdisp`` disparity bins,
    returns the expected disparity (sum of bin index times probability).
    """

    def __init__(self, maxdisp):
        super().__init__()
        # Constant index ramp [0, maxdisp) with shape (1, maxdisp, 1, 1).
        self.disp = Variable(
            torch.Tensor(
                np.reshape(np.array(range(maxdisp)),
                           [1, maxdisp, 1, 1])).cuda(),
            requires_grad=False)

    def forward(self, x):
        # x: (B, maxdisp, H, W) probabilities; broadcast ramp over batch/space.
        disp = self.disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])
        out = torch.sum(x * disp, 1)
        return out
class feature_extraction(nn.Module):
    """2-D feature extractor with a spatial pyramid pooling head (PSMNet-style).

    forward() returns three tensors:
      * output_feature: 32-channel matching features at reduced resolution,
      * output_rt: the early layer1 features,
      * output_all: full-resolution features from the first conv stack.
    """

    def __init__(self):
        super(feature_extraction, self).__init__()
        self.inplanes = 32
        # Full-resolution stem (stride 1); its raw output is also returned.
        self.firstconv = nn.Sequential(
            convbn(3, 32, 3, 1, 1, 1),
            nn.ReLU(inplace=True),
            convbn(32, 32, 3, 1, 1, 1),
            nn.ReLU(inplace=True),
            convbn(32, 32, 3, 1, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1, bias=False))
        # Normalise + downsample the stem output by 2.
        self.secondconv = nn.Sequential(
            nn.GroupNorm(group_dim, 32),
            nn.ReLU(inplace=True),
            convbn(32, 32, 3, 2, 1, 1),
            nn.ReLU(inplace=True),
            convbn(32, 32, 3, 1, 1, 1),
            nn.ReLU(inplace=True))
        # Residual stages; layer2 downsamples again, layer4 uses dilation 2.
        self.layer1 = self._make_layer(BasicBlock, 32, 3, 1, 1, 1)
        self.layer2 = self._make_layer(BasicBlock, 64, 16, 2, 1, 1)
        self.layer3 = self._make_layer(BasicBlock, 128, 3, 1, 1, 1)
        self.layer4 = self._make_layer(BasicBlock, 128, 3, 1, 1, 2)
        # Spatial pyramid pooling branches at four pooling scales.
        self.branch1 = nn.Sequential(
            nn.AvgPool2d((64, 64), stride=(64, 64)),
            convbn(128, 32, 1, 1, 0, 1),
            nn.ReLU(inplace=True))
        self.branch2 = nn.Sequential(
            nn.AvgPool2d((32, 32), stride=(32, 32)),
            convbn(128, 32, 1, 1, 0, 1),
            nn.ReLU(inplace=True))
        self.branch3 = nn.Sequential(
            nn.AvgPool2d((16, 16), stride=(16, 16)),
            convbn(128, 32, 1, 1, 0, 1),
            nn.ReLU(inplace=True))
        self.branch4 = nn.Sequential(
            nn.AvgPool2d((8, 8), stride=(8, 8)),
            convbn(128, 32, 1, 1, 0, 1),
            nn.ReLU(inplace=True))
        # Fuse concatenated features (64+128+4*32=320) down to 32 channels.
        self.lastconv = nn.Sequential(
            convbn(320, 128, 3, 1, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))

    def _make_layer(self, block, planes, blocks, stride, pad, dilation):
        """Stack ``blocks`` residual blocks; a 1x1 conv shortcut is added
        when the stride or channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False),
                nn.GroupNorm(group_norm_group_num, planes * block.expansion),
            )
        layers = []
        layers.append(
            block(self.inplanes, planes, stride, downsample, pad, dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, 1, None, pad, dilation))
        return nn.Sequential(*layers)

    def forward(self, x):
        output_all = self.firstconv(x)
        output = self.secondconv(output_all)
        output_rt = self.layer1(output)
        output_raw = self.layer2(output_rt)
        output = self.layer3(output_raw)
        output_skip = self.layer4(output)
        # Each pyramid branch is pooled then bilinearly resized back to the
        # layer4 spatial size before concatenation.
        output_branch1 = self.branch1(output_skip)
        output_branch1 = F.interpolate(
            output_branch1, (output_skip.size()[2], output_skip.size()[3]),
            mode='bilinear',
            align_corners=False)
        output_branch2 = self.branch2(output_skip)
        output_branch2 = F.interpolate(
            output_branch2, (output_skip.size()[2], output_skip.size()[3]),
            mode='bilinear',
            align_corners=False)
        output_branch3 = self.branch3(output_skip)
        output_branch3 = F.interpolate(
            output_branch3, (output_skip.size()[2], output_skip.size()[3]),
            mode='bilinear',
            align_corners=False)
        output_branch4 = self.branch4(output_skip)
        output_branch4 = F.interpolate(
            output_branch4, (output_skip.size()[2], output_skip.size()[3]),
            mode='bilinear',
            align_corners=False)
        output_feature = torch.cat(
            (output_raw, output_skip, output_branch4, output_branch3,
             output_branch2, output_branch1), 1)
        output_feature = self.lastconv(output_feature)
        return output_feature, output_rt, output_all
class hourglass(nn.Module):
    """3-D hourglass used to regularise the cost volume: two stride-2
    encoder stages, two transposed-conv decoder stages, with skip tensors
    (``presqu``/``postsqu``) exchanged between stacked hourglasses."""

    def __init__(self, inplanes):
        super().__init__()
        self.conv1 = nn.Sequential(
            convbn_3d(inplanes, inplanes * 2, kernel_size=3, stride=2, pad=1),
            nn.ReLU(inplace=True))
        # No activation: summed with a skip tensor before the ReLU in forward.
        self.conv2 = convbn_3d(
            inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1)
        self.conv3 = nn.Sequential(
            convbn_3d(
                inplanes * 2, inplanes * 2, kernel_size=3, stride=2, pad=1),
            nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(
            convbn_3d(
                inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1),
            nn.ReLU(inplace=True))
        # Decoder stage 1: output is summed with the conv2 skip.
        self.conv5 = nn.Sequential(
            nn.ConvTranspose3d(
                inplanes * 2,
                inplanes * 2,
                kernel_size=3,
                padding=1,
                output_padding=1,
                stride=2,
                bias=False), nn.GroupNorm(group_norm_group_num,
                                          inplanes * 2))
        # Decoder stage 2: back to `inplanes` channels; caller adds input x.
        self.conv6 = nn.Sequential(
            nn.ConvTranspose3d(
                inplanes * 2,
                inplanes,
                kernel_size=3,
                padding=1,
                output_padding=(1, 1, 1),
                stride=2,
                bias=False), nn.GroupNorm(group_norm_group_num,
                                          inplanes))

    def forward(self, x, presqu, postsqu):
        """Return (out, pre, post); pre/post are skips for the next hourglass."""
        out = self.conv1(x)   # 1/4 -> 1/8
        pre = self.conv2(out)  # 1/8 -> 1/8
        if postsqu is not None:
            pre = F.relu(pre + postsqu, inplace=True)
        else:
            pre = F.relu(pre, inplace=True)
        out = self.conv3(pre)  # 1/8 -> 1/16
        out = self.conv4(out)  # 1/16 -> 1/16
        if presqu is not None:
            post = F.relu(
                self.conv5(out) + presqu, inplace=True)  # 1/16 -> 1/8
        else:
            post = F.relu(self.conv5(out) + pre, inplace=True)
        out = self.conv6(post)  # 1/8 -> 1/4
        return out, pre, post
class similarity_measure1(nn.Module):
    """Per-pixel similarity scorer: a 1x1-conv MLP mapping 66 input channels
    (features + offset encoding) to one unbounded score, 66 -> 32 -> 16 -> 8
    -> 1, with LeakyReLU between stages and none after the final conv."""

    def __init__(self):
        super(similarity_measure1, self).__init__()
        self.inplanes = 32
        self.conv0 = nn.Conv2d(66, 32, kernel_size=1, stride=1, padding=0,
                               bias=False, dilation=1)
        self.relu0 = nn.LeakyReLU(inplace=True)
        self.conv1 = nn.Conv2d(32, 16, kernel_size=1, stride=1, padding=0,
                               bias=False, dilation=1)
        self.relu1 = nn.LeakyReLU(inplace=True)
        self.conv2 = nn.Conv2d(16, 8, kernel_size=1, stride=1, padding=0,
                               bias=False, dilation=1)
        self.relu2 = nn.LeakyReLU(inplace=True)
        self.conv3 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0,
                               bias=False, dilation=1)
        # Kaiming init for convs; unit/zero init for any group norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(module, nn.GroupNorm):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Map (B, 66, H, W) input to a (B, 1, H, W) score map."""
        for stage in (self.conv0, self.relu0,
                      self.conv1, self.relu1,
                      self.conv2, self.relu2,
                      self.conv3):
            x = stage(x)
        return x
class similarity_measure2(nn.Module):
    """Lightweight per-pixel scorer: 3 -> 3 -> 2 -> 1 channels via 1x1
    convolutions, with a LeakyReLU after every conv (including the last)."""

    def __init__(self):
        super(similarity_measure2, self).__init__()
        self.inplanes = 32
        self.conv0 = nn.Conv2d(3, 3, kernel_size=1, stride=1, padding=0,
                               bias=False, dilation=1)
        self.relu0 = nn.LeakyReLU(inplace=True)
        self.conv1 = nn.Conv2d(3, 2, kernel_size=1, stride=1, padding=0,
                               bias=False, dilation=1)
        self.relu1 = nn.LeakyReLU(inplace=True)
        self.conv2 = nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,
                               bias=False, dilation=1)
        self.relu2 = nn.LeakyReLU(inplace=True)
        # Same init scheme as similarity_measure1.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(module, nn.GroupNorm):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Map (B, 3, H, W) input to a (B, 1, H, W) score map."""
        for stage in (self.conv0, self.relu0,
                      self.conv1, self.relu1,
                      self.conv2, self.relu2):
            x = stage(x)
        return x
def matrix_generation():
scale=4
x=torch.arange(-scale//2,scale//2+1).float()
x=torch.cat([x[:x.shape[0]//2],x[x.shape[0]//2+1:]]).unsqueeze(0)
distance_matrix=x.expand(scale,scale).unsqueeze(0)
distance_matrix=torch.cat([distance_matrix,distance_matrix.transpose(2,1)],0)
distance_matrix=distance_matrix.unsqueeze(0)
distance_matrix1=distance_matrix+0
distance_matrix2=distance_matrix+0
distance_matrix3=distance_matrix+0
distance_matrix4=distance_matrix+0
distance_matrix5=distance_matrix+0
distance_matrix6=distance_matrix+0
distance_matrix7=distance_matrix+0
distance_matrix8=distance_matrix+0
x=torch.arange(1,scale+1).float()
x=x.expand(scale,scale).unsqueeze(0)
distance_matrix1[:,0,:,:]=scale-x+1
distance_matrix2[:,0,:,:]=x
distance_matrix5[:,0,:,:]=distance_matrix2[:,0,:,:]
distance_matrix6[:,0,:,:]=distance_matrix1[:,0,:,:]
distance_matrix7[:,0,:,:]=distance_matrix2[:,0,:,:]
distance_matrix8[:,0,:,:]=distance_matrix1[:,0,:,:]
x=torch.arange(1,scale+1).float()
x=x.expand(scale,scale).unsqueeze(0).transpose(2,1)
distance_matrix3[:,1,:,:]=(scale-x+1)
distance_matrix4[:,1,:,:]=x
distance_matrix5[:,1,:,:]=distance_matrix3[:,1,:,:]
distance_matrix6[:,1,:,:]=distance_matrix3[:,1,:,:]
distance_matrix7[:,1,:,:]=distance_matrix4[:,1,:,:]
distance_matrix8[:,1,:,:]=distance_matrix4[:,1,:,:]
return distance_matrix.cuda(),distance_matrix1.cuda(),distance_matrix2.cuda(),distance_matrix3.cuda(),distance_matrix4.cuda(), \
distance_matrix5.cuda(),distance_matrix6.cuda(),distance_matrix7.cuda(),distance_matrix8.cuda()
class eight_related_context_mapping(nn.Module):
    """Score each high-res pixel against its 3x3 neighbourhood of low-res cells.

    For every high-resolution pixel the module evaluates a shared similarity
    network on the low-res cell the pixel falls into plus the 8 neighbouring
    cells (left/right/top/bottom and the four diagonals), and returns the nine
    softmax-normalised weight maps in the order
    (centre, l, r, t, b, lt, rt, lb, rb), each of shape (B, 1, H, W).
    """
    def __init__(self):
        super(eight_related_context_mapping,self).__init__()
        # Shared scoring net applied to [lr_feat, hr_feat, positional-encoding] stacks.
        self.similarity1=similarity_measure1()
        self.sigmoid=nn.Sigmoid()
        # Pre-computed (1, 2, scale, scale) positional encodings, one per direction.
        self.distance_matrix,self.distance_matrix1,self.distance_matrix2,self.distance_matrix3,self.distance_matrix4, \
            self.distance_matrix5,self.distance_matrix6,self.distance_matrix7,self.distance_matrix8=matrix_generation()
    def forward(self, lr_feature, hr_feature,lr_feature_r, hr_feature_r):
        """Return the nine normalised weight maps (``*_r`` inputs are unused here)."""
        with torch.no_grad():
            # Upsampling factor between the two feature maps; must be even.
            scale=hr_feature.shape[-1]//lr_feature.shape[-1]
            if scale%2!=0:
                exit()  # NOTE(review): hard process exit on bad shapes; consider raising
            # Large negative scores so border positions get ~0 weight after softmax.
            padding1=hr_feature[:,:1,:,:scale]*0-100
            padding2=hr_feature[:,:1,:scale,:]*0-100
            # Tile the per-block positional encodings over the full hr grid.
            distance_matrix=self.distance_matrix.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
            distance_matrix1=self.distance_matrix1.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
            distance_matrix2=self.distance_matrix2.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
            distance_matrix3=self.distance_matrix3.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
            distance_matrix4=self.distance_matrix4.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
            # BUG FIX: the diagonal directions previously re-tiled matrices 1-4,
            # leaving the dedicated diagonal encodings 5-8 (built by
            # matrix_generation and stored in __init__) unused.
            distance_matrix5=self.distance_matrix5.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
            distance_matrix6=self.distance_matrix6.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
            distance_matrix7=self.distance_matrix7.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
            distance_matrix8=self.distance_matrix8.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
        # Nearest-neighbour upsample the lr features to hr resolution
        # (expand along width, flatten, then expand along height, flatten).
        lr_feature=lr_feature.unsqueeze(-1).expand(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],lr_feature.shape[3],scale) \
            .contiguous().view(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],lr_feature.shape[3]*scale) \
            .unsqueeze(-2).expand(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],scale,lr_feature.shape[3]*scale) \
            .contiguous().view(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2]*scale,lr_feature.shape[3]*scale)
        # Centre cell.
        representation=torch.cat([lr_feature,hr_feature,distance_matrix],1)
        weight=self.similarity1(representation)
        # Left neighbour: shift lr features right by one cell, pad the first column.
        representation_l=torch.cat([lr_feature[:,:,:,:-scale],hr_feature[:,:,:,scale:],distance_matrix1[:,:,:,:-scale]],1)
        weight_l=self.similarity1(representation_l)
        weight_l=torch.cat([padding1,weight_l],-1)
        # Right neighbour.
        representation_r=torch.cat([lr_feature[:,:,:,scale:],hr_feature[:,:,:,:-scale],distance_matrix2[:,:,:,scale:]],1)
        weight_r=self.similarity1(representation_r)
        weight_r=torch.cat([weight_r,padding1],-1)
        # Top neighbour.
        representation_t=torch.cat([lr_feature[:,:,:-scale,:],hr_feature[:,:,scale:,:],distance_matrix3[:,:,:-scale,:]],1)
        weight_t=self.similarity1(representation_t)
        weight_t=torch.cat([padding2,weight_t],-2)
        # Bottom neighbour.
        representation_b=torch.cat([lr_feature[:,:,scale:,:],hr_feature[:,:,:-scale,:],distance_matrix4[:,:,scale:,:]],1)
        weight_b=self.similarity1(representation_b)
        weight_b=torch.cat([weight_b,padding2],-2)
        # Four diagonal neighbours; each padded on both shifted axes.
        representation_lt=torch.cat([lr_feature[:,:,:-scale,:-scale],hr_feature[:,:,scale:,scale:],distance_matrix5[:,:,:-scale,:-scale]],1)
        weight_lt=self.similarity1(representation_lt)
        weight_lt=torch.cat([padding2,torch.cat([padding1[...,scale:,:],weight_lt],-1)],-2)
        representation_rt=torch.cat([lr_feature[:,:,:-scale,scale:],hr_feature[:,:,scale:,:-scale],distance_matrix6[:,:,:-scale,scale:]],1)
        weight_rt=self.similarity1(representation_rt)
        weight_rt=torch.cat([padding2,torch.cat([weight_rt,padding1[...,scale:,:]],-1)],-2)
        # (fixed a stray ':' in the original ':-scale:' slice below)
        representation_lb=torch.cat([lr_feature[:,:,scale:,:-scale],hr_feature[:,:,:-scale,scale:],distance_matrix7[:,:,scale:,:-scale]],1)
        weight_lb=self.similarity1(representation_lb)
        weight_lb=torch.cat([torch.cat([padding1[...,scale:,:],weight_lb],-1),padding2],-2)
        representation_rb=torch.cat([lr_feature[:,:,scale:,scale:],hr_feature[:,:,:-scale,:-scale],distance_matrix8[:,:,scale:,scale:]],1)
        weight_rb=self.similarity1(representation_rb)
        weight_rb=torch.cat([torch.cat([weight_rb,padding1[...,:-scale,:]],-1),padding2],-2)
        # Normalise the nine direction scores per pixel.
        weight_all=torch.cat([weight,weight_l,weight_r,weight_t,weight_b,weight_lt,weight_rt,weight_lb,weight_rb],dim=1)
        weight_norm=F.softmax(weight_all, dim=1)
        # Debug probe: report statistics when even the best score is negative.
        if torch.mean(torch.topk(weight_all,1,dim=1)[0].float())<0:
            print(torch.mean(torch.topk(weight_all,3,dim=1)[0].float()),torch.mean(torch.topk(weight_all,3,dim=1)[1].float()))
            print(torch.mean(torch.topk(weight_all,1,dim=1)[0].float()),torch.mean(torch.topk(weight_all,1,dim=1)[1].float()))
        return weight_norm[:,0:1,...], \
            weight_norm[:,1:2,...], \
            weight_norm[:,2:3,...], \
            weight_norm[:,3:4,...], \
            weight_norm[:,4:5,...],\
            weight_norm[:,5:6,...], \
            weight_norm[:,6:7,...], \
            weight_norm[:,7:8,...], \
            weight_norm[:,8:9,...]
class cmfsm(nn.Module):
    """Coarse-to-fine stereo disparity network (PSMNet-style aggregation).

    Extracts features from both views, builds a concatenation cost volume at
    reduced resolution, aggregates it with three stacked hourglass modules,
    and regresses one disparity map per hourglass output.  Each low-res
    disparity map is nearest-neighbour upsampled and refined by blending every
    hr pixel with its 8 shifted neighbours using the weights from
    ``eight_related_context_mapping``.
    """
    def __init__(self,
                 maxdisp=192):
        super(cmfsm, self).__init__()
        # Maximum disparity searched, in full-resolution pixels.
        self.maxdisp = maxdisp
        self.feature_extraction = feature_extraction()
        # Initial cost-volume filtering: 64 -> 32 channels (3D convs).
        self.dres0 = nn.Sequential(
            convbn_3d(64, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            convbn_3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True))
        # Residual refinement block (added back to its input in forward()).
        self.dres1 = nn.Sequential(
            convbn_3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            convbn_3d(32, 32, 3, 1, 1))
        # Three stacked hourglass aggregation modules.
        self.dres2 = hourglass(32)
        self.dres3 = hourglass(32)
        self.dres4 = hourglass(32)
        # One 1-channel classification head per hourglass output.
        self.classif1 = nn.Sequential(
            convbn_3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
        self.classif2 = nn.Sequential(
            convbn_3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
        self.classif3 = nn.Sequential(
            convbn_3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
        # Produces the 9 per-direction blending weight maps used after upsampling.
        self.mapping_matrix=eight_related_context_mapping()
        # He-style initialisation for conv layers; BatchNorm to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1] * \
                    m.kernel_size[2] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, left, right):
        """Return three refined full-resolution disparity maps (pred1..3).

        ``left``/``right`` are the stereo image pair; requires CUDA.
        """
        start=time.time()  # NOTE(review): leftover timing probe; value is never used
        # Per-view features; ``all_feature`` is the high-resolution map used
        # by the context-mapping module.
        refimg_fea, half,all_feature= self.feature_extraction(left)
        targetimg_fea, _ ,all_feature_right= self.feature_extraction(right)
        # Downsampling factor of the cost volume relative to full resolution.
        scale=all_feature.shape[-1]//refimg_fea.shape[-1]
        weight,weight_l,weight_r,weight_t,weight_b,weight_lt,weight_rt,weight_lb,weight_rb=self.mapping_matrix(refimg_fea,all_feature,targetimg_fea,all_feature_right)
        # Concatenation cost volume: (B, 2C, D/scale, H/scale, W/scale).
        # (Variable is deprecated in modern PyTorch but kept for compatibility.)
        cost = Variable(
            torch.FloatTensor(refimg_fea.size()[0],
                              refimg_fea.size()[1] * 2, self.maxdisp // scale,
                              refimg_fea.size()[2],
                              refimg_fea.size()[3]).zero_()).cuda()
        # For each candidate disparity i, pair left features with right
        # features shifted i pixels; columns without a match stay zero.
        for i in range(self.maxdisp // scale):
            if i > 0:
                cost[:, :refimg_fea.size()[1], i, :, i:] = refimg_fea[:, :, :, i:]
                cost[:, refimg_fea.size()[1]:, i, :, i:] = targetimg_fea[:, :, :, :-i]
            else:
                cost[:, :refimg_fea.size()[1], i, :, :] = refimg_fea
                cost[:, refimg_fea.size()[1]:, i, :, :] = targetimg_fea
        cost = cost.contiguous()
        # Initial filtering plus residual refinement.
        cost0 = self.dres0(cost)
        cost0 = self.dres1(cost0) + cost0
        # Stacked hourglasses with skip connections back to cost0.
        out1, pre1, post1 = self.dres2(cost0, None, None)
        out1 = out1 + cost0
        out2, pre2, post2 = self.dres3(out1, pre1, post1)
        out2 = out2 + cost0
        out3, pre3, post3 = self.dres4(out2, pre1, post2)
        out3 = out3 + cost0
        # --- output 1: soft-argmin disparity regression at low resolution ---
        cost1 = self.classif1(out1)
        cost1 = torch.squeeze(cost1, 1)
        pred1 = F.softmax(cost1, dim=1)
        pred1 = disparityregression(self.maxdisp//scale)(pred1)
        # Nearest-neighbour upsample to full resolution and rescale values.
        pred1=scale*pred1.unsqueeze(-1).expand(pred1.shape[0],pred1.shape[1],pred1.shape[2],scale) \
            .contiguous().view(pred1.shape[0],pred1.shape[1],pred1.shape[2]*scale) \
            .unsqueeze(-2).expand(pred1.shape[0],pred1.shape[1],scale,pred1.shape[2]*scale) \
            .contiguous().view(pred1.shape[0],pred1.shape[1]*scale,pred1.shape[2]*scale)
        # Blend each pixel with its 8 shifted neighbours using the context weights.
        pred1_map=pred1*weight
        pred1_map[...,scale:]+=pred1[...,:-scale]*weight_l[...,scale:]
        pred1_map[...,:-scale]+=pred1[...,scale:]*weight_r[...,:-scale]
        pred1_map[...,scale:,:]+=pred1[...,:-scale,:]*weight_t[...,scale:,:]
        pred1_map[...,:-scale,:]+=pred1[...,scale:,:]*weight_b[...,:-scale,:]
        pred1_map[...,scale:,scale:]+=pred1[...,:-scale,:-scale]*weight_lt[...,scale:,scale:]
        pred1_map[...,scale:,:-scale]+=pred1[...,:-scale,scale:]*weight_rt[...,scale:,:-scale]
        pred1_map[...,:-scale,scale:]+=pred1[...,scale:,:-scale]*weight_lb[...,:-scale,scale:]
        pred1_map[...,:-scale,:-scale]+=pred1[...,scale:,scale:]*weight_rb[...,:-scale,:-scale]
        # --- output 2 (cost accumulates over the heads) ---
        cost2 = self.classif2(out2)
        cost2 = torch.squeeze(cost2, 1)+cost1
        pred2 = F.softmax(cost2, dim=1)
        pred2 = disparityregression(self.maxdisp//scale)(pred2)
        pred2=scale*pred2.unsqueeze(-1).expand(pred2.shape[0],pred2.shape[1],pred2.shape[2],scale) \
            .contiguous().view(pred2.shape[0],pred2.shape[1],pred2.shape[2]*scale) \
            .unsqueeze(-2).expand(pred2.shape[0],pred2.shape[1],scale,pred2.shape[2]*scale) \
            .contiguous().view(pred2.shape[0],pred2.shape[1]*scale,pred2.shape[2]*scale)
        pred2_map=pred2*weight
        pred2_map[...,scale:]+=pred2[...,:-scale]*weight_l[...,scale:]
        pred2_map[...,:-scale]+=pred2[...,scale:]*weight_r[...,:-scale]
        pred2_map[...,scale:,:]+=pred2[...,:-scale,:]*weight_t[...,scale:,:]
        pred2_map[...,:-scale,:]+=pred2[...,scale:,:]*weight_b[...,:-scale,:]
        pred2_map[...,scale:,scale:]+=pred2[...,:-scale,:-scale]*weight_lt[...,scale:,scale:]
        pred2_map[...,scale:,:-scale]+=pred2[...,:-scale,scale:]*weight_rt[...,scale:,:-scale]
        pred2_map[...,:-scale,scale:]+=pred2[...,scale:,:-scale]*weight_lb[...,:-scale,scale:]
        pred2_map[...,:-scale,:-scale]+=pred2[...,scale:,scale:]*weight_rb[...,:-scale,:-scale]
        # --- output 3 ---
        cost3 = self.classif3(out3)
        cost3 = torch.squeeze(cost3, 1)+cost2
        pred3 = F.softmax(cost3, dim=1)
        pred3 = disparityregression(self.maxdisp//scale)(pred3)
        pred3=scale*pred3.unsqueeze(-1).expand(pred3.shape[0],pred3.shape[1],pred3.shape[2],scale) \
            .contiguous().view(pred3.shape[0],pred3.shape[1],pred3.shape[2]*scale) \
            .unsqueeze(-2).expand(pred3.shape[0],pred3.shape[1],scale,pred3.shape[2]*scale) \
            .contiguous().view(pred3.shape[0],pred3.shape[1]*scale,pred3.shape[2]*scale)
        pred3_map=pred3*weight
        pred3_map[...,scale:]+=pred3[...,:-scale]*weight_l[...,scale:]
        pred3_map[...,:-scale]+=pred3[...,scale:]*weight_r[...,:-scale]
        pred3_map[...,scale:,:]+=pred3[...,:-scale,:]*weight_t[...,scale:,:]
        pred3_map[...,:-scale,:]+=pred3[...,scale:,:]*weight_b[...,:-scale,:]
        pred3_map[...,scale:,scale:]+=pred3[...,:-scale,:-scale]*weight_lt[...,scale:,scale:]
        pred3_map[...,scale:,:-scale]+=pred3[...,:-scale,scale:]*weight_rt[...,scale:,:-scale]
        pred3_map[...,:-scale,scale:]+=pred3[...,scale:,:-scale]*weight_lb[...,:-scale,scale:]
        pred3_map[...,:-scale,:-scale]+=pred3[...,scale:,scale:]*weight_rb[...,:-scale,:-scale]
        return pred1_map, pred2_map, pred3_map
| true | true |
f715da8614fe069c6b1f18f8b31418a56d1297bc | 2,432 | py | Python | kornia/augmentation/_3d/base.py | dichen-cd/kornia | dcd1c5e17cf4d2ae2db1f438c53245bba0afd93f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-02-10T02:02:06.000Z | 2022-02-10T02:02:06.000Z | kornia/augmentation/_3d/base.py | dichen-cd/kornia | dcd1c5e17cf4d2ae2db1f438c53245bba0afd93f | [
"ECL-2.0",
"Apache-2.0"
] | 14 | 2021-09-26T11:07:56.000Z | 2022-03-20T11:11:15.000Z | kornia/augmentation/_3d/base.py | dichen-cd/kornia | dcd1c5e17cf4d2ae2db1f438c53245bba0afd93f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import torch
import kornia
from kornia.augmentation.base import TensorWithTransformMat, _AugmentationBase
from kornia.augmentation.utils import _transform_input3d, _validate_input_dtype
class AugmentationBase3D(_AugmentationBase):
    r"""Base class for user-defined 3D augmentations.

    Subclasses must implement ``generate_parameters`` and ``apply_transform``;
    ``compute_transformation`` is additionally required when
    ``return_transform`` is True.

    Args:
        p: per-element probability of applying the augmentation to a batch.
        p_batch: probability of applying the augmentation to the whole batch.
        return_transform: if ``True``, also return the geometric transformation
            matrix applied to each input tensor. If ``False`` and the input is
            a tuple, the applied transformation is not concatenated.
        same_on_batch: apply one identical transformation across the batch.
    """
    def __check_batching__(self, input: TensorWithTransformMat):
        # Only (tensor, matrix) tuples need a consistency check.
        if not isinstance(input, tuple):
            return
        inp, mat = input
        ndim = len(inp.shape)
        if ndim == 5:
            # Batched volumetric input must come with a batched (3-dim) matrix
            # whose leading dimension matches the batch size.
            if len(mat.shape) != 3:
                raise AssertionError('Input tensor is in batch mode but transformation matrix is not')
            if mat.shape[0] != inp.shape[0]:
                raise AssertionError(
                    f"In batch dimension, input has {inp.shape[0]} but transformation matrix has {mat.shape[0]}"
                )
        elif ndim in (3, 4):
            # Non-batched input requires a plain 2-dim matrix.
            if len(mat.shape) != 2:
                raise AssertionError("Input tensor is in non-batch mode but transformation matrix is not")
        else:
            raise ValueError(f'Unrecognized output shape. Expected 3, 4 or 5, got {ndim}')
    def transform_tensor(self, input: torch.Tensor) -> torch.Tensor:
        """Convert any incoming (D, H, W), (C, D, H, W) and (B, C, D, H, W) into (B, C, D, H, W)."""
        accepted = [torch.float16, torch.float32, torch.float64]
        _validate_input_dtype(input, accepted_dtypes=accepted)
        return _transform_input3d(input)
    def identity_matrix(self, input) -> torch.Tensor:
        """Return 4x4 identity matrix."""
        return kornia.eye_like(4, input)
| 50.666667 | 116 | 0.665707 | import torch
import kornia
from kornia.augmentation.base import TensorWithTransformMat, _AugmentationBase
from kornia.augmentation.utils import _transform_input3d, _validate_input_dtype
class AugmentationBase3D(_AugmentationBase):
    """Base class for customized 3D augmentation implementations.

    Subclasses implement ``generate_parameters`` and ``apply_transform``;
    ``compute_transformation`` is only needed when ``return_transform`` is True.
    """
    def __check_batching__(self, input: TensorWithTransformMat):
        # Validate that a (tensor, transform-matrix) tuple is dimensionally consistent.
        if isinstance(input, tuple):
            inp, mat = input
            if len(inp.shape) == 5:
                # Batched input: matrix must be 3-dim with a matching batch size.
                if len(mat.shape) != 3:
                    raise AssertionError('Input tensor is in batch mode ' 'but transformation matrix is not')
                if mat.shape[0] != inp.shape[0]:
                    raise AssertionError(
                        f"In batch dimension, input has {inp.shape[0]} but transformation matrix has {mat.shape[0]}"
                    )
            elif len(inp.shape) in (3, 4):
                # Non-batched input: matrix must be a plain 2-dim matrix.
                if len(mat.shape) != 2:
                    raise AssertionError("Input tensor is in non-batch mode but transformation matrix is not")
            else:
                raise ValueError(f'Unrecognized output shape. Expected 3, 4 or 5, got {len(inp.shape)}')
    def transform_tensor(self, input: torch.Tensor) -> torch.Tensor:
        """Convert (D, H, W), (C, D, H, W) or (B, C, D, H, W) input into (B, C, D, H, W)."""
        _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])
        return _transform_input3d(input)
    def identity_matrix(self, input) -> torch.Tensor:
        """Return a 4x4 identity matrix shaped like ``input``'s batch."""
        return kornia.eye_like(4, input)
| true | true |
f715dbd84fff6809be6f2c5c95ceb8898c2ac604 | 2,563 | gyp | Python | binding.gyp | sumeetkakkar/node-krb5 | 3c13021b3fcd3be239d3c731455154910f4d03b6 | [
"BSD-3-Clause"
] | null | null | null | binding.gyp | sumeetkakkar/node-krb5 | 3c13021b3fcd3be239d3c731455154910f4d03b6 | [
"BSD-3-Clause"
] | null | null | null | binding.gyp | sumeetkakkar/node-krb5 | 3c13021b3fcd3be239d3c731455154910f4d03b6 | [
"BSD-3-Clause"
] | 1 | 2019-08-29T18:45:47.000Z | 2019-08-29T18:45:47.000Z | {
"targets": [{
"target_name": "krb5",
"sources": [
"./src/module.cc",
"./src/krb5_bind.cc",
"./src/gss_bind.cc",
"./src/base64.cc"
],
'cflags!': ['-fno-exceptions'],
'cflags_cc!': ['-fno-exceptions'],
'include_dirs': ["<!@(node -p \"require('node-addon-api').include\")"],
'dependencies': ["<!(node -p \"require('node-addon-api').gyp\")"],
'cflags!': ['-fno-exceptions'],
'cflags_cc!': ['-fno-exceptions'],
'defines': [
'NAPI_DISABLE_CPP_EXCEPTIONS'
],
"conditions": [
[
"OS=='win'",
{
"variables": {
"KRB_PATH": "/Program Files/MIT/Kerberos"
},
"include_dirs": ["<(KRB_PATH)/include", "<(KRB_PATH)/include/gssapi", "src"],
"conditions": [
[
"target_arch=='x64'",
{
"msvs_settings": {
"VCCLCompilerTool": {
"AdditionalOptions": ["/MP /EHsc"]
},
"VCLinkerTool": {
"AdditionalLibraryDirectories": ["<(KRB_PATH)/lib/amd64/"]
}
},
"libraries": ["-lkrb5_64.lib", "-lgssapi64.lib"]
}
],
[
"target_arch=='ia32'",
{
"msvs_settings": {
"VCCLCompilerTool": {
"AdditionalOptions": ["/MP /EHsc"]
},
"VCLinkerTool": {
"AdditionalLibraryDirectories": ["<(KRB_PATH)/lib/amd64/"]
}
},
"libraries": ["-lkrb5_32.lib", "-lgssapi32.lib"]
}
]
]
}
],
[
"OS!='win'",
{
"libraries": ["-lkrb5", "-lgssapi_krb5"]
}
]
]
}]
}
| 36.614286 | 98 | 0.285603 | {
"targets": [{
"target_name": "krb5",
"sources": [
"./src/module.cc",
"./src/krb5_bind.cc",
"./src/gss_bind.cc",
"./src/base64.cc"
],
'cflags!': ['-fno-exceptions'],
'cflags_cc!': ['-fno-exceptions'],
'include_dirs': ["<!@(node -p \"require('node-addon-api').include\")"],
'dependencies': ["<!(node -p \"require('node-addon-api').gyp\")"],
'cflags!': ['-fno-exceptions'],
'cflags_cc!': ['-fno-exceptions'],
'defines': [
'NAPI_DISABLE_CPP_EXCEPTIONS'
],
"conditions": [
[
"OS=='win'",
{
"variables": {
"KRB_PATH": "/Program Files/MIT/Kerberos"
},
"include_dirs": ["<(KRB_PATH)/include", "<(KRB_PATH)/include/gssapi", "src"],
"conditions": [
[
"target_arch=='x64'",
{
"msvs_settings": {
"VCCLCompilerTool": {
"AdditionalOptions": ["/MP /EHsc"]
},
"VCLinkerTool": {
"AdditionalLibraryDirectories": ["<(KRB_PATH)/lib/amd64/"]
}
},
"libraries": ["-lkrb5_64.lib", "-lgssapi64.lib"]
}
],
[
"target_arch=='ia32'",
{
"msvs_settings": {
"VCCLCompilerTool": {
"AdditionalOptions": ["/MP /EHsc"]
},
"VCLinkerTool": {
"AdditionalLibraryDirectories": ["<(KRB_PATH)/lib/amd64/"]
}
},
"libraries": ["-lkrb5_32.lib", "-lgssapi32.lib"]
}
]
]
}
],
[
"OS!='win'",
{
"libraries": ["-lkrb5", "-lgssapi_krb5"]
}
]
]
}]
}
| true | true |
f715dc50ff4886ddbbcf5f5817f1d0e1a2b60106 | 461 | py | Python | jdxapi/routes/__init__.py | jobdataexchange/jdx-api | 7815a6463de56423c3b4196648607c4ebe56828c | [
"Apache-2.0"
] | null | null | null | jdxapi/routes/__init__.py | jobdataexchange/jdx-api | 7815a6463de56423c3b4196648607c4ebe56828c | [
"Apache-2.0"
] | 9 | 2019-12-26T17:39:58.000Z | 2022-01-13T01:59:49.000Z | jdxapi/routes/__init__.py | jobdataexchange/jdx-api | 7815a6463de56423c3b4196648607c4ebe56828c | [
"Apache-2.0"
] | null | null | null | from jdxapi.routes.health import *
from jdxapi.routes.upload_job_description_file import *
from jdxapi.routes.upload_job_description_context import *
from jdxapi.routes.framework_recommendations import *
from jdxapi.routes.framework_selections import *
from jdxapi.routes.generate_job_schema_plus import *
from jdxapi.routes.get_score import *
from jdxapi.routes.match_table import *
from jdxapi.routes.user_actions import *
from jdxapi.routes.preview import *
| 41.909091 | 58 | 0.848156 | from jdxapi.routes.health import *
from jdxapi.routes.upload_job_description_file import *
from jdxapi.routes.upload_job_description_context import *
from jdxapi.routes.framework_recommendations import *
from jdxapi.routes.framework_selections import *
from jdxapi.routes.generate_job_schema_plus import *
from jdxapi.routes.get_score import *
from jdxapi.routes.match_table import *
from jdxapi.routes.user_actions import *
from jdxapi.routes.preview import *
| true | true |
f715deb9771158a547fcbbc301e8725ecd0fded2 | 3,896 | py | Python | Python/PyParsing/node_utils.py | Gjacquenot/training-material | 16b29962bf5683f97a1072d961dd9f31e7468b8d | [
"CC-BY-4.0"
] | 115 | 2015-03-23T13:34:42.000Z | 2022-03-21T00:27:21.000Z | Python/PyParsing/node_utils.py | Gjacquenot/training-material | 16b29962bf5683f97a1072d961dd9f31e7468b8d | [
"CC-BY-4.0"
] | 56 | 2015-02-25T15:04:26.000Z | 2022-01-03T07:42:48.000Z | Python/PyParsing/node_utils.py | Gjacquenot/training-material | 16b29962bf5683f97a1072d961dd9f31e7468b8d | [
"CC-BY-4.0"
] | 59 | 2015-11-26T11:44:51.000Z | 2022-03-21T00:27:22.000Z | #!/usr/bin/env python
'''module containing various functions for working with trees and nodes'''
from node_parser import NodeParser
import unittest
def depth(node):
    """Return the depth of the tree rooted at ``node`` (0 for an empty tree)."""
    if node is None:
        return 0
    if node.is_leaf():
        return 1
    return 1 + max(depth(child) for child in node.children())
def depth_first_iterator(node):
    """Yield ``node`` and all its descendants in pre-order (left to right).

    Yields nothing when ``node`` is ``None``.
    """
    if node is None:
        return
    yield node
    if not node.is_leaf():
        for index in range(node.nr_children()):
            for descendant in depth_first_iterator(node.child(index)):
                yield descendant
def nr_leaf_nodes(start_node):
    """Return the number of leaf nodes in the tree rooted at ``start_node``."""
    return sum(1 for node in depth_first_iterator(start_node) if node.is_leaf())
class DepthTest(unittest.TestCase):
    """Tests for depth() on trees parsed from their textual representation."""
    def test_empty_tree(self):
        parser = NodeParser()
        parser.parse('()')
        tree = parser.node()
        self.assertEqual(depth(tree), 0)
    def test_single_node(self):
        parser = NodeParser()
        parser.parse('(c1)')
        tree = parser.node()
        self.assertEqual(depth(tree), 1)
    def test_tree(self):
        # Branching tree: deepest path is c1 -> c3 -> c4/c5.
        parser = NodeParser()
        parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
        tree = parser.node()
        self.assertEqual(depth(tree), 3)
    def test_deep_tree(self):
        # Degenerate (linked-list shaped) tree of four nodes.
        parser = NodeParser()
        parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
        tree = parser.node()
        self.assertEqual(depth(tree), 4)
class DepthFirstIteratorTest(unittest.TestCase):
    """Tests that depth_first_iterator() yields nodes in pre-order."""
    def test_empty_tree(self):
        parser = NodeParser()
        parser.parse('()')
        tree = parser.node()
        nodes = []
        for node in depth_first_iterator(tree):
            nodes.append(node.name)
        self.assertEqual(nodes, [])
    def test_single_node(self):
        parser = NodeParser()
        parser.parse('(c1)')
        tree = parser.node()
        nodes = []
        for node in depth_first_iterator(tree):
            nodes.append(node.name)
        self.assertEqual(nodes, ['c1'])
    def test_tree(self):
        # Pre-order must visit a node before its children, left to right.
        parser = NodeParser()
        parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
        tree = parser.node()
        nodes = []
        for node in depth_first_iterator(tree):
            nodes.append(node.name)
        self.assertEqual(nodes, ['c1', 'c2', 'c3', 'c4', 'c5', 'c6'])
    def test_deep_tree(self):
        parser = NodeParser()
        parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
        tree = parser.node()
        nodes = []
        for node in depth_first_iterator(tree):
            nodes.append(node.name)
        self.assertEqual(nodes, ['c1', 'c2', 'c3', 'c4'])
class NrLeafsTest(unittest.TestCase):
    """Tests for nr_leaf_nodes() on parsed trees."""
    def test_empty_tree(self):
        parser = NodeParser()
        parser.parse('()')
        tree = parser.node()
        self.assertEqual(nr_leaf_nodes(tree), 0)
    def test_single_node(self):
        parser = NodeParser()
        parser.parse('(c1)')
        tree = parser.node()
        self.assertEqual(nr_leaf_nodes(tree), 1)
    def test_tree(self):
        # Leaves are c2, c4, c5 and c6.
        parser = NodeParser()
        parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
        tree = parser.node()
        self.assertEqual(nr_leaf_nodes(tree), 4)
    def test_deep_tree(self):
        # A chain has exactly one leaf.
        parser = NodeParser()
        parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
        tree = parser.node()
        self.assertEqual(nr_leaf_nodes(tree), 1)
if __name__ == '__main__':
    # Run the unit tests when executed as a script.
    unittest.main()
| 28.647059 | 74 | 0.566992 |
from node_parser import NodeParser
import unittest
def depth(node):
    """Return the depth of the tree rooted at ``node`` (0 for ``None``)."""
    if node is None:
        return 0
    elif node.is_leaf():
        return 1
    else:
        return 1 + max(list(map(depth, node.children())))
def depth_first_iterator(node):
    """Yield ``node`` and its descendants in pre-order, using an explicit stack."""
    if node is not None:
        # Stack entries are (node, index of next child); -1 marks "not yet yielded".
        node_stack = [(node, -1)]
        while len(node_stack) > 0:
            node, child_index = node_stack.pop()
            if child_index == -1:
                if not node.is_leaf():
                    node_stack.append((node, child_index + 1))
                yield node
            elif child_index < node.nr_children():
                # Re-push the parent for its next child, then descend into this child.
                node_stack.append((node, child_index + 1))
                node_stack.append((node.child(child_index), -1))
def nr_leaf_nodes(start_node):
    """Return the number of leaf nodes in the tree rooted at ``start_node``."""
    nr = 0
    for node in depth_first_iterator(start_node):
        if node.is_leaf():
            nr += 1
    return nr
class DepthTest(unittest.TestCase):
    """Tests for depth() on trees parsed from their textual representation."""
    def test_empty_tree(self):
        parser = NodeParser()
        parser.parse('()')
        tree = parser.node()
        self.assertEqual(depth(tree), 0)
    def test_single_node(self):
        parser = NodeParser()
        parser.parse('(c1)')
        tree = parser.node()
        self.assertEqual(depth(tree), 1)
    def test_tree(self):
        parser = NodeParser()
        parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
        tree = parser.node()
        self.assertEqual(depth(tree), 3)
    def test_deep_tree(self):
        parser = NodeParser()
        parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
        tree = parser.node()
        self.assertEqual(depth(tree), 4)
class DepthFirstIteratorTest(unittest.TestCase):
    """Tests that depth_first_iterator() yields nodes in pre-order."""
    def test_empty_tree(self):
        parser = NodeParser()
        parser.parse('()')
        tree = parser.node()
        nodes = []
        for node in depth_first_iterator(tree):
            nodes.append(node.name)
        self.assertEqual(nodes, [])
    def test_single_node(self):
        parser = NodeParser()
        parser.parse('(c1)')
        tree = parser.node()
        nodes = []
        for node in depth_first_iterator(tree):
            nodes.append(node.name)
        self.assertEqual(nodes, ['c1'])
    def test_tree(self):
        parser = NodeParser()
        parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
        tree = parser.node()
        nodes = []
        for node in depth_first_iterator(tree):
            nodes.append(node.name)
        self.assertEqual(nodes, ['c1', 'c2', 'c3', 'c4', 'c5', 'c6'])
    def test_deep_tree(self):
        parser = NodeParser()
        parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
        tree = parser.node()
        nodes = []
        for node in depth_first_iterator(tree):
            nodes.append(node.name)
        self.assertEqual(nodes, ['c1', 'c2', 'c3', 'c4'])
class NrLeafsTest(unittest.TestCase):
    """Tests for nr_leaf_nodes() on parsed trees."""
    def test_empty_tree(self):
        parser = NodeParser()
        parser.parse('()')
        tree = parser.node()
        self.assertEqual(nr_leaf_nodes(tree), 0)
    def test_single_node(self):
        parser = NodeParser()
        parser.parse('(c1)')
        tree = parser.node()
        self.assertEqual(nr_leaf_nodes(tree), 1)
    def test_tree(self):
        parser = NodeParser()
        parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
        tree = parser.node()
        self.assertEqual(nr_leaf_nodes(tree), 4)
    def test_deep_tree(self):
        parser = NodeParser()
        parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
        tree = parser.node()
        self.assertEqual(nr_leaf_nodes(tree), 1)
if __name__ == '__main__':
    # Run the unit tests when executed as a script.
    unittest.main()
| true | true |
f715df136e0602e32db3dc820ce6e68ce0ad5f80 | 1,342 | py | Python | LeetCodeSolver/pythonSolutions/from1to100/Solution74.py | ZeromaXHe/Learning-Platform | ec75c2dbd472a568d1cd482450cc471295659c62 | [
"Apache-2.0"
] | null | null | null | LeetCodeSolver/pythonSolutions/from1to100/Solution74.py | ZeromaXHe/Learning-Platform | ec75c2dbd472a568d1cd482450cc471295659c62 | [
"Apache-2.0"
] | null | null | null | LeetCodeSolver/pythonSolutions/from1to100/Solution74.py | ZeromaXHe/Learning-Platform | ec75c2dbd472a568d1cd482450cc471295659c62 | [
"Apache-2.0"
] | null | null | null | from typing import List
class Solution:
    """LeetCode 74. Search a 2D Matrix (medium; tags: array, binary search).

    The matrix rows are sorted ascending left-to-right and each row's first
    element is greater than the previous row's last element, so a staircase
    walk from the top-right corner decides membership in O(m + n).

    Constraints: m == len(matrix), n == len(matrix[0]), 1 <= m, n <= 100,
    -10^4 <= matrix[i][j], target <= 10^4.
    """
    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        """Return True iff ``target`` occurs in ``matrix``.

        Starts at the top-right corner: moving left strictly decreases the
        current value, moving down strictly increases it, so every step
        discards an entire row or column.
        """
        row = 0
        col = len(matrix[0]) - 1
        # Hoist the loop-invariant row count (the original comments note this
        # measurably speeds up the loop); also fetch each element only once.
        nr_rows = len(matrix)
        while row < nr_rows and col >= 0:
            value = matrix[row][col]
            if value == target:
                return True
            if value > target:
                col -= 1
            else:
                row += 1
        return False
| 25.807692 | 73 | 0.508197 | from typing import List
class Solution:
    """LeetCode 74. Search a 2D Matrix: rows sorted ascending, each row's first
    element greater than the previous row's last element."""
    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        """Staircase search from the top-right corner; O(m + n) time."""
        x = 0
        y = len(matrix[0]) - 1
        # Move left on larger values, down on smaller ones.
        while x < len(matrix) and y >= 0:
            if matrix[x][y] == target:
                return True
            elif matrix[x][y] > target:
                y -= 1
            else:
                x += 1
        return False
| true | true |
f715e0177b8228f28f6e785e8abfae78e9cd6435 | 10,435 | py | Python | google/ads/googleads/v9/services/services/customer_customizer_service/transports/grpc.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v9/services/services/customer_customizer_service/transports/grpc.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v9/services/services/customer_customizer_service/transports/grpc.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v9.services.types import customer_customizer_service
from .base import CustomerCustomizerServiceTransport, DEFAULT_CLIENT_INFO
class CustomerCustomizerServiceGrpcTransport(
CustomerCustomizerServiceTransport
):
"""gRPC backend transport for CustomerCustomizerService.
Service to manage customer customizer
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
address (Optionsl[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
def close(self):
self.grpc_channel.close()
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def mutate_customer_customizers(
self,
) -> Callable[
[customer_customizer_service.MutateCustomerCustomizersRequest],
customer_customizer_service.MutateCustomerCustomizersResponse,
]:
r"""Return a callable for the mutate customer customizers method over gRPC.
Creates, updates or removes customer customizers.
Operation statuses are returned.
Returns:
Callable[[~.MutateCustomerCustomizersRequest],
~.MutateCustomerCustomizersResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_customer_customizers" not in self._stubs:
self._stubs[
"mutate_customer_customizers"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v9.services.CustomerCustomizerService/MutateCustomerCustomizers",
request_serializer=customer_customizer_service.MutateCustomerCustomizersRequest.serialize,
response_deserializer=customer_customizer_service.MutateCustomerCustomizersResponse.deserialize,
)
return self._stubs["mutate_customer_customizers"]
__all__ = ("CustomerCustomizerServiceGrpcTransport",)
| 41.74 | 112 | 0.628079 |
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth
from google.auth import credentials as ga_credentials
from google.auth.transport.grpc import SslCredentials
import grpc
from google.ads.googleads.v9.services.types import customer_customizer_service
from .base import CustomerCustomizerServiceTransport, DEFAULT_CLIENT_INFO
class CustomerCustomizerServiceGrpcTransport(
CustomerCustomizerServiceTransport
):
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {}
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
def close(self):
self.grpc_channel.close()
@property
def grpc_channel(self) -> grpc.Channel:
return self._grpc_channel
@property
def mutate_customer_customizers(
self,
) -> Callable[
[customer_customizer_service.MutateCustomerCustomizersRequest],
customer_customizer_service.MutateCustomerCustomizersResponse,
]:
if "mutate_customer_customizers" not in self._stubs:
self._stubs[
"mutate_customer_customizers"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v9.services.CustomerCustomizerService/MutateCustomerCustomizers",
request_serializer=customer_customizer_service.MutateCustomerCustomizersRequest.serialize,
response_deserializer=customer_customizer_service.MutateCustomerCustomizersResponse.deserialize,
)
return self._stubs["mutate_customer_customizers"]
__all__ = ("CustomerCustomizerServiceGrpcTransport",)
| true | true |
f715e02ee65c789c8ebe66b9da527060e685ba50 | 475 | py | Python | examples/sw.py | khirotaka/testbed | e32384a3267d5282fb9f2df22597dfa7fb9aa17d | [
"MIT"
] | null | null | null | examples/sw.py | khirotaka/testbed | e32384a3267d5282fb9f2df22597dfa7fb9aa17d | [
"MIT"
] | 2 | 2020-08-09T06:26:51.000Z | 2020-08-10T01:08:28.000Z | examples/sw.py | khirotaka/testbed | e32384a3267d5282fb9f2df22597dfa7fb9aa17d | [
"MIT"
] | null | null | null | import time
import numpy as np
from testbed._rust import sliding_window
x = np.random.randn(5000, 5)
s = time.time()
rustout = sliding_window(x, 100, 1)
print("=" * 50)
print("Rust Speed: ", time.time() - s)
print(rustout.shape)
def sw(array, ws, over):
sl = len(array)
return [array[i:i+ws] for i in range(0, sl-ws, over)]
print("=" * 50)
s = time.time()
tmp = sw(x, 100, 1)
tmp = np.stack(tmp, 0)
print("Python Speed: ", time.time() - s)
print(tmp.shape)
| 17.592593 | 57 | 0.633684 | import time
import numpy as np
from testbed._rust import sliding_window
x = np.random.randn(5000, 5)
s = time.time()
rustout = sliding_window(x, 100, 1)
print("=" * 50)
print("Rust Speed: ", time.time() - s)
print(rustout.shape)
def sw(array, ws, over):
sl = len(array)
return [array[i:i+ws] for i in range(0, sl-ws, over)]
print("=" * 50)
s = time.time()
tmp = sw(x, 100, 1)
tmp = np.stack(tmp, 0)
print("Python Speed: ", time.time() - s)
print(tmp.shape)
| true | true |
f715e0b6071694faa85c51616ffa6eb6433f5b4c | 357 | py | Python | DMOJ/DMOPC/DMOPC_19_C5P3_Captivating_Construction_Challenge.py | Togohogo1/pg | ee3c36acde47769c66ee13a227762ee677591375 | [
"MIT"
] | null | null | null | DMOJ/DMOPC/DMOPC_19_C5P3_Captivating_Construction_Challenge.py | Togohogo1/pg | ee3c36acde47769c66ee13a227762ee677591375 | [
"MIT"
] | 1 | 2021-10-14T18:26:56.000Z | 2021-10-14T18:26:56.000Z | DMOJ/DMOPC/DMOPC_19_C5P3_Captivating_Construction_Challenge.py | Togohogo1/pg | ee3c36acde47769c66ee13a227762ee677591375 | [
"MIT"
] | 1 | 2021-08-06T03:39:55.000Z | 2021-08-06T03:39:55.000Z | def gcd(m, n):
if n == 0:
return m
return gcd(n, m%n)
ans = 0
H, V = map(int, input().split())
for x in range(H):
for y in range(1, V):
mx, my = y//gcd(x, y), x//gcd(x, y)
xx, yy = mx+x, my+y
while xx <= H and yy <= V:
ans += (H-xx) * (V-yy)
xx += mx
yy += my
print(ans)
| 16.227273 | 43 | 0.40056 | def gcd(m, n):
if n == 0:
return m
return gcd(n, m%n)
ans = 0
H, V = map(int, input().split())
for x in range(H):
for y in range(1, V):
mx, my = y//gcd(x, y), x//gcd(x, y)
xx, yy = mx+x, my+y
while xx <= H and yy <= V:
ans += (H-xx) * (V-yy)
xx += mx
yy += my
print(ans)
| true | true |
f715e0ba86825ddedbad3acbdb1a48496d9dfaa8 | 1,368 | py | Python | fetch_cast_html.py | nmaswood/tv_scraping | 91573df0ca9512ac1744cddc8635f681d8ed596a | [
"Apache-2.0"
] | null | null | null | fetch_cast_html.py | nmaswood/tv_scraping | 91573df0ca9512ac1744cddc8635f681d8ed596a | [
"Apache-2.0"
] | null | null | null | fetch_cast_html.py | nmaswood/tv_scraping | 91573df0ca9512ac1744cddc8635f681d8ed596a | [
"Apache-2.0"
] | null | null | null | from urllib.request import FancyURLopener
from bs4 import BeautifulSoup
from random import choice
import csv
from time import sleep
from urllib.parse import quote,unquote
import json
user_agents = [
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
'Opera/9.25 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9'
]
class MyOpener(FancyURLopener, object):
version = choice(user_agents)
myopener = MyOpener()
def _ids():
with open("meta_final.csv", 'r') as infile:
tv_reader = csv.reader(infile)
next(tv_reader)
return list(map(lambda x : x[-1], tv_reader))
def fetch_cast_data():
for index, _id in enumerate(_ids()):
print (index)
url ='http://www.imdb.com/title/{}/fullcredits?ref_=tt_ql_1'.format(_id)
try:
html = myopener.open(url).read()
except:
html = "error"
with open('data/' + _id + '.html', 'wb') as outfile:
outfile.write(html)
sleep(.5)
fetch_cast_data() | 31.090909 | 110 | 0.646199 | from urllib.request import FancyURLopener
from bs4 import BeautifulSoup
from random import choice
import csv
from time import sleep
from urllib.parse import quote,unquote
import json
user_agents = [
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
'Opera/9.25 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9'
]
class MyOpener(FancyURLopener, object):
version = choice(user_agents)
myopener = MyOpener()
def _ids():
with open("meta_final.csv", 'r') as infile:
tv_reader = csv.reader(infile)
next(tv_reader)
return list(map(lambda x : x[-1], tv_reader))
def fetch_cast_data():
for index, _id in enumerate(_ids()):
print (index)
url ='http://www.imdb.com/title/{}/fullcredits?ref_=tt_ql_1'.format(_id)
try:
html = myopener.open(url).read()
except:
html = "error"
with open('data/' + _id + '.html', 'wb') as outfile:
outfile.write(html)
sleep(.5)
fetch_cast_data() | true | true |
f715e119c6f7e84008328d68567e938b4668623f | 696 | py | Python | informacoes_emails.py | katianaz/GiftHelper | 1fbff4e7902c25950a5f50f04f0b2c834842ccbe | [
"MIT"
] | null | null | null | informacoes_emails.py | katianaz/GiftHelper | 1fbff4e7902c25950a5f50f04f0b2c834842ccbe | [
"MIT"
] | null | null | null | informacoes_emails.py | katianaz/GiftHelper | 1fbff4e7902c25950a5f50f04f0b2c834842ccbe | [
"MIT"
] | 1 | 2021-03-18T22:44:43.000Z | 2021-03-18T22:44:43.000Z | import pontuacao_categorias
import pandas as pd
nomes = []
nomes_presenteados = []
enderecos_emails = []
for p in range(len(pontuacao_categorias.tabela.index)):
nomes.append(pontuacao_categorias.tabela['3'][p])
nomes_presenteados.append(pontuacao_categorias.tabela['4'][p])
enderecos_emails.append(pontuacao_categorias.tabela['2'][p])
informacoes = {'Nome': nomes,
'Email': enderecos_emails,
'Presenteado': nomes_presenteados,
'Sugestoes': pontuacao_categorias.sugestoes}
infos = pd.DataFrame(informacoes, columns=['Nome', 'Email', 'Presenteado', 'Sugestoes'])
infos.to_csv('infos_emails.csv', encoding='latin-1')
| 33.142857 | 89 | 0.686782 | import pontuacao_categorias
import pandas as pd
nomes = []
nomes_presenteados = []
enderecos_emails = []
for p in range(len(pontuacao_categorias.tabela.index)):
nomes.append(pontuacao_categorias.tabela['3'][p])
nomes_presenteados.append(pontuacao_categorias.tabela['4'][p])
enderecos_emails.append(pontuacao_categorias.tabela['2'][p])
informacoes = {'Nome': nomes,
'Email': enderecos_emails,
'Presenteado': nomes_presenteados,
'Sugestoes': pontuacao_categorias.sugestoes}
infos = pd.DataFrame(informacoes, columns=['Nome', 'Email', 'Presenteado', 'Sugestoes'])
infos.to_csv('infos_emails.csv', encoding='latin-1')
| true | true |
f715e2b4af325720c565d744e3e3558d6ec968b2 | 11,243 | py | Python | bookworm/annotation/annotation_gui.py | mush42/bookworm | a4bdd89363137a89a1bed1e9e072de4fb55576fd | [
"MIT"
] | 18 | 2019-07-19T22:12:15.000Z | 2020-08-26T17:45:19.000Z | bookworm/annotation/annotation_gui.py | mush42/bookworm | a4bdd89363137a89a1bed1e9e072de4fb55576fd | [
"MIT"
] | 44 | 2019-07-15T10:17:00.000Z | 2020-07-26T11:22:53.000Z | bookworm/annotation/annotation_gui.py | mush42/bookworm | a4bdd89363137a89a1bed1e9e072de4fb55576fd | [
"MIT"
] | 9 | 2019-09-03T13:13:31.000Z | 2020-08-25T13:55:27.000Z | # coding: utf-8
import wx
from enum import IntEnum
from bookworm import speech
from bookworm.gui.settings import SettingsPanel
from bookworm.structured_text import TextRange
from bookworm.logger import logger
from .annotator import Bookmarker, NoteTaker, Quoter
from .annotation_dialogs import (
BookmarksViewer,
CommentsDialog,
QuotesDialog,
GenericAnnotationWithContentDialog,
)
log = logger.getChild(__name__)
class AnnotationSettingsPanel(SettingsPanel):
config_section = "annotation"
def addControls(self):
# Translators: the title of a group of controls in the
UIBox = self.make_static_box(_("Annotation"))
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Speak the bookmark when jumping"),
name="annotation.speak_bookmarks_on_jumping",
)
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Select the bookmarked line when jumping"),
name="annotation.select_bookmarked_line_on_jumping",
)
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Use visual styles to indicate annotations"),
name="annotation.use_visuals",
)
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Use sounds to indicate the presence of comments"),
name="annotation.play_sound_for_comments",
)
class AnnotationsMenuIds(IntEnum):
addBookmark = 241
addNamedBookmark = 242
addNote = 243
quoteSelection = 244
viewBookmarks = 245
class StatelessAnnotationsMenuIds(IntEnum):
viewNotes = 246
viewQuotes = 247
ANNOTATIONS_KEYBOARD_SHORTCUTS = {
AnnotationsMenuIds.addBookmark: "Ctrl-B",
AnnotationsMenuIds.addNamedBookmark: "Ctrl-Shift-B",
AnnotationsMenuIds.addNote: "Ctrl-M",
AnnotationsMenuIds.quoteSelection: "Ctrl-H",
}
class AnnotationMenu(wx.Menu):
"""Annotation menu."""
def __init__(self, service):
super().__init__()
self.service = service
self.view = service.view
self.reader = service.reader
# Add menu items
self.Append(
AnnotationsMenuIds.addBookmark,
# Translators: the label of an item in the application menubar
_("Add &Bookmark\tCtrl-B"),
# Translators: the help text of an item in the application menubar
_("Add a bookmark at the current position"),
)
self.Append(
AnnotationsMenuIds.addNamedBookmark,
# Translators: the label of an item in the application menubar
_("Add &Named Bookmark...\tCtrl-Shift-B"),
# Translators: the help text of an item in the application menubar
_("Add a named bookmark at the current position"),
)
self.Append(
AnnotationsMenuIds.addNote,
# Translators: the label of an item in the application menubar
_("Add Co&mment...\tCtrl-M"),
# Translators: the help text of an item in the application menubar
_("Add a comment at the current position"),
)
self.Append(
AnnotationsMenuIds.quoteSelection,
# Translators: the label of an item in the application menubar
_("&Highlight Selection\tCtrl-H"),
# Translators: the help text of an item in the application menubar
_("Highlight selected text and save it."),
)
self.Append(
AnnotationsMenuIds.viewBookmarks,
# Translators: the label of an item in the application menubar
_("Saved &Bookmarks..."),
# Translators: the help text of an item in the application menubar
_("View added bookmarks"),
)
self.Append(
StatelessAnnotationsMenuIds.viewNotes,
# Translators: the label of an item in the application menubar
_("Saved Co&mments..."),
# Translators: the help text of an item in the application menubar
_("View, edit, and remove comments."),
)
self.Append(
StatelessAnnotationsMenuIds.viewQuotes,
# Translators: the label of an item in the application menubar
_("Saved &Highlights..."),
# Translators: the help text of an item in the application menubar
_("View saved highlights."),
)
# Translators: the label of an item in the application menubar
# EventHandlers
self.view.Bind(
wx.EVT_MENU, self.onAddBookmark, id=AnnotationsMenuIds.addBookmark
)
self.view.Bind(
wx.EVT_MENU, self.onAddNamedBookmark, id=AnnotationsMenuIds.addNamedBookmark
)
self.view.Bind(wx.EVT_MENU, self.onAddNote, id=AnnotationsMenuIds.addNote)
self.view.Bind(
wx.EVT_MENU, self.onQuoteSelection, id=AnnotationsMenuIds.quoteSelection
)
self.view.Bind(
wx.EVT_MENU, self.onViewBookmarks, id=AnnotationsMenuIds.viewBookmarks
)
self.view.Bind(
wx.EVT_MENU, self.onViewNotes, id=StatelessAnnotationsMenuIds.viewNotes
)
self.view.Bind(
wx.EVT_MENU, self.onViewQuotes, id=StatelessAnnotationsMenuIds.viewQuotes
)
def _add_bookmark(self, name=""):
bookmarker = Bookmarker(self.reader)
insertionPoint = self.view.contentTextCtrl.GetInsertionPoint()
__, __, current_lino = self.view.contentTextCtrl.PositionToXY(insertionPoint)
count = 0
for bkm in bookmarker.get_for_page(self.reader.current_page):
__, __, lino = self.view.contentTextCtrl.PositionToXY(bkm.position)
if lino == current_lino:
count += 1
bookmarker.delete(bkm.id)
self.service.style_bookmark(self.view, bkm.position, enable=False)
if count and not name:
return speech.announce(_("Bookmark removed"))
Bookmarker(self.reader).create(title=name, position=insertionPoint)
# Translators: spoken message
speech.announce(_("Bookmark Added"))
self.service.style_bookmark(self.view, insertionPoint)
def onAddBookmark(self, event):
self._add_bookmark()
def onAddNamedBookmark(self, event):
bookmark_name = self.view.get_text_from_user(
# Translators: title of a dialog
_("Add Named Bookmark"),
# Translators: label of a text entry
_("Bookmark name:"),
)
if bookmark_name:
self._add_bookmark(bookmark_name)
def onAddNote(self, event):
_with_tags = wx.GetKeyState(wx.WXK_SHIFT)
insertionPoint = self.view.contentTextCtrl.GetInsertionPoint()
comment_text = self.view.get_text_from_user(
# Translators: the title of a dialog to add a comment
_("New Comment"),
# Translators: the label of an edit field to enter a comment
_("Comment:"),
style=wx.OK | wx.CANCEL | wx.TE_MULTILINE | wx.CENTER,
)
if not comment_text:
return
note = NoteTaker(self.reader).create(
title="", content=comment_text, position=insertionPoint
)
self.service.style_comment(self.view, insertionPoint)
if _with_tags:
# add tags
tags_text = self.view.get_text_from_user(
# Translators: title of a dialog
_("Tag Comment"),
# Translators: label of a text entry
_("Tags:"),
)
if tags_text:
for tag in tags_text.split():
note.tags.append(tag.strip())
NoteTaker.model.session.commit()
def onQuoteSelection(self, event):
_with_tags = wx.GetKeyState(wx.WXK_SHIFT)
quoter = Quoter(self.reader)
selected_text = self.view.contentTextCtrl.GetStringSelection()
if not selected_text:
return speech.announce(_("No selection"))
x, y = self.view.get_selection_range()
for q in quoter.get_for_page():
q_range = TextRange(q.start_pos, q.end_pos)
if (q_range.start == x) and (q_range.stop == y):
quoter.delete(q.id)
self.service.style_highlight(self.view, x, y, enable=False)
# Translators: spoken message
return speech.announce(_("Highlight removed"))
elif (q.start_pos < x) and (q.end_pos > y):
# Translators: spoken message
speech.announce(_("Already highlighted"))
return wx.Bell()
if (x in q_range) or (y in q_range):
if x not in q_range:
q.start_pos = x
q.session.commit()
self.service.style_highlight(self.view, x, q_range.stop)
return speech.announce(_("Highlight extended"))
elif y not in q_range:
q.end_pos = y
q.session.commit()
self.service.style_highlight(self.view, q_range.start, y)
# Translators: spoken message
return speech.announce(_("Highlight extended"))
quote = quoter.create(title="", content=selected_text, start_pos=x, end_pos=y)
# Translators: spoken message
speech.announce(_("Selection highlighted"))
self.service.style_highlight(self.view, x, y)
if _with_tags:
# add tags
tags_text = self.view.get_text_from_user(
# Translators: title of a dialog
_("Tag Highlight"),
# Translators: label of a text entry
_("Tags:"),
)
if tags_text:
for tag in tags_text.split():
quote.tags.append(tag.strip())
Quoter.model.session.commit()
def onViewBookmarks(self, event):
with BookmarksViewer(
parent=self.view,
reader=self.reader,
annotator=Bookmarker,
# Translators: the title of a dialog to view bookmarks
title=_("Bookmarks | {book}").format(book=self.reader.current_book.title),
) as dlg:
dlg.ShowModal()
def onViewNotes(self, event):
Dialog = (
CommentsDialog if self.reader.ready else GenericAnnotationWithContentDialog
)
with Dialog(
parent=self.view,
title=_("Comments"),
reader=self.reader,
annotator_cls=NoteTaker,
can_edit=True,
) as dlg:
dlg.ShowModal()
def onViewQuotes(self, event):
Dialog = (
QuotesDialog if self.reader.ready else GenericAnnotationWithContentDialog
)
with Dialog(
parent=self.view,
title=_("Highlights"),
reader=self.reader,
annotator_cls=Quoter,
) as dlg:
dlg.ShowModal()
| 37.228477 | 88 | 0.592102 |
import wx
from enum import IntEnum
from bookworm import speech
from bookworm.gui.settings import SettingsPanel
from bookworm.structured_text import TextRange
from bookworm.logger import logger
from .annotator import Bookmarker, NoteTaker, Quoter
from .annotation_dialogs import (
BookmarksViewer,
CommentsDialog,
QuotesDialog,
GenericAnnotationWithContentDialog,
)
log = logger.getChild(__name__)
class AnnotationSettingsPanel(SettingsPanel):
config_section = "annotation"
def addControls(self):
UIBox = self.make_static_box(_("Annotation"))
wx.CheckBox(
UIBox,
-1,
_("Speak the bookmark when jumping"),
name="annotation.speak_bookmarks_on_jumping",
)
wx.CheckBox(
UIBox,
-1,
_("Select the bookmarked line when jumping"),
name="annotation.select_bookmarked_line_on_jumping",
)
wx.CheckBox(
UIBox,
-1,
_("Use visual styles to indicate annotations"),
name="annotation.use_visuals",
)
wx.CheckBox(
UIBox,
-1,
_("Use sounds to indicate the presence of comments"),
name="annotation.play_sound_for_comments",
)
class AnnotationsMenuIds(IntEnum):
addBookmark = 241
addNamedBookmark = 242
addNote = 243
quoteSelection = 244
viewBookmarks = 245
class StatelessAnnotationsMenuIds(IntEnum):
viewNotes = 246
viewQuotes = 247
ANNOTATIONS_KEYBOARD_SHORTCUTS = {
AnnotationsMenuIds.addBookmark: "Ctrl-B",
AnnotationsMenuIds.addNamedBookmark: "Ctrl-Shift-B",
AnnotationsMenuIds.addNote: "Ctrl-M",
AnnotationsMenuIds.quoteSelection: "Ctrl-H",
}
class AnnotationMenu(wx.Menu):
def __init__(self, service):
super().__init__()
self.service = service
self.view = service.view
self.reader = service.reader
self.Append(
AnnotationsMenuIds.addBookmark,
_("Add &Bookmark\tCtrl-B"),
_("Add a bookmark at the current position"),
)
self.Append(
AnnotationsMenuIds.addNamedBookmark,
_("Add &Named Bookmark...\tCtrl-Shift-B"),
_("Add a named bookmark at the current position"),
)
self.Append(
AnnotationsMenuIds.addNote,
_("Add Co&mment...\tCtrl-M"),
_("Add a comment at the current position"),
)
self.Append(
AnnotationsMenuIds.quoteSelection,
_("&Highlight Selection\tCtrl-H"),
_("Highlight selected text and save it."),
)
self.Append(
AnnotationsMenuIds.viewBookmarks,
_("Saved &Bookmarks..."),
_("View added bookmarks"),
)
self.Append(
StatelessAnnotationsMenuIds.viewNotes,
_("Saved Co&mments..."),
_("View, edit, and remove comments."),
)
self.Append(
StatelessAnnotationsMenuIds.viewQuotes,
_("Saved &Highlights..."),
_("View saved highlights."),
)
self.view.Bind(
wx.EVT_MENU, self.onAddBookmark, id=AnnotationsMenuIds.addBookmark
)
self.view.Bind(
wx.EVT_MENU, self.onAddNamedBookmark, id=AnnotationsMenuIds.addNamedBookmark
)
self.view.Bind(wx.EVT_MENU, self.onAddNote, id=AnnotationsMenuIds.addNote)
self.view.Bind(
wx.EVT_MENU, self.onQuoteSelection, id=AnnotationsMenuIds.quoteSelection
)
self.view.Bind(
wx.EVT_MENU, self.onViewBookmarks, id=AnnotationsMenuIds.viewBookmarks
)
self.view.Bind(
wx.EVT_MENU, self.onViewNotes, id=StatelessAnnotationsMenuIds.viewNotes
)
self.view.Bind(
wx.EVT_MENU, self.onViewQuotes, id=StatelessAnnotationsMenuIds.viewQuotes
)
def _add_bookmark(self, name=""):
bookmarker = Bookmarker(self.reader)
insertionPoint = self.view.contentTextCtrl.GetInsertionPoint()
__, __, current_lino = self.view.contentTextCtrl.PositionToXY(insertionPoint)
count = 0
for bkm in bookmarker.get_for_page(self.reader.current_page):
__, __, lino = self.view.contentTextCtrl.PositionToXY(bkm.position)
if lino == current_lino:
count += 1
bookmarker.delete(bkm.id)
self.service.style_bookmark(self.view, bkm.position, enable=False)
if count and not name:
return speech.announce(_("Bookmark removed"))
Bookmarker(self.reader).create(title=name, position=insertionPoint)
speech.announce(_("Bookmark Added"))
self.service.style_bookmark(self.view, insertionPoint)
def onAddBookmark(self, event):
self._add_bookmark()
def onAddNamedBookmark(self, event):
bookmark_name = self.view.get_text_from_user(
_("Add Named Bookmark"),
_("Bookmark name:"),
)
if bookmark_name:
self._add_bookmark(bookmark_name)
def onAddNote(self, event):
_with_tags = wx.GetKeyState(wx.WXK_SHIFT)
insertionPoint = self.view.contentTextCtrl.GetInsertionPoint()
comment_text = self.view.get_text_from_user(
_("New Comment"),
_("Comment:"),
style=wx.OK | wx.CANCEL | wx.TE_MULTILINE | wx.CENTER,
)
if not comment_text:
return
note = NoteTaker(self.reader).create(
title="", content=comment_text, position=insertionPoint
)
self.service.style_comment(self.view, insertionPoint)
if _with_tags:
tags_text = self.view.get_text_from_user(
_("Tag Comment"),
_("Tags:"),
)
if tags_text:
for tag in tags_text.split():
note.tags.append(tag.strip())
NoteTaker.model.session.commit()
def onQuoteSelection(self, event):
_with_tags = wx.GetKeyState(wx.WXK_SHIFT)
quoter = Quoter(self.reader)
selected_text = self.view.contentTextCtrl.GetStringSelection()
if not selected_text:
return speech.announce(_("No selection"))
x, y = self.view.get_selection_range()
for q in quoter.get_for_page():
q_range = TextRange(q.start_pos, q.end_pos)
if (q_range.start == x) and (q_range.stop == y):
quoter.delete(q.id)
self.service.style_highlight(self.view, x, y, enable=False)
return speech.announce(_("Highlight removed"))
elif (q.start_pos < x) and (q.end_pos > y):
speech.announce(_("Already highlighted"))
return wx.Bell()
if (x in q_range) or (y in q_range):
if x not in q_range:
q.start_pos = x
q.session.commit()
self.service.style_highlight(self.view, x, q_range.stop)
return speech.announce(_("Highlight extended"))
elif y not in q_range:
q.end_pos = y
q.session.commit()
self.service.style_highlight(self.view, q_range.start, y)
return speech.announce(_("Highlight extended"))
quote = quoter.create(title="", content=selected_text, start_pos=x, end_pos=y)
speech.announce(_("Selection highlighted"))
self.service.style_highlight(self.view, x, y)
if _with_tags:
tags_text = self.view.get_text_from_user(
_("Tag Highlight"),
_("Tags:"),
)
if tags_text:
for tag in tags_text.split():
quote.tags.append(tag.strip())
Quoter.model.session.commit()
def onViewBookmarks(self, event):
with BookmarksViewer(
parent=self.view,
reader=self.reader,
annotator=Bookmarker,
title=_("Bookmarks | {book}").format(book=self.reader.current_book.title),
) as dlg:
dlg.ShowModal()
def onViewNotes(self, event):
Dialog = (
CommentsDialog if self.reader.ready else GenericAnnotationWithContentDialog
)
with Dialog(
parent=self.view,
title=_("Comments"),
reader=self.reader,
annotator_cls=NoteTaker,
can_edit=True,
) as dlg:
dlg.ShowModal()
def onViewQuotes(self, event):
Dialog = (
QuotesDialog if self.reader.ready else GenericAnnotationWithContentDialog
)
with Dialog(
parent=self.view,
title=_("Highlights"),
reader=self.reader,
annotator_cls=Quoter,
) as dlg:
dlg.ShowModal()
| true | true |
f715e2db12bbd9d23ff08edf3785830ee8d31ab7 | 3,291 | py | Python | orders/models.py | pmaigutyak/mp-shop | 14ea67f71fd91a282d2070414924708214fc6464 | [
"0BSD"
] | 2 | 2018-03-14T11:32:36.000Z | 2021-09-25T14:31:36.000Z | orders/models.py | pmaigutyak/mp-shop | 14ea67f71fd91a282d2070414924708214fc6464 | [
"0BSD"
] | null | null | null | orders/models.py | pmaigutyak/mp-shop | 14ea67f71fd91a282d2070414924708214fc6464 | [
"0BSD"
] | null | null | null |
from django.apps import apps
from django.db import models
from django.conf import settings
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext_lazy as _
from exchange.models import format_printable_price, MultiCurrencyPrice
from delivery.models import DeliveryMethodField
from orders.constants import (
PAYMENT_METHODS,
ORDER_STATUSES,
ORDER_STATUS_NEW,
PAYMENT_METHOD_PRIVAT24
)
def _generate_hash():
    """Return a random 10-character string used as the default for Order.hash.

    Kept as a named module-level function (rather than a lambda) so Django
    migrations can serialize the default callable by reference.
    """
    return get_random_string(length=10)
class Order(models.Model):
    """A customer order: owner, status, payment/delivery choices, contact
    details and a short random hash usable as a public identifier."""
    # Owner is optional; kept (as NULL) if the user account is deleted.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, related_name='orders',
        verbose_name=_('Owner'), null=True, blank=True,
        on_delete=models.SET_NULL)
    status = models.CharField(
        _('Status'),
        max_length=100,
        choices=ORDER_STATUSES,
        default=ORDER_STATUS_NEW)
    payment_method = models.CharField(
        _('Payment method'),
        max_length=100,
        choices=PAYMENT_METHODS)
    delivery = DeliveryMethodField()
    first_name = models.CharField(_('First name'), max_length=255)
    last_name = models.CharField(_('Last name'), max_length=255)
    middle_name = models.CharField(
        _('Middle name'), max_length=255, blank=True)
    address = models.CharField(_('Address'), max_length=255, blank=True)
    mobile = models.CharField(_('Mobile number'), max_length=255)
    created = models.DateTimeField(
        _('Date created'), auto_now_add=True, editable=False)
    comment = models.TextField(_('Comment'), max_length=1000, blank=True)
    # Random 10-char token; unique, so order URLs need not expose the pk.
    hash = models.CharField(
        max_length=10,
        default=_generate_hash,
        unique=True)
    def __str__(self):
        return self.printable_name
    @property
    def printable_name(self):
        """Human-readable label, e.g. "Order #42"."""
        return '{} #{}'.format(_('Order'), self.id)
    @property
    def full_name(self):
        """Customer name as "last first middle" (middle may be empty)."""
        return '{} {} {}'.format(
            self.last_name, self.first_name, self.middle_name)
    @property
    def total(self):
        """Sum of item subtotals. NOTE: triggers a query per access."""
        return sum([i.subtotal for i in self.items.all()])
    @property
    def printable_total(self):
        """Order total formatted as a price string."""
        return format_printable_price(self.total)
    @property
    def delivery_method(self):
        """Display name of the chosen delivery method."""
        return self.delivery.name
    def is_liqpay_payment(self):
        """True when the pay-now flow applies and the liqpay app is installed."""
        return self.is_paynow_form_visible() and apps.is_installed('liqpay')
    def is_paynow_form_visible(self):
        """True when the order is paid via Privat24 (online payment)."""
        return self.payment_method == PAYMENT_METHOD_PRIVAT24
    class Meta:
        verbose_name = _('Order')
        verbose_name_plural = _('Orders')
class OrderedProduct(MultiCurrencyPrice):
    """A line item of an Order: product, quantity and (inherited) price.

    The unit price fields come from MultiCurrencyPrice — presumably
    providing ``self.price``; confirm against the exchange app.
    """
    order = models.ForeignKey(
        Order,
        verbose_name=_('Order'),
        related_name='items',
        on_delete=models.CASCADE)
    product = models.ForeignKey(
        'products.Product',
        verbose_name=_('Product'),
        related_name='order_items',
        on_delete=models.CASCADE)
    qty = models.PositiveIntegerField(_('Quantity'), default=1)
    def __str__(self):
        return str(self.product)
    @property
    def subtotal(self):
        """Line total: unit price times quantity."""
        return self.price * self.qty
    # NOTE(review): plain method, unlike Order.printable_total which is a
    # property — callers must use parentheses here. Kept as-is for
    # backward compatibility.
    def printable_subtotal(self):
        """Line total formatted as a price string."""
        return format_printable_price(self.subtotal)
    class Meta:
        verbose_name = _('Ordered product')
        verbose_name_plural = _('Ordered products')
| 25.710938 | 76 | 0.673959 |
from django.apps import apps
from django.db import models
from django.conf import settings
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext_lazy as _
from exchange.models import format_printable_price, MultiCurrencyPrice
from delivery.models import DeliveryMethodField
from orders.constants import (
PAYMENT_METHODS,
ORDER_STATUSES,
ORDER_STATUS_NEW,
PAYMENT_METHOD_PRIVAT24
)
def _generate_hash():
return get_random_string(length=10)
class Order(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='orders',
verbose_name=_('Owner'), null=True, blank=True,
on_delete=models.SET_NULL)
status = models.CharField(
_('Status'),
max_length=100,
choices=ORDER_STATUSES,
default=ORDER_STATUS_NEW)
payment_method = models.CharField(
_('Payment method'),
max_length=100,
choices=PAYMENT_METHODS)
delivery = DeliveryMethodField()
first_name = models.CharField(_('First name'), max_length=255)
last_name = models.CharField(_('Last name'), max_length=255)
middle_name = models.CharField(
_('Middle name'), max_length=255, blank=True)
address = models.CharField(_('Address'), max_length=255, blank=True)
mobile = models.CharField(_('Mobile number'), max_length=255)
created = models.DateTimeField(
_('Date created'), auto_now_add=True, editable=False)
comment = models.TextField(_('Comment'), max_length=1000, blank=True)
hash = models.CharField(
max_length=10,
default=_generate_hash,
unique=True)
def __str__(self):
return self.printable_name
@property
def printable_name(self):
return '{} #{}'.format(_('Order'), self.id)
@property
def full_name(self):
return '{} {} {}'.format(
self.last_name, self.first_name, self.middle_name)
@property
def total(self):
return sum([i.subtotal for i in self.items.all()])
@property
def printable_total(self):
return format_printable_price(self.total)
@property
def delivery_method(self):
return self.delivery.name
def is_liqpay_payment(self):
return self.is_paynow_form_visible() and apps.is_installed('liqpay')
def is_paynow_form_visible(self):
return self.payment_method == PAYMENT_METHOD_PRIVAT24
class Meta:
verbose_name = _('Order')
verbose_name_plural = _('Orders')
class OrderedProduct(MultiCurrencyPrice):
order = models.ForeignKey(
Order,
verbose_name=_('Order'),
related_name='items',
on_delete=models.CASCADE)
product = models.ForeignKey(
'products.Product',
verbose_name=_('Product'),
related_name='order_items',
on_delete=models.CASCADE)
qty = models.PositiveIntegerField(_('Quantity'), default=1)
def __str__(self):
return str(self.product)
@property
def subtotal(self):
return self.price * self.qty
def printable_subtotal(self):
return format_printable_price(self.subtotal)
class Meta:
verbose_name = _('Ordered product')
verbose_name_plural = _('Ordered products')
| true | true |
f715e307959616301e030cf3bce9da95242c350f | 2,917 | py | Python | sklearn/linear_model/__init__.py | emarkou/scikit-learn | d73822f84f2832dcc25f0ff58769f60871a78025 | [
"BSD-3-Clause"
] | 3 | 2019-11-18T13:47:42.000Z | 2021-08-22T23:37:47.000Z | sklearn/linear_model/__init__.py | emarkou/scikit-learn | d73822f84f2832dcc25f0ff58769f60871a78025 | [
"BSD-3-Clause"
] | 12 | 2021-03-06T23:42:46.000Z | 2021-04-04T00:10:42.000Z | sklearn/linear_model/__init__.py | emarkou/scikit-learn | d73822f84f2832dcc25f0ff58769f60871a78025 | [
"BSD-3-Clause"
] | 2 | 2017-06-27T12:40:35.000Z | 2021-08-22T23:37:35.000Z | """
The :mod:`sklearn.linear_model` module implements a variety of linear models.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from ._base import LinearRegression
from ._bayes import BayesianRidge, ARDRegression
from ._least_angle import (Lars, LassoLars, lars_path, lars_path_gram, LarsCV,
LassoLarsCV, LassoLarsIC)
from ._coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from ._glm import (PoissonRegressor,
GammaRegressor, TweedieRegressor)
from ._huber import HuberRegressor
from ._sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from ._stochastic_gradient import SGDClassifier, SGDRegressor, SGDOneClassSVM
from ._ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from ._logistic import LogisticRegression, LogisticRegressionCV
from ._omp import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV)
from ._passive_aggressive import PassiveAggressiveClassifier
from ._passive_aggressive import PassiveAggressiveRegressor
from ._perceptron import Perceptron
from ._ransac import RANSACRegressor
from ._theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'HuberRegressor',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SGDOneClassSVM',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lars_path_gram',
'lasso_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor',
'PoissonRegressor',
'GammaRegressor',
'TweedieRegressor']
| 35.573171 | 78 | 0.618101 |
from ._base import LinearRegression
from ._bayes import BayesianRidge, ARDRegression
from ._least_angle import (Lars, LassoLars, lars_path, lars_path_gram, LarsCV,
LassoLarsCV, LassoLarsIC)
from ._coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from ._glm import (PoissonRegressor,
GammaRegressor, TweedieRegressor)
from ._huber import HuberRegressor
from ._sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from ._stochastic_gradient import SGDClassifier, SGDRegressor, SGDOneClassSVM
from ._ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from ._logistic import LogisticRegression, LogisticRegressionCV
from ._omp import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV)
from ._passive_aggressive import PassiveAggressiveClassifier
from ._passive_aggressive import PassiveAggressiveRegressor
from ._perceptron import Perceptron
from ._ransac import RANSACRegressor
from ._theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'HuberRegressor',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SGDOneClassSVM',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lars_path_gram',
'lasso_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor',
'PoissonRegressor',
'GammaRegressor',
'TweedieRegressor']
| true | true |
f715e36e362ae80301e03af5d3ad4b2ac4a51e76 | 239 | py | Python | examples/pull_inbox_delivery_reports_ex.py | ubidreams/infobip-api-python-client | 3e585bf00565627bd7da46a2c8f10b860faaeb8b | [
"Apache-2.0"
] | null | null | null | examples/pull_inbox_delivery_reports_ex.py | ubidreams/infobip-api-python-client | 3e585bf00565627bd7da46a2c8f10b860faaeb8b | [
"Apache-2.0"
] | null | null | null | examples/pull_inbox_delivery_reports_ex.py | ubidreams/infobip-api-python-client | 3e585bf00565627bd7da46a2c8f10b860faaeb8b | [
"Apache-2.0"
] | null | null | null | from infobip.clients import get_received_messages
from __init__ import configuration
get_delivery_reports_client = get_received_messages(configuration)
response = get_delivery_reports_client.execute({"limit": 1})
print(unicode(response))
| 34.142857 | 66 | 0.857741 | from infobip.clients import get_received_messages
from __init__ import configuration
get_delivery_reports_client = get_received_messages(configuration)
response = get_delivery_reports_client.execute({"limit": 1})
print(unicode(response))
| true | true |
f715e3a8e11c572f5bb1831dd2bd65643e2aa549 | 1,558 | py | Python | tests/test_image.py | juliamarc/mal-tier-list-bbcode-gen | 3b14d1982883bea6c0b5cf3ba1de5360c2d71abc | [
"MIT"
] | null | null | null | tests/test_image.py | juliamarc/mal-tier-list-bbcode-gen | 3b14d1982883bea6c0b5cf3ba1de5360c2d71abc | [
"MIT"
] | null | null | null | tests/test_image.py | juliamarc/mal-tier-list-bbcode-gen | 3b14d1982883bea6c0b5cf3ba1de5360c2d71abc | [
"MIT"
] | null | null | null | import pytest
import mal_tier_list_bbcode_gen.exceptions as exceptions
from mal_tier_list_bbcode_gen.image import Image
def test_source_direct_url():
image_url = 'example.com/test.png'
image = Image('direct URL', image_url)
assert image.image_url == image_url
def test_source_google_drive_file_id():
expected_url = ('https://drive.google.com/uc'
'?id=1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z')
image_url = '1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z'
image = Image('Google Drive', image_url)
assert image.image_url == expected_url
def test_source_google_drive_share_link():
expected_url = ('https://drive.google.com/uc'
'?id=1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z')
image_url = ('https://drive.google.com/file/d/'
'1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z/view?usp=sharing')
image = Image('Google Drive', image_url)
assert image.image_url == expected_url
def test_source_google_no_file_id():
image_url = ('https://drive.google.com/file/d/view?usp=sharing')
with pytest.raises(exceptions.GoogleDriveSourceError):
Image('Google Drive', image_url)
def test_source_not_valid():
with pytest.raises(exceptions.InvalidImageSourceError,
match=r".*is not a valid image source.*"):
Image('not valid', 'example.com/test.png')
def test_get_bbcode():
image_url = 'example.com/test.png'
expected_bbcode = f'[img]{image_url}[/img]'
image = Image('direct URL', image_url)
assert image.get_bbcode() == expected_bbcode
| 30.54902 | 70 | 0.70154 | import pytest
import mal_tier_list_bbcode_gen.exceptions as exceptions
from mal_tier_list_bbcode_gen.image import Image
def test_source_direct_url():
image_url = 'example.com/test.png'
image = Image('direct URL', image_url)
assert image.image_url == image_url
def test_source_google_drive_file_id():
expected_url = ('https://drive.google.com/uc'
'?id=1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z')
image_url = '1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z'
image = Image('Google Drive', image_url)
assert image.image_url == expected_url
def test_source_google_drive_share_link():
expected_url = ('https://drive.google.com/uc'
'?id=1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z')
image_url = ('https://drive.google.com/file/d/'
'1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z/view?usp=sharing')
image = Image('Google Drive', image_url)
assert image.image_url == expected_url
def test_source_google_no_file_id():
image_url = ('https://drive.google.com/file/d/view?usp=sharing')
with pytest.raises(exceptions.GoogleDriveSourceError):
Image('Google Drive', image_url)
def test_source_not_valid():
with pytest.raises(exceptions.InvalidImageSourceError,
match=r".*is not a valid image source.*"):
Image('not valid', 'example.com/test.png')
def test_get_bbcode():
image_url = 'example.com/test.png'
expected_bbcode = f'[img]{image_url}[/img]'
image = Image('direct URL', image_url)
assert image.get_bbcode() == expected_bbcode
| true | true |
f715e48b813407c0bd9d7f1f42d77633e8197d1d | 5,342 | py | Python | isi_sdk/models/mapping_identity_target_create_params.py | robzim/isilon_sdk_python | 3c2efcae7002f8ad25c0cfcb42a53b4d83e826d7 | [
"MIT"
] | null | null | null | isi_sdk/models/mapping_identity_target_create_params.py | robzim/isilon_sdk_python | 3c2efcae7002f8ad25c0cfcb42a53b4d83e826d7 | [
"MIT"
] | null | null | null | isi_sdk/models/mapping_identity_target_create_params.py | robzim/isilon_sdk_python | 3c2efcae7002f8ad25c0cfcb42a53b4d83e826d7 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_0.models.group_member import GroupMember # noqa: F401,E501
class MappingIdentityTargetCreateParams(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # attribute name -> declared Swagger type (as a string).
    swagger_types = {
        'on_disk': 'bool',
        'target': 'GroupMember',
        'type': 'str'
    }
    # attribute name -> key used in the JSON wire representation.
    attribute_map = {
        'on_disk': 'on_disk',
        'target': 'target',
        'type': 'type'
    }
    def __init__(self, on_disk=None, target=None, type=None):  # noqa: E501
        """MappingIdentityTargetCreateParams - a model defined in Swagger"""  # noqa: E501
        self._on_disk = None
        self._target = None
        self._type = None
        # No discriminator: this model does not participate in polymorphic dispatch.
        self.discriminator = None
        if on_disk is not None:
            self.on_disk = on_disk
        # `target` is required: its setter raises ValueError for None.
        self.target = target
        if type is not None:
            self.type = type
    @property
    def on_disk(self):
        """Gets the on_disk of this MappingIdentityTargetCreateParams.  # noqa: E501
        Identity is preferred on-disk.  # noqa: E501
        :return: The on_disk of this MappingIdentityTargetCreateParams.  # noqa: E501
        :rtype: bool
        """
        return self._on_disk
    @on_disk.setter
    def on_disk(self, on_disk):
        """Sets the on_disk of this MappingIdentityTargetCreateParams.
        Identity is preferred on-disk.  # noqa: E501
        :param on_disk: The on_disk of this MappingIdentityTargetCreateParams.  # noqa: E501
        :type: bool
        """
        self._on_disk = on_disk
    @property
    def target(self):
        """Gets the target of this MappingIdentityTargetCreateParams.  # noqa: E501
        Specifies properties for a persona, which consists of either a 'type' and a 'name' or an 'ID'.  # noqa: E501
        :return: The target of this MappingIdentityTargetCreateParams.  # noqa: E501
        :rtype: GroupMember
        """
        return self._target
    @target.setter
    def target(self, target):
        """Sets the target of this MappingIdentityTargetCreateParams.
        Specifies properties for a persona, which consists of either a 'type' and a 'name' or an 'ID'.  # noqa: E501
        :param target: The target of this MappingIdentityTargetCreateParams.  # noqa: E501
        :type: GroupMember
        :raises ValueError: if target is None (the field is required).
        """
        if target is None:
            raise ValueError("Invalid value for `target`, must not be `None`")  # noqa: E501
        self._target = target
    @property
    def type(self):
        """Gets the type of this MappingIdentityTargetCreateParams.  # noqa: E501
        Origin of identity mapping.  # noqa: E501
        :return: The type of this MappingIdentityTargetCreateParams.  # noqa: E501
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this MappingIdentityTargetCreateParams.
        Origin of identity mapping.  # noqa: E501
        :param type: The type of this MappingIdentityTargetCreateParams.  # noqa: E501
        :type: str
        :raises ValueError: if type is not one of the enum values below.
        """
        # Closed set defined by the API spec; anything else is rejected.
        allowed_values = ["auto", "external", "manual"]  # noqa: E501
        if type not in allowed_values:
            raise ValueError(
                "Invalid value for `type` ({0}), must be one of {1}"  # noqa: E501
                .format(type, allowed_values)
            )
        self._type = type
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model objects contained in lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model objects stored as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, MappingIdentityTargetCreateParams):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 29.677778 | 116 | 0.586484 |
import pprint
import re
import six
from isi_sdk_8_0.models.group_member import GroupMember
class MappingIdentityTargetCreateParams(object):
swagger_types = {
'on_disk': 'bool',
'target': 'GroupMember',
'type': 'str'
}
attribute_map = {
'on_disk': 'on_disk',
'target': 'target',
'type': 'type'
}
def __init__(self, on_disk=None, target=None, type=None):
self._on_disk = None
self._target = None
self._type = None
self.discriminator = None
if on_disk is not None:
self.on_disk = on_disk
self.target = target
if type is not None:
self.type = type
@property
def on_disk(self):
return self._on_disk
@on_disk.setter
def on_disk(self, on_disk):
self._on_disk = on_disk
@property
def target(self):
return self._target
@target.setter
def target(self, target):
if target is None:
raise ValueError("Invalid value for `target`, must not be `None`")
self._target = target
@property
def type(self):
return self._type
@type.setter
def type(self, type):
allowed_values = ["auto", "external", "manual"]
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}"
.format(type, allowed_values)
)
self._type = type
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, MappingIdentityTargetCreateParams):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f715e4b9d03d838a2a6581b960aa71928211ff89 | 855 | py | Python | slrp/expressions.py | thomasmatecki/parsley | 0c51e9c37759fbc1c723519619952248c83e4642 | [
"MIT"
] | null | null | null | slrp/expressions.py | thomasmatecki/parsley | 0c51e9c37759fbc1c723519619952248c83e4642 | [
"MIT"
] | 2 | 2020-03-24T18:30:15.000Z | 2020-03-31T10:57:37.000Z | slrp/expressions.py | thomasmatecki/parsley | 0c51e9c37759fbc1c723519619952248c83e4642 | [
"MIT"
] | null | null | null | """
Expression for matching.
"""
import re
from abc import ABC
from typing import Callable, Text, Tuple
from slrp.combos import Combinable
class RegExpr(Combinable):
    """
    Regular expression matcher.

    Matches ``pattern`` at the start of the input and, on success, returns
    the captured groups plus the unconsumed remainder of the input.
    """
    def __init__(self, pattern):
        self.pattern = pattern
    def match(self, expr):
        """Return ``(groups, remaining)`` on a match; ``None`` otherwise."""
        _match = re.match(self.pattern, expr)
        if _match:
            # re.match anchors at position 0, so only the end offset is
            # needed (the original unpacked span() into an unused start).
            return _match.groups(), expr[_match.end():]
class StringExpr(Combinable):
    """Match a fixed literal prefix.

    When ``capture`` is true the matched literal is returned as a
    one-element capture tuple; otherwise the capture tuple is empty.
    """
    def __init__(self, string: str, capture=False):
        self.string = string
        self.capture = capture
    def match(self, expr):
        """Return ``(captures, remaining)`` if *expr* starts with the literal."""
        if not expr.startswith(self.string):
            return None
        rest = expr[len(self.string):]
        captures = (self.string,) if self.capture else ()
        return captures, rest
| 21.923077 | 88 | 0.615205 | import re
from abc import ABC
from typing import Callable, Text, Tuple
from slrp.combos import Combinable
class RegExpr(Combinable):
def __init__(self, pattern):
self.pattern = pattern
def match(self, expr):
_match = re.match(self.pattern, expr)
if _match:
fr, to = _match.span()
return _match.groups(), expr[to:]
class StringExpr(Combinable):
def __init__(self, string: str, capture=False):
self.string = string
self.capture = capture
def match(self, expr):
if expr.startswith(self.string):
remaining = expr[len(self.string) :]
return ((self.string,), remaining) if self.capture else (tuple(), remaining)
| true | true |
f715e4c13f0a448d661aadd39ef081eb09b73466 | 410 | py | Python | examples/example_sparsifier_graph.py | tfgraph/tfgraph | 19ae968b3060275c631dc601757646abaf1f58a1 | [
"Apache-2.0"
] | 4 | 2017-07-23T13:48:35.000Z | 2021-12-03T18:11:50.000Z | examples/example_sparsifier_graph.py | tfgraph/tfgraph | 19ae968b3060275c631dc601757646abaf1f58a1 | [
"Apache-2.0"
] | 21 | 2017-07-23T13:15:20.000Z | 2020-09-28T02:13:11.000Z | examples/example_sparsifier_graph.py | tfgraph/tfgraph | 19ae968b3060275c631dc601757646abaf1f58a1 | [
"Apache-2.0"
] | 1 | 2017-07-28T10:28:04.000Z | 2017-07-28T10:28:04.000Z | #!/usr/bin/python3
import tensorflow as tf
import tfgraph
def main():
with tf.Session() as sess:
g: tfgraph.Graph = tfgraph.GraphConstructor.unweighted_random(sess, "G", 10, 85)
g_sparse: tfgraph.Graph = tfgraph.GraphConstructor.as_sparsifier(sess, g, 0.75)
print(g)
print(g.m)
print(g_sparse)
print(g_sparse.m)
print(g_sparse.m / g.m)
if __name__ == '__main__':
main()
| 17.826087 | 84 | 0.673171 |
import tensorflow as tf
import tfgraph
def main():
with tf.Session() as sess:
g: tfgraph.Graph = tfgraph.GraphConstructor.unweighted_random(sess, "G", 10, 85)
g_sparse: tfgraph.Graph = tfgraph.GraphConstructor.as_sparsifier(sess, g, 0.75)
print(g)
print(g.m)
print(g_sparse)
print(g_sparse.m)
print(g_sparse.m / g.m)
if __name__ == '__main__':
main()
| true | true |
f715e4d2616e966f17915d11d03e9988858f4587 | 1,195 | py | Python | shanapy/test/test_interpolater.py | ZhiyLiu/shanapy | cbcdd87f4aaa1102d5b93c9488fbcee6e28da2a6 | [
"MIT"
] | 3 | 2021-11-21T23:14:50.000Z | 2022-02-12T04:32:52.000Z | shanapy/test/test_interpolater.py | ZhiyLiu/shanapy | cbcdd87f4aaa1102d5b93c9488fbcee6e28da2a6 | [
"MIT"
] | null | null | null | shanapy/test/test_interpolater.py | ZhiyLiu/shanapy | cbcdd87f4aaa1102d5b93c9488fbcee6e28da2a6 | [
"MIT"
] | null | null | null | import vtk
from shanapy.models.sreps import Initializer, Interpolater
import pyvista as pv
## Read the input surface mesh (produced by SPHARM-PDM)
reader = vtk.vtkPolyDataReader()
reader.SetFileName('data/example_hippocampus.vtk')
reader.Update()
input_mesh = reader.GetOutput()
## Initialize an s-rep for the input mesh
initializer = Initializer()
srep = initializer.fit(input_mesh)
num_crest_pt = 24
num_samples_outward = 3
## Interpolate up spokes
interp = Interpolater(interpolate_level=3)
interp_spokes, up_spokes = interp.interpolate(srep, num_crest_pt, num_samples_outward)
## Interpolate down spokes
interp.interpolate_up = False
interp_down_spokes, bot_spokes = interp.interpolate(srep, num_crest_pt, num_samples_outward)
## interpolate fold spokes
crest_spokes = interp.interpolate_crest(srep, up_spokes, bot_spokes, num_crest_pt)
p = pv.Plotter()
p.add_mesh(input_mesh, color='white', opacity=0.3, label='Surface')
p.add_mesh(interp_spokes, color='orange', line_width=3, label='Interp Up')
# p.add_mesh(interp_down_spokes, color='cyan', line_width=3, label='Interp Down')
p.add_mesh(srep, color='red', line_width=4, label='Primary')
p.add_legend()
p.add_axes(box=True)
p.show() | 33.194444 | 92 | 0.787448 | import vtk
from shanapy.models.sreps import Initializer, Interpolater
import pyvista as pv
data/example_hippocampus.vtk')
reader.Update()
input_mesh = reader.GetOutput()
ializer.fit(input_mesh)
num_crest_pt = 24
num_samples_outward = 3
interpolate_level=3)
interp_spokes, up_spokes = interp.interpolate(srep, num_crest_pt, num_samples_outward)
False
interp_down_spokes, bot_spokes = interp.interpolate(srep, num_crest_pt, num_samples_outward)
terpolate_crest(srep, up_spokes, bot_spokes, num_crest_pt)
p = pv.Plotter()
p.add_mesh(input_mesh, color='white', opacity=0.3, label='Surface')
p.add_mesh(interp_spokes, color='orange', line_width=3, label='Interp Up')
p.add_mesh(srep, color='red', line_width=4, label='Primary')
p.add_legend()
p.add_axes(box=True)
p.show() | true | true |
f715e52c53f3d913beec9dd47456ed969e6769b5 | 2,052 | py | Python | flywheel_cli/importers/slurp_scan.py | amitvakula/python-cli | 0bdbd39c40cdb3fe4dbd3b0cb38abbce94242dac | [
"MIT"
] | null | null | null | flywheel_cli/importers/slurp_scan.py | amitvakula/python-cli | 0bdbd39c40cdb3fe4dbd3b0cb38abbce94242dac | [
"MIT"
] | null | null | null | flywheel_cli/importers/slurp_scan.py | amitvakula/python-cli | 0bdbd39c40cdb3fe4dbd3b0cb38abbce94242dac | [
"MIT"
] | null | null | null | """Provides a scanner that will group files together under a common prefix"""
import copy
from .abstract_scanner import AbstractScanner
class SlurpScanner(AbstractScanner):
"""SlurpScanner groups files together by a common prefix.
This works by looking at the first slash (or if there is no slash, the first dot) in
each file path, and using that as the acquisition label.
"""
def __init__(self, config):
"""Class that handles generic acquisition slurping"""
super(SlurpScanner, self).__init__(config)
def discover(self, walker, context, container_factory, path_prefix=None, audit_log=None):
# Discover files first
files = list(sorted(walker.files(subdir=path_prefix)))
prefix_len = len(path_prefix or '')
current_prefix = None
current_files = []
for path in files:
path = path.lstrip('/')
prefix = SlurpScanner._get_prefix(path[prefix_len:])
if prefix == current_prefix:
current_files.append(path)
else:
self._add_acquisition(container_factory, context, current_prefix, current_files)
current_prefix = prefix
current_files = [path]
self._add_acquisition(container_factory, context, current_prefix, current_files)
@staticmethod
def _get_prefix(path):
"""Get the appropriate prefix for the given file"""
try:
idx = path.rindex('/')
except ValueError:
try:
idx = path.index('.')
except ValueError:
idx = len(path)
return path[:idx].strip('/').replace('/', '_')
def _add_acquisition(self, container_factory, context, label, files):
if not label or not files:
return
acquisition_context = copy.deepcopy(context)
acquisition_context.setdefault('acquisition', {})['label'] = label
container = container_factory.resolve(acquisition_context)
container.files.extend(files)
| 33.096774 | 96 | 0.634016 | import copy
from .abstract_scanner import AbstractScanner
class SlurpScanner(AbstractScanner):
def __init__(self, config):
super(SlurpScanner, self).__init__(config)
def discover(self, walker, context, container_factory, path_prefix=None, audit_log=None):
files = list(sorted(walker.files(subdir=path_prefix)))
prefix_len = len(path_prefix or '')
current_prefix = None
current_files = []
for path in files:
path = path.lstrip('/')
prefix = SlurpScanner._get_prefix(path[prefix_len:])
if prefix == current_prefix:
current_files.append(path)
else:
self._add_acquisition(container_factory, context, current_prefix, current_files)
current_prefix = prefix
current_files = [path]
self._add_acquisition(container_factory, context, current_prefix, current_files)
@staticmethod
def _get_prefix(path):
try:
idx = path.rindex('/')
except ValueError:
try:
idx = path.index('.')
except ValueError:
idx = len(path)
return path[:idx].strip('/').replace('/', '_')
def _add_acquisition(self, container_factory, context, label, files):
if not label or not files:
return
acquisition_context = copy.deepcopy(context)
acquisition_context.setdefault('acquisition', {})['label'] = label
container = container_factory.resolve(acquisition_context)
container.files.extend(files)
| true | true |
f715e6fb9ac52b17d3d805190df8d63c65156cf6 | 5,008 | py | Python | contrib/seeds/generate-seeds.py | BakedInside/beanscore | daa9b2ddbfd3305881749bda7f32146738154260 | [
"MIT"
] | null | null | null | contrib/seeds/generate-seeds.py | BakedInside/beanscore | daa9b2ddbfd3305881749bda7f32146738154260 | [
"MIT"
] | null | null | null | contrib/seeds/generate-seeds.py | BakedInside/beanscore | daa9b2ddbfd3305881749bda7f32146738154260 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2021 The Beans Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>:<port>
[<ipv6>]:<port>
<onion>.onion:<port>
<i2p>.b32.i2p:<port>
The output will be two data structures with the peers in binary format:
static const uint8_t chainparams_seed_{main,test}[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from enum import Enum
import struct
import sys
import os
import re
class BIP155Network(Enum):
    # Network IDs as defined by BIP155 (the addrv2 message format).
    IPV4 = 1
    IPV6 = 2
    TORV2 = 3
    TORV3 = 4
    I2P = 5
    CJDNS = 6
def name_to_bip155(addr):
    """Parse an address string into a BIP155 (networkID, address-bytes) pair."""
    if addr.endswith('.onion'):
        # Tor: the base32 payload precedes the ".onion" suffix.
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) == 10:
            return (BIP155Network.TORV2, vchAddr)
        if len(vchAddr) == 35:
            # v3 layout: 32-byte pubkey + 2-byte checksum + version byte (== 3).
            assert(vchAddr[34] == 3)
            return (BIP155Network.TORV3, vchAddr[:32])
        raise ValueError('Invalid onion %s' % vchAddr)
    if addr.endswith('.b32.i2p'):
        # I2P base32 omits padding; restore it before decoding.
        vchAddr = b32decode(addr[0:-8] + '====', True)
        if len(vchAddr) == 32:
            return (BIP155Network.I2P, vchAddr)
        raise ValueError(f'Invalid I2P {vchAddr}')
    if '.' in addr:  # IPv4 dotted quad
        return (BIP155Network.IPV4, bytes(int(octet) for octet in addr.split('.')))
    if ':' in addr:  # IPv6
        prefix = []
        suffix = []
        halves = [prefix, suffix]
        side = 0  # 0 while before '::', 1 after it
        groups = addr.split(':')
        for i, comp in enumerate(groups):
            if comp == '':
                # Empty components at either end come from a leading/trailing '::'.
                if i == 0 or i == len(groups) - 1:
                    continue
                side += 1  # interior '::' switches to the suffix half
                assert(side < 2)
            else:
                # Each group contributes two big-endian bytes.
                val = int(comp, 16)
                halves[side].append(val >> 8)
                halves[side].append(val & 0xff)
        zeros = 16 - len(prefix) - len(suffix)
        # '::' must account for at least one zero group; without it, none.
        assert((side == 0 and zeros == 0) or (side == 1 and zeros > 0))
        return (BIP155Network.IPV6, bytes(prefix + [0] * zeros + suffix))
    raise ValueError('Could not parse address %s' % addr)
def parse_spec(s):
    """Parse an endpoint string into a BIP155 (networkID, address, port) triple."""
    bracketed = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if bracketed:
        # "[ipv6]" or "[ipv6]:port"; group(2) is None when the port is absent.
        host = bracketed.group(1)
        port = bracketed.group(2)
    elif s.count(':') > 1:
        # Bare IPv6 without brackets can carry no port.
        host = s
        port = ''
    else:
        # "host" or "host:port" for IPv4 / onion / i2p names.
        host, _, port = s.partition(':')
    port = int(port) if port else 0  # missing port defaults to 0
    host = name_to_bip155(host)
    return host + (port, )
def ser_compact_size(l):
    """Serialize the non-negative integer *l* as a Bitcoin CompactSize.

    Values below 253 are a single byte; larger values use a one-byte marker
    (253/254/255) followed by a little-endian 16/32/64-bit integer.
    """
    if l < 253:
        return struct.pack("B", l)
    if l < 0x10000:
        return struct.pack("<BH", 253, l)
    if l < 0x100000000:
        return struct.pack("<BI", 254, l)
    return struct.pack("<BQ", 255, l)
def bip155_serialize(spec):
    """Encode a (networkID, address-bytes, port) triple as BIP155 binary.

    Layout: 1-byte network ID, CompactSize address length, the raw address
    bytes, then the port as a big-endian 16-bit integer.
    """
    network, address, port = spec
    return (struct.pack('B', network.value)
            + ser_compact_size(len(address))
            + address
            + struct.pack('>H', port))
def process_nodes(g, f, structname):
    """Write the endpoints listed in file *f* to *g* as a C byte array *structname*."""
    g.write('static const uint8_t %s[] = {\n' % structname)
    for raw in f:
        # Drop any trailing '#' comment, then surrounding whitespace;
        # skip lines that end up empty.
        entry = raw.split('#', 1)[0].strip()
        if not entry:
            continue
        blob = bip155_serialize(parse_spec(entry))
        body = ','.join('0x%02x' % byte for byte in blob)
        g.write('    %s,\n' % body)
    g.write('};\n')
def main():
    """Read nodes_{main,test}.txt from argv[1] and print chainparamsseeds.h to stdout."""
    if len(sys.argv)<2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        sys.exit(1)
    g = sys.stdout
    indir = sys.argv[1]
    # Header guard plus a provenance banner for the generated header.
    g.write('#ifndef BEANS_CHAINPARAMSSEEDS_H\n')
    g.write('#define BEANS_CHAINPARAMSSEEDS_H\n')
    g.write('/**\n')
    g.write(' * List of fixed seed nodes for the beans network\n')
    g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
    g.write(' *\n')
    g.write(' * Each line contains a BIP155 serialized (networkID, addr, port) tuple.\n')
    g.write(' */\n')
    # Emit one C array per network (mainnet, then testnet).
    with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
        process_nodes(g, f, 'chainparams_seed_main')
    g.write('\n')
    with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
        process_nodes(g, f, 'chainparams_seed_test')
    g.write('#endif // BEANS_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 29.988024 | 91 | 0.568091 |
from base64 import b32decode
from enum import Enum
import struct
import sys
import os
import re
class BIP155Network(Enum):
IPV4 = 1
IPV6 = 2
TORV2 = 3
TORV3 = 4
I2P = 5
CJDNS = 6
def name_to_bip155(addr):
if addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) == 10:
return (BIP155Network.TORV2, vchAddr)
elif len(vchAddr) == 35:
assert(vchAddr[34] == 3)
return (BIP155Network.TORV3, vchAddr[:32])
else:
raise ValueError('Invalid onion %s' % vchAddr)
elif addr.endswith('.b32.i2p'):
vchAddr = b32decode(addr[0:-8] + '====', True)
if len(vchAddr) == 32:
return (BIP155Network.I2P, vchAddr)
else:
raise ValueError(f'Invalid I2P {vchAddr}')
elif '.' in addr:
return (BIP155Network.IPV4, bytes((int(x) for x in addr.split('.'))))
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return (BIP155Network.IPV6, bytes(sub[0] + ([0] * nullbytes) + sub[1]))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s):
match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match:
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1:
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = 0
else:
port = int(port)
host = name_to_bip155(host)
return host + (port, )
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def bip155_serialize(spec):
r = b""
r += struct.pack('B', spec[0].value)
r += ser_compact_size(len(spec[1]))
r += spec[1]
r += struct.pack('>H', spec[2])
return r
def process_nodes(g, f, structname):
g.write('static const uint8_t %s[] = {\n' % structname)
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
spec = parse_spec(line)
blob = bip155_serialize(spec)
hoststr = ','.join(('0x%02x' % b) for b in blob)
g.write(f' {hoststr},\n')
g.write('};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BEANS_CHAINPARAMSSEEDS_H\n')
g.write('#define BEANS_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the beans network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a BIP155 serialized (networkID, addr, port) tuple.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'chainparams_seed_main')
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'chainparams_seed_test')
g.write('#endif // BEANS_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| true | true |
f715e76b2533da62db85b11847c06082ebf9c1c8 | 565 | py | Python | PDF_Copy_Paster/scripts/pdfcp.py | cooperbeaman/cogs18pdfcpfinalproject | ade7cf46534e8817b327f1c35ebf617cc977d872 | [
"CNRI-Python",
"Adobe-2006",
"Adobe-Glyph"
] | null | null | null | PDF_Copy_Paster/scripts/pdfcp.py | cooperbeaman/cogs18pdfcpfinalproject | ade7cf46534e8817b327f1c35ebf617cc977d872 | [
"CNRI-Python",
"Adobe-2006",
"Adobe-Glyph"
] | null | null | null | PDF_Copy_Paster/scripts/pdfcp.py | cooperbeaman/cogs18pdfcpfinalproject | ade7cf46534e8817b327f1c35ebf617cc977d872 | [
"CNRI-Python",
"Adobe-2006",
"Adobe-Glyph"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8

# In[3]:

"""Command-line entry point for the PDF copy/paste tool.

Starts a clipboard monitor that continuously rewrites copied PDF text:
line breaks are removed, and the text can optionally be wrapped in quotes
or have a carriage return appended.
"""

# Make the package root (one level up) importable when run from scripts/.
import sys
sys.path.append('../')

# Pull in the clipboard-processing helpers from my_module, including run().
from my_module.functions import *

# Block here, monitoring and rewriting the Windows clipboard, until interrupted.
run()
import sys
sys.path.append('../')
from my_module.functions import *
run()
| true | true |
f715e9d439ec161b580dbb638b66c76fe3d21b3d | 2,166 | py | Python | tests/mantid_data_helper.py | scipp/scipp-ci-mantid | 29164f633096c4eeb0a8579b72165c96315113f8 | [
"Apache-2.0"
] | null | null | null | tests/mantid_data_helper.py | scipp/scipp-ci-mantid | 29164f633096c4eeb0a8579b72165c96315113f8 | [
"Apache-2.0"
] | null | null | null | tests/mantid_data_helper.py | scipp/scipp-ci-mantid | 29164f633096c4eeb0a8579b72165c96315113f8 | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: GPL-3.0-or-later
# Copyright (c) 2019 Scipp contributors (https://github.com/scipp)
# @author Dimitar Tasev
import os
import hashlib
import sys
import subprocess as sp
def download_file(source, destination):
    """Download *source* to *destination* using wget.

    :param source: URL to fetch.
    :param destination: local path to write.
    :raises RuntimeError: if wget exits with a non-zero status.
    """
    # Pass the arguments as a list with the default shell=False: the previous
    # string-interpolated shell=True form broke on paths containing spaces and
    # allowed shell metacharacters in source/destination to be interpreted.
    status = sp.run(["wget", "-O", destination, source]).returncode
    if status != 0:
        raise RuntimeError("Can't load {} to {}.".format(source, destination))
class MantidDataHelper:
    """Locates (and lazily downloads) Mantid external test-data files.

    Files are cached content-addressed under DATA_DIR as
    ``<DATA_DIR>/<algorithm>/<hash>``, mirroring the remote layout.
    """
    # Valid only for Linux. Windows is as C:\MantidExternalData
    DATA_DIR = os.path.abspath(os.path.expanduser(
        "/opt/tests/MantidExternalData"))
    # Local cache path template (content-addressed by checksum).
    DATA_LOCATION = "{data_dir}/{algorithm}/{hash}"
    # Known test files: human-readable name -> checksum used as the remote key.
    DATA_FILES = {
        "CNCS_51936_event.nxs": {
            "hash": "5ba401e489260a44374b5be12b780911",
            "algorithm": "MD5"},
        "iris26176_graphite002_sqw.nxs": {
            "hash": "7ea63f9137602b7e9b604fe30f0c6ec2",
            "algorithm": "MD5"},
        "WISH00016748.raw": {
            "hash": "37ecc6f99662b57e405ed967bdc068af",
            "algorithm": "MD5"},
    }
    # Remote store addresses files by algorithm and checksum, not by name.
    REMOTE_URL = "http://198.74.56.37/ftp/external-data/"\
                 "{algorithm}/{hash}"

    @classmethod
    def find_file(cls, name):
        """Return the local path of test file *name*, downloading it if absent.

        The checksum is verified only right after a fresh download; an
        already-cached file is trusted as-is.

        :param name: a key of DATA_FILES.
        :raises RuntimeError: on download failure or MD5 mismatch.
        """
        data_file = cls.DATA_FILES[name]
        data_location = cls.DATA_LOCATION.format(
            data_dir=cls.DATA_DIR,
            algorithm=data_file["algorithm"],
            hash=data_file["hash"])
        dir_name = os.path.dirname(data_location)
        if not os.path.exists(dir_name):
            os.makedirs(dir_name, exist_ok=True)
        if not os.path.isfile(data_location):
            file_hash = data_file["hash"]
            algorithm = data_file["algorithm"]
            query = cls.REMOTE_URL.format(algorithm=algorithm,
                                          hash=file_hash)
            download_file(query, data_location)
            # Verify the downloaded bytes against the expected checksum.
            if algorithm == "MD5":
                with open(data_location, "rb") as file:
                    md5 = hashlib.md5(file.read()).hexdigest()
                if md5 != file_hash:
                    raise RuntimeError("Check sum doesn't match.")
        return data_location
| 35.508197 | 78 | 0.5988 |
import os
import hashlib
import sys
import subprocess as sp
def download_file(source, destination):
command = "wget -O {} {}".format(destination, source)
status = sp.run(command, shell=True).returncode
if status != 0:
raise RuntimeError("Can't load {} to {}.".format(source, destination))
class MantidDataHelper:
# Valid only for Linux. Windows is as C:\MantidExternalData
DATA_DIR = os.path.abspath(os.path.expanduser(
"/opt/tests/MantidExternalData"))
DATA_LOCATION = "{data_dir}/{algorithm}/{hash}"
DATA_FILES = {
"CNCS_51936_event.nxs": {
"hash": "5ba401e489260a44374b5be12b780911",
"algorithm": "MD5"},
"iris26176_graphite002_sqw.nxs": {
"hash": "7ea63f9137602b7e9b604fe30f0c6ec2",
"algorithm": "MD5"},
"WISH00016748.raw": {
"hash": "37ecc6f99662b57e405ed967bdc068af",
"algorithm": "MD5"},
}
REMOTE_URL = "http://198.74.56.37/ftp/external-data/"\
"{algorithm}/{hash}"
@classmethod
def find_file(cls, name):
data_file = cls.DATA_FILES[name]
data_location = cls.DATA_LOCATION.format(
data_dir=cls.DATA_DIR,
algorithm=data_file["algorithm"],
hash=data_file["hash"])
dir_name = os.path.dirname(data_location)
if not os.path.exists(dir_name):
os.makedirs(dir_name, exist_ok=True)
if not os.path.isfile(data_location):
file_hash = data_file["hash"]
algorithm = data_file["algorithm"]
query = cls.REMOTE_URL.format(algorithm=algorithm,
hash=file_hash)
download_file(query, data_location)
if algorithm == "MD5":
with open(data_location, "rb") as file:
md5 = hashlib.md5(file.read()).hexdigest()
if md5 != file_hash:
raise RuntimeError("Check sum doesn't match.")
return data_location
| true | true |
f715eb646ed9af649d8f5a29c1d0d68ce7a3e4b3 | 571 | py | Python | setup.py | TomKealy/causal-forest | 04f3aeb1ac5547a78b96eca9bdb51b61f9e940f4 | [
"MIT"
] | null | null | null | setup.py | TomKealy/causal-forest | 04f3aeb1ac5547a78b96eca9bdb51b61f9e940f4 | [
"MIT"
] | null | null | null | setup.py | TomKealy/causal-forest | 04f3aeb1ac5547a78b96eca9bdb51b61f9e940f4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Setup file for cforest.
Use setup.cfg to configure your project.
This file was generated with PyScaffold 3.2.3.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
import sys

from pkg_resources import VersionConflict, require
from setuptools import setup

# PyScaffold needs a reasonably recent setuptools; fail fast with a clear
# message instead of an obscure error later in the build.
try:
    require('setuptools>=38.3')
except VersionConflict:
    print("Error: version of setuptools is too old (<38.3)!")
    sys.exit(1)

if __name__ == "__main__":
    # All project metadata lives in setup.cfg; PyScaffold wires it together.
    setup(use_pyscaffold=True)
| 23.791667 | 75 | 0.702277 |
import sys
from pkg_resources import VersionConflict, require
from setuptools import setup
try:
require('setuptools>=38.3')
except VersionConflict:
print("Error: version of setuptools is too old (<38.3)!")
sys.exit(1)
if __name__ == "__main__":
setup(use_pyscaffold=True)
| true | true |
f715ec4a046a358ebcab33b297f3acf0d66c97dd | 9,457 | py | Python | venv/lib/python3.8/site-packages/vsts/task_agent/v4_0/models/task_group.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/task_agent/v4_0/models/task_group.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/task_agent/v4_0/models/task_group.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .task_definition import TaskDefinition
# NOTE: auto-generated msrest model (see file header) -- prefer regenerating
# over hand-editing; only documentation is added here.
class TaskGroup(TaskDefinition):
    """TaskGroup.

    :param agent_execution:
    :type agent_execution: :class:`TaskExecution <task-agent.v4_0.models.TaskExecution>`
    :param author:
    :type author: str
    :param category:
    :type category: str
    :param contents_uploaded:
    :type contents_uploaded: bool
    :param contribution_identifier:
    :type contribution_identifier: str
    :param contribution_version:
    :type contribution_version: str
    :param data_source_bindings:
    :type data_source_bindings: list of :class:`DataSourceBinding <task-agent.v4_0.models.DataSourceBinding>`
    :param definition_type:
    :type definition_type: str
    :param demands:
    :type demands: list of :class:`object <task-agent.v4_0.models.object>`
    :param deprecated:
    :type deprecated: bool
    :param description:
    :type description: str
    :param disabled:
    :type disabled: bool
    :param execution:
    :type execution: dict
    :param friendly_name:
    :type friendly_name: str
    :param groups:
    :type groups: list of :class:`TaskGroupDefinition <task-agent.v4_0.models.TaskGroupDefinition>`
    :param help_mark_down:
    :type help_mark_down: str
    :param host_type:
    :type host_type: str
    :param icon_url:
    :type icon_url: str
    :param id:
    :type id: str
    :param inputs:
    :type inputs: list of :class:`TaskInputDefinition <task-agent.v4_0.models.TaskInputDefinition>`
    :param instance_name_format:
    :type instance_name_format: str
    :param minimum_agent_version:
    :type minimum_agent_version: str
    :param name:
    :type name: str
    :param output_variables:
    :type output_variables: list of :class:`TaskOutputVariable <task-agent.v4_0.models.TaskOutputVariable>`
    :param package_location:
    :type package_location: str
    :param package_type:
    :type package_type: str
    :param preview:
    :type preview: bool
    :param release_notes:
    :type release_notes: str
    :param runs_on:
    :type runs_on: list of str
    :param satisfies:
    :type satisfies: list of str
    :param server_owned:
    :type server_owned: bool
    :param source_definitions:
    :type source_definitions: list of :class:`TaskSourceDefinition <task-agent.v4_0.models.TaskSourceDefinition>`
    :param source_location:
    :type source_location: str
    :param version:
    :type version: :class:`TaskVersion <task-agent.v4_0.models.TaskVersion>`
    :param visibility:
    :type visibility: list of str
    :param comment: Gets or sets comment.
    :type comment: str
    :param created_by: Gets or sets the identity who created.
    :type created_by: :class:`IdentityRef <task-agent.v4_0.models.IdentityRef>`
    :param created_on: Gets or sets date on which it got created.
    :type created_on: datetime
    :param deleted: Gets or sets as 'true' to indicate as deleted, 'false' otherwise.
    :type deleted: bool
    :param modified_by: Gets or sets the identity who modified.
    :type modified_by: :class:`IdentityRef <task-agent.v4_0.models.IdentityRef>`
    :param modified_on: Gets or sets date on which it got modified.
    :type modified_on: datetime
    :param owner: Gets or sets the owner.
    :type owner: str
    :param parent_definition_id: Gets or sets parent task group Id. This is used while creating a draft task group.
    :type parent_definition_id: str
    :param revision: Gets or sets revision.
    :type revision: int
    :param tasks:
    :type tasks: list of :class:`TaskGroupStep <task-agent.v4_0.models.TaskGroupStep>`
    """

    # msrest serialization map: python attribute -> {wire key, msrest type string}.
    _attribute_map = {
        'agent_execution': {'key': 'agentExecution', 'type': 'TaskExecution'},
        'author': {'key': 'author', 'type': 'str'},
        'category': {'key': 'category', 'type': 'str'},
        'contents_uploaded': {'key': 'contentsUploaded', 'type': 'bool'},
        'contribution_identifier': {'key': 'contributionIdentifier', 'type': 'str'},
        'contribution_version': {'key': 'contributionVersion', 'type': 'str'},
        'data_source_bindings': {'key': 'dataSourceBindings', 'type': '[DataSourceBinding]'},
        'definition_type': {'key': 'definitionType', 'type': 'str'},
        'demands': {'key': 'demands', 'type': '[object]'},
        'deprecated': {'key': 'deprecated', 'type': 'bool'},
        'description': {'key': 'description', 'type': 'str'},
        'disabled': {'key': 'disabled', 'type': 'bool'},
        'execution': {'key': 'execution', 'type': '{object}'},
        'friendly_name': {'key': 'friendlyName', 'type': 'str'},
        'groups': {'key': 'groups', 'type': '[TaskGroupDefinition]'},
        'help_mark_down': {'key': 'helpMarkDown', 'type': 'str'},
        'host_type': {'key': 'hostType', 'type': 'str'},
        'icon_url': {'key': 'iconUrl', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[TaskInputDefinition]'},
        'instance_name_format': {'key': 'instanceNameFormat', 'type': 'str'},
        'minimum_agent_version': {'key': 'minimumAgentVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'output_variables': {'key': 'outputVariables', 'type': '[TaskOutputVariable]'},
        'package_location': {'key': 'packageLocation', 'type': 'str'},
        'package_type': {'key': 'packageType', 'type': 'str'},
        'preview': {'key': 'preview', 'type': 'bool'},
        'release_notes': {'key': 'releaseNotes', 'type': 'str'},
        'runs_on': {'key': 'runsOn', 'type': '[str]'},
        'satisfies': {'key': 'satisfies', 'type': '[str]'},
        'server_owned': {'key': 'serverOwned', 'type': 'bool'},
        'source_definitions': {'key': 'sourceDefinitions', 'type': '[TaskSourceDefinition]'},
        'source_location': {'key': 'sourceLocation', 'type': 'str'},
        'version': {'key': 'version', 'type': 'TaskVersion'},
        'visibility': {'key': 'visibility', 'type': '[str]'},
        'comment': {'key': 'comment', 'type': 'str'},
        'created_by': {'key': 'createdBy', 'type': 'IdentityRef'},
        'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
        'deleted': {'key': 'deleted', 'type': 'bool'},
        'modified_by': {'key': 'modifiedBy', 'type': 'IdentityRef'},
        'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
        'owner': {'key': 'owner', 'type': 'str'},
        'parent_definition_id': {'key': 'parentDefinitionId', 'type': 'str'},
        'revision': {'key': 'revision', 'type': 'int'},
        'tasks': {'key': 'tasks', 'type': '[TaskGroupStep]'}
    }

    def __init__(self, agent_execution=None, author=None, category=None, contents_uploaded=None, contribution_identifier=None, contribution_version=None, data_source_bindings=None, definition_type=None, demands=None, deprecated=None, description=None, disabled=None, execution=None, friendly_name=None, groups=None, help_mark_down=None, host_type=None, icon_url=None, id=None, inputs=None, instance_name_format=None, minimum_agent_version=None, name=None, output_variables=None, package_location=None, package_type=None, preview=None, release_notes=None, runs_on=None, satisfies=None, server_owned=None, source_definitions=None, source_location=None, version=None, visibility=None, comment=None, created_by=None, created_on=None, deleted=None, modified_by=None, modified_on=None, owner=None, parent_definition_id=None, revision=None, tasks=None):
        # Fields shared with the TaskDefinition base are handled by its initializer;
        # only the TaskGroup-specific fields are assigned below.
        super(TaskGroup, self).__init__(agent_execution=agent_execution, author=author, category=category, contents_uploaded=contents_uploaded, contribution_identifier=contribution_identifier, contribution_version=contribution_version, data_source_bindings=data_source_bindings, definition_type=definition_type, demands=demands, deprecated=deprecated, description=description, disabled=disabled, execution=execution, friendly_name=friendly_name, groups=groups, help_mark_down=help_mark_down, host_type=host_type, icon_url=icon_url, id=id, inputs=inputs, instance_name_format=instance_name_format, minimum_agent_version=minimum_agent_version, name=name, output_variables=output_variables, package_location=package_location, package_type=package_type, preview=preview, release_notes=release_notes, runs_on=runs_on, satisfies=satisfies, server_owned=server_owned, source_definitions=source_definitions, source_location=source_location, version=version, visibility=visibility)
        self.comment = comment
        self.created_by = created_by
        self.created_on = created_on
        self.deleted = deleted
        self.modified_by = modified_by
        self.modified_on = modified_on
        self.owner = owner
        self.parent_definition_id = parent_definition_id
        self.revision = revision
        self.tasks = tasks
| 56.628743 | 973 | 0.649466 |
from .task_definition import TaskDefinition
class TaskGroup(TaskDefinition):
_attribute_map = {
'agent_execution': {'key': 'agentExecution', 'type': 'TaskExecution'},
'author': {'key': 'author', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'contents_uploaded': {'key': 'contentsUploaded', 'type': 'bool'},
'contribution_identifier': {'key': 'contributionIdentifier', 'type': 'str'},
'contribution_version': {'key': 'contributionVersion', 'type': 'str'},
'data_source_bindings': {'key': 'dataSourceBindings', 'type': '[DataSourceBinding]'},
'definition_type': {'key': 'definitionType', 'type': 'str'},
'demands': {'key': 'demands', 'type': '[object]'},
'deprecated': {'key': 'deprecated', 'type': 'bool'},
'description': {'key': 'description', 'type': 'str'},
'disabled': {'key': 'disabled', 'type': 'bool'},
'execution': {'key': 'execution', 'type': '{object}'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'groups': {'key': 'groups', 'type': '[TaskGroupDefinition]'},
'help_mark_down': {'key': 'helpMarkDown', 'type': 'str'},
'host_type': {'key': 'hostType', 'type': 'str'},
'icon_url': {'key': 'iconUrl', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[TaskInputDefinition]'},
'instance_name_format': {'key': 'instanceNameFormat', 'type': 'str'},
'minimum_agent_version': {'key': 'minimumAgentVersion', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'output_variables': {'key': 'outputVariables', 'type': '[TaskOutputVariable]'},
'package_location': {'key': 'packageLocation', 'type': 'str'},
'package_type': {'key': 'packageType', 'type': 'str'},
'preview': {'key': 'preview', 'type': 'bool'},
'release_notes': {'key': 'releaseNotes', 'type': 'str'},
'runs_on': {'key': 'runsOn', 'type': '[str]'},
'satisfies': {'key': 'satisfies', 'type': '[str]'},
'server_owned': {'key': 'serverOwned', 'type': 'bool'},
'source_definitions': {'key': 'sourceDefinitions', 'type': '[TaskSourceDefinition]'},
'source_location': {'key': 'sourceLocation', 'type': 'str'},
'version': {'key': 'version', 'type': 'TaskVersion'},
'visibility': {'key': 'visibility', 'type': '[str]'},
'comment': {'key': 'comment', 'type': 'str'},
'created_by': {'key': 'createdBy', 'type': 'IdentityRef'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'deleted': {'key': 'deleted', 'type': 'bool'},
'modified_by': {'key': 'modifiedBy', 'type': 'IdentityRef'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'owner': {'key': 'owner', 'type': 'str'},
'parent_definition_id': {'key': 'parentDefinitionId', 'type': 'str'},
'revision': {'key': 'revision', 'type': 'int'},
'tasks': {'key': 'tasks', 'type': '[TaskGroupStep]'}
}
def __init__(self, agent_execution=None, author=None, category=None, contents_uploaded=None, contribution_identifier=None, contribution_version=None, data_source_bindings=None, definition_type=None, demands=None, deprecated=None, description=None, disabled=None, execution=None, friendly_name=None, groups=None, help_mark_down=None, host_type=None, icon_url=None, id=None, inputs=None, instance_name_format=None, minimum_agent_version=None, name=None, output_variables=None, package_location=None, package_type=None, preview=None, release_notes=None, runs_on=None, satisfies=None, server_owned=None, source_definitions=None, source_location=None, version=None, visibility=None, comment=None, created_by=None, created_on=None, deleted=None, modified_by=None, modified_on=None, owner=None, parent_definition_id=None, revision=None, tasks=None):
super(TaskGroup, self).__init__(agent_execution=agent_execution, author=author, category=category, contents_uploaded=contents_uploaded, contribution_identifier=contribution_identifier, contribution_version=contribution_version, data_source_bindings=data_source_bindings, definition_type=definition_type, demands=demands, deprecated=deprecated, description=description, disabled=disabled, execution=execution, friendly_name=friendly_name, groups=groups, help_mark_down=help_mark_down, host_type=host_type, icon_url=icon_url, id=id, inputs=inputs, instance_name_format=instance_name_format, minimum_agent_version=minimum_agent_version, name=name, output_variables=output_variables, package_location=package_location, package_type=package_type, preview=preview, release_notes=release_notes, runs_on=runs_on, satisfies=satisfies, server_owned=server_owned, source_definitions=source_definitions, source_location=source_location, version=version, visibility=visibility)
self.comment = comment
self.created_by = created_by
self.created_on = created_on
self.deleted = deleted
self.modified_by = modified_by
self.modified_on = modified_on
self.owner = owner
self.parent_definition_id = parent_definition_id
self.revision = revision
self.tasks = tasks
| true | true |
f715ec73403c11a6f9f9c12405d55dc8c40d491d | 2,782 | py | Python | rpbp/analysis/profile_construction/visualize_metagene_profile_bayes_factor.py | HeyLifeHD/rp-bp | 9c59b1bc0267400747477467c45f96364d5528e1 | [
"MIT"
] | 6 | 2016-05-16T18:52:41.000Z | 2021-12-31T06:27:29.000Z | rpbp/analysis/profile_construction/visualize_metagene_profile_bayes_factor.py | HeyLifeHD/rp-bp | 9c59b1bc0267400747477467c45f96364d5528e1 | [
"MIT"
] | 110 | 2016-06-22T13:24:39.000Z | 2022-02-07T09:29:14.000Z | rpbp/analysis/profile_construction/visualize_metagene_profile_bayes_factor.py | HeyLifeHD/rp-bp | 9c59b1bc0267400747477467c45f96364d5528e1 | [
"MIT"
] | 5 | 2017-05-22T12:21:51.000Z | 2022-02-06T10:32:56.000Z | #! /usr/bin/env python3
import matplotlib
matplotlib.use('agg')
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import logging
# Default plot text and typography; each is overridable via a CLI option in main().
default_title = "Metagene profile Bayes' factors"
default_xlabel = "Offset, relative to translation \ninitiation site"
default_ylabel = "Bayes' factor"
default_font_size = 15
default_series_label = ""
def main():
    """Plot the metagene-profile Bayes' factors for one read length.

    Reads the per-(length, offset) Bayes' factor csv, selects the rows for
    the requested length, and writes a line plot with guide lines to the
    output image file.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="This script visualizes the Bayes' factors for a metagene profile.\n\n"
        "This script contains some hard-coded field names.")

    parser.add_argument('bayes_factors', help="The metagene profile (csv) file")
    parser.add_argument('length', help="The profile lengths to visualize", type=int)
    parser.add_argument('out', help="The (output) image file")

    parser.add_argument('--title', help="The title for the figure", default=default_title)
    parser.add_argument('--xlabel', help="The label for the x-axis", default=default_xlabel)
    parser.add_argument('--ylabel', help="The label for the y-axis", default=default_ylabel)
    parser.add_argument('--series-label', help="The label for the legend", default=default_series_label)
    parser.add_argument('--font-size', help="The font size for the title, axis labels, and "
        "xticks labels", type=int, default=default_font_size)

    args = parser.parse_args()

    bayes_factors = pd.read_csv(args.bayes_factors)

    # Keep only the rows for the requested read length (hard-coded column names).
    mask_length = bayes_factors['length'] == args.length
    group = bayes_factors.loc[mask_length]
    bfs = group['bayes_factor_mean']
    offsets = group['offset']
    bf_range = max(bfs) - min(bfs)

    fig, ax = plt.subplots(figsize=(10,5))
    ax.plot(offsets, bfs, label=args.series_label, color='b')
    ax.scatter(offsets, bfs, color='b')

    xlim = (min(offsets), max(offsets))
    # Pad the y-range by 10% of the observed spread on each side.
    ymin = min(bfs) - 0.1*bf_range
    ymax = max(bfs) + 0.1*bf_range
    ylim = (ymin, ymax)

    # and draw a line at "bf=5"
    plt.plot(xlim, (5, 5), color='k', linewidth=2, linestyle=':')

    # and a horizontal line at the maximum bf
    plt.plot(xlim, (max(bfs), max(bfs)), color='r', linewidth=1, linestyle="-.")

    # and a vertical line at "offset=-12"
    # NOTE(review): presumably the canonical P-site offset -- confirm with callers.
    ax.plot((-12, -12), ylim, color='g', linestyle="--")
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)

    # finally, add the labels, etc.
    plt.suptitle(args.title, fontsize=args.font_size, y=1.03)
    ax.set_xlabel(args.xlabel, fontsize=args.font_size)
    ax.set_ylabel(args.ylabel, fontsize=args.font_size)
    ax.tick_params(axis='both', which='major', labelsize=args.font_size)
    #ax.legend(loc="upper right")

    fig.tight_layout()
    fig.savefig(args.out, bbox_inches='tight')
if __name__ == '__main__':
main()
| 34.345679 | 104 | 0.691948 |
import matplotlib
matplotlib.use('agg')
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import logging
default_title = "Metagene profile Bayes' factors"
default_xlabel = "Offset, relative to translation \ninitiation site"
default_ylabel = "Bayes' factor"
default_font_size = 15
default_series_label = ""
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="This script visualizes the Bayes' factors for a metagene profile.\n\n"
"This script contains some hard-coded field names.")
parser.add_argument('bayes_factors', help="The metagene profile (csv) file")
parser.add_argument('length', help="The profile lengths to visualize", type=int)
parser.add_argument('out', help="The (output) image file")
parser.add_argument('--title', help="The title for the figure", default=default_title)
parser.add_argument('--xlabel', help="The label for the x-axis", default=default_xlabel)
parser.add_argument('--ylabel', help="The label for the y-axis", default=default_ylabel)
parser.add_argument('--series-label', help="The label for the legend", default=default_series_label)
parser.add_argument('--font-size', help="The font size for the title, axis labels, and "
"xticks labels", type=int, default=default_font_size)
args = parser.parse_args()
bayes_factors = pd.read_csv(args.bayes_factors)
mask_length = bayes_factors['length'] == args.length
group = bayes_factors.loc[mask_length]
bfs = group['bayes_factor_mean']
offsets = group['offset']
bf_range = max(bfs) - min(bfs)
fig, ax = plt.subplots(figsize=(10,5))
ax.plot(offsets, bfs, label=args.series_label, color='b')
ax.scatter(offsets, bfs, color='b')
xlim = (min(offsets), max(offsets))
ymin = min(bfs) - 0.1*bf_range
ymax = max(bfs) + 0.1*bf_range
ylim = (ymin, ymax)
# and draw a line at "bf=5"
plt.plot(xlim, (5, 5), color='k', linewidth=2, linestyle=':')
# and a horizontal line at the maximum bf
plt.plot(xlim, (max(bfs), max(bfs)), color='r', linewidth=1, linestyle="-.")
# and a vertical line at "offset=-12"
ax.plot((-12, -12), ylim, color='g', linestyle="--")
ax.set_xlim(xlim)
ax.set_ylim(ylim)
# finally, add the labels, etc.
plt.suptitle(args.title, fontsize=args.font_size, y=1.03)
ax.set_xlabel(args.xlabel, fontsize=args.font_size)
ax.set_ylabel(args.ylabel, fontsize=args.font_size)
ax.tick_params(axis='both', which='major', labelsize=args.font_size)
#ax.legend(loc="upper right")
fig.tight_layout()
fig.savefig(args.out, bbox_inches='tight')
if __name__ == '__main__':
main()
| true | true |
f715ed9bba993419b3f1d384d817b622367f47e1 | 2,536 | py | Python | pxlc/qt/DropDownSelectMenu.py | pxlc/pxlc_td | 44d08dd9e9a9595449005f3446536e7a02113c95 | [
"MIT"
] | 2 | 2020-10-06T22:56:10.000Z | 2022-03-07T04:13:47.000Z | pxlc/qt/DropDownSelectMenu.py | pxlc/pxlc_td | 44d08dd9e9a9595449005f3446536e7a02113c95 | [
"MIT"
] | null | null | null | pxlc/qt/DropDownSelectMenu.py | pxlc/pxlc_td | 44d08dd9e9a9595449005f3446536e7a02113c95 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2018 pxlc@github
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -------------------------------------------------------------------------------
from PySide import QtCore, QtGui
from .cb import connect_callback # local import
__INFO__ = '''
item list:
[
{
'label': 'Menu label',
'select_data': 'any type, returned if item is selected',
'style': 'style sheet string (optional)',
}
]
'''
class DropDownSelectMenu(QtGui.QComboBox):
    """A combo box whose entries are driven by a list of item dicts.

    Each item dict follows the shape documented in ``__INFO__`` above:
    a ``'label'`` to display plus arbitrary payload keys that callers can
    retrieve through :meth:`get_current_item`.
    """

    def __init__(self, item_list=None, parent=None):
        """
        :param item_list: optional list of item dicts to hold initially
            (``None`` means empty; the list is copied defensively).
        :param parent: optional Qt parent widget.
        """
        super(DropDownSelectMenu, self).__init__(parent=parent)
        # Avoid the shared mutable-default pitfall (`item_list=[]`):
        # build a fresh per-instance copy instead.
        self.item_list = list(item_list) if item_list else []

    def clear_all_items(self):
        """Remove every entry from the combo box widget.

        Note: this only clears the Qt-side entries; ``self.item_list``
        is left untouched (load_items replaces it separately).
        """
        while self.count() > 0:
            self.removeItem(0)

    def load_items(self, item_list):
        """Replace the menu contents with *item_list* (list of item dicts)."""
        self.clear_all_items()
        self.item_list = item_list[:]
        for item in self.item_list:
            label = item.get('label', '')
            self.addItem(label)
        # Let the popup grow/shrink to fit the widest label.
        self.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)

    def set_index_changed_callback(self, index_changed_cb_fn):
        """Invoke *index_changed_cb_fn* whenever the current selection changes.

        The callback receives this widget via the ``'wdg'`` entry of the
        data dict passed to ``connect_callback``.
        """
        connect_callback(self.currentIndexChanged, index_changed_cb_fn,
                         {'wdg': self}, containing_obj=self)

    def get_current_item(self):
        """Return the item dict for the current selection, or ``None``
        when nothing valid is selected."""
        curr_idx = self.currentIndex()
        if 0 <= curr_idx < len(self.item_list):
            return self.item_list[curr_idx]
        return None
| 32.101266 | 107 | 0.651025 |
from PySide import QtCore, QtGui
from .cb import connect_callback
__INFO__ = '''
item list:
[
{
'label': 'Menu label',
'select_data': 'any type, returned if item is selected',
'style': 'style sheet string (optional)',
}
]
'''
class DropDownSelectMenu(QtGui.QComboBox):
def __init__(self, item_list=[], parent=None):
super(DropDownSelectMenu, self).__init__(parent=parent)
self.item_list = item_list[:]
def clear_all_items(self):
while self.count() > 0:
self.removeItem(0)
def load_items(self, item_list):
self.clear_all_items()
self.item_list = item_list[:]
for item in self.item_list:
label = item.get('label','')
self.addItem(label)
self.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)
def set_index_changed_callback(self, index_changed_cb_fn):
connect_callback(self.currentIndexChanged, index_changed_cb_fn, {'wdg': self}, containing_obj=self)
def get_current_item(self):
curr_idx = self.currentIndex()
if curr_idx >= 0 and curr_idx < len(self.item_list):
return self.item_list[curr_idx]
return None
| true | true |
f715edafdd23569ac05010564563d3ff065388fb | 8,828 | py | Python | docs/source/conf.py | andrewseidl/ibis | 1468b8c4f96d9d58f6fa147a2579b0d9e5796186 | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | andrewseidl/ibis | 1468b8c4f96d9d58f6fa147a2579b0d9e5796186 | [
"Apache-2.0"
] | 6 | 2017-05-18T19:49:09.000Z | 2019-03-27T15:37:14.000Z | docs/source/conf.py | andrewseidl/ibis | 1468b8c4f96d9d58f6fa147a2579b0d9e5796186 | [
"Apache-2.0"
] | 1 | 2017-06-26T15:43:35.000Z | 2017-06-26T15:43:35.000Z | # -*- coding: utf-8 -*-
#
# Ibis documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 10 11:06:29 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import glob
import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.extlinks',
    'sphinx.ext.mathjax',
    'numpydoc',
    'nbsphinx',
    'IPython.sphinxext.ipython_directive',
    'IPython.sphinxext.ipython_console_highlighting',
]
# Regenerate autosummary stub pages for every top-level .rst source on
# each build (a list of files, not the boolean form commented below).
autosummary_generate = glob.glob("*.rst")
# autosummary_generate = True
# Keep numpydoc from injecting an autosummary table of all class members
# into every documented class.
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Ibis'
# 'copyright' is the Sphinx setting name; it deliberately shadows the
# builtin of the same name. Year is computed at build time.
copyright = '{}, Ibis Developers'.format(datetime.date.today().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '0.2'
# Mid-file import (hence the noqa): the version/release strings are read
# straight from the installed ibis package.
from ibis import __version__ as version # noqa: E402
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# Skip Sphinx build output and stray Jupyter checkpoint copies of the
# notebook sources picked up by nbsphinx.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Read the Docs theme; imported next to its settings (hence the noqa) and
# asked for the directory its theme files are installed in.
import sphinx_rtd_theme # noqa: E402
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo-wide.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Ibisdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# NOTE(review): the commented 'papersize'/'pointsize'/'preamble' lines
# below are leftovers of the default conf.py template; to have any effect
# they would need to be keys *inside* the latex_elements dict above.
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'Ibis.tex', 'Ibis Documentation', 'Ibis Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# extlinks alias
# Lets the docs write :issue:`123` to link GitHub issue 123; the '#'
# caption prefix renders the link text as "#123".
extlinks = {'issue': ('https://github.com/ibis-project/ibis/issues/%s', '#')}
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'ibis', 'Ibis Documentation',
     ['Ibis Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Ibis', 'Ibis Documentation',
     'Ibis Developers', 'Ibis', 'Pandas-like expressions for analytics',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| 31.194346 | 79 | 0.715224 |
import glob
import datetime
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.mathjax',
'numpydoc',
'nbsphinx',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
]
autosummary_generate = glob.glob("*.rst")
numpydoc_show_class_members = False
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'Ibis'
copyright = '{}, Ibis Developers'.format(datetime.date.today().year)
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '0.2'
from ibis import __version__ as version # noqa: E402
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme # noqa: E402
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo-wide.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Ibisdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Ibis.tex', 'Ibis Documentation', 'Ibis Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# extlinks alias
extlinks = {'issue': ('https://github.com/ibis-project/ibis/issues/%s', '
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ibis', 'Ibis Documentation',
['Ibis Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Ibis', 'Ibis Documentation',
'Ibis Developers', 'Ibis', 'Pandas-like expressions for analytics',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
| true | true |
f715ee5b393ad823887100ea16fc12a89479e531 | 1,091 | py | Python | masakari-7.0.0/masakari/tests/uuidsentinel.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 70 | 2016-07-22T21:58:00.000Z | 2022-01-04T06:05:32.000Z | masakari-7.0.0/masakari/tests/uuidsentinel.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | masakari-7.0.0/masakari/tests/uuidsentinel.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 33 | 2016-07-05T02:05:25.000Z | 2021-12-20T07:40:43.000Z | # Copyright 2016 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
class UUIDSentinels(object):
    """Namespace of lazily-created, memoized UUID sentinels.

    Reading any non-underscore attribute returns a UUID string that is
    generated on first access and repeated verbatim on every subsequent
    access of the same name.
    """

    def __init__(self):
        # Deferred import: oslo_utils is resolved only when the first
        # instance is created, not at module import time.
        from oslo_utils import uuidutils
        self._uuid_module = uuidutils
        self._sentinels = {}

    def __getattr__(self, name):
        """Return the memoized UUID for ``name``, minting one on first use.

        Raises ValueError for leading-underscore names so private/dunder
        lookups can never silently become sentinels.
        """
        if name.startswith('_'):
            raise ValueError('Sentinels must not start with _')
        try:
            return self._sentinels[name]
        except KeyError:
            fresh = self._uuid_module.generate_uuid()
            self._sentinels[name] = fresh
            return fresh
sys.modules[__name__] = UUIDSentinels()
| 32.088235 | 75 | 0.711274 |
import sys
class UUIDSentinels(object):
def __init__(self):
from oslo_utils import uuidutils
self._uuid_module = uuidutils
self._sentinels = {}
def __getattr__(self, name):
if name.startswith('_'):
raise ValueError('Sentinels must not start with _')
if name not in self._sentinels:
self._sentinels[name] = self._uuid_module.generate_uuid()
return self._sentinels[name]
sys.modules[__name__] = UUIDSentinels()
| true | true |
f715ee8ea645eac7275a181386fd2444e7fa7fa0 | 5,420 | py | Python | sdk/python/pulumi_azure_native/machinelearningservices/v20200515preview/get_aks_service.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/machinelearningservices/v20200515preview/get_aks_service.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/machinelearningservices/v20200515preview/get_aks_service.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Explicit public API of this generated module; mirrors the definitions below.
__all__ = [
    'GetAKSServiceResult',
    'AwaitableGetAKSServiceResult',
    'get_aks_service',
]
@pulumi.output_type
class GetAKSServiceResult:
    """
    Machine Learning service object wrapped into ARM resource envelope.
    Instances are produced by :func:`get_aks_service`; each field arrives
    as a plain wire value (str or dict) and is shallow-type-checked below.
    """
    # Generated constructor: validate the outer type of each raw argument,
    # then attach it with pulumi.set so the @pulumi.getter properties can
    # read it back via pulumi.get.
    def __init__(__self__, id=None, identity=None, location=None, name=None, properties=None, sku=None, tags=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if identity and not isinstance(identity, dict):
            raise TypeError("Expected argument 'identity' to be a dict")
        pulumi.set(__self__, "identity", identity)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Specifies the resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.IdentityResponse']:
        """
        The identity of the resource.
        """
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Specifies the location of the resource.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Specifies the name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> Any:
        """
        Service properties (opaque JSON-like payload, hence ``Any``).
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """
        The sku of the workspace.
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Contains resource tags defined as key/value pairs.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Specifies the type of the resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetAKSServiceResult(GetAKSServiceResult):
    """Result subclass that additionally supports ``await``.
    ``__await__`` completes immediately, yielding nothing, and produces a
    plain :class:`GetAKSServiceResult` copy of this object's fields.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # Never executed: the unreachable yield only exists to make
        # __await__ a generator function, as the awaitable protocol needs.
        if False:
            yield self
        return GetAKSServiceResult(
            id=self.id,
            identity=self.identity,
            location=self.location,
            name=self.name,
            properties=self.properties,
            sku=self.sku,
            tags=self.tags,
            type=self.type)
def get_aks_service(expand: Optional[bool] = None,
                    resource_group_name: Optional[str] = None,
                    service_name: Optional[str] = None,
                    workspace_name: Optional[str] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAKSServiceResult:
    """
    Machine Learning service object wrapped into ARM resource envelope.
    :param bool expand: Set to True to include Model details.
    :param str resource_group_name: Name of the resource group in which workspace is located.
    :param str service_name: Name of the Azure Machine Learning service.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    :param opts: optional invoke options; the version defaults to this SDK's.
    :return: the service description wrapped in an awaitable result object.
    """
    # Map the snake_case Python arguments onto the provider's camelCase
    # invoke arguments.
    __args__ = dict()
    __args__['expand'] = expand
    __args__['resourceGroupName'] = resource_group_name
    __args__['serviceName'] = service_name
    __args__['workspaceName'] = workspace_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider function; .value unwraps the result envelope.
    __ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20200515preview:getAKSService', __args__, opts=opts, typ=GetAKSServiceResult).value
    return AwaitableGetAKSServiceResult(
        id=__ret__.id,
        identity=__ret__.identity,
        location=__ret__.location,
        name=__ret__.name,
        properties=__ret__.properties,
        sku=__ret__.sku,
        tags=__ret__.tags,
        type=__ret__.type)
| 33.04878 | 158 | 0.62583 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetAKSServiceResult',
'AwaitableGetAKSServiceResult',
'get_aks_service',
]
@pulumi.output_type
class GetAKSServiceResult:
def __init__(__self__, id=None, identity=None, location=None, name=None, properties=None, sku=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.IdentityResponse']:
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> Optional[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetAKSServiceResult(GetAKSServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAKSServiceResult(
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
properties=self.properties,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_aks_service(expand: Optional[bool] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAKSServiceResult:
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20200515preview:getAKSService', __args__, opts=opts, typ=GetAKSServiceResult).value
return AwaitableGetAKSServiceResult(
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
| true | true |
f715f0635c3ef7697ec0cfe38a3e89fa3c316e5f | 735 | py | Python | awx/main/migrations/0016_v330_non_blank_workflow.py | james-crowley/awx | 5cd44cde991a9526810809544e7a8f12e6174711 | [
"Apache-2.0"
] | 1 | 2021-12-27T14:33:10.000Z | 2021-12-27T14:33:10.000Z | awx/main/migrations/0016_v330_non_blank_workflow.py | james-crowley/awx | 5cd44cde991a9526810809544e7a8f12e6174711 | [
"Apache-2.0"
] | 35 | 2021-03-01T06:34:26.000Z | 2022-03-01T01:18:42.000Z | awx/main/migrations/0016_v330_non_blank_workflow.py | james-crowley/awx | 5cd44cde991a9526810809544e7a8f12e6174711 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-11 16:40
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.11.7).

    Alters ``WorkflowJobTemplateNode.workflow_job_template`` so the foreign
    key is no longer nullable/blank.

    NOTE(review): intent inferred from the migration file name
    ('0016_v330_non_blank_workflow') -- confirm against the model change.
    """
    dependencies = [
        ('main', '0015_v330_blank_start_args'),
    ]
    operations = [
        migrations.AlterField(
            model_name='workflowjobtemplatenode',
            name='workflow_job_template',
            field=models.ForeignKey(
                default=None, on_delete=django.db.models.deletion.CASCADE, related_name='workflow_job_template_nodes', to='main.WorkflowJobTemplate'
            ),
            # preserve_default=False: the default (None) above is only used
            # while applying this migration; it is not kept on the field.
            preserve_default=False,
        ),
    ]
| 28.269231 | 148 | 0.665306 |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter ``WorkflowJobTemplateNode.workflow_job_template`` to a required
    (non-blank) foreign key, building on migration 0015."""

    dependencies = [
        ('main', '0015_v330_blank_start_args'),
    ]

    operations = [
        migrations.AlterField(
            model_name='workflowjobtemplatenode',
            name='workflow_job_template',
            field=models.ForeignKey(
                to='main.WorkflowJobTemplate',
                related_name='workflow_job_template_nodes',
                on_delete=django.db.models.deletion.CASCADE,
                default=None,
            ),
            # The default above is only applied during the migration itself
            preserve_default=False,
        ),
    ]
| true | true |
f715f097181f6d815bcda6fe2acd64e76df19463 | 8,577 | py | Python | models_all_solvable2/fac2.py | grossmann-group/pyomo-MINLP-benchmarking | 714f0a0dffd61675649a805683c0627af6b4929e | [
"MIT"
] | 7 | 2019-05-08T19:14:34.000Z | 2021-12-24T00:00:40.000Z | models_all_solvable2/fac2.py | grossmann-group/pyomo-MINLP-benchmarking | 714f0a0dffd61675649a805683c0627af6b4929e | [
"MIT"
] | null | null | null | models_all_solvable2/fac2.py | grossmann-group/pyomo-MINLP-benchmarking | 714f0a0dffd61675649a805683c0627af6b4929e | [
"MIT"
] | 2 | 2020-05-21T22:15:51.000Z | 2020-06-02T23:02:08.000Z | # MINLP written by GAMS Convert at 05/15/20 00:50:46
#
# Equation counts
# Total E G L N X C B
# 34 22 3 9 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 67 55 12 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 217 163 54 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()

# Decision variables, attached programmatically so they still appear on the
# model under their original names (m.x1..m.x54, m.b55..m.b66):
#   x1..x54  -- continuous variables in [0, 1000], three groups of 18
#   b55..b66 -- binary selection variables
for _i in range(1, 55):
    setattr(m, 'x%d' % _i, Var(within=Reals, bounds=(0, 1000), initialize=0))
for _i in range(55, 67):
    setattr(m, 'b%d' % _i, Var(within=Binary, bounds=(0, 1), initialize=0))

# Objective: three nonlinear (power 2.5) group-cost terms, linear
# per-variable costs and fixed charges on b64..b66.  Coefficients are taken
# unchanged from the original GAMS-converted model.
m.obj = Objective(expr=276.28*(m.x1 + m.x2 + m.x3 + m.x4 + m.x5 + m.x6 + m.x19 + m.x20 + m.x21 + m.x22 + m.x23 + m.x24
                               + m.x37 + m.x38 + m.x39 + m.x40 + m.x41 + m.x42)**2.5 + 792.912*(m.x7 + m.x8 + m.x9 + m.x10 +
                               m.x11 + m.x12 + m.x25 + m.x26 + m.x27 + m.x28 + m.x29 + m.x30 + m.x43 + m.x44 + m.x45 + m.x46 +
                               m.x47 + m.x48)**2.5 + 991.679*(m.x13 + m.x14 + m.x15 + m.x16 + m.x17 + m.x18 + m.x31 + m.x32 +
                               m.x33 + m.x34 + m.x35 + m.x36 + m.x49 + m.x50 + m.x51 + m.x52 + m.x53 + m.x54)**2.5 + 115.274*
                               m.x1 + 98.5559*m.x2 + 142.777*m.x3 + 33.9886*m.x4 + 163.087*m.x5 + 10.4376*m.x6 + 234.406*m.x7 +
                               142.066*m.x8 + 50.6436*m.x9 + 123.61*m.x10 + 242.356*m.x11 + 135.071*m.x12 + 10.7347*m.x13 +
                               56.0272*m.x14 + 14.912*m.x15 + 169.218*m.x16 + 209.028*m.x17 + 259.29*m.x18 + 165.41*m.x19 +
                               40.7497*m.x20 + 124.907*m.x21 + 18.495*m.x22 + 95.2789*m.x23 + 251.899*m.x24 + 114.185*m.x25 +
                               37.8148*m.x26 + 10.5547*m.x27 + 52.5162*m.x28 + 37.4727*m.x29 + 254.843*m.x30 + 266.645*m.x31 +
                               136.583*m.x32 + 15.092*m.x33 + 194.101*m.x34 + 78.768*m.x35 + 120.36*m.x36 + 257.318*m.x37 +
                               172.747*m.x38 + 142.813*m.x39 + 251.331*m.x40 + 15.9113*m.x41 + 48.8251*m.x42 + 289.116*m.x43 +
                               129.705*m.x44 + 275.621*m.x45 + 20.2235*m.x46 + 253.789*m.x47 + 56.7474*m.x48 + 201.646*m.x49 +
                               164.573*m.x50 + 295.157*m.x51 + 151.474*m.x52 + 221.794*m.x53 + 278.304*m.x54 + 2481400*m.b64
                               + 2156460*m.b65 + 2097730*m.b66, sense=minimize)

# c2..c7: the odd-indexed and even-indexed halves of each group of 18
# variables are each limited to a total of 60.
for _g in range(3):
    for _p in range(2):
        _first = 18 * _g + 1 + _p
        setattr(m, 'c%d' % (2 + 2 * _g + _p), Constraint(
            expr=sum(getattr(m, 'x%d' % _i)
                     for _i in range(_first, _first + 18, 2)) <= 60))

# c8..c25: x_k + x_{k+18} + x_{k+36} equals 60 exactly when the matching
# binary b_{54 + ceil(k/2)} is on, and 0 otherwise.
for _k in range(1, 19):
    setattr(m, 'c%d' % (7 + _k), Constraint(
        expr=getattr(m, 'x%d' % _k) + getattr(m, 'x%d' % (_k + 18))
             + getattr(m, 'x%d' % (_k + 36))
             - 60 * getattr(m, 'b%d' % (54 + (_k + 1) // 2)) == 0))

# c26..c31: each triple of b's is linked to its selection binary
# (b64..b66) with a triple-specific upper bound and a common lower bound.
for _j, _cap in enumerate((2749.5, 2872.94, 2508.06)):
    _sel = getattr(m, 'b%d' % (64 + _j))
    setattr(m, 'c%d' % (26 + _j), Constraint(
        expr=sum(120 * getattr(m, 'b%d' % (55 + 3 * _j + _t)) for _t in range(3))
             - _cap * _sel <= 0))
    setattr(m, 'c%d' % (29 + _j), Constraint(
        expr=sum(120 * getattr(m, 'b%d' % (55 + 3 * _j + _t)) for _t in range(3))
             - 50 * _sel >= 0))

# c32..c34: each slot picks exactly one of the three options.
for _s in range(3):
    setattr(m, 'c%d' % (32 + _s), Constraint(
        expr=sum(getattr(m, 'b%d' % (55 + _s + 3 * _t)) for _t in range(3)) == 1))
| 49.578035 | 120 | 0.5903 |
from pyomo.environ import *
model = m = ConcreteModel()

# Build the 54 continuous variables (x1..x54, bounded [0, 1000]) and the
# 12 binaries (b55..b66) in loops; setattr registers each component on the
# model under its original name so m.x<i> / m.b<i> remain valid.
for _n in range(1, 55):
    setattr(m, 'x%d' % _n, Var(within=Reals, bounds=(0, 1000), initialize=0))
for _n in range(55, 67):
    setattr(m, 'b%d' % _n, Var(within=Binary, bounds=(0, 1), initialize=0))

# Minimise three power-2.5 group costs plus linear costs and the fixed
# charges on b64..b66 (coefficients preserved verbatim).
m.obj = Objective(expr=276.28*(m.x1 + m.x2 + m.x3 + m.x4 + m.x5 + m.x6 + m.x19 + m.x20 + m.x21 + m.x22 + m.x23 + m.x24
                               + m.x37 + m.x38 + m.x39 + m.x40 + m.x41 + m.x42)**2.5 + 792.912*(m.x7 + m.x8 + m.x9 + m.x10 +
                               m.x11 + m.x12 + m.x25 + m.x26 + m.x27 + m.x28 + m.x29 + m.x30 + m.x43 + m.x44 + m.x45 + m.x46 +
                               m.x47 + m.x48)**2.5 + 991.679*(m.x13 + m.x14 + m.x15 + m.x16 + m.x17 + m.x18 + m.x31 + m.x32 +
                               m.x33 + m.x34 + m.x35 + m.x36 + m.x49 + m.x50 + m.x51 + m.x52 + m.x53 + m.x54)**2.5 + 115.274*
                               m.x1 + 98.5559*m.x2 + 142.777*m.x3 + 33.9886*m.x4 + 163.087*m.x5 + 10.4376*m.x6 + 234.406*m.x7 +
                               142.066*m.x8 + 50.6436*m.x9 + 123.61*m.x10 + 242.356*m.x11 + 135.071*m.x12 + 10.7347*m.x13 +
                               56.0272*m.x14 + 14.912*m.x15 + 169.218*m.x16 + 209.028*m.x17 + 259.29*m.x18 + 165.41*m.x19 +
                               40.7497*m.x20 + 124.907*m.x21 + 18.495*m.x22 + 95.2789*m.x23 + 251.899*m.x24 + 114.185*m.x25 +
                               37.8148*m.x26 + 10.5547*m.x27 + 52.5162*m.x28 + 37.4727*m.x29 + 254.843*m.x30 + 266.645*m.x31 +
                               136.583*m.x32 + 15.092*m.x33 + 194.101*m.x34 + 78.768*m.x35 + 120.36*m.x36 + 257.318*m.x37 +
                               172.747*m.x38 + 142.813*m.x39 + 251.331*m.x40 + 15.9113*m.x41 + 48.8251*m.x42 + 289.116*m.x43 +
                               129.705*m.x44 + 275.621*m.x45 + 20.2235*m.x46 + 253.789*m.x47 + 56.7474*m.x48 + 201.646*m.x49 +
                               164.573*m.x50 + 295.157*m.x51 + 151.474*m.x52 + 221.794*m.x53 + 278.304*m.x54 + 2481400*m.b64
                               + 2156460*m.b65 + 2097730*m.b66, sense=minimize)

# c2..c7: cap (at 60) the odd and even halves of each of the three
# 18-variable groups.
for _grp in range(3):
    for _par in range(2):
        _lo = 18 * _grp + 1 + _par
        setattr(m, 'c%d' % (2 + 2 * _grp + _par), Constraint(
            expr=sum(getattr(m, 'x%d' % _ix)
                     for _ix in range(_lo, _lo + 18, 2)) <= 60))

# c8..c25: tie x_k + x_{k+18} + x_{k+36} to 60 * b_{54+ceil(k/2)}.
for _kk in range(1, 19):
    setattr(m, 'c%d' % (7 + _kk), Constraint(
        expr=getattr(m, 'x%d' % _kk) + getattr(m, 'x%d' % (_kk + 18))
             + getattr(m, 'x%d' % (_kk + 36))
             - 60 * getattr(m, 'b%d' % (54 + (_kk + 1) // 2)) == 0))

# c26..c31: link each consecutive triple of b's to its selection binary
# b64..b66 with an upper bound specific to the triple and a shared lower
# bound of 50.
for _q, _ub in enumerate((2749.5, 2872.94, 2508.06)):
    _bsel = getattr(m, 'b%d' % (64 + _q))
    setattr(m, 'c%d' % (26 + _q), Constraint(
        expr=sum(120 * getattr(m, 'b%d' % (55 + 3 * _q + _r)) for _r in range(3))
             - _ub * _bsel <= 0))
    setattr(m, 'c%d' % (29 + _q), Constraint(
        expr=sum(120 * getattr(m, 'b%d' % (55 + 3 * _q + _r)) for _r in range(3))
             - 50 * _bsel >= 0))

# c32..c34: one-of-three assignment for each slot.
for _sl in range(3):
    setattr(m, 'c%d' % (32 + _sl), Constraint(
        expr=sum(getattr(m, 'b%d' % (55 + _sl + 3 * _r)) for _r in range(3)) == 1))
| true | true |
f715f09dd9a1ab89449f85a7df5a818d88fa8086 | 37,361 | py | Python | banana/study/mri/dwi.py | apoz00003/banana | 50bf516cc4f7d4d93985e42d0c4dcbc62fb8058a | [
"Apache-2.0"
] | null | null | null | banana/study/mri/dwi.py | apoz00003/banana | 50bf516cc4f7d4d93985e42d0c4dcbc62fb8058a | [
"Apache-2.0"
] | null | null | null | banana/study/mri/dwi.py | apoz00003/banana | 50bf516cc4f7d4d93985e42d0c4dcbc62fb8058a | [
"Apache-2.0"
] | null | null | null | from logging import getLogger
from nipype.interfaces.utility import Merge
from nipype.interfaces.fsl import (
TOPUP, ApplyTOPUP, BET, FUGUE, Merge as FslMerge)
from nipype.interfaces import fsl
from nipype.interfaces.utility import Merge as merge_lists
from nipype.interfaces.fsl.epi import PrepareFieldmap
from nipype.interfaces.mrtrix3 import ResponseSD, Tractography
from nipype.interfaces.mrtrix3.utils import BrainMask, TensorMetrics
from nipype.interfaces.mrtrix3.reconst import FitTensor, EstimateFOD
from banana.interfaces.custom.motion_correction import GenTopupConfigFiles
from banana.interfaces.mrtrix import (
DWIPreproc, MRCat, ExtractDWIorB0, MRMath, DWIBiasCorrect, DWIDenoise,
MRCalc, DWIIntensityNorm, AverageResponse, DWI2Mask)
# from nipype.workflows.dwi.fsl.tbss import create_tbss_all
# from banana.interfaces.noddi import (
# CreateROI, BatchNODDIFitting, SaveParamsAsNIfTI)
from nipype.interfaces import fsl, mrtrix3, utility
from arcana.utils.interfaces import MergeTuple, Chain
from arcana.data import FilesetSpec, InputFilesetSpec
from arcana.utils.interfaces import SelectSession
from arcana.study import ParamSpec, SwitchSpec
from arcana.exceptions import ArcanaMissingDataException, ArcanaNameError
from banana.requirement import (
fsl_req, mrtrix_req, ants_req)
from banana.interfaces.mrtrix import MRConvert, ExtractFSLGradients
from banana.study import StudyMetaClass
from banana.interfaces.custom.motion_correction import (
PrepareDWI, AffineMatrixGeneration)
from banana.interfaces.custom.dwi import TransformGradients
from banana.interfaces.utility import AppendPath
from banana.study.base import Study
from banana.bids_ import BidsInputs, BidsAssocInputs
from banana.exceptions import BananaUsageError
from banana.citation import (
mrtrix_cite, fsl_cite, eddy_cite, topup_cite, distort_correct_cite,
n4_cite, dwidenoise_cites)
from banana.file_format import (
mrtrix_image_format, nifti_gz_format, nifti_gz_x_format, fsl_bvecs_format,
fsl_bvals_format, text_format, dicom_format, eddy_par_format,
mrtrix_track_format, motion_mats_format, text_matrix_format,
directory_format, csv_format, zip_format, STD_IMAGE_FORMATS)
from .base import MriStudy
from .epi import EpiSeriesStudy, EpiStudy
logger = getLogger('banana')
class DwiStudy(EpiSeriesStudy, metaclass=StudyMetaClass):
desc = "Diffusion-weighted MRI contrast"
add_data_specs = [
InputFilesetSpec('anat_5tt', mrtrix_image_format,
desc=("A co-registered segmentation image taken from "
"freesurfer output and simplified into 5 tissue"
" types. Used in ACT streamlines tractography"),
optional=True),
InputFilesetSpec('anat_fs_recon_all', zip_format, optional=True,
desc=("Co-registered freesurfer recon-all output. "
"Used in building the connectome")),
InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True),
FilesetSpec('grad_dirs', fsl_bvecs_format, 'preprocess_pipeline'),
FilesetSpec('grad_dirs_coreg', fsl_bvecs_format,
'series_coreg_pipeline',
desc=("The gradient directions coregistered to the "
"orientation of the coreg reference")),
FilesetSpec('bvalues', fsl_bvals_format, 'preprocess_pipeline',
desc=("")),
FilesetSpec('eddy_par', eddy_par_format, 'preprocess_pipeline',
desc=("")),
FilesetSpec('noise_residual', mrtrix_image_format,
'preprocess_pipeline',
desc=("")),
FilesetSpec('tensor', nifti_gz_format, 'tensor_pipeline',
desc=("")),
FilesetSpec('fa', nifti_gz_format, 'tensor_metrics_pipeline',
desc=("")),
FilesetSpec('adc', nifti_gz_format, 'tensor_metrics_pipeline',
desc=("")),
FilesetSpec('wm_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('gm_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('csf_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('avg_response', text_format, 'average_response_pipeline',
desc=("")),
FilesetSpec('wm_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('gm_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('csf_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('norm_intensity', mrtrix_image_format,
'intensity_normalisation_pipeline',
desc=("")),
FilesetSpec('norm_intens_fa_template', mrtrix_image_format,
'intensity_normalisation_pipeline', frequency='per_study',
desc=("")),
FilesetSpec('norm_intens_wm_mask', mrtrix_image_format,
'intensity_normalisation_pipeline', frequency='per_study',
desc=("")),
FilesetSpec('global_tracks', mrtrix_track_format,
'global_tracking_pipeline',
desc=("")),
FilesetSpec('wm_mask', mrtrix_image_format,
'global_tracking_pipeline',
desc=("")),
FilesetSpec('connectome', csv_format, 'connectome_pipeline',
desc=(""))]
add_param_specs = [
ParamSpec('multi_tissue', True,
desc=("")),
ParamSpec('preproc_pe_dir', None, dtype=str,
desc=("")),
ParamSpec('tbss_skel_thresh', 0.2,
desc=("")),
ParamSpec('fsl_mask_f', 0.25,
desc=("")),
ParamSpec('bet_robust', True,
desc=("")),
ParamSpec('bet_f_threshold', 0.2,
desc=("")),
ParamSpec('bet_reduce_bias', False,
desc=("")),
ParamSpec('num_global_tracks', int(1e9),
desc=("")),
ParamSpec('global_tracks_cutoff', 0.05,
desc=("")),
SwitchSpec('preproc_denoise', False,
desc=("")),
SwitchSpec('response_algorithm', 'tax',
('tax', 'dhollander', 'msmt_5tt'),
desc=("")),
SwitchSpec('fod_algorithm', 'csd', ('csd', 'msmt_csd'),
desc=("")),
MriStudy.param_spec('bet_method').with_new_choices('mrtrix'),
SwitchSpec('reorient2std', False,
desc=(""))]
primary_bids_input = BidsInputs(
spec_name='series', type='dwi',
valid_formats=(nifti_gz_x_format, nifti_gz_format))
default_bids_inputs = [primary_bids_input,
BidsAssocInputs(
spec_name='bvalues',
primary=primary_bids_input,
association='grads',
type='bval',
format=fsl_bvals_format),
BidsAssocInputs(
spec_name='grad_dirs',
primary=primary_bids_input,
association='grads',
type='bvec',
format=fsl_bvecs_format),
BidsAssocInputs(
spec_name='reverse_phase',
primary=primary_bids_input,
association='epi',
format=nifti_gz_format,
drop_if_missing=True)]
RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM = 5
primary_scan_name = 'series'
@property
def multi_tissue(self):
return self.branch('response_algorithm',
('msmt_5tt', 'dhollander'))
def fsl_grads(self, pipeline, coregistered=True):
"Adds and returns a node to the pipeline to merge the FSL grads and "
"bvecs"
try:
fslgrad = pipeline.node('fslgrad')
except ArcanaNameError:
if self.is_coregistered and coregistered:
grad_dirs = 'grad_dirs_coreg'
else:
grad_dirs = 'grad_dirs'
# Gradient merge node
fslgrad = pipeline.add(
"fslgrad",
MergeTuple(2),
inputs={
'in1': (grad_dirs, fsl_bvecs_format),
'in2': ('bvalues', fsl_bvals_format)})
return (fslgrad, 'out')
    def extract_magnitude_pipeline(self, **name_maps):
        """Builds a pipeline that derives 'magnitude' by extracting the
        first b==0 volume from the diffusion series."""
        pipeline = self.new_pipeline(
            'extract_magnitude',
            desc="Extracts the first b==0 volume from the series",
            citations=[],
            name_maps=name_maps)
        # Select all b==0 volumes from the series, using the (uncoregistered)
        # gradient table to identify them
        dwiextract = pipeline.add(
            'dwiextract',
            ExtractDWIorB0(
                bzero=True,
                out_ext='.nii.gz'),
            inputs={
                'in_file': ('series', nifti_gz_format),
                'fslgrad': self.fsl_grads(pipeline, coregistered=False)},
            requirements=[mrtrix_req.v('3.0rc3')])
        # Keep only the first volume along the 4th (volume) axis
        pipeline.add(
            "extract_first_vol",
            MRConvert(
                coord=(3, 0)),
            inputs={
                'in_file': (dwiextract, 'out_file')},
            outputs={
                'magnitude': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])
        return pipeline
    def preprocess_pipeline(self, **name_maps):
        """Builds the dMRI preprocessing pipeline: optional reorientation and
        denoising, eddy/topup-style distortion correction (via MRtrix
        'dwipreproc'), brain masking and ANTs N4 bias correction.

        Distortion correction (topup) is only performed when a
        'reverse_phase' input is provided; otherwise dwipreproc is run with
        'rpe_none'.

        Outputs: 'series_preproc', 'grad_dirs', 'bvalues', 'eddy_par' and
        (when denoising is enabled) 'noise_residual'.
        """
        # Determine whether we can correct for distortion, i.e. if reference
        # scans are provided
        # Include all references
        references = [fsl_cite, eddy_cite, topup_cite,
                      distort_correct_cite, n4_cite]
        if self.branch('preproc_denoise'):
            references.extend(dwidenoise_cites)
        pipeline = self.new_pipeline(
            name='preprocess',
            name_maps=name_maps,
            desc=(
                "Preprocess dMRI studies using distortion correction"),
            citations=references)
        # Source the gradient table: extract it from the DICOM header when
        # possible, otherwise require explicitly provided bvecs/bvals
        if self.input('series').format == dicom_format:
            extract_grad = pipeline.add(
                "extract_grad",
                ExtractFSLGradients(),
                inputs={
                    'in_file': ('series', dicom_format)},
                outputs={
                    'grad_dirs': ('bvecs_file', fsl_bvecs_format),
                    'bvalues': ('bvals_file', fsl_bvals_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            grad_fsl_inputs = {'in1': (extract_grad, 'bvecs_file'),
                               'in2': (extract_grad, 'bvals_file')}
        elif self.provided('grad_dirs') and self.provided('bvalues'):
            grad_fsl_inputs = {'in1': ('grad_dirs', fsl_bvecs_format),
                               'in2': ('bvalues', fsl_bvals_format)}
        else:
            raise BananaUsageError(
                "Either input 'magnitude' image needs to be in DICOM format "
                "or gradient directions and b-values need to be explicitly "
                "provided to {}".format(self))
        # Gradient merge node: (bvecs, bvals) tuple used by all MRtrix
        # 'grad_fsl'/'fslgrad' inputs below
        grad_fsl = pipeline.add(
            "grad_fsl",
            MergeTuple(2),
            inputs=grad_fsl_inputs)
        gradients = (grad_fsl, 'out')
        # Optionally reorient the series to the standard (MNI) axis order
        if self.branch('reorient2std'):
            reorient = pipeline.add(
                'fslreorient2std',
                fsl.utils.Reorient2Std(
                    output_type='NIFTI_GZ'),
                inputs={
                    'in_file': ('series', nifti_gz_format)},
                requirements=[fsl_req.v('5.0.9')])
            reoriented = (reorient, 'out_file')
        else:
            reoriented = ('series', nifti_gz_format)
        # Denoise the dwi-scan
        if self.branch('preproc_denoise'):
            # Run denoising
            denoise = pipeline.add(
                'denoise',
                DWIDenoise(),
                inputs={
                    'in_file': reoriented},
                requirements=[mrtrix_req.v('3.0rc3')])
            # Calculate residual noise (original minus denoised noise map)
            # for QC purposes
            subtract_operands = pipeline.add(
                'subtract_operands',
                Merge(2),
                inputs={
                    'in1': reoriented,
                    'in2': (denoise, 'noise')})
            pipeline.add(
                'subtract',
                MRCalc(
                    operation='subtract'),
                inputs={
                    'operands': (subtract_operands, 'out')},
                outputs={
                    'noise_residual': ('out_file', mrtrix_image_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            denoised = (denoise, 'out_file')
        else:
            denoised = reoriented
        # Preproc kwargs
        preproc_kwargs = {}
        preproc_inputs = {'in_file': denoised,
                          'grad_fsl': gradients}
        if self.provided('reverse_phase'):
            # A reverse phase-encode scan is available, so run topup-style
            # susceptibility distortion correction ('rpe_pair' mode)
            if self.provided('magnitude', default_okay=False):
                dwi_reference = ('magnitude', mrtrix_image_format)
            else:
                # Extract b=0 volumes
                dwiextract = pipeline.add(
                    'dwiextract',
                    ExtractDWIorB0(
                        bzero=True,
                        out_ext='.nii.gz'),
                    inputs={
                        'in_file': denoised,
                        'fslgrad': gradients},
                    requirements=[mrtrix_req.v('3.0rc3')])
                # Get first b=0 from dwi b=0 volumes
                extract_first_b0 = pipeline.add(
                    "extract_first_vol",
                    MRConvert(
                        coord=(3, 0)),
                    inputs={
                        'in_file': (dwiextract, 'out_file')},
                    requirements=[mrtrix_req.v('3.0rc3')])
                dwi_reference = (extract_first_b0, 'out_file')
            # Concatenate extracted forward rpe with reverse rpe
            combined_images = pipeline.add(
                'combined_images',
                MRCat(),
                inputs={
                    'first_scan': dwi_reference,
                    'second_scan': ('reverse_phase', mrtrix_image_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            # Create node to assign the right PED to the diffusion
            prep_dwi = pipeline.add(
                'prepare_dwi',
                PrepareDWI(),
                inputs={
                    'pe_dir': ('ped', float),
                    'ped_polarity': ('pe_angle', float)})
            preproc_kwargs['rpe_pair'] = True
            distortion_correction = True
            preproc_inputs['se_epi'] = (combined_images, 'out_file')
        else:
            distortion_correction = False
            preproc_kwargs['rpe_none'] = True
        if self.parameter('preproc_pe_dir') is not None:
            preproc_kwargs['pe_dir'] = self.parameter('preproc_pe_dir')
        # Main motion/eddy-current correction step (MRtrix dwipreproc,
        # which wraps FSL eddy/topup)
        preproc = pipeline.add(
            'dwipreproc',
            DWIPreproc(
                no_clean_up=True,
                out_file_ext='.nii.gz',
                # FIXME: Need to determine this programmatically
                # eddy_parameters = '--data_is_shelled '
                temp_dir='dwipreproc_tempdir',
                **preproc_kwargs),
            inputs=preproc_inputs,
            outputs={
                'eddy_par': ('eddy_parameters', eddy_par_format)},
            requirements=[mrtrix_req.v('3.0rc3'), fsl_req.v('5.0.10')],
            wall_time=60)
        if distortion_correction:
            pipeline.connect(prep_dwi, 'pe', preproc, 'pe_dir')
        # Brain mask used to constrain the bias-field estimation below
        mask = pipeline.add(
            'dwi2mask',
            BrainMask(
                out_file='brainmask.nii.gz'),
            inputs={
                'in_file': (preproc, 'out_file'),
                'grad_fsl': gradients},
            requirements=[mrtrix_req.v('3.0rc3')])
        # Create bias correct node
        pipeline.add(
            "bias_correct",
            DWIBiasCorrect(
                method='ants'),
            inputs={
                'grad_fsl': gradients,  # internal
                'in_file': (preproc, 'out_file'),
                'mask': (mask, 'out_file')},
            outputs={
                'series_preproc': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3'), ants_req.v('2.0')])
        return pipeline
    def brain_extraction_pipeline(self, **name_maps):
        """Generates a whole brain mask.

        When the 'bet_method' switch is set to 'mrtrix', the mask is
        derived from the diffusion data with MRtrix's 'dwi2mask' and
        applied to 'mag_preproc' to produce 'brain'; otherwise the base
        class (BET-based) implementation is used.
        """
        if self.branch('bet_method', 'mrtrix'):
            pipeline = self.new_pipeline(
                'brain_extraction',
                desc="Generate brain mask from b0 images",
                citations=[mrtrix_cite],
                name_maps=name_maps)
            if self.provided('coreg_ref'):
                series = 'series_coreg'
            else:
                series = 'series_preproc'
            # Create mask node
            # NOTE(review): the gradients are requested with
            # coregistered=False even when 'series_coreg' is used above --
            # confirm this mismatch is intentional
            masker = pipeline.add(
                'dwi2mask',
                BrainMask(
                    out_file='brain_mask.nii.gz'),
                inputs={
                    'in_file': (series, nifti_gz_format),
                    'grad_fsl': self.fsl_grads(pipeline, coregistered=False)},
                outputs={
                    'brain_mask': ('out_file', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            # Multiply the magnitude image by the mask to produce the
            # skull-stripped 'brain' output
            merge = pipeline.add(
                'merge_operands',
                Merge(2),
                inputs={
                    'in1': ('mag_preproc', nifti_gz_format),
                    'in2': (masker, 'out_file')})
            pipeline.add(
                'apply_mask',
                MRCalc(
                    operation='multiply'),
                inputs={
                    'operands': (merge, 'out')},
                outputs={
                    'brain': ('out_file', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
        else:
            pipeline = super().brain_extraction_pipeline(**name_maps)
        return pipeline
def series_coreg_pipeline(self, **name_maps):
pipeline = super().series_coreg_pipeline(**name_maps)
# Apply coregistration transform to gradients
pipeline.add(
'transform_grads',
TransformGradients(),
inputs={
'gradients': ('grad_dirs', fsl_bvecs_format),
'transform': ('coreg_fsl_mat', text_matrix_format)},
outputs={
'grad_dirs_coreg': ('transformed', fsl_bvecs_format)})
return pipeline
def intensity_normalisation_pipeline(self, **name_maps):
if self.num_sessions < 2:
raise ArcanaMissingDataException(
"Cannot normalise intensities of DWI images as study only "
"contains a single session")
elif self.num_sessions < self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM:
logger.warning(
"The number of sessions in the study ({}) is less than the "
"recommended number for intensity normalisation ({}). The "
"results may be unreliable".format(
self.num_sessions,
self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM))
pipeline = self.new_pipeline(
name='intensity_normalization',
desc="Corrects for B1 field inhomogeneity",
citations=[mrtrix_req.v('3.0rc3')],
name_maps=name_maps)
mrconvert = pipeline.add(
'mrconvert',
MRConvert(
out_ext='.mif'),
inputs={
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline)},
requirements=[mrtrix_req.v('3.0rc3')])
# Pair subject and visit ids together, expanding so they can be
# joined and chained together
session_ids = pipeline.add(
'session_ids',
utility.IdentityInterface(
['subject_id', 'visit_id']),
inputs={
'subject_id': (Study.SUBJECT_ID, int),
'visit_id': (Study.VISIT_ID, int)})
# Set up join nodes
join_fields = ['dwis', 'masks', 'subject_ids', 'visit_ids']
join_over_subjects = pipeline.add(
'join_over_subjects',
utility.IdentityInterface(
join_fields),
inputs={
'masks': (self.brain_mask_spec_name, nifti_gz_format),
'dwis': (mrconvert, 'out_file'),
'subject_ids': (session_ids, 'subject_id'),
'visit_ids': (session_ids, 'visit_id')},
joinsource=self.SUBJECT_ID,
joinfield=join_fields)
join_over_visits = pipeline.add(
'join_over_visits',
Chain(
join_fields),
inputs={
'dwis': (join_over_subjects, 'dwis'),
'masks': (join_over_subjects, 'masks'),
'subject_ids': (join_over_subjects, 'subject_ids'),
'visit_ids': (join_over_subjects, 'visit_ids')},
joinsource=self.VISIT_ID,
joinfield=join_fields)
# Intensity normalization
intensity_norm = pipeline.add(
'dwiintensitynorm',
DWIIntensityNorm(),
inputs={
'in_files': (join_over_visits, 'dwis'),
'masks': (join_over_visits, 'masks')},
outputs={
'norm_intens_fa_template': ('fa_template',
mrtrix_image_format),
'norm_intens_wm_mask': ('wm_mask', mrtrix_image_format)},
requirements=[mrtrix_req.v('3.0rc3')])
# Set up expand nodes
pipeline.add(
'expand', SelectSession(),
inputs={
'subject_ids': (join_over_visits, 'subject_ids'),
'visit_ids': (join_over_visits, 'visit_ids'),
'inlist': (intensity_norm, 'out_files'),
'subject_id': (Study.SUBJECT_ID, int),
'visit_id': (Study.VISIT_ID, int)},
outputs={
'norm_intensity': ('item', mrtrix_image_format)})
# Connect inputs
return pipeline
    def tensor_pipeline(self, **name_maps):
        """
        Fits the apparent diffusion tensor (DT) to each voxel of the
        preprocessed DWI series, within the brain mask.
        """
        pipeline = self.new_pipeline(
            name='tensor',
            desc=("Estimates the apparent diffusion tensor in each "
                  "voxel"),
            citations=[],
            name_maps=name_maps)
        # Create tensor fit node, using the FSL-format gradient table of
        # the (possibly coregistered) preprocessed series
        pipeline.add(
            'dwi2tensor',
            FitTensor(
                out_file='dti.nii.gz'),
            inputs={
                'grad_fsl': self.fsl_grads(pipeline),
                'in_file': (self.series_preproc_spec_name, nifti_gz_format),
                'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
            outputs={
                'tensor': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])
        return pipeline
    def tensor_metrics_pipeline(self, **name_maps):
        """
        Calculates the fractional anisotropy (FA) and apparent diffusion
        coefficient (ADC) maps from the fitted diffusion tensor.
        """
        pipeline = self.new_pipeline(
            name='fa',
            desc=("Calculates the FA and ADC from a tensor image"),
            citations=[],
            name_maps=name_maps)
        # Derive FA and ADC maps from the tensor, within the brain mask
        pipeline.add(
            'metrics',
            TensorMetrics(
                out_fa='fa.nii.gz',
                out_adc='adc.nii.gz'),
            inputs={
                'in_file': ('tensor', nifti_gz_format),
                'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
            outputs={
                'fa': ('out_fa', nifti_gz_format),
                'adc': ('out_adc', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3')])
        return pipeline
def response_pipeline(self, **name_maps):
"""
Estimates the fibre orientation distribution (FOD) using constrained
spherical deconvolution
Parameters
----------
response_algorithm : str
Algorithm used to estimate the response
"""
pipeline = self.new_pipeline(
name='response',
desc=("Estimates the fibre response function"),
citations=[mrtrix_cite],
name_maps=name_maps)
# Create fod fit node
response = pipeline.add(
'response',
ResponseSD(
algorithm=self.parameter('response_algorithm')),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
outputs={
'wm_response': ('wm_file', text_format)},
requirements=[mrtrix_req.v('3.0rc3')])
# Connect to outputs
if self.multi_tissue:
response.inputs.gm_file = 'gm.txt',
response.inputs.csf_file = 'csf.txt',
pipeline.connect_output('gm_response', response, 'gm_file',
text_format)
pipeline.connect_output('csf_response', response, 'csf_file',
text_format)
return pipeline
def average_response_pipeline(self, **name_maps):
"""
Averages the estimate response function over all subjects in the
project
"""
pipeline = self.new_pipeline(
name='average_response',
desc=(
"Averages the fibre response function over the project"),
citations=[mrtrix_cite],
name_maps=name_maps)
join_subjects = pipeline.add(
'join_subjects',
utility.IdentityInterface(['responses']),
inputs={
'responses': ('wm_response', text_format)},
outputs={},
joinsource=self.SUBJECT_ID,
joinfield=['responses'])
join_visits = pipeline.add(
'join_visits',
Chain(['responses']),
inputs={
'responses': (join_subjects, 'responses')},
joinsource=self.VISIT_ID,
joinfield=['responses'])
pipeline.add(
'avg_response',
AverageResponse(),
inputs={
'in_files': (join_visits, 'responses')},
outputs={
'avg_response': ('out_file', text_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def fod_pipeline(self, **name_maps):
"""
Estimates the fibre orientation distribution (FOD) using constrained
spherical deconvolution
Parameters
----------
"""
pipeline = self.new_pipeline(
name='fod',
desc=("Estimates the fibre orientation distribution in each"
" voxel"),
citations=[mrtrix_cite],
name_maps=name_maps)
# Create fod fit node
dwi2fod = pipeline.add(
'dwi2fod',
EstimateFOD(
algorithm=self.parameter('fod_algorithm')),
inputs={
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'wm_txt': ('wm_response', text_format),
'mask_file': (self.brain_mask_spec_name, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline)},
outputs={
'wm_odf': ('wm_odf', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
if self.multi_tissue:
dwi2fod.inputs.gm_odf = 'gm.mif',
dwi2fod.inputs.csf_odf = 'csf.mif',
pipeline.connect_input('gm_response', dwi2fod, 'gm_txt',
text_format),
pipeline.connect_input('csf_response', dwi2fod, 'csf_txt',
text_format),
pipeline.connect_output('gm_odf', dwi2fod, 'gm_odf',
nifti_gz_format),
pipeline.connect_output('csf_odf', dwi2fod, 'csf_odf',
nifti_gz_format),
# Check inputs/output are connected
return pipeline
def extract_b0_pipeline(self, **name_maps):
"""
Extracts the b0 images from a DWI study and takes their mean
"""
pipeline = self.new_pipeline(
name='extract_b0',
desc="Extract b0 image from a DWI study",
citations=[mrtrix_cite],
name_maps=name_maps)
# Extraction node
extract_b0s = pipeline.add(
'extract_b0s',
ExtractDWIorB0(
bzero=True,
quiet=True),
inputs={
'fslgrad': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
# FIXME: Need a registration step before the mean
# Mean calculation node
mean = pipeline.add(
"mean",
MRMath(
axis=3,
operation='mean',
quiet=True),
inputs={
'in_files': (extract_b0s, 'out_file')},
requirements=[mrtrix_req.v('3.0rc3')])
# Convert to Nifti
pipeline.add(
"output_conversion",
MRConvert(
out_ext='.nii.gz',
quiet=True),
inputs={
'in_file': (mean, 'out_file')},
outputs={
'b0': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def global_tracking_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='global_tracking',
desc="Extract b0 image from a DWI study",
citations=[mrtrix_cite],
name_maps=name_maps)
mask = pipeline.add(
'mask',
DWI2Mask(),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
tracking = pipeline.add(
'tracking',
Tractography(
select=self.parameter('num_global_tracks'),
cutoff=self.parameter('global_tracks_cutoff')),
inputs={
'seed_image': (mask, 'out_file'),
'in_file': ('wm_odf', mrtrix_image_format)},
outputs={
'global_tracks': ('out_file', mrtrix_track_format)},
requirements=[mrtrix_req.v('3.0rc3')])
if self.provided('anat_5tt'):
pipeline.connect_input('anat_5tt', tracking, 'act_file',
mrtrix_image_format)
return pipeline
def intrascan_alignment_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='affine_mat_generation',
desc=("Generation of the affine matrices for the main dwi "
"sequence starting from eddy motion parameters"),
citations=[fsl_cite],
name_maps=name_maps)
pipeline.add(
'gen_aff_mats',
AffineMatrixGeneration(),
inputs={
'reference_image': ('mag_preproc', nifti_gz_format),
'motion_parameters': ('eddy_par', eddy_par_format)},
outputs={
'align_mats': ('affine_matrices', motion_mats_format)})
return pipeline
def connectome_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='connectome',
desc=("Generate a connectome from whole brain connectivity"),
citations=[],
name_maps=name_maps)
aseg_path = pipeline.add(
'aseg_path',
AppendPath(
sub_paths=['mri', 'aparc+aseg.mgz']),
inputs={
'base_path': ('anat_fs_recon_all', directory_format)})
pipeline.add(
'connectome',
mrtrix3.BuildConnectome(),
inputs={
'in_file': ('global_tracks', mrtrix_track_format),
'in_parc': (aseg_path, 'out_path')},
outputs={
'connectome': ('out_file', csv_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
class DwiRefStudy(EpiStudy, metaclass=StudyMetaClass):
add_data_specs = [
InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True)
]
desc = ("A special study used in the MR-PET motion correction algorithm to"
" perform distortion correction on the reverse-phase/reference b0 "
"scans by flipping it around and using the DWI series as the "
"reference")
def preprocess_pipeline(self, **name_maps):
if self.provided('reverse_phase'):
return self._topup_pipeline(**name_maps)
else:
return super().preprocess_pipeline(**name_maps)
    def _topup_pipeline(self, **name_maps):
        """
        Implementation of separate topup pipeline, moved from EPI study as it
        is only really relevant for spin-echo DWI. Need to work out what to do
        with it.

        Reorients the main and reverse-phase images, merges them, runs
        FSL TOPUP to estimate the susceptibility field and applies the
        correction to the main image ('mag_preproc' output).
        """
        pipeline = self.new_pipeline(
            name='preprocess_pipeline',
            desc=("Topup distortion correction pipeline"),
            citations=[fsl_cite],
            name_maps=name_maps)
        # Reorient both the main and reverse-phase images to the standard
        # (MNI-like) axis convention before merging
        reorient_epi_in = pipeline.add(
            'reorient_epi_in',
            fsl.utils.Reorient2Std(),
            inputs={
                'in_file': ('magnitude', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])
        reorient_epi_opposite = pipeline.add(
            'reorient_epi_opposite',
            fsl.utils.Reorient2Std(),
            inputs={
                'in_file': ('reverse_phase', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])
        # Work out phase-encoding direction/polarity and order the two
        # images for topup ('main' and 'secondary' outputs)
        prep_dwi = pipeline.add(
            'prepare_dwi',
            PrepareDWI(
                topup=True),
            inputs={
                'pe_dir': ('ped', str),
                'ped_polarity': ('pe_angle', str),
                'dwi': (reorient_epi_in, 'out_file'),
                'dwi1': (reorient_epi_opposite, 'out_file')})
        # Generate the topup/applytopup encoding config files from the
        # detected phase-encoding direction
        ped = pipeline.add(
            'gen_config',
            GenTopupConfigFiles(),
            inputs={
                'ped': (prep_dwi, 'pe')})
        # Collect the two images into a list for fslmerge
        merge_outputs = pipeline.add(
            'merge_files',
            merge_lists(2),
            inputs={
                'in1': (prep_dwi, 'main'),
                'in2': (prep_dwi, 'secondary')})
        # Concatenate the opposed-phase images along time for TOPUP
        merge = pipeline.add(
            'FslMerge',
            FslMerge(
                dimension='t',
                output_type='NIFTI_GZ'),
            inputs={
                'in_files': (merge_outputs, 'out')},
            requirements=[fsl_req.v('5.0.9')])
        # Estimate the susceptibility-induced off-resonance field
        topup = pipeline.add(
            'topup',
            TOPUP(
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': (merge, 'merged_file'),
                'encoding_file': (ped, 'config_file')},
            requirements=[fsl_req.v('5.0.9')])
        # ApplyTOPUP expects a list input, so wrap the main image
        in_apply_tp = pipeline.add(
            'in_apply_tp',
            merge_lists(1),
            inputs={
                'in1': (reorient_epi_in, 'out_file')})
        # Apply the estimated field to distortion-correct the main image
        pipeline.add(
            'applytopup',
            ApplyTOPUP(
                method='jac',
                in_index=[1],
                output_type='NIFTI_GZ'),
            inputs={
                'in_files': (in_apply_tp, 'out'),
                'encoding_file': (ped, 'apply_topup_config'),
                'in_topup_movpar': (topup, 'out_movpar'),
                'in_topup_fieldcoef': (topup, 'out_fieldcoef')},
            outputs={
                'mag_preproc': ('out_corrected', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])
        return pipeline
| 37.064484 | 79 | 0.532855 | from logging import getLogger
from nipype.interfaces.utility import Merge
from nipype.interfaces.fsl import (
TOPUP, ApplyTOPUP, BET, FUGUE, Merge as FslMerge)
from nipype.interfaces import fsl
from nipype.interfaces.utility import Merge as merge_lists
from nipype.interfaces.fsl.epi import PrepareFieldmap
from nipype.interfaces.mrtrix3 import ResponseSD, Tractography
from nipype.interfaces.mrtrix3.utils import BrainMask, TensorMetrics
from nipype.interfaces.mrtrix3.reconst import FitTensor, EstimateFOD
from banana.interfaces.custom.motion_correction import GenTopupConfigFiles
from banana.interfaces.mrtrix import (
DWIPreproc, MRCat, ExtractDWIorB0, MRMath, DWIBiasCorrect, DWIDenoise,
MRCalc, DWIIntensityNorm, AverageResponse, DWI2Mask)
from nipype.interfaces import fsl, mrtrix3, utility
from arcana.utils.interfaces import MergeTuple, Chain
from arcana.data import FilesetSpec, InputFilesetSpec
from arcana.utils.interfaces import SelectSession
from arcana.study import ParamSpec, SwitchSpec
from arcana.exceptions import ArcanaMissingDataException, ArcanaNameError
from banana.requirement import (
fsl_req, mrtrix_req, ants_req)
from banana.interfaces.mrtrix import MRConvert, ExtractFSLGradients
from banana.study import StudyMetaClass
from banana.interfaces.custom.motion_correction import (
PrepareDWI, AffineMatrixGeneration)
from banana.interfaces.custom.dwi import TransformGradients
from banana.interfaces.utility import AppendPath
from banana.study.base import Study
from banana.bids_ import BidsInputs, BidsAssocInputs
from banana.exceptions import BananaUsageError
from banana.citation import (
mrtrix_cite, fsl_cite, eddy_cite, topup_cite, distort_correct_cite,
n4_cite, dwidenoise_cites)
from banana.file_format import (
mrtrix_image_format, nifti_gz_format, nifti_gz_x_format, fsl_bvecs_format,
fsl_bvals_format, text_format, dicom_format, eddy_par_format,
mrtrix_track_format, motion_mats_format, text_matrix_format,
directory_format, csv_format, zip_format, STD_IMAGE_FORMATS)
from .base import MriStudy
from .epi import EpiSeriesStudy, EpiStudy
logger = getLogger('banana')
class DwiStudy(EpiSeriesStudy, metaclass=StudyMetaClass):
desc = "Diffusion-weighted MRI contrast"
add_data_specs = [
InputFilesetSpec('anat_5tt', mrtrix_image_format,
desc=("A co-registered segmentation image taken from "
"freesurfer output and simplified into 5 tissue"
" types. Used in ACT streamlines tractography"),
optional=True),
InputFilesetSpec('anat_fs_recon_all', zip_format, optional=True,
desc=("Co-registered freesurfer recon-all output. "
"Used in building the connectome")),
InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True),
FilesetSpec('grad_dirs', fsl_bvecs_format, 'preprocess_pipeline'),
FilesetSpec('grad_dirs_coreg', fsl_bvecs_format,
'series_coreg_pipeline',
desc=("The gradient directions coregistered to the "
"orientation of the coreg reference")),
FilesetSpec('bvalues', fsl_bvals_format, 'preprocess_pipeline',
desc=("")),
FilesetSpec('eddy_par', eddy_par_format, 'preprocess_pipeline',
desc=("")),
FilesetSpec('noise_residual', mrtrix_image_format,
'preprocess_pipeline',
desc=("")),
FilesetSpec('tensor', nifti_gz_format, 'tensor_pipeline',
desc=("")),
FilesetSpec('fa', nifti_gz_format, 'tensor_metrics_pipeline',
desc=("")),
FilesetSpec('adc', nifti_gz_format, 'tensor_metrics_pipeline',
desc=("")),
FilesetSpec('wm_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('gm_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('csf_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('avg_response', text_format, 'average_response_pipeline',
desc=("")),
FilesetSpec('wm_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('gm_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('csf_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('norm_intensity', mrtrix_image_format,
'intensity_normalisation_pipeline',
desc=("")),
FilesetSpec('norm_intens_fa_template', mrtrix_image_format,
'intensity_normalisation_pipeline', frequency='per_study',
desc=("")),
FilesetSpec('norm_intens_wm_mask', mrtrix_image_format,
'intensity_normalisation_pipeline', frequency='per_study',
desc=("")),
FilesetSpec('global_tracks', mrtrix_track_format,
'global_tracking_pipeline',
desc=("")),
FilesetSpec('wm_mask', mrtrix_image_format,
'global_tracking_pipeline',
desc=("")),
FilesetSpec('connectome', csv_format, 'connectome_pipeline',
desc=(""))]
add_param_specs = [
ParamSpec('multi_tissue', True,
desc=("")),
ParamSpec('preproc_pe_dir', None, dtype=str,
desc=("")),
ParamSpec('tbss_skel_thresh', 0.2,
desc=("")),
ParamSpec('fsl_mask_f', 0.25,
desc=("")),
ParamSpec('bet_robust', True,
desc=("")),
ParamSpec('bet_f_threshold', 0.2,
desc=("")),
ParamSpec('bet_reduce_bias', False,
desc=("")),
ParamSpec('num_global_tracks', int(1e9),
desc=("")),
ParamSpec('global_tracks_cutoff', 0.05,
desc=("")),
SwitchSpec('preproc_denoise', False,
desc=("")),
SwitchSpec('response_algorithm', 'tax',
('tax', 'dhollander', 'msmt_5tt'),
desc=("")),
SwitchSpec('fod_algorithm', 'csd', ('csd', 'msmt_csd'),
desc=("")),
MriStudy.param_spec('bet_method').with_new_choices('mrtrix'),
SwitchSpec('reorient2std', False,
desc=(""))]
primary_bids_input = BidsInputs(
spec_name='series', type='dwi',
valid_formats=(nifti_gz_x_format, nifti_gz_format))
default_bids_inputs = [primary_bids_input,
BidsAssocInputs(
spec_name='bvalues',
primary=primary_bids_input,
association='grads',
type='bval',
format=fsl_bvals_format),
BidsAssocInputs(
spec_name='grad_dirs',
primary=primary_bids_input,
association='grads',
type='bvec',
format=fsl_bvecs_format),
BidsAssocInputs(
spec_name='reverse_phase',
primary=primary_bids_input,
association='epi',
format=nifti_gz_format,
drop_if_missing=True)]
RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM = 5
primary_scan_name = 'series'
@property
def multi_tissue(self):
return self.branch('response_algorithm',
('msmt_5tt', 'dhollander'))
def fsl_grads(self, pipeline, coregistered=True):
try:
fslgrad = pipeline.node('fslgrad')
except ArcanaNameError:
if self.is_coregistered and coregistered:
grad_dirs = 'grad_dirs_coreg'
else:
grad_dirs = 'grad_dirs'
fslgrad = pipeline.add(
"fslgrad",
MergeTuple(2),
inputs={
'in1': (grad_dirs, fsl_bvecs_format),
'in2': ('bvalues', fsl_bvals_format)})
return (fslgrad, 'out')
def extract_magnitude_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
'extract_magnitude',
desc="Extracts the first b==0 volume from the series",
citations=[],
name_maps=name_maps)
dwiextract = pipeline.add(
'dwiextract',
ExtractDWIorB0(
bzero=True,
out_ext='.nii.gz'),
inputs={
'in_file': ('series', nifti_gz_format),
'fslgrad': self.fsl_grads(pipeline, coregistered=False)},
requirements=[mrtrix_req.v('3.0rc3')])
pipeline.add(
"extract_first_vol",
MRConvert(
coord=(3, 0)),
inputs={
'in_file': (dwiextract, 'out_file')},
outputs={
'magnitude': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def preprocess_pipeline(self, **name_maps):
references = [fsl_cite, eddy_cite, topup_cite,
distort_correct_cite, n4_cite]
if self.branch('preproc_denoise'):
references.extend(dwidenoise_cites)
pipeline = self.new_pipeline(
name='preprocess',
name_maps=name_maps,
desc=(
"Preprocess dMRI studies using distortion correction"),
citations=references)
if self.input('series').format == dicom_format:
extract_grad = pipeline.add(
"extract_grad",
ExtractFSLGradients(),
inputs={
'in_file': ('series', dicom_format)},
outputs={
'grad_dirs': ('bvecs_file', fsl_bvecs_format),
'bvalues': ('bvals_file', fsl_bvals_format)},
requirements=[mrtrix_req.v('3.0rc3')])
grad_fsl_inputs = {'in1': (extract_grad, 'bvecs_file'),
'in2': (extract_grad, 'bvals_file')}
elif self.provided('grad_dirs') and self.provided('bvalues'):
grad_fsl_inputs = {'in1': ('grad_dirs', fsl_bvecs_format),
'in2': ('bvalues', fsl_bvals_format)}
else:
raise BananaUsageError(
"Either input 'magnitude' image needs to be in DICOM format "
"or gradient directions and b-values need to be explicitly "
"provided to {}".format(self))
grad_fsl = pipeline.add(
"grad_fsl",
MergeTuple(2),
inputs=grad_fsl_inputs)
gradients = (grad_fsl, 'out')
if self.branch('reorient2std'):
reorient = pipeline.add(
'fslreorient2std',
fsl.utils.Reorient2Std(
output_type='NIFTI_GZ'),
inputs={
'in_file': ('series', nifti_gz_format)},
requirements=[fsl_req.v('5.0.9')])
reoriented = (reorient, 'out_file')
else:
reoriented = ('series', nifti_gz_format)
if self.branch('preproc_denoise'):
denoise = pipeline.add(
'denoise',
DWIDenoise(),
inputs={
'in_file': reoriented},
requirements=[mrtrix_req.v('3.0rc3')])
subtract_operands = pipeline.add(
'subtract_operands',
Merge(2),
inputs={
'in1': reoriented,
'in2': (denoise, 'noise')})
pipeline.add(
'subtract',
MRCalc(
operation='subtract'),
inputs={
'operands': (subtract_operands, 'out')},
outputs={
'noise_residual': ('out_file', mrtrix_image_format)},
requirements=[mrtrix_req.v('3.0rc3')])
denoised = (denoise, 'out_file')
else:
denoised = reoriented
preproc_kwargs = {}
preproc_inputs = {'in_file': denoised,
'grad_fsl': gradients}
if self.provided('reverse_phase'):
if self.provided('magnitude', default_okay=False):
dwi_reference = ('magnitude', mrtrix_image_format)
else:
dwiextract = pipeline.add(
'dwiextract',
ExtractDWIorB0(
bzero=True,
out_ext='.nii.gz'),
inputs={
'in_file': denoised,
'fslgrad': gradients},
requirements=[mrtrix_req.v('3.0rc3')])
extract_first_b0 = pipeline.add(
"extract_first_vol",
MRConvert(
coord=(3, 0)),
inputs={
'in_file': (dwiextract, 'out_file')},
requirements=[mrtrix_req.v('3.0rc3')])
dwi_reference = (extract_first_b0, 'out_file')
combined_images = pipeline.add(
'combined_images',
MRCat(),
inputs={
'first_scan': dwi_reference,
'second_scan': ('reverse_phase', mrtrix_image_format)},
requirements=[mrtrix_req.v('3.0rc3')])
prep_dwi = pipeline.add(
'prepare_dwi',
PrepareDWI(),
inputs={
'pe_dir': ('ped', float),
'ped_polarity': ('pe_angle', float)})
preproc_kwargs['rpe_pair'] = True
distortion_correction = True
preproc_inputs['se_epi'] = (combined_images, 'out_file')
else:
distortion_correction = False
preproc_kwargs['rpe_none'] = True
if self.parameter('preproc_pe_dir') is not None:
preproc_kwargs['pe_dir'] = self.parameter('preproc_pe_dir')
preproc = pipeline.add(
'dwipreproc',
DWIPreproc(
no_clean_up=True,
out_file_ext='.nii.gz',
temp_dir='dwipreproc_tempdir',
**preproc_kwargs),
inputs=preproc_inputs,
outputs={
'eddy_par': ('eddy_parameters', eddy_par_format)},
requirements=[mrtrix_req.v('3.0rc3'), fsl_req.v('5.0.10')],
wall_time=60)
if distortion_correction:
pipeline.connect(prep_dwi, 'pe', preproc, 'pe_dir')
mask = pipeline.add(
'dwi2mask',
BrainMask(
out_file='brainmask.nii.gz'),
inputs={
'in_file': (preproc, 'out_file'),
'grad_fsl': gradients},
requirements=[mrtrix_req.v('3.0rc3')])
pipeline.add(
"bias_correct",
DWIBiasCorrect(
method='ants'),
inputs={
'grad_fsl': gradients,
'in_file': (preproc, 'out_file'),
'mask': (mask, 'out_file')},
outputs={
'series_preproc': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3'), ants_req.v('2.0')])
return pipeline
def brain_extraction_pipeline(self, **name_maps):
if self.branch('bet_method', 'mrtrix'):
pipeline = self.new_pipeline(
'brain_extraction',
desc="Generate brain mask from b0 images",
citations=[mrtrix_cite],
name_maps=name_maps)
if self.provided('coreg_ref'):
series = 'series_coreg'
else:
series = 'series_preproc'
masker = pipeline.add(
'dwi2mask',
BrainMask(
out_file='brain_mask.nii.gz'),
inputs={
'in_file': (series, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline, coregistered=False)},
outputs={
'brain_mask': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
merge = pipeline.add(
'merge_operands',
Merge(2),
inputs={
'in1': ('mag_preproc', nifti_gz_format),
'in2': (masker, 'out_file')})
pipeline.add(
'apply_mask',
MRCalc(
operation='multiply'),
inputs={
'operands': (merge, 'out')},
outputs={
'brain': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
else:
pipeline = super().brain_extraction_pipeline(**name_maps)
return pipeline
def series_coreg_pipeline(self, **name_maps):
pipeline = super().series_coreg_pipeline(**name_maps)
pipeline.add(
'transform_grads',
TransformGradients(),
inputs={
'gradients': ('grad_dirs', fsl_bvecs_format),
'transform': ('coreg_fsl_mat', text_matrix_format)},
outputs={
'grad_dirs_coreg': ('transformed', fsl_bvecs_format)})
return pipeline
def intensity_normalisation_pipeline(self, **name_maps):
if self.num_sessions < 2:
raise ArcanaMissingDataException(
"Cannot normalise intensities of DWI images as study only "
"contains a single session")
elif self.num_sessions < self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM:
logger.warning(
"The number of sessions in the study ({}) is less than the "
"recommended number for intensity normalisation ({}). The "
"results may be unreliable".format(
self.num_sessions,
self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM))
pipeline = self.new_pipeline(
name='intensity_normalization',
desc="Corrects for B1 field inhomogeneity",
citations=[mrtrix_req.v('3.0rc3')],
name_maps=name_maps)
mrconvert = pipeline.add(
'mrconvert',
MRConvert(
out_ext='.mif'),
inputs={
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline)},
requirements=[mrtrix_req.v('3.0rc3')])
session_ids = pipeline.add(
'session_ids',
utility.IdentityInterface(
['subject_id', 'visit_id']),
inputs={
'subject_id': (Study.SUBJECT_ID, int),
'visit_id': (Study.VISIT_ID, int)})
join_fields = ['dwis', 'masks', 'subject_ids', 'visit_ids']
join_over_subjects = pipeline.add(
'join_over_subjects',
utility.IdentityInterface(
join_fields),
inputs={
'masks': (self.brain_mask_spec_name, nifti_gz_format),
'dwis': (mrconvert, 'out_file'),
'subject_ids': (session_ids, 'subject_id'),
'visit_ids': (session_ids, 'visit_id')},
joinsource=self.SUBJECT_ID,
joinfield=join_fields)
join_over_visits = pipeline.add(
'join_over_visits',
Chain(
join_fields),
inputs={
'dwis': (join_over_subjects, 'dwis'),
'masks': (join_over_subjects, 'masks'),
'subject_ids': (join_over_subjects, 'subject_ids'),
'visit_ids': (join_over_subjects, 'visit_ids')},
joinsource=self.VISIT_ID,
joinfield=join_fields)
intensity_norm = pipeline.add(
'dwiintensitynorm',
DWIIntensityNorm(),
inputs={
'in_files': (join_over_visits, 'dwis'),
'masks': (join_over_visits, 'masks')},
outputs={
'norm_intens_fa_template': ('fa_template',
mrtrix_image_format),
'norm_intens_wm_mask': ('wm_mask', mrtrix_image_format)},
requirements=[mrtrix_req.v('3.0rc3')])
pipeline.add(
'expand', SelectSession(),
inputs={
'subject_ids': (join_over_visits, 'subject_ids'),
'visit_ids': (join_over_visits, 'visit_ids'),
'inlist': (intensity_norm, 'out_files'),
'subject_id': (Study.SUBJECT_ID, int),
'visit_id': (Study.VISIT_ID, int)},
outputs={
'norm_intensity': ('item', mrtrix_image_format)})
return pipeline
def tensor_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='tensor',
desc=("Estimates the apparent diffusion tensor in each "
"voxel"),
citations=[],
name_maps=name_maps)
pipeline.add(
'dwi2tensor',
FitTensor(
out_file='dti.nii.gz'),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
outputs={
'tensor': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def tensor_metrics_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='fa',
desc=("Calculates the FA and ADC from a tensor image"),
citations=[],
name_maps=name_maps)
pipeline.add(
'metrics',
TensorMetrics(
out_fa='fa.nii.gz',
out_adc='adc.nii.gz'),
inputs={
'in_file': ('tensor', nifti_gz_format),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
outputs={
'fa': ('out_fa', nifti_gz_format),
'adc': ('out_adc', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def response_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='response',
desc=("Estimates the fibre response function"),
citations=[mrtrix_cite],
name_maps=name_maps)
response = pipeline.add(
'response',
ResponseSD(
algorithm=self.parameter('response_algorithm')),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
outputs={
'wm_response': ('wm_file', text_format)},
requirements=[mrtrix_req.v('3.0rc3')])
if self.multi_tissue:
response.inputs.gm_file = 'gm.txt',
response.inputs.csf_file = 'csf.txt',
pipeline.connect_output('gm_response', response, 'gm_file',
text_format)
pipeline.connect_output('csf_response', response, 'csf_file',
text_format)
return pipeline
def average_response_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='average_response',
desc=(
"Averages the fibre response function over the project"),
citations=[mrtrix_cite],
name_maps=name_maps)
join_subjects = pipeline.add(
'join_subjects',
utility.IdentityInterface(['responses']),
inputs={
'responses': ('wm_response', text_format)},
outputs={},
joinsource=self.SUBJECT_ID,
joinfield=['responses'])
join_visits = pipeline.add(
'join_visits',
Chain(['responses']),
inputs={
'responses': (join_subjects, 'responses')},
joinsource=self.VISIT_ID,
joinfield=['responses'])
pipeline.add(
'avg_response',
AverageResponse(),
inputs={
'in_files': (join_visits, 'responses')},
outputs={
'avg_response': ('out_file', text_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def fod_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='fod',
desc=("Estimates the fibre orientation distribution in each"
" voxel"),
citations=[mrtrix_cite],
name_maps=name_maps)
dwi2fod = pipeline.add(
'dwi2fod',
EstimateFOD(
algorithm=self.parameter('fod_algorithm')),
inputs={
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'wm_txt': ('wm_response', text_format),
'mask_file': (self.brain_mask_spec_name, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline)},
outputs={
'wm_odf': ('wm_odf', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
if self.multi_tissue:
dwi2fod.inputs.gm_odf = 'gm.mif',
dwi2fod.inputs.csf_odf = 'csf.mif',
pipeline.connect_input('gm_response', dwi2fod, 'gm_txt',
text_format),
pipeline.connect_input('csf_response', dwi2fod, 'csf_txt',
text_format),
pipeline.connect_output('gm_odf', dwi2fod, 'gm_odf',
nifti_gz_format),
pipeline.connect_output('csf_odf', dwi2fod, 'csf_odf',
nifti_gz_format),
return pipeline
def extract_b0_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='extract_b0',
desc="Extract b0 image from a DWI study",
citations=[mrtrix_cite],
name_maps=name_maps)
extract_b0s = pipeline.add(
'extract_b0s',
ExtractDWIorB0(
bzero=True,
quiet=True),
inputs={
'fslgrad': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
mean = pipeline.add(
"mean",
MRMath(
axis=3,
operation='mean',
quiet=True),
inputs={
'in_files': (extract_b0s, 'out_file')},
requirements=[mrtrix_req.v('3.0rc3')])
pipeline.add(
"output_conversion",
MRConvert(
out_ext='.nii.gz',
quiet=True),
inputs={
'in_file': (mean, 'out_file')},
outputs={
'b0': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def global_tracking_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='global_tracking',
desc="Extract b0 image from a DWI study",
citations=[mrtrix_cite],
name_maps=name_maps)
mask = pipeline.add(
'mask',
DWI2Mask(),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
tracking = pipeline.add(
'tracking',
Tractography(
select=self.parameter('num_global_tracks'),
cutoff=self.parameter('global_tracks_cutoff')),
inputs={
'seed_image': (mask, 'out_file'),
'in_file': ('wm_odf', mrtrix_image_format)},
outputs={
'global_tracks': ('out_file', mrtrix_track_format)},
requirements=[mrtrix_req.v('3.0rc3')])
if self.provided('anat_5tt'):
pipeline.connect_input('anat_5tt', tracking, 'act_file',
mrtrix_image_format)
return pipeline
def intrascan_alignment_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='affine_mat_generation',
desc=("Generation of the affine matrices for the main dwi "
"sequence starting from eddy motion parameters"),
citations=[fsl_cite],
name_maps=name_maps)
pipeline.add(
'gen_aff_mats',
AffineMatrixGeneration(),
inputs={
'reference_image': ('mag_preproc', nifti_gz_format),
'motion_parameters': ('eddy_par', eddy_par_format)},
outputs={
'align_mats': ('affine_matrices', motion_mats_format)})
return pipeline
def connectome_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='connectome',
desc=("Generate a connectome from whole brain connectivity"),
citations=[],
name_maps=name_maps)
aseg_path = pipeline.add(
'aseg_path',
AppendPath(
sub_paths=['mri', 'aparc+aseg.mgz']),
inputs={
'base_path': ('anat_fs_recon_all', directory_format)})
pipeline.add(
'connectome',
mrtrix3.BuildConnectome(),
inputs={
'in_file': ('global_tracks', mrtrix_track_format),
'in_parc': (aseg_path, 'out_path')},
outputs={
'connectome': ('out_file', csv_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
class DwiRefStudy(EpiStudy, metaclass=StudyMetaClass):
    # Optional reverse phase-encoded scan; when provided it enables the
    # TOPUP-based distortion correction branch of preprocess_pipeline()
    add_data_specs = [
        InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True)
    ]
    desc = ("A special study used in the MR-PET motion correction algorithm to"
            " perform distortion correction on the reverse-phase/reference b0 "
            "scans by flipping it around and using the DWI series as the "
            "reference")
    def preprocess_pipeline(self, **name_maps):
        """Return the preprocessing pipeline for this study.

        Uses the TOPUP distortion-correction variant when a
        'reverse_phase' input was provided, otherwise falls back to the
        standard EPI preprocessing of the parent class.
        """
        if self.provided('reverse_phase'):
            return self._topup_pipeline(**name_maps)
        else:
            return super().preprocess_pipeline(**name_maps)
    def _topup_pipeline(self, **name_maps):
        """Build the FSL TOPUP distortion-correction pipeline.

        Reorients both acquisitions to standard orientation, merges them
        along time, estimates the susceptibility field with TOPUP and
        applies the correction to the main (magnitude) acquisition.
        """
        pipeline = self.new_pipeline(
            name='preprocess_pipeline',
            desc=("Topup distortion correction pipeline"),
            citations=[fsl_cite],
            name_maps=name_maps)
        # Reorient the main acquisition to FSL's standard orientation
        reorient_epi_in = pipeline.add(
            'reorient_epi_in',
            fsl.utils.Reorient2Std(),
            inputs={
                'in_file': ('magnitude', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])
        # ... and likewise for the reverse phase-encoded acquisition
        reorient_epi_opposite = pipeline.add(
            'reorient_epi_opposite',
            fsl.utils.Reorient2Std(),
            inputs={
                'in_file': ('reverse_phase', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])
        # Determine phase-encode direction/polarity and order the two
        # images ('main'/'secondary') for TOPUP
        prep_dwi = pipeline.add(
            'prepare_dwi',
            PrepareDWI(
                topup=True),
            inputs={
                'pe_dir': ('ped', str),
                'ped_polarity': ('pe_angle', str),
                'dwi': (reorient_epi_in, 'out_file'),
                'dwi1': (reorient_epi_opposite, 'out_file')})
        # Generate the acquisition-parameter (encoding) files TOPUP needs
        ped = pipeline.add(
            'gen_config',
            GenTopupConfigFiles(),
            inputs={
                'ped': (prep_dwi, 'pe')})
        merge_outputs = pipeline.add(
            'merge_files',
            merge_lists(2),
            inputs={
                'in1': (prep_dwi, 'main'),
                'in2': (prep_dwi, 'secondary')})
        # Concatenate the two acquisitions along time for field estimation
        merge = pipeline.add(
            'FslMerge',
            FslMerge(
                dimension='t',
                output_type='NIFTI_GZ'),
            inputs={
                'in_files': (merge_outputs, 'out')},
            requirements=[fsl_req.v('5.0.9')])
        # Estimate the susceptibility-induced off-resonance field
        topup = pipeline.add(
            'topup',
            TOPUP(
                output_type='NIFTI_GZ'),
            inputs={
                'in_file': (merge, 'merged_file'),
                'encoding_file': (ped, 'config_file')},
            requirements=[fsl_req.v('5.0.9')])
        in_apply_tp = pipeline.add(
            'in_apply_tp',
            merge_lists(1),
            inputs={
                'in1': (reorient_epi_in, 'out_file')})
        # Apply the estimated field to the main acquisition (jacobian method)
        pipeline.add(
            'applytopup',
            ApplyTOPUP(
                method='jac',
                in_index=[1],
                output_type='NIFTI_GZ'),
            inputs={
                'in_files': (in_apply_tp, 'out'),
                'encoding_file': (ped, 'apply_topup_config'),
                'in_topup_movpar': (topup, 'out_movpar'),
                'in_topup_fieldcoef': (topup, 'out_fieldcoef')},
            outputs={
                'mag_preproc': ('out_corrected', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])
        return pipeline
| true | true |
f715f0ca82d3720e709126ea59e933b3baab9523 | 2,969 | py | Python | test/features/steps/crud_table.py | lyrl/mycli | d62eefdc819a11ecdb97d93dd7ad1922d28a3795 | [
"BSD-3-Clause"
] | 10,997 | 2015-07-27T06:59:04.000Z | 2022-03-31T07:49:26.000Z | test/features/steps/crud_table.py | lyrl/mycli | d62eefdc819a11ecdb97d93dd7ad1922d28a3795 | [
"BSD-3-Clause"
] | 937 | 2015-07-29T09:25:30.000Z | 2022-03-30T23:54:03.000Z | test/features/steps/crud_table.py | lyrl/mycli | d62eefdc819a11ecdb97d93dd7ad1922d28a3795 | [
"BSD-3-Clause"
] | 799 | 2015-07-27T13:13:49.000Z | 2022-03-29T21:24:39.000Z | """Steps for behavioral style tests are defined in this module.
Each step is defined by the string decorating it. This string is used
to call the step in "*.feature" file.
"""
import wrappers
from behave import when, then
from textwrap import dedent
@when('we create table')
def step_create_table(context):
    """Send create table."""
    context.cli.sendline('create table a(x text);')
@when('we insert into table')
def step_insert_into_table(context):
    """Send insert into table."""
    context.cli.sendline('''insert into a(x) values('xxx');''')
@when('we update table')
def step_update_table(context):
    """Send update table."""
    context.cli.sendline('''update a set x = 'yyy' where x = 'xxx';''')
@when('we select from table')
def step_select_from_table(context):
    """Send select from table."""
    context.cli.sendline('select * from a;')
@when('we delete from table')
def step_delete_from_table(context):
    """Send delete from table."""
    context.cli.sendline('''delete from a where x = 'yyy';''')
@when('we drop table')
def step_drop_table(context):
    """Send drop table."""
    context.cli.sendline('drop table a;')
@then('we see table created')
def step_see_table_created(context):
    """Wait to see create table output."""
    wrappers.expect_exact(context, 'Query OK, 0 rows affected', timeout=2)
@then('we see record inserted')
def step_see_record_inserted(context):
    """Wait to see insert output."""
    wrappers.expect_exact(context, 'Query OK, 1 row affected', timeout=2)
@then('we see record updated')
def step_see_record_updated(context):
    """Wait to see update output."""
    wrappers.expect_exact(context, 'Query OK, 1 row affected', timeout=2)
@then('we see data selected')
def step_see_data_selected(context):
    """Wait to see select output.

    Lines of the expected table end with explicit carriage returns
    because the CLI under test runs inside a pseudo-terminal.
    """
    wrappers.expect_pager(
        context, dedent("""\
            +-----+\r
            | x   |\r
            +-----+\r
            | yyy |\r
            +-----+\r
            \r
            """), timeout=2)
    wrappers.expect_exact(context, '1 row in set', timeout=2)
@then('we see record deleted')
def step_see_data_deleted(context):
    """Wait to see delete output."""
    wrappers.expect_exact(context, 'Query OK, 1 row affected', timeout=2)
@then('we see table dropped')
def step_see_table_dropped(context):
    """Wait to see drop output."""
    wrappers.expect_exact(context, 'Query OK, 0 rows affected', timeout=2)
@when('we select null')
def step_select_null(context):
    """Send select null."""
    context.cli.sendline('select null;')
@then('we see null selected')
def step_see_null_selected(context):
    """Wait to see null output (rendered as '<null>' by the CLI)."""
    wrappers.expect_pager(
        context, dedent("""\
            +--------+\r
            | NULL   |\r
            +--------+\r
            | <null> |\r
            +--------+\r
            \r
            """), timeout=2)
    wrappers.expect_exact(context, '1 row in set', timeout=2)
| 26.274336 | 74 | 0.629505 |
import wrappers
from behave import when, then
from textwrap import dedent
@when('we create table')
def step_create_table(context):
context.cli.sendline('create table a(x text);')
@when('we insert into table')
def step_insert_into_table(context):
context.cli.sendline('''insert into a(x) values('xxx');''')
@when('we update table')
def step_update_table(context):
context.cli.sendline('''update a set x = 'yyy' where x = 'xxx';''')
@when('we select from table')
def step_select_from_table(context):
context.cli.sendline('select * from a;')
@when('we delete from table')
def step_delete_from_table(context):
context.cli.sendline('''delete from a where x = 'yyy';''')
@when('we drop table')
def step_drop_table(context):
context.cli.sendline('drop table a;')
@then('we see table created')
def step_see_table_created(context):
wrappers.expect_exact(context, 'Query OK, 0 rows affected', timeout=2)
@then('we see record inserted')
def step_see_record_inserted(context):
wrappers.expect_exact(context, 'Query OK, 1 row affected', timeout=2)
@then('we see record updated')
def step_see_record_updated(context):
wrappers.expect_exact(context, 'Query OK, 1 row affected', timeout=2)
@then('we see data selected')
def step_see_data_selected(context):
wrappers.expect_pager(
context, dedent("""\
+-----+\r
| x |\r
+-----+\r
| yyy |\r
+-----+\r
\r
"""), timeout=2)
wrappers.expect_exact(context, '1 row in set', timeout=2)
@then('we see record deleted')
def step_see_data_deleted(context):
wrappers.expect_exact(context, 'Query OK, 1 row affected', timeout=2)
@then('we see table dropped')
def step_see_table_dropped(context):
wrappers.expect_exact(context, 'Query OK, 0 rows affected', timeout=2)
@when('we select null')
def step_select_null(context):
context.cli.sendline('select null;')
@then('we see null selected')
def step_see_null_selected(context):
wrappers.expect_pager(
context, dedent("""\
+--------+\r
| NULL |\r
+--------+\r
| <null> |\r
+--------+\r
\r
"""), timeout=2)
wrappers.expect_exact(context, '1 row in set', timeout=2)
| true | true |
f715f0d86c7d1a3a041efd85461b676d0a329b65 | 395 | py | Python | django_dapp/migrations/0005_application_default.py | phonkee/django-desktopapp | bd89434470c9d80074e8911d24059f962934c52a | [
"MIT"
] | null | null | null | django_dapp/migrations/0005_application_default.py | phonkee/django-desktopapp | bd89434470c9d80074e8911d24059f962934c52a | [
"MIT"
] | null | null | null | django_dapp/migrations/0005_application_default.py | phonkee/django-desktopapp | bd89434470c9d80074e8911d24059f962934c52a | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2019-04-12 22:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a boolean ``default`` flag to the ``application`` model."""
    dependencies = [
        ('django_dapp', '0004_auto_20190412_2138'),
    ]
    operations = [
        # Existing rows receive default=False for the new column
        migrations.AddField(
            model_name='application',
            name='default',
            field=models.BooleanField(default=False),
        ),
    ]
| 20.789474 | 53 | 0.610127 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_dapp', '0004_auto_20190412_2138'),
]
operations = [
migrations.AddField(
model_name='application',
name='default',
field=models.BooleanField(default=False),
),
]
| true | true |
f715f13073c90b7260a27beedc68a5672549e84b | 1,221 | py | Python | desktop/libs/notebook/setup.py | kokosing/hue | 2307f5379a35aae9be871e836432e6f45138b3d9 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/libs/notebook/setup.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/libs/notebook/setup.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from hueversion import VERSION
setup(
name = "notebook",
version = VERSION,
author = "Hue",
url = 'http://github.com/cloudera/hue',
description = "Type various snippets of code",
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = ['setuptools', 'desktop'],
entry_points = { 'desktop.sdk.application': 'notebook=notebook' },
) | 42.103448 | 74 | 0.72154 |
from setuptools import setup, find_packages
from hueversion import VERSION
setup(
name = "notebook",
version = VERSION,
author = "Hue",
url = 'http://github.com/cloudera/hue',
description = "Type various snippets of code",
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = ['setuptools', 'desktop'],
entry_points = { 'desktop.sdk.application': 'notebook=notebook' },
) | true | true |
f715f2ea5f3929680f1eec4293377f802b326b46 | 103 | py | Python | game/urls.py | BehindLoader/strategy-try | f7f0007515804b2078bb18ae831a326e6e338bbd | [
"MIT"
] | null | null | null | game/urls.py | BehindLoader/strategy-try | f7f0007515804b2078bb18ae831a326e6e338bbd | [
"MIT"
] | null | null | null | game/urls.py | BehindLoader/strategy-try | f7f0007515804b2078bb18ae831a326e6e338bbd | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('get_all', views.get_all)
] | 14.714286 | 34 | 0.708738 | from django.urls import path
from . import views
urlpatterns = [
path('get_all', views.get_all)
] | true | true |
f715f3a67df36230e6e7a6cbb43f59bd83e295a2 | 4,603 | py | Python | pipeline/service/pipeline_engine_adapter/adapter_api.py | sdgdsffdsfff/bk-sops-tencent | e8aff91f822e79031e12b0f66943830f44ced506 | [
"Apache-2.0"
] | 1 | 2020-09-24T07:39:16.000Z | 2020-09-24T07:39:16.000Z | pipeline/service/pipeline_engine_adapter/adapter_api.py | sdgdsffdsfff/bk-sops-tencent | e8aff91f822e79031e12b0f66943830f44ced506 | [
"Apache-2.0"
] | 5 | 2021-02-08T20:46:54.000Z | 2021-06-10T22:54:45.000Z | pipeline/service/pipeline_engine_adapter/adapter_api.py | sdgdsffdsfff/bk-sops-tencent | e8aff91f822e79031e12b0f66943830f44ced506 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from pipeline.engine import api
from pipeline.log.models import LogEntry
# Translation of raw engine node states to the coarser set exposed by this
# adapter: freshly CREATED/READY nodes are already reported as RUNNING.
STATE_MAP = {
    'CREATED': 'RUNNING',
    'READY': 'RUNNING',
    'RUNNING': 'RUNNING',
    'BLOCKED': 'BLOCKED',
    'SUSPENDED': 'SUSPENDED',
    'FINISHED': 'FINISHED',
    'FAILED': 'FAILED',
    'REVOKED': 'REVOKED'
}
def run_pipeline(pipeline_instance, instance_id=None, check_workers=True):
    """Start executing the given pipeline via the engine.

    ``instance_id`` is accepted for interface compatibility but is not
    forwarded to the underlying engine call.
    """
    return api.start_pipeline(pipeline_instance, check_workers=check_workers)
def pause_pipeline(pipeline_id):
    """Suspend a running pipeline."""
    return api.pause_pipeline(pipeline_id)
def revoke_pipeline(pipeline_id):
    """Revoke (terminate) a pipeline."""
    return api.revoke_pipeline(pipeline_id)
def resume_pipeline(pipeline_id):
    """Resume a previously paused pipeline."""
    return api.resume_pipeline(pipeline_id)
def pause_activity(act_id):
    """Appoint a node to pause before it runs."""
    return api.pause_node_appointment(act_id)
def resume_activity(act_id):
    """Appoint a paused node to resume."""
    return api.resume_node_appointment(act_id)
def retry_activity(act_id, inputs=None):
    """Retry a node, optionally overriding its inputs."""
    return api.retry_node(act_id, inputs=inputs)
def skip_activity(act_id):
    """Skip over a node."""
    return api.skip_node(act_id)
def pause_subprocess(subprocess_id):
    """Suspend a running subprocess node."""
    return api.pause_subprocess(subprocess_id)
def skip_exclusive_gateway(gateway_id, flow_id):
    """Skip an exclusive gateway, forcing execution down ``flow_id``."""
    return api.skip_exclusive_gateway(gateway_id, flow_id)
def forced_fail(node_id):
    """Forcibly mark a node as failed."""
    return api.forced_fail(node_id)
def get_inputs(act_id):
    """Return the recorded inputs of a node."""
    return api.get_inputs(act_id)
def get_outputs(act_id):
    """Return the recorded outputs of a node."""
    return api.get_outputs(act_id)
def get_activity_histories(act_id):
    """Return a node's execution histories with formatted timestamps.

    The engine's ``archived_time`` field is renamed to ``finished_time``
    and both timestamps are converted to 'YYYY-mm-dd HH:MM:SS' strings.
    """
    histories = api.get_activity_histories(act_id)
    for item in histories:
        item['started_time'] = _better_time_or_none(item['started_time'])
        item['finished_time'] = _better_time_or_none(item.pop('archived_time'))
    return histories
def callback(act_id, data=None):
    """Deliver callback data to a node waiting on an external callback."""
    return api.activity_callback(act_id, data)
def get_state(node_id):
    """Return the summarised status of *node_id* plus all of its descendants.

    The result is the mapped status dict of the node itself with a
    'children' mapping of every descendant keyed by identifier.
    """
    status_tree = api.get_status_tree(node_id, max_depth=100)
    # _map() must run first: it seeds the 'children' key that the
    # descendant traversal below relies on.
    summary = _map(status_tree)
    children = {}
    _collect_descendants(status_tree, children)
    summary['children'] = children
    return summary
def _get_node_state(tree):
    """Recursively reduce a status tree to a single mapped state string."""
    children = tree.get('children') or {}
    # Leaf node: translate its own state directly
    if not children:
        return STATE_MAP[tree['state']]
    # Parent node: summarise from the recursively-computed child states
    child_states = [_get_node_state(subtree) for subtree in children.values()]
    return STATE_MAP[
        _get_parent_state_from_children_state(tree['state'], child_states)]
def _get_parent_state_from_children_state(parent_state, children_state_list):
"""
@summary: 根据子任务状态计算父任务状态
@param parent_state:
@param children_state_list:
@return:
"""
children_state_set = set(children_state_list)
if parent_state == 'BLOCKED':
if 'RUNNING' in children_state_set:
parent_state = 'RUNNING'
if 'FAILED' in children_state_set:
parent_state = 'FAILED'
return parent_state
def _collect_descendants(tree, descendants):
    """Flatten every descendant of *tree* into *descendants*, keyed by id."""
    for node_id, subtree in tree['children'].items():
        descendants[node_id] = _map(subtree)
        # Recurse only when the child itself has children to flatten
        if subtree['children']:
            _collect_descendants(subtree, descendants)
def _better_time_or_none(time):
return time.strftime('%Y-%m-%d %H:%M:%S') if time else time
def _map(tree):
    """Convert one raw status-tree node into the externally exposed dict."""
    # Guarantee a children mapping so downstream traversal never KeyErrors
    tree.setdefault('children', {})
    started = _better_time_or_none(tree['started_time'])
    finished = _better_time_or_none(tree['archived_time'])
    # Key order is kept identical to the historical output
    return {
        'id': tree['id'],
        'state': _get_node_state(tree),
        'start_time': started,
        'finish_time': finished,
        'loop': tree['loop'],
        'retry': tree['retry'],
        'skip': tree['skip'],
    }
def get_plain_log_for_node(node_id, history_id):
    """Return the plain-text log recorded for a node's given execution history."""
    return LogEntry.objects.plain_log_for_node(node_id=node_id, history_id=history_id)
| 27.39881 | 115 | 0.718662 |
from pipeline.engine import api
from pipeline.log.models import LogEntry
STATE_MAP = {
'CREATED': 'RUNNING',
'READY': 'RUNNING',
'RUNNING': 'RUNNING',
'BLOCKED': 'BLOCKED',
'SUSPENDED': 'SUSPENDED',
'FINISHED': 'FINISHED',
'FAILED': 'FAILED',
'REVOKED': 'REVOKED'
}
def run_pipeline(pipeline_instance, instance_id=None, check_workers=True):
return api.start_pipeline(pipeline_instance, check_workers=check_workers)
def pause_pipeline(pipeline_id):
return api.pause_pipeline(pipeline_id)
def revoke_pipeline(pipeline_id):
return api.revoke_pipeline(pipeline_id)
def resume_pipeline(pipeline_id):
return api.resume_pipeline(pipeline_id)
def pause_activity(act_id):
return api.pause_node_appointment(act_id)
def resume_activity(act_id):
return api.resume_node_appointment(act_id)
def retry_activity(act_id, inputs=None):
return api.retry_node(act_id, inputs=inputs)
def skip_activity(act_id):
return api.skip_node(act_id)
def pause_subprocess(subprocess_id):
return api.pause_subprocess(subprocess_id)
def skip_exclusive_gateway(gateway_id, flow_id):
return api.skip_exclusive_gateway(gateway_id, flow_id)
def forced_fail(node_id):
return api.forced_fail(node_id)
def get_inputs(act_id):
return api.get_inputs(act_id)
def get_outputs(act_id):
return api.get_outputs(act_id)
def get_activity_histories(act_id):
histories = api.get_activity_histories(act_id)
for item in histories:
item['started_time'] = _better_time_or_none(item['started_time'])
item['finished_time'] = _better_time_or_none(item.pop('archived_time'))
return histories
def callback(act_id, data=None):
return api.activity_callback(act_id, data)
def get_state(node_id):
tree = api.get_status_tree(node_id, max_depth=100)
res = _map(tree)
descendants = {}
_collect_descendants(tree, descendants)
res['children'] = descendants
return res
def _get_node_state(tree):
status = []
if not tree.get('children', []):
return STATE_MAP[tree['state']]
for identifier_code, child_tree in tree['children'].items():
status.append(_get_node_state(child_tree))
return STATE_MAP[_get_parent_state_from_children_state(tree['state'], status)]
def _get_parent_state_from_children_state(parent_state, children_state_list):
children_state_set = set(children_state_list)
if parent_state == 'BLOCKED':
if 'RUNNING' in children_state_set:
parent_state = 'RUNNING'
if 'FAILED' in children_state_set:
parent_state = 'FAILED'
return parent_state
def _collect_descendants(tree, descendants):
for identifier_code, child_tree in tree['children'].items():
child_status = _map(child_tree)
descendants[identifier_code] = child_status
if child_tree['children']:
_collect_descendants(child_tree, descendants)
def _better_time_or_none(time):
return time.strftime('%Y-%m-%d %H:%M:%S') if time else time
def _map(tree):
tree.setdefault('children', {})
return {
'id': tree['id'],
'state': _get_node_state(tree),
'start_time': _better_time_or_none(tree['started_time']),
'finish_time': _better_time_or_none(tree['archived_time']),
'loop': tree['loop'],
'retry': tree['retry'],
'skip': tree['skip']
}
def get_plain_log_for_node(node_id, history_id):
return LogEntry.objects.plain_log_for_node(node_id=node_id, history_id=history_id)
| true | true |
f715f47669d217a83d920335ed050f78d844a22c | 5,720 | py | Python | examples/jsonrpc/JSONRPCExample.py | allbuttonspressed/pyjs | c726fdead530eb63ee4763ae15daaa58d84cd58f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | examples/jsonrpc/JSONRPCExample.py | allbuttonspressed/pyjs | c726fdead530eb63ee4763ae15daaa58d84cd58f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | examples/jsonrpc/JSONRPCExample.py | allbuttonspressed/pyjs | c726fdead530eb63ee4763ae15daaa58d84cd58f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-11-18T14:17:59.000Z | 2019-11-18T14:17:59.000Z | import pyjd # dummy in pyjs
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.TextArea import TextArea
from pyjamas.ui.Label import Label
from pyjamas.ui.Button import Button
from pyjamas.ui.HTML import HTML
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.ListBox import ListBox
from pyjamas.JSONService import JSONProxy
class JSONRPCExample:
    """Pyjamas UI that demonstrates JSON-RPC calls to PHP and Python
    echo services: the user picks a string method, enters text and sends
    it to one of the two backends; the response (or error) is shown in a
    status label."""
    def onModuleLoad(self):
        """Build the UI: text area, method selector, send buttons, status."""
        self.TEXT_WAITING = "Waiting for response..."
        self.TEXT_ERROR = "Server Error"
        self.METHOD_ECHO = "Echo"
        self.METHOD_REVERSE = "Reverse"
        self.METHOD_UPPERCASE = "UPPERCASE"
        self.METHOD_LOWERCASE = "lowercase"
        self.METHOD_NONEXISTANT = "Non existant"
        self.methods = [self.METHOD_ECHO, self.METHOD_REVERSE,
                        self.METHOD_UPPERCASE, self.METHOD_LOWERCASE,
                        self.METHOD_NONEXISTANT]
        self.remote_php = EchoServicePHP()
        self.remote_py = EchoServicePython()
        self.status=Label()
        self.text_area = TextArea()
        # Default text deliberately exercises quoting, tabs, newlines and
        # a raw literal, to check round-tripping through JSON-RPC
        self.text_area.setText("""{'Test'} [\"String\"]
\tTest Tab
Test Newline\n
after newline
""" + r"""Literal String:
{'Test'} [\"String\"]
""")
        self.text_area.setCharacterWidth(80)
        self.text_area.setVisibleLines(8)
        self.method_list = ListBox()
        self.method_list.setName("hello")
        self.method_list.setVisibleItemCount(1)
        for method in self.methods:
            self.method_list.addItem(method)
        self.method_list.setSelectedIndex(0)
        method_panel = HorizontalPanel()
        method_panel.add(HTML("Remote string method to call: "))
        method_panel.add(self.method_list)
        method_panel.setSpacing(8)
        # Both buttons use this object as their click listener (onClick)
        self.button_php = Button("Send to PHP Service", self)
        self.button_py = Button("Send to Python Service", self)
        buttons = HorizontalPanel()
        buttons.add(self.button_php)
        buttons.add(self.button_py)
        buttons.setSpacing(8)
        info = """<h2>JSON-RPC Example</h2>
        <p>This example demonstrates the calling of server services with
        <a href="http://json-rpc.org/">JSON-RPC</a>.
        </p>
        <p>Enter some text below, and press a button to send the text
        to an Echo service on your server. An echo service simply sends the exact same text back that it receives.
        </p>"""
        panel = VerticalPanel()
        panel.add(HTML(info))
        panel.add(self.text_area)
        panel.add(method_panel)
        panel.add(buttons)
        panel.add(self.status)
        RootPanel().add(panel)
    def onClick(self, sender):
        """Send the text to the selected backend/method; self is the
        async handler (onRemoteResponse/onRemoteError)."""
        self.status.setText(self.TEXT_WAITING)
        method = self.methods[self.method_list.getSelectedIndex()]
        text = self.text_area.getText()
        # demonstrate proxy & callMethod()
        if sender == self.button_php:
            if method == self.METHOD_ECHO:
                id = self.remote_php.echo(text, self)
            elif method == self.METHOD_REVERSE:
                id = self.remote_php.callMethod("reverse", [text], self)
            elif method == self.METHOD_UPPERCASE:
                id = self.remote_php.uppercase(text, self)
            elif method == self.METHOD_LOWERCASE:
                id = self.remote_php.lowercase(self, msg=text)
            elif method == self.METHOD_NONEXISTANT:
                id = self.remote_php.nonexistant(text, self)
        else:
            if method == self.METHOD_ECHO:
                id = self.remote_py.echo(text, self)
            elif method == self.METHOD_REVERSE:
                id = self.remote_py.reverse(text, self)
            elif method == self.METHOD_UPPERCASE:
                id = self.remote_py.uppercase(text, self)
            elif method == self.METHOD_LOWERCASE:
                id = self.remote_py.lowercase(text, self)
            elif method == self.METHOD_NONEXISTANT:
                id = self.remote_py.nonexistant(text, self)
    def onRemoteResponse(self, response, request_info):
        """Show the successful RPC result in the status label."""
        self.status.setText(response)
    def onRemoteError(self, code, errobj, request_info):
        """Show an HTTP or JSON-RPC error in the status label."""
        # onRemoteError gets the HTTP error code or 0 and
        # errobj is an jsonrpc 2.0 error dict:
        # {
        #   'code': jsonrpc-error-code (integer) ,
        #   'message': jsonrpc-error-message (string) ,
        #   'data' : extra-error-data
        # }
        message = errobj['message']
        if code != 0:
            self.status.setText("HTTP error %d: %s" %
                                (code, message))
        else:
            code = errobj['code']
            self.status.setText("JSONRPC Error %s: %s" %
                                (code, message))
class EchoServicePHP(JSONProxy):
    """JSON-RPC proxy for the PHP echo service endpoint."""
    def __init__(self):
        JSONProxy.__init__(self, "services/EchoService.php", ["echo", "reverse", "uppercase", "lowercase", "nonexistant"])
class EchoServicePython(JSONProxy):
    """JSON-RPC proxy for the Python echo service endpoint."""
    def __init__(self):
        JSONProxy.__init__(self, "services/EchoService.py", ["echo", "reverse", "uppercase", "lowercase", "nonexistant"])
if __name__ == '__main__':
    # Running under pyjd (desktop shell): load the HTML over HTTP so the
    # embedded browser engine resolves the AJAX calls against the same
    # URI base as the page itself.
    # Use the first (commented-out) pyjd.setup when serving with the
    # bundled web server on port 8000; use the second when serving via a
    # local apache-php install as described in the README.
    #pyjd.setup("http://127.0.0.1:8000/public/JSONRPCExample.html")
    pyjd.setup("http://127.0.0.1/examples/jsonrpc/public/JSONRPCExample.html")
    app = JSONRPCExample()
    app.onModuleLoad()
    pyjd.run()
| 38.389262 | 122 | 0.618706 | import pyjd
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.TextArea import TextArea
from pyjamas.ui.Label import Label
from pyjamas.ui.Button import Button
from pyjamas.ui.HTML import HTML
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.ListBox import ListBox
from pyjamas.JSONService import JSONProxy
class JSONRPCExample:
def onModuleLoad(self):
self.TEXT_WAITING = "Waiting for response..."
self.TEXT_ERROR = "Server Error"
self.METHOD_ECHO = "Echo"
self.METHOD_REVERSE = "Reverse"
self.METHOD_UPPERCASE = "UPPERCASE"
self.METHOD_LOWERCASE = "lowercase"
self.METHOD_NONEXISTANT = "Non existant"
self.methods = [self.METHOD_ECHO, self.METHOD_REVERSE,
self.METHOD_UPPERCASE, self.METHOD_LOWERCASE,
self.METHOD_NONEXISTANT]
self.remote_php = EchoServicePHP()
self.remote_py = EchoServicePython()
self.status=Label()
self.text_area = TextArea()
self.text_area.setText("""{'Test'} [\"String\"]
\tTest Tab
Test Newline\n
after newline
""" + r"""Literal String:
{'Test'} [\"String\"]
""")
self.text_area.setCharacterWidth(80)
self.text_area.setVisibleLines(8)
self.method_list = ListBox()
self.method_list.setName("hello")
self.method_list.setVisibleItemCount(1)
for method in self.methods:
self.method_list.addItem(method)
self.method_list.setSelectedIndex(0)
method_panel = HorizontalPanel()
method_panel.add(HTML("Remote string method to call: "))
method_panel.add(self.method_list)
method_panel.setSpacing(8)
self.button_php = Button("Send to PHP Service", self)
self.button_py = Button("Send to Python Service", self)
buttons = HorizontalPanel()
buttons.add(self.button_php)
buttons.add(self.button_py)
buttons.setSpacing(8)
info = """<h2>JSON-RPC Example</h2>
<p>This example demonstrates the calling of server services with
<a href="http://json-rpc.org/">JSON-RPC</a>.
</p>
<p>Enter some text below, and press a button to send the text
to an Echo service on your server. An echo service simply sends the exact same text back that it receives.
</p>"""
panel = VerticalPanel()
panel.add(HTML(info))
panel.add(self.text_area)
panel.add(method_panel)
panel.add(buttons)
panel.add(self.status)
RootPanel().add(panel)
def onClick(self, sender):
self.status.setText(self.TEXT_WAITING)
method = self.methods[self.method_list.getSelectedIndex()]
text = self.text_area.getText()
if sender == self.button_php:
if method == self.METHOD_ECHO:
id = self.remote_php.echo(text, self)
elif method == self.METHOD_REVERSE:
id = self.remote_php.callMethod("reverse", [text], self)
elif method == self.METHOD_UPPERCASE:
id = self.remote_php.uppercase(text, self)
elif method == self.METHOD_LOWERCASE:
id = self.remote_php.lowercase(self, msg=text)
elif method == self.METHOD_NONEXISTANT:
id = self.remote_php.nonexistant(text, self)
else:
if method == self.METHOD_ECHO:
id = self.remote_py.echo(text, self)
elif method == self.METHOD_REVERSE:
id = self.remote_py.reverse(text, self)
elif method == self.METHOD_UPPERCASE:
id = self.remote_py.uppercase(text, self)
elif method == self.METHOD_LOWERCASE:
id = self.remote_py.lowercase(text, self)
elif method == self.METHOD_NONEXISTANT:
id = self.remote_py.nonexistant(text, self)
def onRemoteResponse(self, response, request_info):
self.status.setText(response)
def onRemoteError(self, code, errobj, request_info):
message = errobj['message']
if code != 0:
self.status.setText("HTTP error %d: %s" %
(code, message))
else:
code = errobj['code']
self.status.setText("JSONRPC Error %s: %s" %
(code, message))
class EchoServicePHP(JSONProxy):
def __init__(self):
JSONProxy.__init__(self, "services/EchoService.php", ["echo", "reverse", "uppercase", "lowercase", "nonexistant"])
class EchoServicePython(JSONProxy):
def __init__(self):
JSONProxy.__init__(self, "services/EchoService.py", ["echo", "reverse", "uppercase", "lowercase", "nonexistant"])
if __name__ == '__main__':
# Use the second pyjd.setup if you're using apache-php locally
pyjd.setup("http://127.0.0.1/examples/jsonrpc/public/JSONRPCExample.html")
app = JSONRPCExample()
app.onModuleLoad()
pyjd.run()
| true | true |
f715f48de694a6699da344700b6ccc25623f65f8 | 59,016 | py | Python | haystack/nodes/reader/farm.py | ZanSara/haystack | b2e6dcc99899d9ad728d21f925c5300632683d4d | [
"Apache-2.0"
] | 1 | 2022-02-20T02:04:49.000Z | 2022-02-20T02:04:49.000Z | haystack/nodes/reader/farm.py | shenyezh/haystack | 2a674eaff7d711f38db1bd57ece9bb632fb928bd | [
"Apache-2.0"
] | null | null | null | haystack/nodes/reader/farm.py | shenyezh/haystack | 2a674eaff7d711f38db1bd57ece9bb632fb928bd | [
"Apache-2.0"
] | null | null | null | from typing import List, Optional, Dict, Any, Union, Callable
import logging
import multiprocessing
from pathlib import Path
from collections import defaultdict
from time import perf_counter
import torch
from haystack.modeling.data_handler.data_silo import DataSilo, DistillationDataSilo
from haystack.modeling.data_handler.processor import SquadProcessor, Processor
from haystack.modeling.data_handler.dataloader import NamedDataLoader
from haystack.modeling.data_handler.inputs import QAInput, Question
from haystack.modeling.infer import QAInferencer
from haystack.modeling.model.optimization import initialize_optimizer
from haystack.modeling.model.predictions import QAPred, QACandidate
from haystack.modeling.model.adaptive_model import AdaptiveModel
from haystack.modeling.training import Trainer, DistillationTrainer, TinyBERTDistillationTrainer
from haystack.modeling.evaluation import Evaluator
from haystack.modeling.utils import set_all_seeds, initialize_device_settings
from haystack.schema import Document, Answer, Span
from haystack.document_stores import BaseDocumentStore
from haystack.nodes.reader import BaseReader
logger = logging.getLogger(__name__)
class FARMReader(BaseReader):
"""
Transformer based model for extractive Question Answering using the FARM framework (https://github.com/deepset-ai/FARM).
While the underlying model can vary (BERT, Roberta, DistilBERT, ...), the interface remains the same.
| With a FARMReader, you can:
- directly get predictions via predict()
- fine-tune the model on QA data via train()
"""
def __init__(
self,
model_name_or_path: str,
model_version: Optional[str] = None,
context_window_size: int = 150,
batch_size: int = 50,
use_gpu: bool = True,
no_ans_boost: float = 0.0,
return_no_answer: bool = False,
top_k: int = 10,
top_k_per_candidate: int = 3,
top_k_per_sample: int = 1,
num_processes: Optional[int] = None,
max_seq_len: int = 256,
doc_stride: int = 128,
progress_bar: bool = True,
duplicate_filtering: int = 0,
use_confidence_scores: bool = True,
proxies: Optional[Dict[str, str]] = None,
local_files_only=False,
force_download=False,
use_auth_token: Optional[Union[str, bool]] = None,
**kwargs,
):
"""
:param model_name_or_path: Directory of a saved model or the name of a public model e.g. 'bert-base-cased',
'deepset/bert-base-cased-squad2', 'deepset/bert-base-cased-squad2', 'distilbert-base-uncased-distilled-squad'.
See https://huggingface.co/models for full list of available models.
:param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
:param context_window_size: The size, in characters, of the window around the answer span that is used when
displaying the context around the answer.
:param batch_size: Number of samples the model receives in one batch for inference.
Memory consumption is much lower in inference mode. Recommendation: Increase the batch size
to a value so only a single batch is used.
:param use_gpu: Whether to use GPU (if available)
:param no_ans_boost: How much the no_answer logit is boosted/increased.
If set to 0 (default), the no_answer logit is not changed.
If a negative number, there is a lower chance of "no_answer" being predicted.
If a positive number, there is an increased chance of "no_answer"
:param return_no_answer: Whether to include no_answer predictions in the results.
:param top_k: The maximum number of answers to return
:param top_k_per_candidate: How many answers to extract for each candidate doc that is coming from the retriever (might be a long text).
Note that this is not the number of "final answers" you will receive
(see `top_k` in FARMReader.predict() or Finder.get_answers() for that)
and that FARM includes no_answer in the sorted list of predictions.
:param top_k_per_sample: How many answers to extract from each small text passage that the model can process at once
(one "candidate doc" is usually split into many smaller "passages").
You usually want a very small value here, as it slows down inference
and you don't gain much of quality by having multiple answers from one passage.
Note that this is not the number of "final answers" you will receive
(see `top_k` in FARMReader.predict() or Finder.get_answers() for that)
and that FARM includes no_answer in the sorted list of predictions.
:param num_processes: The number of processes for `multiprocessing.Pool`. Set to value of 0 to disable
multiprocessing. Set to None to let Inferencer determine optimum number. If you
want to debug the Language Model, you might need to disable multiprocessing!
:param max_seq_len: Max sequence length of one input text for the model
:param doc_stride: Length of striding window for splitting long texts (used if ``len(text) > max_seq_len``)
:param progress_bar: Whether to show a tqdm progress bar or not.
Can be helpful to disable in production deployments to keep the logs clean.
:param duplicate_filtering: Answers are filtered based on their position. Both start and end position of the answers are considered.
The higher the value, answers that are more apart are filtered out. 0 corresponds to exact duplicates. -1 turns off duplicate removal.
:param use_confidence_scores: Sets the type of score that is returned with every predicted answer.
`True` => a scaled confidence / relevance score between [0, 1].
This score can also be further calibrated on your dataset via self.eval()
(see https://haystack.deepset.ai/components/reader#confidence-scores) .
`False` => an unscaled, raw score [-inf, +inf] which is the sum of start and end logit
from the model for the predicted span.
:param proxies: Dict of proxy servers to use for downloading external models. Example: {'http': 'some.proxy:1234', 'http://hostname': 'my.proxy:3111'}
:param local_files_only: Whether to force checking for local files only (and forbid downloads)
:param force_download: Whether fo force a (re-)download even if the model exists locally in the cache.
:param use_auth_token: API token used to download private models from Huggingface. If this parameter is set to `True`,
the local token will be used, which must be previously created via `transformer-cli login`.
Additional information can be found here https://huggingface.co/transformers/main_classes/model.html#transformers.PreTrainedModel.from_pretrained
"""
# save init parameters to enable export of component config as YAML
self.set_config(
model_name_or_path=model_name_or_path,
model_version=model_version,
context_window_size=context_window_size,
batch_size=batch_size,
use_gpu=use_gpu,
no_ans_boost=no_ans_boost,
return_no_answer=return_no_answer,
top_k=top_k,
top_k_per_candidate=top_k_per_candidate,
top_k_per_sample=top_k_per_sample,
num_processes=num_processes,
max_seq_len=max_seq_len,
doc_stride=doc_stride,
progress_bar=progress_bar,
duplicate_filtering=duplicate_filtering,
proxies=proxies,
local_files_only=local_files_only,
force_download=force_download,
use_confidence_scores=use_confidence_scores,
**kwargs,
)
self.devices, _ = initialize_device_settings(use_cuda=use_gpu, multi_gpu=False)
self.return_no_answers = return_no_answer
self.top_k = top_k
self.top_k_per_candidate = top_k_per_candidate
self.inferencer = QAInferencer.load(
model_name_or_path,
batch_size=batch_size,
gpu=use_gpu,
task_type="question_answering",
max_seq_len=max_seq_len,
doc_stride=doc_stride,
num_processes=num_processes,
revision=model_version,
disable_tqdm=not progress_bar,
strict=False,
proxies=proxies,
local_files_only=local_files_only,
force_download=force_download,
devices=self.devices,
use_auth_token=use_auth_token,
**kwargs,
)
self.inferencer.model.prediction_heads[0].context_window_size = context_window_size
self.inferencer.model.prediction_heads[0].no_ans_boost = no_ans_boost
self.inferencer.model.prediction_heads[0].n_best = top_k_per_candidate + 1 # including possible no_answer
try:
self.inferencer.model.prediction_heads[0].n_best_per_sample = top_k_per_sample
except:
logger.warning("Could not set `top_k_per_sample` in FARM. Please update FARM version.")
try:
self.inferencer.model.prediction_heads[0].duplicate_filtering = duplicate_filtering
except:
logger.warning("Could not set `duplicate_filtering` in FARM. Please update FARM version.")
self.max_seq_len = max_seq_len
self.use_gpu = use_gpu
self.progress_bar = progress_bar
self.use_confidence_scores = use_confidence_scores
    def _training_procedure(
        self,
        data_dir: str,
        train_filename: str,
        dev_filename: Optional[str] = None,
        test_filename: Optional[str] = None,
        use_gpu: Optional[bool] = None,
        batch_size: int = 10,
        n_epochs: int = 2,
        learning_rate: float = 1e-5,
        max_seq_len: Optional[int] = None,
        warmup_proportion: float = 0.2,
        dev_split: float = 0,
        evaluate_every: int = 300,
        save_dir: Optional[str] = None,
        num_processes: Optional[int] = None,
        use_amp: Optional[str] = None,
        checkpoint_root_dir: Path = Path("model_checkpoints"),
        checkpoint_every: Optional[int] = None,
        checkpoints_to_keep: int = 3,
        teacher_model: Optional["FARMReader"] = None,
        teacher_batch_size: Optional[int] = None,
        caching: bool = False,
        cache_path: Path = Path("cache/data_silo"),
        distillation_loss_weight: float = 0.5,
        distillation_loss: Union[str, Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = "kl_div",
        temperature: float = 1.0,
        tinybert: bool = False,
        processor: Optional[Processor] = None,
    ):
        """
        Shared training loop behind train(), distil_prediction_layer_from() and
        distil_intermediate_layers_from().

        Which trainer/data-silo combination is used depends on the flags:
        - teacher_model set, tinybert False -> prediction-layer distillation
          (DistillationDataSilo caches the teacher's logits + DistillationTrainer)
        - teacher_model set, tinybert True  -> TinyBERT stage-1 intermediate-layer
          distillation (plain DataSilo + TinyBERTDistillationTrainer)
        - teacher_model None                -> regular fine-tuning (Trainer)

        The trained model replaces self.inferencer.model and is saved to save_dir.
        """
        # An explicit dev file takes precedence over splitting dev data off the train set.
        if dev_filename:
            dev_split = 0

        if num_processes is None:
            num_processes = multiprocessing.cpu_count() - 1 or 1

        # Fixed seed so repeated training runs are reproducible.
        set_all_seeds(seed=42)

        # For these variables, by default, we use the value set when initializing the FARMReader.
        # These can also be manually set when train() is called if you want a different value at train vs inference
        if use_gpu is None:
            use_gpu = self.use_gpu
        if max_seq_len is None:
            max_seq_len = self.max_seq_len

        devices, n_gpu = initialize_device_settings(use_cuda=use_gpu, multi_gpu=False)

        if not save_dir:
            save_dir = f"../../saved_models/{self.inferencer.model.language_model.name}"
            if tinybert:
                save_dir += "_tinybert_stage_1"

        # 1. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
        label_list = ["start_token", "end_token"]
        metric = "squad"

        if processor is None:
            processor = SquadProcessor(
                tokenizer=self.inferencer.processor.tokenizer,
                max_seq_len=max_seq_len,
                label_list=label_list,
                metric=metric,
                train_filename=train_filename,
                dev_filename=dev_filename,
                dev_split=dev_split,
                test_filename=test_filename,
                data_dir=Path(data_dir),
            )
        data_silo: DataSilo

        # 2. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them
        # and calculates a few descriptive statistics of our datasets
        if (
            teacher_model and not tinybert
        ):  # checks if teacher model is passed as parameter, in that case assume model distillation is used
            data_silo = DistillationDataSilo(
                teacher_model,
                teacher_batch_size or batch_size,
                device=devices[0],
                processor=processor,
                batch_size=batch_size,
                distributed=False,
                max_processes=num_processes,
                caching=caching,
                cache_path=cache_path,
            )
        else:  # caching would need too much memory for tinybert distillation so in that case we use the default data silo
            data_silo = DataSilo(
                processor=processor,
                batch_size=batch_size,
                distributed=False,
                max_processes=num_processes,
                caching=caching,
                cache_path=cache_path,
            )

        # 3. Create an optimizer and pass the already initialized model
        model, optimizer, lr_schedule = initialize_optimizer(
            model=self.inferencer.model,
            learning_rate=learning_rate,
            schedule_opts={"name": "LinearWarmup", "warmup_proportion": warmup_proportion},
            n_batches=len(data_silo.loaders["train"]),
            n_epochs=n_epochs,
            device=devices[0],
            use_amp=use_amp,
        )
        # 4. Feed everything to the Trainer, which keeps care of growing our model and evaluates it from time to time
        if tinybert:
            if not teacher_model:
                raise ValueError("TinyBERT distillation requires a teacher model.")
            trainer = TinyBERTDistillationTrainer.create_or_load_checkpoint(
                model=model,
                teacher_model=teacher_model.inferencer.model,  # teacher needs to be passed as teacher outputs aren't cached
                optimizer=optimizer,
                data_silo=data_silo,
                epochs=n_epochs,
                n_gpu=n_gpu,
                lr_schedule=lr_schedule,
                evaluate_every=evaluate_every,
                device=devices[0],
                use_amp=use_amp,
                disable_tqdm=not self.progress_bar,
                checkpoint_root_dir=Path(checkpoint_root_dir),
                checkpoint_every=checkpoint_every,
                checkpoints_to_keep=checkpoints_to_keep,
            )

        elif (
            teacher_model
        ):  # checks again if teacher model is passed as parameter, in that case assume model distillation is used
            trainer = DistillationTrainer.create_or_load_checkpoint(
                model=model,
                optimizer=optimizer,
                data_silo=data_silo,
                epochs=n_epochs,
                n_gpu=n_gpu,
                lr_schedule=lr_schedule,
                evaluate_every=evaluate_every,
                device=devices[0],
                use_amp=use_amp,
                disable_tqdm=not self.progress_bar,
                checkpoint_root_dir=Path(checkpoint_root_dir),
                checkpoint_every=checkpoint_every,
                checkpoints_to_keep=checkpoints_to_keep,
                distillation_loss=distillation_loss,
                distillation_loss_weight=distillation_loss_weight,
                temperature=temperature,
            )
        else:
            trainer = Trainer.create_or_load_checkpoint(
                model=model,
                optimizer=optimizer,
                data_silo=data_silo,
                epochs=n_epochs,
                n_gpu=n_gpu,
                lr_schedule=lr_schedule,
                evaluate_every=evaluate_every,
                device=devices[0],
                use_amp=use_amp,
                disable_tqdm=not self.progress_bar,
                checkpoint_root_dir=Path(checkpoint_root_dir),
                checkpoint_every=checkpoint_every,
                checkpoints_to_keep=checkpoints_to_keep,
            )

        # 5. Let it grow!
        self.inferencer.model = trainer.train()
        self.save(Path(save_dir))
def train(
self,
data_dir: str,
train_filename: str,
dev_filename: Optional[str] = None,
test_filename: Optional[str] = None,
use_gpu: Optional[bool] = None,
batch_size: int = 10,
n_epochs: int = 2,
learning_rate: float = 1e-5,
max_seq_len: Optional[int] = None,
warmup_proportion: float = 0.2,
dev_split: float = 0,
evaluate_every: int = 300,
save_dir: Optional[str] = None,
num_processes: Optional[int] = None,
use_amp: str = None,
checkpoint_root_dir: Path = Path("model_checkpoints"),
checkpoint_every: Optional[int] = None,
checkpoints_to_keep: int = 3,
caching: bool = False,
cache_path: Path = Path("cache/data_silo"),
):
"""
Fine-tune a model on a QA dataset. Options:
- Take a plain language model (e.g. `bert-base-cased`) and train it for QA (e.g. on SQuAD data)
- Take a QA model (e.g. `deepset/bert-base-cased-squad2`) and fine-tune it for your domain (e.g. using your labels collected via the haystack annotation tool)
Checkpoints can be stored via setting `checkpoint_every` to a custom number of steps.
If any checkpoints are stored, a subsequent run of train() will resume training from the latest available checkpoint.
:param data_dir: Path to directory containing your training data in SQuAD style
:param train_filename: Filename of training data
:param dev_filename: Filename of dev / eval data
:param test_filename: Filename of test data
:param dev_split: Instead of specifying a dev_filename, you can also specify a ratio (e.g. 0.1) here
that gets split off from training data for eval.
:param use_gpu: Whether to use GPU (if available)
:param batch_size: Number of samples the model receives in one batch for training
:param n_epochs: Number of iterations on the whole training data set
:param learning_rate: Learning rate of the optimizer
:param max_seq_len: Maximum text length (in tokens). Everything longer gets cut down.
:param warmup_proportion: Proportion of training steps until maximum learning rate is reached.
Until that point LR is increasing linearly. After that it's decreasing again linearly.
Options for different schedules are available in FARM.
:param evaluate_every: Evaluate the model every X steps on the hold-out eval dataset
:param save_dir: Path to store the final model
:param num_processes: The number of processes for `multiprocessing.Pool` during preprocessing.
Set to value of 1 to disable multiprocessing. When set to 1, you cannot split away a dev set from train set.
Set to None to use all CPU cores minus one.
:param use_amp: Optimization level of NVIDIA's automatic mixed precision (AMP). The higher the level, the faster the model.
Available options:
None (Don't use AMP)
"O0" (Normal FP32 training)
"O1" (Mixed Precision => Recommended)
"O2" (Almost FP16)
"O3" (Pure FP16).
See details on: https://nvidia.github.io/apex/amp.html
:param checkpoint_root_dir: the Path of directory where all train checkpoints are saved. For each individual
checkpoint, a subdirectory with the name epoch_{epoch_num}_step_{step_num} is created.
:param checkpoint_every: save a train checkpoint after this many steps of training.
:param checkpoints_to_keep: maximum number of train checkpoints to save.
:param caching: whether or not to use caching for preprocessed dataset
:param cache_path: Path to cache the preprocessed dataset
:param processor: The processor to use for preprocessing. If None, the default SquadProcessor is used.
:return: None
"""
return self._training_procedure(
data_dir=data_dir,
train_filename=train_filename,
dev_filename=dev_filename,
test_filename=test_filename,
use_gpu=use_gpu,
batch_size=batch_size,
n_epochs=n_epochs,
learning_rate=learning_rate,
max_seq_len=max_seq_len,
warmup_proportion=warmup_proportion,
dev_split=dev_split,
evaluate_every=evaluate_every,
save_dir=save_dir,
num_processes=num_processes,
use_amp=use_amp,
checkpoint_root_dir=checkpoint_root_dir,
checkpoint_every=checkpoint_every,
checkpoints_to_keep=checkpoints_to_keep,
caching=caching,
cache_path=cache_path,
)
def distil_prediction_layer_from(
self,
teacher_model: "FARMReader",
data_dir: str,
train_filename: str,
dev_filename: Optional[str] = None,
test_filename: Optional[str] = None,
use_gpu: Optional[bool] = None,
student_batch_size: int = 10,
teacher_batch_size: Optional[int] = None,
n_epochs: int = 2,
learning_rate: float = 3e-5,
max_seq_len: Optional[int] = None,
warmup_proportion: float = 0.2,
dev_split: float = 0,
evaluate_every: int = 300,
save_dir: Optional[str] = None,
num_processes: Optional[int] = None,
use_amp: str = None,
checkpoint_root_dir: Path = Path("model_checkpoints"),
checkpoint_every: Optional[int] = None,
checkpoints_to_keep: int = 3,
caching: bool = False,
cache_path: Path = Path("cache/data_silo"),
distillation_loss_weight: float = 0.5,
distillation_loss: Union[str, Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = "kl_div",
temperature: float = 1.0,
):
"""
Fine-tune a model on a QA dataset using logit-based distillation. You need to provide a teacher model that is already finetuned on the dataset
and a student model that will be trained using the teacher's logits. The idea of this is to increase the accuracy of a lightweight student model.
using a more complex teacher.
Originally proposed in: https://arxiv.org/pdf/1503.02531.pdf
This can also be considered as the second stage of distillation finetuning as described in the TinyBERT paper:
https://arxiv.org/pdf/1909.10351.pdf
**Example**
```python
student = FARMReader(model_name_or_path="prajjwal1/bert-medium")
teacher = FARMReader(model_name_or_path="deepset/bert-large-uncased-whole-word-masking-squad2")
student.distil_prediction_layer_from(teacher, data_dir="squad2", train_filename="train.json", test_filename="dev.json",
learning_rate=3e-5, distillation_loss_weight=1.0, temperature=5)
```
Checkpoints can be stored via setting `checkpoint_every` to a custom number of steps.
If any checkpoints are stored, a subsequent run of train() will resume training from the latest available checkpoint.
:param teacher_model: Model whose logits will be used to improve accuracy
:param data_dir: Path to directory containing your training data in SQuAD style
:param train_filename: Filename of training data
:param dev_filename: Filename of dev / eval data
:param test_filename: Filename of test data
:param dev_split: Instead of specifying a dev_filename, you can also specify a ratio (e.g. 0.1) here
that gets split off from training data for eval.
:param use_gpu: Whether to use GPU (if available)
:param student_batch_size: Number of samples the student model receives in one batch for training
:param student_batch_size: Number of samples the teacher model receives in one batch for distillation
:param n_epochs: Number of iterations on the whole training data set
:param learning_rate: Learning rate of the optimizer
:param max_seq_len: Maximum text length (in tokens). Everything longer gets cut down.
:param warmup_proportion: Proportion of training steps until maximum learning rate is reached.
Until that point LR is increasing linearly. After that it's decreasing again linearly.
Options for different schedules are available in FARM.
:param evaluate_every: Evaluate the model every X steps on the hold-out eval dataset
:param save_dir: Path to store the final model
:param num_processes: The number of processes for `multiprocessing.Pool` during preprocessing.
Set to value of 1 to disable multiprocessing. When set to 1, you cannot split away a dev set from train set.
Set to None to use all CPU cores minus one.
:param use_amp: Optimization level of NVIDIA's automatic mixed precision (AMP). The higher the level, the faster the model.
Available options:
None (Don't use AMP)
"O0" (Normal FP32 training)
"O1" (Mixed Precision => Recommended)
"O2" (Almost FP16)
"O3" (Pure FP16).
See details on: https://nvidia.github.io/apex/amp.html
:param checkpoint_root_dir: the Path of directory where all train checkpoints are saved. For each individual
checkpoint, a subdirectory with the name epoch_{epoch_num}_step_{step_num} is created.
:param checkpoint_every: save a train checkpoint after this many steps of training.
:param checkpoints_to_keep: maximum number of train checkpoints to save.
:param caching: whether or not to use caching for preprocessed dataset and teacher logits
:param cache_path: Path to cache the preprocessed dataset and teacher logits
:param distillation_loss_weight: The weight of the distillation loss. A higher weight means the teacher outputs are more important.
:param distillation_loss: Specifies how teacher and model logits should be compared. Can either be a string ("mse" for mean squared error or "kl_div" for kl divergence loss) or a callable loss function (needs to have named parameters student_logits and teacher_logits)
:param temperature: The temperature for distillation. A higher temperature will result in less certainty of teacher outputs. A lower temperature means more certainty. A temperature of 1.0 does not change the certainty of the model.
:param tinybert_loss: Whether to use the TinyBERT loss function for distillation. This requires the student to be a TinyBERT model and the teacher to be a finetuned version of bert-base-uncased.
:param tinybert_epochs: Number of epochs to train the student model with the TinyBERT loss function. After this many epochs, the student model is trained with the regular distillation loss function.
:param tinybert_learning_rate: Learning rate to use when training the student model with the TinyBERT loss function.
:param tinybert_train_filename: Filename of training data to use when training the student model with the TinyBERT loss function. To best follow the original paper, this should be an augmented version of the training data created using the augment_squad.py script. If not specified, the training data from the original training is used.
:param processor: The processor to use for preprocessing. If None, the default SquadProcessor is used.
:return: None
"""
return self._training_procedure(
data_dir=data_dir,
train_filename=train_filename,
dev_filename=dev_filename,
test_filename=test_filename,
use_gpu=use_gpu,
batch_size=student_batch_size,
n_epochs=n_epochs,
learning_rate=learning_rate,
max_seq_len=max_seq_len,
warmup_proportion=warmup_proportion,
dev_split=dev_split,
evaluate_every=evaluate_every,
save_dir=save_dir,
num_processes=num_processes,
use_amp=use_amp,
checkpoint_root_dir=checkpoint_root_dir,
checkpoint_every=checkpoint_every,
checkpoints_to_keep=checkpoints_to_keep,
teacher_model=teacher_model,
teacher_batch_size=teacher_batch_size,
caching=caching,
cache_path=cache_path,
distillation_loss_weight=distillation_loss_weight,
distillation_loss=distillation_loss,
temperature=temperature,
)
    def distil_intermediate_layers_from(
        self,
        teacher_model: "FARMReader",
        data_dir: str,
        train_filename: str,
        dev_filename: Optional[str] = None,
        test_filename: Optional[str] = None,
        use_gpu: Optional[bool] = None,
        batch_size: int = 10,
        n_epochs: int = 5,
        learning_rate: float = 5e-5,
        max_seq_len: Optional[int] = None,
        warmup_proportion: float = 0.2,
        dev_split: float = 0,
        evaluate_every: int = 300,
        save_dir: Optional[str] = None,
        num_processes: Optional[int] = None,
        use_amp: Optional[str] = None,
        checkpoint_root_dir: Path = Path("model_checkpoints"),
        checkpoint_every: Optional[int] = None,
        checkpoints_to_keep: int = 3,
        caching: bool = False,
        cache_path: Path = Path("cache/data_silo"),
        distillation_loss: Union[str, Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = "mse",
        temperature: float = 1.0,
        processor: Optional[Processor] = None,
    ):
        """
        The first stage of distillation finetuning as described in the TinyBERT paper:
        https://arxiv.org/pdf/1909.10351.pdf
        **Example**
        ```python
        student = FARMReader(model_name_or_path="prajjwal1/bert-medium")
        teacher = FARMReader(model_name_or_path="huawei-noah/TinyBERT_General_6L_768D")
        student.distil_intermediate_layers_from(teacher, data_dir="squad2", train_filename="train.json", test_filename="dev.json",
                            learning_rate=3e-5, distillation_loss_weight=1.0, temperature=5)
        ```

        Checkpoints can be stored via setting `checkpoint_every` to a custom number of steps.
        If any checkpoints are stored, a subsequent run of train() will resume training from the latest available checkpoint.

        :param teacher_model: Teacher model whose intermediate-layer outputs are distilled into the student
        :param data_dir: Path to directory containing your training data in SQuAD style
        :param train_filename: Filename of training data. To best follow the original paper, this should be an augmented version of the training data created using the augment_squad.py script
        :param dev_filename: Filename of dev / eval data
        :param test_filename: Filename of test data
        :param dev_split: Instead of specifying a dev_filename, you can also specify a ratio (e.g. 0.1) here
                          that gets split off from training data for eval.
        :param use_gpu: Whether to use GPU (if available)
        :param batch_size: Number of samples both student and teacher receive in one batch for training
        :param n_epochs: Number of iterations on the whole training data set
        :param learning_rate: Learning rate of the optimizer
        :param max_seq_len: Maximum text length (in tokens). Everything longer gets cut down.
        :param warmup_proportion: Proportion of training steps until maximum learning rate is reached.
                                  Until that point LR is increasing linearly. After that it's decreasing again linearly.
                                  Options for different schedules are available in FARM.
        :param evaluate_every: Evaluate the model every X steps on the hold-out eval dataset
        :param save_dir: Path to store the final model
        :param num_processes: The number of processes for `multiprocessing.Pool` during preprocessing.
                              Set to value of 1 to disable multiprocessing. When set to 1, you cannot split away a dev set from train set.
                              Set to None to use all CPU cores minus one.
        :param use_amp: Optimization level of NVIDIA's automatic mixed precision (AMP). The higher the level, the faster the model.
                        Available options:
                        None (Don't use AMP)
                        "O0" (Normal FP32 training)
                        "O1" (Mixed Precision => Recommended)
                        "O2" (Almost FP16)
                        "O3" (Pure FP16).
                        See details on: https://nvidia.github.io/apex/amp.html
        :param checkpoint_root_dir: the Path of directory where all train checkpoints are saved. For each individual
               checkpoint, a subdirectory with the name epoch_{epoch_num}_step_{step_num} is created.
        :param checkpoint_every: save a train checkpoint after this many steps of training.
        :param checkpoints_to_keep: maximum number of train checkpoints to save.
        :param caching: whether or not to use caching for preprocessed dataset and teacher logits
        :param cache_path: Path to cache the preprocessed dataset and teacher logits
        :param distillation_loss: Specifies how teacher and model logits should be compared. Can either be a string ("mse" for mean squared error or "kl_div" for kl divergence loss) or a callable loss function (needs to have named parameters student_logits and teacher_logits)
        :param temperature: The temperature for distillation. A higher temperature will result in less certainty of teacher outputs. A lower temperature means more certainty. A temperature of 1.0 does not change the certainty of the model.
        :param processor: The processor to use for preprocessing. If None, the default SquadProcessor is used.
        :return: None
        """
        # Delegates to the shared training loop; tinybert=True selects the
        # TinyBERTDistillationTrainer (stage-1, intermediate-layer distillation).
        # The teacher uses the same batch size as the student here.
        return self._training_procedure(
            data_dir=data_dir,
            train_filename=train_filename,
            dev_filename=dev_filename,
            test_filename=test_filename,
            use_gpu=use_gpu,
            batch_size=batch_size,
            n_epochs=n_epochs,
            learning_rate=learning_rate,
            max_seq_len=max_seq_len,
            warmup_proportion=warmup_proportion,
            dev_split=dev_split,
            evaluate_every=evaluate_every,
            save_dir=save_dir,
            num_processes=num_processes,
            use_amp=use_amp,
            checkpoint_root_dir=checkpoint_root_dir,
            checkpoint_every=checkpoint_every,
            checkpoints_to_keep=checkpoints_to_keep,
            teacher_model=teacher_model,
            teacher_batch_size=batch_size,
            caching=caching,
            cache_path=cache_path,
            distillation_loss=distillation_loss,
            temperature=temperature,
            tinybert=True,
            processor=processor,
        )
def update_parameters(
self,
context_window_size: Optional[int] = None,
no_ans_boost: Optional[float] = None,
return_no_answer: Optional[bool] = None,
max_seq_len: Optional[int] = None,
doc_stride: Optional[int] = None,
):
"""
Hot update parameters of a loaded Reader. It may not to be safe when processing concurrent requests.
"""
if no_ans_boost is not None:
self.inferencer.model.prediction_heads[0].no_ans_boost = no_ans_boost
if return_no_answer is not None:
self.return_no_answers = return_no_answer
if doc_stride is not None:
self.inferencer.processor.doc_stride = doc_stride
if context_window_size is not None:
self.inferencer.model.prediction_heads[0].context_window_size = context_window_size
if max_seq_len is not None:
self.inferencer.processor.max_seq_len = max_seq_len
self.max_seq_len = max_seq_len
def save(self, directory: Path):
"""
Saves the Reader model so that it can be reused at a later point in time.
:param directory: Directory where the Reader model should be saved
"""
logger.info(f"Saving reader model to {directory}")
self.inferencer.model.save(directory)
self.inferencer.processor.save(directory)
    def predict_batch(self, query_doc_list: List[dict], top_k: Optional[int] = None, batch_size: Optional[int] = None):
        """
        Use loaded QA model to find answers for a list of queries in each query's supplied list of Document.

        Returns list of dictionaries containing answers sorted by (desc.) score

        :param query_doc_list: List of dictionaries containing queries with their retrieved documents
        :param top_k: The maximum number of answers to return for each query
        :param batch_size: Number of samples the model receives in one batch for inference
        :return: List of dictionaries containing query and answers
        """
        if top_k is None:
            top_k = self.top_k
        # convert input to FARM format
        inputs = []
        # number of documents per query, used below to slice the flat
        # prediction list back into per-query groups
        number_of_docs = []
        labels = []

        # build input objects for inference_from_objects
        for query_with_docs in query_doc_list:
            documents = query_with_docs["docs"]
            # NOTE(review): despite the name, this appears to be a label-like
            # object rather than a plain string — `.query` is accessed below
            # and the object itself is returned under the "label" key. Confirm
            # against callers.
            query = query_with_docs["question"]
            labels.append(query)
            number_of_docs.append(len(documents))
            for doc in documents:
                cur = QAInput(doc_text=doc.content, questions=Question(text=query.query, uid=doc.id))
                inputs.append(cur)

        self.inferencer.batch_size = batch_size
        # make predictions on all document-query pairs
        predictions = self.inferencer.inference_from_objects(
            objects=inputs, return_json=False, multiprocessing_chunksize=10
        )

        # group predictions together: predictions is flat (one entry per
        # query/doc pair), so slice it using the per-query document counts
        grouped_predictions = []
        left_idx = 0
        right_idx = 0
        for number in number_of_docs:
            right_idx = left_idx + number
            grouped_predictions.append(predictions[left_idx:right_idx])
            left_idx = right_idx

        result = []
        for idx, group in enumerate(grouped_predictions):
            answers, max_no_ans_gap = self._extract_answers_of_predictions(group, top_k)
            query = group[0].query
            cur_label = labels[idx]
            result.append({"query": query, "no_ans_gap": max_no_ans_gap, "answers": answers, "label": cur_label})

        return result
def predict(self, query: str, documents: List[Document], top_k: Optional[int] = None):
    """
    Find answers for a single query within the supplied Documents using the loaded QA model.

    Returns a dict with the query, the aggregated no-answer gap, and answers sorted
    by descending score:

    ```python
    |{
    |    'query': 'Who is the father of Arya Stark?',
    |    'answers':[Answer(
    |                 'answer': 'Eddard,',
    |                 'context': "She travels with her father, Eddard, to King's Landing when he is",
    |                 'score': 0.9787139466668613,
    |                 'offsets_in_context': [Span(start=29, end=35],
    |                 'offsets_in_context': [Span(start=347, end=353],
    |                 'document_id': '88d1ed769d003939d3a0d28034464ab2'
    |                 ),...
    |              ]
    |}
    ```

    :param query: Query string
    :param documents: List of Document in which to search for the answer
    :param top_k: The maximum number of answers to return
    :return: Dict containing query and answers
    """
    effective_top_k = self.top_k if top_k is None else top_k

    # Wrap each document together with the query in FARM's input format.
    qa_inputs = [
        QAInput(doc_text=doc.content, questions=Question(text=query, uid=doc.id))
        for doc in documents
    ]

    # get answers from QA model
    # TODO: Need fix in FARM's `to_dict` function of `QAInput` class
    predictions = self.inferencer.inference_from_objects(
        objects=qa_inputs, return_json=False, multiprocessing_chunksize=1
    )
    # assemble answers from all the different documents & format them.
    answers, max_no_ans_gap = self._extract_answers_of_predictions(predictions, effective_top_k)
    # TODO: potentially simplify return here to List[Answer] and handle no_ans_gap differently
    return {"query": query, "no_ans_gap": max_no_ans_gap, "answers": answers}
def eval_on_file(self, data_dir: str, test_filename: str, device: Optional[str] = None):
    """
    Evaluate the reader on a SQuAD-formatted file.

    Returns a dict with:
    - "EM": exact match score
    - "f1": F1-Score
    - "top_n_accuracy": Proportion of predicted answers that overlap with correct answer

    :param data_dir: The directory in which the test set can be found
    :param test_filename: The name of the file containing the test data in SQuAD format.
    :param device: The device on which the tensors should be processed ("cpu" or "cuda");
                   defaults to the Reader's own device.
    """
    eval_device = self.devices[0] if device is None else device

    # Build a processor for the test file, reusing the reader's tokenizer and QA task config.
    base_processor = self.inferencer.processor
    qa_task = base_processor.tasks["question_answering"]
    eval_processor = SquadProcessor(
        tokenizer=base_processor.tokenizer,
        max_seq_len=base_processor.max_seq_len,
        label_list=qa_task["label_list"],
        metric=qa_task["metric"],
        train_filename=None,
        dev_filename=None,
        dev_split=0,
        test_filename=test_filename,
        data_dir=Path(data_dir),
    )

    data_silo = DataSilo(processor=eval_processor, batch_size=self.inferencer.batch_size, distributed=False)
    data_loader = data_silo.get_data_loader("test")
    evaluator = Evaluator(data_loader=data_loader, tasks=eval_processor.tasks, device=eval_device)
    eval_results = evaluator.eval(self.inferencer.model)

    # Keep only the three headline metrics from the first prediction head's results.
    return {key: eval_results[0][key] for key in ("EM", "f1", "top_n_accuracy")}
def eval(
    self,
    document_store: BaseDocumentStore,
    device: Optional[str] = None,
    label_index: str = "label",
    doc_index: str = "eval_document",
    label_origin: str = "gold-label",
    calibrate_conf_scores: bool = False,
):
    """
    Performs evaluation on evaluation documents in the DocumentStore.
    Returns a dict containing the following metrics:
    - "EM": Proportion of exact matches of predicted answers with their corresponding correct answers
    - "f1": Average overlap between predicted answers and their corresponding correct answers
    - "top_n_accuracy": Proportion of predicted answers that overlap with correct answer

    :param document_store: DocumentStore containing the evaluation documents
    :param device: The device on which the tensors should be processed. Choose from "cpu" and "cuda" or use the Reader's device by default.
    :param label_index: Index/Table name where labeled questions are stored
    :param doc_index: Index/Table name where documents that are used for evaluation are stored
    :param label_origin: Field name where the gold labels are stored
    :param calibrate_conf_scores: Whether to calibrate the temperature for temperature scaling of the confidence scores
    """
    if device is None:
        device = self.devices[0]
    if self.top_k_per_candidate != 4:
        logger.info(
            f"Performing Evaluation using top_k_per_candidate = {self.top_k_per_candidate} \n"
            f"and consequently, QuestionAnsweringPredictionHead.n_best = {self.top_k_per_candidate + 1}. \n"
            f"This deviates from FARM's default where QuestionAnsweringPredictionHead.n_best = 5"
        )

    # extract all questions for evaluation
    filters: Dict = {"origin": [label_origin]}
    labels = document_store.get_all_labels(index=label_index, filters=filters)

    # Aggregate all answer labels per question
    aggregated_per_doc = defaultdict(list)
    for label in labels:
        # labels without a document id cannot be matched to a context, so skip them
        if not label.document.id:
            logger.error(f"Label does not contain a document id")
            continue
        aggregated_per_doc[label.document.id].append(label)

    # Create squad style dicts
    d: Dict[str, Any] = {}
    all_doc_ids = [x.id for x in document_store.get_all_documents(doc_index)]
    for doc_id in all_doc_ids:
        doc = document_store.get_document_by_id(doc_id, index=doc_index)
        if not doc:
            logger.error(f"Document with the ID '{doc_id}' is not present in the document store.")
            continue
        d[str(doc_id)] = {"context": doc.content}
        # get all questions / answers
        # TODO check if we can simplify this by using MultiLabel
        aggregated_per_question: Dict[tuple, Any] = defaultdict(list)
        if doc_id in aggregated_per_doc:
            for label in aggregated_per_doc[doc_id]:
                # one SQuAD "qas" entry per (document, question) pair
                aggregation_key = (doc_id, label.query)
                if label.answer is None:
                    logger.error(f"Label.answer was None, but Answer object was expected: {label} ")
                    continue
                if label.answer.offsets_in_document is None:
                    logger.error(
                        f"Label.answer.offsets_in_document was None, but Span object was expected: {label} "
                    )
                    continue
                else:
                    # add to existing answers
                    # TODO offsets (whole block)
                    if aggregation_key in aggregated_per_question.keys():
                        if label.no_answer:
                            continue
                        else:
                            # Hack to fix problem where duplicate questions are merged by doc_store processing creating a QA example with 8 annotations > 6 annotation max
                            if len(aggregated_per_question[aggregation_key]["answers"]) >= 6:
                                logger.warning(
                                    f"Answers in this sample are being dropped because it has more than 6 answers. (doc_id: {doc_id}, question: {label.query}, label_id: {label.id})"
                                )
                                continue
                            aggregated_per_question[aggregation_key]["answers"].append(
                                {
                                    "text": label.answer.answer,
                                    "answer_start": label.answer.offsets_in_document[0].start,
                                }
                            )
                            aggregated_per_question[aggregation_key]["is_impossible"] = False
                    # create new one
                    else:
                        # We don't need to create an answer dict if is_impossible / no_answer
                        if label.no_answer == True:
                            aggregated_per_question[aggregation_key] = {
                                "id": str(hash(str(doc_id) + label.query)),
                                "question": label.query,
                                "answers": [],
                                "is_impossible": True,
                            }
                        else:
                            aggregated_per_question[aggregation_key] = {
                                "id": str(hash(str(doc_id) + label.query)),
                                "question": label.query,
                                "answers": [
                                    {
                                        "text": label.answer.answer,
                                        "answer_start": label.answer.offsets_in_document[0].start,
                                    }
                                ],
                                "is_impossible": False,
                            }

        # Get rid of the question key again (after we aggregated we don't need it anymore)
        d[str(doc_id)]["qas"] = [v for v in aggregated_per_question.values()]

    # Convert input format for FARM
    farm_input = [v for v in d.values()]
    n_queries = len([y for x in farm_input for y in x["qas"]])

    # Create DataLoader that can be passed to the Evaluator
    tic = perf_counter()
    indices = range(len(farm_input))
    dataset, tensor_names, problematic_ids = self.inferencer.processor.dataset_from_dicts(
        farm_input, indices=indices
    )
    data_loader = NamedDataLoader(dataset=dataset, batch_size=self.inferencer.batch_size, tensor_names=tensor_names)

    evaluator = Evaluator(data_loader=data_loader, tasks=self.inferencer.processor.tasks, device=device)

    eval_results = evaluator.eval(self.inferencer.model, calibrate_conf_scores=calibrate_conf_scores)
    toc = perf_counter()
    reader_time = toc - tic
    # metrics are reported as percentages; timing covers data conversion + evaluation
    results = {
        "EM": eval_results[0]["EM"] * 100,
        "f1": eval_results[0]["f1"] * 100,
        "top_n_accuracy": eval_results[0]["top_n_accuracy"] * 100,
        "top_n": self.inferencer.model.prediction_heads[0].n_best,
        "reader_time": reader_time,
        "seconds_per_query": reader_time / n_queries,
    }
    return results
def _extract_answers_of_predictions(self, predictions: List[QAPred], top_k: Optional[int] = None):
    # Assemble answers from all the different documents and format them.
    # For the 'no answer' option, we collect all no_ans_gaps and decide how likely
    # a no answer is based on all no_ans_gaps values across all documents
    answers: List[Answer] = []
    no_ans_gaps = []
    best_score_answer = 0

    for pred in predictions:
        answers_per_document = []
        no_ans_gaps.append(pred.no_answer_gap)
        for ans in pred.prediction:
            # skip 'no answers' here
            if self._check_no_answer(ans):
                pass
            else:
                # Convert FARM's QACandidate into a haystack Answer; offsets are
                # reported both relative to the context window and to the whole document.
                cur = Answer(
                    answer=ans.answer,
                    type="extractive",
                    score=ans.confidence if self.use_confidence_scores else ans.score,
                    context=ans.context_window,
                    document_id=pred.id,
                    offsets_in_context=[
                        Span(
                            start=ans.offset_answer_start - ans.offset_context_window_start,
                            end=ans.offset_answer_end - ans.offset_context_window_start,
                        )
                    ],
                    offsets_in_document=[Span(start=ans.offset_answer_start, end=ans.offset_answer_end)],
                )
                answers_per_document.append(cur)

                # NOTE(review): this tracks the raw `ans.score` even when
                # `use_confidence_scores` is True — confirm that `_calc_no_answer`
                # expects the raw score here.
                if ans.score > best_score_answer:
                    best_score_answer = ans.score

        # Only take n best candidates. Answers coming back from FARM are sorted with decreasing relevance
        answers += answers_per_document[: self.top_k_per_candidate]

    # calculate the score for predicting 'no answer', relative to our best positive answer score
    no_ans_prediction, max_no_ans_gap = self._calc_no_answer(
        no_ans_gaps, best_score_answer, self.use_confidence_scores
    )
    if self.return_no_answers:
        answers.append(no_ans_prediction)
    # sort answers by score (descending) and select top-k
    answers = sorted(answers, reverse=True)
    answers = answers[:top_k]

    return answers, max_no_ans_gap
def calibrate_confidence_scores(
    self,
    document_store: BaseDocumentStore,
    device: Optional[str] = None,
    label_index: str = "label",
    doc_index: str = "eval_document",
    label_origin: str = "gold_label",
):
    """
    Calibrates confidence scores on evaluation documents in the DocumentStore.

    :param document_store: DocumentStore containing the evaluation documents
    :param device: The device on which the tensors should be processed ("cpu"/"cuda");
                   defaults to the Reader's own device.
    :param label_index: Index/Table name where labeled questions are stored
    :param doc_index: Index/Table name where documents that are used for evaluation are stored
    :param label_origin: Field name where the gold labels are stored
    """
    # Calibration happens as a side effect of running eval() with calibrate_conf_scores=True.
    self.eval(
        document_store=document_store,
        device=self.devices[0] if device is None else device,
        label_index=label_index,
        doc_index=doc_index,
        label_origin=label_origin,
        calibrate_conf_scores=True,
    )
@staticmethod
def _check_no_answer(c: QACandidate):
    """Return True if the candidate encodes FARM's 'no answer' prediction."""
    # FARM marks 'no answer' with the (0, 0) span and the literal string "no_answer";
    # a (0, 0) span with any other answer text is inconsistent and worth logging.
    if c.offset_answer_start == 0 and c.offset_answer_end == 0 and c.answer != "no_answer":
        logger.error(
            "Invalid 'no_answer': Got a prediction for position 0, but answer string is not 'no_answer'"
        )
    return c.answer == "no_answer"
def predict_on_texts(self, question: str, texts: List[str], top_k: Optional[int] = None):
    """
    Use loaded QA model to find answers for a question in the supplied list of raw texts.

    Each text is wrapped in a Document and passed to :meth:`predict`; the result dict
    (query, no_ans_gap, answers sorted by descending score) is returned unchanged.

    :param question: Question string
    :param texts: List of raw document texts
    :param top_k: The maximum number of answers to return
    :return: Dict containing question and answers
    """
    documents = [Document(content=text) for text in texts]
    return self.predict(question, documents, top_k)
@classmethod
def convert_to_onnx(
    cls,
    model_name: str,
    output_path: Path,
    convert_to_float16: bool = False,
    quantize: bool = False,
    task_type: str = "question_answering",
    opset_version: int = 11,
):
    """
    Convert a PyTorch BERT model to ONNX format and write it to `output_path`.
    The converted model can then be loaded in `FARMReader` by passing the export
    path as `model_name_or_path`.

    Usage:

        `from haystack.reader.farm import FARMReader
        from pathlib import Path
        onnx_model_path = Path("roberta-onnx-model")
        FARMReader.convert_to_onnx(model_name="deepset/bert-base-cased-squad2", output_path=onnx_model_path)
        reader = FARMReader(onnx_model_path)`

    :param model_name: transformers model name
    :param output_path: Path to output the converted model
    :param convert_to_float16: Many models use float32 precision by default. With the half precision of float16,
                               inference is faster on Nvidia GPUs with Tensor core like T4 or V100. On older GPUs,
                               float32 could still be be more performant.
    :param quantize: convert floating point number to integers
    :param task_type: Type of task for the model. Available options: "question_answering" or "embeddings".
    :param opset_version: ONNX opset version
    """
    export_options = dict(
        model_name=model_name,
        output_path=output_path,
        task_type=task_type,
        convert_to_float16=convert_to_float16,
        quantize=quantize,
        opset_version=opset_version,
    )
    # Delegate the actual export to the modeling layer.
    AdaptiveModel.convert_to_onnx(**export_options)
| 51.407666 | 344 | 0.62561 | from typing import List, Optional, Dict, Any, Union, Callable
import logging
import multiprocessing
from pathlib import Path
from collections import defaultdict
from time import perf_counter
import torch
from haystack.modeling.data_handler.data_silo import DataSilo, DistillationDataSilo
from haystack.modeling.data_handler.processor import SquadProcessor, Processor
from haystack.modeling.data_handler.dataloader import NamedDataLoader
from haystack.modeling.data_handler.inputs import QAInput, Question
from haystack.modeling.infer import QAInferencer
from haystack.modeling.model.optimization import initialize_optimizer
from haystack.modeling.model.predictions import QAPred, QACandidate
from haystack.modeling.model.adaptive_model import AdaptiveModel
from haystack.modeling.training import Trainer, DistillationTrainer, TinyBERTDistillationTrainer
from haystack.modeling.evaluation import Evaluator
from haystack.modeling.utils import set_all_seeds, initialize_device_settings
from haystack.schema import Document, Answer, Span
from haystack.document_stores import BaseDocumentStore
from haystack.nodes.reader import BaseReader
logger = logging.getLogger(__name__)
class FARMReader(BaseReader):
def __init__(
    self,
    model_name_or_path: str,
    model_version: Optional[str] = None,
    context_window_size: int = 150,
    batch_size: int = 50,
    use_gpu: bool = True,
    no_ans_boost: float = 0.0,
    return_no_answer: bool = False,
    top_k: int = 10,
    top_k_per_candidate: int = 3,
    top_k_per_sample: int = 1,
    num_processes: Optional[int] = None,
    max_seq_len: int = 256,
    doc_stride: int = 128,
    progress_bar: bool = True,
    duplicate_filtering: int = 0,
    use_confidence_scores: bool = True,
    proxies: Optional[Dict[str, str]] = None,
    local_files_only=False,
    force_download=False,
    use_auth_token: Optional[Union[str, bool]] = None,
    **kwargs,
):
    """
    Load a QA model for extractive question answering via a ``QAInferencer``.

    :param model_name_or_path: Model name or path passed to ``QAInferencer.load``
    :param model_version: Model revision passed through as ``revision``
    :param context_window_size: Size of the answer context window set on the prediction head
    :param batch_size: Inference batch size
    :param use_gpu: Whether to load the model on GPU
    :param no_ans_boost: Boost applied to the 'no answer' option on the prediction head
    :param return_no_answer: Whether 'no answer' predictions are returned
    :param top_k: Default maximum number of answers returned per query
    :param top_k_per_candidate: Answer candidates kept per document passage
    :param top_k_per_sample: Answer candidates kept per sample (requires a recent FARM)
    :param num_processes: Number of processes for the inferencer's preprocessing
    :param max_seq_len: Maximum sequence length for the model
    :param doc_stride: Stride for splitting long documents into passages
    :param progress_bar: Show tqdm progress bars during inference
    :param duplicate_filtering: Duplicate-answer filtering setting (requires a recent FARM)
    :param use_confidence_scores: Use calibrated confidence instead of raw scores in answers
    :param proxies: Proxies for model download
    :param local_files_only: Only use locally cached model files
    :param force_download: Force re-download of the model
    :param use_auth_token: Auth token for private models
    """
    self.set_config(
        model_name_or_path=model_name_or_path,
        model_version=model_version,
        context_window_size=context_window_size,
        batch_size=batch_size,
        use_gpu=use_gpu,
        no_ans_boost=no_ans_boost,
        return_no_answer=return_no_answer,
        top_k=top_k,
        top_k_per_candidate=top_k_per_candidate,
        top_k_per_sample=top_k_per_sample,
        num_processes=num_processes,
        max_seq_len=max_seq_len,
        doc_stride=doc_stride,
        progress_bar=progress_bar,
        duplicate_filtering=duplicate_filtering,
        proxies=proxies,
        local_files_only=local_files_only,
        force_download=force_download,
        use_confidence_scores=use_confidence_scores,
        **kwargs,
    )
    self.devices, _ = initialize_device_settings(use_cuda=use_gpu, multi_gpu=False)
    self.return_no_answers = return_no_answer
    self.top_k = top_k
    self.top_k_per_candidate = top_k_per_candidate
    self.inferencer = QAInferencer.load(
        model_name_or_path,
        batch_size=batch_size,
        gpu=use_gpu,
        task_type="question_answering",
        max_seq_len=max_seq_len,
        doc_stride=doc_stride,
        num_processes=num_processes,
        revision=model_version,
        disable_tqdm=not progress_bar,
        strict=False,
        proxies=proxies,
        local_files_only=local_files_only,
        force_download=force_download,
        devices=self.devices,
        use_auth_token=use_auth_token,
        **kwargs,
    )
    # Push reader-level settings down onto the QA prediction head.
    self.inferencer.model.prediction_heads[0].context_window_size = context_window_size
    self.inferencer.model.prediction_heads[0].no_ans_boost = no_ans_boost
    self.inferencer.model.prediction_heads[0].n_best = top_k_per_candidate + 1
    # Fix: narrowed the original bare `except:` clauses, which also swallowed
    # KeyboardInterrupt/SystemExit. Older FARM versions lack these attributes,
    # so a failure here is downgraded to a warning on purpose.
    try:
        self.inferencer.model.prediction_heads[0].n_best_per_sample = top_k_per_sample
    except Exception:
        logger.warning("Could not set `top_k_per_sample` in FARM. Please update FARM version.")
    try:
        self.inferencer.model.prediction_heads[0].duplicate_filtering = duplicate_filtering
    except Exception:
        logger.warning("Could not set `duplicate_filtering` in FARM. Please update FARM version.")
    self.max_seq_len = max_seq_len
    self.use_gpu = use_gpu
    self.progress_bar = progress_bar
    self.use_confidence_scores = use_confidence_scores
def _training_procedure(
    self,
    data_dir: str,
    train_filename: str,
    dev_filename: Optional[str] = None,
    test_filename: Optional[str] = None,
    use_gpu: Optional[bool] = None,
    batch_size: int = 10,
    n_epochs: int = 2,
    learning_rate: float = 1e-5,
    max_seq_len: Optional[int] = None,
    warmup_proportion: float = 0.2,
    dev_split: float = 0,
    evaluate_every: int = 300,
    save_dir: Optional[str] = None,
    num_processes: Optional[int] = None,
    use_amp: str = None,
    checkpoint_root_dir: Path = Path("model_checkpoints"),
    checkpoint_every: Optional[int] = None,
    checkpoints_to_keep: int = 3,
    teacher_model: Optional["FARMReader"] = None,
    teacher_batch_size: Optional[int] = None,
    caching: bool = False,
    cache_path: Path = Path("cache/data_silo"),
    distillation_loss_weight: float = 0.5,
    distillation_loss: Union[str, Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = "kl_div",
    temperature: float = 1.0,
    tinybert: bool = False,
    processor: Optional[Processor] = None,
):
    """
    Shared training loop behind train(), distil_prediction_layer_from() and
    distil_intermediate_layers_from(): builds processor, data silo, optimizer and the
    matching Trainer variant, runs training, then saves the resulting model.
    """
    # An explicit dev file overrides any dev split of the train set.
    if dev_filename:
        dev_split = 0

    if num_processes is None:
        # leave one CPU core free; fall back to 1 on single-core machines
        num_processes = multiprocessing.cpu_count() - 1 or 1

    set_all_seeds(seed=42)

    # Fall back to the values the reader was initialized with.
    if use_gpu is None:
        use_gpu = self.use_gpu
    if max_seq_len is None:
        max_seq_len = self.max_seq_len

    devices, n_gpu = initialize_device_settings(use_cuda=use_gpu, multi_gpu=False)

    if not save_dir:
        save_dir = f"../../saved_models/{self.inferencer.model.language_model.name}"
        if tinybert:
            save_dir += "_tinybert_stage_1"

    # 1. Create a processor that converts the SQuAD-style files into PyTorch datasets,
    #    reusing the reader's tokenizer unless a custom processor was supplied.
    label_list = ["start_token", "end_token"]
    metric = "squad"
    if processor is None:
        processor = SquadProcessor(
            tokenizer=self.inferencer.processor.tokenizer,
            max_seq_len=max_seq_len,
            label_list=label_list,
            metric=metric,
            train_filename=train_filename,
            dev_filename=dev_filename,
            dev_split=dev_split,
            test_filename=test_filename,
            data_dir=Path(data_dir),
        )
    data_silo: DataSilo

    # 2. For prediction-layer distillation (teacher given, not TinyBERT) the
    #    DistillationDataSilo also runs the teacher to provide its outputs.
    if (
        teacher_model and not tinybert
    ):
        data_silo = DistillationDataSilo(
            teacher_model,
            teacher_batch_size or batch_size,
            device=devices[0],
            processor=processor,
            batch_size=batch_size,
            distributed=False,
            max_processes=num_processes,
            caching=caching,
            cache_path=cache_path,
        )
    else:
        data_silo = DataSilo(
            processor=processor,
            batch_size=batch_size,
            distributed=False,
            max_processes=num_processes,
            caching=caching,
            cache_path=cache_path,
        )

    # 3. Optimizer + linear-warmup LR schedule over the full training run.
    model, optimizer, lr_schedule = initialize_optimizer(
        model=self.inferencer.model,
        learning_rate=learning_rate,
        schedule_opts={"name": "LinearWarmup", "warmup_proportion": warmup_proportion},
        n_batches=len(data_silo.loaders["train"]),
        n_epochs=n_epochs,
        device=devices[0],
        use_amp=use_amp,
    )
    # 4. Pick the Trainer variant: TinyBERT intermediate-layer distillation,
    #    prediction-layer distillation, or plain fine-tuning.
    if tinybert:
        if not teacher_model:
            raise ValueError("TinyBERT distillation requires a teacher model.")
        trainer = TinyBERTDistillationTrainer.create_or_load_checkpoint(
            model=model,
            teacher_model=teacher_model.inferencer.model,
            optimizer=optimizer,
            data_silo=data_silo,
            epochs=n_epochs,
            n_gpu=n_gpu,
            lr_schedule=lr_schedule,
            evaluate_every=evaluate_every,
            device=devices[0],
            use_amp=use_amp,
            disable_tqdm=not self.progress_bar,
            checkpoint_root_dir=Path(checkpoint_root_dir),
            checkpoint_every=checkpoint_every,
            checkpoints_to_keep=checkpoints_to_keep,
        )
    elif (
        teacher_model
    ):  # checks again if teacher model is passed as parameter, in that case assume model distillation is used
        trainer = DistillationTrainer.create_or_load_checkpoint(
            model=model,
            optimizer=optimizer,
            data_silo=data_silo,
            epochs=n_epochs,
            n_gpu=n_gpu,
            lr_schedule=lr_schedule,
            evaluate_every=evaluate_every,
            device=devices[0],
            use_amp=use_amp,
            disable_tqdm=not self.progress_bar,
            checkpoint_root_dir=Path(checkpoint_root_dir),
            checkpoint_every=checkpoint_every,
            checkpoints_to_keep=checkpoints_to_keep,
            distillation_loss=distillation_loss,
            distillation_loss_weight=distillation_loss_weight,
            temperature=temperature,
        )
    else:
        trainer = Trainer.create_or_load_checkpoint(
            model=model,
            optimizer=optimizer,
            data_silo=data_silo,
            epochs=n_epochs,
            n_gpu=n_gpu,
            lr_schedule=lr_schedule,
            evaluate_every=evaluate_every,
            device=devices[0],
            use_amp=use_amp,
            disable_tqdm=not self.progress_bar,
            checkpoint_root_dir=Path(checkpoint_root_dir),
            checkpoint_every=checkpoint_every,
            checkpoints_to_keep=checkpoints_to_keep,
        )

    # 5. Let it grow!
    self.inferencer.model = trainer.train()
    self.save(Path(save_dir))
def train(
    self,
    data_dir: str,
    train_filename: str,
    dev_filename: Optional[str] = None,
    test_filename: Optional[str] = None,
    use_gpu: Optional[bool] = None,
    batch_size: int = 10,
    n_epochs: int = 2,
    learning_rate: float = 1e-5,
    max_seq_len: Optional[int] = None,
    warmup_proportion: float = 0.2,
    dev_split: float = 0,
    evaluate_every: int = 300,
    save_dir: Optional[str] = None,
    num_processes: Optional[int] = None,
    use_amp: str = None,
    checkpoint_root_dir: Path = Path("model_checkpoints"),
    checkpoint_every: Optional[int] = None,
    checkpoints_to_keep: int = 3,
    caching: bool = False,
    cache_path: Path = Path("cache/data_silo"),
):
    """
    Fine-tune the reader's QA model on SQuAD-format data by delegating to the
    shared training procedure (no distillation).
    """
    training_kwargs = dict(
        data_dir=data_dir,
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=test_filename,
        use_gpu=use_gpu,
        batch_size=batch_size,
        n_epochs=n_epochs,
        learning_rate=learning_rate,
        max_seq_len=max_seq_len,
        warmup_proportion=warmup_proportion,
        dev_split=dev_split,
        evaluate_every=evaluate_every,
        save_dir=save_dir,
        num_processes=num_processes,
        use_amp=use_amp,
        checkpoint_root_dir=checkpoint_root_dir,
        checkpoint_every=checkpoint_every,
        checkpoints_to_keep=checkpoints_to_keep,
        caching=caching,
        cache_path=cache_path,
    )
    return self._training_procedure(**training_kwargs)
def distil_prediction_layer_from(
    self,
    teacher_model: "FARMReader",
    data_dir: str,
    train_filename: str,
    dev_filename: Optional[str] = None,
    test_filename: Optional[str] = None,
    use_gpu: Optional[bool] = None,
    student_batch_size: int = 10,
    teacher_batch_size: Optional[int] = None,
    n_epochs: int = 2,
    learning_rate: float = 3e-5,
    max_seq_len: Optional[int] = None,
    warmup_proportion: float = 0.2,
    dev_split: float = 0,
    evaluate_every: int = 300,
    save_dir: Optional[str] = None,
    num_processes: Optional[int] = None,
    use_amp: str = None,
    checkpoint_root_dir: Path = Path("model_checkpoints"),
    checkpoint_every: Optional[int] = None,
    checkpoints_to_keep: int = 3,
    caching: bool = False,
    cache_path: Path = Path("cache/data_silo"),
    distillation_loss_weight: float = 0.5,
    distillation_loss: Union[str, Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = "kl_div",
    temperature: float = 1.0,
):
    """
    Distil a teacher reader's prediction layer into this (student) reader while
    fine-tuning on SQuAD-format data. Delegates to the shared training procedure
    with a teacher model and distillation-loss settings.
    """
    distillation_kwargs = dict(
        data_dir=data_dir,
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=test_filename,
        use_gpu=use_gpu,
        batch_size=student_batch_size,
        n_epochs=n_epochs,
        learning_rate=learning_rate,
        max_seq_len=max_seq_len,
        warmup_proportion=warmup_proportion,
        dev_split=dev_split,
        evaluate_every=evaluate_every,
        save_dir=save_dir,
        num_processes=num_processes,
        use_amp=use_amp,
        checkpoint_root_dir=checkpoint_root_dir,
        checkpoint_every=checkpoint_every,
        checkpoints_to_keep=checkpoints_to_keep,
        teacher_model=teacher_model,
        teacher_batch_size=teacher_batch_size,
        caching=caching,
        cache_path=cache_path,
        distillation_loss_weight=distillation_loss_weight,
        distillation_loss=distillation_loss,
        temperature=temperature,
    )
    return self._training_procedure(**distillation_kwargs)
def distil_intermediate_layers_from(
    self,
    teacher_model: "FARMReader",
    data_dir: str,
    train_filename: str,
    dev_filename: Optional[str] = None,
    test_filename: Optional[str] = None,
    use_gpu: Optional[bool] = None,
    batch_size: int = 10,
    n_epochs: int = 5,
    learning_rate: float = 5e-5,
    max_seq_len: Optional[int] = None,
    warmup_proportion: float = 0.2,
    dev_split: float = 0,
    evaluate_every: int = 300,
    save_dir: Optional[str] = None,
    num_processes: Optional[int] = None,
    use_amp: str = None,
    checkpoint_root_dir: Path = Path("model_checkpoints"),
    checkpoint_every: Optional[int] = None,
    checkpoints_to_keep: int = 3,
    caching: bool = False,
    cache_path: Path = Path("cache/data_silo"),
    distillation_loss: Union[str, Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = "mse",
    temperature: float = 1.0,
    processor: Optional[Processor] = None,
):
    """
    Run TinyBERT-style intermediate-layer distillation from a teacher reader into
    this (student) reader. Delegates to the shared training procedure with
    `tinybert=True`; the teacher batch size equals the student batch size.
    """
    tinybert_kwargs = dict(
        data_dir=data_dir,
        train_filename=train_filename,
        dev_filename=dev_filename,
        test_filename=test_filename,
        use_gpu=use_gpu,
        batch_size=batch_size,
        n_epochs=n_epochs,
        learning_rate=learning_rate,
        max_seq_len=max_seq_len,
        warmup_proportion=warmup_proportion,
        dev_split=dev_split,
        evaluate_every=evaluate_every,
        save_dir=save_dir,
        num_processes=num_processes,
        use_amp=use_amp,
        checkpoint_root_dir=checkpoint_root_dir,
        checkpoint_every=checkpoint_every,
        checkpoints_to_keep=checkpoints_to_keep,
        teacher_model=teacher_model,
        teacher_batch_size=batch_size,
        caching=caching,
        cache_path=cache_path,
        distillation_loss=distillation_loss,
        temperature=temperature,
        tinybert=True,
        processor=processor,
    )
    return self._training_procedure(**tinybert_kwargs)
def update_parameters(
    self,
    context_window_size: Optional[int] = None,
    no_ans_boost: Optional[float] = None,
    return_no_answer: Optional[bool] = None,
    max_seq_len: Optional[int] = None,
    doc_stride: Optional[int] = None,
):
    """
    Hot-update selected reader parameters on the live inferencer; any argument
    left as None keeps its current value.
    """
    prediction_head = self.inferencer.model.prediction_heads[0]
    processor = self.inferencer.processor

    if no_ans_boost is not None:
        prediction_head.no_ans_boost = no_ans_boost
    if return_no_answer is not None:
        self.return_no_answers = return_no_answer
    if doc_stride is not None:
        processor.doc_stride = doc_stride
    if context_window_size is not None:
        prediction_head.context_window_size = context_window_size
    if max_seq_len is not None:
        # keep the processor and the reader-level setting in sync
        processor.max_seq_len = max_seq_len
        self.max_seq_len = max_seq_len
def save(self, directory: Path):
    """Persist the reader's model weights and processor config to `directory` for later reuse."""
    logger.info(f"Saving reader model to {directory}")
    for component in (self.inferencer.model, self.inferencer.processor):
        component.save(directory)
def predict_batch(self, query_doc_list: List[dict], top_k: int = None, batch_size: int = None):
    """
    Use loaded QA model to find answers for a list of queries in each query's supplied list of Document.

    Returns list of dictionaries containing answers sorted by (desc.) score.

    :param query_doc_list: List of dictionaries containing queries with their retrieved documents
    :param top_k: The maximum number of answers to return for each query
    :param batch_size: Number of samples the model receives in one batch for inference.
                       If None, the inferencer keeps its currently configured batch size.
    :return: List of dictionaries containing query and answers
    """
    if top_k is None:
        top_k = self.top_k
    # convert input to FARM format
    inputs = []
    number_of_docs = []
    labels = []

    # build input objects for inference_from_objects
    for query_with_docs in query_doc_list:
        documents = query_with_docs["docs"]
        query = query_with_docs["question"]
        labels.append(query)
        number_of_docs.append(len(documents))
        for doc in documents:
            # NOTE(review): `query.query` implies the "question" entry is a Label-like object
            # exposing a `.query` attribute rather than a plain string — confirm with callers.
            cur = QAInput(doc_text=doc.content, questions=Question(text=query.query, uid=doc.id))
            inputs.append(cur)

    # Fix: only override the inferencer's batch size when one was explicitly supplied.
    # The previous unconditional assignment clobbered the configured batch size with
    # None whenever the argument was omitted.
    if batch_size is not None:
        self.inferencer.batch_size = batch_size
    # make predictions on all document-query pairs
    predictions = self.inferencer.inference_from_objects(
        objects=inputs, return_json=False, multiprocessing_chunksize=10
    )

    # Predictions come back flat, in input order; slice them back into one group
    # per query using the recorded per-query document counts.
    grouped_predictions = []
    left_idx = 0
    right_idx = 0
    for number in number_of_docs:
        right_idx = left_idx + number
        grouped_predictions.append(predictions[left_idx:right_idx])
        left_idx = right_idx

    result = []
    for idx, group in enumerate(grouped_predictions):
        answers, max_no_ans_gap = self._extract_answers_of_predictions(group, top_k)
        query = group[0].query
        cur_label = labels[idx]
        result.append({"query": query, "no_ans_gap": max_no_ans_gap, "answers": answers, "label": cur_label})
    return result
def predict(self, query: str, documents: List[Document], top_k: Optional[int] = None):
    """
    Find answers for `query` within the supplied Documents using the loaded QA model.

    :param query: Query string
    :param documents: List of Document in which to search for the answer
    :param top_k: The maximum number of answers to return; defaults to the reader's `top_k`
    :return: Dict with keys "query", "no_ans_gap" and "answers" (sorted by descending score)
    """
    effective_top_k = self.top_k if top_k is None else top_k

    # Wrap each document together with the query in FARM's input format.
    qa_inputs = [
        QAInput(doc_text=doc.content, questions=Question(text=query, uid=doc.id))
        for doc in documents
    ]
    predictions = self.inferencer.inference_from_objects(
        objects=qa_inputs, return_json=False, multiprocessing_chunksize=1
    )
    # Assemble answers from all the different documents and format them.
    answers, max_no_ans_gap = self._extract_answers_of_predictions(predictions, effective_top_k)
    return {"query": query, "no_ans_gap": max_no_ans_gap, "answers": answers}
def eval_on_file(self, data_dir: str, test_filename: str, device: Optional[str] = None):
    """
    Evaluate the reader on a SQuAD-formatted test file and return a dict with
    "EM", "f1" and "top_n_accuracy".

    :param data_dir: Directory containing the test file
    :param test_filename: Name of the SQuAD-format test file
    :param device: Device for tensor processing ("cpu"/"cuda"); defaults to the Reader's device
    """
    if device is None:
        device = self.devices[0]

    # Reuse the reader's tokenizer and question-answering task configuration.
    qa_task = self.inferencer.processor.tasks["question_answering"]
    eval_processor = SquadProcessor(
        tokenizer=self.inferencer.processor.tokenizer,
        max_seq_len=self.inferencer.processor.max_seq_len,
        label_list=qa_task["label_list"],
        metric=qa_task["metric"],
        train_filename=None,
        dev_filename=None,
        dev_split=0,
        test_filename=test_filename,
        data_dir=Path(data_dir),
    )

    data_silo = DataSilo(processor=eval_processor, batch_size=self.inferencer.batch_size, distributed=False)
    evaluator = Evaluator(
        data_loader=data_silo.get_data_loader("test"),
        tasks=eval_processor.tasks,
        device=device,
    )
    eval_results = evaluator.eval(self.inferencer.model)
    head_results = eval_results[0]
    return {
        "EM": head_results["EM"],
        "f1": head_results["f1"],
        "top_n_accuracy": head_results["top_n_accuracy"],
    }
def eval(
    self,
    document_store: BaseDocumentStore,
    device: Optional[str] = None,
    label_index: str = "label",
    doc_index: str = "eval_document",
    label_origin: str = "gold-label",
    calibrate_conf_scores: bool = False,
):
    """
    Evaluate the reader on labeled documents stored in `document_store`.

    Builds SQuAD-style dicts from the stored documents and labels, runs the FARM
    Evaluator on them, and returns a dict with "EM", "f1", "top_n_accuracy"
    (as percentages), "top_n", "reader_time" and "seconds_per_query".

    :param document_store: DocumentStore containing the evaluation documents
    :param device: Device for tensor processing; defaults to the Reader's device
    :param label_index: Index/Table name where labeled questions are stored
    :param doc_index: Index/Table name where the evaluation documents are stored
    :param label_origin: Field name where the gold labels are stored
    :param calibrate_conf_scores: Whether to calibrate the temperature for confidence scaling
    """
    if device is None:
        device = self.devices[0]
    if self.top_k_per_candidate != 4:
        logger.info(
            f"Performing Evaluation using top_k_per_candidate = {self.top_k_per_candidate} \n"
            f"and consequently, QuestionAnsweringPredictionHead.n_best = {self.top_k_per_candidate + 1}. \n"
            f"This deviates from FARM's default where QuestionAnsweringPredictionHead.n_best = 5"
        )

    # extract all gold labels for evaluation
    filters: Dict = {"origin": [label_origin]}
    labels = document_store.get_all_labels(index=label_index, filters=filters)

    # aggregate answer labels per document
    aggregated_per_doc = defaultdict(list)
    for label in labels:
        # labels without a document id cannot be matched to a context, so skip them
        if not label.document.id:
            logger.error(f"Label does not contain a document id")
            continue
        aggregated_per_doc[label.document.id].append(label)

    # build SQuAD-style dicts: one entry per document with its "context" and "qas"
    d: Dict[str, Any] = {}
    all_doc_ids = [x.id for x in document_store.get_all_documents(doc_index)]
    for doc_id in all_doc_ids:
        doc = document_store.get_document_by_id(doc_id, index=doc_index)
        if not doc:
            logger.error(f"Document with the ID '{doc_id}' is not present in the document store.")
            continue
        d[str(doc_id)] = {"context": doc.content}
        # aggregate all questions/answers for this document
        aggregated_per_question: Dict[tuple, Any] = defaultdict(list)
        if doc_id in aggregated_per_doc:
            for label in aggregated_per_doc[doc_id]:
                # one SQuAD "qas" entry per (document, question) pair
                aggregation_key = (doc_id, label.query)
                if label.answer is None:
                    logger.error(f"Label.answer was None, but Answer object was expected: {label} ")
                    continue
                if label.answer.offsets_in_document is None:
                    logger.error(
                        f"Label.answer.offsets_in_document was None, but Span object was expected: {label} "
                    )
                    continue
                else:
                    # append to an existing entry for this question
                    if aggregation_key in aggregated_per_question.keys():
                        if label.no_answer:
                            continue
                        else:
                            # cap at 6 annotations per question (duplicate-question merging can exceed the max)
                            if len(aggregated_per_question[aggregation_key]["answers"]) >= 6:
                                logger.warning(
                                    f"Answers in this sample are being dropped because it has more than 6 answers. (doc_id: {doc_id}, question: {label.query}, label_id: {label.id})"
                                )
                                continue
                            aggregated_per_question[aggregation_key]["answers"].append(
                                {
                                    "text": label.answer.answer,
                                    "answer_start": label.answer.offsets_in_document[0].start,
                                }
                            )
                            aggregated_per_question[aggregation_key]["is_impossible"] = False
                    # or create a fresh entry
                    else:
                        # no answer dict is needed for is_impossible / no_answer entries
                        if label.no_answer == True:
                            aggregated_per_question[aggregation_key] = {
                                "id": str(hash(str(doc_id) + label.query)),
                                "question": label.query,
                                "answers": [],
                                "is_impossible": True,
                            }
                        else:
                            aggregated_per_question[aggregation_key] = {
                                "id": str(hash(str(doc_id) + label.query)),
                                "question": label.query,
                                "answers": [
                                    {
                                        "text": label.answer.answer,
                                        "answer_start": label.answer.offsets_in_document[0].start,
                                    }
                                ],
                                "is_impossible": False,
                            }

        # drop the aggregation key; only the values are needed downstream
        d[str(doc_id)]["qas"] = [v for v in aggregated_per_question.values()]

    # convert input format for FARM
    farm_input = [v for v in d.values()]
    n_queries = len([y for x in farm_input for y in x["qas"]])

    # create a DataLoader that can be passed to the Evaluator
    tic = perf_counter()
    indices = range(len(farm_input))
    dataset, tensor_names, problematic_ids = self.inferencer.processor.dataset_from_dicts(
        farm_input, indices=indices
    )
    data_loader = NamedDataLoader(dataset=dataset, batch_size=self.inferencer.batch_size, tensor_names=tensor_names)

    evaluator = Evaluator(data_loader=data_loader, tasks=self.inferencer.processor.tasks, device=device)

    eval_results = evaluator.eval(self.inferencer.model, calibrate_conf_scores=calibrate_conf_scores)
    toc = perf_counter()
    reader_time = toc - tic
    # metrics are reported as percentages; timing covers data conversion + evaluation
    results = {
        "EM": eval_results[0]["EM"] * 100,
        "f1": eval_results[0]["f1"] * 100,
        "top_n_accuracy": eval_results[0]["top_n_accuracy"] * 100,
        "top_n": self.inferencer.model.prediction_heads[0].n_best,
        "reader_time": reader_time,
        "seconds_per_query": reader_time / n_queries,
    }
    return results
def _extract_answers_of_predictions(self, predictions: List[QAPred], top_k: Optional[int] = None):
# Assemble answers from all the different documents and format them.
# For the 'no answer' option, we collect all no_ans_gaps and decide how likely
# a no answer is based on all no_ans_gaps values across all documents
answers: List[Answer] = []
no_ans_gaps = []
best_score_answer = 0
for pred in predictions:
answers_per_document = []
no_ans_gaps.append(pred.no_answer_gap)
for ans in pred.prediction:
# skip 'no answers' here
if self._check_no_answer(ans):
pass
else:
cur = Answer(
answer=ans.answer,
type="extractive",
score=ans.confidence if self.use_confidence_scores else ans.score,
context=ans.context_window,
document_id=pred.id,
offsets_in_context=[
Span(
start=ans.offset_answer_start - ans.offset_context_window_start,
end=ans.offset_answer_end - ans.offset_context_window_start,
)
],
offsets_in_document=[Span(start=ans.offset_answer_start, end=ans.offset_answer_end)],
)
answers_per_document.append(cur)
if ans.score > best_score_answer:
best_score_answer = ans.score
# Only take n best candidates. Answers coming back from FARM are sorted with decreasing relevance
answers += answers_per_document[: self.top_k_per_candidate]
# calculate the score for predicting 'no answer', relative to our best positive answer score
no_ans_prediction, max_no_ans_gap = self._calc_no_answer(
no_ans_gaps, best_score_answer, self.use_confidence_scores
)
if self.return_no_answers:
answers.append(no_ans_prediction)
# sort answers by score (descending) and select top-k
answers = sorted(answers, reverse=True)
answers = answers[:top_k]
return answers, max_no_ans_gap
def calibrate_confidence_scores(
self,
document_store: BaseDocumentStore,
device: Optional[str] = None,
label_index: str = "label",
doc_index: str = "eval_document",
label_origin: str = "gold_label",
):
if device is None:
device = self.devices[0]
self.eval(
document_store=document_store,
device=device,
label_index=label_index,
doc_index=doc_index,
label_origin=label_origin,
calibrate_conf_scores=True,
)
@staticmethod
def _check_no_answer(c: QACandidate):
# check for correct value in "answer"
if c.offset_answer_start == 0 and c.offset_answer_end == 0:
if c.answer != "no_answer":
logger.error(
"Invalid 'no_answer': Got a prediction for position 0, but answer string is not 'no_answer'"
)
if c.answer == "no_answer":
return True
else:
return False
def predict_on_texts(self, question: str, texts: List[str], top_k: Optional[int] = None):
documents = []
for text in texts:
documents.append(Document(content=text))
predictions = self.predict(question, documents, top_k)
return predictions
@classmethod
def convert_to_onnx(
cls,
model_name: str,
output_path: Path,
convert_to_float16: bool = False,
quantize: bool = False,
task_type: str = "question_answering",
opset_version: int = 11,
):
AdaptiveModel.convert_to_onnx(
model_name=model_name,
output_path=output_path,
task_type=task_type,
convert_to_float16=convert_to_float16,
quantize=quantize,
opset_version=opset_version,
)
| true | true |
f715f9eec1999c4f7c88c87f57493937d98df307 | 8,387 | py | Python | thor/orbit.py | B612-Asteroid-Institute/thor | d3d1dcbe86f67a62c90b4cde3fc577e414825cf2 | [
"BSD-3-Clause"
] | null | null | null | thor/orbit.py | B612-Asteroid-Institute/thor | d3d1dcbe86f67a62c90b4cde3fc577e414825cf2 | [
"BSD-3-Clause"
] | null | null | null | thor/orbit.py | B612-Asteroid-Institute/thor | d3d1dcbe86f67a62c90b4cde3fc577e414825cf2 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from .utils import _checkTime
from .vectors import calcNae
from .vectors import calcDelta
from .vectors import calcXae
from .vectors import calcXa
from .vectors import calcNhat
from .vectors import calcR1
from .vectors import calcR2
from .projections import cartesianToGnomonic
from .coordinates import transformCoordinates
__all__ = ["TestOrbit"]
class TestOrbit:
"""
TestOrbit: Class that calculates and stores the rotation matrices
for a guess of heliocentric distance and velocity. To be used in
tandem with the Cell class.
Parameters
----------
elements : `~numpy.ndarray` (6)
Cartesian ecliptic orbital elements with postions in units of AU
and velocities in units of AU per day.
t0 : `~astropy.time.core.Time` (1)
Epoch at which orbital elements are defined.
"""
    def __init__(self, elements, epoch):
        """Store the test orbit's Cartesian elements and epoch.

        ``_checkTime`` validates that ``epoch`` is an astropy Time object
        (presumably raising otherwise — see utils._checkTime).
        """
        _checkTime(epoch, "epoch")
        # Cartesian ecliptic elements: positions (AU) and velocities (AU/day).
        self.elements = elements
        self.epoch = epoch
def prepare(self, verbose=True):
"""
Calculate rotation matrices.
Populates the following class properties:
n_hat : vector normal to the plane of orbit
R1 : rotation matrix to rotate towards x-y plane
R2 : rotation matrix to rotate towards x-axis
M : final rotation matrix
Parameters
----------
verbose : bool, optional
Print progress statements.
[Default = True]
Returns
-------
None
"""
if verbose is True:
print("Calculating vector normal to plane of orbit...")
self.n_hat = calcNhat(self.elements[:3])
if verbose is True:
print("Calculating R1 rotation matrix...")
self.R1 = calcR1(self.elements[:3], self.n_hat)
self.x_a_xy = np.array(self.R1 @ self.elements[:3])[0]
if verbose is True:
print("Calculating R2 rotation matrix...")
self.R2 = calcR2(self.x_a_xy)
if verbose is True:
print("Calculating final rotation matrix...")
self.M = self.R2 @ self.R1
if verbose is True:
print("Done.")
print("")
return
def applyToObservations(self, observations, verbose=True):
"""
Apply the prepared rotations to the given observations. Adds the gnomonic
plane coordinates to observations (columns: theta_x_deg, theta_y_deg)
Parameters
----------
observations : `~pandas.DataFrame`
DataFrame of observations defined at the same epoch as this test orbit,
to project into the test orbit's frame.
verbose : bool, optional
Print progress statements?
[Default = True]
Returns
-------
None
"""
if verbose is True:
print("Applying rotation matrices to observations...")
print("Converting to ecliptic coordinates...")
#velocities_present = False
#if "vRAcosDec" in observations.columns and "vDec" in observations.columns:
# coords_eq_r = observations[["RA_deg", "Dec_deg"]].values
# coords_eq_v = observations[["vRAcosDec", "vDec"]].values
# coords_eq_v[:, 0] /= np.cos(np.radians(coords_eq_r[:, 1]))
# coords_eq = np.hstack([
# np.ones((len(coords_eq_r), 1)),
# coords_eq_r,
# np.zeros((len(coords_eq_r), 1)),
# coords_eq_v
# ])
# velocities_present = True
#else:
coords_eq = observations[["RA_deg", "Dec_deg"]].values
coords_eq = np.hstack([np.ones((len(coords_eq), 1)), coords_eq])
coords_ec = transformCoordinates(coords_eq,
"equatorial",
"ecliptic",
representation_in="spherical",
representation_out="spherical"
)
if verbose is True:
print("Calculating object to observer unit vector...")
n_ae = calcNae(coords_ec[:, 1:3])
x_e = observations[["obs_x", "obs_y", "obs_z"]].values
if verbose is True:
print("Calculating object to observer distance assuming r = {} AU...".format(np.linalg.norm(self.elements[:3])))
delta = np.zeros(len(n_ae))
for i in range(len(delta)):
delta[i] = calcDelta(np.linalg.norm(self.elements[:3]), x_e[i, :], n_ae[i, :])
if verbose is True:
print("Calculating object to observer position vector...")
x_ae = np.zeros([len(delta), 3])
for i, (delta_i, n_ae_i) in enumerate(zip(delta, n_ae)):
x_ae[i] = calcXae(delta_i, n_ae_i)
if verbose is True:
print("Calculating heliocentric object position vector...")
x_a = np.zeros([len(x_ae), 3])
for i, (x_ae_i, x_e_i) in enumerate(zip(x_ae, x_e)):
x_a[i] = calcXa(x_ae_i, x_e_i)
if verbose is True:
print("Applying rotation matrix M to heliocentric object position vector...")
coords_cart_rotated = np.array(self.M @ x_a.T).T
if verbose is True:
print("Performing gnomonic projection...")
gnomonic_coords = cartesianToGnomonic(coords_cart_rotated)
observations["obj_x"] = x_a[:, 0]
observations["obj_y"] = x_a[:, 1]
observations["obj_z"] = x_a[:, 2]
observations["theta_x_deg"] = np.degrees(gnomonic_coords[:, 0])
observations["theta_y_deg"] = np.degrees(gnomonic_coords[:, 1])
observations["test_obj_x"] = self.elements[0]
observations["test_obj_y"] = self.elements[1]
observations["test_obj_z"] = self.elements[2]
observations["test_obj_vx"] = self.elements[3]
observations["test_obj_vy"] = self.elements[4]
observations["test_obj_vz"] = self.elements[5]
if verbose is True:
print("Done.")
print("")
return
    def applyToEphemeris(self, ephemeris, verbose=True):
        """
        Apply the prepared rotations to the given ephemerides. Adds the gnomonic
        plane coordinates to the ephemeris (columns: theta_x_deg, theta_y_deg,
        vtheta_x_deg, and vtheta_y_deg)
        Parameters
        ----------
        ephemeris : `~pandas.DataFrame`
            DataFrame of ephemeris generated by a THOR backend defined at the same epoch as this test orbit,
            to project into the test orbit's frame. Modified in place.
        verbose : bool, optional
            Print progress statements?
            [Default = True]
        Returns
        -------
        None
        """
        # NOTE(review): .values is assumed to return a copy here (it is mutated
        # below); true for a homogeneous float DataFrame — confirm callers.
        coords_cart = ephemeris[["obj_x", "obj_y", "obj_z", "obj_vx", "obj_vy", "obj_vz"]].values
        coords_cart_rotated = np.zeros_like(coords_cart)
        if verbose is True:
            print("Applying rotation matrix M to heliocentric object position vector...")
        coords_cart_rotated[:, :3] = np.array(self.M @ coords_cart[:, :3].T).T
        if verbose is True:
            print("Applying rotation matrix M to heliocentric object velocity vector...")
        # Calculate relative velocity, then rotate to projected frame
        coords_cart[:, 3:] = coords_cart[:, 3:] - self.elements[3:].reshape(1, -1)
        coords_cart_rotated[:, 3:] = np.array(self.M @ coords_cart[:, 3:].T).T
        if verbose is True:
            print("Performing gnomonic projection...")
        gnomonic_coords = cartesianToGnomonic(coords_cart_rotated)
        # Projected positions/velocities plus the test orbit's own elements are
        # written back onto the ephemeris DataFrame in place.
        ephemeris["theta_x_deg"] = np.degrees(gnomonic_coords[:, 0])
        ephemeris["theta_y_deg"] = np.degrees(gnomonic_coords[:, 1])
        ephemeris["vtheta_x_deg"] = np.degrees(gnomonic_coords[:, 2])
        ephemeris["vtheta_y_deg"] = np.degrees(gnomonic_coords[:, 3])
        ephemeris["test_obj_x"] = self.elements[0]
        ephemeris["test_obj_y"] = self.elements[1]
        ephemeris["test_obj_z"] = self.elements[2]
        ephemeris["test_obj_vx"] = self.elements[3]
        ephemeris["test_obj_vy"] = self.elements[4]
        ephemeris["test_obj_vz"] = self.elements[5]
        if verbose is True:
            print("Done.")
            print("")
return | 37.779279 | 124 | 0.582211 | import numpy as np
from .utils import _checkTime
from .vectors import calcNae
from .vectors import calcDelta
from .vectors import calcXae
from .vectors import calcXa
from .vectors import calcNhat
from .vectors import calcR1
from .vectors import calcR2
from .projections import cartesianToGnomonic
from .coordinates import transformCoordinates
__all__ = ["TestOrbit"]
class TestOrbit:
    """Rotation-matrix helper for a guessed heliocentric distance/velocity."""

    def __init__(self, elements, epoch):
        """Store the test orbit's Cartesian elements and epoch.

        ``_checkTime`` validates that ``epoch`` is an astropy Time object.
        """
        _checkTime(epoch, "epoch")
        # Cartesian ecliptic elements: positions (AU) and velocities (AU/day).
        self.elements = elements
        self.epoch = epoch
    def prepare(self, verbose=True):
        """Calculate and store the rotation matrices n_hat, R1, R2 and M."""
        if verbose is True:
            print("Calculating vector normal to plane of orbit...")
        self.n_hat = calcNhat(self.elements[:3])
        if verbose is True:
            print("Calculating R1 rotation matrix...")
        self.R1 = calcR1(self.elements[:3], self.n_hat)
        # NOTE(review): [0] indexing suggests R1 @ position is 2-D — confirm calcR1.
        self.x_a_xy = np.array(self.R1 @ self.elements[:3])[0]
        if verbose is True:
            print("Calculating R2 rotation matrix...")
        self.R2 = calcR2(self.x_a_xy)
        if verbose is True:
            print("Calculating final rotation matrix...")
        # Combined rotation: into the orbit plane (R1), then onto the x-axis (R2).
        self.M = self.R2 @ self.R1
        if verbose is True:
            print("Done.")
            print("")
        return
    def applyToObservations(self, observations, verbose=True):
        """Project observations into the test orbit's gnomonic frame.

        Adds theta_x_deg/theta_y_deg (and object/test-orbit columns) to the
        observations DataFrame in place.
        """
        if verbose is True:
            print("Applying rotation matrices to observations...")
            print("Converting to ecliptic coordinates...")
        # Prepend a unit radius so the spherical coordinates are (r, lon, lat).
        coords_eq = observations[["RA_deg", "Dec_deg"]].values
        coords_eq = np.hstack([np.ones((len(coords_eq), 1)), coords_eq])
        coords_ec = transformCoordinates(coords_eq,
                                         "equatorial",
                                         "ecliptic",
                                         representation_in="spherical",
                                         representation_out="spherical"
        )
        if verbose is True:
            print("Calculating object to observer unit vector...")
        n_ae = calcNae(coords_ec[:, 1:3])
        x_e = observations[["obs_x", "obs_y", "obs_z"]].values
        if verbose is True:
            print("Calculating object to observer distance assuming r = {} AU...".format(np.linalg.norm(self.elements[:3])))
        delta = np.zeros(len(n_ae))
        for i in range(len(delta)):
            delta[i] = calcDelta(np.linalg.norm(self.elements[:3]), x_e[i, :], n_ae[i, :])
        if verbose is True:
            print("Calculating object to observer position vector...")
        x_ae = np.zeros([len(delta), 3])
        for i, (delta_i, n_ae_i) in enumerate(zip(delta, n_ae)):
            x_ae[i] = calcXae(delta_i, n_ae_i)
        if verbose is True:
            print("Calculating heliocentric object position vector...")
        x_a = np.zeros([len(x_ae), 3])
        for i, (x_ae_i, x_e_i) in enumerate(zip(x_ae, x_e)):
            x_a[i] = calcXa(x_ae_i, x_e_i)
        if verbose is True:
            print("Applying rotation matrix M to heliocentric object position vector...")
        coords_cart_rotated = np.array(self.M @ x_a.T).T
        if verbose is True:
            print("Performing gnomonic projection...")
        gnomonic_coords = cartesianToGnomonic(coords_cart_rotated)
        # Write results back onto the observations DataFrame (in place).
        observations["obj_x"] = x_a[:, 0]
        observations["obj_y"] = x_a[:, 1]
        observations["obj_z"] = x_a[:, 2]
        observations["theta_x_deg"] = np.degrees(gnomonic_coords[:, 0])
        observations["theta_y_deg"] = np.degrees(gnomonic_coords[:, 1])
        observations["test_obj_x"] = self.elements[0]
        observations["test_obj_y"] = self.elements[1]
        observations["test_obj_z"] = self.elements[2]
        observations["test_obj_vx"] = self.elements[3]
        observations["test_obj_vy"] = self.elements[4]
        observations["test_obj_vz"] = self.elements[5]
        if verbose is True:
            print("Done.")
            print("")
        return
    def applyToEphemeris(self, ephemeris, verbose=True):
        """Project a THOR ephemeris into the test orbit's gnomonic frame.

        Adds theta_x_deg/theta_y_deg/vtheta_x_deg/vtheta_y_deg and the test
        orbit's elements to the ephemeris DataFrame in place.
        """
        # NOTE(review): .values is assumed to return a copy (it is mutated below).
        coords_cart = ephemeris[["obj_x", "obj_y", "obj_z", "obj_vx", "obj_vy", "obj_vz"]].values
        coords_cart_rotated = np.zeros_like(coords_cart)
        if verbose is True:
            print("Applying rotation matrix M to heliocentric object position vector...")
        coords_cart_rotated[:, :3] = np.array(self.M @ coords_cart[:, :3].T).T
        if verbose is True:
            print("Applying rotation matrix M to heliocentric object velocity vector...")
        # Velocity relative to the test orbit, then rotated into the projected frame.
        coords_cart[:, 3:] = coords_cart[:, 3:] - self.elements[3:].reshape(1, -1)
        coords_cart_rotated[:, 3:] = np.array(self.M @ coords_cart[:, 3:].T).T
        if verbose is True:
            print("Performing gnomonic projection...")
        gnomonic_coords = cartesianToGnomonic(coords_cart_rotated)
        ephemeris["theta_x_deg"] = np.degrees(gnomonic_coords[:, 0])
        ephemeris["theta_y_deg"] = np.degrees(gnomonic_coords[:, 1])
        ephemeris["vtheta_x_deg"] = np.degrees(gnomonic_coords[:, 2])
        ephemeris["vtheta_y_deg"] = np.degrees(gnomonic_coords[:, 3])
        ephemeris["test_obj_x"] = self.elements[0]
        ephemeris["test_obj_y"] = self.elements[1]
        ephemeris["test_obj_z"] = self.elements[2]
        ephemeris["test_obj_vx"] = self.elements[3]
        ephemeris["test_obj_vy"] = self.elements[4]
        ephemeris["test_obj_vz"] = self.elements[5]
        if verbose is True:
            print("Done.")
            print("")
return | true | true |
f715fa3d62c55bf4f7f70f4b2e9a10454d261c5c | 2,848 | py | Python | python/test/testutil.py | AppScale/appengine-pipelines | 277394648dac3e8214677af898935d07399ac8e1 | [
"Apache-2.0"
] | 82 | 2015-01-13T03:24:32.000Z | 2021-10-09T04:08:27.000Z | python/test/testutil.py | AppScale/appengine-pipelines | 277394648dac3e8214677af898935d07399ac8e1 | [
"Apache-2.0"
] | 57 | 2015-01-27T00:12:36.000Z | 2020-10-30T16:47:05.000Z | python/test/testutil.py | AppScale/appengine-pipelines | 277394648dac3e8214677af898935d07399ac8e1 | [
"Apache-2.0"
] | 58 | 2015-01-22T21:32:26.000Z | 2021-10-09T04:08:19.000Z | #!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test utilities for the Google App Engine Pipeline API."""
# Code originally from:
# http://code.google.com/p/pubsubhubbub/source/browse/trunk/hub/testutil.py
import logging
import os
import sys
import tempfile
class TestSetupMixin(object):
  """Mixin that builds a fully stubbed App Engine test environment.

  setUp() injects fake app-identity environment variables and activates a
  testbed with memcache, datastore and taskqueue stubs; tearDown()
  deactivates the testbed.
  """

  # Fake application identifiers injected into the environment for tests.
  TEST_APP_ID = 'my-app-id'
  TEST_VERSION_ID = 'my-version.1234'
  def setUp(self):
    """Activate an App Engine testbed with memcache/datastore/taskqueue stubs."""
    super(TestSetupMixin, self).setUp()
    # Imported lazily so the google.appengine SDK is only needed at test time.
    from google.appengine.api import apiproxy_stub_map
    from google.appengine.api import memcache
    from google.appengine.api import queueinfo
    from google.appengine.datastore import datastore_stub_util
    from google.appengine.ext import testbed
    from google.appengine.ext.testbed import TASKQUEUE_SERVICE_NAME
    before_level = logging.getLogger().getEffectiveLevel()
    # Environment variables the SDK reads to identify the "running" app.
    os.environ['APPLICATION_ID'] = self.TEST_APP_ID
    os.environ['CURRENT_VERSION_ID'] = self.TEST_VERSION_ID
    os.environ['HTTP_HOST'] = '%s.appspot.com' % self.TEST_APP_ID
    os.environ['DEFAULT_VERSION_HOSTNAME'] = os.environ['HTTP_HOST']
    os.environ['CURRENT_MODULE_ID'] = 'foo-module'
    try:
      # Level 100 is above CRITICAL (50): silences all logging during noisy setup.
      logging.getLogger().setLevel(100)
      self.testbed = testbed.Testbed()
      self.testbed.activate()
      self.testbed.setup_env(app_id=self.TEST_APP_ID, overwrite=True)
      self.testbed.init_memcache_stub()
      # probability=1 makes the pseudo-random HR consistency policy always apply.
      hr_policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1)
      self.testbed.init_datastore_v3_stub(consistency_policy=hr_policy)
      self.testbed.init_taskqueue_stub()
      # NOTE(review): TASKQUEUE_SERVICE_NAME and root_path appear unused — confirm.
      root_path = os.path.realpath(os.path.dirname(__file__))
      # Actually need to flush, even though we've reallocated. Maybe because the
      # memcache stub's cache is at the module level, not the API stub?
      memcache.flush_all()
    finally:
      logging.getLogger().setLevel(before_level)
    # Register a fake queue.yaml defining a 'default' queue plus the extras below.
    define_queues=['other']
    taskqueue_stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')
    taskqueue_stub.queue_yaml_parser = (
        lambda x: queueinfo.LoadSingleQueue(
            'queue:\n- name: default\n rate: 1/s\n' +
            '\n'.join('- name: %s\n rate: 1/s' % name
                      for name in define_queues)))
  def tearDown(self):
    """Deactivate the testbed activated in setUp()."""
    super(TestSetupMixin, self).tearDown()
    self.testbed.deactivate()
| 34.313253 | 84 | 0.720857 |
import logging
import os
import sys
import tempfile
class TestSetupMixin(object):
  """Mixin that builds a fully stubbed App Engine test environment."""

  # Fake application identifiers injected into the environment for tests.
  TEST_APP_ID = 'my-app-id'
  TEST_VERSION_ID = 'my-version.1234'
  def setUp(self):
    """Activate an App Engine testbed with memcache/datastore/taskqueue stubs."""
    super(TestSetupMixin, self).setUp()
    # Imported lazily so the google.appengine SDK is only needed at test time.
    from google.appengine.api import apiproxy_stub_map
    from google.appengine.api import memcache
    from google.appengine.api import queueinfo
    from google.appengine.datastore import datastore_stub_util
    from google.appengine.ext import testbed
    from google.appengine.ext.testbed import TASKQUEUE_SERVICE_NAME
    before_level = logging.getLogger().getEffectiveLevel()
    # Environment variables the SDK reads to identify the "running" app.
    os.environ['APPLICATION_ID'] = self.TEST_APP_ID
    os.environ['CURRENT_VERSION_ID'] = self.TEST_VERSION_ID
    os.environ['HTTP_HOST'] = '%s.appspot.com' % self.TEST_APP_ID
    os.environ['DEFAULT_VERSION_HOSTNAME'] = os.environ['HTTP_HOST']
    os.environ['CURRENT_MODULE_ID'] = 'foo-module'
    try:
      # Level 100 is above CRITICAL (50): silences all logging during noisy setup.
      logging.getLogger().setLevel(100)
      self.testbed = testbed.Testbed()
      self.testbed.activate()
      self.testbed.setup_env(app_id=self.TEST_APP_ID, overwrite=True)
      self.testbed.init_memcache_stub()
      # probability=1 makes the pseudo-random HR consistency policy always apply.
      hr_policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1)
      self.testbed.init_datastore_v3_stub(consistency_policy=hr_policy)
      self.testbed.init_taskqueue_stub()
      # NOTE(review): TASKQUEUE_SERVICE_NAME and root_path appear unused — confirm.
      root_path = os.path.realpath(os.path.dirname(__file__))
      # memcache stub's cache is at the module level, not the API stub?
      memcache.flush_all()
    finally:
      logging.getLogger().setLevel(before_level)
    # Register a fake queue.yaml defining a 'default' queue plus the extras below.
    define_queues=['other']
    taskqueue_stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')
    taskqueue_stub.queue_yaml_parser = (
        lambda x: queueinfo.LoadSingleQueue(
            'queue:\n- name: default\n rate: 1/s\n' +
            '\n'.join('- name: %s\n rate: 1/s' % name
                      for name in define_queues)))
  def tearDown(self):
    """Deactivate the testbed activated in setUp()."""
    super(TestSetupMixin, self).tearDown()
    self.testbed.deactivate()
| true | true |
f715fb59542e094790abdec20a4091318946f4e3 | 1,283 | py | Python | ParserTest/ParserTest.py | isaacrez/ShowdownParser | 965d5b35968978ad5101f3df3deede3219284154 | [
"MIT"
] | null | null | null | ParserTest/ParserTest.py | isaacrez/ShowdownParser | 965d5b35968978ad5101f3df3deede3219284154 | [
"MIT"
] | null | null | null | ParserTest/ParserTest.py | isaacrez/ShowdownParser | 965d5b35968978ad5101f3df3deede3219284154 | [
"MIT"
] | null | null | null |
import unittest
from ParserTest.TestUtil import *
class TestParserMethods(unittest.TestCase):
    """Scenario tests that drive the parser through simulated battles."""

    # Field indices used when inspecting parser output rows.
    DIRECT_KOs_ID = 2
    PASSIVE_KOs_ID = 3
    DEATHS_ID = 4

    def test_direct_KO(self):
        """A damaging move against the only opposing Pokemon."""
        roster = {
            "Raichu-Alola": ["p1", "Stokin' Dude!"],
            "Magikarp": ["p2", "A Karp"]
        }
        sim = ParserSimulator(roster)
        sim.load_all()
        sim.switch_in_all()
        sim.move("Stokin' Dude!", "Thunderbolt", "A Karp")
        sim.damage("A Karp")

    def test_toxic_spikes(self):
        """Entry-hazard poison damage that eventually faints a switch-in."""
        roster = {
            "Toxapex": ["p1", "The Worst"],
            "Magikarp": ["p2", "Sushi Incarnate"],
            "Pichu": ["p2", "Baby Pikachu"]
        }
        sim = ParserSimulator(roster)
        sim.load_all()
        sim.switch_in_species("Toxapex")
        sim.switch_in_species("Magikarp")
        sim.move("The Worst", "Toxic Spikes", "Sushi Incarnate")
        sim.move("Sushi Incarnate", "Splash", "The Worst")
        sim.switch_in_species("Pichu")
        sim.damage("Baby Pikachu", "psn")
        sim.faint("Baby Pikachu")

    def test_stealth_rocks(self):
        """Placeholder: stealth-rock scenario not implemented yet."""
        pass
# Run the parser scenario tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 27.891304 | 70 | 0.600156 |
import unittest
from ParserTest.TestUtil import *
class TestParserMethods(unittest.TestCase):
    """Scenario tests that drive the parser through simulated battles."""
    # Field indices used when inspecting parser output rows.
    DIRECT_KOs_ID = 2
    PASSIVE_KOs_ID = 3
    DEATHS_ID = 4
    def test_direct_KO(self):
        """A damaging move against the only opposing Pokemon."""
        pokemon_data = {
            "Raichu-Alola": ["p1", "Stokin' Dude!"],
            "Magikarp": ["p2", "A Karp"]
        }
        simulator = ParserSimulator(pokemon_data)
        simulator.load_all()
        simulator.switch_in_all()
        simulator.move("Stokin' Dude!", "Thunderbolt", "A Karp")
        simulator.damage("A Karp")
    def test_toxic_spikes(self):
        """Entry-hazard poison damage that eventually faints a switch-in."""
        pokemon_data = {
            "Toxapex": ["p1", "The Worst"],
            "Magikarp": ["p2", "Sushi Incarnate"],
            "Pichu": ["p2", "Baby Pikachu"]
        }
        simulator = ParserSimulator(pokemon_data)
        simulator.load_all()
        simulator.switch_in_species("Toxapex")
        simulator.switch_in_species("Magikarp")
        simulator.move("The Worst", "Toxic Spikes", "Sushi Incarnate")
        simulator.move("Sushi Incarnate", "Splash", "The Worst")
        simulator.switch_in_species("Pichu")
        simulator.damage("Baby Pikachu", "psn")
        simulator.faint("Baby Pikachu")
    def test_stealth_rocks(self):
        # TODO: implement the stealth-rock scenario.
        pass
# Run the parser scenario tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| true | true |
f715fcdc9f378810a87d7cb126f42c12bd2af0f1 | 1,256 | py | Python | gan_test.py | Aitical/ADspeech2face | 2e811ff8cc7333729f4b77d1b1067296253e8e38 | [
"MIT"
] | 1 | 2022-01-27T14:19:04.000Z | 2022-01-27T14:19:04.000Z | gan_test.py | Aitical/ADspeech2face | 2e811ff8cc7333729f4b77d1b1067296253e8e38 | [
"MIT"
] | null | null | null | gan_test.py | Aitical/ADspeech2face | 2e811ff8cc7333729f4b77d1b1067296253e8e38 | [
"MIT"
] | null | null | null | import os
import glob
import torch
import torchvision.utils as vutils
import webrtcvad
from mfcc import MFCC
from utils import voice2face
from tqdm import tqdm
import sys
from parse_config import get_model
import importlib
# initialization
# Voice activity detector (webrtcvad aggressiveness mode 2 on the 0-3 scale).
vad_obj = webrtcvad.Vad(2)
# MFCC feature extractor for 16 kHz audio (64 filterbanks, 25 ms windows).
mfc_obj = MFCC(nfilt=64, lowerf=20., upperf=7200., samprate=16000, nfft=1024, wlen=0.025)
# Usage: python gan_test.py <config_name> <command>
config_name = sys.argv[1]
command = sys.argv[2]
# Load model/dataset settings from configs/<config_name>.py.
model_config = importlib.import_module(f'configs.{config_name}')
dataset_config = model_config.dataset_config
model_config.generator['pretrained'] = True
e_net = get_model(model_config.voice_encoder)
g_net = get_model(model_config.generator)
# Collect every test utterance matching <test_path>/*/*/*.wav.
voice_path = os.path.join(dataset_config['test_path'], '*/*/*.wav')
voice_list = glob.glob(voice_path)
for filename in tqdm(voice_list):
    face_image = voice2face(e_net, g_net, filename, vad_obj, mfc_obj, stylegan=True)
    # Take the first generated image — voice2face presumably returns a batch.
    face = face_image[0]
    wav_file_path, wav_file_name = os.path.split(filename)
    # Mirror the voxceleb directory layout under voxceleb_face.
    face_name = wav_file_name.replace('.wav', f'_{command}.png')
    face_path = wav_file_path.replace('voxceleb', 'voxceleb_face')
    os.makedirs(face_path, exist_ok=True)
    vutils.save_image(face.detach().clamp(-1, 1),
                      os.path.join(face_path, face_name), normalize=True)
| 33.052632 | 89 | 0.755573 | import os
import glob
import torch
import torchvision.utils as vutils
import webrtcvad
from mfcc import MFCC
from utils import voice2face
from tqdm import tqdm
import sys
from parse_config import get_model
import importlib
# Voice activity detector (webrtcvad aggressiveness mode 2 on the 0-3 scale).
vad_obj = webrtcvad.Vad(2)
# MFCC feature extractor for 16 kHz audio (64 filterbanks, 25 ms windows).
mfc_obj = MFCC(nfilt=64, lowerf=20., upperf=7200., samprate=16000, nfft=1024, wlen=0.025)
# Usage: python gan_test.py <config_name> <command>
config_name = sys.argv[1]
command = sys.argv[2]
# Load model/dataset settings from configs/<config_name>.py.
model_config = importlib.import_module(f'configs.{config_name}')
dataset_config = model_config.dataset_config
model_config.generator['pretrained'] = True
e_net = get_model(model_config.voice_encoder)
g_net = get_model(model_config.generator)
# Collect every test utterance matching <test_path>/*/*/*.wav.
voice_path = os.path.join(dataset_config['test_path'], '*/*/*.wav')
voice_list = glob.glob(voice_path)
for filename in tqdm(voice_list):
    face_image = voice2face(e_net, g_net, filename, vad_obj, mfc_obj, stylegan=True)
    # Take the first generated image — voice2face presumably returns a batch.
    face = face_image[0]
    wav_file_path, wav_file_name = os.path.split(filename)
    # Mirror the voxceleb directory layout under voxceleb_face.
    face_name = wav_file_name.replace('.wav', f'_{command}.png')
    face_path = wav_file_path.replace('voxceleb', 'voxceleb_face')
    os.makedirs(face_path, exist_ok=True)
    vutils.save_image(face.detach().clamp(-1, 1),
                      os.path.join(face_path, face_name), normalize=True)
| true | true |
f715fde92dd9503f60b1a71b39a46e6d2f9e42ad | 8,020 | py | Python | yatsm/cache.py | bullocke/yatsm_nrt | b0ded56032bf9f9dcdf6b7b749f6554ade56de1e | [
"MIT"
] | 2 | 2018-04-25T02:10:30.000Z | 2021-07-30T03:57:49.000Z | yatsm/cache.py | bullocke/yatsm_nrt | b0ded56032bf9f9dcdf6b7b749f6554ade56de1e | [
"MIT"
] | null | null | null | yatsm/cache.py | bullocke/yatsm_nrt | b0ded56032bf9f9dcdf6b7b749f6554ade56de1e | [
"MIT"
] | 1 | 2017-04-01T16:11:52.000Z | 2017-04-01T16:11:52.000Z | """ Functions related to writing to and retrieving from cache files
"""
import os
import numpy as np
from log_yatsm import logger
# Key under which the list of image IDs is stored inside cache .npz files.
_image_ID_str = 'image_IDs'
def get_line_cache_name(dataset_config, n_images, row, nbands):
    """Build the cache filename for one line of the dataset.

    Args:
        dataset_config (dict): configuration information about the dataset
        n_images (int): number of images in dataset
        row (int): line of the dataset for output
        nbands (int): number of bands in dataset

    Returns:
        str: filename of the cache file, or None when no 'cache_line_dir'
            is configured
    """
    cache_dir = dataset_config.get('cache_line_dir')
    if not cache_dir:
        return None
    basename = 'yatsm_r%i_n%i_b%i.npy.npz' % (row, n_images, nbands)
    return os.path.join(cache_dir, basename)
def get_line_cache_pattern(row, nbands, regex=False):
    """Return a filename pattern matching all cache files for one row.

    Useful for finding cache files of a line regardless of how many images
    they contain.

    Args:
        row (int): line of the dataset for output
        nbands (int): number of bands in dataset
        regex (bool, optional): return a regular expression instead of glob
            style (default: False)

    Returns:
        str: filename pattern for cache files from line ``row``
    """
    if regex:
        wildcard = '.*'
    else:
        wildcard = '*'
    return 'yatsm_r{l}_n{w}_b{b}.npy.npz'.format(l=row, w=wildcard, b=nbands)
def test_cache(dataset_config):
    """ Test cache directory for ability to read from or write to

    Args:
        dataset_config (dict): dictionary of dataset configuration options

    Returns:
        tuple: tuple of bools describing ability to read from and write to
            cache directory
    """
    # Try to find / use cache
    read_cache = False
    write_cache = False

    cache_dir = dataset_config.get('cache_line_dir')
    if cache_dir:
        # Test existence
        if os.path.isdir(cache_dir):
            if os.access(cache_dir, os.R_OK):
                read_cache = True
            if os.access(cache_dir, os.W_OK):
                write_cache = True
            if read_cache and not write_cache:
                logger.warning('Cache directory exists but is not writable')
        else:
            # If it doesn't already exist, can we create it?
            try:
                os.makedirs(cache_dir)
            # Catch only filesystem errors; a bare ``except`` would also
            # swallow KeyboardInterrupt/SystemExit.
            except OSError:
                logger.warning('Could not create cache directory')
            else:
                read_cache = True
                write_cache = True

    logger.debug('Attempt reading in from cache directory?: {b}'.format(
        b=read_cache))
    logger.debug('Attempt writing to cache directory?: {b}'.format(
        b=write_cache))

    return read_cache, write_cache
def read_cache_file(cache_filename, image_IDs=None):
    """Load cached ``Y`` data from a cache file.

    If ``image_IDs`` is given and the cache records image IDs, the two are
    compared and None is returned on mismatch. None is also returned when
    the file cannot be opened.

    Args:
        cache_filename (str): cache filename
        image_IDs (iterable, optional): list of image IDs corresponding to
            data in cache file; skipped when not provided (default: None)

    Returns:
        np.ndarray, or None: the cached ``Y`` array, or None on failure
    """
    try:
        cache = np.load(cache_filename)
    except IOError:
        return None

    # Only run the consistency check when both sides are available.
    if image_IDs is not None and _image_ID_str in cache.files:
        if not np.array_equal(image_IDs, cache[_image_ID_str]):
            logger.warning('Cache file data in {f} do not match images '
                           'specified'.format(f=cache_filename))
            return None

    return cache['Y']
def write_cache_file(cache_filename, Y, image_IDs):
    """Write data to a cache file using np.savez_compressed.

    Args:
        cache_filename (str): cache filename
        Y (np.ndarray): data to write to cache file
        image_IDs (iterable): list of image IDs corresponding to data in
            the cache file
    """
    # Store the data and its provenance (image IDs) side by side so that
    # read_cache_file can verify consistency later.
    payload = {'Y': Y, _image_ID_str: image_IDs}
    np.savez_compressed(cache_filename, **payload)
# Cache file updating
def update_cache_file(images, image_IDs,
                      old_cache_filename, new_cache_filename,
                      line, reader):
    """ Modify an existing cache file to contain data within `images`
    This should be useful for updating a set of cache files to reflect
    modifications to the timeseries dataset without completely reading the
    data into another cache file.
    For example, the cache file could be updated to reflect the deletion of
    a misregistered or cloudy image. Another common example would be for
    updating cache files to include newly acquired observations.
    Note that this updater will not handle updating cache files to include
    new bands.
    Args:
        images (iterable): list of new image filenames
        image_IDs (iterable): list of new image identifying strings
        old_cache_filename (str): filename of cache file to update
        new_cache_filename (str): filename of new cache file which includes
            modified data
        line (int): the line of data to be updated
        reader (callable): GDAL or BIP image reader function from
            :mod:`yatsm.io.stack_line_readers`
    Raises:
        ValueError: Raise error if old cache file does not record ``image_IDs``
    """
    images = np.asarray(images)
    image_IDs = np.asarray(image_IDs)
    # Cannot proceed if old cache file doesn't store filenames
    old_cache = np.load(old_cache_filename)
    if _image_ID_str not in old_cache.files:
        # Note the trailing space: these adjacent literals are concatenated,
        # and without it the message read "cache.Old cache file".
        raise ValueError('Cannot update cache. '
                         'Old cache file does not store image IDs.')
    old_IDs = old_cache[_image_ID_str]
    old_Y = old_cache['Y']
    nband, _, ncol = old_Y.shape
    # Create new Y and add in values retained from old cache
    new_Y = np.zeros((nband, image_IDs.size, ncol),
                     dtype=old_Y.dtype.type)
    new_IDs = np.zeros(image_IDs.size, dtype=image_IDs.dtype)
    # Check deletions -- find which indices to retain in new cache
    retain_old = np.where(np.in1d(old_IDs, image_IDs))[0]
    if retain_old.size == 0:
        logger.warning('No image IDs in common in old cache file.')
    else:
        logger.debug('    retaining {r} of {n} images'.format(
            r=retain_old.size, n=old_IDs.size))
        # Find indices of old data to insert into new data
        idx_old_IDs = np.argsort(old_IDs)
        sorted_old_IDs = old_IDs[idx_old_IDs]
        idx_IDs = np.searchsorted(sorted_old_IDs,
                                  image_IDs[np.in1d(image_IDs, old_IDs)])
        retain_old = idx_old_IDs[idx_IDs]
        # Indices to insert into new data
        retain_new = np.where(np.in1d(image_IDs, old_IDs))[0]
        new_Y[:, retain_new, :] = old_Y[:, retain_old, :]
        new_IDs[retain_new] = old_IDs[retain_old]
    # Check additions -- find which indices we need to insert
    insert = np.where(np.in1d(image_IDs, old_IDs, invert=True))[0]
    if retain_old.size == 0 and insert.size == 0:
        raise ValueError('Cannot update cache file -- '
                         'no data retained or added')
    # Read in the remaining data from disk
    if insert.size > 0:
        logger.debug('Inserting {n} new images into cache'.format(
            n=insert.size))
        insert_Y = reader.read_row(images[insert], line)
        new_Y[:, insert, :] = insert_Y
        new_IDs[insert] = image_IDs[insert]
    # Sanity check: every requested image ID must now be present, in order.
    np.testing.assert_equal(new_IDs, image_IDs)
    # Save
    write_cache_file(new_cache_filename, new_Y, image_IDs)
| 33.983051 | 79 | 0.646758 | import os
import numpy as np
from log_yatsm import logger
_image_ID_str = 'image_IDs'
def get_line_cache_name(dataset_config, n_images, row, nbands):
    """Return the cache filename for one line, or None when caching is off.

    The name encodes the row, image count, and band count so a cache built
    from a different image stack is never picked up by mistake.
    """
    cache_dir = dataset_config.get('cache_line_dir')
    if not cache_dir:
        return None
    return os.path.join(
        cache_dir, 'yatsm_r%i_n%i_b%i.npy.npz' % (row, n_images, nbands))
def get_line_cache_pattern(row, nbands, regex=False):
    """Return a glob (default) or regex pattern matching a line's cache files.

    The image-count field is wildcarded so caches built from any number of
    images match.
    """
    if regex:
        wildcard = '.*'
    else:
        wildcard = '*'
    return 'yatsm_r{l}_n{w}_b{b}.npy.npz'.format(l=row, w=wildcard, b=nbands)
def test_cache(dataset_config):
    """Check whether the cache directory can be read from and written to.

    Creates the cache directory when it does not exist yet.

    Args:
        dataset_config (dict): dataset configuration; only the
            'cache_line_dir' key is consulted

    Returns:
        tuple: (read_cache, write_cache) booleans
    """
    read_cache = False
    write_cache = False
    cache_dir = dataset_config.get('cache_line_dir')
    if cache_dir:
        if os.path.isdir(cache_dir):
            # Directory exists -- probe permissions separately so a
            # read-only cache can still be used for reading.
            if os.access(cache_dir, os.R_OK):
                read_cache = True
            if os.access(cache_dir, os.W_OK):
                write_cache = True
            if read_cache and not write_cache:
                logger.warning('Cache directory exists but is not writable')
        else:
            try:
                os.makedirs(cache_dir)
            except OSError:
                # Narrowed from a bare ``except:`` -- only filesystem
                # failures mean "no cache available"; KeyboardInterrupt
                # and SystemExit now propagate.
                logger.warning('Could not create cache directory')
            else:
                read_cache = True
                write_cache = True
    logger.debug('Attempt reading in from cache directory?: {b}'.format(
        b=read_cache))
    logger.debug('Attempt writing to cache directory?: {b}'.format(
        b=write_cache))
    return read_cache, write_cache
def read_cache_file(cache_filename, image_IDs=None):
    """Return the 'Y' array stored in a cache file, or None if unusable.

    When *image_IDs* is given and the cache records image IDs, the two are
    compared and a mismatching cache is rejected (returns None).
    """
    try:
        cache = np.load(cache_filename)
    except IOError:
        return None

    has_ids = _image_ID_str in cache.files
    if has_ids and image_IDs is not None and \
            not np.array_equal(image_IDs, cache[_image_ID_str]):
        logger.warning('Cache file data in {f} do not match images '
                       'specified'.format(f=cache_filename))
        return None

    return cache['Y']
def write_cache_file(cache_filename, Y, image_IDs):
    """Write *Y* plus the image IDs it was built from to a compressed .npz."""
    payload = {'Y': Y, _image_ID_str: image_IDs}
    np.savez_compressed(cache_filename, **payload)
# Cache file updating
def update_cache_file(images, image_IDs,
                      old_cache_filename, new_cache_filename,
                      line, reader):
    """Rebuild a line cache for a changed image list, reusing old data.

    Columns of 'Y' whose image ID is still requested are copied from the
    old cache; IDs new to the cache are read from disk via *reader*.

    NOTE(review): assumes ``old_cache['Y']`` is shaped
    (nband, n_images, ncol) -- confirm against write_cache_file's callers.
    """
    images = np.asarray(images)
    image_IDs = np.asarray(image_IDs)
    # Cannot proceed if old cache file doesn't store filenames
    old_cache = np.load(old_cache_filename)
    if _image_ID_str not in old_cache.files:
        raise ValueError('Cannot update cache.'
                         'Old cache file does not store image IDs.')
    old_IDs = old_cache[_image_ID_str]
    old_Y = old_cache['Y']
    nband, _, ncol = old_Y.shape
    # Allocate output with one column slot per requested image
    new_Y = np.zeros((nband, image_IDs.size, ncol),
                     dtype=old_Y.dtype.type)
    new_IDs = np.zeros(image_IDs.size, dtype=image_IDs.dtype)
    # Deletions: which old-cache entries survive into the new cache
    retain_old = np.where(np.in1d(old_IDs, image_IDs))[0]
    if retain_old.size == 0:
        logger.warning('No image IDs in common in old cache file.')
    else:
        logger.debug('    retaining {r} of {n} images'.format(
            r=retain_old.size, n=old_IDs.size))
    # Map each retained ID back to its position in the (unsorted) old cache
    idx_old_IDs = np.argsort(old_IDs)
    sorted_old_IDs = old_IDs[idx_old_IDs]
    idx_IDs = np.searchsorted(sorted_old_IDs,
                              image_IDs[np.in1d(image_IDs, old_IDs)])
    retain_old = idx_old_IDs[idx_IDs]
    # Copy retained columns into their positions in the new ordering
    retain_new = np.where(np.in1d(image_IDs, old_IDs))[0]
    new_Y[:, retain_new, :] = old_Y[:, retain_old, :]
    new_IDs[retain_new] = old_IDs[retain_old]
    # Additions: IDs absent from the old cache must be read from disk
    insert = np.where(np.in1d(image_IDs, old_IDs, invert=True))[0]
    if retain_old.size == 0 and insert.size == 0:
        raise ValueError('Cannot update cache file -- '
                         'no data retained or added')
    if insert.size > 0:
        logger.debug('Inserting {n} new images into cache'.format(
            n=insert.size))
        insert_Y = reader.read_row(images[insert], line)
        new_Y[:, insert, :] = insert_Y
        new_IDs[insert] = image_IDs[insert]
    # Sanity check: every requested ID must now be accounted for
    np.testing.assert_equal(new_IDs, image_IDs)
    write_cache_file(new_cache_filename, new_Y, image_IDs)
| true | true |
f715fe7e69213de66aedfbfecdb0bdd840ada5fd | 985 | py | Python | examples/plot_hue.py | mewbak/hypertools | bc2947737be8bd5a6e2a3bdca84132f6fee8989c | [
"MIT"
] | 1,681 | 2017-01-28T00:28:02.000Z | 2022-03-11T00:57:13.000Z | examples/plot_hue.py | mewbak/hypertools | bc2947737be8bd5a6e2a3bdca84132f6fee8989c | [
"MIT"
] | 170 | 2017-01-27T22:59:09.000Z | 2022-02-12T03:47:46.000Z | examples/plot_hue.py | mewbak/hypertools | bc2947737be8bd5a6e2a3bdca84132f6fee8989c | [
"MIT"
] | 180 | 2017-02-01T04:34:42.000Z | 2022-02-22T15:46:23.000Z | # -*- coding: utf-8 -*-
"""
=============================
Grouping data by category
=============================
When plotting, its useful to have a way to color points by some category or
variable. Hypertools does this using the `hue` kwarg, which takes a list
of string category labels or numerical values. If text labels are passed, the
data is restructured according to those labels and plotted in different colors
according to your color palette. If numerical values are passed, the values
are binned (default resolution: 100) and plotted according to your color
palette.
"""
# Code source: Andrew Heusser
# License: MIT
# import
import hypertools as hyp
import numpy as np
# load example data
geo = hyp.load('weights_sample')
data = geo.get_data()

# simulate random groups: one integer label in [0, 1000) per sample row.
# np.random.randint(1000) returns a scalar directly; the original wrapped
# a size-1 array in int(), a conversion deprecated in recent NumPy.
hue = []
for idx, arr in enumerate(data):
    hue.append([int(np.random.randint(1000)) for _ in range(len(arr))])

# plot; numerical hue values are binned into the color palette
geo.plot(fmt='.', hue=hue)
| 26.621622 | 78 | 0.683249 |
import hypertools as hyp
import numpy as np
# load example data
geo = hyp.load('weights_sample')
data = geo.get_data()

# simulate random groups: one integer label in [0, 1000) per sample row.
# np.random.randint(1000) returns a scalar directly; the original wrapped
# a size-1 array in int(), a conversion deprecated in recent NumPy.
hue = []
for idx, arr in enumerate(data):
    hue.append([int(np.random.randint(1000)) for _ in range(len(arr))])

# plot; numerical hue values are binned into the color palette
geo.plot(fmt='.', hue=hue)
| true | true |
f715ff5939535a01e6aa0c240e3f32c7ba477d37 | 1,866 | py | Python | labyrinth_generator.py | ImTheTom/labyrinth-explorer | 56fa7590aa93e11d0f2bc53f58de2194227a4034 | [
"MIT"
] | null | null | null | labyrinth_generator.py | ImTheTom/labyrinth-explorer | 56fa7590aa93e11d0f2bc53f58de2194227a4034 | [
"MIT"
] | null | null | null | labyrinth_generator.py | ImTheTom/labyrinth-explorer | 56fa7590aa93e11d0f2bc53f58de2194227a4034 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
#
# Python Script
#
# Copyleft © Manoel Vilela
#
#
# Default maze dimensions (columns, rows) shared by make_maze and draw_maze.
WIDTH,HEIGHT = 2,3
from random import shuffle, randrange
def make_maze(w=WIDTH, h=HEIGHT):
    """Carve a random maze with a recursive depth-first walk.

    Returns the list of removed walls as (x, y, x_next, y_next) cell
    pairs.  A sentinel border of already-visited cells removes the need
    for bounds checks inside the walk.
    """
    visited = [[0] * w + [1] for _ in range(h)] + [[1] * (w + 1)]
    carved = []

    def walk(x, y):
        visited[x][y] = 1
        neighbours = [(x - 1, y), (x, y + 1), (x + 1, y), (x, y - 1)]
        shuffle(neighbours)
        for nx, ny in neighbours:
            if not visited[nx][ny]:
                carved.append((x, y, nx, ny))
                walk(nx, ny)

    walk(randrange(h), randrange(w))
    return carved
def draw_maze(nowalls, w=WIDTH, h=HEIGHT):
    """Render the maze as a list of text rows.

    *nowalls* is the wall list from make_maze; walls between connected
    cells are blanked before the horizontal and vertical strips are
    interleaved into printable rows.
    """
    ver = [["|  "] * w + ['|'] for _ in range(h)] + [[]]
    hor = [["+--"] * w + ['+'] for _ in range(h + 1)]
    for (x, y, x_n, y_n) in nowalls:
        if x_n == x:
            # Same row: knock out the vertical wall between the cells
            ver[x][max(y, y_n)] = "   "
        if y_n == y:
            # Same column: knock out the horizontal wall between the cells
            hor[max(x, x_n)][y] = "+  "
    arrange = []
    for (a, b) in zip(hor, ver):
        l = ''.join(a + ['\n'] + b).split('\n')
        arrange.extend(l)
    return arrange
def random_replace(maze, block):
    """Place *block* on a random empty (' ') cell of *maze*.

    Retries recursively until an empty cell is hit; rows are strings, so
    the hit row is rebuilt around the new character.
    """
    from random import randint
    row = randint(1, len(maze) - 2)
    col = randint(0, len(maze[0]) - 1)
    if maze[row][col] != ' ':
        return random_replace(maze, block)
    maze[row] = maze[row][:col] + block + maze[row][col + 1:]
    return maze
def translate(maze):
    """Return a copy of *maze* with every wall glyph (-, +, |) replaced
    by 'W', giving a single uniform wall marker."""
    from re import sub
    translated = []
    for row in maze:
        translated.append(sub(r'[\-\+\|]', 'W', row))
    return translated
def draw(maze):
    """Print each maze row prefixed with its right-aligned row number."""
    for number, row in enumerate(maze):
        print('{:>2}'.format(number), row)
def generate(width, height, blocks='EP'):
    """Build, print and return a width x height maze.

    For each character in *blocks* (by default 'E' and 'P') one random
    empty cell is overwritten with that marker.  The maze is echoed to
    stdout and returned with wall glyphs normalised to 'W'.
    """
    maze = draw_maze(make_maze(width, height), width, height)
    for marker in blocks:
        maze = random_replace(maze, marker)
    draw(maze)
    return translate(maze)
if __name__ == '__main__':
    # Bug fix: generate() takes two required positional arguments, so the
    # original zero-argument call raised TypeError.  Use the module-level
    # default dimensions instead.
    generate(WIDTH, HEIGHT)
| 21.448276 | 66 | 0.505895 |
# Default maze dimensions (columns, rows) shared by make_maze and draw_maze.
WIDTH,HEIGHT = 2,3
from random import shuffle, randrange
def make_maze(w=WIDTH, h=HEIGHT):
    """Carve a random maze by recursive depth-first walk; returns the
    list of removed walls as (x, y, x_next, y_next) cell pairs."""
    # Visited grid padded with a sentinel border of 1s -- no bounds checks
    vis = [[0] * w + [1] for _ in range(h)] + [[1] * (w + 1)]
    nowalls = []
    def walk(x, y):
        vis[x][y] = 1
        d = [(x - 1, y), (x, y + 1), (x + 1, y), (x, y - 1)]
        shuffle(d)
        for (x_n, y_n) in d:
            if vis[x_n][y_n]:
                continue
            nowalls.append((x, y, x_n, y_n))
            walk(x_n, y_n)
    walk(randrange(h), randrange(w))
    return(nowalls)
def draw_maze(nowalls, w=WIDTH, h=HEIGHT):
    """Render the maze as a list of text rows; *nowalls* is the wall
    list produced by make_maze."""
    ver = [["|  "] * w + ['|'] for _ in range(h)] + [[]]
    hor = [["+--"] * w + ['+'] for _ in range(h + 1)]
    for (x, y, x_n, y_n) in nowalls:
        if x_n == x:
            # Same row: remove the vertical wall between the cells
            ver[x][max(y, y_n)] = "   "
        if y_n == y:
            # Same column: remove the horizontal wall between the cells
            hor[max(x, x_n)][y] = "+  "
    arrange = []
    for (a, b) in zip(hor, ver):
        l = ''.join(a + ['\n'] + b).split('\n')
        arrange.extend(l)
    return arrange
def random_replace(maze, block):
    """Place *block* on a random empty (' ') cell, retrying until hit."""
    from random import randint
    row = randint(1, len(maze) - 2)
    col = randint(0, len(maze[0]) - 1)
    if maze[row][col] != ' ':
        return random_replace(maze, block)
    maze[row] = maze[row][:col] + block + maze[row][col + 1:]
    return maze
def translate(maze):
    """Replace every wall glyph (-, +, |) in each row with 'W'."""
    from re import sub
    out = []
    for row in maze:
        out.append(sub(r'[\-\+\|]', 'W', row))
    return out
def draw(maze):
    """Print each row of *maze* preceded by its right-aligned row index."""
    for x, line in enumerate(maze):
        print('{:>2}'.format(x), line)
def generate(width,height,blocks='EP'):
    """Build, print and return a maze; one character from *blocks* is
    placed on a random empty cell per marker, and wall glyphs in the
    returned rows are normalised to 'W'."""
    nw = make_maze(width,height)
    maze = draw_maze(nw,width,height)
    for block in blocks:
        maze = random_replace(maze, block)
    draw(maze)
    translated = translate(maze)
    return translated
if __name__ == '__main__':
    # Bug fix: generate() takes two required positional arguments, so the
    # original zero-argument call raised TypeError.  Use the module-level
    # default dimensions instead.
    generate(WIDTH, HEIGHT)
| true | true |
f715ffab1cd92657d37ddc8d113efdafe9821bad | 8,233 | py | Python | preprocess.py | gewoonrik/pullreqs-dnn | dbafd1866c1cd44424d238618e5ca54841c358c0 | [
"MIT"
] | 1 | 2017-02-17T06:51:36.000Z | 2017-02-17T06:51:36.000Z | preprocess.py | gewoonrik/pullreqs-dnn | dbafd1866c1cd44424d238618e5ca54841c358c0 | [
"MIT"
] | null | null | null | preprocess.py | gewoonrik/pullreqs-dnn | dbafd1866c1cd44424d238618e5ca54841c358c0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# (c) 2016 -- onwards Georgios Gousios <gousiosg@gmail.com>, Rik Nijessen <riknijessen@gmail.com>
#
from __future__ import print_function
import pickle
import random
import urllib
import numpy as np
import argparse
from config import *
from code_tokenizer import CodeTokenizer
from my_tokenizer import MyTokenizer
from keras.preprocessing.sequence import pad_sequences
@timeit
def load_pr_csv(file):
    """
    Load a PR dataset, including all engineered features
    :return: A pandas dataframe with all data loaded
    """
    print("Loading pull requests file ", file)
    pullreqs = pd.read_csv(file)
    # NOTE(review): the original called ``pullreqs.set_index([...])``
    # without assigning the result (and without inplace=True), so it was
    # a no-op.  Downstream code (load_data) reads 'project_name' and
    # 'github_id' as regular columns, so the dead call is removed rather
    # than "fixed".
    return pullreqs
def ensure_diffs():
    """
    Make sure that the PR diffs have been downloaded in the appropriate dir
    """
    if not os.path.exists(DIFFS_DIR):
        print("Downloading pull request diffs")
        import tarfile
        # NOTE(review): urllib.urlretrieve is the Python 2 API (this file
        # also pickles into text-mode files); under Python 3 this would
        # need urllib.request.urlretrieve -- confirm target interpreter.
        urllib.urlretrieve(DIFFS_DATA_URL, DIFFS_FILE)
        tar = tarfile.open(DIFFS_FILE, "r:gz")
        tar.extractall()
        tar.close()
def read_title_and_comments(file):
    """Return the (title, comment) pair stored in a PR text file.

    The first line is the title; the comment is the file content with its
    first two characters stripped.
    """
    # Renamed local from ``str`` to avoid shadowing the builtin.
    contents = open(file).read()
    title = contents.split("\n")[0]
    # NOTE(review): slicing off exactly two characters only removes the
    # whole title when the title is empty; kept as-is to preserve the
    # behaviour the downstream tokenizers were built on.
    comment = contents[2:]
    return title, comment
@timeit
def create_code_tokenizer(code, vocabulary_size):
    """Fit a CodeTokenizer (vocabulary capped at *vocabulary_size*) on *code*."""
    tokenizer = CodeTokenizer(nb_words=vocabulary_size)
    tokenizer.fit_on_texts(code)
    print('Found %s unique tokens.' % len(tokenizer.word_index))
    return tokenizer
def create_text_tokenizer(texts, vocabulary_size):
    """Fit a MyTokenizer (vocabulary capped at *vocabulary_size*) on *texts*."""
    tokenizer = MyTokenizer(nb_words=vocabulary_size)
    tokenizer.fit_on_texts(texts)
    print('Found %s unique tokens.' % len(tokenizer.word_index))
    return tokenizer
@timeit
def tokenize(tokenizer, texts, maxlen):
    """Convert *texts* to integer sequences padded/truncated to *maxlen*."""
    print("Tokenizing")
    return pad_sequences(tokenizer.texts_to_sequences(texts), maxlen=maxlen)
def load_data(pullreqs):
    """Load diff, comment and title texts plus merge labels per PR row.

    Rows whose diff or comment file is missing/unreadable are skipped and
    counted as failures.

    :param pullreqs: dataframe with 'project_name', 'github_id', 'merged'
    :return: (diffs, comments, titles, labels) parallel lists
    """
    diffs = []
    titles = []
    comments = []
    labels = []
    successful = failed = 0
    for i, row in pullreqs.iterrows():
        try:
            name = (row['project_name']).replace('/','@')+"@"+str(row['github_id'])+'.patch'
            diff_file = os.path.join(DIFFS_DIR, name)
            comment_file = os.path.join(TXTS_DIR, name.replace(".patch",".txt"))
            diff = open(diff_file).read()
            title, comment = read_title_and_comments(comment_file)
            diffs.append(diff)
            titles.append(title)
            comments.append(comment)
            labels.append(int(row['merged'] * 1))
            successful += 1
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit propagate; a single bad PR is still skipped and
            # counted, keeping the original best-effort behaviour.
            failed += 1
    print("%s diffs loaded, %s diffs failed" % (successful, failed), end='\r')
    print("")
    return diffs, comments, titles, labels
@timeit
def create_dataset(prefix="default",
                   diff_vocabulary_size=20000,
                   comment_vocabulary_size=20000,
                   title_vocabulary_size=20000,
                   max_diff_length=100,
                   max_comment_length=100,
                   max_title_length=100):
    """
    Create a dataset for further processing

    :param prefix: Name for the dataset
    :param diff_vocabulary_size: (Max) size of the diff vocabulary
    :param comment_vocabulary_size: (Max) size of the comment vocabulary
    :param title_vocabulary_size: (Max) size of the title vocabulary
    :param max_diff_length: Maximum length of the input diff sequences
    :param max_comment_length: Maximum length of the input comment sequences
    :param max_title_length: Maximum length of the input title sequences
    :return: Tokenized train/validation/test tensors, labels, and the
        config dict of the arguments used to produce them
    """
    # Must stay the first statement: snapshots only the call arguments.
    config = locals()
    pullreqs_train = load_pr_csv(train_csv_file % prefix)
    pullreqs_test = load_pr_csv(test_csv_file % prefix)
    pullreqs_validation = load_pr_csv(validation_csv_file % prefix)
    ensure_diffs()
    tr_diffs, tr_comments, tr_titles, tr_labels = load_data(pullreqs_train)
    val_diffs, val_comments, val_titles, val_labels = load_data(pullreqs_validation)
    te_diffs, te_comments, te_titles, te_labels = load_data(pullreqs_test)
    # Tokenizers are fit on train+validation only, so test data stays unseen.
    code_tokenizer = create_code_tokenizer(tr_diffs+val_diffs, diff_vocabulary_size)
    diff_train = tokenize(code_tokenizer, tr_diffs, max_diff_length)
    diff_val = tokenize(code_tokenizer, val_diffs, max_diff_length)
    diff_test = tokenize(code_tokenizer, te_diffs, max_diff_length)
    # NOTE(review): the *_val lines below tokenize with code_tokenizer;
    # this looks like a bug (comment_/title_tokenizer expected) but is
    # preserved because saved models depend on the produced tensors.
    comment_tokenizer = create_text_tokenizer(tr_comments+val_comments, comment_vocabulary_size)
    comment_train = tokenize(comment_tokenizer, tr_comments, max_comment_length)
    comment_val = tokenize(code_tokenizer, val_comments, max_comment_length)
    comment_test = tokenize(comment_tokenizer, te_comments, max_comment_length)
    title_tokenizer = create_text_tokenizer(tr_titles+val_titles, title_vocabulary_size)
    title_train = tokenize(title_tokenizer, tr_titles, max_title_length)
    title_val = tokenize(code_tokenizer, val_titles, max_title_length)
    title_test = tokenize(title_tokenizer, te_titles, max_title_length)
    y_train = np.asarray(tr_labels)
    y_val = np.asarray(val_labels)
    y_test = np.asarray(te_labels)
    print('Shape of diff tensor:', diff_train.shape)
    print('Shape of comment tensor:', comment_train.shape)
    print('Shape of title tensor:', title_train.shape)
    print('Shape of label tensor:', y_train.shape)
    # Persist tokenizers, tensors, labels and config.  The repetitive
    # with/open/pickle blocks are folded into one loop; original save
    # order and the text 'w' mode (Python 2 pickling) are preserved.
    outputs = [
        (diff_vocab_file, code_tokenizer),
        (comment_vocab_file, comment_tokenizer),
        (title_vocab_file, title_tokenizer),
        (diff_train_file, diff_train),
        (comment_train_file, comment_train),
        (title_train_file, title_train),
        (y_train_file, y_train),
        (diff_val_file, diff_val),
        (comment_val_file, comment_val),
        (title_val_file, title_val),
        (y_val_file, y_val),
        (diff_test_file, diff_test),
        (comment_test_file, comment_test),
        (title_test_file, title_test),
        (y_test_file, y_test),
        (config_file, config),
    ]
    for template, obj in outputs:
        with open(template % prefix, 'w') as f:
            pickle.dump(obj, f)
    return diff_train, comment_train, title_train, y_train, diff_val, comment_val, title_val, y_val, diff_test, comment_test, title_test, y_test, config
# Command-line interface: each flag mirrors a create_dataset() parameter.
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', default='default')
parser.add_argument('--diff_vocabulary_size', type=int, default=50000)
parser.add_argument('--comment_vocabulary_size', type=int, default=50000)
parser.add_argument('--title_vocabulary_size', type=int, default=10000)
parser.add_argument('--max_diff_sequence_length', type=int, default=150)
parser.add_argument('--max_comment_sequence_length', type=int, default=150)
parser.add_argument('--max_title_sequence_length', type=int, default=150)
args = parser.parse_args()
if __name__ == '__main__':
    create_dataset(args.prefix, args.diff_vocabulary_size, args.comment_vocabulary_size, args.title_vocabulary_size, args.max_diff_sequence_length, args.max_comment_sequence_length, args.max_title_sequence_length)
| 34.161826 | 213 | 0.703146 |
from __future__ import print_function
import pickle
import random
import urllib
import numpy as np
import argparse
from config import *
from code_tokenizer import CodeTokenizer
from my_tokenizer import MyTokenizer
from keras.preprocessing.sequence import pad_sequences
@timeit
def load_pr_csv(file):
    """Load a PR dataset CSV into a pandas dataframe."""
    print("Loading pull requests file ", file)
    pullreqs = pd.read_csv(file)
    # NOTE(review): set_index() without assignment (or inplace=True) is a
    # no-op; downstream code relies on these staying regular columns.
    pullreqs.set_index(['project_name', 'github_id'])
    return pullreqs
def ensure_diffs():
    """Download and unpack the PR diff archive when DIFFS_DIR is missing."""
    if not os.path.exists(DIFFS_DIR):
        print("Downloading pull request diffs")
        import tarfile
        # Python 2 API; Python 3 would need urllib.request.urlretrieve.
        urllib.urlretrieve(DIFFS_DATA_URL, DIFFS_FILE)
        tar = tarfile.open(DIFFS_FILE, "r:gz")
        tar.extractall()
        tar.close()
def read_title_and_comments(file):
    """Return the (title, comment) pair stored in a PR text file.

    The first line is the title; the comment is the file content with its
    first two characters stripped.
    """
    # Renamed local from ``str`` to avoid shadowing the builtin.
    contents = open(file).read()
    title = contents.split("\n")[0]
    # NOTE(review): slicing off exactly two characters only removes the
    # whole title when the title is empty; kept as-is to preserve the
    # behaviour the downstream tokenizers were built on.
    comment = contents[2:]
    return title, comment
@timeit
def create_code_tokenizer(code, vocabulary_size):
    """Fit a CodeTokenizer (vocabulary capped at *vocabulary_size*) on *code*."""
    tokenizer = CodeTokenizer(nb_words=vocabulary_size)
    tokenizer.fit_on_texts(code)
    word_index = tokenizer.word_index
    print('Found %s unique tokens.' % len(word_index))
    return tokenizer
def create_text_tokenizer(texts, vocabulary_size):
    """Fit a MyTokenizer (vocabulary capped at *vocabulary_size*) on *texts*."""
    tokenizer = MyTokenizer(nb_words=vocabulary_size)
    tokenizer.fit_on_texts(texts)
    word_index = tokenizer.word_index
    print('Found %s unique tokens.' % len(word_index))
    return tokenizer
@timeit
def tokenize(tokenizer, texts, maxlen):
    """Convert *texts* to integer sequences padded/truncated to *maxlen*."""
    print("Tokenizing")
    sequences = tokenizer.texts_to_sequences(texts)
    return pad_sequences(sequences, maxlen=maxlen)
def load_data(pullreqs):
    """Load diff, comment and title texts plus merge labels per PR row.

    Rows whose diff or comment file is missing/unreadable are skipped and
    counted as failures.

    :param pullreqs: dataframe with 'project_name', 'github_id', 'merged'
    :return: (diffs, comments, titles, labels) parallel lists
    """
    diffs = []
    titles = []
    comments = []
    labels = []
    successful = failed = 0
    for i, row in pullreqs.iterrows():
        try:
            name = (row['project_name']).replace('/','@')+"@"+str(row['github_id'])+'.patch'
            diff_file = os.path.join(DIFFS_DIR, name)
            comment_file = os.path.join(TXTS_DIR, name.replace(".patch",".txt"))
            diff = open(diff_file).read()
            title, comment = read_title_and_comments(comment_file)
            diffs.append(diff)
            titles.append(title)
            comments.append(comment)
            labels.append(int(row['merged'] * 1))
            successful += 1
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit propagate; a single bad PR is still skipped and
            # counted, keeping the original best-effort behaviour.
            failed += 1
    print("%s diffs loaded, %s diffs failed" % (successful, failed), end='\r')
    print("")
    return diffs, comments, titles, labels
@timeit
def create_dataset(prefix="default",
                   diff_vocabulary_size=20000,
                   comment_vocabulary_size=20000,
                   title_vocabulary_size=20000,
                   max_diff_length=100,
                   max_comment_length=100,
                   max_title_length=100):
    """Build, tokenize and pickle the train/validation/test datasets.

    Returns the tokenized tensors, labels, and the config dict of the
    arguments used to produce them.
    """
    # Must stay the first statement: snapshots only the call arguments.
    config = locals()
    pullreqs_train = load_pr_csv(train_csv_file % prefix)
    pullreqs_test = load_pr_csv(test_csv_file % prefix)
    pullreqs_validation = load_pr_csv(validation_csv_file % prefix)
    ensure_diffs()
    tr_diffs, tr_comments, tr_titles, tr_labels = load_data(pullreqs_train)
    val_diffs, val_comments, val_titles, val_labels = load_data(pullreqs_validation)
    te_diffs, te_comments, te_titles, te_labels = load_data(pullreqs_test)
    # Tokenizers are fit on train+validation only, so test data stays unseen.
    code_tokenizer = create_code_tokenizer(tr_diffs+val_diffs, diff_vocabulary_size)
    diff_train = tokenize(code_tokenizer, tr_diffs, max_diff_length)
    diff_val = tokenize(code_tokenizer, val_diffs, max_diff_length)
    diff_test = tokenize(code_tokenizer, te_diffs, max_diff_length)
    comment_tokenizer = create_text_tokenizer(tr_comments+val_comments, comment_vocabulary_size)
    comment_train = tokenize(comment_tokenizer, tr_comments, max_comment_length)
    # NOTE(review): *_val below use code_tokenizer -- probably a bug
    # (comment_/title_tokenizer expected); preserved as-is.
    comment_val = tokenize(code_tokenizer, val_comments, max_comment_length)
    comment_test = tokenize(comment_tokenizer, te_comments, max_comment_length)
    title_tokenizer = create_text_tokenizer(tr_titles+val_titles, title_vocabulary_size)
    title_train = tokenize(title_tokenizer, tr_titles, max_title_length)
    title_val = tokenize(code_tokenizer, val_titles, max_title_length)
    title_test = tokenize(title_tokenizer, te_titles, max_title_length)
    y_train = np.asarray(tr_labels)
    y_val = np.asarray(val_labels)
    y_test = np.asarray(te_labels)
    print('Shape of diff tensor:', diff_train.shape)
    print('Shape of comment tensor:', comment_train.shape)
    print('Shape of title tensor:', title_train.shape)
    print('Shape of label tensor:', y_train.shape)
    # Persist tokenizers, tensors, labels, config ('w' mode: Python 2).
    with open(diff_vocab_file % prefix, 'w') as f:
        pickle.dump(code_tokenizer, f)
    with open(comment_vocab_file % prefix, 'w') as f:
        pickle.dump(comment_tokenizer, f)
    with open(title_vocab_file % prefix, 'w') as f:
        pickle.dump(title_tokenizer, f)
    with open(diff_train_file % prefix, 'w') as f:
        pickle.dump(diff_train, f)
    with open(comment_train_file % prefix, 'w') as f:
        pickle.dump(comment_train, f)
    with open(title_train_file % prefix, 'w') as f:
        pickle.dump(title_train, f)
    with open(y_train_file % prefix, 'w') as f:
        pickle.dump(y_train, f)
    with open(diff_val_file % prefix, 'w') as f:
        pickle.dump(diff_val, f)
    with open(comment_val_file % prefix, 'w') as f:
        pickle.dump(comment_val, f)
    with open(title_val_file % prefix, 'w') as f:
        pickle.dump(title_val, f)
    with open(y_val_file % prefix, 'w') as f:
        pickle.dump(y_val, f)
    with open(diff_test_file % prefix, 'w') as f:
        pickle.dump(diff_test, f)
    with open(comment_test_file % prefix, 'w') as f:
        pickle.dump(comment_test, f)
    with open(title_test_file % prefix, 'w') as f:
        pickle.dump(title_test, f)
    with open(y_test_file % prefix, 'w') as f:
        pickle.dump(y_test, f)
    with open(config_file % prefix, 'w') as f:
        pickle.dump(config, f)
    return diff_train, comment_train, title_train, y_train, diff_val, comment_val, title_val, y_val, diff_test, comment_test, title_test, y_test, config
# Command-line interface: each flag mirrors a create_dataset() parameter.
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', default='default')
parser.add_argument('--diff_vocabulary_size', type=int, default=50000)
parser.add_argument('--comment_vocabulary_size', type=int, default=50000)
parser.add_argument('--title_vocabulary_size', type=int, default=10000)
parser.add_argument('--max_diff_sequence_length', type=int, default=150)
parser.add_argument('--max_comment_sequence_length', type=int, default=150)
parser.add_argument('--max_title_sequence_length', type=int, default=150)
args = parser.parse_args()
if __name__ == '__main__':
    create_dataset(args.prefix, args.diff_vocabulary_size, args.comment_vocabulary_size, args.title_vocabulary_size, args.max_diff_sequence_length, args.max_comment_sequence_length, args.max_title_sequence_length)
| true | true |
f71601eb739410c4a90886b6aae0725f85a7eaed | 5,879 | py | Python | test/functional/p2p_fingerprint.py | PitTxid/bitgreen | 5168cb2db2a3f9d4f32b14c4224e1f41f0e69566 | [
"MIT"
] | 14 | 2019-08-02T21:00:14.000Z | 2020-06-22T17:23:05.000Z | test/functional/p2p_fingerprint.py | PitTxid/bitgreen | 5168cb2db2a3f9d4f32b14c4224e1f41f0e69566 | [
"MIT"
] | 7 | 2019-08-05T23:43:17.000Z | 2020-07-17T17:26:54.000Z | test/functional/p2p_fingerprint.py | PitTxid/bitgreen | 5168cb2db2a3f9d4f32b14c4224e1f41f0e69566 | [
"MIT"
] | 25 | 2019-05-21T01:59:54.000Z | 2020-10-18T14:09:38.000Z | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various fingerprinting protections.
If a stale block more than a month old or its header are requested by a peer,
the node should pretend that it does not have it to avoid fingerprinting.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv
from test_framework.mininode import (
P2PInterface,
msg_headers,
msg_block,
msg_getdata,
msg_getheaders,
)
from test_framework.test_framework import BitGreenTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
class P2PFingerprintTest(BitGreenTestFramework):
    """Check that stale blocks older than a month are withheld from peers
    (anti-fingerprinting) while recent stale blocks and old active-chain
    blocks are still served."""
    def set_test_params(self):
        """Run a single node on a fresh chain."""
        self.setup_clean_chain = True
        self.num_nodes = 1
    # Build a chain of blocks on top of given one
    def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
        """Return *nblocks* solved blocks chained on top of *prev_hash*."""
        blocks = []
        for _ in range(nblocks):
            coinbase = create_coinbase(prev_height + 1)
            block_time = prev_median_time + 1
            block = create_block(int(prev_hash, 16), coinbase, block_time)
            block.solve()
            blocks.append(block)
            prev_hash = block.hash
            prev_height += 1
            prev_median_time = block_time
        return blocks
    # Send a getdata request for a given block hash
    def send_block_request(self, block_hash, node):
        """Request the full block *block_hash* via a getdata message."""
        msg = msg_getdata()
        msg.inv.append(CInv(2, block_hash))  # 2 == "Block"
        node.send_message(msg)
    # Send a getheaders request for a given single block hash
    def send_header_request(self, block_hash, node):
        """Request the header of *block_hash* via a getheaders message."""
        msg = msg_getheaders()
        msg.hashstop = block_hash
        node.send_message(msg)
    # Check whether last block received from node has a given hash
    def last_block_equals(self, expected_hash, node):
        """True when the last 'block' message carries *expected_hash*."""
        block_msg = node.last_message.get("block")
        return block_msg and block_msg.block.rehash() == expected_hash
    # Check whether last block header received from node has a given hash
    def last_header_equals(self, expected_hash, node):
        """True when the last 'headers' message starts with *expected_hash*."""
        headers_msg = node.last_message.get("headers")
        return (headers_msg and
                headers_msg.headers and
                headers_msg.headers[0].rehash() == expected_hash)
    # Checks that stale blocks timestamped more than a month ago are not served
    # by the node while recent stale blocks and old active chain blocks are.
    # This does not currently test that stale blocks timestamped within the
    # last month but that have over a month's worth of work are also withheld.
    def run_test(self):
        """Exercise the fingerprinting protections end to end."""
        node0 = self.nodes[0].add_p2p_connection(P2PInterface())
        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)
        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata()
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))
        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)
        stale_hash = int(block_hashes[-1], 16)
        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        test_function = lambda: self.last_block_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)
        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        test_function = lambda: self.last_header_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)
        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        tip = self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)[0]
        assert_equal(self.nodes[0].getblockcount(), 14)
        # Send getdata & getheaders to refresh last received getheader message
        block_hash = int(tip, 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()
        # Request for very old stale block should now fail
        self.send_block_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_block_equals(stale_hash, node0)
        # Request for very old stale block header should now fail
        self.send_header_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_header_equals(stale_hash, node0)
        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()
        self.send_block_request(block_hash, node0)
        test_function = lambda: self.last_block_equals(block_hash, node0)
        wait_until(test_function, timeout=3)
        self.send_header_request(block_hash, node0)
        test_function = lambda: self.last_header_equals(block_hash, node0)
        wait_until(test_function, timeout=3)
# Entry point: run the fingerprinting functional test directly.
if __name__ == '__main__':
    P2PFingerprintTest().main()
| 39.456376 | 110 | 0.691274 |
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv
from test_framework.mininode import (
P2PInterface,
msg_headers,
msg_block,
msg_getdata,
msg_getheaders,
)
from test_framework.test_framework import BitGreenTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
class P2PFingerprintTest(BitGreenTestFramework):
    """Check that stale blocks older than a month are withheld from peers
    (anti-fingerprinting) while recent stale blocks and old active-chain
    blocks are still served."""
    def set_test_params(self):
        """Run a single node on a fresh chain."""
        self.setup_clean_chain = True
        self.num_nodes = 1
    def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
        """Return *nblocks* solved blocks chained on top of *prev_hash*."""
        blocks = []
        for _ in range(nblocks):
            coinbase = create_coinbase(prev_height + 1)
            block_time = prev_median_time + 1
            block = create_block(int(prev_hash, 16), coinbase, block_time)
            block.solve()
            blocks.append(block)
            prev_hash = block.hash
            prev_height += 1
            prev_median_time = block_time
        return blocks
    def send_block_request(self, block_hash, node):
        """Request the full block *block_hash* via getdata (CInv type 2)."""
        msg = msg_getdata()
        msg.inv.append(CInv(2, block_hash))
        node.send_message(msg)
    def send_header_request(self, block_hash, node):
        """Request the header of *block_hash* via a getheaders message."""
        msg = msg_getheaders()
        msg.hashstop = block_hash
        node.send_message(msg)
    def last_block_equals(self, expected_hash, node):
        """True when the last 'block' message carries *expected_hash*."""
        block_msg = node.last_message.get("block")
        return block_msg and block_msg.block.rehash() == expected_hash
    def last_header_equals(self, expected_hash, node):
        """True when the last 'headers' message starts with *expected_hash*."""
        headers_msg = node.last_message.get("headers")
        return (headers_msg and
                headers_msg.headers and
                headers_msg.headers[0].rehash() == expected_hash)
    def run_test(self):
        """Exercise the fingerprinting protections end to end."""
        node0 = self.nodes[0].add_p2p_connection(P2PInterface())
        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)
        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata()
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))
        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)
        stale_hash = int(block_hashes[-1], 16)
        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        test_function = lambda: self.last_block_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)
        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        test_function = lambda: self.last_header_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)
        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        tip = self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)[0]
        assert_equal(self.nodes[0].getblockcount(), 14)
        # Send getdata & getheaders to refresh last received getheader message
        block_hash = int(tip, 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()
        # Request for very old stale block should now fail
        self.send_block_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_block_equals(stale_hash, node0)
        # Request for very old stale block header should now fail
        self.send_header_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_header_equals(stale_hash, node0)
        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()
        self.send_block_request(block_hash, node0)
        test_function = lambda: self.last_block_equals(block_hash, node0)
        wait_until(test_function, timeout=3)
        self.send_header_request(block_hash, node0)
        test_function = lambda: self.last_header_equals(block_hash, node0)
        wait_until(test_function, timeout=3)
# Entry point: run the fingerprinting functional test directly.
if __name__ == '__main__':
    P2PFingerprintTest().main()
| true | true |
f71603f6109caf0554c9841dcf750730f5a4c731 | 760 | py | Python | backend/gardenator_backend/urls.py | maany/gardenator | 0dd02a323a71d996aeb970c730a48306c280d29e | [
"Apache-2.0"
] | null | null | null | backend/gardenator_backend/urls.py | maany/gardenator | 0dd02a323a71d996aeb970c730a48306c280d29e | [
"Apache-2.0"
] | null | null | null | backend/gardenator_backend/urls.py | maany/gardenator | 0dd02a323a71d996aeb970c730a48306c280d29e | [
"Apache-2.0"
] | null | null | null | """gardenator_backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Root URL routes for the gardenator backend; only the Django admin site is exposed.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| 34.545455 | 77 | 0.713158 | from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| true | true |
f716050ed51f07345c48ef2000b6e1a8b2a7e2de | 17,065 | py | Python | Ui_polkitex.py | Trapizomba/Polkit-Explorer | 59c9662f07a65b0aa7197418d0036501fd533793 | [
"0BSD"
] | null | null | null | Ui_polkitex.py | Trapizomba/Polkit-Explorer | 59c9662f07a65b0aa7197418d0036501fd533793 | [
"0BSD"
] | null | null | null | Ui_polkitex.py | Trapizomba/Polkit-Explorer | 59c9662f07a65b0aa7197418d0036501fd533793 | [
"0BSD"
] | null | null | null | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'polkitex.ui'
##
## Created by: Qt User Interface Compiler version 6.2.3
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale,
QMetaObject, QObject, QPoint, QRect,
QSize, QTime, QUrl, Qt)
from PySide6.QtGui import (QAction, QBrush, QColor, QConicalGradient,
QCursor, QFont, QFontDatabase, QGradient,
QIcon, QImage, QKeySequence, QLinearGradient,
QPainter, QPalette, QPixmap, QRadialGradient,
QTransform)
from PySide6.QtWidgets import (QApplication, QComboBox, QFrame, QLCDNumber,
QLabel, QMainWindow, QMenu, QMenuBar,
QPlainTextEdit, QSizePolicy, QTabWidget, QToolButton,
QWidget)
class Ui_PolkitExplorer(object):
    """Auto-generated (pyside6-uic) UI definition for the Polkit Explorer main window.

    NOTE(review): this file is normally regenerated from polkitex.ui; the fix
    below (re-escaping the embedded double quotes in three tooltip strings,
    which had lost their escapes and made the file a syntax error) should also
    be applied to the .ui source so it survives recompilation.
    """

    def setupUi(self, PolkitExplorer):
        """Create all widgets, menus and actions and wire up their signals."""
        if not PolkitExplorer.objectName():
            PolkitExplorer.setObjectName(u"PolkitExplorer")
        PolkitExplorer.resize(910, 530)
        PolkitExplorer.setMinimumSize(QSize(910, 530))
        PolkitExplorer.setMaximumSize(QSize(910, 530))
        PolkitExplorer.setTabShape(QTabWidget.Rounded)
        self.actionOpen = QAction(PolkitExplorer)
        self.actionOpen.setObjectName(u"actionOpen")
        font = QFont()
        font.setPointSize(12)
        font.setBold(True)
        self.actionOpen.setFont(font)
        self.actionAbout = QAction(PolkitExplorer)
        self.actionAbout.setObjectName(u"actionAbout")
        self.actionAbout.setFont(font)
        self.actionQuit = QAction(PolkitExplorer)
        self.actionQuit.setObjectName(u"actionQuit")
        self.actionShow_Glossary = QAction(PolkitExplorer)
        self.actionShow_Glossary.setObjectName(u"actionShow_Glossary")
        self.centralwidget = QWidget(PolkitExplorer)
        self.centralwidget.setObjectName(u"centralwidget")
        self.polkitActionDescription = QLabel(self.centralwidget)
        self.polkitActionDescription.setObjectName(u"polkitActionDescription")
        self.polkitActionDescription.setGeometry(QRect(100, 150, 791, 31))
        font1 = QFont()
        font1.setPointSize(11)
        font1.setBold(True)
        self.polkitActionDescription.setFont(font1)
        self.polkitActionDescription.setAutoFillBackground(True)
        self.polkitActionDescription.setFrameShape(QFrame.Box)
        self.polkitActionDescription.setFrameShadow(QFrame.Raised)
        self.polkitActionDescription.setTextFormat(Qt.PlainText)
        self.polkitActionDescription.setScaledContents(False)
        self.polkitActionDescription.setTextInteractionFlags(Qt.TextSelectableByKeyboard|Qt.TextSelectableByMouse)
        self.policyFileGrp = QLabel(self.centralwidget)
        self.policyFileGrp.setObjectName(u"policyFileGrp")
        self.policyFileGrp.setGeometry(QRect(10, 10, 891, 51))
        font2 = QFont()
        font2.setPointSize(10)
        font2.setBold(True)
        self.policyFileGrp.setFont(font2)
        self.policyFileGrp.setFrameShape(QFrame.StyledPanel)
        self.policyFileGrp.setFrameShadow(QFrame.Raised)
        self.policyFileGrp.setTextFormat(Qt.PlainText)
        self.policyFileGrp.setScaledContents(False)
        self.policyFileGrp.setWordWrap(True)
        self.policyFileGrp.setMargin(0)
        self.actionDescriptionGrp = QLabel(self.centralwidget)
        self.actionDescriptionGrp.setObjectName(u"actionDescriptionGrp")
        self.actionDescriptionGrp.setEnabled(True)
        self.actionDescriptionGrp.setGeometry(QRect(10, 70, 891, 131))
        self.actionDescriptionGrp.setFont(font2)
        self.actionDescriptionGrp.setFrameShape(QFrame.StyledPanel)
        self.actionDescriptionGrp.setFrameShadow(QFrame.Raised)
        self.actionDescriptionGrp.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignTop)
        self.actionDescriptionGrp.setMargin(0)
        self.actionComboBox = QComboBox(self.centralwidget)
        self.actionComboBox.setObjectName(u"actionComboBox")
        self.actionComboBox.setGeometry(QRect(100, 110, 791, 29))
        self.actionComboBox.setFont(font2)
        self.policiesPrivsGrp = QLabel(self.centralwidget)
        self.policiesPrivsGrp.setObjectName(u"policiesPrivsGrp")
        self.policiesPrivsGrp.setEnabled(True)
        self.policiesPrivsGrp.setGeometry(QRect(10, 210, 471, 231))
        self.policiesPrivsGrp.setFont(font2)
        self.policiesPrivsGrp.setFrameShape(QFrame.StyledPanel)
        self.policiesPrivsGrp.setFrameShadow(QFrame.Raised)
        self.policiesPrivsGrp.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignTop)
        self.actionsCounterDisplay = QLCDNumber(self.centralwidget)
        self.actionsCounterDisplay.setObjectName(u"actionsCounterDisplay")
        self.actionsCounterDisplay.setGeometry(QRect(20, 110, 71, 71))
        font3 = QFont()
        font3.setPointSize(12)
        font3.setBold(True)
        font3.setKerning(True)
        self.actionsCounterDisplay.setFont(font3)
        self.actionsCounterDisplay.setLayoutDirection(Qt.LeftToRight)
        self.actionsCounterDisplay.setFrameShape(QFrame.Box)
        self.actionsCounterDisplay.setFrameShadow(QFrame.Raised)
        self.actionsCounterDisplay.setDigitCount(3)
        self.actionsCounterDisplay.setSegmentStyle(QLCDNumber.Flat)
        self.loadFileToolBtn = QToolButton(self.centralwidget)
        self.loadFileToolBtn.setObjectName(u"loadFileToolBtn")
        self.loadFileToolBtn.setGeometry(QRect(860, 20, 31, 31))
        self.loadFileToolBtn.setFont(font)
        self.loadFileToolBtn.setFocusPolicy(Qt.StrongFocus)
        self.policyKitFullPath = QLabel(self.centralwidget)
        self.policyKitFullPath.setObjectName(u"policyKitFullPath")
        self.policyKitFullPath.setGeometry(QRect(10, 450, 891, 41))
        sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.policyKitFullPath.sizePolicy().hasHeightForWidth())
        self.policyKitFullPath.setSizePolicy(sizePolicy)
        font4 = QFont()
        font4.setPointSize(10)
        font4.setItalic(True)
        self.policyKitFullPath.setFont(font4)
        self.policyKitFullPath.setFrameShape(QFrame.Box)
        self.policyKitFullPath.setFrameShadow(QFrame.Raised)
        self.policyKitFullPath.setMidLineWidth(1)
        self.policyKitFullPath.setTextFormat(Qt.PlainText)
        self.policyKitFullPath.setMargin(1)
        self.currentAllowActiveLabel = QLabel(self.centralwidget)
        self.currentAllowActiveLabel.setObjectName(u"currentAllowActiveLabel")
        self.currentAllowActiveLabel.setGeometry(QRect(210, 390, 250, 31))
        self.currentAllowActiveLabel.setFont(font2)
        self.currentAllowActiveLabel.setFrameShape(QFrame.Box)
        self.currentAllowActiveLabel.setFrameShadow(QFrame.Raised)
        self.currentAllowActiveLabel.setAlignment(Qt.AlignCenter)
        self.allowInactiveGrp = QLabel(self.centralwidget)
        self.allowInactiveGrp.setObjectName(u"allowInactiveGrp")
        self.allowInactiveGrp.setGeometry(QRect(20, 310, 451, 51))
        self.allowInactiveGrp.setFont(font2)
        self.allowInactiveGrp.setFrameShape(QFrame.StyledPanel)
        self.allowInactiveGrp.setFrameShadow(QFrame.Raised)
        self.allowInactiveGrp.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
        self.allowInactiveGrp.setMargin(10)
        self.currentAllowInactiveLabel = QLabel(self.centralwidget)
        self.currentAllowInactiveLabel.setObjectName(u"currentAllowInactiveLabel")
        self.currentAllowInactiveLabel.setGeometry(QRect(210, 320, 250, 31))
        self.currentAllowInactiveLabel.setFont(font2)
        self.currentAllowInactiveLabel.setFrameShape(QFrame.Box)
        self.currentAllowInactiveLabel.setFrameShadow(QFrame.Raised)
        self.currentAllowInactiveLabel.setAlignment(Qt.AlignCenter)
        self.currentAllowAnyLabel = QLabel(self.centralwidget)
        self.currentAllowAnyLabel.setObjectName(u"currentAllowAnyLabel")
        self.currentAllowAnyLabel.setGeometry(QRect(210, 250, 250, 31))
        self.currentAllowAnyLabel.setFont(font2)
        self.currentAllowAnyLabel.setFrameShape(QFrame.Box)
        self.currentAllowAnyLabel.setFrameShadow(QFrame.Raised)
        self.currentAllowAnyLabel.setAlignment(Qt.AlignCenter)
        self.allowAnyGrp = QLabel(self.centralwidget)
        self.allowAnyGrp.setObjectName(u"allowAnyGrp")
        self.allowAnyGrp.setGeometry(QRect(20, 240, 451, 51))
        self.allowAnyGrp.setFont(font2)
        self.allowAnyGrp.setFrameShape(QFrame.StyledPanel)
        self.allowAnyGrp.setFrameShadow(QFrame.Raised)
        self.allowAnyGrp.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
        self.allowAnyGrp.setMargin(10)
        self.allowActiveGrp = QLabel(self.centralwidget)
        self.allowActiveGrp.setObjectName(u"allowActiveGrp")
        self.allowActiveGrp.setGeometry(QRect(20, 380, 451, 51))
        self.allowActiveGrp.setFont(font2)
        self.allowActiveGrp.setFrameShape(QFrame.StyledPanel)
        self.allowActiveGrp.setFrameShadow(QFrame.Raised)
        self.allowActiveGrp.setMargin(10)
        self.policyKitFileName = QLabel(self.centralwidget)
        self.policyKitFileName.setObjectName(u"policyKitFileName")
        self.policyKitFileName.setGeometry(QRect(160, 20, 691, 31))
        self.policyKitFileName.setMinimumSize(QSize(100, 20))
        self.policyKitFileName.setFont(font2)
        self.policyKitFileName.setAcceptDrops(False)
        self.policyKitFileName.setAutoFillBackground(False)
        self.policyKitFileName.setFrameShape(QFrame.Box)
        self.policyKitFileName.setFrameShadow(QFrame.Raised)
        self.policyKitFileName.setLineWidth(1)
        self.policyKitFileName.setTextFormat(Qt.PlainText)
        self.policyKitFileName.setScaledContents(False)
        self.policyKitFileName.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
        self.policyKitFileName.setMargin(0)
        self.policyKitFileName.setIndent(10)
        self.policyKitFileName.setTextInteractionFlags(Qt.LinksAccessibleByMouse|Qt.TextSelectableByMouse)
        self.pteOutput = QPlainTextEdit(self.centralwidget)
        self.pteOutput.setObjectName(u"pteOutput")
        self.pteOutput.setGeometry(QRect(490, 210, 411, 231))
        self.pteOutput.setFrameShadow(QFrame.Raised)
        self.pteOutput.setUndoRedoEnabled(False)
        self.pteOutput.setTextInteractionFlags(Qt.NoTextInteraction)
        PolkitExplorer.setCentralWidget(self.centralwidget)
        # Explicit z-order of the overlapping widgets.
        self.policiesPrivsGrp.raise_()
        self.actionDescriptionGrp.raise_()
        self.policyFileGrp.raise_()
        self.actionsCounterDisplay.raise_()
        self.actionComboBox.raise_()
        self.polkitActionDescription.raise_()
        self.loadFileToolBtn.raise_()
        self.policyKitFullPath.raise_()
        self.allowInactiveGrp.raise_()
        self.currentAllowInactiveLabel.raise_()
        self.allowAnyGrp.raise_()
        self.allowActiveGrp.raise_()
        self.currentAllowAnyLabel.raise_()
        self.currentAllowActiveLabel.raise_()
        self.pteOutput.raise_()
        self.policyKitFileName.raise_()
        self.menubar = QMenuBar(PolkitExplorer)
        self.menubar.setObjectName(u"menubar")
        self.menubar.setGeometry(QRect(0, 0, 910, 24))
        self.menubar.setFont(font)
        self.menuFile = QMenu(self.menubar)
        self.menuFile.setObjectName(u"menuFile")
        self.menuFile.setFont(font)
        self.menuHelp = QMenu(self.menubar)
        self.menuHelp.setObjectName(u"menuHelp")
        self.menuHelp.setFont(font)
        PolkitExplorer.setMenuBar(self.menubar)
        QWidget.setTabOrder(self.loadFileToolBtn, self.actionComboBox)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuHelp.menuAction())
        self.menuFile.addAction(self.actionOpen)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionQuit)
        self.menuHelp.addSeparator()
        self.menuHelp.addAction(self.actionAbout)
        self.menuHelp.addSeparator()
        self.menuHelp.addAction(self.actionShow_Glossary)
        self.retranslateUi(PolkitExplorer)
        # Signal wiring: slots are implemented on the PolkitExplorer window class.
        self.actionComboBox.currentIndexChanged.connect(PolkitExplorer.actionComboBoxChanged)
        self.actionOpen.triggered.connect(PolkitExplorer.fileOpen)
        self.actionQuit.triggered.connect(PolkitExplorer.fileQuit)
        self.actionAbout.triggered.connect(PolkitExplorer.fileAbout)
        self.actionShow_Glossary.triggered.connect(PolkitExplorer.helpGlossary)
        self.loadFileToolBtn.clicked.connect(PolkitExplorer.fileOpen)
        QMetaObject.connectSlotsByName(PolkitExplorer)
    # setupUi

    def retranslateUi(self, PolkitExplorer):
        """Install the (translatable) texts and tooltips on the widgets."""
        self.actionOpen.setText(QCoreApplication.translate("PolkitExplorer", u"&Open", None))
        self.actionAbout.setText(QCoreApplication.translate("PolkitExplorer", u"&About", None))
        self.actionQuit.setText(QCoreApplication.translate("PolkitExplorer", u"&Quit", None))
        self.actionShow_Glossary.setText(QCoreApplication.translate("PolkitExplorer", u"&Glossary", None))
#if QT_CONFIG(tooltip)
        self.polkitActionDescription.setToolTip(QCoreApplication.translate("PolkitExplorer", u"The Description of the Action as entered in the Policy file loaded. If no description is found this will tell you that fact.", None))
#endif // QT_CONFIG(tooltip)
        self.polkitActionDescription.setText("")
        self.policyFileGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Policy File:", None))
        self.actionDescriptionGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Action(s) & Description:", None))
#if QT_CONFIG(tooltip)
        self.actionComboBox.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p><span style=\" font-weight:600;\">Drop-down list of all the actions within the policy file. Clicking on this will display the drop-down list, or you can use your scrollwheel to browse through them, too.</span></p></body></html>", None))
#endif // QT_CONFIG(tooltip)
        self.policiesPrivsGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Policies:", None))
#if QT_CONFIG(tooltip)
        self.actionsCounterDisplay.setToolTip(QCoreApplication.translate("PolkitExplorer", u"Displays the number of Actions within a Polkit policy file.", None))
#endif // QT_CONFIG(tooltip)
        self.loadFileToolBtn.setText(QCoreApplication.translate("PolkitExplorer", u"...", None))
#if QT_CONFIG(tooltip)
        self.policyKitFullPath.setToolTip(QCoreApplication.translate("PolkitExplorer", u"The full pathname of the currently opened Polkit policy file.", None))
#endif // QT_CONFIG(tooltip)
        self.policyKitFullPath.setText("")
        self.currentAllowActiveLabel.setText("")
#if QT_CONFIG(tooltip)
        # FIX: embedded double quotes must be escaped inside the string literal;
        # the unescaped form was a syntax error.
        self.allowInactiveGrp.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p>\"Inactive\" users are ones who are not directly logged into the system's console. This includes anyone who is logged in remotely, whether it be via ssh, telnet, or even RDP.</p></body></html>", None))
#endif // QT_CONFIG(tooltip)
        self.allowInactiveGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Allow Inactive", None))
        self.currentAllowInactiveLabel.setText("")
        self.currentAllowAnyLabel.setText("")
#if QT_CONFIG(tooltip)
        # FIX: escaped the embedded double quotes (see note above).
        self.allowAnyGrp.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p>If set to \"yes\" will give any user permission to perform the action as described in the Description above. </p></body></html>", None))
#endif // QT_CONFIG(tooltip)
        self.allowAnyGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Allow Any", None))
#if QT_CONFIG(tooltip)
        # FIX: escaped the embedded double quotes (see note above).
        self.allowActiveGrp.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p>\"Active\" users are ones who are directly logged into a system's console, via a locally connected terminal. Users directly logged into a GUI at the system console, for example.</p></body></html>", None))
#endif // QT_CONFIG(tooltip)
        self.allowActiveGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Allow Active", None))
#if QT_CONFIG(tooltip)
        self.policyKitFileName.setToolTip(QCoreApplication.translate("PolkitExplorer", u"The full name of the currently opened Polkit policy file.", None))
#endif // QT_CONFIG(tooltip)
        self.policyKitFileName.setText(QCoreApplication.translate("PolkitExplorer", u"Please open a policy file ->", None))
        self.menuFile.setTitle(QCoreApplication.translate("PolkitExplorer", u"&File", None))
        self.menuHelp.setTitle(QCoreApplication.translate("PolkitExplorer", u"&Help", None))
        pass
    # retranslateUi
| 58.242321 | 344 | 0.73097 |
iveLabel.setFont(font2)
self.currentAllowActiveLabel.setFrameShape(QFrame.Box)
self.currentAllowActiveLabel.setFrameShadow(QFrame.Raised)
self.currentAllowActiveLabel.setAlignment(Qt.AlignCenter)
self.allowInactiveGrp = QLabel(self.centralwidget)
self.allowInactiveGrp.setObjectName(u"allowInactiveGrp")
self.allowInactiveGrp.setGeometry(QRect(20, 310, 451, 51))
self.allowInactiveGrp.setFont(font2)
self.allowInactiveGrp.setFrameShape(QFrame.StyledPanel)
self.allowInactiveGrp.setFrameShadow(QFrame.Raised)
self.allowInactiveGrp.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
self.allowInactiveGrp.setMargin(10)
self.currentAllowInactiveLabel = QLabel(self.centralwidget)
self.currentAllowInactiveLabel.setObjectName(u"currentAllowInactiveLabel")
self.currentAllowInactiveLabel.setGeometry(QRect(210, 320, 250, 31))
self.currentAllowInactiveLabel.setFont(font2)
self.currentAllowInactiveLabel.setFrameShape(QFrame.Box)
self.currentAllowInactiveLabel.setFrameShadow(QFrame.Raised)
self.currentAllowInactiveLabel.setAlignment(Qt.AlignCenter)
self.currentAllowAnyLabel = QLabel(self.centralwidget)
self.currentAllowAnyLabel.setObjectName(u"currentAllowAnyLabel")
self.currentAllowAnyLabel.setGeometry(QRect(210, 250, 250, 31))
self.currentAllowAnyLabel.setFont(font2)
self.currentAllowAnyLabel.setFrameShape(QFrame.Box)
self.currentAllowAnyLabel.setFrameShadow(QFrame.Raised)
self.currentAllowAnyLabel.setAlignment(Qt.AlignCenter)
self.allowAnyGrp = QLabel(self.centralwidget)
self.allowAnyGrp.setObjectName(u"allowAnyGrp")
self.allowAnyGrp.setGeometry(QRect(20, 240, 451, 51))
self.allowAnyGrp.setFont(font2)
self.allowAnyGrp.setFrameShape(QFrame.StyledPanel)
self.allowAnyGrp.setFrameShadow(QFrame.Raised)
self.allowAnyGrp.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
self.allowAnyGrp.setMargin(10)
self.allowActiveGrp = QLabel(self.centralwidget)
self.allowActiveGrp.setObjectName(u"allowActiveGrp")
self.allowActiveGrp.setGeometry(QRect(20, 380, 451, 51))
self.allowActiveGrp.setFont(font2)
self.allowActiveGrp.setFrameShape(QFrame.StyledPanel)
self.allowActiveGrp.setFrameShadow(QFrame.Raised)
self.allowActiveGrp.setMargin(10)
self.policyKitFileName = QLabel(self.centralwidget)
self.policyKitFileName.setObjectName(u"policyKitFileName")
self.policyKitFileName.setGeometry(QRect(160, 20, 691, 31))
self.policyKitFileName.setMinimumSize(QSize(100, 20))
self.policyKitFileName.setFont(font2)
self.policyKitFileName.setAcceptDrops(False)
self.policyKitFileName.setAutoFillBackground(False)
self.policyKitFileName.setFrameShape(QFrame.Box)
self.policyKitFileName.setFrameShadow(QFrame.Raised)
self.policyKitFileName.setLineWidth(1)
self.policyKitFileName.setTextFormat(Qt.PlainText)
self.policyKitFileName.setScaledContents(False)
self.policyKitFileName.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
self.policyKitFileName.setMargin(0)
self.policyKitFileName.setIndent(10)
self.policyKitFileName.setTextInteractionFlags(Qt.LinksAccessibleByMouse|Qt.TextSelectableByMouse)
self.pteOutput = QPlainTextEdit(self.centralwidget)
self.pteOutput.setObjectName(u"pteOutput")
self.pteOutput.setGeometry(QRect(490, 210, 411, 231))
self.pteOutput.setFrameShadow(QFrame.Raised)
self.pteOutput.setUndoRedoEnabled(False)
self.pteOutput.setTextInteractionFlags(Qt.NoTextInteraction)
PolkitExplorer.setCentralWidget(self.centralwidget)
self.policiesPrivsGrp.raise_()
self.actionDescriptionGrp.raise_()
self.policyFileGrp.raise_()
self.actionsCounterDisplay.raise_()
self.actionComboBox.raise_()
self.polkitActionDescription.raise_()
self.loadFileToolBtn.raise_()
self.policyKitFullPath.raise_()
self.allowInactiveGrp.raise_()
self.currentAllowInactiveLabel.raise_()
self.allowAnyGrp.raise_()
self.allowActiveGrp.raise_()
self.currentAllowAnyLabel.raise_()
self.currentAllowActiveLabel.raise_()
self.pteOutput.raise_()
self.policyKitFileName.raise_()
self.menubar = QMenuBar(PolkitExplorer)
self.menubar.setObjectName(u"menubar")
self.menubar.setGeometry(QRect(0, 0, 910, 24))
self.menubar.setFont(font)
self.menuFile = QMenu(self.menubar)
self.menuFile.setObjectName(u"menuFile")
self.menuFile.setFont(font)
self.menuHelp = QMenu(self.menubar)
self.menuHelp.setObjectName(u"menuHelp")
self.menuHelp.setFont(font)
PolkitExplorer.setMenuBar(self.menubar)
QWidget.setTabOrder(self.loadFileToolBtn, self.actionComboBox)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.menuFile.addAction(self.actionOpen)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionQuit)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionAbout)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionShow_Glossary)
self.retranslateUi(PolkitExplorer)
self.actionComboBox.currentIndexChanged.connect(PolkitExplorer.actionComboBoxChanged)
self.actionOpen.triggered.connect(PolkitExplorer.fileOpen)
self.actionQuit.triggered.connect(PolkitExplorer.fileQuit)
self.actionAbout.triggered.connect(PolkitExplorer.fileAbout)
self.actionShow_Glossary.triggered.connect(PolkitExplorer.helpGlossary)
self.loadFileToolBtn.clicked.connect(PolkitExplorer.fileOpen)
QMetaObject.connectSlotsByName(PolkitExplorer)
def retranslateUi(self, PolkitExplorer):
self.actionOpen.setText(QCoreApplication.translate("PolkitExplorer", u"&Open", None))
self.actionAbout.setText(QCoreApplication.translate("PolkitExplorer", u"&About", None))
self.actionQuit.setText(QCoreApplication.translate("PolkitExplorer", u"&Quit", None))
self.actionShow_Glossary.setText(QCoreApplication.translate("PolkitExplorer", u"&Glossary", None))
self.polkitActionDescription.setToolTip(QCoreApplication.translate("PolkitExplorer", u"The Description of the Action as entered in the Policy file loaded. If no description is found this will tell you that fact.", None))
self.polkitActionDescription.setText("")
self.policyFileGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Policy File:", None))
self.actionDescriptionGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Action(s) & Description:", None))
self.actionComboBox.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p><span style=\" font-weight:600;\">Drop-down list of all the actions within the policy file. Clicking on this will display the drop-down list, or you can use your scrollwheel to browse through them, too.</span></p></body></html>", None))
self.policiesPrivsGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Policies:", None))
self.actionsCounterDisplay.setToolTip(QCoreApplication.translate("PolkitExplorer", u"Displays the number of Actions within a Polkit policy file.", None))
self.loadFileToolBtn.setText(QCoreApplication.translate("PolkitExplorer", u"...", None))
self.policyKitFullPath.setToolTip(QCoreApplication.translate("PolkitExplorer", u"The full pathname of the currently opened Polkit policy file.", None))
self.policyKitFullPath.setText("")
self.currentAllowActiveLabel.setText("")
self.allowInactiveGrp.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p>"Inactive" users are ones who are not directly logged into the system's console. This includes anyone who is logged in remotely, whether it be via ssh, telnet, or even RDP.</p></body></html>", None))
#endif // QT_CONFIG(tooltip)
self.allowInactiveGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Allow Inactive", None))
self.currentAllowInactiveLabel.setText("")
self.currentAllowAnyLabel.setText("")
#if QT_CONFIG(tooltip)
self.allowAnyGrp.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p>If set to "yes" will give any user permission to perform the action as described in the Description above. </p></body></html>", None))
#endif // QT_CONFIG(tooltip)
self.allowAnyGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Allow Any", None))
#if QT_CONFIG(tooltip)
self.allowActiveGrp.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p>"Active" users are ones who are directly logged into a system's console, via a locally connected terminal. Users directly logged into a GUI at the system console, for example.</p></body></html>", None))
self.allowActiveGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Allow Active", None))
self.policyKitFileName.setToolTip(QCoreApplication.translate("PolkitExplorer", u"The full name of the currently opened Polkit policy file.", None))
self.policyKitFileName.setText(QCoreApplication.translate("PolkitExplorer", u"Please open a policy file ->", None))
self.menuFile.setTitle(QCoreApplication.translate("PolkitExplorer", u"&File", None))
self.menuHelp.setTitle(QCoreApplication.translate("PolkitExplorer", u"&Help", None))
pass
| true | true |
f71605a096a836f32d317bfc1b1b9c580b670ceb | 2,301 | py | Python | pymatflow/scripts/nebmake.py | DeqiTang/pymatflow | bd8776feb40ecef0e6704ee898d9f42ded3b0186 | [
"MIT"
] | 6 | 2020-03-06T16:13:08.000Z | 2022-03-09T07:53:34.000Z | pymatflow/scripts/nebmake.py | DeqiTang/pymatflow | bd8776feb40ecef0e6704ee898d9f42ded3b0186 | [
"MIT"
] | 1 | 2021-10-02T02:23:08.000Z | 2021-11-08T13:29:37.000Z | pymatflow/scripts/nebmake.py | DeqiTang/pymatflow | bd8776feb40ecef0e6704ee898d9f42ded3b0186 | [
"MIT"
] | 1 | 2021-07-10T16:28:14.000Z | 2021-07-10T16:28:14.000Z | #!/usr/bin/env python
import os
import argparse
from pymatflow.structure.neb import interpolate
from pymatflow.cmd.structflow import read_structure
from pymatflow.cmd.structflow import write_structure
def main():
    """Interpolate intermediate images between an initial and a final structure
    for a NEB calculation.

    Writes each image to <directory>/<NN>/POSCAR, where image 00 is the initial
    structure, image (nimage + 1) is the final structure, and 01..nimage are
    the interpolated intermediates.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--images", type=str, nargs=2,
            required=True,
            help="the initial and final structure file")
    parser.add_argument("-n", "--nimage", type=int, default=None,
            required=True,
            help="number of inter images")
    parser.add_argument("-m", "--moving-atom", type=int, nargs="+",
            required=True,
            help="specifying the moving atoms, index start from 0")
    parser.add_argument("-d", "--directory", type=str, default="./",
            help="directory to put the generated images")
    parser.add_argument("--frac", type=int, default=1,
            choices=[0, 1],
            help="1(default): use faractional, 0: use cartesian")
    # ==============================================================
    args = parser.parse_args()

    initial = read_structure(args.images[0])
    final = read_structure(args.images[1])
    inter_images = interpolate(initial=initial, final=final, nimage=args.nimage, moving_atom=args.moving_atom)

    def _write_image(structure, index):
        # Create <directory>/<NN> with os.makedirs instead of shelling out to
        # "mkdir -p": portable, error-checked, and safe for paths containing
        # spaces or shell metacharacters.
        image_dir = os.path.join(args.directory, "%.2d" % index)
        os.makedirs(image_dir, exist_ok=True)
        write_structure(structure=structure, filepath=os.path.join(image_dir, "POSCAR"), frac=args.frac)

    _write_image(initial, 0)                 # image 00: initial structure
    _write_image(final, args.nimage + 1)     # last image: final structure
    for i, image in enumerate(inter_images):
        _write_image(image, i + 1)           # intermediate images 01..nimage

    print("===========================================\n")
    print("generate inter images for neb calculation\n")
    print("===========================================\n")
    print("-------------------------------------------\n")


if __name__ == "__main__":
    main()
import os
import argparse
from pymatflow.structure.neb import interpolate
from pymatflow.cmd.structflow import read_structure
from pymatflow.cmd.structflow import write_structure
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--images", type=str, nargs=2,
required=True,
help="the initial and final structure file")
parser.add_argument("-n", "--nimage", type=int, default=None,
required=True,
help="number of inter images")
parser.add_argument("-m", "--moving-atom", type=int, nargs="+",
required=True,
help="specifying the moving atoms, index start from 0")
parser.add_argument("-d", "--directory", type=str, default="./",
help="directory to put the generated images")
parser.add_argument("--frac", type=int, default=1,
choices=[0, 1],
help="1(default): use faractional, 0: use cartesian")
args = parser.parse_args()
initial = read_structure(args.images[0])
final = read_structure(args.images[1])
inter_images = interpolate(initial=initial, final=final, nimage=args.nimage, moving_atom=args.moving_atom)
os.system("mkdir -p %s" % os.path.join(args.directory, "%.2d" % (0)))
write_structure(structure=initial, filepath=os.path.join(args.directory, "%.2d/POSCAR" % (0)), frac=args.frac)
os.system("mkdir -p %s" % os.path.join(args.directory, "%.2d" % (args.nimage+1)))
write_structure(structure=final, filepath=os.path.join(args.directory, "%.2d/POSCAR" % (args.nimage+1)), frac=args.frac)
for i in range(len(inter_images)):
os.system("mkdir -p %s" % os.path.join(args.directory, "%.2d" % (i+1)))
write_structure(structure=inter_images[i], filepath=os.path.join(args.directory, "%.2d/POSCAR" % (i+1)), frac=args.frac)
print("===========================================\n")
print("generate inter images for neb calculation\n")
print("===========================================\n")
print("-------------------------------------------\n")
if __name__ == "__main__":
main() | true | true |
f716060870df940910dece369b5b6bf64ae01993 | 13,519 | py | Python | flexget/components/ftp/sftp.py | gjhenrique/Flexget | 2dae4c7e3d002600adcce3b67c399fda115d5ce2 | [
"MIT"
] | null | null | null | flexget/components/ftp/sftp.py | gjhenrique/Flexget | 2dae4c7e3d002600adcce3b67c399fda115d5ce2 | [
"MIT"
] | null | null | null | flexget/components/ftp/sftp.py | gjhenrique/Flexget | 2dae4c7e3d002600adcce3b67c399fda115d5ce2 | [
"MIT"
] | null | null | null | from collections import namedtuple
from itertools import groupby
from pathlib import Path
from typing import List, Optional
from urllib.parse import unquote, urlparse
from loguru import logger
from flexget import plugin
from flexget.components.ftp.sftp_client import SftpClient, SftpError
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.task import Task
from flexget.utils.template import RenderError, render_from_entry
# Module-scoped loguru logger; all records from this plugin are tagged 'sftp'.
logger = logger.bind(name='sftp')
# Constants
DEFAULT_SFTP_PORT: int = 22  # standard SSH/SFTP port
DEFAULT_CONNECT_TRIES: int = 3  # connection attempts before giving up
DEFAULT_SOCKET_TIMEOUT_SEC: int = 15  # per-socket timeout
# Bundle of connection parameters, built by task_config_to_sftp_config() and
# consumed by sftp_connect() (see usage in the plugin classes below).
SftpConfig = namedtuple(
    'SftpConfig', ['host', 'port', 'username', 'password', 'private_key', 'private_key_pass']
)
class SftpList:
    """
    Generate entries from SFTP. This plugin requires the pysftp Python module and its dependencies.
    Configuration:
        host: Host to connect to.
        port: Port the remote SSH server is listening on (default 22).
        username: Username to log in as.
        password: The password to use. Optional if a private key is provided.
        private_key: Path to the private key (if any) to log into the SSH server.
        private_key_pass: Password for the private key (if needed).
        recursive: Indicates whether the listing should be recursive.
        get_size: Indicates whether to calculate the size of the remote file/directory.
            WARNING: This can be very slow when computing the size of directories!
        files_only: Indicates whether to omit directories from the results.
        dirs: List of directories to download.
        socket_timeout_sec: Socket timeout in seconds (default 15 seconds).
        connection_tries: Number of times to attempt to connect before failing (default 3).
    Example:
        sftp_list:
            host: example.com
            username: Username
            private_key: /Users/username/.ssh/id_rsa
            recursive: False
            get_size: True
            files_only: False
            dirs:
                - '/path/to/list/'
                - '/another/path/'
    """

    schema = {
        'type': 'object',
        'properties': {
            'host': {'type': 'string'},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'port': {'type': 'integer', 'default': DEFAULT_SFTP_PORT},
            'files_only': {'type': 'boolean', 'default': True},
            'recursive': {'type': 'boolean', 'default': False},
            'get_size': {'type': 'boolean', 'default': True},
            'private_key': {'type': 'string'},
            'private_key_pass': {'type': 'string'},
            'dirs': one_or_more({'type': 'string'}),
            'socket_timeout_sec': {'type': 'integer', 'default': DEFAULT_SOCKET_TIMEOUT_SEC},
            'connection_tries': {'type': 'integer', 'default': DEFAULT_CONNECT_TRIES},
        },
        # FIX: previously misspelled 'additionProperties', which JSON Schema
        # silently ignores as an unknown keyword, so unknown config keys were
        # never rejected as intended.
        'additionalProperties': False,
        'required': ['host', 'username'],
    }

    @staticmethod
    def prepare_config(config: dict) -> dict:
        """
        Sets defaults for the provided configuration
        """
        config.setdefault('password', None)
        config.setdefault('private_key', None)
        config.setdefault('private_key_pass', None)
        config.setdefault('dirs', ['.'])
        return config

    @classmethod
    def on_task_input(cls, task: Task, config: dict) -> List[Entry]:
        """
        Input task handler: connect to the remote host, list the configured
        directories and return one entry per remote item.
        """
        config = cls.prepare_config(config)
        files_only: bool = config['files_only']
        recursive: bool = config['recursive']
        get_size: bool = config['get_size']
        socket_timeout_sec: int = config['socket_timeout_sec']
        connection_tries: int = config['connection_tries']
        # 'dirs' may be a single string or a list of strings (one_or_more);
        # copy the list so the task config is never aliased/mutated.
        directories: List[str] = (
            list(config['dirs']) if isinstance(config['dirs'], list) else [config['dirs']]
        )
        sftp_config: SftpConfig = task_config_to_sftp_config(config)
        sftp: SftpClient = sftp_connect(sftp_config, socket_timeout_sec, connection_tries)
        try:
            return sftp.list_directories(directories, recursive, get_size, files_only)
        finally:
            # Always release the SSH connection, even if the listing raises.
            sftp.close()
class SftpDownload:
    """
    Download files from a SFTP server. This plugin requires the pysftp Python module and its
    dependencies.
    Configuration:
        to: Destination path; supports Jinja2 templating on the input entry. Fields such
            as series_name must be populated prior to input into this plugin using
            metainfo_series or similar.
        recursive: Indicates whether to download directory contents recursively.
        delete_origin: Indicates whether to delete the remote file(s) once they've been downloaded.
        socket_timeout_sec: Socket timeout in seconds.
        connection_tries: Number of times to attempt to connect before failing (default 3).
    Example:
        sftp_download:
            to: '/Volumes/External/Drobo/downloads'
            delete_origin: False
    """

    schema = {
        'type': 'object',
        'properties': {
            'to': {'type': 'string', 'format': 'path'},
            'recursive': {'type': 'boolean', 'default': True},
            'delete_origin': {'type': 'boolean', 'default': False},
            'socket_timeout_sec': {'type': 'integer', 'default': DEFAULT_SOCKET_TIMEOUT_SEC},
            'connection_tries': {'type': 'integer', 'default': DEFAULT_CONNECT_TRIES},
        },
        'required': ['to'],
        'additionalProperties': False,
    }

    @classmethod
    def download_entry(cls, entry: Entry, config: dict, sftp: SftpClient) -> None:
        """
        Download the file(s) described in the entry's URL to config['to'].
        """
        path: str = unquote(urlparse(entry['url']).path) or '.'
        delete_origin: bool = config['delete_origin']
        recursive: bool = config['recursive']
        to: str = config['to']
        try:
            sftp.download(path, to, recursive, delete_origin)
        except SftpError as e:
            # Fail with the message string rather than the exception object,
            # matching how SftpUpload.handle_entry reports failures.
            entry.fail(str(e))

    @classmethod
    def on_task_output(cls, task: Task, config: dict) -> None:
        """Register this as an output plugin"""

    @classmethod
    def on_task_download(cls, task: Task, config: dict) -> None:
        """
        Task handler for the sftp_download plugin.
        """
        socket_timeout_sec: int = config['socket_timeout_sec']
        connection_tries: int = config['connection_tries']

        # Download entries grouped by host so the connection can be reused.
        # NOTE(review): itertools.groupby only groups *consecutive* items, so
        # same-host entries that are not adjacent in task.accepted will open
        # separate connections — functionally harmless, but confirm if
        # connection reuse matters here.
        for sftp_config, entries in groupby(task.accepted, cls._get_sftp_config):
            if not sftp_config:
                continue
            error_message: Optional[str] = None
            sftp: Optional[SftpClient] = None
            try:
                sftp = sftp_connect(sftp_config, socket_timeout_sec, connection_tries)
            except Exception as e:
                error_message = f'Failed to connect to {sftp_config.host} ({e})'
            for entry in entries:
                if sftp:
                    cls.download_entry(entry, config, sftp)
                else:
                    entry.fail(error_message)
            if sftp:
                sftp.close()

    @classmethod
    def _get_sftp_config(cls, entry: Entry):
        """
        Parse an entry's URL into a hashable SftpConfig, or None (with a
        warning) if the URL scheme is not sftp.
        """
        parsed = urlparse(entry['url'])
        host: str = parsed.hostname
        username: str = parsed.username
        password: str = parsed.password
        port: int = parsed.port or DEFAULT_SFTP_PORT
        # Private key info, if any, is carried on the entry itself.
        private_key: str = entry.get('private_key')
        private_key_pass: str = entry.get('private_key_pass')
        config: Optional[SftpConfig] = None
        if parsed.scheme == 'sftp':
            config = SftpConfig(host, port, username, password, private_key, private_key_pass)
        else:
            logger.warning('Scheme does not match SFTP: {}', entry['url'])
        return config
class SftpUpload:
    """
    Upload files to a SFTP server. This plugin requires the pysftp Python module and its
    dependencies.
    Configuration:
        host: Host to connect to.
        port: Port the remote SSH server is listening on. Defaults to port 22.
        username: Username to log in as.
        password: The password to use. Optional if a private key is provided.
        private_key: Path to the private key (if any) to log into the SSH server.
        private_key_pass: Password for the private key (if needed).
        to: Path to upload the file to; supports Jinja2 templating on the input entry. Fields such
            as series_name must be populated prior to input into this plugin using
            metainfo_series or similar.
        delete_origin: Indicates whether to delete the original file after a successful upload.
        socket_timeout_sec: Socket timeout in seconds.
        connection_tries: Number of times to attempt to connect before failing (default 3).
    Example:
        sftp_upload:
            host: example.com
            username: Username
            private_key: /Users/username/.ssh/id_rsa
            to: /TV/{{series_name}}/Series {{series_season}}
            delete_origin: False
    """

    schema = {
        'type': 'object',
        'properties': {
            'host': {'type': 'string'},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'port': {'type': 'integer', 'default': DEFAULT_SFTP_PORT},
            'private_key': {'type': 'string'},
            'private_key_pass': {'type': 'string'},
            'to': {'type': 'string'},
            'delete_origin': {'type': 'boolean', 'default': False},
            'socket_timeout_sec': {'type': 'integer', 'default': DEFAULT_SOCKET_TIMEOUT_SEC},
            'connection_tries': {'type': 'integer', 'default': DEFAULT_CONNECT_TRIES},
        },
        # Fixed misspelling 'additionProperties' so unknown config keys are
        # actually rejected by JSON-schema validation.
        'additionalProperties': False,
        'required': ['host', 'username'],
    }

    @staticmethod
    def prepare_config(config: dict) -> dict:
        """
        Set defaults for the provided configuration and return it.
        """
        config.setdefault('password', None)
        config.setdefault('private_key', None)
        config.setdefault('private_key_pass', None)
        config.setdefault('to', None)
        return config

    @classmethod
    def handle_entry(cls, entry: Entry, sftp: SftpClient, config: dict):
        """
        Upload the file referenced by the entry, optionally deleting the local
        original after a *successful* upload.
        """
        to: str = config['to']
        location: str = entry['location']
        delete_origin: bool = config['delete_origin']

        if to:
            try:
                to = render_from_entry(to, entry)
            except RenderError as e:
                logger.error('Could not render path: {}', to)
                entry.fail(str(e))
                return

        try:
            sftp.upload(location, to)
        except SftpError as e:
            entry.fail(str(e))
            # Bug fix: previously execution fell through here, so the local
            # origin file could be deleted even though the upload had failed.
            return

        if delete_origin and Path(location).is_file():
            try:
                Path(location).unlink()
            except Exception as e:
                logger.warning('Failed to delete file {} ({})', location, e)

    @classmethod
    def on_task_output(cls, task: Task, config: dict) -> None:
        """Uploads accepted entries to the specified SFTP server."""
        config = cls.prepare_config(config)
        socket_timeout_sec: int = config['socket_timeout_sec']
        connection_tries: int = config['connection_tries']
        sftp_config: SftpConfig = task_config_to_sftp_config(config)
        sftp = sftp_connect(sftp_config, socket_timeout_sec, connection_tries)
        for entry in task.accepted:
            if sftp:
                logger.debug('Uploading file: {}', entry['location'])
                cls.handle_entry(entry, sftp, config)
            else:
                entry.fail('SFTP connection failed.')
def task_config_to_sftp_config(config: dict) -> SftpConfig:
    """
    Build an SftpConfig from a Flexget task config dict.
    """
    host: str = config['host']  # was wrongly annotated as int; hostnames are strings
    port: int = config['port']
    username: str = config['username']
    password: str = config['password']
    private_key: str = config['private_key']
    private_key_pass: str = config['private_key_pass']
    return SftpConfig(host, port, username, password, private_key, private_key_pass)
def sftp_connect(
    sftp_config: SftpConfig, socket_timeout_sec: int, connection_tries: int
) -> SftpClient:
    """Open an SftpClient for the given connection settings and apply the socket timeout."""
    client = SftpClient(
        host=sftp_config.host,
        port=sftp_config.port,
        username=sftp_config.username,
        password=sftp_config.password,
        private_key=sftp_config.private_key,
        private_key_pass=sftp_config.private_key_pass,
        connection_tries=connection_tries,
    )
    client.set_socket_timeout(socket_timeout_sec)
    return client
@event('plugin.register')
def register_plugin() -> None:
    """Register the SFTP list, download and upload plugins with Flexget."""
    for plugin_class, plugin_name in (
        (SftpList, 'sftp_list'),
        (SftpDownload, 'sftp_download'),
        (SftpUpload, 'sftp_upload'),
    ):
        plugin.register(plugin_class, plugin_name, api_ver=2)
| 36.050667 | 112 | 0.609734 | from collections import namedtuple
from itertools import groupby
from pathlib import Path
from typing import List, Optional
from urllib.parse import unquote, urlparse
from loguru import logger
from flexget import plugin
from flexget.components.ftp.sftp_client import SftpClient, SftpError
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.task import Task
from flexget.utils.template import RenderError, render_from_entry
logger = logger.bind(name='sftp')
DEFAULT_SFTP_PORT: int = 22
DEFAULT_CONNECT_TRIES: int = 3
DEFAULT_SOCKET_TIMEOUT_SEC: int = 15
SftpConfig = namedtuple(
'SftpConfig', ['host', 'port', 'username', 'password', 'private_key', 'private_key_pass']
)
class SftpList:
schema = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'port': {'type': 'integer', 'default': DEFAULT_SFTP_PORT},
'files_only': {'type': 'boolean', 'default': True},
'recursive': {'type': 'boolean', 'default': False},
'get_size': {'type': 'boolean', 'default': True},
'private_key': {'type': 'string'},
'private_key_pass': {'type': 'string'},
'dirs': one_or_more({'type': 'string'}),
'socket_timeout_sec': {'type': 'integer', 'default': DEFAULT_SOCKET_TIMEOUT_SEC},
'connection_tries': {'type': 'integer', 'default': DEFAULT_CONNECT_TRIES},
},
'additionProperties': False,
'required': ['host', 'username'],
}
@staticmethod
def prepare_config(config: dict) -> dict:
config.setdefault('password', None)
config.setdefault('private_key', None)
config.setdefault('private_key_pass', None)
config.setdefault('dirs', ['.'])
return config
@classmethod
def on_task_input(cls, task: Task, config: dict) -> List[Entry]:
config = cls.prepare_config(config)
files_only: bool = config['files_only']
recursive: bool = config['recursive']
get_size: bool = config['get_size']
socket_timeout_sec: int = config['socket_timeout_sec']
connection_tries: int = config['connection_tries']
directories: List[str] = []
if isinstance(config['dirs'], list):
directories.extend(config['dirs'])
else:
directories.append(config['dirs'])
sftp_config: SftpConfig = task_config_to_sftp_config(config)
sftp: SftpClient = sftp_connect(sftp_config, socket_timeout_sec, connection_tries)
entries: List[Entry] = sftp.list_directories(directories, recursive, get_size, files_only)
sftp.close()
return entries
class SftpDownload:
schema = {
'type': 'object',
'properties': {
'to': {'type': 'string', 'format': 'path'},
'recursive': {'type': 'boolean', 'default': True},
'delete_origin': {'type': 'boolean', 'default': False},
'socket_timeout_sec': {'type': 'integer', 'default': DEFAULT_SOCKET_TIMEOUT_SEC},
'connection_tries': {'type': 'integer', 'default': DEFAULT_CONNECT_TRIES},
},
'required': ['to'],
'additionalProperties': False,
}
@classmethod
def download_entry(cls, entry: Entry, config: dict, sftp: SftpClient) -> None:
path: str = unquote(urlparse(entry['url']).path) or '.'
delete_origin: bool = config['delete_origin']
recursive: bool = config['recursive']
to: str = config['to']
try:
sftp.download(path, to, recursive, delete_origin)
except SftpError as e:
entry.fail(e)
@classmethod
def on_task_output(cls, task: Task, config: dict) -> None:
@classmethod
def on_task_download(cls, task: Task, config: dict) -> None:
socket_timeout_sec: int = config['socket_timeout_sec']
connection_tries: int = config['connection_tries']
for sftp_config, entries in groupby(task.accepted, cls._get_sftp_config):
if not sftp_config:
continue
error_message: Optional[str] = None
sftp: Optional[SftpClient] = None
try:
sftp = sftp_connect(sftp_config, socket_timeout_sec, connection_tries)
except Exception as e:
error_message = f'Failed to connect to {sftp_config.host} ({e})'
for entry in entries:
if sftp:
cls.download_entry(entry, config, sftp)
else:
entry.fail(error_message)
if sftp:
sftp.close()
@classmethod
def _get_sftp_config(cls, entry: Entry):
parsed = urlparse(entry['url'])
host: str = parsed.hostname
username: str = parsed.username
password: str = parsed.password
port: int = parsed.port or DEFAULT_SFTP_PORT
private_key: str = entry.get('private_key')
private_key_pass: str = entry.get('private_key_pass')
config: Optional[SftpConfig] = None
if parsed.scheme == 'sftp':
config = SftpConfig(host, port, username, password, private_key, private_key_pass)
else:
logger.warning('Scheme does not match SFTP: {}', entry['url'])
return config
class SftpUpload:
schema = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'port': {'type': 'integer', 'default': DEFAULT_SFTP_PORT},
'private_key': {'type': 'string'},
'private_key_pass': {'type': 'string'},
'to': {'type': 'string'},
'delete_origin': {'type': 'boolean', 'default': False},
'socket_timeout_sec': {'type': 'integer', 'default': DEFAULT_SOCKET_TIMEOUT_SEC},
'connection_tries': {'type': 'integer', 'default': DEFAULT_CONNECT_TRIES},
},
'additionProperties': False,
'required': ['host', 'username'],
}
@staticmethod
def prepare_config(config: dict) -> dict:
config.setdefault('password', None)
config.setdefault('private_key', None)
config.setdefault('private_key_pass', None)
config.setdefault('to', None)
return config
@classmethod
def handle_entry(cls, entry: Entry, sftp: SftpClient, config: dict):
to: str = config['to']
location: str = entry['location']
delete_origin: bool = config['delete_origin']
if to:
try:
to = render_from_entry(to, entry)
except RenderError as e:
logger.error('Could not render path: {}', to)
entry.fail(str(e))
return
try:
sftp.upload(location, to)
except SftpError as e:
entry.fail(str(e))
if delete_origin and Path(location).is_file():
try:
Path(location).unlink()
except Exception as e:
logger.warning('Failed to delete file {} ({})', location, e)
@classmethod
def on_task_output(cls, task: Task, config: dict) -> None:
config = cls.prepare_config(config)
socket_timeout_sec: int = config['socket_timeout_sec']
connection_tries: int = config['connection_tries']
sftp_config: SftpConfig = task_config_to_sftp_config(config)
sftp = sftp_connect(sftp_config, socket_timeout_sec, connection_tries)
for entry in task.accepted:
if sftp:
logger.debug('Uploading file: {}', entry['location'])
cls.handle_entry(entry, sftp, config)
else:
entry.fail('SFTP connection failed.')
def task_config_to_sftp_config(config: dict) -> SftpConfig:
host: int = config['host']
port: int = config['port']
username: str = config['username']
password: str = config['password']
private_key: str = config['private_key']
private_key_pass: str = config['private_key_pass']
return SftpConfig(host, port, username, password, private_key, private_key_pass)
def sftp_connect(
sftp_config: SftpConfig, socket_timeout_sec: int, connection_tries: int
) -> SftpClient:
sftp_client: SftpClient = SftpClient(
host=sftp_config.host,
username=sftp_config.username,
private_key=sftp_config.private_key,
password=sftp_config.password,
port=sftp_config.port,
private_key_pass=sftp_config.private_key_pass,
connection_tries=connection_tries,
)
sftp_client.set_socket_timeout(socket_timeout_sec)
return sftp_client
@event('plugin.register')
def register_plugin() -> None:
plugin.register(SftpList, 'sftp_list', api_ver=2)
plugin.register(SftpDownload, 'sftp_download', api_ver=2)
plugin.register(SftpUpload, 'sftp_upload', api_ver=2)
| true | true |
f71609666f0531ac8c06b96d69a4042a2ac3a5bc | 853 | py | Python | paperboy/resources/config.py | datalayer-externals/papermill-paperboy | b27bfdbb4ed27dea597ff1d6346eb831542ae81f | [
"Apache-2.0"
] | 233 | 2018-11-01T09:17:08.000Z | 2022-03-22T08:27:24.000Z | paperboy/resources/config.py | datalayer-externals/papermill-paperboy | b27bfdbb4ed27dea597ff1d6346eb831542ae81f | [
"Apache-2.0"
] | 99 | 2018-10-17T21:48:42.000Z | 2021-05-07T08:33:36.000Z | paperboy/resources/config.py | datalayer-externals/papermill-paperboy | b27bfdbb4ed27dea597ff1d6346eb831542ae81f | [
"Apache-2.0"
] | 29 | 2018-11-01T11:33:08.000Z | 2022-01-12T22:12:19.000Z | import falcon
import json
from .base import BaseResource
class ConfigResource(BaseResource):
    """Falcon resource serving configuration and form definitions as JSON."""

    def __init__(self, *args, **kwargs):
        super(ConfigResource, self).__init__(*args, **kwargs)

    def on_get(self, req, resp):
        """Return the form definition for the requested resource type."""
        resp.content_type = 'application/json'
        requested = req.params.get('type', None)
        if requested is None:
            # No type requested: return the full application configuration.
            resp.body = json.dumps(self.config.to_dict())
            return
        # Dispatch table; lambdas defer the db access until a match is found.
        form_builders = {
            'notebooks': lambda: self.db.notebooks.form(),
            'jobs': lambda: self.db.jobs.form(),
            'reports': lambda: self.db.reports.form(),
        }
        builder = form_builders.get(requested)
        if builder is not None:
            resp.body = json.dumps(builder())
        else:
            resp.status = falcon.HTTP_404
| 32.807692 | 72 | 0.607268 | import falcon
import json
from .base import BaseResource
class ConfigResource(BaseResource):
def __init__(self, *args, **kwargs):
super(ConfigResource, self).__init__(*args, **kwargs)
def on_get(self, req, resp):
resp.content_type = 'application/json'
type = req.params.get('type', None)
if type is None:
resp.body = json.dumps(self.config.to_dict())
elif type == 'notebooks':
resp.body = json.dumps(self.db.notebooks.form())
elif type == 'jobs':
resp.body = json.dumps(self.db.jobs.form())
elif type == 'reports':
resp.body = json.dumps(self.db.reports.form())
else:
resp.status = falcon.HTTP_404
| true | true |
f71609b6af41be400030f87cd3c4bfcdfc294a4a | 10,236 | py | Python | READMIT/alpha/fp_VBHC_READMIT_BEA_FIPS_alpha.py | andrewcistola/value-based-healthcare | 12583c33bff8dee83a7daf5aaaf1e7c39883a279 | [
"MIT"
] | 1 | 2021-03-12T07:11:14.000Z | 2021-03-12T07:11:14.000Z | READMIT/alpha/fp_VBHC_READMIT_BEA_FIPS_alpha.py | andrewcistola/value-based-healthcare | 12583c33bff8dee83a7daf5aaaf1e7c39883a279 | [
"MIT"
] | null | null | null | READMIT/alpha/fp_VBHC_READMIT_BEA_FIPS_alpha.py | andrewcistola/value-based-healthcare | 12583c33bff8dee83a7daf5aaaf1e7c39883a279 | [
"MIT"
] | null | null | null | # FractureProof
## Value Based Healthcare Project
### Outcome
#### CMS Hospital Wide Readmission Rate 2018
### Predictors
#### BEA 2018 County wide Economic Measures
### Table Key
#### State County FIPS

### Import Python Libraries
### NOTE: imports must come before first use; the original script called
### os.chdir() before 'import os', which raised a NameError at startup.
import os # Operating system navigation
import sqlite3 # SQLite database manager

### Import data science libraries
import pandas as pd # Widely used data manipulation library with R/Excel like tables named 'data frames'
import numpy as np # Widely used matrix library for numerical processes

### Import scikit-learn libraries: data preparation
from sklearn.preprocessing import StandardScaler # Standard scaling for easier use of machine learning algorithms
from sklearn.impute import SimpleImputer # Univariate imputation for missing data

### Set working directory to project folder
os.chdir("C:/Users/drewc/GitHub/allocativ") # Set wd to project repository

### Set file title and path
title = "fp_VBHC_READMIT_BEA_FIPS_alpha"
path = "fp/VBHC/READMIT/"

## Section A: Collect Possible Predictors from Public Access Data
### Step 1: Import and Join Data
### Import BEA county-level economic measures.
### low_memory expects a bool; the string 'false' used originally is truthy,
### so it silently left the default chunked parsing enabled.
df_bea = pd.read_csv("hnb/BEA/2018/BEA_2018_FIPS_full.csv", low_memory = False) # Import dataset saved as csv in _data folder
### Import CMS data, keep the outcome and key, and join on county FIPS
df_cms = pd.read_csv("hnb/CMS/CMS_2018_FIPS_full.csv", low_memory = False) # Import dataset saved as csv in _data folder
df_cms = df_cms.filter(["Rate of readmission after discharge from hospital (hospital-wide)", "FIPS"]) # Keep only selected columns
df_join = pd.merge(df_cms, df_bea, on = "FIPS", how = "inner") # Inner join keeps only counties present in both tables
df_cms = 0 # Clear variable
df_bea = 0 # Clear variable (the original cleared an undefined 'df_acs' by mistake, leaving df_bea in memory)

### Rename and Verify
df_step1 = df_join
df_join = 0
df_step1.info() # Get class, memory, and column info: names, data types, obs.
df_step1.head() # Print first 5 observations

### Step 2: Data Manipulation
### Drop ID variables
df_man = df_step1.drop(columns = ["FIPS"]) # Drop Unwanted Columns
### Rename outcome
df_man = df_man.rename(columns = {"Rate of readmission after discharge from hospital (hospital-wide)": "outcome"}) # Rename outcome column in place

### Rename and Verify
df_step2 = df_man
df_man = 0
df_step2.info() # Get class, memory, and column info: names, data types, obs.
df_step2.head() # Print first 5 observations

## Step 3: Data Standardization
### Remove outcome before imputing/scaling the predictors
df_NA = df_step2
outcome = df_NA.pop("outcome") # 'pop' column from df
### Drop features with less than 75% data
df_NA = df_NA.dropna(axis = 1, thresh = 0.75*len(df_NA)) # Drop features less than 75% non-NA count for all columns
### Impute missing values
df_NA = pd.DataFrame(SimpleImputer(strategy = "median").fit_transform(df_NA), columns = df_NA.columns) # Impute missing data
### Standard Scale Values
df_NA = pd.DataFrame(StandardScaler().fit_transform(df_NA.values), columns = df_NA.columns) # Standardize to zero mean / unit variance
### Reattach outcome
df_NA.insert(0, "outcome", outcome) # reinsert in index
### Drop all remaining rows with NA (should be none after imputation)
df_NA = df_NA.dropna() # Drop all rows with NA values

### Rename and Verify
df_step3 = df_NA
df_NA = 0
df_step3.info() # Get class, memory, and column info: names, data types, obs.
df_step3.head() # Print first 5 observations
## Section B: Identify Significant Predictors with Reduction Algorithms
### Import scikit-learn: machine learning
from sklearn.decomposition import PCA # Principal components analysis from sklearn
from sklearn.ensemble import RandomForestClassifier # Random Forest classifier (imported but not used below)
from sklearn.ensemble import RandomForestRegressor # Random Forest regression component
from sklearn.feature_selection import RFECV # Recursive feature elimination with cross validation
from sklearn.linear_model import LinearRegression # Used for machine learning with quantitative outcome
### Step 4: Principal Component Analysis
### Setup initial PCA model with as many components as degrees of freedom
df_pca = df_step3.drop(columns = ["outcome"]) # Drop outcome variable
degree = len(df_step3.columns) - 2 # Number of features minus 1 (df_step3 still contains the outcome column)
pca = PCA(n_components = degree) # Pass the number of components based on degrees of freedom
### Fit initial PCA model
pca.fit(df_pca) # fit to data
### Setup final PCA model keeping only components with eigenvalue > 1 (Kaiser criterion)
df_ev = pd.DataFrame(pca.explained_variance_) # Save explained variance of components
df_ev = df_ev[(df_ev[0] > 1)] # Keep eigenvalues above 1
components = len(df_ev.index) # Save count of retained components for variable reduction
pca = PCA(n_components = components) # Pass the reduced number of components to the final PCA model
### Fit final PCA model
pca.fit_transform(df_pca) # finally call fit_transform on the aggregate data to create PCA results object
### Collect feature list from PCA: keep features with above-average maximum eigenvector loadings
df_pca2 = pd.DataFrame(pca.components_, columns = df_pca.columns) # Export eigenvectors to data frame
df_pca2["Variance"] = pca.explained_variance_ratio_ # Save explained variance ratios as their own column
df_pca2 = df_pca2[df_pca2.Variance > df_pca2.Variance.mean()] # Subset by components with above average explained variance ratio
df_pca2 = df_pca2.abs() # get absolute value for column or data frame
df_pca3 = pd.DataFrame(df_pca2.max(), columns = ["MaxEV"]) # select maximum eigenvector for each feature
df_pc = df_pca3[df_pca3.MaxEV > df_pca3.MaxEV.mean()] # Subset by above average max eigenvalues
df_pc = df_pc.reset_index() # Add a new index of ascending values, existing index becomes column named "index"
df_pc = df_pc.rename(columns = {"index": "Features"}) # Rename the former index column to "Features"
### Rename and Verify
df_step4 = df_pc
df_step4.info() # Get class, memory, and column info: names, data types, obs.
df_step4.head() # Print first 5 observations
### Step 5: Random Forest Regressor
### Setup RF model
Y = df_step3["outcome"] # Isolate Outcome variable
X = df_step3.drop(columns = ["outcome"]) # Save features columns as predictor data frame
forest = RandomForestRegressor(n_estimators = 1000, max_depth = 10) # Default values except for number of trees and depth
### Fit Forest model
forest.fit(X, Y) # This will take time
### Collect features from RF with above-average Gini importance
gini = forest.feature_importances_ # Output importances of features
l_gini = list(zip(X, gini)) # Create list of variables alongside importance scores
df_gini = pd.DataFrame(l_gini, columns = ["Features", "Gini"]) # Create data frame of importances with variables and gini column names
df_gini = df_gini.sort_values(by = ["Gini"], ascending = False) # Sort data frame by gini value in descending order
df_gini = df_gini[(df_gini["Gini"] > df_gini["Gini"].mean())] # Subset by Gini values higher than mean
### Rename and Verify
df_step5 = df_gini
df_step5.info() # Get class, memory, and column info: names, data types, obs.
df_step5.head() # Print first 5 observations
### Step 6: Recursive Feature Elimination
### Keep only features flagged by both PCA (Step 4) and RF (Step 5)
df_pc_gini = pd.merge(df_pc, df_gini, on = "Features", how = "inner") # Inner join keeps features selected by both methods
pc_gini_features = df_pc_gini["Features"].tolist() # Save features from data frame
df_rfecv = df_step3[pc_gini_features] # Add selected features to df
### Setup RFE model
X = df_rfecv # Save features columns as predictor data frame
Y = df_step3["outcome"] # Use outcome data frame
RFE = LinearRegression() # Use regression coefficient as estimator
selector = RFECV(estimator = RFE, min_features_to_select = 10) # define selection parameters; keep at least 10 features
### Fit RFE model
selected = selector.fit(X, Y) # This will take time
### Collect features retained by RFE
ar_rfe = selected.support_ # Save Boolean values as numpy array
l_rfe = list(zip(X, ar_rfe)) # Create list of variables alongside RFE value
df_rfe = pd.DataFrame(l_rfe, columns = ["Features", "RFE"]) # Create data frame of RFE flags per feature
df_rfe = df_rfe[df_rfe.RFE == True] # Select Variables that were True
df_rfe = df_rfe.reset_index() # Reset Index
df_rfe = df_rfe.filter(["Features"]) # Keep only selected columns
### Rename and Verify
df_step6 = df_rfe
df_step6.info() # Get class, memory, and column info: names, data types, obs.
df_step6.head() # Print first 5 observations
## Section C: Evaluate Significant Features with Modeling and Prediction
### Import scikit-learn libraries: regression
from sklearn.linear_model import LogisticRegression # Used for categorical outcomes (imported but not used below)
from sklearn.linear_model import LinearRegression # Used for quantitative outcomes (already imported in Section B)
### Import scikit-learn: neural network
from sklearn.neural_network import MLPRegressor # Multi-layer perceptron regressor (imported but not used below)
### Step 7: Multiple Regression on the features retained by RFE (Step 6)
### Setup MR Model
features = list(df_step6["Features"]) # Save chosen features as list
x = df_step3.filter(features) # Keep only selected columns from rfe
y = df_step3["outcome"] # Add outcome variable
LR = LinearRegression() # Linear Regression in scikit learn
### Fit MR model
regression = LR.fit(x, y) # Fit model
### Collect coefficients from MR model
coef = regression.coef_ # Coefficients as a numpy array
l_reg = list(zip(x, coef)) # Create list of variables alongside coefficient
df_reg = pd.DataFrame(l_reg, columns = ["Features", "Coefficients"]) # Create data frame of features and coefficients
### Export feature attributes
### NOTE(review): the output path below is hard-coded; the 'path' and 'title'
### variables defined at the top of the script look intended here — confirm.
df_pc_gini_reg = pd.merge(df_pc_gini, df_reg, on = "Features", how = "inner") # Join PCA/RF attributes with regression coefficients
df_pc_gini_reg.to_csv(r"fp/VBHC/READMIT/fp_VBHC_READMIT_BEA_FIPS_alpha.csv") # Export df as csv
print(df_pc_gini_reg)
### Collect prediction results
determination = regression.score(x, y) # R-squared value, coefficient of determination
print(determination)
### Rename and Verify
df_step7 = df_pc_gini_reg
df_step7.info() # Get class, memory, and column info: names, data types, obs.
df_step7.head() # Print first 5 observations | 45.901345 | 178 | 0.762016 |
2.Variance.mean()]
df_pca2 = df_pca2.abs()
df_pca3 = pd.DataFrame(df_pca2.max(), columns = ["MaxEV"])
df_pc = df_pca3[df_pca3.MaxEV > df_pca3.MaxEV.mean()]
df_pc = df_pc.reset_index()
df_pc = df_pc.rename(columns = {"index": "Features"})
tep4.head()
d.DataFrame(l_gini, columns = ["Features", "Gini"])
df_gini = df_gini.sort_values(by = ["Gini"], ascending = False)
df_gini = df_gini[(df_gini["Gini"] > df_gini["Gini"].mean())]
_step5.head()
pc_gini_features]
e"]
RFE = LinearRegression()
selector = RFECV(estimator = RFE, min_features_to_select = 10)
pd.DataFrame(l_rfe, columns = ["Features", "RFE"])
df_rfe = df_rfe[df_rfe.RFE == True]
df_rfe = df_rfe.reset_index()
df_rfe = df_rfe.filter(["Features"])
step6.head()
oefficients"])
atures", how = "inner")
df_pc_gini_reg.to_csv(r"fp/VBHC/READMIT/fp_VBHC_READMIT_BEA_FIPS_alpha.csv")
print(df_pc_gini_reg)
ion)
o()
df_step7.head() | true | true |
f7160b02cd2fe254d7a127f34fecad15c020c378 | 4,926 | py | Python | optimus/engines/base/dataframe/columns.py | niallscc/Optimus | 35218401556e5acc4beb2859084128ebcd1ab4e5 | [
"Apache-2.0"
] | null | null | null | optimus/engines/base/dataframe/columns.py | niallscc/Optimus | 35218401556e5acc4beb2859084128ebcd1ab4e5 | [
"Apache-2.0"
] | null | null | null | optimus/engines/base/dataframe/columns.py | niallscc/Optimus | 35218401556e5acc4beb2859084128ebcd1ab4e5 | [
"Apache-2.0"
] | null | null | null | from functools import reduce
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler
from optimus.engines.base.columns import BaseColumns
from optimus.helpers.columns import parse_columns, name_col
from optimus.helpers.constants import Actions
from optimus.helpers.raiseit import RaiseIt
class DataFrameBaseColumns(BaseColumns):
    """Column operations shared by the eager (pandas-style) dataframe engines."""

    def __init__(self, df):
        # Python 3 zero-argument super(); equivalent to the original
        # super(DataFrameBaseColumns, self) call.
        super().__init__(df)
    @staticmethod
    def exec_agg(exprs, compute=None):
        """
        Execute an aggregation expression.

        Eager (non-dask) dataframes evaluate expressions immediately, so there
        is nothing left to compute here and the expressions are returned as-is.
        See the dask implementation of exec_agg for the deferred variant.

        :param exprs: Already-evaluated aggregation expression(s).
        :param compute: Ignored; kept for signature compatibility with the dask engine.
        :return: ``exprs`` unchanged.
        """
        return exprs
    def qcut(self, columns, num_buckets, handle_invalid="skip"):
        # Placeholder: no-op in this base class; presumably implemented by
        # engine-specific subclasses — TODO confirm.
        pass
    @staticmethod
    def correlation(input_cols, method="pearson", output="json"):
        # Placeholder: no-op here; correlation looks engine-specific — confirm.
        pass
    @staticmethod
    def scatter(columns, buckets=10):
        # Placeholder: no-op here; scatter data generation looks engine-specific — confirm.
        pass
def standard_scaler(self, input_cols="*", output_cols=None):
df = self.root
def _standard_scaler(_value):
return StandardScaler().fit_transform(_value.values.reshape(-1, 1))
return df.cols.apply(input_cols, func=_standard_scaler, output_cols=output_cols, meta_action=Actions.STANDARD_SCALER.value)
def max_abs_scaler(self, input_cols="*", output_cols=None):
df = self.root
def _max_abs_scaler(_value):
return MaxAbsScaler().fit_transform(_value.values.reshape(-1, 1))
return df.cols.apply(input_cols, func=_max_abs_scaler, output_cols=output_cols,meta_action=Actions.MAX_ABS_SCALER.value )
def min_max_scaler(self, input_cols, output_cols=None):
# https://github.com/dask/dask/issues/2690
df = self.root
def _min_max_scaler(_value):
return MinMaxScaler().fit_transform(_value.values.reshape(-1, 1))
return df.cols.apply(input_cols, func=_min_max_scaler, output_cols=output_cols, meta_action=Actions.MIN_MAX_SCALER.value )
def replace_regex(self, input_cols, regex=None, value="", output_cols=None):
"""
Use a Regex to replace values
:param input_cols: '*', list of columns names or a single column name.
:param output_cols:
:param regex: values to look at to be replaced
:param value: new value to replace the old one
:return:
"""
df = self.root
def _replace_regex(_value, _regex, _replace):
return _value.replace(_regex, _replace, regex=True)
return df.cols.apply(input_cols, func=_replace_regex, args=(regex, value,), output_cols=output_cols,
filter_col_by_dtypes=df.constants.STRING_TYPES + df.constants.NUMERIC_TYPES)
def reverse(self, input_cols, output_cols=None):
def _reverse(value):
return str(value)[::-1]
df = self.root
return df.cols.apply(input_cols, _reverse, func_return_type=str,
filter_col_by_dtypes=df.constants.STRING_TYPES,
output_cols=output_cols, set_index=True)
@staticmethod
def astype(*args, **kwargs):
pass
@staticmethod
def apply_by_dtypes(columns, func, func_return_type, args=None, func_type=None, data_type=None):
pass
@staticmethod
def to_timestamp(input_cols, date_format=None, output_cols=None):
pass
def nest(self, input_cols, separator="", output_col=None, shape="string", drop=False):
df = self.root
dfd = df.data
if output_col is None:
output_col = name_col(input_cols)
input_cols = parse_columns(df, input_cols)
output_ordered_columns = df.cols.names()
# cudfd do nor support apply or agg join for this operation
if shape == "vector" or shape == "array":
raise NotImplementedError("Not implemented yet")
# https://stackoverflow.com/questions/43898035/pandas-combine-column-values-into-a-list-in-a-new-column/43898233
# t['combined'] = t.values.tolist()
# dfds = [dfd[input_col] for input_col in input_cols]
# dfd[output_col] = dfd[input_cols].values.tolist()
elif shape == "string":
dfds = [dfd[input_col].astype(str) for input_col in input_cols]
dfd = dfd.assign(**{output_col:reduce((lambda x, y: x + separator + y), dfds)})
if output_col not in output_ordered_columns:
col_index = output_ordered_columns.index(input_cols[-1]) + 1
output_ordered_columns[col_index:col_index] = [output_col]
if drop is True:
for input_col in input_cols:
if input_col in output_ordered_columns and input_col != output_col:
output_ordered_columns.remove(input_col)
return self.root.new(dfd).cols.select(output_ordered_columns)
| 35.695652 | 131 | 0.661186 | from functools import reduce
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler
from optimus.engines.base.columns import BaseColumns
from optimus.helpers.columns import parse_columns, name_col
from optimus.helpers.constants import Actions
from optimus.helpers.raiseit import RaiseIt
class DataFrameBaseColumns(BaseColumns):
def __init__(self, df):
super(DataFrameBaseColumns, self).__init__(df)
@staticmethod
def exec_agg(exprs, compute=None):
return exprs
def qcut(self, columns, num_buckets, handle_invalid="skip"):
pass
@staticmethod
def correlation(input_cols, method="pearson", output="json"):
pass
@staticmethod
def scatter(columns, buckets=10):
pass
def standard_scaler(self, input_cols="*", output_cols=None):
df = self.root
def _standard_scaler(_value):
return StandardScaler().fit_transform(_value.values.reshape(-1, 1))
return df.cols.apply(input_cols, func=_standard_scaler, output_cols=output_cols, meta_action=Actions.STANDARD_SCALER.value)
def max_abs_scaler(self, input_cols="*", output_cols=None):
df = self.root
def _max_abs_scaler(_value):
return MaxAbsScaler().fit_transform(_value.values.reshape(-1, 1))
return df.cols.apply(input_cols, func=_max_abs_scaler, output_cols=output_cols,meta_action=Actions.MAX_ABS_SCALER.value )
def min_max_scaler(self, input_cols, output_cols=None):
df = self.root
def _min_max_scaler(_value):
return MinMaxScaler().fit_transform(_value.values.reshape(-1, 1))
return df.cols.apply(input_cols, func=_min_max_scaler, output_cols=output_cols, meta_action=Actions.MIN_MAX_SCALER.value )
def replace_regex(self, input_cols, regex=None, value="", output_cols=None):
df = self.root
def _replace_regex(_value, _regex, _replace):
return _value.replace(_regex, _replace, regex=True)
return df.cols.apply(input_cols, func=_replace_regex, args=(regex, value,), output_cols=output_cols,
filter_col_by_dtypes=df.constants.STRING_TYPES + df.constants.NUMERIC_TYPES)
def reverse(self, input_cols, output_cols=None):
def _reverse(value):
return str(value)[::-1]
df = self.root
return df.cols.apply(input_cols, _reverse, func_return_type=str,
filter_col_by_dtypes=df.constants.STRING_TYPES,
output_cols=output_cols, set_index=True)
@staticmethod
def astype(*args, **kwargs):
pass
@staticmethod
def apply_by_dtypes(columns, func, func_return_type, args=None, func_type=None, data_type=None):
pass
@staticmethod
def to_timestamp(input_cols, date_format=None, output_cols=None):
pass
def nest(self, input_cols, separator="", output_col=None, shape="string", drop=False):
df = self.root
dfd = df.data
if output_col is None:
output_col = name_col(input_cols)
input_cols = parse_columns(df, input_cols)
output_ordered_columns = df.cols.names()
if shape == "vector" or shape == "array":
raise NotImplementedError("Not implemented yet")
elif shape == "string":
dfds = [dfd[input_col].astype(str) for input_col in input_cols]
dfd = dfd.assign(**{output_col:reduce((lambda x, y: x + separator + y), dfds)})
if output_col not in output_ordered_columns:
col_index = output_ordered_columns.index(input_cols[-1]) + 1
output_ordered_columns[col_index:col_index] = [output_col]
if drop is True:
for input_col in input_cols:
if input_col in output_ordered_columns and input_col != output_col:
output_ordered_columns.remove(input_col)
return self.root.new(dfd).cols.select(output_ordered_columns)
| true | true |
f7160b89bbc0f0135dfed20cc0e5c8f6d06c5128 | 2,407 | py | Python | arviz/wrappers/wrap_pystan.py | brandonwillard/arviz | 1358a04cbb7759a6a15459a3d4e4f7259626484c | [
"Apache-2.0"
] | null | null | null | arviz/wrappers/wrap_pystan.py | brandonwillard/arviz | 1358a04cbb7759a6a15459a3d4e4f7259626484c | [
"Apache-2.0"
] | null | null | null | arviz/wrappers/wrap_pystan.py | brandonwillard/arviz | 1358a04cbb7759a6a15459a3d4e4f7259626484c | [
"Apache-2.0"
] | null | null | null | # pylint: disable=arguments-differ
"""Base class for PyStan wrappers."""
from ..data import from_pystan
from .base import SamplingWrapper
class PyStanSamplingWrapper(SamplingWrapper):
"""PyStan sampling wrapper base class.
See the documentation on :class:`~arviz.SamplingWrapper` for a more detailed
description. An example of ``PyStanSamplingWrapper`` usage can be found
in the :ref:`pystan_refitting` notebook.
Warnings
--------
Sampling wrappers are an experimental feature in a very early stage. Please use them
with caution.
"""
def sel_observations(self, idx):
"""Select a subset of the observations in idata_orig.
**Not implemented**: This method must be implemented on a model basis.
It is documented here to show its format and call signature.
Parameters
----------
idx
Indexes to separate from the rest of the observed data.
Returns
-------
modified_observed_data : dict
Dictionary containing both excluded and included data but properly divided
in the different keys. Passed to ``data`` argument of ``model.sampling``.
excluded_observed_data : str
Variable name containing the pointwise log likelihood data of the excluded
data. As PyStan cannot call C++ functions and log_likelihood__i is already
calculated *during* the simultion, instead of the value on which to evaluate
the likelihood, ``log_likelihood__i`` expects a string so it can extract the
corresponding data from the InferenceData object.
"""
raise NotImplementedError("sel_observations must be implemented on a model basis")
def sample(self, modified_observed_data):
"""Resample the PyStan model stored in self.model on modified_observed_data."""
fit = self.model.sampling(data=modified_observed_data, **self.sample_kwargs)
return fit
def get_inference_data(self, fit):
"""Convert the fit object returned by ``self.sample`` to InferenceData."""
idata = from_pystan(posterior=fit, **self.idata_kwargs)
return idata
def log_likelihood__i(self, excluded_obs_log_like, idata__i):
"""Retrieve the log likelihood of the excluded observations from ``idata__i``."""
return idata__i.log_likelihood[excluded_obs_log_like]
| 41.5 | 90 | 0.687993 |
from ..data import from_pystan
from .base import SamplingWrapper
class PyStanSamplingWrapper(SamplingWrapper):
def sel_observations(self, idx):
raise NotImplementedError("sel_observations must be implemented on a model basis")
def sample(self, modified_observed_data):
fit = self.model.sampling(data=modified_observed_data, **self.sample_kwargs)
return fit
def get_inference_data(self, fit):
idata = from_pystan(posterior=fit, **self.idata_kwargs)
return idata
def log_likelihood__i(self, excluded_obs_log_like, idata__i):
return idata__i.log_likelihood[excluded_obs_log_like]
| true | true |
f7160d0ab638df715f56f4ffaaf4cc3e1943ef2c | 1,835 | py | Python | project/server/auth/wrapper.py | RaihanSabique/Flask-Restful-JWT-Auth | a6be0cc72d4f697ac3cdfa41551de9633f6feb35 | [
"MIT"
] | null | null | null | project/server/auth/wrapper.py | RaihanSabique/Flask-Restful-JWT-Auth | a6be0cc72d4f697ac3cdfa41551de9633f6feb35 | [
"MIT"
] | null | null | null | project/server/auth/wrapper.py | RaihanSabique/Flask-Restful-JWT-Auth | a6be0cc72d4f697ac3cdfa41551de9633f6feb35 | [
"MIT"
] | null | null | null | import functools
from flask import Flask, request, make_response, jsonify
from flask_restful import Resource, Api, abort
from project.server.models import User
def login_required(method):
@functools.wraps(method)
def wrapper(self):
auth_header = request.headers.get('Authorization')
if auth_header:
try:
auth_token = auth_header.split(" ")[1]
except IndexError:
abort(400, message='Bearer token malformed.')
else:
auth_token = ''
if auth_token:
resp = User.decode_auth_token(auth_token)
print(resp)
if not isinstance(resp, str):
user = User.query.filter_by(id=resp).first()
if(user.is_active):
return method(self, user)
abort(400, message='Provide a valid auth token.')
else:
abort(400, message='No auth token')
return wrapper
def admin_required(method):
@functools.wraps(method)
def wrapper(self):
auth_header = request.headers.get('Authorization')
if auth_header:
try:
auth_token = auth_header.split(" ")[1]
except IndexError:
abort(400, message='Bearer token malformed.')
else:
auth_token = ''
if auth_token:
resp = User.decode_auth_token(auth_token)
print(resp)
if not isinstance(resp, str):
user = User.query.filter_by(id=resp).first()
if(user.admin):
return method(self, user)
else:
abort(400, message='Admin required.')
abort(400, message='Provide a valid auth token.')
else:
abort(400, message='No auth token')
return wrapper | 35.288462 | 61 | 0.559128 | import functools
from flask import Flask, request, make_response, jsonify
from flask_restful import Resource, Api, abort
from project.server.models import User
def login_required(method):
@functools.wraps(method)
def wrapper(self):
auth_header = request.headers.get('Authorization')
if auth_header:
try:
auth_token = auth_header.split(" ")[1]
except IndexError:
abort(400, message='Bearer token malformed.')
else:
auth_token = ''
if auth_token:
resp = User.decode_auth_token(auth_token)
print(resp)
if not isinstance(resp, str):
user = User.query.filter_by(id=resp).first()
if(user.is_active):
return method(self, user)
abort(400, message='Provide a valid auth token.')
else:
abort(400, message='No auth token')
return wrapper
def admin_required(method):
@functools.wraps(method)
def wrapper(self):
auth_header = request.headers.get('Authorization')
if auth_header:
try:
auth_token = auth_header.split(" ")[1]
except IndexError:
abort(400, message='Bearer token malformed.')
else:
auth_token = ''
if auth_token:
resp = User.decode_auth_token(auth_token)
print(resp)
if not isinstance(resp, str):
user = User.query.filter_by(id=resp).first()
if(user.admin):
return method(self, user)
else:
abort(400, message='Admin required.')
abort(400, message='Provide a valid auth token.')
else:
abort(400, message='No auth token')
return wrapper | true | true |
f7160d32694f94438915434613085cbed64d24f9 | 4,686 | py | Python | setup.py | itsalexis962/pycroscopy | 8a6557408ffdc332cef102616be16e26a396532f | [
"MIT"
] | 191 | 2016-06-19T18:34:40.000Z | 2022-03-28T08:30:30.000Z | setup.py | itsalexis962/pycroscopy | 8a6557408ffdc332cef102616be16e26a396532f | [
"MIT"
] | 115 | 2016-09-20T22:07:52.000Z | 2022-03-04T20:41:57.000Z | setup.py | itsalexis962/pycroscopy | 8a6557408ffdc332cef102616be16e26a396532f | [
"MIT"
] | 72 | 2016-09-20T10:19:22.000Z | 2022-03-05T12:18:48.000Z | from codecs import open
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst')) as f:
long_description = f.read()
with open(os.path.join(here, 'pycroscopy/__version__.py')) as f:
__version__ = f.read().split("'")[1]
# TODO: Move requirements to requirements.txt
requirements = ['numpy>=1.13.0',
'scipy>=0.17.1',
'scikit-image>=0.12.3',
'scikit-learn>=0.17.1',
'matplotlib>=2.0.0',
'torch>=1.0.0',
'tensorly>=0.6.0',
'psutil',
'six',
'pillow',
'joblib>=0.11.0',
'ipywidgets>=5.2.2',
'ipython>=5.1.0,<6;python_version<"3.3"', # IPython 6.0+ does not support Python 2.6, 2.7, 3.0, 3.1, or 3.2
'ipython>=6.0;python_version>="3.3"', # Beginning with IPython 6.0, Python 3.3 and above is required.
'unittest2;python_version<"3.0"',
'sidpy>=0.0.1',
'pyUSID>=0.0.8',
]
setup(
name='pycroscopy',
version=__version__,
description='Python library for scientific analysis of microscopy data',
long_description=long_description,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Cython',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Scientific/Engineering :: Information Analysis'],
keywords=['EELS', 'STEM', 'TEM', 'XRD', 'AFM', 'SPM', 'STS', 'band excitation', 'BE', 'BEPS', 'Raman', 'NanoIR',
'ptychography', 'g-mode', 'general mode', 'electron microscopy', ' scanning probe', ' x-rays', 'probe',
'atomic force microscopy', 'SIMS', 'energy', 'spectroscopy', 'imaging', 'microscopy', 'spectra'
'characterization', 'spectrogram', 'hyperspectral', 'multidimensional', 'data format', 'universal',
'clustering', 'decomposition', 'curve fitting', 'data analysis PCA', ' SVD', ' NMF', ' DBSCAN', ' kMeans',
'machine learning', 'bayesian inference', 'fft filtering', 'signal processing', 'image cleaning',
'denoising', 'model', 'msa', 'quantification',
'png', 'tiff', 'hdf5', 'igor', 'ibw', 'dm3', 'oneview', 'KPFM', 'FORC', 'ndata',
'Asylum', 'MFP3D', 'Cypher', 'Omicron', 'Nion', 'Nanonis', 'FEI'],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
url='https://pycroscopy.github.io/pycroscopy/about.html',
license='MIT',
author='S. Somnath, C. R. Smith, N. Laanait',
author_email='pycroscopy@gmail.com',
install_requires=requirements,
setup_requires=['pytest-runner'],
tests_require=['pytest'],
platforms=['Linux', 'Mac OSX', 'Windows 10/8.1/8/7'],
# package_data={'sample':['dataset_1.dat']}
test_suite='pytest',
extras_require={
'legacy_guis': ['pyqt5;python_version>="3.5"',
'pyqtgraph>=0.10']},
# dependency='',
# dependency_links=[''],
include_package_data=True,
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
| 43.388889 | 124 | 0.588988 | from codecs import open
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst')) as f:
long_description = f.read()
with open(os.path.join(here, 'pycroscopy/__version__.py')) as f:
__version__ = f.read().split("'")[1]
# TODO: Move requirements to requirements.txt
requirements = ['numpy>=1.13.0',
'scipy>=0.17.1',
'scikit-image>=0.12.3',
'scikit-learn>=0.17.1',
'matplotlib>=2.0.0',
'torch>=1.0.0',
'tensorly>=0.6.0',
'psutil',
'six',
'pillow',
'joblib>=0.11.0',
'ipywidgets>=5.2.2',
'ipython>=5.1.0,<6;python_version<"3.3"', # IPython 6.0+ does not support Python 2.6, 2.7, 3.0, 3.1, or 3.2
'ipython>=6.0;python_version>="3.3"', # Beginning with IPython 6.0, Python 3.3 and above is required.
'unittest2;python_version<"3.0"',
'sidpy>=0.0.1',
'pyUSID>=0.0.8',
]
setup(
name='pycroscopy',
version=__version__,
description='Python library for scientific analysis of microscopy data',
long_description=long_description,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Cython',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Scientific/Engineering :: Information Analysis'],
keywords=['EELS', 'STEM', 'TEM', 'XRD', 'AFM', 'SPM', 'STS', 'band excitation', 'BE', 'BEPS', 'Raman', 'NanoIR',
'ptychography', 'g-mode', 'general mode', 'electron microscopy', ' scanning probe', ' x-rays', 'probe',
'atomic force microscopy', 'SIMS', 'energy', 'spectroscopy', 'imaging', 'microscopy', 'spectra'
'characterization', 'spectrogram', 'hyperspectral', 'multidimensional', 'data format', 'universal',
'clustering', 'decomposition', 'curve fitting', 'data analysis PCA', ' SVD', ' NMF', ' DBSCAN', ' kMeans',
'machine learning', 'bayesian inference', 'fft filtering', 'signal processing', 'image cleaning',
'denoising', 'model', 'msa', 'quantification',
'png', 'tiff', 'hdf5', 'igor', 'ibw', 'dm3', 'oneview', 'KPFM', 'FORC', 'ndata',
'Asylum', 'MFP3D', 'Cypher', 'Omicron', 'Nion', 'Nanonis', 'FEI'],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
url='https://pycroscopy.github.io/pycroscopy/about.html',
license='MIT',
author='S. Somnath, C. R. Smith, N. Laanait',
author_email='pycroscopy@gmail.com',
install_requires=requirements,
setup_requires=['pytest-runner'],
tests_require=['pytest'],
platforms=['Linux', 'Mac OSX', 'Windows 10/8.1/8/7'],
# package_data={'sample':['dataset_1.dat']}
test_suite='pytest',
extras_require={
'legacy_guis': ['pyqt5;python_version>="3.5"',
'pyqtgraph>=0.10']},
# dependency='',
# dependency_links=[''],
include_package_data=True,
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
| true | true |
f7160db0d5fb20f368cc9ea3007c25dccdf69f7c | 4,197 | py | Python | tests/clvm/coin_store.py | Plotter-Network/plotter-blockchain | 13d10557496f37b9a001786ff837bdf34d8f1bcb | [
"Apache-2.0"
] | 1 | 2021-07-10T12:50:30.000Z | 2021-07-10T12:50:30.000Z | tests/clvm/coin_store.py | Plotter-Network/plotter-blockchain | 13d10557496f37b9a001786ff837bdf34d8f1bcb | [
"Apache-2.0"
] | null | null | null | tests/clvm/coin_store.py | Plotter-Network/plotter-blockchain | 13d10557496f37b9a001786ff837bdf34d8f1bcb | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
from dataclasses import dataclass, replace
from typing import Dict, Iterator, Set
from plotter.full_node.mempool_check_conditions import mempool_check_conditions_dict
from plotter.types.blockchain_format.coin import Coin
from plotter.types.blockchain_format.sized_bytes import bytes32
from plotter.types.coin_record import CoinRecord
from plotter.types.spend_bundle import SpendBundle
from plotter.util.condition_tools import (
conditions_dict_for_solution,
coin_announcement_names_for_conditions_dict,
puzzle_announcement_names_for_conditions_dict,
)
from plotter.util.ints import uint32, uint64
class BadSpendBundleError(Exception):
pass
@dataclass
class CoinTimestamp:
seconds: int
height: int
class CoinStore:
def __init__(self):
self._db: Dict[bytes32, CoinRecord] = dict()
self._ph_index = defaultdict(list)
def farm_coin(self, puzzle_hash: bytes32, birthday: CoinTimestamp, amount: int = 1024) -> Coin:
parent = birthday.height.to_bytes(32, "big")
coin = Coin(parent, puzzle_hash, uint64(amount))
self._add_coin_entry(coin, birthday)
return coin
def validate_spend_bundle(
self,
spend_bundle: SpendBundle,
now: CoinTimestamp,
max_cost: int,
) -> int:
# this should use blockchain consensus code
coin_announcements: Set[bytes32] = set()
puzzle_announcements: Set[bytes32] = set()
conditions_dicts = []
for coin_solution in spend_bundle.coin_solutions:
err, conditions_dict, cost = conditions_dict_for_solution(
coin_solution.puzzle_reveal, coin_solution.solution, max_cost
)
if conditions_dict is None:
raise BadSpendBundleError(f"clvm validation failure {err}")
conditions_dicts.append(conditions_dict)
coin_announcements.update(
coin_announcement_names_for_conditions_dict(conditions_dict, coin_solution.coin.name())
)
puzzle_announcements.update(
puzzle_announcement_names_for_conditions_dict(conditions_dict, coin_solution.coin.puzzle_hash)
)
for coin_solution, conditions_dict in zip(spend_bundle.coin_solutions, conditions_dicts):
prev_transaction_block_height = now.height
timestamp = now.seconds
coin_record = self._db[coin_solution.coin.name()]
err = mempool_check_conditions_dict(
coin_record,
coin_announcements,
puzzle_announcements,
conditions_dict,
uint32(prev_transaction_block_height),
uint64(timestamp),
)
if err is not None:
raise BadSpendBundleError(f"condition validation failure {err}")
return 0
def update_coin_store_for_spend_bundle(self, spend_bundle: SpendBundle, now: CoinTimestamp, max_cost: int):
err = self.validate_spend_bundle(spend_bundle, now, max_cost)
if err != 0:
raise BadSpendBundleError(f"validation failure {err}")
for spent_coin in spend_bundle.removals():
coin_name = spent_coin.name()
coin_record = self._db[coin_name]
self._db[coin_name] = replace(coin_record, spent_block_index=now.height, spent=True)
for new_coin in spend_bundle.additions():
self._add_coin_entry(new_coin, now)
def coins_for_puzzle_hash(self, puzzle_hash: bytes32) -> Iterator[Coin]:
for coin_name in self._ph_index[puzzle_hash]:
coin_entry = self._db[coin_name]
assert coin_entry.coin.puzzle_hash == puzzle_hash
yield coin_entry.coin
def all_coins(self) -> Iterator[Coin]:
for coin_entry in self._db.values():
yield coin_entry.coin
def _add_coin_entry(self, coin: Coin, birthday: CoinTimestamp) -> None:
name = coin.name()
assert name not in self._db
self._db[name] = CoinRecord(coin, uint32(birthday.height), uint32(0), False, False, uint64(birthday.seconds))
self._ph_index[coin.puzzle_hash].append(name)
| 38.861111 | 117 | 0.681677 | from collections import defaultdict
from dataclasses import dataclass, replace
from typing import Dict, Iterator, Set
from plotter.full_node.mempool_check_conditions import mempool_check_conditions_dict
from plotter.types.blockchain_format.coin import Coin
from plotter.types.blockchain_format.sized_bytes import bytes32
from plotter.types.coin_record import CoinRecord
from plotter.types.spend_bundle import SpendBundle
from plotter.util.condition_tools import (
conditions_dict_for_solution,
coin_announcement_names_for_conditions_dict,
puzzle_announcement_names_for_conditions_dict,
)
from plotter.util.ints import uint32, uint64
class BadSpendBundleError(Exception):
pass
@dataclass
class CoinTimestamp:
seconds: int
height: int
class CoinStore:
def __init__(self):
self._db: Dict[bytes32, CoinRecord] = dict()
self._ph_index = defaultdict(list)
def farm_coin(self, puzzle_hash: bytes32, birthday: CoinTimestamp, amount: int = 1024) -> Coin:
parent = birthday.height.to_bytes(32, "big")
coin = Coin(parent, puzzle_hash, uint64(amount))
self._add_coin_entry(coin, birthday)
return coin
def validate_spend_bundle(
self,
spend_bundle: SpendBundle,
now: CoinTimestamp,
max_cost: int,
) -> int:
coin_announcements: Set[bytes32] = set()
puzzle_announcements: Set[bytes32] = set()
conditions_dicts = []
for coin_solution in spend_bundle.coin_solutions:
err, conditions_dict, cost = conditions_dict_for_solution(
coin_solution.puzzle_reveal, coin_solution.solution, max_cost
)
if conditions_dict is None:
raise BadSpendBundleError(f"clvm validation failure {err}")
conditions_dicts.append(conditions_dict)
coin_announcements.update(
coin_announcement_names_for_conditions_dict(conditions_dict, coin_solution.coin.name())
)
puzzle_announcements.update(
puzzle_announcement_names_for_conditions_dict(conditions_dict, coin_solution.coin.puzzle_hash)
)
for coin_solution, conditions_dict in zip(spend_bundle.coin_solutions, conditions_dicts):
prev_transaction_block_height = now.height
timestamp = now.seconds
coin_record = self._db[coin_solution.coin.name()]
err = mempool_check_conditions_dict(
coin_record,
coin_announcements,
puzzle_announcements,
conditions_dict,
uint32(prev_transaction_block_height),
uint64(timestamp),
)
if err is not None:
raise BadSpendBundleError(f"condition validation failure {err}")
return 0
def update_coin_store_for_spend_bundle(self, spend_bundle: SpendBundle, now: CoinTimestamp, max_cost: int):
err = self.validate_spend_bundle(spend_bundle, now, max_cost)
if err != 0:
raise BadSpendBundleError(f"validation failure {err}")
for spent_coin in spend_bundle.removals():
coin_name = spent_coin.name()
coin_record = self._db[coin_name]
self._db[coin_name] = replace(coin_record, spent_block_index=now.height, spent=True)
for new_coin in spend_bundle.additions():
self._add_coin_entry(new_coin, now)
def coins_for_puzzle_hash(self, puzzle_hash: bytes32) -> Iterator[Coin]:
for coin_name in self._ph_index[puzzle_hash]:
coin_entry = self._db[coin_name]
assert coin_entry.coin.puzzle_hash == puzzle_hash
yield coin_entry.coin
def all_coins(self) -> Iterator[Coin]:
for coin_entry in self._db.values():
yield coin_entry.coin
def _add_coin_entry(self, coin: Coin, birthday: CoinTimestamp) -> None:
name = coin.name()
assert name not in self._db
self._db[name] = CoinRecord(coin, uint32(birthday.height), uint32(0), False, False, uint64(birthday.seconds))
self._ph_index[coin.puzzle_hash].append(name)
| true | true |
f7160e0c8e9137a12f16d8b789254f485f26bc0b | 598 | py | Python | src/lib/datasets/dataset_factory.py | nerminsamet/HPRNet | a23e691102ed50bd24391e6295c74f452592cdae | [
"MIT"
] | 34 | 2021-06-09T16:47:59.000Z | 2022-03-29T08:03:46.000Z | src/lib/datasets/dataset_factory.py | nerminsamet/HPRNet | a23e691102ed50bd24391e6295c74f452592cdae | [
"MIT"
] | 3 | 2021-12-14T11:47:06.000Z | 2022-03-17T04:08:39.000Z | src/lib/datasets/dataset_factory.py | nerminsamet/HPRNet | a23e691102ed50bd24391e6295c74f452592cdae | [
"MIT"
] | 4 | 2021-06-10T07:44:15.000Z | 2021-08-30T07:12:40.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .sample.multi_pose import MultiPoseDataset
from .sample.landmark import LandmarkDataset
from src.lib.datasets.dataset.coco_hp import COCOHP
from src.lib.datasets.dataset.coco_body import COCOBODY
dataset_factory = {
'coco_hp': COCOHP,
'coco_body': COCOBODY
}
_sample_factory = {
'multi_pose': MultiPoseDataset,
'landmark': LandmarkDataset,
}
def get_dataset(dataset, task):
class Dataset(dataset_factory[dataset], _sample_factory[task]):
pass
return Dataset
| 21.357143 | 65 | 0.792642 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .sample.multi_pose import MultiPoseDataset
from .sample.landmark import LandmarkDataset
from src.lib.datasets.dataset.coco_hp import COCOHP
from src.lib.datasets.dataset.coco_body import COCOBODY
dataset_factory = {
'coco_hp': COCOHP,
'coco_body': COCOBODY
}
_sample_factory = {
'multi_pose': MultiPoseDataset,
'landmark': LandmarkDataset,
}
def get_dataset(dataset, task):
class Dataset(dataset_factory[dataset], _sample_factory[task]):
pass
return Dataset
| true | true |
f7160e1009ab83e8020f0a7d0f081242b48b6c74 | 1,089 | py | Python | users/migrations/0001_initial.py | pollitosabroson/retoglobal | 456af32516935fb834c9f78359754614635e9910 | [
"Apache-2.0"
] | null | null | null | users/migrations/0001_initial.py | pollitosabroson/retoglobal | 456af32516935fb834c9f78359754614635e9910 | [
"Apache-2.0"
] | null | null | null | users/migrations/0001_initial.py | pollitosabroson/retoglobal | 456af32516935fb834c9f78359754614635e9910 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-10 13:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('genres', '0001_initial'),
('hobbies', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('age', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('genre', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genres.Genre')),
('hobbies', models.ManyToManyField(to='hobbies.Hobbie')),
],
options={
'verbose_name': 'User',
'verbose_name_plural': 'Userss',
},
),
]
| 31.114286 | 114 | 0.56933 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('genres', '0001_initial'),
('hobbies', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('age', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('genre', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genres.Genre')),
('hobbies', models.ManyToManyField(to='hobbies.Hobbie')),
],
options={
'verbose_name': 'User',
'verbose_name_plural': 'Userss',
},
),
]
| true | true |
f7160f1c838fc7c07e3729784983c83446032a75 | 8,147 | py | Python | mctsPlayer.py | dspub99/betazero | b1adf9885166e6fb4974952292653efeea1b19dc | [
"MIT"
] | 11 | 2018-11-23T10:48:00.000Z | 2020-11-24T07:51:32.000Z | mctsPlayer.py | dspub99/betazero | b1adf9885166e6fb4974952292653efeea1b19dc | [
"MIT"
] | null | null | null | mctsPlayer.py | dspub99/betazero | b1adf9885166e6fb4974952292653efeea1b19dc | [
"MIT"
] | 1 | 2018-11-25T15:43:41.000Z | 2018-11-25T15:43:41.000Z | #!/usr/bin/env python
import numpy as np
from randomPlayer import RandomPlayer
import game
import play
# Run MCTS with MC to estimate the rest of the game.
# http://mcts.ai/about/index.html
# http://ccg.doc.gold.ac.uk/wp-content/uploads/2016/10/browne_tciaig12_1.pdf
class UCT:
    """Upper Confidence Bound for Trees (UCB1 applied to tree search).

    Scores a child node as exploitation (mean reward) plus a
    c-scaled exploration bonus; unvisited children score infinity.
    """
    def __init__(self, c):
        # Exploration constant; c = 0 degenerates to pure exploitation.
        self._c = c
    def parts(self, pNode, node):
        """Return the (exploitation, exploration) terms for `node` under parent `pNode`."""
        exploit = node.sum / node.n
        explore = 2 * self._c * np.sqrt(2 * np.log(pNode.n) / node.n)
        return (exploit, explore)
    def __call__(self, pNode, node):
        """UCT score of `node`; never-visited nodes are maximally urgent."""
        if node.n == 0:
            return np.inf
        exploit, explore = self.parts(pNode, node)
        return exploit + explore
class UCTNegamax:
    """UCT tree policy with a negamax twist.

    The exploitation term is multiplied by the parent's chi (+1/-1) so
    that alternating plies are scored from the moving player's view.
    Rewards here live in [0, 1] rather than [-1, 1]; negating instead of
    using 1 - score only shifts the exploitation term by a constant,
    which cannot change which child maximizes exploit + explore.
    """
    def __init__(self, c):
        # Delegate the raw UCB1 arithmetic to a plain UCT instance.
        self._uct = UCT(c)
    def __call__(self, pNode, node):
        if node.n == 0:
            return np.inf
        exploit, explore = self._uct.parts(pNode, node)
        # pNode.chi flips the sign of exploitation on opponent plies.
        return pNode.chi * exploit + explore
class Node:
    """One node of the MCTS tree: a game state plus visit statistics.

    Each node owns a clone of the game object (`ttt`) positioned at its
    state, links to its parent and children, the move that produced it,
    and running reward statistics (sum / n) used by the UCT policy.
    """
    def __init__(self, nprand, ttt, chi, maxPlies, parent=None, move=None):
        self._nprand = nprand
        # each Node has a clone of ttt with the Node's game state
        self.maxPlies = maxPlies
        # chi is +1/-1, alternating per ply; consumed by UCTNegamax.
        self.chi = chi
        self.parent = parent
        self.ttt = ttt
        self.move = move
        # Running reward statistics: total score and visit count.
        self.sum = 0
        self.n = 0
        self.children = []
        # Moves not yet expanded into child nodes; node is "expandable"
        # while this list is non-empty.
        self._needMoves = list(self.ttt.validMoves())
    def dump(self):
        """Debug print: one line per tree depth, 'sum/n(maxPlies)' per node
        (sum and n doubled so draws at 0.5 print as integers)."""
        n = 0
        queue = [self]
        while len(queue) > 0:
            # queue[0].ttt.dump()
            s = [str(n), " "*n]
            newQueue = []
            n += 1
            for node in queue:
                s.append("%d/%d(%d)" % (2*node.sum, 2*node.n, node.maxPlies))
                newQueue.extend(node.children)
            print (' '.join(s))
            queue = newQueue
    def check_parentage(self):
        """Invariant check: every child's parent pointer refers back here."""
        # Am I my children's parent?
        for c in self.children:
            assert(c.parent == self)
            c.check_parentage()
    def bestChild(self, uct):
        """Return the child maximizing the given tree policy, breaking
        near-ties (within 1e-6) uniformly at random."""
        assert(len(self.children)>0)
        phis = []
        for c in self.children:
            # print ("CHILD:", uct(self, c))
            phis.append(uct(self, c))
        phis = np.array(phis)
        i = self._nprand.choice(np.where(phis > phis.max() - 1e-6)[0])
        return self.children[i]
    def findBoard(self, ttt):
        """Return the child whose board equals ttt's, or None.
        Only searches direct children (exactly one ply ahead)."""
        # exactly one ply ahead
        for c in self.children:
            if ttt.equivBoard(c.ttt.board()):
                return c
        return None
    def select(self, uct):
        """MCTS selection step: descend via `uct` to an expandable node."""
        # "Starting at the root node, a child selection policy is recursively applied to descend
        # through the tree until the most urgent expandable node is reached. A node is expandable if
        # it represents a nonterminal state and has unvisited (i.e. unexpanded) children"
        if len(self._needMoves) > 0:
            return self
        if len(self.children)==0:
            # Terminal state: nothing to expand anywhere below.
            return None
        return self.bestChild(uct).select(uct)
    def expand(self):
        """MCTS expansion step: create one child for an untried move and
        return it.  At the ply limit, return self so another simulation is
        run from this node instead of growing the tree."""
        # "One (or more) child nodes are added to expand the tree, according to the
        # available actions."
        assert( len(self._needMoves) > 0 )
        if self.maxPlies==0:
            # just run another sim from here
            return self
        m = self._nprand.choice(self._needMoves)
        self._needMoves.remove(m)
        ttt = self.ttt.clone()
        ttt.add(m)
        # Child flips chi (opponent to move) and has one fewer ply budget.
        c = Node(self._nprand, ttt, -self.chi, self.maxPlies - 1, self, m.clone())
        self.children.append(c)
        return c
    def backpropagate(self, score):
        """MCTS backup step: add `score` to this node and all ancestors."""
        # "The simulation result is backed up (i.e. backpropagated)
        # through the selected nodes to update their statistics."
        self.n += 1
        self.sum += score
        if self.parent is not None:
            self.parent.backpropagate(score)
    def __str__(self):
        return "sum = %.4f n = %d nChildren = %d self = %s parent = %s" % (self.sum, self.n, len(self.children), id(self), id(self.parent))
class MCTSPlayer:
    """Monte-Carlo Tree Search player.

    Each call to move() runs `nPlay` iterations of the classic MCTS cycle
    (select with the UCT policy, expand one untried move, simulate the rest
    of the game with uniform-random play, backpropagate the result), then
    commits to the child with the best mean score.  The subtree under the
    chosen child is retained across moves so earlier simulations are reused.
    """
    def __init__(self, nPlay, maxPlies, bNegamax, cUct = 1/np.sqrt(2), bDump=False):
        # nPlay:    simulations per call to move().
        # maxPlies: depth limit of the stored tree; states beyond it are
        #           only simulated, never expanded.
        # bNegamax: score children from the side-to-move's perspective.
        # cUct:     exploration constant of the tree policy.
        # bDump:    print per-depth tree statistics after each move (debug).
        self._nPlay = nPlay
        self._maxPlies = maxPlies
        if bNegamax:
            self._uct = UCTNegamax(cUct)
        else:
            self._uct = UCT(cUct)
        self._cUct = cUct
        self._bNegamax = bNegamax
        self._bDump = bDump
        # c = 0: the final move choice is pure exploitation (mean score).
        self._uctMove = UCT(0)
        self._rp = RandomPlayer()
        self._nprand = np.random.RandomState()
        self._root = None
    def __str__(self):
        return ("%s nPlay = %d maxPlies = %d bNegamax = %s cUct = %.4f" %
                (self.__class__.__name__, self._nPlay, self._maxPlies,
                 self._bNegamax, self._cUct))
    def _simulate(self, node):
        """Default policy: play the game out with two random players and
        return the winner marker (or the draw sentinel)."""
        return play.playRest(self._rp, self._rp, node.ttt.clone(), False, 99999)[0]
    def setSeed(self, seed):
        """Seed the tree policy's RNG and (offset) the rollout player's RNG."""
        self._nprand.seed(seed)
        self._rp.setSeed(seed+1)
    def move(self, ttt):
        """Choose and return a move for the position in `ttt`.

        Re-roots the retained tree on the child matching the opponent's
        actual reply when possible; otherwise starts a fresh tree.
        """
        if self._root is not None:
            self._root = self._root.findBoard(ttt)
        if self._root is None:
            self._root = Node(self._nprand, ttt, 1, maxPlies=self._maxPlies)
        for _ in range(self._nPlay):
            nodeLeaf = self._root.select(self._uct)
            if nodeLeaf is not None:
                nodeSim = nodeLeaf.expand()
                if nodeSim is not None:
                    w = self._simulate(nodeSim)
                    # Score from the root player's perspective:
                    # win = 1, draw = 0.5, loss = 0.
                    if w == ttt.whoseTurn():
                        score = 1
                    elif w == game.Draw:
                        score = .5
                    else:
                        score = 0
                    nodeSim.backpropagate(score)
        if self._bDump:
            self._root.dump()
        # Advance the root to the chosen child so its subtree is reused.
        self._root = self._root.bestChild(self._uctMove)
        return self._root.move
    def tests(self):
        """Sanity-check the tree's parent/child invariants."""
        self._root.check_parentage()
if __name__ == "__main__":
    # Ad-hoc driver: play one demo game, or (flip `if True` below) run a
    # 100-game benchmark of MCTS vs. a Monte-Carlo player.
    from ticTacToe import TicTacToe
    from mmPlayer import MMPlayer
    from mcPlayer import MCPlayer
    nPlay = 100
    maxPlies = 1000
    bNegamax = True
    cUct = 1/np.sqrt(2)
    if True:
        # Single visible game with tree dumps enabled.
        mcts = MCTSPlayer(nPlay = nPlay, maxPlies = maxPlies, bNegamax = bNegamax,
                          cUct = cUct, bDump=True)
        mcts.setSeed(1)
        mc10 = MCPlayer(nPlay=10)
        mc10.setSeed(2)
        play.play(TicTacToe, mcts, mc10, bShow = True)
    else:
        # Benchmark loop: mean score for X (1 win, .5 draw, 0 loss).
        score = []
        for _ in range(100):
            mcts = MCTSPlayer(nPlay = nPlay, maxPlies = maxPlies, bNegamax = bNegamax,
                              cUct = cUct)
            # mc10 vs. mc10 gives .79, fyi
            # mcts100_mp=1_c=1e6 vs. mc 10 gives .82
            # mcts100_mp=1_c=1/sqrt(2) vs. mc 10 gives .82
            # mcts100_mp=1_c=0 vs. mc 10 gives .82
            # mcts100_mp=2_c=0 vs. mc 10 gives .855
            # mcts100_mp=3_c=0 vs. mc 10 gives .83
            # mcts100_mp=3_c=1/sqrt(2) vs. mc 10 gives .86
            # mcts100_mp=3_c=1/sqrt(2)_negamax vs. mc 10 gives .86
            # mcts100_mp=1000_c=1/sqrt(2)_negamax vs. mc 10 gives .83
            # mcts1000_mp=1000_c=1/sqrt(2)_negamax vs. mc 10 gives .94
            # mcts1000_mp=1000_c=1/sqrt(2) vs. mc 10 gives .83
            w = play.play(TicTacToe, MCPlayer(nPlay=100), mcts, bShow = False)
            if w == 'X':
                score.append(1)
            elif w == 'D':
                score.append(.5)
            else:
                score.append(0)
        print (np.array(score).mean())
import numpy as np
from randomPlayer import RandomPlayer
import game
import play
class UCT:
    """UCB1-style tree policy: mean reward plus a scaled exploration bonus."""
    def __init__(self, c):
        # c tunes exploration; zero means greedy (exploitation only).
        self._c = c
    def parts(self, pNode, node):
        """(exploitation, exploration) pair for child `node` of `pNode`."""
        mean_reward = node.sum / node.n
        bonus = 2 * self._c * np.sqrt(2 * np.log(pNode.n) / node.n)
        return (mean_reward, bonus)
    def __call__(self, pNode, node):
        """Total UCT score; an unvisited node is infinitely attractive."""
        if node.n == 0:
            return np.inf
        mean_reward, bonus = self.parts(pNode, node)
        return mean_reward + bonus
class UCTNegamax:
    """Negamax flavour of UCT: exploitation is weighted by the parent's
    chi (+1/-1) so alternating plies are valued for the player to move.

    Because rewards are in [0, 1] (not [-1, 1]), negating rather than
    computing 1 - score merely offsets exploitation by a constant, which
    leaves the argmax over children unchanged.
    """
    def __init__(self, c):
        # Reuse plain UCT for the underlying arithmetic.
        self._uct = UCT(c)
    def __call__(self, pNode, node):
        if node.n == 0:
            return np.inf
        mean_reward, bonus = self._uct.parts(pNode, node)
        return pNode.chi * mean_reward + bonus
class Node:
    """MCTS tree node: a cloned game state plus visit/reward statistics,
    parent/child links, and the list of not-yet-expanded moves."""
    def __init__(self, nprand, ttt, chi, maxPlies, parent=None, move=None):
        self._nprand = nprand
        # ttt is a clone positioned at this node's state; chi (+1/-1)
        # alternates per ply and drives the negamax policy.
        self.maxPlies = maxPlies
        self.chi = chi
        self.parent = parent
        self.ttt = ttt
        self.move = move
        # Reward total and visit count for UCT.
        self.sum = 0
        self.n = 0
        self.children = []
        # Untried moves; the node is expandable while this is non-empty.
        self._needMoves = list(self.ttt.validMoves())
    def dump(self):
        """Debug print: one line per depth, 'sum/n(maxPlies)' per node
        (values doubled so half-point draws print as integers)."""
        n = 0
        queue = [self]
        while len(queue) > 0:
            # queue[0].ttt.dump()
            s = [str(n), " "*n]
            newQueue = []
            n += 1
            for node in queue:
                s.append("%d/%d(%d)" % (2*node.sum, 2*node.n, node.maxPlies))
                newQueue.extend(node.children)
            print (' '.join(s))
            queue = newQueue
    def check_parentage(self):
        # Am I my children's parent?  (Recursive invariant check.)
        for c in self.children:
            assert(c.parent == self)
            c.check_parentage()
    def bestChild(self, uct):
        """Child maximizing `uct`; near-ties (1e-6) broken at random."""
        assert(len(self.children)>0)
        phis = []
        for c in self.children:
            phis.append(uct(self, c))
        phis = np.array(phis)
        i = self._nprand.choice(np.where(phis > phis.max() - 1e-6)[0])
        return self.children[i]
    def findBoard(self, ttt):
        """Direct child (one ply ahead) whose board matches ttt, or None."""
        for c in self.children:
            if ttt.equivBoard(c.ttt.board()):
                return c
        return None
    def select(self, uct):
        # Selection step: recursively descend via the tree policy
        # through the tree until the most urgent expandable node is reached. A node is expandable if
        # it represents a nonterminal state and has unvisited (i.e. unexpanded) children.
        if len(self._needMoves) > 0:
            return self
        if len(self.children)==0:
            # Terminal state below: nothing to expand.
            return None
        return self.bestChild(uct).select(uct)
    def expand(self):
        # Expansion step: add one child for an untried move among the
        # available actions.  At the ply budget, return self so another
        # simulation runs from here instead of growing the tree.
        assert( len(self._needMoves) > 0 )
        if self.maxPlies==0:
            return self
        m = self._nprand.choice(self._needMoves)
        self._needMoves.remove(m)
        ttt = self.ttt.clone()
        ttt.add(m)
        c = Node(self._nprand, ttt, -self.chi, self.maxPlies - 1, self, m.clone())
        self.children.append(c)
        return c
    def backpropagate(self, score):
        # Backup step: fold the simulation result into this node and all
        # ancestors, through the selected nodes, to update their statistics.
        self.n += 1
        self.sum += score
        if self.parent is not None:
            self.parent.backpropagate(score)
    def __str__(self):
        return "sum = %.4f n = %d nChildren = %d self = %s parent = %s" % (self.sum, self.n, len(self.children), id(self), id(self.parent))
class MCTSPlayer:
    """Monte-Carlo Tree Search player: per move, runs nPlay iterations of
    select / expand / random-rollout / backpropagate, then commits to the
    child with the best mean score.  The chosen child's subtree is kept
    across moves so earlier simulations are reused."""
    def __init__(self, nPlay, maxPlies, bNegamax, cUct = 1/np.sqrt(2), bDump=False):
        # nPlay: simulations per move; maxPlies: tree depth limit;
        # bNegamax: opponent-perspective scoring; cUct: exploration
        # constant; bDump: print tree stats after each move.
        self._nPlay = nPlay
        self._maxPlies = maxPlies
        if bNegamax:
            self._uct = UCTNegamax(cUct)
        else:
            self._uct = UCT(cUct)
        self._cUct = cUct
        self._bNegamax = bNegamax
        self._bDump = bDump
        # c = 0: final move selection is pure exploitation.
        self._uctMove = UCT(0)
        self._rp = RandomPlayer()
        self._nprand = np.random.RandomState()
        self._root = None
    def __str__(self):
        return ("%s nPlay = %d maxPlies = %d bNegamax = %s cUct = %.4f" %
                (self.__class__.__name__, self._nPlay, self._maxPlies,
                 self._bNegamax, self._cUct))
    def _simulate(self, node):
        # Default policy: finish the game with random play; return winner.
        return play.playRest(self._rp, self._rp, node.ttt.clone(), False, 99999)[0]
    def setSeed(self, seed):
        # Seed the tree policy RNG and (offset) the rollout player's RNG.
        self._nprand.seed(seed)
        self._rp.setSeed(seed+1)
    def move(self, ttt):
        """Run nPlay MCTS iterations from the state in `ttt`, return the
        chosen move, and advance the retained tree to that child."""
        if self._root is not None:
            # Re-root on the child matching the opponent's actual reply.
            self._root = self._root.findBoard(ttt)
        if self._root is None:
            self._root = Node(self._nprand, ttt, 1, maxPlies=self._maxPlies)
        # NOTE(review): `marker` is never used below; candidate for removal.
        marker = ttt.whoseTurn()
        for _ in range(self._nPlay):
            nodeLeaf = self._root.select(self._uct)
            if nodeLeaf is not None:
                nodeSim = nodeLeaf.expand()
                if nodeSim is not None:
                    w = self._simulate(nodeSim)
                    # Score from the root player's view: 1 win, .5 draw, 0 loss.
                    if w == ttt.whoseTurn():
                        score = 1
                    elif w == game.Draw:
                        score = .5
                    else:
                        score = 0
                    nodeSim.backpropagate(score)
        if self._bDump:
            self._root.dump()
        self._root = self._root.bestChild(self._uctMove)
        return self._root.move
    def tests(self):
        # Sanity-check tree parent/child invariants.
        self._root.check_parentage()
if __name__ == "__main__":
    # Demo / benchmark driver.  The `if True` branch plays one visible
    # game; flip it to False to run a 100-game benchmark instead.
    from ticTacToe import TicTacToe
    from mmPlayer import MMPlayer
    from mcPlayer import MCPlayer
    nPlay = 100
    maxPlies = 1000
    bNegamax = True
    cUct = 1/np.sqrt(2)
    if True:
        mcts = MCTSPlayer(nPlay = nPlay, maxPlies = maxPlies, bNegamax = bNegamax,
                          cUct = cUct, bDump=True)
        mcts.setSeed(1)
        mc10 = MCPlayer(nPlay=10)
        mc10.setSeed(2)
        play.play(TicTacToe, mcts, mc10, bShow = True)
    else:
        # Mean score for X over 100 games: 1 win, .5 draw, 0 loss.
        score = []
        for _ in range(100):
            mcts = MCTSPlayer(nPlay = nPlay, maxPlies = maxPlies, bNegamax = bNegamax,
                              cUct = cUct)
            w = play.play(TicTacToe, MCPlayer(nPlay=100), mcts, bShow = False)
            if w == 'X':
                score.append(1)
            elif w == 'D':
                score.append(.5)
            else:
                score.append(0)
        print (np.array(score).mean())
| true | true |
f7161020c4bf4dad2c0c0ebf7e4bb050b02a52e1 | 15,471 | py | Python | tests/controllers/test_api_controller.py | Moesif/moesifapi-python | c1e8b0feab51fdd830154bf981a102c5162943ac | [
"Apache-2.0"
] | 5 | 2017-01-28T17:09:28.000Z | 2020-03-10T19:59:31.000Z | tests/controllers/test_api_controller.py | Moesif/moesifapi-python | c1e8b0feab51fdd830154bf981a102c5162943ac | [
"Apache-2.0"
] | null | null | null | tests/controllers/test_api_controller.py | Moesif/moesifapi-python | c1e8b0feab51fdd830154bf981a102c5162943ac | [
"Apache-2.0"
] | 1 | 2019-05-12T18:37:28.000Z | 2019-05-12T18:37:28.000Z | # -*- coding: utf-8 -*-
"""
tests.controllers.test_api_controller
"""
import jsonpickle
from .controller_test_base import *
from moesifapi.models import *
from datetime import *
class ApiControllerTests(ControllerTestBase):
    """Integration tests for the Moesif ingestion API controller:
    event, user and company create/update endpoints plus app config.
    NOTE(review): assertEquals is a deprecated unittest alias of
    assertEqual; consider modernizing in a follow-up."""
    @classmethod
    def setUpClass(cls):
        # Share one API controller across all tests in this class.
        super(ApiControllerTests, cls).setUpClass()
        cls.controller = cls.api_client.api
    # Add Single Event via Ingestion API
    def test_add_event(self):
        # Parameters for the API call
        req_headers = APIHelper.json_deserialize(""" {
              "Host": "api.acmeinc.com",
              "Accept": "*/*",
              "Connection": "Keep-Alive",
              "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)",
              "Content-Type": "application/json",
              "Content-Length": "126",
              "Accept-Encoding": "gzip"
            } """)
        req_body = APIHelper.json_deserialize( """{
              "items": [
                {
                  "type": 1,
                  "id": "fwfrf"
                },
                {
                  "type": 2,
                  "id": "d43d3f"
                }
              ]
            }""")
        rsp_headers = APIHelper.json_deserialize(""" {
              "Date": "Tue, 20 Aug 2019 23:46:49 GMT",
              "Vary": "Accept-Encoding",
              "Pragma": "no-cache",
              "Expires": "-1",
              "Content-Type": "application/json; charset=utf-8",
              "Cache-Control": "no-cache"
            } """)
        rsp_body = APIHelper.json_deserialize( """{
              "Error": "InvalidArgumentException",
              "Message": "Missing field field_a"
            }""")
        metadata = APIHelper.json_deserialize("""{
            "field1": "foo",
            "field2": "bar"
            }""")
        # Request timestamped 1s before the response so the pair is ordered.
        event_req = EventRequestModel(time = datetime.utcnow() - timedelta(seconds=1),
            uri = "https://api.acmeinc.com/items/reviews?&page=0&page_size=12&region[]=Overig&sort=relevance",
            verb = "PATCH",
            api_version = "1.1.0",
            ip_address = "61.48.220.123",
            headers = req_headers,
            body = req_body)
        event_rsp = EventResponseModel(time = datetime.utcnow(),
            status = 200,
            headers = rsp_headers,
            body = rsp_body)
        event_model = EventModel(request = event_req,
            response = event_rsp,
            user_id = "my_user_id",
            company_id = "my_company_id",
            session_token = "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
            metadata = metadata)
        # Perform the API call through the SDK function
        self.controller.create_event(event_model)
        # Test response code
        self.assertEquals(self.response_catcher.response.status_code, 201)
    # Add Batched Events via Ingestion API
    def test_add_batched_events(self):
        # Parameters for the API call: six pre-built events (timestamps are
        # overwritten below so the server accepts them as recent).
        body = APIHelper.json_deserialize('[{ "metadata": { "foo" : "bar" }, "request": { "time": "2016-09-09T04:45:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:45:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:46:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:46:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; 
charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:47:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:47:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:48:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", 
"liked": true } ] } }, "response": { "time": "2016-09-09T04:48:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "exfzweachxjgznvKUYrxFcxv]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:49:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:49:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:50:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", 
"Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:50:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "recvreedfef", "session_token": "xcvkrjmcfghwuignrmcmhxdhaaezse4w]s98y18cx98q3yhwmnhcfx43f" } ]', EventModel.from_dictionary)
        # Refresh timestamps: request 1s before a just-now response.
        for val in body:
            val.request.time = datetime.utcnow() - timedelta(seconds=1)
            val.response.time = datetime.utcnow()
        # Perform the API call through the SDK function
        self.controller.create_events_batch(body)
        # Test response code
        self.assertEquals(self.response_catcher.response.status_code, 201)
    # Update Single User via Ingestion API
    def test_update_user(self):
        # Parameters for the API call
        metadata = APIHelper.json_deserialize(""" {
                "email": "pythonapiuser@email.com",
                "name": "pythonapiuser",
                "custom": "testdata"
            } """)
        user_model = UserModel(
            user_id="12345",
            company_id="67890",
            session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
            modified_time=datetime.utcnow(),
            metadata=metadata,
            campaign=CampaignModel(utm_source="Newsletter", utm_medium="Email"))
        # Perform the API call through the SDK function
        self.controller.update_user(user_model)
        # Test response code
        self.assertEquals(self.response_catcher.response.status_code, 201)
    # Update Batched Users via Ingestion API
    def test_update_users_batch(self):
        # Parameter for the API call: one minimal user, one with metadata.
        body = [UserModel(user_id="1234", company_id="6789", modified_time=datetime.utcnow(),
                          session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f", ),
                UserModel(user_id="12345", company_id="67890", modified_time=datetime.utcnow(),
                          session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
                          metadata=APIHelper.json_deserialize(""" {"email": "pythonapiuser@email.com",
                          "name": "pythonapiuser", "string_field": "value_1", "number_field": 0 } """))]
        # Perform the API call through the SDK function
        self.controller.update_users_batch(body)
        # Test Response code
        self.assertEquals(self.response_catcher.response.status_code, 201)
    # Get Application configuration
    def test_get_app_config(self):
        # Perform the API call through the SDK function
        response = self.controller.get_app_config().__dict__
        # Test Response code and that a config body + ETag header came back.
        self.assertEquals(self.response_catcher.response.status_code, 200)
        self.assertIsNotNone(response["raw_body"])
        self.assertIsNotNone(response["headers"]["X-Moesif-Config-ETag"])
    # Add Single company via Ingestion API
    def test_update_company(self):
        # Parameter for the API call
        company_model = CompanyModel(
            company_id="67890",
            modified_time=datetime.utcnow(),
            campaign=CampaignModel(utm_source="Adwords", utm_medium="Twitter"))
        # Perform the API call through the SDK function
        self.controller.update_company(company_model)
        # Test Response code
        self.assertEquals(self.response_catcher.response.status_code, 201)
    # Add Batched Companies via Ingestion API
    def test_update_companies_batch(self):
        # Parameter for the API call: one plain company, one with metadata.
        body = [CompanyModel(company_id="67890", modified_time=datetime.utcnow(), company_domain="moesif"),
                CompanyModel(company_id="6789", modified_time=datetime.utcnow(), company_domain="moesif",
                             metadata=APIHelper.json_deserialize(""" {"string_field": "value_1", "number_field": 0 } """))]
        # Perform the API call through the SDK function
        self.controller.update_companies_batch(body)
        # Test Response code
        self.assertEquals(self.response_catcher.response.status_code, 201)
| 82.73262 | 8,873 | 0.548316 |
import jsonpickle
from .controller_test_base import *
from moesifapi.models import *
from datetime import *
class ApiControllerTests(ControllerTestBase):
    @classmethod
    def setUpClass(cls):
        # One-time setup: reuse the base class's API client and expose its
        # controller to every test method.
        super(ApiControllerTests, cls).setUpClass()
        cls.controller = cls.api_client.api
    def test_add_event(self):
        # Builds one synthetic request/response pair and posts it as a
        # single event; expects HTTP 201 Created from the ingestion API.
        req_headers = APIHelper.json_deserialize(""" {
              "Host": "api.acmeinc.com",
              "Accept": "*/*",
              "Connection": "Keep-Alive",
              "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)",
              "Content-Type": "application/json",
              "Content-Length": "126",
              "Accept-Encoding": "gzip"
            } """)
        req_body = APIHelper.json_deserialize( """{
              "items": [
                {
                  "type": 1,
                  "id": "fwfrf"
                },
                {
                  "type": 2,
                  "id": "d43d3f"
                }
              ]
            }""")
        rsp_headers = APIHelper.json_deserialize(""" {
              "Date": "Tue, 20 Aug 2019 23:46:49 GMT",
              "Vary": "Accept-Encoding",
              "Pragma": "no-cache",
              "Expires": "-1",
              "Content-Type": "application/json; charset=utf-8",
              "Cache-Control": "no-cache"
            } """)
        rsp_body = APIHelper.json_deserialize( """{
              "Error": "InvalidArgumentException",
              "Message": "Missing field field_a"
            }""")
        metadata = APIHelper.json_deserialize("""{
            "field1": "foo",
            "field2": "bar"
            }""")
        # Request is stamped 1s before the response to keep them ordered.
        event_req = EventRequestModel(time = datetime.utcnow() - timedelta(seconds=1),
            uri = "https://api.acmeinc.com/items/reviews?&page=0&page_size=12&region[]=Overig&sort=relevance",
            verb = "PATCH",
            api_version = "1.1.0",
            ip_address = "61.48.220.123",
            headers = req_headers,
            body = req_body)
        event_rsp = EventResponseModel(time = datetime.utcnow(),
            status = 200,
            headers = rsp_headers,
            body = rsp_body)
        event_model = EventModel(request = event_req,
            response = event_rsp,
            user_id = "my_user_id",
            company_id = "my_company_id",
            session_token = "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
            metadata = metadata)
        # Send the event through the SDK and check for 201 Created.
        self.controller.create_event(event_model)
        self.assertEquals(self.response_catcher.response.status_code, 201)
def test_add_batched_events(self):
body = APIHelper.json_deserialize('[{ "metadata": { "foo" : "bar" }, "request": { "time": "2016-09-09T04:45:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:45:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:46:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:46:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; 
charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:47:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:47:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:48:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", 
"liked": true } ] } }, "response": { "time": "2016-09-09T04:48:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "exfzweachxjgznvKUYrxFcxv]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:49:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:49:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:50:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", 
"Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:50:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "recvreedfef", "session_token": "xcvkrjmcfghwuignrmcmhxdhaaezse4w]s98y18cx98q3yhwmnhcfx43f" } ]', EventModel.from_dictionary)
for val in body:
val.request.time = datetime.utcnow() - timedelta(seconds=1)
val.response.time = datetime.utcnow()
self.controller.create_events_batch(body)
self.assertEquals(self.response_catcher.response.status_code, 201)
def test_update_user(self):
metadata = APIHelper.json_deserialize(""" {
"email": "pythonapiuser@email.com",
"name": "pythonapiuser",
"custom": "testdata"
} """)
user_model = UserModel(
user_id="12345",
company_id="67890",
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
modified_time=datetime.utcnow(),
metadata=metadata,
campaign=CampaignModel(utm_source="Newsletter", utm_medium="Email"))
self.controller.update_user(user_model)
self.assertEquals(self.response_catcher.response.status_code, 201)
def test_update_users_batch(self):
body = [UserModel(user_id="1234", company_id="6789", modified_time=datetime.utcnow(),
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f", ),
UserModel(user_id="12345", company_id="67890", modified_time=datetime.utcnow(),
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
metadata=APIHelper.json_deserialize(""" {"email": "pythonapiuser@email.com",
"name": "pythonapiuser", "string_field": "value_1", "number_field": 0 } """))]
self.controller.update_users_batch(body)
self.assertEquals(self.response_catcher.response.status_code, 201)
def test_get_app_config(self):
response = self.controller.get_app_config().__dict__
self.assertEquals(self.response_catcher.response.status_code, 200)
self.assertIsNotNone(response["raw_body"])
self.assertIsNotNone(response["headers"]["X-Moesif-Config-ETag"])
def test_update_company(self):
company_model = CompanyModel(
company_id="67890",
modified_time=datetime.utcnow(),
campaign=CampaignModel(utm_source="Adwords", utm_medium="Twitter"))
self.controller.update_company(company_model)
self.assertEquals(self.response_catcher.response.status_code, 201)
def test_update_companies_batch(self):
body = [CompanyModel(company_id="67890", modified_time=datetime.utcnow(), company_domain="moesif"),
CompanyModel(company_id="6789", modified_time=datetime.utcnow(), company_domain="moesif",
metadata=APIHelper.json_deserialize(""" {"string_field": "value_1", "number_field": 0 } """))]
self.controller.update_companies_batch(body)
self.assertEquals(self.response_catcher.response.status_code, 201)
| true | true |
f716105610f9aba80608e6aac525ea5bef34d12c | 2,465 | py | Python | lib/ruleset_apply.py | brennonyork/budget-buddy | f64dc5ab5248794f101cc704e3754b2882f1d3c3 | [
"MIT"
] | null | null | null | lib/ruleset_apply.py | brennonyork/budget-buddy | f64dc5ab5248794f101cc704e3754b2882f1d3c3 | [
"MIT"
] | null | null | null | lib/ruleset_apply.py | brennonyork/budget-buddy | f64dc5ab5248794f101cc704e3754b2882f1d3c3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Arg1 - ruleset file that contains all rulesets
# Arg2 - cleaned, sorted single file with all transactions
#
# Transforms a given file - Arg2 - into the column set form below
# with the rulesets written and applied for the given financial
# source - Arg1
import re
import sys
if len(sys.argv) < 3:
print("ERROR: need to supply a ruleset file and transaction file")
exit()
ruleset_file = sys.argv[1]
merge_file = sys.argv[2]
incl_history = None
# if extra arg passed then we include the historical transaction before
# we change it w the ruleset regexs
if len(sys.argv) == 4:
incl_history = sys.argv[3]
rule_map = []
with open(ruleset_file, 'r') as rules:
for rule in rules:
# if its only a newline we skip it or if the line starts with a '#' character then skip the line
if rule == "\n" or rule[0] == '#':
continue
else:
# else split by a '#' if it exists and take everything before it
category, regex = map(lambda x: x.strip(), rule.split('#')[0].split(','))
rule_map.append([category, regex])
with open(merge_file, 'r') as transactions:
for transaction in transactions:
d, m, c, p = map(lambda x: x.strip(), transaction.split(',', 4))
regex_matches = list(map(lambda x: re.search(x, m),
map(lambda y: y[1],
rule_map)))
if any(regex_matches):
# find longest match by taking the second element from the
# `span` regex method thus returning the length of the match as
# well as the index
longest_match = max([[i, j.span()[1]-j.span()[0]] for i, j in enumerate(regex_matches) if j],
key=lambda x: x[1])
# pull the new category by taking the index from the longest
# match, looking up that index in the rule_map, and then taking
# the first element from that list (ie the category, not the
# regex assigned to that category label)
new_category = rule_map[longest_match[0]][0]
if incl_history:
if not(c): c = "Empty"
sys.stdout.write(d+','+m+','+new_category+','+p+','+c+'\n')
else:
sys.stdout.write(d+','+m+','+new_category+','+p+'\n')
else:
sys.stdout.write(d+','+m+','+c+','+p+'\n')
| 37.923077 | 105 | 0.574037 |
import re
import sys
if len(sys.argv) < 3:
print("ERROR: need to supply a ruleset file and transaction file")
exit()
ruleset_file = sys.argv[1]
merge_file = sys.argv[2]
incl_history = None
if len(sys.argv) == 4:
incl_history = sys.argv[3]
rule_map = []
with open(ruleset_file, 'r') as rules:
for rule in rules:
if rule == "\n" or rule[0] == '#':
continue
else:
category, regex = map(lambda x: x.strip(), rule.split('#')[0].split(','))
rule_map.append([category, regex])
with open(merge_file, 'r') as transactions:
for transaction in transactions:
d, m, c, p = map(lambda x: x.strip(), transaction.split(',', 4))
regex_matches = list(map(lambda x: re.search(x, m),
map(lambda y: y[1],
rule_map)))
if any(regex_matches):
longest_match = max([[i, j.span()[1]-j.span()[0]] for i, j in enumerate(regex_matches) if j],
key=lambda x: x[1])
new_category = rule_map[longest_match[0]][0]
if incl_history:
if not(c): c = "Empty"
sys.stdout.write(d+','+m+','+new_category+','+p+','+c+'\n')
else:
sys.stdout.write(d+','+m+','+new_category+','+p+'\n')
else:
sys.stdout.write(d+','+m+','+c+','+p+'\n')
| true | true |
f71610eccde5fef5a72814ac19d392b5c15c9201 | 7,703 | py | Python | src/sage/groups/matrix_gps/unitary.py | bopopescu/sage-5 | 9d85b34956ca2edd55af307f99c5d3859acd30bf | [
"BSL-1.0"
] | 5 | 2015-01-04T07:15:06.000Z | 2022-03-04T15:15:18.000Z | src/sage/groups/matrix_gps/unitary.py | bopopescu/sage-5 | 9d85b34956ca2edd55af307f99c5d3859acd30bf | [
"BSL-1.0"
] | null | null | null | src/sage/groups/matrix_gps/unitary.py | bopopescu/sage-5 | 9d85b34956ca2edd55af307f99c5d3859acd30bf | [
"BSL-1.0"
] | 10 | 2016-09-28T13:12:40.000Z | 2022-02-12T09:28:34.000Z | r"""
Unitary Groups `GU(n,q)` and `SU(n,q)`
These are `n \times n` unitary matrices with entries in
`GF(q^2)`.
EXAMPLES::
sage: G = SU(3,5)
sage: G.order()
378000
sage: G
Special Unitary Group of degree 3 over Finite Field in a of size 5^2
sage: G.gens()
(
[ a 0 0] [4*a 4 1]
[ 0 2*a + 2 0] [ 4 4 0]
[ 0 0 3*a], [ 1 0 0]
)
sage: G.base_ring()
Finite Field in a of size 5^2
AUTHORS:
- David Joyner (2006-03): initial version, modified from
special_linear (by W. Stein)
- David Joyner (2006-05): minor additions (examples, _latex_, __str__,
gens)
- William Stein (2006-12): rewrite
- Volker Braun (2013-1) port to new Parent, libGAP, extreme refactoring.
"""
#*********************************************************************************
# Copyright (C) 2006 David Joyner and William Stein
# Copyright (C) 2013 Volker Braun <vbraun.name@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*********************************************************************************
from sage.rings.all import ZZ, is_FiniteField, GF
from sage.misc.latex import latex
from sage.groups.matrix_gps.named_group import (
normalize_args_vectorspace, NamedMatrixGroup_generic, NamedMatrixGroup_gap )
def finite_field_sqrt(ring):
    """
    Helper function.

    INPUT:

    A ring.

    OUTPUT:

    Integer q such that ``ring`` is the finite field with `q^2` elements.

    EXAMPLES::

        sage: from sage.groups.matrix_gps.unitary import finite_field_sqrt
        sage: finite_field_sqrt(GF(4, 'a'))
        2
    """
    if not is_FiniteField(ring):
        raise ValueError('not a finite field')
    # sqrtrem returns (floor(sqrt), remainder); remainder 0 iff perfect square.
    q, rem = ring.cardinality().sqrtrem()
    if rem != 0:
        # Fixed typo in the error message ("cardinatity").
        raise ValueError('cardinality not a square')
    return q
###############################################################################
# General Unitary Group
###############################################################################
def GU(n, R, var='a'):
    r"""
    Return the general unitary group.

    The general unitary group `GU( d, R )` consists of all `d \times
    d` matrices that preserve a nondegenerate sequilinear form over
    the ring `R`.

    .. note::

        For a finite field the matrices that preserve a sesquilinear
        form over `F_q` live over `F_{q^2}`. So ``GU(n,q)`` for
        integer ``q`` constructs the matrix group over the base ring
        ``GF(q^2)``.

    .. note::

        This group is also available via ``groups.matrix.GU()``.

    INPUT:

    - ``n`` -- a positive integer.

    - ``R`` -- ring or an integer. If an integer is specified, the
      corresponding finite field is used.

    - ``var`` -- variable used to represent generator of the finite
      field, if needed.

    OUTPUT:

    Return the general unitary group.

    EXAMPLES::

        sage: G = GU(3, 7); G
        General Unitary Group of degree 3 over Finite Field in a of size 7^2
        sage: G.gens()
        (
        [  a   0   0]  [6*a   6   1]
        [  0   1   0]  [  6   6   0]
        [  0   0 5*a], [  1   0   0]
        )
        sage: GU(2,QQ)
        General Unitary Group of degree 2 over Rational Field

        sage: G = GU(3, 5, var='beta')
        sage: G.base_ring()
        Finite Field in beta of size 5^2
        sage: G.gens()
        (
        [  beta      0      0]  [4*beta      4      1]
        [     0      1      0]  [     4      4      0]
        [     0      0 3*beta], [     1      0      0]
        )

    TESTS::

        sage: groups.matrix.GU(2, 3)
        General Unitary Group of degree 2 over Finite Field in a of size 3^2
    """
    degree, ring = normalize_args_vectorspace(n, R, var=var)
    if is_FiniteField(ring):
        q = ring.cardinality()
        # The matrices preserving a sesquilinear form over GF(q) live over
        # the quadratic extension, so build the group over GF(q^2).
        ring = GF(q ** 2, name=var)
    name = 'General Unitary Group of degree {0} over {1}'.format(degree, ring)
    ltx = r'\text{{GU}}_{{{0}}}({1})'.format(degree, latex(ring))
    if is_FiniteField(ring):
        # GAP's GU(n, q) expects the base prime power q, not q^2.
        cmd = 'GU({0}, {1})'.format(degree, q)
        return UnitaryMatrixGroup_gap(degree, ring, False, name, ltx, cmd)
    else:
        # No GAP backend over generic rings; use the pure-Python class.
        return UnitaryMatrixGroup_generic(degree, ring, False, name, ltx)
###############################################################################
# Special Unitary Group
###############################################################################
def SU(n, R, var='a'):
    r"""
    The special unitary group `SU( d, R )` consists of all `d \times d`
    matrices that preserve a nondegenerate sequilinear form over the
    ring `R` and have determinant one.

    .. note::

        For a finite field the matrices that preserve a sesquilinear
        form over `F_q` live over `F_{q^2}`. So ``SU(n,q)`` for
        integer ``q`` constructs the matrix group over the base ring
        ``GF(q^2)``.

    .. note::

        This group is also available via ``groups.matrix.SU()``.

    INPUT:

    - ``n`` -- a positive integer.

    - ``R`` -- ring or an integer. If an integer is specified, the
      corresponding finite field is used.

    - ``var`` -- variable used to represent generator of the finite
      field, if needed.

    OUTPUT:

    Return the special unitary group.

    EXAMPLES::

        sage: SU(3,5)
        Special Unitary Group of degree 3 over Finite Field in a of size 5^2
        sage: SU(3, GF(5))
        Special Unitary Group of degree 3 over Finite Field in a of size 5^2
        sage: SU(3,QQ)
        Special Unitary Group of degree 3 over Rational Field

    TESTS::

        sage: groups.matrix.SU(2, 3)
        Special Unitary Group of degree 2 over Finite Field in a of size 3^2
    """
    # Fix: the docstring contains `\times` but was not a raw string, which
    # is an invalid escape sequence in modern Python; it is now r"""...""",
    # matching GU's docstring above.
    degree, ring = normalize_args_vectorspace(n, R, var=var)
    if is_FiniteField(ring):
        q = ring.cardinality()
        # The matrices preserving a sesquilinear form over GF(q) live over
        # the quadratic extension, so build the group over GF(q^2).
        ring = GF(q ** 2, name=var)
    name = 'Special Unitary Group of degree {0} over {1}'.format(degree, ring)
    ltx = r'\text{{SU}}_{{{0}}}({1})'.format(degree, latex(ring))
    if is_FiniteField(ring):
        # GAP's SU(n, q) expects the base prime power q, not q^2.
        cmd = 'SU({0}, {1})'.format(degree, q)
        return UnitaryMatrixGroup_gap(degree, ring, True, name, ltx, cmd)
    else:
        # No GAP backend over generic rings; use the pure-Python class.
        return UnitaryMatrixGroup_generic(degree, ring, True, name, ltx)
########################################################################
# Unitary Group class
########################################################################
class UnitaryMatrixGroup_generic(NamedMatrixGroup_generic):
    r"""
    General Unitary Group over arbitrary rings.

    EXAMPLES::

        sage: G = GU(3, GF(7)); G
        General Unitary Group of degree 3 over Finite Field in a of size 7^2
        sage: latex(G)
        \text{GU}_{3}(\Bold{F}_{7^{2}})

        sage: G = SU(3, GF(5)); G
        Special Unitary Group of degree 3 over Finite Field in a of size 5^2
        sage: latex(G)
        \text{SU}_{3}(\Bold{F}_{5^{2}})
    """

    def _check_matrix(self, x, *args):
        """
        Check whether the matrix ``x`` is unitary.

        See :meth:`~sage.groups.matrix_gps.matrix_group._check_matrix`
        for details.

        EXAMPLES::

            sage: G = GU(2, GF(5))
            sage: G._check_matrix(G.an_element().matrix())
            sage: G = SU(2, GF(5))
            sage: G._check_matrix(G.an_element().matrix())
        """
        # Fix: a stray "a" had crept in after the docstring opener ("""a),
        # putting the character into the rendered documentation.
        if self._special and x.determinant() != 1:
            raise TypeError('matrix must have determinant one')
        if not x.is_unitary():
            raise TypeError('matrix must be unitary')
class UnitaryMatrixGroup_gap(UnitaryMatrixGroup_generic, NamedMatrixGroup_gap):
    """Unitary matrix group backed by libGAP (finite-field case)."""
    pass
| 29.288973 | 82 | 0.537583 |
from sage.rings.all import ZZ, is_FiniteField, GF
from sage.misc.latex import latex
from sage.groups.matrix_gps.named_group import (
normalize_args_vectorspace, NamedMatrixGroup_generic, NamedMatrixGroup_gap )
def finite_field_sqrt(ring):
if not is_FiniteField(ring):
raise ValueError('not a finite field')
q, rem = ring.cardinality().sqrtrem()
if rem != 0:
raise ValueError('cardinatity not a square')
return q
| true | true |
f71611444874d1fdc566b5e40bd2782abdfab6c2 | 3,694 | py | Python | more_one_memo/slack/model/response.py | nonylene/more-one-memo | 2c1007bb0bbafe47cba1ac63f237cd4aa66c3374 | [
"MIT"
] | 1 | 2018-06-07T01:20:42.000Z | 2018-06-07T01:20:42.000Z | more_one_memo/slack/model/response.py | nonylene/more-one-memo | 2c1007bb0bbafe47cba1ac63f237cd4aa66c3374 | [
"MIT"
] | 5 | 2021-06-02T00:13:17.000Z | 2022-02-26T23:38:56.000Z | more_one_memo/slack/model/response.py | nonylene/more-one-memo | 2c1007bb0bbafe47cba1ac63f237cd4aa66c3374 | [
"MIT"
] | null | null | null | from typing import List, Optional
from dataclasses import dataclass
# Type aliases: Slack object identifiers are plain strings; the aliases
# document which kind of id a field holds.
UserID = str
BotID = str
ChannelID = str
@dataclass
class Channel:
    """A Slack channel (see https://api.slack.com/types/channel).

    Only the fields this application needs are modeled.
    """
    id: ChannelID
    name: str
    is_archived: bool
    is_member: bool

    @staticmethod
    def from_json(json: dict):
        """Build a Channel from a raw API channel object."""
        return Channel(
            json['id'],
            json['name'],
            json['is_archived'],
            json['is_member'],
        )
@dataclass
class User:
    """A Slack user (see https://api.slack.com/types/user)."""

    @dataclass
    class Profile:
        """Avatar URLs at the sizes this app uses."""
        image_72: Optional[str]
        image_192: Optional[str]

        def get_image(self) -> Optional[str]:
            """Return the largest available avatar URL, or None if neither is set."""
            for candidate in (self.image_192, self.image_72):
                if candidate is not None:
                    return candidate
            return None

        @staticmethod
        def from_json(json: dict):
            return User.Profile(json['image_72'], json['image_192'])

    id: UserID
    name: str
    profile: Profile

    @staticmethod
    def from_json(json: dict):
        """Build a User (including its Profile) from a raw API user object."""
        return User(
            json['id'],
            json['name'],
            User.Profile.from_json(json['profile']),
        )
@dataclass
class Conversations:
    # https://api.slack.com/methods/conversations.list
    # Response model for the conversations.list API method.

    @dataclass
    class ResponseMetadata:
        # Pagination cursor for the next page; None/empty on the last page.
        next_cursor: Optional[str]

        @staticmethod
        def from_json(json: dict):
            return Conversations.ResponseMetadata(json.get('next_cursor'))

    channels: List[Channel]  # Regard Conversation as Channel
    response_metadata: ResponseMetadata

    @staticmethod
    def from_json(json: dict):
        # Parse each conversation entry into the Channel model.
        return Conversations(
            [Channel.from_json(obj) for obj in json['channels']],
            Conversations.ResponseMetadata.from_json(json['response_metadata'])
        )
@dataclass
class Users:
    # https://api.slack.com/methods/users.list
    # Response model for the users.list API method.

    @dataclass
    class ResponseMetadata:
        # Pagination cursor for the next page; None/empty on the last page.
        next_cursor: Optional[str]

        @staticmethod
        def from_json(json: dict):
            return Users.ResponseMetadata(json.get('next_cursor'))

    members: List[User]
    response_metadata: ResponseMetadata

    @staticmethod
    def from_json(json: dict):
        # Parse each member entry into the User model.
        return Users(
            [User.from_json(obj) for obj in json['members']],
            Users.ResponseMetadata.from_json(json['response_metadata'])
        )
@dataclass
class RtmStart:
    # https://api.slack.com/methods/rtm.start
    # Response model for rtm.start: connection URL plus a snapshot of
    # the authenticated user, team, users and channels.

    @dataclass
    class Self:
        # The authenticated user ("self" object in the rtm.start payload).
        id: UserID

        @dataclass
        class Prefs:
            # Channel ids the user has muted.
            muted_channels: List[str]

            @staticmethod
            def from_json(json: dict):
                return RtmStart.Self.Prefs(json['muted_channels'])

        prefs: Prefs

        @staticmethod
        def from_json(json: dict):
            return RtmStart.Self(
                json['id'],
                RtmStart.Self.Prefs.from_json(json['prefs'])
            )

    @dataclass
    class Team:
        # Workspace domain (the "<domain>.slack.com" part).
        domain: str

        @staticmethod
        def from_json(json: dict):
            return RtmStart.Team(json['domain'])

    # WebSocket URL to open the RTM connection against.
    url: str
    self_: Self
    team: Team
    users: List[User]
    channels: List[Channel]

    @staticmethod
    def from_json(json: dict):
        # The payload key is 'self'; the attribute is self_ because
        # 'self' is a reserved name in Python method signatures.
        return RtmStart(
            json['url'],
            RtmStart.Self.from_json(json['self']),
            RtmStart.Team.from_json(json['team']),
            [User.from_json(user) for user in json['users']],
            [Channel.from_json(channel) for channel in json['channels']],
        )
@dataclass
class RtmConnect:
    """Response model for https://api.slack.com/methods/rtm.connect."""

    # WebSocket URL for the RTM session.
    url: str

    @staticmethod
    def from_json(json: dict):
        """Extract the websocket URL from the raw API response."""
        return RtmConnect(json['url'])
| 22.387879 | 88 | 0.600433 | from typing import List, Optional
from dataclasses import dataclass
UserID = str
BotID = str
ChannelID = str
@dataclass
class Channel:
id: ChannelID
name: str
is_archived: bool
is_member: bool
@staticmethod
def from_json(json: dict):
return Channel(json['id'], json['name'], json['is_archived'], json['is_member'])
@dataclass
class User:
@dataclass
class Profile:
image_72: Optional[str]
image_192: Optional[str]
def get_image(self) -> Optional[str]:
if self.image_192 is not None:
return self.image_192
if self.image_72 is not None:
return self.image_72
return None
@staticmethod
def from_json(json: dict):
return User.Profile(json['image_72'], json['image_192'])
id: UserID
name: str
profile: Profile
@staticmethod
def from_json(json: dict):
return User(json['id'], json['name'], User.Profile.from_json(json['profile']))
@dataclass
class Conversations:
@dataclass
class ResponseMetadata:
next_cursor: Optional[str]
@staticmethod
def from_json(json: dict):
return Conversations.ResponseMetadata(json.get('next_cursor'))
channels: List[Channel]
response_metadata: ResponseMetadata
@staticmethod
def from_json(json: dict):
return Conversations(
[Channel.from_json(obj) for obj in json['channels']],
Conversations.ResponseMetadata.from_json(json['response_metadata'])
)
@dataclass
class Users:
@dataclass
class ResponseMetadata:
next_cursor: Optional[str]
@staticmethod
def from_json(json: dict):
return Users.ResponseMetadata(json.get('next_cursor'))
members: List[User]
response_metadata: ResponseMetadata
@staticmethod
def from_json(json: dict):
return Users(
[User.from_json(obj) for obj in json['members']],
Users.ResponseMetadata.from_json(json['response_metadata'])
)
@dataclass
class RtmStart:
@dataclass
class Self:
id: UserID
@dataclass
class Prefs:
muted_channels: List[str]
@staticmethod
def from_json(json: dict):
return RtmStart.Self.Prefs(json['muted_channels'])
prefs: Prefs
@staticmethod
def from_json(json: dict):
return RtmStart.Self(
json['id'],
RtmStart.Self.Prefs.from_json(json['prefs'])
)
@dataclass
class Team:
domain: str
@staticmethod
def from_json(json: dict):
return RtmStart.Team(json['domain'])
url: str
self_: Self
team: Team
users: List[User]
channels: List[Channel]
@staticmethod
def from_json(json: dict):
return RtmStart(
json['url'],
RtmStart.Self.from_json(json['self']),
RtmStart.Team.from_json(json['team']),
[User.from_json(user) for user in json['users']],
[Channel.from_json(channel) for channel in json['channels']],
)
@dataclass
class RtmConnect:
url: str
@staticmethod
def from_json(json: dict):
return RtmConnect(
json['url'],
)
| true | true |
f716116e261e01c85b7274d3654d5b780989190b | 2,740 | py | Python | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/sol/phys/Phys_Studio_LongRange.py | PascalGuenther/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 82 | 2016-06-29T17:24:43.000Z | 2021-04-16T06:49:17.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/sol/phys/Phys_Studio_LongRange.py | PascalGuenther/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 6 | 2022-01-12T18:22:08.000Z | 2022-03-25T10:19:27.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/sol/phys/Phys_Studio_LongRange.py | PascalGuenther/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 56 | 2016-08-02T10:50:50.000Z | 2021-07-19T08:57:34.000Z | from pyradioconfig.parts.ocelot.phys.Phys_Studio_LongRange import PHYS_OQPSK_LoRa_Ocelot
from pyradioconfig.calculator_model_framework.decorators.phy_decorators import do_not_inherit_phys
@do_not_inherit_phys
class PHYS_Studio_LongRange_Sol(PHYS_OQPSK_LoRa_Ocelot):
    """Sol registrations of the Studio Long Range OQPSK DSSS-8 PHYs.

    Each override simply delegates to the identically named Ocelot PHY.
    The explicit overrides exist so these PHYs are registered for Sol
    (the class is decorated @do_not_inherit_phys, which presumably
    suppresses implicit inheritance of PHY definitions).
    """

    # Owner: Casey Weltzin
    # Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-81
    def PHY_Longrange_915M_OQPSK_DSSS8_80p0kbps(self, model, phy_name=None):
        return super().PHY_Longrange_915M_OQPSK_DSSS8_80p0kbps(model, phy_name=phy_name)

    # Owner: Casey Weltzin
    # Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-80
    def PHY_Longrange_915M_OQPSK_DSSS8_38p4kbps(self, model, phy_name=None):
        return super().PHY_Longrange_915M_OQPSK_DSSS8_38p4kbps(model, phy_name=phy_name)

    # Owner: Casey Weltzin
    # Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-79
    def PHY_Longrange_915M_OQPSK_DSSS8_19p2kbps(self, model, phy_name=None):
        return super().PHY_Longrange_915M_OQPSK_DSSS8_19p2kbps(model, phy_name=phy_name)

    # Owner: Casey Weltzin
    # Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-78
    def PHY_Longrange_915M_OQPSK_DSSS8_9p6kbps(self, model, phy_name=None):
        return super().PHY_Longrange_915M_OQPSK_DSSS8_9p6kbps(model, phy_name=phy_name)

    # Owner: Casey Weltzin
    # Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-77
    def PHY_Longrange_915M_OQPSK_DSSS8_4p8kbps(self, model, phy_name=None):
        return super().PHY_Longrange_915M_OQPSK_DSSS8_4p8kbps(model, phy_name=phy_name)

    # Owner: Casey Weltzin
    # Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-76
    def PHY_Longrange_490M_OQPSK_DSSS8_19p2kbps(self, model, phy_name=None):
        return super().PHY_Longrange_490M_OQPSK_DSSS8_19p2kbps(model, phy_name=phy_name)

    # Owner: Casey Weltzin
    # Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-75
    def PHY_Longrange_490M_OQPSK_DSSS8_9p6kbps(self, model, phy_name=None):
        return super().PHY_Longrange_490M_OQPSK_DSSS8_9p6kbps(model, phy_name=phy_name)

    # Owner: Casey Weltzin
    # Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-74
    def PHY_Longrange_490M_OQPSK_DSSS8_4p8kbps(self, model, phy_name=None):
        return super().PHY_Longrange_490M_OQPSK_DSSS8_4p8kbps(model, phy_name=phy_name)

    # Owner: Casey Weltzin
    # Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-73
    def PHY_Longrange_490M_OQPSK_DSSS8_2p4kbps(self, model, phy_name=None):
        return super().PHY_Longrange_490M_OQPSK_DSSS8_2p4kbps(model, phy_name=phy_name)
from pyradioconfig.calculator_model_framework.decorators.phy_decorators import do_not_inherit_phys
@do_not_inherit_phys
class PHYS_Studio_LongRange_Sol(PHYS_OQPSK_LoRa_Ocelot):
    """Sol long-range Studio PHYs.

    Every PHY here simply re-exports the corresponding Ocelot O-QPSK DSSS-8
    definition; the `@do_not_inherit_phys` decorator prevents further
    automatic inheritance of these methods by derived parts.
    """

    def PHY_Longrange_915M_OQPSK_DSSS8_80p0kbps(self, model, phy_name=None):
        """915 MHz O-QPSK DSSS-8, 80.0 kbps (inherited from Ocelot)."""
        return super().PHY_Longrange_915M_OQPSK_DSSS8_80p0kbps(model, phy_name=phy_name)

    def PHY_Longrange_915M_OQPSK_DSSS8_38p4kbps(self, model, phy_name=None):
        """915 MHz O-QPSK DSSS-8, 38.4 kbps (inherited from Ocelot)."""
        return super().PHY_Longrange_915M_OQPSK_DSSS8_38p4kbps(model, phy_name=phy_name)

    def PHY_Longrange_915M_OQPSK_DSSS8_19p2kbps(self, model, phy_name=None):
        """915 MHz O-QPSK DSSS-8, 19.2 kbps (inherited from Ocelot)."""
        return super().PHY_Longrange_915M_OQPSK_DSSS8_19p2kbps(model, phy_name=phy_name)

    def PHY_Longrange_915M_OQPSK_DSSS8_9p6kbps(self, model, phy_name=None):
        """915 MHz O-QPSK DSSS-8, 9.6 kbps (inherited from Ocelot)."""
        return super().PHY_Longrange_915M_OQPSK_DSSS8_9p6kbps(model, phy_name=phy_name)

    def PHY_Longrange_915M_OQPSK_DSSS8_4p8kbps(self, model, phy_name=None):
        """915 MHz O-QPSK DSSS-8, 4.8 kbps (inherited from Ocelot)."""
        return super().PHY_Longrange_915M_OQPSK_DSSS8_4p8kbps(model, phy_name=phy_name)

    def PHY_Longrange_490M_OQPSK_DSSS8_19p2kbps(self, model, phy_name=None):
        """490 MHz O-QPSK DSSS-8, 19.2 kbps (inherited from Ocelot)."""
        return super().PHY_Longrange_490M_OQPSK_DSSS8_19p2kbps(model, phy_name=phy_name)

    def PHY_Longrange_490M_OQPSK_DSSS8_9p6kbps(self, model, phy_name=None):
        """490 MHz O-QPSK DSSS-8, 9.6 kbps (inherited from Ocelot)."""
        return super().PHY_Longrange_490M_OQPSK_DSSS8_9p6kbps(model, phy_name=phy_name)

    def PHY_Longrange_490M_OQPSK_DSSS8_4p8kbps(self, model, phy_name=None):
        """490 MHz O-QPSK DSSS-8, 4.8 kbps (inherited from Ocelot)."""
        return super().PHY_Longrange_490M_OQPSK_DSSS8_4p8kbps(model, phy_name=phy_name)

    def PHY_Longrange_490M_OQPSK_DSSS8_2p4kbps(self, model, phy_name=None):
        """490 MHz O-QPSK DSSS-8, 2.4 kbps (inherited from Ocelot)."""
        return super().PHY_Longrange_490M_OQPSK_DSSS8_2p4kbps(model, phy_name=phy_name)
f716119887849d0bffc5971384860939823a8114 | 4,839 | py | Python | tensorflow_datasets/core/dataset_utils.py | Global19-atlassian-net/datasets | db298928fe0e45907fcd61443d2319665a933afc | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/core/dataset_utils.py | Global19-atlassian-net/datasets | db298928fe0e45907fcd61443d2319665a933afc | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/core/dataset_utils.py | Global19-atlassian-net/datasets | db298928fe0e45907fcd61443d2319665a933afc | [
"Apache-2.0"
] | 1 | 2020-08-03T20:19:12.000Z | 2020-08-03T20:19:12.000Z | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with tf.data.Dataset."""
import tensorflow.compat.v2 as tf
from tensorflow_datasets.core import tf_compat
from tensorflow_datasets.core import utils
def _eager_dataset_iterator(dataset):
  """Yields dataset elements with every dense tensor converted to NumPy.

  Ragged tensors are left as-is because NumPy has no equivalent
  representation.
  """
  for element in dataset:
    numpy_leaves = [
        leaf if isinstance(leaf, tf.RaggedTensor) else leaf.numpy()
        for leaf in tf.nest.flatten(element)
    ]
    yield tf.nest.pack_sequence_as(element, numpy_leaves)
def _graph_dataset_iterator(ds_iter, graph=None):
  """Constructs a Python generator from a tf.data.Iterator (graph mode)."""
  with utils.maybe_with_graph(graph, create_if_none=False):
    initializer = ds_iter.initializer
    next_element = ds_iter.get_next()
  # Run the iterator inside a CPU-only session and yield until exhausted.
  with utils.nogpu_session(graph) as sess:
    sess.run(initializer)
    while True:
      try:
        yield sess.run(next_element)
      except tf.errors.OutOfRangeError:
        return
def as_numpy(dataset, *, graph=None):
  """Converts a `tf.data.Dataset` to an iterable of NumPy arrays.

  `as_numpy` converts a possibly nested structure of `tf.data.Dataset`s
  and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays,
  respectively.

  Note that because TensorFlow has support for ragged tensors and NumPy has
  no equivalent representation,
  [`tf.RaggedTensor`s](https://www.tensorflow.org/api_docs/python/tf/RaggedTensor)
  are left as-is for the user to deal with them (e.g. using `to_list()`).
  In TF 1 (i.e. graph mode), `tf.RaggedTensor`s are returned as
  `tf.ragged.RaggedTensorValue`s.

  Example:
  ```
  ds = tfds.load(name="mnist", split="train")
  ds_numpy = tfds.as_numpy(ds)  # Convert `tf.data.Dataset` to Python generator
  for ex in ds_numpy:
    # `{'image': np.array(shape=(28, 28, 1)), 'labels': np.array(shape=())}`
    print(ex)
  ```

  Args:
    dataset: a possibly nested structure of `tf.data.Dataset`s and/or
      `tf.Tensor`s.
    graph: `tf.Graph`, optional, explicitly set the graph to use.

  Returns:
    A structure matching `dataset` where `tf.data.Dataset`s are converted to
    generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays.
  """
  nested_ds = dataset
  del dataset

  # Flatten
  flat_ds = tf.nest.flatten(nested_ds)
  flat_np = []

  # Type check for Tensors and Datasets
  for ds_el in flat_ds:
    if not (
        isinstance(ds_el, (tf.Tensor, tf.RaggedTensor)) or
        tf_compat.is_dataset(ds_el)):
      # Build the type structure only when actually reporting an error.
      # (It was previously recomputed on every loop iteration, which made
      # the type check accidentally quadratic in the number of elements.)
      types = tf.nest.pack_sequence_as(
          nested_ds, [type(el) for el in flat_ds])
      raise ValueError("Arguments to as_numpy must be tf.Tensors or "
                       "tf.data.Datasets. Got: %s" % types)

  if tf.executing_eagerly():
    # Eager mode
    for ds_el in flat_ds:
      if isinstance(ds_el, tf.Tensor):
        np_el = ds_el.numpy()
      elif isinstance(ds_el, tf.RaggedTensor):
        np_el = ds_el
      elif tf_compat.is_dataset(ds_el):
        np_el = _eager_dataset_iterator(ds_el)
      else:
        assert False
      flat_np.append(np_el)
  else:
    # Graph mode

    # First create iterators for datasets
    with utils.maybe_with_graph(graph, create_if_none=False):
      ds_iters = [
          tf.compat.v1.data.make_initializable_iterator(ds_el)
          for ds_el in flat_ds if tf_compat.is_dataset(ds_el)
      ]
    ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters]

    # Then create numpy arrays for tensors
    with utils.nogpu_session(graph) as sess:  # Shared session for tf.Tensor
      # Calling sess.run once so that randomness is shared.
      np_arrays = sess.run([tensor for tensor in flat_ds
                            if not tf_compat.is_dataset(tensor)])

    # Merge the dataset iterators and np arrays
    iter_ds = iter(ds_iters)
    iter_array = iter(np_arrays)
    flat_np = [
        next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array)
        for ds_el in flat_ds
    ]

  # Nest
  return tf.nest.pack_sequence_as(nested_ds, flat_np)
def dataset_shape_is_fully_defined(ds):
  """Returns True if every output shape of `ds` is statically known."""
  shapes = tf.nest.flatten(tf.compat.v1.data.get_output_shapes(ds))
  return all(shape.is_fully_defined() for shape in shapes)
def features_shape_is_fully_defined(features):
  """Returns True if every feature tensor shape is statically known."""
  infos = tf.nest.flatten(features.get_tensor_info())
  return all(tf.TensorShape(info.shape).is_fully_defined() for info in infos)
| 34.077465 | 82 | 0.701178 |
import tensorflow.compat.v2 as tf
from tensorflow_datasets.core import tf_compat
from tensorflow_datasets.core import utils
def _eager_dataset_iterator(dataset):
  """Iterates `dataset`, converting each non-ragged tensor to NumPy."""
  for item in dataset:
    converted = []
    for t in tf.nest.flatten(item):
      converted.append(t if isinstance(t, tf.RaggedTensor) else t.numpy())
    yield tf.nest.pack_sequence_as(item, converted)
def _graph_dataset_iterator(ds_iter, graph=None):
  """Drives a tf.data.Iterator in a CPU-only session, yielding NumPy values."""
  with utils.maybe_with_graph(graph, create_if_none=False):
    init_op = ds_iter.initializer
    get_next_op = ds_iter.get_next()
  with utils.nogpu_session(graph) as sess:
    sess.run(init_op)
    while True:
      try:
        yield sess.run(get_next_op)
      except tf.errors.OutOfRangeError:
        break
def as_numpy(dataset, *, graph=None):
  """Converts a nested structure of `tf.data.Dataset`s / `tf.Tensor`s to NumPy.

  Datasets become Python generators of NumPy arrays; tensors become NumPy
  arrays. `tf.RaggedTensor`s are passed through unconverted.

  Args:
    dataset: a possibly nested structure of `tf.data.Dataset`s and/or
      `tf.Tensor`s.
    graph: `tf.Graph`, optional, explicitly set the graph to use.

  Returns:
    A structure matching `dataset` with datasets/tensors converted to NumPy.
  """
  nested_ds = dataset
  del dataset

  flat_ds = tf.nest.flatten(nested_ds)
  flat_np = []

  # Validate element types up front; build the (expensive) type structure
  # only when raising, instead of recomputing it on every iteration.
  for ds_el in flat_ds:
    if not (
        isinstance(ds_el, (tf.Tensor, tf.RaggedTensor)) or
        tf_compat.is_dataset(ds_el)):
      types = tf.nest.pack_sequence_as(
          nested_ds, [type(el) for el in flat_ds])
      raise ValueError("Arguments to as_numpy must be tf.Tensors or "
                       "tf.data.Datasets. Got: %s" % types)

  if tf.executing_eagerly():
    # Eager mode: convert each element directly.
    for ds_el in flat_ds:
      if isinstance(ds_el, tf.Tensor):
        np_el = ds_el.numpy()
      elif isinstance(ds_el, tf.RaggedTensor):
        np_el = ds_el
      elif tf_compat.is_dataset(ds_el):
        np_el = _eager_dataset_iterator(ds_el)
      else:
        assert False
      flat_np.append(np_el)
  else:
    # Graph mode: build iterators for datasets, then run tensors in one
    # shared session call so randomness is shared.
    with utils.maybe_with_graph(graph, create_if_none=False):
      ds_iters = [
          tf.compat.v1.data.make_initializable_iterator(ds_el)
          for ds_el in flat_ds if tf_compat.is_dataset(ds_el)
      ]
    ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters]
    with utils.nogpu_session(graph) as sess:
      np_arrays = sess.run([tensor for tensor in flat_ds
                            if not tf_compat.is_dataset(tensor)])
    # Merge dataset iterators and arrays back into the original order.
    iter_ds = iter(ds_iters)
    iter_array = iter(np_arrays)
    flat_np = [
        next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array)
        for ds_el in flat_ds
    ]

  return tf.nest.pack_sequence_as(nested_ds, flat_np)
def dataset_shape_is_fully_defined(ds):
  """True iff all output shapes of `ds` are fully defined."""
  return all(
      ts.is_fully_defined()
      for ts in tf.nest.flatten(tf.compat.v1.data.get_output_shapes(ds)))
def features_shape_is_fully_defined(features):
  """True iff all feature tensor shapes are fully defined."""
  return all(
      tf.TensorShape(info.shape).is_fully_defined()
      for info in tf.nest.flatten(features.get_tensor_info()))
| true | true |
f71611a25dd8760de2e03dd6de23a77dc59b5b29 | 7,560 | py | Python | datary/datasets/test/test_datasets.py | Datary/python-sdk | 2790a50e1ad262cbe3210665dc34f497625e923d | [
"MIT"
] | null | null | null | datary/datasets/test/test_datasets.py | Datary/python-sdk | 2790a50e1ad262cbe3210665dc34f497625e923d | [
"MIT"
] | null | null | null | datary/datasets/test/test_datasets.py | Datary/python-sdk | 2790a50e1ad262cbe3210665dc34f497625e923d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Datary python sdk Datasets test file
"""
import mock
from datary.test.test_datary import DataryTestCase
from datary.test.mock_requests import MockRequestResponse
class DataryDatasetsTestCase(DataryTestCase):
"""
DataryDatasets Test case
"""
@mock.patch('datary.requests.requests.requests.get')
def test_get_kern(self, mock_request):
"""
Test Datary datasets get_kern
"""
mock_request.return_value = MockRequestResponse(
"", json=self.element.get('data', {}).get('kern'))
kern = self.datary.get_kern(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(kern, dict))
self.assertEqual(kern, self.element.get('data', {}).get('kern'))
mock_request.return_value = MockRequestResponse("", status_code=500)
kern2 = self.datary.get_kern(self.dataset_uuid, self.repo_uuid)
self.assertTrue(isinstance(kern2, dict))
self.assertEqual(kern2, {})
@mock.patch('datary.requests.requests.requests.get')
def test_get_metadata(self, mock_request):
"""
Test Datary datasets get_metadata
"""
mock_request.return_value = MockRequestResponse(
"", json=self.element.get('data', {}).get('meta'))
metadata = self.datary.get_metadata(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(metadata, dict))
self.assertEqual(metadata, self.element.get('data', {}).get('meta'))
mock_request.return_value = MockRequestResponse("", status_code=500)
metadata2 = self.datary.get_metadata(self.dataset_uuid, self.repo_uuid)
self.assertTrue(isinstance(metadata2, dict))
self.assertEqual(metadata2, {})
@mock.patch('datary.requests.requests.requests.get')
def test_get_original(self, mock_request):
"""
Test Datary datasets get_original
"""
mock_request.return_value = MockRequestResponse("", json=self.original)
original = self.datary.get_original(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original, dict))
self.assertEqual(original, self.original)
mock_request.reset_mock()
# not dataset_uuid, introduced
original2 = self.datary.get_original(
self.dataset_uuid, self.repo_uuid, self.wdir_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original2, dict))
self.assertEqual(original2, self.original)
mock_request.reset_mock()
# not dataset_uuid, introduced
original3 = self.datary.get_original(
self.dataset_uuid, wdir_uuid=self.wdir_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original3, dict))
self.assertEqual(original3, self.original)
mock_request.reset_mock()
mock_request.side_effect = iter([
MockRequestResponse("", status_code=500),
MockRequestResponse("", json=self.original)
])
original4 = self.datary.get_original(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 2)
self.assertTrue(isinstance(original4, dict))
self.assertEqual(original4, self.original)
mock_request.reset_mock()
mock_request.side_effect = iter([
MockRequestResponse("", status_code=500),
MockRequestResponse("", status_code=500)
])
original4b = self.datary.get_original(
self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 2)
self.assertTrue(isinstance(original4b, dict))
self.assertEqual(original4b, {})
mock_request.reset_mock()
# not dataset_uuid, introduced
original5 = self.datary.get_original(
MockRequestResponse("", status_code=500))
self.assertEqual(mock_request.call_count, 0)
self.assertTrue(isinstance(original5, dict))
self.assertEqual(original5, {})
mock_request.reset_mock()
# scope
mock_request.side_effect = iter(
[MockRequestResponse("", json=self.original),
MockRequestResponse("", json=self.original)])
original6 = self.datary.get_original(
self.dataset_uuid, self.repo_uuid, scope='repo')
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original6, dict))
self.assertEqual(original6, self.original)
@mock.patch('datary.workdirs.DataryWorkdirs.get_wdir_filetree')
@mock.patch('datary.workdirs.DataryWorkdirs.get_wdir_changes')
def test_get_dataset_uuid(self, mock_get_wdir_changes,
mock_get_wdir_filetree):
"""
Test Datary datasets get_datasaet_uuid
"""
mock_get_wdir_filetree.return_value = self.workdir
mock_get_wdir_changes.return_value = self.changes
path = 'b'
basename = 'bb'
empty_result = self.datary.get_dataset_uuid(self.wdir_uuid)
self.assertEqual(empty_result, None)
from_changes_result = self.datary.get_dataset_uuid(
self.wdir_uuid, path, basename)
self.assertEqual(from_changes_result, 'inode1_changes')
self.assertEqual(mock_get_wdir_filetree.call_count, 1)
self.assertEqual(mock_get_wdir_changes.call_count, 1)
mock_get_wdir_filetree.reset_mock()
mock_get_wdir_changes.reset_mock()
# retrive from workdir
path = ''
basename = 'c'
from_commit_result = self.datary.get_dataset_uuid(
self.wdir_uuid, path, basename)
self.assertEqual(from_commit_result, 'c_sha1')
self.assertEqual(mock_get_wdir_filetree.call_count, 1)
self.assertEqual(mock_get_wdir_changes.call_count, 1)
mock_get_wdir_filetree.reset_mock()
mock_get_wdir_changes.reset_mock()
# NOT exists
path = 'bb'
basename = 'b'
no_result = self.datary.get_dataset_uuid(
self.wdir_uuid, path, basename)
self.assertEqual(no_result, None)
self.assertEqual(mock_get_wdir_filetree.call_count, 1)
self.assertEqual(mock_get_wdir_changes.call_count, 1)
@mock.patch('datary.requests.requests.requests.get')
def test_get_commited_dataset_uuid(self, mock_request):
"""
Test Datary get_commited_dataset_uuid
"""
# no args path and basename introduced
mock_request.return_value = MockRequestResponse(
"", json=self.dataset_uuid)
result_no_pathname = self.datary.get_commited_dataset_uuid(
self.wdir_uuid)
self.assertEqual(result_no_pathname, {})
self.assertEqual(mock_request.call_count, 0)
# good case
result = self.datary.get_commited_dataset_uuid(
self.wdir_uuid, 'path', 'basename')
self.assertEqual(result, self.dataset_uuid)
self.assertEqual(mock_request.call_count, 1)
# datary request return None
mock_request.reset_mock()
mock_request.return_value = MockRequestResponse("", status_code=500)
no_response_result = self.datary.get_commited_dataset_uuid(
self.wdir_uuid, 'path', 'basename')
self.assertEqual(no_response_result, {})
self.assertEqual(mock_request.call_count, 1)
| 37.98995 | 79 | 0.668915 |
import mock
from datary.test.test_datary import DataryTestCase
from datary.test.mock_requests import MockRequestResponse
class DataryDatasetsTestCase(DataryTestCase):
@mock.patch('datary.requests.requests.requests.get')
def test_get_kern(self, mock_request):
mock_request.return_value = MockRequestResponse(
"", json=self.element.get('data', {}).get('kern'))
kern = self.datary.get_kern(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(kern, dict))
self.assertEqual(kern, self.element.get('data', {}).get('kern'))
mock_request.return_value = MockRequestResponse("", status_code=500)
kern2 = self.datary.get_kern(self.dataset_uuid, self.repo_uuid)
self.assertTrue(isinstance(kern2, dict))
self.assertEqual(kern2, {})
@mock.patch('datary.requests.requests.requests.get')
def test_get_metadata(self, mock_request):
mock_request.return_value = MockRequestResponse(
"", json=self.element.get('data', {}).get('meta'))
metadata = self.datary.get_metadata(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(metadata, dict))
self.assertEqual(metadata, self.element.get('data', {}).get('meta'))
mock_request.return_value = MockRequestResponse("", status_code=500)
metadata2 = self.datary.get_metadata(self.dataset_uuid, self.repo_uuid)
self.assertTrue(isinstance(metadata2, dict))
self.assertEqual(metadata2, {})
@mock.patch('datary.requests.requests.requests.get')
def test_get_original(self, mock_request):
mock_request.return_value = MockRequestResponse("", json=self.original)
original = self.datary.get_original(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original, dict))
self.assertEqual(original, self.original)
mock_request.reset_mock()
original2 = self.datary.get_original(
self.dataset_uuid, self.repo_uuid, self.wdir_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original2, dict))
self.assertEqual(original2, self.original)
mock_request.reset_mock()
original3 = self.datary.get_original(
self.dataset_uuid, wdir_uuid=self.wdir_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original3, dict))
self.assertEqual(original3, self.original)
mock_request.reset_mock()
mock_request.side_effect = iter([
MockRequestResponse("", status_code=500),
MockRequestResponse("", json=self.original)
])
original4 = self.datary.get_original(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 2)
self.assertTrue(isinstance(original4, dict))
self.assertEqual(original4, self.original)
mock_request.reset_mock()
mock_request.side_effect = iter([
MockRequestResponse("", status_code=500),
MockRequestResponse("", status_code=500)
])
original4b = self.datary.get_original(
self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 2)
self.assertTrue(isinstance(original4b, dict))
self.assertEqual(original4b, {})
mock_request.reset_mock()
original5 = self.datary.get_original(
MockRequestResponse("", status_code=500))
self.assertEqual(mock_request.call_count, 0)
self.assertTrue(isinstance(original5, dict))
self.assertEqual(original5, {})
mock_request.reset_mock()
mock_request.side_effect = iter(
[MockRequestResponse("", json=self.original),
MockRequestResponse("", json=self.original)])
original6 = self.datary.get_original(
self.dataset_uuid, self.repo_uuid, scope='repo')
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original6, dict))
self.assertEqual(original6, self.original)
@mock.patch('datary.workdirs.DataryWorkdirs.get_wdir_filetree')
@mock.patch('datary.workdirs.DataryWorkdirs.get_wdir_changes')
def test_get_dataset_uuid(self, mock_get_wdir_changes,
mock_get_wdir_filetree):
mock_get_wdir_filetree.return_value = self.workdir
mock_get_wdir_changes.return_value = self.changes
path = 'b'
basename = 'bb'
empty_result = self.datary.get_dataset_uuid(self.wdir_uuid)
self.assertEqual(empty_result, None)
from_changes_result = self.datary.get_dataset_uuid(
self.wdir_uuid, path, basename)
self.assertEqual(from_changes_result, 'inode1_changes')
self.assertEqual(mock_get_wdir_filetree.call_count, 1)
self.assertEqual(mock_get_wdir_changes.call_count, 1)
mock_get_wdir_filetree.reset_mock()
mock_get_wdir_changes.reset_mock()
path = ''
basename = 'c'
from_commit_result = self.datary.get_dataset_uuid(
self.wdir_uuid, path, basename)
self.assertEqual(from_commit_result, 'c_sha1')
self.assertEqual(mock_get_wdir_filetree.call_count, 1)
self.assertEqual(mock_get_wdir_changes.call_count, 1)
mock_get_wdir_filetree.reset_mock()
mock_get_wdir_changes.reset_mock()
path = 'bb'
basename = 'b'
no_result = self.datary.get_dataset_uuid(
self.wdir_uuid, path, basename)
self.assertEqual(no_result, None)
self.assertEqual(mock_get_wdir_filetree.call_count, 1)
self.assertEqual(mock_get_wdir_changes.call_count, 1)
@mock.patch('datary.requests.requests.requests.get')
def test_get_commited_dataset_uuid(self, mock_request):
mock_request.return_value = MockRequestResponse(
"", json=self.dataset_uuid)
result_no_pathname = self.datary.get_commited_dataset_uuid(
self.wdir_uuid)
self.assertEqual(result_no_pathname, {})
self.assertEqual(mock_request.call_count, 0)
result = self.datary.get_commited_dataset_uuid(
self.wdir_uuid, 'path', 'basename')
self.assertEqual(result, self.dataset_uuid)
self.assertEqual(mock_request.call_count, 1)
mock_request.reset_mock()
mock_request.return_value = MockRequestResponse("", status_code=500)
no_response_result = self.datary.get_commited_dataset_uuid(
self.wdir_uuid, 'path', 'basename')
self.assertEqual(no_response_result, {})
self.assertEqual(mock_request.call_count, 1)
| true | true |
f71612467e9b5dc259949c2813d2f39841a075f0 | 78 | py | Python | src/main.py | jadmz/pygame-box2d-template | cd5ef75940b1c919aade5acb11924cbfba8e7c60 | [
"MIT"
] | null | null | null | src/main.py | jadmz/pygame-box2d-template | cd5ef75940b1c919aade5acb11924cbfba8e7c60 | [
"MIT"
] | null | null | null | src/main.py | jadmz/pygame-box2d-template | cd5ef75940b1c919aade5acb11924cbfba8e7c60 | [
"MIT"
] | 1 | 2020-03-22T18:20:54.000Z | 2020-03-22T18:20:54.000Z | from game import Game
game = Game("Pygame with Box2d Template")
game.run()
| 11.142857 | 41 | 0.717949 | from game import Game
game = Game("Pygame with Box2d Template")
game.run()
| true | true |
f716125d67e85c57e3e02321d8def2b0570ba241 | 1,953 | py | Python | TwitchApiPy/TwitchApiPy.py | xegepa/Twitch-Api-Py | 84613dd32654315422481d24bb9afc1ab3967d3d | [
"MIT"
] | 2 | 2020-08-16T12:54:23.000Z | 2021-02-11T20:43:42.000Z | TwitchApiPy/TwitchApiPy.py | xegepa/Twitch-Api-Py | 84613dd32654315422481d24bb9afc1ab3967d3d | [
"MIT"
] | null | null | null | TwitchApiPy/TwitchApiPy.py | xegepa/Twitch-Api-Py | 84613dd32654315422481d24bb9afc1ab3967d3d | [
"MIT"
] | null | null | null | import requests
class TwitchApiPy():
    """Minimal client for a few Twitch Helix API endpoints.

    Set `ClientID` and `OAuth` (the full `Authorization` header value)
    before calling any query method. No retries or timeouts are applied;
    network errors propagate as `requests` exceptions.
    """

    def __init__(self):
        self.ClientID = ""
        self.OAuth = ""

    def _headers(self):
        """Builds the auth headers shared by every Helix request."""
        return {'Client-ID': self.ClientID, 'Authorization': self.OAuth}

    def GetUserID(self, name):
        """Returns the Twitch user id for a login name.

        You don't really use this directly; it backs the other requests.
        Raises IndexError/KeyError if the login does not resolve.
        """
        response = requests.get(
            url="https://api.twitch.tv/helix/users?login={}".format(name),
            headers=self._headers())
        payload = response.json()
        # Local renamed from `id` to avoid shadowing the builtin.
        user_id = payload["data"][0]['id']
        return user_id

    def GetFollowerCount(self, name):
        """Returns the number of followers of the asked channel."""
        user_id = self.GetUserID(name)
        response = requests.get(
            url="https://api.twitch.tv/helix/users/follows?to_id={}".format(user_id),
            headers=self._headers())
        return response.json()['total']

    def GetChannelStatus(self, name):
        """Returns whether the streamer is online and the stream language.

        NOTE(review): this trusts the first result of a channel *search*
        to be the requested channel -- confirm for ambiguous names.
        """
        response = requests.get(
            url="https://api.twitch.tv/helix/search/channels?query={}".format(name),
            headers=self._headers())
        channel = response.json()["data"][0]
        return {
            "islive": channel['is_live'],
            "language": channel['broadcaster_language'],
        }

    def GetChannelInfo(self, name):
        """Returns general info (name, game, title) about the channel."""
        user_id = self.GetUserID(name)
        response = requests.get(
            url="https://api.twitch.tv/helix/channels?broadcaster_id={}".format(user_id),
            headers=self._headers())
        channel = response.json()["data"][0]
        return {
            "name": channel["broadcaster_name"],
            "game": channel["game_name"],
            "title": channel["title"],
        }
| 34.875 | 165 | 0.573989 | import requests
class TwitchApiPy():
def __init__(self):
self.ClientID = ""
self.OAuth = ""
def GetUserID(self,name):
r = requests.get(url = "https://api.twitch.tv/helix/users?login={}".format(name), headers = {'Client-ID': self.ClientID,'Authorization': self.OAuth})
r = r.json()
id = r["data"][0]['id']
return id
def GetFollowerCount(self,name):
id = self.GetUserID(name)
r = requests.get(url="https://api.twitch.tv/helix/users/follows?to_id={}".format(id), headers = {'Client-ID': self.ClientID,'Authorization': self.OAuth})
r = r.json()
return r['total']
def GetChannelStatus(self, name):
r = requests.get(url="https://api.twitch.tv/helix/search/channels?query={}".format(name), headers = {'Client-ID': self.ClientID,'Authorization': self.OAuth})
r = r.json()
is_live=r["data"][0]['is_live']
lang =r["data"][0]['broadcaster_language']
total_info = {
"islive": is_live,
"language": lang,
}
return total_info
def GetChannelInfo(self,name):
id = self.GetUserID(name)
r = requests.get(url="https://api.twitch.tv/helix/channels?broadcaster_id={}".format(id), headers = {'Client-ID': self.ClientID,'Authorization': self.OAuth})
r = r.json()
name= r["data"][0]["broadcaster_name"]
game = r["data"][0]["game_name"]
title = r["data"][0]["title"]
total_info = {
"name" : name,
"game" : game,
"title" : title
}
return total_info
| true | true |
f71612ddef304fe8e27a1500d0a1c4bde6565bb6 | 35,689 | py | Python | fhirclient/models/medicationrequest.py | mdx-dev/client-py | f6c16c9bd386c5b05d69753b89c6519d568814ac | [
"Apache-2.0"
] | null | null | null | fhirclient/models/medicationrequest.py | mdx-dev/client-py | f6c16c9bd386c5b05d69753b89c6519d568814ac | [
"Apache-2.0"
] | null | null | null | fhirclient/models/medicationrequest.py | mdx-dev/client-py | f6c16c9bd386c5b05d69753b89c6519d568814ac | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/MedicationRequest) on 2019-01-22.
# 2019, SMART Health IT.
from . import domainresource
class MedicationRequest(domainresource.DomainResource):
"""
O
r
d
e
r
i
n
g
o
f
m
e
d
i
c
a
t
i
o
n
f
o
r
p
a
t
i
e
n
t
o
r
g
r
o
u
p
.
A
n
o
r
d
e
r
o
r
r
e
q
u
e
s
t
f
o
r
b
o
t
h
s
u
p
p
l
y
o
f
t
h
e
m
e
d
i
c
a
t
i
o
n
a
n
d
t
h
e
i
n
s
t
r
u
c
t
i
o
n
s
f
o
r
a
d
m
i
n
i
s
t
r
a
t
i
o
n
o
f
t
h
e
m
e
d
i
c
a
t
i
o
n
t
o
a
p
a
t
i
e
n
t
.
T
h
e
r
e
s
o
u
r
c
e
i
s
c
a
l
l
e
d
"
M
e
d
i
c
a
t
i
o
n
R
e
q
u
e
s
t
"
r
a
t
h
e
r
t
h
a
n
"
M
e
d
i
c
a
t
i
o
n
P
r
e
s
c
r
i
p
t
i
o
n
"
o
r
"
M
e
d
i
c
a
t
i
o
n
O
r
d
e
r
"
t
o
g
e
n
e
r
a
l
i
z
e
t
h
e
u
s
e
a
c
r
o
s
s
i
n
p
a
t
i
e
n
t
a
n
d
o
u
t
p
a
t
i
e
n
t
s
e
t
t
i
n
g
s
,
i
n
c
l
u
d
i
n
g
c
a
r
e
p
l
a
n
s
,
e
t
c
.
,
a
n
d
t
o
h
a
r
m
o
n
i
z
e
w
i
t
h
w
o
r
k
f
l
o
w
p
a
t
t
e
r
n
s
.
"""
resource_type = "MedicationRequest"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.authoredOn = None
"""
W
h
e
n
r
e
q
u
e
s
t
w
a
s
i
n
i
t
i
a
l
l
y
a
u
t
h
o
r
e
d
.
Type `FHIRDate` (represented as `str` in JSON). """
self.basedOn = None
"""
W
h
a
t
r
e
q
u
e
s
t
f
u
l
f
i
l
l
s
.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.category = None
"""
T
y
p
e
o
f
m
e
d
i
c
a
t
i
o
n
u
s
a
g
e
.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.courseOfTherapyType = None
"""
O
v
e
r
a
l
l
p
a
t
t
e
r
n
o
f
m
e
d
i
c
a
t
i
o
n
a
d
m
i
n
i
s
t
r
a
t
i
o
n
.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.detectedIssue = None
"""
C
l
i
n
i
c
a
l
I
s
s
u
e
w
i
t
h
a
c
t
i
o
n
.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.dispenseRequest = None
"""
M
e
d
i
c
a
t
i
o
n
s
u
p
p
l
y
a
u
t
h
o
r
i
z
a
t
i
o
n
.
Type `MedicationRequestDispenseRequest` (represented as `dict` in JSON). """
self.doNotPerform = None
"""
T
r
u
e
i
f
r
e
q
u
e
s
t
i
s
p
r
o
h
i
b
i
t
i
n
g
a
c
t
i
o
n
.
Type `bool`. """
self.dosageInstruction = None
"""
H
o
w
t
h
e
m
e
d
i
c
a
t
i
o
n
s
h
o
u
l
d
b
e
t
a
k
e
n
.
List of `Dosage` items (represented as `dict` in JSON). """
self.encounter = None
"""
E
n
c
o
u
n
t
e
r
c
r
e
a
t
e
d
a
s
p
a
r
t
o
f
e
n
c
o
u
n
t
e
r
/
a
d
m
i
s
s
i
o
n
/
s
t
a
y
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.eventHistory = None
"""
A
l
i
s
t
o
f
e
v
e
n
t
s
o
f
i
n
t
e
r
e
s
t
i
n
t
h
e
l
i
f
e
c
y
c
l
e
.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.groupIdentifier = None
"""
C
o
m
p
o
s
i
t
e
r
e
q
u
e
s
t
t
h
i
s
i
s
p
a
r
t
o
f
.
Type `Identifier` (represented as `dict` in JSON). """
self.identifier = None
"""
E
x
t
e
r
n
a
l
i
d
s
f
o
r
t
h
i
s
r
e
q
u
e
s
t
.
List of `Identifier` items (represented as `dict` in JSON). """
self.instantiatesCanonical = None
"""
I
n
s
t
a
n
t
i
a
t
e
s
F
H
I
R
p
r
o
t
o
c
o
l
o
r
d
e
f
i
n
i
t
i
o
n
.
List of `str` items. """
self.instantiatesUri = None
"""
I
n
s
t
a
n
t
i
a
t
e
s
e
x
t
e
r
n
a
l
p
r
o
t
o
c
o
l
o
r
d
e
f
i
n
i
t
i
o
n
.
List of `str` items. """
self.insurance = None
"""
A
s
s
o
c
i
a
t
e
d
i
n
s
u
r
a
n
c
e
c
o
v
e
r
a
g
e
.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.intent = None
"""
p
r
o
p
o
s
a
l
|
p
l
a
n
|
o
r
d
e
r
|
o
r
i
g
i
n
a
l
-
o
r
d
e
r
|
i
n
s
t
a
n
c
e
-
o
r
d
e
r
|
o
p
t
i
o
n
.
Type `str`. """
self.medicationCodeableConcept = None
"""
M
e
d
i
c
a
t
i
o
n
t
o
b
e
t
a
k
e
n
.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.medicationReference = None
"""
M
e
d
i
c
a
t
i
o
n
t
o
b
e
t
a
k
e
n
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.note = None
"""
I
n
f
o
r
m
a
t
i
o
n
a
b
o
u
t
t
h
e
p
r
e
s
c
r
i
p
t
i
o
n
.
List of `Annotation` items (represented as `dict` in JSON). """
self.performer = None
"""
I
n
t
e
n
d
e
d
p
e
r
f
o
r
m
e
r
o
f
a
d
m
i
n
i
s
t
r
a
t
i
o
n
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.performerType = None
"""
D
e
s
i
r
e
d
k
i
n
d
o
f
p
e
r
f
o
r
m
e
r
o
f
t
h
e
m
e
d
i
c
a
t
i
o
n
a
d
m
i
n
i
s
t
r
a
t
i
o
n
.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.priorPrescription = None
"""
A
n
o
r
d
e
r
/
p
r
e
s
c
r
i
p
t
i
o
n
t
h
a
t
i
s
b
e
i
n
g
r
e
p
l
a
c
e
d
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.priority = None
"""
r
o
u
t
i
n
e
|
u
r
g
e
n
t
|
a
s
a
p
|
s
t
a
t
.
Type `str`. """
self.reasonCode = None
"""
R
e
a
s
o
n
o
r
i
n
d
i
c
a
t
i
o
n
f
o
r
o
r
d
e
r
i
n
g
o
r
n
o
t
o
r
d
e
r
i
n
g
t
h
e
m
e
d
i
c
a
t
i
o
n
.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.reasonReference = None
"""
C
o
n
d
i
t
i
o
n
o
r
o
b
s
e
r
v
a
t
i
o
n
t
h
a
t
s
u
p
p
o
r
t
s
w
h
y
t
h
e
p
r
e
s
c
r
i
p
t
i
o
n
i
s
b
e
i
n
g
w
r
i
t
t
e
n
.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.recorder = None
"""
P
e
r
s
o
n
w
h
o
e
n
t
e
r
e
d
t
h
e
r
e
q
u
e
s
t
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.reportedBoolean = None
"""
R
e
p
o
r
t
e
d
r
a
t
h
e
r
t
h
a
n
p
r
i
m
a
r
y
r
e
c
o
r
d
.
Type `bool`. """
self.reportedReference = None
"""
R
e
p
o
r
t
e
d
r
a
t
h
e
r
t
h
a
n
p
r
i
m
a
r
y
r
e
c
o
r
d
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.requester = None
"""
W
h
o
/
W
h
a
t
r
e
q
u
e
s
t
e
d
t
h
e
R
e
q
u
e
s
t
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.status = None
"""
a
c
t
i
v
e
|
o
n
-
h
o
l
d
|
c
a
n
c
e
l
l
e
d
|
c
o
m
p
l
e
t
e
d
|
e
n
t
e
r
e
d
-
i
n
-
e
r
r
o
r
|
s
t
o
p
p
e
d
|
d
r
a
f
t
|
u
n
k
n
o
w
n
.
Type `str`. """
self.statusReason = None
"""
R
e
a
s
o
n
f
o
r
c
u
r
r
e
n
t
s
t
a
t
u
s
.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.subject = None
"""
W
h
o
o
r
g
r
o
u
p
m
e
d
i
c
a
t
i
o
n
r
e
q
u
e
s
t
i
s
f
o
r
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.substitution = None
"""
A
n
y
r
e
s
t
r
i
c
t
i
o
n
s
o
n
m
e
d
i
c
a
t
i
o
n
s
u
b
s
t
i
t
u
t
i
o
n
.
Type `MedicationRequestSubstitution` (represented as `dict` in JSON). """
self.supportingInformation = None
"""
I
n
f
o
r
m
a
t
i
o
n
t
o
s
u
p
p
o
r
t
o
r
d
e
r
i
n
g
o
f
t
h
e
m
e
d
i
c
a
t
i
o
n
.
List of `FHIRReference` items (represented as `dict` in JSON). """
super(MedicationRequest, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
    """ Return the element property definitions for MedicationRequest.

    Extends the superclass list with this resource's own elements.  Each
    entry is a tuple of:
    ("attribute_name", "json_name", type, is_list, of_many_group_or_None, not_optional)
    """
    js = super(MedicationRequest, self).elementProperties()
    js.extend([
        ("authoredOn", "authoredOn", fhirdate.FHIRDate, False, None, False),
        ("basedOn", "basedOn", fhirreference.FHIRReference, True, None, False),
        ("category", "category", codeableconcept.CodeableConcept, True, None, False),
        ("courseOfTherapyType", "courseOfTherapyType", codeableconcept.CodeableConcept, False, None, False),
        ("detectedIssue", "detectedIssue", fhirreference.FHIRReference, True, None, False),
        ("dispenseRequest", "dispenseRequest", MedicationRequestDispenseRequest, False, None, False),
        ("doNotPerform", "doNotPerform", bool, False, None, False),
        ("dosageInstruction", "dosageInstruction", dosage.Dosage, True, None, False),
        ("encounter", "encounter", fhirreference.FHIRReference, False, None, False),
        ("eventHistory", "eventHistory", fhirreference.FHIRReference, True, None, False),
        ("groupIdentifier", "groupIdentifier", identifier.Identifier, False, None, False),
        ("identifier", "identifier", identifier.Identifier, True, None, False),
        ("instantiatesCanonical", "instantiatesCanonical", str, True, None, False),
        ("instantiatesUri", "instantiatesUri", str, True, None, False),
        ("insurance", "insurance", fhirreference.FHIRReference, True, None, False),
        ("intent", "intent", str, False, None, True),
        # medication[x]: exactly one of the two "medication" group members is required.
        ("medicationCodeableConcept", "medicationCodeableConcept", codeableconcept.CodeableConcept, False, "medication", True),
        ("medicationReference", "medicationReference", fhirreference.FHIRReference, False, "medication", True),
        ("note", "note", annotation.Annotation, True, None, False),
        ("performer", "performer", fhirreference.FHIRReference, False, None, False),
        ("performerType", "performerType", codeableconcept.CodeableConcept, False, None, False),
        ("priorPrescription", "priorPrescription", fhirreference.FHIRReference, False, None, False),
        ("priority", "priority", str, False, None, False),
        ("reasonCode", "reasonCode", codeableconcept.CodeableConcept, True, None, False),
        ("reasonReference", "reasonReference", fhirreference.FHIRReference, True, None, False),
        ("recorder", "recorder", fhirreference.FHIRReference, False, None, False),
        # reported[x]: optional "reported" choice group (boolean or reference).
        ("reportedBoolean", "reportedBoolean", bool, False, "reported", False),
        ("reportedReference", "reportedReference", fhirreference.FHIRReference, False, "reported", False),
        ("requester", "requester", fhirreference.FHIRReference, False, None, False),
        ("status", "status", str, False, None, True),
        ("statusReason", "statusReason", codeableconcept.CodeableConcept, False, None, False),
        ("subject", "subject", fhirreference.FHIRReference, False, None, True),
        ("substitution", "substitution", MedicationRequestSubstitution, False, None, False),
        ("supportingInformation", "supportingInformation", fhirreference.FHIRReference, True, None, False),
    ])
    return js
from . import backboneelement
class MedicationRequestDispenseRequest(backboneelement.BackboneElement):
    """ Medication supply authorization.

    Indicates the specific details for the dispense or medication supply part
    of a medication request (also known as a MedicationPrescription or
    MedicationOrder).  Note that this information is not always sent with the
    order.  There may be in some settings (e.g. hospitals) institutional or
    system support for completing the dispense details in the pharmacy
    department.
    """

    resource_type = "MedicationRequestDispenseRequest"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.dispenseInterval = None
        """ Minimum period of time between dispenses.
        Type `Duration` (represented as `dict` in JSON). """

        self.expectedSupplyDuration = None
        """ Number of days supply per dispense.
        Type `Duration` (represented as `dict` in JSON). """

        self.initialFill = None
        """ First fill details.
        Type `MedicationRequestDispenseRequestInitialFill` (represented as `dict` in JSON). """

        self.numberOfRepeatsAllowed = None
        """ Number of refills authorized.
        Type `int`. """

        self.performer = None
        """ Intended dispenser.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.quantity = None
        """ Amount of medication to supply per dispense.
        Type `Quantity` (represented as `dict` in JSON). """

        self.validityPeriod = None
        """ Time period supply is authorized for.
        Type `Period` (represented as `dict` in JSON). """

        super(MedicationRequestDispenseRequest, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Return the element property definitions for this backbone element.

        Each entry is a tuple of:
        ("attribute_name", "json_name", type, is_list, of_many_group_or_None, not_optional)
        """
        js = super(MedicationRequestDispenseRequest, self).elementProperties()
        js.extend([
            ("dispenseInterval", "dispenseInterval", duration.Duration, False, None, False),
            ("expectedSupplyDuration", "expectedSupplyDuration", duration.Duration, False, None, False),
            ("initialFill", "initialFill", MedicationRequestDispenseRequestInitialFill, False, None, False),
            ("numberOfRepeatsAllowed", "numberOfRepeatsAllowed", int, False, None, False),
            ("performer", "performer", fhirreference.FHIRReference, False, None, False),
            ("quantity", "quantity", quantity.Quantity, False, None, False),
            ("validityPeriod", "validityPeriod", period.Period, False, None, False),
        ])
        return js
class MedicationRequestDispenseRequestInitialFill(backboneelement.BackboneElement):
    """ First fill details.

    Indicates the quantity or duration for the first dispense of the
    medication.
    """

    resource_type = "MedicationRequestDispenseRequestInitialFill"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.duration = None
        """ First fill duration.
        Type `Duration` (represented as `dict` in JSON). """

        self.quantity = None
        """ First fill quantity.
        Type `Quantity` (represented as `dict` in JSON). """

        super(MedicationRequestDispenseRequestInitialFill, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Return the element property definitions for this backbone element.

        Each entry is a tuple of:
        ("attribute_name", "json_name", type, is_list, of_many_group_or_None, not_optional)
        """
        js = super(MedicationRequestDispenseRequestInitialFill, self).elementProperties()
        js.extend([
            ("duration", "duration", duration.Duration, False, None, False),
            ("quantity", "quantity", quantity.Quantity, False, None, False),
        ])
        return js
class MedicationRequestSubstitution(backboneelement.BackboneElement):
    """ Any restrictions on medication substitution.

    Indicates whether or not substitution can or should be part of the
    dispense.  In some cases, substitution must happen, in other cases
    substitution must not happen.  This block explains the prescriber's
    intent.  If nothing is specified substitution may be done.
    """

    resource_type = "MedicationRequestSubstitution"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.allowedBoolean = None
        """ Whether substitution is allowed or not.
        Type `bool`. """

        self.allowedCodeableConcept = None
        """ Whether substitution is allowed or not.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.reason = None
        """ Why should (not) substitution be made.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(MedicationRequestSubstitution, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Return the element property definitions for this backbone element.

        allowed[x] is a required choice group: exactly one of allowedBoolean
        or allowedCodeableConcept must be present.
        """
        js = super(MedicationRequestSubstitution, self).elementProperties()
        js.extend([
            ("allowedBoolean", "allowedBoolean", bool, False, "allowed", True),
            ("allowedCodeableConcept", "allowedCodeableConcept", codeableconcept.CodeableConcept, False, "allowed", True),
            ("reason", "reason", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js
import sys
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import dosage
except ImportError:
dosage = sys.modules[__package__ + '.dosage']
try:
from . import duration
except ImportError:
duration = sys.modules[__package__ + '.duration']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
| 12.053023 | 131 | 0.304548 |
from . import domainresource
class MedicationRequest(domainresource.DomainResource):
resource_type = "MedicationRequest"
def __init__(self, jsondict=None, strict=True):
self.authoredOn = None
self.basedOn = None
self.category = None
self.courseOfTherapyType = None
self.detectedIssue = None
self.dispenseRequest = None
self.doNotPerform = None
self.dosageInstruction = None
self.encounter = None
self.eventHistory = None
self.groupIdentifier = None
self.identifier = None
self.instantiatesCanonical = None
self.instantiatesUri = None
self.insurance = None
self.intent = None
self.medicationCodeableConcept = None
self.medicationReference = None
self.note = None
self.performer = None
self.performerType = None
self.priorPrescription = None
self.priority = None
self.reasonCode = None
self.reasonReference = None
self.recorder = None
self.reportedBoolean = None
self.reportedReference = None
self.requester = None
self.status = None
self.statusReason = None
self.subject = None
self.substitution = None
self.supportingInformation = None
super(MedicationRequest, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationRequest, self).elementProperties()
js.extend([
("authoredOn", "authoredOn", fhirdate.FHIRDate, False, None, False),
("basedOn", "basedOn", fhirreference.FHIRReference, True, None, False),
("category", "category", codeableconcept.CodeableConcept, True, None, False),
("courseOfTherapyType", "courseOfTherapyType", codeableconcept.CodeableConcept, False, None, False),
("detectedIssue", "detectedIssue", fhirreference.FHIRReference, True, None, False),
("dispenseRequest", "dispenseRequest", MedicationRequestDispenseRequest, False, None, False),
("doNotPerform", "doNotPerform", bool, False, None, False),
("dosageInstruction", "dosageInstruction", dosage.Dosage, True, None, False),
("encounter", "encounter", fhirreference.FHIRReference, False, None, False),
("eventHistory", "eventHistory", fhirreference.FHIRReference, True, None, False),
("groupIdentifier", "groupIdentifier", identifier.Identifier, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("instantiatesCanonical", "instantiatesCanonical", str, True, None, False),
("instantiatesUri", "instantiatesUri", str, True, None, False),
("insurance", "insurance", fhirreference.FHIRReference, True, None, False),
("intent", "intent", str, False, None, True),
("medicationCodeableConcept", "medicationCodeableConcept", codeableconcept.CodeableConcept, False, "medication", True),
("medicationReference", "medicationReference", fhirreference.FHIRReference, False, "medication", True),
("note", "note", annotation.Annotation, True, None, False),
("performer", "performer", fhirreference.FHIRReference, False, None, False),
("performerType", "performerType", codeableconcept.CodeableConcept, False, None, False),
("priorPrescription", "priorPrescription", fhirreference.FHIRReference, False, None, False),
("priority", "priority", str, False, None, False),
("reasonCode", "reasonCode", codeableconcept.CodeableConcept, True, None, False),
("reasonReference", "reasonReference", fhirreference.FHIRReference, True, None, False),
("recorder", "recorder", fhirreference.FHIRReference, False, None, False),
("reportedBoolean", "reportedBoolean", bool, False, "reported", False),
("reportedReference", "reportedReference", fhirreference.FHIRReference, False, "reported", False),
("requester", "requester", fhirreference.FHIRReference, False, None, False),
("status", "status", str, False, None, True),
("statusReason", "statusReason", codeableconcept.CodeableConcept, False, None, False),
("subject", "subject", fhirreference.FHIRReference, False, None, True),
("substitution", "substitution", MedicationRequestSubstitution, False, None, False),
("supportingInformation", "supportingInformation", fhirreference.FHIRReference, True, None, False),
])
return js
from . import backboneelement
class MedicationRequestDispenseRequest(backboneelement.BackboneElement):
resource_type = "MedicationRequestDispenseRequest"
def __init__(self, jsondict=None, strict=True):
self.dispenseInterval = None
self.expectedSupplyDuration = None
self.initialFill = None
self.numberOfRepeatsAllowed = None
self.performer = None
self.quantity = None
self.validityPeriod = None
super(MedicationRequestDispenseRequest, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationRequestDispenseRequest, self).elementProperties()
js.extend([
("dispenseInterval", "dispenseInterval", duration.Duration, False, None, False),
("expectedSupplyDuration", "expectedSupplyDuration", duration.Duration, False, None, False),
("initialFill", "initialFill", MedicationRequestDispenseRequestInitialFill, False, None, False),
("numberOfRepeatsAllowed", "numberOfRepeatsAllowed", int, False, None, False),
("performer", "performer", fhirreference.FHIRReference, False, None, False),
("quantity", "quantity", quantity.Quantity, False, None, False),
("validityPeriod", "validityPeriod", period.Period, False, None, False),
])
return js
class MedicationRequestDispenseRequestInitialFill(backboneelement.BackboneElement):
resource_type = "MedicationRequestDispenseRequestInitialFill"
def __init__(self, jsondict=None, strict=True):
self.duration = None
self.quantity = None
super(MedicationRequestDispenseRequestInitialFill, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationRequestDispenseRequestInitialFill, self).elementProperties()
js.extend([
("duration", "duration", duration.Duration, False, None, False),
("quantity", "quantity", quantity.Quantity, False, None, False),
])
return js
class MedicationRequestSubstitution(backboneelement.BackboneElement):
resource_type = "MedicationRequestSubstitution"
def __init__(self, jsondict=None, strict=True):
self.allowedBoolean = None
self.allowedCodeableConcept = None
self.reason = None
super(MedicationRequestSubstitution, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationRequestSubstitution, self).elementProperties()
js.extend([
("allowedBoolean", "allowedBoolean", bool, False, "allowed", True),
("allowedCodeableConcept", "allowedCodeableConcept", codeableconcept.CodeableConcept, False, "allowed", True),
("reason", "reason", codeableconcept.CodeableConcept, False, None, False),
])
return js
import sys
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import dosage
except ImportError:
dosage = sys.modules[__package__ + '.dosage']
try:
from . import duration
except ImportError:
duration = sys.modules[__package__ + '.duration']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
| true | true |
f716130a5e4aa592b5742e419a3914560c7330fc | 1,320 | py | Python | homeassistant/components/synology_dsm/const.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 1,635 | 2015-01-01T14:59:18.000Z | 2016-04-13T02:36:16.000Z | homeassistant/components/synology_dsm/const.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 1,463 | 2015-01-06T06:18:07.000Z | 2016-04-12T22:30:37.000Z | homeassistant/components/synology_dsm/const.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 659 | 2015-01-05T14:02:23.000Z | 2016-04-12T23:39:31.000Z | """Constants for Synology DSM."""
from __future__ import annotations
from synology_dsm.api.surveillance_station.const import SNAPSHOT_PROFILE_BALANCED
from homeassistant.const import Platform

DOMAIN = "synology_dsm"
ATTRIBUTION = "Data provided by Synology"

# Entity platforms set up by this integration.
PLATFORMS = [
    Platform.BINARY_SENSOR,
    Platform.BUTTON,
    Platform.CAMERA,
    Platform.SENSOR,
    Platform.SWITCH,
    Platform.UPDATE,
]

# Storage keys for the per-entry coordinator objects and load flag.
COORDINATOR_CAMERAS = "coordinator_cameras"
COORDINATOR_CENTRAL = "coordinator_central"
COORDINATOR_SWITCHES = "coordinator_switches"
SYSTEM_LOADED = "system_loaded"

# Keys used when reporting error details.
EXCEPTION_DETAILS = "details"
EXCEPTION_UNKNOWN = "unknown"

# Entry keys
SYNO_API = "syno_api"
UNDO_UPDATE_LISTENER = "undo_update_listener"

# Configuration
CONF_SERIAL = "serial"
CONF_VOLUMES = "volumes"
CONF_DEVICE_TOKEN = "device_token"
CONF_SNAPSHOT_QUALITY = "snap_profile_type"

# Connection defaults (5000 = HTTP, 5001 = HTTPS on Synology DSM).
DEFAULT_USE_SSL = True
DEFAULT_VERIFY_SSL = False
DEFAULT_PORT = 5000
DEFAULT_PORT_SSL = 5001
# Options
DEFAULT_SCAN_INTERVAL = 15  # min
DEFAULT_TIMEOUT = 10  # sec
DEFAULT_SNAPSHOT_QUALITY = SNAPSHOT_PROFILE_BALANCED

ENTITY_UNIT_LOAD = "load"

# Signals
SIGNAL_CAMERA_SOURCE_CHANGED = "synology_dsm.camera_stream_source_changed"

# Services
SERVICE_REBOOT = "reboot"
SERVICE_SHUTDOWN = "shutdown"
SERVICES = [
    SERVICE_REBOOT,
    SERVICE_SHUTDOWN,
]
| 23.571429 | 81 | 0.79697 | from __future__ import annotations
from synology_dsm.api.surveillance_station.const import SNAPSHOT_PROFILE_BALANCED
from homeassistant.const import Platform
DOMAIN = "synology_dsm"
ATTRIBUTION = "Data provided by Synology"
PLATFORMS = [
Platform.BINARY_SENSOR,
Platform.BUTTON,
Platform.CAMERA,
Platform.SENSOR,
Platform.SWITCH,
Platform.UPDATE,
]
COORDINATOR_CAMERAS = "coordinator_cameras"
COORDINATOR_CENTRAL = "coordinator_central"
COORDINATOR_SWITCHES = "coordinator_switches"
SYSTEM_LOADED = "system_loaded"
EXCEPTION_DETAILS = "details"
EXCEPTION_UNKNOWN = "unknown"
SYNO_API = "syno_api"
UNDO_UPDATE_LISTENER = "undo_update_listener"
CONF_SERIAL = "serial"
CONF_VOLUMES = "volumes"
CONF_DEVICE_TOKEN = "device_token"
CONF_SNAPSHOT_QUALITY = "snap_profile_type"
DEFAULT_USE_SSL = True
DEFAULT_VERIFY_SSL = False
DEFAULT_PORT = 5000
DEFAULT_PORT_SSL = 5001
DEFAULT_SCAN_INTERVAL = 15
DEFAULT_TIMEOUT = 10
DEFAULT_SNAPSHOT_QUALITY = SNAPSHOT_PROFILE_BALANCED
ENTITY_UNIT_LOAD = "load"
SIGNAL_CAMERA_SOURCE_CHANGED = "synology_dsm.camera_stream_source_changed"
SERVICE_REBOOT = "reboot"
SERVICE_SHUTDOWN = "shutdown"
SERVICES = [
SERVICE_REBOOT,
SERVICE_SHUTDOWN,
]
| true | true |
f716137a258773159a3f46fb247a0224787d63af | 85 | py | Python | 2020/09/30/Django Pagination Tutorial/library/library/books/apps.py | kenjitagawa/youtube_video_code | ef3c48b9e136b3745d10395d94be64cb0a1f1c97 | [
"Unlicense"
] | 492 | 2019-06-25T12:54:31.000Z | 2022-03-30T12:38:28.000Z | 2020/09/30/Django Pagination Tutorial/library/library/books/apps.py | kenjitagawa/youtube_video_code | ef3c48b9e136b3745d10395d94be64cb0a1f1c97 | [
"Unlicense"
] | 122 | 2018-10-06T21:31:24.000Z | 2020-11-09T15:04:56.000Z | 2020/09/30/Django Pagination Tutorial/library/library/books/apps.py | kenjitagawa/youtube_video_code | ef3c48b9e136b3745d10395d94be64cb0a1f1c97 | [
"Unlicense"
] | 1,734 | 2019-06-03T06:25:13.000Z | 2022-03-31T23:57:53.000Z | from django.apps import AppConfig
class BooksConfig(AppConfig):
name = 'books'
| 14.166667 | 33 | 0.741176 | from django.apps import AppConfig
class BooksConfig(AppConfig):
name = 'books'
| true | true |
f71613f7207decd88d8c0d1641da8ad9b079d689 | 2,085 | py | Python | code/generateTimeline.py | sahilmgandhi/sahilmgandhi.github.io | e2d6aba9d90f53a4ebfbbd36b6b1d301dce039d3 | [
"CC-BY-3.0"
] | null | null | null | code/generateTimeline.py | sahilmgandhi/sahilmgandhi.github.io | e2d6aba9d90f53a4ebfbbd36b6b1d301dce039d3 | [
"CC-BY-3.0"
] | null | null | null | code/generateTimeline.py | sahilmgandhi/sahilmgandhi.github.io | e2d6aba9d90f53a4ebfbbd36b6b1d301dce039d3 | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/python
import random, sys, string, csv, argparse, subprocess

# Generate the HTML timeline boxes for the movie-review page: read the rated
# movies from a CSV sorted by descending rating, emit one <div> per rating
# bucket into the output file, then splice that file into movieReviews.html.
parser = argparse.ArgumentParser(
    description='''This script generates the HTML code for the timeline boxes''',
    epilog="""Have fun!""")
parser.add_argument('-i', default='movies.csv', dest='inputFile', help='Name of the csv file. Default is movies.csv')
parser.add_argument('-o', default='reviews.txt', dest='outputFile', help='Name of the output file. Default is reviews.txt')
args = parser.parse_args()

currRating = 9          # rating bucket currently being emitted (counts down)
counter = 0             # alternates the left/right timeline placement
htmlFile = 'movieReviews.html'
htmlEndingLine = 112    # line of the generated reviews' end marker in the HTML
htmlDesiredLine = 74    # line where the reviews should be moved to

# BUG FIX: the output file was previously opened with open() and never closed,
# so buffered data could still be unflushed when the `cat` subprocess below
# read it.  Writing inside `with` guarantees the file is flushed and closed.
with open(args.outputFile, 'w') as outputFile, open(args.inputFile, 'r') as movies:
    movieEntries = csv.reader(movies)
    outputFile.write("<div id=\"9\">")
    outputFile.write("<div id=\"ratingsBanner\"><h2>%d.00/10 - %d/10</h2></div>" % (currRating, (currRating + 1)))
    for row in movieEntries:
        # A lower integer rating starts a new bucket <div>.
        if int(float(row[0])) < currRating:
            currRating = int(float(row[0]))
            outputFile.write("</div>")
            outputFile.write("<div id=\"%d\">" % (currRating))
            outputFile.write("<div id=\"ratingsBanner\"><h2>%d.00/10 - %d.99/10</h2></div>" % (currRating, (currRating)))
        # Alternate container sides so entries zig-zag down the timeline.
        if counter % 2 == 0:
            outputFile.write("<div class=\"container left\">")
        else:
            outputFile.write("<div class=\"container right\">")
        outputFile.write("<div class=\"timelineContent\">")
        if row[1] == 'None':
            outputFile.write("<p>No movies that are ranked in the %d's yet</p>" % (currRating))
        else:
            outputFile.write("<h2>%.2f</h2>" % (float(row[0])))
            outputFile.write("<p>%s</p>" % (str(row[1])))
        outputFile.write("</div></div>")
        counter += 1
    outputFile.write("</div>")

# NOTE(review): args.outputFile is interpolated into a shell command; a file
# name containing shell metacharacters would be executed.  Only safe for
# trusted, local invocations.
# Drop the previously generated review <div>s, append the fresh ones, then
# move them from the end of the HTML file to their proper position with ed.
subprocess.call('sed -i \'/.*<div id="9">.*/d\' ../%s' % htmlFile, shell=True)
subprocess.call('cat %s >> ../%s' % (args.outputFile, htmlFile), shell=True)
subprocess.call('printf \'%dm%d\nw\n\' | ed ../%s' % (htmlEndingLine, htmlDesiredLine, htmlFile), shell=True)
| 40.882353 | 123 | 0.617746 |
import random, sys, string, csv, argparse, subprocess
parser=argparse.ArgumentParser(
description='''This script generates the HTML code for the timeline boxes''',
epilog="""Have fun!""")
parser.add_argument('-i', default='movies.csv', dest='inputFile', help='Name of the csv file. Default is movies.csv')
parser.add_argument('-o', default='reviews.txt', dest='outputFile', help='Name of the output file. Default is reviews.txt')
args=parser.parse_args()
outputFile = open(args.outputFile, 'w')
currRating = 9
counter = 0
htmlFile = 'movieReviews.html'
htmlEndingLine = 112
htmlDesiredLine = 74
if args.outputFile != 'reviews.txt':
htmlFile
with open(args.inputFile, 'r') as movies:
movieEntries = csv.reader(movies)
outputFile.write("<div id=\"9\">")
outputFile.write("<div id=\"ratingsBanner\"><h2>%d.00/10 - %d/10</h2></div>" % (currRating, (currRating + 1)))
for row in movieEntries:
if int(float(row[0])) < currRating:
currRating = int(float(row[0]))
outputFile.write("</div>")
outputFile.write("<div id=\"%d\">" % (currRating))
outputFile.write("<div id=\"ratingsBanner\"><h2>%d.00/10 - %d.99/10</h2></div>" % (currRating, (currRating)))
if counter % 2 == 0:
outputFile.write("<div class=\"container left\">")
else:
outputFile.write("<div class=\"container right\">")
outputFile.write("<div class=\"timelineContent\">")
if row[1] == 'None':
outputFile.write("<p>No movies that are ranked in the %d's yet</p>" % (currRating))
else:
outputFile.write("<h2>%.2f</h2>" % (float(row[0])))
outputFile.write("<p>%s</p>" % (str(row[1])))
outputFile.write("</div></div>")
counter += 1
outputFile.write("</div>")
subprocess.call('sed -i \'/.*<div id="9">.*/d\' ../movieReviews.html', shell=True)
subprocess.call('cat %s >> ../movieReviews.html' % args.outputFile, shell=True)
subprocess.call('printf \'112m74\nw\n\' | ed ../movieReviews.html', shell=True)
| true | true |
f71614d3da0d9da31c0fa08bb2b57c555c07181a | 4,274 | py | Python | deprecated/pycqed/instrument_drivers/physical_instruments/_controlbox/xiangs_timing_tape_code.py | nuttamas/PycQED_py3 | 1ee35c7428d36ed42ba4afb5d4bda98140b2283e | [
"MIT"
] | 60 | 2016-08-03T10:00:18.000Z | 2021-11-10T11:46:16.000Z | deprecated/pycqed/instrument_drivers/physical_instruments/_controlbox/xiangs_timing_tape_code.py | nuttamas/PycQED_py3 | 1ee35c7428d36ed42ba4afb5d4bda98140b2283e | [
"MIT"
] | 512 | 2016-08-03T17:10:02.000Z | 2022-03-31T14:03:43.000Z | deprecated/pycqed/instrument_drivers/physical_instruments/_controlbox/xiangs_timing_tape_code.py | nuttamas/PycQED_py3 | 1ee35c7428d36ed42ba4afb5d4bda98140b2283e | [
"MIT"
] | 34 | 2016-10-19T12:00:52.000Z | 2022-03-19T04:43:26.000Z | def set_conditional_tape(self, awg_nr, tape_nr, tape):
'''
set the conditional tape content for an awg
@param awg : the awg of the dac, (0,1,2).
@param tape_nr : the number of the tape, integer ranging (0~6)
@param tape : the array of entries, with a maximum number of entries 512.
Every entry is an integer has the following structure:
|WaitingTime (9bits) | PUlse number (3 bits) | EndofSegment marker (1bit)|
WaitingTime: The waiting time before the end of last pulse or trigger, in ns.
Pulse number: 0~7, indicating which pulse to be output
EndofSegment marker: 1 if the entry is the last entry of the tape, otherwise 0.
@return stat : 0 if the upload succeeded and 1 if the upload failed.
'''
length = len(tape)
tape_addr_width = 9
entry_length = 9 + 3 + 1
# Check out of bounds
if awg_nr < 0 or awg_nr > 2:
raise ValueError
if tape_nr < 0 or tape_nr > 6:
raise ValueError
if length < 1 or length > 512:
raise ValueError
cmd = defHeaders.AwgCondionalTape
data_bytes = []
data_bytes.append(self.encode_byte(awg_nr, 4))
data_bytes.append(self.encode_byte(tape_nr, 4))
data_bytes.append(self.encode_byte(length-1, 7,
signed_integer_length=tape_addr_width,
expected_number_of_bytes=np.ceil(tape_addr_width/7.0)))
for sample_data in tape:
data_bytes.append(self.encode_byte(self.convert_to_signed(sample_data, entry_length), 7,
signed_integer_length=entry_length,
expected_number_of_bytes=np.ceil(entry_length/7.0)))
message = self.create_message(cmd, data_bytes)
(stat, mesg) = self.serial_write(message)
return (stat, mesg)
def set_segmented_tape(self, awg_nr, tape):
    '''
    set the segmented tape content for an awg
    @param awg : the awg of the dac, (0,1,2).
    @param tape : the array of entries, with a maximum number of entries 29184.
           Every entry is an integer with the following structure:
           |WaitingTime (9 bits) | Pulse number (3 bits) | EndofSegment marker (1 bit)|
           WaitingTime: the waiting time before the end of the last pulse or trigger, in ns.
           Pulse number: 0~7, indicating which pulse to be output.
           EndofSegment marker: 1 if the entry is the last entry of a segment, otherwise 0.
    @return stat : 0 if the upload succeeded and 1 if the upload failed.
    '''
    length = len(tape)
    tape_addr_width = 15       # tape address field width in bits (29184 entries max)
    entry_length = 9 + 3 + 1   # WaitingTime + pulse number + end-of-segment bit

    # Check out of bounds
    if awg_nr < 0 or awg_nr > 2:
        raise ValueError
    if length < 1 or length > 29184:
        raise ValueError

    cmd = defHeaders.AwgSegmentedTape
    data_bytes = []
    # Header: target AWG packed into a 4-bit payload.
    data_bytes.append(self.encode_byte(awg_nr, 4))
    # Entry count is transmitted as (length - 1), split across ceil(15/7)
    # 7-bit data bytes.
    data_bytes.append(self.encode_byte(length-1, 7,
                      signed_integer_length=tape_addr_width,
                      expected_number_of_bytes=np.ceil(tape_addr_width / 7.0)))
    for sample_data in tape:
        # Each 13-bit entry is converted to its signed representation and
        # split across ceil(13/7) 7-bit data bytes.
        data_bytes.append(self.encode_byte(self.convert_to_signed(sample_data, entry_length), 7,
                          signed_integer_length=entry_length,
                          expected_number_of_bytes=np.ceil(entry_length / 7.0)))

    message = self.create_message(cmd, data_bytes)
    (stat, mesg) = self.serial_write(message)
    return (stat, mesg)
def create_entry(self, interval, pulse_num, end_of_marker):
    '''
    Pack one tape entry into its 13-bit unsigned integer representation.

    Bit layout (MSB first):
    | interval (9 bits) | pulse_num (3 bits) | end_of_marker (1 bit) |

    @param interval : The waiting time before the end of last pulse or trigger
                      in ns.  Must fit the 9-bit waiting-time field (0~511).
                      The previous range check accepted values up to 2560, but
                      any value above 511 cannot be represented in 9 bits and
                      made the bitstring packing crash; such values now raise
                      a ValueError with a clear message instead.
    @param pulse_num : 0~7, indicating which pulse to be output.
    @param end_of_marker : 1 if the entry is the last entry of a segment,
                           otherwise 0.
    @return the packed entry as an unsigned integer.
    '''
    if interval < 0 or interval > 2560:
        raise ValueError("interval must be in the range 0~2560 ns")
    if interval >= (1 << 9):
        # 9-bit field: the old BitArray-based packing raised an opaque
        # bitstring error here; fail explicitly instead.
        raise ValueError("interval does not fit the 9-bit waiting-time field (max 511)")
    if pulse_num < 0 or pulse_num > 7:
        raise ValueError("pulse_num must be in the range 0~7")
    if end_of_marker < 0 or end_of_marker > 1:
        raise ValueError("end_of_marker must be 0 or 1")

    # Equivalent to concatenating 9-, 3- and 1-bit unsigned bitstring fields
    # and reading the result back as an unsigned integer, without needing the
    # third-party bitstring package.
    return (interval << 4) | (pulse_num << 1) | end_of_marker
| 39.943925 | 96 | 0.662611 | def set_conditional_tape(self, awg_nr, tape_nr, tape):
length = len(tape)
tape_addr_width = 9
entry_length = 9 + 3 + 1
if awg_nr < 0 or awg_nr > 2:
raise ValueError
if tape_nr < 0 or tape_nr > 6:
raise ValueError
if length < 1 or length > 512:
raise ValueError
cmd = defHeaders.AwgCondionalTape
data_bytes = []
data_bytes.append(self.encode_byte(awg_nr, 4))
data_bytes.append(self.encode_byte(tape_nr, 4))
data_bytes.append(self.encode_byte(length-1, 7,
signed_integer_length=tape_addr_width,
expected_number_of_bytes=np.ceil(tape_addr_width/7.0)))
for sample_data in tape:
data_bytes.append(self.encode_byte(self.convert_to_signed(sample_data, entry_length), 7,
signed_integer_length=entry_length,
expected_number_of_bytes=np.ceil(entry_length/7.0)))
message = self.create_message(cmd, data_bytes)
(stat, mesg) = self.serial_write(message)
return (stat, mesg)
def set_segmented_tape(self, awg_nr, tape):
length = len(tape)
tape_addr_width = 15
entry_length = 9 + 3 + 1
if awg_nr < 0 or awg_nr > 2:
raise ValueError
if length < 1 or length > 29184:
raise ValueError
cmd = defHeaders.AwgSegmentedTape
data_bytes = []
data_bytes.append(self.encode_byte(awg_nr, 4))
data_bytes.append(self.encode_byte(length-1, 7,
signed_integer_length=tape_addr_width,
expected_number_of_bytes=np.ceil(tape_addr_width / 7.0)))
for sample_data in tape:
data_bytes.append(self.encode_byte(self.convert_to_signed(sample_data, entry_length), 7,
signed_integer_length=entry_length,
expected_number_of_bytes=np.ceil(entry_length / 7.0)))
message = self.create_message(cmd, data_bytes)
(stat, mesg) = self.serial_write(message)
return (stat, mesg)
def create_entry(self, interval, pulse_num, end_of_marker):
if interval < 0 or interval > 2560:
raise ValueError
if pulse_num < 0 or pulse_num > 7:
raise ValueError
if end_of_marker < 0 or end_of_marker > 1:
raise ValueError
entry_bits = BitArray(Bits(uint=interval, length=9))
entry_bits.append(BitArray(Bits(uint=pulse_num, length=3)))
entry_bits.append(BitArray(Bits(uint=end_of_marker, length=1)))
return entry_bits.uint
| true | true |
f716155583711d06a3bf11dab07383b6f8697428 | 1,801 | py | Python | securityheaders/checkers/cors/exposeheaders/test_exposesensitiveheaders.py | th3cyb3rc0p/securityheaders | 941264be581dc01afe28f6416f2d7bed79aecfb3 | [
"Apache-2.0"
] | 151 | 2018-07-29T22:34:43.000Z | 2022-03-22T05:08:27.000Z | securityheaders/checkers/cors/exposeheaders/test_exposesensitiveheaders.py | th3cyb3rc0p/securityheaders | 941264be581dc01afe28f6416f2d7bed79aecfb3 | [
"Apache-2.0"
] | 5 | 2019-04-24T07:31:36.000Z | 2021-04-15T14:31:23.000Z | securityheaders/checkers/cors/exposeheaders/test_exposesensitiveheaders.py | th3cyb3rc0p/securityheaders | 941264be581dc01afe28f6416f2d7bed79aecfb3 | [
"Apache-2.0"
] | 42 | 2018-07-31T08:18:59.000Z | 2022-03-28T08:18:32.000Z | import unittest
from securityheaders.checkers.cors import AccessControlExposeHeadersSensitiveChecker
class AccessControlExposeHeadersSensitiveCheckerTest(unittest.TestCase):
def setUp(self):
self.x = AccessControlExposeHeadersSensitiveChecker()
def test_checkNoHeader(self):
nox = dict()
nox['test'] = 'value'
self.assertEqual(self.x.check(nox), [])
def test_checkNone(self):
nonex = None
self.assertEqual(self.x.check(nonex), [])
def test_checkNone2(self):
hasx = dict()
hasx['access-control-expose-headers'] = None
self.assertEqual(self.x.check(hasx), [])
def test_checkInvalid(self):
hasx2 = dict()
hasx2['access-control-expose-headers'] = "Authentication-Token"
result = self.x.check(hasx2)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_checkInvalid2(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "Authorization"
result = self.x.check(hasx5)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_checkInvalid3(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "Session"
result = self.x.check(hasx5)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_checkInvalid4(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "Session, Authentication-Token, PUT"
result = self.x.check(hasx5)
self.assertIsNotNone(result)
self.assertEqual(len(result), 2)
def test_checkValid2(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "PUT"
self.assertEqual(self.x.check(hasx5), [])
if __name__ == '__main__':
unittest.main()
| 31.051724 | 84 | 0.655747 | import unittest
from securityheaders.checkers.cors import AccessControlExposeHeadersSensitiveChecker
class AccessControlExposeHeadersSensitiveCheckerTest(unittest.TestCase):
def setUp(self):
self.x = AccessControlExposeHeadersSensitiveChecker()
def test_checkNoHeader(self):
nox = dict()
nox['test'] = 'value'
self.assertEqual(self.x.check(nox), [])
def test_checkNone(self):
nonex = None
self.assertEqual(self.x.check(nonex), [])
def test_checkNone2(self):
hasx = dict()
hasx['access-control-expose-headers'] = None
self.assertEqual(self.x.check(hasx), [])
def test_checkInvalid(self):
hasx2 = dict()
hasx2['access-control-expose-headers'] = "Authentication-Token"
result = self.x.check(hasx2)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_checkInvalid2(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "Authorization"
result = self.x.check(hasx5)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_checkInvalid3(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "Session"
result = self.x.check(hasx5)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_checkInvalid4(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "Session, Authentication-Token, PUT"
result = self.x.check(hasx5)
self.assertIsNotNone(result)
self.assertEqual(len(result), 2)
def test_checkValid2(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "PUT"
self.assertEqual(self.x.check(hasx5), [])
if __name__ == '__main__':
unittest.main()
| true | true |
f71617895efc3dfd23246121c700461891099a24 | 6,196 | py | Python | docs/conf.py | EVEprosper/ProsperDatareader | 31f0d77074c21222161774f4d653326925611167 | [
"MIT"
] | null | null | null | docs/conf.py | EVEprosper/ProsperDatareader | 31f0d77074c21222161774f4d653326925611167 | [
"MIT"
] | 14 | 2017-08-14T02:25:42.000Z | 2018-11-16T19:15:52.000Z | docs/conf.py | EVEprosper/ProsperDatareader | 31f0d77074c21222161774f4d653326925611167 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ProsperDatareader documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 31 09:30:33 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
## vv TODO vv: autodocs ##
import os
import sys
sys.path.insert(0, os.path.abspath('../prosper/datareader'))
sys.path.insert(0, os.path.abspath('../prosper'))
from _version import __version__
## ^^ TODO ^^ ##
import alabaster
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinxcontrib.napoleon',
'alabaster',
]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'ProsperDatareader'
copyright = '2017, John Purcell'
author = 'John Purcell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.0'
# The full version, including alpha/beta/rc tags.
release = '0.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme_path = [alabaster.get_path()]
html_theme = 'alabaster'
html_static_path = ['_static']
templates_path = ['templates']
html_show_sourcelink = False
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo': 'logo-colour-sm.png',
'description': 'Uniform Data Collection',
'description_font_style': 'italic',
'github_user': 'eveprosper',
'github_repo': 'prosperdatareader',
'github_banner': True,
}
html_favicon = "static/prosper.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'index': [
'about.html', 'patreon.html', 'globaltoc.html', 'searchbox.html',
],
'**': [
'about.html', 'patreon.html', 'globaltoc.html', 'searchbox.html'
]
}
#html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# 'donate.html',
# ]
#}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ProsperDatareaderdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ProsperDatareader.tex', 'ProsperDatareader Documentation',
'John Purcell', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'prosperdatareader', 'ProsperDatareader Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ProsperDatareader', 'ProsperDatareader Documentation',
author, 'ProsperDatareader', 'One line description of project.',
'Miscellaneous'),
]
| 29.788462 | 80 | 0.675274 |
th.insert(0, os.path.abspath('../prosper/datareader'))
sys.path.insert(0, os.path.abspath('../prosper'))
from _version import __version__
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinxcontrib.napoleon',
'alabaster',
]
source_suffix = '.rst'
master_doc = 'index'
project = 'ProsperDatareader'
copyright = '2017, John Purcell'
author = 'John Purcell'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.0'
# The full version, including alpha/beta/rc tags.
release = '0.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme_path = [alabaster.get_path()]
html_theme = 'alabaster'
html_static_path = ['_static']
templates_path = ['templates']
html_show_sourcelink = False
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo': 'logo-colour-sm.png',
'description': 'Uniform Data Collection',
'description_font_style': 'italic',
'github_user': 'eveprosper',
'github_repo': 'prosperdatareader',
'github_banner': True,
}
html_favicon = "static/prosper.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'index': [
'about.html', 'patreon.html', 'globaltoc.html', 'searchbox.html',
],
'**': [
'about.html', 'patreon.html', 'globaltoc.html', 'searchbox.html'
]
}
#html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# 'donate.html',
# ]
#}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ProsperDatareaderdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ProsperDatareader.tex', 'ProsperDatareader Documentation',
'John Purcell', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'prosperdatareader', 'ProsperDatareader Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ProsperDatareader', 'ProsperDatareader Documentation',
author, 'ProsperDatareader', 'One line description of project.',
'Miscellaneous'),
]
| true | true |
f716179af5712ede1126fe27e7a30594aafd8164 | 5,204 | py | Python | sdk/python/pulumi_azure_nextgen/logic/latest/get_integration_account_batch_configuration.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/logic/latest/get_integration_account_batch_configuration.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/logic/latest/get_integration_account_batch_configuration.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetIntegrationAccountBatchConfigurationResult',
'AwaitableGetIntegrationAccountBatchConfigurationResult',
'get_integration_account_batch_configuration',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:logic:getIntegrationAccountBatchConfiguration'.""", DeprecationWarning)
@pulumi.output_type
class GetIntegrationAccountBatchConfigurationResult:
"""
The batch configuration resource definition.
"""
def __init__(__self__, id=None, location=None, name=None, properties=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The resource id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Gets the resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.BatchConfigurationPropertiesResponse':
"""
The batch configuration properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Gets the resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetIntegrationAccountBatchConfigurationResult(GetIntegrationAccountBatchConfigurationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetIntegrationAccountBatchConfigurationResult(
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_integration_account_batch_configuration(batch_configuration_name: Optional[str] = None,
integration_account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIntegrationAccountBatchConfigurationResult:
"""
The batch configuration resource definition.
Latest API Version: 2019-05-01.
:param str batch_configuration_name: The batch configuration name.
:param str integration_account_name: The integration account name.
:param str resource_group_name: The resource group name.
"""
pulumi.log.warn("get_integration_account_batch_configuration is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:logic:getIntegrationAccountBatchConfiguration'.")
__args__ = dict()
__args__['batchConfigurationName'] = batch_configuration_name
__args__['integrationAccountName'] = integration_account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:logic/latest:getIntegrationAccountBatchConfiguration', __args__, opts=opts, typ=GetIntegrationAccountBatchConfigurationResult).value
return AwaitableGetIntegrationAccountBatchConfigurationResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
| 37.438849 | 236 | 0.662183 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetIntegrationAccountBatchConfigurationResult',
'AwaitableGetIntegrationAccountBatchConfigurationResult',
'get_integration_account_batch_configuration',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:logic:getIntegrationAccountBatchConfiguration'.""", DeprecationWarning)
@pulumi.output_type
class GetIntegrationAccountBatchConfigurationResult:
def __init__(__self__, id=None, location=None, name=None, properties=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.BatchConfigurationPropertiesResponse':
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetIntegrationAccountBatchConfigurationResult(GetIntegrationAccountBatchConfigurationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetIntegrationAccountBatchConfigurationResult(
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_integration_account_batch_configuration(batch_configuration_name: Optional[str] = None,
integration_account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIntegrationAccountBatchConfigurationResult:
pulumi.log.warn("get_integration_account_batch_configuration is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:logic:getIntegrationAccountBatchConfiguration'.")
__args__ = dict()
__args__['batchConfigurationName'] = batch_configuration_name
__args__['integrationAccountName'] = integration_account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:logic/latest:getIntegrationAccountBatchConfiguration', __args__, opts=opts, typ=GetIntegrationAccountBatchConfigurationResult).value
return AwaitableGetIntegrationAccountBatchConfigurationResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
| true | true |
f7161889a0f2637bcacb6385931f3df8ce3d1eb6 | 2,639 | py | Python | PYTHON/skyscrapper.py | iamsuryakant/100-days-of-code | eaf4863d98dc273f03a989fe87d010d201d91516 | [
"MIT"
] | 1 | 2020-07-04T12:45:50.000Z | 2020-07-04T12:45:50.000Z | PYTHON/skyscrapper.py | iamsuryakant/100-days-of-code | eaf4863d98dc273f03a989fe87d010d201d91516 | [
"MIT"
] | 1 | 2020-08-08T02:23:46.000Z | 2020-08-08T02:47:56.000Z | PYTHON/skyscrapper.py | iamsuryakant/100-days-of-code | eaf4863d98dc273f03a989fe87d010d201d91516 | [
"MIT"
] | null | null | null | class Solution:
def getSkyline(self, buildings: 'List[List[int]]') -> 'List[List[int]]':
"""
Divide-and-conquer algorithm to solve skyline problem,
which is similar with the merge sort algorithm.
"""
n = len(buildings)
# The base cases
if n == 0:
return []
if n == 1:
x_start, x_end, y = buildings[0]
return [[x_start, y], [x_end, 0]]
# If there is more than one building,
# recursively divide the input into two subproblems.
left_skyline = self.getSkyline(buildings[: n // 2])
right_skyline = self.getSkyline(buildings[n // 2:])
# Merge the results of subproblem together.
return self.merge_skylines(left_skyline, right_skyline)
def merge_skylines(self, left, right):
"""
Merge two skylines together.
"""
def update_output(x, y):
"""
Update the final output with the new element.
"""
# if skyline change is not vertical -
# add the new point
if not output or output[-1][0] != x:
output.append([x, y])
# if skyline change is vertical -
# update the last point
else:
output[-1][1] = y
def append_skyline(p, lst, n, y, curr_y):
"""
Append the rest of the skyline elements with indice (p, n)
to the final output.
"""
while p < n:
x, y = lst[p]
p += 1
if curr_y != y:
update_output(x, y)
curr_y = y
n_l, n_r = len(left), len(right)
p_l = p_r = 0
curr_y = left_y = right_y = 0
output = []
# while we're in the region where both skylines are present
while p_l < n_l and p_r < n_r:
point_l, point_r = left[p_l], right[p_r]
# pick up the smallest x
if point_l[0] < point_r[0]:
x, left_y = point_l
p_l += 1
else:
x, right_y = point_r
p_r += 1
# max height (i.e. y) between both skylines
max_y = max(left_y, right_y)
# if there is a skyline change
if curr_y != max_y:
update_output(x, max_y)
curr_y = max_y
# there is only left skyline
append_skyline(p_l, left, n_l, left_y, curr_y)
# there is only right skyline
append_skyline(p_r, right, n_r, right_y, curr_y)
return output
| 32.580247 | 76 | 0.497537 | class Solution:
def getSkyline(self, buildings: 'List[List[int]]') -> 'List[List[int]]':
n = len(buildings)
if n == 0:
return []
if n == 1:
x_start, x_end, y = buildings[0]
return [[x_start, y], [x_end, 0]]
left_skyline = self.getSkyline(buildings[: n // 2])
right_skyline = self.getSkyline(buildings[n // 2:])
return self.merge_skylines(left_skyline, right_skyline)
def merge_skylines(self, left, right):
def update_output(x, y):
if not output or output[-1][0] != x:
output.append([x, y])
else:
output[-1][1] = y
def append_skyline(p, lst, n, y, curr_y):
while p < n:
x, y = lst[p]
p += 1
if curr_y != y:
update_output(x, y)
curr_y = y
n_l, n_r = len(left), len(right)
p_l = p_r = 0
curr_y = left_y = right_y = 0
output = []
while p_l < n_l and p_r < n_r:
point_l, point_r = left[p_l], right[p_r]
# pick up the smallest x
if point_l[0] < point_r[0]:
x, left_y = point_l
p_l += 1
else:
x, right_y = point_r
p_r += 1
# max height (i.e. y) between both skylines
max_y = max(left_y, right_y)
# if there is a skyline change
if curr_y != max_y:
update_output(x, max_y)
curr_y = max_y
# there is only left skyline
append_skyline(p_l, left, n_l, left_y, curr_y)
# there is only right skyline
append_skyline(p_r, right, n_r, right_y, curr_y)
return output
| true | true |
f71618eae8454fc424b1ab0fecf5817c6c652137 | 31,411 | py | Python | tests/models/bloom/test_modeling_bloom.py | JingyaHuang/transformers | 6589e510fa4e6c442059de2fab84752535de9b23 | [
"Apache-2.0"
] | null | null | null | tests/models/bloom/test_modeling_bloom.py | JingyaHuang/transformers | 6589e510fa4e6c442059de2fab84752535de9b23 | [
"Apache-2.0"
] | null | null | null | tests/models/bloom/test_modeling_bloom.py | JingyaHuang/transformers | 6589e510fa4e6c442059de2fab84752535de9b23 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import unittest
from transformers import BloomConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_generation_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomTokenizerFast,
)
@require_torch
class BloomModelTester:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_token_type_ids=False,
use_input_mask=True,
use_labels=True,
use_mc_token_ids=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.use_mc_token_ids = use_mc_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = None
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
self.pad_token_id = vocab_size - 1
    def get_large_model_config(self):
        """Load the full-size BLOOM configuration from the Hugging Face Hub (requires network access)."""
        return BloomConfig.from_pretrained("bigscience/bloom")
def prepare_config_and_inputs(self, gradient_checkpointing=False):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
sequence_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config(gradient_checkpointing=gradient_checkpointing)
return (config, input_ids, input_mask, sequence_labels)
def get_config(self, gradient_checkpointing=False, slow_but_exact=True):
return BloomConfig(
vocab_size=self.vocab_size,
seq_length=self.seq_length,
hidden_size=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
resid_pdrop=self.hidden_dropout_prob,
attn_pdrop=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
num_labels=self.num_labels,
gradient_checkpointing=gradient_checkpointing,
slow_but_exact=slow_but_exact,
dtype="float32",
)
def create_and_check_bloom_model(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_bloom_model_past(self, config, input_ids, input_mask, *args):
        """Check that cached ``past_key_values`` reproduce the no-cache forward pass.

        Runs the model once with ``use_cache=True``, feeds a single extra token
        using the returned cache, and compares the result against a full forward
        pass over the concatenated sequence on a randomly chosen hidden-dim slice.
        """
        model = BloomModel(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=torch.ones_like(input_ids), use_cache=True)
        outputs_use_cache_conf = model(input_ids, attention_mask=torch.ones_like(input_ids))
        outputs_no_past = model(input_ids, use_cache=False, attention_mask=torch.ones_like(input_ids))
        # output arity must match whether use_cache is explicit or comes from the config,
        # and the cached output carries exactly one extra element (the cache itself)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past = outputs["past_key_values"]
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        # no-cache pass over the full sequence vs. cached pass over just the new token
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past)["last_hidden_state"]
        # select random slice along the hidden dimension
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice (loose atol: fp accumulation differs between paths)
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_bloom_model_attention_mask_past(self, config, input_ids, input_mask, *args):
        """Check cache consistency when part of the input sequence is masked out.

        The second half of the sequence is masked, a random masked position is
        overwritten with different tokens, and the cached one-token forward pass
        must still match the full forward pass on a random hidden-dim slice —
        i.e. masked positions must not influence the output.
        """
        model = BloomModel(config=config)
        model.to(torch_device)
        model.eval()
        # create attention mask: mask out the second half of every sequence
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # change a random *masked* slice of input_ids — it must not matter since it is masked
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask (the new token is unmasked)
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )
        # get two different outputs: full-sequence vs. cached single-token
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]
        # select random slice along the hidden dimension
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_bloom_model_past_large_inputs(self, config, input_ids, input_mask, *args):
        """Check that the cache also matches when several new tokens are appended at once."""
        model = BloomModel(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        output, past = outputs.to_tuple()
        # create three hypothetical next tokens (and a random 0/1 mask for them)
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past)[
            "last_hidden_state"
        ]
        # with a cache, the model only returns hidden states for the newly fed tokens
        self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])
        # select random slice along the hidden dimension
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_lm_head_model(self, config, input_ids, input_mask, *args):
model = BloomForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_sequence_classification_model(self, config, input_ids, input_mask, *args):
config.num_labels = self.num_labels
model = BloomForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_token_classification_model(self, config, input_ids, input_mask, *args):
model = BloomForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_forward_and_backwards(
self, config, input_ids, input_mask, *args, gradient_checkpointing=False
):
model = BloomForCausalLM(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def create_and_check_bloom_weight_initialization(self, config, *args):
model = BloomModel(config)
model_std = model.config.initializer_range / math.sqrt(2 * model.config.n_layer)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask, sequence_labels = config_and_inputs
inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
@require_torch
class BloomModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """Runs the shared model/generation suites plus BLOOM-specific checks.

    The fast tests delegate to :class:`BloomModelTester`; the ``@slow``
    tests exercise real checkpoints on GPU.
    """

    all_model_classes = (
        (
            BloomModel,
            BloomForCausalLM,
            BloomForSequenceClassification,
            BloomForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BloomForCausalLM,) if is_torch_available() else ()
    fx_compatible = False
    test_missing_keys = False
    test_pruning = False
    test_torchscript = True  # torch.autograd functions seems to be not supported

    def setUp(self):
        self.model_tester = BloomModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BloomConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_bloom_model(self):
        self.model_tester.create_and_check_bloom_model(*self.model_tester.prepare_config_and_inputs())

    def test_bloom_model_past(self):
        self.model_tester.create_and_check_bloom_model_past(*self.model_tester.prepare_config_and_inputs())

    def test_bloom_model_att_mask_past(self):
        self.model_tester.create_and_check_bloom_model_attention_mask_past(
            *self.model_tester.prepare_config_and_inputs()
        )

    def test_bloom_model_past_large_inputs(self):
        self.model_tester.create_and_check_bloom_model_past_large_inputs(
            *self.model_tester.prepare_config_and_inputs()
        )

    def test_bloom_lm_head_model(self):
        self.model_tester.create_and_check_lm_head_model(*self.model_tester.prepare_config_and_inputs())

    def test_bloom_sequence_classification_model(self):
        self.model_tester.create_and_check_sequence_classification_model(
            *self.model_tester.prepare_config_and_inputs()
        )

    def test_bloom_token_classification_model(self):
        self.model_tester.create_and_check_token_classification_model(
            *self.model_tester.prepare_config_and_inputs()
        )

    def test_bloom_gradient_checkpointing(self):
        self.model_tester.create_and_check_forward_and_backwards(
            *self.model_tester.prepare_config_and_inputs(), gradient_checkpointing=True
        )

    def test_bloom_weight_initialization(self):
        self.model_tester.create_and_check_bloom_weight_initialization(
            *self.model_tester.prepare_config_and_inputs()
        )

    @slow
    def test_model_from_pretrained(self):
        for checkpoint in BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            self.assertIsNotNone(BloomModel.from_pretrained(checkpoint))

    @slow
    @require_torch_gpu
    def test_simple_generation(self):
        """Greedy generation on bloom-350m must reproduce a fixed reference string."""
        checkpoint = "bigscience/bloom-350m"
        model = BloomForCausalLM.from_pretrained(checkpoint, torch_dtype="auto", use_cache=True).cuda().eval()
        tokenizer = BloomTokenizerFast.from_pretrained(checkpoint)
        prompt = "I enjoy walking with my cute dog"
        EXPECTED_OUTPUT = (
            "I enjoy walking with my cute dog, and I love to watch the kids play. I am a very active person, and I am"
            " a very good listener. I am a very good person, and I am a very good person. I am a"
        )
        encoded = tokenizer.encode(prompt, return_tensors="pt")
        generated = model.generate(encoded.cuda(), max_length=50)
        self.assertEqual(tokenizer.decode(generated[0], skip_special_tokens=True), EXPECTED_OUTPUT)

    @slow
    @require_torch_gpu
    def test_batch_generation(self):
        """Two identical prompts in one batch must decode to identical continuations."""
        checkpoint = "bigscience/bloom-350m"
        model = BloomForCausalLM.from_pretrained(checkpoint, torch_dtype="auto", use_cache=True).cuda().eval()
        tokenizer = BloomTokenizerFast.from_pretrained(checkpoint, padding_side="left")
        prompts = ["I enjoy walking with my cute dog", "I enjoy walking with my cute dog"]
        batch = tokenizer.batch_encode_plus(prompts, return_tensors="pt", padding=True)
        generated = model.generate(
            batch["input_ids"].cuda(), attention_mask=batch["attention_mask"], max_length=50, do_sample=False
        )
        self.assertEqual(
            tokenizer.decode(generated[0], skip_special_tokens=True),
            tokenizer.decode(generated[1], skip_special_tokens=True),
        )

    @slow
    @require_torch_gpu
    def test_batch_generation_padd(self):
        """Left-padding inside a batch must not change what the shorter prompt generates."""
        checkpoint = "bigscience/bloom-350m"
        model = BloomForCausalLM.from_pretrained(checkpoint, torch_dtype="auto", use_cache=True).cuda().eval()
        tokenizer = BloomTokenizerFast.from_pretrained(checkpoint, padding_side="left")
        prompts = ["I enjoy walking with my cute dog", "Hello my name is"]
        unpadded_prompt = "Hello my name is"
        batch = tokenizer.batch_encode_plus(prompts, return_tensors="pt", padding=True)
        unpadded_ids = tokenizer.encode(unpadded_prompt, return_tensors="pt")
        generated = model.generate(
            batch["input_ids"].cuda(), attention_mask=batch["attention_mask"], max_length=50, do_sample=False
        )
        generated_without_pad = model.generate(unpadded_ids.cuda(), max_length=50, do_sample=False)
        # Offset of 3 accounts for the left-padding added to the shorter prompt in this batch.
        # test token values
        self.assertEqual(generated[-1, 3:].tolist(), generated_without_pad[0, :-3].tolist())
        # test reconstructions
        self.assertEqual(
            tokenizer.decode(generated[-1, 3:], skip_special_tokens=True),
            tokenizer.decode(generated_without_pad[0, :-3], skip_special_tokens=True),
        )
@require_torch
class BloomEmbeddingTest(unittest.TestCase):
    """
    The goal here is to compare the embeddings generated by the model trained
    using Megatron-LM with the one from the transformers library, with a small GPT2-like model
    to ensure that the conversion from Megatron-LM to transformers has been done successfully.
    The script compares the logits of the embedding layer and the transformer layers.
    WARNING: It is expected that these logits will not have exactly the same statistics when running
    the code on CPU or GPU. For more info, please visit:
    - https://github.com/pytorch/pytorch/issues/76052#issuecomment-1103193548
    - https://discuss.pytorch.org/t/reproducibility-issue-between-intel-and-amd-cpus/144779/9
    You need to install tokenizers following this readme:
    - https://huggingface.co/bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
    Tokenizer used during training:
    - https://huggingface.co/bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
    # TODO change the script (or just add skip) when building the env with tokenizers 0.12.0
    """
    def setUp(self):
        super().setUp()
        # Tiny BLOOM checkpoint converted from Megatron-LM, used as the reference model.
        self.path_bigscience_model = "bigscience/bigscience-small-testing"
    @require_torch
    def test_embeddings(self):
        # Compare per-token statistics (mean/min/max) and the global sum of the word
        # embeddings -- before and after the embedding LayerNorm -- against values
        # recorded from the original Megatron-LM checkpoint, for each supported dtype.
        model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, torch_dtype="auto")  # load in fp32
        model.eval()
        # Reference statistics of the raw embedding vectors, keyed by token id,
        # recorded per dtype (bf16 / fp16 / fp32) since rounding differs.
        EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN = {
            3478: 0.0002307891845703125,
            368: -0.000568389892578125,
            109586: -0.0003910064697265625,
            35433: -0.000194549560546875,
            2: 0.0004138946533203125,
            77: 0.000659942626953125,
            132619: -0.00031280517578125,
            2175: 0.000457763671875,
            23714: 0.000263214111328125,
            73173: -0.000286102294921875,
            144252: 0.00052642822265625,
        }
        EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN = {
            3478: -0.00921630859375,
            368: -0.010009765625,
            109586: -0.01031494140625,
            35433: -0.01177978515625,
            2: -0.0074462890625,
            77: -0.00848388671875,
            132619: -0.009521484375,
            2175: -0.0074462890625,
            23714: -0.0145263671875,
            73173: -0.007415771484375,
            144252: -0.01007080078125,
        }
        EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX = {
            3478: 0.0128173828125,
            368: 0.01214599609375,
            109586: 0.0111083984375,
            35433: 0.01019287109375,
            2: 0.0157470703125,
            77: 0.0174560546875,
            132619: 0.0078125,
            2175: 0.0113525390625,
            23714: 0.0146484375,
            73173: 0.01116943359375,
            144252: 0.01141357421875,
        }
        EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM = {"value": 0.08203125}
        EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN = {
            132619: -0.00031256675720214844,
            3478: 0.00023090839385986328,
            368: -0.0005702972412109375,
            109586: -0.00039124488830566406,
            35433: -0.000194549560546875,
            2: 0.0004146099090576172,
            2175: 0.0004572868347167969,
            23714: 0.00026416778564453125,
            73173: -0.0002865791320800781,
            144252: 0.0005254745483398438,
            77: 0.0006618499755859375,
        }
        EMBEDDINGS_DS_BEFORE_LN_F_16_MIN = {
            3478: -0.00921630859375,
            368: -0.010009765625,
            109586: -0.01031494140625,
            35433: -0.01177978515625,
            2: -0.0074462890625,
            77: -0.00848388671875,
            132619: -0.009521484375,
            2175: -0.0074462890625,
            23714: -0.0145263671875,
            73173: -0.007415771484375,
            144252: -0.01007080078125,
        }
        EMBEDDINGS_DS_BEFORE_LN_F_16_MAX = {
            3478: 0.0128173828125,
            368: 0.01214599609375,
            109586: 0.0111083984375,
            35433: 0.01019287109375,
            2: 0.0157470703125,
            77: 0.0174560546875,
            132619: 0.0078125,
            2175: 0.0113525390625,
            23714: 0.0146484375,
            73173: 0.01116943359375,
            144252: 0.01141357421875,
        }
        EMBEDDINGS_DS_BEFORE_LN_F_16_SUM = {"value": 0.0821533203125}
        EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN = {
            132619: -0.00031267106533050537,
            3478: 0.00023087859153747559,
            368: -0.0005701072514057159,
            109586: -0.0003911703824996948,
            35433: -0.0001944899559020996,
            2: 0.0004146844148635864,
            2175: 0.00045740045607089996,
            23714: 0.0002641640603542328,
            73173: -0.0002864748239517212,
            144252: 0.0005256589502096176,
            77: 0.0006617321632802486,
        }
        EMBEDDINGS_DS_BEFORE_LN_F_32_MIN = {
            3478: -0.00921630859375,
            368: -0.010009765625,
            109586: -0.01031494140625,
            35433: -0.01177978515625,
            2: -0.0074462890625,
            77: -0.00848388671875,
            132619: -0.009521484375,
            2175: -0.0074462890625,
            23714: -0.0145263671875,
            73173: -0.007415771484375,
            144252: -0.01007080078125,
        }
        EMBEDDINGS_DS_BEFORE_LN_F_32_MAX = {
            3478: 0.0128173828125,
            368: 0.01214599609375,
            109586: 0.0111083984375,
            35433: 0.01019287109375,
            2: 0.0157470703125,
            77: 0.0174560546875,
            132619: 0.0078125,
            2175: 0.0113525390625,
            23714: 0.0146484375,
            73173: 0.01116943359375,
            144252: 0.01141357421875,
        }
        EMBEDDINGS_DS_BEFORE_LN_F_32_SUM = {"value": 0.08217757940292358}
        # Expected values are selected at runtime via str(model.dtype), so "torch.float"
        # and "torch.float32" both map to the fp32 reference set.
        TEST_EMBEDDINGS = {
            "torch.bfloat16": {
                "mean": EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN,
                "max": EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX,
                "min": EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN,
                "sum": EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM,
            },
            "torch.float32": {
                "mean": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN,
                "max": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX,
                "min": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN,
                "sum": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM,
            },
            "torch.float": {
                "mean": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN,
                "max": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX,
                "min": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN,
                "sum": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM,
            },
            "torch.float16": {
                "mean": EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN,
                "max": EMBEDDINGS_DS_BEFORE_LN_F_16_MAX,
                "min": EMBEDDINGS_DS_BEFORE_LN_F_16_MIN,
                "sum": EMBEDDINGS_DS_BEFORE_LN_F_16_SUM,
            },
        }
        # fmt: off
        EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]
        # fmt: on
        # Reference statistics of the embeddings *after* the embedding LayerNorm.
        EMBEDDINGS_DS_AFTER_LN_MEAN = {
            3478: -6.580352783203125e-05,
            368: 0.0001316070556640625,
            109586: -0.00030517578125,
            35433: 4.00543212890625e-05,
            2: -7.2479248046875e-05,
            77: -8.96453857421875e-05,
            132619: 0.0001583099365234375,
            2175: 2.1219253540039062e-05,
            23714: -0.000247955322265625,
            73173: -0.00021839141845703125,
            144252: -0.0001430511474609375,
        }
        EMBEDDINGS_DS_AFTER_LN_MIN = {
            3478: -1.6953125,
            368: -1.6875,
            109586: -1.6875,
            35433: -2.125,
            2: -1.390625,
            77: -1.5390625,
            132619: -1.875,
            2175: -1.4609375,
            23714: -2.296875,
            73173: -1.3515625,
            144252: -1.78125,
        }
        EMBEDDINGS_DS_AFTER_LN_MAX = {
            3478: 2.265625,
            368: 2.28125,
            109586: 1.953125,
            35433: 1.90625,
            2: 2.703125,
            77: 2.828125,
            132619: 1.65625,
            2175: 2.015625,
            23714: 2.234375,
            73173: 2.171875,
            144252: 1.828125,
        }
        EMBEDDINGS_DS_AFTER_LN = {
            "mean": EMBEDDINGS_DS_AFTER_LN_MEAN,
            "min": EMBEDDINGS_DS_AFTER_LN_MIN,
            "max": EMBEDDINGS_DS_AFTER_LN_MAX,
        }
        tensor_ids = torch.LongTensor([EXAMPLE_IDS])
        with torch.no_grad():
            embeddings = model.transformer.word_embeddings(tensor_ids)
            embeddings_ln = model.transformer.word_embeddings_layernorm(embeddings)
        # first check the embeddings before LN: exact dict equality against the
        # dtype-matched reference (stats are computed over the hidden dimension)
        output_dict = {"min": {}, "max": {}, "mean": {}, "sum": {"value": embeddings.sum().item()}}
        for i, idx in enumerate(EXAMPLE_IDS):
            output_dict["min"][idx] = embeddings.min(dim=-1).values[0][i].item()
            output_dict["max"][idx] = embeddings.max(dim=-1).values[0][i].item()
            output_dict["mean"][idx] = embeddings.mean(dim=-1)[0][i].item()
        for key in TEST_EMBEDDINGS[str(model.dtype)].keys():
            self.assertDictEqual(TEST_EMBEDDINGS[str(model.dtype)][key], output_dict[key])
        # then check the embeddings after LN, with a looser (1-decimal-place) tolerance
        output_dict_norm = {"min": {}, "max": {}, "mean": {}}
        for i, idx in enumerate(EXAMPLE_IDS):
            output_dict_norm["min"][idx] = embeddings_ln.min(dim=-1).values[0][i].item()
            output_dict_norm["max"][idx] = embeddings_ln.max(dim=-1).values[0][i].item()
            output_dict_norm["mean"][idx] = embeddings_ln.mean(dim=-1)[0][i].item()
        # This test does not pass when places = 2
        for i, key in enumerate(output_dict_norm.keys()):
            for j, idx in enumerate(output_dict[key].keys()):
                self.assertAlmostEqual(EMBEDDINGS_DS_AFTER_LN[key][idx], output_dict_norm[key][idx], places=1)
    @require_torch
    def test_hidden_states_transformers(self):
        # Compare the last hidden state of the full transformer stack against
        # reference mean/min/max values recorded from the Megatron-LM checkpoint.
        cuda_available = torch.cuda.is_available()
        model = BloomModel.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype="auto").to(
            torch_device
        )
        model.eval()
        # fmt: off
        EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]
        # fmt: on
        MEAN_VALUE_LAST_LM = -4.3392181396484375e-05
        MIN_MAX_DICT = {"min": -2.0625, "max": 2.75}
        tensor_ids = torch.LongTensor([EXAMPLE_IDS])
        with torch.no_grad():
            logits = model(tensor_ids.to(torch_device))
        output_dict = {
            "min": logits.last_hidden_state.min(dim=-1).values[0][0].item(),
            "max": logits.last_hidden_state.max(dim=-1).values[0][0].item(),
        }
        # Looser tolerance on CPU than GPU -- presumably the reference values were
        # recorded on GPU; see the class docstring about CPU/GPU numeric drift.
        if cuda_available:
            self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=4)
        else:
            self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=3)
        self.assertDictEqual(MIN_MAX_DICT, output_dict)
    @require_torch
    def test_logits(self):
        # Compare the mean of each half of the LM-head logits against reference values.
        cuda_available = torch.cuda.is_available()
        model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype="auto").to(
            torch_device
        )  # load in bf16
        model.eval()
        # fmt: off
        EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]
        # fmt: on
        MEAN_LOGITS_GPU_1 = -1.823902130126953e-05
        MEAN_LOGITS_GPU_2 = 1.9431114196777344e-05
        tensor_ids = torch.LongTensor([EXAMPLE_IDS]).to(torch_device)
        with torch.no_grad():
            output = model(tensor_ids).logits
        # Split the vocab dimension in halves of 125440 -- presumably matching the
        # per-GPU vocab shards used when the reference values were produced; confirm.
        output_gpu_1, output_gpu_2 = output.split(125440, dim=-1)
        if cuda_available:
            # exact match expected on GPU (reference recorded there)
            self.assertEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1)
            self.assertEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2)
        else:
            self.assertAlmostEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1, places=6)  # 1e-06 precision!!
            self.assertAlmostEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2, places=6)
| 41.439314 | 155 | 0.661934 |
import math
import unittest
from transformers import BloomConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_generation_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomTokenizerFast,
)
@require_torch
class BloomModelTester:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_token_type_ids=False,
use_input_mask=True,
use_labels=True,
use_mc_token_ids=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.use_mc_token_ids = use_mc_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = None
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
self.pad_token_id = vocab_size - 1
def get_large_model_config(self):
return BloomConfig.from_pretrained("bigscience/bloom")
def prepare_config_and_inputs(self, gradient_checkpointing=False):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
sequence_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config(gradient_checkpointing=gradient_checkpointing)
return (config, input_ids, input_mask, sequence_labels)
def get_config(self, gradient_checkpointing=False, slow_but_exact=True):
return BloomConfig(
vocab_size=self.vocab_size,
seq_length=self.seq_length,
hidden_size=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
resid_pdrop=self.hidden_dropout_prob,
attn_pdrop=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
num_labels=self.num_labels,
gradient_checkpointing=gradient_checkpointing,
slow_but_exact=slow_but_exact,
dtype="float32",
)
def create_and_check_bloom_model(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values), config.n_layer)
def create_and_check_bloom_model_past(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
outputs = model(input_ids, attention_mask=torch.ones_like(input_ids), use_cache=True)
outputs_use_cache_conf = model(input_ids, attention_mask=torch.ones_like(input_ids))
outputs_no_past = model(input_ids, use_cache=False, attention_mask=torch.ones_like(input_ids))
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past = outputs["past_key_values"]
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past)["last_hidden_state"]
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_bloom_model_attention_mask_past(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = self.seq_length // 2
attn_mask[:, half_seq_length:] = 0
output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_bloom_model_past_large_inputs(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
output, past = outputs.to_tuple()
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past)[
"last_hidden_state"
]
self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_lm_head_model(self, config, input_ids, input_mask, *args):
model = BloomForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_sequence_classification_model(self, config, input_ids, input_mask, *args):
config.num_labels = self.num_labels
model = BloomForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_token_classification_model(self, config, input_ids, input_mask, *args):
model = BloomForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_forward_and_backwards(
self, config, input_ids, input_mask, *args, gradient_checkpointing=False
):
model = BloomForCausalLM(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def create_and_check_bloom_weight_initialization(self, config, *args):
model = BloomModel(config)
model_std = model.config.initializer_range / math.sqrt(2 * model.config.n_layer)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask, sequence_labels = config_and_inputs
inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
@require_torch
class BloomModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """Standard + generation test-suite wiring for the Bloom model family.

    Delegates most checks to BloomModelTester via the create_and_check_*
    helpers; the @slow tests below additionally run real generation against
    the ``bigscience/bloom-350m`` checkpoint on GPU.
    """

    # Model heads exercised by the common tests (empty tuple when torch is absent).
    all_model_classes = (
        (
            BloomModel,
            BloomForCausalLM,
            BloomForSequenceClassification,
            BloomForTokenClassification,
        )
        if is_torch_available()
        else ()
    )

    all_generative_model_classes = (BloomForCausalLM,) if is_torch_available() else ()
    # Feature switches consumed by ModelTesterMixin.
    fx_compatible = False
    test_missing_keys = False
    test_pruning = False
    test_torchscript = True

    def setUp(self):
        self.model_tester = BloomModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BloomConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_bloom_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_bloom_model(*config_and_inputs)

    def test_bloom_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_bloom_model_past(*config_and_inputs)

    def test_bloom_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_bloom_model_attention_mask_past(*config_and_inputs)

    def test_bloom_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_bloom_model_past_large_inputs(*config_and_inputs)

    def test_bloom_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_bloom_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_sequence_classification_model(*config_and_inputs)

    def test_bloom_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_token_classification_model(*config_and_inputs)

    def test_bloom_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_bloom_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_bloom_weight_initialization(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # Only the first archive entry is loaded to keep the test affordable.
        for model_name in BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BloomModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_simple_generation(self):
        # Greedy decoding must reproduce this exact pinned continuation.
        path_350m = "bigscience/bloom-350m"
        model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype="auto", use_cache=True).cuda()
        model = model.eval()
        tokenizer = BloomTokenizerFast.from_pretrained(path_350m)

        input_sentence = "I enjoy walking with my cute dog"
        EXPECTED_OUTPUT = (
            "I enjoy walking with my cute dog, and I love to watch the kids play. I am a very active person, and I am"
            " a very good listener. I am a very good person, and I am a very good person. I am a"
        )

        input_ids = tokenizer.encode(input_sentence, return_tensors="pt")
        greedy_output = model.generate(input_ids.cuda(), max_length=50)

        self.assertEqual(tokenizer.decode(greedy_output[0], skip_special_tokens=True), EXPECTED_OUTPUT)

    @slow
    @require_torch_gpu
    def test_batch_generation(self):
        # Two identical prompts in one batch must generate identical outputs.
        path_350m = "bigscience/bloom-350m"
        model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype="auto", use_cache=True).cuda()
        model = model.eval()
        tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side="left")

        input_sentence = ["I enjoy walking with my cute dog", "I enjoy walking with my cute dog"]

        input_ids = tokenizer.batch_encode_plus(input_sentence, return_tensors="pt", padding=True)
        greedy_output = model.generate(
            input_ids["input_ids"].cuda(), attention_mask=input_ids["attention_mask"], max_length=50, do_sample=False
        )

        self.assertEqual(
            tokenizer.decode(greedy_output[0], skip_special_tokens=True),
            tokenizer.decode(greedy_output[1], skip_special_tokens=True),
        )

    @slow
    @require_torch_gpu
    def test_batch_generation_padd(self):
        # A left-padded prompt in a batch must produce the same tokens as the
        # same prompt generated without padding (offset by the 3 pad tokens).
        path_350m = "bigscience/bloom-350m"
        model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype="auto", use_cache=True).cuda()
        model = model.eval()
        tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side="left")

        input_sentence = ["I enjoy walking with my cute dog", "Hello my name is"]
        input_sentence_without_pad = "Hello my name is"

        input_ids = tokenizer.batch_encode_plus(input_sentence, return_tensors="pt", padding=True)
        input_ids_without_pad = tokenizer.encode(input_sentence_without_pad, return_tensors="pt")

        greedy_output = model.generate(
            input_ids["input_ids"].cuda(), attention_mask=input_ids["attention_mask"], max_length=50, do_sample=False
        )
        greedy_output_without_pad = model.generate(input_ids_without_pad.cuda(), max_length=50, do_sample=False)

        # Token-level and text-level comparison, skipping the padding offset.
        self.assertEqual(greedy_output[-1, 3:].tolist(), greedy_output_without_pad[0, :-3].tolist())
        self.assertEqual(
            tokenizer.decode(greedy_output[-1, 3:], skip_special_tokens=True),
            tokenizer.decode(greedy_output_without_pad[0, :-3], skip_special_tokens=True),
        )
@require_torch
class BloomEmbeddingTest(unittest.TestCase):
    """Regression tests that pin per-token statistics of the word embeddings
    (before and after the embedding LayerNorm), of the last hidden state, and
    of the logits of the small ``bigscience/bigscience-small-testing``
    checkpoint, for each supported torch dtype.
    """

    def setUp(self):
        super().setUp()
        # Small test checkpoint shared by every test in this class.
        self.path_bigscience_model = "bigscience/bigscience-small-testing"

    @require_torch
    def test_embeddings(self):
        model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, torch_dtype="auto")
        model.eval()

        # Reference statistics of the raw word embeddings (before LayerNorm),
        # keyed by token id, recorded separately per dtype (bf16 / fp16 / fp32).
        EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN = {
            3478: 0.0002307891845703125,
            368: -0.000568389892578125,
            109586: -0.0003910064697265625,
            35433: -0.000194549560546875,
            2: 0.0004138946533203125,
            77: 0.000659942626953125,
            132619: -0.00031280517578125,
            2175: 0.000457763671875,
            23714: 0.000263214111328125,
            73173: -0.000286102294921875,
            144252: 0.00052642822265625,
        }
        EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN = {
            3478: -0.00921630859375,
            368: -0.010009765625,
            109586: -0.01031494140625,
            35433: -0.01177978515625,
            2: -0.0074462890625,
            77: -0.00848388671875,
            132619: -0.009521484375,
            2175: -0.0074462890625,
            23714: -0.0145263671875,
            73173: -0.007415771484375,
            144252: -0.01007080078125,
        }
        EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX = {
            3478: 0.0128173828125,
            368: 0.01214599609375,
            109586: 0.0111083984375,
            35433: 0.01019287109375,
            2: 0.0157470703125,
            77: 0.0174560546875,
            132619: 0.0078125,
            2175: 0.0113525390625,
            23714: 0.0146484375,
            73173: 0.01116943359375,
            144252: 0.01141357421875,
        }
        EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM = {"value": 0.08203125}

        EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN = {
            132619: -0.00031256675720214844,
            3478: 0.00023090839385986328,
            368: -0.0005702972412109375,
            109586: -0.00039124488830566406,
            35433: -0.000194549560546875,
            2: 0.0004146099090576172,
            2175: 0.0004572868347167969,
            23714: 0.00026416778564453125,
            73173: -0.0002865791320800781,
            144252: 0.0005254745483398438,
            77: 0.0006618499755859375,
        }
        EMBEDDINGS_DS_BEFORE_LN_F_16_MIN = {
            3478: -0.00921630859375,
            368: -0.010009765625,
            109586: -0.01031494140625,
            35433: -0.01177978515625,
            2: -0.0074462890625,
            77: -0.00848388671875,
            132619: -0.009521484375,
            2175: -0.0074462890625,
            23714: -0.0145263671875,
            73173: -0.007415771484375,
            144252: -0.01007080078125,
        }
        EMBEDDINGS_DS_BEFORE_LN_F_16_MAX = {
            3478: 0.0128173828125,
            368: 0.01214599609375,
            109586: 0.0111083984375,
            35433: 0.01019287109375,
            2: 0.0157470703125,
            77: 0.0174560546875,
            132619: 0.0078125,
            2175: 0.0113525390625,
            23714: 0.0146484375,
            73173: 0.01116943359375,
            144252: 0.01141357421875,
        }
        EMBEDDINGS_DS_BEFORE_LN_F_16_SUM = {"value": 0.0821533203125}

        EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN = {
            132619: -0.00031267106533050537,
            3478: 0.00023087859153747559,
            368: -0.0005701072514057159,
            109586: -0.0003911703824996948,
            35433: -0.0001944899559020996,
            2: 0.0004146844148635864,
            2175: 0.00045740045607089996,
            23714: 0.0002641640603542328,
            73173: -0.0002864748239517212,
            144252: 0.0005256589502096176,
            77: 0.0006617321632802486,
        }
        EMBEDDINGS_DS_BEFORE_LN_F_32_MIN = {
            3478: -0.00921630859375,
            368: -0.010009765625,
            109586: -0.01031494140625,
            35433: -0.01177978515625,
            2: -0.0074462890625,
            77: -0.00848388671875,
            132619: -0.009521484375,
            2175: -0.0074462890625,
            23714: -0.0145263671875,
            73173: -0.007415771484375,
            144252: -0.01007080078125,
        }
        EMBEDDINGS_DS_BEFORE_LN_F_32_MAX = {
            3478: 0.0128173828125,
            368: 0.01214599609375,
            109586: 0.0111083984375,
            35433: 0.01019287109375,
            2: 0.0157470703125,
            77: 0.0174560546875,
            132619: 0.0078125,
            2175: 0.0113525390625,
            23714: 0.0146484375,
            73173: 0.01116943359375,
            144252: 0.01141357421875,
        }
        EMBEDDINGS_DS_BEFORE_LN_F_32_SUM = {"value": 0.08217757940292358}

        # Dispatch table: the model's actual dtype (as a string) selects which
        # pinned reference statistics to compare against.
        TEST_EMBEDDINGS = {
            "torch.bfloat16": {
                "mean": EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN,
                "max": EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX,
                "min": EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN,
                "sum": EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM,
            },
            "torch.float32": {
                "mean": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN,
                "max": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX,
                "min": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN,
                "sum": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM,
            },
            "torch.float": {
                "mean": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN,
                "max": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX,
                "min": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN,
                "sum": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM,
            },
            "torch.float16": {
                "mean": EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN,
                "max": EMBEDDINGS_DS_BEFORE_LN_F_16_MAX,
                "min": EMBEDDINGS_DS_BEFORE_LN_F_16_MIN,
                "sum": EMBEDDINGS_DS_BEFORE_LN_F_16_SUM,
            },
        }

        # Fixed token-id sequence (contains repeats, so dict keys collapse to
        # the 11 unique ids used above).
        EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]

        # Reference statistics of the embeddings AFTER the embedding LayerNorm.
        EMBEDDINGS_DS_AFTER_LN_MEAN = {
            3478: -6.580352783203125e-05,
            368: 0.0001316070556640625,
            109586: -0.00030517578125,
            35433: 4.00543212890625e-05,
            2: -7.2479248046875e-05,
            77: -8.96453857421875e-05,
            132619: 0.0001583099365234375,
            2175: 2.1219253540039062e-05,
            23714: -0.000247955322265625,
            73173: -0.00021839141845703125,
            144252: -0.0001430511474609375,
        }
        EMBEDDINGS_DS_AFTER_LN_MIN = {
            3478: -1.6953125,
            368: -1.6875,
            109586: -1.6875,
            35433: -2.125,
            2: -1.390625,
            77: -1.5390625,
            132619: -1.875,
            2175: -1.4609375,
            23714: -2.296875,
            73173: -1.3515625,
            144252: -1.78125,
        }
        EMBEDDINGS_DS_AFTER_LN_MAX = {
            3478: 2.265625,
            368: 2.28125,
            109586: 1.953125,
            35433: 1.90625,
            2: 2.703125,
            77: 2.828125,
            132619: 1.65625,
            2175: 2.015625,
            23714: 2.234375,
            73173: 2.171875,
            144252: 1.828125,
        }

        EMBEDDINGS_DS_AFTER_LN = {
            "mean": EMBEDDINGS_DS_AFTER_LN_MEAN,
            "min": EMBEDDINGS_DS_AFTER_LN_MIN,
            "max": EMBEDDINGS_DS_AFTER_LN_MAX,
        }

        tensor_ids = torch.LongTensor([EXAMPLE_IDS])
        with torch.no_grad():
            embeddings = model.transformer.word_embeddings(tensor_ids)
            embeddings_ln = model.transformer.word_embeddings_layernorm(embeddings)

        # Per-token min/max/mean of the raw embeddings; repeated ids overwrite
        # the same dict key with an identical value.
        output_dict = {"min": {}, "max": {}, "mean": {}, "sum": {"value": embeddings.sum().item()}}
        for i, idx in enumerate(EXAMPLE_IDS):
            output_dict["min"][idx] = embeddings.min(dim=-1).values[0][i].item()
            output_dict["max"][idx] = embeddings.max(dim=-1).values[0][i].item()
            output_dict["mean"][idx] = embeddings.mean(dim=-1)[0][i].item()

        # Raw embedding stats must match the pinned values exactly for the
        # dtype the checkpoint was loaded in.
        for key in TEST_EMBEDDINGS[str(model.dtype)].keys():
            self.assertDictEqual(TEST_EMBEDDINGS[str(model.dtype)][key], output_dict[key])

        output_dict_norm = {"min": {}, "max": {}, "mean": {}}
        for i, idx in enumerate(EXAMPLE_IDS):
            output_dict_norm["min"][idx] = embeddings_ln.min(dim=-1).values[0][i].item()
            output_dict_norm["max"][idx] = embeddings_ln.max(dim=-1).values[0][i].item()
            output_dict_norm["mean"][idx] = embeddings_ln.mean(dim=-1)[0][i].item()

        # Post-LayerNorm stats are only compared approximately (1 decimal place).
        for i, key in enumerate(output_dict_norm.keys()):
            for j, idx in enumerate(output_dict[key].keys()):
                self.assertAlmostEqual(EMBEDDINGS_DS_AFTER_LN[key][idx], output_dict_norm[key][idx], places=1)

    @require_torch
    def test_hidden_states_transformers(self):
        """Pin min/max/mean of the last hidden state for the fixed token sequence."""
        cuda_available = torch.cuda.is_available()
        model = BloomModel.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype="auto").to(
            torch_device
        )
        model.eval()

        EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]

        MEAN_VALUE_LAST_LM = -4.3392181396484375e-05
        MIN_MAX_DICT = {"min": -2.0625, "max": 2.75}
        tensor_ids = torch.LongTensor([EXAMPLE_IDS])

        with torch.no_grad():
            logits = model(tensor_ids.to(torch_device))
        output_dict = {
            "min": logits.last_hidden_state.min(dim=-1).values[0][0].item(),
            "max": logits.last_hidden_state.max(dim=-1).values[0][0].item(),
        }

        # GPU numerics are slightly tighter here than CPU, hence the differing
        # tolerance (4 vs 3 decimal places).
        if cuda_available:
            self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=4)
        else:
            self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=3)

        self.assertDictEqual(MIN_MAX_DICT, output_dict)

    @require_torch
    def test_logits(self):
        """Pin the mean of each vocabulary half of the LM logits."""
        cuda_available = torch.cuda.is_available()
        model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype="auto").to(
            torch_device
        )
        model.eval()

        EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]

        MEAN_LOGITS_GPU_1 = -1.823902130126953e-05
        MEAN_LOGITS_GPU_2 = 1.9431114196777344e-05

        tensor_ids = torch.LongTensor([EXAMPLE_IDS]).to(torch_device)
        with torch.no_grad():
            output = model(tensor_ids).logits

        # Split the vocab dimension in two halves of 125440 columns each,
        # mirroring a 2-way tensor-parallel layout.
        output_gpu_1, output_gpu_2 = output.split(125440, dim=-1)
        if cuda_available:
            self.assertEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1)
            self.assertEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2)
        else:
            self.assertAlmostEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1, places=6)
            self.assertAlmostEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2, places=6)
| true | true |
f71619031253fb486e6ba783dca022105538c931 | 3,935 | py | Python | server/website/website/parser/parser.py | mjain2/ottertune | 011e896bf89df831fb1189b1ab4c9a7d7dca420a | [
"Apache-2.0"
] | 1 | 2019-08-16T19:35:35.000Z | 2019-08-16T19:35:35.000Z | server/website/website/parser/parser.py | mjain2/ottertune | 011e896bf89df831fb1189b1ab4c9a7d7dca420a | [
"Apache-2.0"
] | null | null | null | server/website/website/parser/parser.py | mjain2/ottertune | 011e896bf89df831fb1189b1ab4c9a7d7dca420a | [
"Apache-2.0"
] | null | null | null | #
# OtterTune - parser.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
'''
Created on Dec 12, 2017
@author: dvanaken
'''
from website.models import DBMSCatalog
from website.types import DBMSType
from .myrocks import MyRocks56Parser
from .mysql import MySql57Parser
from .postgres import Postgres96Parser, PostgresOldParser
from .oracle import Oracle19Parser
class Parser(object):
    """Static facade that dispatches DBMS-specific parsing/formatting to the
    per-version parser implementations, selected by DBMSCatalog primary key."""

    # Lazily-built map: DBMSCatalog pk -> parser instance (see __utils).
    __DBMS_UTILS_IMPLS = None

    @staticmethod
    def __utils(dbms_id=None):
        """Return the parser for dbms_id, or the full pk -> parser map when
        dbms_id is None. Raises NotImplementedError for unknown ids."""
        if Parser.__DBMS_UTILS_IMPLS is None:
            Parser.__DBMS_UTILS_IMPLS = {
                DBMSCatalog.objects.get(
                    type=DBMSType.POSTGRES, version='9.3').pk: PostgresOldParser('9.3'),
                DBMSCatalog.objects.get(
                    type=DBMSType.POSTGRES, version='9.2').pk: PostgresOldParser('9.2'),
                DBMSCatalog.objects.get(
                    type=DBMSType.POSTGRES, version='9.6').pk: Postgres96Parser('9.6'),
                DBMSCatalog.objects.get(
                    type=DBMSType.POSTGRES, version='9.4').pk: Postgres96Parser('9.4'),
                DBMSCatalog.objects.get(
                    type=DBMSType.POSTGRES, version='9.5').pk: Postgres96Parser('9.5'),
                DBMSCatalog.objects.get(
                    type=DBMSType.MYROCKS, version='5.6').pk: MyRocks56Parser(),
                DBMSCatalog.objects.get(
                    type=DBMSType.ORACLE, version='19.0.0.0.0').pk: Oracle19Parser(),
                DBMSCatalog.objects.get(
                    type=DBMSType.MYSQL, version='5.7').pk: MySql57Parser()
            }
        try:
            if dbms_id is None:
                return Parser.__DBMS_UTILS_IMPLS
            return Parser.__DBMS_UTILS_IMPLS[dbms_id]
        except KeyError:
            raise NotImplementedError(
                'Implement me! ({})'.format(dbms_id))

    @staticmethod
    def parse_version_string(dbms_type, version_string):
        """Try each parser registered for dbms_type until one can parse
        version_string; return None if none can.

        BUGFIX: the previous code called ``Parser.__utils(dbms_type)``, i.e.
        it passed a DBMSType value where a DBMSCatalog pk is expected.  That
        either raised NotImplementedError or returned a single parser object
        (which has no ``.items()``).  We must iterate the FULL map here.
        """
        for k, v in list(Parser.__utils().items()):
            dbms = DBMSCatalog.objects.get(pk=k)
            if dbms.type == dbms_type:
                try:
                    return v.parse_version_string(version_string)
                except AttributeError:
                    # Parser has no parse_version_string; try the next one.
                    pass
        return None

    @staticmethod
    def convert_dbms_knobs(dbms_id, knobs):
        return Parser.__utils(dbms_id).convert_dbms_knobs(knobs)

    @staticmethod
    def convert_dbms_metrics(dbms_id, numeric_metrics, observation_time, target_objective=None):
        return Parser.__utils(dbms_id).convert_dbms_metrics(
            numeric_metrics, observation_time, target_objective)

    @staticmethod
    def parse_dbms_knobs(dbms_id, knobs):
        return Parser.__utils(dbms_id).parse_dbms_knobs(knobs)

    @staticmethod
    def parse_dbms_metrics(dbms_id, metrics):
        return Parser.__utils(dbms_id).parse_dbms_metrics(metrics)

    @staticmethod
    def get_nondefault_knob_settings(dbms_id, knobs):
        return Parser.__utils(dbms_id).get_nondefault_knob_settings(knobs)

    @staticmethod
    def create_knob_configuration(dbms_id, tuning_knobs):
        return Parser.__utils(dbms_id).create_knob_configuration(tuning_knobs)

    @staticmethod
    def format_dbms_knobs(dbms_id, knobs):
        return Parser.__utils(dbms_id).format_dbms_knobs(knobs)

    @staticmethod
    def get_knob_configuration_filename(dbms_id):
        return Parser.__utils(dbms_id).knob_configuration_filename

    @staticmethod
    def filter_numeric_metrics(dbms_id, metrics):
        return Parser.__utils(dbms_id).filter_numeric_metrics(metrics)

    @staticmethod
    def filter_tunable_knobs(dbms_id, knobs):
        return Parser.__utils(dbms_id).filter_tunable_knobs(knobs)

    @staticmethod
    def calculate_change_in_metrics(dbms_id, metrics_start, metrics_end):
        return Parser.__utils(dbms_id).calculate_change_in_metrics(
            metrics_start, metrics_end)
| 35.45045 | 96 | 0.662516 |
from website.models import DBMSCatalog
from website.types import DBMSType
from .myrocks import MyRocks56Parser
from .mysql import MySql57Parser
from .postgres import Postgres96Parser, PostgresOldParser
from .oracle import Oracle19Parser
class Parser(object):
    """Static facade dispatching DBMS-specific parsing/formatting to the
    per-version parser implementations (selected by DBMSCatalog pk).

    NOTE: this is a comment-stripped duplicate of the same class earlier in
    this file.
    """

    # Lazily-built map: DBMSCatalog pk -> parser instance (see __utils).
    __DBMS_UTILS_IMPLS = None

    @staticmethod
    def __utils(dbms_id=None):
        # Returns the parser for dbms_id, or the whole map when dbms_id is None.
        if Parser.__DBMS_UTILS_IMPLS is None:
            Parser.__DBMS_UTILS_IMPLS = {
                DBMSCatalog.objects.get(
                    type=DBMSType.POSTGRES, version='9.3').pk: PostgresOldParser('9.3'),
                DBMSCatalog.objects.get(
                    type=DBMSType.POSTGRES, version='9.2').pk: PostgresOldParser('9.2'),
                DBMSCatalog.objects.get(
                    type=DBMSType.POSTGRES, version='9.6').pk: Postgres96Parser('9.6'),
                DBMSCatalog.objects.get(
                    type=DBMSType.POSTGRES, version='9.4').pk: Postgres96Parser('9.4'),
                DBMSCatalog.objects.get(
                    type=DBMSType.POSTGRES, version='9.5').pk: Postgres96Parser('9.5'),
                DBMSCatalog.objects.get(
                    type=DBMSType.MYROCKS, version='5.6').pk: MyRocks56Parser(),
                DBMSCatalog.objects.get(
                    type=DBMSType.ORACLE, version='19.0.0.0.0').pk: Oracle19Parser(),
                DBMSCatalog.objects.get(
                    type=DBMSType.MYSQL, version='5.7').pk: MySql57Parser()
            }
        try:
            if dbms_id is None:
                return Parser.__DBMS_UTILS_IMPLS
            return Parser.__DBMS_UTILS_IMPLS[dbms_id]
        except KeyError:
            raise NotImplementedError(
                'Implement me! ({})'.format(dbms_id))

    @staticmethod
    def parse_version_string(dbms_type, version_string):
        # NOTE(review): `__utils(dbms_type)` passes a DBMSType where a catalog
        # pk is expected -- this looks wrong (it should probably be `__utils()`
        # to iterate the full map); confirm against the other copy of this
        # class, which fixes it.
        for k, v in list(Parser.__utils(dbms_type).items()):
            dbms = DBMSCatalog.objects.get(pk=k)
            if dbms.type == dbms_type:
                try:
                    return v.parse_version_string(version_string)
                except AttributeError:
                    pass
        return None

    @staticmethod
    def convert_dbms_knobs(dbms_id, knobs):
        return Parser.__utils(dbms_id).convert_dbms_knobs(knobs)

    @staticmethod
    def convert_dbms_metrics(dbms_id, numeric_metrics, observation_time, target_objective=None):
        return Parser.__utils(dbms_id).convert_dbms_metrics(
            numeric_metrics, observation_time, target_objective)

    @staticmethod
    def parse_dbms_knobs(dbms_id, knobs):
        return Parser.__utils(dbms_id).parse_dbms_knobs(knobs)

    @staticmethod
    def parse_dbms_metrics(dbms_id, metrics):
        return Parser.__utils(dbms_id).parse_dbms_metrics(metrics)

    @staticmethod
    def get_nondefault_knob_settings(dbms_id, knobs):
        return Parser.__utils(dbms_id).get_nondefault_knob_settings(knobs)

    @staticmethod
    def create_knob_configuration(dbms_id, tuning_knobs):
        return Parser.__utils(dbms_id).create_knob_configuration(tuning_knobs)

    @staticmethod
    def format_dbms_knobs(dbms_id, knobs):
        return Parser.__utils(dbms_id).format_dbms_knobs(knobs)

    @staticmethod
    def get_knob_configuration_filename(dbms_id):
        return Parser.__utils(dbms_id).knob_configuration_filename

    @staticmethod
    def filter_numeric_metrics(dbms_id, metrics):
        return Parser.__utils(dbms_id).filter_numeric_metrics(metrics)

    @staticmethod
    def filter_tunable_knobs(dbms_id, knobs):
        return Parser.__utils(dbms_id).filter_tunable_knobs(knobs)

    @staticmethod
    def calculate_change_in_metrics(dbms_id, metrics_start, metrics_end):
        return Parser.__utils(dbms_id).calculate_change_in_metrics(
            metrics_start, metrics_end)
| true | true |
f716194f5cc205b886a9dd79a6796056afa57b63 | 15,927 | py | Python | old/fastai/structured.py | fjaragones/fastai | be48d209a4526191f71dc7adaef090828897b9ec | [
"Apache-2.0"
] | 2 | 2019-02-19T18:34:29.000Z | 2019-12-09T17:51:41.000Z | old/fastai/structured.py | fjaragones/fastai | be48d209a4526191f71dc7adaef090828897b9ec | [
"Apache-2.0"
] | 4 | 2020-02-25T20:46:35.000Z | 2022-02-26T04:45:55.000Z | old/fastai/structured.py | fjaragones/fastai | be48d209a4526191f71dc7adaef090828897b9ec | [
"Apache-2.0"
] | 1 | 2019-01-16T08:10:48.000Z | 2019-01-16T08:10:48.000Z | from .imports import *
from sklearn_pandas import DataFrameMapper
from sklearn.preprocessing import LabelEncoder, Imputer, StandardScaler
from pandas.api.types import is_string_dtype, is_numeric_dtype
from sklearn.ensemble import forest
from sklearn.tree import export_graphviz
def set_plot_sizes(sml, med, big):
    """Set matplotlib rc font sizes: sml for most text, med for axis labels,
    big for figure titles. Mutates global matplotlib state."""
    plt.rc('font', size=sml)          # controls default text sizes
    plt.rc('axes', titlesize=sml)     # fontsize of the axes title
    plt.rc('axes', labelsize=med)     # fontsize of the x and y labels
    plt.rc('xtick', labelsize=sml)    # fontsize of the tick labels
    plt.rc('ytick', labelsize=sml)    # fontsize of the tick labels
    plt.rc('legend', fontsize=sml)    # legend fontsize
    plt.rc('figure', titlesize=big)   # fontsize of the figure title
def parallel_trees(m, fn, n_jobs=8):
    """Apply fn to every tree (estimator) of the fitted forest m using a
    process pool of n_jobs workers; return the results as a list."""
    return list(ProcessPoolExecutor(n_jobs).map(fn, m.estimators_))
def draw_tree(t, df, size=10, ratio=0.6, precision=0):
    """ Draws a representation of a random forest in IPython.

    Parameters:
    -----------
    t: The tree you wish to draw
    df: The data used to train the tree. This is used to get the names of the features.
    size: graphviz rendering size.
    ratio: graphviz aspect ratio.
    precision: number of decimal places shown for thresholds/values.
    """
    # Render via sklearn's graphviz export, then patch size/ratio into the
    # emitted DOT source before displaying inline.
    s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,
                      special_characters=True, rotate=True, precision=precision)
    IPython.display.display(graphviz.Source(re.sub('Tree {',
       f'Tree {{ size={size}; ratio={ratio}', s)))
def combine_date(years, months=1, days=1, weeks=None, hours=None, minutes=None,
                 seconds=None, milliseconds=None, microseconds=None, nanoseconds=None):
    """Combine scalar or array date components into numpy datetime64 values.

    ``years`` anchors the result as datetime64; every other non-None component
    is added as a timedelta64 of the matching unit.
    """
    # Shift to numpy's representation: epoch year 1970, 0-based months/days.
    parts = [
        ('<M8[Y]', np.asarray(years) - 1970),
        ('<m8[M]', np.asarray(months) - 1),
        ('<m8[D]', np.asarray(days) - 1),
        ('<m8[W]', weeks),
        ('<m8[h]', hours),
        ('<m8[m]', minutes),
        ('<m8[s]', seconds),
        ('<m8[ms]', milliseconds),
        ('<m8[us]', microseconds),
        ('<m8[ns]', nanoseconds),
    ]
    return sum(np.asarray(value, dtype=unit)
               for unit, value in parts if value is not None)
def get_sample(df, n):
    """Return a copy of n randomly chosen rows of df (without replacement).

    The chosen rows keep their original relative order.

    Examples:
    ---------
    >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
    >>> get_sample(df, 2)  # doctest: +SKIP
       col1 col2
    1     2    b
    2     3    a
    """
    positions = np.random.permutation(len(df))[:n]
    return df.iloc[sorted(positions)].copy()
def add_datepart(df, fldname, drop=True, time=False, errors="raise"):
    """add_datepart converts a column of df from a datetime64 to many columns
    containing the information from the date. This applies changes inplace.

    Parameters:
    -----------
    df: A pandas data frame. df gains several new columns.
    fldname: A string that is the name of the date column you wish to expand.
        If it is not a datetime64 series, it will be converted to one with
        pd.to_datetime.
    drop: If true then the original date column will be removed.
    time: If true time features: Hour, Minute, Second will be added.
    errors: Passed through to pd.to_datetime when conversion is needed.

    Examples:
    ---------
    >>> df = pd.DataFrame({'A': pd.to_datetime(['3/11/2000', '3/12/2000'])})
    >>> add_datepart(df, 'A')
    # df now has AYear, AMonth, AWeek, ADay, ... AElapsed columns and 'A' dropped.
    """
    fld = df[fldname]
    fld_dtype = fld.dtype
    if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
        fld_dtype = np.datetime64

    if not np.issubdtype(fld_dtype, np.datetime64):
        df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True, errors=errors)
    # Strip a trailing 'date'/'Date' from the column name for the new prefixes.
    targ_pre = re.sub('[Dd]ate$', '', fldname)
    attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
            'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start',
            'Is_year_end', 'Is_year_start']
    if time: attr = attr + ['Hour', 'Minute', 'Second']
    for n in attr:
        if n == 'Week' and not hasattr(fld.dt, 'week'):
            # BUGFIX: Series.dt.week was removed in pandas 2.0; fall back to
            # the ISO calendar week (same values as the old accessor).
            df[targ_pre + n] = fld.dt.isocalendar().week.astype(np.int64)
        else:
            df[targ_pre + n] = getattr(fld.dt, n.lower())
    # Seconds since the Unix epoch.
    df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9
    if drop: df.drop(fldname, axis=1, inplace=True)
def is_date(x):
    """Return True if x (array/Series) has a datetime64-based dtype."""
    return np.issubdtype(x.dtype, np.datetime64)
def train_cats(df):
    """In place, convert every string column of df into an ordered categorical.

    Numeric columns are left untouched.

    Examples:
    ---------
    >>> df = pd.DataFrame({'col1': [1, 2, 3], 'col2': ['a', 'b', 'a']})
    >>> train_cats(df)
    >>> df['col2'].dtype.name
    'category'
    """
    for name, col in df.items():
        if is_string_dtype(col):
            df[name] = col.astype('category').cat.as_ordered()
def apply_cats(df, trn):
    """Changes any columns of strings in df into categorical variables using
    trn as a template for the category codes. Applies changes inplace.

    Parameters:
    -----------
    df: A pandas dataframe. Columns that are categorical in trn are converted
        here using trn's categories; values unseen in trn become NaN.
    trn: A pandas dataframe whose categorical columns define the categories
        (and their codes) to apply to df.

    Examples:
    ---------
    >>> trn = pd.DataFrame({'col2': ['a', 'b', 'a']})
    >>> trn['col2'] = trn['col2'].astype('category').cat.as_ordered()
    >>> df2 = pd.DataFrame({'col2': ['b', 'a', 'c']})
    >>> apply_cats(df2, trn)
    >>> list(df2['col2'].cat.codes)
    [1, 0, -1]
    """
    for n, c in df.items():
        if (n in trn.columns) and (trn[n].dtype.name == 'category'):
            # BUGFIX: the old `c.astype('category').cat.set_categories(...,
            # inplace=True)` relied on the `inplace` keyword removed in
            # pandas 2.0.  Building the Categorical directly from trn's
            # categories is equivalent: values absent from trn map to NaN.
            df[n] = pd.Categorical(c, categories=trn[n].cat.categories, ordered=True)
def fix_missing(df, col, name, na_dict):
    """Fill missing values of a numeric column and record the filler used.

    If col is numeric and has NaNs (or name is already in na_dict), a boolean
    ``{name}_na`` indicator column is added to df, the NaNs in df[name] are
    filled with na_dict[name] when present (otherwise the column median), and
    the filler is stored in na_dict. Non-numeric columns and clean columns
    not listed in na_dict are left untouched.

    Returns the (possibly updated) na_dict.

    Examples:
    ---------
    >>> df = pd.DataFrame({'col1': [1, np.nan, 3]})
    >>> fix_missing(df, df['col1'], 'col1', {})
    {'col1': 2.0}
    >>> list(df['col1_na'])
    [False, True, False]
    """
    if is_numeric_dtype(col):
        missing = pd.isnull(col)
        if missing.sum() or (name in na_dict):
            filler = na_dict[name] if name in na_dict else col.median()
            df[name + '_na'] = missing
            df[name] = col.fillna(filler)
            na_dict[name] = filler
    return na_dict
def numericalize(df, col, name, max_n_cat):
    """Store col's category codes (+1, so that NaN maps to 0) in df[name].

    Nothing happens when col is already numeric, or when max_n_cat is given
    and col has more than max_n_cat categories (high-cardinality columns are
    left for one-hot encoding instead).

    Examples:
    ---------
    >>> df = pd.DataFrame({'col2': pd.Categorical(['a', 'b', 'a'])})
    >>> numericalize(df, df['col2'], 'col3', None)
    >>> list(df['col3'])
    [1, 2, 1]
    """
    if is_numeric_dtype(col):
        return
    if max_n_cat is not None and len(col.cat.categories) > max_n_cat:
        return
    df[name] = col.cat.codes + 1
def scale_vars(df, mapper):
    """Standardize (zero mean, unit variance) the numeric columns of df in place.

    When mapper is None a new DataFrameMapper is fitted on df's numeric
    columns; otherwise the given mapper's stored statistics are reused (so the
    training-time scaling can be reapplied at test time). Returns the mapper.
    """
    warnings.filterwarnings('ignore', category=sklearn.exceptions.DataConversionWarning)
    if mapper is None:
        map_f = [([n],StandardScaler()) for n in df.columns if is_numeric_dtype(df[n])]
        mapper = DataFrameMapper(map_f).fit(df)
    df[mapper.transformed_names_] = mapper.transform(df)
    return mapper
def proc_df(df, y_fld=None, skip_flds=None, ignore_flds=None, do_scale=False, na_dict=None,
            preproc_fn=None, max_n_cat=None, subset=None, mapper=None):
    """ proc_df takes a data frame df and splits off the response variable, and
    changes the df into an entirely numeric dataframe. For each column of df
    which is not in skip_flds nor in ignore_flds, na values are replaced by the
    median value of the column.

    Parameters:
    -----------
    df: The data frame you wish to process.
    y_fld: The name of the response variable
    skip_flds: A list of fields that are dropped from df.
    ignore_flds: A list of fields that are ignored during processing
        (passed through unchanged).
    do_scale: Standardizes each column in df. Takes Boolean Values(True,False)
    na_dict: a dictionary of na columns to add. Na columns are also added if there
        are any missing values.
    preproc_fn: A function that gets applied to df.
    max_n_cat: The maximum number of categories to break into dummy values, instead
        of integer codes.
    subset: Takes a random subset of size subset from df.
    mapper: If do_scale is set as True, the mapper variable
        calculates the values used for scaling of variables during training time
        (mean and standard deviation).

    Returns:
    --------
    [x, y, nas, mapper(optional)]:
        x: the transformed, entirely numeric version of df (without y_fld).
        y: the response variable values (None if y_fld is None).
        nas: dictionary of the na columns created and the fill value used.
        mapper: the fitted DataFrameMapper (only when do_scale is True), used
            to reapply the same scaling at test time.

    Examples:
    ---------
    >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
    >>> train_cats(df)
    >>> x, y, nas = proc_df(df, 'col1')
    >>> x
       col2
    0     1
    1     2
    2     1
    """
    if not ignore_flds: ignore_flds=[]
    if not skip_flds: skip_flds=[]
    # Optionally work on a random subset; otherwise never mutate the caller's df.
    if subset: df = get_sample(df,subset)
    else: df = df.copy()
    # Stash ignored columns so they can be re-attached unchanged at the end.
    ignored_flds = df.loc[:, ignore_flds]
    df.drop(ignore_flds, axis=1, inplace=True)
    if preproc_fn: preproc_fn(df)
    if y_fld is None: y = None
    else:
        # Categorical targets are converted to their integer codes.
        if not is_numeric_dtype(df[y_fld]): df[y_fld] = df[y_fld].cat.codes
        y = df[y_fld].values
        skip_flds += [y_fld]
    df.drop(skip_flds, axis=1, inplace=True)

    if na_dict is None: na_dict = {}
    else: na_dict = na_dict.copy()
    na_dict_initial = na_dict.copy()
    # Fill missing numeric values, collecting the fillers used.
    for n,c in df.items(): na_dict = fix_missing(df, c, n, na_dict)
    # When a pre-existing na_dict was supplied (test time), drop indicator
    # columns for NAs that did not exist at training time.
    if len(na_dict_initial.keys()) > 0:
        df.drop([a + '_na' for a in list(set(na_dict.keys()) - set(na_dict_initial.keys()))], axis=1, inplace=True)
    if do_scale: mapper = scale_vars(df, mapper)
    # Low-cardinality categoricals stay categorical here and are one-hot
    # encoded below; the rest are replaced by their integer codes.
    for n,c in df.items(): numericalize(df, c, n, max_n_cat)
    df = pd.get_dummies(df, dummy_na=True)
    df = pd.concat([ignored_flds, df], axis=1)
    res = [df, y, na_dict]
    if do_scale: res = res + [mapper]
    return res
def rf_feat_importance(m, df):
    """Return a DataFrame with columns ('cols', 'imp') pairing df's column
    names with the fitted model m's feature importances, sorted with the most
    important feature first."""
    importance = pd.DataFrame({'cols': df.columns, 'imp': m.feature_importances_})
    return importance.sort_values('imp', ascending=False)
def set_rf_samples(n):
    """ Changes Scikit learn's random forests to give each tree a random sample of
    n random rows.
    """
    # Monkey-patch sklearn's private bootstrap helper so each tree draws n
    # rows (with replacement) instead of the full n_samples.
    # NOTE(review): relies on the private API forest._generate_sample_indices;
    # its signature has changed across sklearn versions -- verify against the
    # installed sklearn.
    forest._generate_sample_indices = (lambda rs, n_samples:
        forest.check_random_state(rs).randint(0, n_samples, n))
def reset_rf_samples():
    """Undoes the changes produced by set_rf_samples.

    Restores the default behaviour: each tree sees a bootstrap sample of
    n_samples rows (i.e. the full training-set size).
    """
    def bootstrap_all_rows(rs, n_samples):
        return forest.check_random_state(rs).randint(0, n_samples, n_samples)
    forest._generate_sample_indices = bootstrap_all_rows
def get_nn_mappers(df, cat_vars, contin_vars):
    """Build fitted DataFrameMappers for feeding df into a neural net.

    Continuous columns have nulls replaced by max+100 (an out-of-range
    sentinel) and are standardized; categorical columns have nulls replaced
    by '#NA#' and are label-encoded. df is modified in place.

    Returns (categorical_mapper, continuous_mapper), both fitted on df.
    """
    # Replace nulls with an out-of-range sentinel for continuous, '#NA#' for categorical.
    for v in contin_vars: df[v] = df[v].fillna(df[v].max()+100,)
    # Assign back instead of fillna(inplace=True) on a column selection:
    # under pandas copy-on-write the inplace form silently stops mutating df.
    for v in cat_vars: df[v] = df[v].fillna('#NA#')
    # list of tuples, containing variable and instance of a transformer for that variable
    # for categoricals, use LabelEncoder to map to integers. For continuous, standardize
    cat_maps = [(o, LabelEncoder()) for o in cat_vars]
    contin_maps = [([o], StandardScaler()) for o in contin_vars]
    return DataFrameMapper(cat_maps).fit(df), DataFrameMapper(contin_maps).fit(df)
| 32.975155 | 155 | 0.585547 | from .imports import *
from sklearn_pandas import DataFrameMapper
from sklearn.preprocessing import LabelEncoder, Imputer, StandardScaler
from pandas.api.types import is_string_dtype, is_numeric_dtype
from sklearn.ensemble import forest
from sklearn.tree import export_graphviz
def set_plot_sizes(sml, med, big):
    """Configure matplotlib rc font sizes: sml for most text, med for axis
    labels, big for figure titles."""
    rc_sizes = [('font',   'size',      sml),
                ('axes',   'titlesize', sml),
                ('axes',   'labelsize', med),
                ('xtick',  'labelsize', sml),
                ('ytick',  'labelsize', sml),
                ('legend', 'fontsize',  sml),
                ('figure', 'titlesize', big)]
    for group, key, value in rc_sizes:
        plt.rc(group, **{key: value})
def parallel_trees(m, fn, n_jobs=8):
    """Apply fn to every tree of ensemble m using a pool of worker processes.

    Returns the list of results in tree order. Uses a `with` block so the
    executor's worker processes are shut down when done (the original
    never shut the pool down, leaking worker processes).
    """
    with ProcessPoolExecutor(n_jobs) as ex:
        return list(ex.map(fn, m.estimators_))
def draw_tree(t, df, size=10, ratio=0.6, precision=0):
    """Render decision tree `t` inline (IPython) as a graphviz plot.

    Feature names come from df's columns; `size`/`ratio` are injected into
    the graphviz source to control the drawing dimensions.
    """
    dot = export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,
                          special_characters=True, rotate=True, precision=precision)
    sized = re.sub('Tree {', f'Tree {{ size={size}; ratio={ratio}', dot)
    IPython.display.display(graphviz.Source(sized))
def combine_date(years, months=1, days=1, weeks=None, hours=None, minutes=None,
                 seconds=None, milliseconds=None, microseconds=None, nanoseconds=None):
    """Combine per-component values/arrays into numpy datetime64 values.

    Year/month/day are rebased (epoch 1970, months and days are 1-based);
    all finer components are optional and added as timedeltas. The result
    resolution is that of the finest component supplied.
    """
    parts = (np.asarray(years) - 1970, np.asarray(months) - 1, np.asarray(days) - 1,
             weeks, hours, minutes, seconds, milliseconds, microseconds, nanoseconds)
    dtypes = ('<M8[Y]', '<m8[M]', '<m8[D]', '<m8[W]', '<m8[h]',
              '<m8[m]', '<m8[s]', '<m8[ms]', '<m8[us]', '<m8[ns]')
    total = 0
    for value, dt in zip(parts, dtypes):
        if value is not None:
            total = total + np.asarray(value, dtype=dt)
    return total
def get_sample(df, n):
    """Return a random n-row sample of df (a copy), keeping original row order."""
    chosen = np.random.permutation(len(df))[:n]
    return df.iloc[sorted(chosen)].copy()
def add_datepart(df, fldname, drop=True, time=False, errors="raise"):
    """Expand df[fldname] into date-part columns (Year, Month, Week, ...), in place.

    Parameters
    ----------
    df: DataFrame modified in place.
    fldname: name of the date column; converted with pd.to_datetime if needed.
    drop: drop the original column afterwards.
    time: also add Hour/Minute/Second columns.
    errors: passed through to pd.to_datetime.
    """
    fld = df[fldname]
    fld_dtype = fld.dtype
    # Treat tz-aware datetimes the same as naive ones for the dtype check.
    if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
        fld_dtype = np.datetime64
    if not np.issubdtype(fld_dtype, np.datetime64):
        # infer_datetime_format was dropped: deprecated since pandas 2.0,
        # where format inference is the default behaviour anyway.
        df[fldname] = fld = pd.to_datetime(fld, errors=errors)
    # Column prefix: field name with a trailing 'date'/'Date' stripped.
    targ_pre = re.sub('[Dd]ate$', '', fldname)
    attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
            'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
    if time: attr = attr + ['Hour', 'Minute', 'Second']
    for n in attr:
        if n == 'Week' and not hasattr(fld.dt, 'week'):
            # Series.dt.week was removed in pandas 2.0; isocalendar() is the
            # documented replacement (cast from UInt32 back to int64).
            df[targ_pre + n] = fld.dt.isocalendar().week.astype(np.int64)
        else:
            df[targ_pre + n] = getattr(fld.dt, n.lower())
    # Seconds since the Unix epoch (int64 nanoseconds floor-divided to seconds).
    df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9
    if drop: df.drop(fldname, axis=1, inplace=True)
def is_date(x): return np.issubdtype(x.dtype, np.datetime64)
def train_cats(df):
    """Convert every string column of df into an ordered category, in place.

    Numeric columns are untouched. Typically called on the training set;
    use apply_cats to mirror the resulting codes onto validation/test sets.
    """
    for name, col in df.items():
        if is_string_dtype(col):
            df[name] = col.astype('category').cat.as_ordered()
def apply_cats(df, trn):
    """Make df's categorical columns use the same categories/codes as trn, in place.

    Only columns that exist in trn and are categorical there are converted;
    their category list (and ordering) is copied from trn so that codes are
    consistent across the two frames.
    """
    for n, c in df.items():
        if (n in trn.columns) and (trn[n].dtype.name == 'category'):
            df[n] = c.astype('category').cat.as_ordered()
            # set_categories(..., inplace=True) was removed in pandas 2.0;
            # assign the returned Series back instead.
            df[n] = df[n].cat.set_categories(trn[n].cat.categories, ordered=True)
def fix_missing(df, col, name, na_dict):
    """Fill NaNs in numeric column `col` of df in place; record the filler.

    Adds a boolean `name_na` indicator column, fills with na_dict[name] if
    present (e.g. a filler learned on the training set), otherwise the
    column median. Non-numeric columns are left untouched. Returns the
    (possibly updated) na_dict.
    """
    if not is_numeric_dtype(col):
        return na_dict
    if pd.isnull(col).sum() or (name in na_dict):
        df[name + '_na'] = pd.isnull(col)
        filler = na_dict.get(name, col.median())
        df[name] = col.fillna(filler)
        na_dict[name] = filler
    return na_dict
def numericalize(df, col, name, max_n_cat):
    """Replace a categorical column with its 1-based integer codes, in place.

    Numeric columns are skipped; so are categoricals with at most max_n_cat
    levels (those are left for one-hot encoding). max_n_cat=None means
    always numericalize. Codes are shifted by +1 so NaN maps to 0.
    """
    if is_numeric_dtype(col):
        return
    if max_n_cat is not None and len(col.cat.categories) <= max_n_cat:
        return
    df[name] = col.cat.codes + 1
def scale_vars(df, mapper):
    """Standardize the numeric columns of df in place; return the mapper.

    If mapper is None, a new DataFrameMapper of StandardScalers is fitted
    on df's numeric columns; pass a previously returned mapper to reuse the
    training-set scaling on another frame.
    """
    warnings.filterwarnings('ignore', category=sklearn.exceptions.DataConversionWarning)
    if mapper is None:
        numeric_maps = [([col], StandardScaler())
                        for col in df.columns if is_numeric_dtype(df[col])]
        mapper = DataFrameMapper(numeric_maps).fit(df)
    df[mapper.transformed_names_] = mapper.transform(df)
    return mapper
def proc_df(df, y_fld=None, skip_flds=None, ignore_flds=None, do_scale=False, na_dict=None,
            preproc_fn=None, max_n_cat=None, subset=None, mapper=None):
    """Split df into (x, y) for modelling: fill missing numeric values,
    numericalize/one-hot categoricals, and optionally standardize.

    Parameters
    ----------
    df: source DataFrame (a copy or sample is processed, not df itself).
    y_fld: dependent-variable column name; category dtype is converted to
        codes. None means no y is extracted.
    skip_flds: column names dropped from the output.
    ignore_flds: columns passed through untouched (re-attached at the end).
    do_scale: standardize numeric columns via scale_vars.
    na_dict: column -> filler mapping from a previous call (e.g. on the
        training set), reused so the same fillers apply here.
    preproc_fn: optional function applied to the copy before processing.
    max_n_cat: categoricals with more levels than this are numericalized;
        the rest are one-hot encoded by get_dummies.
    subset: if given, process a random sample of this many rows.
    mapper: fitted DataFrameMapper from a previous do_scale call, reused.

    Returns
    -------
    [x, y, na_dict] — plus the mapper when do_scale — where x is the
    processed DataFrame and y the dependent-variable values (or None).
    """
    if not ignore_flds: ignore_flds=[]
    if not skip_flds: skip_flds=[]
    # Work on a sample or a copy so the caller's df is never mutated.
    if subset: df = get_sample(df,subset)
    else: df = df.copy()
    # Set ignored columns aside; they bypass all processing below.
    ignored_flds = df.loc[:, ignore_flds]
    df.drop(ignore_flds, axis=1, inplace=True)
    if preproc_fn: preproc_fn(df)
    if y_fld is None: y = None
    else:
        # A categorical target is replaced by its integer codes.
        if not is_numeric_dtype(df[y_fld]): df[y_fld] = df[y_fld].cat.codes
        y = df[y_fld].values
        skip_flds += [y_fld]
    df.drop(skip_flds, axis=1, inplace=True)
    if na_dict is None: na_dict = {}
    else: na_dict = na_dict.copy()
    na_dict_initial = na_dict.copy()
    # Fill missing numerics, accumulating the fillers used per column.
    for n,c in df.items(): na_dict = fix_missing(df, c, n, na_dict)
    # When a na_dict was passed in (e.g. from the training set), drop _na
    # indicator columns for columns the training set did not have, so the
    # output schema matches the training set's.
    if len(na_dict_initial.keys()) > 0:
        df.drop([a + '_na' for a in list(set(na_dict.keys()) - set(na_dict_initial.keys()))], axis=1, inplace=True)
    if do_scale: mapper = scale_vars(df, mapper)
    # High-cardinality categoricals become integer codes ...
    for n,c in df.items(): numericalize(df, c, n, max_n_cat)
    # ... and the remaining (low-cardinality) ones are one-hot encoded.
    df = pd.get_dummies(df, dummy_na=True)
    # Re-attach the untouched ignored columns.
    df = pd.concat([ignored_flds, df], axis=1)
    res = [df, y, na_dict]
    if do_scale: res = res + [mapper]
    return res
def rf_feat_importance(m, df):
    """Pair df's column names with m.feature_importances_, most important first."""
    fi = pd.DataFrame({'cols': df.columns, 'imp': m.feature_importances_})
    return fi.sort_values('imp', ascending=False)
def set_rf_samples(n):
    """Patch sklearn's random forests so each tree trains on n random rows.

    NOTE(review): relies on the private forest._generate_sample_indices
    taking (random_state, n_samples) — confirm against the installed
    scikit-learn version.
    """
    def fixed_size_sample(rs, n_samples):
        return forest.check_random_state(rs).randint(0, n_samples, n)
    forest._generate_sample_indices = fixed_size_sample
def reset_rf_samples():
    """Undo set_rf_samples: each tree again bootstraps n_samples rows."""
    def full_size_sample(rs, n_samples):
        return forest.check_random_state(rs).randint(0, n_samples, n_samples)
    forest._generate_sample_indices = full_size_sample
def get_nn_mappers(df, cat_vars, contin_vars):
    """Build fitted DataFrameMappers for feeding df into a neural net.

    Continuous columns: nulls -> max+100 sentinel, then StandardScaler.
    Categorical columns: nulls -> '#NA#', then LabelEncoder.
    df is modified in place; returns (categorical_mapper, continuous_mapper).
    """
    # Null handling: out-of-range sentinel for continuous, '#NA#' for categorical.
    for v in contin_vars: df[v] = df[v].fillna(df[v].max()+100,)
    # Assign back instead of fillna(inplace=True) on a column selection:
    # under pandas copy-on-write the inplace form silently stops mutating df.
    for v in cat_vars: df[v] = df[v].fillna('#NA#')
    cat_maps = [(o, LabelEncoder()) for o in cat_vars]
    contin_maps = [([o], StandardScaler()) for o in contin_vars]
    return DataFrameMapper(cat_maps).fit(df), DataFrameMapper(contin_maps).fit(df)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.