Dataset schema (⌀ = nullable column):

| column | dtype | constraints |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count ⌀ | int64 | 1 to 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count ⌀ | int64 | 1 to 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count ⌀ | int64 | 1 to 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
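For reference, a minimal sketch of iterating over rows with this schema using the Hugging Face `datasets` library; the dataset id "org/dataset" below is a placeholder, not the real name of this dataset:

from datasets import load_dataset

# Stream rows lazily; `content` holds the raw source file for each record.
ds = load_dataset("org/dataset", split="train", streaming=True)
for row in ds:
    print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])
    break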
hexsha: 7948d4440e39b9de576f7b9e6b3376364d06708f | size: 2,586 | ext: py | lang: Python
max_stars: Datasets/Terrain/srtm.py | OIEIEIO/earthengine-py-notebooks | 5d6c5cdec0c73bf02020ee17d42c9e30d633349f | ["MIT"] | count: 1,008 | min: 2020-01-27T02:03:18.000Z | max: 2022-03-24T10:42:14.000Z
max_issues: Datasets/Terrain/srtm.py | rafatieppo/earthengine-py-notebooks | 99fbc4abd1fb6ba41e3d8a55f8911217353a3237 | ["MIT"] | count: 8 | min: 2020-02-01T20:18:18.000Z | max: 2021-11-23T01:48:02.000Z
max_forks: Datasets/Terrain/srtm.py | rafatieppo/earthengine-py-notebooks | 99fbc4abd1fb6ba41e3d8a55f8911217353a3237 | ["MIT"] | count: 325 | min: 2020-01-27T02:03:36.000Z | max: 2022-03-25T20:33:33.000Z
content:
# %%
"""
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Datasets/Terrain/srtm.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Terrain/srtm.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Terrain/srtm.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
"""
# %%
"""
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://geemap.org). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
"""
# %%
# Installs the geemap package into the current interpreter if needed
import subprocess
import sys

try:
    import geemap
except ImportError:
    print('Installing geemap ...')
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'geemap'])
# %%
import ee
import geemap
# %%
"""
## Create an interactive map
The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
"""
# %%
Map = geemap.Map(center=[40,-100], zoom=4)
Map
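# %%
"""
As a minimal sketch (assuming 'HYBRID' is one of the basemap names bundled with geemap, per the basemaps module linked above), an additional basemap can be layered onto the same map:
"""

# %%
Map.add_basemap('HYBRID')  # assumption: 'HYBRID' is a built-in geemap basemap key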
# %%
"""
## Add Earth Engine Python script
"""
# %%
# Add Earth Engine dataset
image = ee.Image('srtm90_v4')
# path = image.getDownloadUrl({
# 'scale': 30,
# 'crs': 'EPSG:4326',
# 'region': '[[-120, 35], [-119, 35], [-119, 34], [-120, 34]]'
# })
vis_params = {'min': 0, 'max': 3000}
Map.addLayer(image, vis_params, 'SRTM')
# %%
"""
## Display Earth Engine data layers
"""
# %%
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
| 40.40625
| 457
| 0.70959
|
hexsha: 7948d4e45797e1eceb1cdc1ecc9b8a535b5214bb | size: 25,282 | ext: py | lang: Python
max_stars: src/sentry/web/api.py | theatlantic/django-sentry | 0b963478ebed473826591ef88b1ddd4fb5a524e8 | ["BSD-3-Clause"] | count: null | min: null | max: null
max_issues: src/sentry/web/api.py | theatlantic/django-sentry | 0b963478ebed473826591ef88b1ddd4fb5a524e8 | ["BSD-3-Clause"] | count: null | min: null | max: null
max_forks: src/sentry/web/api.py | theatlantic/django-sentry | 0b963478ebed473826591ef88b1ddd4fb5a524e8 | ["BSD-3-Clause"] | count: null | min: null | max: null
content:
"""
sentry.web.views
~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import datetime
import logging
from functools import wraps
from django.contrib import messages
from django.contrib.auth.models import AnonymousUser
from django.core.urlresolvers import reverse
from django.db.models import Sum, Q
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseRedirect
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache, cache_control
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.vary import vary_on_cookie
from django.views.generic.base import View as BaseView
from sentry.constants import (
MEMBER_USER, STATUS_MUTED, STATUS_UNRESOLVED, STATUS_RESOLVED,
EVENTS_PER_PAGE)
from sentry.coreapi import (
project_from_auth_vars, decode_and_decompress_data,
safely_load_json_string, validate_data, insert_data_to_database, APIError,
APIForbidden, extract_auth_vars)
from sentry.exceptions import InvalidData
from sentry.models import (
Group, GroupBookmark, Project, ProjectCountByMinute, TagValue, Activity,
User)
from sentry.plugins import plugins
from sentry.utils import json
from sentry.utils.cache import cache
from sentry.utils.db import has_trending
from sentry.utils.javascript import to_json
from sentry.utils.http import is_valid_origin, get_origins, is_same_domain
from sentry.utils.safe import safe_execute
from sentry.web.decorators import has_access
from sentry.web.frontend.groups import _get_group_list
from sentry.web.helpers import render_to_response
error_logger = logging.getLogger('sentry.errors.api.http')
logger = logging.getLogger('sentry.api.http')
# Transparent 1x1 gif
# See http://probablyprogramming.com/2009/03/15/the-tiniest-gif-ever
PIXEL = 'R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs='.decode('base64')
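# JSON/redirect helper: wraps a view so AJAX callers receive its payload as
# application/json, while regular browsers are redirected back to the
# same-domain referrer (or to the Sentry root if there is none).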
def api(func):
@wraps(func)
def wrapped(request, *args, **kwargs):
data = func(request, *args, **kwargs)
if request.is_ajax():
response = HttpResponse(data)
response['Content-Type'] = 'application/json'
else:
ref = request.META.get('HTTP_REFERER')
if ref is None or not is_same_domain(ref, request.build_absolute_uri()):
ref = reverse('sentry')
return HttpResponseRedirect(ref)
return response
return wrapped
class Auth(object):
def __init__(self, auth_vars):
self.client = auth_vars.get('sentry_client')
self.version = int(float(auth_vars.get('sentry_version')))
self.secret_key = auth_vars.get('sentry_secret')
self.public_key = auth_vars.get('sentry_key')
class APIView(BaseView):
def _get_project_from_id(self, project_id):
if project_id:
if project_id.isdigit():
lookup_kwargs = {'id': int(project_id)}
else:
lookup_kwargs = {'slug': project_id}
try:
return Project.objects.get_from_cache(**lookup_kwargs)
except Project.DoesNotExist:
raise APIError('Invalid project_id: %r' % project_id)
return None
def _parse_header(self, request, project):
try:
auth_vars = extract_auth_vars(request)
except (IndexError, ValueError):
raise APIError('Invalid auth header')
if not auth_vars:
raise APIError('Client/server version mismatch: Unsupported client')
server_version = auth_vars.get('sentry_version', '1.0')
client = auth_vars.get('sentry_client', request.META.get('HTTP_USER_AGENT'))
if server_version not in ('2.0', '3', '4'):
raise APIError('Client/server version mismatch: Unsupported protocol version (%s)' % server_version)
if not client:
raise APIError('Client request error: Missing client version identifier')
return auth_vars
@csrf_exempt
def dispatch(self, request, project_id=None, *args, **kwargs):
try:
origin = self.get_request_origin(request)
response = self._dispatch(request, project_id=project_id, *args, **kwargs)
except Exception:
response = HttpResponse(status=500)
if response.status_code != 200:
# Set X-Sentry-Error as in many cases it is easier to inspect the headers
response['X-Sentry-Error'] = response.content[:200] # safety net on content length
if response.status_code == 500:
log = logger.error
exc_info = True
else:
log = logger.info
exc_info = None
log('status=%s project_id=%s user_id=%s ip=%s agent=%s %s', response.status_code, project_id,
request.user.is_authenticated() and request.user.id or None,
request.META['REMOTE_ADDR'], request.META.get('HTTP_USER_AGENT'),
response['X-Sentry-Error'], extra={
'request': request,
}, exc_info=exc_info)
if origin:
# We allow all origins on errors
response['Access-Control-Allow-Origin'] = '*'
if origin:
response['Access-Control-Allow-Headers'] = 'X-Sentry-Auth, X-Requested-With, Origin, Accept, Content-Type, ' \
'Authentication'
response['Access-Control-Allow-Methods'] = ', '.join(self._allowed_methods())
return response
def get_request_origin(self, request):
"""
Returns either the Origin or Referer value from the request headers.
"""
return request.META.get('HTTP_ORIGIN', request.META.get('HTTP_REFERER'))
def _dispatch(self, request, project_id=None, *args, **kwargs):
request.user = AnonymousUser()
try:
project = self._get_project_from_id(project_id)
except APIError, e:
return HttpResponse(str(e), content_type='text/plain', status=400)
origin = self.get_request_origin(request)
if origin is not None:
if not project:
return HttpResponse('Your client must be upgraded for CORS support.')
elif not is_valid_origin(origin, project):
return HttpResponse('Invalid origin: %r' % origin, content_type='text/plain', status=400)
# XXX: It seems that the OPTIONS call does not always include custom headers
if request.method == 'OPTIONS':
response = self.options(request, project)
else:
try:
auth_vars = self._parse_header(request, project)
except APIError, e:
return HttpResponse(str(e), content_type='text/plain', status=400)
try:
project_, user = project_from_auth_vars(auth_vars)
except APIError, error:
return HttpResponse(unicode(error.msg), status=error.http_status)
else:
if user:
request.user = user
# Legacy API was /api/store/ and the project ID was only available elsewhere
if not project:
if not project_:
return HttpResponse('Unable to identify project', content_type='text/plain', status=400)
project = project_
elif project_ != project:
return HttpResponse('Project ID mismatch', content_type='text/plain', status=400)
auth = Auth(auth_vars)
if auth.version >= 3:
# Version 3 enforces secret key for server side requests
if origin is None and not auth.secret_key:
return HttpResponse('Missing required attribute in authentication header: sentry_secret', status=400)
try:
response = super(APIView, self).dispatch(request, project=project, auth=auth, **kwargs)
except APIError, error:
response = HttpResponse(unicode(error.msg), content_type='text/plain', status=error.http_status)
if origin:
response['Access-Control-Allow-Origin'] = origin
return response
# XXX: backported from Django 1.5
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
def options(self, request, *args, **kwargs):
response = HttpResponse()
response['Allow'] = ', '.join(self._allowed_methods())
response['Content-Length'] = '0'
return response
class StoreView(APIView):
"""
The primary endpoint for storing new events.
This will validate the client's authentication and data, and if
successful pass on the payload to the internal database handler.
Authentication works in three flavors:
1. Explicit signed requests
These are implemented using the documented signed request protocol, and
require an authentication header which is signed using with the project
member's secret key.
2. CORS Secured Requests
Generally used for communications with client-side platforms (such as
JavaScript in the browser), they require a standard header, excluding
the signature and timestamp requirements, and must be listed in the
origins for the given project (or the global origins).
3. Implicit trusted requests
Used by the Sentry core, they are only available from same-domain requests
and do not require any authentication information. They only require that
the user be authenticated, and a project_id be sent in the GET variables.
"""
@never_cache
def post(self, request, project, auth, **kwargs):
data = request.raw_post_data
response_or_event_id = self.process(request, project, auth, data, **kwargs)
if isinstance(response_or_event_id, HttpResponse):
return response_or_event_id
return HttpResponse(json.dumps({
'id': response_or_event_id,
}), content_type='application/json')
@never_cache
def get(self, request, project, auth, **kwargs):
data = request.GET.get('sentry_data', '')
self.process(request, project, auth, data, **kwargs)
        # Return a simple 1x1 gif for browsers so they don't throw a warning
return HttpResponse(PIXEL, 'image/gif')
def process(self, request, project, auth, data, **kwargs):
for plugin in plugins.all():
if safe_execute(plugin.is_rate_limited, project=project):
return HttpResponse('Creation of this event was denied due to rate limiting.', content_type='text/plain', status=429)
result = plugins.first('has_perm', request.user, 'create_event', project)
if result is False:
raise APIForbidden('Creation of this event was blocked')
if not data.startswith('{'):
data = decode_and_decompress_data(data)
data = safely_load_json_string(data)
try:
# mutates data
validate_data(project, data, auth.client)
except InvalidData, e:
raise APIError(u'Invalid data: %s (%s)' % (unicode(e), type(e)))
# mutates data
Group.objects.normalize_event_data(data)
event_id = data['event_id']
# mutates data (strips a lot of context if not queued)
insert_data_to_database(data)
logger.debug('New event from project %s/%s (id=%s)', project.team.slug, project.slug, event_id)
return event_id
@csrf_exempt
@has_access
@never_cache
@api
def poll(request, team, project):
offset = 0
limit = EVENTS_PER_PAGE
response = _get_group_list(
request=request,
project=project,
)
event_list = response['event_list']
event_list = list(event_list[offset:limit])
return to_json(event_list, request)
@csrf_exempt
@has_access(MEMBER_USER)
@never_cache
@api
def resolve(request, team, project):
gid = request.REQUEST.get('gid')
if not gid:
return HttpResponseForbidden()
try:
group = Group.objects.get(pk=gid)
except Group.DoesNotExist:
return HttpResponseForbidden()
now = timezone.now()
happened = Group.objects.filter(
pk=group.pk,
).exclude(status=STATUS_RESOLVED).update(
status=STATUS_RESOLVED,
resolved_at=now,
)
group.status = STATUS_RESOLVED
group.resolved_at = now
if happened:
Activity.objects.create(
project=project,
group=group,
type=Activity.SET_RESOLVED,
user=request.user,
)
return to_json(group, request)
@csrf_exempt
@has_access(MEMBER_USER)
@never_cache
@api
def make_group_public(request, team, project, group_id):
try:
group = Group.objects.get(pk=group_id)
except Group.DoesNotExist:
return HttpResponseForbidden()
happened = group.update(is_public=True)
if happened:
Activity.objects.create(
project=project,
group=group,
type=Activity.SET_PUBLIC,
user=request.user,
)
return to_json(group, request)
@csrf_exempt
@has_access(MEMBER_USER)
@never_cache
@api
def make_group_private(request, team, project, group_id):
try:
group = Group.objects.get(pk=group_id)
except Group.DoesNotExist:
return HttpResponseForbidden()
happened = group.update(is_public=False)
if happened:
Activity.objects.create(
project=project,
group=group,
type=Activity.SET_PRIVATE,
user=request.user,
)
return to_json(group, request)
@csrf_exempt
@has_access(MEMBER_USER)
@never_cache
@api
def resolve_group(request, team, project, group_id):
try:
group = Group.objects.get(pk=group_id)
except Group.DoesNotExist:
return HttpResponseForbidden()
happened = group.update(status=STATUS_RESOLVED)
if happened:
Activity.objects.create(
project=project,
group=group,
type=Activity.SET_RESOLVED,
user=request.user,
)
return to_json(group, request)
@csrf_exempt
@has_access(MEMBER_USER)
@never_cache
@api
def mute_group(request, team, project, group_id):
try:
group = Group.objects.get(pk=group_id)
except Group.DoesNotExist:
return HttpResponseForbidden()
happened = group.update(status=STATUS_MUTED)
if happened:
Activity.objects.create(
project=project,
group=group,
type=Activity.SET_MUTED,
user=request.user,
)
return to_json(group, request)
@csrf_exempt
@has_access(MEMBER_USER)
@never_cache
@api
def unresolve_group(request, team, project, group_id):
try:
group = Group.objects.get(pk=group_id)
except Group.DoesNotExist:
return HttpResponseForbidden()
happened = group.update(status=STATUS_UNRESOLVED)
if happened:
Activity.objects.create(
project=project,
group=group,
type=Activity.SET_UNRESOLVED,
user=request.user,
)
return to_json(group, request)
@csrf_exempt
@has_access(MEMBER_USER)
@never_cache
def remove_group(request, team, project, group_id):
from sentry.tasks.deletion import delete_group
try:
group = Group.objects.get(pk=group_id)
except Group.DoesNotExist:
return HttpResponseForbidden()
delete_group.delay(object_id=group.id)
if request.is_ajax():
response = HttpResponse('{}')
response['Content-Type'] = 'application/json'
else:
messages.add_message(request, messages.SUCCESS,
_('Deletion has been queued and should occur shortly.'))
response = HttpResponseRedirect(reverse('sentry-stream', args=[team.slug, project.slug]))
return response
@csrf_exempt
@has_access
@never_cache
@api
def bookmark(request, team, project):
gid = request.REQUEST.get('gid')
if not gid:
return HttpResponseForbidden()
if not request.user.is_authenticated():
return HttpResponseForbidden()
try:
group = Group.objects.get(pk=gid)
except Group.DoesNotExist:
return HttpResponseForbidden()
gb, created = GroupBookmark.objects.get_or_create(
project=group.project,
user=request.user,
group=group,
)
if not created:
gb.delete()
return to_json(group, request)
@csrf_exempt
@has_access(MEMBER_USER)
@never_cache
def clear(request, team, project):
response = _get_group_list(
request=request,
project=project,
)
# TODO: should we record some kind of global event in Activity?
event_list = response['event_list']
happened = event_list.update(status=STATUS_RESOLVED)
if happened:
Activity.objects.create(
project=project,
type=Activity.SET_RESOLVED,
user=request.user,
)
data = []
response = HttpResponse(json.dumps(data))
response['Content-Type'] = 'application/json'
return response
@vary_on_cookie
@csrf_exempt
@has_access
def chart(request, team=None, project=None):
gid = request.REQUEST.get('gid')
days = int(request.REQUEST.get('days', '90'))
if gid:
try:
group = Group.objects.get(pk=gid)
except Group.DoesNotExist:
return HttpResponseForbidden()
data = Group.objects.get_chart_data(group, max_days=days)
elif project:
data = Project.objects.get_chart_data(project, max_days=days)
elif team:
cache_key = 'api.chart:team=%s,days=%s' % (team.id, days)
data = cache.get(cache_key)
if data is None:
project_list = list(Project.objects.filter(team=team))
data = Project.objects.get_chart_data_for_group(project_list, max_days=days)
cache.set(cache_key, data, 300)
else:
cache_key = 'api.chart:user=%s,days=%s' % (request.user.id, days)
data = cache.get(cache_key)
if data is None:
project_list = Project.objects.get_for_user(request.user)
data = Project.objects.get_chart_data_for_group(project_list, max_days=days)
cache.set(cache_key, data, 300)
response = HttpResponse(json.dumps(data))
response['Content-Type'] = 'application/json'
return response
@never_cache
@csrf_exempt
@has_access
def get_group_trends(request, team=None, project=None):
minutes = int(request.REQUEST.get('minutes', 15))
limit = min(100, int(request.REQUEST.get('limit', 10)))
if not team and project:
project_list = [project]
else:
project_list = Project.objects.get_for_user(request.user, team=team)
project_dict = dict((p.id, p) for p in project_list)
base_qs = Group.objects.filter(
project__in=project_list,
status=0,
)
if has_trending():
group_list = list(Group.objects.get_accelerated(project_dict, base_qs, minutes=(
minutes
))[:limit])
else:
cutoff = datetime.timedelta(minutes=minutes)
cutoff_dt = timezone.now() - cutoff
group_list = list(base_qs.filter(
status=STATUS_UNRESOLVED,
last_seen__gte=cutoff_dt
).extra(select={'sort_value': 'score'}).order_by('-score')[:limit])
for group in group_list:
group._project_cache = project_dict.get(group.project_id)
data = to_json(group_list, request)
response = HttpResponse(data)
response['Content-Type'] = 'application/json'
return response
@never_cache
@csrf_exempt
@has_access
def get_new_groups(request, team=None, project=None):
minutes = int(request.REQUEST.get('minutes', 15))
limit = min(100, int(request.REQUEST.get('limit', 10)))
if not team and project:
project_list = [project]
else:
project_list = Project.objects.get_for_user(request.user, team=team)
project_dict = dict((p.id, p) for p in project_list)
cutoff = datetime.timedelta(minutes=minutes)
cutoff_dt = timezone.now() - cutoff
group_list = list(Group.objects.filter(
project__in=project_dict.keys(),
status=STATUS_UNRESOLVED,
active_at__gte=cutoff_dt,
).extra(select={'sort_value': 'score'}).order_by('-score', '-first_seen')[:limit])
for group in group_list:
group._project_cache = project_dict.get(group.project_id)
data = to_json(group_list, request)
response = HttpResponse(data)
response['Content-Type'] = 'application/json'
return response
@never_cache
@csrf_exempt
@has_access
def get_resolved_groups(request, team=None, project=None):
minutes = int(request.REQUEST.get('minutes', 15))
limit = min(100, int(request.REQUEST.get('limit', 10)))
if not team and project:
project_list = [project]
else:
project_list = Project.objects.get_for_user(request.user, team=team)
project_dict = dict((p.id, p) for p in project_list)
cutoff = datetime.timedelta(minutes=minutes)
cutoff_dt = timezone.now() - cutoff
group_list = list(Group.objects.filter(
project__in=project_list,
status=STATUS_RESOLVED,
resolved_at__gte=cutoff_dt,
).order_by('-score')[:limit])
for group in group_list:
group._project_cache = project_dict.get(group.project_id)
data = to_json(group_list, request)
response = HttpResponse(json.dumps(data))
response['Content-Type'] = 'application/json'
return response
@never_cache
@csrf_exempt
@has_access
def get_stats(request, team=None, project=None):
minutes = int(request.REQUEST.get('minutes', 15))
if not team and project:
project_list = [project]
else:
project_list = Project.objects.get_for_user(request.user, team=team)
cutoff = datetime.timedelta(minutes=minutes)
cutoff_dt = timezone.now() - cutoff
num_events = ProjectCountByMinute.objects.filter(
project__in=project_list,
date__gte=cutoff_dt,
).aggregate(t=Sum('times_seen'))['t'] or 0
# XXX: This is too slow if large amounts of groups are resolved
num_resolved = Group.objects.filter(
project__in=project_list,
status=STATUS_RESOLVED,
resolved_at__gte=cutoff_dt,
).aggregate(t=Sum('times_seen'))['t'] or 0
data = {
'events': num_events,
'resolved': num_resolved,
}
response = HttpResponse(json.dumps(data))
response['Content-Type'] = 'application/json'
return response
@never_cache
@csrf_exempt
@has_access
def search_tags(request, team, project):
limit = min(100, int(request.GET.get('limit', 10)))
name = request.GET['name']
query = request.GET['query']
results = list(TagValue.objects.filter(
project=project,
key=name,
value__icontains=query,
).values_list('value', flat=True).order_by('value')[:limit])
response = HttpResponse(json.dumps({
'results': results,
'query': query,
}))
response['Content-Type'] = 'application/json'
return response
@never_cache
@csrf_exempt
@has_access
def search_users(request, team):
limit = min(100, int(request.GET.get('limit', 10)))
query = request.GET['query']
results = list(User.objects.filter(
Q(email__istartswith=query) | Q(first_name__istartswith=query) | Q(username__istartswith=query),
).filter(
Q(team_memberships=team) | Q(accessgroup__team=team),
).distinct().order_by('first_name', 'email').values('id', 'username', 'first_name', 'email')[:limit])
response = HttpResponse(json.dumps({
'results': results,
'query': query,
}))
response['Content-Type'] = 'application/json'
return response
@never_cache
@csrf_exempt
@has_access
def search_projects(request, team):
limit = min(100, int(request.GET.get('limit', 10)))
query = request.GET['query']
results = list(Project.objects.filter(
Q(name__istartswith=query) | Q(slug__istartswith=query),
).filter(team=team).distinct().order_by('name', 'slug').values('id', 'name', 'slug')[:limit])
response = HttpResponse(json.dumps({
'results': results,
'query': query,
}))
response['Content-Type'] = 'application/json'
return response
@cache_control(max_age=3600, public=True)
def crossdomain_xml_index(request):
response = render_to_response('sentry/crossdomain_index.xml')
response['Content-Type'] = 'application/xml'
return response
@cache_control(max_age=60)
def crossdomain_xml(request, project_id):
if project_id.isdigit():
lookup = {'id': project_id}
else:
lookup = {'slug': project_id}
try:
project = Project.objects.get_from_cache(**lookup)
except Project.DoesNotExist:
return HttpResponse(status=404)
origin_list = get_origins(project)
if origin_list == '*':
origin_list = [origin_list]
response = render_to_response('sentry/crossdomain.xml', {
'origin_list': origin_list
})
response['Content-Type'] = 'application/xml'
return response
avg_line_length: 30.644848 | max_line_length: 133 | alphanum_fraction: 0.655644
hexsha: 7948d4fefebf6f5962c932a7f2962e8b9bec4242 | size: 6,861 | ext: py | lang: Python
max_stars: test/programytest/storage/test_engine.py | cen-ai/program-y | a753667638147544c54dbebd9f1c8f9ae7f2159e | ["MIT"] | count: 5 | min: 2018-08-21T00:13:45.000Z | max: 2018-09-01T20:00:55.000Z
max_issues: test/programytest/storage/test_engine.py | cen-ai/program-y | a753667638147544c54dbebd9f1c8f9ae7f2159e | ["MIT"] | count: 1 | min: 2018-09-12T18:30:17.000Z | max: 2018-09-12T18:30:17.000Z
max_forks: test/programytest/storage/test_engine.py | cen-ai/program-y | a753667638147544c54dbebd9f1c8f9ae7f2159e | ["MIT"] | count: 5 | min: 2018-08-21T00:08:36.000Z | max: 2018-09-23T06:11:04.000Z
content:
import unittest
import unittest.mock
from programy.storage.engine import StorageEngine
class StorageEngineTests(unittest.TestCase):
    def test_initialise_with_config(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
self.assertIsNotNone(engine)
self.assertIsNotNone(engine.configuration)
self.assertEqual(engine.configuration, config)
def test_user_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.user_store()
def test_linked_account_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.linked_account_store()
def test_link_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.link_store()
def test_category_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.category_store()
def test_errors_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.errors_store()
def test_duplicates_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.duplicates_store()
def test_learnf_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.learnf_store()
def test_conversation_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.conversation_store()
def test_sets_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.sets_store()
def test_maps_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.maps_store()
def test_rdf_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.rdf_store()
def test_denormal_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.denormal_store()
def test_normal_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.normal_store()
def test_gender_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.gender_store()
def test_person_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.person_store()
def test_person2_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.person2_store()
def test_regex_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.regex_store()
def test_property_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.property_store()
def test_variables_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.variables_store()
def test_twitter_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.twitter_store()
def test_spelling_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.spelling_store()
def test_license_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.license_store()
def test_pattern_nodes_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.pattern_nodes_store()
def test_template_nodes_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.template_nodes_store()
def test_binaries_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.binaries_store()
def test_braintree_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.braintree_store()
def test_preprocessors_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.preprocessors_store()
def test_postprocessors_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.postprocessors_store()
def test_usergroups_store_not_implemented(self):
config = unittest.mock.Mock()
engine = StorageEngine(config)
with self.assertRaises(NotImplementedError):
_ = engine.usergroups_store()
avg_line_length: 36.110526 | max_line_length: 63 | alphanum_fraction: 0.676724
hexsha: 7948d50740e40d0279674d0c811d4f275d6705cb | size: 864 | ext: py | lang: Python
max_stars: python/luhn/luhn.py | rob93c/Exercism | 51e38b6559a5650a56d81e733be22b7ee349cedd | ["MIT"] | count: null | min: null | max: null
max_issues: python/luhn/luhn.py | rob93c/Exercism | 51e38b6559a5650a56d81e733be22b7ee349cedd | ["MIT"] | count: null | min: null | max: null
max_forks: python/luhn/luhn.py | rob93c/Exercism | 51e38b6559a5650a56d81e733be22b7ee349cedd | ["MIT"] | count: 1 | min: 2019-03-19T12:18:39.000Z | max: 2019-03-19T12:18:39.000Z
content:
class Luhn(object):
def __init__(self, card_num):
self.card_num = self.unspace(card_num)
def is_valid(self) -> bool:
if len(self.card_num) <= 1 or not self.card_num.isdecimal():
return False
else:
return self.total(self.double(self.card_num)) % 10 == 0
    @staticmethod
    def double(card_num: str) -> str:
        def double_digit(n: int) -> str:
            # Double the digit; subtract 9 if the result exceeds one digit.
            return str(2 * n) if 2 * n < 10 else str(2 * n - 9)
        return ''.join(double_digit(int(char))
                       if n % 2 == 1 else char
                       for n, char in enumerate(card_num[::-1]))[::-1]
@staticmethod
def total(string: str) -> int:
return sum(int(char)
for char in string)
@staticmethod
def unspace(card_num: str) -> str:
return ''.join(part.strip()
for part in card_num.split())
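
if __name__ == '__main__':
    # Minimal usage sketch; the card numbers are the standard valid/invalid
    # examples from the Exercism Luhn exercise (an assumption, not part of
    # this solution file).
    assert Luhn("4539 3195 0343 6467").is_valid()
    assert not Luhn("8273 1232 7352 0569").is_valid()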
avg_line_length: 30.857143 | max_line_length: 70 | alphanum_fraction: 0.528935
hexsha: 7948d59e6e82bb34fa87bac9152091d685c2e3c3 | size: 3,110 | ext: py | lang: Python
max_stars: non_semantic_speech_benchmark/eval_embedding/finetune/train_keras_test.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | ["Apache-2.0"] | count: 23,901 | min: 2018-10-04T19:48:53.000Z | max: 2022-03-31T21:27:42.000Z
max_issues: non_semantic_speech_benchmark/eval_embedding/finetune/train_keras_test.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | ["Apache-2.0"] | count: 891 | min: 2018-11-10T06:16:13.000Z | max: 2022-03-31T10:42:34.000Z
max_forks: non_semantic_speech_benchmark/eval_embedding/finetune/train_keras_test.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | ["Apache-2.0"] | count: 6,047 | min: 2018-10-12T06:31:02.000Z | max: 2022-03-31T13:59:28.000Z
content:
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for non_semantic_speech_benchmark.eval_embedding.keras.train_keras."""
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import mock
import tensorflow as tf
from non_semantic_speech_benchmark.eval_embedding.finetune import train_keras
def _get_data(*args, **kwargs):
del args
assert 'samples_key' in kwargs
assert 'min_length' in kwargs
assert 'batch_size' in kwargs
assert 'label_list' in kwargs
bs = kwargs['batch_size']
samples = tf.zeros((bs, 32000), tf.float32)
labels = tf.zeros([bs], tf.int32)
labels_onehot = tf.one_hot(labels, len(kwargs['label_list']))
return tf.data.Dataset.from_tensors((samples, labels_onehot)).repeat()
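# Note: `_get_data` above is a stand-in data loader; the mock.patch.object
# decorator on test_full_flow below swaps it in for the real
# train_keras.get_data.get_data, so the full flow trains on constant
# zero-valued batches instead of reading files.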
class TrainKerasTest(parameterized.TestCase):
@parameterized.parameters(
{'num_clusters': 0, 'alpha_init': 0},
{'num_clusters': 4, 'alpha_init': 0},
{'num_clusters': 0, 'alpha_init': 1.0},
)
def test_get_model(self, num_clusters, alpha_init):
num_classes = 4
batched_samples = tf.zeros([3, 20000])
y_onehot = tf.one_hot([0, 1, 2], num_classes)
model = train_keras.models.get_keras_model(
num_classes, input_length=20000, use_batchnorm=True,
num_clusters=num_clusters, alpha_init=alpha_init)
loss_obj = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
opt = tf.keras.optimizers.Adam()
train_loss = tf.keras.metrics.Mean()
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
summary_writer = tf.summary.create_file_writer(
absltest.get_default_test_tmpdir())
train_step = train_keras.get_train_step(
model, loss_obj, opt, train_loss, train_accuracy, summary_writer)
gstep = opt.iterations
train_step(batched_samples, y_onehot, gstep)
self.assertEqual(1, gstep)
train_step(batched_samples, y_onehot, gstep)
self.assertEqual(2, gstep)
@mock.patch.object(train_keras.get_data, 'get_data', new=_get_data)
@flagsaver.flagsaver
def test_full_flow(self):
flags.FLAGS.file_pattern = 'dummy'
flags.FLAGS.shuffle_buffer_size = 4
flags.FLAGS.samples_key = 'audio'
flags.FLAGS.nc = 2
flags.FLAGS.label_key = 'emotion'
flags.FLAGS.label_list = ['no', 'yes']
flags.FLAGS.logdir = absltest.get_default_test_tmpdir()
train_keras.train_and_report(debug=True)
if __name__ == '__main__':
tf.compat.v2.enable_v2_behavior()
assert tf.executing_eagerly()
absltest.main()
avg_line_length: 34.555556 | max_line_length: 79 | alphanum_fraction: 0.73955
hexsha: 7948d5ed6c33fdc184fb4bcb78cce77776ae6273 | size: 3,989 | ext: py | lang: Python
max_stars: pages/views.py | JSotres/Academic-Group-Django-Website | 38af959cb6a596642fc11c522b28224cc733e89a | ["MIT"] | count: 5 | min: 2020-10-26T20:21:50.000Z | max: 2021-12-14T11:12:39.000Z
max_issues: pages/views.py | JSotres/Academic-Group-Django-Website | 38af959cb6a596642fc11c522b28224cc733e89a | ["MIT"] | count: 12 | min: 2020-01-13T00:57:14.000Z | max: 2022-03-12T00:11:34.000Z
max_forks: pages/views.py | JSotres/Academic-Group-Django-Website | 38af959cb6a596642fc11c522b28224cc733e89a | ["MIT"] | count: 1 | min: 2021-05-13T18:27:11.000Z | max: 2021-05-13T18:27:11.000Z
content:
from django.shortcuts import render
from django.views.generic import TemplateView, CreateView, ListView, DetailView
from .models import ContactRequest, Publications, Member, GroupInformation, ResearchField
class HomePageView(TemplateView):
template_name = 'home.html'
def group_information_list(self):
return GroupInformation.objects.get(pk=1)
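# Note: Django's ContextMixin exposes the view instance to templates as
# `view`, so helper methods like group_information_list() above are callable
# from templates, e.g. {{ view.group_information_list }} (usage assumed from
# the template names; the templates themselves are not in this file).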
class PublicationsListPageView(ListView):
model = Publications
template_name = 'publications.html'
def group_information_list(self):
return GroupInformation.objects.get(pk=1)
def paperslists(self):
return Publications.objects.filter(
publication_type='Paper').order_by('-year')
def reviewslists(self):
return Publications.objects.filter(
publication_type='Review').order_by('-year')
def chapterslists(self):
return Publications.objects.filter(
publication_type='Chapter').order_by('-year')
def proceedingslists(self):
return Publications.objects.filter(
publication_type='Proceeding').order_by('-year')
class MembersPageView(TemplateView):
template_name = 'members.html'
def group_information_list(self):
return GroupInformation.objects.get(pk=1)
def currentmemberslist(self):
return Member.objects.filter(
final_year=0)
def currentPI(self):
return GroupInformation.objects.get(pk=1)
def currentpostdocs(self):
return Member.objects.filter(final_year=0, position='Postdoc')
def currentphds(self):
return Member.objects.filter(final_year=0, position='PhD')
def currenttechnicians(self):
return Member.objects.filter(final_year=0, position='Technician')
def currentmasterstudents(self):
return Member.objects.filter(final_year=0, position='MSc')
def currentguestphdstudents(self):
return Member.objects.filter(final_year=0, position='Guest_PhD')
def currentgueststudents(self):
return Member.objects.filter(final_year=0, position='Guest_Student')
def previousmemberslist(self):
return Member.objects.exclude(final_year=0).order_by('-initial_year')
def previouspostdocs(self):
return Member.objects.exclude(final_year=0).filter(position='Postdoc').order_by('-initial_year')
def previousphds(self):
return Member.objects.exclude(final_year=0).filter(position='PhD').order_by('-initial_year')
def previoustechnicians(self):
return Member.objects.exclude(final_year=0).filter(position='Technician').order_by('-initial_year')
def previousmasterstudents(self):
return Member.objects.exclude(final_year=0).filter(position='MSc').order_by('-initial_year')
def previousguestphdstudents(self):
return Member.objects.exclude(final_year=0).filter(position='Guest_PhD').order_by('-initial_year')
def previousgueststudents(self):
return Member.objects.exclude(final_year=0).filter(position='Guest_Student').order_by('-initial_year')
class MembersDetailView(DetailView):
model = Member
template_name = 'memberdetail.html'
def group_information_list(self):
return GroupInformation.objects.get(pk=1)
class ResearchPageView(ListView):
model = ResearchField
template_name = 'research.html'
def group_information_list(self):
return GroupInformation.objects.get(pk=1)
class ResearchFieldView(DetailView):
model = ResearchField
template_name = 'researchfield.html'
def group_information_list(self):
return GroupInformation.objects.get(pk=1)
class ContactPageView(CreateView):
model = ContactRequest
template_name = 'contact.html'
fields = '__all__'
def group_information_list(self):
return GroupInformation.objects.get(pk=1)
class RequestReceivedPageView(TemplateView):
template_name = 'request_received.html'
def group_information_list(self):
return GroupInformation.objects.get(pk=1)
avg_line_length: 31.164063 | max_line_length: 110 | alphanum_fraction: 0.723239
hexsha: 7948d7660fbc3063fa8a7881ffd68c6a610f27e6 | size: 7,395 | ext: py | lang: Python
max_stars: parl/remote/grpc_heartbeat/tests/heartbeat_server_arguments_test.py | lp2333/PARL | e4bde1f5b7e69c5f8d3ee3a90a647dfe12204bd3 | ["ECL-2.0", "Apache-2.0"] | count: 3,172 | min: 2018-05-22T02:02:29.000Z | max: 2022-03-31T09:14:56.000Z
max_issues: parl/remote/grpc_heartbeat/tests/heartbeat_server_arguments_test.py | BKBK00/PARL | f508bc6085420431b504441c7ff129e64826603e | ["Apache-2.0"] | count: 422 | min: 2018-05-17T16:58:45.000Z | max: 2022-03-31T02:03:25.000Z
max_forks: parl/remote/grpc_heartbeat/tests/heartbeat_server_arguments_test.py | BKBK00/PARL | f508bc6085420431b504441c7ff129e64826603e | ["Apache-2.0"] | count: 794 | min: 2018-05-21T18:33:19.000Z | max: 2022-03-30T13:38:09.000Z
content:
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
from parl.remote.grpc_heartbeat import HeartbeatServerThread
from parl.remote.grpc_heartbeat import HeartbeatClientThread
from parl.remote import remote_constants
class TestHeartbeatServerArguments(unittest.TestCase):
def setUp(self):
self.server_exited = False
self.client_exited = False
def test_heartbeat_server_exit_with_args(self):
arg1_value = 10
def server_exit_func(arg1):
print("exit heartbeat server")
assert arg1 == arg1_value
self.server_exited = True
heartbeat_server_thread = HeartbeatServerThread(
server_exit_func, exit_func_args=(arg1_value, ))
heartbeat_server_thread.start()
server_address = heartbeat_server_thread.get_address()
def client_exit_func():
print("exit heartbeat client")
self.client_exited = True
heartbeat_client_thread = HeartbeatClientThread(
server_address, client_exit_func)
heartbeat_client_thread.start()
time.sleep(remote_constants.HEARTBEAT_RCVTIMEO_S * 2)
# check server and client are still alive after HEARTBEAT_RCVTIMEO_S * 2
assert heartbeat_server_thread.is_alive()
assert heartbeat_client_thread.is_alive()
heartbeat_server_thread.exit() # manually exit the server
# wait for threads exiting
for _ in range(10):
if not heartbeat_server_thread.is_alive(
) and not heartbeat_client_thread.is_alive():
break
time.sleep(remote_constants.HEARTBEAT_INTERVAL_S)
# check heartbeat server and client are exited
assert not heartbeat_server_thread.is_alive()
assert not heartbeat_client_thread.is_alive()
assert self.server_exited == True
assert self.client_exited == True
def test_heartbeat_server_exit_with_wrong_args(self):
arg1_value = 10
def server_exit_func(arg1):
print("exit heartbeat server")
assert arg1 == arg1_value
self.server_exited = True
heartbeat_server_thread = HeartbeatServerThread(
server_exit_func, exit_func_args=(arg1_value, "wrong_args"))
heartbeat_server_thread.start()
server_address = heartbeat_server_thread.get_address()
def client_exit_func():
print("exit heartbeat client")
self.client_exited = True
heartbeat_client_thread = HeartbeatClientThread(
server_address, client_exit_func)
heartbeat_client_thread.start()
time.sleep(remote_constants.HEARTBEAT_RCVTIMEO_S * 2)
# check server and client are still alive after HEARTBEAT_RCVTIMEO_S * 2
assert heartbeat_server_thread.is_alive()
assert heartbeat_client_thread.is_alive()
heartbeat_server_thread.exit() # manually exit the server
# will raise an exception in the backend thread
# wait for threads exiting
for _ in range(10):
if not heartbeat_server_thread.is_alive(
) and not heartbeat_client_thread.is_alive():
break
time.sleep(remote_constants.HEARTBEAT_INTERVAL_S)
# check heartbeat server and client are exited
assert not heartbeat_server_thread.is_alive()
assert not heartbeat_client_thread.is_alive()
assert self.server_exited == False # the heartbeat server cannot exit normally
assert self.client_exited == True
def test_heartbeat_server_exit_with_kwargs(self):
arg1_value = 10
def server_exit_func(arg1):
print("exit heartbeat server")
assert arg1 == arg1_value
self.server_exited = True
heartbeat_server_thread = HeartbeatServerThread(
server_exit_func, exit_func_kwargs={"arg1": arg1_value})
heartbeat_server_thread.start()
server_address = heartbeat_server_thread.get_address()
def client_exit_func():
print("exit heartbeat client")
self.client_exited = True
heartbeat_client_thread = HeartbeatClientThread(
server_address, client_exit_func)
heartbeat_client_thread.start()
time.sleep(remote_constants.HEARTBEAT_RCVTIMEO_S * 2)
# check server and client are still alive after HEARTBEAT_RCVTIMEO_S * 2
assert heartbeat_server_thread.is_alive()
assert heartbeat_client_thread.is_alive()
heartbeat_server_thread.exit() # manually exit the server
# wait for threads exiting
for _ in range(10):
if not heartbeat_server_thread.is_alive(
) and not heartbeat_client_thread.is_alive():
break
time.sleep(remote_constants.HEARTBEAT_INTERVAL_S)
# check heartbeat server and client are exited
assert not heartbeat_server_thread.is_alive()
assert not heartbeat_client_thread.is_alive()
assert self.server_exited == True
assert self.client_exited == True
def test_heartbeat_server_exit_with_wrong_kwargs(self):
arg1_value = 10
def server_exit_func(arg1):
print("exit heartbeat server")
assert arg1 == arg1_value
self.server_exited = True
heartbeat_server_thread = HeartbeatServerThread(
server_exit_func, exit_func_kwargs={"wrong_args": arg1_value})
heartbeat_server_thread.start()
server_address = heartbeat_server_thread.get_address()
def client_exit_func():
print("exit heartbeat client")
self.client_exited = True
heartbeat_client_thread = HeartbeatClientThread(
server_address, client_exit_func)
heartbeat_client_thread.start()
time.sleep(remote_constants.HEARTBEAT_RCVTIMEO_S * 2)
# check server and client are still alive after HEARTBEAT_RCVTIMEO_S * 2
assert heartbeat_server_thread.is_alive()
assert heartbeat_client_thread.is_alive()
heartbeat_server_thread.exit() # manually exit the server
# will raise an exception in the backend thread
# wait for threads exiting
for _ in range(10):
if not heartbeat_server_thread.is_alive(
) and not heartbeat_client_thread.is_alive():
break
time.sleep(remote_constants.HEARTBEAT_INTERVAL_S)
# check heartbeat server and client are exited
assert not heartbeat_server_thread.is_alive()
assert not heartbeat_client_thread.is_alive()
assert self.server_exited == False # the heartbeat server cannot exit normally
assert self.client_exited == True
if __name__ == '__main__':
unittest.main()
avg_line_length: 35.552885 | max_line_length: 87 | alphanum_fraction: 0.684922
hexsha: 7948d7a25e3f02b0c6140d8932e424fccf94811e | size: 3,247 | ext: py | lang: Python
max_stars: src/python/pants/base/run_info.py | hythloday/pants | 107e9b0957f6949ac4bd535fbef8d2d8cba05c5c | ["Apache-2.0"] | count: 11 | min: 2015-01-20T01:39:41.000Z | max: 2019-08-08T07:27:44.000Z
max_issues: src/python/pants/base/run_info.py | hythloday/pants | 107e9b0957f6949ac4bd535fbef8d2d8cba05c5c | ["Apache-2.0"] | count: 1 | min: 2019-08-21T07:29:26.000Z | max: 2019-08-21T07:29:26.000Z
max_forks: src/python/pants/base/run_info.py | fakeNetflix/square-repo-pants | 28a018c7f47900aec4f576c81a52e0e4b41d9fec | ["Apache-2.0"] | count: 5 | min: 2015-03-30T02:46:53.000Z | max: 2018-03-08T20:10:43.000Z
content:
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import getpass
import os
import re
import socket
import time
from pants.base.build_environment import get_buildroot, get_scm
from pants.util.dirutil import safe_mkdir_for
class RunInfo(object):
"""A little plaintext file containing very basic info about a pants run.
Can only be appended to, never edited.
"""
@classmethod
def dir(cls, config):
"""Returns the configured base directory run info files are stored under."""
# TODO(John Sirois): This is centralized, but in an awkward location. Isolate RunInfo reading
# and writing in 1 package or class that could naturally know this location and synthesize
# info_file names.
return config.getdefault('info_dir',
default=os.path.join(config.getdefault('pants_workdir'), 'runs'))
def __init__(self, info_file):
self._info_file = info_file
safe_mkdir_for(self._info_file)
self._info = {}
if os.path.exists(self._info_file):
with open(self._info_file, 'r') as infile:
info = infile.read()
for m in re.finditer("""^([^:]+):(.*)$""", info, re.MULTILINE):
self._info[m.group(1).strip()] = m.group(2).strip()
def path(self):
return self._info_file
def get_info(self, key):
return self._info.get(key, None)
def __getitem__(self, key):
ret = self.get_info(key)
if ret is None:
raise KeyError(key)
return ret
def get_as_dict(self):
return self._info.copy()
def add_info(self, key, val):
"""Adds the given info and returns a dict composed of just this added info."""
return self.add_infos((key, val))
def add_infos(self, *keyvals):
"""Adds the given info and returns a dict composed of just this added info."""
infos = dict(keyvals)
with open(self._info_file, 'a') as outfile:
for key, val in infos.items():
key = key.strip()
val = str(val).strip()
if ':' in key:
          raise Exception('info key must not contain a colon')
outfile.write('%s: %s\n' % (key, val))
self._info[key] = val
return infos
def add_basic_info(self, run_id, timestamp):
"""Adds basic build info and returns a dict composed of just this added info."""
datetime = time.strftime('%A %b %d, %Y %H:%M:%S', time.localtime(timestamp))
user = getpass.getuser()
machine = socket.gethostname()
path = get_buildroot()
return self.add_infos(('id', run_id), ('timestamp', timestamp), ('datetime', datetime),
('user', user), ('machine', machine), ('path', path))
def add_scm_info(self):
"""Adds SCM-related info and returns a dict composed of just this added info."""
scm = get_scm()
if scm:
revision = scm.commit_id
tag = scm.tag_name or 'none'
branch = scm.branch_name or revision
else:
revision, tag, branch = 'none', 'none', 'none'
return self.add_infos(('revision', revision), ('tag', tag), ('branch', branch))
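
# Minimal usage sketch (hypothetical path and run id; the file format is the
# append-only "key: value" lines described in the class docstring):
#   info = RunInfo('/tmp/pants.d/runs/info')
#   info.add_basic_info('pants_run_2014_01_01_00_00_00', time.time())
#   print(info['id'])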
avg_line_length: 34.542553 | max_line_length: 98 | alphanum_fraction: 0.655066
hexsha: 7948d7caab6e3c864e4d710c9262e62a6495d857 | size: 2,760 | ext: py | lang: Python
max_stars: model_zoo/official/nlp/cpm/src/lr_schedule.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | ["Apache-2.0"] | count: 1 | min: 2021-07-03T06:52:20.000Z | max: 2021-07-03T06:52:20.000Z
max_issues: model_zoo/official/nlp/cpm/src/lr_schedule.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | ["Apache-2.0"] | count: null | min: null | max: null
max_forks: model_zoo/official/nlp/cpm/src/lr_schedule.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | ["Apache-2.0"] | count: null | min: null | max: null
content:
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Learning rate schedule."""
import numpy as np
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from mindspore.common import dtype as mstype
from mindspore.nn.learning_rate_schedule import LearningRateSchedule, WarmUpLR
class DecayLR(LearningRateSchedule):
"""
Implements of decay learning rate scheduler.
Args:
learning_rate (float): Initial learning rate.
warmup_steps (int): Warmup steps.
end_steps (int): A value used to calculate decayed learning rate.
Returns:
np.ndarray, learning rate of each step.
"""
def __init__(self, learning_rate, warmup_steps, end_iter):
super(DecayLR, self).__init__()
self.learning_rate = learning_rate
self.warmup_steps = warmup_steps
self.end_iter = end_iter
self.cast = P.Cast()
def construct(self, global_step):
warmup_percent = self.cast((self.end_iter - (global_step - self.warmup_steps)), mstype.float32) / self.end_iter
return self.learning_rate * warmup_percent
class CPMLearningRate(LearningRateSchedule):
"""
    Implementation of a warmup-polynomial decay learning rate scheduler.
Args:
learning_rate (float): The initial value of learning rate.
warmup_steps (int): The warm up steps of learning rate.
end_steps (int): A value used to calculate decayed learning rate.
Returns:
Tensor. The learning rate value for the current step.
"""
def __init__(self, learning_rate, warmup_steps, end_steps):
super(CPMLearningRate, self).__init__()
self.warmup_lr = WarmUpLR(learning_rate, warmup_steps)
self.decay_lr = DecayLR(learning_rate, warmup_steps, end_steps)
self.warmup_steps = Tensor(np.array([warmup_steps]).astype(np.float32))
def construct(self, global_step):
if global_step < self.warmup_steps:
lr = self.warmup_lr(global_step)
else:
lr = self.decay_lr(global_step)
return lr
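
# Minimal usage sketch (assumed hyperparameter values; MindSpore optimizers
# accept a LearningRateSchedule instance as their learning_rate argument):
#   lr = CPMLearningRate(learning_rate=1e-4, warmup_steps=1000, end_steps=10000)
#   optimizer = mindspore.nn.Adam(params, learning_rate=lr)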
avg_line_length: 37.297297 | max_line_length: 120 | alphanum_fraction: 0.671377
hexsha: 7948d7d91f08a9c1e683cd6e316f4757bf85c0a1 | size: 4,644 | ext: py | lang: Python
max_stars: apps/dash-web-trader/env/Lib/site-packages/dash_html_components/Acronym.py | alzo425/dash-sample-apps | d3e9f521a3bc2b8d39ed2922838ad35b9b17beb0 | ["MIT"] | count: 1 | min: 2020-03-01T04:38:24.000Z | max: 2020-03-01T04:38:24.000Z
max_issues: apps/dash-web-trader/env/Lib/site-packages/dash_html_components/Acronym.py | alzo425/dash-sample-apps | d3e9f521a3bc2b8d39ed2922838ad35b9b17beb0 | ["MIT"] | count: null | min: null | max: null
max_forks: apps/dash-web-trader/env/Lib/site-packages/dash_html_components/Acronym.py | alzo425/dash-sample-apps | d3e9f521a3bc2b8d39ed2922838ad35b9b17beb0 | ["MIT"] | count: 17 | min: 2019-11-21T14:11:29.000Z | max: 2019-11-21T15:26:23.000Z
content:
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Acronym(Component):
"""A Acronym component.
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional): The children of this component
- id (string; optional): The ID of this component, used to identify dash components
in callbacks. The ID needs to be unique across all of the
components in an app.
- n_clicks (number; optional): An integer that represents the number of times
that this element has been clicked on.
- n_clicks_timestamp (number; optional): An integer that represents the time (in ms since 1970)
at which n_clicks changed. This can be used to tell
which button was changed most recently.
- key (string; optional): A unique identifier for the component, used to improve
performance by React.js while rendering components
See https://reactjs.org/docs/lists-and-keys.html for more info
- role (string; optional): The ARIA role attribute
- data-* (string; optional): A wildcard data attribute
- aria-* (string; optional): A wildcard aria attribute
- accessKey (string; optional): Defines a keyboard shortcut to activate or add focus to the element.
- className (string; optional): Often used with CSS to style elements with common properties.
- contentEditable (string; optional): Indicates whether the element's content is editable.
- contextMenu (string; optional): Defines the ID of a <menu> element which will serve as the element's context menu.
- dir (string; optional): Defines the text direction. Allowed values are ltr (Left-To-Right) or rtl (Right-To-Left)
- draggable (string; optional): Defines whether the element can be dragged.
- hidden (a value equal to: 'hidden', 'HIDDEN' | boolean; optional): Prevents rendering of given element, while keeping child elements, e.g. script elements, active.
- lang (string; optional): Defines the language used in the element.
- spellCheck (string; optional): Indicates whether spell checking is allowed for the element.
- style (dict; optional): Defines CSS styles which will override styles previously set.
- tabIndex (string; optional): Overrides the browser's default tab order and follows the one specified instead.
- title (string; optional): Text to be displayed in a tooltip when hovering over the element.
- loading_state (optional): Object that holds the loading state object coming from dash-renderer. loading_state has the following type: dict containing keys 'is_loading', 'prop_name', 'component_name'.
Those keys have the following types:
- is_loading (boolean; optional): Determines if the component is loading or not
- prop_name (string; optional): Holds which property is loading
- component_name (string; optional): Holds the name of the component that is loading"""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, n_clicks=Component.UNDEFINED, n_clicks_timestamp=Component.UNDEFINED, key=Component.UNDEFINED, role=Component.UNDEFINED, accessKey=Component.UNDEFINED, className=Component.UNDEFINED, contentEditable=Component.UNDEFINED, contextMenu=Component.UNDEFINED, dir=Component.UNDEFINED, draggable=Component.UNDEFINED, hidden=Component.UNDEFINED, lang=Component.UNDEFINED, spellCheck=Component.UNDEFINED, style=Component.UNDEFINED, tabIndex=Component.UNDEFINED, title=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'n_clicks', 'n_clicks_timestamp', 'key', 'role', 'data-*', 'aria-*', 'accessKey', 'className', 'contentEditable', 'contextMenu', 'dir', 'draggable', 'hidden', 'lang', 'spellCheck', 'style', 'tabIndex', 'title', 'loading_state']
self._type = 'Acronym'
self._namespace = 'dash_html_components'
self._valid_wildcard_attributes = ['data-', 'aria-']
self.available_properties = ['children', 'id', 'n_clicks', 'n_clicks_timestamp', 'key', 'role', 'data-*', 'aria-*', 'accessKey', 'className', 'contentEditable', 'contextMenu', 'dir', 'draggable', 'hidden', 'lang', 'spellCheck', 'style', 'tabIndex', 'title', 'loading_state']
self.available_wildcard_properties = ['data-', 'aria-']
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Acronym, self).__init__(children=children, **args)
| 74.903226
| 586
| 0.736865
|
7948d7ea9f3437c3c96c5d8819e6cee5a698c96a
| 441
|
py
|
Python
|
albumentations/albumentations/augmentations/__init__.py
|
hfzx01/Substation
|
760e2f1a5d21102a6a05973cc31bc8252659757c
|
[
"Apache-2.0"
] | 6,316
|
2019-11-18T14:19:17.000Z
|
2022-03-31T22:25:23.000Z
|
albumentations/albumentations/augmentations/__init__.py
|
hfzx01/Substation
|
760e2f1a5d21102a6a05973cc31bc8252659757c
|
[
"Apache-2.0"
] | 558
|
2019-11-19T00:36:01.000Z
|
2022-03-30T22:04:15.000Z
|
albumentations/albumentations/augmentations/__init__.py
|
hfzx01/Substation
|
760e2f1a5d21102a6a05973cc31bc8252659757c
|
[
"Apache-2.0"
] | 889
|
2019-11-18T16:49:44.000Z
|
2022-03-28T11:00:14.000Z
|
# Common classes
from .bbox_utils import *
from .crops.functional import *
from .crops.transforms import *
# New transformations goes to individual files listed below
from .domain_adaptation import *
from .functional import *
from .geometric.functional import *
from .geometric.resize import *
from .geometric.rotate import *
from .geometric.transforms import *
from .keypoints_utils import *
from .transforms import *
from .utils import *
| 27.5625
| 59
| 0.789116
|
7948d8ab43a9a0e647a799783fa8a1e32bcb362e
| 1,051
|
py
|
Python
|
mlflow/entities/metric.py
|
xgk/mlflow
|
f43c3ccb05e8dfcd3c8030d53e2ef98148c0f6b4
|
[
"Apache-2.0"
] | 3
|
2019-08-16T15:17:20.000Z
|
2022-02-22T10:01:47.000Z
|
mlflow/entities/metric.py
|
mateiz/mlflow
|
9e7ec0093a72f4cbe3d55629c7e2cc5043068e12
|
[
"Apache-2.0"
] | null | null | null |
mlflow/entities/metric.py
|
mateiz/mlflow
|
9e7ec0093a72f4cbe3d55629c7e2cc5043068e12
|
[
"Apache-2.0"
] | 1
|
2018-06-06T06:06:49.000Z
|
2018-06-06T06:06:49.000Z
|
from mlflow.entities._mlflow_object import _MLflowObject
from mlflow.protos.service_pb2 import Metric as ProtoMetric
class Metric(_MLflowObject):
"""
Metric object for python client. Backend stores will hydrate this object in APIs.
"""
def __init__(self, key, value, timestamp):
self._key = key
self._value = value
self._timestamp = timestamp
@property
def key(self):
return self._key
@property
def value(self):
return self._value
@property
def timestamp(self):
return self._timestamp
def to_proto(self):
metric = ProtoMetric()
metric.key = self.key
metric.value = self.value
metric.timestamp = self.timestamp
return metric
@classmethod
def from_proto(cls, proto):
return cls(proto.key, proto.value, proto.timestamp)
@classmethod
def _properties(cls):
# TODO: Hard coding this list of props for now. There has to be a clearer way...
return ["key", "value", "timestamp"]
| 25.02381
| 88
| 0.644148
|
7948d921f6d5f1cec79ab9f3209c8dd878b1aae1
| 1,024
|
py
|
Python
|
beecrowd-1050.py
|
jessicabessaoliveira/Python
|
c4732f5e9528a40721b7c16364e6310e7ed8d490
|
[
"MIT"
] | null | null | null |
beecrowd-1050.py
|
jessicabessaoliveira/Python
|
c4732f5e9528a40721b7c16364e6310e7ed8d490
|
[
"MIT"
] | null | null | null |
beecrowd-1050.py
|
jessicabessaoliveira/Python
|
c4732f5e9528a40721b7c16364e6310e7ed8d490
|
[
"MIT"
] | null | null | null |
# https://www.beecrowd.com.br/judge/pt/problems/view/1050
'''
Read an integer representing a DDD code for long-distance dialing. Then report which city the DDD belongs to, according to the table below:
image:
https://resources.beecrowd.com.br/gallery/images/problems/UOJ_1050.png
If the input is any other DDD not present in the table above, the program must print:
DDD nao cadastrado
Input
The input consists of a single integer value.
Output
Print the name of the city corresponding to the DDD given in the input. Print DDD nao cadastrado if there is no DDD matching the number entered.
'''
ddd = int(input())
if ddd == 61:
print('Brasilia')
elif ddd == 71:
print('Salvador')
elif ddd == 11:
print('Sao Paulo')
elif ddd == 21:
print('Rio de Janeiro')
elif ddd == 32:
print('Juiz de Fora')
elif ddd == 19:
print('Campinas')
elif ddd == 27:
print('Vitoria')
elif ddd == 31:
print('Belo Horizonte')
else:
print('DDD nao cadastrado')
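# Equivalent table-driven sketch (illustrative alternative, same mapping as the
# chain above):
#
#   cities = {61: 'Brasilia', 71: 'Salvador', 11: 'Sao Paulo', 21: 'Rio de Janeiro',
#             32: 'Juiz de Fora', 19: 'Campinas', 27: 'Vitoria', 31: 'Belo Horizonte'}
#   print(cities.get(ddd, 'DDD nao cadastrado'))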
| 26.947368
| 161
| 0.720703
|
7948db7459180490d1a7091ea12e1afee89fbcc5
| 67
|
py
|
Python
|
test/regression/features/integers/ints.py
|
bjpop/blip
|
3d9105a44d1afb7bd007da3742fb19dc69372e10
|
[
"BSD-3-Clause"
] | 137
|
2015-02-13T21:03:23.000Z
|
2021-11-24T03:53:55.000Z
|
test/regression/features/integers/ints.py
|
bjpop/blip
|
3d9105a44d1afb7bd007da3742fb19dc69372e10
|
[
"BSD-3-Clause"
] | 2
|
2015-03-07T14:08:33.000Z
|
2015-10-13T02:00:40.000Z
|
test/regression/features/integers/ints.py
|
bjpop/blip
|
3d9105a44d1afb7bd007da3742fb19dc69372e10
|
[
"BSD-3-Clause"
] | 4
|
2015-05-03T22:07:27.000Z
|
2018-09-10T08:55:03.000Z
|
print(0)
print(-0)
print(1)
print(-1)
print(type(1))
print(int(1))
| 9.571429
| 14
| 0.641791
|
7948dc595d481f654fb7bb84bb9c244727068135
| 2,202
|
py
|
Python
|
cmssw/clean_buildfiles.py
|
guitargeek/PKGBUILDs
|
a71e887c838827bb876f3ad4badb66c2eda5f61c
|
[
"MIT"
] | null | null | null |
cmssw/clean_buildfiles.py
|
guitargeek/PKGBUILDs
|
a71e887c838827bb876f3ad4badb66c2eda5f61c
|
[
"MIT"
] | null | null | null |
cmssw/clean_buildfiles.py
|
guitargeek/PKGBUILDs
|
a71e887c838827bb876f3ad4badb66c2eda5f61c
|
[
"MIT"
] | null | null | null |
import xml.etree.ElementTree as ET
import os
def root_node_from_build_file(build_file):
with open(build_file) as f:
xml = f.read()
    root_node = ET.fromstring("<root>" + xml.strip() + "</root>")
return root_node
def package_included(package, path):
if not os.path.isdir(path):
return False
cmd = "cd " + path + ' && git --no-pager grep "' + package + '"'
out = os.popen(cmd).read()
# We need to make sure the hit was not in a BuildFile
hits = out.split("\n")
hits = [h for h in hits if not "BuildFile.xml" in h]
cleaned_out = "\n".join(hits)
return cleaned_out.strip() != ""
directory = "."
build_file_dirs = []
for root, directories, files in os.walk(directory):
for f in files:
if os.path.basename(f) == "BuildFile.xml":
build_file_dirs.append(root)
for build_file_dir in build_file_dirs:
build_file = os.path.join(build_file_dir, "BuildFile.xml")
try:
root_node = root_node_from_build_file(build_file)
    except ET.ParseError:
print("Skipping", build_file_dir, "because xml was not well formed")
continue
unused_dependencies = []
    is_library = build_file_dir.split("/")[-1] not in ["test", "plugins", "bin"]
print(build_file)
def process(elem):
for inner_elem in elem:
process(inner_elem)
if elem.tag == "use":
dependency = elem.get("name")
if not dependency:
return
if "/" in dependency:
if is_library:
if not (
package_included(dependency, os.path.join(build_file_dir, "interface"))
or package_included(dependency, os.path.join(build_file_dir, "src"))
):
unused_dependencies.append(dependency)
else:
if not package_included(dependency, build_file_dir):
unused_dependencies.append(dependency)
for elem in root_node:
process(elem)
for dependency in unused_dependencies:
os.system("sed -i '/" + dependency.replace("/", "\/") + "/d' " + build_file)
| 27.525
| 95
| 0.582652
|
7948dc6dcc08d66de7e5e6522a31cb8291233ffd
| 28,084
|
py
|
Python
|
qcelemental/molparse/from_arrays.py
|
mattwthompson/QCElemental
|
7fb730e66a794a41d403f8686adef8f2ef109fb1
|
[
"BSD-3-Clause"
] | null | null | null |
qcelemental/molparse/from_arrays.py
|
mattwthompson/QCElemental
|
7fb730e66a794a41d403f8686adef8f2ef109fb1
|
[
"BSD-3-Clause"
] | 1
|
2021-03-05T19:06:09.000Z
|
2021-03-05T19:06:09.000Z
|
qcelemental/molparse/from_arrays.py
|
Andrew-AbiMansour/QCElemental
|
2e84cd686d5fff0fc79accb28ffa985de4684704
|
[
"BSD-3-Clause"
] | null | null | null |
import pprint
import re
from copy import deepcopy
import numpy as np
from ..exceptions import ValidationError
from ..physical_constants import constants
from ..util import provenance_stamp, unnp, update_with_error
from .chgmult import validate_and_fill_chgmult
from .nucleus import reconcile_nucleus
from .regex import VERSION_PATTERN
def from_input_arrays(
*,
enable_qm=True,
enable_efp=True,
missing_enabled_return_qm="error",
missing_enabled_return_efp="error",
# qm
geom=None,
elea=None,
elez=None,
elem=None,
mass=None,
real=None,
elbl=None,
name=None,
units="Angstrom",
input_units_to_au=None,
fix_com=None,
fix_orientation=None,
fix_symmetry=None,
fragment_separators=None,
fragment_charges=None,
fragment_multiplicities=None,
molecular_charge=None,
molecular_multiplicity=None,
# efp
fragment_files=None,
hint_types=None,
geom_hints=None,
# qm-vz
geom_unsettled=None,
variables=None,
# processing details
speclabel=True,
tooclose=0.1,
zero_ghost_fragments=False,
nonphysical=False,
mtol=1.0e-3,
copy=True,
verbose=1,
):
"""Compose a Molecule dict from unvalidated arrays and variables
in multiple domains.
    Drives :py:func:`qcelemental.molparse.from_arrays` for successive
    domains and hooks them together (e.g., imposes `fix_com` on "qm"
    when "efp" is present).
"""
molinit = {}
if enable_qm:
molinit["qm"] = {}
if enable_efp:
molinit["efp"] = {}
if enable_efp:
processed = from_arrays(
domain="efp",
missing_enabled_return=missing_enabled_return_efp,
units=units,
input_units_to_au=input_units_to_au,
fix_com=fix_com,
fix_orientation=fix_orientation,
fix_symmetry=fix_symmetry,
fragment_files=fragment_files,
hint_types=hint_types,
geom_hints=geom_hints,
# which other processing details needed?
verbose=verbose,
)
update_with_error(molinit, {"efp": processed})
if molinit["efp"] == {}:
del molinit["efp"]
efp_present = enable_efp and "efp" in molinit and bool(len(molinit["efp"]["geom_hints"]))
if efp_present:
fix_com = True
fix_orientation = True
fix_symmetry = "c1"
if enable_qm:
dm = "qmvz" if geom_unsettled else "qm"
processed = from_arrays(
domain=dm,
missing_enabled_return=missing_enabled_return_qm,
geom=geom,
elea=elea,
elez=elez,
elem=elem,
mass=mass,
real=real,
elbl=elbl,
name=name,
units=units,
input_units_to_au=input_units_to_au,
fix_com=fix_com,
fix_orientation=fix_orientation,
fix_symmetry=fix_symmetry,
fragment_separators=fragment_separators,
fragment_charges=fragment_charges,
fragment_multiplicities=fragment_multiplicities,
molecular_charge=molecular_charge,
molecular_multiplicity=molecular_multiplicity,
geom_unsettled=geom_unsettled,
variables=variables,
# processing details
speclabel=speclabel,
tooclose=tooclose,
zero_ghost_fragments=zero_ghost_fragments,
nonphysical=nonphysical,
mtol=mtol,
copy=copy,
verbose=1,
)
update_with_error(molinit, {"qm": processed})
if molinit["qm"] == {}:
del molinit["qm"]
return molinit
def from_arrays(
*,
geom=None,
elea=None,
elez=None,
elem=None,
mass=None,
real=None,
elbl=None,
name=None,
units="Angstrom",
input_units_to_au=None,
fix_com=None,
fix_orientation=None,
fix_symmetry=None,
fragment_separators=None,
fragment_charges=None,
fragment_multiplicities=None,
molecular_charge=None,
molecular_multiplicity=None,
comment=None,
provenance=None,
connectivity=None,
fragment_files=None,
hint_types=None,
geom_hints=None,
geom_unsettled=None,
variables=None,
domain="qm",
missing_enabled_return="error",
np_out=True,
speclabel=True,
tooclose=0.1,
zero_ghost_fragments=False,
nonphysical=False,
mtol=1.0e-3,
copy=True,
verbose=1,
):
"""Compose a Molecule dict from unvalidated arrays and variables, returning dict.
See fields of Return molrec below. Required parameters (for QM XYZ)
are `geom` and one of `elem`, `elez`, `elbl` (`speclabel=True`)
Parameters
----------
geom : array-like
(nat, 3) or (3 * nat, ) ndarray or list o'lists of Cartesian coordinates.
fragment_separators : array-like of int, optional
(nfr - 1, ) list of atom indices at which to split `geom` into fragments.
elbl : ndarray of str
(nat, ) Label extending `elem` symbol, possibly conveying ghosting, isotope, mass, tagging information.
tooclose : float, optional
Interatom distance (native `geom` units) nearer than which atoms not allowed.
nonphysical : bool, optional
Do allow masses outside an element's natural range to pass validation?
speclabel : bool, optional
If `True`, interpret `elbl` as potentially full nucleus spec including
ghosting, isotope, mass, tagging information, e.g., `@13C_mine` or
`He4@4.01`. If `False`, interpret `elbl` as only the user/tagging
extension to nucleus label, e.g. `_mine` or `4` in the previous examples.
missing_enabled_return : {'minimal', 'none', 'error'}
What to do when an enabled domain is of zero-length? Respectively, return
a fully valid but empty molrec, return empty dictionary, or throw error.
np_out : bool, optional
When `True`, fields geom, elea, elez, elem, mass, real, elbl will be ndarray.
Use `False` to get a json-able version.
Returns
-------
molrec : dict
Molecule dictionary spec follows. Its principles are
(1) contents are fully validated and defaulted - no error
checking necessary,
(2) contents may be mildly redundant - atomic numbers and
element symbols present,
(3) big system, nat-length single-type arrays, not small system,
nat-number heterogeneous objects,
(4) some fields are optional (e.g., fix_symmetry) but largely
self-describing so units or fix_com must be present.
(5) apart from some mild optional fields, _all_ fields will
be present (corollary of "fully validated and defaulted") - no
need to check for every key. in some cases like efp, keys will
appear in blocks, so pre-handshake there will be a few hint keys
and post-handshake they will be joined by full qm-like molrec.
(6) molrec should be idempotent through this function (equiv to
schema validator) but are not idempotent throughout its life. if
fields permit, frame may be changed. Future? if fields permit,
mol may be symmetrized. Coordinates and angles may change units
or range if program returns them in only one form.
name : str, optional
Label for molecule; should be valid Python identifier.
units : {'Angstrom', 'Bohr'}
Units for `geom`.
input_units_to_au : float, optional
If `units='Angstrom'`, overrides consumer's value for [A]-->[a0] conversion.
fix_com : bool
Whether translation of `geom` is allowed or disallowed.
fix_orientation : bool
Whether rotation of `geom` is allowed or disallowed.
fix_symmetry : str, optional
Maximal point group symmetry which `geom` should be treated. Lowercase.
geom : ndarray of float
(3 * nat, ) Cartesian coordinates in `units`.
elea : ndarray of int
(nat, ) Mass number for atoms, if known isotope, else -1.
elez : ndarray of int
(nat, ) Number of protons, nuclear charge for atoms.
elem : ndarray of str
(nat, ) Element symbol for atoms.
mass : ndarray of float
(nat, ) Atomic mass [u] for atoms.
real : ndarray of bool
(nat, ) Real/ghostedness for atoms.
elbl : ndarray of str
(nat, ) Label with any tagging information from element spec.
fragment_separators : list of int
(nfr - 1, ) list of atom indices at which to split `geom` into fragments.
fragment_charges : list of float
(nfr, ) list of charge allocated to each fragment.
fragment_multiplicities : list of int
(nfr, ) list of multiplicity allocated to each fragment.
molecular_charge : float
total charge on system.
molecular_multiplicity : int
total multiplicity on system.
comment : str, optional
Additional comment for molecule.
provenance : dict of str
Accumulated history of molecule, with fields "creator", "version", "routine".
connectivity : list of tuples of int, optional
(nbond, 3) list of (0-indexed) (atomA, atomB, bond_order) (int, int, double) tuples
EFP extension (this + units is minimal)
fragment_files : list of str
(nfr, ) lowercased names of efp meat fragment files.
hint_types : {'xyzabc', 'points'}
(nfr, ) type of fragment orientation hint.
geom_hints : list of lists of float
(nfr, ) inner lists have length 6 (xyzabc; to orient the center) or
9 (points; to orient the first three atoms) of the EFP fragment.
QMVZ extension (geom_unsettled replaces geom)
geom_unsettled : list of lists of str
(nat, ) all-string Cartesian and/or zmat anchor and value contents
mixing anchors, values, and variables.
variables : list of pairs
(nvar, 2) pairs of variables (str) and values (float). May be incomplete.
Raises
------
qcelemental.ValidationError
For most anything wrong.
"""
# << domain sorting >>
available_domains = ["qm", "efp", "qmvz"]
if domain not in available_domains:
raise ValidationError(
"Topology domain {} not available for processing. Choose among {}".format(domain, available_domains)
)
if domain == "qm" and (geom is None or np.asarray(geom).size == 0):
if missing_enabled_return == "none":
return {}
elif missing_enabled_return == "minimal":
geom = []
else:
raise ValidationError("""For domain 'qm', `geom` must be provided.""")
if domain == "efp" and (geom_hints is None or np.asarray(geom_hints, dtype=object).size == 0):
if missing_enabled_return == "none":
return {}
elif missing_enabled_return == "minimal":
geom_hints = []
fragment_files = []
hint_types = []
else:
raise ValidationError("""For domain 'efp', `geom_hints` must be provided.""")
molinit = {}
extern = False
processed = validate_and_fill_units(
name=name,
units=units,
input_units_to_au=input_units_to_au,
comment=comment,
provenance=provenance,
connectivity=connectivity,
always_return_iutau=False,
)
processed["provenance"] = provenance_stamp(__name__)
update_with_error(molinit, processed)
if domain == "efp":
processed = validate_and_fill_efp(fragment_files=fragment_files, hint_types=hint_types, geom_hints=geom_hints)
update_with_error(molinit, processed)
extern = bool(len(molinit["geom_hints"]))
if domain == "qm" or (domain == "efp" and geom is not None) or domain == "qmvz":
if domain == "qmvz":
processed = validate_and_fill_unsettled_geometry(geom_unsettled=geom_unsettled, variables=variables)
update_with_error(molinit, processed)
nat = len(molinit["geom_unsettled"])
else:
processed = validate_and_fill_geometry(geom=geom, tooclose=tooclose, copy=copy)
update_with_error(molinit, processed)
nat = molinit["geom"].shape[0] // 3
processed = validate_and_fill_nuclei(
nat,
elea=elea,
elez=elez,
elem=elem,
mass=mass,
real=real,
elbl=elbl,
speclabel=speclabel,
nonphysical=nonphysical,
mtol=mtol,
verbose=verbose,
)
update_with_error(molinit, processed)
processed = validate_and_fill_fragments(
nat,
fragment_separators=fragment_separators,
fragment_charges=fragment_charges,
fragment_multiplicities=fragment_multiplicities,
)
update_with_error(molinit, processed)
Z_available = molinit["elez"] * molinit["real"] * 1.0
processed = validate_and_fill_chgmult(
zeff=Z_available,
fragment_separators=molinit["fragment_separators"],
molecular_charge=molecular_charge,
fragment_charges=molinit["fragment_charges"],
molecular_multiplicity=molecular_multiplicity,
fragment_multiplicities=molinit["fragment_multiplicities"],
zero_ghost_fragments=zero_ghost_fragments,
verbose=verbose,
)
del molinit["fragment_charges"] # sometimes safe update is too picky about overwriting v_a_f_fragments values
del molinit["fragment_multiplicities"]
update_with_error(molinit, processed)
extern = domain == "efp"
processed = validate_and_fill_frame(
extern=extern, fix_com=fix_com, fix_orientation=fix_orientation, fix_symmetry=fix_symmetry
)
update_with_error(molinit, processed)
if verbose >= 2:
print("RETURN FROM qcel.molparse.from_arrays(domain={})".format(domain.upper()))
pprint.pprint(molinit)
if not np_out:
molinit = unnp(molinit)
return molinit
def validate_and_fill_units(
name=None,
units="Angstrom",
input_units_to_au=None,
comment=None,
provenance=None,
connectivity=None,
always_return_iutau=False,
):
molinit = {}
if name is not None:
molinit["name"] = name
if comment is not None:
molinit["comment"] = comment
def validate_provenance(dicary):
expected_prov_keys = ["creator", "routine", "version"]
try:
prov_keys = sorted(dicary.keys())
except AttributeError:
raise ValidationError("Provenance entry is not dictionary: {}".format(dicary))
if prov_keys == expected_prov_keys:
if not isinstance(dicary["creator"], str):
raise ValidationError(
"""Provenance key 'creator' should be string of creating program's name: {}""".format(
dicary["creator"]
)
)
if not re.fullmatch(VERSION_PATTERN, dicary["version"], re.VERBOSE):
raise ValidationError(
"""Provenance key 'version' should be a valid PEP 440 string: {}""".format(dicary["version"])
)
if not isinstance(dicary["routine"], str):
raise ValidationError(
"""Provenance key 'routine' should be string of creating function's name: {}""".format(
dicary["routine"]
)
)
return True
else:
raise ValidationError("Provenance keys ({}) incorrect: {}".format(expected_prov_keys, prov_keys))
if provenance is None:
molinit["provenance"] = {}
else:
if validate_provenance(provenance):
molinit["provenance"] = deepcopy(provenance)
if connectivity is not None:
conn = []
try:
for (at1, at2, bondorder) in connectivity:
if not (float(at1)).is_integer() or at1 < 0: # or at1 >= nat:
raise ValidationError("""Connectivity first atom should be int [0, nat): {}""".format(at1))
if not (float(at2)).is_integer() or at2 < 0: # or at2 >= nat:
raise ValidationError("""Connectivity second atom should be int [0, nat): {}""".format(at2))
if bondorder < 0 or bondorder > 5:
raise ValidationError("""Connectivity bond order should be float [0, 5]: {}""".format(bondorder))
conn.append((int(min(at1, at2)), int(max(at1, at2)), float(bondorder)))
conn.sort(key=lambda tup: tup[0])
molinit["connectivity"] = conn
except ValueError:
raise ValidationError(
"Connectivity entry is not of form [(at1, at2, bondorder), ...]: {}".format(connectivity)
)
if units.capitalize() in ["Angstrom", "Bohr"]:
molinit["units"] = units.capitalize()
else:
raise ValidationError("Invalid molecule geometry units: {}".format(units))
if molinit["units"] == "Bohr":
iutau = 1.0
elif molinit["units"] == "Angstrom":
iutau = 1.0 / constants.bohr2angstroms
if input_units_to_au is not None:
if abs(input_units_to_au - iutau) < 0.05:
iutau = input_units_to_au
else:
raise ValidationError(
"""No big perturbations to physical constants! {} !~= {}""".format(iutau, input_units_to_au)
)
if always_return_iutau or input_units_to_au is not None:
molinit["input_units_to_au"] = iutau
return molinit
def validate_and_fill_frame(extern, fix_com=None, fix_orientation=None, fix_symmetry=None):
if fix_com is True:
com = True
elif fix_com is False:
if extern:
raise ValidationError("Invalid fix_com ({}) with extern ({})".format(fix_com, extern))
else:
com = False
elif fix_com is None:
com = extern
else:
raise ValidationError("Invalid fix_com: {}".format(fix_com))
if fix_orientation is True:
orient = True
elif fix_orientation is False:
if extern:
raise ValidationError("Invalid fix_orientation ({}) with extern ({})".format(fix_orientation, extern))
else:
orient = False
elif fix_orientation is None:
orient = extern
else:
raise ValidationError("Invalid fix_orientation: {}".format(fix_orientation))
symm = None
if extern:
if fix_symmetry is None:
symm = "c1"
elif fix_symmetry.lower() == "c1":
symm = "c1"
else:
raise ValidationError("Invalid (non-C1) fix_symmetry ({}) with extern ({})".format(fix_symmetry, extern))
else:
if fix_symmetry is not None:
symm = fix_symmetry.lower()
molinit = {}
molinit["fix_com"] = com
molinit["fix_orientation"] = orient
if symm:
molinit["fix_symmetry"] = symm
return molinit
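# Illustrative outcomes of the frame rules above (derived directly from the code):
#   validate_and_fill_frame(extern=True)   # -> fix_com=True, fix_orientation=True, fix_symmetry="c1"
#   validate_and_fill_frame(extern=False)  # -> fix_com=False, fix_orientation=False, no fix_symmetry key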
def validate_and_fill_efp(fragment_files=None, hint_types=None, geom_hints=None):
if (
fragment_files is None
or hint_types is None
or geom_hints is None
or fragment_files == [None]
or hint_types == [None]
or geom_hints == [None]
or not (len(fragment_files) == len(hint_types) == len(geom_hints))
):
raise ValidationError(
"""Missing or inconsistent length among efp quantities: fragment_files ({}), hint_types ({}), and geom_hints ({})""".format(
fragment_files, hint_types, geom_hints
)
)
# NOTE: imposing case on file
try:
files = [f.lower() for f in fragment_files]
except AttributeError:
raise ValidationError("""fragment_files not strings: {}""".format(fragment_files))
if all(f in ["xyzabc", "points", "rotmat"] for f in hint_types):
types = hint_types
else:
raise ValidationError("""hint_types not among 'xyzabc', 'points', 'rotmat': {}""".format(hint_types))
hints = []
hlen = {"xyzabc": 6, "points": 9, "rotmat": 12}
for ifr, fr in enumerate(geom_hints):
try:
hint = [float(f) for f in fr]
except (ValueError, TypeError):
raise ValidationError("""Un float-able elements in geom_hints[{}]: {}""".format(ifr, fr))
htype = hint_types[ifr]
if len(hint) == hlen[htype]:
hints.append(hint)
else:
raise ValidationError("""EFP hint type {} not {} elements: {}""".format(htype, hlen[htype], hint))
return {"fragment_files": files, "hint_types": types, "geom_hints": hints}
def validate_and_fill_geometry(geom=None, tooclose=0.1, copy=True):
"""Check `geom` for overlapping atoms. Return flattened"""
npgeom = np.array(geom, copy=copy, dtype=float).reshape((-1, 3))
# Upper triangular
metric = tooclose ** 2
tooclose_inds = []
for x in range(npgeom.shape[0]):
diffs = npgeom[x] - npgeom[x + 1 :]
        # Row-wise squared distances: einsum("ij,ij->i") == (diffs ** 2).sum(axis=1)
        dists = np.einsum("ij,ij->i", diffs, diffs)
# Record issues
if np.any(dists < metric):
indices = np.where(dists < metric)[0]
tooclose_inds.extend([(x, y, dist) for y, dist in zip(indices + x + 1, dists[indices] ** 0.5)])
if tooclose_inds:
raise ValidationError(
"""Following atoms are too close: {}""".format([(i, j, dist) for i, j, dist in tooclose_inds])
)
return {"geom": npgeom.reshape((-1))}
def validate_and_fill_nuclei(
nat,
elea=None,
elez=None,
elem=None,
mass=None,
real=None,
elbl=None,
# processing details
speclabel=True,
nonphysical=False,
mtol=1.0e-3,
verbose=1,
):
"""Check the nuclear identity arrays for consistency and fill in knowable values."""
if elea is None:
elea = np.asarray([None] * nat)
else:
# -1 equivalent to None
elea = np.asarray(elea)
if -1 in elea:
elea = np.array([(None if at == -1 else at) for at in elea]) # Rebuild to change dtype if needed.
if elez is None:
elez = np.asarray([None] * nat)
else:
elez = np.asarray(elez)
if elem is None:
elem = np.asarray([None] * nat)
else:
elem = np.asarray(elem)
if mass is None:
mass = np.asarray([None] * nat)
else:
mass = np.asarray(mass)
if real is None:
real = np.asarray([None] * nat)
else:
real = np.asarray(real)
if elbl is None:
elbl = np.asarray([None] * nat)
else:
elbl = np.asarray(elbl)
if not ((nat,) == elea.shape == elez.shape == elem.shape == mass.shape == real.shape == elbl.shape):
raise ValidationError(
"""Dimension mismatch natom {} among A {}, Z {}, E {}, mass {}, real {}, and elbl {}""".format(
(nat,), elea.shape, elez.shape, elem.shape, mass.shape, real.shape, elbl.shape
)
)
if nat:
A, Z, E, mass, real, label = zip(
*[
reconcile_nucleus(
A=elea[at],
Z=elez[at],
E=elem[at],
mass=mass[at],
real=real[at],
label=elbl[at],
speclabel=speclabel,
nonphysical=nonphysical,
mtol=mtol,
verbose=verbose,
)
for at in range(nat)
]
)
else:
A = Z = E = mass = real = label = []
return {
"elea": np.array(A, dtype=int),
"elez": np.array(Z, dtype=int),
"elem": np.array(E),
"mass": np.array(mass, dtype=float),
"real": np.array(real, dtype=bool),
"elbl": np.array(label),
}
def validate_and_fill_fragments(nat, fragment_separators=None, fragment_charges=None, fragment_multiplicities=None):
"""Check consistency of fragment specifiers wrt type and length. For
charge & multiplicity, scientific defaults are not computed or applied;
rather, missing slots are filled with `None` for later processing.
"""
if fragment_separators is None:
if fragment_charges is None and fragment_multiplicities is None:
frs = [] # np.array([], dtype=int) # if empty, needs to be both ndarray and int
frc = [None]
frm = [None]
else:
raise ValidationError(
"""Fragment quantities given without separation info: sep ({}), chg ({}), and mult ({})""".format(
fragment_separators, fragment_charges, fragment_multiplicities
)
)
else:
trial_geom = np.zeros((nat, 3))
try:
split_geom = np.split(trial_geom, fragment_separators, axis=0)
except TypeError:
raise ValidationError(
"""fragment_separators ({}) unable to perform trial np.split on geometry.""".format(fragment_separators)
)
if any(len(f) == 0 for f in split_geom):
if nat != 0:
raise ValidationError(
"""fragment_separators ({}) yields zero-length fragment(s) after trial np.split on geometry.""".format(
split_geom
)
)
if sum(len(f) for f in split_geom) != nat:
raise ValidationError(
"""fragment_separators ({}) yields overlapping fragment(s) after trial np.split on geometry, possibly unsorted.""".format(
split_geom
)
)
frs = fragment_separators
nfr = len(split_geom)
if fragment_charges is None:
frc = [None] * nfr
else:
try:
frc = [(f if f is None else float(f)) for f in fragment_charges]
except TypeError:
raise ValidationError("""fragment_charges not among None or float: {}""".format(fragment_charges))
if fragment_multiplicities is None:
frm = [None] * nfr
elif all(f is None or (isinstance(f, (int, np.integer)) and f >= 1) for f in fragment_multiplicities):
frm = fragment_multiplicities
else:
raise ValidationError(
"""fragment_multiplicities not among None or positive integer: {}""".format(fragment_multiplicities)
)
if not (len(frc) == len(frm) == len(frs) + 1):
raise ValidationError(
"""Dimension mismatch among fragment quantities: sep + 1 ({}), chg ({}), and mult({})""".format(
len(frs) + 1, len(frc), len(frm)
)
)
return {"fragment_separators": list(frs), "fragment_charges": frc, "fragment_multiplicities": frm}
def validate_and_fill_unsettled_geometry(geom_unsettled, variables):
lgeom = [len(g) for g in geom_unsettled]
if lgeom[0] not in [0, 3]:
raise ValidationError("""First line must be Cartesian or single atom.""")
if any(l == 3 for l in lgeom) and not all((l in [3, 6]) for l in lgeom):
raise ValidationError(
"""Mixing Cartesian and Zmat formats must occur in just that order once absolute frame established."""
)
allowed_to_follow = {0: [2], 2: [4], 3: [3, 6], 4: [6], 6: [3, 6]}
for il in range(len(lgeom) - 1):
if lgeom[il + 1] not in allowed_to_follow[lgeom[il]]:
raise ValidationError(
"""This is not how a Zmat works - aim for lower triangular. Line len ({}) may be followed by line len ({}), not ({}).""".format(
lgeom[il], allowed_to_follow[lgeom[il]], lgeom[il + 1]
)
)
if not all(len(v) == 2 for v in variables):
raise ValidationError("""Variables should come in pairs: {}""".format(variables))
vvars = [[str(v[0]), float(v[1])] for v in variables]
return {"geom_unsettled": geom_unsettled, "variables": vvars}
| 35.017456
| 144
| 0.602478
|
7948dd93cdc57f74a778c980bed0f4bd127b4571
| 1,278
|
py
|
Python
|
src/blog/urls.py
|
Dingo5733/djangoblog19
|
1f218e45414f9666b93fac90b261060942e2a5e4
|
[
"MIT"
] | null | null | null |
src/blog/urls.py
|
Dingo5733/djangoblog19
|
1f218e45414f9666b93fac90b261060942e2a5e4
|
[
"MIT"
] | null | null | null |
src/blog/urls.py
|
Dingo5733/djangoblog19
|
1f218e45414f9666b93fac90b261060942e2a5e4
|
[
"MIT"
] | null | null | null |
"""blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^comments/', include("comments.urls", namespace='comments')),
url(r'^posts/', include("posts.urls", namespace='posts')),
#url(r'^posts/$', "<appname>.views.<function_name>"),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 39.9375
| 82
| 0.715962
|
7948de649fadfb439bd342bcf50734cc4851f9d1
| 2,873
|
py
|
Python
|
main.py
|
SeanMelody/CryptoPrice-withPython
|
4c90c92a006783b34d6012822c4bd3662596949f
|
[
"MIT"
] | null | null | null |
main.py
|
SeanMelody/CryptoPrice-withPython
|
4c90c92a006783b34d6012822c4bd3662596949f
|
[
"MIT"
] | null | null | null |
main.py
|
SeanMelody/CryptoPrice-withPython
|
4c90c92a006783b34d6012822c4bd3662596949f
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import inquirer
import requests
import time
# Import all the requirements
# Function to get the user selected coin price
def get_crypto_price(user_selected):
    # Inquirer returns a dictionary, so get the value out before searching
    coin = user_selected['crypto']
    # Print a lead-in so that the user knows it is working and what is displayed
    print('One ' + coin + ' costs:')
    # Build the URL to search with Beautiful Soup, using the selected coin
url = 'https://coinmarketcap.com/currencies/' + coin
# Make the request
HTML = requests.get(url)
# Parse the data with Beautiful soup, using the html parser
soup = BeautifulSoup(HTML.text, 'html.parser')
# Get the price from the webpage, that is located in the <div class="priceValue___11gHJ">
price = soup.find('div', attrs={'class': 'priceValue___11gHJ'}).text
# Return the price to be able to print it.
return price
# Function to ask user if they want to get another coin price using Inquirer Confirm
def another():
    # Inquirer confirm letting the user select 'y' or 'n'
    pick_again = [inquirer.Confirm('again',
                                   message="Do you want to get the price of another coin?: ",
                                   default=True)
                  ]
# Set the response to a variable
run_again = inquirer.prompt(pick_again)
# print(run_again)
# If statement to see if the user selected yes or no.
# If yes, run the pick_a_coin function again
    if run_again['again']:
pick_a_coin()
# Else make a silly crypto joke and say goodbye.
else:
print("HODL! Goodbye!")
# Main function to ask the user what coin they would like the price of
def pick_a_coin():
# Using inquirer, give the user a list of coins to find the price of.
# This is done to avoid user misspelling, or typing in something that is not a coin.
crypto_options = [inquirer.List('crypto',
message="What coin price would you like to check?: ",
choices=['Bitcoin', 'Ethereum', 'Tether', 'Cardano', 'Dogecoin',
'XRP', 'Polkadot', 'Uniswap', 'Litecoin', 'Solana', 'Filecoin']
)
]
# Ask the question and set the response to a variable
user_selected = inquirer.prompt(crypto_options)
# Call the get_crypto_price function and send it the user selected coin.
print_price = get_crypto_price(user_selected)
# Print the results of the get_crypto_price function!
print(print_price)
# Call the another function to see if the user would like to ask another question
another()
# Start the program by calling the pick_a_coin function!
pick_a_coin()
| 35.9125
| 116
| 0.642534
|
7948df5d7218f7ce1e33a0eee92fc6811a8c7ab8
| 3,796
|
py
|
Python
|
python/simulation/TS8_sim_traveler_generator.py
|
lsst-camera-dh/IandT-jobs
|
f62f9f796208d23a4f775752c084f8ef3dd6cf35
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2017-05-22T20:53:12.000Z
|
2017-05-22T20:53:12.000Z
|
python/simulation/TS8_sim_traveler_generator.py
|
lsst-camera-dh/IandT-jobs
|
f62f9f796208d23a4f775752c084f8ef3dd6cf35
|
[
"BSD-3-Clause-LBNL"
] | 21
|
2016-10-12T22:42:16.000Z
|
2020-09-29T05:37:54.000Z
|
python/simulation/TS8_sim_traveler_generator.py
|
lsst-camera-dh/IandT-jobs
|
f62f9f796208d23a4f775752c084f8ef3dd6cf35
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from eTravelerComponents import Traveler
traveler = Traveler('TS8_sim', 'ScienceRaft', 'Test Stand 8 EO simulation')
#
# Data acquisition jobs
#
fe55_raft_acq = traveler.stepFactory('fe55_raft_acq',
description='Fe55 acquisition')
dark_raft_acq = traveler.stepFactory('dark_raft_acq',
description='Darks acquisition')
flat_pair_raft_acq = traveler.stepFactory('flat_pair_raft_acq',
description='Flat pairs acquisition')
ppump_raft_acq = traveler.stepFactory('ppump_raft_acq',
description='Pocket Pumping acquisition')
sflat_raft_acq = traveler.stepFactory('sflat_raft_acq',
description='Superflats acquisition')
qe_raft_acq = traveler.stepFactory('qe_raft_acq', description='QE acquisition')
spot_raft_acq = traveler.stepFactory('spot_raft_acq',
description='Spot acquisition')
#
# Analysis jobs
#
fe55_analysis = traveler.stepFactory('fe55_raft_analysis',
description='Fe55 analysis')
fe55_analysis.add_pre_reqs(fe55_raft_acq)
read_noise = traveler.stepFactory('read_noise_raft',
description='Read noise analysis')
read_noise.add_pre_reqs(fe55_raft_acq, fe55_analysis)
bright_defects = traveler.stepFactory('bright_defects_raft',
description='Bright defects analysis')
bright_defects.add_pre_reqs(dark_raft_acq, fe55_analysis)
dark_defects = traveler.stepFactory('dark_defects_raft',
description='Dark defects analysis')
dark_defects.add_pre_reqs(sflat_raft_acq, fe55_analysis, bright_defects)
traps = traveler.stepFactory('traps_raft', description='Charge traps analysis')
traps.add_pre_reqs(ppump_raft_acq, fe55_analysis, bright_defects, dark_defects)
mask_generators = fe55_analysis, bright_defects, dark_defects, traps
dark_current = traveler.stepFactory('dark_current_raft',
description='Dark current analysis')
dark_current.add_pre_reqs(dark_raft_acq)
dark_current.add_pre_reqs(*mask_generators)
cte = traveler.stepFactory('cte_raft', description='Charge transfer efficiency')
cte.add_pre_reqs(sflat_raft_acq)
cte.add_pre_reqs(*mask_generators)
prnu = \
traveler.stepFactory('prnu_raft',
description='Photo-response non-uniformity analysis')
prnu.add_pre_reqs(qe_raft_acq)
prnu.add_pre_reqs(*mask_generators)
flat_pairs_analysis = \
traveler.stepFactory('flat_pairs_raft_analysis',
description='Full well and linearity analysis')
flat_pairs_analysis.add_pre_reqs(flat_pair_raft_acq)
flat_pairs_analysis.add_pre_reqs(*mask_generators)
ptc = traveler.stepFactory('ptc_raft', description='Photon transfer curve')
ptc.add_pre_reqs(flat_pair_raft_acq)
ptc.add_pre_reqs(*mask_generators)
qe_analysis = traveler.stepFactory('qe_raft_analysis', description='QE analysis')
qe_analysis.add_pre_reqs(qe_raft_acq)
qe_analysis.add_pre_reqs(*mask_generators)
crosstalk = traveler.stepFactory('crosstalk_raft',
description='Crosstalk analysis')
crosstalk.add_pre_reqs(spot_raft_acq)
crosstalk.add_pre_reqs(*mask_generators)
test_report = traveler.stepFactory('test_report_raft',
description='Test report generation')
test_report.add_pre_reqs(fe55_analysis, read_noise, bright_defects,
dark_defects, traps, dark_current, cte, prnu,
flat_pairs_analysis, ptc, qe_analysis, crosstalk)
#
# Write travelers
#
traveler.write_fake_eT_traveler('TS8_sim_traveler.py')
traveler.write_yml('TS8_sim_traveler.yml')
| 41.714286
| 81
| 0.705216
|
7948e0338b6a23e75d2e9f0156750d09bb147440
| 1,861
|
py
|
Python
|
python/GafferOSLUI/__init__.py
|
davidsminor/gaffer
|
64f75654ce778105dd93fbaad0e4486a5577cd09
|
[
"BSD-3-Clause"
] | 1
|
2015-02-10T19:04:46.000Z
|
2015-02-10T19:04:46.000Z
|
python/GafferOSLUI/__init__.py
|
danbethell/gaffer
|
455fe7420fb4c01adac59cbecb25a6b1d8a3db53
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferOSLUI/__init__.py
|
danbethell/gaffer
|
455fe7420fb4c01adac59cbecb25a6b1d8a3db53
|
[
"BSD-3-Clause"
] | null | null | null |
##########################################################################
#
# Copyright (c) 2013, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import OSLShaderUI
import OSLImageUI
import OSLObjectUI
| 46.525
| 77
| 0.681354
|
7948e0dfe7178b673734f0b0da0d04cce7c32ed7
| 13,499
|
py
|
Python
|
src/models.py
|
personads/smu
|
e954b17a55c2ccbeaa7030b67d7ff5e3e6cda1fe
|
[
"MIT"
] | 1
|
2020-02-22T20:41:25.000Z
|
2020-02-22T20:41:25.000Z
|
src/models.py
|
personads/smu
|
e954b17a55c2ccbeaa7030b67d7ff5e3e6cda1fe
|
[
"MIT"
] | null | null | null |
src/models.py
|
personads/smu
|
e954b17a55c2ccbeaa7030b67d7ff5e3e6cda1fe
|
[
"MIT"
] | 1
|
2020-03-08T13:13:35.000Z
|
2020-03-08T13:13:35.000Z
|
'''
SMT Tools
'''
from utils import *
from math import log, exp
from collections import defaultdict
from sys import stdout
#
# functions
#
def train_model1(corpus, iterations, verbose=False) :
'''
EM training function according to IBM Model 1
returns the translation probability t = {(e,f) : prob}
'''
if verbose : print(" - training IBM Model 1 - ")
# initialize t uniformly
t = defaultdict(lambda: 1./corpus.count_unique_f())
# training loop
for i in range(iterations) :
count = defaultdict(lambda:0.)
total = defaultdict(lambda:0.)
stotal = {}
for index_pair, pair in enumerate(corpus) :
if (verbose) and ( ((index_pair+1)%100 == 0) or (i+1 == iterations) ):
stdout.write(('\rtraining iteration : %d of %d | %d of %d sentence pairs | %d token pairs'+(' '*10)) % (i+1, iterations, index_pair+1, len(corpus), len(t.keys())))
stdout.flush()
# insert null token
sentence_f = [""] + pair[0]
sentence_e = [""] + pair[1]
# compute normalization
for token_e in sentence_e :
stotal[token_e] = 0
for token_f in sentence_f :
stotal[token_e] += t[(token_e,token_f)]
# collect counts
for token_e in sentence_e :
for token_f in sentence_f :
count[(token_e,token_f)] += t[(token_e,token_f)] / stotal[token_e]
total[token_f] += t[(token_e,token_f)] / stotal[token_e]
if total[token_f] == 0 :
print(token_f, total[token_f])
# probability estimation
for token_e, token_f in corpus.get_token_pairs() :
t[(token_e,token_f)] = count[(token_e,token_f)] / total[token_f]
corpus.reset_iter()
if verbose : print("\n - training of IBM Model 1 complete - ")
return dict(t)
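# Hedged usage sketch: `corpus` is expected to iterate over (sentence_f, sentence_e)
# token-list pairs and to provide __len__, count_unique_f(), get_token_pairs() and
# reset_iter() (see utils). With such an object in hand:
#
#   t = train_model1(corpus, iterations=5, verbose=True)
#   print(t[("house", "haus")])  # hypothetical (e, f) token pair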
def train_model2(corpus, iterations, verbose=False) :
'''
EM training function according to IBM Model 2
returns (t, a)
the translation probability t = {(e,f) : prob}
the alignment probability a = {(i,j,l_e,l_f) : prob }
'''
if verbose : print(" - training IBM Model 2 - ")
t = {}
a = {}
# initialize t according to Model 1
if verbose : print("initialize t according to Model 1...")
t = train_model1(corpus, iterations, verbose=verbose)
# initialize a uniformly
for pair in corpus :
length_f = len(pair[0])+1
length_e = len(pair[1])+1
for index_f in range(length_f) :
for index_e in range(length_e) :
a[(index_f,index_e,length_e,length_f)] = 1./(length_f+1)
# training loop
for i in range(iterations) :
count_t = defaultdict(lambda:0)
total_t = defaultdict(lambda:0)
count_a = defaultdict(lambda:0)
total_a = defaultdict(lambda:0)
stotal = {}
corpus.reset_iter()
for index_pair, pair in enumerate(corpus) :
if (verbose) and ( ((index_pair+1)%100 == 0) or (i+1 == iterations) ):
stdout.write(('\rtraining iteration : %d of %d | %d of %d sentence pairs | %d alignments'+(' '*10)) % (i+1, iterations, index_pair+1, len(corpus), len(a.keys())))
stdout.flush()
sentence_f = [""] + pair[0] # insert null token
sentence_e = [""] + pair[1]
length_f = len(sentence_f)
length_e = len(sentence_e)
# compute normalization
for index_e, token_e in enumerate(sentence_e) :
stotal[token_e] = 0
for index_f, token_f in enumerate(sentence_f) :
stotal[token_e] += t[(token_e,token_f)] * a[(index_f,index_e,length_e,length_f)]
# collect counts
for index_e, token_e in enumerate(sentence_e) :
for index_f, token_f in enumerate(sentence_f) :
update_c = t[(token_e,token_f)] * a[(index_f,index_e,length_e,length_f)]/stotal[token_e]
count_t[(token_e,token_f)] += update_c
total_t[token_f] += update_c
count_a[(index_f,index_e,length_e,length_f)] += update_c
total_a[(index_e,length_e,length_f)] += update_c
# probability estimation
for token_e, token_f in t.keys() :
t[(token_e, token_f)] = count_t[(token_e, token_f)] / total_t[token_f]
for alignment in a.keys() :
a[alignment] = count_a[alignment] / total_a[alignment[1:]]
if verbose : print("\n - training of IBM Model 2 complete - ")
return dict(t), dict(a)
def train_model3(corpus, iterations, verbose=False) :
'''
EM training function according to IBM Model 3
returns (t, d, f, n)
the translation probability t = {(e,f) : prob}
the distortion probability d = {(j,i,l_e,l_f) : prob }
the fertility probability f = {(n,f) : prob }
the null non-insertion probability p0 = prob
'''
if verbose : print(" - training IBM Model 3 - ")
t = {}
d = {}
f = {}
p0 = None
# initialize t,d according to Model 2
if verbose : print("initialize t, d according to Model 2...")
t, d = train_model2(corpus, iterations*2, verbose=verbose)
# remap distributions t, d
for pair in t :
        # convert to log space, skipping zero probabilities
if t[pair] > 0 : t[pair] = log(t[pair])
remap_d = {}
for align in d :
        # convert to log space, skipping zero probabilities
if d[align] > 0 : remap_d[(align[1], align[0], align[2], align[3])] = log(d[align])
d = remap_d
# training loop
for i in range(iterations) :
count_t = defaultdict(lambda:0)
total_t = defaultdict(lambda:0)
count_d = defaultdict(lambda:0)
total_d = defaultdict(lambda:0)
count_f = defaultdict(lambda:0)
total_f = defaultdict(lambda:0)
count_null = 0
count_p1 = 0
count_p0 = 0
stotal = {}
corpus.reset_iter()
for index_pair, pair in enumerate(corpus) :
if (verbose) :
stdout.write(('\rtraining iteration : %d of %d | %d of %d sentence pairs | %d alignments | %d fertiliy values |'+(' '*10)) % (i+1, iterations, index_pair+1, len(corpus), len(d.keys()), len(f.keys())))
stdout.flush()
# initialize local pair variables
sentence_f = [""] + pair[0] # insert null token
sentence_e = [""] + pair[1]
length_f = len(sentence_f)
length_e = len(sentence_e)
# get sample alignments
sample_alignments = sample_model3(sentence_e, sentence_f, t, d)
if sample_alignments is None :
# skip if no valid alignments are found
continue
sample_probs = []
count_total = 0
valid_alignments = []
for align in sample_alignments :
align_prob = align.get_probability(d)
for index_f, token_f in enumerate(sentence_f) :
token_e = sentence_e[align.get_index_e(index_f)]
if (token_e, token_f) in t :
cur_sample_prob = t[(token_e, token_f)]+align_prob # log probability
valid_alignments.append(align)
sample_probs.append(cur_sample_prob)
sample_alignments = valid_alignments
min_sample_prob = min(sample_probs)
for index_prob in range(len(sample_probs)) :
sample_probs[index_prob] = -1*min_sample_prob + sample_probs[index_prob]
count_norm = -1*min_sample_prob
for index_align, align in enumerate(sample_alignments) :
# normalize log probabilities as count
if sample_probs[index_align] == 0 :
count = 1
else :
count = sample_probs[index_align] / count_norm
for index_f, token_f in enumerate(sentence_f) :
index_e = align.get_index_e(index_f)
token_e = sentence_e[index_e]
count_t[(token_e, token_f)] += count
total_t[token_f] += count
count_d[(index_e, index_f, length_e, length_f)] += count
total_d[(index_f, length_e, length_f)] += count
if index_e == 0 :
count_null += 1
count_p1 += count_null * count
count_p0 += (length_e - 2 * count_null) * count
for index_f in range(length_f) :
fertility = 0
for index_e in range(length_e) :
if (index_e == align.get_index_e(index_f)) and (align.get_index_e(index_f) != 0) :
fertility += 1
count_f[(fertility, sentence_f[index_f])] += count
total_f[sentence_f[index_f]] += count
# probability estimation
t = {}
d = {}
f = {}
for token_e, token_f in count_t.keys() :
cur_prob_t = count_t[(token_e, token_f)] / total_t[token_f]
if cur_prob_t > 0 : t[(token_e, token_f)] = log(cur_prob_t) # log probability
for index_e, index_f, length_e, length_f in count_d.keys() :
cur_prob_d = count_d[(index_e, index_f, length_e, length_f)] / total_d[(index_f, length_e, length_f)]
if cur_prob_d > 0 : d[(index_e, index_f, length_e, length_f)] = log(cur_prob_d) # log probability
for fertility, token_f in count_f.keys() :
cur_prob_f = count_f[(fertility, token_f)] / total_f[token_f]
if cur_prob_f > 0 : f[(fertility, token_f)] = log(cur_prob_f) # log probability
p1 = count_p1 / (count_p0 + count_p1)
p0 = 1 - p1
if verbose : print("\n - training of IBM Model 3 complete - ")
return dict(t), dict(d), dict(f), p0
def sample_model3(sentence_e, sentence_f, prob_t, prob_d) :
res = []
length_e = len(sentence_e)
length_f = len(sentence_f)
# determine argmax over index_e
argmax_token_alignments = []
for index_f in range(length_f) :
max_alignment = (None, None)
for try_e in range(length_e) :
cur_prob_t = None
if (sentence_e[try_e], sentence_f[index_f]) in prob_t.keys() :
cur_prob_t = prob_t[(sentence_e[try_e], sentence_f[index_f])]
cur_prob_d = None
if (try_e, index_f, length_e, length_f) in prob_d.keys() :
cur_prob_d = prob_d[(try_e, index_f, length_e, length_f)]
if (cur_prob_t is not None) and (cur_prob_d is not None) :
cur_prob = cur_prob_t + cur_prob_d # log probability
if (max_alignment[1] is None) or (cur_prob > max_alignment[1]):
max_alignment = (try_e, cur_prob)
if max_alignment[0] is None:
argmax_token_alignments = None
break
argmax_token_alignments.append(max_alignment[0])
if argmax_token_alignments is not None :
cur_alignment = alignment(length_e, length_f, argmax_token_alignments)
res.append(cur_alignment)
else :
# cur_alignment = alignment(length_e, length_f)
return None
# perform sampling
# for index_pegged in range(length_f) :
# # cur_alignment = cur_alignment.hillclimb(prob_d, index_pegged)
# # if cur_alignment not in res :
# # res.append(cur_alignment)
# for neighbor in cur_alignment.get_neighbors(index_pegged) :
# if (neighbor not in res) and (neighbor.get_probability(prob_d) is not None) :
# res.append(neighbor)
return res
def train_lm(corpus, n_length, verbose=False) :
if verbose : print(" - training "+str(n_length)+"-gram language model - ")
res = {}
# collect counts
counts = {}
for n in range(1,n_length+1) :
res[n] = {}
counts[n] = {}
for index_sen, sentence in enumerate(corpus) :
if (verbose) and ((index_sen+1)%100 == 0):
stdout.write(('\rtraining : %d of %d sentences'+(' '*10)) % (index_sen+1, len(corpus)))
stdout.flush()
sentence = ["<s>"] + sentence + ["</s>"]
for index_token in range(len(sentence)) :
for n in range(1, n_length+1):
ngram = tuple(sentence[index_token:(index_token+n)])
if index_token+n <= len(sentence) :
if ngram in counts[n] :
counts[n][ngram] += 1
else :
counts[n][ngram] = 1
# probability estimation
if verbose : print("\nestimating probabilites...")
for n in range(1,n_length+1) :
for ngram in counts[n] :
            if n > 1 :
                # conditional probability keyed as (w_n, w_1, ..., w_{n-1})
                res[n][(ngram[len(ngram)-1],)+ngram[:-1]] = log(counts[n][ngram] / counts[n-1][ngram[:n-1]])
            else :
                # unigram estimate; note the denominator is the number of types, not tokens
                res[n][ngram] = log(counts[n][ngram] / len(counts[n].keys()))
if verbose : print(" - training complete - ")
return res
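if __name__ == "__main__":
    # Hedged sketch: train a bigram LM on a toy corpus. train_lm only needs an
    # iterable of token lists that supports len(), so a plain list works here.
    toy_corpus = [["the", "cat", "sat"], ["the", "dog", "sat"]]
    lm = train_lm(toy_corpus, n_length=2, verbose=True)
    print(lm[2][("sat", "cat")])  # log P(sat | cat); keys are (w_n, w_1, ..., w_{n-1})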
| 46.071672
| 217
| 0.549522
|
7948e3562a9b52101d935cb6e57e7f16ca339246
| 23,458
|
py
|
Python
|
gluon/gluoncv2/models/fishnet.py
|
naviocean/imgclsmob
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
[
"MIT"
] | 2,649
|
2018-08-03T14:18:00.000Z
|
2022-03-31T08:08:17.000Z
|
gluon/gluoncv2/models/fishnet.py
|
naviocean/imgclsmob
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
[
"MIT"
] | 95
|
2018-08-13T01:46:03.000Z
|
2022-03-13T08:38:14.000Z
|
gluon/gluoncv2/models/fishnet.py
|
naviocean/imgclsmob
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
[
"MIT"
] | 549
|
2018-08-06T08:09:22.000Z
|
2022-03-31T08:08:21.000Z
|
"""
FishNet for ImageNet-1K, implemented in Gluon.
Original paper: 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
"""
__all__ = ['FishNet', 'fishnet99', 'fishnet150', 'ChannelSqueeze']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import Identity
from .common import pre_conv1x1_block, pre_conv3x3_block, conv1x1, SesquialteralHourglass, InterpolationBlock
from .preresnet import PreResActivation
from .senet import SEInitBlock
def channel_squeeze(x,
channels_per_group):
"""
Channel squeeze operation.
Parameters:
----------
x : NDArray
Input tensor.
channels_per_group : int
Number of channels per group.
Returns:
-------
NDArray
Resulted tensor.
"""
    # MXNet reshape codes: 0 keeps the batch dim, -4 splits the channel dim into
    # (channels_per_group, -1), -2 keeps the trailing dims; summing over axis=2
    # leaves channels // groups output channels.
    return x.reshape((0, -4, channels_per_group, -1, -2)).sum(axis=2)
class ChannelSqueeze(HybridBlock):
"""
    Channel squeeze layer. This is a wrapper over the same operation. It is designed to store the number of groups.
Parameters:
----------
channels : int
Number of channels.
groups : int
Number of groups.
"""
def __init__(self,
channels,
groups,
**kwargs):
super(ChannelSqueeze, self).__init__(**kwargs)
assert (channels % groups == 0)
self.channels_per_group = channels // groups
def hybrid_forward(self, F, x):
return channel_squeeze(x, self.channels_per_group)
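# Hedged usage sketch (assumes MXNet NDArray inputs): with channels=8 and groups=2
# the block sums away the group axis, reducing 8 channels to 8 // 2 = 4.
#
#   from mxnet import nd
#   x = nd.ones((1, 8, 4, 4))
#   y = ChannelSqueeze(channels=8, groups=2)(x)   # y.shape == (1, 4, 4, 4)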
class PreSEAttBlock(HybridBlock):
"""
FishNet specific Squeeze-and-Excitation attention block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
reduction : int, default 16
Squeeze reduction value.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
reduction=16,
**kwargs):
super(PreSEAttBlock, self).__init__(**kwargs)
        mid_channels = out_channels // reduction
with self.name_scope():
self.bn = nn.BatchNorm(
in_channels=in_channels,
use_global_stats=bn_use_global_stats)
self.relu = nn.Activation("relu")
self.conv1 = conv1x1(
in_channels=in_channels,
                out_channels=mid_channels,
use_bias=True)
self.conv2 = conv1x1(
                in_channels=mid_channels,
out_channels=out_channels,
use_bias=True)
self.sigmoid = nn.Activation("sigmoid")
def hybrid_forward(self, F, x):
x = self.bn(x)
x = self.relu(x)
x = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.sigmoid(x)
return x
class FishBottleneck(HybridBlock):
"""
FishNet bottleneck block for residual unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
dilation : int or tuple/list of 2 int
Dilation value for convolution layer.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
dilation,
bn_use_global_stats,
**kwargs):
super(FishBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // 4
with self.name_scope():
self.conv1 = pre_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = pre_conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
padding=dilation,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = pre_conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class FishBlock(HybridBlock):
"""
FishNet block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
squeeze : bool, default False
Whether to use a channel squeeze operation.
"""
def __init__(self,
in_channels,
out_channels,
strides=1,
dilation=1,
bn_use_global_stats=False,
squeeze=False,
**kwargs):
super(FishBlock, self).__init__(**kwargs)
self.squeeze = squeeze
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = FishBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats)
if self.squeeze:
assert (in_channels // 2 == out_channels)
self.c_squeeze = ChannelSqueeze(
channels=in_channels,
groups=2)
elif self.resize_identity:
self.identity_conv = pre_conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
if self.squeeze:
identity = self.c_squeeze(x)
elif self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
return x
class DownUnit(HybridBlock):
"""
FishNet down unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_use_global_stats,
**kwargs):
super(DownUnit, self).__init__(**kwargs)
with self.name_scope():
self.blocks = nn.HybridSequential(prefix="")
for i, out_channels in enumerate(out_channels_list):
self.blocks.add(FishBlock(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.pool = nn.MaxPool2D(
pool_size=2,
strides=2)
def hybrid_forward(self, F, x):
x = self.blocks(x)
x = self.pool(x)
return x
class UpUnit(HybridBlock):
"""
FishNet up unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
dilation=1,
bn_use_global_stats=False,
**kwargs):
super(UpUnit, self).__init__(**kwargs)
with self.name_scope():
self.blocks = nn.HybridSequential(prefix="")
for i, out_channels in enumerate(out_channels_list):
squeeze = (dilation > 1) and (i == 0)
self.blocks.add(FishBlock(
in_channels=in_channels,
out_channels=out_channels,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats,
squeeze=squeeze))
in_channels = out_channels
self.upsample = InterpolationBlock(scale_factor=2, bilinear=False)
def hybrid_forward(self, F, x):
x = self.blocks(x)
x = self.upsample(x)
return x
class SkipUnit(HybridBlock):
"""
FishNet skip connection unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_use_global_stats,
**kwargs):
super(SkipUnit, self).__init__(**kwargs)
with self.name_scope():
self.blocks = nn.HybridSequential(prefix="")
for i, out_channels in enumerate(out_channels_list):
self.blocks.add(FishBlock(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
def hybrid_forward(self, F, x):
x = self.blocks(x)
return x
class SkipAttUnit(HybridBlock):
"""
FishNet skip connection unit with attention block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_use_global_stats,
**kwargs):
super(SkipAttUnit, self).__init__(**kwargs)
mid_channels1 = in_channels // 2
mid_channels2 = 2 * in_channels
with self.name_scope():
self.conv1 = pre_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels1,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = pre_conv1x1_block(
in_channels=mid_channels1,
out_channels=mid_channels2,
use_bias=True,
bn_use_global_stats=bn_use_global_stats)
in_channels = mid_channels2
self.se = PreSEAttBlock(
in_channels=mid_channels2,
out_channels=out_channels_list[-1],
bn_use_global_stats=bn_use_global_stats)
self.blocks = nn.HybridSequential(prefix="")
for i, out_channels in enumerate(out_channels_list):
self.blocks.add(FishBlock(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
w = self.se(x)
x = self.blocks(x)
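        # FishNet applies the SE attention as x * w + w (scale plus shift),
        # rather than the plain x * w used in standard SE blocks.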
x = F.broadcast_add(F.broadcast_mul(x, w), w)
return x
class FishFinalBlock(HybridBlock):
"""
FishNet final block.
Parameters:
----------
in_channels : int
Number of input channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
bn_use_global_stats,
**kwargs):
super(FishFinalBlock, self).__init__(**kwargs)
mid_channels = in_channels // 2
with self.name_scope():
self.conv1 = pre_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.preactiv = PreResActivation(
in_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.preactiv(x)
return x
class FishNet(HybridBlock):
"""
FishNet model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
Parameters:
----------
direct_channels : list of list of list of int
Number of output channels for each unit along the straight path.
skip_channels : list of list of list of int
Number of output channels for each skip connection unit.
init_block_channels : int
Number of output channels for the initial unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
direct_channels,
skip_channels,
init_block_channels,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(FishNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
depth = len(direct_channels[0])
down1_channels = direct_channels[0]
up_channels = direct_channels[1]
down2_channels = direct_channels[2]
skip1_channels = skip_channels[0]
skip2_channels = skip_channels[1]
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(SEInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
down1_seq = nn.HybridSequential(prefix="")
skip1_seq = nn.HybridSequential(prefix="")
for i in range(depth + 1):
skip1_channels_list = skip1_channels[i]
if i < depth:
skip1_seq.add(SkipUnit(
in_channels=in_channels,
out_channels_list=skip1_channels_list,
bn_use_global_stats=bn_use_global_stats))
down1_channels_list = down1_channels[i]
down1_seq.add(DownUnit(
in_channels=in_channels,
out_channels_list=down1_channels_list,
bn_use_global_stats=bn_use_global_stats))
in_channels = down1_channels_list[-1]
else:
skip1_seq.add(SkipAttUnit(
in_channels=in_channels,
out_channels_list=skip1_channels_list,
bn_use_global_stats=bn_use_global_stats))
in_channels = skip1_channels_list[-1]
up_seq = nn.HybridSequential(prefix="")
skip2_seq = nn.HybridSequential(prefix="")
for i in range(depth + 1):
skip2_channels_list = skip2_channels[i]
if i > 0:
in_channels += skip1_channels[depth - i][-1]
if i < depth:
skip2_seq.add(SkipUnit(
in_channels=in_channels,
out_channels_list=skip2_channels_list,
bn_use_global_stats=bn_use_global_stats))
up_channels_list = up_channels[i]
dilation = 2 ** i
up_seq.add(UpUnit(
in_channels=in_channels,
out_channels_list=up_channels_list,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats))
in_channels = up_channels_list[-1]
else:
skip2_seq.add(Identity())
down2_seq = nn.HybridSequential(prefix="")
for i in range(depth):
down2_channels_list = down2_channels[i]
down2_seq.add(DownUnit(
in_channels=in_channels,
out_channels_list=down2_channels_list,
bn_use_global_stats=bn_use_global_stats))
in_channels = down2_channels_list[-1] + skip2_channels[depth - 1 - i][-1]
self.features.add(SesquialteralHourglass(
down1_seq=down1_seq,
skip1_seq=skip1_seq,
up_seq=up_seq,
skip2_seq=skip2_seq,
down2_seq=down2_seq))
self.features.add(FishFinalBlock(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = in_channels // 2
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(conv1x1(
in_channels=in_channels,
out_channels=classes,
use_bias=True))
self.output.add(nn.Flatten())
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_fishnet(blocks,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create FishNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 99:
direct_layers = [[2, 2, 6], [1, 1, 1], [1, 2, 2]]
skip_layers = [[1, 1, 1, 2], [4, 1, 1, 0]]
elif blocks == 150:
direct_layers = [[2, 4, 8], [2, 2, 2], [2, 2, 4]]
skip_layers = [[2, 2, 2, 4], [4, 2, 2, 0]]
else:
raise ValueError("Unsupported FishNet with number of blocks: {}".format(blocks))
direct_channels_per_layers = [[128, 256, 512], [512, 384, 256], [320, 832, 1600]]
skip_channels_per_layers = [[64, 128, 256, 512], [512, 768, 512, 0]]
    direct_channels = [[[b] * c for (b, c) in zip(ci, li)]
                       for (ci, li) in zip(direct_channels_per_layers, direct_layers)]
    skip_channels = [[[b] * c for (b, c) in zip(ci, li)]
                     for (ci, li) in zip(skip_channels_per_layers, skip_layers)]
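    # For example, with blocks == 99 the first entry expands to (illustration):
    #   direct_channels[0] == [[128, 128], [256, 256], [512] * 6]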
init_block_channels = 64
net = FishNet(
direct_channels=direct_channels,
skip_channels=skip_channels,
init_block_channels=init_block_channels,
**kwargs)
if pretrained:
        if not model_name:
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def fishnet99(**kwargs):
"""
FishNet-99 model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_fishnet(blocks=99, model_name="fishnet99", **kwargs)
def fishnet150(**kwargs):
"""
FishNet-150 model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_fishnet(blocks=150, model_name="fishnet150", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
fishnet99,
fishnet150,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != fishnet99 or weight_count == 16628904)
assert (model != fishnet150 or weight_count == 24959400)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| avg_line_length: 34.09593 | max_line_length: 115 | alphanum_fraction: 0.579205 |
| hexsha: 7948e406c9aa342568222e53eb9bf3f443c6fa7d | size: 1,735 | ext: py | lang: Python |
| max_stars: to_language_set.py | Jupaoqq/Jupaoqq_LaRL | ae64adda5627987d71f2948f499daa11e9f309ad | ["Apache-2.0"] | count: null | null / null |
| max_issues: to_language_set.py | Jupaoqq/Jupaoqq_LaRL | ae64adda5627987d71f2948f499daa11e9f309ad | ["Apache-2.0"] | count: null | null / null |
| max_forks: to_language_set.py | Jupaoqq/Jupaoqq_LaRL | ae64adda5627987d71f2948f499daa11e9f309ad | ["Apache-2.0"] | count: null | null / null |
import json
from tqdm import tqdm
def process(input_path, output1, output2, dummy=True):  # `dummy` is unused
    f = open(input_path, encoding='utf-8')
text_usr = []
text_sys = []
for line in tqdm(f):
lines=json.loads(line.strip())
seekerid=lines["initiatorWorkerId"]
recommenderid=lines["respondentWorkerId"]
contexts=lines['messages']
altitude=lines['respondentQuestions']
initial_altitude=lines['initiatorQuestions']
if (altitude and initial_altitude):
for m in contexts:
proc = m['text'].split()
procced = []
for token in proc:
if "@" in token:
token = "[ITEM]"
procced.append(token)
newstr = " ".join(procced)
if m['senderWorkerId'] == seekerid:
text_usr.append(newstr)
elif m['senderWorkerId'] == recommenderid:
text_sys.append(newstr)
    f.close()
    # print(text_sys)
    textfile1 = open(output1, "w", encoding='utf-8')
for element1 in text_usr:
textfile1.write(element1 + "\n")
textfile1.close()
textfile2 = open(output2, "w", encoding='utf-8')
for element2 in text_sys:
textfile2.write(element2 + "\n")
textfile2.close()
if __name__=='__main__':
process('data/raw/train_data.jsonl', 'data/similarity/user.txt', 'data/similarity/system.txt')
# entity('data/raw/train_data.jsonl', 'data/raw/valid_data.jsonl', 'data/raw/test_data.jsonl', 'data/negotiate/entity.txt')
| avg_line_length: 36.145833 | max_line_length: 127 | alphanum_fraction: 0.604035 |
| hexsha: 7948e475d76d462755849dccfd823b68b2dfd372 | size: 9,050 | ext: py | lang: Python |
| max_stars: src/skmultiflow/trees/nodes/ada_split_node_for_regression.py | lambertsbennett/scikit-multiflow | bc714fd5ee4f0a486adc00ec6ae39eafa64f81cc | ["BSD-3-Clause"] | count: 1 | 2020-04-16T10:17:03.000Z / 2020-04-16T10:17:03.000Z |
| max_issues: src/skmultiflow/trees/nodes/ada_split_node_for_regression.py | lambertsbennett/scikit-multiflow | bc714fd5ee4f0a486adc00ec6ae39eafa64f81cc | ["BSD-3-Clause"] | count: null | null / null |
| max_forks: src/skmultiflow/trees/nodes/ada_split_node_for_regression.py | lambertsbennett/scikit-multiflow | bc714fd5ee4f0a486adc00ec6ae39eafa64f81cc | ["BSD-3-Clause"] | count: null | null / null |
import math
from skmultiflow.trees.attribute_test import NominalAttributeMultiwayTest
from skmultiflow.trees.nodes import FoundNode
from skmultiflow.trees.nodes import SplitNode
from skmultiflow.trees.nodes import ActiveLearningNode
from skmultiflow.trees.nodes import InactiveLearningNode
from skmultiflow.trees.nodes import AdaNode
from skmultiflow.drift_detection.adwin import ADWIN
from skmultiflow.utils import check_random_state
class AdaSplitNodeForRegression(SplitNode, AdaNode):
""" Node that splits the data in a Regression Hoeffding Adaptive Tree.
Parameters
----------
split_test: skmultiflow.split_test.InstanceConditionalTest
Split test.
    class_observations: dict
        In regression tasks this dictionary carries the sufficient statistics
        needed for online variance calculation: the number of observations
        (key '0'), the sum of the target values (key '1'), and the sum of the
        squared target values (key '2').
random_state: int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
"""
def __init__(self, split_test, class_observations, random_state=None):
super().__init__(split_test, class_observations)
self._estimation_error_weight = ADWIN()
self._alternate_tree = None
self.error_change = False
self.random_state = check_random_state(random_state)
# To normalize the observed errors in the [0, 1] range
self._min_error = float('Inf')
self._max_error = float('-Inf')
# Override AdaNode
def number_leaves(self):
num_of_leaves = 0
for child in self._children.values():
if child is not None:
num_of_leaves += child.number_leaves()
return num_of_leaves
# Override AdaNode
def get_error_estimation(self):
return self._estimation_error_weight.estimation
# Override AdaNode
def get_error_width(self):
w = 0.0
if not self.is_null_error():
w = self._estimation_error_weight.width
return w
# Override AdaNode
def is_null_error(self):
return self._estimation_error_weight is None
# Override AdaNode
def learn_from_instance(self, X, y, weight, rhat, parent, parent_branch):
normalized_error = 0.0
if self.filter_instance_to_leaf(X, parent, parent_branch).node is not None:
y_pred = rhat.predict([X])[0]
normalized_error = self.get_normalized_error(y, y_pred)
if self._estimation_error_weight is None:
self._estimation_error_weight = ADWIN()
old_error = self.get_error_estimation()
# Add element to Change detector
self._estimation_error_weight.add_element(normalized_error)
# Detect change
self.error_change = self._estimation_error_weight.detected_change()
if self.error_change and old_error > self.get_error_estimation():
self.error_change = False
# Check condition to build a new alternate tree
if self.error_change:
self._alternate_tree = rhat._new_learning_node()
rhat.alternate_trees_cnt += 1
# Condition to replace alternate tree
elif self._alternate_tree is not None and not self._alternate_tree.is_null_error():
if self.get_error_width() > rhat._ERROR_WIDTH_THRESHOLD \
and self._alternate_tree.get_error_width() > rhat._ERROR_WIDTH_THRESHOLD:
old_error_rate = self.get_error_estimation()
alt_error_rate = self._alternate_tree.get_error_estimation()
fDelta = .05
fN = 1.0 / self._alternate_tree.get_error_width() + 1.0 / self.get_error_width()
bound = math.sqrt(2.0 * old_error_rate * (1.0 - old_error_rate) *
math.log(2.0 / fDelta) * fN)
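                # Hoeffding-style confidence bound on the difference of the two
                # error estimates: with ADWIN window widths w and w_alt,
                #     bound = sqrt(2 * e * (1 - e) * ln(2 / delta) * (1/w_alt + 1/w))
                # where e is the current subtree's error estimate and delta = 0.05.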
                # To check: whether bound can ever be less than (old_error_rate - alt_error_rate)
if bound < (old_error_rate - alt_error_rate):
rhat._active_leaf_node_cnt -= self.number_leaves()
rhat._active_leaf_node_cnt += self._alternate_tree.number_leaves()
self.kill_tree_children(rhat)
if parent is not None:
parent.set_child(parent_branch, self._alternate_tree)
else:
rhat._tree_root = rhat._tree_root._alternate_tree
rhat.switch_alternate_trees_cnt += 1
elif bound < alt_error_rate - old_error_rate:
if isinstance(self._alternate_tree, ActiveLearningNode):
self._alternate_tree = None
elif isinstance(self._alternate_tree, InactiveLearningNode):
self._alternate_tree = None
else:
self._alternate_tree.kill_tree_children(rhat)
rhat.pruned_alternate_trees_cnt += 1 # hat.pruned_alternate_trees_cnt to check
# Learn_From_Instance alternate Tree and Child nodes
if self._alternate_tree is not None:
self._alternate_tree.learn_from_instance(X, y, weight, rhat, parent, parent_branch)
child_branch = self.instance_child_index(X)
child = self.get_child(child_branch)
if child is not None:
child.learn_from_instance(X, y, weight, rhat, self, child_branch)
# Instance contains a categorical value previously unseen by the split
# node
elif isinstance(self.get_split_test(), NominalAttributeMultiwayTest) and \
self.get_split_test().branch_for_instance(X) < 0:
# Creates a new learning node to encompass the new observed feature
# value
leaf_node = rhat._new_learning_node()
branch_id = self.get_split_test().add_new_branch(
X[self.get_split_test().get_atts_test_depends_on()[0]]
)
self.set_child(branch_id, leaf_node)
rhat._active_leaf_node_cnt += 1
leaf_node.learn_from_instance(X, y, weight, rhat, parent, parent_branch)
# Override AdaNode
def kill_tree_children(self, rhat):
for child in self._children.values():
if child is not None:
# Delete alternate tree if it exists
if isinstance(child, SplitNode) and child._alternate_tree is not None:
rhat.pruned_alternate_trees_cnt += 1
# Recursive delete of SplitNodes
if isinstance(child, SplitNode):
child.kill_tree_children(rhat)
if isinstance(child, ActiveLearningNode):
child = None
rhat._active_leaf_node_cnt -= 1
elif isinstance(child, InactiveLearningNode):
child = None
rhat._inactive_leaf_node_cnt -= 1
# override AdaNode
def filter_instance_to_leaves(self, X, y, weight, parent, parent_branch,
update_splitter_counts=False, found_nodes=None):
if found_nodes is None:
found_nodes = []
if update_splitter_counts:
try:
self._observed_class_distribution[0] += weight
self._observed_class_distribution[1] += y * weight
self._observed_class_distribution[2] += y * y * weight
except KeyError:
self._observed_class_distribution[0] = weight
self._observed_class_distribution[1] = y * weight
self._observed_class_distribution[2] = y * y * weight
child_index = self.instance_child_index(X)
if child_index >= 0:
child = self.get_child(child_index)
if child is not None:
child.filter_instance_to_leaves(X, y, weight, parent, parent_branch,
update_splitter_counts, found_nodes)
else:
found_nodes.append(FoundNode(None, self, child_index))
if self._alternate_tree is not None:
self._alternate_tree.filter_instance_to_leaves(X, y, weight, self, -999,
update_splitter_counts, found_nodes)
def get_normalized_error(self, y, y_pred):
abs_error = abs(y - y_pred)
# Incremental maintenance of the normalization ranges
if abs_error < self._min_error:
self._min_error = abs_error
if abs_error > self._max_error:
self._max_error = abs_error
if self._min_error != self._max_error:
return (abs_error - self._min_error) / (self._max_error - self._min_error)
else:
return 0.0
| avg_line_length: 43.509615 | max_line_length: 99 | alphanum_fraction: 0.632376 |
| hexsha: 7948e50ca6dc47aa4992248e782c4b1bf69247cd | size: 11,318 | ext: py | lang: Python |
| max_stars: decode.py | sunflower036/se-pg | 2eaa32517abd324d0e495d632041f66beb514757 | ["Apache-2.0"] | count: 2 | 2019-09-25T12:20:35.000Z / 2020-01-12T08:26:03.000Z |
| max_issues: decode.py | sunflower036/se-pg | 2eaa32517abd324d0e495d632041f66beb514757 | ["Apache-2.0"] | count: 1 | 2019-08-05T13:09:34.000Z / 2019-08-07T07:46:20.000Z |
| max_forks: decode.py | sunflower036/se-pg | 2eaa32517abd324d0e495d632041f66beb514757 | ["Apache-2.0"] | count: 1 | 2020-01-12T08:26:04.000Z / 2020-01-12T08:26:04.000Z |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains code to run beam search decoding, including running ROUGE evaluation and producing JSON datafiles for the in-browser attention visualizer, which can be found here https://github.com/abisee/attn_vis"""
import os
import time
import tensorflow as tf
import beam_search
import data
import json
import pyrouge
import util
import logging
import numpy as np
FLAGS = tf.app.flags.FLAGS
SECS_UNTIL_NEW_CKPT = 60 # max number of seconds before loading new checkpoint
class BeamSearchDecoder(object):
"""Beam search decoder."""
def __init__(self, model, batcher, vocab):
"""Initialize decoder.
Args:
model: a Seq2SeqAttentionModel object.
batcher: a Batcher object.
vocab: Vocabulary object
"""
self._model = model
self._model.build_graph()
self._batcher = batcher
self._vocab = vocab
self._saver = tf.train.Saver() # we use this to load checkpoints for decoding
self._sess = tf.Session(config=util.get_config())
# Load an initial checkpoint to use for decoding
ckpt_path = util.load_ckpt(self._saver, self._sess)
if FLAGS.single_pass:
# Make a descriptive decode directory name
ckpt_name = "ckpt-" + ckpt_path.split('-')[-1] # this is something of the form "ckpt-123456"
self._decode_dir = os.path.join(FLAGS.log_root, get_decode_dir_name(ckpt_name))
if os.path.exists(self._decode_dir):
raise Exception("single_pass decode directory %s should not already exist" % self._decode_dir)
else: # Generic decode dir name
self._decode_dir = os.path.join(FLAGS.log_root, "decode")
# Make the decode dir if necessary
if not os.path.exists(self._decode_dir): os.mkdir(self._decode_dir)
if FLAGS.single_pass:
# Make the dirs to contain output written in the correct format for pyrouge
self._rouge_ref_dir = os.path.join(self._decode_dir, "reference")
if not os.path.exists(self._rouge_ref_dir): os.mkdir(self._rouge_ref_dir)
self._rouge_dec_dir = os.path.join(self._decode_dir, "decoded")
if not os.path.exists(self._rouge_dec_dir): os.mkdir(self._rouge_dec_dir)
def decode(self):
"""Decode examples until data is exhausted (if FLAGS.single_pass) and return, or decode indefinitely, loading latest checkpoint at regular intervals"""
t0 = time.time()
counter = 0
while True:
batch = self._batcher.next_batch() # 1 example repeated across batch
if batch is None: # finished decoding dataset in single_pass mode
assert FLAGS.single_pass, "Dataset exhausted, but we are not in single_pass mode"
tf.logging.info("Decoder has finished reading dataset for single_pass.")
tf.logging.info("Output has been saved in %s and %s. Now starting ROUGE eval...", self._rouge_ref_dir, self._rouge_dec_dir)
results_dict = rouge_eval(self._rouge_ref_dir, self._rouge_dec_dir)
rouge_log(results_dict, self._decode_dir)
return
original_article = batch.original_articles[0] # string
original_abstract = batch.original_abstracts[0] # string
# original_abstract_sents = batch.original_abstracts_sents[0] # list of strings
# article_withunks = data.show_art_oovs(original_article, self._vocab) # string
# abstract_withunks = data.show_abs_oovs(original_abstract, self._vocab, (batch.art_oovs[0] if FLAGS.pointer_gen else None)) # string
# Run beam search to get best Hypothesis
best_hyp = beam_search.run_beam_search(self._sess, self._model, self._vocab, batch, FLAGS.hidden_dim)
# Extract the output ids from the hypothesis and convert back to words
output_ids = [int(t) for t in best_hyp.tokens[1:]]
decoded_words = data.outputids2words(output_ids, self._vocab, (batch.art_oovs[0] if FLAGS.pointer_gen else None))
# Remove the [STOP] token from decoded_words, if necessary
      try:
        fst_stop_idx = decoded_words.index(data.STOP_DECODING) # index of the (first) [STOP] symbol
        decoded_words = decoded_words[:fst_stop_idx]
      except ValueError: # no [STOP] symbol was produced; keep the full output
        pass
decoded_output = ' '.join(decoded_words) # single string
if FLAGS.single_pass:
self.write_for_rouge(original_abstract, decoded_words, counter) # write ref summary and decoded summary to file, to eval with pyrouge later
counter += 1 # this is how many examples we've decoded
else:
print_results(original_article, original_abstract, decoded_output) # log output to screen
self.write_for_attnvis(original_article, original_abstract, decoded_words, best_hyp.attn_dists, best_hyp.p_gens) # write info to .json file for visualization tool
# Check if SECS_UNTIL_NEW_CKPT has elapsed; if so return so we can load a new checkpoint
t1 = time.time()
if t1-t0 > SECS_UNTIL_NEW_CKPT:
tf.logging.info('We\'ve been decoding with same checkpoint for %i seconds. Time to load new checkpoint', t1-t0)
_ = util.load_ckpt(self._saver, self._sess)
t0 = time.time()
def write_for_rouge(self, reference_sents, decoded_words, ex_index):
"""Write output to file in correct format for eval with pyrouge. This is called in single_pass mode.
Args:
reference_sents: list of strings
decoded_words: list of strings
ex_index: int, the index with which to label the files
"""
# First, divide decoded output into sentences
decoded_sents = []
while len(decoded_words) > 0:
try:
fst_period_idx = decoded_words.index(".")
except ValueError: # there is text remaining that doesn't end in "."
fst_period_idx = len(decoded_words)
sent = decoded_words[:fst_period_idx+1] # sentence up to and including the period
decoded_words = decoded_words[fst_period_idx+1:] # everything else
decoded_sents.append(' '.join(sent))
# pyrouge calls a perl script that puts the data into HTML files.
# Therefore we need to make our output HTML safe.
decoded_sents = [make_html_safe(w) for w in decoded_sents]
reference_sents = [make_html_safe(w) for w in reference_sents]
# Write to file
ref_file = os.path.join(self._rouge_ref_dir, "%06d_reference.txt" % ex_index)
decoded_file = os.path.join(self._rouge_dec_dir, "%06d_decoded.txt" % ex_index)
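    # Note: these zero-padded filenames must match the patterns configured in
    # rouge_eval() below ('#ID#_reference.txt' and '(\d+)_decoded.txt').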
with open(ref_file, "w") as f:
for idx,sent in enumerate(reference_sents):
f.write(sent) if idx==len(reference_sents)-1 else f.write(sent+"\n")
with open(decoded_file, "w") as f:
for idx,sent in enumerate(decoded_sents):
f.write(sent) if idx==len(decoded_sents)-1 else f.write(sent+"\n")
tf.logging.info("Wrote example %i to file" % ex_index)
def write_for_attnvis(self, article, abstract, decoded_words, attn_dists, p_gens):
"""Write some data to json file, which can be read into the in-browser attention visualizer tool:
https://github.com/abisee/attn_vis
Args:
article: The original article string.
abstract: The human (correct) abstract string.
attn_dists: List of arrays; the attention distributions.
decoded_words: List of strings; the words of the generated summary.
p_gens: List of scalars; the p_gen values. If not running in pointer-generator mode, list of None.
"""
article_lst = article.split() # list of words
decoded_lst = decoded_words # list of decoded words
to_write = {
'article_lst': [make_html_safe(t) for t in article_lst],
'decoded_lst': [make_html_safe(t) for t in decoded_lst],
'abstract_str': make_html_safe(abstract),
'attn_dists': attn_dists
}
if FLAGS.pointer_gen:
to_write['p_gens'] = p_gens
output_fname = os.path.join(self._decode_dir, 'attn_vis_data.json')
with open(output_fname, 'w') as output_file:
json.dump(to_write, output_file)
tf.logging.info('Wrote visualization data to %s', output_fname)
def print_results(article, abstract, decoded_output):
"""Prints the article, the reference summmary and the decoded summary to screen"""
print("---------------------------------------------------------------------------")
tf.logging.info('ARTICLE: %s', article)
tf.logging.info('REFERENCE SUMMARY: %s', abstract)
tf.logging.info('GENERATED SUMMARY: %s', decoded_output)
print("---------------------------------------------------------------------------")
def make_html_safe(s):
  """Replace any angled brackets in string s to avoid interfering with HTML attention visualizer."""
  # str.replace returns a new string, so the result must be reassigned
  # (e.g. make_html_safe("<s>") -> "&lt;s&gt;").
  s = s.replace("<", "&lt;")
  s = s.replace(">", "&gt;")
  return s
def rouge_eval(ref_dir, dec_dir):
"""Evaluate the files in ref_dir and dec_dir with pyrouge, returning results_dict"""
r = pyrouge.Rouge155()
r.model_filename_pattern = '#ID#_reference.txt'
  r.system_filename_pattern = r'(\d+)_decoded.txt'
r.model_dir = ref_dir
r.system_dir = dec_dir
logging.getLogger('global').setLevel(logging.WARNING) # silence pyrouge logging
rouge_results = r.convert_and_evaluate()
return r.output_to_dict(rouge_results)
def rouge_log(results_dict, dir_to_write):
"""Log ROUGE results to screen and write to file.
Args:
results_dict: the dictionary returned by pyrouge
dir_to_write: the directory where we will write the results to"""
log_str = ""
for x in ["1","2","l"]:
log_str += "\nROUGE-%s:\n" % x
for y in ["f_score", "recall", "precision"]:
key = "rouge_%s_%s" % (x,y)
key_cb = key + "_cb"
key_ce = key + "_ce"
val = results_dict[key]
val_cb = results_dict[key_cb]
val_ce = results_dict[key_ce]
log_str += "%s: %.4f with confidence interval (%.4f, %.4f)\n" % (key, val, val_cb, val_ce)
tf.logging.info(log_str) # log to screen
results_file = os.path.join(dir_to_write, "ROUGE_results.txt")
tf.logging.info("Writing final ROUGE results to %s...", results_file)
with open(results_file, "w") as f:
f.write(log_str)
def get_decode_dir_name(ckpt_name):
"""Make a descriptive name for the decode dir, including the name of the checkpoint we use to decode. This is called in single_pass mode."""
if "train" in FLAGS.data_path: dataset = "train"
elif "val" in FLAGS.data_path: dataset = "val"
elif "test" in FLAGS.data_path: dataset = "test"
else: raise ValueError("FLAGS.data_path %s should contain one of train, val or test" % (FLAGS.data_path))
dirname = "decode_%s_%imaxenc_%ibeam_%imindec_%imaxdec" % (dataset, FLAGS.max_enc_steps, FLAGS.beam_size, FLAGS.min_dec_steps, FLAGS.max_dec_steps)
if ckpt_name is not None:
dirname += "_%s" % ckpt_name
return dirname
| avg_line_length: 44.559055 | max_line_length: 222 | alphanum_fraction: 0.696943 |
| hexsha: 7948e7f255e1003f8d73f2ed1e766a3532505b26 | size: 26,455 | ext: py | lang: Python |
| max_stars: judge/views.py | vbsinha/pdp-judge | adb93228cc0e5881713fd35807430fd4be8e3f9c | ["MIT"] | count: 8 | 2019-06-13T13:17:13.000Z / 2022-01-10T17:51:41.000Z |
| max_issues: judge/views.py | vbsinha/autojudge | adb93228cc0e5881713fd35807430fd4be8e3f9c | ["MIT"] | count: 25 | 2019-05-27T15:12:45.000Z / 2021-06-10T20:35:13.000Z |
| max_forks: judge/views.py | vbsinha/pdp-judge | adb93228cc0e5881713fd35807430fd4be8e3f9c | ["MIT"] | count: 3 | 2021-01-29T07:25:29.000Z / 2021-12-29T10:16:07.000Z |
import os
from django.urls import reverse
from django.core.files import File
from django.utils import timezone
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.shortcuts import render, redirect, get_object_or_404
from . import handler
from .models import Contest, Problem, TestCase, Submission
from .forms import NewContestForm, AddPersonToContestForm, DeletePersonFromContestForm
from .forms import NewProblemForm, EditProblemForm, NewSubmissionForm, AddTestCaseForm
from .forms import NewCommentForm, UpdateContestForm, AddPosterScoreForm
def _get_user(request) -> User:
if request.user.is_authenticated:
# For superusers without email ID, we have to create a dummy email ID.
# This is a hotpatch: we need to fix the createsuperuser.
if request.user.email == '':
if request.user.is_superuser:
request.user.email = request.user.username + '@autojudge.superuser'
else:
return None
return request.user
else:
return None
def _return_file_as_response(path_name):
f = File(open(path_name, 'rb'))
response = HttpResponse(f, content_type='application/octet-stream')
f.close()
f_name = os.path.basename(path_name)
response['Content-Disposition'] = 'attachment; filename="{}"'.format(f_name)
return response
def handler404(request, *args):
"""
Renders 404 page.
:param request: the request object used
:type request: HttpRequest
"""
return render(request, '404.html', status=404)
def handler500(request, *args):
"""
Renders 500 page.
:param request: the request object used
:type request: HttpRequest
"""
return render(request, '500.html', status=500)
def index(request):
"""
Renders the index page.
:param request: the request object used
:type request: HttpRequest
"""
context = {}
user = _get_user(request)
if user is not None:
status, maybe_error = handler.process_person(request.user.email)
if not status:
return handler404(request)
contests = Contest.objects.all()
permissions = [handler.get_personcontest_permission(
None if user is None else user.email, contest.pk) for contest in contests]
context['contests'] = zip(contests, permissions)
return render(request, 'judge/index.html', context)
def new_contest(request):
"""
Renders view for the page to create a new contest.
:param request: the request object used
:type request: HttpRequest
"""
user = _get_user(request)
if user is None:
return handler404(request)
if request.method == 'POST':
form = NewContestForm(request.POST)
if form.is_valid():
status, code_or_error = handler.process_contest(**form.cleaned_data)
if status:
handler.add_person_to_contest(user.email, code_or_error, True)
return redirect(reverse('judge:index'))
else:
form.add_error(None, code_or_error)
else:
form = NewContestForm()
context = {'form': form}
return render(request, 'judge/new_contest.html', context)
def get_people(request, contest_id, role):
"""
Function to render the page for viewing participants and posters
for a contest based on :attr:`role`.
:param request: the request object used
:type request: HttpRequest
:param contest_id: the contest ID
:type contest_id: int
:param role: ``True`` for Poster, ``False`` for Participant
:type role: bool
"""
user = _get_user(request)
perm = handler.get_personcontest_permission(
None if user is None else user.email, contest_id)
if perm is None:
return handler404(request)
if role is None:
return handler404(request)
context = {'contest_id': contest_id,
'type': 'Poster' if role else 'Participant'}
if request.method == 'POST' and perm is True:
form = DeletePersonFromContestForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
status, maybe_error = handler.delete_personcontest(email, contest_id)
if not status:
form.add_error(None, maybe_error)
else:
form = DeletePersonFromContestForm()
context['form'] = form
if role:
status, value_or_error = handler.get_posters(contest_id)
else:
status, value_or_error = handler.get_participants(contest_id)
if status:
context['persons'] = value_or_error
else:
return handler404(request)
context['permission'] = perm
return render(request, 'judge/contest_persons.html', context)
def get_posters(request, contest_id):
"""
Renders the page for posters of a contest.
Dispatches to :func:`get_people` with :attr:`role` set to ``True``.
:param request: the request object used
:type request: HttpRequest
:param contest_id: the contest ID
:type contest_id: int
"""
return get_people(request, contest_id, True)
def get_participants(request, contest_id):
"""
    Renders the page for participants of a contest.
Dispatches to :func:`get_people` with :attr:`role` set to ``False``.
:param request: the request object used
:type request: HttpRequest
:param contest_id: the contest ID
:type contest_id: int
"""
return get_people(request, contest_id, False)
def add_person(request, contest_id, role):
"""
Function to render the page for adding a person - participant or poster to
a contest.
:param request: the request object used
:type request: HttpRequest
:param contest_id: the contest ID
:type contest_id: int
:param role: ``True`` for Poster, ``False`` for Participant
:type role: bool
"""
user = _get_user(request)
perm = handler.get_personcontest_permission(
None if user is None else user.email, contest_id)
if not (perm is True):
return handler404(request)
context = {'contest_id': contest_id,
'type': 'Poster' if role else 'Participant'}
if request.method == 'POST':
form = AddPersonToContestForm(request.POST)
if form.is_valid():
emails = form.cleaned_data['emails']
status, maybe_error = handler.add_persons_to_contest(emails, contest_id, role)
if status:
return redirect(reverse('judge:get_{}s'.format(context['type'].lower()),
args=(contest_id,)))
else:
form.add_error(None, maybe_error)
else:
form = AddPersonToContestForm()
context['form'] = form
return render(request, 'judge/contest_add_person.html', context)
def add_poster(request, contest_id):
"""
Renders the page for adding a poster.
Dispatches to :func:`add_person` with :attr:`role` set to ``True``.
:param request: the request object used
:type request: HttpRequest
:param contest_id: the contest ID
:type contest_id: int
"""
return add_person(request, contest_id, True)
def add_participant(request, contest_id):
"""
Renders the page for adding a participant.
Dispatches to :func:`add_person` with :attr:`role` set to ``False``.
:param request: the request object used
:type request: HttpRequest
:param contest_id: the contest ID
:type contest_id: int
"""
return add_person(request, contest_id, False)
def contest_detail(request, contest_id):
"""
Renders the contest preview page after the contest has been created.
:param request: the request object used
:type request: HttpRequest
:param contest_id: the contest ID
:type contest_id: int
"""
contest = get_object_or_404(Contest, pk=contest_id)
user = _get_user(request)
perm = handler.get_personcontest_permission(
None if user is None else user.email, contest_id)
if perm is None:
return handler404(request)
problems = Problem.objects.filter(contest_id=contest_id)
status, leaderboard = handler.get_leaderboard(contest_id)
curr_time = timezone.now()
context = {
'contest': contest,
'type': 'Poster' if perm else 'Participant',
'problems': problems,
'leaderboard_status': status,
'leaderboard': leaderboard,
'curr_time': curr_time,
}
if perm is True:
if request.method == 'POST':
form = UpdateContestForm(request.POST)
if form.is_valid():
if (curr_time < contest.soft_end_datetime or
(form.cleaned_data['contest_soft_end'] == contest.soft_end_datetime and
curr_time < contest.hard_end_datetime)):
try:
contest.start_datetime = form.cleaned_data['contest_start']
contest.soft_end_datetime = form.cleaned_data['contest_soft_end']
contest.hard_end_datetime = form.cleaned_data['contest_hard_end']
contest.save()
except Exception as e:
form.add_error(None, str(e))
else:
form.add_error(None, 'Deadline cannot be extended if it has passed')
else:
form = UpdateContestForm(initial={
'contest_start': contest.start_datetime,
'contest_soft_end': contest.soft_end_datetime,
'contest_hard_end': contest.hard_end_datetime,
})
context['form'] = form
return render(request, 'judge/contest_detail.html', context)
def contest_scores_csv(request, contest_id):
"""
Function to provide the facility to download a CSV of scores
of participants in a contest at a given point in time.
:param request: the request object used
:type request: HttpRequest
:param contest_id: the contest ID
:type contest_id: int
"""
user = _get_user(request)
perm = handler.get_personcontest_permission(
None if user is None else user.email, contest_id)
if perm:
status, csv_or_error = handler.get_csv(contest_id)
if status:
response = HttpResponse(csv_or_error.read())
response['Content-Disposition'] = \
"attachment; filename=contest_{}.csv".format(contest_id)
return response
return handler404(request)
def delete_contest(request, contest_id):
"""
Function to provide the option to delete a contest.
:param request: the request object used
:type request: HttpRequest
:param contest_id: the contest ID
:type contest_id: int
"""
user = _get_user(request)
perm = handler.get_personcontest_permission(
None if user is None else user.email, contest_id)
if perm and request.method == 'POST':
status, _ = handler.delete_contest(contest_id)
if status:
return redirect(reverse('judge:index'))
else:
return handler404(request)
else:
return handler404(request)
def delete_problem(request, problem_id):
"""
Function to provide the option to delete a problem.
:param request: the request object used
:type request: HttpRequest
:param problem_id: the problem ID
:type problem_id: str
"""
user = _get_user(request)
problem = get_object_or_404(Problem, pk=problem_id)
contest_id = problem.contest.pk
perm = handler.get_personproblem_permission(
None if user is None else user.email, problem_id)
if timezone.now() > problem.contest.start_datetime:
return handler404(request)
if perm and request.method == 'POST':
status, _ = handler.delete_problem(problem_id)
if status:
return redirect(reverse('judge:contest_detail', args=(contest_id,)))
else:
return handler404(request)
else:
return handler404(request)
def delete_testcase(request, problem_id, testcase_id):
"""
Function to provide the option to delete a test-case of a particular problem.
:param request: the request object used
:type request: HttpRequest
:param problem_id: the problem ID
:type problem_id: str
:param testcase_id: the testcase ID
:type testcase_id: str
"""
user = _get_user(request)
perm = handler.get_personproblem_permission(
None if user is None else user.email, problem_id)
testcase = get_object_or_404(TestCase, pk=testcase_id)
if timezone.now() > testcase.problem.contest.start_datetime:
return handler404(request)
if problem_id == testcase.problem.pk and perm and request.method == 'POST':
status, _ = handler.delete_testcase(testcase_id)
if status:
return redirect(reverse('judge:problem_detail', args=(problem_id,)))
else:
return handler404(request)
else:
return handler404(request)
def problem_detail(request, problem_id):
"""
Renders the problem preview page after the problem has been created.
This preview will be changed based on the role of the user (poster or participant).
:param request: the request object used
:type request: HttpRequest
:param problem_id: the problem ID
:type problem_id: str
"""
problem = get_object_or_404(Problem, pk=problem_id)
user = _get_user(request)
perm = handler.get_personproblem_permission(
None if user is None else user.email, problem_id)
if perm is None:
return handler404(request)
public_tests = TestCase.objects.filter(problem_id=problem_id, public=True)
private_tests = TestCase.objects.filter(problem_id=problem_id, public=False)
context = {
'problem': problem,
'type': 'Poster' if perm else 'Participant',
}
if perm is False and user is None:
pass
elif perm is False and user.is_authenticated:
if request.method == 'POST':
form = NewSubmissionForm(request.POST, request.FILES)
if form.is_valid():
status, maybe_error = handler.process_submission(
problem_id, user.email, **form.cleaned_data, timestamp=timezone.now())
if status:
return redirect(reverse('judge:problem_submissions', args=(problem_id,)))
            else:
form.add_error(None, maybe_error)
else:
form = NewSubmissionForm()
context['form'] = form
if perm is True:
if timezone.now() < problem.contest.start_datetime:
if request.method == 'POST':
form = AddTestCaseForm(request.POST, request.FILES)
if form.is_valid():
status, maybe_error = handler.process_testcase(problem_id, **form.cleaned_data)
if status:
                        return redirect(reverse('judge:problem_submissions', args=(problem_id,)))
else:
form.add_error(None, maybe_error)
else:
form = AddTestCaseForm()
else:
form = None
context['form'] = form
context['public_tests'] = []
context['private_tests'] = []
for t in public_tests:
input_file = open(t.inputfile.path, 'r')
output_file = open(t.outputfile.path, 'r')
context['public_tests'].append((input_file.read(), output_file.read(), t.pk))
input_file.close()
output_file.close()
for t in private_tests:
input_file = open(t.inputfile.path, 'r')
output_file = open(t.outputfile.path, 'r')
context['private_tests'].append((input_file.read(), output_file.read(), t.pk))
input_file.close()
output_file.close()
context['curr_time'] = timezone.now()
return render(request, 'judge/problem_detail.html', context)
def problem_starting_code(request, problem_id: str):
"""
Function to provide the facility to download the starting code
for a problem.
:param request: the request object used
:type request: HttpRequest
:param problem_id: the problem ID
:type problem_id: str
"""
problem = get_object_or_404(Problem, pk=problem_id)
user = _get_user(request)
perm = handler.get_personproblem_permission(None if user is None else user.email, problem_id)
if perm is None:
return handler404(request)
elif problem.starting_code:
return _return_file_as_response(problem.starting_code.path)
else:
return handler404(request)
def problem_compilation_script(request, problem_id: str):
"""
Function to provide the facility to download the compilation script
for a problem after creating the problem.
:param request: the request object used
:type request: HttpRequest
:param problem_id: the problem ID
:type problem_id: str
"""
problem = get_object_or_404(Problem, pk=problem_id)
user = _get_user(request)
perm = handler.get_personproblem_permission(None if user is None else user.email, problem_id)
if perm is None or not perm:
return handler404(request)
elif problem.compilation_script:
return _return_file_as_response(problem.compilation_script.path)
else:
return handler404(request)
def problem_test_script(request, problem_id: str):
"""
Function to provide the facility to download the testing script
for a problem after creating the problem.
:param request: the request object used
:type request: HttpRequest
:param problem_id: the problem ID
:type problem_id: str
"""
problem = get_object_or_404(Problem, pk=problem_id)
user = _get_user(request)
perm = handler.get_personproblem_permission(None if user is None else user.email, problem_id)
if perm is None or not perm:
return handler404(request)
elif problem.test_script:
return _return_file_as_response(problem.test_script.path)
else:
return handler404(request)
def problem_default_script(request, script_name: str):
"""
Function to provide the facility to download the
default compilation or test script.
:param request: the request object used
:type request: HttpRequest
:param script_name: name of the script - one of `compilation_script` or `test_script`
:type script_name: str
"""
if script_name not in ['compilation_script', 'test_script']:
return handler404(request)
else:
return _return_file_as_response(os.path.join('judge', 'default', script_name + '.sh'))
def new_problem(request, contest_id):
"""
Renders view for the page to create a new problem in a contest.
:param request: the request object used
:type request: HttpRequest
:param contest_id: the contest ID
:type contest_id: int
"""
contest = get_object_or_404(Contest, pk=contest_id)
user = _get_user(request)
perm = handler.get_personcontest_permission(
None if user is None else user.email, contest_id)
if not (perm is True):
return handler404(request)
context = {'contest': contest}
if timezone.now() > contest.start_datetime:
return handler404(request)
if request.method == 'POST':
form = NewProblemForm(request.POST, request.FILES)
if form.is_valid():
status, maybe_error = handler.process_problem(contest_id=contest_id,
**form.cleaned_data)
if status:
code = form.cleaned_data['code']
return redirect(reverse('judge:problem_detail', args=(code,)))
else:
form.add_error(None, maybe_error)
else:
form = NewProblemForm()
context['form'] = form
return render(request, 'judge/new_problem.html', context)
def edit_problem(request, problem_id):
"""
Renders view for the page to edit selected fields of a pre-existing problem.
:param request: the request object used
:type request: HttpRequest
:param problem_id: the problem ID
:type problem_id: str
"""
problem = get_object_or_404(Problem, pk=problem_id)
contest = get_object_or_404(Contest, pk=problem.contest_id)
user = _get_user(request)
perm = handler.get_personcontest_permission(
None if user is None else user.email, contest.pk)
if not (perm is True):
return handler404(request)
context = {'contest': contest}
if request.method == 'POST':
form = EditProblemForm(request.POST)
if form.is_valid():
status, maybe_error = handler.update_problem(problem.code, **form.cleaned_data)
if status:
return redirect(reverse('judge:problem_detail', args=(problem.code,)))
else:
form.add_error(None, maybe_error)
else:
required_fields = ['name', 'statement', 'input_format', 'output_format', 'difficulty']
form = EditProblemForm({field: getattr(problem, field) for field in required_fields})
context['form'] = form
context['problem'] = problem
return render(request, 'judge/edit_problem.html', context)
def problem_submissions(request, problem_id: str):
"""
Renders the page where all submissions to a given problem can be seen.
For posters, this renders a set of tables for each participant.
For participants, this renders a table with the scores of their submissions only.
:param request: the request object used
:type request: HttpRequest
:param problem_id: the problem ID
:type problem_id: str
"""
user = _get_user(request)
perm = handler.get_personproblem_permission(
None if user is None else user.email, problem_id)
if perm is None:
return handler404(request)
problem = get_object_or_404(Problem, pk=problem_id)
context = {'problem': problem, 'perm': perm}
if request.method == 'POST':
form = NewCommentForm(request.POST)
if form.is_valid():
if perm is False and form.cleaned_data['participant_email'] != user.email:
form.add_error(None, 'Your comment was not posted.')
else:
status, maybe_error = handler.process_comment(
problem_id, form.cleaned_data['participant_email'], user.email,
timezone.now(), form.cleaned_data['comment'])
if not status:
form.add_error(None, maybe_error)
else:
form = NewCommentForm()
else:
form = NewCommentForm()
submissions = {}
if perm:
status, all_subs_or_error = handler.get_submissions(problem_id, None)
if status:
for email, subs in all_subs_or_error.items():
comment_set = handler.get_comments(problem_id, email)
submissions[email] = (subs, comment_set)
else:
return handler404(request)
elif user is not None:
status, subs_or_error = handler.get_submissions(problem_id, user.email)
if status:
context['participant'] = True
comments = handler.get_comments(problem_id, user.email)
submissions[user.email] = (subs_or_error[user.email], comments)
else:
return handler404(request)
else:
return handler404(request)
context['form'] = form
context['submissions'] = submissions
return render(request, 'judge/problem_submissions.html', context)
def submission_download(request, submission_id: str):
"""
Function to provide the facility to download a given submission.
:param request: the request object used
:type request: HttpRequest
:param submission_id: the submission ID
:type submission_id: str
"""
user = _get_user(request)
submission = get_object_or_404(Submission, pk=submission_id)
perm = handler.get_personproblem_permission(
None if user is None else user.email, submission.problem.pk)
if user is None:
return handler404(request)
if perm or user.email == submission.participant.pk:
return _return_file_as_response(submission.submission_file.path)
else:
return handler404(request)
def submission_detail(request, submission_id: str):
"""
    Renders the page showing a detailed breakdown of the judge's
    evaluation, additional scores, error messages, and so on.
:param request: the request object used
:type request: HttpRequest
:param submission_id: the submission ID
:type submission_id: str
"""
user = _get_user(request)
submission = get_object_or_404(Submission, pk=submission_id)
perm = handler.get_personproblem_permission(
None if user is None else user.email, submission.problem.pk)
context = {'submission': submission, 'problem': submission.problem}
if user is None:
return handler404(request)
if perm or user.email == submission.participant.pk:
context['type'] = 'Poster' if perm else 'Participant'
if perm and submission.problem.contest.enable_poster_score:
if request.method == 'POST':
form = AddPosterScoreForm(request.POST)
if form.is_valid():
status, maybe_error = handler.update_poster_score(submission.pk,
form.cleaned_data['score'])
if not status:
form.add_error(None, maybe_error)
else:
form = AddPosterScoreForm(initial={'score': submission.poster_score})
context['form'] = form
status, info_or_error = handler.get_submission_status(submission_id)
if status:
context['test_results'] = info_or_error[0]
context['judge_score'] = info_or_error[1][0]
context['poster_score'] = info_or_error[1][1]
context['linter_score'] = info_or_error[1][2]
context['final_score'] = info_or_error[1][3]
context['timestamp'] = info_or_error[1][4]
context['file_type'] = info_or_error[1][5]
else:
return handler404(request)
return render(request, 'judge/submission_detail.html', context)
else:
return handler404(request)
| 36.489655
| 99
| 0.651106
|
7948e89744b3aa14125735acced5bb01bc4416b0
| 4,485
|
py
|
Python
|
testarch/unet/unet_trainer.py
|
weihao94/deepdyn
|
e8a1d6620f48094b76ea3d272c57d40c9ae6d949
|
[
"MIT"
] | 42
|
2019-11-07T03:18:53.000Z
|
2021-12-08T09:42:00.000Z
|
testarch/unet/unet_trainer.py
|
LucasLee-ff/deepdyn
|
48018b62a245dd791e45d60b28068489a8c32742
|
[
"MIT"
] | 17
|
2020-01-25T12:58:18.000Z
|
2022-03-11T23:32:44.000Z
|
testarch/unet/unet_trainer.py
|
aksish/ature
|
48018b62a245dd791e45d60b28068489a8c32742
|
[
"MIT"
] | 21
|
2019-11-21T08:34:54.000Z
|
2022-02-27T16:24:00.000Z
|
"""
### author: Aashis Khanal
### sraashis@gmail.com
### date: 9/10/2018
"""
import os
import numpy as np
import torch
from PIL import Image as IMG
import torch.nn.functional as F
import viz.nviz as plt
from torchtrainer.torchtrainer import NNTrainer
from utils.measurements import ScoreAccumulator
sep = os.sep
class UNetTrainer(NNTrainer):
def __init__(self, **kwargs):
NNTrainer.__init__(self, **kwargs)
self.patch_shape = self.conf.get('Params').get('patch_shape')
self.patch_offset = self.conf.get('Params').get('patch_offset')
# Headers for log files
def get_log_headers(self):
return {
'train': 'ID,EPOCH,BATCH,PRECISION,RECALL,F1,ACCURACY,LOSS',
'validation': 'ID,PRECISION,RECALL,F1,ACCURACY',
'test': 'ID,PRECISION,RECALL,F1,ACCURACY'
}
def _on_epoch_end(self, **kw):
self.plot_column_keys(file=kw['log_file'], batches_per_epoch=kw['data_loader'].__len__(),
keys=['F1', 'LOSS', 'ACCURACY'])
plt.plot_cmap(file=kw['log_file'], save=True, x='PRECISION', y='RECALL')
def _on_validation_end(self, **kw):
self.plot_column_keys(file=kw['log_file'], batches_per_epoch=kw['data_loader'].__len__(),
keys=['F1', 'ACCURACY'])
plt.plot_cmap(file=kw['log_file'], save=True, x='PRECISION', y='RECALL')
def _on_test_end(self, **kw):
plt.y_scatter(file=kw['log_file'], y='F1', label='ID', save=True, title='Test')
plt.y_scatter(file=kw['log_file'], y='ACCURACY', label='ID', save=True, title='Test')
plt.xy_scatter(file=kw['log_file'], save=True, x='PRECISION', y='RECALL', label='ID', title='Test')
    # This method takes n torch dataloaders, one per image (each loader holding a single image), and evaluates after training.
    # It is also the base method for both testing and validation.
def evaluate(self, data_loaders=None, logger=None, gen_images=False, score_acc=None):
assert isinstance(score_acc, ScoreAccumulator)
for loader in data_loaders:
img_obj = loader.dataset.image_objects[0]
x, y = img_obj.working_arr.shape[0], img_obj.working_arr.shape[1]
predicted_img = torch.FloatTensor(x, y).fill_(0).to(self.device)
map_img = torch.FloatTensor(x, y).fill_(0).to(self.device)
gt = torch.FloatTensor(img_obj.ground_truth).to(self.device)
for i, data in enumerate(loader, 1):
inputs, labels = data['inputs'].to(self.device).float(), data['labels'].to(self.device).float()
clip_ix = data['clip_ix'].to(self.device).int()
outputs = F.softmax(self.model(inputs), 1)
_, predicted = torch.max(outputs, 1)
predicted_map = outputs[:, 1, :, :]
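                # Stitch each patch prediction back into the full-size image using its clip indices (rows p:q, cols r:s).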
for j in range(predicted_map.shape[0]):
p, q, r, s = clip_ix[j]
predicted_img[p:q, r:s] = predicted[j]
map_img[p:q, r:s] = predicted_map[j]
print('Batch: ', i, end='\r')
img_score = ScoreAccumulator()
if gen_images: #### Test mode
map_img = map_img.cpu().numpy() * 255
predicted_img = predicted_img.cpu().numpy() * 255
img_score.reset().add_array(predicted_img, img_obj.ground_truth)
### Only save scores for test images############################
self.conf['acc'].accumulate(img_score) # Global score
prf1a = img_score.get_prfa()
print(img_obj.file_name, ' PRF1A', prf1a)
self.flush(logger, ','.join(str(x) for x in [img_obj.file_name] + prf1a))
#################################################################
IMG.fromarray(np.array(predicted_img, dtype=np.uint8)).save(
os.path.join(self.log_dir, 'pred_' + img_obj.file_name.split('.')[0] + '.png'))
IMG.fromarray(np.array(map_img, dtype=np.uint8)).save(
os.path.join(self.log_dir, img_obj.file_name.split('.')[0] + '.png'))
else: #### Validation mode
img_score.reset().add_tensor(predicted_img, gt)
score_acc.accumulate(img_score)
prf1a = img_score.get_prfa()
print(img_obj.file_name, ' PRF1A', prf1a)
self.flush(logger, ','.join(str(x) for x in [img_obj.file_name] + prf1a))
| 44.85
| 111
| 0.580379
|
7948e910f73ed83230193396c9b0e0c0fda347c6
| 513
|
py
|
Python
|
core/migrations/0032_unit_reveal_at_level.py
|
ankanb240/otis-web
|
45eda65b419705c65c02b15872a137969d53d8e9
|
[
"MIT"
] | 15
|
2021-08-28T18:18:37.000Z
|
2022-03-13T07:48:15.000Z
|
core/migrations/0032_unit_reveal_at_level.py
|
ankanb240/otis-web
|
45eda65b419705c65c02b15872a137969d53d8e9
|
[
"MIT"
] | 65
|
2021-08-20T02:37:27.000Z
|
2022-02-07T17:19:23.000Z
|
core/migrations/0032_unit_reveal_at_level.py
|
ankanb240/otis-web
|
45eda65b419705c65c02b15872a137969d53d8e9
|
[
"MIT"
] | 31
|
2020-01-09T02:35:29.000Z
|
2022-03-13T07:48:18.000Z
|
# Generated by Django 3.2.7 on 2021-09-13 21:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0031_remove_semester_classroom_url'),
]
operations = [
migrations.AddField(
model_name='unit',
name='reveal_at_level',
field=models.PositiveSmallIntegerField(blank=True, help_text='If a number is specified here, the unit is added automatically at that level', null=True),
),
]
| 27
| 164
| 0.654971
|
7948e91200eeb3248052eac89f880a11edcdbef9
| 3,754
|
py
|
Python
|
docs-src/conf.py
|
erykoff/generic-catalog-reader
|
bc6267ac41b9f68106ed6065184469ac13fdc0b6
|
[
"MIT"
] | null | null | null |
docs-src/conf.py
|
erykoff/generic-catalog-reader
|
bc6267ac41b9f68106ed6065184469ac13fdc0b6
|
[
"MIT"
] | null | null | null |
docs-src/conf.py
|
erykoff/generic-catalog-reader
|
bc6267ac41b9f68106ed6065184469ac13fdc0b6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Generic Catalog Reader (GCR) documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 15 15:10:50 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Generic Catalog Reader (GCR)'
copyright = '2017-2018, Yao-Yuan Mao'
author = 'Yao-Yuan Mao'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.8.2'
# The full version, including alpha/beta/rc tags.
release = '0.8.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
#'navigation.html',
#'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
#'donate.html',
]
}
| 32.08547
| 81
| 0.713372
|
7948e9a427ab0042c780d733d881812f9c977bdb
| 6,008
|
py
|
Python
|
problem/zipcode_stats/place.py
|
jhanley634/testing-tools
|
3f3f8a34df53015347e1e1cc37d20c8d03652cad
|
[
"MIT"
] | null | null | null |
problem/zipcode_stats/place.py
|
jhanley634/testing-tools
|
3f3f8a34df53015347e1e1cc37d20c8d03652cad
|
[
"MIT"
] | 3
|
2020-09-07T17:24:36.000Z
|
2020-09-08T17:37:33.000Z
|
problem/zipcode_stats/place.py
|
jhanley634/testing-tools
|
3f3f8a34df53015347e1e1cc37d20c8d03652cad
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# Copyright 2018 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
"""Finds demographic statistics based on zipcode.
"""
import io
import os
import unittest
import zipfile
import requests
import sqlalchemy
import sqlalchemy.engine.url
import sqlalchemy.exc
import sqlalchemy.ext.automap
import sqlalchemy.orm.session
import uszipcode.search
from problem.zipcode_stats.place_table import t_places
class ZipcodeStats:
def __init__(self, places_mgr=None):
if places_mgr is None:
places_mgr = Places2kMgr()
self.places_mgr = places_mgr
self.zse = uszipcode.search.SearchEngine()
def get_city_state(self, zipcode):
r = self.zse.by_zipcode(zipcode)
return f'{r.city} {r.state}'
def get_lat_lng(self):
pass
class Place:
__table__ = 'place'
class Places2kMgr:
"""Manages a sqlite DB originally drawn from
http://www.census.gov/tiger/tms/gazetteer/places2k.txt.
"""
def __init__(self, dir='/tmp',
db_file='places.db', in_file='places2k.txt'):
self.engine = None
db_file, in_file = [os.path.join(dir, f) for f in [db_file, in_file]]
if os.path.exists(db_file):
os.unlink(db_file)
db_url = sqlalchemy.engine.url.URL(
**dict(drivername='sqlite', database=db_file))
self.engine = sqlalchemy.create_engine(db_url)
if not os.path.exists(db_file):
if not os.path.exists(in_file):
self._download(in_file)
with open(in_file) as fin:
self._create_database(db_file, fin)
def _create_database(self, db_file, fin):
meta = self._ensure_table_exists()
meta.reflect()
# meta.reflect(self.engine, only=['place'])
base = sqlalchemy.ext.automap.automap_base(metadata=meta)
base.prepare()
assert 1 == len(base.metadata.sorted_tables)
# place = base.metadata.sorted_tables[0]
# sess = sqlalchemy.orm.session.sessionmaker()()
# Now populate the table.
for row in self._get_text_file_fields(fin):
state, fips, name = row[:3]
# ins = place.insert()
def _get_text_file_fields(self, fin):
# Columns 1-2: United States Postal Service State Abbreviation
# Columns 3-4: State Federal Information Processing Standard FIPS code
# Columns 5-9: Place FIPS Code
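        # From the slices below: columns 10-73 hold the place name, 74-82 the
        # 2000 population, 83-91 the 2000 housing-unit count, and columns
        # 144-164 the latitude/longitude pair.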
for line in fin:
state = line[:2]
fips = line[2:9] # First 2 characters give the state FIPS code.
name = line[9:73].rstrip()
pop2k = int(line[73:82])
homes2k = int(line[82:91]) # ignore land area m^2, water, & sq mi
assert line[143] == ' ', line[143:] # northern hemisphere
lat = float(line[143:153])
lng = float(line[153:164])
assert lat > 0
assert lng < 0 or name.startswith('Attu ') # western hemisphere
yield state, fips, name, pop2k, homes2k, lat, lng
def _ensure_table_exists(self):
meta = sqlalchemy.MetaData(bind=self.engine)
query = 'select * from place where 1 > 2' # Sqlite lacks 'False'.
try:
self.engine.execute(query).fetchall()
except sqlalchemy.exc.OperationalError:
meta.create_all(tables=[t_places])
self.engine.execute(query).fetchall()
return meta
def _download(self, out_file, zip_url=None):
if zip_url is None:
zip_url = 'https://www.cs.rutgers.edu/~pxk/rutgers/hw/places.zip'
zip_url = 'https://web.archive.org/web/20130124002852/' + zip_url
# Another candidate download location might be
# https://github.com/petewarden/crunchcrawl/raw/master/places2k.txt
# but it uses some variant Latin1 encoding for Puerto Rico place names.
req = requests.get(zip_url)
req.raise_for_status()
assert 200 == req.status_code
# assert 1110384 == int(req.headers['Content-Length'])
assert 1110384 == int(req.headers['X-Archive-Orig-Content-Length'])
assert 'application/zip' == req.headers['Content-Type']
content = io.BytesIO(req.content)
zf = zipfile.ZipFile(content)
fl = zf.filelist
assert 'places2k.txt' == fl[0].filename
assert 4212250 == fl[0].file_size
assert 1507489281 == fl[0].CRC
assert (2009, 3, 18, 15, 37, 52) == fl[0].date_time
with zf.open(fl[0].filename) as places, open(out_file, 'w') as fout:
fout.write(places.read().decode('latin1'))
assert 4212304 == os.path.getsize(out_file) # UTF-8 expands slightly.
class ZipcodeStatsTest(unittest.TestCase):
def setUp(self):
self.zc = ZipcodeStats()
def test_city_state(self):
self.assertEqual('Beverly Hills CA', self.zc.get_city_state('90210'))
def test_places(self):
pass
if __name__ == '__main__':
unittest.main()
| 36.412121
| 79
| 0.65263
|
7948ece9148217a11285f48d4b1ed5434aaae8d6
| 189
|
py
|
Python
|
app/quiz/__init__.py
|
gibran-abdillah/quiz-app
|
6a346c87c5cc258aa42f8805f226b1119fbf60e6
|
[
"Apache-2.0"
] | 10
|
2021-12-24T09:02:00.000Z
|
2022-02-08T06:54:45.000Z
|
app/quiz/__init__.py
|
gibran-abdillah/quiz-app
|
6a346c87c5cc258aa42f8805f226b1119fbf60e6
|
[
"Apache-2.0"
] | null | null | null |
app/quiz/__init__.py
|
gibran-abdillah/quiz-app
|
6a346c87c5cc258aa42f8805f226b1119fbf60e6
|
[
"Apache-2.0"
] | null | null | null |
from flask import Blueprint
quiz_blueprint = Blueprint('quiz',
__name__,
url_prefix='/quiz'
)
from .views import *
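# A minimal usage sketch (assuming a module that owns the Flask app object):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(quiz_blueprint)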
| 23.625
| 45
| 0.465608
|
7948ee2bfa94828f03e885961494e1c29b05c1d9
| 3,612
|
py
|
Python
|
orthgan/updater.py
|
HMJiangGatech/chainer-gan-lib
|
ee6764b2b2a9d7309267fd1b95835b883464aee1
|
[
"MIT"
] | 4
|
2019-07-31T02:20:43.000Z
|
2020-02-18T05:33:05.000Z
|
orthgan/updater.py
|
HMJiangGatech/chainer-gan-lib
|
ee6764b2b2a9d7309267fd1b95835b883464aee1
|
[
"MIT"
] | null | null | null |
orthgan/updater.py
|
HMJiangGatech/chainer-gan-lib
|
ee6764b2b2a9d7309267fd1b95835b883464aee1
|
[
"MIT"
] | 1
|
2021-11-13T22:53:05.000Z
|
2021-11-13T22:53:05.000Z
|
import numpy as np
import chainer
import chainer.functions as F
from chainer import Variable
class Updater(chainer.training.StandardUpdater):
def __init__(self, *args, **kwargs):
self.gen, self.dis = kwargs.pop('models')
self.n_dis = kwargs.pop('n_dis')
super(Updater, self).__init__(*args, **kwargs)
def update_core(self):
gen_optimizer = self.get_optimizer('opt_gen')
dis_optimizer = self.get_optimizer('opt_dis')
xp = self.gen.xp
for i in range(self.n_dis):
batch = self.get_iterator('main').next()
batchsize = len(batch)
x = []
for j in range(batchsize):
x.append(np.asarray(batch[j]).astype("f"))
if i == 0:
z = Variable(xp.asarray(self.gen.make_hidden(batchsize)))
x_fake = self.gen(z)
y_fake = self.dis(x_fake)
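                # Non-saturating generator loss: softplus(-D(G(z))) equals -log sigmoid(D(G(z))).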
loss_gen = F.sum(F.softplus(-y_fake)) / batchsize
self.gen.cleargrads()
loss_gen.backward()
gen_optimizer.update()
chainer.reporter.report({'loss_gen': loss_gen})
x_real = Variable(xp.asarray(x))
y_real = self.dis(x_real)
z = Variable(xp.asarray(self.gen.make_hidden(batchsize)))
x_fake = self.gen(z)
y_fake = self.dis(x_fake)
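            # Detach the fake samples so the discriminator update does not backprop into the generator.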
x_fake.unchain_backward()
loss_dis = F.sum(F.softplus(-y_real)) / batchsize
loss_dis += F.sum(F.softplus(y_fake)) / batchsize
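            # Orthogonality penalty on the discriminator weights, scaled by a fixed factor of 10.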
loss_orth = self.dis.loss_orth()*10
self.dis.cleargrads()
loss_dis.backward()
loss_orth.backward()
dis_optimizer.update()
chainer.reporter.report({'loss_dis': loss_dis})
chainer.reporter.report({'loss_orth': loss_orth})
class HingeUpdater(chainer.training.StandardUpdater):
def __init__(self, *args, **kwargs):
self.gen, self.dis = kwargs.pop('models')
self.n_dis = kwargs.pop('n_dis')
super(HingeUpdater, self).__init__(*args, **kwargs)
def update_core(self):
gen_optimizer = self.get_optimizer('opt_gen')
dis_optimizer = self.get_optimizer('opt_dis')
xp = self.gen.xp
for i in range(self.n_dis):
batch = self.get_iterator('main').next()
batchsize = len(batch)
x = []
for j in range(batchsize):
x.append(np.asarray(batch[j]).astype("f"))
if i == 0:
z = Variable(xp.asarray(self.gen.make_hidden(batchsize)))
x_fake = self.gen(z)
y_fake = self.dis(x_fake)
loss_gen = -F.mean(y_fake) #F.sum(F.softplus(-y_fake)) / batchsize
self.gen.cleargrads()
loss_gen.backward()
gen_optimizer.update()
chainer.reporter.report({'loss_gen': loss_gen})
x_real = Variable(xp.asarray(x))
y_real = self.dis(x_real)
z = Variable(xp.asarray(self.gen.make_hidden(batchsize)))
x_fake = self.gen(z)
y_fake = self.dis(x_fake)
x_fake.unchain_backward()
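            # Hinge loss: penalize real scores below +1 and fake scores above -1.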
loss_dis = F.mean(F.relu(1. - y_real))
loss_dis += F.mean(F.relu(1. + y_fake))
loss_orth = self.dis.loss_orth()*10
self.dis.cleargrads()
loss_dis.backward()
loss_orth.backward()
dis_optimizer.update()
chainer.reporter.report({'loss_dis': loss_dis})
chainer.reporter.report({'loss_orth': loss_orth})
| 34.730769
| 82
| 0.556202
|
7948ee5342ed0911f470cd6818da240190903b9e
| 2,475
|
py
|
Python
|
Erik-solutions/day8/day.py
|
NTI-Gymnasieingenjor/AdventOfCode2020
|
ea74c06a2b220e227618ed841c4eb853f08d5c84
|
[
"MIT"
] | 1
|
2020-12-08T12:33:36.000Z
|
2020-12-08T12:33:36.000Z
|
Erik-solutions/day8/day.py
|
NTI-Gymnasieingenjor/AdventOfCode2020
|
ea74c06a2b220e227618ed841c4eb853f08d5c84
|
[
"MIT"
] | null | null | null |
Erik-solutions/day8/day.py
|
NTI-Gymnasieingenjor/AdventOfCode2020
|
ea74c06a2b220e227618ed841c4eb853f08d5c84
|
[
"MIT"
] | 1
|
2021-01-20T15:08:12.000Z
|
2021-01-20T15:08:12.000Z
|
inputDoc = open("input.txt")
docLines = inputDoc.read().split("\n")
inputDoc.close()
# PART 1
print("\nPART 1")
"""
acc increases or decreases a single global value called the accumulator by the value given in the argument.
jmp jumps to a new instruction relative to itself.
nop stands for No OPeration - it does nothing. The instruction immediately below it is executed next.
"""
def part1():
accumulator, index = 0, 0
executed = []
while True:
if index >= len(docLines):
break
name, number = tuple(docLines[index].split())
number = int(number)
if index in executed:
break
executed.append(index)
if name == "jmp":
index += number
else:
if name == "acc":
accumulator += number
index += 1
return accumulator
print(part1()) # 1548
# PART 2
print("\nPART 2")
"""
Either a jmp is supposed to be a nop, or a nop is supposed to be a jmp.
The program is supposed to terminate by attempting to execute an instruction
immediately after the last instruction in the file.
By changing exactly one jmp or nop, you can repair the boot code and make it terminate correctly.
Fix the program so that it terminates normally by changing exactly one jmp (to nop) or nop (to jmp).
What is the value of the accumulator after the program terminates?
"""
def convert(name, number):
if name == "jmp":
name = "nop"
elif name == "nop" and int(number) > 0:
name = "jmp"
return name, int(number)
def loopThrough(counter: int):
accumulator, index = 0, 0
lineUsed = []
success = False
while True:
if index >= len(docLines):
if accumulator > 0:
success = True
break
if index in lineUsed:
break
else:
lineUsed.append(index)
name, number = tuple(docLines[index].split())
number = int(number)
if index == counter:
name, number = convert(name, number)
if name == "jmp":
index += number
else:
if name == "acc":
accumulator += number
index += 1
return success, accumulator
def part2():
found = False
accumulator, counter = 0, 0
while found == False:
found, accumulator = loopThrough(counter)
counter += 1
return accumulator
print(part2()) # 1375
| 24.264706
| 111
| 0.589899
|
7948efac7c03c502ca8ff26b02e6d814f4471144
| 808
|
py
|
Python
|
tests/test_api_v1_services_ntpd_restart.py
|
pincher95/pfsense-api
|
001a4b8a1ec39138668d6d92b3c9d0c89a7f1b45
|
[
"Apache-2.0"
] | null | null | null |
tests/test_api_v1_services_ntpd_restart.py
|
pincher95/pfsense-api
|
001a4b8a1ec39138668d6d92b3c9d0c89a7f1b45
|
[
"Apache-2.0"
] | null | null | null |
tests/test_api_v1_services_ntpd_restart.py
|
pincher95/pfsense-api
|
001a4b8a1ec39138668d6d92b3c9d0c89a7f1b45
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 Jared Hendrickson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import e2e_test_framework
class APIE2ETestServicesNTPdRestart(e2e_test_framework.APIE2ETest):
uri = "/api/v1/services/ntpd/restart"
post_tests = [{"name": "Restart the NTPd service"}]
APIE2ETestServicesNTPdRestart()
| 36.727273
| 74
| 0.769802
|
7948efb54e6cb4cd7e1274135d7e6a81f868dfeb
| 1,493
|
py
|
Python
|
bib_models/serializers.py
|
stefanomunarini/bibxml-service
|
4c3f6922f8a5c84cf63755e687f61862da0f4ad4
|
[
"BSD-3-Clause"
] | null | null | null |
bib_models/serializers.py
|
stefanomunarini/bibxml-service
|
4c3f6922f8a5c84cf63755e687f61862da0f4ad4
|
[
"BSD-3-Clause"
] | null | null | null |
bib_models/serializers.py
|
stefanomunarini/bibxml-service
|
4c3f6922f8a5c84cf63755e687f61862da0f4ad4
|
[
"BSD-3-Clause"
] | null | null | null |
"""Pluggable serializer registry
for :class:`~.models.bibdata.BibliographicItem` instances.
Currently, only serialization
into various utf-8 strings is supported.
"""
from typing import Callable, Dict
from dataclasses import dataclass
def register(id: str, content_type: str):
"""Parametrized decorator that, given ID and content_type,
returns a function that will register a serializer function.
Serializer function must take
a :class:`relaton.models.bibdata.BibliographicItem` instance
    and return utf-8-encoded bytes.
"""
def wrapper(func: Callable[..., bytes]):
registry[id] = Serializer(
serialize=func,
content_type=content_type,
)
return func
return wrapper
@dataclass
class Serializer:
"""A registered serializer.
Instantiated automatically by the :func:`~bib_models.serializers.register`
function.
"""
serialize: Callable[..., bytes]
"""Serializer function. Returns a string."""
content_type: str
"""Content type to be used with this serializer, e.g. in HTTP responses."""
def get(id: str) -> Serializer:
"""Get previously registered serializer by ID.
:raises SerializerNotFound:"""
try:
return registry[id]
except KeyError:
raise SerializerNotFound(id)
class SerializerNotFound(RuntimeError):
"""No registered serializer with given ID."""
pass
registry: Dict[str, Serializer] = {}
"""Registry of serializers."""
| 24.883333
| 79
| 0.689216
|
7948eff884bcc75c5ed90e816e10d7910414a584
| 2,954
|
py
|
Python
|
coex/scaling.py
|
karnesh/coexistence
|
a6862dd8554ca0ab430fca6efe4de0f355f8145a
|
[
"BSD-2-Clause"
] | null | null | null |
coex/scaling.py
|
karnesh/coexistence
|
a6862dd8554ca0ab430fca6efe4de0f355f8145a
|
[
"BSD-2-Clause"
] | null | null | null |
coex/scaling.py
|
karnesh/coexistence
|
a6862dd8554ca0ab430fca6efe4de0f355f8145a
|
[
"BSD-2-Clause"
] | null | null | null |
import numpy as np
from scipy.optimize import fmin
def coexistence(lnpi, N):
"""Locate the coexistence acticity near the critical point by
maximizing compressibility.
Args:
lnpi: The original log of probability distribution.
N: particle number distribution.
Returns:
A newlog of probability distribution at the coexistence
point.
"""
def compress(ratio):
"""Return the compressibility kai"""
lnpi_new = lnpi + ratio*N
prob = np.exp(lnpi_new)/np.sum(np.exp(lnpi_new))
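        # In the grand canonical ensemble the compressibility is proportional to the particle-number fluctuation <N^2> - <N>^2.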
kai = np.dot(N*N,prob)-np.dot(N,prob)**2
return kai
solution = fmin(lambda x: -compress(x), x0=0)
lnpi += solution[0] * N
return lnpi
def finite_scaling(frac, path, T, T0, H, d):
    """Calculate the cumulant ratios M = <m^2>/<|m|>^2 and U = <m^4>/<m^2>^2,
    where m = eta_c - <eta_c>.
    Args:
        frac: The activity at the simulation condition.
        path: The path where the simulation data is stored.
        T: The list of temperatures to reweight to.
        T0: The simulation temperature.
        H: Cubic box length (3D).
        d: Dimension.
    Returns:
        M: The second order cumulant ratio, <m^2>/<|m|>^2
        U: The fourth order cumulant ratio, <m^4>/<m^2>^2
    """
N,nlnpi = np.loadtxt(path + '/lnpi_op.dat', usecols=(0,1), unpack=True)
nlnpi = np.log(np.exp(nlnpi)/np.sum(np.exp(nlnpi)))
elim = np.loadtxt(path + '/elim.dat')[:,1:4]
ehist = np.loadtxt(path + '/ehist.dat')[:,1:]
"""Histogram Reweighting and M calculation"""
# Set constants and parameters
sigma = 4.0
kb = 1.38e-23
    m = np.zeros(len(T))
    m_2 = np.zeros(len(T))
    m_4 = np.zeros(len(T))
if d == 3:
rho = np.pi/6*sigma**3*N/H**3
elif d == 2:
rho = np.pi*sigma**2*N/H**2
for i in range(len(T)):
nlnpi_new = np.zeros(len(N))
#Reweight and calculate the new pi(N)[j] at each N[j]
for j in range(len(N)):
num,e_st,e_en = elim[j,:]
emicro = np.linspace(e_st,e_en,num)
eh = ehist[:num,j]
elnpi = np.log(eh/np.sum(eh))
elnpi_new = elnpi + emicro*(1.0/kb/T0-1.0/kb/T[i])
eprob_new = np.exp(elnpi_new)/np.sum(np.exp(elnpi_new))
lnpi_new = (elnpi_new + nlnpi[j]
+ (1.0/kb/T[i]-1.0/kb/T0)*frac[0]/(1.0/kb/T0)*N[j])
nlnpi_new[j] = np.log(np.sum(np.exp(lnpi_new)))
#Reweight new lnpi(N) to saturated acticity
nlnpi_new = coexistence(nlnpi_new, N)
prob = np.exp(nlnpi_new)/np.sum(np.exp(nlnpi_new))
rho_av = np.dot(rho,prob)
m[i] = np.dot(np.abs(rho-rho_av),prob)
m_2[i] = np.dot((rho-rho_av)**2,prob)
m_4[i] = np.dot((rho-rho_av)**4,prob)
M = m_2/m**2
U = m_4/m_2**2
return M, U
| 33.954023
| 77
| 0.538592
|
7948f01dca953a1143e03a4db02d63f2f9fc2080
| 303
|
py
|
Python
|
data/multilingual/Latn.SRP/Sans_12/pdf_to_json_test_Latn.SRP_Sans_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T19:47:35.000Z
|
2021-09-19T19:47:35.000Z
|
data/multilingual/Latn.SRP/Sans_12/pdf_to_json_test_Latn.SRP_Sans_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
data/multilingual/Latn.SRP/Sans_12/pdf_to_json_test_Latn.SRP_Sans_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.SRP/Sans_12/udhr_Latn.SRP_Sans_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.3
| 73
| 0.811881
|
7948f1b91259ab96b1adb8d22e4efb3805f30083
| 758
|
py
|
Python
|
sql_anti/urls.py
|
gmaclinuxer/sql-anti
|
70bf414cec16cc2285604e26d9c6711578257be7
|
[
"MIT"
] | null | null | null |
sql_anti/urls.py
|
gmaclinuxer/sql-anti
|
70bf414cec16cc2285604e26d9c6711578257be7
|
[
"MIT"
] | null | null | null |
sql_anti/urls.py
|
gmaclinuxer/sql-anti
|
70bf414cec16cc2285604e26d9c6711578257be7
|
[
"MIT"
] | null | null | null |
"""sql_anti URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^naive_trees/', include('naive_trees.urls')),
]
| 34.454545
| 77
| 0.699208
|
7948f2133b15608276eead69ee6dbb6edcd46d7a
| 9,731
|
py
|
Python
|
word2vec.py
|
AppleFairy/CS20SI-Tensorflow-for-Deep-Learning-Research
|
c647b4c779787c7aa3fd7842ff6d808d3f85822e
|
[
"MIT"
] | 2
|
2017-08-24T14:09:45.000Z
|
2017-08-28T16:58:31.000Z
|
word2vec.py
|
AppleFairy/CS20SI-Tensorflow-for-Deep-Learning-Research
|
c647b4c779787c7aa3fd7842ff6d808d3f85822e
|
[
"MIT"
] | null | null | null |
word2vec.py
|
AppleFairy/CS20SI-Tensorflow-for-Deep-Learning-Research
|
c647b4c779787c7aa3fd7842ff6d808d3f85822e
|
[
"MIT"
] | null | null | null |
from __future__ import division
import os
import zipfile
import collections
import random
import numpy as np
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
# global variables
data_name = "data/text/text8.zip"
WORK_DIR = os.path.abspath(os.curdir)
VOCAB_SIZE = 50000
BATCH_SIZE = 128
EMBED_SIZE = 128
EPOCH = 10000
SKIP_WINDOW = 1
NUM_SKIPS = 2
NUM_SAMPLED = 64
LEARNING_RATE = 1.0
VALID_SIZE = 16
VALID_WINDOW = 100
class SkipGramModel:
''' Build the graph for word2vec model'''
def __init__(self, vocab_size, batch_size, embed_size, epoch, skip_window, num_skips, num_sampled, learning_rate=1.0):
self.vocab_size = vocab_size
self.batch_size = batch_size
self.embed_size = embed_size
self.epoch = epoch
self.skip_window = skip_window # the number of context words from left/right of input word
self.num_skips = num_skips # the number of labels used for one input
self.num_sampled = num_sampled
self.index = 0
self.learning_rate = learning_rate
self.global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
def _read_data(self):
with zipfile.ZipFile(data_name) as zf:
self.data = tf.compat.as_str(zf.read(zf.namelist()[0])).split()
def _build_dataset(self):
count = [['UNK', -1]]
count.extend(collections.Counter(self.data).most_common(self.vocab_size - 1))
vocabulary = dict()
for word, _ in count:
vocabulary[word] = len(vocabulary) # index
self.indices = list()
unk_count = 0
for word in self.data:
if word in vocabulary:
index = vocabulary[word]
else:
index = 0
unk_count += 1
self.indices.append(index)
with open('./graph/word2vec/vocab_500.tsv', "w") as f:
index = 0
for word, _ in count:
vocabulary[word] = index
if index < 500:
f.write(word + "\n")
index += 1
count[0][1] = unk_count
self.reversed_vocabulary = dict(zip(vocabulary.values(), vocabulary.keys()))
def _generate_batch(self):
assert self.batch_size % self.num_skips == 0
assert self.num_skips <= (2 * self.skip_window)
self.batch = np.ndarray(shape=(self.batch_size), dtype=np.int32)
self.labels = np.ndarray(shape=(self.batch_size, 1), dtype=np.int32)
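        # span covers one center word plus skip_window context words on each side.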
span = 2 * self.skip_window + 1
buf = collections.deque(maxlen=span)
# round back
if self.index + span > len(self.indices):
self.index = 0
buf.extend(self.indices[self.index:self.index + span])
self.index += span
for i in range(self.batch_size // self.num_skips): # for each span
            target = self.skip_window  # start from the center word's index; re-drawn to a context position below
targets_to_avoid = [self.skip_window]
for j in range(self.num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
self.batch[i * self.num_skips + j] = buf[self.skip_window]
self.labels[i * self.num_skips + j, 0] = buf[target]
if self.index == len(self.indices):
buf[:] = self.indices[:span]
self.index = span
else:
buf.append(self.indices[self.index])
self.index += 1
self.index = (self.index + len(self.indices) - span) % len(self.indices)
def _create_placeholder(self):
""" define placeholder for input and output """
with tf.name_scope("data"):
self.train_inputs = tf.placeholder(tf.int32, [self.batch_size])
self.train_labels = tf.placeholder(tf.int32,[self.batch_size, 1])
def _create_embedding(self):
''' define the weight '''
with tf.name_scope("embedding"):
self.embed_matrix = tf.Variable(tf.random_uniform([self.vocab_size, self.embed_size], -1.0, 1.0))
def _create_loss(self):
with tf.name_scope("loss"):
embed = tf.nn.embedding_lookup(self.embed_matrix, self.train_inputs)
# define the loss function
nce_weight = tf.Variable(tf.truncated_normal([self.vocab_size, self.embed_size], stddev=1.0 / self.embed_size ** 0.5))
nce_bias = tf.Variable(tf.zeros([self.vocab_size]))
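            # NCE loss approximates the full softmax by contrasting the true label against num_sampled negative samples.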
self.loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weight,
biases=nce_bias,
labels=self.train_labels,
inputs=embed,
num_sampled=self.num_sampled,
num_classes=self.vocab_size))
def _create_optimizer(self):
self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.loss)
def _create_summaries(self):
with tf.name_scope("summaries"):
tf.summary.scalar("loss", self.loss)
tf.summary.histogram("histogram loss", self.loss)
self.summary_op = tf.summary.merge_all()
def _create_validation(self, valid_size, valid_window):
self.valid_size = valid_size
self.valid_window = valid_window
self.valid_examples = np.random.choice(self.valid_window, self.valid_size, replace=False)
self.valid_dataset = tf.constant(self.valid_examples, dtype=tf.int32)
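        # Cosine similarity between validation words and every vocabulary word, using L2-normalized embeddings.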
norm = tf.sqrt(tf.reduce_sum(tf.square(self.embed_matrix), 1, keep_dims=True))
normalized_embeddings = self.embed_matrix / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, self.valid_dataset)
self.similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
def build_graph(self, valid_size=0, valid_window=0):
self._read_data()
self._build_dataset()
self._create_placeholder()
self._create_embedding()
self._create_loss()
self._create_optimizer()
self._create_summaries()
if valid_size > 0 and valid_window > 0:
self._create_validation(valid_size=valid_size, valid_window=valid_window)
def train_word2vec(self, validation=False):
saver = tf.train.Saver() # defaults to saving all variables
initial_step = 0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(os.path.dirname("./graph/word2vec/checkpoint"))
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
writer = tf.summary.FileWriter("./graph/word2vec", sess.graph)
            initial_step = self.global_step.eval()
average_loss = 0.0
for step in range(initial_step, initial_step + self.epoch):
self._generate_batch()
feed_dict = {self.train_inputs:self.batch, self.train_labels:self.labels}
_, batch_loss, summary = sess.run([self.optimizer, self.loss, self.summary_op], feed_dict)
writer.add_summary(summary, global_step=step)
average_loss += batch_loss
if (step + 1) % 2000 == 0:
if step > 0:
average_loss = average_loss / 2000
print("average loss=%r" % average_loss)
average_loss = 0
saver.save(sess, "./graph/word2vec/checkpoint", step)
if validation:
if step % 4000 == 0:
sim = self.similarity.eval()
for i in range(self.valid_size):
valid_word = self.reversed_vocabulary[self.valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = "Nearest to %s:" % valid_word
for k in range(top_k):
close_word = self.reversed_vocabulary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
# generate data for tensorboard.
final_embed_matrix = sess.run(self.embed_matrix)
embedding_var = tf.Variable(final_embed_matrix[:500], name='embedding')
sess.run(embedding_var.initializer)
config = projector.ProjectorConfig()
summary_writer = tf.summary.FileWriter('./graph/word2vec')
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
embedding.metadata_path = os.path.join(WORK_DIR,'./graph/word2vec/vocab_500.tsv')
# generate projector_config.pbtxt, it will be loaded by tensorboard for visualization.
projector.visualize_embeddings(summary_writer, config)
# only save embedding_var.
saver_embed = tf.train.Saver([embedding_var])
saver_embed.save(sess, './graph/word2vec/skip-gram.ckpt', 1)
summary_writer.close()
writer.close()
if __name__ == "__main__":
model = SkipGramModel(VOCAB_SIZE, BATCH_SIZE, EMBED_SIZE, EPOCH, SKIP_WINDOW, NUM_SKIPS, NUM_SAMPLED)
#model.build_graph(valid_size=VALID_SIZE, valid_window=VALID_WINDOW)
model.build_graph()
#model.train_word2vec(validation=True)
model.train_word2vec()
| 40.545833
| 130
| 0.598089
|
7948f376154cf0ff1d49d2da896e175fc4e5cf73
| 421
|
py
|
Python
|
timing.py
|
russss/pydms
|
14cca0faec16767fcb8c2519428c407c664dc081
|
[
"BSD-2-Clause"
] | 2
|
2015-03-30T00:22:13.000Z
|
2019-01-18T19:46:06.000Z
|
timing.py
|
russss/pydms
|
14cca0faec16767fcb8c2519428c407c664dc081
|
[
"BSD-2-Clause"
] | null | null | null |
timing.py
|
russss/pydms
|
14cca0faec16767fcb8c2519428c407c664dc081
|
[
"BSD-2-Clause"
] | null | null | null |
# coding=utf-8
from __future__ import division, absolute_import, print_function, unicode_literals
import time
class Timer(object):
def __enter__(self):
self.interval = None
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.interval = self.end - self.start
def __str__(self):
return "%.2f seconds" % (self.interval)
| 23.388889
| 82
| 0.643705
|
7948f4cc8f6814db774b566ba2fa7c09d2f6aa8a
| 5,262
|
py
|
Python
|
retro/plot/plot_1d_scan.py
|
ellohfin/retro
|
58ec8f5b698e6140acd215717f051d99e407c4e5
|
[
"Apache-2.0"
] | 1
|
2018-03-02T01:05:52.000Z
|
2018-03-02T01:05:52.000Z
|
retro/plot/plot_1d_scan.py
|
ellohfin/retro
|
58ec8f5b698e6140acd215717f051d99e407c4e5
|
[
"Apache-2.0"
] | 30
|
2018-01-30T21:03:28.000Z
|
2019-11-07T16:42:07.000Z
|
retro/plot/plot_1d_scan.py
|
ellohfin/retro
|
58ec8f5b698e6140acd215717f051d99e407c4e5
|
[
"Apache-2.0"
] | 6
|
2017-07-27T19:49:13.000Z
|
2019-11-19T13:38:27.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-position
"""
Plot likelihood scan results
"""
from __future__ import absolute_import, division, print_function
__all__ = [
'FNAME_TEMPLATE',
'plot_1d_scan',
'parse_args'
]
__author__ = 'P. Eller, J.L. Lanfranchi'
__license__ = '''Copyright 2017 Philipp Eller and Justin L. Lanfranchi
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from argparse import ArgumentParser
from os.path import abspath, dirname, join
import sys
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__' and __package__ is None:
PARENT_DIR = dirname(dirname(abspath(__file__)))
if PARENT_DIR not in sys.path:
sys.path.append(PARENT_DIR)
from retro import HypoParams8D, load_pickle
from retro.utils.misc import expand, get_primary_interaction_tex
FNAME_TEMPLATE = 'scan_results_event_{event}_uid_{uid}_dims_{param}.pkl'
def plot_1d_scan(dir, event, uid): # pylint: disable=redefined-builtin
"""main"""
#scan_files = glob(expand(dirpath) + '/*_uid_%d_*' % uid)
fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(14, 8), dpi=72)
axiter = iter(axes.flatten())
for pnum, param in enumerate(HypoParams8D._fields):
fname = FNAME_TEMPLATE.format(event=event, uid=uid, param=param)
fpath = expand(join(dir, fname))
scan = load_pickle(fpath)
scan_values = scan['scan_values'][0]
truth = scan['truth'][0]
llh = -scan['neg_llh']
err_at_max_llh = scan_values[llh == llh.max()][0] - truth
if param == 'time':
units = 'ns'
elif param in ['x', 'y', 'z']:
units = 'm'
elif param in ['track_zenith', 'track_azimuth']:
units = 'deg'
scan_values *= 180 / np.pi
err_at_max_llh *= 180/np.pi
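            # Wrap the angular error into the interval [-180, 180) degrees.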
err_at_max_llh = ((err_at_max_llh + 180) % 360) - 180
truth *= 180/np.pi
        elif param in ['track_energy', 'cascade_energy']:
units = 'GeV'
ax = next(axiter)
ax.plot(
[0]*2, [llh.min(), llh.max()],
color='C1',
label='truth'
)
ax.plot(
scan_values - truth, llh,
color='C0',
label='LLH scan'
)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.axis('tight')
ax.set_xlabel(r'%s$_{\rm reco} -$%s$_{\rm true}$ (%s)'
% (param, param, units))
ax.set_title(r'Error at LLH$_{\rm max}$: %.2f %s'
% (err_at_max_llh, units))
if pnum == 0:
ax.legend(loc='best')
if scan['LLH_USE_AVGPHOT']:
llhname = r'LLH from counts including avg. photon'
eps = (r', $\epsilon_{\rm ang}$=%.1f, $\epsilon_{\rm len}$=%.1f'
% (scan['EPS_ANGLE'], scan['EPS_LENGTH']))
else:
llhname = 'LLH from simple counts'
eps = ''
if scan['NUM_JITTER_SAMPLES'] > 1:
jitter_sigmas = r' $\sigma_{\rm jitter}$=%d,' % scan['JITTER_SIGMA']
else:
jitter_sigmas = ''
prim_int_tex = get_primary_interaction_tex(scan['primary_interaction'])
fig.suptitle(
r'Event %s: %.1f GeV $%s$; %s%s'
r'$q_{\rm noise}$=%.1e,'
'%s'
r' $N_{\rm samp,jitter}=$%d,'
r' escale$_{\rm cscd}$=%d,'
r' escale$_{\rm trck}$=%d'
'%s'
% (scan['uid'], scan['neutrino_energy'], prim_int_tex, llhname, '\n',
scan['NOISE_CHARGE'],
jitter_sigmas,
scan['NUM_JITTER_SAMPLES'],
scan['CASCADE_E_SCALE'],
scan['TRACK_E_SCALE'],
eps),
fontsize=14
)
plt.tight_layout(rect=(0, 0, 1, 0.92))
fbasename = 'scan_results_event_%d_uid_%d_1d' % (event, uid)
fbasepath = join(dir, fbasename)
fpath = fbasepath + '.png'
fig.savefig(fpath, dpi=120)
print('saved plot to "%s"' % fpath)
#fpath = fbasepath + '.pdf'
#fig.savefig(fpath)
#print('saved plot to "%s"' % fpath)
#plt.draw()
#plt.show()
def parse_args(description=__doc__):
"""Parse command line arguments"""
parser = ArgumentParser(description=description)
parser.add_argument(
'-d', '--dir', metavar='DIR', type=str, required=True,
help='''Directory containing retro tables''',
)
parser.add_argument(
'-e', '--event', type=int, required=True,
help='''Event ID from original I3 / HDF5 file'''
)
parser.add_argument(
'-u', '--uid', type=int, required=True,
help='''Unique event ID'''
)
args = parser.parse_args()
return args
if __name__ == '__main__':
plot_1d_scan(**vars(parse_args()))
| 30.952941
| 77
| 0.597301
|
7948f51a3264e31b1efee37f0869f29f617d069a
| 23,578
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/aio/operations/_express_route_circuit_connections_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2
|
2021-03-24T06:26:11.000Z
|
2021-04-18T15:55:59.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/aio/operations/_express_route_circuit_connections_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 4
|
2019-04-17T17:57:49.000Z
|
2020-04-24T21:11:22.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/aio/operations/_express_route_circuit_connections_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitConnectionsOperations:
"""ExpressRouteCircuitConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified Express Route Circuit Connection from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs
) -> "_models.ExpressRouteCircuitConnection":
"""Gets the specified Express Route Circuit Connection from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.ExpressRouteCircuitConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
express_route_circuit_connection_parameters: "_models.ExpressRouteCircuitConnection",
**kwargs
) -> "_models.ExpressRouteCircuitConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(express_route_circuit_connection_parameters, 'ExpressRouteCircuitConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
express_route_circuit_connection_parameters: "_models.ExpressRouteCircuitConnection",
**kwargs
) -> AsyncLROPoller["_models.ExpressRouteCircuitConnection"]:
"""Creates or updates a Express Route Circuit Connection in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:param express_route_circuit_connection_parameters: Parameters supplied to the create or update
express route circuit connection operation.
:type express_route_circuit_connection_parameters: ~azure.mgmt.network.v2019_12_01.models.ExpressRouteCircuitConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_12_01.models.ExpressRouteCircuitConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
express_route_circuit_connection_parameters=express_route_circuit_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs
) -> AsyncIterable["_models.ExpressRouteCircuitConnectionListResult"]:
"""Gets all global reach connections associated with a private peering in an express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.ExpressRouteCircuitConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections'} # type: ignore
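# Hedged usage sketch (not part of the generated file): how these async
# operations are typically driven end to end. The client/credential wiring is
# an assumption, and "my-rg", "my-circuit", "AzurePrivatePeering" and
# "my-conn" are placeholder names.
async def _example_usage(subscription_id: str) -> None:
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.v2019_12_01.aio import NetworkManagementClient
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, subscription_id) as client:
            ops = client.express_route_circuit_connections
            # Single GET, deserialized into an ExpressRouteCircuitConnection model.
            conn = await ops.get("my-rg", "my-circuit", "AzurePrivatePeering", "my-conn")
            # Paged listing via AsyncItemPaged.
            async for item in ops.list("my-rg", "my-circuit", "AzurePrivatePeering"):
                print(item.name)
            # Long-running create/update: begin_* returns an AsyncLROPoller.
            poller = await ops.begin_create_or_update(
                "my-rg", "my-circuit", "AzurePrivatePeering", "my-conn", conn)
            await poller.result()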
| 51.70614 | 249 | 0.678514 |
7948f5adb9cc55726492ac4abc872d4c4da6380b | 636 | py | Python | tests/fixtures/parser.py | peterg79/regenmaschine | 3b76fb42c34d6d655d8e793bca4cfcf21c858123 | ["MIT"] | null | null | null | tests/fixtures/parser.py | peterg79/regenmaschine | 3b76fb42c34d6d655d8e793bca4cfcf21c858123 | ["MIT"] | null | null | null | tests/fixtures/parser.py | peterg79/regenmaschine | 3b76fb42c34d6d655d8e793bca4cfcf21c858123 | ["MIT"] | null | null | null |
"""Define fixtures related to the "parser" endpoint."""
import pytest
@pytest.fixture(scope="module")
def parser_json():
"""Return a /parser response."""
return {
"parsers": [
{
"lastRun": "2018-04-30 11:52:33",
"lastKnownError": "",
"hasForecast": True,
"uid": 11,
"hasHistorical": False,
"description": "North America weather forecast",
"enabled": True,
"custom": False,
"isRunning": False,
"name": "NOAA Parser",
}
]
}
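# Hedged usage sketch: a test consuming the fixture above. Only the fixture's
# shape is asserted here, and the test name is hypothetical.
def test_parser_json_shape(parser_json):
    """Sanity-check the canned /parser payload."""
    parsers = parser_json["parsers"]
    assert len(parsers) == 1
    assert parsers[0]["name"] == "NOAA Parser"
    assert parsers[0]["hasForecast"] is True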
| 26.5 | 64 | 0.448113 |
7948f81a237ba1e0d7000719c808145de7b5d719 | 889 | py | Python | sims/s436/mkmov.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | ["MIT", "Unlicense"] | 1 | 2019-12-19T16:21:13.000Z | 2019-12-19T16:21:13.000Z | sims/s436/mkmov.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | ["MIT", "Unlicense"] | null | null | null | sims/s436/mkmov.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | ["MIT", "Unlicense"] | 2 | 2020-01-08T06:23:33.000Z | 2020-01-08T07:06:50.000Z |
from pylab import *
import tables
def getMeshGrid(grid):
xl, yl = grid._v_attrs.vsLowerBounds
xu, yu = grid._v_attrs.vsUpperBounds
nx, ny = grid._v_attrs.vsNumCells
dx = (xu-xl)/nx
dy = (yu-yl)/ny
X = linspace(xl+0.5*dx, xu-0.5*dx, nx)
Y = linspace(yl+0.5*dy, yu-0.5*dy, ny)
return meshgrid(X, Y)
def mkFig(fh, XX, YY, dat, nm):
tm = fh.root.timeData._v_attrs.vsTime
Valf = 0.1
Lx = 4*pi*25.0
tmAlf = tm/(Lx/Valf)
f = figure(1)
pcolormesh(XX, YY, dat.transpose())
axis('image')
colorbar()
title("T = %.4g" % tmAlf)
savefig(nm)
close()
for i in range(35,51):
print ("Working on %d .." % i)
fh = tables.openFile("../s436/s436-is-coal_q_%d.h5" % i)
q = fh.root.StructGridField
X, Y = getMeshGrid(fh.root.StructGrid)
mkFig(fh, X, Y, q[:,:,3], 's436-Jze-%05d.png' % i)
fh.close()
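# Portability sketch (an assumption, not part of the original script):
# tables.openFile is the PyTables 2.x spelling; under PyTables >= 3.0 the same
# frame loop reads as below, with the context manager handling fh.close().
def mkmov_pytables3(first=35, last=51):
    for i in range(first, last):
        print("Working on %d .." % i)
        with tables.open_file("../s436/s436-is-coal_q_%d.h5" % i) as fh:
            q = fh.root.StructGridField
            X, Y = getMeshGrid(fh.root.StructGrid)
            mkFig(fh, X, Y, q[:, :, 3], 's436-Jze-%05d.png' % i)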
| 24.027027 | 60 | 0.575928 |
7948f8965777fc780685dc58d03c466ba3fc03ae | 556 | py | Python | recipe/migrations/0002_auto_20200525_0845.py | UtkarshAgrawalDTU/My-Fridge-API | 8b73b40ef5c4920b47db66574305c26095f9b1e7 | ["MIT"] | null | null | null | recipe/migrations/0002_auto_20200525_0845.py | UtkarshAgrawalDTU/My-Fridge-API | 8b73b40ef5c4920b47db66574305c26095f9b1e7 | ["MIT"] | 3 | 2021-06-04T23:22:04.000Z | 2021-09-22T19:10:42.000Z | recipe/migrations/0002_auto_20200525_0845.py | UtkarshAgrawalDTU/My-Fridge-API | 8b73b40ef5c4920b47db66574305c26095f9b1e7 | ["MIT"] | 1 | 2021-08-20T10:50:24.000Z | 2021-08-20T10:50:24.000Z |
# Generated by Django 3.0.6 on 2020-05-25 08:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipe', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='recipe',
name='ingredients',
),
migrations.AddField(
model_name='recipe',
name='ingredients',
field=models.TextField(blank=True),
),
migrations.DeleteModel(
name='Ingredient',
),
]
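# Hedged usage sketch (not part of the generated migration): applying it
# programmatically, equivalent to `python manage.py migrate recipe
# 0002_auto_20200525_0845`. Assumes a configured DJANGO_SETTINGS_MODULE; note
# that RemoveField + DeleteModel make this migration destructive for any
# existing Ingredient rows.
def apply_migration():
    import django
    from django.core.management import call_command
    django.setup()
    call_command("migrate", "recipe", "0002_auto_20200525_0845")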
| 21.384615 | 47 | 0.55036 |
7948fa460bbfb54646d7a62dd6f615d60e7a7b4f | 2,858 | py | Python | examples/sim/main.py | gabs48/tigrillo | 663f7407808bb8101edebec02fb0cd81c59ad2f1 | ["MIT"] | 5 | 2018-10-22T21:28:44.000Z | 2020-09-03T07:01:36.000Z | examples/sim/main.py | gabs48/tigrillo | 663f7407808bb8101edebec02fb0cd81c59ad2f1 | ["MIT"] | null | null | null | examples/sim/main.py | gabs48/tigrillo | 663f7407808bb8101edebec02fb0cd81c59ad2f1 | ["MIT"] | 1 | 2020-02-01T15:12:38.000Z | 2020-02-01T15:12:38.000Z |
#!/usr/bin/python3
"""
This file is the main example to run. It parses the command line and
loads the configuration file for the simulation.
"""
import argparse
from experiment import *
from tigrillo.core.utils import *
__author__ = "Gabriel Urbain"
__copyright__ = "Copyright 2017, Human Brain Projet, SP10"
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Gabriel Urbain"
__email__ = "gabriel.urbain@ugent.be"
__status__ = "Research"
__date__ = "June 20th, 2017"
if __name__ == "__main__":
""" Parse the arguments, config file and run all """
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", type=str,
help="The configuration file that contains all the parameters for the experiment.",
required=True)
parser.add_argument("-v", "--verbose", action="store_true",
help="Force shell verbosity to INFO despite of config file entry.")
parser.add_argument("-l", "--log", action="store_true",
help="Force file logs verbosity to DEBUG despite of config file entry.")
parser.add_argument("-x", "--gui", action="store_true", help="Force the GUI despite of config file entry.")
args = parser.parse_args()
if not args.config:
parser.error("No config file given. Please, add a config file after the -c option")
# Parse config file
config = configparser.ConfigParser()
config.read(args.config)
config.add_section('Config')
config.set('Config', 'filename', os.path.abspath(args.config))
# Manually change forced command-line arguments
if args.gui:
config.set("Simulation", "rendering", "True")
if args.verbose:
config.set("Logger", "level_shell", "INFO")
if args.log:
config.set("Logger", "level_file", "DEBUG")
# Retrieve logging parameters
log_file_level = eval("logging." + config.get("Logger", "level_file"))
log_shell_level = eval("logging." + config.get("Logger", "level_shell"))
log_file_folder = config.get("Logger", "folder")
mkdir(log_file_folder)
log_file_name = log_file_folder + "/" + time.strftime("%Y%m%d_%H%M%S", time.localtime()) + ".log"
# Set up logger
logging.basicConfig(level=log_file_level,
format='[%(asctime)s - %(levelname)-8s: %(name)s] %(message)s',
datefmt='%y-%m-%d %H:%M:%S',
filename=log_file_name,
filemode='w')
log_shell = logging.StreamHandler()
log_shell.setLevel(log_shell_level)
log_shell_format = logging.Formatter("[%(name)-12s: %(levelname)-8s] %(message)s")
log_shell.setFormatter(log_shell_format)
logging.getLogger('').addHandler(log_shell)
# Run the experiment
e = Experiment(config)
e.start()
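# Hedged sketch of a minimal config file this script would accept; the section
# and key names are inferred from the reads above (Simulation/Logger), and any
# other keys Experiment consumes are not shown:
#
#   [Simulation]
#   rendering = False
#   [Logger]
#   level_file = DEBUG
#   level_shell = INFO
#   folder = logs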
| 37.116883 | 111 | 0.641358 |
7948fa5293c0435ccd1d8191c54a48d272d396f4 | 287 | py | Python | tools_box/_selling/doctype/competitor_information/competitor_information.py | maisonarmani/Tools-Box | a2226940afdf0291f031008be1af953d2360acdf | ["MIT"] | 4 | 2017-09-25T23:34:08.000Z | 2020-07-17T23:52:26.000Z | tools_box/_selling/doctype/competitor_information/competitor_information.py | maisonarmani/Tools-Box | a2226940afdf0291f031008be1af953d2360acdf | ["MIT"] | null | null | null | tools_box/_selling/doctype/competitor_information/competitor_information.py | maisonarmani/Tools-Box | a2226940afdf0291f031008be1af953d2360acdf | ["MIT"] | 5 | 2017-06-02T01:58:32.000Z | 2022-02-22T16:59:01.000Z |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class CompetitorInformation(Document):
pass
| 26.090909 | 68 | 0.787456 |
7948fbc18a03676d67661884065876fb7b0dd9b8 | 2,372 | py | Python | kitsune/announcements/tests/test_models.py | theresnotime/kitsune | 0757b267b0d332264167d31ce84e342263e1c635 | ["BSD-3-Clause"] | null | null | null | kitsune/announcements/tests/test_models.py | theresnotime/kitsune | 0757b267b0d332264167d31ce84e342263e1c635 | ["BSD-3-Clause"] | null | null | null | kitsune/announcements/tests/test_models.py | theresnotime/kitsune | 0757b267b0d332264167d31ce84e342263e1c635 | ["BSD-3-Clause"] | null | null | null |
from datetime import datetime, timedelta
from kitsune.announcements.models import Announcement
from kitsune.announcements.tests import AnnouncementFactory
from kitsune.sumo.tests import TestCase
from kitsune.users.tests import GroupFactory, UserFactory
from kitsune.wiki.tests import LocaleFactory
class AnnouncementModelTests(TestCase):
def setUp(self):
super(AnnouncementModelTests, self).setUp()
self.creator = UserFactory()
self.group = GroupFactory()
self.locale = LocaleFactory(locale="es")
self.creator.groups.add(self.group)
def test_active(self):
"""Active announcement shows."""
AnnouncementFactory(
show_after=datetime.now() - timedelta(days=2),
show_until=datetime.now() + timedelta(days=2),
)
self.assertEqual(1, Announcement.get_site_wide().count())
def test_always_visible(self):
"""Always visible announcements are shown."""
# This one doesn't show
AnnouncementFactory(show_after=datetime.now() + timedelta(days=2))
AnnouncementFactory(
show_after=datetime.now() - timedelta(days=2), content="stardate 43125"
)
site_wide = Announcement.get_site_wide()
self.assertEqual(1, site_wide.count())
self.assertEqual("stardate 43125", site_wide[0].content)
def test_group_excluded(self):
"""Announcements in a group are not shown."""
AnnouncementFactory(group=self.group)
self.assertEqual(0, Announcement.get_site_wide().count())
def test_get_for_group_id(self):
"""If no groups are passed, nothing is returned."""
# Site-wide announcement
AnnouncementFactory()
# Announcement in a group.
a = AnnouncementFactory(group=self.group)
group_ann = Announcement.get_for_group_id(self.group.id)
self.assertEqual(1, len(group_ann))
self.assertEqual(a, group_ann[0])
def test_get_for_locale_name(self):
"""Announcements for a specific locale are shown."""
# Site-wide announcement
AnnouncementFactory()
# Announcement in a locale
a = AnnouncementFactory(locale=self.locale)
locale_ann = Announcement.get_for_locale_name(self.locale.locale)
self.assertEqual(1, locale_ann.count())
self.assertEqual(a, locale_ann[0])
| 37.0625 | 83 | 0.680017 |
7948fceb8073023b3429b7055eac4127fb80647e | 2,214 | py | Python | tensorpack/dataflow/imgaug/noise.py | awesome-archive/tensorpack | 55f640f70e19d538e5082a4712241ee966fcb201 | ["Apache-2.0"] | 121 | 2019-06-04T08:30:53.000Z | 2021-12-17T13:27:54.000Z | tensorpack/dataflow/imgaug/noise.py | lkn123/tensorpack | d7a13cb74c9066bc791d7aafc3b744b60ee79a9f | ["Apache-2.0"] | 7 | 2019-12-16T21:58:30.000Z | 2022-02-10T00:17:01.000Z | tensorpack/dataflow/imgaug/noise.py | lkn123/tensorpack | d7a13cb74c9066bc791d7aafc3b744b60ee79a9f | ["Apache-2.0"] | 22 | 2019-10-10T15:35:47.000Z | 2021-09-13T12:46:09.000Z |
# -*- coding: utf-8 -*-
# File: noise.py
import numpy as np
import cv2
from .base import ImageAugmentor
__all__ = ['JpegNoise', 'GaussianNoise', 'SaltPepperNoise']
class JpegNoise(ImageAugmentor):
""" Random JPEG noise. """
def __init__(self, quality_range=(40, 100)):
"""
Args:
quality_range (tuple): range to sample JPEG quality
"""
super(JpegNoise, self).__init__()
self._init(locals())
def _get_augment_params(self, img):
return self.rng.randint(*self.quality_range)
def _augment(self, img, q):
enc = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, q])[1]
return cv2.imdecode(enc, 1).astype(img.dtype)
class GaussianNoise(ImageAugmentor):
"""
Add random Gaussian noise N(0, sigma^2) of the same shape to img.
"""
def __init__(self, sigma=1, clip=True):
"""
Args:
sigma (float): stddev of the Gaussian distribution.
clip (bool): clip the result to [0,255] in the end.
"""
super(GaussianNoise, self).__init__()
self._init(locals())
def _get_augment_params(self, img):
return self.rng.randn(*img.shape)
def _augment(self, img, noise):
old_dtype = img.dtype
ret = img + noise * self.sigma
if self.clip or old_dtype == np.uint8:
ret = np.clip(ret, 0, 255)
return ret.astype(old_dtype)
class SaltPepperNoise(ImageAugmentor):
""" Salt and pepper noise.
Randomly set some elements in image to 0 or 255, regardless of its channels.
"""
def __init__(self, white_prob=0.05, black_prob=0.05):
"""
Args:
            white_prob (float), black_prob (float): probabilities of setting an element to 255 or 0, respectively.
"""
assert white_prob + black_prob <= 1, "Sum of probabilities cannot be greater than 1"
super(SaltPepperNoise, self).__init__()
self._init(locals())
def _get_augment_params(self, img):
return self.rng.uniform(low=0, high=1, size=img.shape)
def _augment(self, img, param):
img[param > (1 - self.white_prob)] = 255
img[param < self.black_prob] = 0
return img
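# Hedged usage sketch: applying an augmentor directly to an image. This assumes
# the ImageAugmentor base class of this tensorpack version exposes augment(),
# which pairs _get_augment_params with _augment as the methods above expect.
if __name__ == '__main__':
    img = np.random.randint(0, 256, size=(64, 64, 3)).astype('uint8')
    noisy = GaussianNoise(sigma=5).augment(img)
    # SaltPepperNoise._augment mutates in place, so work on a copy.
    speckled = SaltPepperNoise(white_prob=0.02, black_prob=0.02).augment(img.copy())
    print(noisy.dtype, speckled.shape)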
| 28.753247 | 97 | 0.610208 |
7948fd36bc148b9fad231f1849169dbf3f96e70c | 1,531 | py | Python | source/triangulation/computeMSMS.py | hui2000ji/masif | 70a76c5f4639f70c546d5603612c7cc9f47a35b8 | ["Apache-2.0"] | null | null | null | source/triangulation/computeMSMS.py | hui2000ji/masif | 70a76c5f4639f70c546d5603612c7cc9f47a35b8 | ["Apache-2.0"] | null | null | null | source/triangulation/computeMSMS.py | hui2000ji/masif | 70a76c5f4639f70c546d5603612c7cc9f47a35b8 | ["Apache-2.0"] | null | null | null |
import os
from subprocess import Popen, PIPE
from input_output.read_msms import read_msms
from triangulation.xyzrn import output_pdb_as_xyzrn
from default_config.global_vars import msms_bin
from default_config.masif_opts import masif_opts
import random
# Pablo Gainza LPDI EPFL 2017-2019
# Calls MSMS and returns the vertices.
# Special atoms are atoms with a reduced radius.
def computeMSMS(pdb_file, protonate=True):
randnum = random.randint(1,10000000)
file_base = masif_opts['tmp_dir']+"/msms_"+str(randnum)
out_xyzrn = file_base+".xyzrn"
if protonate:
output_pdb_as_xyzrn(pdb_file, out_xyzrn)
else:
print("Error - pdb2xyzrn is deprecated.")
import sys
sys.exit(1)
# Now run MSMS on xyzrn file
FNULL = open(os.devnull, 'w')
args = [msms_bin, "-density", "3.0", "-hdensity", "3.0", "-probe",\
"1.5", "-if",out_xyzrn,"-of",file_base, "-af", file_base]
    # print(msms_bin + " " + str(args))
p2 = Popen(args, stdout=PIPE, stderr=PIPE)
stdout, stderr = p2.communicate()
vertices, faces, normals, names = read_msms(file_base)
areas = {}
    with open(file_base + ".area") as ses_file:
        next(ses_file)  # skip the header line
        for line in ses_file:
            fields = line.split()
            areas[fields[3]] = fields[1]
# Remove temporary files.
os.remove(file_base+'.area')
os.remove(file_base+'.xyzrn')
os.remove(file_base+'.vert')
os.remove(file_base+'.face')
return vertices, faces, normals, names, areas
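# Hedged usage sketch: the expected call pattern, assuming msms_bin and
# masif_opts['tmp_dir'] point at a working MSMS binary and a writable
# directory; "protonated.pdb" is a placeholder input path.
if __name__ == "__main__":
    vertices, faces, normals, names, areas = computeMSMS("protonated.pdb", protonate=True)
    print(len(vertices), "vertices,", len(faces), "faces,", len(areas), "per-atom areas")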
| 31.895833 | 77 | 0.667538 |
7948fdad8414d753426b34eb4a9bfbc2c0c14db9 | 6,719 | py | Python | AirzoneCloud/Installation.py | max13fr/airzonecloud | ff3b6e76d4d008b364ed7dcfa7bffe542e5c6664 | ["MIT"] | null | null | null | AirzoneCloud/Installation.py | max13fr/airzonecloud | ff3b6e76d4d008b364ed7dcfa7bffe542e5c6664 | ["MIT"] | null | null | null | AirzoneCloud/Installation.py | max13fr/airzonecloud | ff3b6e76d4d008b364ed7dcfa7bffe542e5c6664 | ["MIT"] | null | null | null |
import logging
import time
from . import AirzoneCloud
from .Group import Group
from .Device import Device
_LOGGER = logging.getLogger(__name__)
class Installation:
"""Manage a AirzoneCloud installation"""
_api: AirzoneCloud = None
_data: dict = {}
_groups: "list[Group]" = []
def __init__(self, api: AirzoneCloud, data: dict) -> None:
self._api = api
self._data = data
# log
_LOGGER.info("Init {}".format(self.str_verbose))
_LOGGER.debug(data)
# load all groups
self._load_groups()
def __str__(self) -> str:
return "Installation(name={})".format(self.name)
@property
def str_verbose(self) -> str:
"""More verbose description of current installation"""
return "Installation(name={}, access_type={}, ws_ids=[{}], id={})".format(
self.name, self.access_type, ", ".join(self.ws_ids), self.id
)
#
# getters
#
@property
def id(self) -> str:
"""Return installation id"""
return self._data.get("installation_id")
@property
def name(self) -> str:
"""Return installation name"""
return self._data.get("name")
@property
def access_type(self) -> str:
"""Return installation access_type (admin┃advanced┃basic)"""
return self._data.get("access_type")
@property
def location_id(self) -> str:
"""Return installation location id"""
return self._data.get("location_id")
@property
    def ws_ids(self) -> "list[str]":
        """Return the list of webserver MAC addresses belonging to the installation"""
return self._data.get("ws_ids", [])
#
# setters
#
def turn_on(
self, auto_refresh: bool = True, delay_refresh: int = 1
) -> "Installation":
"""Turn on all devices in the installation"""
_LOGGER.info("call turn_on() on {}".format(self.str_verbose))
for group in self.groups:
group.turn_on(auto_refresh=False)
if auto_refresh:
time.sleep(delay_refresh) # wait data refresh by airzone
self.refresh_devices()
return self
def turn_off(
self, auto_refresh: bool = True, delay_refresh: int = 1
) -> "Installation":
"""Turn off all devices in the installation"""
_LOGGER.info("call turn_off() on {}".format(self.str_verbose))
for group in self.groups:
group.turn_off(auto_refresh=False)
if auto_refresh:
time.sleep(delay_refresh) # wait data refresh by airzone
self.refresh_devices()
return self
def set_temperature(
self, temperature: float, auto_refresh: bool = True, delay_refresh: int = 1
) -> "Installation":
"""Set target_temperature for current all devices in the installation (in degrees celsius)"""
_LOGGER.info(
"call set_temperature({}) on {}".format(temperature, self.str_verbose)
)
for group in self.groups:
group.set_temperature(temperature=temperature, auto_refresh=False)
if auto_refresh:
time.sleep(delay_refresh) # wait data refresh by airzone
self.refresh_devices()
return self
def set_mode(
self, mode_name: str, auto_refresh: bool = True, delay_refresh: int = 1
) -> "Installation":
"""Set mode of the all devices in the installation"""
_LOGGER.info("call set_mode({}) on {}".format(mode_name, self.str_verbose))
for group in self.groups:
group.set_mode(mode_name=mode_name, auto_refresh=False)
if auto_refresh:
time.sleep(delay_refresh) # wait data refresh by airzone
self.refresh_devices()
return self
#
# children
#
@property
def groups(self) -> "list[Group]":
"""Get all groups in the current installation"""
return self._groups
@property
def all_devices(self) -> "list[Device]":
"""Get all devices from all groups in the current installation"""
result = []
for group in self.groups:
for device in group.devices:
result.append(device)
return result
#
# Refresh
#
def refresh_groups(self) -> "Installation":
"""Refresh all groups of this installation"""
self._load_groups()
return self
def refresh_devices(self) -> "Installation":
"""Refresh all devices of this installation"""
for group in self.groups:
group.refresh_devices()
return self
#
# private
#
def _load_groups(self) -> "list[Group]":
"""Load all groups for this installation"""
previous_groups = self._groups
self._groups = []
try:
for group_data in self._api._api_get_installation_groups_list(self.id):
group = None
                # search for the group in previous_groups (in case we are refreshing groups)
for previous_group in previous_groups:
if previous_group.id == group_data.get("group_id"):
group = previous_group
group._set_data_refreshed(group_data)
break
# group not found => instance new group
if group is None:
group = Group(self._api, self, group_data)
self._groups.append(group)
except RuntimeError:
raise Exception(
"Unable to load groups for Installation " + self.str_verbose
)
return self._groups
def _set_data_refreshed(self, data: dict) -> "Installation":
"""Set data refreshed (called by parent AirzoneCloud on refresh_installations())"""
self._data = data
_LOGGER.info("Data refreshed for {}".format(self.str_verbose))
return self
#
# installation raw data example
#
# {
# "_id": "60f5cb...",
# "installation_id": "60f5...",
# "location_id": "60f54...",
# "location_text": {
# "city": {
# "de": "Bouches-du-Rhône",
# "en": "Bouches-du-Rhône",
# "es": "Bouches-du-Rhône",
# "fr": "Bouches-du-Rhône",
# "it": "Bouches-du-Rhône",
# "pt": "Bouches-du-Rhône"
# },
# "country": {
# "de": "Frankreich",
# "en": "France",
# "es": "Francia",
# "fr": "France",
# "it": "Francia",
# "pt": "França"
# }
# },
# "name": "Maison",
# "ws_ids": [
# "AA:BB:CC:DD:EE:FF"
# ],
# "access_type": "admin",
# "color": 2
# }
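# Hedged usage sketch (not from the library docs): driving an Installation via
# its public API. The `api.installations` accessor is an assumption inferred
# from this class's constructor, which receives the api and its data dict.
def _example_usage(api: AirzoneCloud) -> None:
    installation = api.installations[0]  # assumed accessor on the AirzoneCloud api object
    print(installation.str_verbose)
    installation.set_mode("cooling")     # forwarded to Group.set_mode on every group
    installation.set_temperature(22.5)   # degrees Celsius, devices refreshed after ~1s delay
    for device in installation.all_devices:
        print(device)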
| 28.713675 | 101 | 0.56809 |
7948ff0b71db2e9053def2639290ff5f49f7a084 | 20,156 | py | Python | probatus/interpret/inspector.py | anilkumarpanda/probatus | a6123b4da664dfc4f182dad1baa00e77decf4789 | ["MIT"] | null | null | null | probatus/interpret/inspector.py | anilkumarpanda/probatus | a6123b4da664dfc4f182dad1baa00e77decf4789 | ["MIT"] | null | null | null | probatus/interpret/inspector.py | anilkumarpanda/probatus | a6123b4da664dfc4f182dad1baa00e77decf4789 | ["MIT"] | null | null | null |
# Copyright (c) 2020 ING Bank N.V.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from ..utils import NotFittedError, UnsupportedModelError, BaseFitComputeClass
import numpy as np
import pandas as pd
import copy
from sklearn.cluster import KMeans
from probatus.utils import shap_helpers
def return_confusion_metric(y_true, y_score, normalize = False):
"""
    Computes a confusion metric as the absolute difference between y_true and y_score.
    If normalize is set to True, y_score is first normalized to the maximum value in the array.
    Args:
        y_true: (np.ndarray or pd.Series) true targets
        y_score: (np.ndarray or pd.Series) model output
        normalize: boolean, whether to normalize y_score to its maximum value
    Returns: (np.ndarray or pd.Series) confusion metric
"""
if normalize:
y_score = y_score/y_score.max()
return np.abs(y_true - y_score)
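# Worked example: return_confusion_metric(np.array([0, 1]), np.array([0.2, 0.6]))
# gives array([0.2, 0.4]); with normalize=True, y_score is first divided by its
# max (0.6), giving confusions of about [0.33, 0.0].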
class BaseInspector(BaseFitComputeClass):
def __init__(self, algotype, **kwargs):
self.algotype = algotype
        # TODO: fix compilation issue for hdbscan
# if algotype =='dbscan':
# self.clusterer = hdbscan.HDBSCAN(prediction_data=True,**kwargs)
if algotype =='kmeans':
self.clusterer = KMeans(**kwargs)
else:
raise UnsupportedModelError("The algorithm {} is not supported".format(algotype))
def __repr__(self):
repr_ = "{},\n\t{}".format(self.__class__.__name__,self.algotype)
if self.fitted:
repr_ += "\n\tTotal clusters {}".format(np.unique(self.clusterer.labels_).shape[0])
return repr_
def fit_clusters(self, X):
"""
Perform the fit of the clusters with the algorithm specified in the constructor
Args:
X: input features
Returns: cluster labels
"""
self.clusterer.fit(X)
self.fitted = True
return self
def predict_clusters(self,X):
if not self.fitted:
raise NotFittedError("Inspector not fitter. Run .fit()")
labels = None
if self.algotype == 'kmeans':
labels = self.clusterer.predict(X)
if self.algotype == 'dbscan':
raise NotImplementedError("Implementation not finished (note the hdbscan package is not imported yet!)")
#labels, strengths = hdbscan.approximate_predict(self.clusterer, X)
return labels
@staticmethod
def assert_is_dataframe(df):
if isinstance(df,pd.DataFrame):
return df
elif isinstance(df,np.ndarray) and len(df.shape)==2:
return pd.DataFrame(df)
else:
raise NotImplementedError("Sorry, X needs to be a pd.DataFrame for for a 2 dimensional numpy array")
@staticmethod
def assert_is_series(series, index=None):
if isinstance(series, pd.Series):
return series
elif isinstance(series, pd.DataFrame) and series.shape[1] == 1:
return pd.Series(series.values.ravel(), index=series.index)
elif isinstance(series, np.ndarray) and len(series.shape) == 1 and index is not None:
return pd.Series(series, index=index)
else:
raise TypeError(
"The object should be a pd.Series, a dataframe with one collumn or a 1 dimensional numpy array")
class InspectorShap(BaseInspector):
"""
Class to perform inspection of the model prediction based on Shapley values.
    It uses the Shapley values calculated for the trained model to build clusters in the SHAP space.
    For each cluster, the average confusion, average predicted probability and observed rate of a single class are
    calculated.
Every sub cluster can be retrieved with the function slice_cluster to perform deeper analysis.
The original dataframe indexing is used in slicing the dataframe, ensuring easy filtering
Args:
model: (obj) pretrained model (with sklearn-like API)
algotype: (str) clustering algorithm (supported are kmeans and hdbscan)
confusion_metric: (str) Confusion metric to use:
- "proba": it will calculate the confusion metric as the absolute value of the target minus the predicted
            probability. This provides a continuous measure of confusion, where 0 indicates correct predictions
            and the closer the number is to 1, the higher the confusion
normalize_probability: (boolean) if true, it will normalize the probabilities to the max value when computing
the confusion metric
        cluster_probability: (boolean) if true, uses the model prediction as an additional input for clustering
**kwargs: keyword arguments for the clustering algorithm
"""
def __init__(self, model, algotype='kmeans', confusion_metric = 'proba',
normalize_probability=False,cluster_probability = False, **kwargs):
super().__init__(algotype, **kwargs)
self.model = model
self.isinspected = False
self.hasmultiple_dfs = False
self.normalize_proba = normalize_probability
self.cluster_probabilities = cluster_probability
self.agg_summary_df = None
self.set_names = None
self.confusion_metric = confusion_metric
self.cluster_report = None
self.y = None
self.predicted_proba = None
self.X_shap = None
self.clusters = None
self.init_eval_set_report_variables()
if confusion_metric not in ['proba']:
#TODO implement the target method
raise NotImplementedError("confusion metric {} not supported. See docstrings".format(confusion_metric))
def __repr__(self):
repr_ = "{},\n\t{}".format(self.__class__.__name__, self.algotype)
if self.fitted:
repr_ += "\n\tTotal clusters {}".format(np.unique(self.clusterer.labels_).shape[0])
return repr_
def init_eval_set_report_variables(self):
self.X_shaps = list()
self.clusters_list = list()
self.ys = list()
self.predicted_probas = list()
def compute_probabilities(self, X):
"""
Compute the probabilities for the model using the sklearn API
Args:
X: Feature set
Returns: (np.array) probability
"""
return self.model.predict_proba(X)[:,1]
def fit_clusters(self, X):
"""
Perform the fit of the clusters with the algorithm specified in the constructor
Args:
X: input features
"""
X = copy.deepcopy(X)
if self.cluster_probabilities:
X['probs'] = self.predicted_proba
return super().fit_clusters(X)
def predict_clusters(self,X):
"""
Predicts the clusters of the dataset X
Args:
X: features
Returns: cluster labels
"""
X = copy.deepcopy(X)
if self.cluster_probabilities:
X['probs'] = self.predicted_proba
return super().predict_clusters(X)
def fit(self, X, y=None, eval_set = None, sample_names=None, **shap_kwargs):
"""
Fits and orchestrates the cluster calculations
Args:
X: (pd.DataFrame) with the features set used to train the model
y: (pd.Series, default=None): targets used to train the model
eval_set: (list, default=None). list of tuples in the shape (X,y) containing evaluation samples, for example
a test sample, validation sample etc... X corresponds to the feature set of the sample, y corresponds
to the targets of the samples
            sample_names: (list of strings, default=None): list of suffixes for the samples. If None, each sample will
                be labelled sample_{i}, where i corresponds to the index of the sample.
                List length must match that of eval_set
            **shap_kwargs: kwargs to pass to the SHAP TreeExplainer
"""
self.set_names = sample_names
if sample_names is not None:
# Make sure that the amount of eval sets matches the set names
            assert len(eval_set) == len(sample_names), "sample_names must be the same length as eval_set"
self.y, self.predicted_proba, self.X_shap, self.clusters = self.perform_fit_calc(X=X, y=y,
fit_clusters=True,
**shap_kwargs)
if eval_set is not None:
assert isinstance(eval_set, list), "eval_set needs to be a list"
self.hasmultiple_dfs = True
# Reset lists in case inspect run multiple times
self.init_eval_set_report_variables()
for X_, y_ in eval_set:
y_, predicted_proba_, X_shap_, clusters_ = self.perform_fit_calc(X=X_, y=y_, fit_clusters=False,
**shap_kwargs)
self.X_shaps.append(X_shap_)
self.ys.append(y_)
self.predicted_probas.append(predicted_proba_)
self.clusters_list.append(clusters_)
return self
def perform_fit_calc(self, X, y, fit_clusters=False, **shap_kwargs):
"""
Performs cluster calculations for a specific X and y
Args:
X: pd.DataFrame with the features set used to train the model
y: pd.Series (default None): targets used to train the model
fit_clusters: flag indicating whether clustering algorithm should be trained with computed shap values
            **shap_kwargs: kwargs to pass to the SHAP TreeExplainer
"""
X = self.assert_is_dataframe(X)
y = self.assert_is_series(y, index = X.index)
# Compute probabilities for the input X using model
predicted_proba = pd.Series(self.compute_probabilities(X), index = y.index,name = 'pred_proba')
# Compute SHAP values and cluster them
X_shap = shap_helpers.shap_to_df(self.model, X, **shap_kwargs)
if fit_clusters:
self.fit_clusters(X_shap)
clusters = pd.Series(self.predict_clusters(X_shap), index=y.index, name='cluster_id')
return y, predicted_proba, X_shap, clusters
def _compute_report(self):
"""
        Helper function to compute the report of the inspector - performs aggregations per cluster id
"""
self.summary_df = self.create_summary_df(self.clusters, self.y, self.predicted_proba, normalize=self.normalize_proba)
self.agg_summary_df = self.aggregate_summary_df(self.summary_df)
if self.hasmultiple_dfs:
self.summary_dfs = [
self.create_summary_df(clust, y, pred_proba, normalize=self.normalize_proba)
for clust, y, pred_proba in zip(self.clusters_list, self.ys, self.predicted_probas)
]
self.agg_summary_dfs = [
self.aggregate_summary_df(df)
for df in self.summary_dfs
]
def compute(self):
"""
Calculates a report containing the information per cluster.
Includes the following:
- cluster id
- total number of observations in the cluster
- total number of target 1 in the cluster
        - target 1 rate (ratio of target 1 counts to observations)
        - average predicted probabilities
        - average confusion
        If multiple eval_sets were passed to the fit() function, the output will contain those aggregations as well.
        The output names will use the sample names provided to the fit function. Otherwise they will be labelled by
        the suffix sample_{i}, where i is the index of the sample
Returns: (pd.DataFrame) with above mentioned aggregations.
"""
if self.cluster_report is not None:
return self.cluster_report
self._compute_report()
out = copy.deepcopy(self.agg_summary_df)
if self.hasmultiple_dfs:
for ix, agg_summary_df in enumerate(self.agg_summary_dfs):
if self.set_names is None:
sample_suffix = "sample_{}".format(ix+1)
else: sample_suffix = self.set_names[ix]
out = pd.merge(out, agg_summary_df, how="left", on='cluster_id',
suffixes = ('','_{}'.format(sample_suffix)))
self.cluster_report = out
return self.cluster_report
def slice_cluster(self, cluster_id, summary_df=None, X_shap=None, y=None, predicted_proba=None,
complementary = False):
"""
Slices the input dataframes by the cluster.
Args:
            cluster_id: (int, or a list for multiple ids) cluster ids to slice
            summary_df: Optional parameter - the summary_df on which the masking should be performed.
                If not passed, the slicing is performed on the summary generated by the fit method on X and y
            X_shap: Optional parameter - the SHAP values generated from X on which the masking should be performed.
                If not passed, the slicing is performed on the X_shap generated by the fit method on X and y
            y: Optional parameter - the y on which the masking should be performed.
                If not passed, the slicing is performed on the y passed to fit
            predicted_proba: Optional parameter - the predicted_proba on which the masking should be performed.
                If not passed, the slicing is performed on the predicted_proba generated by the fit method on X and y
complementary: flag that returns the cluster_id if set to False, otherwise the complementary dataframe (ie
those with ~mask)
Returns: tuple: Dataframe of sliced shapley values, series of sliced targets, sliced probabilities
"""
if self.cluster_report is None:
self.compute()
# Check if input specified by user, otherwise use the ones from self
if summary_df is None:
summary_df = self.summary_df
if X_shap is None:
X_shap = self.X_shap
if y is None:
y = self.y
if predicted_proba is None:
predicted_proba = self.predicted_proba
mask = self.get_cluster_mask(summary_df, cluster_id)
if not complementary:
return X_shap[mask], y[mask], predicted_proba[mask]
else:
return X_shap[~mask], y[~mask], predicted_proba[~mask]
def slice_cluster_eval_set(self, cluster_id, complementary=False):
"""
Slices the input dataframes passed in the eval_set in the inspect function by the cluster id.
Args:
            cluster_id: (int, or a list for multiple ids) cluster ids to slice
            complementary: flag that returns the cluster_id if set to False, otherwise the complementary dataframe (ie
                those with ~mask)
        Returns: list of tuples: each element of the list contains a
            Dataframe of sliced shapley values, a series of sliced targets, and the sliced probabilities
"""
if not self.hasmultiple_dfs:
raise NotFittedError("You did not fit the eval set. Please add an eval set when calling inspect()")
output = []
for X_shap, y, predicted_proba, summary_df in zip(
self.X_shaps, self.ys, self.predicted_probas, self.summary_dfs):
output.append(self.slice_cluster(cluster_id=cluster_id, summary_df=summary_df, X_shap=X_shap, y=y,
predicted_proba=predicted_proba, complementary=complementary))
return output
@staticmethod
def get_cluster_mask(df, cluster_id):
"""
Returns the mask to filter the cluster id
Args:
df: dataframe with 'cluster_id' in it
cluster_id: int or list of cluster ids to mask
Returns:
"""
if not isinstance(cluster_id, list):
cluster_id = [cluster_id]
mask = df['cluster_id'].isin(cluster_id)
return mask
@staticmethod
def create_summary_df(cluster,y, probas, normalize=False):
"""
Creates a summary by concatenating the cluster series, the targets, the probabilities and the measured confusion
Args:
            cluster: pd.Series of clusters
            y: pd.Series of targets
            probas: pd.Series of predicted probabilities of the model
            normalize: boolean (whether the predicted probabilities should be normalized to the max value)
Returns: pd.DataFrame (concatenation of the inputs)
"""
confusion = return_confusion_metric(y, probas, normalize = normalize).rename("confusion")
summary = [
cluster,
y.rename("target"),
probas,
confusion
]
return pd.concat(summary, axis=1)
@staticmethod
def aggregate_summary_df(df):
"""
Performs the aggregations at the cluster_id level needed to generate the report of the inspection
Args:
df: input df to aggregate
Returns: pd.Dataframe with aggregation results
"""
out = df.groupby("cluster_id").agg(
total_label_1=pd.NamedAgg(column='target', aggfunc="sum"),
total_entries=pd.NamedAgg(column='target', aggfunc="count"),
label_1_rate=pd.NamedAgg(column='target', aggfunc="mean"),
average_confusion=pd.NamedAgg(column='confusion', aggfunc="mean"),
average_pred_proba=pd.NamedAgg(column='pred_proba', aggfunc="mean"),
).reset_index().rename(columns={"index": "cluster_id"}).sort_values(by='cluster_id')
return out
def fit_compute(self, X, y=None, eval_set=None, sample_names=None, **shap_kwargs):
"""
Fits and orchestrates the cluster calculations and returns the computed report
Args:
X: (pd.DataFrame) with the features set used to train the model
y: (pd.Series, default=None): targets used to train the model
eval_set: (list, default=None). list of tuples in the shape (X,y) containing evaluation samples, for example
a test sample, validation sample etc... X corresponds to the feature set of the sample, y corresponds
to the targets of the samples
            sample_names: (list of strings, default=None): list of suffixes for the samples. If None, each sample will
                be labelled sample_{i}, where i corresponds to the index of the sample.
                List length must match that of eval_set
            **shap_kwargs: kwargs to pass to the SHAP TreeExplainer
Returns:
(pd.DataFrame) Report with aggregations described in compute() method.
"""
self.fit(X, y, eval_set, sample_names, **shap_kwargs)
return self.compute()
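# Hedged usage sketch: the fit/compute flow on a tree-based sklearn model (the
# SHAP helper used above targets tree models, so a RandomForest is a reasonable
# stand-in); the data here is synthetic.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier
    X, y = make_classification(n_samples=500, n_features=8, random_state=0)
    X = pd.DataFrame(X)
    model = RandomForestClassifier(random_state=0).fit(X, y)
    inspector = InspectorShap(model, algotype="kmeans", n_clusters=4)
    report = inspector.fit_compute(X, y)  # one aggregated row per cluster
    print(report)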
| 40.555332 | 126 | 0.638619 |
7948ff4f87873d92edf73625f1469a7aa96f0901 | 2,131 | py | Python | TestTimeAugmentation/mainTTA.py | onepanelio/ensembleObjectDetection | ddc742b553ba7e18d5dcdcf30f61f5858f369d3a | ["MIT"] | null | null | null | TestTimeAugmentation/mainTTA.py | onepanelio/ensembleObjectDetection | ddc742b553ba7e18d5dcdcf30f61f5858f369d3a | ["MIT"] | null | null | null | TestTimeAugmentation/mainTTA.py | onepanelio/ensembleObjectDetection | ddc742b553ba7e18d5dcdcf30f61f5858f369d3a | ["MIT"] | null | null | null |
import testTimeAugmentation
import function
import os
import shutil
import sys
import argparse
import ensembleOptions
from imutils import paths
def tta(model,myTechniques,pathImg,option, conf=0.7):
fichs = os.listdir(pathImg)
# 1. Create tmp folder
os.mkdir(pathImg+'/tmp')
    # 2. Move images to tmp
for fich in fichs:
shutil.copy(pathImg+'/'+fich, pathImg+'/tmp')
imgFolder = pathImg
os.mkdir(pathImg+'/../salida')
# 3. Classification
for technique in myTechniques:
function.clasification(imgFolder,technique)
# we get all the folders we have created
listDirOut = []
for filename in os.listdir(pathImg+'/../salida'):
if os.path.isdir(pathImg+'/../salida/'+filename) == True:
listDirOut.append(pathImg+'/../salida/'+filename)
for dir in listDirOut:
for img in os.listdir(dir+'/tmp'):
img1 = img[img.find("_")+1:]
img2 = img1[img1.find("_")+1:]
shutil.move(dir+'/tmp/'+img, dir+'/'+img2)
os.rmdir(dir+'/tmp')
# 4. Generate xml
for dir in listDirOut:
model.predict(dir, dir,conf)
# 5. Detection
for dir in listDirOut:
tec = dir.split("/")
function.detection(dir, tec[len(tec)-1])
for dir in listDirOut:
for img in os.listdir(dir):
if os.path.isdir(dir+'/'+img)== False:
os.remove(dir+'/'+img)
for img in os.listdir(dir+'/tmp'):
img1 = img[img.find("_") + 1:]
img2 = img1[img1.find("_") + 1:]
shutil.move(dir+'/tmp/'+img, dir+'/'+img2)
os.rmdir(dir+'/tmp')
# 6. Ensemble
for dirOut in os.listdir(pathImg+'/../salida/'):
for file in list(paths.list_files(pathImg+'/../salida/'+dirOut, validExts=(".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif"))):
os.remove(file)
ensembleOptions.ensembleOptions(pathImg+'/../salida/', option)
for xml in os.listdir(pathImg+'/../salida/output/'):
shutil.copy(pathImg+'/../salida/output/'+xml,pathImg+'/')
shutil.rmtree(pathImg+'/tmp')
shutil.rmtree(pathImg+'/../salida/')
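# Hedged usage sketch: the expected call shape for tta(). The model wrapper and
# technique names below are illustrative placeholders only; the actual wrapper
# classes live in testTimeAugmentation, and the technique strings must match
# what function.clasification understands.
# model = testTimeAugmentation.DarknetYoloPred(weights_path, names_path, cfg_path)  # hypothetical wrapper
# tta(model, ["histo", "hflip"], "/path/to/images", option="consensus", conf=0.7)   # hypothetical techniques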
| 30.884058 | 135 | 0.588925 |
7948ff6bb3cc71103bc4fee7e03a9bbadcf39751 | 152 | py | Python | embed_video/tests/__init__.py | albertcabre/django-embed-video | 5f3ff1ef0da59be2224139826091506a29b30c09 | ["MIT"] | 1 | 2018-08-12T22:01:34.000Z | 2018-08-12T22:01:34.000Z | embed_video/tests/__init__.py | albertcabre/django-embed-video | 5f3ff1ef0da59be2224139826091506a29b30c09 | ["MIT"] | 2 | 2016-07-13T21:03:58.000Z | 2016-07-14T16:41:50.000Z | embed_video/tests/__init__.py | albertcabre/django-embed-video | 5f3ff1ef0da59be2224139826091506a29b30c09 | ["MIT"] | 1 | 2018-01-11T09:28:08.000Z | 2018-01-11T09:28:08.000Z |
import os
import django
os.environ['DJANGO_SETTINGS_MODULE'] = 'embed_video.tests.django_settings'
if django.VERSION[:2] >= (1, 7):
django.setup()
| 21.714286 | 74 | 0.730263 |
7948fff1c70a1393b0f16b0b2d9e589976d1e93e | 13,154 | py | Python | aiokubernetes/models/v1_csi_persistent_volume_source.py | tantioch/aiokubernetes | 2f332498598ece14d22f8e59ecb02665db6db68d | ["Apache-2.0"] | 24 | 2018-07-07T15:12:19.000Z | 2021-09-01T07:33:11.000Z | aiokubernetes/models/v1_csi_persistent_volume_source.py | revoteon/aiokubernetes | 730eae03e4779563740f07ad3ecef180b511ac18 | ["Apache-2.0"] | 5 | 2018-07-11T00:09:17.000Z | 2018-10-22T16:41:54.000Z | aiokubernetes/models/v1_csi_persistent_volume_source.py | revoteon/aiokubernetes | 730eae03e4779563740f07ad3ecef180b511ac18 | ["Apache-2.0"] | 3 | 2018-07-10T10:16:57.000Z | 2018-10-20T19:32:05.000Z |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
from aiokubernetes.models.v1_secret_reference import V1SecretReference # noqa: F401,E501
class V1CSIPersistentVolumeSource(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'controller_publish_secret_ref': 'V1SecretReference',
'driver': 'str',
'fs_type': 'str',
'node_publish_secret_ref': 'V1SecretReference',
'node_stage_secret_ref': 'V1SecretReference',
'read_only': 'bool',
'volume_attributes': 'dict(str, str)',
'volume_handle': 'str'
}
attribute_map = {
'controller_publish_secret_ref': 'controllerPublishSecretRef',
'driver': 'driver',
'fs_type': 'fsType',
'node_publish_secret_ref': 'nodePublishSecretRef',
'node_stage_secret_ref': 'nodeStageSecretRef',
'read_only': 'readOnly',
'volume_attributes': 'volumeAttributes',
'volume_handle': 'volumeHandle'
}
def __init__(self, controller_publish_secret_ref=None, driver=None, fs_type=None, node_publish_secret_ref=None, node_stage_secret_ref=None, read_only=None, volume_attributes=None, volume_handle=None): # noqa: E501
"""V1CSIPersistentVolumeSource - a model defined in Swagger""" # noqa: E501
self._controller_publish_secret_ref = None
self._driver = None
self._fs_type = None
self._node_publish_secret_ref = None
self._node_stage_secret_ref = None
self._read_only = None
self._volume_attributes = None
self._volume_handle = None
self.discriminator = None
if controller_publish_secret_ref is not None:
self.controller_publish_secret_ref = controller_publish_secret_ref
self.driver = driver
if fs_type is not None:
self.fs_type = fs_type
if node_publish_secret_ref is not None:
self.node_publish_secret_ref = node_publish_secret_ref
if node_stage_secret_ref is not None:
self.node_stage_secret_ref = node_stage_secret_ref
if read_only is not None:
self.read_only = read_only
if volume_attributes is not None:
self.volume_attributes = volume_attributes
self.volume_handle = volume_handle
@property
def controller_publish_secret_ref(self):
"""Gets the controller_publish_secret_ref of this V1CSIPersistentVolumeSource. # noqa: E501
ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed. # noqa: E501
:return: The controller_publish_secret_ref of this V1CSIPersistentVolumeSource. # noqa: E501
:rtype: V1SecretReference
"""
return self._controller_publish_secret_ref
@controller_publish_secret_ref.setter
def controller_publish_secret_ref(self, controller_publish_secret_ref):
"""Sets the controller_publish_secret_ref of this V1CSIPersistentVolumeSource.
ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed. # noqa: E501
:param controller_publish_secret_ref: The controller_publish_secret_ref of this V1CSIPersistentVolumeSource. # noqa: E501
:type: V1SecretReference
"""
self._controller_publish_secret_ref = controller_publish_secret_ref
@property
def driver(self):
"""Gets the driver of this V1CSIPersistentVolumeSource. # noqa: E501
Driver is the name of the driver to use for this volume. Required. # noqa: E501
:return: The driver of this V1CSIPersistentVolumeSource. # noqa: E501
:rtype: str
"""
return self._driver
@driver.setter
def driver(self, driver):
"""Sets the driver of this V1CSIPersistentVolumeSource.
Driver is the name of the driver to use for this volume. Required. # noqa: E501
:param driver: The driver of this V1CSIPersistentVolumeSource. # noqa: E501
:type: str
"""
if driver is None:
raise ValueError("Invalid value for `driver`, must not be `None`") # noqa: E501
self._driver = driver
@property
def fs_type(self):
"""Gets the fs_type of this V1CSIPersistentVolumeSource. # noqa: E501
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
:return: The fs_type of this V1CSIPersistentVolumeSource. # noqa: E501
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""Sets the fs_type of this V1CSIPersistentVolumeSource.
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
:param fs_type: The fs_type of this V1CSIPersistentVolumeSource. # noqa: E501
:type: str
"""
self._fs_type = fs_type
@property
def node_publish_secret_ref(self):
"""Gets the node_publish_secret_ref of this V1CSIPersistentVolumeSource. # noqa: E501
NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed. # noqa: E501
:return: The node_publish_secret_ref of this V1CSIPersistentVolumeSource. # noqa: E501
:rtype: V1SecretReference
"""
return self._node_publish_secret_ref
@node_publish_secret_ref.setter
def node_publish_secret_ref(self, node_publish_secret_ref):
"""Sets the node_publish_secret_ref of this V1CSIPersistentVolumeSource.
NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed. # noqa: E501
:param node_publish_secret_ref: The node_publish_secret_ref of this V1CSIPersistentVolumeSource. # noqa: E501
:type: V1SecretReference
"""
self._node_publish_secret_ref = node_publish_secret_ref
@property
def node_stage_secret_ref(self):
"""Gets the node_stage_secret_ref of this V1CSIPersistentVolumeSource. # noqa: E501
NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed. # noqa: E501
:return: The node_stage_secret_ref of this V1CSIPersistentVolumeSource. # noqa: E501
:rtype: V1SecretReference
"""
return self._node_stage_secret_ref
@node_stage_secret_ref.setter
def node_stage_secret_ref(self, node_stage_secret_ref):
"""Sets the node_stage_secret_ref of this V1CSIPersistentVolumeSource.
        NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.  # noqa: E501
:param node_stage_secret_ref: The node_stage_secret_ref of this V1CSIPersistentVolumeSource. # noqa: E501
:type: V1SecretReference
"""
self._node_stage_secret_ref = node_stage_secret_ref
@property
def read_only(self):
"""Gets the read_only of this V1CSIPersistentVolumeSource. # noqa: E501
Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write). # noqa: E501
:return: The read_only of this V1CSIPersistentVolumeSource. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this V1CSIPersistentVolumeSource.
Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write). # noqa: E501
:param read_only: The read_only of this V1CSIPersistentVolumeSource. # noqa: E501
:type: bool
"""
self._read_only = read_only
@property
def volume_attributes(self):
"""Gets the volume_attributes of this V1CSIPersistentVolumeSource. # noqa: E501
Attributes of the volume to publish. # noqa: E501
:return: The volume_attributes of this V1CSIPersistentVolumeSource. # noqa: E501
:rtype: dict(str, str)
"""
return self._volume_attributes
@volume_attributes.setter
def volume_attributes(self, volume_attributes):
"""Sets the volume_attributes of this V1CSIPersistentVolumeSource.
Attributes of the volume to publish. # noqa: E501
:param volume_attributes: The volume_attributes of this V1CSIPersistentVolumeSource. # noqa: E501
:type: dict(str, str)
"""
self._volume_attributes = volume_attributes
@property
def volume_handle(self):
"""Gets the volume_handle of this V1CSIPersistentVolumeSource. # noqa: E501
        VolumeHandle is the unique volume name returned by the CSI volume plugin's CreateVolume to refer to the volume on all subsequent calls. Required. # noqa: E501
:return: The volume_handle of this V1CSIPersistentVolumeSource. # noqa: E501
:rtype: str
"""
return self._volume_handle
@volume_handle.setter
def volume_handle(self, volume_handle):
"""Sets the volume_handle of this V1CSIPersistentVolumeSource.
        VolumeHandle is the unique volume name returned by the CSI volume plugin's CreateVolume to refer to the volume on all subsequent calls. Required. # noqa: E501
:param volume_handle: The volume_handle of this V1CSIPersistentVolumeSource. # noqa: E501
:type: str
"""
if volume_handle is None:
raise ValueError("Invalid value for `volume_handle`, must not be `None`") # noqa: E501
self._volume_handle = volume_handle
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in self.swagger_types.items():
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CSIPersistentVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
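# A minimal construction sketch (illustration only, not part of the generated
# module). The keyword arguments mirror the properties defined above; the
# driver name and volume handle are hypothetical:
#
#   source = V1CSIPersistentVolumeSource(
#       driver="csi.example.com",
#       volume_handle="vol-0123",
#       fs_type="ext4",
#       read_only=False,
#   )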
| 41.89172
| 368
| 0.683822
|
7948fff758c6ab9cf808d1a61a862eac478914c6
| 134
|
py
|
Python
|
Python/Programming Fundamentals/Text Processing/09. Repeat Strings.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
Python/Programming Fundamentals/Text Processing/09. Repeat Strings.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
Python/Programming Fundamentals/Text Processing/09. Repeat Strings.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
strings = input().split(' ')
repeated_string = ''
for s in strings:
repeated_string += s * len(s)
print(repeated_string)
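# Quick illustration (added, not part of the original solution): the input
# "abc de" prints "abcabcabcdede" -- each token is repeated len(token) times.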
| 16.75
| 34
| 0.634328
|
794900848ed0676637ce0a6a12971cd62af31bde
| 256
|
py
|
Python
|
var_args.py
|
shreya-n-kumari/python
|
2462cf01891770b078815f9925f37842aaec7b91
|
[
"MIT"
] | null | null | null |
var_args.py
|
shreya-n-kumari/python
|
2462cf01891770b078815f9925f37842aaec7b91
|
[
"MIT"
] | null | null | null |
var_args.py
|
shreya-n-kumari/python
|
2462cf01891770b078815f9925f37842aaec7b91
|
[
"MIT"
] | null | null | null |
def Hair_cut(**name):
print(name["U_shape"])
print(name["three_step"])
print(name["full_layer"])
print(name["curly"])
print(name["straight"])
Hair_cut(U_shape = 'abc',three_step = 'def',full_layer = 'uvw',curly = 'shreya',straight = 'sblog')
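# Note (added for clarity): **name packs every keyword argument into a dict,
# so inside Hair_cut the call above yields name['curly'] == 'shreya'.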
| 28.444444
| 100
| 0.65625
|
794900d5839348fa8e258c7c20cc3348f60e1579
| 96
|
py
|
Python
|
my_services/apps.py
|
Williano/CV
|
b2954ac1753d7f31461c59cd7fe24ea13405cddf
|
[
"MIT"
] | null | null | null |
my_services/apps.py
|
Williano/CV
|
b2954ac1753d7f31461c59cd7fe24ea13405cddf
|
[
"MIT"
] | 3
|
2020-02-11T21:48:34.000Z
|
2021-06-10T18:38:09.000Z
|
my_services/apps.py
|
Williano/CV
|
b2954ac1753d7f31461c59cd7fe24ea13405cddf
|
[
"MIT"
] | 1
|
2018-08-06T06:57:16.000Z
|
2018-08-06T06:57:16.000Z
|
from django.apps import AppConfig
class MyServicesConfig(AppConfig):
name = 'my_services'
| 16
| 34
| 0.770833
|
794901638853029121ef3a2a36eb44337496a840
| 1,336
|
py
|
Python
|
chapter13/test/code/4-yuce.py
|
hitaitengteng/python
|
4e07fe6755ef1e0e0c1193249446e5246c89236e
|
[
"MIT"
] | null | null | null |
chapter13/test/code/4-yuce.py
|
hitaitengteng/python
|
4e07fe6755ef1e0e0c1193249446e5246c89236e
|
[
"MIT"
] | null | null | null |
chapter13/test/code/4-yuce.py
|
hitaitengteng/python
|
4e07fe6755ef1e0e0c1193249446e5246c89236e
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
import pandas as pd
inputfile = '../tmp/data4_GM11.xls'  # path of the saved grey-prediction (GM(1,1)) results
outputfile = '../data/enterprise_income.xls'  # where the neural-network predictions are saved
modelfile = '../tmp/4-net.model'  # path for saving the model weights
data = pd.read_excel(inputfile)  # read the data
feature = ['x1', 'x2', 'x3', 'x4', 'x6', 'x7', 'x9', 'x10']  # columns holding the features
data_train = data.loc[range(2002, 2014)].copy()  # model on the pre-2014 data
data_mean = data_train.mean()
data_std = data_train.std()
data_train = (data_train - data_mean) / data_std  # standardize the data
x_train = data_train[feature].values  # feature matrix (.as_matrix() was removed in modern pandas)
y_train = data_train['y'].values  # target vector
from keras.models import Sequential
from keras.layers import Dense, Activation
model = Sequential()  # build the model
model.add(Dense(6, input_dim=8))  # modern Keras API; Dense(8, 6) was the Keras 0.x form
model.add(Activation('relu'))  # relu as the activation function markedly improves accuracy
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')  # compile the model
model.fit(x_train, y_train, epochs=5000, batch_size=16)  # train the model for 5000 epochs (nb_epoch in old Keras)
model.save_weights(modelfile)  # save the model weights
# Predict, then invert the standardization (y = z * std + mean).
x = ((data[feature] - data_mean[feature]) / data_std[feature]).values
data['y_pred'] = model.predict(x).ravel() * data_std['y'] + data_mean['y']
data['y_pred'] = data['y_pred'].round()
data.to_excel(outputfile)
import matplotlib.pyplot as plt  # plot the prediction results
p = data[['y', 'y_pred']].plot(subplots=True, style=['b-o', 'r-*'])
plt.show()
| 37.111111
| 74
| 0.695359
|
794902ea53780f3ecf0f008401d046f1aae2eb87
| 4,097
|
py
|
Python
|
mito_sims_py3/fixed_points.py
|
DEPICTIVE/mito_sims
|
07daf8f5dcb82ba95c1b9f4cc8981d451b032986
|
[
"Apache-2.0"
] | 1
|
2020-07-14T03:07:10.000Z
|
2020-07-14T03:07:10.000Z
|
mito_sims_py3/fixed_points.py
|
DEPICTIVE/mito_sims
|
07daf8f5dcb82ba95c1b9f4cc8981d451b032986
|
[
"Apache-2.0"
] | null | null | null |
mito_sims_py3/fixed_points.py
|
DEPICTIVE/mito_sims
|
07daf8f5dcb82ba95c1b9f4cc8981d451b032986
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from scipy.optimize import bisect
# =============================================
# =============================================
# =============================================
# =============================================
class find:
def __init__(self, f, xlims, args, nsamples=100, max_iter=500):
'''
Find the fixed points of a dynamic system
Input :
f : function, model to compute the fixed points
xlims : python list, the boundary in which to search for values
args : tuple, parameters for f
            nsamples : int, the number of partitions of the range specified in xlims. In each partition the code searches for roots of f using the bisect method.
max_iter : max number of iterations
'''
self.xlims = xlims
self.args = args
self.nsamples = nsamples
self.max_iter = max_iter
self._find(f)
# =============================================
# =============================================
def _find(self, f, xo=None):
        '''
        Numerically solve for the fixed points and estimate their stability. To help ensure that all fixed points are found, roots are bracketed on a grid of nsamples evenly spaced points spanning xlims.
        Input:
            f = model dydt function
            xlims = list or numpy array indicating bounds of x
            args : arguments passed to model dydt function
        '''
# set partition values
xo = np.linspace(self.xlims[0], self.xlims[1], self.nsamples)
# instantiate fixed point list
fp = []
for w in range(1, self.nsamples):
# compute the flux at points (xo[w], xo[w-1])
dy = np.array([f(xo[w], self.args[0], self.args[1],
self.args[2], self.args[3]),
f(xo[w-1], self.args[0], self.args[1],
self.args[2], self.args[3])])
            # if there is a sign change then there must be a root between xo[w] and xo[w-1]
if np.sum(np.abs(dy)) != np.abs(np.sum(dy)):
# solve for the root using the bisect function in scipy
fp += [bisect(f, xo[w-1], xo[w],
args=self.args)]
# store fixed points
self.fp = np.array(fp)
# compute the stability of the fixed point
self._get_stability(f)
# =============================================
# =============================================
def _get_stability(self, f):
        '''
        Compute the stability of a fixed point by measuring the response to small random perturbations.
        '''
# instantiate stability array
self.stability = np.zeros(self.fp.size)
# loop over fixed points
for w in range(self.fp.size):
# sample small perturbation of the independent variable
x = self.fp[w] + 0.01*np.random.rand(10)
# compute the flux for each perturbation
y = np.zeros(x.size)
for wy in range(x.size):
y[wy] = f(np.abs(x[wy]), self.args[0], self.args[1],
self.args[2], self.args[3])
# find the slope of the flux about the perturbations
slope = compute_slope(x, y)
            # if the slope is less than 0 the fixed point is stable; otherwise it is unstable
if slope < 0:
self.stability[w] = 1
else:
self.stability[w] = 0
# =============================================
# =============================================
# =============================================
# =============================================
def compute_slope(x, y):
'''
Compute slope using linear regression
Input :
x : numpy array of floats representing independent variable
y : numpy array of floats representing dependent variable
Return :
float, representing the slope of the line that minimizes sum-squared error.
'''
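    # cov(x, y) / var(x) is the closed-form ordinary-least-squares slope;
    # np.cov returns the 2x2 covariance matrix, so c[0, 1] is cov(x, y)
    # and c[0, 0] is var(x).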
c = np.cov(x,y)
return c[0, 1] / c[0, 0]
| 40.97
| 197
| 0.494752
|
7949036b2e61e56805c1a7d1a3530e2583ebad15
| 15,113
|
py
|
Python
|
var/spack/repos/builtin/packages/legion/package.py
|
ilagunap/spack
|
510f869c3ae8ac2721debd29e98076212ee75852
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2018-11-16T02:42:57.000Z
|
2019-06-06T19:18:50.000Z
|
var/spack/repos/builtin/packages/legion/package.py
|
ilagunap/spack
|
510f869c3ae8ac2721debd29e98076212ee75852
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 5
|
2021-07-26T12:12:00.000Z
|
2022-03-01T12:16:03.000Z
|
var/spack/repos/builtin/packages/legion/package.py
|
ilagunap/spack
|
510f869c3ae8ac2721debd29e98076212ee75852
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Legion(CMakePackage):
"""Legion is a data-centric parallel programming system for writing
portable high performance programs targeted at distributed heterogeneous
architectures. Legion presents abstractions which allow programmers to
describe properties of program data (e.g. independence, locality). By
making the Legion programming system aware of the structure of program
data, it can automate many of the tedious tasks programmers currently
face, including correctly extracting task- and data-level parallelism
and moving data around complex memory hierarchies. A novel mapping
interface provides explicit programmer controlled placement of data in
the memory hierarchy and assignment of tasks to processors in a way
that is orthogonal to correctness, thereby enabling easy porting and
tuning of Legion applications to new architectures."""
homepage = "https://legion.stanford.edu/"
git = "https://github.com/StanfordLegion/legion.git"
maintainers = ['pmccormick', 'streichler']
version('21.03.0', tag='legion-21.03.0')
version('stable', branch='stable')
version('master', branch='master')
version('cr', branch='control_replication')
depends_on("cmake@3.16:", type='build')
    # TODO: Need to specify an MPI v3 version for use of the low-level MPI
    # transport layer. At present the MPI layer is still experimental and we
    # discourage its use for general (non-Legion-development) use cases.
depends_on('mpi', when='network=mpi')
depends_on('mpi', when='network=gasnet') # MPI is required to build gasnet (needs mpicc).
depends_on('ucx', when='conduit=ucx')
depends_on('mpi', when='conduit=mpi')
depends_on('cuda@10.0:11.9', when='+cuda_unsupported_compiler')
depends_on('cuda@10.0:11.9', when='+cuda')
depends_on('hdf5', when='+hdf5')
depends_on('hwloc', when='+hwloc')
# cuda-centric
# reminder for arch numbers to names: 60=pascal, 70=volta, 75=turing, 80=ampere
# TODO: we could use a map here to clean up and use naming vs. numbers.
cuda_arch_list = ('60', '70', '75', '80')
for nvarch in cuda_arch_list:
depends_on('kokkos@3.3.01+cuda+cuda_lambda+wrapper cuda_arch={0}'.format(nvarch),
when='%gcc+kokkos+cuda cuda_arch={0}'.format(nvarch))
depends_on("kokkos@3.3.01+cuda+cuda_lambda~wrapper cuda_arch={0}".format(nvarch),
when="%clang+kokkos+cuda cuda_arch={0}".format(nvarch))
depends_on('kokkos@3.3.01~cuda', when='+kokkos~cuda')
depends_on("kokkos@3.3.01~cuda+openmp", when='kokkos+openmp')
depends_on('python@3', when='+python')
depends_on('papi', when='+papi')
depends_on('zlib', when='+zlib')
# TODO: Need a AMD/HIP variant to match support landing in 21.03.0.
    # Network transport layer: the underlying data transport API used for
    # distributed data movement. For Legion, GASNet is currently the most
    # mature option. Many of our users default to no network layer for
    # day-to-day development, thus we default to 'none'. MPI support is new
    # and should be considered a beta release.
variant('network', default='none',
values=('gasnet', 'mpi', 'none'),
description="The network communications/transport layer to use.",
multi=False)
    # We default to automatically embedding a gasnet build. To override this,
    # point the package at a pre-installed version of GASNet-Ex via the
    # gasnet_root variant.
#
# make sure we have a valid directory provided for gasnet_root...
def validate_gasnet_root(value):
if value == 'none':
return True
if not os.path.isdir(value):
print("gasnet_root:", value, "-- no such directory.")
return False
else:
return True
variant('gasnet_root',
default='none',
values=validate_gasnet_root,
description="Path to a pre-installed version of GASNet (prefix directory).",
multi=False)
conflicts('gasnet_root', when="network=mpi")
variant('conduit', default='none',
values=('aries', 'ibv', 'udp', 'mpi', 'ucx', 'none'),
description="The gasnet conduit(s) to enable.",
multi=False)
conflicts('conduit=none', when='network=gasnet',
msg="a conduit must be selected when 'network=gasnet'")
gasnet_conduits = ('aries', 'ibv', 'udp', 'mpi', 'ucx')
for c in gasnet_conduits:
conflict_str = 'conduit=%s' % c
conflicts(conflict_str, when='network=mpi',
msg="conduit attribute requires 'network=gasnet'.")
conflicts(conflict_str, when='network=none',
msg="conduit attribute requires 'network=gasnet'.")
variant('gasnet_debug', default=False,
description="Build gasnet with debugging enabled.")
conflicts('+gasnet_debug', when='network=mpi')
conflicts('+gasnet_debug', when='network=none')
variant('shared', default=False,
description="Build shared libraries.")
variant('bounds_checks', default=False,
description="Enable bounds checking in Legion accessors.")
variant('privilege_checks', default=False,
description="Enable runtime privildge checks in Legion accessors.")
variant('enable_tls', default=False,
description="Enable thread-local-storage of the Legion context.")
variant('output_level', default='warning',
# Note: these values are dependent upon those used in the cmake config.
values=("spew", "debug", "info", "print", "warning", "error", "fatal",
"none"),
description="Set the compile-time logging level.",
multi=False)
variant('spy', default=False,
description="Enable detailed logging for Legion Spy debugging.")
# note: we will be dependent upon spack's latest-and-greatest cuda version...
variant('cuda', default=False,
description="Enable CUDA support.")
variant('cuda_hijack', default=False,
description="Hijack application calls into the CUDA runtime (+cuda).")
variant('cuda_arch', default='70',
values=cuda_arch_list,
description="GPU/CUDA architecture to build for.",
multi=False)
variant('cuda_unsupported_compiler', default=False,
description="Disable nvcc version check (--allow-unsupported-compiler).")
conflicts('+cuda_hijack', when='~cuda')
variant('fortran', default=False,
description="Enable Fortran bindings.")
variant('hdf5', default=False,
description="Enable support for HDF5.")
variant('hwloc', default=False,
description="Use hwloc for topology awareness.")
variant('kokkos', default=False,
description="Enable support for interoperability with Kokkos.")
variant('bindings', default=False,
description="Build runtime language bindings (excl. Fortran).")
variant('libdl', default=True,
description="Enable support for dynamic object/library loading.")
variant('openmp', default=False,
description="Enable support for OpenMP within Legion tasks.")
variant('papi', default=False,
description="Enable PAPI performance measurements.")
variant('python', default=False,
description="Enable Python support.")
variant('zlib', default=True,
description="Enable zlib support.")
variant('redop_complex', default=False,
description="Use reduction operators for complex types.")
variant('max_dims', values=int, default=3,
description="Set max number of dimensions for logical regions.")
variant('max_fields', values=int, default=512,
description="Maximum number of fields allowed in a logical region.")
variant('native', default=False,
description="Enable native/host processor optimizaton target.")
def cmake_args(self):
spec = self.spec
cmake_cxx_flags = []
options = []
if 'network=gasnet' in spec:
options.append('-DLegion_NETWORKS=gasnetex')
if spec.variants['gasnet_root'].value != 'none':
gasnet_dir = spec.variants['gasnet_root'].value
options.append('-DGASNet_ROOT_DIR=%s' % gasnet_dir)
else:
options.append('-DLegion_EMBED_GASNet=ON')
gasnet_conduit = spec.variants['conduit'].value
options.append('-DGASNet_CONDUIT=%s' % gasnet_conduit)
if '+gasnet_debug' in spec:
options.append('-DLegion_EMBED_GASNet_CONFIGURE_ARGS=--enable-debug')
elif 'network=mpi' in spec:
options.append('-DLegion_NETWORKS=mpi')
if spec.variants['gasnet_root'].value != 'none':
raise InstallError("'gasnet_root' is only valid when 'network=gasnet'.")
else:
if spec.variants['gasnet_root'].value != 'none':
raise InstallError("'gasnet_root' is only valid when 'network=gasnet'.")
options.append('-DLegion_EMBED_GASNet=OFF')
if '+shared' in spec:
options.append('-DBUILD_SHARED_LIBS=ON')
else:
options.append('-DBUILD_SHARED_LIBS=OFF')
if '+bounds_checks' in spec:
# default is off.
options.append('-DLegion_BOUNDS_CHECKS=ON')
if '+privilege_checks' in spec:
# default is off.
options.append('-DLegion_PRIVILEGE_CHECKS=ON')
if '+enable_tls' in spec:
# default is off.
options.append('-DLegion_ENABLE_TLS=ON')
if 'output_level' in spec:
level = str.upper(spec.variants['output_level'].value)
options.append('-DLegion_OUTPUT_LEVEL=%s' % level)
if '+spy' in spec:
# default is off.
options.append('-DLegion_SPY=ON')
if '+cuda' in spec:
cuda_arch = spec.variants['cuda_arch'].value
options.append('-DLegion_USE_CUDA=ON')
options.append('-DLegion_GPU_REDUCTIONS=ON')
options.append('-DLegion_CUDA_ARCH=%s' % cuda_arch)
if '+cuda_hijack' in spec:
options.append('-DLegion_HIJACK_CUDART=ON')
else:
options.append('-DLegion_HIJACK_CUDART=OFF')
if '+cuda_unsupported_compiler' in spec:
options.append('-DCUDA_NVCC_FLAGS:STRING=--allow-unsupported-compiler')
if '+fortran' in spec:
# default is off.
options.append('-DLegion_USE_Fortran=ON')
if '+hdf5' in spec:
# default is off.
options.append('-DLegion_USE_HDF5=ON')
if '+hwloc' in spec:
# default is off.
options.append('-DLegion_USE_HWLOC=ON')
if '+kokkos' in spec:
# default is off.
options.append('-DLegion_USE_Kokkos=ON')
os.environ['KOKKOS_CXX_COMPILER'] = spec['kokkos'].kokkos_cxx
if '+libdl' in spec:
# default is on.
options.append('-DLegion_USE_LIBDL=ON')
else:
options.append('-DLegion_USE_LIBDL=OFF')
if '+openmp' in spec:
# default is off.
options.append('-DLegion_USE_OpenMP=ON')
if '+papi' in spec:
# default is off.
options.append('-DLegion_USE_PAPI=ON')
if '+python' in spec:
# default is off.
options.append('-DLegion_USE_Python=ON')
if '+zlib' in spec:
# default is on.
options.append('-DLegion_USE_ZLIB=ON')
else:
options.append('-DLegion_USE_ZLIB=OFF')
if '+redop_complex' in spec:
# default is off.
options.append('-DLegion_REDOP_COMPLEX=ON')
if '+bindings' in spec:
# default is off.
options.append('-DLegion_BUILD_BINDINGS=ON')
options.append('-DLegion_REDOP_COMPLEX=ON') # required for bindings
options.append('-DLegion_USE_Fortran=ON')
if spec.variants['build_type'].value == 'Debug':
cmake_cxx_flags.extend([
'-DDEBUG_REALM',
'-DDEBUG_LEGION',
'-ggdb',
])
maxdims = int(spec.variants['max_dims'].value)
# TODO: sanity check if maxdims < 0 || > 9???
options.append('-DLegion_MAX_DIM=%d' % maxdims)
maxfields = int(spec.variants['max_fields'].value)
if (maxfields <= 0):
maxfields = 512
# make sure maxfields is a power of two. if not,
# find the next largest power of two and use that...
if (maxfields & (maxfields - 1) != 0):
while maxfields & maxfields - 1:
maxfields = maxfields & maxfields - 1
maxfields = maxfields << 1
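        # Worked example of the rounding above (illustrative values only):
        # 513 -> low bits cleared down to 512 -> shifted to 1024, while an
        # exact power of two such as 512 skips this branch entirely.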
options.append('-DLegion_MAX_FIELDS=%d' % maxfields)
if '+native' in spec:
# default is off.
options.append('-DBUILD_MARCH:STRING=native')
return options
@run_after('install')
def cache_test_sources(self):
"""Copy the example source files after the package is installed to an
install test subdirectory for use during `spack test run`."""
self.cache_extra_test_sources([join_path('examples', 'local_function_tasks')])
def run_local_function_tasks_test(self):
"""Run stand alone test: local_function_tasks"""
test_dir = join_path(self.test_suite.current_test_cache_dir,
'examples', 'local_function_tasks')
if not os.path.exists(test_dir):
print('Skipping local_function_tasks test')
return
exe = 'local_function_tasks'
cmake_args = ['-DCMAKE_C_COMPILER={0}'.format(self.compiler.cc),
'-DCMAKE_CXX_COMPILER={0}'.format(self.compiler.cxx),
'-DLegion_DIR={0}'.format(join_path(self.prefix,
'share',
'Legion',
'cmake'))]
self.run_test('cmake',
options=cmake_args,
purpose='test: generate makefile for {0} example'.format(exe),
work_dir=test_dir)
self.run_test('make',
purpose='test: build {0} example'.format(exe),
work_dir=test_dir)
self.run_test(exe,
purpose='test: run {0} example'.format(exe),
work_dir=test_dir)
def test(self):
self.run_local_function_tasks_test()
| 40.194149
| 94
| 0.610931
|
794904bbee96f5a5c201f2c068849441f7701ef5
| 1,960
|
py
|
Python
|
config/_base_/datasets/voc_ssdd.py
|
automlresearch/autodetector
|
e959baf589fb329509cd25edcab11c7d22ea5e7e
|
[
"Apache-2.0"
] | null | null | null |
config/_base_/datasets/voc_ssdd.py
|
automlresearch/autodetector
|
e959baf589fb329509cd25edcab11c7d22ea5e7e
|
[
"Apache-2.0"
] | null | null | null |
config/_base_/datasets/voc_ssdd.py
|
automlresearch/autodetector
|
e959baf589fb329509cd25edcab11c7d22ea5e7e
|
[
"Apache-2.0"
] | 1
|
2021-12-08T08:28:16.000Z
|
2021-12-08T08:28:16.000Z
|
# dataset settings
dataset_type = 'VOCDataset_1class'
# Dataroot on Razer Laptop
# data_root = '/home/p/Documents/data/SSDD数据以及标签/ssdd/'
# Dataroot on Desktop
data_root = "/home/p/HD2/Datasets/SSDD/"
img_norm_cfg = dict(mean=[0.2, 0.2, 0.2], std=[0.2, 0.2, 0.2], to_rgb=True)
# img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1000, 600),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
ann_file=[
data_root + 'ImageSets/Main/trainval.txt',
],
img_prefix=[data_root + ''],
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'ImageSets/Main/test.txt',
img_prefix=data_root + '',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'ImageSets/Main/test.txt',
img_prefix=data_root + '',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='mAP')
| 33.793103
| 81
| 0.602551
|
7949051046035e903164fe7914207a7bea97ff3c
| 4,310
|
py
|
Python
|
8term/OR/lab3/DualSimplexMethod.py
|
nik-sergeson/bsuir-informatics-labs
|
14805fb83b8e2324580b6253158565068595e804
|
[
"Apache-2.0"
] | null | null | null |
8term/OR/lab3/DualSimplexMethod.py
|
nik-sergeson/bsuir-informatics-labs
|
14805fb83b8e2324580b6253158565068595e804
|
[
"Apache-2.0"
] | null | null | null |
8term/OR/lab3/DualSimplexMethod.py
|
nik-sergeson/bsuir-informatics-labs
|
14805fb83b8e2324580b6253158565068595e804
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import division
from lab1.BasisMatricesHelper import get_basis_matrix, get_cannonical_form, get_basis_c_vector
from lab1.DualSimplexMethod import find_initial_basis_set
from sympy import zeros, Matrix
import bisect
class DualSimplexMethod(object):
"""
:type matrix_c:Matrix
:type matrix_A:Matrix
:type matrix_b:Matrix
:type eps:float
"""
def __init__(self, matrix_c, matrix_A, matrix_b, eps, condition_operators=None):
rows, cols = matrix_A.shape
assert matrix_b.shape[0] == rows
assert matrix_c.shape[0] == cols
self.matrix_c = matrix_c[:, :]
self._matrix_A = matrix_A[:, :]
self.matrix_b = matrix_b[:, :]
self.eps = eps
self.m, self.n = matrix_A.shape
if condition_operators is None:
self.condition_operators = ["="] * self.m
else:
self.condition_operators = list(condition_operators)
def solve(self, maximize, basis_indexes_set=None):
"""
:type basis_indexes_set: list[int]
:type not_basis_indexes_set: list[int]
"""
if not maximize:
self.matrix_c = -self.matrix_c
if not basis_indexes_set:
basis_indexes_set = find_initial_basis_set(self._matrix_A)
if '<=' in self.condition_operators or '>=' in self.condition_operators:
self._matrix_A, self.matrix_c, self.condition_operators = get_cannonical_form(self._matrix_A,
self.condition_operators,
self.matrix_c, maximize)
self.m, self.n = self._matrix_A.shape
basis_indexes_set.sort()
not_basis_indexes_set = sorted(set(range(self.n)) - set(basis_indexes_set))
return self.dual_simplex_algorithm(basis_indexes_set, not_basis_indexes_set)
def dual_simplex_algorithm(self, basis_indexes_set, not_basis_indexes_set):
"""
:type basis_indexes_set: list[int]
:type not_basis_indexes_set: list[int]
"""
inverse_basis_matrix = get_basis_matrix(self._matrix_A, basis_indexes_set).inv()
while True:
vector_u = get_basis_c_vector(self.matrix_c, basis_indexes_set).transpose() * inverse_basis_matrix
vector_delta = zeros(len(not_basis_indexes_set), 1)
for i, j in enumerate(not_basis_indexes_set):
vector_delta[i, 0] = self.matrix_c[j, 0] - (vector_u * self._matrix_A[:, j])[0, 0]
vector_kappa = inverse_basis_matrix * self.matrix_b
j_k = -1
for k, j in enumerate(basis_indexes_set):
if vector_kappa[k, 0] < -self.eps:
j_k = j
break
if j_k == -1:
basis_plan = zeros(self.n, 1)
for i, j in enumerate(basis_indexes_set):
basis_plan[j, 0] = vector_kappa[i, 0]
return basis_plan, basis_indexes_set
k = basis_indexes_set.index(j_k)
vector_mu = zeros(len(not_basis_indexes_set), 1)
vector_sigma = zeros(len(not_basis_indexes_set), 1)
for i, j_nb in enumerate(not_basis_indexes_set):
vector_mu[i, 0] = (inverse_basis_matrix[k, :] * self._matrix_A[:, j_nb])[0, 0]
for i, j_nb in enumerate(not_basis_indexes_set):
if vector_mu[i, 0] < -self.eps:
vector_sigma[i, 0] = vector_delta[i, 0] / vector_mu[i, 0]
else:
vector_sigma[i, 0] = float("inf")
j_0 = -1
min_sigma = float("inf")
for i, j_nb in enumerate(not_basis_indexes_set):
if vector_sigma[i, 0] < min_sigma:
min_sigma = vector_sigma[i, 0]
j_0 = j_nb
if j_0 == -1:
raise Exception("Limitations of direct task are incompatible")
basis_indexes_set.remove(j_k)
bisect.insort_left(basis_indexes_set, j_0)
not_basis_indexes_set.remove(j_0)
bisect.insort_left(not_basis_indexes_set, j_k)
inverse_basis_matrix = get_basis_matrix(self._matrix_A, basis_indexes_set).inv()
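# A minimal usage sketch (illustration only; assumes the lab1 helper modules
# are importable). Kept as a comment so the module's behaviour is unchanged:
#
#   from sympy import Matrix
#   c = Matrix([1, 1, 0, 0])
#   A = Matrix([[1, 0, 1, 0], [0, 1, 0, 1]])
#   b = Matrix([2, 3])
#   plan, basis = DualSimplexMethod(c, A, b, eps=1e-9).solve(maximize=True)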
| 45.851064
| 115
| 0.592111
|
79490513ccc519e8e9230e6019c6d197e7479575
| 1,676
|
py
|
Python
|
src/data/get_raw_data.py
|
bhavanakv/titanic-disaster
|
0a689ded73fdb84bc4a4c9cead2cb492e1f66464
|
[
"MIT"
] | null | null | null |
src/data/get_raw_data.py
|
bhavanakv/titanic-disaster
|
0a689ded73fdb84bc4a4c9cead2cb492e1f66464
|
[
"MIT"
] | null | null | null |
src/data/get_raw_data.py
|
bhavanakv/titanic-disaster
|
0a689ded73fdb84bc4a4c9cead2cb492e1f66464
|
[
"MIT"
] | null | null | null |
# the content below is written into a file using writefile magic function
import os
from dotenv import load_dotenv, find_dotenv
from requests import session
import logging
# load .env before reading the Kaggle credentials; otherwise the environment
# variables below are looked up before the file has been parsed
load_dotenv(find_dotenv())
payload = {
    'action': 'login',
    'username': os.environ.get("KAGGLE_USERNAME"),
    'password': os.environ.get("KAGGLE_PASSWORD")
}
def extract_data(url, file_path):
with session() as c:
c.post("https://www.kaggle.com/account/login",data=payload)
with open(file_path,'wb') as handle:
response = c.get(url,stream=True)
for block in response.iter_content(1024):
handle.write(block)
def main(project_dir):
logger = logging.getLogger(__name__) # obtaining the instance of logger
logger.info('getting raw data')
train_url = 'https://www.kaggle.com/c/titanic/download/train.csv'
test_url = 'https://www.kaggle.com/c/titanic/download/test.csv'
raw_data_path = os.path.join(project_dir,'data','raw')
train_data_path = os.path.join(raw_data_path,'train.csv')
test_data_path = os.path.join(raw_data_path,'test.csv')
extract_data(train_url,train_data_path)
extract_data(test_url,test_data_path)
logger.info('downloaded raw training and test data')
if __name__ == '__main__':
    project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)  # two levels up: from src/data to the project root
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
main(project_dir)
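# Example .env file consumed by load_dotenv above (placeholder values):
#
#   KAGGLE_USERNAME=your_username
#   KAGGLE_PASSWORD=your_password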
| 35.659574
| 131
| 0.668258
|
7949058280196016d2af75dd845533229a8391a4
| 3,234
|
py
|
Python
|
profiles_project/settings.py
|
kazamazza/profiles-rest-api
|
4f7274ef517f67d06a643759bc4a5ecb2e52cf72
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
kazamazza/profiles-rest-api
|
4f7274ef517f67d06a643759bc4a5ecb2e52cf72
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
kazamazza/profiles-rest-api
|
4f7274ef517f67d06a643759bc4a5ecb2e52cf72
|
[
"MIT"
] | null | null | null |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'uslo(49*-0$w%og@82p2+$ovv0so7k$6wi@hy7t(g35x3r*u8g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
| 25.872
| 91
| 0.700371
|
79490678627e217c90e0ac7b7806b504dbfea6fb
| 3,041
|
py
|
Python
|
nilabels/agents/math.py
|
nipy/nilabels
|
b065febc611eef638785651b4642d53bb61f1321
|
[
"MIT"
] | 15
|
2019-04-09T21:47:47.000Z
|
2022-02-01T14:11:51.000Z
|
nilabels/agents/math.py
|
SebastianoF/LabelsManager
|
b065febc611eef638785651b4642d53bb61f1321
|
[
"MIT"
] | 4
|
2018-08-21T16:53:04.000Z
|
2018-08-24T09:13:09.000Z
|
nilabels/agents/math.py
|
nipy/nilabels
|
b065febc611eef638785651b4642d53bb61f1321
|
[
"MIT"
] | 1
|
2019-04-06T20:49:48.000Z
|
2019-04-06T20:49:48.000Z
|
import nibabel as nib
from nilabels.tools.aux_methods.utils_path import get_pfi_in_pfi_out, connect_path_tail_head
from nilabels.tools.aux_methods.utils_nib import set_new_data
class Math(object):
"""
Facade of no external methods. Simple class for quick algebraic manipulations of images with the same grid
"""
def __init__(self, input_data_folder=None, output_data_folder=None):
self.pfo_in = input_data_folder
self.pfo_out = output_data_folder
def sum(self, path_first_image, path_second_image, path_resulting_image):
pfi_im1, pfi_im2 = get_pfi_in_pfi_out(path_first_image, path_second_image, self.pfo_in, self.pfo_in)
pfi_result = connect_path_tail_head(self.pfo_out, path_resulting_image)
im1 = nib.load(pfi_im1)
im2 = nib.load(pfi_im2)
        if im1.shape != im2.shape:
raise IOError('Input images must have the same dimensions.')
im_result = set_new_data(im1, new_data=im1.get_data() + im2.get_data())
nib.save(im_result, pfi_result)
print('Image sum of {0} {1} saved under {2}.'.format(pfi_im1, pfi_im2, pfi_result))
return pfi_result
def sub(self, path_first_image, path_second_image, path_resulting_image):
pfi_im1, pfi_im2 = get_pfi_in_pfi_out(path_first_image, path_second_image, self.pfo_in, self.pfo_in)
pfi_result = connect_path_tail_head(self.pfo_out, path_resulting_image)
im1 = nib.load(pfi_im1)
im2 = nib.load(pfi_im2)
        if im1.shape != im2.shape:
raise IOError('Input images must have the same dimensions.')
im_result = set_new_data(im1, new_data=im1.get_data() - im2.get_data())
nib.save(im_result, pfi_result)
print('Image difference of {0} {1} saved under {2}.'.format(pfi_im1, pfi_im2, pfi_result))
return pfi_result
def prod(self, path_first_image, path_second_image, path_resulting_image):
pfi_im1, pfi_im2 = get_pfi_in_pfi_out(path_first_image, path_second_image, self.pfo_in, self.pfo_in)
pfi_result = connect_path_tail_head(self.pfo_out, path_resulting_image)
im1 = nib.load(pfi_im1)
im2 = nib.load(pfi_im2)
        if im1.shape != im2.shape:
raise IOError('Input images must have the same dimensions.')
im_result = set_new_data(im1, new_data=im1.get_data() * im2.get_data())
nib.save(im_result, pfi_result)
print('Image product of {0} {1} saved under {2}.'.format(pfi_im1, pfi_im2, pfi_result))
return pfi_result
def scalar_prod(self, scalar, path_image, path_resulting_image):
pfi_image = connect_path_tail_head(self.pfo_in, path_image)
pfi_result = connect_path_tail_head(self.pfo_out, path_resulting_image)
im = nib.load(pfi_image)
im_result = set_new_data(im, new_data=scalar * im.get_data())
nib.save(im_result, pfi_result)
print('Image {0} times {1} saved under {2}.'.format(pfi_image, scalar, pfi_result))
return pfi_result
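# A minimal usage sketch (illustration only; paths and file names are
# hypothetical):
#
#   m = Math(input_data_folder='/data/in', output_data_folder='/data/out')
#   m.sum('mod1.nii.gz', 'mod2.nii.gz', 'mod_sum.nii.gz')
#   m.scalar_prod(0.5, 'mod_sum.nii.gz', 'mod_half.nii.gz')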
| 41.094595
| 110
| 0.698783
|
7949068b81848b1ec1d756a7b7803534047ab920
| 792
|
py
|
Python
|
satchless/product/handler.py
|
cajun-code/satchless
|
068b26046c3af63268f8eecd6e33da2bbb78b8d1
|
[
"BSD-4-Clause"
] | 1
|
2015-11-05T05:09:27.000Z
|
2015-11-05T05:09:27.000Z
|
satchless/product/handler.py
|
cajun-code/satchless
|
068b26046c3af63268f8eecd6e33da2bbb78b8d1
|
[
"BSD-4-Clause"
] | null | null | null |
satchless/product/handler.py
|
cajun-code/satchless
|
068b26046c3af63268f8eecd6e33da2bbb78b8d1
|
[
"BSD-4-Clause"
] | null | null | null |
from django.conf import settings
from django.utils.importlib import import_module
from django.http import HttpResponse
_handlers_queue = None
def product_view(instances, request):
context = {}
for handler in _handlers_queue:
context = handler(instances, request=request, extra_context=context)
if isinstance(context, HttpResponse):
return context
return context
def init_queue():
global _handlers_queue
_handlers_queue = []
for handler in getattr(settings, 'SATCHLESS_PRODUCT_VIEW_HANDLERS', []):
if isinstance(handler, str):
mod_name, han_name = handler.rsplit('.', 1)
module = import_module(mod_name)
handler = getattr(module, han_name)
_handlers_queue.append(handler)
init_queue()
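# A hypothetical settings entry (illustration only): handlers may be given as
# dotted-path strings, which init_queue() resolves via import_module + getattr:
#
#   SATCHLESS_PRODUCT_VIEW_HANDLERS = ['myshop.handlers.product_view']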
| 30.461538
| 76
| 0.698232
|
794906b2f34442e1c7f759d29020e91b6e6d95bd
| 909
|
py
|
Python
|
buffered_encryption/tests/test_gcm.py
|
eblocha/buffered-encryption
|
5ee84645f90ab74d2a7f6f7cc826bbe146f3c9cc
|
[
"MIT"
] | 3
|
2021-04-26T15:23:25.000Z
|
2022-03-11T21:16:46.000Z
|
buffered_encryption/tests/test_gcm.py
|
eblocha/buffered-encryption
|
5ee84645f90ab74d2a7f6f7cc826bbe146f3c9cc
|
[
"MIT"
] | null | null | null |
buffered_encryption/tests/test_gcm.py
|
eblocha/buffered-encryption
|
5ee84645f90ab74d2a7f6f7cc826bbe146f3c9cc
|
[
"MIT"
] | null | null | null |
import os
import io
from buffered_encryption.aesgcm import EncryptionIterator, DecryptionIterator
import unittest
class TestSymmetry(unittest.TestCase):
def setUp(self):
# Prime number of random bytes greater than the chunk size
self.plaintext = os.urandom(130399)
self.key = os.urandom(32)
self.ad = os.urandom(16)
def test_buffer(self):
"""Encrypt to a buffer"""
enc = EncryptionIterator(io.BytesIO(self.plaintext), self.key, self.ad)
ciphertext = io.BytesIO()
decrypted = io.BytesIO()
for chunk in enc:
ciphertext.write(chunk)
ciphertext.seek(0)
dec = DecryptionIterator(ciphertext, self.key, self.ad, enc.iv, enc.tag)
for chunk in dec:
decrypted.write(chunk)
ciphertext.close()
decrypted.seek(0)
self.assertEqual(decrypted.read(),self.plaintext)
| 28.40625
| 80
| 0.646865
|
79490711c3504719c84fdb6f6f1ef3aeafe05586
| 1,210
|
py
|
Python
|
onmt/tests/test_translator.py
|
l-k-11235/OpenNMT-py
|
4815f07fcd482af9a1fe1d3b620d144197178bc5
|
[
"MIT"
] | 5,864
|
2017-02-24T19:17:07.000Z
|
2022-03-31T20:49:22.000Z
|
onmt/tests/test_translator.py
|
l-k-11235/OpenNMT-py
|
4815f07fcd482af9a1fe1d3b620d144197178bc5
|
[
"MIT"
] | 1,727
|
2017-02-27T09:09:56.000Z
|
2022-03-29T17:08:29.000Z
|
onmt/tests/test_translator.py
|
l-k-11235/OpenNMT-py
|
4815f07fcd482af9a1fe1d3b620d144197178bc5
|
[
"MIT"
] | 2,570
|
2017-02-24T19:20:36.000Z
|
2022-03-31T06:24:22.000Z
|
import unittest
from onmt.translate import GeneratorLM
import torch
class TestGeneratorLM(unittest.TestCase):
def test_split_src_to_prevent_padding_target_prefix_is_none_when_equal_size( # noqa: E501
self,
):
src = torch.randint(0, 10, (5, 6))
src_lengths = 5 * torch.ones(5)
(
src,
src_lengths,
target_prefix,
) = GeneratorLM.split_src_to_prevent_padding(src, src_lengths)
self.assertIsNone(target_prefix)
def test_split_src_to_prevent_padding_target_prefix_is_ok_when_different_size( # noqa: E501
self,
):
default_length = 5
src = torch.randint(0, 10, (default_length, 6))
src_lengths = default_length * torch.ones(6, dtype=torch.int)
new_length = 4
src_lengths[1] = new_length
(
src,
src_lengths,
target_prefix,
) = GeneratorLM.split_src_to_prevent_padding(src, src_lengths)
self.assertTupleEqual(src.shape, (new_length, 6))
self.assertTupleEqual(target_prefix.shape, (1, 6))
self.assertTrue(
src_lengths.equal(new_length * torch.ones(6, dtype=torch.int))
)
| 32.702703
| 96
| 0.63719
|
79490731cbe1a6782fc30516a0eba4218dbfb31f
| 760
|
py
|
Python
|
project_code/topology_getter/component_errors.py
|
statnett/relevant_assets
|
3b29c64677b63c485c09fbef14d933fe49fd7fc0
|
[
"Apache-2.0"
] | 1
|
2020-04-29T13:29:22.000Z
|
2020-04-29T13:29:22.000Z
|
project_code/topology_getter/component_errors.py
|
statnett/relevant_assets
|
3b29c64677b63c485c09fbef14d933fe49fd7fc0
|
[
"Apache-2.0"
] | null | null | null |
project_code/topology_getter/component_errors.py
|
statnett/relevant_assets
|
3b29c64677b63c485c09fbef14d933fe49fd7fc0
|
[
"Apache-2.0"
] | null | null | null |
class BusInitError(Exception):
    """ Error class for Bus initialization
    """
    pass
class BranchInitError(Exception):
    """ Error class for Branch initialization
    """
    pass
class LineInitError(Exception):
    """ Error class for Line initialization
    """
    pass
class TwoWindingTransformerInitError(Exception):
    """ Error class for TwoWindingTransformer initialization
    """
    pass
class ThreeWindingTransformerInitError(Exception):
    """ Error class for ThreeWindingTransformer initialization
    """
    pass
class MachineInitError(Exception):
    """ Error class for Machine initialization
    """
    pass
class LoadInitError(Exception):
    """ Error class for Load initialization
    """
    pass
| 15.833333
| 63
| 0.689474
|
79490760e4f77ec4c4049262881c1b9492d586e3
| 4,595
|
py
|
Python
|
usaspending_api/broker/tests/integration/test_broker_integration.py
|
samspoonemore0/usaspending-api
|
62326fd1426ee5a18bde6d0df22237c8d995d853
|
[
"CC0-1.0"
] | 1
|
2022-01-18T05:07:32.000Z
|
2022-01-18T05:07:32.000Z
|
usaspending_api/broker/tests/integration/test_broker_integration.py
|
Hk92a/usaspending-api
|
25daa9dbc30835b8f4b4c797c592ba9ecc78ca00
|
[
"CC0-1.0"
] | null | null | null |
usaspending_api/broker/tests/integration/test_broker_integration.py
|
Hk92a/usaspending-api
|
25daa9dbc30835b8f4b4c797c592ba9ecc78ca00
|
[
"CC0-1.0"
] | null | null | null |
import pytest
from django.conf import settings
from django.db import connections, DEFAULT_DB_ALIAS
from django.test import TestCase
class BrokerIntegrationTestCase(TestCase):
databases = {"default", "data_broker"}
dummy_table_name = "dummy_broker_table_to_be_rolled_back"
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
# Follow-up of test_broker_transactional_test
with connections["data_broker"].cursor() as cursor:
cursor.execute("select * from pg_tables where tablename = '{}'".format(cls.dummy_table_name))
results = cursor.fetchall()
assert results is not None
if len(results) != 0:
pytest.fail(
"Test test_broker_transactional_test did not run transactionally. "
"Creation of table {} in Broker DB was not rolled back and still exists.".format(cls.dummy_table_name)
)
@pytest.mark.usefixtures("broker_db_setup")
def test_can_connect_to_broker(self):
"""Simple 'integration test' that checks a Broker DB exists to integrate with"""
connection = connections["data_broker"]
with connection.cursor() as cursor:
cursor.execute("SELECT now()")
results = cursor.fetchall()
assert results is not None
assert len(str(results[0][0])) > 0
@pytest.mark.usefixtures("broker_db_setup")
def test_broker_transactional_test(self):
"""Integration test that checks whether Django's default transactional test implementation works against the
integrated Broker DB too.
        The test creates a dummy table during its execution. If the transactional wrapper is working, that table creation
        will be rolled back after the test completes. This is not verified until the ``tearDownClass`` method runs.
NOTE: The transaction is only controlled and will only roll-back if you use Django's django.db.connections
dictionary to get the connection.
"""
dummy_contents = "dummy_text"
# Make sure the table and the data get in there
connection = connections["data_broker"]
with connection.cursor() as cursor:
cursor.execute("create table {} (contents text)".format(self.dummy_table_name))
cursor.execute("insert into {} values ('{}')".format(self.dummy_table_name, dummy_contents))
with connection.cursor() as cursor:
cursor.execute("select * from pg_tables where tablename = '{}'".format(self.dummy_table_name))
results = cursor.fetchall()
assert results is not None
assert len(str(results[0][0])) > 0
with connection.cursor() as cursor:
cursor.execute("select * from {}".format(self.dummy_table_name))
results = cursor.fetchall()
assert results is not None
assert str(results[0][0]) == dummy_contents
@pytest.mark.usefixtures("broker_db_setup")
def test_broker_db_fully_setup(self):
"""Simple 'integration test' that checks a Broker DB had its schema setup"""
connection = connections["data_broker"]
with connection.cursor() as cursor:
cursor.execute("select * from pg_tables where tablename = 'alembic_version'")
results = cursor.fetchall()
assert results is not None
assert len(results) > 0
assert len(str(results[0][0])) > 0
def test_can_connect_to_broker_by_dblink(broker_server_dblink_setup, db):
"""Simple 'integration test' that checks the USAspending to Broker dblink works
It will be skipped if a broker foreign data wrapper is not created in the USAspending database-under-test
"""
connection = connections[DEFAULT_DB_ALIAS]
with connection.cursor() as cursor:
cursor.execute(f"select srvname from pg_foreign_server where srvname = '{settings.DATA_BROKER_DBLINK_NAME}'")
results = cursor.fetchall()
if not results or not results[0][0] == settings.DATA_BROKER_DBLINK_NAME:
pytest.skip(
f"No foreign server named '{settings.DATA_BROKER_DBLINK_NAME}' has been setup on this "
"USAspending database. Skipping the test of integration with that server via dblink"
)
cursor.execute(
f"SELECT * FROM dblink('{settings.DATA_BROKER_DBLINK_NAME}','SELECT now()') "
"AS broker_time(the_now timestamp)"
)
results = cursor.fetchall()
assert results is not None
assert len(results) > 0
assert len(str(results[0][0])) > 0
| 45.04902
| 118
| 0.669859
|
7949088897681b2ac614f2af8db9a64059080d5a
| 4,208
|
py
|
Python
|
clients/python-experimental/generated/openapi_client/model/free_style_build.py
|
cliffano/jenkins-api-clients-generator
|
522d02b3a130a29471df5ec1d3d22c822b3d0813
|
[
"MIT"
] | null | null | null |
clients/python-experimental/generated/openapi_client/model/free_style_build.py
|
cliffano/jenkins-api-clients-generator
|
522d02b3a130a29471df5ec1d3d22c822b3d0813
|
[
"MIT"
] | null | null | null |
clients/python-experimental/generated/openapi_client/model/free_style_build.py
|
cliffano/jenkins-api-clients-generator
|
522d02b3a130a29471df5ec1d3d22c822b3d0813
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
from frozendict import frozendict # noqa: F401
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from openapi_client.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
InstantiationMetadata,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
NumberBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
class FreeStyleBuild(
DictSchema
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
_class = StrSchema
number = IntSchema
url = StrSchema
class actions(
ListSchema
):
@classmethod
@property
def _items(cls) -> typing.Type['CauseAction']:
return CauseAction
building = BoolSchema
description = StrSchema
displayName = StrSchema
duration = IntSchema
estimatedDuration = IntSchema
executor = StrSchema
fullDisplayName = StrSchema
id = StrSchema
keepLog = BoolSchema
queueId = IntSchema
result = StrSchema
timestamp = IntSchema
builtOn = StrSchema
@classmethod
@property
def changeSet(cls) -> typing.Type['EmptyChangeLogSet']:
return EmptyChangeLogSet
def __new__(
cls,
*args: typing.Union[dict, frozendict, ],
_class: typing.Union[_class, Unset] = unset,
number: typing.Union[number, Unset] = unset,
url: typing.Union[url, Unset] = unset,
actions: typing.Union[actions, Unset] = unset,
building: typing.Union[building, Unset] = unset,
description: typing.Union[description, Unset] = unset,
displayName: typing.Union[displayName, Unset] = unset,
duration: typing.Union[duration, Unset] = unset,
estimatedDuration: typing.Union[estimatedDuration, Unset] = unset,
executor: typing.Union[executor, Unset] = unset,
fullDisplayName: typing.Union[fullDisplayName, Unset] = unset,
id: typing.Union[id, Unset] = unset,
keepLog: typing.Union[keepLog, Unset] = unset,
queueId: typing.Union[queueId, Unset] = unset,
result: typing.Union[result, Unset] = unset,
timestamp: typing.Union[timestamp, Unset] = unset,
builtOn: typing.Union[builtOn, Unset] = unset,
changeSet: typing.Union['EmptyChangeLogSet', Unset] = unset,
_instantiation_metadata: typing.Optional[InstantiationMetadata] = None,
**kwargs: typing.Type[Schema],
) -> 'FreeStyleBuild':
return super().__new__(
cls,
*args,
_class=_class,
number=number,
url=url,
actions=actions,
building=building,
description=description,
displayName=displayName,
duration=duration,
estimatedDuration=estimatedDuration,
executor=executor,
fullDisplayName=fullDisplayName,
id=id,
keepLog=keepLog,
queueId=queueId,
result=result,
timestamp=timestamp,
builtOn=builtOn,
changeSet=changeSet,
_instantiation_metadata=_instantiation_metadata,
**kwargs,
)
from openapi_client.model.cause_action import CauseAction
from openapi_client.model.empty_change_log_set import EmptyChangeLogSet
| 27.148387
| 85
| 0.638546
|
7949098eac192fe0d5585a742e6388a8f9698094
| 642
|
py
|
Python
|
solutions/python3/369.py
|
sm2774us/amazon_interview_prep_2021
|
f580080e4a6b712b0b295bb429bf676eb15668de
|
[
"MIT"
] | 42
|
2020-08-02T07:03:49.000Z
|
2022-03-26T07:50:15.000Z
|
solutions/python3/369.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | null | null | null |
solutions/python3/369.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | 40
|
2020-02-08T02:50:24.000Z
|
2022-03-26T15:38:10.000Z
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def plusOne(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
def dfs(node):
if not node.next or dfs(node.next):
if node.val + 1 > 9:
node.val = 0
return True
else:
node.val += 1
return False
if dfs(head):
dummy = ListNode(1)
dummy.next = head
return dummy
return head
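# Walkthrough (added for clarity): for the list 1 -> 2 -> 9, dfs reaches the
# tail, turns 9 into 0 and reports a carry, so the 2 becomes 3: 1 -> 3 -> 0.
# When the carry propagates past the head (e.g. 9 -> 9), a new node holding 1
# is prepended, yielding 1 -> 0 -> 0.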
| 25.68
| 47
| 0.44081
|
794909eb8a9cf3c184734b084c5a7913a3330d2e
| 164
|
py
|
Python
|
home/admin.py
|
aayushray/CodingEasy
|
8af86a0b83438e568349f162dae5e77f5ea923e4
|
[
"MIT"
] | 1
|
2022-03-03T09:30:26.000Z
|
2022-03-03T09:30:26.000Z
|
home/admin.py
|
aayushray/CodingEasy
|
8af86a0b83438e568349f162dae5e77f5ea923e4
|
[
"MIT"
] | null | null | null |
home/admin.py
|
aayushray/CodingEasy
|
8af86a0b83438e568349f162dae5e77f5ea923e4
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Contact,Newsletter
# Register your models here.
admin.site.register(Contact)
admin.site.register(Newsletter)
| 20.5
| 38
| 0.817073
|
79490bbcf49729148923b3a244bd8c784d1af946
| 7,486
|
py
|
Python
|
imacropy/console.py
|
bogiebro/imacropy
|
d659527cf5af391c07de09484c7163f92fb92aed
|
[
"BSD-2-Clause"
] | null | null | null |
imacropy/console.py
|
bogiebro/imacropy
|
d659527cf5af391c07de09484c7163f92fb92aed
|
[
"BSD-2-Clause"
] | null | null | null |
imacropy/console.py
|
bogiebro/imacropy
|
d659527cf5af391c07de09484c7163f92fb92aed
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""IPython extension for a MacroPy-enabled REPL.
To enable::
%load_ext imacropy.console
To autoload it at IPython startup, put this into your ``ipython_config.py``::
c.InteractiveShellApp.extensions = ["imacropy.console"]
To find your config file, ``ipython profile locate``.
Notes:
- Each time a ``from mymodule import macros, ...`` is executed in the REPL,
the system reloads ``mymodule``, to use the latest macro definitions.
Hence, semi-live updates to macro definitions are possible: hack on your
macros, re-import, and try out the new version in the REPL; no need to restart
the REPL session in between.
- The set of macros available from ``mymodule``, at any given time, is those
specified **in the most recent** ``from mymodule import macros, ...``.
Any other macros from ``mymodule``, that were not specified in the most recent
import, will be unloaded when the import is performed.
- Each time after importing macros, the corresponding macro stubs are
automatically imported as regular Python objects.
Stubs are not directly usable. The intention is to let Python recognize
the macro name (otherwise there would be no run-time object by that name),
and to allow viewing macro docstrings and source code easily using
``some_macro?``, ``some_macro??``.
This does not affect using the macros in the intended way, as macros,
since macros are expanded away before run-time.
"""
import ast
import importlib
from collections import OrderedDict
from functools import partial
from IPython.core.error import InputRejected
from IPython.core.magic import register_cell_magic
from macropy import __version__ as macropy_version
from macropy.core.macros import ModuleExpansionContext, detect_macros
_placeholder = "<interactive input>"
_instance = None
def load_ipython_extension(ipython):
    # FIXME: The banner is injected too late. It seems IPython startup has already been performed by the time ``load_ipython_extension()`` is called.
#
# FIXME: We shouldn't print anything directly here; doing that breaks tools such as the Emacs Python autoimporter (see importmagic.el
# FIXME: in Spacemacs; it will think epc failed to start if anything but the bare process id is printed). Tools expect to suppress
# FIXME: **all** of the IPython banner by telling IPython itself not to print it.
#
# FIXME: For now, let's just put the info into banner2, and refrain from printing it.
# https://stackoverflow.com/questions/31613804/how-can-i-call-ipython-start-ipython-with-my-own-banner
ipython.config.TerminalInteractiveShell.banner2 = "MacroPy {} -- Syntactic macros for Python.".format(macropy_version)
global _instance
if not _instance:
_instance = IMacroPyExtension(shell=ipython)
def unload_ipython_extension(ipython):
global _instance
_instance = None
class MacroTransformer(ast.NodeTransformer):
def __init__(self, extension_instance, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ext = extension_instance
self.bindings = OrderedDict()
def visit(self, tree):
try:
bindings = detect_macros(tree, '__main__') # macro imports
if bindings:
self.ext.macro_bindings_changed = True
for fullname, macro_bindings in bindings:
mod = importlib.import_module(fullname)
self.bindings[fullname] = (mod, macro_bindings)
newtree = ModuleExpansionContext(tree, self.ext.src, self.bindings.values()).expand_macros()
self.ext.src = _placeholder
return newtree
except Exception as err:
# see IPython.core.interactiveshell.InteractiveShell.transform_ast()
raise InputRejected(*err.args)
# avoid complaining about typoed macro names...
@register_cell_magic
def ignore_importerror(line, cell): # ...when their stubs are loaded
try:
exec(cell, _instance.shell.user_ns) # set globals to the shell user namespace to respect assignments
except ImportError as e:
pass
@register_cell_magic
def ignore_nameerror(line, cell): # ...when they are unloaded
try:
exec(cell, _instance.shell.user_ns)
except NameError as e:
pass
class IMacroPyExtension:
def __init__(self, shell):
self.src = _placeholder
self.shell = shell
ipy = self.shell.get_ipython()
self.new_api = hasattr(self.shell, "input_transformers_post") # IPython 7.0+ with Python 3.5+
if self.new_api:
self.shell.input_transformers_post.append(self._get_source_code)
else:
ipy.events.register('pre_run_cell', self._get_source_code_legacy)
self.macro_bindings_changed = False
self.current_stubs = set()
self.macro_transformer = MacroTransformer(extension_instance=self)
self.shell.ast_transformers.append(self.macro_transformer) # TODO: last or first?
ipy.events.register('post_run_cell', self._refresh_stubs)
# initialize MacroPy in the session
self.shell.run_cell("import macropy.activate", store_history=False, silent=True)
def __del__(self):
ipy = self.shell.get_ipython()
ipy.events.unregister('post_run_cell', self._refresh_stubs)
self.shell.ast_transformers.remove(self.macro_transformer)
if self.new_api:
self.shell.input_transformers_post.remove(self._get_source_code)
else:
ipy.events.unregister('pre_run_cell', self._get_source_code_legacy)
def _get_source_code_legacy(self, info):
"""Get the source code of the current cell just before it runs.
Does not account for any string transformers.
"""
self.src = info.raw_cell
def _get_source_code(self, lines): # IPython 7.0+ with Python 3.5+
"""Get the source code of the current cell.
This is a do-nothing string transformer that just captures the text.
It is intended to run last, just before any AST transformers run.
"""
self.src = lines
return lines
def _refresh_stubs(self, info):
"""Refresh macro stub imports.
Called after running a cell, so that Jupyter help "some_macro?" works
for the currently available macros.
This allows the user to view macro docstrings.
"""
if not self.macro_bindings_changed:
return
self.macro_bindings_changed = False
internal_execute = partial(self.shell.run_cell,
store_history=False,
silent=True)
# Clear previous stubs, because our MacroTransformer overrides
# the available set of macros from a given module with those
# most recently imported from that module.
for asname in self.current_stubs:
internal_execute("%%ignore_nameerror\n"
"del {}".format(asname))
self.current_stubs = set()
for fullname, (_, macro_bindings) in self.macro_transformer.bindings.items():
for _, asname in macro_bindings:
self.current_stubs.add(asname)
stubnames = ", ".join("{} as {}".format(name, asname) for name, asname in macro_bindings)
internal_execute("%%ignore_importerror\n"
"from {} import {}".format(fullname, stubnames))
| 40.464865
| 138
| 0.681539
|
79490ce3a34a8f2977a1935eef019f39e7252022
| 3,809
|
py
|
Python
|
tensorflow/examples/saved_model/integration_tests/export_simple_text_embedding.py
|
hanton/tensorflow
|
34fd2a5e9bcdb40957ece90fec46a37e6e9248b2
|
[
"Apache-2.0"
] | 3
|
2017-11-09T17:40:28.000Z
|
2021-11-17T10:24:19.000Z
|
tensorflow/examples/saved_model/integration_tests/export_simple_text_embedding.py
|
jensfreudenau/tensorflow
|
3fe3f2b1984aab6f159b89aa3ab0069988925689
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/examples/saved_model/integration_tests/export_simple_text_embedding.py
|
jensfreudenau/tensorflow
|
3fe3f2b1984aab6f159b89aa3ab0069988925689
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Text embedding model stored as a SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl import app
from absl import flags
import tensorflow.compat.v2 as tf
# TODO(vbardiovsky): remove these when symbols are public.
from tensorflow.python.ops import lookup_ops
from tensorflow.python.training.tracking import tracking
FLAGS = flags.FLAGS
flags.DEFINE_string("export_dir", None, "Directory to export SavedModel.")
def write_vocabulary_file(vocabulary):
"""Write temporary vocab file for module construction."""
tmpdir = tempfile.mkdtemp()
vocabulary_file = os.path.join(tmpdir, "tokens.txt")
with tf.io.gfile.GFile(vocabulary_file, "w") as f:
for entry in vocabulary:
f.write(entry + "\n")
return vocabulary_file
class TextEmbeddingModel(tf.train.Checkpoint):
"""Text embedding model.
    A text embedding model that takes sentences as input and outputs the
    sentence embeddings.
"""
def __init__(self, vocabulary, emb_dim, oov_buckets):
super(TextEmbeddingModel, self).__init__()
self._oov_buckets = oov_buckets
self._vocabulary_file = tracking.TrackableAsset(
write_vocabulary_file(vocabulary))
self._total_size = len(vocabulary) + oov_buckets
self._table = lookup_ops.index_table_from_file(
vocabulary_file=self._vocabulary_file,
num_oov_buckets=self._oov_buckets,
hasher_spec=lookup_ops.FastHashSpec)
self.embeddings = tf.Variable(
tf.random.uniform(shape=[self._total_size, emb_dim]))
self.variables = [self.embeddings]
self.trainable_variables = self.variables
def _tokenize(self, sentences):
# Perform a minimalistic text preprocessing by removing punctuation and
# splitting on spaces.
normalized_sentences = tf.strings.regex_replace(
input=sentences, pattern=r"\pP", rewrite="")
normalized_sentences = tf.reshape(normalized_sentences, [-1])
sparse_tokens = tf.strings.split(normalized_sentences, " ")
# Deal with a corner case: there is one empty sentence.
sparse_tokens, _ = tf.sparse.fill_empty_rows(sparse_tokens, tf.constant(""))
# Deal with a corner case: all sentences are empty.
sparse_tokens = tf.sparse.reset_shape(sparse_tokens)
sparse_token_ids = self._table.lookup(sparse_tokens.values)
return (sparse_tokens.indices, sparse_token_ids, sparse_tokens.dense_shape)
@tf.function(input_signature=[tf.TensorSpec([None], tf.dtypes.string)])
def __call__(self, sentences):
token_ids, token_values, token_dense_shape = self._tokenize(sentences)
return tf.nn.safe_embedding_lookup_sparse(
embedding_weights=self.embeddings,
sparse_ids=tf.SparseTensor(token_ids, token_values, token_dense_shape),
sparse_weights=None,
combiner="sqrtn")
def main(argv):
del argv
vocabulary = ["cat", "is", "on", "the", "mat"]
module = TextEmbeddingModel(vocabulary=vocabulary, emb_dim=10, oov_buckets=10)
tf.saved_model.save(module, FLAGS.export_dir)
if __name__ == "__main__":
app.run(main)
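# A minimal consumption sketch, not part of the original example; it assumes
# the module was exported with --export_dir=/tmp/text_model:
#
#   loaded = tf.saved_model.load("/tmp/text_model")
#   embeddings = loaded(tf.constant(["the cat is on the mat"]))
#   # embeddings has shape [1, 10], since the model above uses emb_dim=10.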
| 35.933962
| 80
| 0.733526
|
79490f332a5b5056a1be27b3003ae3d3d523c98c
| 8,748
|
py
|
Python
|
fontFeatures/feeLib/__init__.py
|
ctrlcctrlv/fontFeatures
|
76d68586da2c1c42bb3cd79f92d583e63f52cf02
|
[
"BSD-3-Clause"
] | null | null | null |
fontFeatures/feeLib/__init__.py
|
ctrlcctrlv/fontFeatures
|
76d68586da2c1c42bb3cd79f92d583e63f52cf02
|
[
"BSD-3-Clause"
] | null | null | null |
fontFeatures/feeLib/__init__.py
|
ctrlcctrlv/fontFeatures
|
76d68586da2c1c42bb3cd79f92d583e63f52cf02
|
[
"BSD-3-Clause"
] | null | null | null |
import re
import parsley
import importlib, inspect
from fontFeatures import FontFeatures
from babelfont.font import Font
from ometa.grammar import OMeta
import warnings
from more_itertools import collapse
def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
return "# %s\n" % (message)
warnings.formatwarning = warning_on_one_line
def callRule(self):
_locals = {"self": self}
n1 = self._apply(self.rule_anything, "anything", [])
n2 = self._apply(self.rule_anything, "anything", [])
return self.foreignApply(n1[0], n1[0] + "_" + n2[0], self.globals, self.locals)
class GlyphSelector:
def __init__(self, selector, suffixes, location):
self.selector = selector
self.suffixes = suffixes
self.location = location
def as_text(self):
if "barename" in self.selector:
returned = self.selector["barename"]
elif "classname" in self.selector:
returned = "@" + self.selector["classname"]
elif "regex" in self.selector:
returned = "/" + self.selector["regex"] + "/"
elif "inlineclass" in self.selector:
items = [
GlyphSelector(i, (), self.location)
for i in self.selector["inlineclass"]
]
returned = "[" + " ".join([item.as_text() for item in items]) + "]"
else:
raise ValueError("Unknown selector type %s" % self.selector)
for s in self.suffixes:
returned = returned + s["suffixtype"] + s["suffix"]
return returned
def _apply_suffix(self, glyphname, suffix):
if suffix["suffixtype"] == ".":
glyphname = glyphname + "." + suffix["suffix"]
else:
if glyphname.endswith("." + suffix["suffix"]):
glyphname = glyphname[: -(len(suffix["suffix"]) + 1)]
return glyphname
def resolve(self, fontfeatures, font, mustExist=True):
returned = []
assert isinstance(font, Font)
glyphs = font.keys()
if "barename" in self.selector:
returned = [self.selector["barename"]]
elif "inlineclass" in self.selector:
returned = list(
collapse(
[
GlyphSelector(i, (), self.location).resolve(fontfeatures, font)
for i in self.selector["inlineclass"]
]
)
)
elif "classname" in self.selector:
classname = self.selector["classname"]
if not classname in fontfeatures.namedClasses:
raise ValueError(
"Tried to expand glyph class '@%s' but @%s was not defined (at %s)"
% (classname, classname, self.location)
)
returned = fontfeatures.namedClasses[classname]
elif "regex" in self.selector:
regex = self.selector["regex"]
try:
pattern = re.compile(regex)
except Exception as e:
raise ValueError(
"Couldn't parse regular expression '%s' at %s"
% (regex, self.location)
)
returned = list(filter(lambda x: re.search(pattern, x), glyphs))
for s in self.suffixes:
returned = [self._apply_suffix(g, s) for g in returned]
if mustExist:
notFound = list(filter(lambda x: x not in glyphs, returned))
returned = list(filter(lambda x: x in glyphs, returned))
if len(notFound) > 0:
plural = ""
if len(notFound) > 1:
plural = "s"
glyphstring = ", ".join(notFound)
warnings.warn(
"# Couldn't find glyph%s '%s' in font (at %s)"
% (plural, glyphstring, self.location)
)
return list(returned)
class FeeParser:
"""Convert a FEE file into a fontFeatures object.
The resulting object is stored in the parser's ``fontFeatures`` property.
Args:
        font: A babelfont Font object (glyph selectors are resolved against it).
"""
basegrammar = """
feefile = wsc statement+
statement = verb:v wsc callRule(v "Args"):args ws ';' wsc -> parser.do(v, args)
rest_of_line = <('\\\n' | (~'\n' anything))*>
wsc = (comment | ' ' | '\t' | '\n')+ | ws
comment = '#' rest_of_line ws?
verb = <letter+>:x ?(x in self.valid_verbs) -> x
# Ways of specifying glyphs
classname = '@' <(letter|"_")+>:b -> {"classname": b}
barename = <(letter|digit|"."|"_")+ (("."|"_"|"-") (letter|digit)+)*>:b -> {"barename": b}
inlineclass_member = (barename|classname):m ws? -> m
inlineclass_members = inlineclass_member+
inlineclass = '[' ws inlineclass_members:m ']' -> {"inlineclass": m}
regex = '/' <(~'/' anything)+>:r '/' -> {"regex": r}
glyphsuffix = ('.'|'~'):suffixtype <(letter|digit|"_")+>:suffix -> {"suffixtype":suffixtype, "suffix":suffix}
glyphselector = (regex | barename | classname | inlineclass ):g glyphsuffix*:s -> GlyphSelector(g,s, self.input.position)
# Number things
bareinteger = ('-'|'+')?:sign <digit+>:i -> (-int(i) if sign == "-" else int(i))
namedinteger = '$' barename:b ?(b["barename"] in parser.variables) -> int(parser.variables[b["barename"]])
integer = namedinteger | bareinteger
# Value records
valuerecord = integer_value_record | fee_value_record | traditional_value_record
integer_value_record = integer:xAdvance -> (0, 0, xAdvance, 0)
traditional_value_record = '<' integer:xPlacement ws integer:yPlacement ws integer:xAdvance ws integer:yAdvance '>' -> (xPlacement, yPlacement, xAdvance, yAdvance)
fee_value_record = '<' ws fee_value_record_member+:m '>' -> { "members": m }
fee_value_record_member = ("xAdvance"| "xPlacement" | "yAdvance" | "yPlacement"):d '=' integer:pos ws -> {"dimension": d, "position": pos}
"""
DEFAULT_PLUGINS = [
"LoadPlugin",
"ClassDefinition",
"Conditional",
"Feature",
"Substitute",
"Position",
"Chain",
"Anchors",
"Routine",
"Include"
]
def __init__(self, font):
self.grammar = self._make_initial_grammar()
self.grammar_generation = 1
self.font = font
self.fontfeatures = FontFeatures()
self.current_file = None
self.plugin_classes = {}
self.current_feature = None
self.font_modified = False
self.variables = {}
self._rebuild_parser()
for plugin in self.DEFAULT_PLUGINS:
self._load_plugin(plugin)
def parseFile(self, filename):
"""Load a FEE features file.
Args:
filename: Name of the file to read.
"""
with open(filename, "r") as f:
data = f.read()
self.current_file = filename
return self.parseString(data)
def parseString(self, s):
"""LoadFEE features information from a string.
Args:
s: Layout rules in FEE format.
"""
fee = self.parser(s).feefile()
if self.font_modified:
warnings.warn("Font was modified")
return fee
def _rebuild_parser(self):
self.parser = parsley.wrapGrammar(self.grammar)
def _make_initial_grammar(self):
g = parsley.makeGrammar(
FeeParser.basegrammar,
{"match": re.match, "GlyphSelector": GlyphSelector},
unwrap=True,
)
g.globals["parser"] = self
g.rule_callRule = callRule
g.valid_verbs = ["LoadPlugin"]
return g
def _load_plugin(self, plugin):
if "." not in plugin:
plugin = "fontFeatures.feeLib." + plugin
mod = importlib.import_module(plugin)
if not hasattr(mod, "GRAMMAR"):
warnings.warn("Module %s is not a FEE plugin" % plugin)
return
self._register_plugin(mod)
def _register_plugin(self, mod):
rules = mod.GRAMMAR
verbs = getattr(mod, "VERBS", [])
self.grammar_generation = self.grammar_generation + 1
classes = inspect.getmembers(mod, inspect.isclass)
self.grammar.valid_verbs.extend(verbs)
newgrammar = OMeta.makeGrammar(
rules, "Grammar%i" % self.grammar_generation
).createParserClass(self.grammar, {})
newgrammar.globals = self.grammar.globals
for v in verbs:
self.grammar.globals[v] = newgrammar
for c in classes:
self.plugin_classes[c[0]] = c[1]
self._rebuild_parser()
def do(self, verb, args):
return self.plugin_classes[verb].action(self, *args)
def filterResults(self, results):
return [x for x in collapse(results) if x]
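# A minimal usage sketch, not part of the original source; how the babelfont
# Font is obtained and the feature-file name are assumptions:
#
#   font = ...                        # a babelfont Font instance
#   parser = FeeParser(font)
#   parser.parseFile("features.fee")  # populates parser.fontfeatures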
| 35.706122
| 163
| 0.581047
|
79490f5fa3f9c56ebbeb1a75f95bc2909d048341
| 1,999
|
py
|
Python
|
tests/tests_geomstats/test_prepare_graph_data.py
|
SaitejaUtpala/geomstats
|
5d4e16b3f30a86aab4725142f2263d8f10a30508
|
[
"MIT"
] | 2
|
2020-01-23T04:01:02.000Z
|
2020-08-18T19:20:27.000Z
|
tests/tests_geomstats/test_prepare_graph_data.py
|
SaitejaUtpala/geomstats
|
5d4e16b3f30a86aab4725142f2263d8f10a30508
|
[
"MIT"
] | null | null | null |
tests/tests_geomstats/test_prepare_graph_data.py
|
SaitejaUtpala/geomstats
|
5d4e16b3f30a86aab4725142f2263d8f10a30508
|
[
"MIT"
] | 1
|
2021-03-14T06:54:09.000Z
|
2021-03-14T06:54:09.000Z
|
"""Unit tests for embedding data class."""
import geomstats.backend as gs
import geomstats.tests
from geomstats.datasets.prepare_graph_data import HyperbolicEmbedding
from geomstats.datasets.utils import load_karate_graph
class TestPrepareGraphData(geomstats.tests.TestCase):
"""Class for testing embedding."""
def setUp(self):
"""Set up function."""
gs.random.seed(1234)
dim = 2
max_epochs = 3
lr = .05
n_negative = 2
context_size = 1
self.karate_graph = load_karate_graph()
self.embedding = HyperbolicEmbedding(
dim=dim,
max_epochs=max_epochs,
lr=lr,
n_context=context_size,
n_negative=n_negative)
def test_log_sigmoid(self):
"""Test log_sigmoid."""
point = gs.array([0.1, 0.3])
result = self.embedding.log_sigmoid(point)
expected = gs.array([-0.644397, -0.554355])
self.assertAllClose(result, expected)
def test_grad_log_sigmoid(self):
"""Test grad_log_sigmoid."""
point = gs.array([0.1, 0.3])
result = self.embedding.grad_log_sigmoid(point)
expected = gs.array([0.47502081, 0.42555748])
self.assertAllClose(result, expected)
def test_loss(self):
"""Test loss function."""
point = gs.array([0.5, 0.5])
point_context = gs.array([0.6, 0.6])
point_negative = gs.array([-0.4, -0.4])
loss_value, loss_grad = self.embedding.loss(
point, point_context, point_negative)
expected_loss = 1.00322045
expected_grad = gs.array([-0.16565083, -0.16565083])
self.assertAllClose(loss_value[0], expected_loss)
self.assertAllClose(gs.squeeze(loss_grad), expected_grad)
def test_embed(self):
"""Test embedding function."""
embeddings = self.embedding.embed(self.karate_graph)
self.assertTrue(
gs.all(self.embedding.manifold.belongs(embeddings)))
| 30.753846
| 69
| 0.627314
|
79490fddf6b0900aef936262636d7d84ce4ea70a
| 24,427
|
py
|
Python
|
S11/LRScheduler.py
|
jagatabhay/TSAI
|
313a27e321881fa974bfa62388e7f43ae3e0390a
|
[
"MIT"
] | 5
|
2020-08-13T18:16:33.000Z
|
2022-03-15T07:51:26.000Z
|
S11/LRScheduler.py
|
jagatabhay/TSAI
|
313a27e321881fa974bfa62388e7f43ae3e0390a
|
[
"MIT"
] | null | null | null |
S11/LRScheduler.py
|
jagatabhay/TSAI
|
313a27e321881fa974bfa62388e7f43ae3e0390a
|
[
"MIT"
] | 1
|
2020-07-05T18:03:38.000Z
|
2020-07-05T18:03:38.000Z
|
import copy
import os
import torch
from tqdm.autonotebook import tqdm
from torch.optim.lr_scheduler import _LRScheduler
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from packaging import version
PYTORCH_VERSION = version.parse(torch.__version__)
try:
from apex import amp
IS_AMP_AVAILABLE = True
except ImportError:
IS_AMP_AVAILABLE = False
class DataLoaderIter(object):
def __init__(self, data_loader):
self.data_loader = data_loader
self._iterator = iter(data_loader)
@property
def dataset(self):
return self.data_loader.dataset
def inputs_labels_from_batch(self, batch_data):
if not isinstance(batch_data, list) and not isinstance(batch_data, tuple):
raise ValueError(
"Your batch type not supported: {}. Please inherit from "
"`TrainDataLoaderIter` (or `ValDataLoaderIter`) and redefine "
"`_batch_make_inputs_labels` method.".format(type(batch_data))
)
inputs, labels, *_ = batch_data
return inputs, labels
def __iter__(self):
return self
def __next__(self):
batch = next(self._iterator)
return self.inputs_labels_from_batch(batch)
class TrainDataLoaderIter(DataLoaderIter):
def __init__(self, data_loader, auto_reset=True):
super().__init__(data_loader)
self.auto_reset = auto_reset
def __next__(self):
try:
batch = next(self._iterator)
inputs, labels = self.inputs_labels_from_batch(batch)
except StopIteration:
if not self.auto_reset:
raise
self._iterator = iter(self.data_loader)
batch = next(self._iterator)
inputs, labels = self.inputs_labels_from_batch(batch)
return inputs, labels
class ValDataLoaderIter(DataLoaderIter):
pass
class LRFinder(object):
"""Learning rate range test.
The learning rate range test increases the learning rate in a pre-training run
between two boundaries in a linear or exponential manner. It provides valuable
information on how well the network can be trained over a range of learning rates
and what is the optimal learning rate.
Arguments:
model (torch.nn.Module): wrapped model.
optimizer (torch.optim.Optimizer): wrapped optimizer where the defined learning
is assumed to be the lower boundary of the range test.
criterion (torch.nn.Module): wrapped loss function.
device (str or torch.device, optional): a string ("cpu" or "cuda") with an
            optional ordinal for the device type (e.g. "cuda:X", where X is the ordinal).
Alternatively, can be an object representing the device on which the
computation will take place. Default: None, uses the same device as `model`.
memory_cache (boolean, optional): if this flag is set to True, `state_dict` of
model and optimizer will be cached in memory. Otherwise, they will be saved
to files under the `cache_dir`.
cache_dir (string, optional): path for storing temporary files. If no path is
            specified, the system-wide temporary directory is used. Notice that this
parameter will be ignored if `memory_cache` is True.
Example:
>>> lr_finder = LRFinder(net, optimizer, criterion, device="cuda")
>>> lr_finder.range_test(dataloader, end_lr=100, num_iter=100)
>>> lr_finder.plot() # to inspect the loss-learning rate graph
>>> lr_finder.reset() # to reset the model and optimizer to their initial state
Reference:
Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186
fastai/lr_find: https://github.com/fastai/fastai
"""
def __init__(
self,
model,
optimizer,
criterion,
device=None,
memory_cache=True,
cache_dir=None,
):
# Check if the optimizer is already attached to a scheduler
self.optimizer = optimizer
self._check_for_scheduler()
self.model = model
self.criterion = criterion
self.history = {"lr": [], "loss": []}
self.best_loss = None
self.memory_cache = memory_cache
self.cache_dir = cache_dir
# Save the original state of the model and optimizer so they can be restored if
# needed
self.model_device = next(self.model.parameters()).device
self.state_cacher = StateCacher(memory_cache, cache_dir=cache_dir)
self.state_cacher.store("model", self.model.state_dict())
self.state_cacher.store("optimizer", self.optimizer.state_dict())
# If device is None, use the same as the model
if device:
self.device = device
else:
self.device = self.model_device
def reset(self):
"""Restores the model and optimizer to their initial states."""
self.model.load_state_dict(self.state_cacher.retrieve("model"))
self.optimizer.load_state_dict(self.state_cacher.retrieve("optimizer"))
self.model.to(self.model_device)
def range_test(
self,
train_loader,
val_loader=None,
start_lr=None,
end_lr=10,
num_iter=100,
step_mode="exp",
smooth_f=0.05,
diverge_th=5,
accumulation_steps=1,
non_blocking_transfer=True,
):
"""Performs the learning rate range test.
Arguments:
train_loader (`torch.utils.data.DataLoader`
or child of `TrainDataLoaderIter`, optional):
the training set data loader.
            If your dataset (data loader) returns a tuple (inputs, labels, *),
            then a PyTorch data loader object can be provided. However, if a
            dataset returns different outputs, e.g. dicts, then you should
            inherit from the `TrainDataLoaderIter` class and redefine the
            `inputs_labels_from_batch` method so that it outputs (inputs, labels).
val_loader (`torch.utils.data.DataLoader`
or child of `ValDataLoaderIter`, optional): if `None` the range test
will only use the training loss. When given a data loader, the model is
evaluated after each iteration on that dataset and the evaluation loss
is used. Note that in this mode the test takes significantly longer but
generally produces more precise results.
Similarly to `train_loader`, if your dataset outputs are not standard
you should inherit from `ValDataLoaderIter` class and
redefine method `inputs_labels_from_batch` so that
it outputs (inputs, labels). Default: None.
start_lr (float, optional): the starting learning rate for the range test.
Default: None (uses the learning rate from the optimizer).
end_lr (float, optional): the maximum learning rate to test. Default: 10.
num_iter (int, optional): the number of iterations over which the test
occurs. Default: 100.
step_mode (str, optional): one of the available learning rate policies,
linear or exponential ("linear", "exp"). Default: "exp".
smooth_f (float, optional): the loss smoothing factor within the [0, 1[
interval. Disabled if set to 0, otherwise the loss is smoothed using
exponential smoothing. Default: 0.05.
diverge_th (int, optional): the test is stopped when the loss surpasses the
threshold: diverge_th * best_loss. Default: 5.
accumulation_steps (int, optional): steps for gradient accumulation. If it
is 1, gradients are not accumulated. Default: 1.
non_blocking_transfer (bool, optional): when non_blocking_transfer is set,
tries to convert/move data to the device asynchronously if possible,
e.g., moving CPU Tensors with pinned memory to CUDA devices. Default: True.
Example (fastai approach):
>>> lr_finder = LRFinder(net, optimizer, criterion, device="cuda")
>>> lr_finder.range_test(dataloader, end_lr=100, num_iter=100)
Example (Leslie Smith's approach):
>>> lr_finder = LRFinder(net, optimizer, criterion, device="cuda")
>>> lr_finder.range_test(trainloader, val_loader=val_loader, end_lr=1, num_iter=100, step_mode="linear")
Gradient accumulation is supported; example:
>>> train_data = ... # prepared dataset
>>> desired_bs, real_bs = 32, 4 # batch size
>>> accumulation_steps = desired_bs // real_bs # required steps for accumulation
>>> dataloader = torch.utils.data.DataLoader(train_data, batch_size=real_bs, shuffle=True)
>>> acc_lr_finder = LRFinder(net, optimizer, criterion, device="cuda")
>>> acc_lr_finder.range_test(dataloader, end_lr=10, num_iter=100, accumulation_steps=accumulation_steps)
        If your DataLoader returns e.g. a dict, or other non-standard output, inherit from TrainDataLoaderIter
        and redefine the method `inputs_labels_from_batch` so that it outputs (inputs, labels) data:
>>> import torch_lr_finder
>>> class TrainIter(torch_lr_finder.TrainDataLoaderIter):
>>> def inputs_labels_from_batch(self, batch_data):
>>> return (batch_data['user_features'], batch_data['user_history']), batch_data['y_labels']
>>> train_data_iter = TrainIter(train_dl)
>>> finder = torch_lr_finder.LRFinder(model, optimizer, partial(model._train_loss, need_one_hot=False))
>>> finder.range_test(train_data_iter, end_lr=10, num_iter=300, diverge_th=10)
Reference:
[Training Neural Nets on Larger Batches: Practical Tips for 1-GPU, Multi-GPU & Distributed setups](
https://medium.com/huggingface/ec88c3e51255)
[thomwolf/gradient_accumulation](https://gist.github.com/thomwolf/ac7a7da6b1888c2eeac8ac8b9b05d3d3)
"""
# Reset test results
self.history = {"lr": [], "loss": []}
self.best_loss = None
# Move the model to the proper device
self.model.to(self.device)
# Check if the optimizer is already attached to a scheduler
self._check_for_scheduler()
# Set the starting learning rate
if start_lr:
self._set_learning_rate(start_lr)
# Initialize the proper learning rate policy
if step_mode.lower() == "exp":
lr_schedule = ExponentialLR(self.optimizer, end_lr, num_iter)
elif step_mode.lower() == "linear":
lr_schedule = LinearLR(self.optimizer, end_lr, num_iter)
else:
raise ValueError("expected one of (exp, linear), got {}".format(step_mode))
if smooth_f < 0 or smooth_f >= 1:
raise ValueError("smooth_f is outside the range [0, 1[")
# Create an iterator to get data batch by batch
if isinstance(train_loader, DataLoader):
train_iter = TrainDataLoaderIter(train_loader)
elif isinstance(train_loader, TrainDataLoaderIter):
train_iter = train_loader
else:
raise ValueError(
"`train_loader` has unsupported type: {}."
"Expected types are `torch.utils.data.DataLoader`"
"or child of `TrainDataLoaderIter`.".format(type(train_loader))
)
if val_loader:
if isinstance(val_loader, DataLoader):
val_iter = ValDataLoaderIter(val_loader)
elif isinstance(val_loader, ValDataLoaderIter):
val_iter = val_loader
else:
raise ValueError(
"`val_loader` has unsupported type: {}."
"Expected types are `torch.utils.data.DataLoader`"
"or child of `ValDataLoaderIter`.".format(type(val_loader))
)
for iteration in tqdm(range(num_iter)):
# Train on batch and retrieve loss
loss = self._train_batch(
train_iter,
accumulation_steps,
non_blocking_transfer=non_blocking_transfer,
)
if val_loader:
loss = self._validate(
val_iter, non_blocking_transfer=non_blocking_transfer
)
# Update the learning rate
self.history["lr"].append(lr_schedule.get_lr()[0])
lr_schedule.step()
# Track the best loss and smooth it if smooth_f is specified
if iteration == 0:
self.best_loss = loss
else:
if smooth_f > 0:
loss = smooth_f * loss + (1 - smooth_f) * self.history["loss"][-1]
if loss < self.best_loss:
self.best_loss = loss
# Check if the loss has diverged; if it has, stop the test
self.history["loss"].append(loss)
if loss > diverge_th * self.best_loss:
print("Stopping early, the loss has diverged")
break
print("Learning rate search finished. See the graph with {finder_name}.plot()")
def _set_learning_rate(self, new_lrs):
if not isinstance(new_lrs, list):
new_lrs = [new_lrs] * len(self.optimizer.param_groups)
if len(new_lrs) != len(self.optimizer.param_groups):
raise ValueError(
"Length of `new_lrs` is not equal to the number of parameter groups "
+ "in the given optimizer"
)
for param_group, new_lr in zip(self.optimizer.param_groups, new_lrs):
param_group["lr"] = new_lr
def _check_for_scheduler(self):
for param_group in self.optimizer.param_groups:
if "initial_lr" in param_group:
raise RuntimeError("Optimizer already has a scheduler attached to it")
def _train_batch(self, train_iter, accumulation_steps, non_blocking_transfer=True):
self.model.train()
total_loss = None # for late initialization
self.optimizer.zero_grad()
for i in range(accumulation_steps):
inputs, labels = next(train_iter)
inputs, labels = self._move_to_device(
inputs, labels, non_blocking=non_blocking_transfer
)
# Forward pass
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
# Loss should be averaged in each step
loss /= accumulation_steps
# Backward pass
if IS_AMP_AVAILABLE and hasattr(self.optimizer, "_amp_stash"):
# For minor performance optimization, see also:
# https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations
delay_unscale = ((i + 1) % accumulation_steps) != 0
with amp.scale_loss(
loss, self.optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if total_loss is None:
total_loss = loss
else:
total_loss += loss
self.optimizer.step()
return total_loss.item()
def _move_to_device(self, inputs, labels, non_blocking=True):
def move(obj, device, non_blocking=True):
if hasattr(obj, "to"):
return obj.to(device, non_blocking=non_blocking)
elif isinstance(obj, tuple):
return tuple(move(o, device, non_blocking) for o in obj)
elif isinstance(obj, list):
return [move(o, device, non_blocking) for o in obj]
elif isinstance(obj, dict):
return {k: move(o, device, non_blocking) for k, o in obj.items()}
else:
return obj
inputs = move(inputs, self.device, non_blocking=non_blocking)
labels = move(labels, self.device, non_blocking=non_blocking)
return inputs, labels
def _validate(self, val_iter, non_blocking_transfer=True):
# Set model to evaluation mode and disable gradient computation
running_loss = 0
self.model.eval()
with torch.no_grad():
for inputs, labels in val_iter:
# Move data to the correct device
inputs, labels = self._move_to_device(
inputs, labels, non_blocking=non_blocking_transfer
)
if isinstance(inputs, tuple) or isinstance(inputs, list):
batch_size = inputs[0].size(0)
else:
batch_size = inputs.size(0)
# Forward pass and loss computation
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
running_loss += loss.item() * batch_size
return running_loss / len(val_iter.dataset)
def plot(self, skip_start=10, skip_end=5, log_lr=True, show_lr=None, ax=None):
"""Plots the learning rate range test.
Arguments:
skip_start (int, optional): number of batches to trim from the start.
Default: 10.
            skip_end (int, optional): number of batches to trim from the end.
Default: 5.
log_lr (bool, optional): True to plot the learning rate in a logarithmic
scale; otherwise, plotted in a linear scale. Default: True.
show_lr (float, optional): if set, adds a vertical line to visualize the
specified learning rate. Default: None.
ax (matplotlib.axes.Axes, optional): the plot is created in the specified
                matplotlib axes object and the figure is not shown. If `None`, then
                the figure and axes object are created in this method and the figure is
                shown. Default: None.
Returns:
The matplotlib.axes.Axes object that contains the plot.
"""
if skip_start < 0:
raise ValueError("skip_start cannot be negative")
if skip_end < 0:
raise ValueError("skip_end cannot be negative")
if show_lr is not None and not isinstance(show_lr, float):
raise ValueError("show_lr must be float")
# Get the data to plot from the history dictionary. Also, handle skip_end=0
# properly so the behaviour is the expected
lrs = self.history["lr"]
losses = self.history["loss"]
if skip_end == 0:
lrs = lrs[skip_start:]
losses = losses[skip_start:]
else:
lrs = lrs[skip_start:-skip_end]
losses = losses[skip_start:-skip_end]
# Create the figure and axes object if axes was not already given
fig = None
if ax is None:
fig, ax = plt.subplots()
# Plot loss as a function of the learning rate
ax.plot(lrs, losses)
if log_lr:
ax.set_xscale("log")
ax.set_xlabel("Learning rate")
ax.set_ylabel("Loss")
if show_lr is not None:
ax.axvline(x=show_lr, color="red")
# Show only if the figure was created internally
if fig is not None:
plt.show()
return ax
class LinearLR(_LRScheduler):
"""Linearly increases the learning rate between two boundaries over a number of
iterations.
Arguments:
optimizer (torch.optim.Optimizer): wrapped optimizer.
end_lr (float): the final learning rate.
num_iter (int): the number of iterations over which the test occurs.
last_epoch (int, optional): the index of last epoch. Default: -1.
"""
def __init__(self, optimizer, end_lr, num_iter, last_epoch=-1):
self.end_lr = end_lr
if num_iter <= 1:
raise ValueError("`num_iter` must be larger than 1")
self.num_iter = num_iter
super(LinearLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
# In earlier Pytorch versions last_epoch starts at -1, while in recent versions
# it starts at 0. We need to adjust the math a bit to handle this. See
# discussion at: https://github.com/davidtvs/pytorch-lr-finder/pull/42
if PYTORCH_VERSION < version.parse("1.1.0"):
curr_iter = self.last_epoch + 1
r = curr_iter / (self.num_iter - 1)
else:
r = self.last_epoch / (self.num_iter - 1)
return [base_lr + r * (self.end_lr - base_lr) for base_lr in self.base_lrs]
class ExponentialLR(_LRScheduler):
"""Exponentially increases the learning rate between two boundaries over a number of
iterations.
Arguments:
optimizer (torch.optim.Optimizer): wrapped optimizer.
end_lr (float): the final learning rate.
num_iter (int): the number of iterations over which the test occurs.
last_epoch (int, optional): the index of last epoch. Default: -1.
"""
def __init__(self, optimizer, end_lr, num_iter, last_epoch=-1):
self.end_lr = end_lr
if num_iter <= 1:
raise ValueError("`num_iter` must be larger than 1")
self.num_iter = num_iter
super(ExponentialLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
# In earlier Pytorch versions last_epoch starts at -1, while in recent versions
# it starts at 0. We need to adjust the math a bit to handle this. See
# discussion at: https://github.com/davidtvs/pytorch-lr-finder/pull/42
if PYTORCH_VERSION < version.parse("1.1.0"):
curr_iter = self.last_epoch + 1
r = curr_iter / (self.num_iter - 1)
else:
r = self.last_epoch / (self.num_iter - 1)
return [base_lr * (self.end_lr / base_lr) ** r for base_lr in self.base_lrs]
class StateCacher(object):
def __init__(self, in_memory, cache_dir=None):
self.in_memory = in_memory
self.cache_dir = cache_dir
if self.cache_dir is None:
import tempfile
self.cache_dir = tempfile.gettempdir()
else:
if not os.path.isdir(self.cache_dir):
raise ValueError("Given `cache_dir` is not a valid directory.")
self.cached = {}
def store(self, key, state_dict):
if self.in_memory:
self.cached.update({key: copy.deepcopy(state_dict)})
else:
fn = os.path.join(self.cache_dir, "state_{}_{}.pt".format(key, id(self)))
self.cached.update({key: fn})
torch.save(state_dict, fn)
def retrieve(self, key):
if key not in self.cached:
raise KeyError("Target {} was not cached.".format(key))
if self.in_memory:
return self.cached.get(key)
else:
fn = self.cached.get(key)
if not os.path.exists(fn):
raise RuntimeError(
"Failed to load state in {}. File doesn't exist anymore.".format(fn)
)
state_dict = torch.load(fn, map_location=lambda storage, location: storage)
return state_dict
def __del__(self):
"""Check whether there are unused cached files existing in `cache_dir` before
this instance being destroyed."""
if self.in_memory:
return
for k in self.cached:
if os.path.exists(self.cached[k]):
os.remove(self.cached[k])
print(" LR Scheduler Loaded Successfully ")
| 42.407986
| 117
| 0.599419
|
794911bc14ed482d0f807d30b420e68722011166
| 683
|
py
|
Python
|
mysite/pages/migrations/0019_auto_20151105_0922.py
|
raccoongang/socraticqs2
|
06201005136ee139846f857dbb2f518736e441de
|
[
"Apache-2.0"
] | 3
|
2015-11-20T07:33:28.000Z
|
2017-01-15T23:33:50.000Z
|
mysite/pages/migrations/0019_auto_20151105_0922.py
|
raccoongang/socraticqs2
|
06201005136ee139846f857dbb2f518736e441de
|
[
"Apache-2.0"
] | 28
|
2015-07-14T11:33:24.000Z
|
2017-11-17T15:21:22.000Z
|
mysite/pages/migrations/0019_auto_20151105_0922.py
|
raccoongang/socraticqs2
|
06201005136ee139846f857dbb2f518736e441de
|
[
"Apache-2.0"
] | 4
|
2015-04-29T09:04:59.000Z
|
2017-07-19T14:11:16.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('pages', '0018_interestedform_interestedplugin'),
]
operations = [
migrations.AlterField(
model_name='detailsplugin',
name='quote_small',
field=models.CharField(max_length=70, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='detailsplugin',
name='quote_text',
field=models.CharField(max_length=70, blank=True),
preserve_default=True,
),
]
| 25.296296
| 62
| 0.604685
|
794911dbf3bc5f44b257c89fb9bc6321e7a5af92
| 2,106
|
py
|
Python
|
iotedgehubdev/edgecert.py
|
SLdragon/iotedgehubdev
|
f988cd1f108b16605fb32b7b8c61f3d595361bc6
|
[
"MIT"
] | null | null | null |
iotedgehubdev/edgecert.py
|
SLdragon/iotedgehubdev
|
f988cd1f108b16605fb32b7b8c61f3d595361bc6
|
[
"MIT"
] | null | null | null |
iotedgehubdev/edgecert.py
|
SLdragon/iotedgehubdev
|
f988cd1f108b16605fb32b7b8c61f3d595361bc6
|
[
"MIT"
] | 1
|
2018-11-26T20:00:32.000Z
|
2018-11-26T20:00:32.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .certutils import EdgeCertUtil
from .constants import EdgeConstants
class EdgeCert(object):
def __init__(self, certs_dir, hostname):
self.certs_dir = certs_dir
self.hostname = hostname
def generate_self_signed_certs(self):
cert_util = EdgeCertUtil()
cert_util.create_root_ca_cert(EdgeConstants.EDGE_DEVICE_CA,
validity_days_from_now=365,
subject_dict=EdgeConstants.CERT_DEFAULT_DICT,
passphrase=None)
cert_util.export_cert_artifacts_to_dir(EdgeConstants.EDGE_DEVICE_CA, self.certs_dir)
cert_util.create_intermediate_ca_cert(EdgeConstants.EDGE_AGENT_CA,
EdgeConstants.EDGE_DEVICE_CA,
validity_days_from_now=365,
common_name='Edge Agent CA',
set_terminal_ca=True,
passphrase=None)
cert_util.export_cert_artifacts_to_dir(EdgeConstants.EDGE_AGENT_CA, self.certs_dir)
cert_util.create_server_cert(EdgeConstants.EDGE_HUB_SERVER,
EdgeConstants.EDGE_AGENT_CA,
validity_days_from_now=365,
hostname=self.hostname)
cert_util.export_cert_artifacts_to_dir(EdgeConstants.EDGE_HUB_SERVER, self.certs_dir)
cert_util.export_pfx_cert(EdgeConstants.EDGE_HUB_SERVER, self.certs_dir)
prefixes = [EdgeConstants.EDGE_AGENT_CA, EdgeConstants.EDGE_DEVICE_CA]
cert_util.chain_ca_certs(EdgeConstants.EDGE_CHAIN_CA, prefixes, self.certs_dir)
def get_cert_file_path(self, id_str):
return EdgeCertUtil.get_cert_file_path(id_str, self.certs_dir)
def get_pfx_file_path(self, id_str):
return EdgeCertUtil.get_pfx_file_path(id_str, self.certs_dir)
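# A minimal usage sketch, not part of the original source; the directory and
# hostname are illustrative:
#
#   edge_cert = EdgeCert("/tmp/edge-certs", "localhost")
#   edge_cert.generate_self_signed_certs()
#   server_pfx = edge_cert.get_pfx_file_path(EdgeConstants.EDGE_HUB_SERVER)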
| 46.8
| 93
| 0.622507
|
794913d2a4685807d9bf77319176df206517e1b7
| 305
|
py
|
Python
|
2015/09/candidate-refugees-20150909/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 14
|
2015-05-08T13:41:51.000Z
|
2021-02-24T12:34:55.000Z
|
2015/09/candidate-refugees-20150909/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | null | null | null |
2015/09/candidate-refugees-20150909/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 7
|
2015-04-04T04:45:54.000Z
|
2021-02-18T11:12:48.000Z
|
#!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '1aDWOdUCmdWBQBDDAWQetZQO36sn55av-6uAlRtZ_Wuw'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| 21.785714
| 77
| 0.816393
|
7949143db7ef0c7058fd589d8905e53f82559999
| 27,552
|
py
|
Python
|
sceptre/stack.py
|
phajduk/sceptre
|
4674616caf05978843e560e01b7dd91f0137a9ed
|
[
"Apache-2.0"
] | null | null | null |
sceptre/stack.py
|
phajduk/sceptre
|
4674616caf05978843e560e01b7dd91f0137a9ed
|
[
"Apache-2.0"
] | null | null | null |
sceptre/stack.py
|
phajduk/sceptre
|
4674616caf05978843e560e01b7dd91f0137a9ed
|
[
"Apache-2.0"
] | 4
|
2019-09-10T13:32:18.000Z
|
2021-06-16T19:03:47.000Z
|
# -*- coding: utf-8 -*-
"""
sceptre.stack
This module implements a Stack class, which stores data and logic associated
with a particular stack.
"""
from datetime import datetime, timedelta
import logging
import os
import time
from dateutil.tz import tzutc
import botocore
from .connection_manager import ConnectionManager
from .helpers import get_external_stack_name
from .resolvers import ResolvableProperty
from .hooks import HookProperty
from .stack_status import StackStatus
from .stack_status import StackChangeSetStatus
from .template import Template
from .hooks import add_stack_hooks
from .exceptions import CannotUpdateFailedStackError
from .exceptions import UnknownStackStatusError
from .exceptions import UnknownStackChangeSetStatusError
from .exceptions import StackDoesNotExistError
from .exceptions import ProtectedStackError
class Stack(object):
"""
Stack stores information about a particular CloudFormation stack.
It implements methods for carrying out stack-level operations, such as
creating or deleting the stack.
:param name: The name of the stack.
    :type name: str
:param connection_manager: A connection manager, used to make Boto3 calls.
:type connection_manager: sceptre.connection_manager.ConnectionManager
"""
parameters = ResolvableProperty("parameters")
sceptre_user_data = ResolvableProperty("sceptre_user_data")
notifications = ResolvableProperty("notifications")
hooks = HookProperty("hooks")
def __init__(
self, name, project_code, template_path, region, iam_role=None,
parameters=None, sceptre_user_data=None, hooks=None, s3_details=None,
dependencies=None, role_arn=None, protected=False, tags=None,
external_name=None, notifications=None, on_failure=None,
stack_timeout=0
):
self.logger = logging.getLogger(__name__)
self.name = name
self.project_code = project_code
self.external_name = external_name or \
get_external_stack_name(self.project_code, self.name)
self.connection_manager = ConnectionManager(region, iam_role)
self.template_path = template_path
self.s3_details = s3_details
self._template = None
self.protected = protected
self.role_arn = role_arn
self.on_failure = on_failure
self.dependencies = dependencies or []
self.tags = tags or {}
self.stack_timeout = stack_timeout
self.hooks = hooks or {}
self.parameters = parameters or {}
self.sceptre_user_data = sceptre_user_data or {}
self.notifications = notifications or []
def __repr__(self):
return (
"sceptre.stack.Stack("
"name='{name}', project_code='{project_code}', "
"template_path='{template_path}', region='{region}', "
"iam_role='{iam_role}', parameters='{parameters}', "
"sceptre_user_data='{sceptre_user_data}', "
"hooks='{hooks}', s3_details='{s3_details}', "
"dependencies='{dependencies}', role_arn='{role_arn}', "
"protected='{protected}', tags='{tags}', "
"external_name='{external_name}', "
"notifications='{notifications}', on_failure='{on_failure}', "
"stack_timeout='{stack_timeout}'"
")".format(
name=self.name, project_code=self.project_code,
template_path=self.template_path,
region=self.connection_manager.region,
iam_role=self.connection_manager.iam_role,
parameters=self.parameters,
sceptre_user_data=self.sceptre_user_data,
hooks=self.hooks, s3_details=self.s3_details,
dependencies=self.dependencies, role_arn=self.role_arn,
protected=self.protected, tags=self.tags,
external_name=self.external_name,
notifications=self.notifications, on_failure=self.on_failure,
stack_timeout=self.stack_timeout
)
)
@property
def template(self):
"""
Returns the CloudFormation template used to create the stack.
:returns: The stack's template.
:rtype: str
"""
if self._template is None:
self._template = Template(
path=self.template_path,
sceptre_user_data=self.sceptre_user_data,
s3_details=self.s3_details,
connection_manager=self.connection_manager
)
return self._template
@add_stack_hooks
def create(self):
"""
Creates the stack.
:returns: The stack's status.
:rtype: sceptre.stack_status.StackStatus
"""
self._protect_execution()
self.logger.info("%s - Creating stack", self.name)
create_stack_kwargs = {
"StackName": self.external_name,
"Parameters": self._format_parameters(self.parameters),
"Capabilities": ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
"NotificationARNs": self.notifications,
"Tags": [
{"Key": str(k), "Value": str(v)}
for k, v in self.tags.items()
]
}
if self.on_failure:
create_stack_kwargs.update({"OnFailure": self.on_failure})
create_stack_kwargs.update(self.template.get_boto_call_parameter())
create_stack_kwargs.update(self._get_role_arn())
create_stack_kwargs.update(self._get_stack_timeout())
response = self.connection_manager.call(
service="cloudformation",
command="create_stack",
kwargs=create_stack_kwargs
)
self.logger.debug(
"%s - Create stack response: %s", self.name, response
)
status = self._wait_for_completion()
return status
@add_stack_hooks
def update(self):
"""
Updates the stack.
:returns: The stack's status.
:rtype: sceptre.stack_status.StackStatus
"""
self._protect_execution()
self.logger.info("%s - Updating stack", self.name)
update_stack_kwargs = {
"StackName": self.external_name,
"Parameters": self._format_parameters(self.parameters),
"Capabilities": ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
"NotificationARNs": self.notifications,
"Tags": [
{"Key": str(k), "Value": str(v)}
for k, v in self.tags.items()
]
}
update_stack_kwargs.update(self.template.get_boto_call_parameter())
update_stack_kwargs.update(self._get_role_arn())
response = self.connection_manager.call(
service="cloudformation",
command="update_stack",
kwargs=update_stack_kwargs
)
self.logger.debug(
"%s - Update stack response: %s", self.name, response
)
status = self._wait_for_completion(self.stack_timeout)
# Cancel update after timeout
if status == StackStatus.IN_PROGRESS:
status = self.cancel_stack_update()
return status
def cancel_stack_update(self):
"""
Cancels a stack update.
:returns: The cancelled stack status.
:rtype: sceptre.stack_status.StackStatus
"""
self.logger.warning(
"%s - Update stack time exceeded the specified timeout",
self.name
)
response = self.connection_manager.call(
service="cloudformation",
command="cancel_update_stack",
kwargs={"StackName": self.external_name}
)
self.logger.debug(
"%s - Cancel update stack response: %s", self.name, response
)
return self._wait_for_completion()
def launch(self):
"""
Launches the stack.
        If the stack status is create_failed or rollback_complete, the
        stack is deleted. Launch then tries to create or update the stack,
        depending on whether it already exists. If there are no updates to
        be performed, launch exits gracefully.
:returns: The stack's status.
:rtype: sceptre.stack_status.StackStatus
"""
self._protect_execution()
self.logger.info("%s - Launching stack", self.name)
try:
existing_status = self.get_status()
except StackDoesNotExistError:
existing_status = "PENDING"
self.logger.info(
"%s - Stack is in the %s state", self.name, existing_status
)
if existing_status == "PENDING":
status = self.create()
elif existing_status in ["CREATE_FAILED", "ROLLBACK_COMPLETE"]:
self.delete()
status = self.create()
elif existing_status.endswith("COMPLETE"):
try:
status = self.update()
except botocore.exceptions.ClientError as exp:
error_message = exp.response["Error"]["Message"]
if error_message == "No updates are to be performed.":
self.logger.info(
"%s - No updates to perform.", self.name
)
status = StackStatus.COMPLETE
else:
raise
elif existing_status.endswith("IN_PROGRESS"):
self.logger.info(
"%s - Stack action is already in progress state and cannot "
"be updated", self.name
)
status = StackStatus.IN_PROGRESS
elif existing_status.endswith("FAILED"):
status = StackStatus.FAILED
raise CannotUpdateFailedStackError(
"'{0}' is in a the state '{1}' and cannot be updated".format(
self.name, existing_status
)
)
else:
raise UnknownStackStatusError(
"{0} is unknown".format(existing_status)
)
return status
@add_stack_hooks
def delete(self):
"""
Deletes the stack.
:returns: The stack's status.
:rtype: sceptre.stack_status.StackStatus
"""
self._protect_execution()
self.logger.info("%s - Deleting stack", self.name)
try:
status = self.get_status()
except StackDoesNotExistError:
self.logger.info("%s does not exist.", self.name)
status = StackStatus.COMPLETE
return status
delete_stack_kwargs = {"StackName": self.external_name}
delete_stack_kwargs.update(self._get_role_arn())
self.connection_manager.call(
service="cloudformation",
command="delete_stack",
kwargs=delete_stack_kwargs
)
try:
status = self._wait_for_completion()
except StackDoesNotExistError:
status = StackStatus.COMPLETE
except botocore.exceptions.ClientError as error:
if error.response["Error"]["Message"].endswith("does not exist"):
status = StackStatus.COMPLETE
else:
raise
self.logger.info("%s - delete %s", self.name, status)
return status
def lock(self):
"""
Locks the stack by applying a deny all updates stack policy.
"""
policy_path = os.path.join(
os.path.dirname(__file__),
"stack_policies/lock.json"
)
self.set_policy(policy_path)
self.logger.info("%s - Successfully locked stack", self.name)
def unlock(self):
"""
Unlocks the stack by applying an allow all updates stack policy.
"""
policy_path = os.path.join(
os.path.dirname(__file__),
"stack_policies/unlock.json"
)
self.set_policy(policy_path)
self.logger.info("%s - Successfully unlocked stack", self.name)
def describe(self):
"""
        Returns a description of the stack.
:returns: A stack description.
:rtype: dict
"""
return self.connection_manager.call(
service="cloudformation",
command="describe_stacks",
kwargs={"StackName": self.external_name}
)
def describe_events(self):
"""
        Returns a dictionary containing the stack events.
:returns: The CloudFormation events for a stack.
:rtype: dict
"""
return self.connection_manager.call(
service="cloudformation",
command="describe_stack_events",
kwargs={"StackName": self.external_name}
)
def describe_resources(self):
"""
Returns the logical and physical resource IDs of the stack's resources.
:returns: Information about the stack's resources.
:rtype: dict
"""
self.logger.debug("%s - Describing stack resources", self.name)
response = self.connection_manager.call(
service="cloudformation",
command="describe_stack_resources",
kwargs={"StackName": self.external_name}
)
self.logger.debug(
"%s - Describe stack resource response: %s", self.name, response
)
desired_properties = ["LogicalResourceId", "PhysicalResourceId"]
formatted_response = [
{k: v for k, v in item.items() if k in desired_properties}
for item in response["StackResources"]
]
return formatted_response
def describe_outputs(self):
"""
Returns a list of stack outputs.
:returns: The stack's outputs.
:rtype: list
"""
self.logger.debug("%s - Describing stack outputs", self.name)
response = self.describe()
return response["Stacks"][0].get("Outputs", [])
def continue_update_rollback(self):
"""
Rolls back a stack in the UPDATE_ROLLBACK_FAILED state to
UPDATE_ROLLBACK_COMPLETE.
"""
self.logger.debug("%s - Continuing update rollback", self.name)
continue_update_rollback_kwargs = {"StackName": self.external_name}
continue_update_rollback_kwargs.update(self._get_role_arn())
self.connection_manager.call(
service="cloudformation",
command="continue_update_rollback",
kwargs=continue_update_rollback_kwargs
)
self.logger.info(
"%s - Successfully initiated continuation of update rollback",
self.name
)
def set_policy(self, policy_path):
"""
Applies a stack policy.
        :param policy_path: the path of a JSON file containing an AWS stack policy
:type policy_path: str
"""
with open(policy_path) as f:
policy = f.read()
self.logger.debug("%s - Setting stack policy: \n%s", self.name, policy)
self.connection_manager.call(
service="cloudformation",
command="set_stack_policy",
kwargs={
"StackName": self.external_name,
"StackPolicyBody": policy
}
)
self.logger.info("%s - Successfully set stack policy", self.name)
def get_policy(self):
"""
Returns a stack's policy.
:returns: The stack's stack policy.
:rtype: str
"""
self.logger.debug("%s - Getting stack policy", self.name)
response = self.connection_manager.call(
service="cloudformation",
command="get_stack_policy",
kwargs={
"StackName": self.external_name
}
)
return response
def create_change_set(self, change_set_name):
"""
Creates a change set with the name ``change_set_name``.
:param change_set_name: The name of the change set.
:type change_set_name: str
"""
create_change_set_kwargs = {
"StackName": self.external_name,
"Parameters": self._format_parameters(self.parameters),
"Capabilities": ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
"ChangeSetName": change_set_name,
"NotificationARNs": self.notifications,
"Tags": [
{"Key": str(k), "Value": str(v)}
for k, v in self.tags.items()
]
}
create_change_set_kwargs.update(
self.template.get_boto_call_parameter()
)
create_change_set_kwargs.update(self._get_role_arn())
self.logger.debug(
"%s - Creating change set '%s'", self.name, change_set_name
)
self.connection_manager.call(
service="cloudformation",
command="create_change_set",
kwargs=create_change_set_kwargs
)
# After the call successfully completes, AWS CloudFormation
# starts creating the change set.
self.logger.info(
"%s - Successfully initiated creation of change set '%s'",
self.name, change_set_name
)
def delete_change_set(self, change_set_name):
"""
Deletes the change set ``change_set_name``.
:param change_set_name: The name of the change set.
:type change_set_name: str
"""
self.logger.debug(
"%s - Deleting change set '%s'", self.name, change_set_name
)
self.connection_manager.call(
service="cloudformation",
command="delete_change_set",
kwargs={
"ChangeSetName": change_set_name,
"StackName": self.external_name
}
)
# If the call successfully completes, AWS CloudFormation
# successfully deleted the change set.
self.logger.info(
"%s - Successfully deleted change set '%s'",
self.name, change_set_name
)
def describe_change_set(self, change_set_name):
"""
Describes the change set ``change_set_name``.
:param change_set_name: The name of the change set.
:type change_set_name: str
:returns: The description of the change set.
:rtype: dict
"""
self.logger.debug(
"%s - Describing change set '%s'", self.name, change_set_name
)
return self.connection_manager.call(
service="cloudformation",
command="describe_change_set",
kwargs={
"ChangeSetName": change_set_name,
"StackName": self.external_name
}
)
def execute_change_set(self, change_set_name):
"""
Executes the change set ``change_set_name``.
:param change_set_name: The name of the change set.
:type change_set_name: str
"""
self._protect_execution()
self.logger.debug(
"%s - Executing change set '%s'", self.name, change_set_name
)
self.connection_manager.call(
service="cloudformation",
command="execute_change_set",
kwargs={
"ChangeSetName": change_set_name,
"StackName": self.external_name
}
)
status = self._wait_for_completion()
return status
def list_change_sets(self):
"""
Lists the stack's change sets.
:returns: The stack's change sets.
:rtype: dict
"""
self.logger.debug("%s - Listing change sets", self.name)
return self.connection_manager.call(
service="cloudformation",
command="list_change_sets",
kwargs={
"StackName": self.external_name
}
)
def get_status(self):
"""
Returns the stack's status.
:returns: The stack's status.
:rtype: sceptre.stack_status.StackStatus
:raises: sceptre.exceptions.StackDoesNotExistError
"""
try:
status = self.describe()["Stacks"][0]["StackStatus"]
except botocore.exceptions.ClientError as exp:
if exp.response["Error"]["Message"].endswith("does not exist"):
raise StackDoesNotExistError(exp.response["Error"]["Message"])
else:
raise exp
return status
def _format_parameters(self, parameters):
"""
Converts CloudFormation parameters to the format used by Boto3.
:param parameters: A dictionary of parameters.
:type parameters: dict
:returns: A list of the formatted parameters.
:rtype: list
"""
formatted_parameters = []
for name, value in parameters.items():
if value is None:
continue
if isinstance(value, list):
value = ",".join(value)
formatted_parameters.append({
"ParameterKey": name,
"ParameterValue": value
})
return formatted_parameters
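    # Illustrative example (editor's addition, not part of the original
    # source): _format_parameters({"SubnetIds": ["subnet-a", "subnet-b"],
    # "Env": "prod", "Skip": None}) returns
    # [{"ParameterKey": "SubnetIds", "ParameterValue": "subnet-a,subnet-b"},
    #  {"ParameterKey": "Env", "ParameterValue": "prod"}];
    # ``None`` values are dropped and list values are comma-joined.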
def _get_role_arn(self):
"""
Returns the role arn assumed by CloudFormation when building a stack.
Returns an empty dict if no role is to be assumed.
        :returns: a dict containing the role ARN, if one is set
:rtype: dict
"""
if self.role_arn:
return {
"RoleARN": self.role_arn
}
else:
return {}
def _get_stack_timeout(self):
"""
        Returns the timeout, in minutes, before an in-progress stack operation is considered to be failing.
Returns an empty dict if no timeout is set.
:returns: the creation/update timeout
:rtype: dict
"""
if self.stack_timeout:
return {
"TimeoutInMinutes": self.stack_timeout
}
else:
return {}
def _protect_execution(self):
"""
        Raises a ProtectedStackError if ``protected == True``.
        This error is meant to stop any action from being carried out
        on a protected stack.
:raises: sceptre.exceptions.ProtectedStackError
"""
if self.protected:
raise ProtectedStackError(
"Cannot perform action on '{0}': stack protection is "
"currently enabled".format(self.name)
)
def _wait_for_completion(self, timeout=0):
"""
Waits for a stack operation to finish. Prints CloudFormation events
while it waits.
:param timeout: Timeout before returning, in minutes.
:returns: The final stack status.
:rtype: sceptre.stack_status.StackStatus
"""
        timeout = 60 * timeout  # convert the timeout from minutes to seconds
def timed_out(elapsed):
return elapsed >= timeout if timeout else False
status = StackStatus.IN_PROGRESS
self.most_recent_event_datetime = (
datetime.now(tzutc()) - timedelta(seconds=3)
)
elapsed = 0
while status == StackStatus.IN_PROGRESS and not timed_out(elapsed):
status = self._get_simplified_status(self.get_status())
self._log_new_events()
time.sleep(4)
elapsed += 4
return status
@staticmethod
def _get_simplified_status(status):
"""
Returns the simplified Stack Status.
The simplified stack status is represented by the struct
``sceptre.StackStatus()`` and can take one of the following options:
* complete
* in_progress
* failed
:param status: The CloudFormation stack status to simplify.
:type status: str
:returns: The stack's simplified status
:rtype: sceptre.stack_status.StackStatus
"""
if status.endswith("ROLLBACK_COMPLETE"):
return StackStatus.FAILED
elif status.endswith("_COMPLETE"):
return StackStatus.COMPLETE
elif status.endswith("_IN_PROGRESS"):
return StackStatus.IN_PROGRESS
elif status.endswith("_FAILED"):
return StackStatus.FAILED
else:
raise UnknownStackStatusError(
"{0} is unknown".format(status)
)
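    # Illustrative mapping (editor's addition, not part of the original source):
    #   "CREATE_COMPLETE"          -> StackStatus.COMPLETE
    #   "DELETE_IN_PROGRESS"       -> StackStatus.IN_PROGRESS
    #   "CREATE_FAILED"            -> StackStatus.FAILED
    #   "UPDATE_ROLLBACK_COMPLETE" -> StackStatus.FAILED
    # (the "ROLLBACK_COMPLETE" suffix is checked before "_COMPLETE").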
def _log_new_events(self):
"""
Log the latest stack events while the stack is being built.
"""
events = self.describe_events()["StackEvents"]
events.reverse()
new_events = [
event for event in events
if event["Timestamp"] > self.most_recent_event_datetime
]
for event in new_events:
self.logger.info(" ".join([
event["Timestamp"].replace(microsecond=0).isoformat(),
self.name,
event["LogicalResourceId"],
event["ResourceType"],
event["ResourceStatus"],
event.get("ResourceStatusReason", "")
]))
self.most_recent_event_datetime = event["Timestamp"]
def wait_for_cs_completion(self, change_set_name):
"""
Waits while the stack change set status is "pending".
:param change_set_name: The name of the change set.
:type change_set_name: str
:returns: The change set's status.
:rtype: sceptre.stack_status.StackChangeSetStatus
"""
while True:
status = self._get_cs_status(change_set_name)
if status != StackChangeSetStatus.PENDING:
break
time.sleep(2)
return status
def _get_cs_status(self, change_set_name):
"""
Returns the status of a change set.
:param change_set_name: The name of the change set.
:type change_set_name: str
:returns: The change set's status.
:rtype: sceptre.stack_status.StackChangeSetStatus
"""
cs_description = self.describe_change_set(change_set_name)
cs_status = cs_description["Status"]
cs_exec_status = cs_description["ExecutionStatus"]
possible_statuses = [
"CREATE_PENDING", "CREATE_IN_PROGRESS",
"CREATE_COMPLETE", "DELETE_COMPLETE", "FAILED"
]
possible_execution_statuses = [
"UNAVAILABLE", "AVAILABLE", "EXECUTE_IN_PROGRESS",
"EXECUTE_COMPLETE", "EXECUTE_FAILED", "OBSOLETE"
]
if cs_status not in possible_statuses:
raise UnknownStackChangeSetStatusError(
"Status {0} is unknown".format(cs_status)
)
if cs_exec_status not in possible_execution_statuses:
raise UnknownStackChangeSetStatusError(
"ExecutionStatus {0} is unknown".format(cs_status)
)
if (
cs_status == "CREATE_COMPLETE" and
cs_exec_status == "AVAILABLE"
):
return StackChangeSetStatus.READY
elif (
cs_status in [
"CREATE_PENDING", "CREATE_IN_PROGRESS", "CREATE_COMPLETE"
] and
cs_exec_status in ["UNAVAILABLE", "AVAILABLE"]
):
return StackChangeSetStatus.PENDING
elif (
cs_status in ["DELETE_COMPLETE", "FAILED"] or
cs_exec_status in [
"EXECUTE_IN_PROGRESS", "EXECUTE_COMPLETE",
"EXECUTE_FAILED", "OBSOLETE"
]
):
return StackChangeSetStatus.DEFUNCT
else: # pragma: no cover
raise Exception("This else should not be reachable.")
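# Minimal change-set workflow sketch (editor's addition, not part of the
# original source; assumes `stack` is a configured instance of the class
# above):
#
#   stack.create_change_set("my-change-set")
#   if stack.wait_for_cs_completion("my-change-set") == StackChangeSetStatus.READY:
#       stack.execute_change_set("my-change-set")
#   else:
#       stack.delete_change_set("my-change-set")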

avg_line_length: 33.436893 | max_line_length: 79 | alphanum_fraction: 0.586999

hexsha: 7949153718530e9d77b8fb4e5d7027ce463544e2 | size: 874 | ext: py | lang: Python
max_stars:  fault_tolerant_flight_control_drl/tools/__init__.py @ kdally/fault-tolerant-flight-control-drl (800a1c9319b44ab2b1d17f6e19266c2392d6e57b) | licenses: ["MIT"] | count: 8 | 2021-02-27T09:49:57.000Z to 2022-03-21T16:28:08.000Z
max_issues: same path/repo/head | licenses: ["MIT"] | count: null
max_forks:  same path/repo/head | licenses: ["MIT"] | count: 2 | 2021-03-04T07:24:35.000Z to 2021-11-17T04:21:08.000Z

from fault_tolerant_flight_control_drl.tools.get_task import AltitudeTask, AttitudeTask, BodyRateTask, Task
from fault_tolerant_flight_control_drl.tools.get_task import CascadedAltTask, ReliabilityTask
from fault_tolerant_flight_control_drl.tools.get_task import DisturbanceRejectionAlt, DisturbanceRejectionAtt
from fault_tolerant_flight_control_drl.tools.identifier import get_ID
from fault_tolerant_flight_control_drl.tools.plot_response import plot_response
from fault_tolerant_flight_control_drl.tools.plot_optimization import plot_optimization
from fault_tolerant_flight_control_drl.tools.plot_training import plot_training
from fault_tolerant_flight_control_drl.tools.plot_weights import plot_weights
from fault_tolerant_flight_control_drl.tools.schedule import schedule, schedule_exp, schedule_kink, constant
import fault_tolerant_flight_control_drl.tools.save_util

avg_line_length: 79.454545 | max_line_length: 109 | alphanum_fraction: 0.915332

hexsha: 794915c1bd839773b43cbd71ff06abd15aa40264 | size: 6901 | ext: py | lang: Python
max_stars:  utils/sampling.py @ hongyouc/FedBE (7eeb965d29ce6c35c38f31706a004c00745e59b8) | licenses: ["Apache-2.0"] | count: 12 | 2021-09-23T19:41:46.000Z to 2022-03-29T12:17:25.000Z
max_issues: same path/repo/head | licenses: ["Apache-2.0"] | count: 1 | 2022-03-17T08:20:17.000Z to 2022-03-17T08:20:17.000Z
max_forks:  same path/repo/head | licenses: ["Apache-2.0"] | count: 1 | 2022-03-01T11:43:31.000Z to 2022-03-01T11:43:31.000Z

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import numpy as np
import pdb
from torchvision import datasets, transforms
import os
import glob
from torch.utils.data import Dataset
from PIL import Image
def mnist_iid(dataset, num_users):
"""
Sample I.I.D. client data from MNIST dataset
:param dataset:
:param num_users:
:return: dict of image index
"""
num_items = int(len(dataset)/num_users)
dict_users, all_idxs = {}, [i for i in range(len(dataset))]
for i in range(num_users):
dict_users[i] = set(np.random.choice(all_idxs, num_items, replace=False))
all_idxs = list(set(all_idxs) - dict_users[i])
return dict_users
def mnist_noniid(dataset, num_users, num_data=60000):
"""
Sample non-I.I.D client data from MNIST dataset
:param dataset:
:param num_users:
:return:
"""
num_shards, num_imgs = 200, 250
idx_shard = [i for i in range(num_shards)]
dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}
idxs = np.arange(num_shards*num_imgs)
labels = dataset.train_labels.numpy()[:num_shards*num_imgs]
# sort labels
idxs_labels = np.vstack((idxs, labels))
idxs_labels = idxs_labels[:,idxs_labels[1,:].argsort()]
idxs = idxs_labels[0,:]
# divide and assign
for i in range(num_users):
rand_set = set(np.random.choice(idx_shard, 2, replace=False))
idx_shard = list(set(idx_shard) - rand_set)
for rand in rand_set:
add_idx = np.array(list(set(idxs[rand*num_imgs:(rand+1)*num_imgs]) ))
dict_users[i] = np.concatenate((dict_users[i], add_idx), axis=0)
cnts_dict = {}
with open("mnist_%d_u%d.txt"%(num_data, num_users), 'w') as f:
for i in range(num_users):
labels_i = labels[dict_users[i]]
cnts = np.array([np.count_nonzero(labels_i == j ) for j in range(10)] )
cnts_dict[i] = cnts
f.write("User %s: %s sum: %d\n"%(i, " ".join([str(cnt) for cnt in cnts]), sum(cnts) ))
server_idx = list(range(num_shards*num_imgs, 60000))
return dict_users, server_idx, cnts_dict
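# Editor's note (not part of the original source): with num_shards=200 and
# num_imgs=250, the 50,000 label-sorted images form 200 shards and each user
# draws 2 of them, so every client holds 500 images covering only a few
# classes -- a "step"-style non-IID split. Images 50,000-59,999 are held out
# as server data.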
def cifar_iid(dataset, num_users, num_data=50000):
"""
Sample I.I.D. client data from CIFAR10 dataset
:param dataset:
:param num_users:
:return: dict of image index
"""
dict_users, all_idxs = {}, [i for i in range(len(dataset))]
if num_data < 50000:
server_idx = np.random.choice(all_idxs, 50000-num_data, replace=False)
all_idxs = list(set(all_idxs) - set(server_idx))
num_items = int(len(all_idxs)/num_users)
for i in range(num_users):
dict_users[i] = set(np.random.choice(all_idxs, num_items, replace=False))
all_idxs = list(set(all_idxs) - dict_users[i])
return dict_users, server_idx
def cifar_noniid(dataset, num_users, num_data=50000, method="step"):
"""
Sample non-I.I.D client data from CIFAR dataset
:param dataset:
:param num_users:
:return:
"""
labels = np.array(dataset.targets)
_lst_sample = 10
if method=="step":
num_shards = num_users*2
num_imgs = 50000// num_shards
idx_shard = [i for i in range(num_shards)]
idxs = np.arange(num_shards*num_imgs)
# sort labels
idxs_labels = np.vstack((idxs, labels))
idxs_labels = idxs_labels[:,idxs_labels[1,:].argsort()]
idxs = idxs_labels[0,:]
        least_idx = np.zeros((num_users, 10, _lst_sample), dtype=int)
for i in range(10):
idx_i = np.random.choice(np.where(labels==i)[0], num_users*_lst_sample, replace=False)
least_idx[:, i, :] = idx_i.reshape((num_users, _lst_sample))
least_idx = np.reshape(least_idx, (num_users, -1))
least_idx_set = set(np.reshape(least_idx, (-1)))
server_idx = np.random.choice(list(set(range(50000))-least_idx_set), 50000-num_data, replace=False)
# divide and assign
dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}
for i in range(num_users):
rand_set = set(np.random.choice(idx_shard, num_shards//num_users, replace=False))
idx_shard = list(set(idx_shard) - rand_set)
for rand in rand_set:
idx_i = list( set(range(rand*num_imgs, (rand+1)*num_imgs)) )
add_idx = list(set(idxs[idx_i]) - set(server_idx) )
dict_users[i] = np.concatenate((dict_users[i], add_idx), axis=0)
dict_users[i] = np.concatenate((dict_users[i], least_idx[i]), axis=0)
elif method == "dir":
min_size = 0
K = 10
y_train = labels
_lst_sample = 2
        least_idx = np.zeros((num_users, 10, _lst_sample), dtype=int)
for i in range(10):
idx_i = np.random.choice(np.where(labels==i)[0], num_users*_lst_sample, replace=False)
least_idx[:, i, :] = idx_i.reshape((num_users, _lst_sample))
least_idx = np.reshape(least_idx, (num_users, -1))
least_idx_set = set(np.reshape(least_idx, (-1)))
#least_idx_set = set([])
server_idx = np.random.choice(list(set(range(50000))-least_idx_set), 50000-num_data, replace=False)
local_idx = np.array([i for i in range(50000) if i not in server_idx and i not in least_idx_set])
N = y_train.shape[0]
net_dataidx_map = {}
dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}
while min_size < 10:
idx_batch = [[] for _ in range(num_users)]
# for each class in the dataset
for k in range(K):
idx_k = np.where(y_train == k)[0]
idx_k = [id for id in idx_k if id in local_idx]
np.random.shuffle(idx_k)
proportions = np.random.dirichlet(np.repeat(0.1, num_users))
## Balance
proportions = np.array([p*(len(idx_j)<N/num_users) for p,idx_j in zip(proportions,idx_batch)])
proportions = proportions/proportions.sum()
proportions = (np.cumsum(proportions)*len(idx_k)).astype(int)[:-1]
idx_batch = [idx_j + idx.tolist() for idx_j,idx in zip(idx_batch,np.split(idx_k,proportions))]
min_size = min([len(idx_j) for idx_j in idx_batch])
for j in range(num_users):
np.random.shuffle(idx_batch[j])
dict_users[j] = idx_batch[j]
dict_users[j] = np.concatenate((dict_users[j], least_idx[j]), axis=0)
cnts_dict = {}
with open("data_%d_u%d_%s.txt"%(num_data, num_users, method), 'w') as f:
for i in range(num_users):
labels_i = labels[dict_users[i]]
cnts = np.array([np.count_nonzero(labels_i == j ) for j in range(10)] )
cnts_dict[i] = cnts
f.write("User %s: %s sum: %d\n"%(i, " ".join([str(cnt) for cnt in cnts]), sum(cnts) ))
return dict_users, server_idx, cnts_dict
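# Editor's note (not part of the original source): the "dir" branch draws each
# class's per-user proportions from Dirichlet(alpha=0.1); such a small alpha
# concentrates a class on few users, so clients get sharply different label
# mixtures, while `least_idx` still guarantees every user _lst_sample examples
# of each class.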

avg_line_length: 38.553073 | max_line_length: 108 | alphanum_fraction: 0.618026

hexsha: 79491614dceb323c022da2f467de3caabf704186 | size: 1503 | ext: py | lang: Python
max_stars:  tests/unit/flows/test_duckdb_transform.py @ angelika233/viadot (99a4c5b622ad099a44ab014a47ba932a747c0ae6) | licenses: ["MIT"] | count: null
max_issues: same path/repo/head | licenses: ["MIT"] | count: null
max_forks:  same path/repo/head | licenses: ["MIT"] | count: null

import os
import pytest
from viadot.flows import DuckDBTransform
from viadot.sources import DuckDB
BRONZE_SCHEMA = "bronze_schema"
SILVER_SCHEMA = "silver_schema"
TABLE = "test_table"
DATABASE_PATH = "test_db_1234.duckdb"
CREDENTIALS = dict(database=DATABASE_PATH)
@pytest.fixture(scope="session")
def duckdb():
duckdb = DuckDB(credentials=CREDENTIALS)
duckdb.run(f"CREATE SCHEMA IF NOT EXISTS {BRONZE_SCHEMA}")
duckdb.run(f"CREATE SCHEMA IF NOT EXISTS {SILVER_SCHEMA}")
# create placeholder tables so that we can list schemas later on
# (DuckDB does not expose a way to list schemas without a table)
duckdb.run(f"CREATE TABLE {BRONZE_SCHEMA}.placeholder(a INTEGER)")
duckdb.run(f"CREATE TABLE {SILVER_SCHEMA}.placeholder(a INTEGER)")
yield duckdb
os.remove(DATABASE_PATH)
def test_duckdb_transform(duckdb, TEST_PARQUET_FILE_PATH):
silver_table_fqn = SILVER_SCHEMA + "." + TABLE
assert silver_table_fqn not in duckdb.tables
# create a table to transform
duckdb.create_table_from_parquet(
schema=BRONZE_SCHEMA, table=TABLE, path=TEST_PARQUET_FILE_PATH
)
# run the flow
flow = DuckDBTransform(
name="First DuckDBTransform flow",
query=f"CREATE TABLE {SILVER_SCHEMA}.{TABLE} AS SELECT * FROM {BRONZE_SCHEMA}.{TABLE}",
credentials=CREDENTIALS,
)
result = flow.run()
assert result.is_successful()
df = duckdb.to_df(f"SELECT * FROM {SILVER_SCHEMA}.{TABLE}")
assert df.shape[0] == 3

avg_line_length: 30.673469 | max_line_length: 95 | alphanum_fraction: 0.725882

hexsha: 794916198dccf8a833455f83c6ebf21ab0484833 | size: 219 | ext: py | lang: Python
max_stars:  manager_workspace/config/desktop.py @ muirawachanga/manager_workspace (e7701475652a03263024e17606ed03952726b5c5) | licenses: ["MIT"] | count: null
max_issues: same path/repo/head | licenses: ["MIT"] | count: null
max_forks:  same path/repo/head | licenses: ["MIT"] | count: 1 | 2022-02-24T20:33:56.000Z to 2022-02-24T20:33:56.000Z

from frappe import _
def get_data():
return [
{
"module_name": "Manager Workspace",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Manager Workspace")
}
]

avg_line_length: 16.846154 | max_line_length: 44 | alphanum_fraction: 0.607306

hexsha: 794916392d4640d16b7df45104c3c01b87538a3d | size: 5315 | ext: py | lang: Python
max_stars:  app/main/views.py @ vicky-eve/Personal-Blog (930f754dec1e7be9e1653cfd3f67a8c440ce85fe) | licenses: ["MIT"] | count: null
max_issues: same path/repo/head | licenses: ["MIT"] | count: null
max_forks:  same path/repo/head | licenses: ["MIT"] | count: null

from flask import render_template,request,redirect,url_for,abort, flash
from . import main
from flask_login import login_required,current_user
from ..models import User, Blog, Quote,Subscribe, Comment
from .forms import UpdateProfile,BlogForm,CommentForm,UpdateBlog,SubscribeForm
from ..requests import get_quote
from .. import db, photos
from app.email import mail_message
@main.route('/')
def home():
'''
View root function that returns index template
'''
quotes = get_quote()
blog_form = BlogForm()
all_blogs = Blog.query.order_by(Blog.date_posted.desc()).all()
return render_template('index.html', quotes=quotes, blogs = all_blogs)
@main.route('/user/<uname>')
@login_required
def profile(uname):
user = User.query.filter_by(username = uname).first()
post = Blog.query.filter_by(user = current_user).all()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user,post=post)
@main.route('/',methods = ['POST','GET'])
def index():
blogs = Blog.query.all()
quotes = get_quote()
form = SubscribeForm()
if form.validate_on_submit():
email = form.email.data
new_subscriber=Subscribe(email=email)
new_subscriber.save_subscriber()
mail_message("You're now subscribed","email/subscribe",new_subscriber.email,new_subscriber=new_subscriber)
        flash('Successful subscription!')
return redirect(url_for('main.index'))
return render_template('index.html', blogs = blogs,quotes =quotes,user=current_user, form = form)
@main.route('/subscribe', methods=['GET', 'POST'])
def subscribe():
"""
    Subscribes the submitted email address to the blog.
"""
email = request.args.get('email')
new_subscriber = Subscribe(email=email)
db.session.add(new_subscriber)
db.session.commit()
flash('Email submitted successfully', 'success')
return redirect(url_for('main.index'))
@main.route('/comment/<int:blog_id>', methods = ['POST','GET'])
@login_required
def comment(blog_id):
form = CommentForm()
blog = Blog.query.get(blog_id)
comments = Comment.query.filter_by(blog_id = blog_id).all()
if form.validate_on_submit():
comment = form.comment.data
blog_id = blog_id
new_comment = Comment(comment = comment,blog_id = blog_id,user=current_user)
new_comment.save_comment()
return redirect(url_for('.comment', blog_id = blog_id))
return render_template('comment.html', form = form, blog = blog,comments=comments)
@main.route('/create_new', methods = ['POST','GET'])
@login_required
def new_blog():
form = BlogForm()
if form.validate_on_submit():
title = form.title.data
post = form.post.data
new_blog = Blog(title = title,post=post,user=current_user)
new_blog.save_blog()
return redirect(url_for('main.index'))
return render_template('new_post.html', form = form,title = "Your Blog")
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route("/delete_post/<int:blog_id>/delete",methods= ['POST'])
@login_required
def delete_post(blog_id):
blog_delete = Blog.query.get(blog_id)
db.session.delete(blog_delete)
db.session.commit()
    flash('Successfully deleted!')
return redirect(url_for('main.index', blog_id=blog_id))
@main.route("/delete_comment/<int:blog_id>/<int:comment_id>",methods= ['POST'])
@login_required
def delete_comment(comment_id,blog_id):
comment = Comment.query.filter_by(id=comment_id).first()
db.session.delete(comment)
db.session.commit()
flash('Deleted!')
return redirect(url_for('.comment', blog_id = blog_id,comment_id=comment_id))
@main.route("/update_post/<int:blog_id>",methods= ['POST','GET'])
@login_required
def update_post(blog_id):
blog = Blog.query.get(blog_id)
form = UpdateBlog()
if form.validate_on_submit():
blog.title =form.title.data
blog.post = form.post.data
db.session.commit()
        flash('Post updated!')
return redirect(url_for('main.index',blog_id=blog_id))
elif request.method == 'GET':
form.title.data = blog.title
form.post.data = blog.post
return render_template('new_blog.html',form=form, title='Update Blog')
@main.route('/recent',methods = ['POST','GET'])
def recent():
blogs = Blog.query.order_by(Blog.date_posted.desc()).all()
return render_template('recent.html', blogs = blogs)

avg_line_length: 34.967105 | max_line_length: 114 | alphanum_fraction: 0.680527

hexsha: 7949166dc9f7f6235e9db9d15e2d913b9bb55598 | size: 4166 | ext: py | lang: Python
max_stars:  docs_zh_CN/conf.py @ vineethbabu/mmaction2 (f2e4289807c95bad7dd83757a49c5d9ebd2f881e) | licenses: ["Apache-2.0"] | count: 1870 | 2020-07-11T09:33:46.000Z to 2022-03-31T13:21:36.000Z
max_issues: docs_zh_CN/conf.py @ wuyy258/mmaction2 (3f3ad9cae291c991b822cbc2ecfb88c1188e87c5) | licenses: ["Apache-2.0"] | count: 1285 | 2020-07-11T11:18:57.000Z to 2022-03-31T08:41:17.000Z
max_forks:  docs_zh_CN/conf.py @ wuyy258/mmaction2 (3f3ad9cae291c991b822cbc2ecfb88c1188e87c5) | licenses: ["Apache-2.0"] | count: 557 | 2020-07-11T09:51:57.000Z to 2022-03-31T13:21:35.000Z

# Copyright (c) OpenMMLab. All rights reserved.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'MMAction2'
copyright = '2020, OpenMMLab'
author = 'MMAction2 Authors'
version_file = '../mmaction/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
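# Editor's note (not part of the original source): get_version() executes
# mmaction/version.py inside the function's local namespace, so the
# `__version__` defined in that file can be read back through locals().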
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode',
'sphinx_markdown_tables', 'sphinx_copybutton', 'myst_parser'
]
# numpy and torch are required
autodoc_mock_imports = ['mmaction.version', 'PIL']
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
source_suffix = {'.rst': 'restructuredtext', '.md': 'markdown'}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
    # 'logo_url': 'https://mmocr.readthedocs.io/en/latest/',
    'menu': [
        {
            'name': '教程',  # "Tutorial"
            'url':
            'https://colab.research.google.com/github/'
            'open-mmlab/mmaction2/blob/master/demo/'
            'mmaction2_tutorial_zh-CN.ipynb'
        },
        {
            'name': 'GitHub',
            'url': 'https://github.com/open-mmlab/mmaction2'
        },
        {
            'name': '上游代码库',  # "Upstream repositories"
            'children': [
                {
                    'name': 'MMCV',
                    'url': 'https://github.com/open-mmlab/mmcv',
                    'description': '计算机视觉基础库'  # "computer vision foundation library"
                },
                {
                    'name': 'MMClassification',
                    'url': 'https://github.com/open-mmlab/mmclassification',
                    'description': '图像分类代码库'  # "image classification codebase"
                },
                {
                    'name': 'MMDetection',
                    'url': 'https://github.com/open-mmlab/mmdetection',
                    'description': '物体检测代码库'  # "object detection codebase"
                },
            ]
        },
    ],
    # Specify the language of the shared menu
    'menu_lang': 'cn'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
myst_enable_extensions = ['colon_fence']
language = 'zh_CN'
master_doc = 'index'
def builder_inited_handler(app):
subprocess.run(['./merge_docs.sh'])
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)

avg_line_length: 31.560606 | max_line_length: 79 | alphanum_fraction: 0.601056

hexsha: 794916f3707fcfab742abfccbcd7b32a9397fb54 | size: 10840 | ext: py | lang: Python
max_stars:  spearmint/cleanup.py @ fernandezdaniel/Spearmint (3c9e0a4be6108c3d652606bd957f0c9ae1bfaf84) | licenses: ["RSA-MD"] | count: 6 | 2021-06-29T11:26:49.000Z to 2022-01-20T18:12:47.000Z
max_issues: same path/repo/head | licenses: ["RSA-MD"] | count: null
max_forks:  same path/repo/head | licenses: ["RSA-MD"] | count: 9 | 2018-06-28T13:06:35.000Z to 2021-06-20T18:21:58.000Z

# -*- coding: utf-8 -*-
# Spearmint
#
# Academic and Non-Commercial Research Use Software License and Terms
# of Use
#
# Spearmint is a software package to perform Bayesian optimization
# according to specific algorithms (the “Software”). The Software is
# designed to automatically run experiments (thus the code name
# 'spearmint') in a manner that iteratively adjusts a number of
# parameters so as to minimize some objective in as few runs as
# possible.
#
# The Software was developed by Ryan P. Adams, Michael Gelbart, and
# Jasper Snoek at Harvard University, Kevin Swersky at the
# University of Toronto (“Toronto”), and Hugo Larochelle at the
# Université de Sherbrooke (“Sherbrooke”), which assigned its rights
# in the Software to Socpra Sciences et Génie
# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement
# between the parties, it is distributed for free academic and
# non-commercial research use by the President and Fellows of Harvard
# College (“Harvard”).
#
# Using the Software indicates your agreement to be bound by the terms
# of this Software Use Agreement (“Agreement”). Absent your agreement
# to the terms below, you (the “End User”) have no rights to hold or
# use the Software whatsoever.
#
# Harvard agrees to grant hereunder the limited non-exclusive license
# to End User for the use of the Software in the performance of End
# User’s internal, non-commercial research and academic use at End
# User’s academic or not-for-profit research institution
# (“Institution”) on the following terms and conditions:
#
# 1. NO REDISTRIBUTION. The Software remains the property Harvard,
# Toronto and Socpra, and except as set forth in Section 4, End User
# shall not publish, distribute, or otherwise transfer or make
# available the Software to any other party.
#
# 2. NO COMMERCIAL USE. End User shall not use the Software for
# commercial purposes and any such use of the Software is expressly
# prohibited. This includes, but is not limited to, use of the
# Software in fee-for-service arrangements, core facilities or
# laboratories or to provide research services to (or in collaboration
# with) third parties for a fee, and in industry-sponsored
# collaborative research projects where any commercial rights are
# granted to the sponsor. If End User wishes to use the Software for
# commercial purposes or for any other restricted purpose, End User
# must execute a separate license agreement with Harvard.
#
# Requests for use of the Software for commercial purposes, please
# contact:
#
# Office of Technology Development
# Harvard University
# Smith Campus Center, Suite 727E
# 1350 Massachusetts Avenue
# Cambridge, MA 02138 USA
# Telephone: (617) 495-3067
# Facsimile: (617) 495-9568
# E-mail: otd@harvard.edu
#
# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own
# all intellectual property in the Software. End User shall gain no
# ownership to the Software. End User shall not remove or delete and
# shall retain in the Software, in any modifications to Software and
# in any Derivative Works, the copyright, trademark, or other notices
# pertaining to Software as provided with the Software.
#
# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,
# as such term is defined under U.S. copyright laws, provided that any
# such Derivative Works shall be restricted to non-commercial,
# internal research and academic use at End User’s Institution. End
# User may distribute Derivative Works to other Institutions solely
# for the performance of non-commercial, internal research and
# academic use on terms substantially similar to this License and
# Terms of Use.
#
# 5. FEEDBACK. In order to improve the Software, comments from End
# Users may be useful. End User agrees to provide Harvard with
# feedback on the End User’s use of the Software (e.g., any bugs in
# the Software, the user experience, etc.). Harvard is permitted to
# use such information provided by End User in making changes and
# improvements to the Software without compensation or an accounting
# to End User.
#
# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or
# Sherbrooke or Socpra may develop modifications to the Software that
# may be based on the feedback provided by End User under Section 5
# above. Harvard, Toronto and Sherbrooke/Socpra shall not be
# restricted in any way by End User regarding their use of such
# information. End User acknowledges the right of Harvard, Toronto
# and Sherbrooke/Socpra to prepare, publish, display, reproduce,
# transmit and or use modifications to the Software that may be
# substantially similar or functionally equivalent to End User’s
# modifications and/or improvements if any. In the event that End
# User obtains patent protection for any modification or improvement
# to Software, End User agrees not to allege or enjoin infringement of
# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,
# or any of the researchers, medical or research staff, officers,
# directors and employees of those institutions.
#
# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,
# present, or share results from the use of the Software. In
# accordance with customary academic practice, End User will
# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers
# of the Software and may cite the relevant reference(s) from the
# following list of publications:
#
# Practical Bayesian Optimization of Machine Learning Algorithms
# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams
# Neural Information Processing Systems, 2012
#
# Multi-Task Bayesian Optimization
# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams
# Advances in Neural Information Processing Systems, 2013
#
# Input Warping for Bayesian Optimization of Non-stationary Functions
# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams
# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013
#
# Bayesian Optimization and Semiparametric Models with Applications to
# Assistive Technology Jasper Snoek, PhD Thesis, University of
# Toronto, 2013
#
# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS." TO THE FULLEST
# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA
# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR
# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND
# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,
# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE
# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT
# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.
#
# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT
# END USER’S OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,
# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL
# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR
# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER
# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH
# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS
# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,
# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 10. INDEMNIFICATION. To the extent permitted by law, End User shall
# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke
# and Socpra, their corporate affiliates, current or future directors,
# trustees, officers, faculty, medical and professional staff,
# employees, students and agents and their respective successors,
# heirs and assigns (the "Indemnitees"), against any liability,
# damage, loss or expense (including reasonable attorney's fees and
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End User’s breach of this
# Agreement or its Institution’s use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms “Harvard” or
# “University of Toronto” or “Université de Sherbrooke” or “Socpra
# Sciences et Génie S.E.C.” (or a substantially similar term) in any
# way that is inconsistent with the permitted uses described
# herein. You agree not to use any name or emblem of Harvard, Toronto
# or Sherbrooke, or any of their subdivisions for any purpose, or to
# falsely suggest any relationship between End User (or its
# Institution) and Harvard, Toronto and/or Sherbrooke, or in any
# manner that would infringe or violate any of their rights.
#
# 13. End User represents and warrants that it has the legal authority
# to enter into this License and Terms of Use on behalf of itself and
# its Institution.
import os
import sys
import pymongo
import json
import shutil
from spearmint.utils.parsing import parse_config_file
from spearmint.utils.parsing import repeat_experiment_name
from spearmint.utils.parsing import repeat_output_dir
def cleanup(path, repeat=-1):
if not os.path.isdir(path):
raise Exception("%s is not a valid directory" % path)
cfg = parse_config_file(path, 'config.json', verbose=False)
db_address = cfg['database']['address']
client = pymongo.MongoClient(db_address)
experiment_name = cfg["experiment-name"]
if repeat >= 0:
experiment_name = repeat_experiment_name(experiment_name, repeat)
    print('Cleaning up experiment %s in database at %s' % (experiment_name, db_address))
db = client.spearmint[experiment_name]
db['jobs'].drop()
db['hypers'].drop()
db['recommendations'].drop()
# remove output files
output_directory = repeat_output_dir(path, repeat) if repeat >= 0 else os.path.join(path, 'output')
if os.path.isdir(output_directory):
shutil.rmtree(output_directory)
# remove plots
plots_directory = os.path.join(path, 'plots')
if os.path.isdir(plots_directory):
shutil.rmtree(plots_directory)
if __name__ == '__main__':
    # the optional repeat index arrives as a string on the command line
    if len(sys.argv) > 2:
        cleanup(sys.argv[1], int(sys.argv[2]))
    else:
        cleanup(sys.argv[1])
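# Usage sketch (editor's addition, not part of the original source):
#   python cleanup.py path/to/experiment      # clean the main experiment
#   python cleanup.py path/to/experiment 3    # clean repeat #3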

avg_line_length: 47.336245 | max_line_length: 103 | alphanum_fraction: 0.772509

hexsha: 7949173877c7498ab96f4e71ea72e38bee78d108 | size: 29190 | ext: py | lang: Python
max_stars:  train.py @ faroukmokhtar/weaver (f3ba1b78d03b5809611b21bd06663fa2f3b738bf) | licenses: ["MIT"] | count: null
max_issues: same path/repo/head | licenses: ["MIT"] | count: null
max_forks:  same path/repo/head | licenses: ["MIT"] | count: null

#!/usr/bin/env python
#import setGPU
import os
import shutil
import glob
import argparse
import numpy as np
import math
import torch
from torch.utils.data import DataLoader
from importlib import import_module
import ast
from utils.logger import _logger, _configLogger
from utils.dataset import SimpleIterDataset
parser = argparse.ArgumentParser()
parser.add_argument('--regression-mode', action='store_true', default=False,
help='run in regression mode if this flag is set; otherwise run in classification mode')
parser.add_argument('-c', '--data-config', type=str, default='data/ak15_points_pf_sv_v0.yaml',
help='data config YAML file')
parser.add_argument('-i', '--data-train', nargs='*', default=[],
help='training files')
parser.add_argument('-t', '--data-test', nargs='*', default=[],
help='testing files')
parser.add_argument('--data-fraction', type=float, default=1,
help='fraction of events to load from each file; for training, the events are randomly selected for each epoch')
parser.add_argument('--file-fraction', type=float, default=1,
help='fraction of files to load; for training, the files are randomly selected for each epoch')
parser.add_argument('--fetch-by-files', action='store_true', default=False,
help='When enabled, will load all events from a small number (set by ``--fetch-step``) of files for each data fetching. '
'Otherwise (default), load a small fraction of events from all files each time, which helps reduce variations in the sample composition.')
parser.add_argument('--fetch-step', type=float, default=0.01,
help='fraction of events to load each time from every file (when ``--fetch-by-files`` is disabled); '
'Or: number of files to load each time (when ``--fetch-by-files`` is enabled). Shuffling & sampling is done within these events, so set a large enough value.')
parser.add_argument('--in-memory', action='store_true', default=False,
help='load the whole dataset (and perform the preprocessing) only once and keep it in memory for the entire run')
parser.add_argument('--train-val-split', type=float, default=0.8,
help='training/validation split fraction')
parser.add_argument('--demo', action='store_true', default=False,
help='quickly test the setup by running over only a small number of events')
parser.add_argument('--lr-finder', type=str, default=None,
help='run learning rate finder instead of the actual training; format: ``start_lr, end_lr, num_iters``')
parser.add_argument('-n', '--network-config', type=str, default='networks/particle_net_pfcand_sv.py',
help='network architecture configuration file; the path must be relative to the current dir')
parser.add_argument('-o', '--network-option', nargs=2, action='append', default=[],
help='options to pass to the model class constructor, e.g., `--network-option use_counts False`')
parser.add_argument('-m', '--model-prefix', type=str, default='models/{auto}/network',
help='path to save or load the model; for training, this will be used as a prefix, so model snapshots '
'will saved to `{model_prefix}_epoch-%d_state.pt` after each epoch, and the one with the best '
'validation metric to `{model_prefix}_best_epoch_state.pt`; for testing, this should be the full path '
'including the suffix, otherwise the one with the best validation metric will be used; '
'for training, `{auto}` can be used as part of the path to auto-generate a name, '
'based on the timestamp and network configuration')
parser.add_argument('--num-epochs', type=int, default=20,
help='number of epochs')
parser.add_argument('--steps-per-epoch', type=int, default=None,
help='number of steps (iterations) per epochs; if not set, each epoch will run over all loaded samples')
parser.add_argument('--optimizer', type=str, default='ranger', choices=['adam', 'adamW', 'ranger'], # TODO: add more
help='optimizer for the training')
parser.add_argument('--optimizer-option', nargs=2, action='append', default=[],
help='options to pass to the optimizer class constructor, e.g., `--optimizer-option weight_decay 1e-4`')
parser.add_argument('--lr-scheduler', type=str, default='flat+decay',
choices=['none', 'steps', 'flat+decay', 'flat+linear', 'flat+cos', 'one-cycle'],
help='learning rate scheduler')
parser.add_argument('--load-epoch', type=int, default=None,
help='used to resume interrupted training, load model and optimizer state saved in the `epoch-%d_state.pt` and `epoch-%d_optimizer.pt` files')
parser.add_argument('--start-lr', type=float, default=5e-3,
help='start learning rate')
parser.add_argument('--batch-size', type=int, default=128,
help='batch size')
parser.add_argument('--use-amp', action='store_true', default=False,
help='use mixed precision training (fp16); NOT WORKING YET')
parser.add_argument('--gpus', type=str, default='0',
help='device for the training/testing; to use CPU, set to empty string (""); to use multiple gpu, set it as a comma separated list, e.g., `1,2,3,4`')
parser.add_argument('--num-workers', type=int, default=1,
help='number of threads to load the dataset; memory consumption and disk access load increases (~linearly) with this numbers')
parser.add_argument('--predict', action='store_true', default=False,
help='run prediction instead of training')
parser.add_argument('--predict-output', type=str,
help='path to save the prediction output, support `.root` and `.awkd` format')
parser.add_argument('--export-onnx', type=str, default=None,
help='export the PyTorch model to ONNX model and save it at the given path (path must ends w/ .onnx); '
'needs to set `--data-config`, `--network-config`, and `--model-prefix` (requires the full model path)')
parser.add_argument('--io-test', action='store_true', default=False,
help='test throughput of the dataloader')
parser.add_argument('--copy-inputs', action='store_true', default=False,
help='copy input files to the current dir (can help to speed up dataloading when running over remote files, e.g., from EOS)')
parser.add_argument('--log', type=str, default='',
help='path to the log file; `{auto}` can be used as part of the path to auto-generate a name, based on the timestamp and network configuration')
parser.add_argument('--print', action='store_true', default=False,
help='do not run training/prediction but only print model information, e.g., FLOPs and number of parameters of a model')
parser.add_argument('--profile', action='store_true', default=False,
help='run the profiler')
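# Example invocation (editor's addition, not part of the original source;
# the input/output paths are placeholders):
#   python train.py -c data/ak15_points_pf_sv_v0.yaml \
#       -i 'train/*.root' -t 'test/*.root' \
#       -n networks/particle_net_pfcand_sv.py -m 'models/{auto}/network'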
def train_load(args):
"""
Loads the training data.
:param args:
:return: train_loader, val_loader, data_config, train_inputs
"""
filelist = sorted(sum([glob.glob(f) for f in args.data_train], []))
if args.copy_inputs:
import tempfile
tmpdir = tempfile.mkdtemp()
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
new_filelist = []
for src in filelist:
dest = os.path.join(tmpdir, src.lstrip('/'))
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest), exist_ok=True)
shutil.copy2(src, dest)
_logger.info('Copied file %s to %s' % (src, dest))
new_filelist.append(dest)
filelist = new_filelist
# np.random.seed(1)
np.random.shuffle(filelist)
if args.demo:
filelist = filelist[:20]
_logger.info(filelist)
args.data_fraction = 0.1
args.fetch_step = 0.002
num_workers = min(args.num_workers, int(len(filelist) * args.file_fraction))
train_data = SimpleIterDataset(filelist, args.data_config, for_training=True,
load_range_and_fraction=((0, args.train_val_split), args.data_fraction),
file_fraction=args.file_fraction,
fetch_by_files=args.fetch_by_files,
fetch_step=args.fetch_step,
infinity_mode=args.steps_per_epoch is not None,
in_memory=args.in_memory)
val_data = SimpleIterDataset(filelist, args.data_config, for_training=True,
load_range_and_fraction=((args.train_val_split, 1), args.data_fraction),
file_fraction=args.file_fraction,
fetch_by_files=args.fetch_by_files,
fetch_step=args.fetch_step,
infinity_mode=args.steps_per_epoch is not None,
in_memory=args.in_memory)
persistent_workers = num_workers > 0 and args.steps_per_epoch is not None
train_loader = DataLoader(train_data, num_workers=num_workers, batch_size=args.batch_size, drop_last=True,
pin_memory=True, persistent_workers=persistent_workers)
val_loader = DataLoader(val_data, num_workers=num_workers, batch_size=args.batch_size, drop_last=True,
pin_memory=True, persistent_workers=persistent_workers)
data_config = train_data.config
train_input_names = train_data.config.input_names
train_label_names = train_data.config.label_names
return train_loader, val_loader, data_config, train_input_names, train_label_names
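# Editor's note (not part of the original source): train_load gives the two
# readers disjoint per-file ranges -- (0, train_val_split) for training and
# (train_val_split, 1) for validation -- so no event is used for both.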
def test_load(args):
"""
Loads the test data.
:param args:
:return: test_loader, data_config
"""
filelist = sorted(sum([glob.glob(f) for f in args.data_test], []))
num_workers = min(args.num_workers, len(filelist))
test_data = SimpleIterDataset(filelist, args.data_config, for_training=False,
load_range_and_fraction=((0, 1), args.data_fraction),
fetch_by_files=True, fetch_step=1)
test_loader = DataLoader(test_data, num_workers=num_workers, batch_size=args.batch_size, drop_last=False,
pin_memory=True)
data_config = test_data.config
return test_loader, data_config
def onnx(args, model, data_config, model_info):
"""
Saving model as ONNX.
:param args:
:param model:
:param data_config:
:param model_info:
:return:
"""
assert (args.export_onnx.endswith('.onnx'))
model_path = args.model_prefix
_logger.info('Exporting model %s to ONNX' % model_path)
model.load_state_dict(torch.load(model_path, map_location='cpu'))
model = model.cpu()
model.eval()
os.makedirs(os.path.dirname(args.export_onnx), exist_ok=True)
inputs = tuple(
torch.ones(model_info['input_shapes'][k], dtype=torch.float32) for k in model_info['input_names'])
torch.onnx.export(model, inputs, args.export_onnx,
input_names=model_info['input_names'],
output_names=model_info['output_names'],
dynamic_axes=model_info.get('dynamic_axes', None),
opset_version=13)
_logger.info('ONNX model saved to %s', args.export_onnx)
preprocessing_json = os.path.join(os.path.dirname(args.export_onnx), 'preprocess.json')
data_config.export_json(preprocessing_json)
_logger.info('Preprocessing parameters saved to %s', preprocessing_json)
def flops(model, model_info):
"""
Count FLOPs and params.
:param model:
:param model_info:
:return:
"""
from utils.flops_counter import get_model_complexity_info
import copy
model = copy.deepcopy(model).cpu()
model.eval()
inputs = tuple(
torch.ones(model_info['input_shapes'][k], dtype=torch.float32) for k in model_info['input_names'])
macs, params = get_model_complexity_info(model, inputs, as_strings=True, print_per_layer_stat=True, verbose=True)
_logger.info('{:<30} {:<8}'.format('Computational complexity: ', macs))
_logger.info('{:<30} {:<8}'.format('Number of parameters: ', params))
def profile(args, model, model_info, device):
"""
Profile.
    :param args:
    :param model:
    :param model_info:
    :param device:
:return:
"""
import copy
from torch.profiler import profile, record_function, ProfilerActivity
model = copy.deepcopy(model)
model = model.to(device)
model.eval()
inputs = tuple(torch.ones((args.batch_size,) + model_info['input_shapes'][k][1:], dtype=torch.float32).to(device) for k in model_info['input_names'])
for x in inputs:
print(x.shape, x.device)
def trace_handler(p):
output = p.key_averages().table(sort_by="self_cuda_time_total", row_limit=50)
print(output)
p.export_chrome_trace("/tmp/trace_" + str(p.step_num) + ".json")
with profile(
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
schedule=torch.profiler.schedule(
wait=2,
warmup=2,
active=6,
repeat=2),
on_trace_ready=trace_handler
) as p:
for idx in range(100):
model(*inputs)
p.step()
def optim(args, model, device):
"""
Optimizer and scheduler.
:param args:
:param model:
:return:
"""
optimizer_options = {k: ast.literal_eval(v) for k, v in args.optimizer_option}
_logger.info('Optimizer options: %s' % str(optimizer_options))
if args.optimizer == 'ranger':
from utils.nn.optimizer.ranger import Ranger
opt = Ranger(model.parameters(), lr=args.start_lr, **optimizer_options)
elif args.optimizer == 'adam':
opt = torch.optim.Adam(model.parameters(), lr=args.start_lr, **optimizer_options)
elif args.optimizer == 'adamW':
opt = torch.optim.AdamW(model.parameters(), lr=args.start_lr, **optimizer_options)
# load previous training and resume if `--load-epoch` is set
if args.load_epoch is not None:
_logger.info('Resume training from epoch %d' % args.load_epoch)
model_state = torch.load(args.model_prefix + '_epoch-%d_state.pt' % args.load_epoch, map_location=device)
model.load_state_dict(model_state)
opt_state = torch.load(args.model_prefix + '_epoch-%d_optimizer.pt' % args.load_epoch, map_location=device)
opt.load_state_dict(opt_state)
scheduler = None
if args.lr_finder is None:
if args.lr_scheduler == 'steps':
lr_step = round(args.num_epochs / 3)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
opt, milestones=[lr_step, 2 * lr_step], gamma=0.1,
last_epoch=-1 if args.load_epoch is None else args.load_epoch)
elif args.lr_scheduler == 'flat+decay':
lr_decay_epochs = max(1, int(args.num_epochs * 0.3))
lr_decay_rate = 0.01 ** (1. / lr_decay_epochs)
scheduler = torch.optim.lr_scheduler.MultiStepLR(opt, milestones=list(
range(args.num_epochs - lr_decay_epochs, args.num_epochs)), gamma=lr_decay_rate,
last_epoch=-1 if args.load_epoch is None else args.load_epoch)
elif args.lr_scheduler == 'flat+linear' or args.lr_scheduler == 'flat+cos':
total_steps = args.num_epochs * args.steps_per_epoch
flat_steps = total_steps * 0.7 - 1
min_factor = 0.001
def lr_fn(step_num):
if step_num > total_steps:
raise ValueError(
"Tried to step {} times. The specified number of total steps is {}".format(
step_num + 1, total_steps))
if step_num <= flat_steps:
return 1.0
pct = (step_num - flat_steps) / (total_steps - flat_steps)
if args.lr_scheduler == 'flat+linear':
return max(min_factor, 1 - pct)
else:
return max(min_factor, 0.5 * (math.cos(math.pi * pct) + 1))
scheduler = torch.optim.lr_scheduler.LambdaLR(
opt, lr_fn, last_epoch=-1 if args.load_epoch is None else args.load_epoch * args.steps_per_epoch)
scheduler._update_per_step = True # mark it to update the lr every step, instead of every epoch
elif args.lr_scheduler == 'one-cycle':
scheduler = torch.optim.lr_scheduler.OneCycleLR(
opt, max_lr=args.start_lr, epochs=args.num_epochs, steps_per_epoch=args.steps_per_epoch, pct_start=0.3,
anneal_strategy='cos', div_factor=25.0, last_epoch=-1 if args.load_epoch is None else args.load_epoch)
scheduler._update_per_step = True # mark it to update the lr every step, instead of every epoch
return opt, scheduler
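# Worked example (editor's addition, not part of the original source): with
# the default num_epochs=20, the 'flat+decay' branch uses lr_decay_epochs=6
# and lr_decay_rate = 0.01 ** (1/6) ~= 0.46, so the learning rate is scaled
# by ~0.46 at each of the last 6 epochs and ends roughly 100x below start-lr.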
def model_setup(args, data_config):
"""
Loads the model
:param args:
:param data_config:
:return: model, model_info, network_module, network_options
"""
network_module = import_module(args.network_config.replace('.py', '').replace('/', '.'))
network_options = {k: ast.literal_eval(v) for k, v in args.network_option}
_logger.info('Network options: %s' % str(network_options))
if args.export_onnx:
network_options['for_inference'] = True
if args.use_amp:
network_options['use_amp'] = True
model, model_info = network_module.get_model(data_config, **network_options)
# _logger.info(model)
flops(model, model_info)
return model, model_info, network_module, network_options
def iotest(args, data_loader):
"""
Io test
:param args:
:param data_loader:
:return:
"""
from tqdm.auto import tqdm
from collections import defaultdict
from utils.data.tools import _concat
_logger.info('Start running IO test')
monitor_info = defaultdict(list)
for X, y, Z in tqdm(data_loader):
for k, v in Z.items():
monitor_info[k].append(v.cpu().numpy())
monitor_info = {k: _concat(v) for k, v in monitor_info.items()}
if monitor_info:
monitor_output_path = 'weaver_monitor_info.pkl'
import pickle
with open(monitor_output_path, 'wb') as f:
pickle.dump(monitor_info, f)
_logger.info('Monitor info written to %s' % monitor_output_path)
def save_root(data_config, scores, labels, observers):
"""
Saves as .root
:param data_config:
:param scores:
    :param labels:
    :param observers:
:return:
"""
from utils.data.fileio import _write_root
output = {}
if args.regression_mode:
output[data_config.label_names[0]] = labels[data_config.label_names[0]]
output['output'] = scores
else:
for idx, label_name in enumerate(data_config.label_value):
output[label_name] = (labels[data_config.label_names[0]] == idx)
output['score_' + label_name] = scores[:, idx]
for k, v in labels.items():
if k == data_config.label_names[0]:
continue
if v.ndim > 1:
_logger.warning('Ignoring %s, not a 1d array.', k)
continue
output[k] = v
for k, v in observers.items():
if v.ndim > 1:
_logger.warning('Ignoring %s, not a 1d array.', k)
continue
output[k] = v
_write_root(args.predict_output, output)
def save_awk(scores, labels, observers):
"""
Saves as .awkd
:param scores:
:param labels:
:param observers:
:return:
"""
from utils.data.tools import awkward
output = {'scores': scores}
output.update(labels)
output.update(observers)
name_remap = {}
arraynames = list(output)
for i in range(len(arraynames)):
for j in range(i + 1, len(arraynames)):
if arraynames[i].startswith(arraynames[j]):
name_remap[arraynames[j]] = '%s_%d' % (arraynames[j], len(name_remap))
if arraynames[j].startswith(arraynames[i]):
name_remap[arraynames[i]] = '%s_%d' % (arraynames[i], len(name_remap))
_logger.info('Renamed the following variables in the output file: %s', str(name_remap))
output = {name_remap[k] if k in name_remap else k: v for k, v in output.items()}
awkward.save(args.predict_output, output, mode='w')
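# Editor's note (not part of the original source): the renaming loop above
# avoids prefix clashes when saving with awkward; e.g. given arrays 'jet' and
# 'jet_pt', 'jet' is renamed to 'jet_0' because 'jet_pt' starts with it.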
def main(args):
_logger.info(args)
if args.file_fraction < 1:
_logger.warning('Use of `file-fraction` is not recommended in general -- prefer using `data-fraction` instead.')
# classification/regression mode
if args.regression_mode:
_logger.info('Running in regression mode')
from utils.nn.tools import train_regression as train
from utils.nn.tools import evaluate_regression as evaluate
else:
_logger.info('Running in classification mode')
from utils.nn.tools import train_classification as train
from utils.nn.tools import evaluate_classification as evaluate
# training/testing mode
training_mode = not args.predict
# device
if args.gpus:
gpus = [int(i) for i in args.gpus.split(',')]
dev = torch.device(gpus[0])
else:
gpus = None
dev = torch.device('cpu')
# load data
if training_mode:
train_loader, val_loader, data_config, train_input_names, train_label_names = train_load(args)
else:
test_loader, data_config = test_load(args)
if args.io_test:
data_loader = train_loader if training_mode else test_loader
iotest(args, data_loader)
return
model, model_info, network_module, network_options = model_setup(args, data_config)
if args.print:
return
if args.profile:
profile(args, model, model_info, device=dev)
return
# export to ONNX
if args.export_onnx:
onnx(args, model, data_config, model_info)
return
# note: we should always save/load the state_dict of the original model, not the one wrapped by nn.DataParallel
# so we do not convert it to nn.DataParallel now
orig_model = model
if training_mode:
model = orig_model.to(dev)
# loss function
try:
loss_func = network_module.get_loss(data_config, **network_options)
_logger.info('Using loss function %s with options %s' % (loss_func, network_options))
except AttributeError:
loss_func = torch.nn.CrossEntropyLoss()
_logger.warning('Loss function not defined in %s. Will use `torch.nn.CrossEntropyLoss()` by default.',
args.network_config)
# optimizer & learning rate
opt, scheduler = optim(args, model, dev)
# multi-gpu
if gpus is not None and len(gpus) > 1:
# model becomes `torch.nn.DataParallel` w/ model.module being the original `torch.nn.Module`
model = torch.nn.DataParallel(model, device_ids=gpus)
model = model.to(dev)
# lr finder: keep it after all other setups
if args.lr_finder is not None:
start_lr, end_lr, num_iter = args.lr_finder.replace(' ', '').split(',')
from utils.lr_finder import LRFinder
lr_finder = LRFinder(model, opt, loss_func, device=dev, input_names=train_input_names,
label_names=train_label_names)
lr_finder.range_test(train_loader, start_lr=float(start_lr), end_lr=float(end_lr), num_iter=int(num_iter))
lr_finder.plot(output='lr_finder.png') # to inspect the loss-learning rate graph
return
if args.use_amp:
from torch.cuda.amp import GradScaler
scaler = GradScaler()
else:
scaler = None
# training loop
training_losses = []
validation_losses = []
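        # lower is better for the regression metric, higher is better for the
        # classification metric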
best_valid_metric = np.inf if args.regression_mode else 0
for epoch in range(args.num_epochs):
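            # when resuming, skip epochs up to and including the last loaded one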
if args.load_epoch is not None:
if epoch <= args.load_epoch:
continue
print('-' * 50)
_logger.info('Epoch #%d training' % epoch)
training_losses.append(train(model, loss_func, opt, scheduler, train_loader, dev, steps_per_epoch=args.steps_per_epoch, grad_scaler=scaler))
if args.model_prefix:
dirname = os.path.dirname(args.model_prefix)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
state_dict = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) else model.state_dict()
torch.save(state_dict, args.model_prefix + '_epoch-%d_state.pt' % epoch)
torch.save(opt.state_dict(), args.model_prefix + '_epoch-%d_optimizer.pt' % epoch)
_logger.info('Epoch #%d validating' % epoch)
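            # validate on a number of steps scaled by the val/train split
            # ratio, so train and validation cover comparable dataset fractions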
valid_metric, valid_loss = evaluate(model, val_loader, dev, loss_func=loss_func,
steps_per_epoch=None if args.steps_per_epoch is None else
round(args.steps_per_epoch * (1 - args.train_val_split) / args.train_val_split))
validation_losses.append(valid_loss)
            is_best_epoch = ((valid_metric < best_valid_metric)
                             if args.regression_mode
                             else (valid_metric > best_valid_metric))
if is_best_epoch:
best_valid_metric = valid_metric
if args.model_prefix:
shutil.copy2(args.model_prefix + '_epoch-%d_state.pt' %
epoch, args.model_prefix + '_best_epoch_state.pt')
torch.save(model, args.model_prefix + '_best_epoch_full.pt')
_logger.info('Epoch #%d: Current validation metric: %.5f (best: %.5f)' %
(epoch, valid_metric, best_valid_metric))
np.savetxt(args.model_prefix + "_training_losses.txt", training_losses)
np.savetxt(args.model_prefix + "_validation_losses.txt", validation_losses)
if args.data_test:
model = orig_model.to(dev)
if training_mode:
del train_loader, val_loader
test_loader, data_config = test_load(args)
# run prediction
if args.model_prefix.endswith('.onnx'):
_logger.info('Loading model %s for eval' % args.model_prefix)
from utils.nn.tools import evaluate_onnx
test_metric, scores, labels, observers = evaluate_onnx(args.model_prefix, test_loader)
else:
model_path = args.model_prefix if args.model_prefix.endswith(
'.pt') else args.model_prefix + '_best_epoch_state.pt'
_logger.info('Loading model %s for eval' % model_path)
model.load_state_dict(torch.load(model_path, map_location=dev))
if gpus is not None and len(gpus) > 1:
                # wrap in DataParallel for multi-GPU prediction
model = torch.nn.DataParallel(model, device_ids=gpus)
model = model.to(dev)
test_metric, scores, labels, observers = evaluate(model, test_loader, dev, for_training=False)
_logger.info('Test metric %.5f' % test_metric)
if args.predict_output:
if '/' not in args.predict_output:
args.predict_output = os.path.join(
os.path.dirname(args.model_prefix),
'predict_output', args.predict_output)
os.makedirs(os.path.dirname(args.predict_output), exist_ok=True)
if args.predict_output.endswith('.root'):
save_root(data_config, scores, labels, observers)
else:
save_awk(scores, labels, observers)
_logger.info('Written output to %s' % args.predict_output)
if __name__ == '__main__':
args = parser.parse_args()
if '{auto}' in args.model_prefix or '{auto}' in args.log:
import hashlib
import time
model_name = time.strftime('%Y%m%d-%H%M%S') + "_" + os.path.basename(args.network_config).replace('.py', '')
if len(args.network_option):
model_name = model_name + "_" + hashlib.md5(str(args.network_option).encode('utf-8')).hexdigest()
model_name += '_{optim}_lr{lr}_batch{batch}'.format(lr=args.start_lr,
optim=args.optimizer, batch=args.batch_size)
args._auto_model_name = model_name
args.model_prefix = args.model_prefix.replace('{auto}', model_name)
args.log = args.log.replace('{auto}', model_name)
print('Using auto-generated model prefix %s' % args.model_prefix)
_configLogger('weaver', filename=args.log)
main(args)
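# Illustrative invocation (flag spellings are assumptions inferred from the
# argument attributes used above; check the parser definition for the exact
# names):
#   python train.py --network-config networks/my_net.py --model-prefix 'models/{auto}' \
#       --gpus 0 --batch-size 128 --start-lr 1e-3 --num-epochs 20 --optimizer adam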
| 47.156704
| 184
| 0.631792
|
7949185859dee40d96b037de0e0db54b681b0f07
| 4,263
|
py
|
Python
|
experiments/tuning/300units_1.py
|
samuilstoychev/research_project
|
897bde82471ef92ded396aa31d91ec19826d4ce2
|
[
"MIT"
] | null | null | null |
experiments/tuning/300units_1.py
|
samuilstoychev/research_project
|
897bde82471ef92ded396aa31d91ec19826d4ce2
|
[
"MIT"
] | null | null | null |
experiments/tuning/300units_1.py
|
samuilstoychev/research_project
|
897bde82471ef92ded396aa31d91ec19826d4ce2
|
[
"MIT"
] | null | null | null |
RAM AT BEGINNING: 0.2237396240234375
Latent replay turned on
CUDA is used
RAM BEFORE LOADING DATA: 0.22828292846679688
Preparing the data...
SPLIT RATIO: [50000, 10000]
--> mnist: 'train'-dataset consisting of 60000 samples
--> mnist: 'test'-dataset consisting of 10000 samples
RAM AFTER LOADING DATA: 0.2893791198730469
RAM BEFORE CLASSIFER: 2.2489891052246094
RAM AFTER CLASSIFER: 2.2489891052246094
RAM BEFORE PRE-TRAINING 2.2489891052246094
RAM AFTER PRE-TRAINING 2.2645950317382812
RAM BEFORE GENERATOR: 2.2645950317382812
RAM AFTER DECLARING GENERATOR: 2.2645950317382812
MACs of root classifier 772000
MACs of top classifier: 39680
RAM BEFORE REPORTING: 2.2645950317382812
Parameter-stamp...
--> task: splitMNIST5-task
--> model: CNN_CLASSIFIER_c10
--> hyper-params: i500-lr0.001-b128-adam
--> replay: generative-VAE(MLP([300, 300, 300])--z100-c10)
splitMNIST5-task--CNN_CLASSIFIER_c10--i500-lr0.001-b128-adam--generative-VAE(MLP([300, 300, 300])--z100-c10)-s24203
----------------------------------------TOP----------------------------------------
CNNTopClassifier(
(dropout2): Dropout(p=0.5, inplace=False)
(fc1): Linear(in_features=300, out_features=128, bias=True)
(fc2): Linear(in_features=128, out_features=10, bias=True)
)
------------------------------------------------------------------------------------------
--> this network has 39818 parameters (~0.0 million)
of which: - learnable: 39818 (~0.0 million)
- fixed: 0 (~0.0 million)
------------------------------------------------------------------------------------------
----------------------------------------ROOT----------------------------------------
CNNRootClassifier(
(conv1): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1))
(conv2): Conv2d(10, 10, kernel_size=(5, 5), stride=(1, 1))
(dropout1): Dropout(p=0.25, inplace=False)
(fc0): Linear(in_features=1440, out_features=300, bias=True)
)
------------------------------------------------------------------------------------------
--> this network has 435070 parameters (~0.4 million)
of which: - learnable: 435070 (~0.4 million)
- fixed: 0 (~0.0 million)
------------------------------------------------------------------------------------------
----------------------------------------GENERATOR----------------------------------------
AutoEncoderLatent(
(fcE): MLP(
(fcLayer1): fc_layer(
(linear): LinearExcitability(in_features=300, out_features=300)
(nl): ReLU()
)
(fcLayer2): fc_layer(
(linear): LinearExcitability(in_features=300, out_features=300)
(nl): ReLU()
)
)
(toZ): fc_layer_split(
(mean): fc_layer(
(linear): LinearExcitability(in_features=300, out_features=100)
)
(logvar): fc_layer(
(linear): LinearExcitability(in_features=300, out_features=100)
)
)
(classifier): fc_layer(
(linear): LinearExcitability(in_features=300, out_features=10)
)
(fromZ): fc_layer(
(linear): LinearExcitability(in_features=100, out_features=300)
(nl): ReLU()
)
(fcD): MLP(
(fcLayer1): fc_layer(
(linear): LinearExcitability(in_features=300, out_features=300)
(nl): ReLU()
)
(fcLayer2): fc_layer(
(linear): LinearExcitability(in_features=300, out_features=300)
(nl): Sigmoid()
)
)
)
------------------------------------------------------------------------------------------
--> this network has 454610 parameters (~0.5 million)
of which: - learnable: 454610 (~0.5 million)
- fixed: 0 (~0.0 million)
------------------------------------------------------------------------------------------
RAM BEFORE TRAINING: 2.2645950317382812
CPU BEFORE TRAINING: (28.01, 3.03)
INITIALISING GPU TRACKER
Training...
PEAK TRAINING RAM: 2.266864776611328
Peak mem and init mem: 979 953
GPU BEFORE EVALUATION: (10.428571428571429, 26)
RAM BEFORE EVALUATION: 2.266864776611328
CPU BEFORE EVALUATION: (123.95, 5.44)
EVALUATION RESULTS:
Precision on test-set:
- Task 1: 0.9960
- Task 2: 0.9986
- Task 3: 0.9933
- Task 4: 1.0000
- Task 5: 0.9927
=> Average precision over all 5 tasks: 0.9961
=> Total training time = 68.5 seconds
RAM AT THE END: 2.266864776611328
CPU AT THE END: (125.66, 5.45)
| 34.942623
| 115
| 0.562515
|
7949187d6b1ecb72ec6f8c54919298df96f935e4
| 159
|
py
|
Python
|
tests/urls.py
|
xncbf/django-dynamodb-cache
|
be6d1b4b8e92d581041043bcd694f2a9f00ee386
|
[
"MIT"
] | 21
|
2022-02-16T10:18:24.000Z
|
2022-03-31T23:40:06.000Z
|
tests/urls.py
|
xncbf/django-dynamodb-cache
|
be6d1b4b8e92d581041043bcd694f2a9f00ee386
|
[
"MIT"
] | 9
|
2022-03-01T06:40:59.000Z
|
2022-03-26T08:12:31.000Z
|
tests/urls.py
|
xncbf/django-dynamodb-cache
|
be6d1b4b8e92d581041043bcd694f2a9f00ee386
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
from django.urls import path
def test(request):
return HttpResponse(b"teste")
urlpatterns = [path("test", test)]
| 14.454545
| 36
| 0.735849
|
794918dbd64c1ce58555d4bc0b1262c7a724d058
| 196
|
py
|
Python
|
October Long Challenge/threeBoxes.py
|
dibyanshushekhardey/Coding-Practice
|
885d87212221083f890c334e0fe0b70510ef9c37
|
[
"MIT"
] | null | null | null |
October Long Challenge/threeBoxes.py
|
dibyanshushekhardey/Coding-Practice
|
885d87212221083f890c334e0fe0b70510ef9c37
|
[
"MIT"
] | null | null | null |
October Long Challenge/threeBoxes.py
|
dibyanshushekhardey/Coding-Practice
|
885d87212221083f890c334e0fe0b70510ef9c37
|
[
"MIT"
] | null | null | null |
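# Minimum number of boxes needed to pack three items of sizes a, b and c into
# boxes of capacity d, assuming each item fits in a box on its own
# (d >= max(a, b, c)).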
t = int(input())
for i in range(t):
a, b, c, d = list(map(int, input().split()))
if a + b + c <= d:
print("1")
elif a + b <= d:
print("2")
else:
print("3")
| 19.6
| 48
| 0.408163
|
794919d05dd37bd20b557a1334970d99f3ecbebe
| 11,164
|
py
|
Python
|
python/tests/unit/test_api_tasks_unit.py
|
mardim91/airship-drydock
|
bb4e96bb7411f8ffdd39ea07914ee0d9a8f10fdd
|
[
"Apache-2.0"
] | null | null | null |
python/tests/unit/test_api_tasks_unit.py
|
mardim91/airship-drydock
|
bb4e96bb7411f8ffdd39ea07914ee0d9a8f10fdd
|
[
"Apache-2.0"
] | null | null | null |
python/tests/unit/test_api_tasks_unit.py
|
mardim91/airship-drydock
|
bb4e96bb7411f8ffdd39ea07914ee0d9a8f10fdd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Tasks API"""
from falcon import testing
from unittest.mock import Mock
import pytest
import json
import logging
from drydock_provisioner import policy
from drydock_provisioner.control.api import start_api
import drydock_provisioner.objects as objects
import drydock_provisioner.objects.fields as hd_fields
import falcon
LOG = logging.getLogger(__name__)
class TestTasksApiUnit(object):
def test_get_tasks_id_resp(self, falcontest):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111111'
hdr = self.get_standard_header()
result = falcontest.simulate_get(url, headers=hdr)
assert result.status == falcon.HTTP_200
response_json = json.loads(result.text)
assert response_json[
'task_id'] == '11111111-1111-1111-1111-111111111111'
try:
response_json['build_data']
key_error = False
        except KeyError:
key_error = True
assert key_error
try:
response_json['subtask_errors']
key_error = False
        except KeyError:
key_error = True
assert key_error
def test_get_tasks_id_subtaskerror_noerrors_resp(self, falcontest):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111111'
hdr = self.get_standard_header()
result = falcontest.simulate_get(
url, headers=hdr, query_string='subtaskerrors=true')
assert result.status == falcon.HTTP_200
response_json = json.loads(result.text)
assert response_json[
'task_id'] == '11111111-1111-1111-1111-111111111111'
assert response_json['subtask_errors'] == {}
def test_get_tasks_id_subtaskerror_errors_resp(self, falcontest):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111113'
hdr = self.get_standard_header()
result = falcontest.simulate_get(
url, headers=hdr, query_string='subtaskerrors=true')
assert result.status == falcon.HTTP_200
response_json = json.loads(result.text)
assert response_json[
'task_id'] == '11111111-1111-1111-1111-111111111113'
assert response_json['subtask_errors'][
'11111111-1111-1111-1111-111111111116']['details'][
'errorCount'] == 1
def test_get_tasks_id_builddata_resp(self, falcontest):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111111'
hdr = self.get_standard_header()
result = falcontest.simulate_get(
url, headers=hdr, query_string='builddata=true')
LOG.debug(result.text)
assert result.status == falcon.HTTP_200
response_json = json.loads(result.text)
assert response_json['build_data']
try:
response_json['subtask_errors']
key_error = False
        except KeyError:
key_error = True
assert key_error
def test_get_tasks_id_builddata_subtaskerrors_resp(self, falcontest):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111111'
hdr = self.get_standard_header()
result = falcontest.simulate_get(
url, headers=hdr, query_string='builddata=true&subtaskerrors=true')
LOG.debug(result.text)
assert result.status == falcon.HTTP_200
response_json = json.loads(result.text)
assert response_json['build_data']
assert response_json['subtask_errors'] == {}
def test_get_tasks_id_layers_resp(self, falcontest):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111113'
hdr = self.get_standard_header()
result = falcontest.simulate_get(
url, headers=hdr, query_string='layers=2')
LOG.debug(result.text)
assert result.status == falcon.HTTP_200
response_json = json.loads(result.text)
init_task_id = '11111111-1111-1111-1111-111111111113'
sub_task_id_1 = '11111111-1111-1111-1111-111111111114'
sub_task_id_2 = '11111111-1111-1111-1111-111111111115'
assert response_json['init_task_id'] == init_task_id
assert response_json[init_task_id]['task_id'] == init_task_id
assert response_json[sub_task_id_1]['task_id'] == sub_task_id_1
assert response_json[sub_task_id_2]['task_id'] == sub_task_id_2
try:
response_json['11111111-1111-1111-1111-111111111116']
key_error = False
        except KeyError:
key_error = True
assert key_error
def test_get_tasks_id_layers_all_noerrors_resp(self, falcontest):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111113'
hdr = self.get_standard_header()
result = falcontest.simulate_get(
url, headers=hdr, query_string='layers=-1')
LOG.debug(result.text)
assert result.status == falcon.HTTP_200
response_json = json.loads(result.text)
init_task_id = '11111111-1111-1111-1111-111111111113'
sub_task_id_1 = '11111111-1111-1111-1111-111111111114'
sub_task_id_2 = '11111111-1111-1111-1111-111111111115'
assert response_json['init_task_id'] == init_task_id
assert response_json[init_task_id]['task_id'] == init_task_id
assert response_json[sub_task_id_1]['task_id'] == sub_task_id_1
assert response_json[sub_task_id_2]['task_id'] == sub_task_id_2
try:
response_json['11111111-1111-1111-1111-111111111116']
key_error = False
        except KeyError:
key_error = True
assert key_error is False
try:
response_json['subtask_errors']
key_error = False
        except KeyError:
key_error = True
assert key_error
def test_get_tasks_id_layers_all_errors_resp(self, falcontest):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111113'
hdr = self.get_standard_header()
result = falcontest.simulate_get(
url, headers=hdr, query_string='layers=-1&subtaskerrors=true')
LOG.debug(result.text)
assert result.status == falcon.HTTP_200
response_json = json.loads(result.text)
init_task_id = '11111111-1111-1111-1111-111111111113'
sub_task_id_1 = '11111111-1111-1111-1111-111111111114'
sub_task_id_2 = '11111111-1111-1111-1111-111111111115'
assert response_json['init_task_id'] == init_task_id
assert response_json[init_task_id]['task_id'] == init_task_id
assert response_json[sub_task_id_1]['task_id'] == sub_task_id_1
assert response_json[sub_task_id_2]['task_id'] == sub_task_id_2
try:
response_json['11111111-1111-1111-1111-111111111116']
key_error = False
        except KeyError:
key_error = True
assert key_error is False
assert response_json['subtask_errors'][
'11111111-1111-1111-1111-111111111116']['details'][
'errorCount'] == 1
def test_input_not_found(self, falcontest):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111112'
hdr = self.get_standard_header()
result = falcontest.simulate_get(url, headers=hdr)
LOG.debug(result.text)
assert result.status == falcon.HTTP_404
@pytest.fixture()
def falcontest(self, drydock_state, deckhand_ingester,
deckhand_orchestrator, mock_get_build_data, mock_get_task):
"""Create a test harness for the Falcon API framework."""
policy.policy_engine = policy.DrydockPolicy()
policy.policy_engine.register_policy()
return testing.TestClient(
start_api(
state_manager=drydock_state,
ingester=deckhand_ingester,
orchestrator=deckhand_orchestrator))
def get_standard_header(self):
hdr = {
'Content-Type': 'application/json',
'X-IDENTITY-STATUS': 'Confirmed',
'X-USER-NAME': 'Test',
'X-ROLES': 'admin'
}
return hdr
@pytest.fixture()
def mock_get_task(drydock_state):
def side_effect(*args):
task_id = str(args[0])
LOG.debug(task_id)
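        # Simulated task tree used by the tests above:
        #   ...1111: basic failed task        ...1112: not found
        #   ...1113 -> [...1114, ...1115]
        #   ...1115 -> [...1116 (failed), ...1117 (unknown, returns None)]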
# Basic task
if task_id == '11111111-1111-1111-1111-111111111111':
new_task = objects.Task()
new_task.task_id = '11111111-1111-1111-1111-111111111111'
new_task.result = objects.TaskStatus()
new_task.result.set_status(hd_fields.ActionResult.Failure)
new_task.result.add_status_msg(
msg='Test', error=True, ctx_type='N/A', ctx='N/A')
return new_task
# Task not found
if task_id == '11111111-1111-1111-1111-111111111112':
return None
# Task layers
if task_id == '11111111-1111-1111-1111-111111111113':
new_task = objects.Task()
new_task.task_id = '11111111-1111-1111-1111-111111111113'
new_task.subtask_id_list = [
'11111111-1111-1111-1111-111111111114',
'11111111-1111-1111-1111-111111111115'
]
return new_task
if task_id == '11111111-1111-1111-1111-111111111114':
new_task = objects.Task()
new_task.task_id = '11111111-1111-1111-1111-111111111114'
return new_task
if task_id == '11111111-1111-1111-1111-111111111115':
new_task = objects.Task()
new_task.task_id = '11111111-1111-1111-1111-111111111115'
new_task.subtask_id_list = [
'11111111-1111-1111-1111-111111111116',
'11111111-1111-1111-1111-111111111117'
]
return new_task
if task_id == '11111111-1111-1111-1111-111111111116':
new_task = objects.Task()
new_task.task_id = '11111111-1111-1111-1111-111111111116'
new_task.result = objects.TaskStatus()
new_task.result.set_status(hd_fields.ActionResult.Failure)
new_task.result.add_status_msg(
msg='Test', error=True, ctx_type='N/A', ctx='N/A')
LOG.debug('error_count')
LOG.debug(new_task.result.error_count)
return new_task
LOG.debug('returning None')
return None
drydock_state.real_get_task = drydock_state.get_task
drydock_state.get_task = Mock(side_effect=side_effect)
yield
drydock_state.get_task = Mock(wraps=None, side_effect=None)
drydock_state.get_task = drydock_state.real_get_task
| 39.17193
| 79
| 0.650842
|
794919f6942e169c4c08945014f8deb55c3a758f
| 47,223
|
py
|
Python
|
sklearn/pipeline.py
|
MrinalTyagi/scikit-learn
|
6192d1fbdb84b1f775c0242a8fea4c6481085cff
|
[
"BSD-3-Clause"
] | 50,961
|
2015-01-01T06:06:31.000Z
|
2022-03-31T23:40:12.000Z
|
sklearn/pipeline.py
|
ashutoshpatelofficial/scikit-learn
|
2fc9187879424556726d9345a6656884fa9fbc20
|
[
"BSD-3-Clause"
] | 17,065
|
2015-01-01T02:01:58.000Z
|
2022-03-31T23:48:34.000Z
|
sklearn/pipeline.py
|
ashutoshpatelofficial/scikit-learn
|
2fc9187879424556726d9345a6656884fa9fbc20
|
[
"BSD-3-Clause"
] | 26,886
|
2015-01-01T00:59:27.000Z
|
2022-03-31T18:03:23.000Z
|
"""
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# License: BSD
from collections import defaultdict
from itertools import islice
import numpy as np
from scipy import sparse
from joblib import Parallel
from .base import clone, TransformerMixin
from .preprocessing import FunctionTransformer
from .utils._estimator_html_repr import _VisualBlock
from .utils.metaestimators import available_if
from .utils import (
Bunch,
_print_elapsed_time,
)
from .utils.deprecation import deprecated
from .utils._tags import _safe_tags
from .utils.validation import check_memory
from .utils.validation import check_is_fitted
from .utils.fixes import delayed
from .exceptions import NotFittedError
from .utils.metaestimators import _BaseComposition
__all__ = ["Pipeline", "FeatureUnion", "make_pipeline", "make_union"]
def _final_estimator_has(attr):
"""Check that final_estimator has `attr`.
    Used together with `available_if` in `Pipeline`."""
def check(self):
# raise original `AttributeError` if `attr` does not exist
getattr(self._final_estimator, attr)
return True
return check
class Pipeline(_BaseComposition):
"""
Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement `fit` and `transform` methods.
The final estimator only needs to implement `fit`.
The transformers in the pipeline can be cached using ``memory`` argument.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters. For this, it
enables setting parameters of the various steps using their names and the
parameter name separated by a `'__'`, as in the example below. A step's
estimator may be replaced entirely by setting the parameter with its name
to another estimator, or a transformer removed by setting it to
`'passthrough'` or `None`.
Read more in the :ref:`User Guide <pipeline>`.
.. versionadded:: 0.5
Parameters
----------
steps : list of tuple
List of (name, transform) tuples (implementing `fit`/`transform`) that
are chained, in the order in which they are chained, with the last
object an estimator.
memory : str or object with the joblib.Memory interface, default=None
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
verbose : bool, default=False
If True, the time elapsed while fitting each step will be printed as it
is completed.
Attributes
----------
named_steps : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
classes_ : ndarray of shape (n_classes,)
The classes labels. Only exist if the last step of the pipeline is a
classifier.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying first estimator in `steps` exposes such an attribute
when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
.. versionadded:: 1.0
See Also
--------
make_pipeline : Convenience function for simplified pipeline construction.
Examples
--------
>>> from sklearn.svm import SVC
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.pipeline import Pipeline
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=0)
>>> pipe = Pipeline([('scaler', StandardScaler()), ('svc', SVC())])
>>> # The pipeline can be used as any other estimator
>>> # and avoids leaking the test set into the train set
>>> pipe.fit(X_train, y_train)
Pipeline(steps=[('scaler', StandardScaler()), ('svc', SVC())])
>>> pipe.score(X_test, y_test)
0.88
"""
# BaseEstimator interface
_required_parameters = ["steps"]
def __init__(self, steps, *, memory=None, verbose=False):
self.steps = steps
self.memory = memory
self.verbose = verbose
def get_params(self, deep=True):
"""Get parameters for this estimator.
Returns the parameters given in the constructor as well as the
estimators contained within the `steps` of the `Pipeline`.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return self._get_params("steps", deep=deep)
def set_params(self, **kwargs):
"""Set the parameters of this estimator.
Valid parameter keys can be listed with ``get_params()``. Note that
you can directly set the parameters of the estimators contained in
`steps`.
Parameters
----------
**kwargs : dict
Parameters of this estimator or parameters of estimators contained
in `steps`. Parameters of the steps may be set using its name and
the parameter name separated by a '__'.
Returns
-------
self : object
Pipeline class instance.
"""
self._set_params("steps", **kwargs)
return self
def _validate_steps(self):
names, estimators = zip(*self.steps)
# validate names
self._validate_names(names)
# validate estimators
transformers = estimators[:-1]
estimator = estimators[-1]
for t in transformers:
if t is None or t == "passthrough":
continue
if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(
t, "transform"
):
raise TypeError(
"All intermediate steps should be "
"transformers and implement fit and transform "
"or be the string 'passthrough' "
"'%s' (type %s) doesn't" % (t, type(t))
)
# We allow last estimator to be None as an identity transformation
if (
estimator is not None
and estimator != "passthrough"
and not hasattr(estimator, "fit")
):
raise TypeError(
"Last step of Pipeline should implement fit "
"or be the string 'passthrough'. "
"'%s' (type %s) doesn't" % (estimator, type(estimator))
)
def _iter(self, with_final=True, filter_passthrough=True):
"""
Generate (idx, (name, trans)) tuples from self.steps
When filter_passthrough is True, 'passthrough' and None transformers
are filtered out.
"""
stop = len(self.steps)
if not with_final:
stop -= 1
for idx, (name, trans) in enumerate(islice(self.steps, 0, stop)):
if not filter_passthrough:
yield idx, name, trans
elif trans is not None and trans != "passthrough":
yield idx, name, trans
def __len__(self):
"""
Returns the length of the Pipeline
"""
return len(self.steps)
def __getitem__(self, ind):
"""Returns a sub-pipeline or a single estimator in the pipeline
Indexing with an integer will return an estimator; using a slice
returns another Pipeline instance which copies a slice of this
Pipeline. This copy is shallow: modifying (or fitting) estimators in
the sub-pipeline will affect the larger pipeline and vice-versa.
However, replacing a value in `step` will not affect a copy.
"""
if isinstance(ind, slice):
if ind.step not in (1, None):
raise ValueError("Pipeline slicing only supports a step of 1")
return self.__class__(
self.steps[ind], memory=self.memory, verbose=self.verbose
)
try:
name, est = self.steps[ind]
except TypeError:
# Not an int, try get step by name
return self.named_steps[ind]
return est
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
@property
def named_steps(self):
"""Access the steps by name.
Read-only attribute to access any step by given name.
Keys are steps names and values are the steps objects."""
# Use Bunch object to improve autocomplete
return Bunch(**dict(self.steps))
@property
def _final_estimator(self):
estimator = self.steps[-1][1]
return "passthrough" if estimator is None else estimator
def _log_message(self, step_idx):
if not self.verbose:
return None
name, _ = self.steps[step_idx]
return "(step %d of %d) Processing %s" % (step_idx + 1, len(self.steps), name)
def _check_fit_params(self, **fit_params):
fit_params_steps = {name: {} for name, step in self.steps if step is not None}
for pname, pval in fit_params.items():
if "__" not in pname:
raise ValueError(
"Pipeline.fit does not accept the {} parameter. "
"You can pass parameters to specific steps of your "
"pipeline using the stepname__parameter format, e.g. "
"`Pipeline.fit(X, y, logisticregression__sample_weight"
"=sample_weight)`.".format(pname)
)
step, param = pname.split("__", 1)
fit_params_steps[step][param] = pval
return fit_params_steps
# Estimator interface
def _fit(self, X, y=None, **fit_params_steps):
# shallow copy of steps - this should really be steps_
self.steps = list(self.steps)
self._validate_steps()
# Setup the memory
memory = check_memory(self.memory)
fit_transform_one_cached = memory.cache(_fit_transform_one)
for (step_idx, name, transformer) in self._iter(
with_final=False, filter_passthrough=False
):
if transformer is None or transformer == "passthrough":
with _print_elapsed_time("Pipeline", self._log_message(step_idx)):
continue
if hasattr(memory, "location"):
# joblib >= 0.12
if memory.location is None:
# we do not clone when caching is disabled to
# preserve backward compatibility
cloned_transformer = transformer
else:
cloned_transformer = clone(transformer)
elif hasattr(memory, "cachedir"):
# joblib < 0.11
if memory.cachedir is None:
# we do not clone when caching is disabled to
# preserve backward compatibility
cloned_transformer = transformer
else:
cloned_transformer = clone(transformer)
else:
cloned_transformer = clone(transformer)
# Fit or load from cache the current transformer
X, fitted_transformer = fit_transform_one_cached(
cloned_transformer,
X,
y,
None,
message_clsname="Pipeline",
message=self._log_message(step_idx),
**fit_params_steps[name],
)
# Replace the transformer of the step with the fitted
# transformer. This is necessary when loading the transformer
# from the cache.
self.steps[step_idx] = (name, fitted_transformer)
return X
def fit(self, X, y=None, **fit_params):
"""Fit the model.
Fit all the transformers one after the other and transform the
data. Finally, fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
self : object
Pipeline with fitted steps.
"""
fit_params_steps = self._check_fit_params(**fit_params)
Xt = self._fit(X, y, **fit_params_steps)
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
if self._final_estimator != "passthrough":
fit_params_last_step = fit_params_steps[self.steps[-1][0]]
self._final_estimator.fit(Xt, y, **fit_params_last_step)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit the model and transform with the final estimator.
Fits all the transformers one after the other and transform the
data. Then uses `fit_transform` on transformed data with the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
Xt : ndarray of shape (n_samples, n_transformed_features)
Transformed samples.
"""
fit_params_steps = self._check_fit_params(**fit_params)
Xt = self._fit(X, y, **fit_params_steps)
last_step = self._final_estimator
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
if last_step == "passthrough":
return Xt
fit_params_last_step = fit_params_steps[self.steps[-1][0]]
if hasattr(last_step, "fit_transform"):
return last_step.fit_transform(Xt, y, **fit_params_last_step)
else:
return last_step.fit(Xt, y, **fit_params_last_step).transform(Xt)
@available_if(_final_estimator_has("predict"))
def predict(self, X, **predict_params):
"""Transform the data, and apply `predict` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls `predict`
method. Only valid if the final estimator implements `predict`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
**predict_params : dict of string -> object
Parameters to the ``predict`` called at the end of all
transformations in the pipeline. Note that while this may be
used to return uncertainties from some models with return_std
or return_cov, uncertainties that are generated by the
transformations in the pipeline are not propagated to the
final estimator.
.. versionadded:: 0.20
Returns
-------
y_pred : ndarray
Result of calling `predict` on the final estimator.
"""
Xt = X
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt)
return self.steps[-1][1].predict(Xt, **predict_params)
@available_if(_final_estimator_has("fit_predict"))
def fit_predict(self, X, y=None, **fit_params):
"""Transform the data, and apply `fit_predict` with the final estimator.
Call `fit_transform` of each transformer in the pipeline. The
transformed data are finally passed to the final estimator that calls
`fit_predict` method. Only valid if the final estimator implements
`fit_predict`.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
y_pred : ndarray
Result of calling `fit_predict` on the final estimator.
"""
fit_params_steps = self._check_fit_params(**fit_params)
Xt = self._fit(X, y, **fit_params_steps)
fit_params_last_step = fit_params_steps[self.steps[-1][0]]
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
y_pred = self.steps[-1][1].fit_predict(Xt, y, **fit_params_last_step)
return y_pred
@available_if(_final_estimator_has("predict_proba"))
def predict_proba(self, X, **predict_proba_params):
"""Transform the data, and apply `predict_proba` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`predict_proba` method. Only valid if the final estimator implements
`predict_proba`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
**predict_proba_params : dict of string -> object
Parameters to the `predict_proba` called at the end of all
transformations in the pipeline.
Returns
-------
y_proba : ndarray of shape (n_samples, n_classes)
Result of calling `predict_proba` on the final estimator.
"""
Xt = X
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt)
return self.steps[-1][1].predict_proba(Xt, **predict_proba_params)
@available_if(_final_estimator_has("decision_function"))
def decision_function(self, X):
"""Transform the data, and apply `decision_function` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`decision_function` method. Only valid if the final estimator
implements `decision_function`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_score : ndarray of shape (n_samples, n_classes)
Result of calling `decision_function` on the final estimator.
"""
Xt = X
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt)
return self.steps[-1][1].decision_function(Xt)
@available_if(_final_estimator_has("score_samples"))
def score_samples(self, X):
"""Transform the data, and apply `score_samples` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`score_samples` method. Only valid if the final estimator implements
`score_samples`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_score : ndarray of shape (n_samples,)
Result of calling `score_samples` on the final estimator.
"""
Xt = X
for _, _, transformer in self._iter(with_final=False):
Xt = transformer.transform(Xt)
return self.steps[-1][1].score_samples(Xt)
@available_if(_final_estimator_has("predict_log_proba"))
def predict_log_proba(self, X, **predict_log_proba_params):
"""Transform the data, and apply `predict_log_proba` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`predict_log_proba` method. Only valid if the final estimator
implements `predict_log_proba`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
**predict_log_proba_params : dict of string -> object
Parameters to the ``predict_log_proba`` called at the end of all
transformations in the pipeline.
Returns
-------
y_log_proba : ndarray of shape (n_samples, n_classes)
Result of calling `predict_log_proba` on the final estimator.
"""
Xt = X
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt)
return self.steps[-1][1].predict_log_proba(Xt, **predict_log_proba_params)
def _can_transform(self):
return self._final_estimator == "passthrough" or hasattr(
self._final_estimator, "transform"
)
@available_if(_can_transform)
def transform(self, X):
"""Transform the data, and apply `transform` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`transform` method. Only valid if the final estimator
implements `transform`.
This also works where final estimator is `None` in which case all prior
transformations are applied.
Parameters
----------
X : iterable
Data to transform. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
Xt : ndarray of shape (n_samples, n_transformed_features)
Transformed data.
"""
Xt = X
for _, _, transform in self._iter():
Xt = transform.transform(Xt)
return Xt
def _can_inverse_transform(self):
return all(hasattr(t, "inverse_transform") for _, _, t in self._iter())
@available_if(_can_inverse_transform)
def inverse_transform(self, Xt):
"""Apply `inverse_transform` for each step in a reverse order.
All estimators in the pipeline must support `inverse_transform`.
Parameters
----------
Xt : array-like of shape (n_samples, n_transformed_features)
Data samples, where ``n_samples`` is the number of samples and
``n_features`` is the number of features. Must fulfill
input requirements of last step of pipeline's
``inverse_transform`` method.
Returns
-------
Xt : ndarray of shape (n_samples, n_features)
Inverse transformed data, that is, data in the original feature
space.
"""
reverse_iter = reversed(list(self._iter()))
for _, _, transform in reverse_iter:
Xt = transform.inverse_transform(Xt)
return Xt
@available_if(_final_estimator_has("score"))
def score(self, X, y=None, sample_weight=None):
"""Transform the data, and apply `score` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`score` method. Only valid if the final estimator implements `score`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all
steps of the pipeline.
sample_weight : array-like, default=None
If not None, this argument is passed as ``sample_weight`` keyword
argument to the ``score`` method of the final estimator.
Returns
-------
score : float
Result of calling `score` on the final estimator.
"""
Xt = X
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt)
score_params = {}
if sample_weight is not None:
score_params["sample_weight"] = sample_weight
return self.steps[-1][1].score(Xt, y, **score_params)
@property
def classes_(self):
"""The classes labels. Only exist if the last step is a classifier."""
return self.steps[-1][1].classes_
def _more_tags(self):
# check if first estimator expects pairwise input
return {"pairwise": _safe_tags(self.steps[0][1], "pairwise")}
# TODO: Remove in 1.1
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute `_pairwise` was deprecated in "
"version 0.24 and will be removed in 1.1 (renaming of 0.26)."
)
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], "_pairwise", False)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Transform input features using the pipeline.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
feature_names_out = input_features
for _, name, transform in self._iter():
if not hasattr(transform, "get_feature_names_out"):
raise AttributeError(
"Estimator {} does not provide get_feature_names_out. "
"Did you mean to call pipeline[:-1].get_feature_names_out"
"()?".format(name)
)
feature_names_out = transform.get_feature_names_out(feature_names_out)
return feature_names_out
@property
def n_features_in_(self):
"""Number of features seen during first step `fit` method."""
# delegate to first step (which will call _check_is_fitted)
return self.steps[0][1].n_features_in_
@property
def feature_names_in_(self):
"""Names of features seen during first step `fit` method."""
# delegate to first step (which will call _check_is_fitted)
return self.steps[0][1].feature_names_in_
def __sklearn_is_fitted__(self):
"""Indicate whether pipeline has been fit."""
try:
# check if the last step of the pipeline is fitted
# we only check the last step since if the last step is fit, it
# means the previous steps should also be fit. This is faster than
# checking if every step of the pipeline is fit.
check_is_fitted(self.steps[-1][1])
return True
except NotFittedError:
return False
def _sk_visual_block_(self):
_, estimators = zip(*self.steps)
def _get_name(name, est):
if est is None or est == "passthrough":
return f"{name}: passthrough"
# Is an estimator
return f"{name}: {est.__class__.__name__}"
names = [_get_name(name, est) for name, est in self.steps]
name_details = [str(est) for est in estimators]
return _VisualBlock(
"serial",
estimators,
names=names,
name_details=name_details,
dash_wrapped=False,
)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [
estimator if isinstance(estimator, str) else type(estimator).__name__.lower()
for estimator in estimators
]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(namecount.items()):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps, memory=None, verbose=False):
"""Construct a :class:`Pipeline` from the given estimators.
This is a shorthand for the :class:`Pipeline` constructor; it does not
require, and does not permit, naming the estimators. Instead, their names
will be set to the lowercase of their types automatically.
Parameters
----------
*steps : list of Estimator objects
List of the scikit-learn estimators that are chained together.
memory : str or object with the joblib.Memory interface, default=None
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
verbose : bool, default=False
If True, the time elapsed while fitting each step will be printed as it
is completed.
Returns
-------
p : Pipeline
Returns a scikit-learn :class:`Pipeline` object.
See Also
--------
Pipeline : Class for creating a pipeline of transforms with a final
estimator.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.pipeline import make_pipeline
>>> make_pipeline(StandardScaler(), GaussianNB(priors=None))
Pipeline(steps=[('standardscaler', StandardScaler()),
('gaussiannb', GaussianNB())])
"""
return Pipeline(_name_estimators(steps), memory=memory, verbose=verbose)
def _transform_one(transformer, X, y, weight, **fit_params):
res = transformer.transform(X)
# if we have a weight for this transformer, multiply output
if weight is None:
return res
return res * weight
def _fit_transform_one(
transformer, X, y, weight, message_clsname="", message=None, **fit_params
):
"""
Fits ``transformer`` to ``X`` and ``y``. The transformed result is returned
with the fitted transformer. If ``weight`` is not ``None``, the result will
be multiplied by ``weight``.
"""
with _print_elapsed_time(message_clsname, message):
if hasattr(transformer, "fit_transform"):
res = transformer.fit_transform(X, y, **fit_params)
else:
res = transformer.fit(X, y, **fit_params).transform(X)
if weight is None:
return res, transformer
return res * weight, transformer
def _fit_one(transformer, X, y, weight, message_clsname="", message=None, **fit_params):
"""
Fits ``transformer`` to ``X`` and ``y``.
"""
with _print_elapsed_time(message_clsname, message):
return transformer.fit(X, y, **fit_params)
class FeatureUnion(TransformerMixin, _BaseComposition):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Parameters of the transformers may be set using its name and the parameter
name separated by a '__'. A transformer may be replaced entirely by
setting the parameter with its name to another transformer, removed by
setting to 'drop' or disabled by setting to 'passthrough' (features are
passed without transformation).
Read more in the :ref:`User Guide <feature_union>`.
.. versionadded:: 0.13
Parameters
----------
transformer_list : list of (str, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer. The transformer can
be 'drop' for it to be ignored or can be 'passthrough' for features to
be passed unchanged.
.. versionadded:: 1.1
Added the option `"passthrough"`.
.. versionchanged:: 0.22
Deprecated `None` as a transformer in favor of 'drop'.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None
transformer_weights : dict, default=None
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
Raises ValueError if key not present in ``transformer_list``.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
Attributes
----------
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying first transformer in `transformer_list` exposes such an
attribute when fit.
.. versionadded:: 0.24
See Also
--------
make_union : Convenience function for simplified feature union
construction.
Examples
--------
>>> from sklearn.pipeline import FeatureUnion
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> union = FeatureUnion([("pca", PCA(n_components=1)),
... ("svd", TruncatedSVD(n_components=2))])
>>> X = [[0., 1., 3], [2., 2., 5]]
>>> union.fit_transform(X)
array([[ 1.5 , 3.0..., 0.8...],
[-1.5 , 5.7..., -0.4...]])
"""
_required_parameters = ["transformer_list"]
def __init__(
self, transformer_list, *, n_jobs=None, transformer_weights=None, verbose=False
):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
self.verbose = verbose
self._validate_transformers()
def get_params(self, deep=True):
"""Get parameters for this estimator.
Returns the parameters given in the constructor as well as the
estimators contained within the `transformer_list` of the
`FeatureUnion`.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return self._get_params("transformer_list", deep=deep)
def set_params(self, **kwargs):
"""Set the parameters of this estimator.
Valid parameter keys can be listed with ``get_params()``. Note that
you can directly set the parameters of the estimators contained in
        `transformer_list`.
Parameters
----------
**kwargs : dict
Parameters of this estimator or parameters of estimators contained
            in `transformer_list`. Parameters of the transformers may be set
using its name and the parameter name separated by a '__'.
Returns
-------
self : object
FeatureUnion class instance.
"""
self._set_params("transformer_list", **kwargs)
return self
def _validate_transformers(self):
names, transformers = zip(*self.transformer_list)
# validate names
self._validate_names(names)
# validate estimators
for t in transformers:
if t in ("drop", "passthrough"):
continue
if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(
t, "transform"
):
raise TypeError(
"All estimators should implement fit and "
"transform. '%s' (type %s) doesn't" % (t, type(t))
)
def _validate_transformer_weights(self):
if not self.transformer_weights:
return
transformer_names = set(name for name, _ in self.transformer_list)
for name in self.transformer_weights:
if name not in transformer_names:
raise ValueError(
f'Attempting to weight transformer "{name}", '
"but it is not present in transformer_list."
)
def _iter(self):
"""
Generate (name, trans, weight) tuples excluding None and
'drop' transformers.
"""
get_weight = (self.transformer_weights or {}).get
for name, trans in self.transformer_list:
if trans == "drop":
continue
if trans == "passthrough":
trans = FunctionTransformer()
yield (name, trans, get_weight(name))
@deprecated(
"get_feature_names is deprecated in 1.0 and will be removed "
"in 1.2. Please use get_feature_names_out instead."
)
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans, weight in self._iter():
if not hasattr(trans, "get_feature_names"):
raise AttributeError(
"Transformer %s (type %s) does not provide get_feature_names."
% (str(name), type(trans).__name__)
)
feature_names.extend([name + "__" + f for f in trans.get_feature_names()])
return feature_names
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
feature_names = []
for name, trans, _ in self._iter():
if not hasattr(trans, "get_feature_names_out"):
raise AttributeError(
"Transformer %s (type %s) does not provide get_feature_names_out."
% (str(name), type(trans).__name__)
)
feature_names.extend(
[f"{name}__{f}" for f in trans.get_feature_names_out(input_features)]
)
return np.asarray(feature_names, dtype=object)
def fit(self, X, y=None, **fit_params):
"""Fit all transformers using X.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data, used to fit transformers.
y : array-like of shape (n_samples, n_outputs), default=None
Targets for supervised learning.
**fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
Returns
-------
self : object
FeatureUnion class instance.
"""
transformers = self._parallel_func(X, y, fit_params, _fit_one)
if not transformers:
# All transformers are None
return self
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
y : array-like of shape (n_samples, n_outputs), default=None
Targets for supervised learning.
**fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
Returns
-------
X_t : array-like or sparse matrix of \
shape (n_samples, sum_n_components)
The `hstack` of results of transformers. `sum_n_components` is the
sum of `n_components` (output dimension) over transformers.
"""
results = self._parallel_func(X, y, fit_params, _fit_transform_one)
if not results:
# All transformers are None
return np.zeros((X.shape[0], 0))
Xs, transformers = zip(*results)
self._update_transformer_list(transformers)
return self._hstack(Xs)
def _log_message(self, name, idx, total):
if not self.verbose:
return None
return "(step %d of %d) Processing %s" % (idx, total, name)
def _parallel_func(self, X, y, fit_params, func):
"""Runs func in parallel on X and y"""
self.transformer_list = list(self.transformer_list)
self._validate_transformers()
self._validate_transformer_weights()
transformers = list(self._iter())
return Parallel(n_jobs=self.n_jobs)(
delayed(func)(
transformer,
X,
y,
weight,
message_clsname="FeatureUnion",
message=self._log_message(name, idx, len(transformers)),
**fit_params,
)
for idx, (name, transformer, weight) in enumerate(transformers, 1)
)
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix of \
shape (n_samples, sum_n_components)
The `hstack` of results of transformers. `sum_n_components` is the
sum of `n_components` (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, X, None, weight)
for name, trans, weight in self._iter()
)
if not Xs:
# All transformers are None
return np.zeros((X.shape[0], 0))
return self._hstack(Xs)
def _hstack(self, Xs):
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def _update_transformer_list(self, transformers):
transformers = iter(transformers)
self.transformer_list[:] = [
(name, old if old == "drop" else next(transformers))
for name, old in self.transformer_list
]
@property
def n_features_in_(self):
"""Number of features seen during :term:`fit`."""
# X is passed to all transformers so we just delegate to the first one
return self.transformer_list[0][1].n_features_in_
def _sk_visual_block_(self):
names, transformers = zip(*self.transformer_list)
return _VisualBlock("parallel", transformers, names=names)
def make_union(*transformers, n_jobs=None, verbose=False):
"""
Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Parameters
----------
*transformers : list of estimators
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
Returns
-------
f : FeatureUnion
See Also
--------
FeatureUnion : Class for concatenating the results of multiple transformer
objects.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> from sklearn.pipeline import make_union
>>> make_union(PCA(), TruncatedSVD())
FeatureUnion(transformer_list=[('pca', PCA()),
('truncatedsvd', TruncatedSVD())])
"""
return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs, verbose=verbose)
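
# Hedged usage sketch (added for illustration; it mirrors the doctest in the
# make_union docstring above, so the resulting shape follows directly):
#
# >>> from sklearn.decomposition import PCA, TruncatedSVD
# >>> union = make_union(PCA(n_components=1), TruncatedSVD(n_components=2))
# >>> X = [[0., 1., 3.], [2., 2., 5.]]
# >>> union.fit_transform(X).shape  # one PCA column hstacked with two SVD columns
# (2, 3)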
avg_line_length: 36.130834 | max_line_length: 88 | alphanum_fraction: 0.613578

hexsha: 79491a0b75c7a6bb7a27cea885d59ded6038819c | size: 828 | ext: py | lang: Python
max_stars_repo_path: src/c3nav/control/migrations/0008_userpermissions_reports.py | max_stars_repo_name: johnjohndoe/c3nav | max_stars_repo_head_hexsha: a17f863a3512e305595c16b0300796b6bae81241 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 132 | max_stars_repo_stars_event_min_datetime: 2016-11-12T01:45:23.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-08T15:17:10.000Z
max_issues_repo_path: src/c3nav/control/migrations/0008_userpermissions_reports.py | max_issues_repo_name: johnjohndoe/c3nav | max_issues_repo_head_hexsha: a17f863a3512e305595c16b0300796b6bae81241 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 66 | max_issues_repo_issues_event_min_datetime: 2016-09-29T09:46:19.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-11T23:26:18.000Z
max_forks_repo_path: src/c3nav/control/migrations/0008_userpermissions_reports.py | max_forks_repo_name: johnjohndoe/c3nav | max_forks_repo_head_hexsha: a17f863a3512e305595c16b0300796b6bae81241 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 42 | max_forks_repo_forks_event_min_datetime: 2016-09-29T08:34:57.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-08T15:17:15.000Z
# Generated by Django 2.2.8 on 2019-12-24 16:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0078_reports'),
('control', '0007_userpermissions_manage_map_updates'),
]
operations = [
migrations.AddField(
model_name='userpermissions',
name='review_all_reports',
field=models.BooleanField(default=False, verbose_name='can review all reports'),
),
migrations.AddField(
model_name='userpermissions',
name='review_group_reports',
field=models.ManyToManyField(blank=True, limit_choices_to={'access_restriction': None}, related_name='permissions', to='mapdata.LocationGroup', verbose_name='can review reports belonging to'),
),
]
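
# Hedged sketch (added; not part of the original migration): applying this
# migration programmatically, assuming DJANGO_SETTINGS_MODULE points at the
# c3nav settings. The usual route is `python manage.py migrate control`.
#
# import django
# from django.core.management import call_command
# django.setup()
# call_command("migrate", "control", "0008")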
avg_line_length: 33.12 | max_line_length: 204 | alphanum_fraction: 0.655797

hexsha: 79491a79324b2560e8d9b81d637e9442f013573a | size: 19,523 | ext: py | lang: Python
max_stars_repo_path: input_reader/keylevel.py | max_stars_repo_name: SethMMorton/input_reader | max_stars_repo_head_hexsha: 837ccc903cd5c8c89693130bc9b7eb48d0f50167 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2019-09-27T23:41:12.000Z | max_stars_repo_stars_event_max_datetime: 2019-09-27T23:41:12.000Z
max_issues_repo_path: input_reader/keylevel.py | max_issues_repo_name: SethMMorton/input_reader | max_issues_repo_head_hexsha: 837ccc903cd5c8c89693130bc9b7eb48d0f50167 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: input_reader/keylevel.py | max_forks_repo_name: SethMMorton/input_reader | max_forks_repo_head_hexsha: 837ccc903cd5c8c89693130bc9b7eb48d0f50167 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2022-03-15T07:46:12.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-15T07:46:12.000Z
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
from .helpers import ReaderError, SUPPRESS
from .py23compat import py23_str, py23_basestring
class _KeyLevel(object):
"""An abstract base class that provides functionality essential
for a key"""
def __init__(self, case=False):
"""Init the KeyLevel class"""
# Are the keys case-sensitive by default?
self._case = case
if not isinstance(case, bool):
raise ValueError('case must be bool, '
'given '+repr(self._case))
def _validate_string(self, string):
"""Make sure a string has no spaces"""
if string is None:
return
elif hasattr(string, 'pattern'):
for s in (r'\s', r'.'):
if s in string.pattern:
msg = ': Regex should not allow the possibility of spaces'
msg += ', given "'+string.pattern+'"'
raise ValueError(self.name+msg)
else:
if len(string.split()) == 0:
msg = ': String cannot be of zero length'
raise ValueError(self.name+msg)
elif len(string.split()) > 1:
msg = ': String cannot contain spaces, given "'+string+'"'
raise ValueError(self.name+msg)
def _return_val(self, i, val, namespace):
"""Returns the result properly, depending on the key type
and how the user wants it."""
# Substitute the keyname for dest if required
name = self._dest if self._dest is not None else self.name
        # If multiple occurrences of the keyname may appear, store
# each of these in the namespace
if self._repeat:
# If this key has been found, check if we need to append to
# the previous values or create the new value
if name in namespace:
return i, name, getattr(namespace, name)+(val,)
            # If the key has not been found, simply return (as a tuple)
else:
return i, name, (val,)
# In this case, only one instance of the keyname may appear
# or it is an error.
else:
# If the keyname has already been found it is an error,
if name in namespace:
raise ReaderError(self.name+': The key "'+name+'" appears twice')
# If the key has not been found, simply return
else:
return i, name, val
def _add_kwargs(self, **kwargs):
"""Generic keyword arguments common to many methods"""
# If this class defines a default default attribute, use that instead
self._default = getattr(self, 'default', None)
if self._default is None:
self._default = kwargs.pop('default', None)
# Repeat
self._repeat = kwargs.pop('repeat', False)
if not isinstance(self._repeat, bool):
raise ValueError('repeat value must be a bool, '
'given '+repr(self._repeat))
# Required
self._required = kwargs.pop('required', False)
if not isinstance(self._required, bool):
raise ValueError('required value must be a bool, '
'given '+repr(self._required))
# If this class defines a default dest attribute, use that instead
self._dest = getattr(self, 'dest', None)
if self._dest is None:
self._dest = kwargs.pop('dest', None)
if self._dest is not None and not isinstance(self._dest, py23_basestring):
raise ValueError('dest value '+repr(self._dest)+' must be a str')
# Depends
self._depends = kwargs.pop('depends', None)
# Make sure nothing extra was given
if kwargs:
msg = ': Unknown arguments given: '+','.join(kwargs)
raise TypeError(self.name+msg)
class BooleanKey(_KeyLevel):
"""A class to store data in a boolean key"""
def __init__(self, keyname, action, **kwargs):
"""Defines a boolean key."""
super(BooleanKey, self).__init__()
# Fill in the non-generic values
self.name = keyname
self._action = action
# Add the generic keyword arguments
self._add_kwargs(**kwargs)
# Check strings
self._validate_string(self.name)
self._validate_string(self._dest)
def _parse(self, f, i, namespace):
"""Parses the current line for the key. Returns the line that
we read from and the value"""
n = len(f[i].split())
if n == 1:
return self._return_val(i, self._action, namespace)
else:
raise ReaderError('The boolean "'+self.name+'" was given '
'arguments, this is illegal')
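
# Illustrative note (added; follows from the _parse logic above): a
# BooleanKey named 'debug' with action=True matches a line containing only
# "debug" and stores True under that name; any trailing tokens on the line
# raise ReaderError.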
class Regex(_KeyLevel):
"""A class to store data from a regex"""
def __init__(self, handle, regex, **kwargs):
"""Defines a regex searcher."""
super(Regex, self).__init__()
# Fill in the non-generic values
self.name = handle
self._regex = regex
# Add the generic keyword arguments
self._add_kwargs(**kwargs)
# Check strings
self._validate_string(self.name)
self._validate_string(self._dest)
def _parse(self, f, i, namespace):
"""Parses the current line for the regex. Returns the match objext
for the line."""
# Grab the match object for this line
val = self._regex.match(f[i])
return self._return_val(i, val, namespace)
class LineKey(_KeyLevel):
"""A class to store data on a line key"""
def __init__(self, keyname, type, glob, keywords, case, **kwargs):
"""Defines a line key."""
super(LineKey, self).__init__(case=case)
# Fill in the values
self.name = keyname
# Add the generic keyword arguments
self._add_kwargs(**kwargs)
# Check strings
self._validate_string(self.name)
self._validate_string(self._dest)
# Cannot have both glob and keywords defined
if glob and keywords:
msg = ': Cannot define both glob and keywords'
raise TypeError(self.name+msg)
# Validate type
# type given as a list
if isinstance(type, list):
self._type = type
self._nolist = False
# type given as a single value
elif type is None:
self._type = []
self._nolist = False
else:
self._type = [type]
self._nolist = True
self._check_types_in_list(self._type)
# Validate glob
if glob:
if not isinstance(glob, dict):
raise ValueError(self.name+': glob must be a dict')
if 'len' not in glob:
raise ValueError(self.name+': "len" required for glob')
elif glob['len'] not in ('*', '+', '?'):
msg = ': "len" must be one of "*", "+", or "?" in glob'
raise ValueError(self.name+msg)
if 'type' not in glob:
glob['type'] = str
if isinstance(glob['type'], list):
msg = ': list not allowed in type for glob or keywords'
raise ValueError(self.name+msg)
self._check_types_in_list([glob['type']])
if 'join' not in glob:
glob['join'] = False
if glob['join'] and glob['len'] == '?':
msg = ': "join=True" makes no sense for "len=?"'
raise ValueError(self.name+msg)
if set(glob.keys()) != set(['len', 'type', 'join']):
if set(glob.keys()) != set(['len', 'type', 'join', 'default']):
raise TypeError(self.name+': Unknown key in glob')
if not isinstance(glob['join'], bool):
raise ValueError(self.name+': "join" must be a bool in glob')
            # Make sure the result is only a string when there are no positionals
if not self._type and (glob['join'] or glob['len'] == '?'):
self._nolist = True
else:
self._nolist = False
self._glob = glob
else:
self._glob = {} # In case glob = None
# Validate keywords
if keywords:
if not isinstance(keywords, dict):
raise ValueError(self.name+': keywords must be a dict')
for key in keywords:
if not isinstance(key, py23_basestring):
msg = ': keys in keywords must be of type str'
raise ValueError(self.name+msg)
else:
self._validate_string(key)
if keywords[key] is None:
keywords[key] = {}
elif not isinstance(keywords[key], dict):
msg = ': Options for keyword "'+key+'" must be a dict'
raise ValueError(self.name+msg)
if 'default' not in keywords[key]:
keywords[key]['default'] = SUPPRESS
if 'type' not in keywords[key]:
keywords[key]['type'] = str
if set(keywords[key].keys()) != set(['default', 'type']):
msg = ': Unknown key in keyword: "'+key+'"'
raise TypeError(self.name+msg)
# Check the type of the keyword
if isinstance(keywords[key]['type'], list):
msg = ': list not allowed in type for glob or keywords'
raise ValueError(self.name+msg)
else:
self._check_types_in_list([keywords[key]['type']])
self._keywords = keywords
# Since we append this dict to the end, we must keep as a list
# unless only the keywords are being kept
self._nolist = True if not self._type else False
else:
self._keywords = {} # In case keywords = None
# Type, glob and keywords can't be empty
if not (self._type or self._glob or self._keywords):
msg = ': type, glob and keywords cannot all be empty'
raise ValueError(self.name+msg)
def _parse(self, f, i, namespace):
"""Parses the current line for the key. Returns the line that
we read from and the value"""
# Separate the arguments from the key
if self._case:
args = f[i].split()[1:]
else:
args = f[i].lower().split()[1:]
# Check that the length of args matches the type length
if len(args) == len(self._type):
if not self._glob and not self._keywords:
pass # Not expecting anything else, we're good to go
elif self._glob.get('len') == '+':
msg = ': expected at least '+str(len(self._type)+1)
msg += ' arguments, got '+str(len(args))
raise ReaderError(self.name+msg)
# Checking keywords will be done later
        # If the number of args is less than the number of positionals
elif len(args) < len(self._type):
if self._glob.get('len') == '+':
msg = ': expected at least '+str(len(self._type)+1)
else:
msg = ': expected '+str(len(self._type))
msg += ' arguments, got '+str(len(args))
raise ReaderError(self.name+msg)
# If there are too many arguments
elif len(args) > len(self._type):
if self._keywords:
pass
elif self._glob and self._glob['len'] in ('*', '+'):
pass
else:
n = len(self._type)
if self._glob.get('len') == '?':
n += 1
                    msg = ': expected at most '+str(n)
                else:
                    msg = ': expected '+str(n)
if len(args) != n:
msg += ' arguments, got '+str(len(args))
raise ReaderError(self.name+msg)
# Read in the arguments, making sure they match the types and choices
val = []
for a, t in zip(args[:len(self._type)], self._type):
val.append(self._check_type_of_value(a, t, self._case))
# Remove the arguments that were just read in
try:
args = args[len(self._type):]
except IndexError:
args = []
# Read in the glob or the keywords
glob = []
kw = {}
if self._glob:
t = self._glob['type']
for a in args:
glob.append(self._check_type_of_value(a, t, self._case))
# Assign the default if there was nothing
if self._glob['join']:
if not glob:
try:
glob = self._glob['default']
except KeyError:
pass
else:
# Change all the globbed values to strings
for j, v in enumerate(glob):
glob[j] = py23_str(v)
glob = ' '.join(glob)
elif not glob:
try:
glob.append(self._glob['default'])
except KeyError:
pass
# Tag onto the end of val and prep val
if not val:
if self._nolist:
if isinstance(glob, py23_basestring):
val = glob
else:
try:
val = glob[0]
except IndexError:
val = ''
else:
val = tuple(glob)
elif not glob:
if self._nolist:
val = val[0]
else:
val = tuple(val)
elif self._glob['join']:
val.append(glob)
val = tuple(val)
else:
val.extend(glob)
val = tuple(val)
elif self._keywords:
# Each keyword is assumed to be key=value with no spaces
for kvpair in args:
try:
key, value = kvpair.split('=')
except ValueError:
msg = ': Error reading keyword argument "'+kvpair+'"'
raise ReaderError(self.name+msg)
# Make sure the keyword is good
if not self._case:
key = key.lower()
if key not in self._keywords:
raise ReaderError(self.name+': Unknown keyword: "'+key+'"')
# Assign this keyword
try:
t = self._keywords[key]['type']
except KeyError:
t = str # Default to string if not given
kw[key] = self._check_type_of_value(value, t, self._case)
# Assign the defaults
for key in self._keywords:
try:
default = self._keywords[key]['default']
except KeyError:
continue
if key not in kw and default is not SUPPRESS:
kw[key] = default
# Tag onto the end of val and prep val
if not val:
val = kw
elif not kw:
if self._nolist:
val = val[0]
else:
val.append({})
val = tuple(val)
else:
val.append(kw)
val = tuple(val)
else:
if self._nolist:
try:
val = val[0]
except IndexError:
val = ''
else:
val = tuple(val)
return self._return_val(i, val, namespace)
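
    # Illustrative note (added; not in the original source): the keyword
    # branch above expects whitespace-separated key=value tokens with no
    # spaces around '=', so a line like "grid spacing=0.5 units=angstrom"
    # yields the dict {'spacing': 0.5, 'units': 'angstrom'} for a suitably
    # defined LineKey.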
def _check_types_in_list(self, typ):
"""Make sure each type in a list is legal. The function is recursive"""
for t in typ:
if isinstance(t, list):
msg = ': Embedded lists not allowed in type'
raise ValueError(self.name+msg)
elif isinstance(t, tuple):
if len(t) == 0:
msg = ': Empty tuple in type'
raise ValueError(self.name+msg)
else:
self._check_types_in_list(t)
elif not (isinstance(t, py23_basestring) or isinstance(t, int) or
isinstance(t, float) or t is None or
hasattr(t, 'pattern') or t is str or t is int or
t is float):
                msg = (': type must be one of None, str, float, '
                       'int, or an instance of str, float, '
                       'int or regex')
raise ValueError(self.name+msg)
if isinstance(t, py23_basestring) or hasattr(t, 'pattern'):
self._validate_string(t)
def _validate_given_value(self, val, typ, case):
"""Checks that the given value is valid by checking
its type. Raises ValueError if unsuccessful.
"""
# Check case if necessary
if not case:
            try:
                # typ may be a string choice; lowercase it so the comparison
                # against the already-lowercased value is case-insensitive.
                # (The original read `type.lower()`, shadowing the builtin and
                # always raising; types without a usable .lower are skipped.)
                typ = typ.lower()
            except (AttributeError, TypeError):
                pass
# One of the core datatypes
if typ is float or typ is int or typ is str:
return typ(val)
# Explicit None
elif typ is None:
if val.lower() == 'none':
return None
else:
raise ValueError
# Explicit choices
elif (isinstance(typ, py23_basestring) or isinstance(typ, int) or
isinstance(typ, float)):
if type(typ)(val) == typ:
return type(typ)(val)
else:
raise ValueError
# Regular expression
else:
if typ.match(val):
return val
else:
raise ValueError
def _check_type_of_value(self, val, typ, case):
"""Checks the type of a value, accounting for
various forms of type"""
if isinstance(typ, tuple):
for tp in typ:
try:
return self._validate_given_value(val, tp, case)
except ValueError:
continue
else:
msg = self.name+': expected one of {0}, got "{1}"'
t = sorted([self._make_value_readable(x) for x in typ])
t = ', '.join(t[:-1])+' or '+t[-1]
raise ReaderError(msg.format(t, val))
else:
try:
return self._validate_given_value(val, typ, case)
except ValueError:
msg = self.name+': expected {0}, got "{1}"'
raise ReaderError(msg.format(self._make_value_readable(typ), val))
def _make_value_readable(self, val):
"""Returns a a string version of the input value."""
if isinstance(val, int) or isinstance(val, float):
return str(val)
elif isinstance(val, py23_basestring):
return '"'+str(val)+'"'
elif val is None:
return '"None"'
else:
try:
return 'regex({0})'.format(val.pattern)
except AttributeError:
return str(val).split()[1].strip("'><")
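
# Hedged, standalone sketch (added for illustration; these key classes are
# normally constructed through input_reader's public API rather than used
# directly): parsing one positional of type str from a single line.
#
# >>> key = LineKey('units', str, None, None, False)
# >>> key._parse(['units angstrom'], 0, {})
# (0, 'units', 'angstrom')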
avg_line_length: 38.736111 | max_line_length: 82 | alphanum_fraction: 0.505455

hexsha: 79491a9ddbc00b804e55f7fd385f18740dae8223 | size: 9,353 | ext: py | lang: Python
max_stars_repo_path: opentamp/policy_hooks/robosuite/arm_hyp.py | max_stars_repo_name: Algorithmic-Alignment-Lab/openTAMP | max_stars_repo_head_hexsha: f0642028d551d0436b3a3dbc3bfb2f23a00adc14 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 4 | max_stars_repo_stars_event_min_datetime: 2022-02-13T15:52:18.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-26T17:33:13.000Z
max_issues_repo_path: opentamp/policy_hooks/robosuite/arm_hyp.py | max_issues_repo_name: Algorithmic-Alignment-Lab/openTAMP | max_issues_repo_head_hexsha: f0642028d551d0436b3a3dbc3bfb2f23a00adc14 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2022-02-13T22:48:09.000Z | max_issues_repo_issues_event_max_datetime: 2022-02-13T22:48:09.000Z
max_forks_repo_path: opentamp/policy_hooks/robosuite/arm_hyp.py | max_forks_repo_name: Algorithmic-Alignment-Lab/openTAMP | max_forks_repo_head_hexsha: f0642028d551d0436b3a3dbc3bfb2f23a00adc14 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
NUM_OBJS = 1
NUM_TARGS = 1
from datetime import datetime
import os
import os.path
import numpy as np
from gps.algorithm.policy.lin_gauss_init import init_lqr, init_pd
from opentamp.policy_hooks.algorithm_impgps import AlgorithmIMPGPS
from opentamp.policy_hooks.multi_head_policy_opt_tf import MultiHeadPolicyOptTf
import policy_hooks.utils.policy_solver_utils as utils
from opentamp.policy_hooks.traj_opt_pi2 import TrajOptPI2
from opentamp.core.util_classes.namo_grip_predicates import ATTRMAP
import policy_hooks.robosuite.sort_prob as prob
prob.NUM_OBJS = NUM_OBJS
prob.NUM_TARGS = NUM_TARGS
from opentamp.policy_hooks.policy_mp_prior_gmm import PolicyMPPriorGMM
from opentamp.policy_hooks.policy_prior_gmm import PolicyPriorGMM
from opentamp.policy_hooks.robosuite.robot_agent import RobotAgent
from pma.robosuite_solver import RobotSolver
BASE_DIR = os.getcwd() + '/policy_hooks/'
EXP_DIR = BASE_DIR + 'experiments/'
NUM_CONDS = 1 # Per rollout server
NUM_PRETRAIN_STEPS = 20
NUM_PRETRAIN_TRAJ_OPT_STEPS = 1
NUM_TRAJ_OPT_STEPS = 1
N_SAMPLES = 10
N_TRAJ_CENTERS = 1
HL_TIMEOUT = 600
OPT_WT_MULT = 5e2
N_ROLLOUT_SERVERS = 34 # 58
N_ALG_SERVERS = 0
N_OPTIMIZERS = 0
N_DIRS = 16
N_GRASPS = 4
TIME_LIMIT = 14400
common = {
'experiment_name': 'my_experiment' + '_' + \
datetime.strftime(datetime.now(), '%m-%d-%y_%H-%M'),
'experiment_dir': EXP_DIR,
'data_files_dir': EXP_DIR + 'data_files/',
'target_filename': EXP_DIR + 'target.npz',
'log_filename': EXP_DIR + 'log.txt',
'conditions': NUM_CONDS,
}
algorithm = {
'type': AlgorithmIMPGPS,
'conditions': common['conditions'],
'policy_sample_mode': 'add',
'sample_on_policy': True,
'iterations': 1e3, #5e4,
'max_ent_traj': 0.0,
'fit_dynamics': False,
'stochastic_conditions': True,
'policy_inf_coeff': 1e2,
'policy_out_coeff': 1e1,
'kl_step': 1.,
'min_step_mult': 0.05,
'max_step_mult': 5.0,
'sample_ts_prob': 1.0,
'opt_wt': OPT_WT_MULT,
'fail_value': 50,
'use_centroids': True,
'n_traj_centers': N_TRAJ_CENTERS,
'num_samples': N_SAMPLES,
'mp_opt': True,
'her': False,
'rollout_opt': False,
}
algorithm['init_traj_distr'] = {
'type': init_pd,
'init_var': 0.01,
'pos_gains': 0.00,
}
algorithm['traj_opt'] = {
'type': TrajOptPI2,
'kl_threshold': 1.,
'covariance_damping': 0.00,
'min_temperature': 1e-3,
}
# algorithm['policy_prior'] = {
# 'type': PolicyPrior,
# }
# algorithm = {
# 'type': AlgorithmMDGPS,
# 'conditions': common['conditions'],
# 'iterations': 10,
# 'kl_step': 0.1,
# 'min_step_mult': 0.5,
# 'max_step_mult': 3.0,
# 'policy_sample_mode': 'replace',
# }
# algorithm['init_traj_distr'] = {
# 'type': init_pd,
# 'pos_gains': 1e-5,
# }
# algorithm['init_traj_distr'] = {
# 'type': init_lqr,
# 'init_var': 0.001,
# 'stiffness': 10.0,
# 'stiffness_vel': 0.5,
# 'final_weight': 5.0,
# }
# algorithm = {
# 'type': AlgorithmTrajOptPILQR,
# 'conditions': common['conditions'],
# 'iterations': 20,
# 'step_rule': 'res_percent',
# 'step_rule_res_ratio_dec': 0.2,
# 'step_rule_res_ratio_inc': 0.05,
# 'kl_step': np.linspace(0.6, 0.2, 100),
# }
# algorithm['dynamics'] = {
# 'type': DynamicsLRPrior,
# 'regularization': 1e-6,
# 'prior': {
# 'type': DynamicsPriorGMM,
# 'max_clusters': 20,
# 'min_samples_per_cluster': 60,
# 'max_samples': 30,
# },
# }
# algorithm['traj_opt'] = {
# 'type': TrajOptPILQR,
# }
# algorithm['traj_opt'] = {
# 'type': TrajOptLQRPython,
# }
algorithm['policy_prior'] = {
'type': PolicyPriorGMM,
'max_clusters': 20,
'min_samples_per_cluster': 40,
'max_samples': 50,
}
algorithm['mp_policy_prior'] = {
'type': PolicyMPPriorGMM,
'max_clusters': 20,
'min_samples_per_cluster': 40,
'max_samples': 50,
}
def refresh_config(no=NUM_OBJS, nt=NUM_TARGS):
cost_wp_mult = np.ones((3 + 2 * NUM_OBJS))
prob.NUM_OBJS = no
prob.NUM_TARGS = nt
prob.N_GRASPS = N_GRASPS
prob.FIX_TARGETS = True
prob.END_TARGETS = prob.END_TARGETS[:8]
prob.n_aux = 0
config = {
'gui_on': False,
'iterations': algorithm['iterations'],
'verbose_trials': 1,
'verbose_policy_trials': 1,
'common': common,
'algorithm': algorithm,
'num_samples': algorithm['num_samples'],
'num_distilled_samples': 0,
'num_conds': NUM_CONDS,
'mode': 'position',
'stochastic_conditions': algorithm['stochastic_conditions'],
'policy_coeff': 1e0,
'sample_on_policy': True,
'hist_len': 3,
'take_optimal_sample': True,
'num_rollouts': 10,
'max_tree_depth': 5 + no*2,
'branching_factor': 4,
'opt_wt': algorithm['opt_wt'],
'fail_value': algorithm['fail_value'],
'lr': 1e-3,
'solver_type': 'adam', #'rmsprop',
'cost_wp_mult': cost_wp_mult,
'train_iterations': 50,
'weight_decay': 1e-3,
'prim_weight_decay': 1e-3,
'val_weight_decay': 1e-3,
'batch_size': 500,
'n_layers': 2,
'prim_n_layers': 1,
'val_n_layers': 1,
'dim_hidden': [32, 32],
'prim_dim_hidden': [32],
'val_dim_hidden': [32],
'n_traj_centers': algorithm['n_traj_centers'],
'traj_opt_steps': NUM_TRAJ_OPT_STEPS,
'pretrain_steps': NUM_PRETRAIN_STEPS,
'pretrain_traj_opt_steps': NUM_PRETRAIN_TRAJ_OPT_STEPS,
'on_policy': True,
# New for multiprocess, transfer to sequential version as well.
'n_optimizers': N_OPTIMIZERS,
'n_rollout_servers': N_ROLLOUT_SERVERS,
'n_alg_servers': N_ALG_SERVERS,
'base_weight_dir': 'sawyer_',
'policy_out_coeff': algorithm['policy_out_coeff'],
'policy_inf_coeff': algorithm['policy_inf_coeff'],
'max_sample_queue': 5e2,
'max_opt_sample_queue': 10,
'task_map_file': prob.mapping_file,
'prob': prob,
'get_vector': prob.get_vector,
'obj_type': 'can',
'num_objs': no,
'num_targs': nt,
'attr_map': ATTRMAP,
'agent_type': RobotAgent,
'mp_solver_type': RobotSolver,
'll_solver_type': RobotSolver,
'update_size': 2000,
'prim_update_size': 5000,
'val_update_size': 1000,
'use_local': True,
'n_dirs': N_DIRS,
'domain': 'sawyer',
'perturb_steps': 3,
'mcts_early_stop_prob': 0.5,
'hl_timeout': HL_TIMEOUT,
'multi_policy': False,
'image_width': 107,
'image_height': 80,
'image_channels': 3,
'opt_prob': 1.,
'opt_smooth': False,
'share_buffer': True,
'split_nets': False,
'split_mcts_alg': True,
'robot_name': 'sawyer',
'ctrl_mode': 'joint_angle',
'visual_cameras': [0],
'state_include': [utils.STATE_ENUM],
'obs_include': [utils.TASK_ENUM,
#utils.OBJ_POSE_ENUM,
#utils.TARG_POSE_ENUM,
utils.END_POSE_ENUM,
#utils.RIGHT_ENUM,
utils.RIGHT_GRIPPER_ENUM,
# utils.DONE_ENUM,
],
'prim_obs_include': [
# utils.DONE_ENUM,
# utils.STATE_ENUM,
#utils.GOAL_ENUM,
utils.ONEHOT_GOAL_ENUM,
],
'val_obs_include': [utils.ONEHOT_GOAL_ENUM,
],
'prim_out_include': list(prob.get_prim_choices().keys()),
'sensor_dims': {
utils.OBJ_POSE_ENUM: 3,
utils.TARG_POSE_ENUM: 3,
utils.LIDAR_ENUM: N_DIRS,
utils.EE_ENUM: 3,
utils.RIGHT_EE_POS_ENUM: 3,
utils.RIGHT_EE_ROT_ENUM: 3,
utils.END_POSE_ENUM: 3,
utils.GRIPPER_ENUM: 1,
utils.GOAL_ENUM: 3*no,
utils.ONEHOT_GOAL_ENUM: no*(prob.n_aux + len(prob.END_TARGETS)),
utils.INGRASP_ENUM: no,
utils.TRUETASK_ENUM: 2,
utils.TRUEOBJ_ENUM: no,
utils.TRUETARG_ENUM: len(prob.END_TARGETS),
utils.ATGOAL_ENUM: no,
utils.FACTOREDTASK_ENUM: len(list(prob.get_prim_choices().keys())),
utils.RIGHT_ENUM: 7,
utils.RIGHT_GRIPPER_ENUM: 1,
# utils.INIT_OBJ_POSE_ENUM: 2,
},
'visual': False,
'time_limit': TIME_LIMIT,
'success_to_replace': 1,
'steps_to_replace': no * 50,
'curric_thresh': -1,
'n_thresh': -1,
'expand_process': False,
'her': False,
'prim_decay': 0.95,
'prim_first_wt': 1e1,
}
for o in range(no):
config['sensor_dims'][utils.OBJ_DELTA_ENUMS[o]] = 3
config['sensor_dims'][utils.OBJ_ENUMS[o]] = 3
config['sensor_dims'][utils.TARG_ENUMS[o]] = 3
config['prim_obs_include'].append(utils.OBJ_DELTA_ENUMS[o])
config['prim_obs_include'].append(utils.TARG_ENUMS[o])
return config
config = refresh_config()
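
# Hedged usage sketch (added; not in the original file): refresh_config can
# be re-invoked to rebuild the hyperparameter dict for a different task
# size; note that max_tree_depth scales as 5 + 2*num_objs.
#
# two_obj_config = refresh_config(no=2, nt=2)
# assert two_obj_config['num_objs'] == 2
# assert two_obj_config['max_tree_depth'] == 9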
avg_line_length: 29.41195 | max_line_length: 83 | alphanum_fraction: 0.592003

hexsha: 79491b2ff5476ebf959fc237efa01be12e1e8c3b | size: 1,989 | ext: py | lang: Python
max_stars_repo_path: utils.py | max_stars_repo_name: bcarlier75/ft_linear_regression | max_stars_repo_head_hexsha: b966c8432855873ff273ad9b50a638e290edb407 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-05-07T05:07:30.000Z | max_stars_repo_stars_event_max_datetime: 2021-05-07T05:07:30.000Z
max_issues_repo_path: utils.py | max_issues_repo_name: bcarlier75/ft_linear_regression | max_issues_repo_head_hexsha: b966c8432855873ff273ad9b50a638e290edb407 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: utils.py | max_forks_repo_name: bcarlier75/ft_linear_regression | max_forks_repo_head_hexsha: b966c8432855873ff273ad9b50a638e290edb407 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from sys import argv
from typing import List  # explicit import for the List annotations below

from maths import *
import matplotlib.pyplot as plt
import csv
def print_usage():
print(f'Usage: python train.py [-h] [-p] [-m]')
print(f'\t -h : display this message')
print(f'\t -p : display plot')
print(f'\t -m : display model metrics')
def check_args():
flag_plot = 0
flag_metrics = 0
for i in range(1, len(argv)):
if argv[i] == '-h':
print_usage()
return -1, -1
elif argv[i] == '-p':
flag_plot = 1
elif argv[i] == '-m':
flag_metrics = 1
return flag_plot, flag_metrics
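
# Illustrative note (added): "python train.py -p -m" yields (1, 1), while
# "-h" prints the usage text and returns (-1, -1) so callers can exit early.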
def get_data():
mileage = []
price = []
with open('csv/data.csv', 'r') as f:
for index, line in enumerate(f):
if index != 0:
tab = line.rstrip('\n').split(',')
try:
mileage.append(float(tab[0]))
price.append(float(tab[1]))
except ValueError:
                    print('Dataset wrongly formatted!')
return -1, -1
return mileage, price
def get_thetas():
tab = [0, 0]
with open('csv/thetas_file.csv') as f:
reader = csv.reader(f)
for row in reader:
row[0] = float(row[0])
row[1] = float(row[1])
return row
return tab
def write_thetas(theta_0: float, theta_1: float):
with open('csv/thetas_file.csv', 'w') as f:
f.write(str(theta_0) + ',' + str(theta_1))
def display_plot(mileage: List[float], price: List[float], mse: List[float]):
plt.plot(mileage, mse, c='aquamarine')
plt.scatter(mileage, price, s=10, c='navy')
plt.xlabel('mileage')
plt.ylabel('price')
plt.show()
def display_metrics(theta_0: float, theta_1: float, mileage: List[float], price: List[float]):
rsq = r_squared(theta_0, theta_1, mileage, price)
print(f'Coefficient : {theta_1:.8f}')
print(f'Intercept : {theta_0:.8f}')
print(f'R_squared : {rsq}')
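
# Hedged example (added; assumes csv/data.csv and csv/thetas_file.csv exist
# as laid out in this repo, and that maths provides r_squared):
if __name__ == '__main__':
    mileage, price = get_data()
    if mileage != -1:
        theta_0, theta_1 = get_thetas()
        display_metrics(theta_0, theta_1, mileage, price)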
avg_line_length: 27.246575 | max_line_length: 94 | alphanum_fraction: 0.547009