text string | size int64 | token_count int64 |
|---|---|---|
# This file is part of spot_motion_monitor.
#
# Developed for LSST System Integration, Test and Commissioning.
#
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
from PyQt5.QtWidgets import QDialogButtonBox
from spot_motion_monitor.utils import AutoscaleState
from spot_motion_monitor.views import PlotConfigurationDialog
class TestPlotConfigurationDialog:
    """Unit tests for the PlotConfigurationDialog widget."""

    def test_parametersAfterConstruction(self, qtbot):
        """A freshly constructed dialog exposes both configuration tabs."""
        dialog = PlotConfigurationDialog()
        qtbot.addWidget(dialog)
        dialog.show()
        assert dialog.tabWidget.count() == 2

    def test_setPlotConfiguration(self, qtbot, mocker):
        """setPlotConfiguration forwards each config dict to its tab exactly once."""
        dialog = PlotConfigurationDialog()
        centroidSetConfig = mocker.patch.object(dialog.centroidPlotConfigTab, 'setConfiguration')
        psdSetConfig = mocker.patch.object(dialog.psdPlotConfigTab, 'setConfiguration')
        qtbot.addWidget(dialog)
        dialog.show()

        centroidConfig = {
            'xCentroid': {'autoscale': AutoscaleState.OFF.name, 'pixelAddition': None,
                          'minimum': 10, 'maximum': 1000},
            'yCentroid': {'autoscale': AutoscaleState.ON.name, 'pixelAddition': None,
                          'minimum': None, 'maximum': None},
            'scatterPlot': {'numHistogramBins': 50},
        }
        psdConfig = {
            'waterfall': {'numBins': 15, 'colorMap': None},
            'xPSD': {'autoscale': True},
            'yPSD': {'autoscale': False, 'maximum': 1320.0},
        }
        dialog.setPlotConfiguration(centroidConfig, psdConfig)

        assert centroidSetConfig.call_count == 1
        assert psdSetConfig.call_count == 1

    def test_getPlotConfiguration(self, qtbot, mocker):
        """getPlotConfiguration queries each tab once and returns both configs."""
        dialog = PlotConfigurationDialog()
        centroidGetConfig = mocker.patch.object(dialog.centroidPlotConfigTab, 'getConfiguration')
        psdGetConfig = mocker.patch.object(dialog.psdPlotConfigTab, 'getConfiguration')
        qtbot.addWidget(dialog)
        dialog.show()

        centroidConfig, psdConfig = dialog.getPlotConfiguration()

        assert centroidGetConfig.call_count == 1
        assert psdGetConfig.call_count == 1
        assert centroidConfig is not None
        assert psdConfig is not None

    def test_validInputFromTabs(self, qtbot):
        """The OK button enables/disables as the tab inputs become (in)valid."""
        dialog = PlotConfigurationDialog()
        qtbot.addWidget(dialog)
        dialog.show()

        okButton = dialog.buttonBox.button(QDialogButtonBox.Ok)
        dialog.centroidPlotConfigTab.pixelAdditionXLineEdit.setText(str(-1))
        assert okButton.isEnabled() is False
        dialog.centroidPlotConfigTab.pixelAdditionXLineEdit.setText(str(10))
        assert okButton.isEnabled()
        dialog.psdPlotConfigTab.waterfallNumBinsLineEdit.setText(str(0))
        assert okButton.isEnabled() is False
| 3,121 | 923 |
# -*- coding: utf-8 -*-
"""
pipelines
~~~~~~~~~
:copyright: (c) 2017-18 by Wendell Hu.
:license: MIT, see LICENSE for more details.
"""
from scrapy.exceptions import DropItem
from .db import spider_session_generator, RawArticle
class ArticlePipeline(object):
    """Persist article items into the database.

    Items missing a title or content are dropped; valid items are stored
    as ``RawArticle`` rows through a short-lived session.
    """

    def __init__(self):
        # Factory producing a fresh session for each processed item.
        self.spider_session_generator = spider_session_generator

    def process_item(self, item, spider):
        """Validate *item* and persist it; raise DropItem when invalid.

        Returns the item unchanged so later pipeline stages can run.
        """
        if item.get('title', None) is None:
            raise DropItem('Article doesn\'t have a title.')
        title = item.get('title')[0]
        if title:
            title = title.strip()
        uri = item.get('uri')[0]
        content = item.get('content')[0]
        if content:
            content = content.strip()
        source = item.get('source')[0]
        crawled_at = item.get('crawled_at')[0]
        # published_at / editor / published_time are scraped but not stored yet.
        if title is None or title == '' or content is None or content == '':
            raise DropItem('Article doesn\'t have valid information.')
        session = self.spider_session_generator()
        try:
            session.add(RawArticle(title=title, uri=uri, source=source,
                                   crawled_at=crawled_at, content=content))
            session.commit()
        except Exception:
            # Don't leave a dirty transaction behind when the insert fails.
            session.rollback()
            raise
        finally:
            # Always release the session, even on failure (was leaked before).
            session.close()
        #: return the item for any other after-processing
        return item
| 1,586 | 456 |
# 2014 - Bibek Kafle & Roland Shoemaker
# 2015-2017 - Nikolas Nyby
# Port of @jgm's commonmark.js implementation of the CommonMark spec.
# Basic usage:
#
# import commonmark
# parser = commonmark.Parser()
# renderer = commonmark.HtmlRenderer()
# print(renderer.render(parser.parse('Hello *world*')))
from __future__ import absolute_import, unicode_literals
from commonmark.blocks import Parser
from commonmark.dump import dumpAST, dumpJSON
from commonmark.render.html import HtmlRenderer
from commonmark.render.rst import ReStructuredTextRenderer
def commonmark(text, format="html"):
    """Render CommonMark into HTML, JSON, AST or reStructuredText.

    Optional keyword arguments:

    * format: 'html' (default), 'json', 'ast' or 'rst'

    >>> commonmark("*hello!*")
    '<p><em>hello</em></p>\\n'
    """
    # Validate the format up front so a bad value fails before the
    # (comparatively expensive) parse is done. The message previously
    # omitted 'rst' even though 'rst' is accepted.
    if format not in ["html", "json", "ast", "rst"]:
        raise ValueError("format must be 'html', 'json', 'ast' or 'rst'")
    parser = Parser()
    ast = parser.parse(text)
    if format == "html":
        renderer = HtmlRenderer()
        return renderer.render(ast)
    if format == "json":
        return dumpJSON(ast)
    if format == "ast":
        return dumpAST(ast)
    # Only 'rst' remains after the validation above.
    renderer = ReStructuredTextRenderer()
    return renderer.render(ast)
| 1,268 | 401 |
from rest_framework import viewsets
from rest_framework.response import Response
from crowdsourcing.serializers.accountModel import *
from crowdsourcing.models import *
from crowdsourcing.permissions.util import *
class AccountModelViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for ``AccountModel`` records."""

    queryset = AccountModel.objects.all()
    serializer_class = AccountModelSerializer
    permission_classes = [IsOwnerOrReadOnly]

    def list(self, request, *args, **kwargs):
        """Return every account; fall back to an empty list on failure.

        NOTE(review): swallowing the exception hides real errors from
        clients — consider logging it before returning the empty response.
        """
        try:
            accounts = AccountModel.objects.all()
            serializer = AccountModelSerializer(accounts, many=True)
            return Response(serializer.data)
        except Exception:
            # A bare ``except:`` would also trap SystemExit/KeyboardInterrupt;
            # restrict to Exception so process-control signals propagate.
            return Response([])
| 686 | 177 |
# -*- coding: utf-8 -*-
import datetime
import math
from django.db import connection
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.db.models import Q, Max
from django.conf import settings
from ktapp import models
from ktapp import utils as kt_utils
from ktapp.helpers import filmlist
from ktapp import texts
from ktapp import sqls as kt_sqls
# Page sizes for the paginated profile subpages.
COMMENTS_PER_PAGE = 100
MESSAGES_PER_PAGE = 50
FILMS_PER_PAGE = 100
# Presumably the floor year for period bucketing (the user_taste SQL
# hard-codes 1920 for the same cutoff) — TODO confirm and unify.
MINIMUM_YEAR = 1920
# CSS tab width (percent, as a string) for the profile tab bar; the key is
# whether the viewer sees the extra messages tab (logged in and looking at
# someone else's profile).
USER_PROFILE_TAB_WIDTH = {
    True: '11', # 1/9
    False: '12.3', # 1/8
}
def _get_user_profile_numbers(request, selected_user):
    """Collect the counters shown in the profile header.

    Returns a 6-tuple: (ratings, comments, wishes, toplists, messages,
    articles). The message count is looked up only when the viewer is
    logged in and is not viewing their own profile; otherwise it is 0.
    """
    viewer = request.user
    if viewer.is_authenticated() and viewer.id != selected_user.id:
        message_count = models.MessageCountCache.get_count(owned_by=viewer, partner=selected_user)
    else:
        message_count = 0
    wish_total = (selected_user.number_of_wishes_yes
                  + selected_user.number_of_wishes_no
                  + selected_user.number_of_wishes_get)
    article_total = (selected_user.number_of_reviews
                     + selected_user.number_of_bios
                     + selected_user.number_of_links)
    return (
        selected_user.number_of_ratings,
        selected_user.number_of_comments,
        wish_total,
        selected_user.number_of_toplists,
        message_count,
        article_total,
    )
def user_profile(request, id, name_slug):
    """Render the main profile tab for the user with primary key *id*.

    Gathers header counters, taste-profile segments (genres, countries,
    years), viewer similarity, favourites and the moderation history.
    """
    selected_user = get_object_or_404(models.KTUser, pk=id)
    number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
    number_of_vapiti_votes = selected_user.vote_set.filter(film__vapiti_year=settings.VAPITI_YEAR).count()
    # latest_votes / latest_comments are denormalized comma-separated id
    # lists on the user row; keep at most the 10 most recent ids.
    latest_votes = [int(v) for v in selected_user.latest_votes.split(',') if v != ''][:10]
    latest_comments = [int(c) for c in selected_user.latest_comments.split(',') if c != ''][:10]
    # profile: taste segments split into "major" and "minor" buckets per
    # dimension based on the precomputed segment score.
    profile = {
        'major_genres': [],
        'minor_genres': [],
        'major_countries': [],
        'minor_countries': [],
        'major_years': [],
        'minor_years': [],
    }
    # NOTE(review): the raw SQL below interpolates selected_user.id via
    # str.format; it is an integer primary key here, so not injectable,
    # but parameterized queries would be safer if that ever changes.
    for keyword in models.Keyword.objects.raw('''
        SELECT k.*, ups.score AS ups_score
        FROM ktapp_userprofilesegment ups
        INNER JOIN ktapp_profilesegment ps ON ps.id = ups.segment_id AND ps.dimension = 'genre'
        LEFT JOIN ktapp_keyword k ON k.id = ps.segment
        WHERE ups.user_id = {user_id} AND ups.score >= 50
        ORDER BY ups.score DESC;
    '''.format(user_id=selected_user.id)):
        # Genres: score >= 100 counts as "major", 50..99 as "minor".
        if keyword.ups_score >= 100:
            profile['major_genres'].append(keyword)
        else:
            profile['minor_genres'].append(keyword)
    for keyword in models.Keyword.objects.raw('''
        SELECT k.*, ups.score AS ups_score
        FROM ktapp_userprofilesegment ups
        INNER JOIN ktapp_profilesegment ps ON ps.id = ups.segment_id AND ps.dimension = 'country'
        LEFT JOIN ktapp_keyword k ON k.id = ps.segment
        WHERE ups.user_id = {user_id} AND ups.score >= 100
        ORDER BY ups.score DESC;
    '''.format(user_id=selected_user.id)):
        # Countries use a higher threshold: >= 200 major, 100..199 minor.
        if keyword.ups_score >= 200:
            profile['major_countries'].append(keyword)
        else:
            profile['minor_countries'].append(keyword)
    for year in models.UserProfileSegment.objects.raw('''
        SELECT ups.*, ps.segment as ps_segment
        FROM ktapp_userprofilesegment ups
        INNER JOIN ktapp_profilesegment ps ON ps.id = ups.segment_id AND ps.dimension = 'year'
        LEFT JOIN ktapp_keyword k ON k.id = ps.segment
        WHERE ups.user_id = {user_id} AND ups.score >= 50
        ORDER BY ups.score DESC;
    '''.format(user_id=selected_user.id)):
        # ps_segment holds the year key; map it to its display label.
        year_str = texts.LONG_YEARS[int(year.ps_segment)]
        if year.score >= 100:
            profile['major_years'].append(year_str)
        else:
            profile['minor_years'].append(year_str)
    similarity = None
    similarity_per_genre = []
    if request.user.is_authenticated():
        cursor = connection.cursor()
        cursor.execute(kt_sqls.SIMILARITY, (request.user.id, selected_user.id))
        row = cursor.fetchone()
        if row:
            # The whole fetched row tuple is handed to the template as-is.
            similarity = row
        cursor.execute(kt_sqls.SIMILARITY_PER_GENRE, (request.user.id, selected_user.id))
        for row in cursor.fetchall():
            similarity_per_genre.append(row)
    ignore_pm, ignore_comment = False, False
    if request.user.is_authenticated():
        ignore_pm, ignore_comment = models.IgnoreUser.get(who=request.user, whom=selected_user)
    return render(request, 'ktapp/user_profile_subpages/user_profile.html', {
        'active_tab': 'profile',
        'selected_user': selected_user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_messages': number_of_messages,
        'number_of_articles': number_of_articles,
        'number_of_vapiti_votes': number_of_vapiti_votes,
        'vapiti_weight': number_of_votes + 25 * number_of_vapiti_votes,
        'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
        'latest_votes': selected_user.vote_set.filter(id__in=latest_votes).select_related('film').order_by('-when', '-id'),
        'latest_comments': models.Comment.objects.filter(id__in=latest_comments).select_related('film', 'topic', 'poll', 'created_by', 'reply_to', 'reply_to__created_by'),
        'myfav': models.Follow.objects.filter(who=request.user, whom=selected_user).count() if request.user.is_authenticated() else 0,
        'fav_count': models.Follow.objects.filter(whom=selected_user).count(),
        'ignore_pm': ignore_pm,
        'ignore_comment': ignore_comment,
        'profile': profile,
        'fav_directors': list(models.Artist.objects.raw('''
            SELECT a.*
            FROM ktapp_artist a
            INNER JOIN ktapp_userfavourite uf ON uf.fav_id = a.id
            WHERE uf.user_id = %s AND uf.domain = %s
            ORDER BY a.name, a.id
        ''', [selected_user.id, models.UserFavourite.DOMAIN_DIRECTOR])),
        'fav_actors': list(models.Artist.objects.raw('''
            SELECT a.*
            FROM ktapp_artist a
            INNER JOIN ktapp_userfavourite uf ON uf.fav_id = a.id
            WHERE uf.user_id = %s AND uf.domain = %s
            ORDER BY a.name, a.id
        ''', [selected_user.id, models.UserFavourite.DOMAIN_ACTOR])),
        'fav_genres': list(models.Keyword.objects.raw('''
            SELECT k.*
            FROM ktapp_keyword k
            INNER JOIN ktapp_userfavourite uf ON uf.fav_id = k.id
            WHERE uf.user_id = %s AND uf.domain = %s AND k.keyword_type = %s
            ORDER BY k.name, k.id
        ''', [selected_user.id, models.UserFavourite.DOMAIN_GENRE, models.Keyword.KEYWORD_TYPE_GENRE])),
        'fav_countries': list(models.Keyword.objects.raw('''
            SELECT k.*
            FROM ktapp_keyword k
            INNER JOIN ktapp_userfavourite uf ON uf.fav_id = k.id
            WHERE uf.user_id = %s AND uf.domain = %s AND k.keyword_type = %s
            ORDER BY k.name, k.id
        ''', [selected_user.id, models.UserFavourite.DOMAIN_COUNTRY, models.Keyword.KEYWORD_TYPE_COUNTRY])),
        'similarity': similarity,
        'similarity_per_genre': similarity_per_genre,
        'permission_ban_user': kt_utils.check_permission('ban_user', request.user),
        'permission_see_core': kt_utils.check_permission('see_core', request.user),
        'permission_set_game_master': kt_utils.check_permission('set_game_master', request.user),
        # Moderation history: most recent ban-related actions first.
        'list_of_bans': [
            (
                ban.created_at,
                texts.BAN_TYPES.get(ban.action),
                ban.created_by,
            )
            for ban in models.Change.objects.filter(
                action__in=['ban', 'unban', 'warning', 'temp_ban_1d', 'temp_ban_3d', 'temp_ban_7d'],
                object='user:%d' % selected_user.id,
            ).order_by('-created_at')
        ],
    })
def user_taste(request, id, name_slug, domain):
    """Render the taste tab: the user's rating statistics grouped by *domain*.

    *domain* is the Hungarian URL slug: 'rendezok' (directors), 'mufajok'
    (genres), 'orszagok' (countries) or 'korszakok' (periods); any other
    value is a 404.
    """
    def dictfetchall(cursor):
        # Return every row from the cursor as a dict keyed by column name.
        columns = [col[0] for col in cursor.description]
        return [dict(zip(columns, row)) for row in cursor.fetchall()]
    selected_user = get_object_or_404(models.KTUser, pk=id)
    number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
    cursor = connection.cursor()
    if domain == 'rendezok':
        active_subtab = 'directors'
        # Per-director rating stats; keep directors with >= 5 rated films,
        # or >= 3 rated films covering at least half of their filmography.
        cursor.execute('''
            SELECT
            a.id, a.slug_cache, a.name,
            AVG(v.rating) AS average_rating,
            ROUND(10.0 * AVG(v.rating)) AS average_rating_sort_value,
            COUNT(1) AS number_of_ratings,
            ROUND(100.0 * COUNT(1) / a.number_of_films_as_director) AS film_ratio,
            SUM(v.rating = 1) AS number_of_ratings_1,
            SUM(v.rating = 2) AS number_of_ratings_2,
            SUM(v.rating = 3) AS number_of_ratings_3,
            SUM(v.rating = 4) AS number_of_ratings_4,
            SUM(v.rating = 5) AS number_of_ratings_5
            FROM ktapp_artist a
            INNER JOIN ktapp_filmartistrelationship fa ON fa.artist_id = a.id AND fa.role_type = 'D'
            INNER JOIN ktapp_vote v ON v.film_id = fa.film_id AND v.user_id = %s
            GROUP BY a.id
            HAVING COUNT(1) >= 5 OR (2*COUNT(1)>=MIN(a.number_of_films_as_director) AND COUNT(1)>=3)
            ORDER BY average_rating DESC, number_of_ratings DESC, name, id
        ''', [selected_user.id])
    elif domain == 'mufajok':
        active_subtab = 'genres'
        # Per-genre rating stats for genres with at least 5 rated films.
        cursor.execute('''
            SELECT
            k.id, k.slug_cache, k.name,
            AVG(v.rating) AS average_rating,
            ROUND(10.0 * AVG(v.rating)) AS average_rating_sort_value,
            COUNT(1) AS number_of_ratings,
            SUM(v.rating = 1) AS number_of_ratings_1,
            SUM(v.rating = 2) AS number_of_ratings_2,
            SUM(v.rating = 3) AS number_of_ratings_3,
            SUM(v.rating = 4) AS number_of_ratings_4,
            SUM(v.rating = 5) AS number_of_ratings_5
            FROM ktapp_keyword k
            INNER JOIN ktapp_filmkeywordrelationship fk ON fk.keyword_id = k.id
            INNER JOIN ktapp_vote v ON v.film_id = fk.film_id AND v.user_id = %s
            WHERE k.keyword_type = 'G'
            GROUP BY k.id
            HAVING COUNT(1) >= 5
            ORDER BY average_rating DESC, number_of_ratings DESC, name, id
        ''', [selected_user.id])
    elif domain == 'orszagok':
        active_subtab = 'countries'
        # Per-country rating stats; same shape as genres, keyword_type 'C'.
        cursor.execute('''
            SELECT
            k.id, k.slug_cache, k.name,
            AVG(v.rating) AS average_rating,
            ROUND(10.0 * AVG(v.rating)) AS average_rating_sort_value,
            COUNT(1) AS number_of_ratings,
            SUM(v.rating = 1) AS number_of_ratings_1,
            SUM(v.rating = 2) AS number_of_ratings_2,
            SUM(v.rating = 3) AS number_of_ratings_3,
            SUM(v.rating = 4) AS number_of_ratings_4,
            SUM(v.rating = 5) AS number_of_ratings_5
            FROM ktapp_keyword k
            INNER JOIN ktapp_filmkeywordrelationship fk ON fk.keyword_id = k.id
            INNER JOIN ktapp_vote v ON v.film_id = fk.film_id AND v.user_id = %s
            WHERE k.keyword_type = 'C'
            GROUP BY k.id
            HAVING COUNT(1) >= 5
            ORDER BY average_rating DESC, number_of_ratings DESC, period
        ''', [selected_user.id]) if False else cursor.execute('''
            SELECT
            k.id, k.slug_cache, k.name,
            AVG(v.rating) AS average_rating,
            ROUND(10.0 * AVG(v.rating)) AS average_rating_sort_value,
            COUNT(1) AS number_of_ratings,
            SUM(v.rating = 1) AS number_of_ratings_1,
            SUM(v.rating = 2) AS number_of_ratings_2,
            SUM(v.rating = 3) AS number_of_ratings_3,
            SUM(v.rating = 4) AS number_of_ratings_4,
            SUM(v.rating = 5) AS number_of_ratings_5
            FROM ktapp_keyword k
            INNER JOIN ktapp_filmkeywordrelationship fk ON fk.keyword_id = k.id
            INNER JOIN ktapp_vote v ON v.film_id = fk.film_id AND v.user_id = %s
            WHERE k.keyword_type = 'C'
            GROUP BY k.id
            HAVING COUNT(1) >= 5
            ORDER BY average_rating DESC, number_of_ratings DESC, name, id
        ''', [selected_user.id])
    elif domain == 'korszakok':
        active_subtab = 'periods'
        # Per-decade rating stats; everything before 1920 is lumped into
        # one pre-1920 bucket (period 1900, label range ..1919).
        cursor.execute('''
            SELECT
            CASE
            WHEN f.year < 1920 THEN 1900
            ELSE FLOOR(f.year / 10) * 10
            END AS period,
            CASE
            WHEN f.year < 1920 THEN ''
            ELSE CAST((FLOOR(f.year / 10) * 10) AS CHAR)
            END AS period_min,
            CASE
            WHEN f.year < 1920 THEN 1919
            ELSE FLOOR(f.year / 10) * 10 + 9
            END AS period_max,
            AVG(v.rating) AS average_rating,
            ROUND(10.0 * AVG(v.rating)) AS average_rating_sort_value,
            COUNT(1) AS number_of_ratings,
            SUM(v.rating = 1) AS number_of_ratings_1,
            SUM(v.rating = 2) AS number_of_ratings_2,
            SUM(v.rating = 3) AS number_of_ratings_3,
            SUM(v.rating = 4) AS number_of_ratings_4,
            SUM(v.rating = 5) AS number_of_ratings_5
            FROM ktapp_film f
            INNER JOIN ktapp_vote v ON v.film_id = f.id AND v.user_id = %s
            WHERE f.year IS NOT NULL
            GROUP BY period
            HAVING COUNT(1) >= 5
            ORDER BY average_rating DESC, number_of_ratings DESC, period
        ''', [selected_user.id])
    else:
        raise Http404
    list_of_items = dictfetchall(cursor)
    return render(request, 'ktapp/user_profile_subpages/user_taste.html', {
        'active_tab': 'taste',
        'active_subtab': active_subtab,
        'selected_user': selected_user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_messages': number_of_messages,
        'number_of_articles': number_of_articles,
        'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
        'list_of_items': list_of_items,
        'years_as': [1920, 1930, 1960, 1980, 2020, 2030],
    })
def user_films(request, id, name_slug):
    """Render the films tab: all films seen by the user, filtered and paged."""
    selected_user = get_object_or_404(models.KTUser, pk=id)
    number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
    # ?o= carries the sort column; a leading '-' means descending.
    ordering_str = kt_utils.strip_whitespace(request.GET.get('o', ''))
    if ordering_str == '':
        ordering_str = '-other_rating_when'
    if ordering_str[0] == '-':
        ordering = (ordering_str[1:], 'DESC')
    else:
        ordering = (ordering_str, 'ASC')
    # Restrict to films seen by the selected user, plus any filters from
    # the query string (title, year, genre, ...).
    filters = [('seen_by_id', selected_user.id)] + filmlist.get_filters_from_request(request)
    films, nice_filters = filmlist.filmlist(
        user_id=request.user.id,
        filters=filters,
        ordering=ordering,
        films_per_page=None,
    )
    # Rebuild the normalized filters into query-string params so paging
    # links preserve the active filters.
    querystring = {}
    for filter_type, filter_value in nice_filters:
        if filter_type in {'title', 'year', 'director', 'actor', 'country', 'genre', 'keyword', 'my_rating', 'other_rating', 'my_wish'}:
            querystring[filter_type] = filter_value
        elif filter_type == 'number_of_ratings':
            # Range filters are stored as 'min-max'; split back into parts.
            min_value, max_value = filter_value.split('-')
            querystring['num_rating_min'] = kt_utils.coalesce(min_value, '')
            querystring['num_rating_max'] = kt_utils.coalesce(max_value, '')
        elif filter_type == 'average_rating':
            min_value, max_value = filter_value.split('-')
            querystring['avg_rating_min'] = kt_utils.coalesce(min_value, '')
            querystring['avg_rating_max'] = kt_utils.coalesce(max_value, '')
        elif filter_type == 'fav_average_rating':
            min_value, max_value = filter_value.split('-')
            querystring['fav_avg_rating_min'] = kt_utils.coalesce(min_value, '')
            querystring['fav_avg_rating_max'] = kt_utils.coalesce(max_value, '')
    # Python 2 idiom (iteritems); consistent with the rest of this module.
    qs_combined = '&'.join('%s=%s' % (key, val) for key, val in querystring.iteritems())
    if qs_combined != '':
        qs_combined = '&' + qs_combined
    films = list(films)
    result_count = len(films)
    # Clamp the requested page into [1, max_pages]; bad input falls back to 0.
    try:
        p = int(request.GET.get('p', 0))
    except ValueError:
        p = 0
    max_pages = int(math.ceil(1.0 * result_count / FILMS_PER_PAGE))
    if max_pages == 0:
        max_pages = 1
    if p == 0:
        p = 1
    if p > max_pages:
        p = max_pages
    films = films[(p-1) * FILMS_PER_PAGE:p * FILMS_PER_PAGE]
    return render(request, 'ktapp/user_profile_subpages/user_films.html', {
        'active_tab': 'films',
        'selected_user': selected_user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_messages': number_of_messages,
        'number_of_articles': number_of_articles,
        'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
        'result_count': result_count,
        'querystring': querystring,
        'qs_combined': qs_combined,
        'ordering_str': ordering_str,
        'p': p,
        'max_pages': max_pages,
        'films': films,
    })
def user_comments(request, id, name_slug):
    """Render the comments tab: the user's comments, newest first, paged."""
    selected_user = get_object_or_404(models.KTUser, pk=id)
    number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
    # Guard against non-numeric ?p= values (consistent with user_films and
    # user_messages) instead of letting ValueError surface as a 500.
    try:
        p = int(request.GET.get('p', 0))
    except ValueError:
        p = 0
    # Page 1 is canonicalized to the bare URL without ?p=.
    if p == 1:
        return HttpResponseRedirect(reverse('user_comments', args=(selected_user.id, selected_user.slug_cache)))
    max_pages = int(math.ceil(1.0 * selected_user.number_of_comments / COMMENTS_PER_PAGE))
    if max_pages == 0:
        max_pages = 1
    if p == 0:
        p = 1
    if p > max_pages:
        return HttpResponseRedirect(reverse('user_comments', args=(selected_user.id, selected_user.slug_cache)) + '?p=' + str(max_pages))
    comments_qs = selected_user.comment_set.select_related('film', 'topic', 'poll', 'reply_to', 'reply_to__created_by')
    if max_pages > 1:
        # Comments carry a per-user serial number; translate the page into a
        # serial-number window so pagination is an indexed range filter.
        first_comment = selected_user.number_of_comments - COMMENTS_PER_PAGE * (p - 1) - (COMMENTS_PER_PAGE - 1)
        last_comment = selected_user.number_of_comments - COMMENTS_PER_PAGE * (p - 1)
        comments = comments_qs.filter(serial_number_by_user__lte=last_comment, serial_number_by_user__gte=first_comment)
    else:
        comments = comments_qs.all()
    return render(request, 'ktapp/user_profile_subpages/user_comments.html', {
        'active_tab': 'comments',
        'selected_user': selected_user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_messages': number_of_messages,
        'number_of_articles': number_of_articles,
        'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
        'comments': comments.order_by('-created_at'),
        'p': p,
        'max_pages': max_pages,
    })
def user_wishlist(request, id, name_slug):
    """Render the wishlist tab for one wishlist type (yes / no / get)."""
    selected_user = get_object_or_404(models.KTUser, pk=id)
    number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
    # ?t= selects the wishlist flavour (Hungarian slugs); default is 'Y' (yes).
    wishlist_type = request.GET.get('t', 'igen')
    if wishlist_type == 'nem':
        wishlist_type = 'N'
    elif wishlist_type == 'szerez':
        wishlist_type = 'G'
    else:
        wishlist_type = 'Y'
    filters = [('wished_by_id', '%s:%s' % (wishlist_type, selected_user.id))] + filmlist.get_filters_from_request(request)
    films, nice_filters = filmlist.filmlist(
        user_id=request.user.id,
        filters=filters,
        ordering=('average_rating', 'DESC'),
        films_per_page=None,
    )
    # Rebuild the normalized filters into query-string params so links
    # preserve the active filters (same scheme as user_films).
    querystring = {}
    for filter_type, filter_value in nice_filters:
        if filter_type in {'title', 'year', 'director', 'actor', 'country', 'genre', 'keyword', 'my_rating', 'other_rating', 'my_wish'}:
            querystring[filter_type] = filter_value
        elif filter_type == 'number_of_ratings':
            min_value, max_value = filter_value.split('-')
            querystring['num_rating_min'] = kt_utils.coalesce(min_value, '')
            querystring['num_rating_max'] = kt_utils.coalesce(max_value, '')
        elif filter_type == 'average_rating':
            min_value, max_value = filter_value.split('-')
            querystring['avg_rating_min'] = kt_utils.coalesce(min_value, '')
            querystring['avg_rating_max'] = kt_utils.coalesce(max_value, '')
        elif filter_type == 'fav_average_rating':
            min_value, max_value = filter_value.split('-')
            querystring['fav_avg_rating_min'] = kt_utils.coalesce(min_value, '')
            querystring['fav_avg_rating_max'] = kt_utils.coalesce(max_value, '')
    # Keep the non-default wishlist type in the query string.
    if wishlist_type == 'N':
        querystring['t'] = 'nem'
    if wishlist_type == 'G':
        querystring['t'] = 'szerez'
    qs_combined = '&'.join('%s=%s' % (key, val) for key, val in querystring.iteritems())
    if qs_combined != '':
        qs_combined = '&' + qs_combined
    films = list(films)
    result_count = len(films)
    return render(request, 'ktapp/user_profile_subpages/user_wishlist.html', {
        'active_tab': 'wishlist',
        'selected_user': selected_user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_messages': number_of_messages,
        'number_of_articles': number_of_articles,
        'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
        'result_count': result_count,
        'querystring': querystring,
        'qs_combined': qs_combined,
        'films': films,
        'wishlist_type': wishlist_type,
        'number_of_wishes_yes': selected_user.number_of_wishes_yes,
        'number_of_wishes_no': selected_user.number_of_wishes_no,
        'number_of_wishes_get': selected_user.number_of_wishes_get,
    })
def user_toplists(request, id, name_slug):
    """Render the toplists tab: each toplist with its items, newest first."""
    selected_user = get_object_or_404(models.KTUser, pk=id)
    number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
    toplists = models.UserToplist.objects.filter(created_by=selected_user).order_by('-created_at')
    toplist_details = []
    for toplist in toplists:
        if toplist.toplist_type == models.UserToplist.TOPLIST_TYPE_FILM:
            # Film toplists: fetch items through filmlist so per-viewer
            # annotations are attached, ordered by their serial number.
            items, _ = filmlist.filmlist(
                user_id=request.user.id,
                filters=[('usertoplist_id', toplist.id)],
                ordering='serial_number',
                films_per_page=None,
            )
            toplist_list = []
            with_comments = False
            for item in items:
                toplist_list.append(item)
                if item.comment:
                    with_comments = True
        else:
            # Artist toplists (director/actor): read the raw toplist items.
            toplist_list = []
            with_comments = False
            for item in models.UserToplistItem.objects.filter(usertoplist=toplist).select_related('director', 'actor').order_by('serial_number'):
                toplist_list.append(item)
                if item.comment:
                    with_comments = True
        # with_comments tells the template whether to render a comment column.
        toplist_details.append((
            toplist,
            toplist_list,
            with_comments,
        ))
    return render(request, 'ktapp/user_profile_subpages/user_toplists.html', {
        'active_tab': 'toplists',
        'selected_user': selected_user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_messages': number_of_messages,
        'number_of_articles': number_of_articles,
        'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
        'toplist_details': toplist_details,
    })
def user_articles(request, id, name_slug):
    """Render the articles tab: reviews, bios and links merged, newest first.

    Each entry is a tuple whose first element is the creation timestamp and
    second a type tag ('R'eview, 'B'io, 'A'rticle/link). NOTE: the tuples
    have different lengths per type (5 for reviews/bios, 9 for links); the
    template presumably dispatches on the tag — verify before changing.
    """
    selected_user = get_object_or_404(models.KTUser, pk=id)
    number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
    articles = []
    for review in models.Review.objects.filter(created_by=selected_user).select_related('film'):
        articles.append((
            review.created_at,
            'R',
            review.film,
            None,
            review.snippet + '...',
        ))
    for bio in models.Biography.objects.filter(created_by=selected_user).select_related('artist'):
        articles.append((
            bio.created_at,
            'B',
            None,
            bio.artist,
            bio.snippet + '...',
        ))
    for article in models.Link.objects.filter(author=selected_user).select_related('film', 'artist'):
        articles.append((
            article.created_at,
            'A',
            article.film,
            article.artist,
            article.lead,
            article.url,
            article.name,
            article.link_domain,
            article.id,
        ))
    # Merge the three sources chronologically, newest first.
    articles.sort(key=lambda item: item[0], reverse=True)
    return render(request, 'ktapp/user_profile_subpages/user_articles.html', {
        'active_tab': 'articles',
        'selected_user': selected_user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_articles': number_of_articles,
        'number_of_messages': number_of_messages,
        'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
        'articles': articles,
    })
def user_activity(request, id, name_slug):
    """Render the activity tab: per-month and per-year vote/comment bars.

    Bar heights are percentages, scaled both within this user's own peak
    and against the site-wide maximum (dampened with a 0.3 exponent) so
    light and heavy users both get readable charts.
    """
    selected_user = get_object_or_404(models.KTUser, pk=id)
    number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
    cursor = connection.cursor()
    # Site-wide maxima, used to scale this user's bars relative to everyone.
    max_max_vote = models.KTUser.objects.all().aggregate(Max('number_of_ratings'))['number_of_ratings__max']
    max_max_comment = models.KTUser.objects.all().aggregate(Max('number_of_comments'))['number_of_comments__max']
    scale_vote = (1.0 * selected_user.number_of_ratings / max_max_vote)**0.3
    scale_comment = (1.0 * selected_user.number_of_comments / max_max_comment)**0.3
    # Build the month ('YYYY-MM') and year ('YYYY') axis labels from the
    # join date up to today, newest first.
    min_year = selected_user.date_joined.year
    max_year = datetime.date.today().year
    years = range(max_year, min_year - 1, -1)
    min_month = selected_user.date_joined.month
    max_month = datetime.date.today().month
    months = []
    if len(years) == 1:
        # Joined this year: only the months between joining and now.
        for month in range(max_month, min_month - 1, -1):
            months.append('%04d-%02d' % (years[0], month))
    else:
        for year in years:
            if year == max_year:
                # Current year: up to the current month only.
                for month in range(max_month, 0, -1):
                    months.append('%04d-%02d' % (year, month))
            elif year == min_year:
                # Join year: from the join month to December.
                for month in range(12, min_month - 1, -1):
                    months.append('%04d-%02d' % (year, month))
            else:
                # Full intermediate years.
                for month in range(12, 0, -1):
                    months.append('%04d-%02d' % (year, month))
    years = ['%04d' % y for y in years]
    # Counts keyed by 'YYYY-MM' ('m') and 'YYYY' ('y'); the SQL LEFT()
    # prefix of the timestamp column produces exactly those keys.
    vote_data = {
        'm': {},
        'y': {},
    }
    comment_data = {
        'm': {},
        'y': {},
    }
    max_vote = {
        'm': 0,
        'y': 0,
    }
    max_comment = {
        'm': 0,
        'y': 0,
    }
    cursor.execute('SELECT LEFT(`when`, 7) AS dt, COUNT(1) FROM ktapp_vote WHERE user_id = %s AND `when` IS NOT NULL GROUP BY dt', [selected_user.id])
    for row in cursor.fetchall():
        vote_data['m'][row[0]] = row[1]
        if row[1] > max_vote['m']:
            max_vote['m'] = row[1]
    cursor.execute('SELECT LEFT(`when`, 4) AS dt, COUNT(1) FROM ktapp_vote WHERE user_id = %s AND `when` IS NOT NULL GROUP BY dt', [selected_user.id])
    for row in cursor.fetchall():
        vote_data['y'][row[0]] = row[1]
        if row[1] > max_vote['y']:
            max_vote['y'] = row[1]
    cursor.execute('SELECT LEFT(created_at, 7) AS dt, COUNT(1) FROM ktapp_comment WHERE created_by_id = %s AND created_at IS NOT NULL GROUP BY dt', [selected_user.id])
    for row in cursor.fetchall():
        comment_data['m'][row[0]] = row[1]
        if row[1] > max_comment['m']:
            max_comment['m'] = row[1]
    cursor.execute('SELECT LEFT(created_at, 4) AS dt, COUNT(1) FROM ktapp_comment WHERE created_by_id = %s AND created_at IS NOT NULL GROUP BY dt', [selected_user.id])
    for row in cursor.fetchall():
        comment_data['y'][row[0]] = row[1]
        if row[1] > max_comment['y']:
            max_comment['y'] = row[1]
    # Tuples: (label, votes, comments, vote bar %, comment bar %).
    data_month = []
    for month in months:
        data_month.append((
            month,
            vote_data['m'].get(month, 0),
            comment_data['m'].get(month, 0),
            int(100.0 * scale_vote * vote_data['m'].get(month, 0) / max_vote['m']) if max_vote['m'] > 0 else 0,
            int(100.0 * scale_comment * comment_data['m'].get(month, 0) / max_comment['m']) if max_comment['m'] > 0 else 0,
        ))
    data_year = []
    for year in years:
        data_year.append((
            year,
            vote_data['y'].get(year, 0),
            comment_data['y'].get(year, 0),
            int(100.0 * scale_vote * vote_data['y'].get(year, 0) / max_vote['y']) if max_vote['y'] > 0 else 0,
            int(100.0 * scale_comment * comment_data['y'].get(year, 0) / max_comment['y']) if max_comment['y'] > 0 else 0,
        ))
    return render(request, 'ktapp/user_profile_subpages/user_activity.html', {
        'active_tab': 'activity',
        'selected_user': selected_user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_messages': number_of_messages,
        'number_of_articles': number_of_articles,
        'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
        'data_month': data_month,
        'data_year': data_year,
    })
@login_required()
def user_messages(request, id, name_slug):
    """Render the private-message thread between the viewer and this user."""
    selected_user = get_object_or_404(models.KTUser, pk=id)
    number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
    # Only private messages owned by the viewer, in either direction.
    messages_qs = models.Message.objects.filter(private=True).filter(owned_by=request.user).filter(
        Q(sent_by=selected_user)
        | Q(sent_to=selected_user)
    ).select_related('sent_by')
    # Guard against non-numeric ?p= values.
    try:
        p = int(request.GET.get('p', 0))
    except ValueError:
        p = 0
    # Page 1 is canonicalized to the bare URL without ?p=.
    if p == 1:
        return HttpResponseRedirect(reverse('user_messages', args=(selected_user.id, selected_user.slug_cache)))
    max_pages = int(math.ceil(1.0 * number_of_messages / MESSAGES_PER_PAGE))
    if max_pages == 0:
        max_pages = 1
    if p == 0:
        p = 1
    if p > max_pages:
        return HttpResponseRedirect(reverse('user_messages', args=(selected_user.id, selected_user.slug_cache)) + '?p=' + str(max_pages))
    return render(request, 'ktapp/user_profile_subpages/user_messages.html', {
        'active_tab': 'messages',
        'selected_user': selected_user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_messages': number_of_messages,
        'number_of_articles': number_of_articles,
        'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
        'messages': messages_qs.order_by('-sent_at')[(p-1) * MESSAGES_PER_PAGE:p * MESSAGES_PER_PAGE],
        'p': p,
        'max_pages': max_pages,
    })
@login_required()
def edit_profile(request):
    """Show and process the profile-edit form for the logged-in user.

    A POST with t=pic handles profile-picture upload or deletion; any other
    POST saves the textual profile fields and favourite lists, then redirects
    to ?next= (falling back to the user's own profile). A GET renders the form.
    """
    def set_fav(field_name, domain, get_object_function):
        # Sync the user's favourites in *domain* with the comma-separated names
        # posted in *field_name*: delete removed entries, create new ones,
        # leave unchanged entries untouched.
        old_items = set()
        for item in models.UserFavourite.objects.filter(user=request.user, domain=domain):
            old_items.add(item.fav_id)
        new_items = set()
        for name in kt_utils.strip_whitespace(request.POST.get(field_name, '')).split(','):
            name = kt_utils.strip_whitespace(name)
            if name:
                # Names that don't resolve to an object are silently dropped.
                item = get_object_function(name)
                if item:
                    new_items.add(item.id)
        for item_id in old_items - new_items:
            models.UserFavourite.objects.filter(user=request.user, domain=domain, fav_id=item_id).delete()
        for item_id in new_items - old_items:
            models.UserFavourite.objects.create(user=request.user, domain=domain, fav_id=item_id)
    # GET takes precedence over POST for the redirect target.
    next_url = request.GET.get('next', request.POST.get('next', reverse('user_profile', args=(request.user.id, request.user.slug_cache))))
    if request.POST:
        if request.POST.get('t', '') == 'pic':
            if request.POST.get('a', '') == 'del':
                # Delete the current picture (if any) and log the event.
                if request.user.profile_pic:
                    request.user.profile_pic.delete()
                request.user.profile_pic = None
                request.user.save()
                models.Event.objects.create(
                    user=request.user,
                    event_type=models.Event.EVENT_TYPE_DELETE_PROFILE_PIC,
                )
            else:
                # Upload branch: only acts when a file was actually posted.
                if 'img' in request.FILES:
                    picture = models.Picture.objects.create(
                        img=request.FILES['img'],
                        picture_type=models.Picture.PICTURE_TYPE_USER_PROFILE,
                        created_by=request.user,
                        user=request.user,
                    )
                    request.user.profile_pic = picture
                    request.user.save()
                    models.Event.objects.create(
                        user=request.user,
                        event_type=models.Event.EVENT_TYPE_UPLOAD_PROFILE_PIC,
                    )
            return HttpResponseRedirect(next_url)
        # Plain profile edit: normalize and save the posted fields.
        request.user.bio = request.POST.get('bio', '').strip()
        gender = request.POST.get('gender', '')
        if gender not in {'U', 'M', 'F'}:
            gender = 'U'  # fall back to "unknown" for unexpected values
        request.user.gender = gender
        try:
            request.user.year_of_birth = int(request.POST.get('year_of_birth', 0))
        except ValueError:
            request.user.year_of_birth = 0
        request.user.location = kt_utils.strip_whitespace(request.POST.get('location', ''))
        # Checkbox fields: any non-empty posted value counts as True.
        request.user.public_gender = bool(request.POST.get('public_gender', ''))
        request.user.public_year_of_birth = bool(request.POST.get('public_year_of_birth', ''))
        request.user.public_location = bool(request.POST.get('public_location', ''))
        set_fav('fav_director', models.UserFavourite.DOMAIN_DIRECTOR, models.Artist.get_artist_by_name)
        set_fav('fav_actor', models.UserFavourite.DOMAIN_ACTOR, models.Artist.get_artist_by_name)
        set_fav('fav_genre', models.UserFavourite.DOMAIN_GENRE, lambda name: models.Keyword.get_keyword_by_name(name, models.Keyword.KEYWORD_TYPE_GENRE))
        set_fav('fav_country', models.UserFavourite.DOMAIN_COUNTRY, lambda name: models.Keyword.get_keyword_by_name(name, models.Keyword.KEYWORD_TYPE_COUNTRY))
        request.user.fav_period = kt_utils.strip_whitespace(request.POST.get('fav_period', ''))
        request.user.save()
        models.Event.objects.create(
            user=request.user,
            event_type=models.Event.EVENT_TYPE_EDIT_PROFILE,
        )
        return HttpResponseRedirect(next_url)
    number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, request.user)
    return render(request, 'ktapp/user_profile_subpages/edit_profile.html', {
        'active_tab': 'profile',
        'selected_user': request.user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_messages': number_of_messages,
        'number_of_articles': number_of_articles,
        'tab_width': USER_PROFILE_TAB_WIDTH[False],
        # Favourites fetched via raw SQL joins on the UserFavourite table.
        'fav_directors': models.Artist.objects.raw('''
        SELECT a.*
        FROM ktapp_artist a
        INNER JOIN ktapp_userfavourite uf ON uf.fav_id = a.id
        WHERE uf.user_id = %s AND uf.domain = %s
        ORDER BY a.name, a.id
        ''', [request.user.id, models.UserFavourite.DOMAIN_DIRECTOR]),
        'fav_actors': models.Artist.objects.raw('''
        SELECT a.*
        FROM ktapp_artist a
        INNER JOIN ktapp_userfavourite uf ON uf.fav_id = a.id
        WHERE uf.user_id = %s AND uf.domain = %s
        ORDER BY a.name, a.id
        ''', [request.user.id, models.UserFavourite.DOMAIN_ACTOR]),
        'fav_genres': models.Keyword.objects.raw('''
        SELECT k.*
        FROM ktapp_keyword k
        INNER JOIN ktapp_userfavourite uf ON uf.fav_id = k.id
        WHERE uf.user_id = %s AND uf.domain = %s AND k.keyword_type = %s
        ORDER BY k.name, k.id
        ''', [request.user.id, models.UserFavourite.DOMAIN_GENRE, models.Keyword.KEYWORD_TYPE_GENRE]),
        'fav_countries': models.Keyword.objects.raw('''
        SELECT k.*
        FROM ktapp_keyword k
        INNER JOIN ktapp_userfavourite uf ON uf.fav_id = k.id
        WHERE uf.user_id = %s AND uf.domain = %s AND k.keyword_type = %s
        ORDER BY k.name, k.id
        ''', [request.user.id, models.UserFavourite.DOMAIN_COUNTRY, models.Keyword.KEYWORD_TYPE_COUNTRY]),
        'topic': request.GET.get('t', ''),
    })
| 37,195 | 12,837 |
#!/usr/bin/python
import os ,Image
j = 1
dir="./code/"
for f in os.listdir(dir):
if f.endswith(".jpg"):
img = Image.open(dir+f)
img = img.convert('RGB')
for i in range(6):
x = 5 + i*9
y = 6
img.crop((x, y, x+9, y+13)).save("font/%d.jpg" % j)
print "j=",j
j += 1
| 353 | 141 |
import sys

from pymonkey import make_entry_point

# Console entry point: apply the patches from 'patchingmod', then run 'targetmod'.
main = make_entry_point(('patchingmod',), 'targetmod')

if __name__ == '__main__':
    # sys.exit instead of the builtin exit(): exit() is provided by the
    # `site` module for interactive use and may be absent (e.g. python -S).
    sys.exit(main())
class TRADE:
    """Stub dialogue-state tracker exposing the TRADE interface.

    Both methods are placeholders; a concrete tracker is expected to supply
    the construction and update logic.
    """

    def __init__(self):
        # Nothing to set up in the stub.
        pass

    def update(self, user_act, history):
        """Update the tracker with the latest turn.

        Args:
            user_act: dict
            history: [[system, user], [s, u], ...]
        Returns:
            The current dialogue state.
        """
        pass
from dataloader_utils import Gender, HeartPart, EndPhase
from enum import Enum
import numpy as np
import math
import cv2
# --------------------------------------
# Shape (contour) similarity
# --------------------------------------
def __areas(curve1, curve2):
    """Rasterize both closed curves on a shared normalized 410x410 grid and
    return (union, intersection, area1, area2) in pixels.

    Both curves are copied to float arrays first: the original normalized
    in place, which mutated the caller's arrays (and silently truncated the
    fractional values to zero when integer arrays were passed, as the dice/
    jaccard docstrings advertise).
    """
    # Work on float copies; the normalization below produces fractional values.
    curve1 = np.array(curve1, dtype=np.float64)
    curve2 = np.array(curve2, dtype=np.float64)

    def _bbox(cv):
        # corners of the axis-aligned bounding box
        mins = np.min(cv, axis=0)
        maxs = np.max(cv, axis=0)
        x_min, y_min = mins[0], mins[1]
        x_max, y_max = maxs[0], maxs[1]
        return x_min, y_min, x_max, y_max

    box1 = _bbox(curve1)
    box2 = _bbox(curve2)
    # joint bounding box of both curves
    xr = max(box1[2], box2[2])
    yb = max(box1[3], box2[3])
    xl = min(box1[0], box2[0])
    # bugfix: was max(box1[1], box2[1]); the joint top edge is the MIN of the
    # y-minima (mirroring xl), otherwise one curve got negative normalized
    # coordinates and was clipped when rasterized.
    yu = min(box1[1], box2[1])
    # shift and rescale the curves into [0, 1] (DC, JC are scale-invariant)
    curve1[:, 0] = (curve1[:, 0] - xl) / (xr - xl + 1e-5)
    curve1[:, 1] = (curve1[:, 1] - yu) / (yb - yu + 1e-5)
    curve2[:, 0] = (curve2[:, 0] - xl) / (xr - xl + 1e-5)
    curve2[:, 1] = (curve2[:, 1] - yu) / (yb - yu + 1e-5)
    # map the coordinates onto a 410 x 410 mask with a 5-pixel margin
    image1 = np.zeros((410, 410), dtype=np.uint8)
    curve1 = curve1 * 400 + 5
    cv2.drawContours(image1, [np.expand_dims(curve1, axis=1).astype(np.int32)], -1, (255, 0, 0), cv2.FILLED)
    image2 = np.zeros((410, 410), dtype=np.uint8)
    curve2 = curve2 * 400 + 5
    cv2.drawContours(image2, [np.expand_dims(curve2, axis=1).astype(np.int32)], -1, (255, 0, 0), cv2.FILLED)
    # binary masks of the filled regions
    A = (image1 // 255 == 1).astype(np.float32)
    B = (image2 // 255 == 1).astype(np.float32)
    area1 = np.sum(A)
    area2 = np.sum(B)
    area_inter = np.sum(A * B)
    area_union = area1 + area2 - area_inter
    return area_union, area_inter, area1, area2
def dice(curve1, curve2):  # can be viewed as F1 score
    """Dice coefficient of the regions enclosed by the two curves.

    :param curve1: numpy matrix of shape (N, 2), points in (x, y) order
    :param curve2: numpy matrix of shape (N, 2), points in (x, y) order
    :return: the dice value as a real number
    """
    _, intersection, area_a, area_b = __areas(curve1, curve2)
    # 2|A∩B| / (|A| + |B|)
    return 2.0 * intersection / (area_a + area_b)
def jaccard(curve1, curve2):  # aka. Tanimoto index
    """Jaccard index of the regions enclosed by the two curves.

    :param curve1: numpy matrix of shape (N, 2), points in (x, y) order
    :param curve2: numpy matrix of shape (N, 2), points in (x, y) order
    :return: the jaccard index as a real number
    """
    union, intersection, _, _ = __areas(curve1, curve2)
    # |A∩B| / |A∪B|
    return intersection / union
def hausdorff(curve1, curve2):  # aka. Pompeiu-Hausdorff distance
    """Symmetric Hausdorff distance between two point sets.
    (https://en.wikipedia.org/wiki/Hausdorff_distance)

    :param curve1: numpy matrix of shape (N, 2), points in (x, y) order
    :param curve2: numpy matrix of shape (M, 2), points in (x, y) order
    :return: the Hausdorff distance as a real number
    """
    # pairwise squared distances via broadcasting: entry (i, j) = |p_i - q_j|^2
    diff = curve1[:, None, :] - curve2[None, :, :]
    sq_dist = np.einsum('ijk,ijk->ij', diff, diff)
    # directed distances in both directions; sqrt taken last (monotone)
    from_curve1 = np.max(np.min(sq_dist, axis=1))
    from_curve2 = np.max(np.min(sq_dist, axis=0))
    return math.sqrt(max(from_curve1, from_curve2))
# --------------------------------------
# Volume calculation
# --------------------------------------
def ratio(pixel_spacing: tuple, slice_thickness: float, gap: float) -> (float, float):
    """Per-pixel volume factors, in millilitres, for a slice and for the
    inter-slice gap (1000 mm^3 = 1 ml)."""
    pixel_area = pixel_spacing[0] * pixel_spacing[1]
    return pixel_area * slice_thickness / 1000.0, pixel_area * gap / 1000.0
def bsa(height, weight):  # Mosteller BSA
    """Mosteller body-surface-area estimate; None when either input is missing."""
    if height is None or weight is None:
        return None
    return math.sqrt(height * weight / 3600.0)
def area_triangular(curve):
    """Area of a closed polygon via cross products of a centroid triangle fan.

    :param curve: numpy matrix of shape (N, 2), points in (x, y) order,
                  elements are floats
    :return: the enclosed area
    """
    # center of mass of the vertices
    centroid = np.sum(curve, axis=0) / curve.shape[0]
    # spokes from the centroid to every vertex
    spokes = curve - centroid
    # edge vectors: each vertex minus its predecessor (wrapping around)
    edges = curve - np.roll(curve, 1, axis=0)
    # |spoke x edge| / 2 is the area of one fan triangle; sum them all
    return np.abs(0.5 * np.sum(np.cross(spokes, edges)))
def convert_to_hierarchical(contours):
    """
    Arrange a flat list of Contour objects into nested dicts keyed as
    slice > frame > heartpart -- Contour
    :param contours: list of Contour objects
    :return: the nested dict of Contour objects
    """
    hierarchy = {}
    for contour in contours:
        by_frame = hierarchy.setdefault(contour.slice, {})
        by_part = by_frame.setdefault(contour.frame, {})
        by_part[contour.part] = contour
    return hierarchy
def calculate_contour_area(curve: np.ndarray):
    """
    Area enclosed by the curve, computed by triangular decomposition.
    :param curve: numpy matrix (N, 2)
    :return: area of the closed curve
    """
    return area_triangular(curve)
def grouping(hierarchical_contours, calculate_area):
    """
    Determines which phase (end-systole or end-diastole) each contour belongs
    to and calculates the area of each contour.
    :param hierarchical_contours: a hierarchical structure which contains Contour objects
                                  (slice > frame > heartpart -- Contour)
    :param calculate_area: function to calculate the area of a contour matrix
    :return: hierarchical structure with areas (slice > heartpart > phase -- area)
    """
    def set_endphase(slice, frame, part, phase):
        # Tag both the contour and its source image with the detected phase.
        hierarchical_contours[slice][frame][part].phase = phase
        hierarchical_contours[slice][frame][part].corresponding_image.phase = phase
    contour_areas = {}
    slices = hierarchical_contours.keys()
    for slice in slices:
        contour_areas[slice] = {}
        for part in HeartPart:
            # Collect the frames and areas in which this heart part appears.
            areas = []
            frames = []
            contour_areas[slice][part] = {}
            for frame in hierarchical_contours[slice].keys():
                if part in hierarchical_contours[slice][frame]:
                    curve = hierarchical_contours[slice][frame][part]
                    frames.append(frame)
                    areas.append(calculate_area(curve.contour_mtx))
            if len(areas) > 1:
                # Largest area is taken as end-diastole, smallest as end-systole.
                contour_areas[slice][part][EndPhase.DIA] = max(areas)
                contour_areas[slice][part][EndPhase.SYS] = min(areas)
                set_endphase(slice, frames[areas.index(max(areas))], part, EndPhase.DIA)
                set_endphase(slice, frames[areas.index(min(areas))], part, EndPhase.SYS)
            elif len(areas) == 1:
                # Single contour: guess the phase from the frame index —
                # frames nearest 0 or 20 are treated as diastole, nearest 9 as
                # systole (presumably tied to the cine timing — confirm).
                ds = np.array([frames[0] - 0, frames[0] - 20, frames[0] - 9])  # this is a heuristic
                idx = np.argmin(np.abs(ds))
                if idx in [0, 1]:
                    contour_areas[slice][part][EndPhase.DIA] = areas[0]
                    contour_areas[slice][part][EndPhase.SYS] = None
                    set_endphase(slice, frames[0], part, EndPhase.DIA)
                else:
                    contour_areas[slice][part][EndPhase.DIA] = None
                    contour_areas[slice][part][EndPhase.SYS] = areas[0]
                    set_endphase(slice, frames[0], part, EndPhase.SYS)
            else:
                # This part never appears in the slice.
                contour_areas[slice][part][EndPhase.DIA] = None
                contour_areas[slice][part][EndPhase.SYS] = None
    return contour_areas
def volume(contour_areas, part, phase, ratio):
    """
    :param contour_areas: hierarchical structure with areas (slice > heartpart > phase -- area)
    :param part: heartpart e.g.: left-endo
    :param phase: systole or diastole
    :param ratio: (slice factor, gap factor) from the field of view and thicknesses
    :return: volume of the heart part at the given phase
    """
    ratio_slice, ratio_gap = ratio
    slice_keys = list(contour_areas.keys())
    total = 0
    # Every slice contributes a slab; each gap between consecutive slices
    # contributes a truncated-cone (frustum) estimate, skipped when either
    # neighbouring area is missing.
    for lower, upper in zip(slice_keys, slice_keys[1:]):
        area_here = contour_areas[lower][part][phase]
        area_next = contour_areas[upper][part][phase]
        if area_here is not None:
            total += area_here * ratio_slice
            if area_next is not None:
                total += (area_here + np.sqrt(area_here * area_next) + area_next) * ratio_gap / 3.0
    # the last slice's slab is not covered by the pairwise loop
    last_area = contour_areas[slice_keys[-1]][part][phase]
    if last_area is not None:
        total += last_area * ratio_slice
    return total
def calculate_volumes_left(contour_areas, ratio, bsa=None):
    """Left-ventricular volumes (ml); BSA-indexed values only when bsa is given."""
    ed = volume(contour_areas, HeartPart.LN, EndPhase.DIA, ratio)  # left ED
    es = volume(contour_areas, HeartPart.LN, EndPhase.SYS, ratio)  # left ES
    stroke = ed - es  # left stroke volume
    volume_indices = {'lved': ed, 'lves': es, 'lvsv': stroke}
    if bsa is None:
        return volume_indices
    # BSA-normalized indices
    volume_indices['lved_i'] = ed / bsa
    volume_indices['lves_i'] = es / bsa
    volume_indices['lvsv_i'] = stroke / bsa
    return volume_indices
def calculate_volumes_right(contour_areas, ratio, bsa=None):
    """Right-ventricular volumes (ml); BSA-indexed values only when bsa is given."""
    ed = volume(contour_areas, HeartPart.RN, EndPhase.DIA, ratio)  # right ED
    es = volume(contour_areas, HeartPart.RN, EndPhase.SYS, ratio)  # right ES
    stroke = ed - es  # right stroke volume
    volume_indices = {'rved': ed, 'rves': es, 'rvsv': stroke}
    if bsa is None:
        return volume_indices
    # BSA-normalized indices
    volume_indices['rved_i'] = ed / bsa
    volume_indices['rves_i'] = es / bsa
    volume_indices['rvsv_i'] = stroke / bsa
    return volume_indices
class VolumeIndices:
    """Container for the left/right ventricular volume metrics of one subject."""

    # every metric attribute carried by this container
    _FIELDS = ('lved', 'lves', 'lvsv', 'lved_i', 'lves_i', 'lvsv_i',
               'rved', 'rves', 'rvsv', 'rved_i', 'rves_i', 'rvsv_i')

    def __init__(self):
        self.gender = None
        for field in self._FIELDS:
            setattr(self, field, None)

    @classmethod
    def from_dictionary(cls, dictionary: dict, gender):
        """Build an instance from a possibly-None, possibly-partial metric dict;
        missing keys stay None."""
        obj = cls()
        obj.gender = gender
        source = dictionary if dictionary is not None else {}
        for field in cls._FIELDS:
            setattr(obj, field, source.get(field))
        return obj
# --------------------------------------
# Reorder percentages
# --------------------------------------
class Zone(Enum):
    """Position of a measured value relative to the published normal range."""
    UNK = 0  # unknown (missing data or unknown gender)
    AL = 1   # abnormal low (below the normal range)
    NZ = 2   # normal zone
    AH = 3   # abnormal high (above the normal range)
class ReorderPercentage:
    """
    Classifies volume indices into zones (below / inside / above the normal
    range) and measures how often the zone of a predicted value disagrees
    with the zone of the reference value.

    Reference: Petersen et al. Journal of Cardiovascular Magnetic Resonance (2017) 19:18 DOI 10.1186/s12968-017-0327-9
    """

    def __init__(self, volume_idcs: list):
        """
        volume_idcs - list of (original, predicted) VolumeIndices pairs
        """
        self.volume_idcs = volume_idcs
        # One classifier per reported metric. The result dicts are keyed by
        # the classifiers' __name__ values, so the method names are part of
        # the output contract and must not be renamed.
        self.zone_calculators = [
            self._lved, self._lves, self._lvsv,
            self._lved_idx, self._lves_idx, self._lvsv_idx,
            self._rved, self._rves, self._rvsv,
            self._rved_idx, self._rves_idx, self._rvsv_idx
        ]

    @staticmethod
    def _get_zone(gender, ventricular_value, male_ranges, female_ranges):
        """Zone of ventricular_value given gender-specific [lower, upper] limits.

        Returns Zone.UNK when the gender is unknown or the value is missing.
        """
        if gender == Gender.M:
            ranges = male_ranges
        elif gender == Gender.F:
            ranges = female_ranges
        else:
            return Zone.UNK
        if ventricular_value is None:
            return Zone.UNK
        if ventricular_value < ranges[0]:
            return Zone.AL
        if ventricular_value < ranges[1]:
            return Zone.NZ
        return Zone.AH

    # Left side — normal ranges as [lower, upper], male then female,
    # from Petersen et al. (2017). The twelve near-identical methods of the
    # original are collapsed into one-line delegations to _get_zone.
    def _lved(self, volume_idcs: "VolumeIndices"):
        return self._get_zone(volume_idcs.gender, volume_idcs.lved, [93, 232], [80, 175])

    def _lves(self, volume_idcs: "VolumeIndices"):
        return self._get_zone(volume_idcs.gender, volume_idcs.lves, [34, 103], [25, 73])

    def _lvsv(self, volume_idcs: "VolumeIndices"):
        return self._get_zone(volume_idcs.gender, volume_idcs.lvsv, [49, 140], [47, 110])

    def _lved_idx(self, volume_idcs: "VolumeIndices"):
        return self._get_zone(volume_idcs.gender, volume_idcs.lved_i, [52, 117], [50, 101])

    def _lves_idx(self, volume_idcs: "VolumeIndices"):
        return self._get_zone(volume_idcs.gender, volume_idcs.lves_i, [19, 52], [16, 43])

    def _lvsv_idx(self, volume_idcs: "VolumeIndices"):
        return self._get_zone(volume_idcs.gender, volume_idcs.lvsv_i, [28, 70], [29, 63])

    # Right side
    def _rved(self, volume_idcs: "VolumeIndices"):
        return self._get_zone(volume_idcs.gender, volume_idcs.rved, [99, 260], [83, 192])

    def _rves(self, volume_idcs: "VolumeIndices"):
        return self._get_zone(volume_idcs.gender, volume_idcs.rves, [34, 135], [26, 95])

    def _rvsv(self, volume_idcs: "VolumeIndices"):
        return self._get_zone(volume_idcs.gender, volume_idcs.rvsv, [54, 140], [47, 107])

    def _rved_idx(self, volume_idcs: "VolumeIndices"):
        return self._get_zone(volume_idcs.gender, volume_idcs.rved_i, [55, 128], [51, 110])

    def _rves_idx(self, volume_idcs: "VolumeIndices"):
        return self._get_zone(volume_idcs.gender, volume_idcs.rves_i, [19, 67], [16, 55])

    def _rvsv_idx(self, volume_idcs: "VolumeIndices"):
        return self._get_zone(volume_idcs.gender, volume_idcs.rvsv_i, [30, 69], [29, 61])

    def reordering_percentage(self):
        """
        For every metric, the fraction of (original, predicted) pairs whose
        zone classification disagrees, broken down by transition direction
        (L=abnormal low, N=normal, H=abnormal high; e.g. LN = original low,
        predicted normal). Pairs with an UNK zone on either side are skipped;
        a metric with no usable pair maps to None.

        Returns the 7-tuple (overall_errors, LN, NH, NL, HN, LH, HL) of dicts
        keyed by classifier method name.
        """
        overall_errors = {}
        LN, NH, NL, HN, LH, HL = {}, {}, {}, {}, {}, {}
        for zone_calculator in self.zone_calculators:
            cntr = 0
            equal = ln = nh = nl = hn = lh = hl = 0
            for pair in self.volume_idcs:
                original = zone_calculator(pair[0])
                predicted = zone_calculator(pair[1])
                if original == Zone.UNK or predicted == Zone.UNK:
                    continue  # unusable pair, excluded from the denominator
                cntr += 1
                if original == predicted:
                    equal += 1
                elif original == Zone.AL and predicted == Zone.NZ:
                    ln += 1
                elif original == Zone.NZ and predicted == Zone.AH:
                    nh += 1
                elif original == Zone.NZ and predicted == Zone.AL:
                    nl += 1
                elif original == Zone.AH and predicted == Zone.NZ:
                    hn += 1
                elif original == Zone.AL and predicted == Zone.AH:
                    lh += 1
                elif original == Zone.AH and predicted == Zone.AL:
                    hl += 1
            name = zone_calculator.__name__
            overall_errors[name] = (1 - equal / cntr) if cntr > 0 else None
            LN[name] = (ln / cntr) if cntr > 0 else None
            NH[name] = (nh / cntr) if cntr > 0 else None
            NL[name] = (nl / cntr) if cntr > 0 else None
            HN[name] = (hn / cntr) if cntr > 0 else None
            LH[name] = (lh / cntr) if cntr > 0 else None
            HL[name] = (hl / cntr) if cntr > 0 else None
        return overall_errors, LN, NH, NL, HN, LH, HL
| 18,502 | 6,588 |
# Generated by Django 2.0.2 on 2018-03-09 18:07
from django.conf import settings
from django.contrib.auth.models import Group, Permission
from django.core.management.sql import emit_post_migrate_signal
from django.db import migrations
def set_default_group_permissions(apps, schema_editor):
    """Attach the default add permissions to the default group."""
    # Emit post migrate signal to create ContentTypes
    # https://stackoverflow.com/questions/37697215/django-pytest-database-access-for-data-migration
    db_alias = schema_editor.connection.alias
    emit_post_migrate_signal(2, False, db_alias)

    default_group = Group.objects.get(name=settings.DEFAULT_GROUP_NAME)
    wanted_codenames = [
        'add_tag',     # Tag
        'add_strand',  # Strand
        'add_team',    # Team
    ]
    permissions = [Permission.objects.get(codename=codename)
                   for codename in wanted_codenames]
    default_group.permissions.set(permissions)
class Migration(migrations.Migration):
    # Data migration: grants the default group its permissions after the
    # group itself exists.

    dependencies = [
        ('users', '0004_auto_20180313_2138'),
    ]

    operations = [
        # forward-only: no reverse function is provided
        migrations.RunPython(set_default_group_permissions),
    ]
| 1,107 | 370 |
# Coinbase Pro library:
# https://github.com/danpaquin/coinbasepro-python
#curl "https://api.pro.coinbase.com/products/BTC-USD/candles?start=2021-01-01T12:00:00&end=2021-01-12T12:00:00&granularity=3600"
import cbpro
import numpy as np
import pandas as pd
import logging
from datetime import datetime, timedelta
import json
#from IPython.core.debugger import set_trace
class Coinbase:
    """Thin wrapper around the public Coinbase Pro API.

    Downloads candle history into a pandas DataFrame (self.df) and derives
    simple moving-average-based Buy/Sell signals that can be back-simulated.
    """

    def __init__(self, product, logging_level = logging.INFO, products_file = None):
        """Create a public client and validate that *product* (e.g. 'BTC-USD')
        exists on the exchange; optionally dump the product list to a JSON file.

        Raises Exception when the product id is unknown.
        """
        FORMAT = '%(asctime)-15s %(message)s'
        logging.basicConfig(level=logging_level, format=FORMAT)
        # init
        self.product = product
        self.df = None  # candle history; filled by loadHistory()
        # client creation (public endpoints only, no API key required)
        self.public_client = cbpro.PublicClient()
        # get products
        self.products = self.public_client.get_products()
        if products_file is not None:
            with open(products_file, 'w') as fp:
                json.dump(self.products, fp)
            logging.info(f"Found {len(self.products)} products, saved to {products_file}")
        else:
            logging.info(f"Found {len(self.products)} products")
        found = False
        for prod in self.products:
            if prod['id'] == self.product:
                found = True
                logging.info(prod)
                self.product = self.product  # NOTE(review): no-op self-assignment
                break
        if found is False:
            raise Exception(f"Product {self.product} not valid")

    @staticmethod
    def getProductList(products_file = None):
        """Return the raw product list; optionally dump it to a JSON file."""
        products = cbpro.PublicClient().get_products()
        if products_file is not None:
            with open(products_file, 'w') as fp:
                json.dump(products, fp)
        return products

    @staticmethod
    def getPrice(product):
        """Current ticker price of *product* as a float."""
        return float(cbpro.PublicClient().get_product_ticker(product)['price'])

    def loadHistory(self, start_date, end_date, granularity = 86400, moving_average = 20):
        """Fetch OHLCV candles between the two dates into self.df and add the
        MA<moving_average> rolling-mean column.

        dates are datetime objects, can be created with:
            start_utc = datetime(2021, 1, 1)

        The request window starts *moving_average* days early so the rolling
        mean is already defined at *start_date*; those warm-up rows are
        dropped at the end. Fetching is chunked because the API limits the
        number of candles per call.
        """
        start_interval = start_date - timedelta(days=moving_average)
        end_interval = None
        # maximum window per API call for each supported granularity
        Granularity_Map = {
            60: timedelta(hours=5), # 1 day per each call
            86400: timedelta(days=28 * 6 -1) # 42 weeks per each call
        }
        if granularity not in Granularity_Map:
            raise Exception(f"Granularity {granularity} not valid")
        self.df = pd.DataFrame()
        while True:
            # advance the window just past the previous chunk
            if end_interval is not None:
                start_interval = end_interval + timedelta(seconds=1)
            if start_interval > end_date:
                break
            end_interval = start_interval + Granularity_Map[granularity]
            if end_interval > end_date:
                end_interval = end_date
            start_interval_iso = start_interval.isoformat()
            end_interval_iso = end_interval.isoformat()
            btc_history = self.public_client.get_product_historic_rates(
                self.product, start=start_interval_iso,
                end=end_interval_iso,
                granularity=granularity)
            # on error the API answers with a dict holding a 'message' key
            if len(btc_history) == 1 and 'message' in btc_history:
                raise Exception(btc_history['message'])
            logging.info(f"Fetched from {start_interval_iso} to {end_interval_iso} : #{len(btc_history)} points")
            if len(btc_history) == 0:
                continue
            btc_history_np = np.array(btc_history)
            df_new = pd.DataFrame(btc_history_np, columns = ['Time','Low','High','Open','Close','Volume'])
            self.df = self.df.append(df_new, ignore_index=True, sort=True)
        self.df['tic'] = self.product
        self.df['Time'] = pd.to_datetime(self.df['Time'], unit='s')
        moving_average_label = f"MA{moving_average}"
        self.df.sort_values(by='Time', inplace=True)
        self.df[moving_average_label] = self.df['Close'].rolling(window=moving_average).mean()
        # let's remove the initial points where the moving average was not available
        self.df = self.df[self.df['Time'] >= start_date]
        self.df.reset_index(drop=True, inplace=True)
        # Candle column meanings (per the Coinbase Pro API):
        #   time    bucket start time
        #   low     lowest price during the bucket interval
        #   high    highest price during the bucket interval
        #   open    opening price (first trade) in the bucket interval
        #   close   closing price (last trade) in the bucket interval
        #   volume  volume of trading activity during the bucket interval

    def calculateBuy(self, moving_average = 20, below_threshold = 0.1):
        """Add 'Below' and 'Buy' columns to self.df.

        "Buy" means the value had dropped x% below the current value and has
        now come back above the moving average.
        """
        # Let's generate the Below column (min-hold of Close - MA while the
        # price stays below the moving average).
        moving_average_label = f"MA{moving_average}"
        self.df['Below'] = 0
        for index, row in self.df.iterrows():
            current_value = row['Close']
            if current_value < row[moving_average_label]:
                below = current_value - row[moving_average_label]
                try:
                    previous_below = self.df.loc[index-1, 'Below']
                except:
                    # first row has no predecessor
                    previous_below = 0
                if below < previous_below:
                    self.df.loc[index, 'Below'] = below
                else:
                    self.df.loc[index, 'Below'] = previous_below
        # Let's generate the BUY trigger based on the Below column
        self.df['Buy'] = 0
        for index, row in self.df.iterrows():
            current_value = row['Close']
            try:
                previous_below = self.df.loc[index-1, 'Below']
            except:
                previous_below = 0
            # trigger when the price crosses back above the MA after a dip
            # deeper than below_threshold of the current price
            if current_value > row[moving_average_label] and previous_below < -1*below_threshold*current_value:
                self.df.loc[index, 'Buy'] = self.df['Close'].max()/5 # placeholder value to facilitate the plot

    def calculateSell(self, moving_average = 20, above_threshold = 0.1):
        """Add 'Above' and 'Sell' columns to self.df.

        "Sell" means the value had risen x% above the current value and has
        now dropped below the moving average.
        """
        # Let's generate the Above column (max-hold of Close - MA while the
        # price stays above the moving average).
        moving_average_label = f"MA{moving_average}"
        self.df['Above'] = 0
        for index, row in self.df.iterrows():
            current_value = row['Close']
            if current_value > row[moving_average_label]:
                above = current_value - row[moving_average_label]
                try:
                    previous_above = self.df.loc[index-1, 'Above']
                except:
                    # first row has no predecessor
                    previous_above = 0
                if above > previous_above:
                    self.df.loc[index, 'Above'] = above
                else:
                    self.df.loc[index, 'Above'] = previous_above
        # Let's generate the SELL trigger based on the Above column
        self.df['Sell'] = 0
        for index, row in self.df.iterrows():
            current_value = row['Close']
            try:
                previous_above= self.df.loc[index-1, 'Above']
            except:
                previous_above = 0
            # trigger when the price drops back below the MA after a rally
            # higher than above_threshold of the current price
            if current_value < row[moving_average_label] and previous_above > above_threshold*current_value:
                self.df.loc[index, 'Sell'] = -1*self.df['Close'].max()/5 # placeholder value to facilitate the plot

    def backSimulate(self, initial_amount = 100):
        """Replay the Buy/Sell signals over self.df starting with
        *initial_amount* USD; fills per-row USD and crypto wallet columns and a
        buy-and-hold baseline (Wallet_Crypto_Hold)."""
        self.df['Wallet_USD'] = 0
        self.df['Wallet_Crypto'] = 0
        self.df['Wallet_Crypto_Hold'] = 0
        for index, row in self.df.iterrows():
            # buy-and-hold baseline: initial USD converted at row 0's close
            self.df.loc[index, 'Wallet_Crypto_Hold'] = initial_amount/self.df.loc[0,'Close'] * self.df.loc[index,'Close']
            if index == 0:
                self.df.loc[0, 'Wallet_USD'] = initial_amount
                continue
            if self.df.loc[index, 'Buy'] != 0 and self.df.loc[index-1,'Wallet_USD'] > 0:
                # Buy: convert the whole USD wallet into crypto
                purchased_crypto = self.df.loc[index-1,'Wallet_USD'] / self.df.loc[index,'Close']
                logging.info(f"Buy : {self.df.loc[index-1,'Wallet_USD']} USD ---> {purchased_crypto} BTC")
                self.df.loc[index,'Wallet_Crypto'] = purchased_crypto
                self.df.loc[index,'Wallet_USD'] = 0
            elif self.df.loc[index, 'Sell'] != 0 and self.df.loc[index-1,'Wallet_Crypto'] > 0:
                # Sell: convert the whole crypto wallet back into USD
                # NOTE(review): "BUSDTC" in the log message looks like a typo for "USD"
                sold_crypto = self.df.loc[index-1,'Wallet_Crypto'] * self.df.loc[index,'Close']
                logging.info(f"Sell: {self.df.loc[index-1,'Wallet_Crypto']} BTC ---> {sold_crypto} BUSDTC")
                self.df.loc[index,'Wallet_USD'] = sold_crypto
                self.df.loc[index,'Wallet_Crypto'] = 0
            else:
                # Hold: carry both wallets forward unchanged
                self.df.loc[index,'Wallet_USD'] = self.df.loc[index-1,'Wallet_USD']
                self.df.loc[index,'Wallet_Crypto'] = self.df.loc[index-1,'Wallet_Crypto']

    def getTicker(self):
        """Raw ticker dict for the configured product."""
        return self.public_client.get_product_ticker(self.product)
'''
Tests for the salt-run command
'''
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
class RunTest(integration.ShellCase, integration.ShellCaseCommonTestsMixIn):
    '''
    Test the salt-run command
    '''

    _call_binary_ = 'salt-run'

    def test_in_docs(self):
        '''
        test the salt-run docs system
        '''
        data = '\n'.join(self.run_run('-d'))
        expected_runners = (
            'jobs.active:',
            'jobs.list_jobs:',
            'jobs.lookup_jid:',
            'manage.down:',
            'manage.up:',
            'network.wol:',
            'network.wollist:',
        )
        for doc_line in expected_runners:
            self.assertIn(doc_line, data)

    def test_notin_docs(self):
        '''
        Verify that hidden methods are not in run docs
        '''
        data = '\n'.join(self.run_run('-d'))
        self.assertNotIn('jobs.SaltException:', data)
if __name__ == '__main__':
    # Allow running this module directly through the integration test runner.
    from integration import run_tests
    run_tests(RunTest)
| 1,134 | 372 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import logging.handlers
import multiprocessing
import os
import re
import threading
import traceback
import sys
import time
# Registry of every logger created through this module, keyed by logger name.
LOGGERS = {}
# One shared multiprocessing queue per parent logger; child (queue) loggers
# publish records here — presumably drained on the parent side (see
# LogListener below) — TODO confirm.
LOGGER_QUEUE_MAP = {}

# Path template for parent log files; must contain one %s for the logger name.
default_logger_location = '/var/log/stacktach/%s.log'
default_logger_name = 'stacktach-default'
def set_default_logger_location(loc):
    """Override the log-file path template (must contain one %s for the name)."""
    global default_logger_location
    default_logger_location = loc
def set_default_logger_name(name):
    """Override the name used when callers do not pass an explicit logger name."""
    global default_logger_name
    default_logger_name = name
class ParentLoggerDoesNotExist(Exception):
    """Raised when a child (queue) logger is requested before its parent exists."""

    def __init__(self, parent_logger_name):
        # Bugfix: the original concatenated "...with the" + "name %s..."
        # without a separating space, yielding "with thename %s".
        self.reason = ("Cannot create child logger as parent logger with "
                       "the name %s does not exist." % parent_logger_name)
        # Pass the message to Exception so str(exc) is informative, not empty.
        super(ParentLoggerDoesNotExist, self).__init__(self.reason)
def _create_parent_logger(parent_logger_name):
    """Return the named parent logger, creating it (plus its record queue)
    on first use."""
    if parent_logger_name not in LOGGERS:
        LOGGERS[parent_logger_name] = _create_timed_rotating_logger(
            parent_logger_name)
        LOGGER_QUEUE_MAP[parent_logger_name] = multiprocessing.Queue(-1)
    return LOGGERS[parent_logger_name]
def _create_child_logger(parent_logger_name):
    """Return the queue-backed child of the named parent logger, creating it
    on first use.

    Raises ParentLoggerDoesNotExist when the parent was never created.
    """
    child_logger_name = "child_%s" % parent_logger_name
    if child_logger_name in LOGGERS:
        return LOGGERS[child_logger_name]
    if parent_logger_name not in LOGGERS:
        raise ParentLoggerDoesNotExist(parent_logger_name)
    # child loggers forward records onto the parent's queue
    queue = LOGGER_QUEUE_MAP[parent_logger_name]
    LOGGERS[child_logger_name] = _create_queue_logger(child_logger_name, queue)
    return LOGGERS[child_logger_name]
def _logger_factory(parent_logger_name, is_parent):
    """Dispatch to the parent or child constructor, defaulting a None name."""
    name = default_logger_name if parent_logger_name is None else parent_logger_name
    if is_parent:
        return _create_parent_logger(name)
    return _create_child_logger(name)
def get_logger(name=None, is_parent=True):
    """Public entry point: fetch (or lazily create) a parent or child logger."""
    return _logger_factory(name, is_parent)
def warn(msg, name=None):
    """Log *msg* at WARNING level via the child logger called *name*.

    Falls back to the module default logger name when *name* is None.
    """
    if name is None:
        name = default_logger_name
    # Logger.warn() is a deprecated alias of warning(); use the real method.
    get_logger(name=name, is_parent=False).warning(msg)
def error(msg, name=None):
    """Log *msg* at ERROR level through the child logger for *name*."""
    logger_name = default_logger_name if name is None else name
    get_logger(name=logger_name, is_parent=False).error(msg)
def info(msg, name=None):
    """Log *msg* at INFO level through the child logger for *name*."""
    logger_name = default_logger_name if name is None else name
    get_logger(name=logger_name, is_parent=False).info(msg)
def _create_timed_rotating_logger(name):
    """Build a DEBUG-level file logger rotating at midnight, keeping 6 backups.

    The file path comes from default_logger_location % name.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    handler = TimedRotatingFileHandlerWithCurrentTimestamp(
        default_logger_location % name, when='midnight', interval=1,
        backupCount=6)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    # Rotate immediately so each run starts on a fresh log file.
    # NOTE(review): indexes handlers[0], which is this handler only if the
    # named logger had no pre-existing handlers -- verify for repeated calls.
    logger.handlers[0].doRollover()
    return logger
def _create_queue_logger(name, queue):
    """Build a DEBUG-level logger whose records are pushed onto *queue*."""
    queue_logger = logging.getLogger(name)
    queue_logger.setLevel(logging.DEBUG)
    queue_handler = QueueHandler(queue)
    queue_handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    queue_logger.addHandler(queue_handler)
    return queue_logger
class QueueHandler(logging.Handler):
    """logging.Handler that forwards records onto a multiprocessing queue.

    Records are flattened before queueing (message pre-formatted, exc_info
    dropped after rendering) so they stay picklable and small on the wire.
    """
    def __init__(self, queue):
        logging.Handler.__init__(self)
        self.queue = queue
    def emit(self, record):
        """Flatten *record* and put it on the queue without blocking."""
        try:
            # ensure that exc_info and args
            # have been stringified. Removes any chance of
            # unpickleable things inside and possibly reduces
            # message size sent over the pipe
            if record.exc_info:
                # just to get traceback text into record.exc_text
                self.format(record)
                # remove exception info as it's not needed any more
                record.exc_info = None
            if record.args:
                record.msg = record.msg % record.args
                record.args = None
            self.queue.put_nowait(record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # Fix: was a bare "except:"; narrowed so only genuine errors go
            # through logging's standard error handling path.
            self.handleError(record)
class LogListener:
    """Background thread that drains a log-record queue into a real logger."""
    def __init__(self, logger):
        self.logger = logger
        self.queue = get_queue(logger.name)
    def start(self):
        """Spawn the daemon receiver thread."""
        self.thread = threading.Thread(target=self._receive)
        self.thread.daemon = True
        self.thread.start()
    def _receive(self):
        """Pump records from the queue into self.logger until told to stop."""
        while True:
            try:
                item = self.queue.get()
                # A None sentinel asks the listener to shut down.
                if item is None:
                    break
                self.logger.handle(item)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break
            except:
                traceback.print_exc(file=sys.stderr)
    def end(self):
        """Wake the receiver with the sentinel, join it, close all handlers."""
        self.queue.put_nowait(None)
        self.thread.join()
        for handler in self.logger.handlers:
            handler.close()
def get_queue(logger_name):
    """Return the record queue registered for *logger_name* (KeyError if absent)."""
    return LOGGER_QUEUE_MAP[logger_name]
class TimedRotatingFileHandlerWithCurrentTimestamp(
        logging.handlers.TimedRotatingFileHandler):
    """TimedRotatingFileHandler variant whose rotated files are stamped with
    the rollover time ("now") instead of the interval's start time."""
    def __init__(self, filename, when='h', interval=1, backupCount=0,
                 encoding=None, delay=False, utc=False):
        logging.handlers.TimedRotatingFileHandler.__init__(
            self, filename, when, interval, backupCount, encoding, delay, utc)
        # Suffix/extMatch carry full date *and* time (second precision),
        # unlike the base class's date-only midnight suffix.
        self.suffix = "%Y-%m-%d_%H-%M-%S"
        self.extMatch = re.compile(r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$")
    def doRollover(self):
        """Exactly the same as TimedRotatingFileHandler's doRollover() except
        that the current date/time stamp is appended to the filename rather
        than the start date/time stamp, when the rollover happens."""
        currentTime = int(time.time())
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.utc:
            timeTuple = time.gmtime(currentTime)
        else:
            timeTuple = time.localtime(currentTime)
        # Archive the live file under a name stamped with the current time.
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            # find the oldest log file and delete it
            #s = glob.glob(self.baseFilename + ".20*")
            #if len(s) > self.backupCount:
            #    s.sort()
            #    os.remove(s[0])
            for s in self.getFilesToDelete():
                os.remove(s)
        #print "%s -> %s" % (self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstNow = time.localtime(currentTime)[-1]
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
                    newRolloverAt = newRolloverAt - 3600
                else: # DST bows out before next rollover, so we need to add an hour
                    newRolloverAt = newRolloverAt + 3600
        self.rolloverAt = newRolloverAt
| 8,229 | 2,490 |
# URL routing table for the collection-provider admin pages.
from django.conf.urls import url
from . import views
app_name = 'admin'
urlpatterns = [
    url(r'^create/$', views.CreateCollectionProvider.as_view(), name='create'),
    url(r'^$', views.CollectionProviderList.as_view(), name='list'),
    url(r'^import/$', views.ImportCollectionProvider.as_view(), name='import'),
    url(r'^(?P<collection_provider_id>[a-z0-9]+)/$', views.CollectionProviderDetail.as_view(), name='detail'),
    url(r'^(?P<collection_provider_id>[a-z0-9]+)/delete/$', views.DeleteCollectionProvider.as_view(), name='delete'),
    # NOTE(review): "ExportColectionProvider" looks misspelled -- confirm the
    # class name in views.py before renaming either side.
    url(r'^(?P<collection_provider_id>[a-z0-9]+)/export/$', views.ExportColectionProvider.as_view(), name='export'),
    # NOTE(review): duplicates the 'import' name used by the top-level route
    # above; reverse('admin:import') resolves to only one of them -- verify.
    url(r'^(?P<collection_provider_id>[a-z0-9]+)/import/$', views.ImportCollectionProvider.as_view(), name='import'),
]
| 785 | 268 |
from flask import render_template
from flask import Flask
import plotly as py
import plotly.graph_objs as go
app = Flask(__name__)
@app.route('/')
def index():
    """Render the home page with a Plotly bar chart embedded as an HTML div.

    Side effects: prints the div's in-memory size to stdout and writes the
    fragment to ./div1.txt for inspection.
    """
    pyplt = py.offline.plot
    trace0 = go.Bar(
        x=['A类户型', 'B类户型', 'C类户型'],
        y=[20, 14, 23],
        text=['27%市场占有率', '24%市场占有率', '19%市场占有率'],
        marker=dict(
            color='rgb(158,202,225)',
            line=dict(
                color='rgb(8,48,107)',
                width=1.5,
            )
        ),
        opacity=0.6
    )
    data = [trace0]
    layout = go.Layout(
        title='2017年1月不同户型房屋单价情况',
    )
    fig = go.Figure(data=data, layout=layout)
    # output_type='div' yields a standalone HTML fragment; plotly.js must be
    # loaded separately by the template (include_plotlyjs=False).
    div = pyplt(fig, output_type='div', include_plotlyjs=False, auto_open=False, show_link=False)
    context = {}
    context['graph'] = div
    import sys
    print('参数div占用内存大小为 %d bytes'%sys.getsizeof(div))
    with open('div1.txt', 'w') as file:
        file.write(div)
    return render_template("index2.html",
                           title = 'Home',
                           context = context)
if __name__ == '__main__':
    app.run()
| 1,087 | 448 |
# Legacy Django hook pointing at this app's AppConfig subclass.
default_app_config = 'test_custom_user_subclass.apps.CustomUserSubclassConfig'
| 79 | 26 |
from datetime import datetime
from app import db
class User(db.Model):
    # Registered user; username is unique and indexed for lookups.
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(64))
class Category(db.Model):
    # Post category; the category string itself is the primary key.
    category = db.Column(db.String(32), primary_key=True, unique=True)
class Tag(db.Model):
    # Post tag; the tag string itself is the primary key.
    tag = db.Column(db.String(32), primary_key=True, unique=True)
class Post(db.Model):
    # Blog post; slug is the URL identifier, draft hides unpublished posts.
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(64), unique=True)
    slug = db.Column(db.String(32), unique=True)
    content = db.Column(db.String(1000))
    # References Category.category (a string primary key), not an integer id.
    category = db.Column(db.String(32), db.ForeignKey(Category.category))
    pub_date = db.Column(db.Date)
    last_updated = db.Column(db.DateTime, default=datetime.utcnow)
    draft = db.Column(db.Boolean)
class Comment(db.Model):
    # User comment on a post; approved gates moderation before display.
    id = db.Column(db.Integer, primary_key=True)
    post_id = db.Column(db.Integer, db.ForeignKey(Post.id))
    user_id = db.Column(db.Integer, db.ForeignKey(User.id))
    comment = db.Column(db.String(256))
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
    approved = db.Column(db.Boolean)
class Media(db.Model):
    # Uploaded media asset; size is stored in bytes per the Integer column.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(32))
    filepath = db.Column(db.String(16))
    mime = db.Column(db.String(16))
    size = db.Column(db.Integer)
| 1,488 | 531 |
__source__ = 'https://leetcode.com/problems/majority-element/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/majority-element.py
# Time: O(n)
# Space: O(1)
#
# Description: Leetcode # 169. Majority Element
#
# Given an array of size n, find the majority element.
# The majority element is the element that appears more than [n/2] times.
#
# You may assume that the array is non-empty and the majority element always exist in the array.
# Companies
# Adobe Zenefits
# Related Topics
# Array Divide and Conquer Bit Manipulation
# Similar Questions
# Majority Element II
#
#
import unittest
class Solution:
    # @param num, a list of integers
    # @return an integer
    def majorityElement(self, num):
        """Boyer-Moore majority vote: O(n) time, O(1) space.

        Assumes the majority element (> n/2 occurrences) exists in num,
        per the problem statement.
        """
        idx, cnt = 0, 1
        # range() works on both Python 2 and 3; the original used xrange and
        # left a debug print running on every iteration.
        for i in range(1, len(num)):
            if num[idx] == num[i]:
                cnt += 1
            else:
                cnt -= 1
                if cnt == 0:
                    # Current candidate exhausted; adopt num[i] as the new one.
                    idx = i
                    cnt = 1
        return num[idx]
# test
class TestMethods(unittest.TestCase):
    def test_Local(self):
        """Check the voting algorithm on a list whose majority element is 5."""
        # Fix: the original asserted 1 == 1 and merely printed the result;
        # assert the actual expected majority element instead.
        self.assertEqual(
            5, Solution().majorityElement([1, 2, 3, 4, 5, 5, 5, 5, 5, 5, 6]))
if __name__ == '__main__':
    unittest.main()
# Reference Java solutions kept as an inert module-level string (never executed).
Java = '''
# Thought: https://leetcode.com/problems/majority-element/solution/
#
Java solutions (sorting, hashmap, moore voting, bit manipulation).
# Sorting
# 5ms 67.62%
class Solution {
    public int majorityElement(int[] nums) {
        Arrays.sort(nums);
        return nums[nums.length/2];
    }
}
# Hashtable
# 25ms 24%
class Solution {
    public int majorityElement(int[] nums) {
        Map<Integer, Integer> myMap = new HashMap<Integer, Integer>();
        //Hashtable<Integer, Integer> myMap = new Hashtable<Integer, Integer>();
        int ret=0;
        for (int num: nums) {
            if (!myMap.containsKey(num))
                myMap.put(num, 1);
            else
                myMap.put(num, myMap.get(num)+1);
            if (myMap.get(num)>nums.length/2) {
                ret = num;
                break;
            }
        }
        return ret;
    }
}
# Bit manipulation
# 7ms 47.62%
class Solution {
    public int majorityElement(int[] nums) {
        int[] bit = new int[32];
        for (int num: nums)
            for (int i=0; i<32; i++)
                if ((num>>(31-i) & 1) == 1)
                    bit[i]++;
        int ret=0;
        for (int i=0; i<32; i++) {
            bit[i]=bit[i]>nums.length/2?1:0;
            ret += bit[i]*(1<<(31-i));
        }
        return ret;
    }
}
# Moore voting algorithm
# 3ms 100%
class Solution {
    public int majorityElement(int[] nums) {
        int res = nums[0], count = 1;
        for (int i = 1; i < nums.length; i++) {
            if (res == nums[i]) {
                count++;
            } else {
                count--;
                if(count == 0) {
                    res= nums[i];
                    count = 1;
                }
            }
        }
        return res;
    }
}
# 3ms 100%
class Solution {
    public int majorityElement(int[] nums) {
        int count=0, ret = 0;
        for (int num: nums) {
            if (count==0)
                ret = num;
            if (num!=ret)
                count--;
            else
                count++;
        }
        return ret;
    }
}
'''
| 3,360 | 1,137 |
from .setup import (
create_consistent_model,
set_default_configs_and_snm3_medium,
gapfill_model,
gapfill_medium,
score_memote,
)
| 150 | 56 |
import statistics
from datetime import date
import psycopg2
from psycopg2 import sql
class Log:
    """One player's box score for a single game, plus persistence helpers.

    ``score`` is expected to be a dict-like box-score row whose ``team``
    value is an object exposing ``.name``; ``gameday`` is the game date.
    """

    def __init__(self, score, gameday):
        # Gather player data from the scraped box score.
        self.name = score.get('name')
        self.team = (score.get('team')).name
        self.date = gameday
        self.mins = round(((score.get('seconds_played'))/60), 2)
        self.fgm = score.get('made_field_goals')
        self.fga = score.get('attempted_field_goals')
        # Fix: use == rather than "is" -- identity comparison with int
        # literals relies on CPython small-int caching and is a SyntaxWarning.
        if self.fga == 0:
            self.fg = None
        else:
            self.fg = round((self.fgm/self.fga), 4)
        self.ftm = score.get('made_free_throws')
        self.fta = score.get('attempted_free_throws')
        if self.fta == 0:
            self.ft = None
        else:
            self.ft = round((self.ftm/self.fta), 4)
        self.tpm = score.get('made_three_point_field_goals')
        # Points: two-pointers made, threes made, free throws made.
        self.pts = ((self.fgm - self.tpm)*2) + (self.tpm*3) + (self.ftm*1)
        self.reb = score.get('offensive_rebounds') + score.get('defensive_rebounds')
        self.ast = score.get('assists')
        self.stl = score.get('steals')
        self.blk = score.get('blocks')
        self.tov = score.get('turnovers')

    def exists(self, cur):
        """Return 1 if this player is already on league_roster, else 0."""
        SQL = 'SELECT * FROM league_roster WHERE name = %s;'
        cur.execute(SQL, (self.name,))
        ans = cur.fetchone()
        if ans is None:
            return 0
        else:
            return 1

    def get_pid(self, cur):
        """Look up and return this player's player_id from league_roster."""
        query = 'SELECT player_id FROM league_roster WHERE name = %s;'
        cur.execute(query, (self.name,))
        player_id = cur.fetchone()
        return player_id[0]

    def update_season_measures(self, p_id, cur):
        """Recompute games played plus per-stat averages/std-devs for p_id."""
        # Update games played.
        ct_query = 'SELECT COUNT(player_id) FROM game_logs WHERE player_id = %s'
        cur.execute(ct_query, (p_id,))
        res = (cur.fetchone())[0]
        update_gp = 'UPDATE league_roster SET gp = %s WHERE player_id = %s'
        cur.execute(update_gp, (res, p_id))
        # Update averages and standard deviations.  Column names cannot be
        # bound as query parameters, so compose them with psycopg2.sql
        # Identifiers; the original interpolated both the column and p_id
        # with str.format -- an SQL injection hazard.  p_id is now always a
        # bound parameter.
        avg_vars = ('mins', 'fg', 'fga', 'ft', 'fta', 'tpm', 'pts', 'reb', 'ast', 'stl', 'blk', 'tov')
        for avg in avg_vars:
            sd = avg + '_sd'
            # Average.
            avg_query = sql.SQL('SELECT AVG({}) FROM game_logs WHERE player_id = %s').format(sql.Identifier(avg))
            cur.execute(avg_query, (p_id,))
            res = (cur.fetchone())[0]
            if res is not None:
                res = round(res, 4)
            update_avg = sql.SQL('UPDATE league_roster SET {} = %s WHERE player_id = %s').format(sql.Identifier(avg))
            cur.execute(update_avg, (res, p_id))
            # Standard deviation.
            sd_query = sql.SQL('SELECT STDDEV({}) FROM game_logs WHERE player_id = %s').format(sql.Identifier(avg))
            cur.execute(sd_query, (p_id,))
            res = (cur.fetchone())[0]
            if res is not None:
                res = round(res, 4)
            update_sd = sql.SQL('UPDATE league_roster SET {} = %s WHERE player_id = %s').format(sql.Identifier(sd))
            cur.execute(update_sd, (res, p_id))

    def ins_log(self, p_id, cur):
        """Insert this game into game_logs, then refresh season aggregates."""
        ins = 'INSERT INTO game_logs(player_id, name, date, mins, fgm, fga, ftm, fta, tpm, pts, reb, ast, stl, blk, tov)\
            VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
        ins_args = (p_id, self.name, self.date, self.mins, self.fgm, self.fga, self.ftm, self.fta, self.tpm,
                    self.pts, self.reb, self.ast, self.stl, self.blk, self.tov)
        cur.execute(ins, ins_args)
        # INSERT statement breaks with None values, use update for fields where possible
        update = 'UPDATE game_logs SET fg = %s, ft = %s WHERE date = %s AND name = %s'
        cur.execute(update, (self.fg, self.ft, self.date, self.name))
        # Fix: call through self rather than Log.update_season_measures(self, ...).
        self.update_season_measures(p_id, cur)

    def add_player(self, cur):
        """Add the player to league_roster, then record this game's log."""
        ins = 'INSERT INTO league_roster(name, team) VALUES(%s, %s);'
        cur.execute(ins, (self.name, self.team))
        # Add game to game log.
        p_id = self.get_pid(cur)
        self.ins_log(p_id, cur)
import os
import time
import csv
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim
cudnn.benchmark = True
from models import ResNet
from metrics import AverageMeter, Result
from dataloaders.dense_to_sparse import UniformSampling, SimulatedStereo
import criteria
import utils
# This change in order to get lists
def load_split():
    """Read sample indices from trainIdxs.txt / testIdxs.txt in the current
    directory and carve a validation split off the end of the training list.

    The files store 1-based indices, one per line; all returned lists are
    0-based.

    Returns:
        (train_lists, val_lists, test_lists) index lists.
    """
    current_directory = os.getcwd()
    train_lists_path = current_directory + '/trainIdxs.txt'
    test_lists_path = current_directory + '/testIdxs.txt'
    # Fix: use "with" so the files are closed (the original leaked both
    # handles); blank lines are skipped instead of crashing int().
    with open(train_lists_path) as train_f:
        train_lists = [int(line) - 1 for line in train_f if line.strip()]
    with open(test_lists_path) as test_f:
        test_lists = [int(line) - 1 for line in test_f if line.strip()]
    # Last 20% of the training indices become the validation set.
    val_start_idx = int(len(train_lists) * 0.8)
    # NOTE(review): the [:-1] also drops the final training index from the
    # validation split -- preserved for identical behavior, but verify intent.
    val_lists = train_lists[val_start_idx:-1]
    train_lists = train_lists[0:val_start_idx]
    return train_lists, val_lists, test_lists
# This change in order to get lists
train_lists, val_lists, test_lists = load_split()
# Parse CLI options once at import time; `args` drives everything below.
args = utils.parse_command()
print(args)
# Column order for the train/test CSV metric logs written each epoch.
fieldnames = ['mse', 'rmse', 'absrel', 'lg10', 'mae',
              'delta1', 'delta2', 'delta3',
              'data_time', 'gpu_time']
# Track the best validation result seen so far across epochs.
best_result = Result()
best_result.set_to_worst()
def create_data_loaders(args):
    """Build train/val DataLoaders for the dataset named by args.data.

    Returns (train_loader, val_loader); train_loader is None in evaluate
    mode.  Raises RuntimeError for an unknown dataset name.
    """
    # Data loading code
    print("=> creating data loaders ...")
    traindir = os.path.join('data', args.data, 'train')
    valdir = os.path.join('data', args.data, 'val')
    train_loader = None
    val_loader = None
    # sparsifier is a class for generating random sparse depth input from the ground truth
    sparsifier = None
    max_depth = args.max_depth if args.max_depth >= 0.0 else np.inf
    if args.sparsifier == UniformSampling.name:
        sparsifier = UniformSampling(num_samples=args.num_samples, max_depth=max_depth)
    elif args.sparsifier == SimulatedStereo.name:
        sparsifier = SimulatedStereo(num_samples=args.num_samples, max_depth=max_depth)
    '''
    if args.data == 'nyudepthv2':
        from dataloaders.nyu_dataloader import NYUDataset
        if not args.evaluate:
            train_dataset = NYUDataset(traindir, type='train',
                                       modality=args.modality, sparsifier=sparsifier)
        val_dataset = NYUDataset(valdir, type='val',
                                 modality=args.modality, sparsifier=sparsifier)
    '''
    # NYU data is read from a single labeled .mat file using the index lists
    # produced by load_split() (replaces the directory-based loader above).
    if args.data == 'nyudepthv2':
        from dataloaders.nyu_dataloader import NYUDataset
        if not args.evaluate:
            train_dataset = NYUDataset('nyu_depth_v2_labeled.mat',type = 'train',
                                       modality=args.modality, sparsifier=sparsifier, lists = train_lists)
        val_dataset = NYUDataset('nyu_depth_v2_labeled.mat', type = 'val', modality = args.modality, sparsifier = sparsifier, lists = val_lists)
    elif args.data == 'kitti':
        from dataloaders.kitti_dataloader import KITTIDataset
        if not args.evaluate:
            train_dataset = KITTIDataset(traindir, type='train',
                                         modality=args.modality, sparsifier=sparsifier)
        val_dataset = KITTIDataset(valdir, type='val',
                                   modality=args.modality, sparsifier=sparsifier)
    else:
        raise RuntimeError('Dataset not found.' +
                           'The dataset must be either of nyudepthv2 or kitti.')
    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
        batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)
    # put construction of train loader here, for those who are interested in testing only
    if not args.evaluate:
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size, shuffle=True,
            num_workers=args.workers, pin_memory=True, sampler=None,
            worker_init_fn=lambda work_id:np.random.seed(work_id))
            # worker_init_fn ensures different sampling patterns for each data loading thread
    print("=> data loaders created.")
    return train_loader, val_loader
def main():
    """Entry point: evaluate a saved model, resume training, or train anew.

    Behavior is driven by the module-level ``args``: --evaluate loads a best
    model and only validates; --resume restarts from a checkpoint; otherwise
    a fresh ResNet is created and trained for args.epochs epochs, with the
    best-RMSE checkpoint and CSV metric logs written to output_directory.
    """
    global args, best_result, output_directory, train_csv, test_csv
    # evaluation mode
    start_epoch = 0
    if args.evaluate:
        assert os.path.isfile(args.evaluate), \
        "=> no best model found at '{}'".format(args.evaluate)
        print("=> loading best model '{}'".format(args.evaluate))
        checkpoint = torch.load(args.evaluate)
        output_directory = os.path.dirname(args.evaluate)
        # Checkpoint's args replace the CLI args from here on.
        args = checkpoint['args']
        start_epoch = checkpoint['epoch'] + 1
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        print("=> loaded best model (epoch {})".format(checkpoint['epoch']))
        _, val_loader = create_data_loaders(args)
        args.evaluate = True
        validate(val_loader, model, checkpoint['epoch'], write_to_file=False)
        return
    # optionally resume from a checkpoint
    elif args.resume:
        assert os.path.isfile(args.resume), \
            "=> no checkpoint found at '{}'".format(args.resume)
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        args = checkpoint['args']
        start_epoch = checkpoint['epoch'] + 1
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
        output_directory = os.path.dirname(os.path.abspath(args.resume))
        print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))
        train_loader, val_loader = create_data_loaders(args)
        args.resume = True
    # create new model
    else:
        train_loader, val_loader = create_data_loaders(args)
        print("=> creating Model ({}-{}) ...".format(args.arch, args.decoder))
        # One input channel per modality character (e.g. 'rgbd' -> 4).
        in_channels = len(args.modality)
        if args.arch == 'resnet50':
            model = ResNet(layers=50, decoder=args.decoder, output_size=train_loader.dataset.output_size,
                           in_channels=in_channels, pretrained=args.pretrained)
        elif args.arch == 'resnet18':
            model = ResNet(layers=18, decoder=args.decoder, output_size=train_loader.dataset.output_size,
                           in_channels=in_channels, pretrained=args.pretrained)
        print("=> model created.")
        optimizer = torch.optim.SGD(model.parameters(), args.lr, \
            momentum=args.momentum, weight_decay=args.weight_decay)
        # model = torch.nn.DataParallel(model).cuda() # for multi-gpu training
        model = model.cuda()
    # define loss function (criterion) and optimizer
    if args.criterion == 'l2':
        criterion = criteria.MaskedMSELoss().cuda()
    elif args.criterion == 'l1':
        criterion = criteria.MaskedL1Loss().cuda()
    # create results folder, if not already exists
    output_directory = utils.get_output_directory(args)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    train_csv = os.path.join(output_directory, 'train.csv')
    test_csv = os.path.join(output_directory, 'test.csv')
    best_txt = os.path.join(output_directory, 'best.txt')
    # create new csv files with only header
    if not args.resume:
        with open(train_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
        with open(test_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
    for epoch in range(start_epoch, args.epochs):
        utils.adjust_learning_rate(optimizer, epoch, args.lr)
        train(train_loader, model, criterion, optimizer, epoch) # train for one epoch
        result, img_merge = validate(val_loader, model, epoch) # evaluate on validation set
        # remember best rmse and save checkpoint
        is_best = result.rmse < best_result.rmse
        if is_best:
            best_result = result
            with open(best_txt, 'w') as txtfile:
                txtfile.write("epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n".
                    format(epoch, result.mse, result.rmse, result.absrel, result.lg10, result.mae, result.delta1, result.gpu_time))
            if img_merge is not None:
                img_filename = output_directory + '/comparison_best.png'
                utils.save_image(img_merge, img_filename)
        utils.save_checkpoint({
            'args': args,
            'epoch': epoch,
            'arch': args.arch,
            'model': model,
            'best_result': best_result,
            'optimizer' : optimizer,
        }, is_best, epoch, output_directory)
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one SGD training epoch and append averaged metrics to train_csv."""
    average_meter = AverageMeter()
    model.train() # switch to train mode
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        input, target = input.cuda(), target.cuda()
        # synchronize so data_time/gpu_time measure real work, not async queueing
        torch.cuda.synchronize()
        data_time = time.time() - end
        # compute pred
        end = time.time()
        pred = model(input)
        loss = criterion(pred, target)
        optimizer.zero_grad()
        loss.backward() # compute gradient and do SGD step
        optimizer.step()
        torch.cuda.synchronize()
        gpu_time = time.time() - end
        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()
        if (i + 1) % args.print_freq == 0:
            print('=> output: {}'.format(output_directory))
            print('Train Epoch: {0} [{1}/{2}]\t'
                  't_Data={data_time:.3f}({average.data_time:.3f}) '
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                  epoch, i+1, len(train_loader), data_time=data_time,
                  gpu_time=gpu_time, result=result, average=average_meter.average()))
    avg = average_meter.average()
    with open(train_csv, 'a') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writerow({'mse': avg.mse, 'rmse': avg.rmse, 'absrel': avg.absrel, 'lg10': avg.lg10,
            'mae': avg.mae, 'delta1': avg.delta1, 'delta2': avg.delta2, 'delta3': avg.delta3,
            'gpu_time': avg.gpu_time, 'data_time': avg.data_time})
def validate(val_loader, model, epoch, write_to_file=True):
    """Evaluate the model on val_loader; return (avg_result, img_merge).

    Also saves a comparison image built from 8 evenly spaced samples and,
    when write_to_file is True, appends averaged metrics to test_csv.
    """
    average_meter = AverageMeter()
    model.eval() # switch to evaluate mode
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end
        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)
        torch.cuda.synchronize()
        gpu_time = time.time() - end
        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()
        # save 8 images for visualization
        skip = 50
        if args.modality == 'd':
            img_merge = None
        else:
            if args.modality == 'rgb':
                rgb = input
            elif args.modality == 'rgbd':
                # Split the 4-channel input back into RGB + sparse depth.
                rgb = input[:,:3,:,:]
                depth = input[:,3:,:,:]
            if i == 0:
                if args.modality == 'rgbd':
                    img_merge = utils.merge_into_row_with_gt(rgb, depth, target, pred)
                else:
                    img_merge = utils.merge_into_row(rgb, target, pred)
            elif (i < 8*skip) and (i % skip == 0):
                if args.modality == 'rgbd':
                    row = utils.merge_into_row_with_gt(rgb, depth, target, pred)
                else:
                    row = utils.merge_into_row(rgb, target, pred)
                img_merge = utils.add_row(img_merge, row)
            elif i == 8*skip:
                # All 8 rows collected; write the comparison image once.
                filename = output_directory + '/comparison_' + str(epoch) + '.png'
                utils.save_image(img_merge, filename)
        if (i+1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                   i+1, len(val_loader), gpu_time=gpu_time, result=result, average=average_meter.average()))
    avg = average_meter.average()
    print('\n*\n'
        'RMSE={average.rmse:.3f}\n'
        'MAE={average.mae:.3f}\n'
        'Delta1={average.delta1:.3f}\n'
        'REL={average.absrel:.3f}\n'
        'Lg10={average.lg10:.3f}\n'
        't_GPU={time:.3f}\n'.format(
        average=avg, time=avg.gpu_time))
    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({'mse': avg.mse, 'rmse': avg.rmse, 'absrel': avg.absrel, 'lg10': avg.lg10,
                'mae': avg.mae, 'delta1': avg.delta1, 'delta2': avg.delta2, 'delta3': avg.delta3,
                'data_time': avg.data_time, 'gpu_time': avg.gpu_time})
    return avg, img_merge
# Script entry point: train or evaluate depending on the parsed CLI args.
if __name__ == '__main__':
    main()
| 14,141 | 4,770 |
import urllib
from django import forms
from etcetera.reports import models as reports
from etcetera.extras.dateutil import formfield_callback, DateTimeField
class SearchForm(forms.Form):
    """Single-field free-text search form for reports."""
    q = forms.CharField(max_length=50)
    def get_list(self):
        """Return the model field names that the search should scan."""
        # The search list is automatically everything
        out_list = [
            'name',
        ]
        return out_list
    def as_url_args(self):
        """Serialize cleaned_data as a query string.

        NOTE(review): urllib.urlencode is the Python 2 API; under Python 3
        this would be urllib.parse.urlencode -- confirm the target runtime.
        """
        return urllib.urlencode(self.cleaned_data)
class ReportModelForm(forms.ModelForm):
    """ModelForm for Report; slug and created_by are set server-side, not by users."""
    # formfield_callback swaps in the project's custom date/time fields.
    formfield_callback = formfield_callback
    class Meta:
        model = reports.Report
        exclude = ('slug','created_by',)
#!/usr/bin/env python3
"""
Handles the service announcement of the Cards against Cli Server.
Debugging command: (linux, requires avahi)
(A similar command should be available using bonjour on mac)
> avahi-browse --resolve "_cac._tcp"
Use the environment variable CAC_ANNOUNCE_INTERFACES to control
on which interface(s) the service should be announced.
Example:
export CAC_ANNOUNCE_INTERFACES=lo,wlp4s0
"""
from zeroconf import ServiceInfo, Zeroconf
import socket
import logging
import netifaces
import os
import uuid
# Module-level logger, named after this module for hierarchy-based filtering.
_logger = logging.getLogger(__name__)
def start_announcing_on_if(server_name, interface, address, port):
    """Register the _cac._tcp zeroconf service on a single interface.

    Returns the (Zeroconf, ServiceInfo) pair needed to unregister later.
    """
    _logger.info(
        f"Starting to announce server named '{server_name}' "
        f"via {interface} as {address}:{port}.")
    # A random UUID keeps multiple simultaneous announcements distinct.
    service_uuid = uuid.uuid4()
    service_type = "_cac._tcp.local."
    # NOTE(review): passing a packed IPv4 address positionally matches the
    # legacy zeroconf ServiceInfo signature; newer releases expect
    # addresses=[...] -- confirm against the pinned zeroconf version.
    service = ServiceInfo(service_type,
                          f"Cards-Against-Cli-Server-"
                          f"{service_uuid}.{service_type}",
                          socket.inet_aton(address), port,
                          properties=dict(name=server_name.encode("utf-8")))
    zeroconf = Zeroconf(interfaces=[address])
    zeroconf.register_service(service)
    return zeroconf, service
def stop_announcing_on_if(zeroconf, service, iface):
    """Unregister *service* from *zeroconf* and shut the instance down."""
    _logger.info(f"Unregistering service on {iface}...")
    zeroconf.unregister_service(service)
    zeroconf.close()
def stop_announcing(announcers):
    """Tear down every (zeroconf, service, iface) announcement triple."""
    for announcement in announcers:
        stop_announcing_on_if(*announcement)
def start_announcing(server_name, port):
    """Announce the server on every eligible interface.

    Returns a list of (zeroconf, service, iface) triples for later teardown.
    """
    return [
        (*start_announcing_on_if(server_name, iface, addr, port), iface)
        for iface, addr in get_interfaces().items()
    ]
def get_interfaces():
    """Map interface name -> IPv4 address for every announceable interface.

    If CAC_ANNOUNCE_INTERFACES is set (comma-separated names), only the
    listed interfaces are kept.
    """
    addresses = {}
    for iface in netifaces.interfaces():
        addr = get_address_for_interface(iface)
        if addr:
            addresses[iface] = addr
    whitelist_env = os.environ.get("CAC_ANNOUNCE_INTERFACES")
    if whitelist_env is not None:
        allowed = whitelist_env.split(',')
        addresses = {iface: addr
                     for iface, addr in addresses.items()
                     if iface in allowed and iface != ""}
    return addresses
def get_address_for_interface(iface):
    """Return the first IPv4 address bound to *iface*, or None.

    Only AF_INET is considered: the python zeroconf implementation used
    here supports IPv4 only.
    """
    inet_entries = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])
    for entry in inet_entries:
        if "addr" in entry:
            return entry["addr"]
    return None
| 2,873 | 909 |
# The program reads two numbers, then shows a menu of operations.
''' The program asks for 2 numbers.
Menu
[1]- Sum
[2]- Multiply
[3]- Largest
[4]- New numbers
[5]- Exit
'''
from time import sleep
n1 = int(input('Informe o primeiro número: '))
n2 = int(input('Informe o segundo número: '))
escolha = 0
# Loop until the user picks option 5 (exit).
while escolha != 5:
    print('-=-'*15)
    print('''
    Tabela
    [1]- Soma
    [2]- Multiplica
    [3]- Maior
    [4]-Novos números
    [5]- sair
    ''')
    escolha = int(input('>>>>>>>> Qual opção: '))
    if escolha == 1:
        soma = n1+n2
        print('O resultado da soma entre {} e {} = {}'.format( n1, n2, soma))
    elif escolha == 2:
        multi = n1*n2
        print('O resultado da mutiplicação entre {} x {} = {}'.format( n1, n2, multi))
    elif escolha == 3:
        if n1 > n2:
            print('O {} é maior que {}.'.format(n1,n2))
        elif n1 == n2:
            print('Números iguais.')
        else:
            print('O {} é maior que {}.'.format(n2, n1))
    elif escolha == 4:
        # Replace both operands with freshly entered values.
        n1 = int(input('Informe o primeiro número: '))
        n2 = int(input('Informe o segundo número: '))
    elif escolha == 5:
        print('FINALIZANDO....')
    else:
        print('Opção inválida. Tente novamente!')
    # Brief pause so the user can read the output before the menu repeats.
    sleep(2)
print('Fim do Programa. Volte sempre!')
| 1,440 | 554 |
# -*- coding: utf-8 -*-
"""
file: wamp_services.py
WAMP service methods the module exposes.
"""
import os
import json
import logging
from twisted.internet import reactor
from autobahn.wamp.exception import ApplicationError
from graphit.graph_io.io_jsonschema_format import read_json_schema
from mdstudio.component.session import ComponentSession
from mdstudio.deferred.chainable import chainable
from mdstudio_cli.schema_parser import SchemaParser, write_schema_info, prepaire_config, process_results
from mdstudio_cli.schema_classes import CLIORM
# Dedicated CLI logger shared across this module.
lg = logging.getLogger('clilogger')
class CliWampApi(ComponentSession):
    """
    CLI WAMP methods.
    """
    def authorize_request(self, uri, claims):
        """
        If you were allowed to call this in the first place,
        I will assume you are authorized
        """
        return True
    def result_callback(self, result):
        """
        WAMP result callback
        Process the results storing all file-like output to file.
        Optionally store the full results directory as a JSON file.
        :param result: WAMP results
        :type result:  :py:dict
        """
        # Store results as JSON
        if self.config.extra.get('store_json', False):
            result_json = os.path.join(os.getcwd(), '{0}.json'.format(self.config.extra['uri']))
            # Fix: open via "with" so the file handle is closed (the original
            # passed open(...) directly into json.dump and leaked it).
            with open(result_json, 'w') as json_file:
                json.dump(result, json_file)
        # Process file-like output and print remaining.
        process_results(result)
        # Disconnect from broker and stop reactor event loop
        self.disconnect()
        reactor.stop()
    def error_callback(self, failure):
        """
        WAMP error callback
        Process a WAMP endpoint failure and write the failure message to
        standard out (stdout).
        :param failure: Endpoint failure message
        """
        failure_message = failure
        if isinstance(failure, Exception) or isinstance(failure, str):
            failure_message = str(failure)
        elif isinstance(failure.value, ApplicationError):
            failure_message = failure.value.error_message()
        else:
            # Fix: the original called getErrorMessage() and discarded the
            # result, so the raw Failure object was logged instead.
            failure_message = failure.getErrorMessage()
        lg.error('Unable to process: {0}'.format(failure_message))
        # Disconnect from broker and stop reactor event loop
        self.disconnect()
        reactor.stop()
    @chainable
    def on_run(self):
        """Resolve the endpoint's request schema, then either print endpoint
        info or call the endpoint with the prepared configuration."""
        # Get endpoint config
        config = self.config.extra
        # Retrieve JSON schemas for the endpoint request and response
        schemaparser = SchemaParser(self)
        request = yield schemaparser.get(uri=config['uri'], request=True)
        request = read_json_schema(request)
        request.orm = CLIORM
        # Write print friendly endpoint definition to stdout or call endpoint
        if config['get_endpoint_info']:
            write_schema_info(request, config['uri'])
            # Disconnect from broker and stop reactor event loop
            self.disconnect()
            reactor.stop()
        else:
            endpoint_input = prepaire_config(request, config['package_config'])
            # Call method and wait for results
            deferred = self.call(config['uri'], endpoint_input)
            deferred.addCallback(self.result_callback)
            deferred.addErrback(self.error_callback)
| 3,320 | 899 |
# coding=utf-8
import base64
import time
import logging
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import NoSuchElementException
from PIL import Image
from io import StringIO, BytesIO
from synchronize_util import synchronized, CONSOLE_LOCK
# This module is for code verification
# Every time there would be only one for users
# JavaScript executed in the browser to extract the captcha image as a
# base64-encoded PNG.  BUG FIX: made a raw string so the regex escape
# `\/` is not treated as an invalid Python escape sequence (a
# SyntaxWarning on modern Python); the string content is unchanged.
get_image_data = r'''
function getBase64Image(img) {
    // Create an empty canvas element
    var canvas = document.createElement("canvas");
    canvas.width = img.width;
    canvas.height = img.height;

    // Copy the image contents to the canvas
    var ctx = canvas.getContext("2d");
    ctx.drawImage(img, 0, 0);

    // Get the data-URL formatted image
    // Firefox supports PNG and JPEG. You could check img.src to
    // guess the original format, but be aware the using "image/jpg"
    // will re-encode the image.
    var dataURL = canvas.toDataURL("image/png");

    return dataURL.replace(/^data:image\/(png|jpg);base64,/, "");
    // return dataURL;
}
code_img = document.querySelector('img[node-type="yzm_img"]');
// code_img = document.querySelector('img');
data_URL = getBase64Image(code_img);
return data_URL;
'''
def test():
    """Manual smoke test: load a Weibo pin-code page and run verification."""
    driver = webdriver.PhantomJS()
    try:
        driver.get('http://s.weibo.com/ajax/pincode/pin?type=sass&ts=1405404856')
        # BUG FIX: verify_user requires a verification type argument; the
        # original call omitted it and raised TypeError.
        # 'search' assumed for this pin-code page -- TODO confirm.
        verify_user(driver, 'search')
    finally:
        # Always release the browser process.
        driver.quit()
    return
def get_img(base64_str):
    '''
    Decode a base64 string into a PIL.Image instance (PNG payload).
    '''
    raw_bytes = base64.b64decode(base64_str)
    return Image.open(BytesIO(raw_bytes))
def get_code(img):
    '''
    Show *img* and ask the operator for its code; only one image may be
    served at a time. Returns the code string typed by the operator.
    '''
    img.show()
    return input('Please input the verificaiont code: ')
def verify_user_for_search(driver):
    '''
    Solve search-result captchas until a feed list appears.

    driver: the current driver which has landed on a verification page
    '''
    # An empty feed list means a captcha is blocking the results page.
    while not driver.find_elements_by_class_name('feed_list'):
        # Pull the captcha image, ask the operator for the code, submit it.
        code_png = get_img(driver.execute_script(get_image_data))
        verification_code = get_code(code_png)  # this action needs to be primitive
        code_input = driver.find_element_by_xpath('//input[@node-type="yzm_input"]')
        code_input.click()
        code_input.send_keys(verification_code.strip())
        submit_button = driver.find_element_by_xpath('//a[@node-type="yzm_submit"]')
        submit_button.click()
        time.sleep(5)
        driver.get_screenshot_as_file('./screenshot/after_verfiy.png')
    logging.info('verification completed!')
    return
def verify_user_for_login(driver):
    '''
    Handle the login captcha once.  Because login is retried in a loop,
    this captcha only needs to be solved a single time, unlike the
    search-captcha case.
    '''
    # BUG FIX: find_element_by_xpath raises NoSuchElementException when the
    # element is absent -- it never returns a falsy value, so the original
    # "no captcha" branch was unreachable and the absent-captcha path crashed.
    try:
        png_element = driver.find_element_by_xpath('//img[@node-type="verifycode_image"]')
    except NoSuchElementException:
        logging.info('There is no verfication code here, continue')
        return
    try:
        # Crop the captcha out of a full-page screenshot using the
        # element's reported location and size.
        location = png_element.location
        size = png_element.size
        logging.info('vrcode: location--{}, size--{}'.format(location, size))
        im = get_img(driver.get_screenshot_as_base64())
        left = location['x']
        top = location['y']
        right = location['x'] + size['width']
        bottom = location['y'] + size['height']
        im = im.crop((left, top, right, bottom))  # defines crop points
        verification_code = get_code(im)
        code_input = driver.find_element_by_xpath('//input[@name="verifycode"]')
        code_input.click()
        code_input.send_keys(verification_code.strip())
    except Exception as e:
        driver.get_screenshot_as_file('./screenshot/login_failed.png')
        # BUG FIX: corrected the garbled log message ('filed savedd').
        logging.info('error, file saved to ./screenshot/login_failed.png')
    return
@synchronized(CONSOLE_LOCK)  # this method is primitive
def verify_user(driver, v_type):
    '''
    Dispatch captcha handling by verification type.

    v_type: string, 'search', 'login'
    '''
    handlers = {
        'search': verify_user_for_search,
        'login': verify_user_for_login,
    }
    handler = handlers.get(v_type)
    if handler is None:
        logging.info('Unknown verification type')
    else:
        handler(driver)
    return
# Manual smoke-test entry point.
if __name__ == '__main__':
    test()
| 4,692 | 1,473 |
# Package metadata.
__version__ = "0.0.4"
__author__ = "刘士"
| 40 | 24 |
from visitor import PrintVisitor
class Printer(PrintVisitor):
    """A simple pretty-printer class.

    Each ``print_<NodeType>`` method returns the source-like string for
    one AST node (dispatch presumably happens in ``PrintVisitor.print``
    -- not visible here).  ``self._indent`` tracks the current
    indentation level; a method typically saves it, renders inline
    sub-expressions with the level reset to 0, restores it, and prefixes
    its own output with ``self.indent()``.
    """
    def __init__(self):
        # Current indentation depth (multiplied into spaces by indent()).
        self._indent = 0

    def print_Lambda(self, node):
        # 'fun x -> body' -- the body is rendered inline (indent 0).
        var_name = node.var.name
        indent_prev = self._indent
        self._indent = 0
        exp_str = self.print(node.exp)
        self._indent = indent_prev
        return self.indent() + 'fun ' + var_name + ' -> ' + exp_str

    def print_App(self, node):
        # Function application: '(f)(arg)', both parts rendered inline.
        indent_prev = self._indent
        self._indent = 0
        left_exp = self.print(node.left_exp)
        right_exp = self.print(node.right_exp)
        self._indent = indent_prev
        return self.indent() + '(' + left_exp + ')(' + right_exp + ')'

    def print_BinOp(self, node):
        # Binary operation: 'lhs op rhs'.
        op = node.op
        indent_prev = self._indent
        self._indent = 0
        left_str = self.print(node.left_exp)
        right_str = self.print(node.right_exp)
        self._indent = indent_prev
        return self.indent() + left_str + ' ' + op + ' ' + right_str

    def print_LetIn(self, node):
        # 'let x = e in\nbody' -- the bound expression is rendered
        # inline, the body at the restored (outer) level.
        var_name = node.var.name
        indent_prev = self._indent
        self._indent = 0
        exp_str = self.print(node.exp)
        self._indent = indent_prev
        body_str = self.print(node.body)
        self._indent = indent_prev  # redundant restore, kept as-is
        return self.indent() + 'let ' + var_name + \
            ' = ' + exp_str + ' in\n' + body_str

    def print_If(self, node):
        # Condition rendered inline; both branches one level deeper.
        indent_prev = self._indent
        self._indent = 0
        cond_str = self.print(node.cond)
        self._indent = indent_prev + 1
        exp1_str = self.print(node.exp1)
        exp2_str = self.print(node.exp2)
        self._indent = indent_prev
        return self.indent() + 'if ' + cond_str + '\n' + \
            self.indent() + 'then {\n' + exp1_str + '\n' + \
            self.indent() + '} \n' + self.indent() + \
            'else {\n' + exp2_str + '\n' + self.indent() + '}'

    def print_While(self, node):
        # Condition inline; loop body one level deeper.
        indent_prev = self._indent
        self._indent = 0
        cond_str = self.print(node.cond)
        self._indent = indent_prev + 1
        block_str = self.print(node.block)
        self._indent = indent_prev
        return self.indent() + 'while ' + cond_str + \
            ' do {\n' + self.indent() + block_str + \
            '\n' + self.indent() + '}'

    def print_Ref(self, node):
        # Reference allocation: 'ref (e)'.
        indent_prev = self._indent
        self._indent = 0
        exp = self.print(node.exp)
        self._indent = indent_prev
        return self.indent() + 'ref (' + exp + ')'

    def print_Bang(self, node):
        # Dereference: '!e'.
        indent_prev = self._indent
        self._indent = 0
        var_name = self.print(node.exp)
        self._indent = indent_prev
        return self.indent() + '!' + var_name

    def print_Assign(self, node):
        # Assignment: 'lhs := rhs'.
        indent_prev = self._indent
        self._indent = 0
        var_name = self.print(node.left_exp)
        exp_str = self.print(node.right_exp)
        self._indent = indent_prev
        return self.indent() + var_name + ' := ' + exp_str

    def print_Seq(self, node):
        # Sequencing: left rendered inline, right at the restored level.
        indent_prev = self._indent
        self._indent = 0
        left_exp_str = self.print(node.left_exp)
        self._indent = indent_prev
        right_exp_str = self.print(node.right_exp)
        self._indent = indent_prev
        return self.indent() + left_exp_str + ';\n' + \
            right_exp_str

    def print_Num(self, node):
        # Numeric literal.
        return str(node.value)

    def print_Bool(self, node):
        # Boolean literal.
        return str(node.value)

    def print_Var(self, node):
        # Variable reference.
        return node.name

    def indent(self):
        # Whitespace prefix for the current indentation level.
        return ' ' * self._indent

    def write(self, node):
        # Print the rendered AST, or fail loudly on a missing tree.
        if node is not None:
            print(self.print(node))
        else:
            raise Exception('Input AST is None')
| 3,800 | 1,216 |
"""
A PyTorch implmentation of the KL-Divergence Loss as described in (https://arxiv.org/abs/1511.06321)
Lua Implementation (not inspected yet TODO) (https://github.com/yenchanghsu/NNclustering/blob/master/BatchKLDivCriterion.lua)
"""
import torch
import torch.nn.functional as F
from torch import nn
import numpy as np
from IPython import embed
class triplet_loss(nn.Module):
    """Batch-all triplet margin loss.

    For every pair of embeddings sharing a cluster (anchor/positive) and
    every embedding from a *different* valid cluster (negative), the loss
    accumulates ``max(0, ||a - p|| + alpha - ||a - n||)`` and returns the
    mean over all such triples.

    NOTE(review): despite the file docstring mentioning a KL-divergence /
    Magnet loss, this module implements a plain triplet loss.
    """

    def __init__(self, alpha=7.18):
        """
        :param alpha: margin enforced between positive and negative distances
        """
        super(triplet_loss, self).__init__()
        self.alpha = alpha

    def forward(self, outputs, clusters):
        """
        :param outputs:  (batch, dim) float tensor of embeddings
        :param clusters: (batch,) tensor of cluster assignments
        :return: scalar triplet loss for the current batch
        """
        assert not clusters.requires_grad, \
            "nn criterions don't compute the gradient w.r.t. targets - please " \
            "mark these variables as volatile or not requiring gradients"

        outputs = outputs.float()
        cluster_ids = clusters.cpu().numpy()

        # Map cluster id -> local indices of its members in the batch.
        batch_clusters = {}
        for i, c in enumerate(cluster_ids):
            batch_clusters.setdefault(c, []).append(i)

        # Only clusters with >= 2 members can supply (anchor, positive) pairs;
        # as in the original, negatives are drawn from those clusters too.
        valid_clusters = [c for c, members in batch_clusters.items()
                          if len(members) >= 2]

        # Enumerate every (anchor, positive, negative) index triple.
        anchors, positives, negatives = [], [], []
        for m, c in enumerate(valid_clusters):
            members = batch_clusters[c]
            for d1 in range(len(members) - 1):
                for d2 in range(d1 + 1, len(members)):
                    for mN, cN in enumerate(valid_clusters):
                        if mN == m:
                            continue
                        for neg_idx in batch_clusters[cN]:
                            anchors.append(members[d1])
                            positives.append(members[d2])
                            negatives.append(neg_idx)

        # BUG FIX: the original unconditionally moved a (never used) tensor to
        # the GPU via .cuda(), crashing on CPU-only machines, and the mean of
        # an empty triple set was NaN. Return 0 when there are no triples.
        if not anchors:
            return outputs.new_zeros(())

        pos_dist = (outputs[anchors] - outputs[positives]).norm(p=2, dim=1)
        neg_dist = (outputs[anchors] - outputs[negatives]).norm(p=2, dim=1)
        return (pos_dist + self.alpha - neg_dist).clamp(min=0.0).mean()
| 3,364 | 1,026 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import os
import unittest
import warnings
from obspy.io.y.core import _is_y, _read_y
class CoreTestCase(unittest.TestCase):
    """
    Nanometrics Y file test suite.
    """
    def setUp(self):
        # Directory where the test files are located
        self.path = os.path.dirname(__file__)

    def test_is_y_file(self):
        """
        Testing Y file format.
        """
        test_file = os.path.join(self.path, 'data', 'YAYT_BHZ_20021223.124800')
        self.assertEqual(_is_y(test_file), True)
        self.assertEqual(_is_y("/path/to/slist.ascii"), False)
        self.assertEqual(_is_y("/path/to/tspair.ascii"), False)

    def test_read_y_file(self):
        """
        Testing reading Y file format.
        """
        test_file = os.path.join(self.path, 'data', 'YAYT_BHZ_20021223.124800')
        stream = _read_y(test_file)
        self.assertEqual(len(stream), 1)
        trace = stream[0]
        self.assertEqual(len(trace), 18000)
        self.assertEqual(trace.stats.sampling_rate, 100.0)
        self.assertEqual(trace.stats.station, 'AYT')
        self.assertEqual(trace.stats.channel, 'BHZ')
        self.assertEqual(trace.stats.location, '')
        self.assertEqual(trace.stats.network, '')
        # The header's amplitude tags must match the actual data extrema.
        self.assertEqual(max(trace.data),
                         trace.stats.y.tag_series_info.max_amplitude)
        self.assertEqual(min(trace.data),
                         trace.stats.y.tag_series_info.min_amplitude)

    def test_ignore_non_ascii_tag_station_info(self):
        """
        Test faulty Y file containing non ASCII chars in TAG_STATION_INFO.
        """
        test_file = os.path.join(self.path, 'data', 'YAZRSPE.20100119.060433')
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter('always')
            stream = _read_y(test_file)
        # Exactly one warning about the invalid characters is expected.
        self.assertEqual(len(caught), 1)
        self.assertIn('Invalid', str(caught[0]))
        self.assertEqual(len(stream), 1)
        trace = stream[0]
        self.assertEqual(len(trace), 16976)
        self.assertEqual(trace.stats.sampling_rate, 50.0)
        self.assertEqual(trace.stats.station, 'AZR')
        self.assertEqual(trace.stats.channel, 'E')
        self.assertEqual(trace.stats.location, 'SP')
        self.assertEqual(trace.stats.network, '')
def suite():
    """Build the test suite for this module.

    BUG FIX: ``unittest.makeSuite`` was deprecated and removed in
    Python 3.13; ``TestLoader.loadTestsFromTestCase`` is the supported
    equivalent (it uses the same default 'test' method prefix).
    """
    return unittest.TestLoader().loadTestsFromTestCase(CoreTestCase)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| 2,505 | 893 |
from nick_derobertis_site.software_page.config.banner import SOFTWARE_BANNER_MODEL
from nick_derobertis_site.software_page.config.card import SOFTWARE_CARD_MODELS
from nick_derobertis_site.software_page.software_page_model import SoftwarePageModel
# Assembled page model for the software page: title, nav link text,
# banner and the collection of project cards.
SOFTWARE_PAGE_MODEL = SoftwarePageModel(
    page_title="Nick DeRobertis' Open-Source Software",
    page_link_text='Software',
    banner_model=SOFTWARE_BANNER_MODEL,
    card_models=SOFTWARE_CARD_MODELS,
)
from django.shortcuts import render
from blog.models import Post
# Create your views here.
def landing(request):
    """Render the landing page with the three most recent blog posts."""
    context = {
        'recent_posts': Post.objects.order_by('-pk')[:3],
    }
    return render(request, 'single_pages/landing.html', context)
def aboutme(request):
    """Render the static 'about me' page (no extra context needed)."""
    return render(request, 'single_pages/aboutme.html')
import requests
class MyIPFetcher:
    """Fetch this machine's public IP address via api.ipify.org."""

    @staticmethod
    def fetch_ip():
        """Return a human-readable message with the public IP, or an
        error message carrying the HTTP status code on failure."""
        # A timeout prevents the call from hanging forever on network issues.
        r = requests.get('https://api.ipify.org', timeout=10)
        if r.status_code != 200:
            # BUG FIX: the original concatenated str + int (r.status_code),
            # which raises TypeError instead of returning the message.
            return 'Could not get the ip address. Status: {0}'.format(r.status_code)
        return 'The public ip is: ' + r.text
# Blender add-on registration metadata.
bl_info = {
    "name": "flame model format",
    "blender": (2, 81, 6),
    "category": "Import-Export",
}
import bpy
from bpy.props import (
BoolProperty,
FloatProperty,
StringProperty,
EnumProperty,
)
from bpy_extras.io_utils import (
ImportHelper,
ExportHelper,
path_reference_mode,
axis_conversion,
)
from bpy_extras import io_utils, node_shader_utils
import ntpath
import xml.etree.ElementTree as ET
class ImportFmod(bpy.types.Operator, ImportHelper):
    # Import operator for the flame model (.fmod) format.
    # NOTE(review): execute/draw are unimplemented stubs; execute should
    # eventually return an operator result set such as {'FINISHED'}.
    bl_idname = "import_scene.fmod"
    bl_label = "Import Fmod"
    bl_options = {'PRESET', 'UNDO'}
    filename_ext = ".fmod"

    def execute(self, context):
        # TODO: implement .fmod import.
        pass

    def draw(self, context):
        # TODO: implement the import options UI.
        pass
def v3_str(v):
    """Format the first three components of *v* as 'x,y,z', each rounded
    to four decimal places."""
    components = (v[0], v[1], v[2])
    return ",".join(str(round(c, 4)) for c in components)
def name_compat(name):
    """Return a filesystem/XML-safe version of *name*: 'None' when the
    name is missing, otherwise the name with spaces replaced by
    underscores."""
    return 'None' if name is None else name.replace(' ', '_')
def export_sub(n_meshes, data_file, mat_name, sub_vertics, sub_uvs, sub_normals, sub_indices):
    """Append one <meshe> node to *n_meshes* and write its buffers to *data_file*.

    Each non-empty buffer becomes a child element recording the byte offset
    and size of the data appended to *data_file*; the input lists are
    cleared after being written.

    :param n_meshes:    parent <meshes> XML element
    :param data_file:   binary file-like object the raw buffers are appended to
    :param mat_name:    material name stored on the <meshe> element
    :param sub_vertics: flat list of vertex floats
    :param sub_uvs:     flat list of UV floats
    :param sub_normals: flat list of normal floats
    :param sub_indices: flat list of unsigned 32-bit indices
    """
    from array import array
    n_mesh = ET.SubElement(n_meshes, "meshe", material=mat_name)
    if sub_vertics:
        ET.SubElement(n_mesh, "positions", offset=str(data_file.tell()), size=str(4 * len(sub_vertics)))
        array('f', sub_vertics).tofile(data_file)
        sub_vertics.clear()
    if sub_uvs:
        ET.SubElement(n_mesh, "uvs", offset=str(data_file.tell()), size=str(4 * len(sub_uvs)))
        array('f', sub_uvs).tofile(data_file)
        sub_uvs.clear()
    if sub_normals:
        ET.SubElement(n_mesh, "normals", offset=str(data_file.tell()), size=str(4 * len(sub_normals)))
        array('f', sub_normals).tofile(data_file)
        sub_normals.clear()
    if sub_indices:
        # BUG FIX: the original referenced the misspelled name `sub_indicess`
        # (NameError at runtime). Also use type code 'I' (32-bit unsigned on
        # CPython) instead of 'L', whose platform-dependent 8-byte size
        # contradicted the advertised `4 * len` size attribute.
        ET.SubElement(n_mesh, "indices", offset=str(data_file.tell()), size=str(4 * len(sub_indices)))
        array('I', sub_indices).tofile(data_file)
        sub_indices.clear()
class ExportFmod(bpy.types.Operator, ExportHelper):
    # Export operator: writes the selection as flame model (.fmod) XML plus
    # a binary buffer file (<filepath>.fmod.dat) and one .fmat XML file
    # per material.
    bl_idname = "export_scene.fmod"
    bl_label = "Export Fmod"
    bl_options = {'PRESET'}
    filename_ext = ".fmod"

    def execute(self, context):
        # Export the first selected object: a mesh directly, or an
        # armature together with its mesh children.
        scene = context.scene
        if bpy.ops.object.mode_set.poll():
            bpy.ops.object.mode_set(mode='OBJECT')
        if len(context.selected_objects) < 1 :
            return {"CANCELLED"}
        ob = context.selected_objects[0].original
        oms = []
        arm = None
        if ob.type == "MESH":
            oms.append(ob)
        elif ob.type == "ARMATURE":
            arm = ob.data
            for o in ob.children:
                oms.append(o)
        else:
            # NOTE(review): returns None instead of an operator result set
            # such as {'CANCELLED'}.
            return
        filename = self.filepath
        ppath = ntpath.dirname(filename)
        model_name = ntpath.splitext(ntpath.split(filename)[1])[0]
        n_model = ET.Element("model")
        n_meshes = ET.SubElement(n_model, "meshes")
        # NOTE(review): this handle leaks on the early `return`s below;
        # a with-block (or try/finally) would be safer.
        model_data_file = open(filename + ".dat", "wb")
        for ob in oms:
            me = ob.to_mesh()
            if len(me.uv_layers) == 0:
                return
            uvs = me.uv_layers.active.data[:]
            if len(uvs) == 0:
                ob.to_mesh_clear()
                return
            verts = me.vertices[:]
            if len(verts) == 0:
                ob.to_mesh_clear()
                return
            faces = me.polygons[:]
            if len(faces) == 0:
                ob.to_mesh_clear()
                return
            # Group faces by material, smooth flag second.
            faces.sort(key=lambda a: (a.material_index, a.use_smooth))
            me.calc_normals_split()
            loops = me.loops
            materials = me.materials[:]
            material_names = []
            for i, m in enumerate(materials):
                # Write one <material> .fmat file per mesh material.
                mat_wrap = node_shader_utils.PrincipledBSDFWrapper(m)
                n_material = ET.Element("material", color=v3_str(mat_wrap.base_color), metallic=str(mat_wrap.metallic), roughness=str(mat_wrap.roughness))
                color_tex_wrap = getattr(mat_wrap, "base_color_texture", None)
                if color_tex_wrap:
                    image = color_tex_wrap.image
                    if image:
                        # NOTE(review): this expression has no effect -- the
                        # texture path is read but never stored in the XML.
                        image.filepath
                material_name = m.name
                if not material_name:
                    material_name = str(i)
                material_name = (model_name + "_" + material_name + ".fmat").replace(' ', '_')
                material_names.append(material_name)
                doc = ET.ElementTree(n_material)
                doc.write(ntpath.join(ppath, material_name))
            group_names = [g.name for g in ob.vertex_groups]
            if arm:
                # NOTE(review): arm.edit_bones is only populated in edit
                # mode, and this loop body does nothing yet.
                for b in arm.edit_bones:
                    if b.name not in group_names:
                        continue
            curr_mat_idx = faces[0].material_index
            sub_vertics = []
            sub_uvs = []
            sub_normals = []
            sub_indices = []
            vertex_dict = {}
            vert_cnt = 0
            for f in faces:
                if curr_mat_idx != f.material_index:
                    # NOTE(review): export_sub() requires seven arguments --
                    # this bare call raises TypeError at runtime; it also
                    # never updates curr_mat_idx or clears vertex_dict.
                    export_sub()
                    vert_cnt = 0
                for l_idx in f.loop_indices:
                    vi = loops[l_idx].vertex_index
                    uv = uvs[l_idx].uv
                    no = loops[l_idx].normal
                    # Deduplicate vertices on (index, uv, normal), rounded.
                    key = vi, round(uv.x, 4), round(uv.y, 4), round(no.x, 4), round(no.y, 4), round(no.z, 4)
                    idx = vertex_dict.get(key)
                    if idx is None:
                        idx = vert_cnt
                        v = verts[vi].co
                        # NOTE(review): appending lists produces nested
                        # lists, but export_sub feeds these to array('f'),
                        # which needs a flat sequence of numbers -- extend()
                        # with the components was presumably intended.
                        sub_vertics.append([v.x, v.y, v.z])
                        sub_uvs.append([uv.x, uv.y])
                        sub_normals.append([no.x, no.y, no.z])
                        vertex_dict[key] = idx
                        vert_cnt += 1
                    sub_indices.append(idx)
            # NOTE(review): same missing-arguments problem as above.
            export_sub()
            ob.to_mesh_clear()
        model_data_file.close()
        doc = ET.ElementTree(n_model)
        doc.write(filename)
        return {"FINISHED"}

    def draw(self, context):
        # TODO: implement the export options UI.
        pass
def menu_func_import(self, context):
    """File > Import menu entry for the flame model format."""
    layout = self.layout
    layout.operator(ImportFmod.bl_idname, text="flame model (.fmod)")
def menu_func_export(self, context):
    """File > Export menu entry for the flame model format."""
    layout = self.layout
    layout.operator(ExportFmod.bl_idname, text="flame model (.fmod)")
def register():
    """Register the operators and add the File menu entries."""
    for cls in (ImportFmod, ExportFmod):
        bpy.utils.register_class(cls)
    bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
    bpy.types.TOPBAR_MT_file_export.append(menu_func_export)
def unregister():
    """Remove the File menu entries and unregister the operators."""
    bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
    bpy.types.TOPBAR_MT_file_export.remove(menu_func_export)
    for cls in (ImportFmod, ExportFmod):
        bpy.utils.unregister_class(cls)
# Allow running the add-on file directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
| 7,032 | 2,329 |
import unittest
import requests
from unittest import mock
from dataverk.connectors import NaisS3Connector
from tests.dataverk.connectors.storage.test_resources.mock_nais_s3_api import mock_requests_put, mock_requests_get
from tests.dataverk.connectors.storage.test_resources.nais_s3_storage_common import NAIS_S3_ENDPOINT, NAIS_S3_BLOB_NAME, \
NAIS_S3_RESOURCE_FMT, NAIS_S3_BUCKET_NAME, NAIS_S3_RESOURCE_CONTENT
class TestNaisS3Connector(unittest.TestCase):
    """Unit tests for NaisS3Connector against a mocked NAIS S3 HTTP API."""

    def test_class_instantiation(self):
        # The connector can be constructed from bucket name + endpoint.
        conn = NaisS3Connector(NAIS_S3_BUCKET_NAME, NAIS_S3_ENDPOINT)
        self.assertIsInstance(conn, NaisS3Connector)

    @mock.patch("requests.put", side_effect=mock_requests_put)
    def test_write_valid(self, mock_put):
        # Writing a blob issues an HTTP PUT against the mocked API.
        conn = NaisS3Connector(NAIS_S3_BUCKET_NAME, NAIS_S3_ENDPOINT)
        conn.write(data=NAIS_S3_RESOURCE_CONTENT,
                   destination_blob_name=NAIS_S3_BLOB_NAME,
                   fmt=NAIS_S3_RESOURCE_FMT)

    @mock.patch("requests.get", side_effect=mock_requests_get)
    def test_read_valid(self, mock_get):
        # Reading an existing blob returns its content.
        conn = NaisS3Connector(NAIS_S3_BUCKET_NAME, NAIS_S3_ENDPOINT)
        blob = conn.read(blob_name=f"{NAIS_S3_BLOB_NAME}.{NAIS_S3_RESOURCE_FMT}")
        self.assertEqual(blob, NAIS_S3_RESOURCE_CONTENT)

    @mock.patch("requests.get", side_effect=mock_requests_get)
    def test_read_invalid_resource_not_found(self, mock_get):
        # A missing blob surfaces as an HTTPError from requests.
        conn = NaisS3Connector(NAIS_S3_BUCKET_NAME, NAIS_S3_ENDPOINT)
        with self.assertRaises(requests.exceptions.HTTPError):
            conn.read(blob_name=f"resource/not-found.{NAIS_S3_RESOURCE_FMT}")
| 1,616 | 647 |
from django.template.backends.django import DjangoTemplates
from .test_dummy import TemplateStringsTests
class DjangoTemplatesTests(TemplateStringsTests):
    # Run the shared TemplateStringsTests suite against the real Django
    # template backend instead of the dummy string backend.
    engine_class = DjangoTemplates
    backend_name = 'django'
| 222 | 59 |
from django import template
from cart.models import Order
register = template.Library()
@register.filter
def cart_total(user):
    """Template filter: number of distinct items in the user's open (not
    yet ordered) cart, or 0 when there is no open order."""
    open_orders = Order.objects.filter(user=user, ordered=False)
    if not open_orders.exists():
        return 0
    return open_orders[0].orderitems.count()
import numpy as np
from working import writemidi
def load_npy_data(npy_data):
    """Load two single-channel piano-roll arrays and join them channel-wise.

    :param npy_data: pair of .npy file paths (domain A, domain B)
    :return: float array of shape (time, pitch, 2)
    """
    roll_a = np.load(npy_data[0]) * 1.0  # 64 * 84, cast to float
    roll_b = np.load(npy_data[1]) * 1.0  # 64 * 84, cast to float
    channels = (
        roll_a.reshape(roll_a.shape[0], roll_a.shape[1], 1),
        roll_b.reshape(roll_b.shape[0], roll_b.shape[1], 1),
    )
    return np.concatenate(channels, axis=2)  # 64 * 84 * 2
def save_midis(bars, file_path, tempo=80.0):
    """Pad piano-roll bars to the full pitch range and write a MIDI file.

    :param bars:      4-D array (batch, time, pitch, channels)
    :param file_path: output MIDI filename
    :param tempo:     tempo passed through to the MIDI writer
    """
    # Pad 24 empty pitches below and 20 above the 84-pitch roll.
    zeros_low = np.zeros((bars.shape[0], bars.shape[1], 24, bars.shape[3]))
    zeros_high = np.zeros((bars.shape[0], bars.shape[1], 20, bars.shape[3]))
    padded = np.concatenate((zeros_low, bars, zeros_high), axis=2)

    # Regroup into chunks of 64 time steps per roll.
    padded = padded.reshape(-1, 64, padded.shape[2], padded.shape[3])

    # One 3-D piano roll per channel.
    track_rolls = [
        padded[:, :, :, ch].reshape(padded.shape[0],
                                    padded.shape[1],
                                    padded.shape[2])
        for ch in range(padded.shape[3])
    ]

    writemidi.write_piano_rolls_to_midi(
        piano_rolls=track_rolls,
        program_nums=[0],
        is_drum=[False],
        filename=file_path,
        tempo=tempo,
        beat_resolution=4,
    )
if __name__ == "__main__":
    # Demo: load one test piano roll, add batch/channel axes, write a MIDI.
    data = np.load("./JC_J/test/jazz_piano_test_1.npy") * 1.0
    data = data.reshape(1, data.shape[0], data.shape[1], 1)
    save_midis(data, "uwu.mid")
| 1,466 | 630 |
from boa3.boa3 import Boa3
from boa3.exception import CompilerError
from boa3.neo.cryptography import hash160
from boa3.neo.vm.type.String import String
from boa3_test.tests.boa_test import BoaTest
from boa3_test.tests.test_classes.testengine import TestEngine
class TestClass(BoaTest):
    """Smart-contract tests for class support in the boa3 compiler:
    built-in classes (Notification, Contract) work, while most user-class
    features are expected to raise NotSupportedOperation."""
    default_folder: str = 'test_sc/class_test'

    def test_notification_get_variables(self):
        # Reading Notification attributes: defaults when no event was
        # emitted, real values after an event is emitted.
        path = self.get_contract_path('NotificationGetVariables.py')
        output, manifest = self.compile_and_save(path)
        script = hash160(output)

        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'script_hash', [],
                                         expected_result_type=bytes)
        contract_notifications = engine.get_events(origin=script)
        self.assertEqual(len(contract_notifications), 0)
        self.assertEqual(bytes(20), result)

        result = self.run_smart_contract(engine, path, 'event_name', [])
        contract_notifications = engine.get_events(origin=script)
        self.assertEqual(len(contract_notifications), 0)
        self.assertEqual('', result)

        result = self.run_smart_contract(engine, path, 'state', [])
        contract_notifications = engine.get_events(origin=script)
        self.assertEqual(len(contract_notifications), 0)
        self.assertEqual([], result)

        result = self.run_smart_contract(engine, path, 'script_hash', [1])
        contract_notifications = engine.get_events(origin=script)
        self.assertEqual(len(contract_notifications), 1)
        self.assertEqual(script, result)
        engine.reset_engine()

        result = self.run_smart_contract(engine, path, 'event_name', [1])
        contract_notifications = engine.get_events(origin=script)
        self.assertEqual(len(contract_notifications), 1)
        self.assertEqual('notify', result)
        engine.reset_engine()

        result = self.run_smart_contract(engine, path, 'state', [1])
        contract_notifications = engine.get_events(origin=script)
        self.assertEqual(len(contract_notifications), 1)
        self.assertEqual([1], result)
        engine.reset_engine()

        result = self.run_smart_contract(engine, path, 'state', ['1'])
        contract_notifications = engine.get_events(origin=script)
        self.assertEqual(len(contract_notifications), 1)
        self.assertEqual(['1'], result)

    def test_notification_set_variables(self):
        # Writing Notification attributes round-trips the given values.
        path = self.get_contract_path('NotificationSetVariables.py')
        output, manifest = self.compile_and_save(path)
        script = hash160(output)

        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'script_hash', script,
                                         expected_result_type=bytes)
        self.assertEqual(script, result)

        result = self.run_smart_contract(engine, path, 'event_name', 'unit test')
        self.assertEqual('unit test', result)

        result = self.run_smart_contract(engine, path, 'state', (1, 2, 3))
        self.assertEqual([1, 2, 3], result)

    def test_contract_constructor(self):
        # A freshly constructed Contract has zeroed/empty fields.
        path = self.get_contract_path('ContractConstructor.py')
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'new_contract')
        self.assertEqual(5, len(result))

        # Engines may return hash fields as str; normalise to bytes.
        if isinstance(result[2], str):
            result[2] = String(result[2]).to_bytes()
        if isinstance(result[3], str):
            result[3] = String(result[3]).to_bytes()

        self.assertEqual(0, result[0])
        self.assertEqual(0, result[1])
        self.assertEqual(bytes(20), result[2])
        self.assertEqual(bytes(), result[3])
        self.assertEqual({}, result[4])

    def test_user_class_empty(self):
        # An empty user class compiles to an empty script.
        path = self.get_contract_path('UserClassEmpty.py')
        output = Boa3.compile(path)
        self.assertEqual(b'', output)

    def test_user_class_with_static_method(self):
        path = self.get_contract_path('UserClassWithStaticMethod.py')
        self.assertCompilerLogs(CompilerError.NotSupportedOperation, path)

    def test_user_class_with_class_method_called_from_class_name(self):
        # Calling a classmethod through the class name is supported.
        path = self.get_contract_path('UserClassWithClassMethodFromClass.py')
        self.compile_and_save(path)

        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'call_by_class_name')
        self.assertEqual(42, result)

    def test_user_class_with_class_method_called_from_object(self):
        path = self.get_contract_path('UserClassWithClassMethodFromObject.py')
        self.assertCompilerLogs(CompilerError.NotSupportedOperation, path)

    def test_user_class_with_class_method_with_args(self):
        # Classmethod arguments are forwarded and echoed back.
        path = self.get_contract_path('UserClassWithClassMethodWithArgs.py')
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'call_by_class_name', 42)
        self.assertEqual(42, result)

        result = self.run_smart_contract(engine, path, 'call_by_class_name', 1)
        self.assertEqual(1, result)

        result = self.run_smart_contract(engine, path, 'call_by_class_name', -10)
        self.assertEqual(-10, result)

    def test_user_class_with_class_method_with_vararg(self):
        # With an empty vararg a default is returned; otherwise the first
        # element of the vararg.
        path = self.get_contract_path('UserClassWithClassMethodWithVararg.py')
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'call_by_class_name', [])
        self.assertEqual(42, result)

        args = [1, 2, 3]
        result = self.run_smart_contract(engine, path, 'call_by_class_name', args)
        self.assertEqual(args[0], result)

        args = [4, 3, 2, 1]
        result = self.run_smart_contract(engine, path, 'call_by_class_name', args)
        self.assertEqual(args[0], result)

    def test_user_class_with_class_variable(self):
        path = self.get_contract_path('UserClassWithClassVariable.py')
        self.assertCompilerLogs(CompilerError.NotSupportedOperation, path)

    def test_user_class_with_init(self):
        path = self.get_contract_path('UserClassWithInit.py')
        self.assertCompilerLogs(CompilerError.NotSupportedOperation, path)

    def test_user_class_with_instance_method(self):
        path = self.get_contract_path('UserClassWithInstanceMethod.py')
        self.assertCompilerLogs(CompilerError.NotSupportedOperation, path)

    def test_user_class_with_instance_variable(self):
        path = self.get_contract_path('UserClassWithInstanceVariable.py')
        self.assertCompilerLogs(CompilerError.NotSupportedOperation, path)

    def test_user_class_with_base(self):
        path = self.get_contract_path('UserClassWithBase.py')
        self.assertCompilerLogs(CompilerError.NotSupportedOperation, path)

    def test_user_class_with_keyword_base(self):
        path = self.get_contract_path('UserClassWithKeywordBase.py')
        self.assertCompilerLogs(CompilerError.NotSupportedOperation, path)

    def test_user_class_with_decorator(self):
        path = self.get_contract_path('UserClassWithDecorator.py')
        self.assertCompilerLogs(CompilerError.NotSupportedOperation, path)
| 7,087 | 2,219 |
def digital_root(n):
    """Print (and return) the digital root of *n*.

    The digital root is obtained by repeatedly summing the decimal digits
    of the number until a single digit remains.

    :param n: non-negative integer
    :return: the single-digit digital root

    This replaces the original's manual string-slicing bookkeeping with a
    direct digit sum; behaviour (printing the result) is preserved, and
    the value is now also returned for programmatic use.
    """
    total = n
    while total > 9:
        total = sum(int(digit) for digit in str(total))
    print(total)
    return total


if __name__ == '__main__':
    # Demo: 493193 -> 29 -> 11 -> 2. Guarded so importing this module
    # no longer prints as a side effect.
    digital_root(493193)
from .Connector import Connector
from .Info import Info | 55 | 15 |
# -*- coding: utf-8 -*-
from __future__ import (
print_function,
unicode_literals,
)
import hypothesis
import hypothesis.strategies
import mock
import os.path
import pytest
from datetime import timedelta
from hypothesis_regex import regex
from runwith import (
main,
__main__,
timespan,
SIGKILL,
)
try:
from shlex import quote
except ImportError:
from pipes import quote
def unused(*args):
    """Swallow *args* to mark them as intentionally unused (no-op)."""
# Touch the runwith.__main__ module so coverage tracks it even though the
# tests never execute it directly.
unused(__main__)
SECOND = 1
MINUTE = 60 * SECOND
HOUR = 60 * MINUTE
DAY = 24 * HOUR
WEEK = 7 * DAY


def seconds_to_timespan(x):
    """Render a number of seconds as a compact timespan string, e.g.
    ``'1w2d3h4m5s'``; any sub-second remainder is appended as ``'<n>ms'``
    and zero-valued units are omitted entirely."""
    units = (('w', WEEK), ('d', DAY), ('h', HOUR), ('m', MINUTE), ('s', SECOND))
    rendered = ''
    for suffix, span in units:
        count, x = divmod(x, span)
        if count:
            rendered += '%d%s' % (count, suffix)
    if x > 0:
        rendered += '%dms' % (1000.0 * x)
    return rendered
@pytest.mark.parametrize('value,expected', [
    ('1w', timedelta(weeks=1)),
    ('7d', timedelta(days=7)),
    ('2h', timedelta(hours=2)),
    ('.1m', timedelta(minutes=.1)),
    ('.7s', timedelta(seconds=.7)),
    ('5m30s', timedelta(minutes=5, seconds=30)),
])
def test_timespan(value, expected):
    # Each supported unit (and a combination) parses to the right timedelta.
    assert timespan(value) == expected
@pytest.mark.parametrize('value', [
    '1',
    '123abc',
])
def test_timespan_invalid(value):
    # Unitless or malformed spans raise ValueError with a precise message.
    with pytest.raises(ValueError) as exc:
        print(timespan(value))
    assert str(exc.value) == ('Invalid time span "%s".' % value)
def test_run_without_args():
    # With no command, main() exits with status 2 and never spawns anything.
    with mock.patch('subprocess.Popen') as popen:
        with pytest.raises(SystemExit) as exc:
            print(main([]))
        assert exc.value.code == 2
        popen.assert_not_called()
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
)
def test_implicit_argv(status, command):
    # When called without arguments, main() falls back to sys.argv.
    with mock.patch('sys.argv', ['runwith', '--'] + command):
        process = mock.MagicMock()
        process.returncode = status
        process.wait.return_value = process.returncode
        with mock.patch('subprocess.Popen') as popen:
            popen.side_effect = [process]
            assert main() == status
            popen.assert_called_once_with(command)
@hypothesis.given(
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
)
def test_spawn_failure(command):
    # An OSError from Popen is reported as exit status 2.
    with mock.patch('subprocess.Popen') as popen:
        popen.side_effect = OSError('unknown program')
        with pytest.raises(SystemExit) as exc:
            print(main(['--'] + command))
        assert exc.value.code == 2
        popen.assert_called_once_with(command)
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
)
def test_forward_status(status, command):
    # The child's exit status is forwarded as main()'s return value.
    process = mock.MagicMock()
    process.returncode = status
    process.wait.return_value = process.returncode
    with mock.patch('subprocess.Popen') as popen:
        popen.side_effect = [process]
        assert main(['--'] + command) == status
        popen.assert_called_once_with(command)
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
)
def test_redirect_stdin(tempcwd, status, command):
    # '-i FILE' opens FILE and passes it as the child's stdin.
    process = mock.MagicMock()
    process.returncode = status
    process.wait.return_value = process.returncode
    with open('foo.txt', 'wb') as stream:
        stream.write(b'FOO')
    with mock.patch('subprocess.Popen') as popen:
        popen.side_effect = [process]
        assert main(['-i', 'foo.txt', '--'] + command) == status
        popen.assert_called_once_with(command, stdin=mock.ANY)
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
)
def test_redirect_stdout(tempcwd, status, command):
    # '-o FILE' creates FILE and passes it as the child's stdout.
    process = mock.MagicMock()
    process.returncode = status
    process.wait.return_value = process.returncode
    with mock.patch('subprocess.Popen') as popen:
        popen.side_effect = [process]
        assert main(['-o', 'foo.txt', '--'] + command) == status
        popen.assert_called_once_with(command, stdout=mock.ANY)
    assert os.path.exists('foo.txt')
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
)
def test_redirect_stderr(tempcwd, status, command):
    """`-e FILE` redirects the child's standard error into FILE."""
    child = mock.MagicMock()
    child.returncode = status
    child.wait.return_value = status
    with mock.patch('subprocess.Popen') as popen_mock:
        popen_mock.side_effect = [child]
        exit_code = main(['-e', 'foo.txt', '--'] + command)
    assert exit_code == status
    popen_mock.assert_called_once_with(command, stderr=mock.ANY)
    # The target file must have been created even though Popen was mocked.
    assert os.path.exists('foo.txt')
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
    workdir=regex(r'\w+').map(quote),
)
def test_change_working_directory(tempcwd, status, command, workdir):
    """`-w DIR` makes the child run with DIR as its working directory."""
    child = mock.MagicMock()
    child.returncode = status
    child.wait.return_value = status
    with mock.patch('subprocess.Popen') as popen_mock:
        popen_mock.side_effect = [child]
        exit_code = main(['-w', workdir, '--'] + command)
    assert exit_code == status
    popen_mock.assert_called_once_with(command, cwd=workdir)
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
    timebox=hypothesis.strategies.floats(
        min_value=0.001,  # 1ms
        max_value=31 * DAY,
    ).map(seconds_to_timespan),
)
def test_respect_timebox(status, command, timebox):
    """A child that finishes inside the timebox is neither signalled nor terminated."""
    child = mock.MagicMock()
    child.returncode = status
    # A single-use side effect: wait() may only be consumed once.
    child.wait.side_effect = [child.returncode]
    with mock.patch('subprocess.Popen') as popen_mock:
        popen_mock.side_effect = [child]
        exit_code = main(['-t', timebox, '--'] + command)
    assert exit_code == status
    popen_mock.assert_called_once_with(command)
    child.wait.assert_called_once_with()
    child.send_signal.assert_not_called()
    child.terminate.assert_not_called()
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
    timebox=hypothesis.strategies.floats(
        min_value=0.001,  # 1ms
        max_value=31 * DAY,
    ).map(seconds_to_timespan),
)
def test_exceed_timebox(status, command, timebox):
    """With a grace period set, a timed-out child that dies after SIGKILL is not terminate()d."""
    child = mock.MagicMock()
    child.returncode = status
    child.wait.return_value = status
    watcher = mock.MagicMock()
    # Alive on the first check, gone on the second (after SIGKILL).
    watcher.is_alive.side_effect = [True, False]
    watcher.join.side_effect = [None, None]
    with mock.patch('threading.Thread') as thread_cls, \
            mock.patch('subprocess.Popen') as popen_cls:
        thread_cls.side_effect = [watcher]
        popen_cls.side_effect = [child]
        assert main(['-t', timebox, '-g', '2s', '--'] + command) == status
    popen_cls.assert_called_once_with(command)
    thread_cls.assert_called_once()
    child.send_signal.assert_called_once_with(SIGKILL)
    child.terminate.assert_not_called()
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
    timebox=hypothesis.strategies.floats(
        min_value=0.001,  # 1ms
        max_value=31 * DAY,
    ).map(seconds_to_timespan),
)
def test_exceed_timebox_no_grace_time(status, command, timebox):
    """Without a grace period a timed-out child is terminate()d, never SIGKILLed."""
    child = mock.MagicMock()
    child.returncode = status
    child.wait.return_value = status
    watcher = mock.MagicMock()
    # The watcher stays alive on both checks.
    watcher.is_alive.side_effect = [True, True]
    watcher.join.side_effect = [None, None, None]
    with mock.patch('threading.Thread') as thread_cls, \
            mock.patch('subprocess.Popen') as popen_cls:
        thread_cls.side_effect = [watcher]
        popen_cls.side_effect = [child]
        assert main(['-t', timebox, '--'] + command) == status
    popen_cls.assert_called_once_with(command)
    thread_cls.assert_called_once()
    child.send_signal.assert_not_called()
    child.terminate.assert_called_once()
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
    timebox=hypothesis.strategies.floats(
        min_value=0.001,  # 1ms
        max_value=31 * DAY,
    ).map(seconds_to_timespan),
)
def test_exceed_timebox_and_grace_time(status, command, timebox):
    """A child that survives both timebox and grace period gets SIGKILL and terminate()."""
    child = mock.MagicMock()
    child.returncode = status
    child.wait.return_value = status
    watcher = mock.MagicMock()
    # Still alive after the grace period as well.
    watcher.is_alive.side_effect = [True, True]
    watcher.join.side_effect = [None, None, None]
    with mock.patch('threading.Thread') as thread_cls, \
            mock.patch('subprocess.Popen') as popen_cls:
        thread_cls.side_effect = [watcher]
        popen_cls.side_effect = [child]
        assert main(['-t', timebox, '-g', '2s', '--'] + command) == status
    popen_cls.assert_called_once_with(command)
    thread_cls.assert_called_once()
    child.send_signal.assert_called_once_with(SIGKILL)
    child.terminate.assert_called_once()
| 10,263 | 3,649 |
"""Module for loading the real datasets."""
import numpy as np
import pandas as pd
import os
import math
import glob
import random_permutation as rp
from datetime import datetime
import sys
RANDOM_SEED = 123 # eliminate for taking from the clock!
class DataBatch(object):
    """Mini-batch provider over a mortgage-performance dataset.

    The data comes either from an in-memory ``in_tuple`` of
    ``(features, labels)`` arrays, or is read into RAM from the ``*.h5``
    files found under ``path``. Loaded rows live in ``self._dict``,
    keyed by file-group index (currently everything is merged into
    group 0).

    FIX: the class previously defined ``shuffle`` twice; the second
    definition silently shadowed the first, so the no-argument form
    (referenced by the commented ``self.shuffle()`` call in
    ``next_batch``) was unusable. Both forms are now merged into one
    backward-compatible method.
    """

    def __init__(self, architecture, path, period_array, in_tuple=None, dtype='train', cols=None, remainder=False):
        """Load the dataset from an in-memory tuple or from HDF5 files.

        architecture -- dict providing 'n_input', 'n_classes' and
            'total_num_examples' / 'valid_num_examples'.
        path -- directory holding the *.h5 files (used when in_tuple is None).
        period_array -- [first, last] period bounds kept for PERIOD queries.
        in_tuple -- optional (features, labels) pair already in memory.
        dtype -- HDF5 group to read: 'train', 'valid' or 'test'.
        cols -- optional subset of feature columns to load.
        remainder -- when True, load the complement of `cols` instead.
        """
        if in_tuple is not None:
            self.orig = Data(in_tuple)
            self.features, self.labels = in_tuple
            self._current_num_examples, self._num_classes = self.labels.shape
            # 30/01/2018: these weights are not used in the tensor model!
            self.weights = self.orig.labels @ get_weights(self.orig.labels)
            self._file_index = 0
        elif path is not None:
            self.features, self.labels = None, None
            self.h5_path = path
            self.dtype = dtype
            self.all_files = glob.glob(os.path.join(self.h5_path, "*.h5"))
            self._num_columns = architecture['n_input']
            self._num_classes = architecture['n_classes']
            if dtype == 'valid':
                num_exam = architecture['valid_num_examples']
            else:
                num_exam = architecture['total_num_examples']
            if cols is None:
                self._dict = self.get_metadata_dataset(num_exam)
            else:
                self._dict = self.get_metadata_dataset_cols(num_exam, cols, remainder)
            if self._dict is None:
                raise ValueError('DataBatch: The dictionary was not loaded!')
            if dtype == 'train':
                # Custom permutation generator over all loaded examples.
                self._loan_random = rp.CustomRandom(self._total_num_examples)
            self.dataset_index = 0  # which file group is being read
            self._file_index = 0  # sequential cursor inside the current group
            self.period_range = period_array
            self.transitions = {'MBA_DELINQUENCY_STATUS': ['0', '3', '6', '9', 'C', 'F', 'R']}
            # NOTE(review): when the status columns are absent, idx_transitions
            # is never set and the batch methods will raise AttributeError --
            # confirm whether that path can actually occur.
            if any('MBA_DELINQUENCY_STATUS' in s for s in self.features_list):
                self.idx_transitions = [self.features_list.index('MBA_DELINQUENCY_STATUS_' + v) for v in self.transitions['MBA_DELINQUENCY_STATUS']]
        else:  # Dataset empty!
            self._dict = None

    def get_metadata_dataset(self, max_rows):
        """Load up to `max_rows` examples from every h5 file into RAM.

        Returns a dict with a single entry (key 0) holding the feature
        rows, label rows, row counts and the class-weight matrix, or
        raises ValueError on any read error.
        """
        try:
            files_dict = {}
            self._total_num_examples = 0
            ok_inputs = True
            files_dict[0] = {}
            files_dict[0]['dataset_features'] = []
            files_dict[0]['dataset_labels'] = []
            for file_path in self.all_files:
                with pd.HDFStore(file_path) as dataset_file:
                    print(file_path, '...to load')
                    total_rows = dataset_file.get_storer(self.dtype + '/features').nrows
                    if total_rows <= max_rows:
                        max_rows -= total_rows
                        files_dict[0]['dataset_features'].extend(dataset_file.select(self.dtype + '/features', start=0).values)
                        files_dict[0]['dataset_labels'].extend(dataset_file.select(self.dtype + '/labels', start=0, stop=total_rows).values)
                    else:
                        # Cap reached: only read the remaining quota.
                        total_rows = max_rows
                        files_dict[0]['dataset_features'].extend(dataset_file.select(self.dtype + '/features', start=0, stop=total_rows).values)
                        files_dict[0]['dataset_labels'].extend(dataset_file.select(self.dtype + '/labels', start=0, stop=total_rows).values)
                    if ok_inputs:
                        # Capture column metadata once, from the first file.
                        self.index_length = len(dataset_file.get_storer(self.dtype + '/features').attrs.data_columns)
                        self.features_list = dataset_file.get_storer(self.dtype + '/features').attrs.non_index_axes[0][1][self.index_length:]
                        self.labels_list = dataset_file.get_storer(self.dtype + '/labels').attrs.non_index_axes[0][1][self.index_length:]
                        ok_inputs = False
                    self._total_num_examples += total_rows
                    print(file_path, ' loaded in RAM')
                    if total_rows == max_rows:
                        # NOTE(review): intended to fire when the cap is hit (the
                        # else-branch sets total_rows = max_rows), but it can also
                        # fire by coincidence after the subtraction above -- confirm.
                        break
            files_dict[0]['nrows'] = self._total_num_examples
            files_dict[0]['init_index'] = 0
            files_dict[0]['end_index'] = self._total_num_examples
            files_dict[0]['class_weights'] = self.get_global_weights_transition_class(files_dict[0])
            return files_dict
        except Exception as e:
            raise ValueError('Error in retrieving the METADATA object: ' + str(e))

    def get_weights_class(self, labels):
        """Return 1 - (per-class frequency) weights over all loaded examples."""
        class_weights = np.sum(labels, axis=0)
        print('class_weights', class_weights)
        class_weights = np.round(class_weights / np.float32(self._total_num_examples), decimals=3)
        # 1-weights approach:
        class_weights = np.subtract([1] * len(class_weights), class_weights)
        print('class_weights', class_weights)
        return class_weights

    def get_weights_transition_class(self, data_dict):
        """Per-transition weights: for each current delinquency status, compute
        1 - (label frequency within that status subset)."""
        categorical_cols = {'MBA_DELINQUENCY_STATUS': ['0', '3', '6', '9', 'C', 'F', 'R']}
        idx_categorical_cols = {}
        for cat, values in categorical_cols.items():
            idx_categorical_cols[cat] = [[], []]
            if any(cat in s for s in self.features_list):
                idx_categorical_cols[cat][0].extend([self.features_list.index(cat + '_' + v) for v in values])
                idx_categorical_cols[cat][1].extend([cat + '_' + v for v in values])
                print(cat, 'is found', len(values), len(idx_categorical_cols[cat][0]), len(idx_categorical_cols[cat][1]))
        print(idx_categorical_cols)
        self._idx_categorical_cols = idx_categorical_cols['MBA_DELINQUENCY_STATUS'][0]
        trans_subset = []
        weights_mtx = []
        for cat, values in idx_categorical_cols.items():
            for val in values[0]:
                print('val', val)
                # Rows whose current status one-hot column `val` is set.
                trans_subset = [data_dict['dataset_labels'][i] for i, elem in enumerate(data_dict['dataset_features']) if elem[val] == 1]
                total_ex = len(trans_subset)
                print('total_ex: ', total_ex)
                if total_ex > 0:
                    print('trans_subset[0]: ', trans_subset[0])
                    class_weights = np.sum(trans_subset, axis=0)
                    print('class_weights', class_weights)
                    class_weights = np.round(class_weights / np.float32(total_ex), decimals=3)
                    # 1-weights approach:
                    class_weights = np.subtract([1] * len(class_weights), class_weights)
                else:
                    class_weights = np.zeros((self._num_classes), dtype='float32')
                print('class_weights', class_weights)
                weights_mtx.append(class_weights)
        weights_mtx = np.array(weights_mtx)
        print('weights_mtx', weights_mtx)
        return weights_mtx

    def get_global_weights_transition_class(self, data_dict):
        """Like get_weights_transition_class, but normalizes each status subset
        by the global example count instead of the subset size."""
        categorical_cols = {'MBA_DELINQUENCY_STATUS': ['0', '3', '6', '9', 'C', 'F', 'R']}
        idx_categorical_cols = {}
        for cat, values in categorical_cols.items():
            idx_categorical_cols[cat] = [[], []]
            if any(cat in s for s in self.features_list):
                idx_categorical_cols[cat][0].extend([self.features_list.index(cat + '_' + v) for v in values])
                idx_categorical_cols[cat][1].extend([cat + '_' + v for v in values])
                print(cat, 'is found', len(values), len(idx_categorical_cols[cat][0]), len(idx_categorical_cols[cat][1]))
        print(idx_categorical_cols)
        self._idx_categorical_cols = idx_categorical_cols['MBA_DELINQUENCY_STATUS'][0]
        trans_subset = []
        weights_mtx = []
        for cat, values in idx_categorical_cols.items():
            for val in values[0]:
                print('val', val)
                trans_subset = [data_dict['dataset_labels'][i] for i, elem in enumerate(data_dict['dataset_features']) if elem[val] == 1]
                total_ex = len(trans_subset)
                print('total_ex: ', total_ex, 'self._total_num_examples: ', self._total_num_examples)
                if total_ex > 0:
                    print('trans_subset[0]: ', trans_subset[0])
                    class_weights = np.sum(trans_subset, axis=0)
                    print('class_weights', class_weights)
                    # Global normalization is the only difference from
                    # get_weights_transition_class.
                    class_weights = np.round(class_weights / np.float32(self._total_num_examples), decimals=3)
                    # 1-weights approach:
                    class_weights = np.subtract([1] * len(class_weights), class_weights)
                else:
                    class_weights = np.zeros((self._num_classes), dtype='float32')
                print('class_weights', class_weights)
                weights_mtx.append(class_weights)
        weights_mtx = np.array(weights_mtx)
        print('weights_mtx', weights_mtx)
        return weights_mtx

    def get_metadata_dataset_cols(self, max_rows, cols, remainder):
        """Like get_metadata_dataset, but restricted to the feature columns in
        `cols` (or to their complement when `remainder` is True)."""
        try:
            files_dict = {}
            self._total_num_examples = 0
            ok_inputs = True
            files_dict[0] = {}
            files_dict[0]['dataset_features'] = []
            files_dict[0]['dataset_labels'] = []
            for file_path in self.all_files:
                with pd.HDFStore(file_path) as dataset_file:
                    print(file_path, '...to load')
                    total_rows = dataset_file.get_storer(self.dtype + '/features').nrows
                    if ok_inputs:
                        self.index_length = len(dataset_file.get_storer(self.dtype + '/features').attrs.data_columns)
                        if remainder:
                            # Complement of the requested columns.
                            cols = set(dataset_file.get_storer(self.dtype + '/features').attrs.non_index_axes[0][1][self.index_length:]) - set(cols)
                            cols = list(cols)
                        self.features_list = cols
                        print('Columns of dataset: ', len(self.features_list), self.features_list)
                        self.labels_list = dataset_file.get_storer(self.dtype + '/labels').attrs.non_index_axes[0][1][self.index_length:]
                        ok_inputs = False
                    if total_rows <= max_rows:
                        max_rows -= total_rows
                        df_feat = dataset_file.select(self.dtype + '/features', start=0)
                        files_dict[0]['dataset_features'].extend(df_feat[self.features_list].values)
                        del df_feat  # free the full frame as soon as possible
                        print('len(files_dict[0][dataset_features][0]): ', len(files_dict[0]['dataset_features'][0]))
                        df_lab = dataset_file.select(self.dtype + '/labels', start=0, stop=total_rows)
                        files_dict[0]['dataset_labels'].extend(df_lab.values)
                        del df_lab
                    else:
                        total_rows = max_rows
                        df_feat = dataset_file.select(self.dtype + '/features', start=0, stop=total_rows)
                        files_dict[0]['dataset_features'].extend(df_feat[self.features_list].values)
                        del df_feat
                        print('len(files_dict[0][dataset_features][0]): ', len(files_dict[0]['dataset_features'][0]))
                        df_lab = dataset_file.select(self.dtype + '/labels', start=0, stop=total_rows)
                        files_dict[0]['dataset_labels'].extend(df_lab.values)
                        del df_lab
                    self._total_num_examples += total_rows
                    print(file_path, ' loaded in RAM')
                    if total_rows == max_rows:
                        break
            files_dict[0]['nrows'] = self._total_num_examples
            files_dict[0]['init_index'] = 0
            files_dict[0]['end_index'] = self._total_num_examples
            class_weights = np.sum(files_dict[0]['dataset_labels'], axis=0)
            print('class_weights', class_weights)
            class_weights = np.round(class_weights / np.float32(self._total_num_examples), decimals=3)
            # 1-weights approach:
            class_weights = np.subtract([1] * len(class_weights), class_weights)
            print('class_weights', class_weights)
            files_dict[0]['class_weights'] = class_weights
            return files_dict
        except Exception as e:
            raise ValueError('Error in retrieving the METADATA object: ' + str(e))

    # Batches the training set in lots of batch_size; on reaching the end it
    # concatenates the tail with the front and continues until num_epoch.
    def next_batch(self, batch_size):
        """Get the next batch of the data with the given batch size."""
        if not isinstance(batch_size, int):
            raise TypeError('batch_size has to be of int type.')
        if self._file_index + batch_size <= self._current_num_examples:
            temp_features = self.features[self._file_index:
                                          self._file_index + batch_size, :]
            temp_labels = self.labels[self._file_index:
                                      self._file_index + batch_size]
            self._file_index += batch_size
            # Wrap the cursor back to zero when the end is reached exactly.
            self._file_index = self._file_index % self._current_num_examples
        else:
            # Wrap around: concatenate the tail with the head of the data.
            temp_end = self._file_index + batch_size - self._current_num_examples
            temp_features = np.concatenate(
                (self.features[self._file_index:, :],
                 self.features[:temp_end, :]),
                axis=0)
            temp_labels = np.concatenate(
                (self.labels[self._file_index:], self.labels[:temp_end]),
                axis=0)
            self._file_index = temp_end
            # NOTE(review): the next line discards temp_end and restarts from 0
            # after every wrap-around -- looks unintended; confirm before changing.
            self._file_index = 0
        return temp_features, temp_labels, np.array(
            [1.0], dtype=np.dtype('float32'))  # temp_weights

    def next_sequential_batch_period(self, batch_size):
        """Get the next batch filtered to self.period_range straight from the
        open HDF store, advancing to the next file when the current one ends.

        NOTE(review): self.dataset is no longer assigned in __init__ (the
        assignment is commented out there), so calling this would raise
        AttributeError -- confirm it is dead code.
        """
        if not isinstance(batch_size, int):
            raise TypeError('DataBatch: batch_size has to be of int type.')
        if self.dataset is None:
            raise ValueError('DataBatch: The file_dataset was not loaded!')
        if self._file_index + batch_size <= self._current_num_examples:
            temp_features = self.dataset.select('features', "PERIOD>=" + str(self.period_range[0]) + ' & PERIOD<=' + str(self.period_range[1]), start=self._file_index, stop=self._file_index + batch_size)
            temp_labels = self.dataset.select('labels', "PERIOD>=" + str(self.period_range[0]) + ' & PERIOD<=' + str(self.period_range[1]), start=self._file_index, stop=self._file_index + batch_size)
            self._file_index += batch_size
        else:
            temp_features = self.dataset.select('features', "PERIOD>=" + str(self.period_range[0]) + ' & PERIOD<=' + str(self.period_range[1]), start=self._file_index)
            temp_labels = self.dataset.select('labels', "PERIOD>=" + str(self.period_range[0]) + ' & PERIOD<=' + str(self.period_range[1]), start=self._file_index)
            self._file_index = 0
            self.dataset_index += 1
            self.dataset.close()
            self.dataset = pd.HDFStore(self.all_files[self.dataset_index])  # the next file of the path
            self._current_num_examples = self.dataset.get_storer('features').nrows
        return temp_features, temp_labels, np.array([1.0], dtype=np.dtype('float32'))  # temp_weights

    def next_sequential_batch(self, batch_size):
        """Get the next sequential batch from the in-RAM dataset dictionary.

        Returns (features, labels, weights, transitions); transitions is the
        slice of the MBA_DELINQUENCY_STATUS one-hot feature columns.
        """
        if not isinstance(batch_size, int):
            raise TypeError('DataBatch: batch_size has to be of int type.')
        if self._dict is None:
            raise ValueError('DataBatch: The dataset was not loaded!')
        if self._file_index + batch_size <= self._dict[self.dataset_index]['nrows']:
            temp_features = np.array(self._dict[self.dataset_index]['dataset_features'][self._file_index: self._file_index + batch_size])
            temp_labels = np.array(self._dict[self.dataset_index]['dataset_labels'][self._file_index: self._file_index + batch_size])
            self._file_index += batch_size
        else:
            # Tail batch: return whatever is left and restart from the top.
            temp_features = np.array(self._dict[self.dataset_index]['dataset_features'][self._file_index:])
            temp_labels = np.array(self._dict[self.dataset_index]['dataset_labels'][self._file_index:])
            self._file_index = 0
        transitions = temp_features[:, self.idx_transitions]
        return temp_features, temp_labels, np.array([1.0], dtype=np.dtype('float32')), transitions  # temp_weights

    def next_random_batch_perfiles(self, batch_size):  # pending!!
        """Draw a random batch by sampling a few loans from every h5 file.

        NOTE(review): marked 'pending' by the original author; reads
        self.dataset before the per-file loop assigns it -- confirm the
        expected call order before use.
        """
        if not isinstance(batch_size, int):
            raise TypeError('DataBatch: batch_size has to be of int type.')
        if self.h5_path is None:
            raise ValueError('DataBatch: The file_dataset was not loaded!')
        records_per_file = math.ceil(np.float32(batch_size / len(self.all_files)))
        features_list = self.dataset.get_storer('features').attrs.non_index_axes[0][1][3:]
        temp_features = pd.DataFrame(None, columns=features_list)
        labels_list = self.dataset.get_storer('labels').attrs.non_index_axes[0][1][3:]
        temp_labels = pd.DataFrame(None, columns=labels_list)
        for file_path in self.all_files:
            self.dataset = pd.HDFStore(file_path)
            self._current_num_examples = self.dataset.get_storer('features').nrows
            self._num_columns = self.dataset.get_storer('features').ncols - len(self.dataset.get_storer('features').attrs.data_columns)
            self._num_classes = self.dataset.get_storer('labels').ncols - len(self.dataset.get_storer('labels').attrs.data_columns)
            period_random = np.random.RandomState()
            for i in range(records_per_file):
                while True:
                    try:
                        random_loan = self._loan_random.randint(self._current_num_examples)
                        loan_id = self.dataset.select('features', "PERIOD>=" + str(self.period_range[0]) + ' & PERIOD<=' + str(self.period_range[1]), start=random_loan, stop=random_loan + 1).index.get_level_values(0)[0]
                        if str(loan_id):
                            df_features = self.dataset.select('features', "PERIOD>=" + str(self.period_range[0]) + ' & PERIOD<=' + str(self.period_range[1]) + ' & LOAN_ID=' + str(loan_id))
                            df_labels = self.dataset.select('labels', "PERIOD>=" + str(self.period_range[0]) + ' & PERIOD<=' + str(self.period_range[1]) + ' & LOAN_ID=' + str(loan_id))
                            if df_features.shape[0] > 0:
                                # Pick a random observation period of this loan.
                                r_period = period_random.randint(df_features.shape[0])
                                temp_features = pd.concat([temp_features, df_features.iloc[r_period, :].to_frame().T], ignore_index=True, copy=False)
                                temp_labels = pd.concat([temp_labels, df_labels.iloc[r_period, :].to_frame().T], ignore_index=True, copy=False)
                                break
                    except Exception as e:
                        print('Invalid Loan: ' + str(e))
            print('temp_features')
            self.dataset.close()
        return temp_features, temp_labels, np.array([1.0], dtype=np.dtype('float32'))  # temp_weights

    def next_random_batch(self, batch_size):  # pending!! --_exp
        """Draw a uniformly random batch from the in-RAM dataset.

        Returns (features, labels, transitions).
        """
        if not isinstance(batch_size, int):
            raise TypeError('DataBatch: batch_size has to be of int type.')
        if self.h5_path is None:
            raise ValueError('DataBatch: The file_dataset was not loaded!')
        temp_features = []
        temp_labels = []
        random_batch = np.array(list(self._loan_random.get_batch(batch_size)))
        orb_size = 0
        for k, v in self._dict.items():
            try:
                records_per_file = np.logical_and(random_batch >= v['init_index'], random_batch < (v['end_index']))
                orb = np.sort(random_batch[records_per_file]) - v['init_index']
                # NOTE(review): asserts the whole batch falls inside a single
                # file group -- only valid while all rows are merged into group 0.
                assert (len(orb) == batch_size)
                if len(orb) > 0:
                    temp_features.extend(np.array([v['dataset_features'][index] for index in orb]))
                    temp_labels.extend(np.array([v['dataset_labels'][index] for index in orb]))
                    orb_size += len(orb)
            except Exception as e:
                print('Invalid Range: ' + str(e))
        # Every sampled row must carry at least one positive label.
        assert (np.where(np.sum(temp_labels, axis=1) == 0)[0].size == 0)
        temp_features = np.array(temp_features)
        temp_labels = np.array(temp_labels)
        assert (temp_features.shape[0] == batch_size)
        assert (temp_labels.shape[0] == batch_size)
        # Apply the same permutation to features and labels:
        permutation = np.random.permutation(len(temp_features))
        temp_features = temp_features[permutation, :]
        temp_labels = temp_labels[permutation, :]
        transitions = temp_features[:, self.idx_transitions]
        return temp_features, temp_labels, transitions

    def next_random_batch_ind_access(self, batch_size):  # pending!! --_ind_access
        """Draw a random batch by resolving each global index to its file and
        reading the single row from disk (slow; kept for reference)."""
        if not isinstance(batch_size, int):
            raise TypeError('DataBatch: batch_size has to be of int type.')
        if self.h5_path is None:
            raise ValueError('DataBatch: The file_dataset was not loaded!')
        features_list = self.dataset.get_storer('features').attrs.non_index_axes[0][1][self.index_length:]
        temp_features = pd.DataFrame(None, columns=features_list)
        labels_list = self.dataset.get_storer('labels').attrs.non_index_axes[0][1][self.index_length:]
        temp_labels = pd.DataFrame(None, columns=labels_list)
        random_batch = self._loan_random.get_batch(batch_size)
        startTime = datetime.now()
        for i in random_batch:
            try:
                startTime1 = datetime.now()
                partial_number = 0
                values_list = list(self._dict.values())
                # Find the file group whose cumulative row count covers index i.
                for e in values_list:
                    partial_number += e['nrows']
                    if partial_number >= i:
                        break
                if self.dataset.is_open:
                    self.dataset.close()
                self.dataset = pd.HDFStore(e['path'])  # the file holding row i
                self._current_num_examples = self.dataset.get_storer('features').nrows
                self._num_columns = self.dataset.get_storer('features').ncols - len(self.dataset.get_storer('features').attrs.data_columns)
                self._num_classes = self.dataset.get_storer('labels').ncols - len(self.dataset.get_storer('labels').attrs.data_columns)
                true_loan = self._current_num_examples - (partial_number - i)
                df_features = self.dataset.select('features', start=true_loan, stop=true_loan + 1)
                df_labels = self.dataset.select('labels', start=true_loan, stop=true_loan + 1)
                temp_features = pd.concat([temp_features, df_features], ignore_index=True, copy=False)
                temp_labels = pd.concat([temp_labels, df_labels], ignore_index=True, copy=False)
                print('Time for Getting one element: ', datetime.now() - startTime1)
            except Exception as e:
                print('Invalid Loan: ' + str(e))
        print('Time for Getting' + str(batch_size) + ' random elements: ', datetime.now() - startTime)
        return temp_features, temp_labels, np.array([1.0], dtype=np.dtype('float32'))  # temp_weights

    def shuffle(self, data=None, labels=None):
        """Reshuffle the dataset and its corresponding labels.

        With no arguments, self.features/self.labels are permuted in place
        (this form was previously shadowed by a duplicate definition).
        When `data` and `labels` are given, the original second definition's
        behavior is preserved.

        NOTE(review): the two-argument form only rebinds its local names, so
        the caller's arrays are NOT reordered -- confirm intent before
        relying on it.
        """
        if data is None and labels is None:
            permutation = np.random.permutation(self._current_num_examples)
            self.features = self.features[permutation, :]
            self.labels = self.labels[permutation]
            return
        rows = np.shape(data)[0]
        permutation = np.random.permutation(rows)
        data = data[permutation, :]
        labels = labels[permutation]
        return

    def sample(self):
        """Sample with replacement, biased by self.weights (gamma mixes the
        weighted distribution with a uniform one)."""
        probs = self.weights / self.weights.sum()
        gamma = 0  # .8
        probs = gamma * probs + (1 - gamma) / self._current_num_examples
        indices = np.random.choice(
            self._current_num_examples, size=self._current_num_examples, replace=True, p=probs)
        self.features = self.orig.features[indices, :]
        self.labels = self.orig.labels[indices]

    def total_num_batch(self, batch_size):
        """Total number of batches of `batch_size` across all file groups."""
        total_batch = 0
        for entry in self._dict.values():
            total_batch += math.ceil(np.float32(entry['nrows'] / batch_size))
        return total_batch

    @property
    def total_num_examples(self):
        """Get the number of examples in the dataset."""
        return self._total_num_examples

    @property
    def num_classes(self):
        """Get the number of label classes in the dataset."""
        return self._num_classes

    @property
    def num_columns(self):
        """Get the number of feature columns in the dataset."""
        return self._num_columns

    @property
    def class_weights(self):
        """Class-weight matrix computed while loading file group 0."""
        return self._dict[0]['class_weights']
class Data(object):
    """Immutable view over a (features, labels) pair of equal row count."""

    def __init__(self, in_tuple=None):
        """Store the pair; raise ValueError when row counts disagree."""
        if in_tuple is not None:
            if in_tuple[0].shape[0] != in_tuple[1].shape[0]:
                raise ValueError('Sizes should match!')
            self.features, self.labels = in_tuple
            self._num_examples, self._num_classes = self.labels.shape

    @property
    def num_examples(self):
        """Number of rows (examples) in the dataset."""
        return self._num_examples

    @property
    def num_classes(self):
        """Number of label columns (classes) in the dataset."""
        return self._num_classes
class Dataset(object):
    """A new class to represent learning datasets (train/validation/test).

    Either all three ``*_tuple`` pairs are supplied in memory, or at least
    one ``*_path`` directory of h5 files is given. The mutable list
    defaults for the period arguments are kept for interface
    compatibility; they are never mutated here.
    """

    def __init__(self, architecture, train_tuple=None, valid_tuple=None, test_tuple=None, feature_columns=None, train_path=None, valid_path=None, test_path=None,
                 train_period=[121, 279], valid_period=[280, 285], test_period=[286, 304], cols=None, remainder=False):
        if train_tuple is not None and valid_tuple is not None and test_tuple is not None:
            # BUG FIX: this call previously passed train_tuple positionally as
            # `architecture` and train_period as `path`, never reaching the
            # in-memory branch of DataBatch. Route each value to its parameter.
            self.train = DataBatch(architecture, None, train_period, in_tuple=train_tuple, cols=cols)
            self.validation = Data(valid_tuple)
            self.test = Data(test_tuple)
            self.feature_columns = feature_columns
        elif train_path is None and valid_path is None and test_path is None:
            raise ValueError('DataBatch: The path for at least one set was not loaded!')
        else:
            self.train = DataBatch(architecture, train_path, train_period, dtype='train', cols=cols, remainder=remainder)
            self.validation = DataBatch(architecture, valid_path, valid_period, dtype='valid', cols=cols, remainder=remainder)
            self.test = DataBatch(architecture, test_path, test_period, dtype='test', cols=cols, remainder=remainder)
def get_weights(labels):
    """Get the weights per class.

    Weight of class c is n_examples / (count of c + 1e-8): rare classes
    get larger weights; the epsilon avoids division by zero for classes
    that never occur.
    """
    n_examples = labels.shape[0]
    column_totals = labels.sum(axis=0)
    return n_examples / (1e-8 + column_totals)
| 35,567 | 10,401 |
from tkinter import *  # import the tkinter module

window = Tk()  # create the main window
window.title('Place Example')  # set the window title
window.geometry('300x200')  # set the window size

colors = ['red', 'green', 'light blue', 'yellow']

# Demonstrate the place() geometry manager: four labels stacked vertically.
# A plain for-loop replaces the original list comprehension, which was run
# purely for its side effects.
for i in range(4):
    y = 20 + i * 40
    label = Label(window, font="Arial 12",
                  # FIX: the text previously claimed place(80,...) although the
                  # labels are actually placed at x=40.
                  text='place(40,%d),anchor=NW' % y,
                  bg=colors[i])
    label.place(x=40, y=y, width=200, height=30)

# enter the Tk event loop
window.mainloop()
from django.urls import path, include
from django.contrib import admin
import fullctl.django.views as views
import django_devicectl.urls
# Root URL configuration.  Order matters: the catch-all org redirect must
# stay last so the included apps get first chance to match the empty prefix.
urlpatterns = [
    path("admin/", admin.site.urls),
    path("", include("social_django.urls", namespace="social")),
    path("", include("fullctl.django.urls")),
    path("", include("django_devicectl.urls")),
    path("", views.org_redirect),
]
| 387 | 121 |
"""
Originally Taken from https://github.com/rwightman/pytorch-image-models/blob/acd6c687fd1c0507128f0ce091829b233c8560b9/timm/models/hub.py
"""
import json
import logging
import os
from functools import partial
from typing import Union, Optional
import pytorch_lightning
import torch
try:
from torch.hub import get_dir
except ImportError:
from torch.hub import _get_torch_home as get_dir
from satflow import __version__
try:
from huggingface_hub import hf_hub_url
from huggingface_hub import cached_download
cached_download = partial(cached_download, library_name="satflow", library_version=__version__)
except ImportError:
hf_hub_url = None
cached_download = None
_logger = logging.getLogger(__name__)
def get_cache_dir(child_dir=""):
    """
    Return the directory where models are cached, creating it if necessary.

    `child_dir` optionally nests a subdirectory below the checkpoints dir.
    """
    parts = [get_dir(), "checkpoints"]
    if child_dir:
        parts.append(child_dir)
    model_dir = os.path.join(*parts)
    os.makedirs(model_dir, exist_ok=True)
    return model_dir
def has_hf_hub(necessary=False):
    """Report whether the huggingface_hub package is importable.

    With `necessary=True` a missing package raises instead of returning
    False, so callers that cannot proceed fail loudly.
    """
    available = hf_hub_url is not None
    if necessary and not available:
        raise RuntimeError(
            "Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`."
        )
    return available
def hf_split(hf_id):
    """Split a hub id of the form ``model`` or ``model@revision``.

    Returns (model_id, revision); revision is None when no '@' is present.
    """
    model_id, sep, revision = hf_id.partition("@")
    assert (
        "@" not in revision
    ), "hf_hub id should only contain one @ character to identify revision."
    return model_id, (revision if sep else None)
def load_cfg_from_json(json_file: Union[str, os.PathLike]):
    """Parse and return the JSON config stored at `json_file`."""
    with open(json_file, "r", encoding="utf-8") as handle:
        return json.load(handle)
def _download_from_hf(model_id: str, filename: str):
    """Fetch `filename` for the hub model `model_id` into the local cache."""
    repo_id, revision = hf_split(model_id)
    file_url = hf_hub_url(repo_id, filename, revision=revision)
    return cached_download(file_url, cache_dir=get_cache_dir("hf"))
def load_model_config_from_hf(model_id: str):
    """Download and parse ``config.json`` for a hub model.

    Returns (config_dict, architecture_name).  The hub id is stashed under
    the "hf_hub" key so the weights can be fetched later during model
    creation.
    """
    assert has_hf_hub(True)
    config_path = _download_from_hf(model_id, "config.json")
    config = load_cfg_from_json(config_path)
    # insert hf_hub id for pretrained weight load during model creation
    config["hf_hub"] = model_id
    return config, config.get("architecture")
def load_state_dict_from_hf(model_id: str):
    """Download the checkpoint for `model_id` and load it onto the CPU."""
    assert has_hf_hub(True)
    checkpoint_path = _download_from_hf(model_id, "pytorch_model.pth")
    return torch.load(checkpoint_path, map_location="cpu")
def cache_file_from_hf(model_id: str):
    """Download the checkpoint for `model_id` and return its cached path."""
    assert has_hf_hub(True)
    return _download_from_hf(model_id, "pytorch_model.pth")
def load_pretrained(
    model,
    default_cfg: Optional[dict] = None,
    in_chans: int = 12,
    strict: bool = True,
) -> Union[torch.nn.Module, pytorch_lightning.LightningModule]:
    """Load pretrained checkpoint
    Taken from https://github.com/rwightman/pytorch-image-models/blob/acd6c687fd1c0507128f0ce091829b233c8560b9/timm/models/helpers.py
    Args:
        model (nn.Module) : PyTorch model module, or LightningModule
        default_cfg (Optional[Dict]): default configuration for pretrained weights / target dataset
        in_chans (int): in_chans for model
        strict (bool): strict load of checkpoint
    """
    # NOTE(review): `model` must be an *uninitialized class* at this point --
    # issubclass() raises TypeError on instances; confirm all callers pass
    # the class itself, not an instance.
    is_lightning_module = issubclass(model, pytorch_lightning.LightningModule)
    default_cfg = default_cfg or getattr(model, "default_cfg", None) or {}
    # NOTE(review): pop() mutates the caller's config dict in place.
    pretrained_path = default_cfg.pop("checkpoint_path", None)
    hf_hub_id = default_cfg.pop("hf_hub", None)
    if in_chans != default_cfg.get("input_channels", None):
        # Channel mismatch: loosen loading so the first layer can stay
        # randomly initialized instead of failing the whole load.
        strict = False
        _logger.warning(
            f"Unable to convert pretrained weights because of mismatch in input channels, using random init for first layer."
        )
    if not is_lightning_module:
        # The model is passed uninitialized, so if not having to do the PL thing, should initialize here
        model = model(**default_cfg)
    if not pretrained_path and not hf_hub_id:
        _logger.warning("No pretrained weights exist for this model. Using random initialization.")
        return model
    if hf_hub_id and has_hf_hub(necessary=not pretrained_path):
        _logger.info(f"Loading pretrained weights from Hugging Face hub ({hf_hub_id})")
        if is_lightning_module:
            # Lightning restores hyperparameters itself from the checkpoint file.
            checkpoint = cache_file_from_hf(hf_hub_id)
            model.load_from_checkpoint(checkpoint, strict=strict, **default_cfg)
            return model
        state_dict = load_state_dict_from_hf(hf_hub_id)
    else:
        if is_lightning_module:
            model.load_from_checkpoint(pretrained_path, strict=strict, **default_cfg)
            return model
        state_dict = torch.load(pretrained_path, map_location="cpu")
    model.load_state_dict(state_dict, strict=strict)
    return model
| 5,110 | 1,777 |
class TransactionStatus(Enum, IComparable, IFormattable, IConvertible):
    """
    An enumerated type listing the possible statuses associated with a Transaction,TransactionGroup,or SubTransaction,
    or the result of a particular method call on one of those objects.
    enum TransactionStatus,values: Committed (3),Error (5),Pending (4),Proceed (6),RolledBack (2),Started (1),Uninitialized (0)
    """
    # Auto-generated stub for a .NET enum: bodies are placeholders; the real
    # implementations live in the managed (Revit API) assembly.
    def __eq__(self, *args):
        """ x.__eq__(y) <==> x==y """
        pass
    def __format__(self, *args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __ge__(self, *args):
        pass
    def __gt__(self, *args):
        pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self, *args):
        pass
    def __lt__(self, *args):
        pass
    def __ne__(self, *args):
        pass
    def __reduce_ex__(self, *args):
        pass
    def __str__(self, *args):
        pass
    # Enum members; populated by the runtime with the values listed in the
    # class docstring.
    Committed = None
    Error = None
    Pending = None
    Proceed = None
    RolledBack = None
    Started = None
    Uninitialized = None
    # Underlying integer value of the enum instance (runtime-provided).
    value__ = None
| 1,434 | 490 |
import argparse
import shlex
import logging
import sys
from . import VMKernel, ExecutionStrategy
# Command-line entry point: parse arguments, build the VM, run the program.
parser = argparse.ArgumentParser()
parser.add_argument('command', help='The command to be executed')
parser.add_argument(
    '-t', '--type',
    default=ExecutionStrategy.ELF, type=lambda s: ExecutionStrategy[s.upper()],
    help='Executable type (elf, flat)'
)
parser.add_argument('-m', '--memory', default=10_000, type=int, help='The amount of memory to give to the VM (bytes)')
parser.add_argument('-d', '--debug', action='store_true', default=False, help='Enable debug output')
parser.add_argument('-v', '--verbose', action='store_true', default=False)
args = parser.parse_args()
if args.verbose:
    print(f'Initializing VM with {args.memory:,d} bytes of memory...')
if args.debug:
    # Route debug logging straight to stdout with bare messages.
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format='%(message)s'
    )
vm = VMKernel(args.memory)
# Shell-style split of the command string: first token is the executable.
cmd, *cmd_args = shlex.split(args.command)
if args.type == ExecutionStrategy.ELF:
    if args.verbose:
        print(f'Running ELF executable {cmd!r} with arguments {cmd_args}...')
    vm.execute(args.type, cmd, cmd_args)
elif args.type == ExecutionStrategy.FLAT:
    if cmd_args:
        raise ValueError(f'Running flat binaries with arguments is not supported yet! Arguments: {cmd_args}')
    if args.verbose:
        print(f'Running flat executable {cmd!r}...')
    vm.execute(args.type, cmd)
else:
    # Unreachable via argparse (the -t converter only yields ELF/FLAT);
    # kept as a guard in case this module is driven programmatically.
    raise ValueError(f'Invalid executable type: {args.type}')
if args.verbose:
    print(f'Command {args.command!r} executed!')
| 1,568 | 512 |
# 3.2 Stack Min
# How would you design a stack which, in addition to push and pop, has a function min which
# returns the minimum element? Push, pop, and min should all operate in O(1) time.
| 198 | 59 |
# coding=utf-8
import requests
from bs4 import BeautifulSoup as bs
class ImomoeClientJapaneseAnimePage(object):
    """Scraper for the Japanese-anime listing page of www.imomoe.in.

    Fetches the page once at construction time and exposes the featured,
    classic, movie and OVA sections as lists of dictionaries.
    """

    def __init__(self):
        self.base_url = "http://www.imomoe.in"
        r = requests.get(self.base_url + "/list/2.html")
        self.jp_html = r.content
        self.soup = bs(self.jp_html, "lxml")
        self.all_div = self.soup.find_all("div")
        # NOTE(review): section positions are hard-coded against the current
        # page layout; they break silently if the site markup changes.
        self.focus_div = self.all_div[13]
        self.classic_div = self.all_div[22]
        self.movie_div = self.all_div[24]
        self.ova_div = self.all_div[25]

    def _parse_list(self, div, with_info=False):
        """Extract {title, href, img[, info]} dicts from a listing <div>.

        Shared by all four public section getters; `with_info` additionally
        pulls the second <p>'s text (only present in the featured section).
        """
        results = []
        for item in div.select("li"):
            entry = {}
            entry["title"] = item.p.a["title"]
            entry["href"] = self.base_url + item.p.a["href"]
            entry["img"] = item.img["src"]
            if with_info:
                entry["info"] = item.select("p")[1].string
            results.append(entry)
        return results

    def get_focus_list(self):
        """Return the featured (hot) Japanese anime list."""
        return self._parse_list(self.focus_div, with_info=True)

    def get_classic_list(self):
        """Return the classic Japanese anime list."""
        return self._parse_list(self.classic_div)

    def get_movie_list(self):
        """Return the Japanese theatrical (movie) anime list."""
        return self._parse_list(self.movie_div)

    def get_ova_list(self):
        """Return the Japanese OVA anime list."""
        return self._parse_list(self.ova_div)
| 2,224 | 765 |
# proxy module
from __future__ import absolute_import
from codetools.contexts.adapter.unit_conversion_adapter import *
| 119 | 34 |
import gym
import os
from ruamel.yaml import YAML, dump, RoundTripDumper
from stable_baselines3 import PPO
from stable_baselines3.ppo import MlpPolicy
from raisimGymTorch.env.bin import rsg_anymal
from raisimGymTorch.stable_baselines3.RaisimSbGymVecEnv import RaisimSbGymVecEnv as VecEnv
# Parallel environments
# directories
stb_path = os.path.dirname(os.path.realpath(__file__))
rsc_path = stb_path + "/../../../rsc"
task_path = stb_path + "/../env/envs/rsg_anymal"
# config
cfg = YAML().load(open(task_path + "/cfg.yaml", 'r'))
# create environment from the configuration file
env = VecEnv(rsg_anymal.RaisimGymEnv(rsc_path, dump(cfg['environment'], Dumper=RoundTripDumper)), cfg['environment'])
obs = env.reset()
# One PPO rollout spans a full episode: max_time / control_dt steps.
n_steps = int(cfg['environment']['max_time'] / cfg['environment']['control_dt'])
model = PPO(MlpPolicy, env,
            n_steps=n_steps,
            verbose=1,
            batch_size=int(n_steps*env.num_envs/4),
            n_epochs=4)
model.learn(total_timesteps=250000000)
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_common_layer_protocol_name import TapiCommonLayerProtocolName # noqa: F401,E501
from tapi_server.models.tapi_connectivity_coordinate_type import TapiConnectivityCoordinateType # noqa: F401,E501
from tapi_server.models.tapi_connectivity_reversion_mode import TapiConnectivityReversionMode # noqa: F401,E501
from tapi_server.models.tapi_topology_resilience_type import TapiTopologyResilienceType # noqa: F401,E501
from tapi_server import util
class TapiConnectivityResilienceConstraint(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
    Do not edit the class manually.
    """
    def __init__(self, is_lock_out=False, max_switch_times=None, restoration_coordinate_type=None, is_coordinated_switching_both_ends=False, hold_off_time=None, is_frozen=False, wait_to_revert_time=15, resilience_type=None, preferred_restoration_layer=None, restore_priority=None, reversion_mode=None):  # noqa: E501
        """TapiConnectivityResilienceConstraint - a model defined in OpenAPI
        :param is_lock_out: The is_lock_out of this TapiConnectivityResilienceConstraint.  # noqa: E501
        :type is_lock_out: bool
        :param max_switch_times: The max_switch_times of this TapiConnectivityResilienceConstraint.  # noqa: E501
        :type max_switch_times: int
        :param restoration_coordinate_type: The restoration_coordinate_type of this TapiConnectivityResilienceConstraint.  # noqa: E501
        :type restoration_coordinate_type: TapiConnectivityCoordinateType
        :param is_coordinated_switching_both_ends: The is_coordinated_switching_both_ends of this TapiConnectivityResilienceConstraint.  # noqa: E501
        :type is_coordinated_switching_both_ends: bool
        :param hold_off_time: The hold_off_time of this TapiConnectivityResilienceConstraint.  # noqa: E501
        :type hold_off_time: int
        :param is_frozen: The is_frozen of this TapiConnectivityResilienceConstraint.  # noqa: E501
        :type is_frozen: bool
        :param wait_to_revert_time: The wait_to_revert_time of this TapiConnectivityResilienceConstraint.  # noqa: E501
        :type wait_to_revert_time: int
        :param resilience_type: The resilience_type of this TapiConnectivityResilienceConstraint.  # noqa: E501
        :type resilience_type: TapiTopologyResilienceType
        :param preferred_restoration_layer: The preferred_restoration_layer of this TapiConnectivityResilienceConstraint.  # noqa: E501
        :type preferred_restoration_layer: List[TapiCommonLayerProtocolName]
        :param restore_priority: The restore_priority of this TapiConnectivityResilienceConstraint.  # noqa: E501
        :type restore_priority: int
        :param reversion_mode: The reversion_mode of this TapiConnectivityResilienceConstraint.  # noqa: E501
        :type reversion_mode: TapiConnectivityReversionMode
        """
        # Python type of each attribute, used by the generated (de)serializer.
        self.openapi_types = {
            'is_lock_out': bool,
            'max_switch_times': int,
            'restoration_coordinate_type': TapiConnectivityCoordinateType,
            'is_coordinated_switching_both_ends': bool,
            'hold_off_time': int,
            'is_frozen': bool,
            'wait_to_revert_time': int,
            'resilience_type': TapiTopologyResilienceType,
            'preferred_restoration_layer': List[TapiCommonLayerProtocolName],
            'restore_priority': int,
            'reversion_mode': TapiConnectivityReversionMode
        }
        # Mapping from Python attribute names to their kebab-case JSON keys.
        self.attribute_map = {
            'is_lock_out': 'is-lock-out',
            'max_switch_times': 'max-switch-times',
            'restoration_coordinate_type': 'restoration-coordinate-type',
            'is_coordinated_switching_both_ends': 'is-coordinated-switching-both-ends',
            'hold_off_time': 'hold-off-time',
            'is_frozen': 'is-frozen',
            'wait_to_revert_time': 'wait-to-revert-time',
            'resilience_type': 'resilience-type',
            'preferred_restoration_layer': 'preferred-restoration-layer',
            'restore_priority': 'restore-priority',
            'reversion_mode': 'reversion-mode'
        }
        self._is_lock_out = is_lock_out
        self._max_switch_times = max_switch_times
        self._restoration_coordinate_type = restoration_coordinate_type
        self._is_coordinated_switching_both_ends = is_coordinated_switching_both_ends
        self._hold_off_time = hold_off_time
        self._is_frozen = is_frozen
        self._wait_to_revert_time = wait_to_revert_time
        self._resilience_type = resilience_type
        self._preferred_restoration_layer = preferred_restoration_layer
        self._restore_priority = restore_priority
        self._reversion_mode = reversion_mode
    @classmethod
    def from_dict(cls, dikt) -> 'TapiConnectivityResilienceConstraint':
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The tapi.connectivity.ResilienceConstraint of this TapiConnectivityResilienceConstraint.  # noqa: E501
        :rtype: TapiConnectivityResilienceConstraint
        """
        return util.deserialize_model(dikt, cls)
    @property
    def is_lock_out(self):
        """Gets the is_lock_out of this TapiConnectivityResilienceConstraint.
        The resource is configured to temporarily not be available for use in the protection scheme(s) it is part of. This overrides all other protection control states including forced. If the item is locked out then it cannot be used under any circumstances. Note: Only relevant when part of a protection scheme.  # noqa: E501
        :return: The is_lock_out of this TapiConnectivityResilienceConstraint.
        :rtype: bool
        """
        return self._is_lock_out
    @is_lock_out.setter
    def is_lock_out(self, is_lock_out):
        """Sets the is_lock_out of this TapiConnectivityResilienceConstraint.
        The resource is configured to temporarily not be available for use in the protection scheme(s) it is part of. This overrides all other protection control states including forced. If the item is locked out then it cannot be used under any circumstances. Note: Only relevant when part of a protection scheme.  # noqa: E501
        :param is_lock_out: The is_lock_out of this TapiConnectivityResilienceConstraint.
        :type is_lock_out: bool
        """
        self._is_lock_out = is_lock_out
    @property
    def max_switch_times(self):
        """Gets the max_switch_times of this TapiConnectivityResilienceConstraint.
        Used to limit the maximum swtich times. When work fault disappears , and traffic return to the original work path, switch counter reset.  # noqa: E501
        :return: The max_switch_times of this TapiConnectivityResilienceConstraint.
        :rtype: int
        """
        return self._max_switch_times
    @max_switch_times.setter
    def max_switch_times(self, max_switch_times):
        """Sets the max_switch_times of this TapiConnectivityResilienceConstraint.
        Used to limit the maximum swtich times. When work fault disappears , and traffic return to the original work path, switch counter reset.  # noqa: E501
        :param max_switch_times: The max_switch_times of this TapiConnectivityResilienceConstraint.
        :type max_switch_times: int
        """
        self._max_switch_times = max_switch_times
    @property
    def restoration_coordinate_type(self):
        """Gets the restoration_coordinate_type of this TapiConnectivityResilienceConstraint.
        :return: The restoration_coordinate_type of this TapiConnectivityResilienceConstraint.
        :rtype: TapiConnectivityCoordinateType
        """
        return self._restoration_coordinate_type
    @restoration_coordinate_type.setter
    def restoration_coordinate_type(self, restoration_coordinate_type):
        """Sets the restoration_coordinate_type of this TapiConnectivityResilienceConstraint.
        :param restoration_coordinate_type: The restoration_coordinate_type of this TapiConnectivityResilienceConstraint.
        :type restoration_coordinate_type: TapiConnectivityCoordinateType
        """
        self._restoration_coordinate_type = restoration_coordinate_type
    @property
    def is_coordinated_switching_both_ends(self):
        """Gets the is_coordinated_switching_both_ends of this TapiConnectivityResilienceConstraint.
        Is operating such that switching at both ends of each flow acorss the FC is coordinated at both ingress and egress ends.  # noqa: E501
        :return: The is_coordinated_switching_both_ends of this TapiConnectivityResilienceConstraint.
        :rtype: bool
        """
        return self._is_coordinated_switching_both_ends
    @is_coordinated_switching_both_ends.setter
    def is_coordinated_switching_both_ends(self, is_coordinated_switching_both_ends):
        """Sets the is_coordinated_switching_both_ends of this TapiConnectivityResilienceConstraint.
        Is operating such that switching at both ends of each flow acorss the FC is coordinated at both ingress and egress ends.  # noqa: E501
        :param is_coordinated_switching_both_ends: The is_coordinated_switching_both_ends of this TapiConnectivityResilienceConstraint.
        :type is_coordinated_switching_both_ends: bool
        """
        self._is_coordinated_switching_both_ends = is_coordinated_switching_both_ends
    @property
    def hold_off_time(self):
        """Gets the hold_off_time of this TapiConnectivityResilienceConstraint.
        This attribute indicates the time, in milliseconds, between declaration of signal degrade or signal fail, and the initialization of the protection switching algorithm.  # noqa: E501
        :return: The hold_off_time of this TapiConnectivityResilienceConstraint.
        :rtype: int
        """
        return self._hold_off_time
    @hold_off_time.setter
    def hold_off_time(self, hold_off_time):
        """Sets the hold_off_time of this TapiConnectivityResilienceConstraint.
        This attribute indicates the time, in milliseconds, between declaration of signal degrade or signal fail, and the initialization of the protection switching algorithm.  # noqa: E501
        :param hold_off_time: The hold_off_time of this TapiConnectivityResilienceConstraint.
        :type hold_off_time: int
        """
        self._hold_off_time = hold_off_time
    @property
    def is_frozen(self):
        """Gets the is_frozen of this TapiConnectivityResilienceConstraint.
        Temporarily prevents any switch action to be taken and, as such, freezes the current state. Until the freeze is cleared, additional near-end external commands are rejected and fault condition changes and received APS messages are ignored. All administrative controls of any aspect of protection are rejected.  # noqa: E501
        :return: The is_frozen of this TapiConnectivityResilienceConstraint.
        :rtype: bool
        """
        return self._is_frozen
    @is_frozen.setter
    def is_frozen(self, is_frozen):
        """Sets the is_frozen of this TapiConnectivityResilienceConstraint.
        Temporarily prevents any switch action to be taken and, as such, freezes the current state. Until the freeze is cleared, additional near-end external commands are rejected and fault condition changes and received APS messages are ignored. All administrative controls of any aspect of protection are rejected.  # noqa: E501
        :param is_frozen: The is_frozen of this TapiConnectivityResilienceConstraint.
        :type is_frozen: bool
        """
        self._is_frozen = is_frozen
    @property
    def wait_to_revert_time(self):
        """Gets the wait_to_revert_time of this TapiConnectivityResilienceConstraint.
        If the protection system is revertive, this attribute specifies the time, in minutes, to wait after a fault clears on a higher priority (preferred) resource before reverting to the preferred resource.  # noqa: E501
        :return: The wait_to_revert_time of this TapiConnectivityResilienceConstraint.
        :rtype: int
        """
        return self._wait_to_revert_time
    @wait_to_revert_time.setter
    def wait_to_revert_time(self, wait_to_revert_time):
        """Sets the wait_to_revert_time of this TapiConnectivityResilienceConstraint.
        If the protection system is revertive, this attribute specifies the time, in minutes, to wait after a fault clears on a higher priority (preferred) resource before reverting to the preferred resource.  # noqa: E501
        :param wait_to_revert_time: The wait_to_revert_time of this TapiConnectivityResilienceConstraint.
        :type wait_to_revert_time: int
        """
        self._wait_to_revert_time = wait_to_revert_time
    @property
    def resilience_type(self):
        """Gets the resilience_type of this TapiConnectivityResilienceConstraint.
        :return: The resilience_type of this TapiConnectivityResilienceConstraint.
        :rtype: TapiTopologyResilienceType
        """
        return self._resilience_type
    @resilience_type.setter
    def resilience_type(self, resilience_type):
        """Sets the resilience_type of this TapiConnectivityResilienceConstraint.
        :param resilience_type: The resilience_type of this TapiConnectivityResilienceConstraint.
        :type resilience_type: TapiTopologyResilienceType
        """
        self._resilience_type = resilience_type
    @property
    def preferred_restoration_layer(self):
        """Gets the preferred_restoration_layer of this TapiConnectivityResilienceConstraint.
        Indicate which layer this resilience parameters package configured for.  # noqa: E501
        :return: The preferred_restoration_layer of this TapiConnectivityResilienceConstraint.
        :rtype: List[TapiCommonLayerProtocolName]
        """
        return self._preferred_restoration_layer
    @preferred_restoration_layer.setter
    def preferred_restoration_layer(self, preferred_restoration_layer):
        """Sets the preferred_restoration_layer of this TapiConnectivityResilienceConstraint.
        Indicate which layer this resilience parameters package configured for.  # noqa: E501
        :param preferred_restoration_layer: The preferred_restoration_layer of this TapiConnectivityResilienceConstraint.
        :type preferred_restoration_layer: List[TapiCommonLayerProtocolName]
        """
        self._preferred_restoration_layer = preferred_restoration_layer
    @property
    def restore_priority(self):
        """Gets the restore_priority of this TapiConnectivityResilienceConstraint.
        none  # noqa: E501
        :return: The restore_priority of this TapiConnectivityResilienceConstraint.
        :rtype: int
        """
        return self._restore_priority
    @restore_priority.setter
    def restore_priority(self, restore_priority):
        """Sets the restore_priority of this TapiConnectivityResilienceConstraint.
        none  # noqa: E501
        :param restore_priority: The restore_priority of this TapiConnectivityResilienceConstraint.
        :type restore_priority: int
        """
        self._restore_priority = restore_priority
    @property
    def reversion_mode(self):
        """Gets the reversion_mode of this TapiConnectivityResilienceConstraint.
        :return: The reversion_mode of this TapiConnectivityResilienceConstraint.
        :rtype: TapiConnectivityReversionMode
        """
        return self._reversion_mode
    @reversion_mode.setter
    def reversion_mode(self, reversion_mode):
        """Sets the reversion_mode of this TapiConnectivityResilienceConstraint.
        :param reversion_mode: The reversion_mode of this TapiConnectivityResilienceConstraint.
        :type reversion_mode: TapiConnectivityReversionMode
        """
        self._reversion_mode = reversion_mode
| 16,267 | 4,730 |
import base64
import json
import requests
import thread
import urllib
from app import redis
import config
class ApiError(Exception):
    """Base error raised for failures talking to the debugger API."""
class AuthError(ApiError):
    """Raised when stored credentials (cookies / CSRF token) are missing."""
def cookies():
    """Load the saved session cookies from redis; raise AuthError if absent."""
    raw = redis.get(config.COOKIE_KEY)
    if raw:
        return json.loads(raw)
    raise AuthError('could not load cookies')
def csrf():
    """Load the saved CSRF token from redis; raise AuthError if absent."""
    token = redis.get(config.CSRF_KEY)
    if token:
        return token
    raise AuthError('could not load csrf token')
def url(path):
    """Join `path` onto the proxy base URL (normalising leading slashes)."""
    return '{}/{}'.format(config.PROXY_URL, path.lstrip('/'))
def request(method, path, headers=None, **kwargs):
    """Issue an authenticated request through the proxy.

    Injects the CSRF token header and stored session cookies, then returns
    the decoded JSON body -- or None (after printing the raw text) when the
    response is not valid JSON.
    """
    headers = headers or {}
    headers.update({
        'X-CSRF-Token': csrf(),
    })
    r = requests.request(
        method, url(path), headers=headers, cookies=cookies(), **kwargs)
    try:
        return r.json()
    except Exception:
        # Python 2 print statement; dump the body for debugging and fall
        # through returning None.
        print r.text
def get(path):
    """Convenience GET against the proxied API."""
    return request('get', path)
def post(path, data=None, headers=None):
    """POST `data` (JSON-encoded) to the proxied API."""
    headers = headers or {}
    headers['content-type'] = 'application/json'
    return request('post', path, data=json.dumps(data), headers=headers)
def refresh(func):
    """Decorator that flags the UI for a refresh after the wrapped call.

    Usable bare (@refresh, writes 'update') or with a payload
    (@refresh('continue'), writes that payload to the refresh key).
    """
    if callable(func):
        data, early = 'update', False
    else:
        data, early = func, True

    def decorate(dfunc):
        def wrap(*args, **kwargs):
            result = dfunc(*args, **kwargs)
            redis.set(config.REFRESH_KEY, data)
            return result
        wrap.__name__ = dfunc.__name__
        return wrap

    return decorate if early else decorate(func)
class CPU:
    """HTTP client wrapping the remote CPU debugger's REST endpoints."""
    # Named (non "rN") registers accepted by `let`.
    regs = ['pc', 'sp', 'sr', 'sg']
    def get(self, path):
        # Thin wrapper over the module-level `get` helper.
        return get(path)
    def post(self, path, data=None):
        # Debugger endpoints require a referer header pointing at the UI.
        headers = {
            'referer': url('/cpu/debugger'),
        }
        if not data:
            data = {'body': {}}
        return post(path, data=data, headers=headers)
    @refresh
    def set_reg(self, reg, value):
        """Set register `reg` to `value`."""
        self.post('/cpu/regs', {'reg': reg, 'val': value})
    @refresh
    def set_mem(self, addr, value):
        """Write `value` at memory address `addr`."""
        self.post('/cpu/updatememory', {'addr': addr, 'val': value})
    # api endpoints
    def manual(self):
        """Fetch the device manual text."""
        return self.get('/get_manual')['manual']
    def load(self):
        """(Re)load the program on the CPU, then reset into debug mode."""
        self.post('/cpu/load')
        self.reset()
    @refresh
    def reset(self, debug=True):
        """Reset the CPU (debug or no-debug mode); returns success flag."""
        if debug:
            j = self.post('/cpu/reset/debug')
        else:
            j = self.post('/cpu/reset/nodebug')
        return j['data']['success']
    @refresh
    def send_input(self, data):
        """Send console input to the running program."""
        return self.post('/cpu/send_input', data={'body': data})
    @refresh
    def step_out(self):
        return self.post('/cpu/dbg/step_out')
    @refresh
    def step_over(self):
        return self.post('/cpu/dbg/step_over')
    @refresh
    def step(self, n=1):
        """Single-step, or step `n` instructions via the bulk endpoint."""
        if n > 1:
            return self.post('/cpu/dbg/stepn/{}'.format(n))
        else:
            return self.post('/cpu/step')
    def breakpoints(self):
        """List debugger events (breakpoints)."""
        return self.get('/cpu/dbg/events')
    def stepcount(self):
        return self.get('/cpu/dbg/stepcount')
    @refresh('continue')
    def _continue(self):
        return self.post('/cpu/dbg/continue')
    @refresh
    def _break(self, addr):
        # event 0: set a breakpoint at `addr`.
        return self.post('/cpu/dbg/event', data={'addr': addr, 'event': 0})
    @refresh
    def unbreak(self, addr):
        # event -1: clear the breakpoint at `addr`.
        return self.post('/cpu/dbg/event', data={'addr': addr, 'event': -1})
    def read(self, addr, length):
        """Read `length` bytes at `addr`; returns the raw decoded bytes."""
        data = self.get('/cpu/dbg/memory/{:04x}?len={}'.format(addr, length))
        if data['error']:
            raise ApiError(data['error'])
        else:
            return base64.b64decode(data['raw'])
    @refresh
    def let(self, target, value):
        """Assign `value` to a register name ('r0', 'pc', ...) or address."""
        if target.startswith('r') or target in self.regs:
            if target in self.regs:
                i = target
            else:
                i = target.lstrip('r')
            self.set_reg(i, value)
        else:
            self.set_mem(target, value)
    def snapshot(self):
        return self.get('/cpu/snapshot')
# Module-level singleton used by the helper functions below.
cpu = CPU()
def assemble(asm):
    """Assemble source text via the remote assembler endpoint."""
    return post('/assemble', data={'asm': asm})
def disasm(obj):
    """Disassemble an encoded object blob; returns the instruction list."""
    response = get('/cpu/dbg/disasm?obj=' + urllib.quote(obj))
    return response['data']['insns']
def whoami():
    """Return the identity of the current session."""
    return get('/whoami')
| 4,285 | 1,389 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import slumber
# Default public API endpoint; override with the SYLVADB_API env var.
HOST = "http://api.sylvadb.com/v1/"
SYLVADB_API = os.environ.get("SYLVADB_API", HOST)
# Element modes used throughout the client.
NODE = "node"
RELATIONSHIP = "relationship"
# Extracted from Six for Python 2 and 3 compatibility
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        body = dict(cls.__dict__)
        slots = body.get('__slots__')
        if slots is not None:
            slot_names = [slots] if isinstance(slots, str) else slots
            for name in slot_names:
                body.pop(name)
        # Drop descriptors the new class will recreate for itself.
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
class BaseMeta(type):
    """Metaclass to populate resources properties by looking up _attrs"""
    def __new__(metaname, classname, baseclasses, attrs):  # noqa
        cls = type.__new__(metaname, classname, baseclasses, attrs)
        for attr in cls._attrs.keys():
            # Bind `attr` eagerly via a default argument: a plain closure
            # would be late-binding, making EVERY property read and write
            # the last key of _attrs.
            setattr(cls, attr, property(
                # Getter
                lambda self, _attr=attr: self._attrs.get(_attr, None),
                # Setter
                lambda self, value, _attr=attr: self._attrs.__setitem__(_attr, value),
            ))
        return cls
class Base(object):
    """Shared lookup/representation behaviour for SylvaDB client objects."""

    def get(self, key, *args, **kwargs):
        """
        Return self[key], transforming `key` first (see __keytransform__).

        A fallback may be supplied positionally or as `default=`; the
        keyword wins when both are given.  Without a fallback the original
        KeyError propagates.  (The original implementation indexed args[0]
        unconditionally when any kwarg was present, raising IndexError for
        e.g. get(k, other=1).)
        """
        _key = self.__keytransform__(key)
        try:
            return self.__getitem__(_key)
        except KeyError:
            if "default" in kwargs:
                return kwargs["default"]
            if args:
                return args[0]
            raise

    def __keytransform__(self, key):
        """
        Extract the right key, `key` can be a string containing a slug,
        an object with a `_slug` attribute, or a dictionary with a 'slug' key.
        """
        if isinstance(key, dict):
            return key.get("slug")
        elif hasattr(key, "_slug"):
            return key._slug
        return key

    def __repr__(self):
        # Optional mode/slug attributes set by subclasses refine the label.
        mode = getattr(self, "_mode", None)
        slug = getattr(self, "_slug", None)
        if mode is not None:
            if slug is not None:
                msg = "{}{} of {}".format(mode.capitalize(),
                                          self.__class__.__name__, slug)
            else:
                msg = "{}{}".format(mode.capitalize(), self.__class__.__name__)
        else:
            msg = "{}".format(self.__class__.__name__)
        return "<SylvaDB {} at {}>".format(msg, hex(id(self)))
@add_metaclass(BaseMeta)
class Graph(object):
    """Graph class with properties and access to data and schema"""
    # NOTE(review): _attrs is a class-level dict and pull() writes into it
    # through self._attrs, so property values appear shared across Graph
    # instances -- confirm this is intended.
    _attrs = {
        "name": None,
        "description": None,
        "public": False,
    }  # For the metaclass
    def __init__(self, graph_slug, auth):
        # API client bound to this graph's slug.
        self._api = API(token=auth, graph_slug=graph_slug)
        self.nodes = Data(api=self._api, mode=NODE)
        self.relationships = Data(api=self._api, mode=RELATIONSHIP)
        # Convenience alias.
        self.rels = self.relationships
        self.pull()
    def push(self):
        """Push changes from the Graph properties to the server"""
        self._api.patch_graph(params=self._attrs)
    def pull(self):
        """Pull changes to the Graph properties from the server"""
        _attrs = self._api.get_graph()
        for prop in self._attrs:
            self._attrs[prop] = _attrs[prop]
    def destroy(self):
        """Delete all contents and remove the Graph"""
        return self._api.delete_graph()
    def export(self, data=True, schema=True):
        """Export Graph data, schema or both"""
        if data and not schema:
            return self._api.export_data()
        elif not data and schema:
            return self._api.export_schema()
        else:
            return self._api.export_graph()
class Data(Base):
    """Access point to a graph's nodes or relationships, grouped by type."""

    def __init__(self, api, mode):
        self._api = api
        self._mode = mode
        self._types = None
        self._datacols = {}

    @property
    def types(self):
        """Lazy loading property to list data types (node and rel types)"""
        if self._types is None:
            self._types = TypeCollection(self._api, self._mode)
        return self._types

    def __getitem__(self, datatype):
        """
        Return the `DataCollection` for `datatype` (a node or relationship
        type); raise KeyError when the type is unknown.
        """
        slug = self.__keytransform__(datatype)
        known = slug in self.types or slug in [t["slug"] for t in self.types]
        if not known:
            raise KeyError("{}type '{}' not found".format(self._mode, slug))
        if slug not in self._datacols:
            # Cache collections so locally added data survives re-lookups.
            self._datacols[slug] = DataCollection(self._api, self._mode, slug)
        return self._datacols[slug]

    def __iter__(self):
        """Iterate over the available types."""
        return iter(self.types)

    def __len__(self):
        """Number of available types."""
        return len(self.types)
class BaseCollection(Base):
    """Shared behaviour for server-backed collections."""

    def __init__(self, api, mode, slug=None):
        self._api = api
        self._mode = mode
        self._slug = slug
        self._data = None   # lazily fetched items
        self._to_add = []   # items queued locally until the next push

    @property
    def data(self):
        """Items known to the server, fetched on first access."""
        if self._data is None:
            self.pull()
        return self._data

    def _hydrate(self, data_dict):
        """Prepare an item for the server; subclasses may override."""
        return data_dict

    def _dehydrate(self, data_dict):
        """Prepare an item coming from the server; subclasses may override."""
        return data_dict

    def add(self, data_dict):
        """Queue a new item dictionary to be created on the next push."""
        self._to_add.append(self._hydrate(data_dict))

    def all(self):
        """Every item: persisted ones followed by pending ones."""
        return self.data + self._to_add

    def single(self):
        """The first item dictionary, or None when the collection is empty."""
        items = self.data + self._to_add
        if items:
            return items[0]

    def __getitem__(self, key):
        # TODO: Lazy loading and slicing from server
        index = self.__keytransform__(key)
        if isinstance(index, (int, slice)):
            return self.all()[index]

    def __iter__(self):
        """Iterate over all items."""
        return iter(self.all())

    def __len__(self):
        """Number of items, pending ones included."""
        return len(self.all())
class DataCollection(BaseCollection):
    """Collection of nodes or relationships of a single type."""

    def __init__(self, api, mode, slug=None):
        super(DataCollection, self).__init__(api, mode, slug)
        self._properties = None  # cache for the PropertyCollection

    def _hydrate(self, data_dict):
        """Wrap raw properties in the envelope the server expects."""
        return {"id": None, "properties": data_dict}

    def push(self):
        """Create the queued items on the server for this type."""
        if not self._to_add:
            return
        post = getattr(self._api, "post_{}s".format(self._mode))
        new_ids = post(self._slug, params=self._to_add)
        if new_ids:
            # Adopt the definitive IDs the server assigned
            for pos, new_id in enumerate(new_ids):
                self._to_add[pos].update({"id": new_id})
        self._data += self._to_add
        self._to_add = []

    def pull(self):
        """Replace local items with the server's current contents."""
        get = getattr(self._api, "get_{}s".format(self._mode))
        payload = get(self._slug)
        self._data = payload.get("{}s".format(self._mode), [])
        self._to_add = []

    @property
    def properties(self):
        """Schema properties of this type, fetched on first access."""
        if self._properties is None:
            self._properties = PropertyCollection(self._api, self._mode,
                                                  self._slug)
        return self._properties
class TypeCollection(BaseCollection):
    """Collection of node or relationship types of a graph."""

    def push(self):
        """Create the queued types on the server."""
        if not self._to_add:
            return
        post = getattr(self._api, "post_{}types".format(self._mode))
        post(params=self._to_add)
        self._to_add = []

    def pull(self):
        """Replace the local types with the server's current ones."""
        get = getattr(self._api, "get_{}types".format(self._mode))
        self._data = get()
        self._to_add = []
class PropertyCollection(BaseCollection):
    """Collection of schema properties for a node or relationship type."""

    def pull(self):
        """Fetch this type's schema properties from the server."""
        get = getattr(self._api,
                      "get_{}type_schema_properties".format(self._mode))
        payload = get(self._slug)
        # The server nests the list under a "properties" key
        self._data = payload["properties"] if payload else payload
class SlumberTokenAuth():
    """Callable auth hook that adds a token Authorization header."""

    def __init__(self, token):
        self.token = token

    def __call__(self, r):
        # Invoked by requests with the prepared request object
        r.headers['Authorization'] = "Token {0}".format(self.token)
        return r
class API(object):
    """Thin wrapper over the SylvaDB REST API, built on slumber.

    Every graph-scoped method operates on the graph selected at
    construction time (or later via `use`) through `self._slug`.
    """

    def __init__(self, token, graph_slug=None):
        self._api = slumber.API(SYLVADB_API, auth=SlumberTokenAuth(token))
        self._slug = graph_slug

    def __repr__(self):
        return "<SylvaDB API at {}>".format(hex(id(self)))

    def use(self, graph_slug):
        """Change the graph the API works with."""
        self._slug = graph_slug

    # Graphs methods
    def filter_graphs(self, params=None):
        """Filtering over graphs using params"""
        # TODO: Filter and search
        return self._api.graphs.filter(params)

    def get_graphs(self):
        """Get user graphs (collaborations too)"""
        return self._api.graphs.get()

    def post_graph(self, params=None):
        """Create a new graph"""
        # The params available are:
        # - name
        # - description
        return self._api.graphs.post(params)

    def get_graph(self):
        """Get the info about a graph"""
        return self._api.graphs(self._slug).get()

    def put_graph(self, params=None):
        """Modify info for a graph."""
        # The params available are (omitted ones are removed):
        # - name
        # - description
        # - public
        return self._api.graphs(self._slug).put(params)

    def patch_graph(self, params=None):
        """Modify info for a graph."""
        # The params available are (omitted ones aren't treated):
        # - name
        # - description
        # - public
        return self._api.graphs(self._slug).patch(params)

    def delete_graph(self):
        """Delete a graph."""
        return self._api.graphs(self._slug).delete()

    # Export and import methods
    # The methods that allow export are all GET
    def export_graph(self):
        """Export all the info for a graph."""
        return self._api.graphs(self._slug).export.graph.get()

    def export_schema(self):
        """Export the schema for a graph."""
        return self._api.graphs(self._slug).export.schema.get()

    def export_data(self):
        """Export the data for a graph."""
        return self._api.graphs(self._slug).export.data.get()

    # The methods that allow import are all PUT
    # NOTE: `import` is a Python keyword, so the attribute chains below are
    # syntax errors as written; they would need getattr-based access.
    # def import_graph(self, params=None):
    #     return self._api.graphs(self._slug).import.graph.put(params)
    # def import_schema(self, params=None):
    #     return self._api.graphs(self._slug).import.schema.put(params)
    # def import_data(self, params=None):
    #     return self._api.graphs(self._slug).import.data.put(params)

    # Schema methods
    def get_nodetypes(self):
        """Get node types for a graph."""
        return (self._api
                .graphs(self._slug)
                .types.nodes.get())

    def post_nodetypes(self, params):
        """Create node types for a graph."""
        # The params available are:
        # - name
        # - description
        return (self._api
                .graphs(self._slug)
                .types.nodes.post(params))

    def get_relationshiptypes(self):
        """Get relationship types for a graph."""
        return (self._api
                .graphs(self._slug)
                .types.relationships.get())

    def post_relationshiptypes(self, params):
        """Create relationship types for a graph."""
        # Required:
        # - source
        # - target
        # The params available are:
        # - name
        # - description
        return (self._api
                .graphs(self._slug)
                .types.relationships.post(params))

    def get_nodetype(self, nodetype_slug):
        """Get a single node type for a graph."""
        # Required:
        # - nodetype_slug
        return (self._api
                .graphs(self._slug)
                .types.nodes(nodetype_slug).get())

    def delete_nodetype(self, nodetype_slug):
        """Delete a single node type for a graph."""
        # Required:
        # - nodetype_slug
        return (self._api
                .graphs(self._slug)
                .types.nodes(nodetype_slug).delete())

    def get_nodetype_schema(self, nodetype_slug):
        """Get the schema for a node type."""
        # Required:
        # - nodetype_slug
        return (self._api
                .graphs(self._slug)
                .types.nodes(nodetype_slug)
                .schema.get())

    def put_nodetype_schema(self, nodetype_slug, params=None):
        """Modify the schema for a node type."""
        # Required:
        # - nodetype_slug
        # The params available are (omitted ones are removed):
        # - name
        # - description
        return (self._api
                .graphs(self._slug)
                .types.nodes(nodetype_slug)
                .schema.put(params))

    def patch_nodetype_schema(self, nodetype_slug, params=None):
        """Modify the schema for a node type."""
        # Required:
        # - nodetype_slug
        # The params available are (omitted ones aren't treated):
        # - name
        # - description
        return (self._api
                .graphs(self._slug)
                .types.nodes(nodetype_slug)
                .schema.patch(params))

    def get_nodetype_schema_properties(self, nodetype_slug):
        """Get the properties from a schema for a node type."""
        # Required:
        # - nodetype_slug
        return (self._api
                .graphs(self._slug)
                .types.nodes(nodetype_slug)
                .schema.properties.get())

    def post_nodetype_schema_properties(self, nodetype_slug, params=None):
        """Create a property from a schema for a node type"""
        # Required:
        # - nodetype_slug
        # The params available are:
        # - key
        # - description
        # - datatype
        # TODO: Use choices too
        return (self._api
                .graphs(self._slug)
                .types.nodes(nodetype_slug)
                .schema.properties.post(params))

    def get_relationshiptype(self, relationshiptype_slug):
        """Get a single relationship type for a graph."""
        # Required:
        # - relationshiptype_slug
        return (self._api
                .graphs(self._slug)
                .types.relationships(relationshiptype_slug).get())

    def delete_relationshiptype(self, relationshiptype_slug):
        """Delete a single relationship type for a graph."""
        # Required:
        # - relationshiptype_slug
        return (self._api
                .graphs(self._slug)
                .types.relationships(relationshiptype_slug).delete())

    def get_relationshiptype_schema(self, relationshiptype_slug):
        """Get the schema for a relationship type."""
        # Required:
        # - relationshiptype_slug
        return (self._api
                .graphs(self._slug)
                .types.relationships(relationshiptype_slug)
                .schema.get())

    def put_relationshiptype_schema(self, relationshiptype_slug, params=None):
        """Modify the schema for a relationship type."""
        # Required:
        # - relationshiptype_slug
        # The params available are (omitted ones are removed):
        # - name
        # - description
        return (self._api
                .graphs(self._slug)
                .types.relationships(relationshiptype_slug)
                .schema.put(params))

    def patch_relationshiptype_schema(self, relationshiptype_slug,
                                      params=None):
        """Modify the schema for a relationship type."""
        # Required:
        # - relationshiptype_slug
        # The params available are (omitted ones aren't treated):
        # - name
        # - description
        return (self._api
                .graphs(self._slug)
                .types.relationships(relationshiptype_slug)
                .schema.patch(params))

    def get_relationshiptype_schema_properties(self, relationshiptype_slug):
        """Get the properties from a schema for a relationship type."""
        # Required:
        # - relationshiptype_slug
        return (self._api
                .graphs(self._slug)
                .types.relationships(relationshiptype_slug)
                .schema.properties.get())

    def post_relationshiptype_schema_properties(self, relationshiptype_slug,
                                                params=None):
        """Create a property from a schema for a relationship type"""
        # Required:
        # - relationshiptype_slug
        # The params available are:
        # - key
        # - description
        # - datatype
        # TODO: Use choices too
        return (self._api
                .graphs(self._slug)
                .types.relationships(relationshiptype_slug)
                .schema.properties.post(params))

    # Data methods
    def get_nodes(self, nodetype_slug):
        """Get nodes for a node type."""
        # Required:
        # - nodetype_slug
        return (self._api
                .graphs(self._slug)
                .types.nodes(nodetype_slug)
                .nodes.get())

    def post_nodes(self, nodetype_slug, params=None):
        """Create nodes for a node type."""
        # Required:
        # - nodetype_slug
        # The params is a list of dict. Each dict is a node that will be
        # created. The properties of the nodes must be the same that the
        # properties in the schema for that type. Otherwise, the properties
        # are ignored.
        return (self._api
                .graphs(self._slug)
                .types.nodes(nodetype_slug)
                .nodes.post(params))

    def filter_nodes(self, nodetype_slug, limit=None, offset=None,
                     params=None):
        """Filtering over nodes for a node type using params."""
        # Required:
        # - nodetype_slug
        # The params available are:
        # - The properties and their values to filter.
        # NOTE(review): limit/offset are accepted but currently unused.
        return (self._api
                .graphs(self._slug)
                .types.nodes(nodetype_slug)
                .filter.post(params))

    def filter_nodes_get(self, nodetype_slug, limit=None, offset=None,
                         params=None):
        """Filtering over nodes for a node type using params."""
        # Required:
        # - nodetype_slug
        # The params available are:
        # - The properties and their values to filter.
        # NOTE(review): limit/offset are accepted but currently unused.
        return (self._api
                .graphs(self._slug)
                .types.nodes(nodetype_slug)
                .filter.get(**params))

    def get_node(self, nodetype_slug, node_id):
        """Get info for a single node from a node type."""
        # Required:
        # - nodetype_slug
        # - node id
        return (self._api
                .graphs(self._slug)
                .types.nodes(nodetype_slug)
                .nodes(node_id).get())

    def put_node(self, nodetype_slug, node_id, params=None):
        """Modify a single node from a node type."""
        # Required:
        # - nodetype_slug
        # - node id
        # The params available are (omitted ones are removed):
        # - The properties depending of the node type.
        return (self._api
                .graphs(self._slug)
                .types.nodes(nodetype_slug)
                .nodes(node_id).put(params))

    def patch_node(self, nodetype_slug, node_id, params=None):
        """Modify a single node from a node type."""
        # Required:
        # - nodetype_slug
        # - node id
        # The params available are (omitted ones aren't treated):
        # - The properties depending of the node type.
        return (self._api
                .graphs(self._slug)
                .types.nodes(nodetype_slug)
                .nodes(node_id).patch(params))

    def delete_node(self, nodetype_slug, node_id):
        """Remove a single node from a node type."""
        # Required:
        # - nodetype_slug
        # - node id
        return (self._api
                .graphs(self._slug)
                .types.nodes(nodetype_slug)
                .nodes(node_id).delete())

    def get_relationships(self, relationshiptype_slug):
        """Get relationships for a relationship type."""
        # Required:
        # - relationshiptype_slug
        return (self._api
                .graphs(self._slug)
                .types.relationships(relationshiptype_slug)
                .relationships.get())

    def post_relationships(self, relationshiptype_slug,
                           params=None):
        """Create relationships for a relationship type."""
        # Required:
        # - relationshiptype_slug
        # The params is a list of dict. Each dict is a relationship that
        # will be created. The properties of the relationships must be
        # the same that the properties in the schema for that type.
        # Otherwise, the properties are ignored.
        return (self._api
                .graphs(self._slug)
                .types.relationships(relationshiptype_slug)
                .relationships.post(params))

    def get_relationship(self, relationshiptype_slug,
                         relationship_id):
        """Get info for a single relationship from a relationship type."""
        # Required:
        # - relationshiptype_slug
        # - relationship id
        return (self._api
                .graphs(self._slug)
                .types.relationships(relationshiptype_slug)
                .relationships(relationship_id).get())

    def put_relationship(self, relationshiptype_slug,
                         relationship_id, params=None):
        """Modify a single relationship from a relationship type."""
        # Required:
        # - relationshiptype_slug
        # - relationship id
        # The params available are (omitted ones are removed):
        # - The properties depending of the relationship type.
        return (self._api
                .graphs(self._slug)
                .types.relationships(relationshiptype_slug)
                .relationships(relationship_id).put(params))

    def patch_relationship(self, relationshiptype_slug,
                           relationship_id, params=None):
        """Modify a single relationship from a relationship type."""
        # Required:
        # - relationshiptype_slug
        # - relationship id
        # The params available are (omitted ones aren't treated):
        # - The properties depending of the relationship type.
        return (self._api
                .graphs(self._slug)
                .types.relationships(relationshiptype_slug)
                .relationships(relationship_id).patch(params))

    def delete_relationship(self, relationshiptype_slug,
                            relationship_id):
        """Remove a single relationship from a relationship type."""
        # Required:
        # - relationshiptype_slug
        # - relationship id
        return (self._api
                .graphs(self._slug)
                .types.relationships(relationshiptype_slug)
                .relationships(relationship_id).delete())
| 24,882 | 7,037 |
import time
from logic.axis import Axis

# Azimuth axis of the mount
axis = Axis.azimuth()
# axis.motor.set_dist(209)
# axis.move_angle(degrees=-90)
# time.sleep(1)
# axis.move_angle(seconds=340)

from skyfield.api import load
from skyfield.api import Topos

ts = load.timescale()
planets = load('de421.bsp')
earth = planets["earth"]
moon = planets["moon"]
jupiter = planets["jupiter barycenter"]

stations_url = 'http://celestrak.com/NORAD/elements/stations.txt'
satellites = load.tle(stations_url)
satellite = satellites['ISS (ZARYA)']

# Body being tracked
target = jupiter
# target = earth + satellite
print(target)

# Observer location on Earth
here = earth + Topos('47.827435 N', '-0.397186 W')

# Track the target: once per second, compute its azimuth and slew the axis.
while True:
    now = ts.now()
    observation = here.at(now).observe(target)
    alt, az, distance = observation.apparent().altaz()
    # print(alt, az)
    deg, minute, sec = az.dms(warn=False)
    deg, minute, sec = int(deg), int(minute), int(sec)
    # deg, minute, sec = 90, 0, 0
    print(deg, minute, sec)
    axis.move_angle(degrees=deg, minutes=minute, seconds=sec)
    time.sleep(1)
| 940 | 426 |
# -*- coding: utf-8 -*-
import time
import sys
import pigpio as GPIO

# I2C address of the DFRobot MAX31855 thermocouple module
MAX31855_ADDR = 0x10
# Connection to the local pigpio daemon, shared by the driver class below
pi = GPIO.pi()
class DFRobot_MAX31855:
    """Driver for the DFRobot I2C MAX31855 thermocouple amplifier."""

    def __init__(self):
        # pigpio's i2c_open returns an integer handle used in later calls
        self.i2c = pi.i2c_open(1, MAX31855_ADDR)

    def readData(self):
        """Read the raw temperature bytes and the fault/status byte.

        Returns a tuple (high_byte, low_byte, status_byte).
        """
        # BUG FIX: i2c_open() returns a plain int handle, so the previous
        # `self.i2c.handle` raised AttributeError; pass the handle directly.
        a = pi.i2c_read_byte_data(self.i2c, 0x00)
        b = pi.i2c_read_byte_data(self.i2c, 0x01)
        # c = pi.i2c_read_byte_data(self.i2c, 0x02)
        d = pi.i2c_read_byte_data(self.i2c, 0x03)
        return a, b, d

    def readCelsius(self):
        """Return the thermocouple temperature in degrees Celsius.

        Returns False when any of the three fault bits is set in the
        status byte (kept for backward compatibility with callers that
        check for False).
        """
        a, b, d = self.readData()
        if d & 0x7:
            return False
        if a & 0x80:
            # Negative reading: undo the two's complement to get the
            # magnitude, then negate. Resolution is 0.25 C per LSB.
            a = 0xff - a
            b = 0xff - b
            return -((((a << 8) | b) >> 2) + 1) * 0.25
        return (((a << 8) | b) >> 2) * 0.25
| 717 | 374 |
import logging

# Destination file for all runtime log records
log_name = 'runtime.log'

# Module-level logger; inherits the root configuration installed below
LOGGER = logging.getLogger(__name__)

# Append UTF-8 records to the log file via the root logger
fh = logging.FileHandler(encoding='utf-8', mode='a', filename=log_name)
logging.basicConfig(handlers=[fh], format='[%(asctime)s %(levelname)s]<%(process)d> %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)
# --------------
# Code starts here
class_1 = ['Geoffrey Hinton', 'Andrew Ng', 'Sebastian Raschka', 'Yoshua Bengio']
class_2 = ['Hilary Mason', 'Carla Gentry', 'Corinna Cortes']

# Merge both classes into a single roster
new_class = class_1 + class_2
print(new_class)

# Enroll one more student
new_class = new_class + ['Peter Warden']
print(new_class)

# Drop a student (only one occurrence exists)
new_class = [name for name in new_class if name != 'Carla Gentry']
print(new_class)
# Code ends here

# --------------
# Code starts here
courses = {'Math': 65, 'English': 70, 'History': 80, 'French': 70, 'Science': 60}
print(courses)

# Overall marks and percentage out of 500
total = sum(courses.values())
print(total)

percentage = total / 500 * 100
print(percentage)
# Code ends here

# --------------
# Code starts here
mathematics = {'Geoffrey Hinton': 78, 'Andrew Ng': 95, 'Sebastian Raschka': 65,
               'Yoshua Benjo': 50, 'Hilary Mason': 70}
# Student with the highest mathematics score
topper = max(mathematics, key=mathematics.get)
print(topper)
# Code ends here

# --------------
# Given string
topper = 'andrew ng'
# Code starts here
# Swap to "last first" order and uppercase it for the certificate
first_name, last_name = topper.split()
full_name = ' '.join([last_name, first_name])
print(full_name)

certificate_name = full_name.upper()
print(certificate_name)
# Code ends here
| 1,060 | 410 |
'''
Recalculates paper discussion count
'''
from django.core.management.base import BaseCommand
from paper.models import Paper
class Command(BaseCommand):
    """Management command that recomputes every paper's discussion count."""

    def handle(self, *args, **options):
        count = Paper.objects.count()
        papers = Paper.objects.iterator()
        print('Recalculating paper discussion count')
        for i, paper in enumerate(papers):
            try:
                print(f'Paper: {paper.id} - {i + 1}/{count}')
                paper.discussion_count = paper.get_discussion_count()
                paper.save()
            except Exception as e:
                # Best-effort: one failing paper must not abort the run
                print(
                    f'Error updating discussion count for paper: {paper.id}', e
                )
        print('Finished recalculating paper discussion count')
| 812 | 210 |
"""
ccc05j5.py: Python solution to CCC '05 J5 (Bananas)
"""
# Create these sets to hold the known valid and invalid A-words to save time
# We already know that 'A' is a valid A-word and an empty string is always invalid
known_awords = set(['A'])
known_nonawords = set([''])


def is_aword(word):
    """Return True if `word` is an A-word: 'A', or 'B' + monkey word + 'S'."""
    # First try looking into the known sets (memoization)
    if word in known_awords:
        return True
    if word in known_nonawords:
        return False
    # Otherwise see if the word starts with a 'B' and ends with an 'S'.
    # Since 'A' is already handled, any valid word here has length >= 3.
    if len(word) < 3 or not (word[0] == 'B' and word[-1] == 'S'):
        known_nonawords.add(word)
        return False
    # The part between the 'B' and the 'S' must itself be a monkey word
    inner_word = word[1:-1]
    if is_monkey(inner_word):
        known_awords.add(word)
        return True
    known_nonawords.add(word)
    return False


# Create sets to hold known results like above
known_words = set()
known_nonwords = set([''])


def is_monkey(word):
    """Return True if `word` is a monkey word: an A-word, or two monkey
    words joined by an 'N'."""
    # Check known sets (memoization)
    if word in known_words:
        return True
    if word in known_nonwords:
        return False
    # A monkey word can be a plain A-word...
    if is_aword(word):
        known_words.add(word)
        return True
    # ...or two monkey words joined with an 'N': try every 'N' as the joint.
    # The original try/except IndexError here was dead code: slicing never
    # raises IndexError, and an 'N' at either end just yields an empty part,
    # which is rejected via known_nonwords.
    for i, c in enumerate(word):
        if c == 'N':
            left, right = word[0:i], word[i + 1:]
            if is_monkey(left) and is_monkey(right):
                known_words.add(word)
                return True
    # No valid split found: the word is not a monkey word
    known_nonwords.add(word)
    return False
# Read words from stdin until the sentinel 'X', reporting for each
# whether it is a monkey word.
while True:
    word = input()
    if word == 'X':
        break
    print("YES" if is_monkey(word) else "NO")
| 2,291 | 672 |
from unittest import TestCase, main
from aocfw import TestCaseMixin
from p1 import Solution
class SolutionTests(TestCase, TestCaseMixin):
    # Fixture wiring, presumably consumed by TestCaseMixin: the solution
    # class under test, the sample input file, and its expected answer.
    solution = Solution
    source = "sample.txt"
    given = 37

    def test_find_target(self):
        # Optimal alignment position for the sample input
        data = self.get_parsed_data()
        self.assertEqual(Solution().get_target(data), 2)

    def test_get_fuel_cost_2(self):
        # Fuel cost of moving everything to position 2 (the optimum)
        data = self.get_parsed_data()
        ans = Solution().get_fuel_cost(data, 2)
        self.assertEqual(ans, 37)

    def test_get_fuel_cost_1(self):
        data = self.get_parsed_data()
        ans = Solution().get_fuel_cost(data, 1)
        self.assertEqual(ans, 41)

    def test_get_fuel_cost_3(self):
        data = self.get_parsed_data()
        ans = Solution().get_fuel_cost(data, 3)
        self.assertEqual(ans, 39)

    def test_get_fuel_cost_10(self):
        data = self.get_parsed_data()
        ans = Solution().get_fuel_cost(data, 10)
        self.assertEqual(ans, 71)


if __name__ == "__main__":
    main()
| 1,004 | 372 |
#! /usr/bin/env python
import argparse
import yaml
from aiohttp import web, ClientSession, TCPConnector, BasicAuth
import async_timeout
# Command-line interface: configuration path plus HTTP bind address/port.
parser = argparse.ArgumentParser(description='Prometheus rabbitmq exporter.')
parser.add_argument('-c', '--config', dest='config', default='config.yml',
                    help='Path to configuration yaml-file. Default config.yml')
parser.add_argument('--host', dest='host', default='0.0.0.0',
                    help='HTTP server host. Default 0.0.0.0')
parser.add_argument('-p', '--port', dest='port', default=9125, type=int,
                    help='HTTP server port. Default 9125')
# Parsed at import time; the handlers below read this module-level namespace.
args = parser.parse_args()
def create_app() -> web.Application:
    """Build the aiohttp application with its two routes."""
    application = web.Application()
    application.router.add_get('/', index)
    application.router.add_get('/metrics', metrics)
    return application
def get_config() -> dict:
    """Load the exporter configuration from the YAML file given on the CLI.

    Returns the parsed configuration dictionary.
    """
    config_path = args.config
    with open(config_path) as f:
        # safe_load refuses arbitrary-object YAML tags (yaml.load without a
        # Loader is unsafe on untrusted files and deprecated since PyYAML 5.1)
        config_data = yaml.safe_load(f)
    return config_data
async def get_queues(target: dict) -> list:
    """Fetch queue names and message counts from one RabbitMQ management API.

    `target` must provide 'url', 'login' and 'password'. Returns a list of
    {'name', 'messages'} dicts, or [] on any error (best-effort).
    """
    try:
        queues = []
        target_url = target['url']
        auth = BasicAuth(login=target['login'], password=target['password'])
        # NOTE(review): TLS certificate verification is disabled here --
        # acceptable for self-signed internal endpoints, but worth confirming.
        connector = TCPConnector(verify_ssl=False)
        async with ClientSession(connector=connector) as session:
            url = target_url + '/api/queues'
            # Cap the whole request at 10 seconds
            with async_timeout.timeout(10):
                async with session.get(url, auth=auth) as response:
                    result = await response.json()
                    for item in result:
                        queues.append({
                            'name': item['name'],
                            'messages': item['messages']
                        })
                    return queues
    except Exception as ex:
        # Deliberate best-effort: a failing target must not break /metrics
        print(ex)
        return []
async def index(request):
    """Landing page with a link to the metrics endpoint."""
    body = '<h1>RabbitMQ exporter</h1><p><a href="/metrics">Metrics</a><p>'
    return web.Response(text=body, content_type='text/html')
async def metrics(request):
    """Render queue message counts for every configured target in
    Prometheus exposition format."""
    config = get_config()
    lines = [
        '# HELP rabbitmq_queues_messages Displays queue messages count\n',
        '# TYPE rabbitmq_queues_messages gauge\n',
    ]
    for target in config.get('targets', []):
        for queue in await get_queues(target=target):
            lines.append(
                'rabbitmq_queues_messages{target="%s",name="%s",queue="%s"} %s\n' % (
                    target['url'], target['name'], queue['name'], queue['messages']
                )
            )
    return web.Response(text=''.join(lines))
if __name__ == '__main__':
    # Serve until interrupted on the host/port from the CLI arguments
    app = create_app()
    web.run_app(app, host=args.host, port=args.port)
| 2,569 | 776 |
# X11 keysym values (as carried by RFB/VNC KeyEvent messages) for the
# non-printable keys used as keys of the scancode table below.
KEY_BackSpace = 0xff08
KEY_Tab = 0xff09
KEY_Return = 0xff0d
KEY_Escape = 0xff1b
KEY_Insert = 0xff63
KEY_Delete = 0xffff
KEY_Home = 0xff50
KEY_End = 0xff57
KEY_PageUp = 0xff55
KEY_PageDown = 0xff56
KEY_Left = 0xff51
KEY_Up = 0xff52
KEY_Right = 0xff53
KEY_Down = 0xff54
KEY_F1 = 0xffbe
KEY_F2 = 0xffbf
KEY_F3 = 0xffc0
KEY_F4 = 0xffc1
KEY_F5 = 0xffc2
KEY_F6 = 0xffc3
KEY_F7 = 0xffc4
KEY_F8 = 0xffc5
KEY_F9 = 0xffc6
KEY_F10 = 0xffc7
KEY_F11 = 0xffc8
KEY_F12 = 0xffc9
KEY_F13 = 0xFFCA
KEY_F14 = 0xFFCB
KEY_F15 = 0xFFCC
KEY_F16 = 0xFFCD
KEY_F17 = 0xFFCE
KEY_F18 = 0xFFCF
KEY_F19 = 0xFFD0
KEY_F20 = 0xFFD1
KEY_ShiftLeft = 0xffe1
KEY_ShiftRight = 0xffe2
KEY_ControlLeft = 0xffe3
KEY_ControlRight = 0xffe4
KEY_MetaLeft = 0xffe7
KEY_MetaRight = 0xffe8
KEY_AltLeft = 0xffe9
KEY_AltRight = 0xffea
KEY_Scroll_Lock = 0xFF14
KEY_Sys_Req = 0xFF15
KEY_Num_Lock = 0xFF7F
KEY_Caps_Lock = 0xFFE5
KEY_Pause = 0xFF13
KEY_Super_L = 0xFFEB
KEY_Super_R = 0xFFEC
KEY_Hyper_L = 0xFFED
KEY_Hyper_R = 0xFFEE
# Numeric keypad
KEY_KP_0 = 0xFFB0
KEY_KP_1 = 0xFFB1
KEY_KP_2 = 0xFFB2
KEY_KP_3 = 0xFFB3
KEY_KP_4 = 0xFFB4
KEY_KP_5 = 0xFFB5
KEY_KP_6 = 0xFFB6
KEY_KP_7 = 0xFFB7
KEY_KP_8 = 0xFFB8
KEY_KP_9 = 0xFFB9
KEY_KP_Enter = 0xFF8D
# Define a scancode lookup dictionary
# Each value is a pair of byte sequences: [press (make) codes, release
# (break) codes]; release codes follow the "make | 0x80" pattern of PC
# scancode set 1 (see e.g. KEY_Escape 0x01/0x81 and the KEY_Pause entry,
# whose release list is empty).
# Shifted characters prepend/append the left-shift make/break (0x2A/0xAA).
SCANCODES = {
    KEY_Escape: [[0x01], [0x81]],
    '1': [[0x02], [0x82]], '!': [[0x2A, 0x02], [0x82, 0xAA]],
    '2': [[0x03], [0x83]], '@': [[0x2A, 0x03], [0x83, 0xAA]],
    '3': [[0x04], [0x84]], '#': [[0x2A, 0x04], [0x84, 0xAA]],
    '4': [[0x05], [0x85]], '$': [[0x2A, 0x05], [0x85, 0xAA]],
    '5': [[0x06], [0x86]], '%': [[0x2A, 0x06], [0x86, 0xAA]],
    '6': [[0x07], [0x87]], '^': [[0x2A, 0x07], [0x87, 0xAA]],
    '7': [[0x08], [0x88]], '&': [[0x2A, 0x08], [0x88, 0xAA]],
    '8': [[0x09], [0x89]], '*': [[0x2A, 0x09], [0x89, 0xAA]],
    '9': [[0x0A], [0x8A]], '(': [[0x2A, 0x0A], [0x8A, 0xAA]],
    '0': [[0x0B], [0x8B]], ')': [[0x2A, 0x0B], [0x8B, 0xAA]],
    '-': [[0x0C], [0x8C]], '_': [[0x2A, 0x0C], [0x8C, 0xAA]],
    '=': [[0x0D], [0x8D]], '+': [[0x2A, 0x0D], [0x8D, 0xAA]],
    KEY_BackSpace: [[0x0E], [0x8E]],
    '\b': [[0x0E], [0x8E]],
    KEY_Tab: [[0x0F], [0x8F]],
    '\t': [[0x0F], [0x8F]],
    'q': [[0x10], [0x90]], 'Q': [[0x2A, 0x10], [0x90, 0xAA]],
    'w': [[0x11], [0x91]], 'W': [[0x2A, 0x11], [0x91, 0xAA]],
    'e': [[0x12], [0x92]], 'E': [[0x2A, 0x12], [0x92, 0xAA]],
    'r': [[0x13], [0x93]], 'R': [[0x2A, 0x13], [0x93, 0xAA]],
    't': [[0x14], [0x94]], 'T': [[0x2A, 0x14], [0x94, 0xAA]],
    'y': [[0x15], [0x95]], 'Y': [[0x2A, 0x15], [0x95, 0xAA]],
    'u': [[0x16], [0x96]], 'U': [[0x2A, 0x16], [0x96, 0xAA]],
    'i': [[0x17], [0x97]], 'I': [[0x2A, 0x17], [0x97, 0xAA]],
    'o': [[0x18], [0x98]], 'O': [[0x2A, 0x18], [0x98, 0xAA]],
    'p': [[0x19], [0x99]], 'P': [[0x2A, 0x19], [0x99, 0xAA]],
    '[': [[0x1A], [0x9A]], '{': [[0x2A, 0x1A], [0x9A, 0xAA]],
    ']': [[0x1B], [0x9B]], '}': [[0x2A, 0x1B], [0x9B, 0xAA]],
    KEY_Return: [[0x1C], [0x9C]],
    '\r': [[0x1C], [0x9C]],
    '\n': [[0x1C], [0x9C]],
    KEY_ControlLeft: [[0x1D], [0x9D]],
    'a': [[0x1E], [0x9E]], 'A': [[0x2A, 0x1E], [0x9E, 0xAA]],
    's': [[0x1F], [0x9F]], 'S': [[0x2A, 0x1F], [0x9F, 0xAA]],
    'd': [[0x20], [0xA0]], 'D': [[0x2A, 0x20], [0xA0, 0xAA]],
    'f': [[0x21], [0xA1]], 'F': [[0x2A, 0x21], [0xA1, 0xAA]],
    'g': [[0x22], [0xA2]], 'G': [[0x2A, 0x22], [0xA2, 0xAA]],
    'h': [[0x23], [0xA3]], 'H': [[0x2A, 0x23], [0xA3, 0xAA]],
    'j': [[0x24], [0xA4]], 'J': [[0x2A, 0x24], [0xA4, 0xAA]],
    'k': [[0x25], [0xA5]], 'K': [[0x2A, 0x25], [0xA5, 0xAA]],
    'l': [[0x26], [0xA6]], 'L': [[0x2A, 0x26], [0xA6, 0xAA]],
    ';': [[0x27], [0xA7]], ':': [[0x2A, 0x27], [0xA7, 0xAA]],
    '\'': [[0x28], [0xA8]], '\"': [[0x2A, 0x28], [0xA8, 0xAA]],
    '`': [[0x29], [0xA9]], '~': [[0x2A, 0x29], [0xA9, 0xAA]],
    KEY_ShiftLeft: [[0x2A], [0xAA]],
    '\\': [[0x2B], [0xAB]], '|': [[0x2A, 0x2B], [0xAB, 0xAA]],
    'z': [[0x2C], [0xAC]], 'Z': [[0x2A, 0x2C], [0xAC, 0xAA]],
    'x': [[0x2D], [0xAD]], 'X': [[0x2A, 0x2D], [0xAD, 0xAA]],
    'c': [[0x2E], [0xAE]], 'C': [[0x2A, 0x2E], [0xAE, 0xAA]],
    'v': [[0x2F], [0xAF]], 'V': [[0x2A, 0x2F], [0xAF, 0xAA]],
    'b': [[0x30], [0xB0]], 'B': [[0x2A, 0x30], [0xB0, 0xAA]],
    'n': [[0x31], [0xB1]], 'N': [[0x2A, 0x31], [0xB1, 0xAA]],
    'm': [[0x32], [0xB2]], 'M': [[0x2A, 0x32], [0xB2, 0xAA]],
    ',': [[0x33], [0xB3]], '<': [[0x2A, 0x33], [0xB3, 0xAA]],
    '.': [[0x34], [0xB4]], '>': [[0x2A, 0x34], [0xB4, 0xAA]],
    '/': [[0x35], [0xB5]], '?': [[0x2A, 0x35], [0xB5, 0xAA]],
    KEY_ShiftRight: [[0x36], [0xB6]],
    KEY_Sys_Req: [[0x37], [0xB7]],
    KEY_AltLeft: [[0x38], [0xB8]],
    #'SPACE': [[0x39], [0xB9]],
    ' ': [[0x39], [0xB9]],
    KEY_Caps_Lock: [[0x3A], [0xBA]],
    KEY_F1: [[0x3B], [0xBB]],
    KEY_F2: [[0x3C], [0xBC]],
    KEY_F3: [[0x3D], [0xBD]],
    KEY_F4: [[0x3E], [0xBE]],
    KEY_F5: [[0x3F], [0xBF]],
    KEY_F6: [[0x40], [0xC0]],
    KEY_F7: [[0x41], [0xC1]],
    KEY_F8: [[0x42], [0xC2]],
    KEY_F9: [[0x43], [0xC3]],
    KEY_F10: [[0x44], [0xC4]],
    KEY_F11: [[0x57], [0xD7]],
    KEY_F12: [[0x58], [0xD8]],
    KEY_Num_Lock: [[0x45], [0xC5]],
    KEY_Scroll_Lock: [[0x46], [0xC6]],
    KEY_Home: [[0x47], [0xC7]],
    KEY_Up: [[0x48], [0xC8]],
    KEY_PageUp: [[0x49], [0xC9]],
    #'MINUS': [[0x4A], [0xCA]],
    KEY_Left: [[0x4B], [0xCB]],
    #'CENTER': [[0x4C], [0xCC]],
    KEY_Right: [[0x4D], [0xCD]],
    #'PLUS': [[0x4E], [0xCE]],
    KEY_End: [[0x4F], [0xCF]],
    KEY_Down: [[0x50], [0xD0]],
    KEY_PageDown: [[0x51], [0xD1]],
    KEY_Insert: [[0x52], [0xD2]],
    KEY_Delete: [[0x53], [0xD3]],
    #'E_DIV': [[0xE0, 0x54], [0xE0, 0xD4]],
    #'E_ENTER': [[0xE0, 0x1C], [0xE0, 0x9C]],
    #'E_INS': [[0xE0, 0x52], [0xE0, 0xD2]],
    #'E_DEL': [[0xE0, 0x53], [0xE0, 0xD3]],
    #'E_HOME': [[0xE0, 0x47], [0xE0, 0xC7]],
    #'E_END': [[0xE0, 0x4F], [0xE0, 0xCF]],
    #'E_PGUP': [[0xE0, 0x49], [0xE0, 0xC9]],
    #'E_PGDN': [[0xE0, 0x51], [0xE0, 0xD1]],
    #'E_LEFT': [[0xE0, 0x4B], [0xE0, 0xCB]],
    #'E_RIGHT': [[0xE0, 0x4D], [0xE0, 0xCD]],
    #'E_UP': [[0xE0, 0x48], [0xE0, 0xC8]],
    #'E_DOWN': [[0xE0, 0x50], [0xE0, 0xD0]],
    # NOTE(review): the right-Alt/Ctrl prefixes use 0x0C/0xC0 rather than
    # the usual 0xE0 extended prefix -- confirm against the consumer.
    KEY_AltRight: [[0x0C, 0x38], [0xC0, 0xB8]],
    KEY_ControlRight: [[0x0C, 0x1D], [0xC0, 0x9D]],
    KEY_Super_L: [[0xE0, 0x5B], [0xE0, 0xDB]],
    KEY_Super_R: [[0xE0, 0x5C], [0xE0, 0xDC]],
    # No scan code for pause key released
    KEY_Pause: [[0xE1, 0x1D, 0x45, 0xE1, 0x9D, 0xC5], []],
}
def rfbToScancodes_real(key, up):
    """Map an RFB keysym to its scancode byte sequence.

    When `up` is truthy, return the key-release (break) codes, otherwise
    the key-press (make) codes. Returns None for unmapped keys.
    """
    if key <= 255:
        # Printable keysyms are looked up by character.
        # NOTE(review): case is folded, so the shifted table entries for
        # letters ('A', 'Q', ...) are unreachable via this path --
        # presumably the client sends Shift separately; confirm.
        key = chr(key)
        key = key.lower()
    if key in SCANCODES:
        codes = SCANCODES[key]
    else:
        return None
    # BUG FIX: the table stores [press_codes, release_codes] -- see the
    # KEY_Pause entry, whose release list is empty ("no scan code for pause
    # key released"), and the break = make | 0x80 pattern. The previous
    # version returned press codes on release and vice versa.
    if up:
        return codes[1]
    return codes[0]
def rfbToScancodes(key, up):
    """Debug wrapper around rfbToScancodes_real that also rebases codes."""
    ret = rfbToScancodes_real(key, up)
    # COMPAT FIX: the parenthesized print form is valid under both
    # Python 2 (where the bare `print` statement was used before) and 3.
    print("rfbToScancodes(%i, %s) = %s" % (key, str(up), str(ret)))
    if ret is not None:
        # NOTE(review): every scancode byte is shifted down by one here;
        # the consumer apparently expects rebased codes -- confirm, since
        # this does not match raw scancodes.
        ret = [code - 1 for code in ret]
    return ret
| 7,638 | 4,481 |
from app import db
from sqlalchemy.dialects.postgresql import UUID
import uuid
from app.utils.uuid_converter import str2uuid
class ShoppingItemModel(db.Model):
    """ORM model for a single item on a shopping list."""
    __tablename__ = 'shoppingItems'
    # NOTE(review): the column is UUID(as_uuid=True) but the default yields
    # a hex *string* (uuid.uuid4().hex), not a uuid.UUID -- confirm the
    # driver coerces this, or consider default=uuid.uuid4.
    id = db.Column(UUID(as_uuid=True), default=lambda: uuid.uuid4().hex)
    # Integer surrogate primary key; `id` above is a secondary identifier
    shoppingItemID = db.Column(db.Integer, primary_key=True)
    shoppingItem_name = db.Column(db.String(200))
    shoppingItem_quantity = db.Column(db.Integer)
    shoppingItem_done = db.Column(db.Boolean, default=False)
    shoppingList_id = db.Column(db.Integer, db.ForeignKey('shoppinglists.shoppinglistID'))

    def __init__(self, name, quantity):
        # `done` falls back to the column default; the list FK is set by
        # the relationship backref or the caller
        self.shoppingItem_name = name
        self.shoppingItem_quantity = quantity

    def json(self):
        """Return a JSON-serializable dict representation of the item."""
        return {
            "id": str(self.id),
            "shoppingItem_name": self.shoppingItem_name,
            "shoppingItem_quantity": self.shoppingItem_quantity,
            "shoppingItem_done": self.shoppingItem_done,
            "shoppingList_id": self.shoppingList_id
        }

    def save(self):
        """Persist this item in its own transaction."""
        db.session.add(self)
        db.session.commit()

    def delete(self):
        """Remove this item in its own transaction."""
        db.session.delete(self)
        db.session.commit()
class ShoppinglistModel(db.Model):
    """ORM model for a shopping list owned by a user."""
    __tablename__ = 'shoppinglists'
    # NOTE(review): same hex-string default on a UUID(as_uuid=True) column
    # as in ShoppingItemModel -- confirm or use uuid.uuid4.
    id = db.Column(UUID(as_uuid=True), default=lambda: uuid.uuid4().hex)
    # Integer surrogate primary key; `id` above is a secondary identifier
    shoppinglistID = db.Column(db.Integer, primary_key=True)
    shoppinglist_name = db.Column(db.String(120))
    shoppinglist_done = db.Column(db.Boolean, default=False)
    # Eagerly loaded items (lazy=False) so json() never triggers extra queries
    shoppingItems = db.relationship('ShoppingItemModel', backref='ShoppinglistModel', lazy=False)
    user_id = db.Column(db.Integer, db.ForeignKey('users.userID'))

    def __init__(self, name, done, user_id):
        self.shoppinglist_name = name
        self.shoppinglist_done = done
        self.user_id = user_id

    def json(self):
        """Return a JSON-serializable dict including the nested items."""
        return {
            "id": str(self.id),
            "shoppinglist_name": self.shoppinglist_name,
            "shoppinglist_done": self.shoppinglist_done,
            "shoppingItems": [ item.json() for item in self.shoppingItems ]
        }

    def save(self):
        """Persist this list in its own transaction."""
        db.session.add(self)
        db.session.commit()

    def delete(self):
        """Remove this list in its own transaction."""
        db.session.delete(self)
        db.session.commit()

    @classmethod
    def get_all(cls):
        """All shopping lists."""
        return cls.query.all()

    @classmethod
    def get_by_id(cls, id):
        """The list with the given UUID `id`, or None."""
        return cls.query.filter_by(id=id).first()

    @classmethod
    def get_all_by_user(cls, user_id):
        """All lists owned by `user_id`."""
        return cls.query.filter_by(user_id=user_id).all()
| 2,235 | 901 |
# Public names re-exported by this errors package.
# NOTE(review): 'WeekPwd' (also imported below) looks like a typo for
# "WeakPwd", but it matches the imported name, so it is kept as-is.
__all__ = [
    'AccessDenied', 'AccountDeactivated', 'BaseError', 'DoesNotMeetRequirements',
    'DuplicatedRecord', 'FileExist', 'FileVerConflict', 'InvalidAccessToken', 'InvalidCredential',
    'InvalidRefreshToken', 'InvalidReply', 'InvalidSource', 'NotQualifiedToPick',
    'PermissionDenied', 'PitStatusLocked', 'ProjMetaLocked', 'RecordNotFound', 'RoleIsTaken',
    'WeekPwd', 'register_error_handlers'
]
from .auth_fail import (AccessDenied, AccountDeactivated, InvalidAccessToken, InvalidCredential,
InvalidRefreshToken, PermissionDenied)
from .bad_opt import InvalidReply, InvalidSource, PitStatusLocked, ProjMetaLocked, RoleIsTaken
from .base_error import BaseError
from .data_conflict import DuplicatedRecord, FileExist, FileVerConflict, RecordNotFound
from .quality_control import DoesNotMeetRequirements, NotQualifiedToPick, WeekPwd
def register_error_handlers(app):
    """Install the package-wide error handler on *app* and set up Sentry.

    Every BaseError subclass raised during a request is rendered as a JSON
    response using the error's own HTTP status code. Sentry reporting is
    skipped when the app config sets IGNORE_ERRORS (e.g. during tests).
    """
    from flask_json import json_response

    # - - - - - - - - - - - - - - - - - - - - - - -
    @app.errorhandler(BaseError)
    def handle_olea_exceptions(e: BaseError):
        # The error instance itself is passed as the JSON payload (data_=e).
        return json_response(status_=e.http_code, data_=e)

    # - - - - - - - - - - - - - - - - - - - - - - -
    from sentry_sdk import init as sentry_init
    from sentry_sdk.integrations import flask, redis, sqlalchemy
    if not app.config.get('IGNORE_ERRORS', False):
        sentry_init(dsn=app.config['SENTRY_DSN'],
                    integrations=[
                        flask.FlaskIntegration(),
                        sqlalchemy.SqlalchemyIntegration(),
                        redis.RedisIntegration(),
                    ],
                    traces_sample_rate=0.2)
| 1,705 | 543 |
# -*- coding: utf-8 -*-
import sys
import dotenv

# Print interpreter and platform info at console startup.
print("Python %s on %s" % (sys.version, sys.platform))
# NOTE(review): WORKING_DIR_AND_PYTHON_PATHS and PROJECT_ROOT look like
# IDE-substituted placeholders (PyCharm console template), not Python names
# defined in this file — this script is not runnable as-is; confirm.
sys.path.extend([WORKING_DIR_AND_PYTHON_PATHS])
# Load environment variables for the local deployment; assumes PROJECT_ROOT
# ends with a path separator — TODO confirm.
dotenv.load_dotenv(dotenv_path=PROJECT_ROOT + "scripts/deploy/local/environ.sh")
| 255 | 121 |
#!/usr/bin/env python3
from lampi.lampi_app import LampiApp

# Entry point: instantiate and run the Lampi (Kivy-style) application.
if __name__ == "__main__":
    LampiApp().run()
| 110 | 47 |
from .rep_utils import RepEngine
| 33 | 10 |
import subprocess as sp
import shlex
import os
import argparse
import glob
import sys
# Extensions the script operates on: .m2ts (index 0) is the demux source for
# WAV extraction; the whole tuple is matched for FLAC/AAC encoding.
extensionsTuple = (".m2ts", ".wav", ".flac")
def wavEncode(filePath):
    """Convert/demux *filePath* to WAV next to the source file using eac3to.

    Passes the argument vector directly to subprocess instead of
    shlex.split() on a quoted f-string: shlex in POSIX mode treats the
    backslashes in Windows paths as escapes and mangles them.
    """
    base = os.path.splitext(filePath)[0]
    sp.run(["eac3to", filePath, "-log=NUL", f"{base}.wav"])
def wavEncode2(filePath, trackNumber):
    """Extract a single numbered track from *filePath* to WAV using eac3to.

    Uses an argument list (not shlex.split on a quoted f-string) so Windows
    backslash paths survive intact; the quotes in the old command only
    existed to survive shlex and are unnecessary in an argv element.
    """
    base = os.path.splitext(filePath)[0]
    sp.run(["eac3to", filePath, "-log=NUL", f"{trackNumber}:{base}_Track{trackNumber}.wav"])
def flacEncode(filePath):
    """Encode *filePath* to FLAC next to the source file using eac3to.

    Argument list instead of shlex.split: POSIX-mode shlex mangles Windows
    backslash paths.
    """
    base = os.path.splitext(filePath)[0]
    sp.run(["eac3to", filePath, "-log=NUL", f"{base}.flac"])
def aacEncode(filePath):
    """Encode *filePath* to AAC (.m4a) via an intermediate WAV.

    ffmpeg decodes the source to WAV, qaac encodes the WAV to .m4a, then the
    temporary WAV is removed. Argument lists (instead of shlex.split on
    quoted f-strings) keep Windows backslash paths intact.
    """
    base = os.path.splitext(filePath)[0]
    sp.run(["ffmpeg", "-i", filePath, "-loglevel", "panic", f"{base}.wav"])
    sp.run(["qaac", f"{base}.wav", "-V", "127", "--no-delay", "-o", f"{base}.m4a"])
    # Clean up the intermediate WAV if ffmpeg produced one.
    if os.path.exists(f"{base}.wav"):
        os.remove(f"{base}.wav")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-R", "--recursive", action="store_true", default=False, help="Check files recursively if your path is a folder.")
    parser.add_argument("-W", "--wav", action="store_true", default=False, help="Encode a PCM file, use this with .m2ts files.")
    parser.add_argument("-T", "--track", action="store", type=int, default=False, help="Track number to encode.")
    parser.add_argument("-F", "--flac", action="store_true", default=False, help="Enable FLAC encoding.")
    parser.add_argument("-A", "--aac", action="store_true", default=False, help="Enable AAC encoding.")
    parser.add_argument("path", metavar="path", type=str, nargs="?", help="Path of the file/folder you want to use")
    args = parser.parse_args()

    # BUG FIX: identity comparison (`is None`) instead of `== None`.
    if args.path is None:
        print(f"[WARNING] Usage: python {sys.argv[0]} -ARG/--arg path\n[INFO] Setting path to the current directory.")
        args.path = os.getcwd()

    def collect_files(path, recursive):
        """Return candidate entries under *path*, descending when requested.

        Factors out the glob logic that was previously copy-pasted into all
        three encoding branches.
        """
        pattern = f"{path}/**/*" if recursive else f"{path}/*"
        return glob.glob(pattern, recursive=recursive)

    if args.wav:
        if os.path.isfile(args.path):
            if args.track:
                wavEncode2(args.path, args.track)
            else:
                wavEncode(args.path)
        else:
            # Only .m2ts containers (extensionsTuple[0]) are demuxed to WAV.
            for audioFile in collect_files(args.path, args.recursive):
                if audioFile.endswith(extensionsTuple[0]):
                    if args.track:
                        wavEncode2(audioFile, args.track)
                    else:
                        wavEncode(audioFile)
    if args.flac:
        if os.path.isfile(args.path):
            flacEncode(args.path)
        else:
            for audioFile in collect_files(args.path, args.recursive):
                if audioFile.endswith(extensionsTuple):
                    flacEncode(audioFile)
    if args.aac:
        if os.path.isfile(args.path):
            aacEncode(args.path)
        else:
            for audioFile in collect_files(args.path, args.recursive):
                if audioFile.endswith(extensionsTuple):
                    aacEncode(audioFile)
| 3,592 | 1,218 |
import tensorflow as tf
import wandb
import yaml
import subprocess
from augmentation.utilities.visualize import gallery
from augmentation.utilities.wandb import *
from augmentation.utilities.checkpoint import load_tf_optimizer_state
def rewrite_config_for_resumption(config):
    """Record the current wandb run in *config*, persist it, and git-push it.

    Stores the current entity/project/run-id as `prev_*` fields and sets
    `resume = True` so a later invocation with this config resumes the run.
    Returns the mutated config.
    """
    config.prev_wandb_entity = config.wandb_entity
    config.prev_wandb_project = config.wandb_project
    config.prev_wandb_run_id = wandb.run.id
    config.resume = True
    # BUG FIX: the file handle from open(...) was never closed; the context
    # manager guarantees the YAML is flushed to disk before git runs below.
    with open(config._config_path, 'w') as config_file:
        yaml.dump(config.__dict__, config_file)
    # Push the change for this config
    for cmd in [['git', 'add', config._config_path],
                ['git', 'commit', '-m', f'cfg_update_{wandb.run.id}'],
                ['git', 'pull'],
                ['git', 'push']]:
        subprocess.run(cmd)
    return config
def reload_run(model,
               optimizer,
               robust_loss_calc,
               wandb_run_id,
               wandb_project,
               wandb_entity,
               wandb_ckpt_path,
               resume_epoch=-1,
               continue_training=True):
    """Restore model/optimizer/GDRO state from a previous wandb run.

    Returns (start_epoch, start_step) to resume training from; both remain 0
    when no checkpoint was reloaded or continue_training is False.
    """
    # By default, we start at the beginning
    start_epoch, start_step = 0, 0
    # Load up the previous run
    prev_run = load_wandb_run(wandb_run_id, wandb_project, wandb_entity)
    # Extracts the epoch from checkpoint filenames shaped like "..._<epoch>.<ext>".
    step_extractor = particular_checkpoint_step_extractor(resume_epoch,
                                                          lambda fname: fname.split(".")[-2].split("_")[-1])
    # If the previous run crashed, wandb_ckpt_path should be '': this is the typical use case
    # but this should be changed in the future
    _, loaded_epoch = load_most_recent_keras_model_weights(model, prev_run,
                                                           model_name='ckpt',
                                                           exclude='generator',
                                                           step_extractor=step_extractor,
                                                           wandb_ckpt_path=wandb_ckpt_path)
    # If we're continuing training AND if we reloaded a model
    # - load up the optimizer and DRO state
    # - set the start epoch and start step
    if continue_training and loaded_epoch is not None:
        start_epoch = loaded_epoch
        # Recover the global step recorded for that epoch from the run history.
        for line in prev_run.history():
            if 'epochs' in line and line['epochs'] == start_epoch:
                start_step = line['train_step/step']
                break
        # Reloading the optimizer states from that epoch
        opt_ckpt = get_most_recent_model_file(prev_run,
                                              wandb_ckpt_path=wandb_ckpt_path,
                                              model_name='optimizer',
                                              step_extractor=particular_checkpoint_step_extractor(start_epoch))
        load_tf_optimizer_state(optimizer, opt_ckpt.name)
        # Reloading the state of GDRO from that epoch
        gdro_ckpt = get_most_recent_model_file(prev_run,
                                               wandb_ckpt_path=wandb_ckpt_path,
                                               model_name='gdro',
                                               step_extractor=particular_checkpoint_step_extractor(start_epoch))
        # NOTE(review): `np` is not imported by name in this module; presumably it
        # arrives via `from augmentation.utilities.wandb import *` — confirm.
        robust_loss_calc._adv_prob_logits = tf.convert_to_tensor(np.load(gdro_ckpt.name))
    print(f"Loaded epoch {loaded_epoch} from {wandb_run_id}. Starting from step {start_step} and epoch {start_epoch}.",
          flush=True)
    return start_epoch, start_step
def log_robust_train_step_to_wandb(group_aliases, group_batches, group_targets, group_predictions, group_losses,
                                   robust_loss, consistency_loss, consistency_penalty_weight,
                                   irm_losses, irm_penalty_weight,
                                   gradients, model, optimizer,
                                   robust_loss_calc, step, log_images=False, log_weights_and_grads=False):
    """Log one robust-training step to wandb: per-group stats, optional images
    and weights/gradients, GDRO adversarial probabilities, and global scalars."""
    # Loop over the data from each group
    # for i, (batch, targets, predictions, loss) in enumerate(zip(group_batches, group_targets,
    for (alias, batch, targets, predictions, loss, irm) in zip(group_aliases, group_batches, group_targets,
                                                               group_predictions, group_losses, irm_losses):
        # Log data generated in this train step
        wandb.log({f'train_step/{alias}/targets': targets.numpy(),
                   f'train_step/{alias}/predictions': wandb.Histogram(predictions.numpy()),
                   f'train_step/{alias}/argmax_predictions': tf.argmax(predictions, axis=-1).numpy(),
                   f'train_step/{alias}/loss': loss.numpy(),
                   f'train_step/{alias}/irm': irm.numpy()},
                  step=step)
        # Optionally, log the minibatch of images
        if log_images:
            wandb.log({f'train_step/{alias}/images': wandb.Image(gallery(batch.numpy()))}, step=step)
    # Log all the gradients and weights: every 50 steps
    if log_weights_and_grads:
        wandb.log({f'gradients/{v.name}': g.numpy() for v, g in zip(model.trainable_variables, gradients)}, step=step)
        wandb.log({f'weights/{v.name}': v.numpy() for v in model.trainable_variables}, step=step)
    # One scalar per group: the GDRO adversarial (softmax) probability.
    for prob, alias in zip(tf.nn.softmax(robust_loss_calc._adv_prob_logits, axis=-1).numpy().reshape(-1),
                           robust_loss_calc._aliases):
        wandb.log({f'train_step/gdro_adv_prob.{alias}': prob}, step=step)
    # Global training scalars for this step.
    wandb.log({'train_step/irm_penalty_weight': irm_penalty_weight,
               'train_step/consistency_penalty_weight': consistency_penalty_weight,
               # 'train_step/gdro_adv_probs': tf.nn.softmax(robust_loss_calc._adv_prob_logits, axis=-1).numpy(),
               'train_step/robust_loss': robust_loss.numpy(),
               'train_step/consistency_loss': consistency_loss.numpy(),
               'train_step/global_gradient_norm': tf.linalg.global_norm(gradients).numpy(),
               'train_step/learning_rate': optimizer._decayed_lr(tf.float32).numpy(),
               'train_step/step': step}, step=step)
def consistency_penalty(predictions_orig, predictions_1, predictions_2, consistency_type, scale=1.0):
    """Compute a consistency penalty between original and augmented predictions.

    Supported consistency_type values: 'camel', 'triplet-js', 'kl',
    'reverse-kl', 'none'. Returns a scalar tensor; raises ValueError for an
    unknown type.
    """
    # CAMEL consistency: JS-Divergence of augmentations, plus KL between original and average augmentation
    if consistency_type == 'camel':
        avg_predictions = (predictions_1 + predictions_2) / 2.0
        return tf.reduce_mean((tf.keras.losses.KLD(predictions_orig, avg_predictions) * 0.5 +
                               tf.keras.losses.KLD(predictions_1, avg_predictions) * 0.25 +
                               tf.keras.losses.KLD(predictions_2, avg_predictions) * 0.25)) * scale
    # JS-Divergence between original and both augmentations (as in AugMix)
    elif consistency_type == 'triplet-js':
        avg_predictions = (predictions_orig + predictions_1 + predictions_2) / 3.0
        return tf.reduce_mean((tf.keras.losses.KLD(predictions_orig, avg_predictions) +
                               tf.keras.losses.KLD(predictions_1, avg_predictions) +
                               tf.keras.losses.KLD(predictions_2, avg_predictions)) / 3.0) * scale
    # KL divergence between original and each augmentation
    elif consistency_type == 'kl':
        return tf.reduce_mean((tf.keras.losses.KLD(predictions_orig, predictions_1) +
                               tf.keras.losses.KLD(predictions_orig, predictions_2)) * scale * 0.5)
    elif consistency_type == 'reverse-kl':
        return tf.reduce_mean((tf.keras.losses.KLD(predictions_1, predictions_orig) +
                               tf.keras.losses.KLD(predictions_2, predictions_orig)) * scale * 0.5)
    elif consistency_type == 'none':
        return tf.convert_to_tensor(0.)
    else:
        # BUG FIX: `assert False` is stripped under `python -O`, silently
        # letting bad configs fall through; raise unconditionally instead.
        raise ValueError(f'consistency_type {consistency_type} not supported')
def irm_penalty_explicit(targets, pred_logits, penalty_weight):
    """ Computes the IRM penalty grad_{w} |_{w=1.0} crossent(targets, w*logits) explicitly """
    # Skip the computation entirely when IRM is disabled.
    if penalty_weight == 0.:
        return tf.convert_to_tensor(0.)
    xent = tf.keras.losses.sparse_categorical_crossentropy(targets, pred_logits, from_logits=True)
    sparse_logit = xent + tf.reduce_logsumexp(pred_logits,
                                              axis=-1)  # equivalent to grabbing the logit indexed by target
    # Closed-form gradient at w=1: target logit minus softmax-expected logit;
    # the penalty is its squared magnitude, summed and scaled.
    grad = sparse_logit - tf.reduce_sum(pred_logits * tf.nn.softmax(pred_logits, axis=-1), axis=-1)
    return tf.reduce_sum(grad ** 2) * penalty_weight
def irm_penalty_gradient(targets, pred_logits, penalty_weight, tape):
    """ Computes IRM penalty as formulated in the paper
    Currently does not work: tf does not support second order gradients of cross entropy
    """
    if penalty_weight == 0.:
        # CONSISTENCY FIX: return a tensor (like irm_penalty_explicit) rather
        # than a Python float, so callers always get a uniform return type.
        return tf.convert_to_tensor(0.)
    # Taken from https://github.com/facebookresearch/InvariantRiskMinimization/blob/6aad47e689913b9bdad05880833530a5edac389e/code/colored_mnist/main.py#L107
    scale = tf.convert_to_tensor(1.)
    tape.watch(scale)
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)(targets, pred_logits * scale)
    grad = tape.gradient(loss, scale)
    return tf.reduce_sum(grad ** 2) * penalty_weight
def consistency_penalty_scheduler(step, n_anneal_steps, base_penalty_weight):
    """Step-function schedule for the consistency penalty.

    Returns 0 while annealing (step < n_anneal_steps) and the full
    base_penalty_weight afterwards; a zero base weight disables it entirely.
    """
    if base_penalty_weight == 0:
        return 0.
    return base_penalty_weight if step >= n_anneal_steps else 0.0
def irm_penalty_scheduler(step, n_anneal_steps=100, base_penalty_weight=10000.):
    """
    Schedule the IRM penalty weight using a step function as done by
    https://github.com/facebookresearch/InvariantRiskMinimization
    If the penalty weight is 0. (IRM disabled), just return 0.
    """
    if base_penalty_weight == 0.:
        return 0.
    # No IRM during the anneal phase; full weight afterwards.
    return base_penalty_weight if step >= n_anneal_steps else 0.0
def irm_loss_rescale(total_loss, irm_penalty_weight):
    """
    Rescale the total loss by the IRM penalty weight as done by
    https://github.com/facebookresearch/InvariantRiskMinimization
    """
    # Keeps gradient magnitudes stable once the (large) IRM weight kicks in.
    return total_loss / irm_penalty_weight if irm_penalty_weight > 1.0 else total_loss
class GDROLoss:
    """Group-DRO loss: reweights per-group losses by adversarial probabilities
    that are updated multiplicatively (exponentiated-gradient style) from the
    adjusted group losses, normalized within each superclass."""
    def __init__(self, group_aliases, group_counts, superclass_ids, adj_coef, step_size):
        """
        group_counts: list of integer sizes of the groups
        adj_coef: scalar coefficient of the generalization gap penalty
        step_size: robust learning rate for the "mixture of expert" probabilities
        """
        assert len(group_aliases) == len(group_counts) == len(superclass_ids)
        group_counts = tf.cast(tf.stack(group_counts), tf.float32)
        print(f"GDROLoss: Group counts {group_counts}")
        # Per-group generalization-gap adjustment: adj_coef / sqrt(group size).
        self._adj = adj_coef * 1. / tf.math.sqrt(group_counts)
        print("adj_coef", adj_coef)
        print("total adjustment", self._adj)
        self._step_size = step_size
        self._adv_probs = tf.ones(len(group_counts)) / len(group_counts)
        # _adv_prob_logits must exist, being logged by wandb now
        self._adv_prob_logits = tf.zeros_like(group_counts)
        self._aliases = group_aliases
        # For now, assume superclass_ids are 0, 1, -1
        # Map each superclass id to the indices of the groups belonging to it.
        superclass_idxs_ = {}
        for i in set(superclass_ids):
            superclass_idxs_[i] = [idx for idx, j in enumerate(superclass_ids) if j == i]
        superclass_freqs_ = {i: len(idxs) / len(group_aliases) for i, idxs in superclass_idxs_.items()}
        self.superclass_idxs = superclass_idxs_.values()
        self.superclass_freqs = superclass_freqs_.values()
        print("GDROLoss: superclass indices, freqs", self.superclass_idxs, self.superclass_freqs)

    def compute_loss(self, losses):
        """ losses: list of losses (scalars) """
        if len(losses) == 0: return tf.convert_to_tensor(0.0)
        # Adjusted losses both drive the logit update and enter the weighted sum.
        losses = tf.stack(losses, axis=-1) + self._adj
        # Exponentiated-gradient update of the adversarial logits (stateful).
        self._adv_prob_logits += self._step_size * losses
        loss = tf.convert_to_tensor(0.)
        # Softmax-normalize within each superclass; weight by superclass frequency.
        for idxs, freq in zip(self.superclass_idxs, self.superclass_freqs):
            adv_probs = tf.nn.softmax(tf.gather(self._adv_prob_logits, idxs), axis=-1)
            loss = loss + tf.reduce_sum(adv_probs * tf.gather(losses, idxs), axis=-1) * freq
        return loss
| 12,285 | 3,884 |
import os
import parser
import pandas as pd
def file_converter(filename, expected_format):
    """Given a file returns a converted file to a preferred format.

    *expected_format* is a pandas DataFrame writer name (e.g. "to_json").
    Every pandas ``read_*`` reader is tried in turn until one parses the
    file; the converted text is written to "converted filename N.<ext>" in
    the current directory, where N is the first unused index.
    """
    # All pandas readers; the [1:] below skips read_clipboard, which ignores
    # the filename argument and could "succeed" on unrelated clipboard data.
    read_methods = [method for method in dir(pd) if method[:4] == 'read']
    out_ext = expected_format.replace("to_", "")
    # First unused output index, so previous conversions are not overwritten.
    i = 0
    while os.path.exists("converted filename {}.{}".format(i, out_ext)):
        i += 1
    try:
        for method in read_methods[1:]:
            try:
                df = getattr(pd, method)(filename)
                df_converted = getattr(pd.DataFrame, expected_format)(df)
                if df_converted:
                    with open("converted filename {}.{}".format(i, out_ext), 'w') as converted_file:
                        converted_file.write(df_converted)
                    break
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; Exception is the right net here.
            except Exception:
                continue
    except ValueError:
        print("This format can't be converted.")
if __name__ == "__main__":
    # NOTE(review): `parser` must be a sibling module providing
    # arguments_parser(); the stdlib `parser` module has no such function —
    # confirm a local parser.py exists on the import path.
    args = parser.arguments_parser()
    file_converter(args.filename, args.expectedformat)
| 1,046 | 294 |
"""
Runs AuTuMN apps
You can access this script from your CLI by running:
python -m apps --help
"""
import os
import click
from . import covid_19, marshall_islands, mongolia, sir_example
from .marshall_islands.calibration import run_calibration_chain as run_rmi_calibration_chain
from .mongolia.calibration import run_calibration_chain as run_mongolia_calibration_chain
from .covid_19.calibration.victoria import (
run_vic_calibration_chain as run_victoria_covid_calibration_chain,
)
from .covid_19.calibration.malaysia import (
run_mys_calibration_chain as run_malaysia_covid_calibration_chain,
)
from .covid_19.calibration.philippines import (
run_phl_calibration_chain as run_philippines_covid_calibration_chain,
)
from autumn.db.models import create_power_bi_outputs, collate_outputs_powerbi
from autumn.plots.database_plots import plot_from_database
# Root click group; `run`, `calibrate` and `db` are attached at module bottom.
@click.group()
def cli():
    """AuTuMN CLI"""
# Subgroup for database-related commands (plot / powerbi / powerbi-collate).
@click.group()
def db():
    """Database utilities"""
@db.command("plot")
@click.argument("model_run_path", type=str)
def plot_database(model_run_path):
    """Re-plot data from a model run folder"""
    plot_from_database(model_run_path)
@db.command("powerbi")
@click.argument("src_db_path", type=str)
@click.argument("dest_db_path", type=str)
def powerbi_convert(src_db_path, dest_db_path):
    """Convert model outputs into PowerBI format"""
    assert os.path.isfile(src_db_path), f"{src_db_path} must be a file"
    create_power_bi_outputs(src_db_path, dest_db_path)
@db.command("powerbi-collate")
@click.argument("src_db_dir", type=str)
@click.argument("dest_db_path", type=str)
@click.argument("max_size_mb", type=int)
def powerbi_collate(src_db_dir, dest_db_path, max_size_mb):
    """Collate MCMC databases and then convert model outputs into PowerBI format"""
    assert os.path.isdir(src_db_dir), f"{src_db_dir} must be a folder"
    # Every .db file directly inside the source folder is collated.
    src_db_paths = [
        os.path.join(src_db_dir, fname) for fname in os.listdir(src_db_dir) if fname.endswith(".db")
    ]
    collate_outputs_powerbi(src_db_paths, dest_db_path, max_size_mb)
# Subgroup for commands that run a model forward.
@click.group()
def run():
    """Run a model"""
@run.command("covid")
@click.argument("country", type=click.Choice(covid_19.COUNTRY_RUNNERS))
def run_covid(country):
    """Run the COVID model for some country"""
    # Country name doubles as the runner attribute on the covid_19 package.
    runner = getattr(covid_19, country)
    runner.run_model()
@run.command("sir_example")
@click.argument("country", type=click.Choice(sir_example.COUNTRY_RUNNERS))
def run_sir_example(country):
    """Run the SIR model for some country"""
    # Country name doubles as the runner attribute on the sir_example package.
    runner = getattr(sir_example, country)
    runner.run_model()
@run.command("rmi")
def run_rmi():
    """Run the Marshall Islands TB model"""
    marshall_islands.run_model()
@run.command("mongolia")
def run_mongolia():
    """Run the Mongolia TB model"""
    mongolia.run_model()
# Subgroup for MCMC calibration commands; each takes a time budget and run id.
@click.group()
def calibrate():
    """
    Calibrate a model
    """
@calibrate.command("rmi")
@click.argument("max_seconds", type=int)
@click.argument("run_id", type=int)
def rmi_calibration(max_seconds, run_id):
    """Run Marshall Islands model calibration."""
    # CONSISTENCY: use the alias imported at module top (same function as
    # marshall_islands.calibration.run_calibration_chain), matching how the
    # victoria/malaysia/philippines commands call their imported aliases.
    run_rmi_calibration_chain(max_seconds, run_id)
@calibrate.command("mongolia")
@click.argument("max_seconds", type=int)
@click.argument("run_id", type=int)
def mongolia_calibration(max_seconds, run_id):
    """Run Mongolia model calibration."""
    # CONSISTENCY: use the alias imported at module top (same function as
    # mongolia.calibration.run_calibration_chain), matching the other
    # calibration commands.
    run_mongolia_calibration_chain(max_seconds, run_id)
@calibrate.command("victoria")
@click.argument("max_seconds", type=int)
@click.argument("run_id", type=int)
def victoria_calibration(max_seconds, run_id):
    """Run Victoria COVID model calibration."""
    run_victoria_covid_calibration_chain(max_seconds, run_id)
@calibrate.command("malaysia")
@click.argument("max_seconds", type=int)
@click.argument("run_id", type=int)
def malaysia_calibration(max_seconds, run_id):
    """Run Malaysia COVID model calibration."""
    run_malaysia_covid_calibration_chain(max_seconds, run_id)
@calibrate.command("philippines")
@click.argument("max_seconds", type=int)
@click.argument("run_id", type=int)
def philippines_calibration(max_seconds, run_id):
    # BUG FIX: docstring previously said "Malaysia" (copy-paste error); it is
    # shown verbatim as the command help in `--help` output.
    """Run Philippines COVID model calibration."""
    run_philippines_covid_calibration_chain(max_seconds, run_id)
# Attach the subgroups to the root CLI and invoke it when this module runs.
cli.add_command(run)
cli.add_command(calibrate)
cli.add_command(db)
cli()
| 4,317 | 1,571 |
"""
MyMemory Translated
@website https://mymemory.translated.net/
@provide-api yes (https://mymemory.translated.net/doc/spec.php)
@using-api yes
@results JSON
@stable yes
@parse url, title, content
"""
import re
from sys import version_info
from searx.utils import is_valid_lang
# Python 2/3 compat shim: on Python 3 `unicode` is just `str`.
if version_info[0] == 3:
    unicode = str

# Engine configuration for searx.
categories = ['general']
# API endpoint and the user-facing result URL templates.
url = u'http://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'
web_url = u'http://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'
weight = 100
# Matches queries of the form "<from>-<to> <text to translate>".
parser_re = re.compile(u'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)
# Optional MyMemory API key; appended to the request URL when non-empty.
api_key = ''
def request(query, params):
    """Parse an "xx-yy <text>" query and fill in the MyMemory request URL.

    Returns *params* unchanged when the query does not match the translation
    syntax or either language code is invalid.
    """
    # NOTE(review): with the Py3 shim above, unicode(query, 'utf8') is
    # str(query, 'utf8'), which requires `query` to be bytes — confirm that
    # searx passes the raw (bytes) query here.
    m = parser_re.match(unicode(query, 'utf8'))
    if not m:
        return params
    from_lang, to_lang, query = m.groups()
    # is_valid_lang returns a falsy value for unknown codes, otherwise an
    # indexable language descriptor (see [1]/[2] uses below).
    from_lang = is_valid_lang(from_lang)
    to_lang = is_valid_lang(to_lang)
    if not from_lang or not to_lang:
        return params
    if api_key:
        key_form = '&key=' + api_key
    else:
        key_form = ''
    params['url'] = url.format(from_lang=from_lang[1],
                               to_lang=to_lang[1],
                               query=query,
                               key=key_form)
    # Stash the parsed pieces for response() to build the title/URL from.
    params['query'] = query
    params['from_lang'] = from_lang
    params['to_lang'] = to_lang
    return params
def response(resp):
    """Build the single translation result from the MyMemory JSON reply."""
    params = resp.search_params
    target_url = web_url.format(
        from_lang=params['from_lang'][2],
        to_lang=params['to_lang'][2],
        query=params['query'])
    title = '[{0}-{1}] {2}'.format(
        params['from_lang'][1],
        params['to_lang'][1],
        params['query'])
    translated = resp.json()['responseData']['translatedText']
    return [{'url': target_url, 'title': title, 'content': translated}]
| 1,851 | 646 |
import DebugDataFeedServe_pb2 as NetworkInterfacePacket
import socket
import sys
import time
import math
from google.protobuf.internal import encoder
from google.protobuf.internal import decoder
from threading import Thread
from Queue import Queue
def build_test_packet(dt):
    """Build a SystemStateMessage containing one synthetic aircraft.

    dt: elapsed simulation time in milliseconds; drives a slow northward
    latitude drift so consumers see the aircraft moving.
    """
    t = int(round(time.time() * 1000))
    system_state = NetworkInterfacePacket.SystemStateMessage()
    system_state.time = t
    aircraft_state = system_state.aircraftState.add()
    aircraft_state.aircraftID = "7C1468"
    aircraft_state.time = t
    aircraft_state.heading = 81.0
    speed = 0.01  # drift rate in degrees per second
    # (removed unused local `deg_to_rad`; math.radians is used instead)
    position = aircraft_state.position
    position.altitude = 2278.38
    # dt is in milliseconds, hence the /1000.0 to get seconds of drift.
    position.latitude = math.radians(-37.7549 + speed * (dt / 1000.0))
    position.longitude = math.radians(144.6835)
    velocity = aircraft_state.velocity
    velocity.dr = 0.0
    velocity.dtheta = 0.0
    velocity.dphi = math.radians(speed)
    return system_state
# I had to implement this because the tools in google.protobuf.internal.decoder
# read from a buffer, not from a file-like object
def readRawVarint32(stream):
    """Read the raw bytes of one base-128 varint from a file-like stream.

    Returns a list of single-byte reads making up the varint; the list is
    empty when the stream is already at EOF.
    """
    mask = 0x80  # (1 << 7): high bit set means another varint byte follows
    raw_varint32 = []
    while 1:
        b = stream.read(1)
        # BUG FIX: EOF was tested with `b == ""`, which never matches the
        # b"" returned by binary streams on Python 3 and then crashed in
        # ord(b""). `not b` is correct on both Python 2 and 3.
        if not b:
            break
        raw_varint32.append(b)
        if not (ord(b) & mask):
            # we found a byte starting with a 0, which means it's the last byte
            # of this varint
            break
    return raw_varint32
# These methods are from here: http://stackoverflow.com/questions/2340730/are-t
# here-c-equivalents-for-the-protocol-buffers-delimited-i-o-functions-in-ja/3453
# 9706#34539706
def writeDelimitedTo(message, connection):
    """Send *message* on *connection*, prefixed with its varint-encoded length."""
    message_str = message.SerializeToString()
    delimiter = encoder._VarintBytes(len(message_str))
    connection.send(delimiter + message_str)
def readDelimitedFrom(MessageType, stream):
    """Read one length-delimited protobuf of *MessageType* from *stream*.

    Returns a new parsed message, or None at EOF. Raises if the stream ends
    mid-message.
    """
    raw_varint32 = readRawVarint32(stream)
    message = None
    if raw_varint32:
        size, _ = decoder._DecodeVarint32(raw_varint32, 0)
        data = stream.read(size)
        if len(data) < size:
            raise Exception("Unexpected end of file")
        message = MessageType()
        message.ParseFromString(data)
    return message
# In place version that takes an already built protobuf object
# In my tests, this is around 20% faster than the other version
# of readDelimitedFrom()
def readDelimitedFrom_inplace(message, stream):
    """Parse one length-delimited protobuf from *stream* into *message*.

    Reuses the caller's message object (avoids an allocation per read).
    Returns the message, or None at EOF; raises if the stream ends mid-message.
    """
    raw_varint32 = readRawVarint32(stream)
    if raw_varint32:
        size, _ = decoder._DecodeVarint32(raw_varint32, 0)
        data = stream.read(size)
        if len(data) < size:
            raise Exception("Unexpected end of file")
        message.ParseFromString(data)
        return message
    else:
        return None
class ServerThread(Thread):
    """Daemon thread that serves length-delimited protobuf messages from a
    queue to one TCP client at a time on localhost:6989."""

    def __init__(self, message_queue):
        Thread.__init__(self)
        self.daemon = True
        self.message_queue = message_queue
        # Set once a client has connected; SimulationThread polls this.
        self.running = False

    def run(self):
        self.start_server('localhost', 6989)

    def start_server(self, address, port):
        """Bind, accept clients forever, and stream queued messages to each."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        print("starting server on {} port {}".format(address, port))
        sock.bind((address, port))
        print("socket has been bound")
        # listen for incoming connections
        sock.listen(1)
        while True:
            # wait for a connection
            connection, client_address = sock.accept()
            # BUG FIX: the format string was printed without .format(), so
            # "{}" appeared literally next to the address tuple.
            print("connection initiated from {}".format(client_address))
            self.running = True
            while True:
                # Block indefinitely until a message is available, then send it.
                message = self.message_queue.get(True, None)
                writeDelimitedTo(message, connection)

    def is_running(self):
        """True once a client connected and the thread is still alive."""
        return self.running and self.is_alive()
class SimulationThread(Thread):
    """Daemon thread that generates test packets and feeds the server's queue.

    Waits (polling) until the server has an active client before producing.
    """
    def __init__(self, message_queue, server_thread):
        Thread.__init__(self)
        self.daemon = True
        self.message_queue = message_queue
        self.server_thread = server_thread

    def run(self):
        start_time = int(round(time.time() * 1000))
        while True:
            current_time = int(round(time.time() * 1000))
            # Milliseconds since the simulation started; drives aircraft drift.
            dt = current_time - start_time
            message = build_test_packet(dt)
            if self.server_thread.is_running():
                # Block up to 2s if the queue is full (bounded at 10 messages).
                self.message_queue.put(message, True, 2.0)
                print(self.message_queue.qsize())
            else:
                print("waiting until server is running...")
                time.sleep(0.5)
if __name__ == "__main__":
    # print(build_test_packet(0))
    # Bounded queue couples the producer (simulation) to the consumer (server).
    message_queue = Queue(10)
    server_thread = ServerThread(message_queue)
    server_thread.start()
    sim_thread = SimulationThread(message_queue, server_thread)
    sim_thread.start()
    # Both workers are daemons; keep the main thread alive so they can run.
    while True:
        time.sleep(1)
| 4,849 | 1,555 |
from sorl.thumbnail_standalone.base import ThumbnailBackend
from sorl import __version__
| 89 | 26 |
import os
import random
def search_substring_from_folder(path, substr):
    """Return the full path of the first entry in *path* whose name contains
    *substr*, or None when no entry matches."""
    for entry in os.listdir(path):
        if substr in entry:
            return os.path.join(path, entry)
    return None
def list_all_files(dir_path):
    """Return the full paths of every entry directly inside *dir_path*."""
    return [os.path.join(dir_path, entry) for entry in os.listdir(dir_path)]
def path_to_random_file(dir_path):
    """Return the full path of a randomly chosen entry inside *dir_path*."""
    entries = [os.path.join(dir_path, name) for name in os.listdir(dir_path)]
    return random.choice(entries)
| 452 | 156 |
import os
# Root of the CourtListener corpus on the shared drive, with its two
# main subdirectories.
CLDIR = "/hg190/corpora/courtlistener"
CLUSTERS_DIR = os.path.join (CLDIR, "clusters")
OPINIONS_DIR = os.path.join (CLDIR, "opinions")

# Common literals used when reading/writing corpus files.
NL="\n"
JSON_EXT = ".json"
JSONL_EXT = ".jsonl"
TXT_EXT = ".txt"

# Round-number size constants (LAKH = 100,000 in the Indian numbering system).
MIL = 1000000
HALFMIL = 500000
LAKH = 100000

# NOTE(review): hard-coded CourtListener API credentials — these should be
# moved to environment variables or a secrets store, and the token rotated.
CL_TOKEN="df4dd26ef31ffc5cd9398804fc354cf09ece7e0a"
CL_USERNAME="sandeepsoni"
CL_PASSWORD="courtlistener"
| 365 | 198 |
#!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
from RouToolPa.Tools.Expression import Gffread
# CLI wrapper around RouToolPa's Gffread: extract CDS sequences from a genome
# using a GFF annotation.
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", action="store", dest="input", required=True,
                    help="Input gff with annotation")
parser.add_argument("-g", "--genome", action="store", dest="genome", required=True,
                    help="Fasta with genome")
parser.add_argument("-o", "--output", action="store", dest="output", required=True,
                    help="Output file to write cds")
args = parser.parse_args()

Gffread.extract_cds(args.input, args.genome, args.output)
| 645 | 200 |
"""
The microgrid package organizes the test-bench functionalities in subpackages.
""" | 86 | 23 |
from django.apps import AppConfig
class EpsilonConfig(AppConfig):
    """Django application configuration for the `epsilon` app."""
    name = 'epsilon'
| 89 | 27 |
"""
Pipe is a quick-and-dirty implementation of sequential functions invocation.
Functions should be designed the way that a result returned by
preceding function (pipe argument) could be used as the first argument
to the next function, and so on.
"""
def run(*args, initial=None):
    """Create and run the pipe, calling the given functions in sequence.

    Each function receives the previous function's return value; the first
    receives *initial*.

    Args:
        *args: functions to call in sequence
        initial: argument to the first function (pipe argument)

    Returns:
        result of the last function called (or *initial* if no functions)
    """
    result = initial
    for step in args:
        result = step(result)
    return result
def load(x):
    """When used in pipe, replaces the pipe argument with *x*.

    Note:
        Any value returned by the preceding function call is discarded.

    Args:
        x: value that becomes the new pipe argument

    Returns:
        function that performs the required action in pipe
    """
    def replace(_):
        return x
    return replace
def tee(f, *args, **kwargs):
    """When used in pipe, calls *f* for its side effects only.

    The pipe argument is passed to *f* first, before args and kwargs; the
    return value of *f* is discarded and the pipe argument is passed along
    unchanged (useful for logging or printing).

    Note:
        Tee'ing a generator is not supported. May precede with ``list``.

    Args:
        f: function to call
        *args: other positional arguments to the function
        **kwargs: keyword arguments to the function

    Returns:
        function that performs the required action in pipe
    """
    def passthrough(value):
        f(value, *args, **kwargs)
        return value
    return passthrough
def method(name, *args, **kwargs):
    """When used in pipe, calls the named method of the current pipe argument.

    The provided args and kwargs are forwarded as method arguments; the
    method's return value becomes the new pipe argument.

    Args:
        name: name of the method to call
        *args: positional arguments to the method
        **kwargs: key arguments to the method

    Returns:
        function that performs the required action in pipe
    """
    return lambda obj: getattr(obj, name, None)(*args, **kwargs)
def tee_method(name, *args, **kwargs):
    """When used in pipe, calls the named method for its side effects only.

    The method is invoked on the current pipe argument with the given
    arguments; its return value is discarded and the pipe argument itself
    is passed along untouched.

    Args:
        name: name of the method to call
        *args: positional arguments to the method
        **kwargs: key arguments to the method

    Returns:
        function that performs the required action in pipe
    """
    def passthrough(obj):
        getattr(obj, name, None)(*args, **kwargs)
        return obj
    return passthrough
def call(f, *args, **kwargs):
    """When used in pipe, calls the given function.

    The pipe argument is passed first, before args and kwargs, so *f* should
    be (re)designed to handle this. The result becomes the new pipe argument.

    Note:
        kwargs will overwrite the pipe argument ``if xarg in kwarg``.

    Args:
        f: function to call
        *args: other positional arguments to the function
        **kwargs: keyword arguments to the function

    Returns:
        function that performs the required action in pipe
    """
    return lambda value: f(value, *args, **kwargs)
| 3,414 | 927 |
#
# Copyright 2020-2021 Picovoice Inc.
#
# You may not use this file except in compliance with the license. A copy of the license is located in the "LICENSE"
# file accompanying this source.
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
import argparse
import os
import sys
import struct
import wave
from threading import Thread
import numpy as np
from picovoice import Picovoice
from pvrecorder import PvRecorder
class PicovoiceDemo(Thread):
    """Thread that records microphone audio and runs Picovoice wake-word
    detection plus follow-on intent inference on it, optionally dumping the
    raw audio to a WAV file for debugging."""
    def __init__(
            self,
            access_key,
            audio_device_index,
            keyword_path,
            context_path,
            porcupine_library_path=None,
            porcupine_model_path=None,
            porcupine_sensitivity=0.5,
            rhino_library_path=None,
            rhino_model_path=None,
            rhino_sensitivity=0.5,
            require_endpoint=True,
            output_path=None):
        super(PicovoiceDemo, self).__init__()

        # Picovoice engine wiring wake-word and inference events back to this
        # class's static callback printers.
        self._picovoice = Picovoice(
            access_key=access_key,
            keyword_path=keyword_path,
            wake_word_callback=self._wake_word_callback,
            context_path=context_path,
            inference_callback=self._inference_callback,
            porcupine_library_path=porcupine_library_path,
            porcupine_model_path=porcupine_model_path,
            porcupine_sensitivity=porcupine_sensitivity,
            rhino_library_path=rhino_library_path,
            rhino_model_path=rhino_model_path,
            rhino_sensitivity=rhino_sensitivity,
            require_endpoint=require_endpoint)

        self.audio_device_index = audio_device_index
        # Optional path for a debug WAV recording; None disables recording.
        self.output_path = output_path

    @staticmethod
    def _wake_word_callback():
        # Invoked by Picovoice when the keyword is detected.
        print('[wake word]\n')

    @staticmethod
    def _inference_callback(inference):
        # Pretty-print the inferred intent and its slots, or a fallback line.
        if inference.is_understood:
            print('{')
            print("  intent : '%s'" % inference.intent)
            print('  slots : {')
            for slot, value in inference.slots.items():
                print("    %s : '%s'" % (slot, value))
            print('  }')
            print('}\n')
        else:
            print("Didn't understand the command.\n")

    def run(self):
        """Record audio frames and feed them to Picovoice until interrupted."""
        recorder = None
        wav_file = None
        try:
            recorder = PvRecorder(device_index=self.audio_device_index, frame_length=self._picovoice.frame_length)
            recorder.start()

            if self.output_path is not None:
                wav_file = wave.open(self.output_path, "w")
                # (nchannels, sampwidth, framerate, nframes, comptype, compname):
                # mono 16-bit 16 kHz; nframes (512) is a placeholder updated on close.
                wav_file.setparams((1, 2, 16000, 512, "NONE", "NONE"))

            print(f"Using device: {recorder.selected_device}")
            print('[Listening ...]')

            while True:
                pcm = recorder.read()

                if wav_file is not None:
                    wav_file.writeframes(struct.pack("h" * len(pcm), *pcm))

                self._picovoice.process(pcm)
        except KeyboardInterrupt:
            sys.stdout.write('\b' * 2)
            print('Stopping ...')
        finally:
            # Release native resources regardless of how the loop exited.
            if recorder is not None:
                recorder.delete()

            if wav_file is not None:
                wav_file.close()

            self._picovoice.delete()

    @classmethod
    def show_audio_devices(cls):
        """Print the index and name of every available capture device."""
        devices = PvRecorder.get_audio_devices()

        for i in range(len(devices)):
            print(f'index: {i}, device name: {devices[i]}')
def main():
    """Parse command-line options, then either list audio devices or run the demo."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--access_key',
        help='AccessKey obtained from Picovoice Console (https://picovoice.ai/console/)',
        required=True)
    arg_parser.add_argument('--keyword_path', help="Absolute path to a Porcupine keyword file.")
    arg_parser.add_argument('--context_path', help="Absolute path to a Rhino context file.")
    arg_parser.add_argument('--porcupine_library_path', help="Absolute path to Porcupine's dynamic library.", default=None)
    arg_parser.add_argument('--porcupine_model_path', help="Absolute path to Porcupine's model file.", default=None)
    arg_parser.add_argument(
        '--porcupine_sensitivity',
        help="Sensitivity for detecting wake word. Each value should be a number within [0, 1]. A higher sensitivity " +
             "results in fewer misses at the cost of increasing the false alarm rate.",
        type=float,
        default=0.5)
    arg_parser.add_argument('--rhino_library_path', help="Absolute path to Rhino's dynamic library.", default=None)
    arg_parser.add_argument('--rhino_model_path', help="Absolute path to Rhino's model file.", default=None)
    arg_parser.add_argument(
        '--rhino_sensitivity',
        help="Inference sensitivity. It should be a number within [0, 1]. A higher sensitivity value results in fewer" +
             "misses at the cost of (potentially) increasing the erroneous inference rate.",
        type=float,
        default=0.5)
    arg_parser.add_argument(
        '--require_endpoint',
        help="If set to `False`, Rhino does not require an endpoint (chunk of silence) before finishing inference.",
        default='True',
        choices=['True', 'False'])
    arg_parser.add_argument('--audio_device_index', help='index of input audio device', type=int, default=-1)
    arg_parser.add_argument('--output_path', help='Absolute path to recorded audio for debugging.', default=None)
    arg_parser.add_argument('--show_audio_devices', action='store_true')
    cli_args = arg_parser.parse_args()

    # The flag arrives as the literal string 'True'/'False'; anything other
    # than (case-insensitive) 'false' keeps endpoint detection enabled.
    endpoint_required = cli_args.require_endpoint.lower() != 'false'

    if cli_args.show_audio_devices:
        PicovoiceDemo.show_audio_devices()
        return

    # Both model files are mandatory when actually running the demo.
    if not cli_args.keyword_path:
        raise ValueError("Missing path to Porcupine's keyword file.")
    if not cli_args.context_path:
        raise ValueError("Missing path to Rhino's context file.")

    PicovoiceDemo(
        access_key=cli_args.access_key,
        audio_device_index=cli_args.audio_device_index,
        keyword_path=cli_args.keyword_path,
        context_path=cli_args.context_path,
        porcupine_library_path=cli_args.porcupine_library_path,
        porcupine_model_path=cli_args.porcupine_model_path,
        porcupine_sensitivity=cli_args.porcupine_sensitivity,
        rhino_library_path=cli_args.rhino_library_path,
        rhino_model_path=cli_args.rhino_model_path,
        rhino_sensitivity=cli_args.rhino_sensitivity,
        require_endpoint=endpoint_required,
        output_path=os.path.expanduser(cli_args.output_path) if cli_args.output_path is not None else None).run()


if __name__ == '__main__':
    main()
| 6,879 | 2,019 |
def insertion_sort(nums: list[float]) -> list[float]:
    """Sort `nums` in place with a naive, swap-based insertion sort.

    Each new element is bubbled leftwards by repeated adjacent swaps until it
    reaches its slot, so this variant compares and moves more data than the
    shifting version and is deliberately sub-optimal.

    Time complexity: O(n) best, O(n^2) worst, O(n^2) average.
    Space complexity: O(n) total, O(1) auxiliary.

    Args:
        nums: A list of numbers.

    Returns:
        The same list, sorted in ascending order.
    """
    for right in range(1, len(nums)):
        pos = right
        # Swap the new element leftwards while it is out of order.
        while pos and nums[pos] < nums[pos - 1]:
            nums[pos], nums[pos - 1] = nums[pos - 1], nums[pos]
            pos -= 1
    return nums
# Exported under the generic names the surrounding harness uses to select a
# sorting implementation and label it.
algorithm = insertion_sort
name = 'in-place naive'
| 700 | 239 |
from django.utils.translation import ugettext_lazy as _
from rest_framework.serializers import (
Serializer,
JSONField,
ModelSerializer,
)
from rest_framework.utils.serializer_helpers import BindingDict
from rest_framework.exceptions import ValidationError
from rest_framework.fields import empty
from django.db import models
from django.utils.functional import cached_property
# Import your package here.
from drf_localize.core import (
localize,
localize_key_type
)
# Create your serializers here.
class I18N(Serializer):
    """Serializer that builds a per-language translation payload from the request.

    ``to_representation`` returns ``{language_code: translation}``, where the
    translation is either a dict of model-field values (model-based mode),
    a plain string, or a dict of 2nd-level keys (namespace mode).
    """

    # Serializer context; a 'request' key is required to produce any output.
    context: dict = None
    # When True, each language holds a dict of 2nd-level keys (non-model mode).
    localize_namespace: bool = False
    # Optional model whose drf_localize configuration drives field translation.
    localize_model: models.Model = None
    # Names of model fields to translate (filled in __init__ from the model).
    localize_translate: list = []

    def __init__(self, **kwargs):
        # Pull the drf_localize-specific kwargs out before handing the rest to DRF.
        self.localize_model = kwargs.pop('model', None)
        self.context = kwargs.pop('context', None)
        self.localize_namespace = kwargs.pop('namespace', False)
        # Resolve translated fields, the i18n field name, and the auto-update
        # flag from the model configuration, then hook up model signals.
        self.localize_translate, self.localize_field, self.localize_auto_update = localize._model_set(model=self.localize_model)  # noqa
        localize._signal(model=self.localize_model)  # noqa
        super(I18N, self).__init__(**kwargs)

    def to_representation(self, instance):
        """Assemble the ``{language: translation}`` mapping from the request body."""
        # Not evaluating non-request context
        if 'request' not in self.context:
            return {}
        response = {}
        request = self.context.get('request', {})
        data = getattr(request, 'data', {})
        # Raw i18n payload submitted by the client, keyed by language code.
        i18n = data.get(self.localize_field, {})
        languages = localize.get_languages(request=request)
        # Update i18n with request's language -> i18n.LANGUAGE_CODE -> i18n.en
        if language := request.LANGUAGE_CODE:
            response[language] = {}
        # Take i18n field from request body
        if i18n and isinstance(i18n, dict):
            keys = list(i18n.keys())
            # Check if i18n object has valid language keys
            if difference := list(set(keys) - set(languages)):
                raise ValidationError({
                    self.localize_field: [_('Unknown language keys "%(key)s".') % {'key': ','.join(difference)}]
                })
            # Attach language keys with values
            for language in languages:
                response[language] = {}
                value = i18n.get(language, '')
                value_string = value if isinstance(value, str) else ''
                # Model based field translation
                if self.localize_model and self.localize_translate:
                    for field in self.localize_translate:
                        # Top-level request value for `field` (used as fallback).
                        keyed_data = data.get(field, '')
                        keyed = i18n.get(language, {})
                        if not isinstance(keyed, dict):
                            keyed = {}
                        # Retrieve language field value, if set
                        keyed = keyed.get(field, '')
                        value_string = keyed if keyed and isinstance(keyed, str) else ''
                        # Defaulting to internal body key value
                        value_string = keyed_data if not value_string else value_string
                        # Update language code key value
                        response[language].update({field: value_string})
                    # We are skipping the rest, because model based translation is already in use
                    continue
                # Blank string if value is not string, and non-model
                response[language] = value_string
                # Namespacing keys, means each language is allowed to have 2nd level keys, non-model
                if self.localize_namespace:
                    response[language] = {}
                    if not isinstance(value, dict):
                        continue
                    # NOTE: the loop deliberately rebinds `value` (the whole
                    # per-language payload) to each 2nd-level value.
                    for key, value in value.items():
                        # Skipping if value is not string
                        if not isinstance(value, str):
                            continue
                        # Attach 2nd level value
                        response[language].update({key: value})
        return response

    def to_internal_value(self, data):
        # Wrap the raw payload under the configured i18n field name.
        return {self.localize_field: data}

    def update(self, instance, validated_data):
        # Helper serializer; persistence is handled by the model serializer.
        pass

    def create(self, validated_data):
        # Helper serializer; persistence is handled by the model serializer.
        pass
class I18NModelSerializer(ModelSerializer):
    """ModelSerializer that transparently attaches a localized i18n JSON field.

    The localized field name is read from the model's ``LOCALIZE_FIELD``
    attribute; when present, a ``JSONField`` is injected into ``fields`` and
    its value is rebuilt from the request via the ``I18N`` serializer on
    every create/update.
    """

    def __init__(self, instance=None, data=empty, **kwargs):
        # Resolve the model and its (optional) localized JSON field up front.
        self.localize_model = self.Meta.model  # noqa
        self.localize_field = getattr(self.localize_model, 'LOCALIZE_FIELD', None)
        super(I18NModelSerializer, self).__init__(instance=instance, data=data, **kwargs)

    @cached_property
    def fields(self):
        """
        A dictionary of {field_name: field_instance}.
        """
        # `fields` is evaluated lazily. We do this to ensure that we don't
        # have issues importing modules that use ModelSerializers as fields,
        # even if Django's app-loading stage has not yet run.
        fields = BindingDict(self)
        for key, value in self.get_fields().items():
            fields[key] = value
        if self.localize_field:
            fields.update({
                self.localize_field: JSONField(
                    # `dict` (a callable) rather than the literal `{}`: DRF
                    # calls callables per use, so each default is a fresh dict
                    # instead of one shared, mutable instance.
                    required=False, default=dict
                )
            })
        return fields

    def _i18n(self, validated_data):
        """Rebuild the localized payload from the request and merge it in."""
        typing = validated_data.get('type', None)
        serializer = I18N(
            data=self,
            context=self.context,
            model=self.localize_model,
            namespace=typing == localize_key_type.KEY_NAMESPACE
        )
        serializer.is_valid(raise_exception=True)
        # In case model does not have i18n field
        if self.localize_field:
            validated_data.update({self.localize_field: serializer.data})
        return validated_data

    def create(self, validated_data):
        # Inject the localized payload before the standard create.
        validated_data = self._i18n(validated_data)
        return super(I18NModelSerializer, self).create(validated_data)

    def update(self, instance, validated_data):
        # Inject the localized payload before the standard update.
        validated_data = self._i18n(validated_data)
        return super(I18NModelSerializer, self).update(instance, validated_data)
| 6,010 | 1,662 |
from datetime import datetime, timezone
from unittest.mock import patch
from pathlib import Path
from freezegun import freeze_time
from django.test import TestCase, Client
from django.conf import settings
from api.models import Image
# Directory containing this test module (used to locate fixture files).
HERE = Path(__file__).resolve().parent
# Canned upload URL returned by the mocked `_upload_image` helper.
FAKE_IMGUR_PATH = "https://i.imgur.com/RxArJUc.png"
class PostImageViewTestCase(TestCase):
    """POST /api/images: DB persistence, EXIF handling, and non-image rejection."""

    @patch("api.views.image_c._upload_image", return_value=FAKE_IMGUR_PATH)
    def test_image_with_exif_db_correct(self, patch_upload):
        client = Client()
        frozen_now = datetime(2019, 11, 11, 11, 11, 11, tzinfo=timezone.utc)
        # Freeze "now" so the stored created_at is predictable.
        with freeze_time(frozen_now), open(HERE / "20180311_132133.jpg", "rb") as f_img:
            response = client.post("/api/images", {'image': f_img}, format='multipart')
        self.assertEqual(response.status_code, 200)
        body = response.json()
        self.assertIn('token', body)
        stored = Image.objects.get(pk=body['token'])
        self.assertEqual(stored.image_path, FAKE_IMGUR_PATH)
        self.assertEqual(stored.created_at, frozen_now)
        self.assertEqual(stored.orig_time, datetime(2018, 3, 11, 13, 21, 33, tzinfo=timezone.utc))

    @patch("api.views.image_c._get_image_original_date", return_value=None)
    @patch("api.views.image_c._upload_image", return_value=FAKE_IMGUR_PATH)
    def test_image_without_exif_db_correct(self, patch_upload, _):
        client = Client()
        frozen_now = datetime(2019, 11, 11, 11, 11, 11, tzinfo=timezone.utc)
        with freeze_time(frozen_now), open(HERE / "20180311_132133.jpg", "rb") as f_img:
            response = client.post("/api/images", {'image': f_img}, format='multipart')
        body = response.json()
        self.assertEqual(response.status_code, 200)
        self.assertIn('token', body)
        stored = Image.objects.get(pk=body['token'])
        self.assertEqual(stored.image_path, FAKE_IMGUR_PATH)
        self.assertEqual(stored.created_at, frozen_now)
        # EXIF extraction is mocked out, so no original timestamp is stored.
        self.assertIsNone(stored.orig_time)

    @patch("api.views.image_c._upload_image", return_value=FAKE_IMGUR_PATH)
    def test_return_400_if_not_image(self, patch_upload):
        client = Client()
        # Upload this very source file, which is not decodable as an image.
        with open(HERE / "test_image_c.py", "rb") as f_img:
            response = client.post("/api/images", {'image': f_img}, format='multipart')
        self.assertEqual(response.status_code, 400)
| 2,417 | 894 |
import numpy as np
from .. import utils
from .. import ExcursionProblem
def truth(x):
    """Toy physics-analysis truth function ("more wiggles in physics case").

    Composes a cross-section, a detector efficiency, and a statistics model
    into a p-value-like score, and returns ``3 * (log(p) - log(0.05))`` so
    the excursion level of interest sits at 0.

    Note: in the original, this description was a string literal placed
    after the first statement — a dead expression, not a docstring.

    Args:
        x: array of shape (N, 3); columns are the three scan coordinates.

    Returns:
        Array of shape (N,) with the scaled log p-value difference.
    """
    xv, yv, zv = x[:,0],x[:,1], x[:,2]
    def xsec(xv,yv,zv):
        # Falling exponential with small oscillatory modulations.
        return (12*np.exp(-xv/2)+((0.1*np.cos(10*yv)))+((0.2*np.cos(15*xv))))*np.exp(-0.3*zv)
    def eff(xv,yv,zv):
        # Smooth turn-on efficiency curve.
        return np.tanh((1.3*xv-yv)+1)*1
    def stats(nevents):
        # Sigmoid mapping expected event counts to a p-value-like score.
        return (1-np.tanh((nevents-5)))/2.
    def analysis(xv,yv,zv):
        return stats(xsec(xv,yv,zv) * eff(xv,yv,zv))
    return 3*(np.log(analysis(xv,yv,zv)) - np.log(0.05))
# Scan domain: a [0, 1.5] cube in the three coordinates of `truth`.
bounding_box = [[0,1.5],[0,1.5],[0,1.5]]
# Plotting grid resolution per dimension.
npoints = [60,60,60]
# 3-D excursion problem for the single truth function with threshold 0.0.
single_function = ExcursionProblem([truth],[0.0],ndim = 3, bounding_box = bounding_box, plot_npoints=npoints)
| 730 | 364 |
from JumpScale import j
BASEURLS = {'github': 'github.com',
'bitbucket': 'bitbucket.org'}
class VCSConfig(object):
    """Per-provider/account credential store backed by a JumpScale ini file.

    Missing values are prompted for interactively on first access and then
    persisted, so subsequent calls read them from the ini file.
    """

    def __init__(self, provider, account):
        self._account = account
        self._provider = provider
        # One ini file per provider; sections are account names.
        self._ini = j.config.getInifile(provider)

    def _getConfig(self, key, password=False):
        """Return `key` for this account, asking the user (and caching) if unset.

        The literal placeholder "@<key>" marks a value that was never filled in.
        """
        if not self._ini.checkParam(self._account, key):
            value = "@%s" % key
        else:
            value = self._ini.getValue(self._account, key)
        if value == "@%s" % key:
            question = "Please provide %s for %s on %s" % (key, self._account, self._provider)
            if password:
                value = j.console.askPassword(question)
            else:
                value = j.console.askString(question)
            # Persist the answer so we don't ask again next time.
            self._ini.addSection(self._account)
            self._ini.addParam(self._account, key, value)
        return value

    @property
    def login(self):
        # Username for the VCS provider ('git'/'ssh'/'hg' selects ssh access).
        return self._getConfig('login')

    @property
    def passwd(self):
        # Password; prompted with hidden input.
        return self._getConfig('passwd', True)
class VCSFactory(object):
    """Builds VCS client wrappers (git or mercurial) from stored credentials."""

    def getClient(self, type, provider, account, reponame):
        """Return a VCSGITClient or VCSHGClient for the given repository.

        Args:
            type: VCS type, 'git' or 'hg'.
            provider: provider key ('github', 'bitbucket') or a raw base URL.
            account: account/organization name on the provider.
            reponame: repository name.

        Raises:
            ValueError: if `type` is not a supported VCS type.
        """
        userconfig = VCSConfig(provider, account)
        url = ""
        basepath = j.system.fs.joinPaths(j.dirs.codeDir, provider, account, reponame)
        user = userconfig.login
        passwd = userconfig.passwd
        # Unknown providers are treated as a literal base URL.
        baseurl = BASEURLS.get(provider, provider)
        if type in ["git"]:
            from JumpScale.baselib import git
            if user in ('git', 'ssh'):  # This is ssh
                url = "git@%s:%s/%s.git" % (baseurl, account, reponame)
            else:
                # NOTE(review): embeds the password in the clone URL, which may
                # end up in remotes/logs; kept for backward compatibility.
                url = "https://%s:%s@%s/%s/%s.git" % (user, passwd, baseurl, account, reponame)
            return VCSGITClient(j.clients.git.getClient(basepath, url, login=user, passwd=passwd))
        elif type in ['hg']:
            from JumpScale.baselib import mercurial
            if user in ('hg', 'ssh'):
                url = "ssh://hg@%s/%s/%s" % (baseurl, account, reponame)
            else:
                url = "https://%s:%s@%s/%s/%s" % (user, passwd, baseurl, account, reponame)
            return VCSHGClient(j.clients.mercurial.getClient(basepath, url))
        # Previously an unsupported type fell through and returned None,
        # deferring the failure to the caller's first attribute access;
        # fail fast with a clear message instead.
        raise ValueError("Unsupported VCS type %r; expected 'git' or 'hg'" % type)
class VCSClient(object):
    """Abstract interface shared by the git and mercurial client wrappers."""

    def update(self):
        # Pull the latest changes into the local working copy.
        raise NotImplementedError()

    def clone(self):
        # Create the local working copy from the remote repository.
        raise NotImplementedError()

    def checkout(self, revision):
        # Switch the working copy to the given revision.
        raise NotImplementedError()

    def push(self, force=False):
        # Publish local commits to the remote repository.
        raise NotImplementedError()

    def init(self):
        """
        Make sure the repo exists locally; clone it if it does not.
        """
        raise NotImplementedError()
class VCSGITClient(VCSClient):
    """Thin adapter exposing the VCSClient interface over a JumpScale git client."""

    def __init__(self, client):
        self.client = client
        self.baseDir = client.baseDir

    def clone(self):
        self.client._clone()

    def update(self):
        # git pull
        self.client.pull()

    def push(self, force=False):
        self.client.push(force)

    def init(self):
        self.client.init()
class VCSHGClient(VCSClient):
    """Thin adapter exposing the VCSClient interface over a JumpScale mercurial client."""

    def __init__(self, client):
        self.client = client
        # NOTE: the mercurial client spells this attribute 'basedir' (lowercase d).
        self.baseDir = client.basedir

    def clone(self):
        self.client._clone()

    def update(self):
        # hg pull + update in one step.
        self.client.pullupdate()

    def push(self, force=False):
        # NOTE(review): `force` is accepted but not forwarded to hg push,
        # unlike the git wrapper — confirm this is intentional.
        self.client.push()

    def init(self):
        pass  # mercurial repos are initialized at construction time
| 3,365 | 1,029 |
# (C) Copyright 2017, 2019 by Rocky Bernstein
"""
CPython 2.3 bytecode opcodes
This is a like Python 2.3's opcode.py with some classification
of stack usage.
"""
import xdis.opcodes.opcode_2x as opcode_2x
from xdis.opcodes.base import (
finalize_opcodes,
format_extended_arg,
init_opdata,
update_pj2,
)
version = 2.3

# Build this module's opcode tables in its own namespace: start from the
# Python 2.x base tables, then apply jump/target classification.
l = locals()
init_opdata(l, opcode_2x, version)
update_pj2(globals(), l)
# Custom argument formatters for disassembly output.
opcode_arg_fmt = {"EXTENDED_ARG": format_extended_arg}
finalize_opcodes(l)
| 488 | 210 |
from construct import Struct, Int8ul, Int16ul, Int32ul, Array, Const, Tell, Default
from .piostring import PioString, IndexedPioString
# Magic value marking the start of a track row.
TRACK_ENTRY_MAGIC = 0x24

# Binary layout of a single track row, expressed as a `construct` Struct:
# fixed-width numeric columns first, then an offset table (`str_idx`) of 21
# uint16 string offsets followed by the strings themselves.  Fields named
# `u<N>` / `str_u<N>` are not yet reverse-engineered; the inline comments
# record observed values.
Track = Struct(
    "entry_start" / Tell,
    "magic" / Const(TRACK_ENTRY_MAGIC, Int16ul),
    "index_shift" / Int16ul,  # the index inside the page <<5 (0x00, 0x20, 0x40, ...)
    "bitmask" / Int32ul,
    "sample_rate" / Int32ul,
    "composer_index" / Int32ul,
    "file_size" / Int32ul,
    "u1" / Int32ul,  # some id?
    "u2" / Int16ul,  # always 19048?
    "u3" / Int16ul,  # always 30967?
    "artwork_id" / Int32ul,
    "key_id" / Int32ul,  # not sure
    "original_artist_id" / Int32ul,
    "label_id" / Int32ul,
    "remixer_id" / Int32ul,
    "bitrate" / Int32ul,
    "track_number" / Int32ul,
    "bpm_100" / Int32ul,
    "genre_id" / Int32ul,
    "album_id" / Int32ul,  # album artist is set in album entry
    "artist_id" / Int32ul,
    "id" / Int32ul,  # the rekordbox track id
    "disc_number" / Int16ul,
    "play_count" / Int16ul,
    "year" / Int16ul,
    "sample_depth" / Int16ul,  # not sure
    "duration" / Int16ul,
    "u4" / Int16ul,  # always 41?
    "color_id" / Int8ul,
    "rating" / Int8ul,
    "u5" / Default(Int16ul, 1),  # always 1?
    "u6" / Int16ul,  # alternating 2 or 3
    # Offsets for the 21 strings below, relative to entry_start.
    "str_idx" / Array(21, Int16ul),
    "str_u1" / IndexedPioString(0),  # empty
    "texter" / IndexedPioString(1),
    "str_u2" / IndexedPioString(2),  # thought tracknumber -> wrong!
    "str_u3" / IndexedPioString(3),  # strange strings, often zero length, sometimes low binary values 0x01/0x02 as content
    "str_u4" / IndexedPioString(4),  # strange strings, often zero length, sometimes low binary values 0x01/0x02 as content
    "message" / IndexedPioString(5),
    "kuvo_public" / IndexedPioString(6),  # "ON" or empty
    "autoload_hotcues" / IndexedPioString(7),  # "ON" or empty
    "str_u5" / IndexedPioString(8),  # 8
    "str_u6" / IndexedPioString(9),  # empty
    "date_added" / IndexedPioString(10),
    "release_date" / IndexedPioString(11),
    "mix_name" / IndexedPioString(12),
    "str_u7" / IndexedPioString(13),  # empty
    "analyze_path" / IndexedPioString(14),
    "analyze_date" / IndexedPioString(15),
    "comment" / IndexedPioString(16),
    "title" / IndexedPioString(17),
    "str_u8" / IndexedPioString(18),  # always empty; only in newer versions?
    "filename" / IndexedPioString(19),
    "path" / IndexedPioString(20)
)
| 2,302 | 1,036 |
# -*- coding:utf-8 -*-
import os
import sys
import wakeup
import snowboydetect
class SnowboyWakeup(wakeup.Wakeup):
    """Hotword detector backed by the snowboy engine.

    Normalizes the `models`/`sensitivity` arguments to lists, configures the
    detector (gain, frontend, per-hotword sensitivities) and exposes `start`
    to run detection on a chunk of audio.
    """

    def __init__(self, args):
        """Configure the snowboy detector.

        Args:
            args: dict with keys 'models' (path or list of paths),
                'sensitivity' (float or list; an empty list keeps engine
                defaults), 'resource' (common resource file path),
                'audio_gain' and 'apply_frontend'.
        """
        print(args)  # debug: show the effective configuration
        # Accept scalars as well as lists for models/sensitivity.
        if not isinstance(args['models'], list):
            args['models'] = [args['models']]
        if not isinstance(args['sensitivity'], list):
            args['sensitivity'] = [args['sensitivity']]
        model_str = ",".join(args['models'])
        sensitivity = args['sensitivity']
        self.detector = snowboydetect.SnowboyDetect(
            resource_filename=args['resource'].encode(), model_str=model_str.encode())
        self.detector.SetAudioGain(args['audio_gain'])
        self.detector.ApplyFrontend(args['apply_frontend'])
        self.num_hotwords = self.detector.NumHotwords()
        # One sensitivity per hotword is required when any are given.
        # (The original checked `len(sensitivity) != 0` twice and built the
        # joined string even when it was unused.)
        if sensitivity:
            assert self.num_hotwords == len(sensitivity), \
                "number of hotwords in decoder_model (%d) and sensitivity " \
                "(%d) does not match" % (self.num_hotwords, len(sensitivity))
            self.detector.SetSensitivity(",".join(str(t) for t in sensitivity).encode())

    def start(self, audio_data):
        """Run detection on one chunk of audio; returns the snowboy result code."""
        return self.detector.RunDetection(audio_data)
| 1,301 | 408 |
import cv2
def grayCode(n):
    """Return the reflected binary (Gray) code of the non-negative integer n."""
    shifted = n >> 1
    return n ^ shifted
def inversegrayCode(n):
    """Invert grayCode: recover the binary integer whose Gray code is n.

    XOR-folds all right-shifts of n, which undoes n ^ (n >> 1).
    (Rewritten from the original C-style `while(n)` with trailing semicolons.)
    """
    inv = 0
    while n:
        inv ^= n
        n >>= 1
    return inv
def image_grayCode(image):
    """Convert every channel value of `image` to its Gray code, in place.

    Vectorized replacement for the original per-pixel Python triple loop:
    the Gray code of n is n ^ (n >> 1), applied elementwise.  Also
    generalizes to any array shape (the loop version required exactly
    row/col/channel dimensions).

    Args:
        image: numpy integer array (e.g. an OpenCV image); modified in place.

    Returns:
        The same array, with every element replaced by its Gray code.
    """
    # The right-hand side is fully evaluated before the in-place XOR.
    image ^= image >> 1
    return image
def image_inversegrayCode(image):
    """Undo image_grayCode in place: map each Gray-coded value back to binary.

    args:
        image: integer ndarray (e.g. an OpenCV image of shape
               (rows, cols, channels)); modified in place.
    returns:
        the same array with every value replaced by its Gray-code inverse.

    Uses the standard prefix-XOR shift cascade (x ^= x>>1; x ^= x>>2; ...)
    sized to the array's dtype width, replacing the original per-pixel
    Python loop.  Assumes an unsigned integer dtype (for negative values the
    original element-wise loop never terminated anyway, since >> is an
    arithmetic shift).
    """
    bits = image.dtype.itemsize * 8
    shift = 1
    while shift < bits:
        image ^= image >> shift
        shift <<= 1
    return image
| 637 | 224 |
#
# PySNMP MIB module SNMP553S-MGMT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SNMP553S-MGMT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:08:38 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Resolve base ASN.1 types, constraint helpers, and SMI/TC building blocks
# from the MIB modules this module depends on.  `mibBuilder` is injected by
# the pysnmp MIB loader when it execs this generated module.
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint")
dsx1, = mibBuilder.importSymbols("GDCDSX1-MIB", "dsx1")
SCinstance, = mibBuilder.importSymbols("GDCMACRO-MIB", "SCinstance")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter64, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Bits, iso, TimeTicks, Integer32, ModuleIdentity, Counter32, IpAddress, MibIdentifier, Unsigned32, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Bits", "iso", "TimeTicks", "Integer32", "ModuleIdentity", "Counter32", "IpAddress", "MibIdentifier", "Unsigned32", "Gauge32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# OID roots for this module under enterprises.gdc(498): snmp553s is
# .498.6.3, snmp553sc is .498.6.4.
snmp553s = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3))
snmp553sc = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 4))
# Alarm-type OIDs (snmp553sAlarmData.1 .. .20), one MibIdentifier per alarm.
snmp553sAlarmData = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1))
snmp553sNoResponseAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 1))
snmp553sDiagRxErrAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 2))
snmp553sPowerUpAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 3))
snmp553sNvRamCorrupt = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 4))
snmp553sUnitFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 5))
snmp553sMbiLock = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 6))
snmp553sLocalPwrFail = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 7))
snmp553sTimingLoss = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 8))
snmp553sStatusChange = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 9))
snmp553sUnsoTest = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 10))
snmp553sLossOfSignal = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 11))
snmp553sLossOfFrame = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 12))
snmp553sAis = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 13))
snmp553sReceivedYellow = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 14))
snmp553sUnavailSignalState = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 15))
snmp553sExcessiveZeros = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 16))
snmp553sLowAverageDensity = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 17))
snmp553sControlledSlips = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 18))
snmp553sBipolarViolations = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 19))
snmp553sCrcErrors = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 6, 3, 1, 20))
# Read-only scalar (.3.2): MIB version string, fixed length 5 ("x.yzT").
snmp553sMIBversion = MibScalar((1, 3, 6, 1, 4, 1, 498, 6, 3, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(5, 5)).setFixedLength(5)).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sMIBversion.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sMIBversion.setDescription("Identifies the version of the MIB. The format of the version is x.yzT, where 'x' identifies the major revision number, 'y' identifies the minor revision number, 'z' identifies the typographical revision, and T identifies the test revision. Acceptable values for the individual revision components are as follows: x: 1 - 9 y: 0 - 9 z: 0 - 9 T: A - Z Upon formal release, no designation for the test revision will be present.")
# Maintenance table (.3.3), indexed by snmp553sMaintenanceIndex (SCinstance).
snmp553sMaintenanceTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 3, 3), )
if mibBuilder.loadTexts: snmp553sMaintenanceTable.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sMaintenanceTable.setDescription('The SNMP553S Maintenance table. This table augments the Gdc Dsx1 maintenance table, providing maintenance functions specific to the 553S.')
snmp553sMaintenanceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 3, 3, 1), ).setIndexNames((0, "SNMP553S-MGMT-MIB", "snmp553sMaintenanceIndex"))
if mibBuilder.loadTexts: snmp553sMaintenanceEntry.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sMaintenanceEntry.setDescription('The SNMP553S Maintenance table entry.')
snmp553sMaintenanceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 3, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sMaintenanceIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sMaintenanceIndex.setDescription('The index value which uniquely identifies the 553S to which this entry is applicable. This has the form of a SCinstance which defines the slot, line, and drop of the 553S with sub-identifier value set to unit(1).')
snmp553sCascadePresent = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("notPresent", 1), ("present", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sCascadePresent.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sCascadePresent.setDescription('Identifies whether or not the cascade interface cards are installed.')
snmp553sExtModemPresent = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("notPresent", 1), ("present", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sExtModemPresent.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sExtModemPresent.setDescription('Identifies whether or not an external modem is attached to the modem ppp interface.')
snmp553sUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("master", 1), ("remote", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sUnitType.setDescription("Identifies the GDC Netcon position of the unit. The value master(1) indicates that the NMS unit is configured as a Netcon Master, whereby diagnostic management information is received through a local Netcon port. The value remote(2) indicates that the unit is configured as a Netcon remote, whereby diagnostic management information is received through it's network interface or through an out of band (modem) link.")
snmp553sManagementSource = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("modemSnmp", 1), ("secondaryChannel", 2), ("fdl", 3), ("daisyChain", 4), ("bus485", 5), ("localSnmp", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sManagementSource.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sManagementSource.setDescription('Identifies the management data source. The value modemSnmp(1) indicates that the management source is SNMP via an dial-up ppp link. The value secondaryChannel(2)indicates that the management source is via an inband Diagnostics communications channel. The value fdl(3) indicates that the management source is via the out-of-band facilities data link. The value daisyChain(4) indicates that the management source via the NETCON master-in port. The value bus485(5) indicates that the management source is via the shelf commander in the Universal Systems Shelf. The value localSnmp(6) indicates that the management source is SNMP via a locap ppp link.')
snmp553sProductType = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("snmp553sd1ifp", 1), ("snmp553sd3ifp", 2), ("snmp553scifp", 3), ("nms553d1", 4), ("nms553d1ifp", 5), ("nms553d3ifp", 6), ("nms553c", 7), ("nms553cifp", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sProductType.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sProductType.setDescription('Indicates the product type of the unit.')
snmp553sLedStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 3, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(3, 3)).setFixedLength(3)).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sLedStatus.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sLedStatus.setDescription("Returns a bitwise snapshot of the front panel LED state. Octet 1 bit 7 - not used bit 6 - ON bit 5 - DSX1 bit 4 - NMSRSP (nms553 products) or MGMT DATA (553s products) bit 3 - NMSDAT (nms553 products) or MGMT RESP (553s products) bit 2 - ER bit 1 - DSX OOF bit 0 - DSX LOS Octet 2 bit 7 - not used bit 6 - NET OOF bit 5 - NET LOS bit 4 - NET BPV bit 3 - NET AIS bit 2 - TM/ALM bit 1 - LBK bit 0 - future use Octet 3 bit 7 - not used bit 6 - CHA SD ==> 553D1 and 553D3 only bit 5 - CHA RD ==> '' bit 4 - CHB SD ==> 553D3 only bit 3 - CHB RD ==> '' bit 2 - CHC SD ==> '' bit 1 - CHC RD ==> '' bit 0 - future use. Note: The value returned for all Channel SD and RD leds is unreliable when the unit is performing DSU level self-tests and loopbacks.")
snmp553sUnitSerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 3, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(16, 16)).setFixedLength(16)).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sUnitSerialNumber.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sUnitSerialNumber.setDescription("This object returns the GDC 8-byte unit serial number in 16 nibbles. Each device has a unique serial number of the form: 00xx0pmmddyy#### where xx = Product type: 26 - NMS553C products 27 - NMS553D products 43 - 553SD products 44 - 553SC products p = Source of Software 1 - Software programmed by manufacturing ' 9 - Software programmed by engineering mm = Month of manufacture dd = Day of manufacture yy = Year of manufacture #### = Daily manufacturing sequence number.")
snmp553sSaveAllConfig = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("saveConfig", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sSaveAllConfig.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sSaveAllConfig.setDescription('Commands unit to save 553S CSU and DSU configuration variables in non-volatile memory and configure the unit as defined. When CSU and DSU configuration variables are modified they are not updated in the 553S hardware until a SaveConfig command is received via this object. When this value is set to SaveConfig(2) the current 553S csu and dsu configuration variables become active within the unit. The value of normal(1) will be returned when the initialization is complete. The value normal(1) cannot be set by management.')
# Unit configuration table (.3.4), indexed by snmp553sUnitConfigIndex.
snmp553sUnitConfigTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 3, 4), )
if mibBuilder.loadTexts: snmp553sUnitConfigTable.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sUnitConfigTable.setDescription('The SNMP553S Unit Configuration table. This table augments the GDC dsx1 configuration table, providing additional unit level configuration specific to the 553s.')
snmp553sUnitConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 3, 4, 1), ).setIndexNames((0, "SNMP553S-MGMT-MIB", "snmp553sUnitConfigIndex"))
if mibBuilder.loadTexts: snmp553sUnitConfigEntry.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sUnitConfigEntry.setDescription('The SNMP553S Unit Configuration table entry.')
snmp553sUnitConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 4, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sUnitConfigIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sUnitConfigIndex.setDescription('The index value which uniquely identifies the SNMP553S to which this entry is applicable. This has the form of a SCinstance which defines the slot, line, and drop of 553S with a sub-identifier value of unit(1).')
snmp553sSaveCsuConfig = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("saveConfig", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sSaveCsuConfig.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sSaveCsuConfig.setDescription('Commands unit to save 553S CSU configuration variables in non-volatile memory and configure the unit as defined. When CSU configuration variables are modified they are not updated in the 553S hardware until a SaveConfig command is received via this object. When this value is set to SaveConfig(2) the current 553S csu configuration variables become active within the unit. The value of normal(1) will be returned when the initialization is complete. The value normal(1) cannot be set by management.')
snmp553sShelfCommander = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sShelfCommander.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sShelfCommander.setDescription('Identifies the 553s unit as the shelf commander in a Datacomm 4-pak or Universal Systems Shelf. When configured as the shelf commander, the unit passes the SNMP diagnostics information using the Netcon protocol to other units in the shelf via the RS485 bus.')
snmp553sForceFakeMaster = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sForceFakeMaster.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sForceFakeMaster.setDescription('Forces the 553s as a Netcon Master unit. This option allows a unit without a master-in connection to a netcon controller behave as a Netcon master for configuring diagnostics on downstream (remote) units.')
snmp553sDaisyChainBps = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("bps75", 2), ("bps9600", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sDaisyChainBps.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sDaisyChainBps.setDescription('Indicates the data rate at which a remote unit will propogate NMS commands out of a daisy chain connection. None(1) indicates that no outbound daisy chain exists on this unit.')
# Channel configuration table (.3.5), indexed by snmp553sChannelConfigIndex.
snmp553sChannelConfigTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 3, 5), )
if mibBuilder.loadTexts: snmp553sChannelConfigTable.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sChannelConfigTable.setDescription('The SNMP553S Channel Configuration table. This table augments the GDC dsu Channel Configuration table, providing additional unit level configuration specific to the 553s.')
snmp553sChannelConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 3, 5, 1), ).setIndexNames((0, "SNMP553S-MGMT-MIB", "snmp553sChannelConfigIndex"))
if mibBuilder.loadTexts: snmp553sChannelConfigEntry.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sChannelConfigEntry.setDescription('The SNMP553S Channel Configuration table entry.')
snmp553sChannelConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 5, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sChannelConfigIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sChannelConfigIndex.setDescription('The index value which uniquely identifies the 553S to which this entry is applicable. This has the form of a SCinstance which defines the slot, line, drop, and channel of the 553S.')
snmp553sDCCCompatibilityMode = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("nms553", 1), ("nms510", 2), ("other", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sDCCCompatibilityMode.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sDCCCompatibilityMode.setDescription('Identifies the 553S DCC mode. The value nms553(1) indicates that the inband link is communicating with an NMS553 remote. The value nms510(2) indicates that the inband link is communicating with an NMS510 remote. This object works with with snmp553sUnitType; it can never be set to nms510(2) unless snmp553sUnitType is master(1). The value other(3) indicates that the compatibility mode has been specified elsewhere. This value can never be set by management operation.')
snmp553sSaveDsuConfig = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("saveConfig", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sSaveDsuConfig.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sSaveDsuConfig.setDescription('Commands unit to save 553S channel configuration variables in non-volatile memory and configure the unit as defined. When DSU configuration variables are modified they are not updated in the 553S hardware until a SaveConfig command is received via this object. When this value is set to SaveConfig(2) the current 553S dsu configuration variables become active within the unit. The value of normal(1) will be returned when the initialization is complete. The value normal(1) cannot be set by management.')
# Diagnostics table (.3.6), indexed by snmp553sDiagIndex.
snmp553sDiagTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 3, 6), )
if mibBuilder.loadTexts: snmp553sDiagTable.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sDiagTable.setDescription('The 553S Diagnostics table. This table augments the GDC dsx1 Diagnostics table and the GDC dsu diagnostics table.')
snmp553sDiagEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 3, 6, 1), ).setIndexNames((0, "SNMP553S-MGMT-MIB", "snmp553sDiagIndex"))
if mibBuilder.loadTexts: snmp553sDiagEntry.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sDiagEntry.setDescription('The SNMP553S Diagnostics table entry.')
snmp553sDiagIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 6, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sDiagIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sDiagIndex.setDescription('The index value which uniquely identifies the 553S to which this entry is applicable. This has the form of a SCinstance which defines the slot, line, and drop of the 553S with a sub-identifier value of unit(1).')
snmp553sDiagTestDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16))).clone(namedValues=NamedValues(("noLimit", 1), ("testTime1Min", 2), ("testTime2Mins", 3), ("testTime3Mins", 4), ("testTime4Mins", 5), ("testTime5Mins", 6), ("testTime6Mins", 7), ("testTime7Mins", 8), ("testTime8Mins", 9), ("testTime9Mins", 10), ("testTime10Mins", 11), ("testTime15Mins", 12), ("testTime20Mins", 13), ("testTime25Mins", 14), ("testTime30Mins", 15), ("testTime30Secs", 16)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sDiagTestDuration.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sDiagTestDuration.setDescription('Selects the duration to run a diagnostic test, in time. The value(1) noLimit indicates that the test should run until explicitly terminated. The default value is noLimit(1). This object is used for all tests defined in both the GDC dsx1 mib and the GDC dsu mib.')
snmp553sDiagProgPattern = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 6, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sDiagProgPattern.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sDiagProgPattern.setDescription('The 16 bit user programmable test pattern. This object works with the gdcDsx1SendCode object in that when gdcDsx1SendCode is set to sendProgPattern(4), the the value of this object is used as the 16 bit user programmable test pattern. This object is used for all tests defined in both the GDC dsx1 mib.')
# Alarm history table (.3.7), indexed by interface (SCinstance) plus alarm
# identifier OID; columns record count and first/last occurrence timestamps.
snmp553sAlarmHistoryTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 3, 7), )
if mibBuilder.loadTexts: snmp553sAlarmHistoryTable.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmHistoryTable.setDescription('The snmp553sAlarmHistoryTable contains entries that report the history of all supported alarms. The history consists of the the number of times the alarm has occurred since last history reset, the time of the first alarm occurence (hours,minutes, seconds,day,month,year), and the time of the last alarm occurrence (hours,minutes,seconds,day,month,year). The structure of the table is such that alarm status is supported on a unit or interface basis, and then on an alarm type basis within the interface. For simplicity sake and to support the reporting of status of all alarms, a unit is treated as an interface 1 in this table.')
snmp553sAlarmHistoryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1), ).setIndexNames((0, "SNMP553S-MGMT-MIB", "snmp553sAlarmHistoryIndex"), (0, "SNMP553S-MGMT-MIB", "snmp553sAlarmHistoryIdentifier"))
if mibBuilder.loadTexts: snmp553sAlarmHistoryEntry.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmHistoryEntry.setDescription('An entry in the GDC SNMP553S Alarm History table.')
snmp553sAlarmHistoryIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmHistoryIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmHistoryIndex.setDescription('The index value which uniquely identifies the interface to which this entry is applicable.')
snmp553sAlarmHistoryIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1, 2), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmHistoryIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmHistoryIdentifier.setDescription('The unique alarm identifier assigned to this alarm type. The format of this identifier is an OBJECT IDENTIFIER that has the following format: {iso(1) org(3) dod(6) internet(1) private(4) enterprises(1) gdc(498) xxx(x) alarm(z) yyy(y) where xxx(x) is the administratively assigned family object identifier (z) is the object identifier for alarms in the family defined MIB and yyy(y) is the administratively assigned alarm type identifier for this alarm.')
snmp553sAlarmCount = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmCount.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmCount.setDescription('The number of occurrences of this alarm. This objects value is incremented once for each time that the alarm occurs. This count is incremented regardless of whether or not the alarm is masked or is not reporteded because of the threshold configuration.')
# First-occurrence timestamp columns (hours/minutes/seconds/month/day/year).
snmp553sAlarmFirstOccurrenceHours = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmFirstOccurrenceHours.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmFirstOccurrenceHours.setDescription('The hour (0-23) the alarm first occurred.')
snmp553sAlarmFirstOccurrenceMinutes = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmFirstOccurrenceMinutes.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmFirstOccurrenceMinutes.setDescription('The minute (0-59) the alarm first occurred.')
snmp553sAlarmFirstOccurrenceSeconds = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmFirstOccurrenceSeconds.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmFirstOccurrenceSeconds.setDescription('The second (0-59) the alarm first occurred.')
snmp553sAlarmFirstOccurrenceMonth = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmFirstOccurrenceMonth.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmFirstOccurrenceMonth.setDescription('The month (1-12) the alarm first occurred.')
snmp553sAlarmFirstOccurrenceDay = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmFirstOccurrenceDay.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmFirstOccurrenceDay.setDescription('The day (1-31) the alarm first occurred.')
snmp553sAlarmFirstOccurrenceYear = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmFirstOccurrenceYear.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmFirstOccurrenceYear.setDescription('The year (0-99) the alarm first occurred.')
# Last-occurrence timestamp columns (same layout as first-occurrence).
snmp553sAlarmLastOccurrenceHours = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmLastOccurrenceHours.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmLastOccurrenceHours.setDescription('The hour (0-23) the alarm last occurred.')
snmp553sAlarmLastOccurrenceMinutes = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmLastOccurrenceMinutes.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmLastOccurrenceMinutes.setDescription('The minute (0-59) the alarm last occurred.')
snmp553sAlarmLastOccurrenceSeconds = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmLastOccurrenceSeconds.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmLastOccurrenceSeconds.setDescription('The second (0-59) the alarm last occurred.')
snmp553sAlarmLastOccurrenceMonth = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmLastOccurrenceMonth.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmLastOccurrenceMonth.setDescription('The month (1-12) the alarm last occurred.')
snmp553sAlarmLastOccurrenceDay = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmLastOccurrenceDay.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmLastOccurrenceDay.setDescription('The day (1-31) the alarm last occurred.')
snmp553sAlarmLastOccurrenceYear = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 7, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmLastOccurrenceYear.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmLastOccurrenceYear.setDescription('The year (0-99) the alarm last occurred.')
# Alarm maintenance table (.3.8): real-time-clock set/read columns and the
# clear-alarm-history action, indexed by snmp553sAlarmMaintenanceIndex.
snmp553sAlarmMaintenanceTable = MibTable((1, 3, 6, 1, 4, 1, 498, 6, 3, 8), )
if mibBuilder.loadTexts: snmp553sAlarmMaintenanceTable.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmMaintenanceTable.setDescription('The snmp553sAlarmMaintenanceTable contains entries that configure the real time clock and that clear the alarm history table.')
snmp553sAlarmMaintenanceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 6, 3, 8, 1), ).setIndexNames((0, "SNMP553S-MGMT-MIB", "snmp553sAlarmMaintenanceIndex"))
if mibBuilder.loadTexts: snmp553sAlarmMaintenanceEntry.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmMaintenanceEntry.setDescription('An entry in the GDC snmp553s Alarm Maintenance Table.')
snmp553sAlarmMaintenanceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 8, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sAlarmMaintenanceIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sAlarmMaintenanceIndex.setDescription('The index value which uniquely identifies the interface to which this entry is applicable.')
snmp553sClearAlarmHistory = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 8, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("clear", 1), ("norm", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sClearAlarmHistory.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sClearAlarmHistory.setDescription('Supports the action of clearing the alarm history table. When this object is set to clear(1), then the unit clears the alarm history table. The value of norm(2) will be returned when the clear is complete. The value of norm(2) can not be set by management.')
snmp553sRTCHours = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 8, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sRTCHours.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sRTCHours.setDescription('The hour of day (0-23) to be set or read.')
snmp553sRTCMinutes = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 8, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sRTCMinutes.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sRTCMinutes.setDescription('The minute of the hour (0-59) to be set or read.')
snmp553sRTCSeconds = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 8, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sRTCSeconds.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sRTCSeconds.setDescription('The second of the minute (0-59) to be set or read.')
snmp553sRTCMonth = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 8, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sRTCMonth.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sRTCMonth.setDescription('The month of the year (1-12) to be set or read.')
snmp553sRTCDay = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 8, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sRTCDay.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sRTCDay.setDescription('The day of the month (1-31) to be set or read.')
snmp553sRTCYear = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 8, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmp553sRTCYear.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sRTCYear.setDescription('The year (0-99) to be set or read.')
snmp553sTimeOfLastAlarmClear = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 6, 3, 8, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(6, 6)).setFixedLength(6)).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmp553sTimeOfLastAlarmClear.setStatus('mandatory')
if mibBuilder.loadTexts: snmp553sTimeOfLastAlarmClear.setDescription('Returns the time (hours,minutes,seconds,month,day,year) of the last alarm clear command sent to the unit. Octet 1 - Hour of last alarm clear (0-23) Octet 2 - Minute of last alarm clear (0-59) Octet 3 - Second of last alarm clear (0-59) Octet 4 - Month of last alarm clear (1-12) Octet 5 - Day of last alarm clear (1-31) Octet 6 - Year of last alarm clear (0-99).')
# Register every object defined above with the MIB builder so other modules
# can importSymbols() them from "SNMP553S-MGMT-MIB".
mibBuilder.exportSymbols("SNMP553S-MGMT-MIB", snmp553sDiagProgPattern=snmp553sDiagProgPattern, snmp553sExtModemPresent=snmp553sExtModemPresent, snmp553sAlarmHistoryIndex=snmp553sAlarmHistoryIndex, snmp553sDiagTestDuration=snmp553sDiagTestDuration, snmp553sForceFakeMaster=snmp553sForceFakeMaster, snmp553sAlarmHistoryTable=snmp553sAlarmHistoryTable, snmp553sAlarmFirstOccurrenceSeconds=snmp553sAlarmFirstOccurrenceSeconds, snmp553sAlarmHistoryEntry=snmp553sAlarmHistoryEntry, snmp553sDiagRxErrAlm=snmp553sDiagRxErrAlm, snmp553sRTCDay=snmp553sRTCDay, snmp553sLocalPwrFail=snmp553sLocalPwrFail, snmp553sCascadePresent=snmp553sCascadePresent, snmp553sAlarmData=snmp553sAlarmData, snmp553sManagementSource=snmp553sManagementSource, snmp553sDaisyChainBps=snmp553sDaisyChainBps, snmp553sAlarmLastOccurrenceYear=snmp553sAlarmLastOccurrenceYear, snmp553sSaveCsuConfig=snmp553sSaveCsuConfig, snmp553sLedStatus=snmp553sLedStatus, snmp553sAlarmFirstOccurrenceMinutes=snmp553sAlarmFirstOccurrenceMinutes, snmp553sAlarmLastOccurrenceMonth=snmp553sAlarmLastOccurrenceMonth, snmp553sChannelConfigIndex=snmp553sChannelConfigIndex, snmp553sMIBversion=snmp553sMIBversion, snmp553sSaveAllConfig=snmp553sSaveAllConfig, snmp553sProductType=snmp553sProductType, snmp553sDiagTable=snmp553sDiagTable, snmp553sUnitFailure=snmp553sUnitFailure, snmp553s=snmp553s, snmp553sShelfCommander=snmp553sShelfCommander, snmp553sAlarmLastOccurrenceMinutes=snmp553sAlarmLastOccurrenceMinutes, snmp553sAlarmLastOccurrenceSeconds=snmp553sAlarmLastOccurrenceSeconds, snmp553sLossOfSignal=snmp553sLossOfSignal, snmp553sRTCYear=snmp553sRTCYear, snmp553sAlarmFirstOccurrenceHours=snmp553sAlarmFirstOccurrenceHours, snmp553sAlarmLastOccurrenceDay=snmp553sAlarmLastOccurrenceDay, snmp553sDCCCompatibilityMode=snmp553sDCCCompatibilityMode, snmp553sAlarmMaintenanceEntry=snmp553sAlarmMaintenanceEntry, snmp553sNvRamCorrupt=snmp553sNvRamCorrupt, snmp553sBipolarViolations=snmp553sBipolarViolations, 
snmp553sMaintenanceEntry=snmp553sMaintenanceEntry, snmp553sClearAlarmHistory=snmp553sClearAlarmHistory, snmp553sMaintenanceTable=snmp553sMaintenanceTable, snmp553sAlarmMaintenanceIndex=snmp553sAlarmMaintenanceIndex, snmp553sAlarmFirstOccurrenceYear=snmp553sAlarmFirstOccurrenceYear, snmp553sChannelConfigEntry=snmp553sChannelConfigEntry, snmp553sChannelConfigTable=snmp553sChannelConfigTable, snmp553sCrcErrors=snmp553sCrcErrors, snmp553sStatusChange=snmp553sStatusChange, snmp553sDiagIndex=snmp553sDiagIndex, snmp553sReceivedYellow=snmp553sReceivedYellow, snmp553sExcessiveZeros=snmp553sExcessiveZeros, snmp553sMaintenanceIndex=snmp553sMaintenanceIndex, snmp553sPowerUpAlm=snmp553sPowerUpAlm, snmp553sAlarmFirstOccurrenceMonth=snmp553sAlarmFirstOccurrenceMonth, snmp553sUnitType=snmp553sUnitType, snmp553sRTCMinutes=snmp553sRTCMinutes, snmp553sAis=snmp553sAis, snmp553sAlarmHistoryIdentifier=snmp553sAlarmHistoryIdentifier, snmp553sLossOfFrame=snmp553sLossOfFrame, snmp553sRTCMonth=snmp553sRTCMonth, snmp553sControlledSlips=snmp553sControlledSlips, snmp553sUnitConfigTable=snmp553sUnitConfigTable, snmp553sc=snmp553sc, snmp553sDiagEntry=snmp553sDiagEntry, snmp553sUnsoTest=snmp553sUnsoTest, snmp553sAlarmCount=snmp553sAlarmCount, snmp553sRTCHours=snmp553sRTCHours, snmp553sTimingLoss=snmp553sTimingLoss, snmp553sUnitConfigEntry=snmp553sUnitConfigEntry, snmp553sUnitConfigIndex=snmp553sUnitConfigIndex, snmp553sAlarmFirstOccurrenceDay=snmp553sAlarmFirstOccurrenceDay, snmp553sUnitSerialNumber=snmp553sUnitSerialNumber, snmp553sNoResponseAlm=snmp553sNoResponseAlm, snmp553sAlarmMaintenanceTable=snmp553sAlarmMaintenanceTable, snmp553sUnavailSignalState=snmp553sUnavailSignalState, snmp553sMbiLock=snmp553sMbiLock, snmp553sTimeOfLastAlarmClear=snmp553sTimeOfLastAlarmClear, snmp553sRTCSeconds=snmp553sRTCSeconds, snmp553sAlarmLastOccurrenceHours=snmp553sAlarmLastOccurrenceHours, snmp553sLowAverageDensity=snmp553sLowAverageDensity, snmp553sSaveDsuConfig=snmp553sSaveDsuConfig)
| 35,725 | 14,406 |
from nested_dict import nested_dict
from functools import partial
import torch
from torch.nn.init import kaiming_normal_
from torch.nn.parallel._functions import Broadcast
from torch.nn.parallel import scatter, parallel_apply, gather
import torch.nn.functional as F
from torch.distributions import Normal, Independent, kl
import pdb
import numpy as np
import math
import cv2
from utils.config import *
def str2bool(v):
    """Convert a command-line string to a boolean.

    codes from : https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse

    Args:
        v: string such as 'yes'/'no', 'true'/'false', 't'/'f', 'y'/'n',
           '1'/'0' (case-insensitive).

    Returns:
        bool: the parsed value.

    Raises:
        argparse.ArgumentTypeError: if the string is not a recognized boolean.
    """
    # BUG FIX: argparse was referenced below but never imported anywhere in
    # this module, so the error path raised NameError instead of the intended
    # ArgumentTypeError. Import it locally to keep the module header unchanged.
    import argparse
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')
def cuda(tensor, is_cuda):
    """Move *tensor* to the GPU when *is_cuda* is True; otherwise return it unchanged."""
    return tensor.cuda() if is_cuda else tensor
def postprocess_prediction(prediction, size=None, print_info=True, ostu_th=False):
    """
    Postprocess a saliency map: shift/scale to 0-255, Gaussian-blur, resize,
    renormalize, and optionally binarize with Otsu thresholding.
    args:
        prediction: 2-D numpy array with raw saliency values
        size: original (H,W) of the image; defaults to MNIST_RESIZE when None
        print_info: if True, print the raw min/max before normalization
        ostu_th: if True, return an Otsu-thresholded binary map instead
    returns:
        numpy array with saliency map normalized 0-255
        NOTE(review): the docstring used to claim int8, but the final rescale
        produces a float array on the non-zero path — confirm callers do not
        depend on dtype.
    """
    if print_info:
        print('max %.4f min %.4f'%(np.max(prediction), np.min(prediction))) # l1 norm is much larger than l2? but maps are similar
    # Shift so the minimum becomes 0 before scaling to 0-255.
    prediction = prediction - np.min(prediction)
    # prediction = prediction - np.mean(prediction)
    # prediction[prediction<0] = 0
    # print('max %.4f min %.4f'%(np.max(prediction), np.min(prediction))) # l1 norm is much larger than l2? but maps are similar
    if np.max(prediction) != 0:
        # Scale to the full 0-255 range.
        saliency_map = (prediction/np.max(prediction) * 255).astype(np.uint8)
    else:
        # All-zero map: nothing to scale.
        saliency_map = prediction.astype(np.uint8)
    if size is None:
        size = MNIST_RESIZE  # assumes MNIST_RESIZE is an (H, W) pair from utils.config — TODO confirm
    # resize back to original size
    saliency_map = cv2.GaussianBlur(saliency_map, (7, 7), 0)  # smooth before resizing
    saliency_map = cv2.resize(saliency_map, (size[1], size[0]), interpolation=cv2.INTER_CUBIC)  # cv2 expects (W, H)
    # clip again
    # saliency_map = np.clip(saliency_map, 0, 255)
    if np.max(saliency_map)!=0:
        # Renormalize: cubic interpolation can overshoot and change the maximum.
        saliency_map = saliency_map.astype('float') / np.max(saliency_map) * 255.
    else:
        print('Zero saliency map.')
    if ostu_th:
        # Otsu picks the threshold automatically; 0/255 are the output levels.
        _, th2 = cv2.threshold(saliency_map, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        # ret2, th2 = cv2.threshold(saliency_map, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        return th2
    return saliency_map
def distillation(y, teacher_scores, labels, T, alpha):
    """Knowledge-distillation loss.

    Mixes the temperature-scaled KL divergence between the student logits
    ``y`` and the teacher logits with the hard-label cross entropy:
    alpha * KL + (1 - alpha) * CE.
    """
    student_log_probs = F.log_softmax(y / T, dim=1)
    teacher_probs = F.softmax(teacher_scores / T, dim=1)
    kl_term = F.kl_div(student_log_probs, teacher_probs, reduction='sum')
    kl_term = kl_term * (T ** 2) / y.shape[0]
    ce_term = F.cross_entropy(y, labels)
    return alpha * kl_term + (1. - alpha) * ce_term
def distillation_my(y, teacher_scores, labels, T, alpha):
    """Distillation loss with the cross-entropy term converted to bits.

    Same mixture as ``distillation`` except the hard-label cross entropy is
    divided by log(2) (nats -> bits) before weighting:
    alpha * KL + (1 - alpha) * CE / log(2).
    """
    log_p_student = F.log_softmax(y / T, dim=1)
    p_teacher = F.softmax(teacher_scores / T, dim=1)
    soft_loss = F.kl_div(log_p_student, p_teacher, reduction='sum') * (T ** 2) / y.shape[0]
    hard_loss = F.cross_entropy(y, labels) / math.log(2)  # divide log(2)
    return soft_loss * alpha + hard_loss * (1. - alpha)
def at_my(x):
    """Spatial attention map: L2-normalized mean of squared activations over channels (keeps H x W)."""
    attention = x.pow(2).mean(dim=1)
    return F.normalize(attention)
def at(x):
    """Flattened attention map: channel-wise mean of squares, flattened per sample and L2-normalized."""
    batch = x.size(0)
    energy = x.pow(2).mean(1).view(batch, -1)
    return F.normalize(energy)
def at_loss(x, y):
    """Attention-transfer loss: MSE between the normalized attention maps of
    x and y, after resizing y to x's spatial dimensions when they differ."""
    if x.size()[-2:] != y.size()[-2:]:
        y = F.interpolate(y, x.size()[-2:])
    residual = at(x) - at(y)
    return residual.pow(2).mean()
def at_loss_my_new(x, y):
    """Plain MSE between raw feature maps, resizing y to x's spatial size first."""
    if x.size()[-2:] != y.size()[-2:]:
        y = F.interpolate(y, x.size()[-2:])
    delta = x - y
    return delta.pow(2).mean()
# def kl_divergence(self, latent_space1, latent_space2):
# kl_div = kl.kl_divergence(latent_space1, latent_space2)
# return kl_div
def at_loss_my_dist(s, t):
    """Mean KL divergence between two torch distributions (e.g. student s vs teacher t)."""
    divergence = kl.kl_divergence(s, t)
    return divergence.mean()
def at_loss_my(x, y):
    """MSE between x and y, where y is resized to x's spatial size and then
    flattened to (batch, -1).

    NOTE(review): x is compared against the flattened y, so callers are
    expected to pass x already flattened — confirm at call sites.
    """
    if x.size()[-2:] != y.size()[-2:]:
        y = F.interpolate(y, x.size()[-2:])
    y = y.view(y.size(0), -1)
    residual = x - y
    return residual.pow(2).mean()
# def at_loss_my(x, y):
# # pdb.set_trace()
# if y.size()[-2:] != x.size()[-2:]:
# y = F.interpolate(y, x.size()[-2:])
# y = y.view(y.size(0), -1)
# y = y * 0.25 # _d4
# return (at(x) - y).pow(2).mean()
def cast(params, dtype='float'):
    """Recursively cast tensors (or a nested dict of tensors) to *dtype*,
    moving them to the GPU first when CUDA is available."""
    if isinstance(params, dict):
        return {key: cast(value, dtype) for key, value in params.items()}
    target = params.cuda() if torch.cuda.is_available() else params
    return getattr(target, dtype)()
def conv_params(ni, no, k=1):
    """Kaiming-normal-initialized conv weight of shape (no, ni, k, k)."""
    weight = torch.Tensor(no, ni, k, k)
    return kaiming_normal_(weight)
def linear_params(ni, no):
    """Parameter dict for a linear layer: Kaiming-normal weight, zero bias."""
    weight = torch.Tensor(no, ni)
    kaiming_normal_(weight)  # in-place init
    return {'weight': weight, 'bias': torch.zeros(no)}
def bnparams(n):
    """Batch-norm parameter dict for n channels: random weight, zero bias,
    zero running mean and unit running variance."""
    stats = {
        'weight': torch.rand(n),
        'bias': torch.zeros(n),
        'running_mean': torch.zeros(n),
        'running_var': torch.ones(n),
    }
    return stats
def data_parallel(f, input, params, mode, device_ids, output_device=None):
    """Run ``f(input, params, mode)`` replicated across *device_ids*,
    gathering the outputs onto *output_device* (defaults to the first id).

    With a single device the call is made directly, without any broadcast.
    """
    device_ids = list(device_ids)
    if output_device is None:
        output_device = device_ids[0]
    if len(device_ids) == 1:
        return f(input, params, mode)

    # Broadcast every parameter tensor to all devices, then rebuild one
    # name->tensor dict per device from the flat broadcast result.
    names = list(params.keys())
    flat = Broadcast.apply(device_ids, *params.values())
    n = len(names)
    replica_params = [dict(zip(names, flat[d * n:(d + 1) * n]))
                      for d in range(len(device_ids))]

    workers = [partial(f, params=p, mode=mode) for p in replica_params]
    scattered = scatter([input], device_ids)
    results = parallel_apply(workers, scattered)
    return gather(results, output_device)
def flatten(params):
    """Flatten a nested dict of tensors into {'outer.inner...': tensor}.

    Keys along the path are joined with '.', and None leaves are dropped —
    the same contract as the previous implementation, but without the
    third-party ``nested_dict`` dependency (a plain recursive walk over
    built-in dicts is all that is needed here).
    """
    flat = {}

    def _walk(prefix, node):
        # One-line purpose: depth-first walk accumulating dotted keys.
        for key, value in node.items():
            path = prefix + (key,)
            if isinstance(value, dict):
                _walk(path, value)
            elif value is not None:
                flat['.'.join(path)] = value

    _walk((), params)
    return flat
def batch_norm(x, params, base, mode):
    """Functional batch norm whose parameters live in a flat dict under the
    key prefix *base* (e.g. params['group0.bn0.weight']); *mode* selects
    training (batch statistics) vs eval (running statistics)."""
    def lookup(suffix):
        return params[base + '.' + suffix]

    return F.batch_norm(x,
                        weight=lookup('weight'),
                        bias=lookup('bias'),
                        running_mean=lookup('running_mean'),
                        running_var=lookup('running_var'),
                        training=mode)
def print_tensor_dict(params):
    """Pretty-print one line per entry: index, key, shape, dtype name, requires_grad."""
    pad = max(len(name) for name in params.keys())
    for idx, (name, tensor) in enumerate(params.items()):
        print(str(idx).ljust(5),
              name.ljust(pad + 3),
              str(tuple(tensor.shape)).ljust(23),
              torch.typename(tensor),
              tensor.requires_grad)
def set_requires_grad_except_bn_(params):
    """Enable gradients in place on every tensor except batch-norm running statistics."""
    for name, tensor in params.items():
        if name.endswith(('running_mean', 'running_var')):
            continue  # running stats are buffers, not trainable parameters
        tensor.requires_grad = True
| 6,768 | 2,625 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class DirectoryType(str, Enum):
    """Allowed values for the directory-type property (AutoRest-generated enum)."""

    active_directory = "ActiveDirectory"
class OSType(str, Enum):
    """Operating system choices (AutoRest-generated enum)."""

    windows = "Windows"
    linux = "Linux"
class Tier(str, Enum):
    """Service tier choices (AutoRest-generated enum)."""

    standard = "Standard"
    premium = "Premium"
class HDInsightClusterProvisioningState(str, Enum):
    """Provisioning states reported for an HDInsight cluster (AutoRest-generated enum)."""

    in_progress = "InProgress"
    failed = "Failed"
    succeeded = "Succeeded"
    canceled = "Canceled"
    deleting = "Deleting"
class AsyncOperationState(str, Enum):
    """Terminal and in-flight states of an asynchronous operation (AutoRest-generated enum)."""

    in_progress = "InProgress"
    succeeded = "Succeeded"
    failed = "Failed"
| 1,030 | 299 |
from fastapi import FastAPI, Response
from pydantic import BaseModel
import src.option as option
import src.plot as plot
app = FastAPI()
@app.get("/")
async def root():
    """Landing endpoint: returns the service name, version string and project URL."""
    return "Chia Signature Version: 0.0.1 https://github.com/Pow-Duck/ChiaSignature"
class InputDataModel(BaseModel):
    """Request body for POST /signature: the two public keys used to create the plot."""

    # Hex-encoded key strings — presumably Chia farmer/pool public keys; verify against plot.create_plots.
    farmer_public_key: str
    pool_key: str
@app.post("/signature", status_code=200)
async def signature(input_data: InputDataModel, response: Response):
    """Create a Chia plot from the supplied farmer and pool public keys.

    Returns the plot id and memo via option.api_return on success; on any
    failure, sets HTTP 500 and returns a generic error payload.
    """
    try:
        # BUG FIX: plot.create_plots was previously invoked twice back to
        # back — the first (expensive) call's result was simply discarded.
        # A single call is both correct and roughly twice as fast.
        (plot_id1, plot_memo2) = plot.create_plots(input_data.farmer_public_key, input_data.pool_key)
        return option.api_return(plot_id1, plot_memo2, True, None)
    except Exception as e:
        response.status_code = 500
        print("err: ", e)
        return option.api_return(None, None, False, "Failed to generate, please verify that the parameters are correct")
| 913 | 301 |
from tkinter import *
from PIL import ImageTk, Image ,ImageDraw, ImageFont, ImageFilter
import json
import html5lib
import plyer
import urllib.request
import imageio
import webbrowser
import requests
import bs4
def notif_and_helplines():
    """Open a Toplevel window showing the latest MOHFW COVID-19 notifications,
    helpline numbers and advisories plus worldwide counters scraped live from
    mohfw.gov.in and worldometers.info (requires network access and the
    Images/ assets next to the script)."""
    # Fetch and parse the Ministry of Health & Family Welfare home page.
    url = "https://www.mohfw.gov.in/"
    html_data = requests.get(url)
    bs = bs4.BeautifulSoup(html_data.text,'html.parser')
    newWindow = Toplevel()
    newWindow.title("NOTIFICATIONS, HELPLINES AND ADVISORIES")
    newWindow.state('zoomed')
    newWindow.iconbitmap(r'Images\coronavirus_image_UXL_icon.ico')
    def shift():
        # Scroll the "marquee" text leftwards; once it has fully left the
        # canvas, restart it from the right edge. Re-schedules itself.
        x1,y1,x2,y2 = canvas.bbox("marquee")
        if(x2<0 or y1<0):
            x1 = canvas.winfo_width()
            y1 = canvas.winfo_height()//2
            canvas.coords("marquee",x1,y1)
        else:
            canvas.move("marquee", -2, 0)
        canvas.after(1000//fps,shift)
    labe1 = Label(newWindow, text = " LATEST NOTIFICATIONS " , font = "Times 28 bold roman" , pady = 10, padx = 20 ,fg = "#EC4D37", bg = "black").pack()
    labe2 = Label(newWindow, text = " " , font = "Times 15 bold roman").pack()
    canvas = Canvas(newWindow,bg = '#EC4D37')
    canvas.pack(fill = BOTH, expand = 1)
    # Marquee text comes from the site's "tested" banner span.
    text_var = bs.find("span" ,class_ = "tested").get_text()
    text = canvas.create_text(0,-2000, text = text_var, font = ('Times New Roman',20,'bold'),fill = 'black',tags = ("marquee",),anchor = 'w')
    # Shrink the canvas to exactly fit the rendered text.
    x1,y1,x2,y2 = canvas.bbox("marquee")
    width = x2-x1
    height = y2-y1
    canvas['width'] = width
    canvas['height'] = height
    fps = 45  # marquee refresh rate used by shift() above
    shift()
    def labe3_open():
        # Advisory PDF linked from the technical-enquiry label.
        webbrowser.open_new('https://cdn.s3waas.gov.in/s30777d5c17d4066b82ab86dff8a46af6f/uploads/2020/05/2020050898.pdf')
    labe3 = Label(newWindow, text = " For any technical enquiry with respect to COVID-19, you may kindly email on technicalquery.covid19@gov.in Aarogya Setu IVRS ✆ 1921 ",
                  font = "Times 15 normal roman" , pady = 3, padx = 170 ,fg = "red", bg = "gray13", cursor = "hand2")
    labe3.bind("<Button-1>", lambda e: labe3_open())
    labe3.pack()
    labe4 = Label(newWindow,text = " Helpline Number : +91-11-23978046 Toll Free : 1075 Helpline Email ID : ncov2019@gov.in ", font = "Times 13 normal roman" ,fg = "black", bg = "yellow",padx = 420 ).pack()
    def labe5_open():
        # State-wise COVID hospital list PDF.
        url2 = "https://www.mohfw.gov.in/pdf/StatewiseCovidHospitalslink19062020.pdf"
        webbrowser.open_new(url2)
    labe5 = Label(newWindow,text = " COVID-19 Facilities in States & Union Territories ",font = "Times 12 bold roman" ,fg = "blue",
                  bg = "yellow", cursor = "hand2", padx = 620 )
    labe5.bind("<Button-1>", lambda e: labe5_open())
    labe5.pack()
    # Scrollable frame holding the updates / FAQ / statistics canvas.
    frame = Frame(newWindow,width = 900,height = 900)
    frame.pack(expand = True, fill = BOTH)
    canvas1 = Canvas(frame,width = 900, height = 900,scrollregion = (0,0,1000,1000))
    hbar = Scrollbar(frame, orient = HORIZONTAL)
    hbar.pack(side = BOTTOM,fill = X)
    hbar.config(command = canvas1.xview)
    vbar = Scrollbar(frame,orient = VERTICAL)
    vbar.pack(side = LEFT,fill = Y)
    vbar.config(command = canvas1.yview)
    canvas1.config(width = 900,height = 900)
    canvas1.config(xscrollcommand = hbar.set, yscrollcommand = vbar.set)
    canvas1.pack(side=LEFT,expand = True,fill = BOTH)
    # Update boxes and FAQ block scraped from the MOHFW page layout.
    # NOTE(review): these chained find() calls are tightly coupled to the
    # site's markup and raise AttributeError if the layout changes.
    info_div1 = bs.find("div" , class_ = "main-body-content").find("section" ,class_ = "site-update").find("div" , class_ = "container").find("div" , class_ = "row").find_all("div" , class_ = "update-box")
    info_div2 = bs.find("div" , class_ = "main-body-content").find_all("section" ,class_ = "site-update")[4].find("div" , class_ = "container").find("div" , class_ = "row").find("div" , class_ = "site-faq").find("div" , class_ = "faq-content")
    # One click handler per update box: open the linked document in a browser.
    def Button_1_open():
        webbrowser.open_new(info_div1[0].find("a").get('href'))
    def Button_2_open():
        webbrowser.open_new(info_div1[1].find("a").get('href'))
    def Button_3_open():
        webbrowser.open_new(info_div1[2].find("a").get('href'))
    def Button_4_open():
        webbrowser.open_new(info_div1[3].find("a").get('href'))
    def Button_5_open():
        webbrowser.open_new(info_div1[4].find("a").get('href'))
    def Button_6_open():
        webbrowser.open_new(info_div1[5].find("a").get('href'))
    def Button_7_open():
        webbrowser.open_new(info_div2.find("a").get('href'))
    render = ImageTk.PhotoImage(Image.open ("Images/coronavirus3.png").resize((300,40) , Image.ANTIALIAS))
    covid_img = Label(canvas1)
    covid_img.image = render  # keep a reference so the PhotoImage is not garbage-collected
    canvas1.create_image(180, 45,image = render)
    f1 = ('Bookman Old Style', "25", "bold roman")
    text_1 = Label(canvas1, text = " Updates ",fg = "gray20" , font = f1)
    canvas_text1 = canvas1.create_window(415, 45, window = text_1)
    '''text = info_div1[0].find("a").get_text().strip()'''
    # Buttons 1, 2 and 4 use hard-coded captions; 3, 5 and 6 take theirs from the scraped boxes.
    button_1 = Button(canvas1, text = " COVID-19 Vaccination of Pregnant Women PosterEnglish " ,wraplength = 300,command = Button_1_open , cursor = "hand2", fg = "blue" , font = "serif 10 normal roman" , padx = 4, pady = 4,height = 5,width = 57)
    canvas_button1 = canvas1.create_window(250, 150, window = button_1)
    button_2 = Button(canvas1, text = " Counseling booklet for Frontline workers and Vaccinators " ,wraplength = 300,command = Button_2_open, cursor = "hand2", fg = "blue" , font = "serif 10 normal roman", padx = 4, pady = 4,height = 5,width = 57)
    canvas_button2 = canvas1.create_window(250, 250, window = button_2)
    button_3 = Button(canvas1, text = info_div1[2].find("a").get_text().strip() ,wraplength = 300,command = Button_3_open, cursor = "hand2", fg = "blue" , font = "serif 10 normal roman", padx = 4, pady = 4,height = 5,width = 57)
    canvas_button3 = canvas1.create_window(250, 350, window = button_3)
    button_4 = Button(canvas1, text = " Toolkit for Youth Campaign on COVID Appropriate Behaviour, Vaccination drive and Psychosocial well-being " ,wraplength = 300,command = Button_4_open, cursor = "hand2", fg = "blue" , font = "serif 10 normal roman", padx = 4, pady = 4,height = 5,width = 57)
    canvas_button4 = canvas1.create_window(250, 450, window = button_4)
    button_5 = Button(canvas1, text = info_div1[4].find("a").get_text().strip() ,wraplength = 300,command = Button_5_open, cursor = "hand2", fg = "blue" , font = "serif 10 normal roman", padx = 4, pady = 4,height = 5,width = 57)
    canvas_button5 = canvas1.create_window(250, 550, window = button_5)
    button_6 = Button(canvas1, text = info_div1[5].find("a").get_text().strip() ,wraplength = 300,command = Button_6_open, cursor = "hand2", fg = "blue" , font = "serif 10 normal roman" , padx = 4, pady = 4,height = 5,width = 57)
    canvas_button6 = canvas1.create_window(250, 650, window = button_6)
    text_2 = Label(canvas1, text = " FAQ's ", fg = "gray20" , font = "Times 25 bold roman")
    canvas_text2 = canvas1.create_window(80, 755, window = text_2)
    button_7 = Button(canvas1, text = info_div2.get_text() ,wraplength = 500,command = Button_7_open, cursor = "hand2", fg = "blue" , font = "serif 10 normal roman" , padx = 4, pady = 4,height = 5,width = 57)
    canvas_button7 = canvas1.create_window(250, 855, window = button_7)
    text_3 = Label(canvas1, text = " source: " , font = "Times 15 bold roman")
    # NOTE(review): canvas_text2 is reassigned here, shadowing the FAQ heading
    # handle created above — harmless since neither handle is used again.
    canvas_text2 = canvas1.create_window(160, 970, window = text_3)
    def call_back(event):
        # Open whatever URL text the clicked widget displays.
        webbrowser.open_new(event.widget.cget("text"))
    lbl = Label(canvas1, text = r"www.mohfw.gov.in", fg = "blue" , cursor = "hand2",font = "Times 13 bold roman")
    canvas_lbl = canvas1.create_window(280, 970, window = lbl)
    lbl.bind("<Button-1>", call_back)
    render2 = ImageTk.PhotoImage(Image.open ("Images/vaccination.png").resize((570,550) , Image.ANTIALIAS))
    img2 = Label(frame, image = render2)
    img2.image = render2  # keep a reference alive
    img2.pack(side = RIGHT)
    # Worldwide statistics scraped from worldometers.info.
    url2 = "https://www.worldometers.info/coronavirus/"
    html_data2 = requests.get(url2)
    bs2 = bs4.BeautifulSoup(html_data2.text,'html.parser')
    info_data = bs2.find("div" , class_ = "content-inner").find_all("div" , id = "maincounter-wrap")
    f = ("Times", "20", "bold italic")
    text1 = Label(canvas1, text = " Worldwide " , font = "Times 25 bold roman" , width = 17)
    canvas1.create_window(750, 45, window = text1)
    # The three main counters — presumably cases / deaths / recovered, in
    # the order the site lists them; verify against the live page.
    text2 = Label(canvas1, text = info_data[0].get_text() , font = f , bg = "light blue", height = 4, width = 17,borderwidth = 1, relief = "solid")
    canvas1.create_window(750, 150, window = text2)
    text3 = Label(canvas1, text = info_data[1].get_text() , font = f , bg = "tomato", height = 4, width = 17,borderwidth = 1, relief = "solid")
    canvas1.create_window(750, 300, window = text3)
    text4 = Label(canvas1, text = info_data[2].get_text() , font = f , bg = "light green", height = 4, width = 17,borderwidth = 1, relief = "solid")
    canvas1.create_window(750, 450, window = text4)
    # Active vs closed case summaries.
    info_data2 = bs2.find("div" , class_ = "content-inner").find_all("div" , class_ = "col-md-6")
    text5 = Label(canvas1, text = " Active Cases " + "\n" + "────────────────────" + "\n\n" + info_data2[0].find("div" , class_ = "number-table-main").get_text() + "\n" + "currently infected patients" + "\n" , font = "Times 19 bold italic" , bg = "gray85", height = 6, width = 24,borderwidth = 1, relief = "solid")
    canvas1.create_window(750, 650, window = text5)
    text6 = Label(canvas1, text = " Closed Cases " + "\n" + "────────────────────" + "\n\n" + info_data2[1].find("div" , class_ = "number-table-main").get_text() + "\n" + "cases which had an outcome" + "\n" , font = "Times 19 bold italic" , bg = "gray85", height = 6, width = 24,borderwidth = 1, relief = "solid")
    canvas1.create_window(750, 880, window = text6)
| 9,720 | 3,844 |
from easy_differ.easy_differ import list_diff, text_diff | 56 | 20 |
# -*- coding: utf-8 -*-
from ..base.xfs_account import XFSAccount
class CloudsharesNet(XFSAccount):
    """pyLoad account plugin for cloudshares.net.

    All login/account behavior comes from the XFSAccount base class; this
    subclass only supplies the plugin metadata and the host domain.
    """

    __name__ = "CloudsharesNet"
    __type__ = "account"
    __version__ = "0.03"
    __status__ = "testing"

    __pyload_version__ = "0.5"

    __description__ = """Cloudshares.net account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]

    # Domain consumed by the XFSAccount machinery.
    PLUGIN_DOMAIN = "cloudshares.net"
import numpy as np
import xobjects as xo
import xfields as xf
def test_mean_and_std():
    """Check xf.mean_and_std against numpy on every available xobjects context."""
    for context_cls in (xo.ContextCpu, xo.ContextPyopencl, xo.ContextCupy):
        if context_cls not in xo.context.available:
            continue
        print(f"Test {context_cls}")
        context = context_cls()

        n_x = 100
        host_arr = np.array(np.random.rand(n_x))
        dev_arr = context.nparray_to_context_array(host_arr)

        # Unweighted statistics must match numpy.
        mean_val, std_val = xf.mean_and_std(dev_arr)
        assert np.isclose(mean_val, np.mean(host_arr))
        assert np.isclose(std_val, np.std(host_arr))

        # Uniform weights must leave the statistics unchanged.
        host_weights = np.zeros_like(host_arr) + .2
        dev_weights = context.nparray_to_context_array(host_weights)
        mean_val, std_val = xf.mean_and_std(dev_arr, weights=dev_weights)
        assert np.isclose(mean_val, np.mean(host_arr))
        assert np.isclose(std_val, np.std(host_arr))
| 790 | 311 |