content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
###############################################################################
#Convert genbank to fasta format
#
#@requires Biopython
#@author:charles.hefer@gmail.com
#@version:0.1
###############################################################################
import optparse
from Bio import SeqIO
def __main__():
    """Parse the command line options and convert a GenBank file to FASTA.

    Reads the GenBank file given via -i/--input and writes all of its
    records to the file given via -o/--output in FASTA format.  Exits
    through parser.error() if either path is missing.
    """
    parser = optparse.OptionParser()
    parser.add_option("-i", "--input", default=None, dest="input",
                      help="The input file")
    # "--ouput" (sic) is kept so existing scripts keep working; "--output"
    # is the corrected spelling of the long option (the help text and the
    # error message below already said "output").
    parser.add_option("-o", "--ouput", "--output", default=None, dest="output",
                      help="The output file")
    (options, args) = parser.parse_args()
    if not options.input:
        parser.error("Need to specify the input genbank file")
    if not options.output:
        parser.error("Need to specify the output fasta file")
    # Materialise all records first so the input handle is closed before
    # the output file is opened.
    with open(options.input, "r") as handle:
        entries = list(SeqIO.parse(handle, "genbank"))
    with open(options.output, "w") as handle:
        SeqIO.write(entries, handle, "fasta")


if __name__ == "__main__":
    __main__()
| [
29113,
29113,
7804,
4242,
21017,
201,
198,
2,
3103,
1851,
2429,
17796,
284,
3049,
64,
5794,
201,
198,
2,
201,
198,
2,
31,
47911,
8436,
404,
7535,
201,
198,
2,
31,
9800,
25,
10641,
829,
13,
258,
2232,
31,
14816,
13,
785,
201,
198,
... | 2.874317 | 366 |
# -*- encoding: utf-8
import pygments
import pygments.token
import pygments.lexers
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
198,
198,
11748,
12972,
11726,
198,
11748,
12972,
11726,
13,
30001,
198,
11748,
12972,
11726,
13,
2588,
364,
628
] | 2.931034 | 29 |
import ddsp.training
import gin
| [
11748,
49427,
2777,
13,
34409,
198,
11748,
39733,
628
] | 3.666667 | 9 |
#!/usr/bin/python3
import os
import sys
import json
from common import place
from collections import defaultdict
import argparse
"""
result: {
startTime: "2021-01-01..."
client: 'combi',
players: 2,
width: 15,
height: 15,
deadline: 2,
offset: 4,
occupiedRatio: 0.1241
myStartProbability: 1.2
minimaxActivationValue: 0.01
filterValue: 1
place: 1
numPlayers: 5
enemyNames: set("name",...)
ourName: "name"
endActiveEnemies: set("name")
}
"""
# Result-dictionary keys for which per-value win/loss statistics are reported.
ATTRIBUTES = [
    "myStartProbability",
    "minimaxActivationValue",
    "filterValue"
]
# Known client identifiers (values of a result's "client" field).
CLIENTS =[
    "combi",
    "minimax",
    "rollouts",
    "probability",
    "basic"
]

if __name__ == '__main__':
    # NOTE(review): indentation was reconstructed from context; get_parser(),
    # get_results(), get_names(), get_games_by_num_players(),
    # get_attribute_stats(), get_client_stats() and
    # create_attribute_diagram() are presumably defined elsewhere in this
    # file — not visible here.
    args = get_parser().parse_args()
    # Require at least one report flag, otherwise show usage and bail out.
    if not args.all and not args.names and not args.client and not args.num and not args.attributes:
        print("which stats do you want to have?")
        get_parser().print_usage()
        exit(1)
    results = get_results(args.files)
    print()
    if args.names or args.all:
        # Per-opponent-name statistics: games played, games won, win ratio.
        enemy_names, our_names, lost_names = get_names(results)
        print("we had the names", ", ".join(our_names))
        print(len(enemy_names), "names occured, lost against", len(lost_names), "=", '{:.1f}%'.format(100 * len(lost_names) / len(enemy_names)), "of names")
        print("total\twon\tratio\tname")
        for name in lost_names:
            print(enemy_names[name], enemy_names[name] - lost_names[name], "{:.1f}%".format((enemy_names[name] - lost_names[name]) / enemy_names[name] * 100), name, sep="\t")
        print()
    if args.num or args.all:
        # Statistics grouped by the number of players in a game.
        print("total\twon\tratio\tplayers")
        total_games, won_games, win_ratio = get_games_by_num_players(results)
        for num_players in total_games:
            print(total_games[num_players], won_games[num_players], "{:.1f}%".format(win_ratio[num_players]*100), num_players, sep="\t")
        print()
    if args.attributes or args.all:
        # One table per configuration attribute (see ATTRIBUTES above).
        total_games, won_games, win_ratio = get_attribute_stats(results)
        for attribute in total_games:
            print(attribute)
            print("total\twon\tratio\tvalue")
            for value in total_games[attribute]:
                print(total_games[attribute][value], won_games[attribute][value], "{:.1f}%".format(100*win_ratio[attribute][value]), value, sep="\t")
            print()
    # NOTE(review): placed at top level like the other report flags — the
    # original nesting is not recoverable from the stripped indentation.
    if args.output is not None:
        create_attribute_diagram(results, args.output)
    if args.client or args.all:
        # Statistics grouped by client implementation.
        total_games, won_games, win_ratio = get_client_stats(results)
        print("total\twon\tratio\tclient")
        for client in total_games:
            print(total_games[client], won_games[client], "{:.1f}%".format(win_ratio[client]*100), client, sep="\t")
        print()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
33918,
198,
6738,
2219,
1330,
1295,
198,
6738,
17268,
1330,
4277,
11600,
198,
11748,
1822,
29572,
198,
198,
37811,
198,
20274,
25,
1391,
198,
22... | 2.308775 | 1,208 |
from .restrictions import *
from .parsing import *
from .errors import *
| [
6738,
764,
2118,
2012,
507,
1330,
1635,
198,
6738,
764,
79,
945,
278,
1330,
1635,
198,
6738,
764,
48277,
1330,
1635,
198
] | 3.318182 | 22 |
from enum import Enum
from typing import NamedTuple
__all__ = "SizeHint", "PosHint", "Anchor", "Easing"
| [
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
34441,
51,
29291,
198,
198,
834,
439,
834,
796,
366,
10699,
39,
600,
1600,
366,
21604,
39,
600,
1600,
366,
2025,
354,
273,
1600,
366,
36,
2313,
1,
628,
628,
198
] | 2.725 | 40 |
# -*- coding: utf-8 -*-
import sqlalchemy as sa
import structlog
from ..schema import NotificationType
from ..schema import TransportType
from ..schema import Priority
from .users import get_user_transports_for_notification
logger = structlog.getLogger(__name__, source='YoDB')
from yo.db import metadata
from .queue import put_many
# SQLAlchemy definition of the notifications table.
# nid is the synthetic primary key; (eid, to_username) is enforced unique so
# the same event is never delivered twice to the same user.
notifications_table = sa.Table(
    'notifications',
    metadata,
    sa.Column('nid', sa.BigInteger(), primary_key=True),
    sa.Column('eid', sa.Text()),
    sa.Column('notify_type', sa.Integer(), nullable=False),
    sa.Column('to_username',sa.Text(),nullable=False),
    sa.Column('from_username',sa.Text(),nullable=True),
    sa.Column('json_data', sa.UnicodeText()),
    sa.Column('created', sa.DateTime, default=sa.func.now(), nullable=False),
    sa.Column('priority', sa.Integer, default=Priority.normal.value),
    sa.Index('ix_notifications_unique','eid','to_username',unique=True)
)

# Raw insert statement ($1..$7 are asyncpg-style positional parameters).
# ON CONFLICT DO NOTHING makes inserts idempotent per (eid, to_username);
# RETURNING nid yields no row when the notification already existed.
# NOTE(review): the constant name misspells NOTIFICATION ("NOTIFICATON");
# left unchanged because other modules may reference it by this name.
INSERT_NOTIFICATON_STMT = '''
INSERT INTO notifications(eid, notify_type, to_username, from_username, json_data, priority, created)
VALUES($1, $2, $3, $4, $5, $6, NOW())
ON CONFLICT DO NOTHING
RETURNING nid'''
# Fetch the eid of the most recently inserted notification.
# Fixed: "ORDER BY DESC nid" is invalid SQL — the sort direction follows
# the column name ("ORDER BY nid DESC").
GET_LAST_BLOCK_STMT = '''
SELECT eid FROM notifications ORDER BY nid DESC LIMIT 1;
'''
'''
flow
bf detects operation
op's handlers are run to generate event
begin transaction
event is stored
potential notification accounts are determined
account notification prefs are loaded
events are filtered against notification prefs
filtered events are added to transport queues
end transaction
transport queue item read
if not rate-limited
load user info from conveyor
attempt send
if success:
delete queue item
record result
if rate-limited:
delete queue item
record result
'''
# create notification methods
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
44161,
282,
26599,
355,
473,
198,
11748,
2878,
6404,
628,
198,
6738,
11485,
15952,
2611,
1330,
42808,
6030,
198,
6738,
11485,
15952,
2611,
1330,
19940,
6030,
198,
6... | 2.967374 | 613 |
# Test the LinearSystem component.
import unittest
import numpy as np
from openmdao.api import Group, Problem, LinearSystem, IndepVarComp, ScipyGMRES
from openmdao.test.util import assert_rel_error
if __name__ == "__main__":
    # Discover and run the LinearSystem tests when executed as a script.
    unittest.main()
| [
2,
6208,
262,
44800,
11964,
7515,
13,
198,
198,
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1280,
9132,
5488,
13,
15042,
1330,
4912,
11,
20647,
11,
44800,
11964,
11,
1423,
538,
19852,
7293,
11,
1446,
541,... | 2.941176 | 85 |
from django.conf.urls import patterns, url, include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from richard.videos.sitemaps import CategorySitemap, SpeakerSitemap, VideoSitemap
from sergey.views import SpeakerList
# Populate the admin registry from each installed app's admin module.
admin.autodiscover()

# Sitemap classes handed to django.contrib.sitemaps' sitemap view below.
sitemaps = {
    'category': CategorySitemap,
    'speaker': SpeakerSitemap,
    'video': VideoSitemap
}

# NOTE(review): patterns() and dotted-string view references were removed in
# Django 1.10 — this URLconf targets an older Django release.
urlpatterns = patterns('',
    url(r'^$', 'sergey.views.home', name='home'),
    url(r'^login-failure$', 'richard.base.views.login_failure', name='login_failure'),
    (r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^pages/', include('richard.pages.urls')),
    url(r'^speaker/$', SpeakerList.as_view(), name='videos-speaker-list'),
    url(r'^search/?$', 'sergey.views.search', name='videos-search'),
    url(r'', include('richard.videos.urls')),
)

# Serve user-uploaded media (development helper; no-op when DEBUG is off).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

# Serve robots.txt, favicon and /static/ straight from STATIC_ROOT.
urlpatterns += patterns('',
    url(r'^(robots.txt)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
    url(r'^(favicon.ico)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
    url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT, 'show_indexes': False,}),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
19016,
11,
2291,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12708,
1330,
9037,
198,
6738,
42625,
14208,
13,
3642,
... | 2.552294 | 545 |
import argparse
import sys
import unittest
from typing import Optional, Sequence
from unittest import skipIf
from unittest.mock import MagicMock, patch
SequenceType = Sequence
if sys.version_info >= (3, 9):
from collections.abc import Sequence # pylint: disable=reimported
from typing import Annotated, Literal
else:
from typing_extensions import Annotated
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
import corgy
from corgy import Corgy, CorgyHelpFormatter, corgyparser
from corgy._corgy import BooleanOptionalAction
class TestCorgyMeta(unittest.TestCase):
"""Tests to check validity of classes inheriting from Corgy."""
@classmethod
class TestCorgyAddArgsToParser(unittest.TestCase):
"""Tests to check that Corgy properly adds arguments to ArgumentParsers."""
@skipIf(sys.version_info < (3, 10), "`|` syntax needs Python 3.10 or higher")
@skipIf(sys.version_info < (3, 9), "`typing.Sequence` doesn't accept multiple args")
@skipIf(sys.version_info < (3, 9), "`typing.Sequence` doesn't accept multiple args")
@skipIf(sys.version_info < (3, 9), "`typing.Sequence` doesn't accept multiple args")
@skipIf(sys.version_info < (3, 9), "`typing.Sequence` doesn't accept multiple args")
@skipIf(sys.version_info < (3, 9), "`typing.Sequence` doesn't accept multiple args")
@skipIf(sys.version_info < (3, 9), "`typing.Sequence` doesn't accept multiple args")
class TestCorgyCmdlineParsing(unittest.TestCase):
"""Test cases to check parsing of command line arguments by Corgy."""
class TestCorgyCustomParsers(unittest.TestCase):
"""Tests to check usage of the @corgyparser decorator."""
| [
11748,
1822,
29572,
198,
11748,
25064,
198,
11748,
555,
715,
395,
198,
6738,
19720,
1330,
32233,
11,
45835,
198,
6738,
555,
715,
395,
1330,
14267,
1532,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
735,
11,
8529,
198,
198,
... | 2.984563 | 583 |
import click
from vmanage.cli.export.templates import templates
from vmanage.cli.export.policies import policies
# Top-level "export" command group; concrete sub-commands are attached below.
@click.group()
@click.pass_context
def export(ctx):
    """
    Export commands
    """
    # Intentionally empty: click dispatches straight to the sub-commands.
    # (The docstring above is the group's --help text — do not reword
    # casually.)


# Register the sub-commands imported from vmanage.cli.export.*.
export.add_command(templates)
export.add_command(policies)
11748,
3904,
198,
6738,
410,
805,
496,
13,
44506,
13,
39344,
13,
11498,
17041,
1330,
24019,
198,
6738,
410,
805,
496,
13,
44506,
13,
39344,
13,
79,
4160,
444,
1330,
4788,
198,
198,
31,
12976,
13,
8094,
3419,
198,
31,
12976,
13,
6603... | 2.932584 | 89 |
from setuptools import setup
# Use the README as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Packaging metadata for the flask-themer distribution.
setup(
    name='flask-themer',
    version='1.4.1',
    description='Simple theme mechanism for Flask',
    author='Tyler Kennedy',
    author_email='tk@tkte.ch',
    url='https://github.com/tktech/flask-themer',
    long_description=long_description,
    long_description_content_type="text/markdown",
    # Single-module distribution: ships flask_themer.py, not a package dir.
    py_modules=['flask_themer'],
    install_requires=[
        'flask'
    ],
    tests_require=[
        'pytest',
        'pytest-cov'
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    keywords=[
        'flask',
        'themes',
        'jinja2'
    ]
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
40406,
7,
198,
220,
220,
22... | 2.277778 | 360 |
#!/usr/bin/env python
import tornado.ioloop
import tornado.web
import tornado.template
import dns.resolver
import yaml
# Load site settings; the whole module depends on this succeeding.
try:
    with open( '../config.yaml', 'r' ) as f:
        settings = yaml.safe_load( f )
except IOError:
    # Fixed: the original used a Python 2 "print" statement, which is a
    # syntax error under Python 3 (the shebang is version-agnostic).
    # print(...) behaves identically on Python 2 for a single string.
    print("Error reading config.yaml, have you created one? (Hint: Try running ./generate_config.py)")
    exit()

if __name__ == "__main__":
    DOMAIN = settings["domain"]
    API_SERVER = "https://api." + DOMAIN

    # make_app() is presumably defined elsewhere in this file — not
    # visible here; it must return a tornado.web.Application.
    app = make_app()
    app.listen( 1234 )
    tornado.ioloop.IOLoop.current().start()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
33718,
13,
1669,
11224,
198,
11748,
33718,
13,
12384,
198,
11748,
33718,
13,
28243,
198,
11748,
288,
5907,
13,
411,
14375,
198,
11748,
331,
43695,
198,
198,
28311,
25,
198,
220,
... | 2.608911 | 202 |
#### Globals/Imports ####
import json
import xml
import xmltodict
import xml.etree.ElementTree as ET
import os
import collections
from timeit import default_timer as timer
import datetime
import traceback
import sys
import re
#### Custom Exceptions ####
#### ProcessedClinicalTrial Class Definition ####
class ProcessedClinicalTrial:
"""Wrapper class and methods for converting pubmed XML (as instance of ElementTree root)
to JSON format for posting to elasticsearch.
"""
#### Clinical Trial XML Parsing Methods ####
# todo: further parse the inclusion and exclusion criteria portions of criteria texblock
#### Utilities #### | [
4242,
40713,
874,
14,
3546,
3742,
1303,
21017,
198,
198,
11748,
33918,
198,
11748,
35555,
198,
11748,
2124,
76,
2528,
375,
713,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
11748,
28686,
198,
11748,
17268,
198,
67... | 3.878788 | 165 |
import glob
import json
import os
import pickle
import cv2 as cv
import numpy as np
from tqdm import tqdm
if __name__ == '__main__':
    # Camera calibration driver: detect chessboard corners in KITTI frames
    # and run cv.calibrateCamera, saving K and D to a pickle file.
    # NOTE(review): indentation reconstructed; generate_calibration_images()
    # and calibrate_chessboards() are presumably defined elsewhere in this
    # file — not visible here.
    img_dir = 'data/2011_09_26/2011_09_26_drive_0119_extract/image_00/data'
    calibration_path = 'data/2011_09_26/2011_09_26_image_00_chessboards.json'
    output_path = 'calibration_results.pkl'
    # Corner-refinement termination criteria: 30 iterations or eps 0.001.
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    best_points = 0
    best_objpoints, best_imgpoints, best_image = None, None, None
    total_objpoints = []
    total_imgpoints = []
    visualize = False
    use_all_images = False
    for idx, image in enumerate(glob.glob(os.path.join(img_dir, "*.png"))):
        # Only the first frame is processed (early break below).
        if idx > 0:
            break
        print(f"Analyzing img: {image}")
        # generate 1 image per chessboard
        images = generate_calibration_images(image, calibration_path)
        # find points in each chessboard
        objpoints, imgpoints, points = calibrate_chessboards(images, visualize)
        total_objpoints.extend(objpoints)
        total_imgpoints.extend(imgpoints)
        # Track the frame with the most detected corners.
        if points > best_points:
            best_points = points
            best_objpoints = objpoints
            best_imgpoints = imgpoints
            best_image = image
        print(f"Best points: {points}\n")
    # Reload the best frame in grayscale; only its shape is used below.
    best_image = cv.imread(best_image)
    best_image = cv.cvtColor(best_image, cv.COLOR_BGR2GRAY)
    if use_all_images:
        print("Calibrating using all chessboard in all images")
        # here we assume that all images are almost equal so we can use the shape of an arbitrary chosen image
        ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(total_objpoints, total_imgpoints, best_image.shape[::-1],
                                                          None, None)
    else:
        print(f"Calibrating using all chessboard in image with {best_points} corners detected.")
        ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(best_objpoints, best_imgpoints, best_image.shape[::-1], None,
                                                          None)
    print(f"RMS Reprojection error: {ret:.4f}")
    if ret:
        print(f"Saving results on {output_path}")
        with open(output_path, 'wb') as output:
            # K: camera matrix, D: distortion coefficients.
            results = {
                'K': mtx,
                'D': dist
            }
            pickle.dump(results, output, pickle.HIGHEST_PROTOCOL)
        print("The calibration has ended successfully!")
| [
11748,
15095,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
628,
628,
198,
361,
11593,
367... | 2.172291 | 1,126 |
#OAM: 2 16-bit addresses
# 8 bit spriteref
# 10 bit x pos
# 10 bit y pos
# 1 bit x-flip
# 1 bit y-flip
# 1 bit priority
# 1 bit enable
OAM = [
{
"spriteref":1,
"x_pos":13,
"y_pos":2,
"x_flip":0,
"y_flip":0,
"priority":0,
"enable":1
},
{
"spriteref":2,
"x_pos":2,
"y_pos":2,
"x_flip":0,
"y_flip":0,
"priority":1,
"enable":1
},
{
"spriteref":0,
"x_pos":18,
"y_pos":18,
"x_flip":1,
"y_flip":0,
"priority":0,
"enable":1
},
{
"spriteref":0,
"x_pos":34,
"y_pos":34,
"x_flip":0,
"y_flip":1,
"priority":0,
"enable":1
},
{
"spriteref":0,
"x_pos":50,
"y_pos":50,
"x_flip":1,
"y_flip":1,
"priority":0,
"enable":1
},
{
"spriteref":1,
"x_pos":50,
"y_pos":2,
"x_flip":0,
"y_flip":0,
"priority":0,
"enable":1
},
{
"spriteref":1,
"x_pos":66,
"y_pos":2,
"x_flip":1,
"y_flip":0,
"priority":0,
"enable":1
},
{
"spriteref":3,
"x_pos":0,
"y_pos":50,
"x_flip":0,
"y_flip":0,
"priority":0,
"enable":1
},
{
"spriteref":4,
"x_pos":16,
"y_pos":50,
"x_flip":0,
"y_flip":0,
"priority":0,
"enable":1
},
{
"spriteref":5,
"x_pos":32,
"y_pos":50,
"x_flip":0,
"y_flip":0,
"priority":0,
"enable":1
},
{
"spriteref":6,
"x_pos":48,
"y_pos":50,
"x_flip":0,
"y_flip":0,
"priority":0,
"enable":1
},
{
"spriteref":7,
"x_pos":64,
"y_pos":50,
"x_flip":0,
"y_flip":0,
"priority":0,
"enable":1
},
{
"spriteref":8,
"x_pos":80,
"y_pos":50,
"x_flip":0,
"y_flip":0,
"priority":0,
"enable":1
},
{
"spriteref":9,
"x_pos":96,
"y_pos":50,
"x_flip":0,
"y_flip":0,
"priority":0,
"enable":1
}
]
SPRITE = [
[
5,3,3,3,3,3,3,5,5,3,3,3,3,3,3,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,3,3,3,3,3,3,3,5,3,3,3,3,3,3,3,
5,3,5,5,5,5,5,3,5,3,5,5,5,5,5,3,
5,3,5,5,5,5,5,3,5,3,5,5,5,5,5,3,
5,3,5,5,5,5,5,3,5,3,5,5,5,5,5,3,
5,3,5,5,3,5,5,3,5,3,5,5,3,5,5,3,
5,3,5,5,5,5,5,3,5,3,5,5,5,5,5,3,
5,3,5,5,5,5,5,3,5,3,5,5,5,5,5,3,
5,5,3,3,3,3,3,5,5,5,3,3,3,3,3,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,3,5,0,0,5,5,5,5,5,0,0,5,3,3,5,
5,3,5,0,0,5,5,5,5,5,0,0,5,3,3,5,
5,5,3,5,5,5,5,5,5,5,5,5,3,3,3,5,
5,5,3,3,3,3,5,5,5,5,3,3,5,5,5,5,
5,5,5,5,5,5,3,3,3,3,5,5,5,5,5,5,
],
[
4,4,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
4,4,4,2,2,2,2,2,2,2,2,2,0,0,0,2,
2,4,4,4,2,2,2,2,2,2,2,2,0,0,0,2,
2,2,4,4,4,2,2,2,2,2,2,2,0,0,0,2,
2,2,2,4,4,4,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,4,4,4,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,4,4,4,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,4,4,4,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,4,4,4,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,4,4,4,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,4,4,4,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,4,4,4,2,2,2,
2,0,0,0,2,2,2,2,2,2,2,4,4,4,2,2,
2,0,0,0,2,2,2,2,2,2,2,2,4,4,4,2,
2,0,0,0,2,2,2,2,2,2,2,2,2,4,4,4,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,4,4,
],
[
4,4,1,1,1,1,1,1,1,1,1,1,1,1,1,3,
4,4,4,1,1,1,1,1,1,1,1,1,1,1,3,0,
1,4,4,4,1,1,1,1,1,1,1,1,1,3,0,0,
1,1,4,4,4,1,1,1,1,1,1,1,3,0,0,0,
1,1,1,4,4,4,1,1,1,1,1,3,0,0,0,0,
1,1,1,1,4,4,4,1,1,1,3,0,0,0,0,0,
1,1,1,1,1,4,4,4,1,3,0,0,0,0,0,0,
1,1,1,1,1,1,4,4,3,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,3,4,4,0,0,0,0,0,0,
1,1,1,1,1,1,3,1,4,4,4,0,0,0,0,0,
1,1,1,1,1,3,1,1,1,4,4,4,0,0,0,0,
1,1,1,1,3,1,1,1,1,1,4,4,4,0,0,0,
1,1,1,3,1,1,1,1,1,1,1,4,4,4,0,0,
1,1,3,1,1,1,1,1,1,1,1,1,4,4,4,0,
1,3,1,1,1,1,1,1,1,1,1,1,1,4,4,4,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
],
[
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
],
[
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
],
[
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
],
[
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
],
[
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
],
[
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
],
[
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
]
]
PALLET = [
{
"r":128,
"g":128,
"b":128
},
{
"r":255,
"g":0,
"b":0
},
{
"r":0,
"g":255,
"b":0
},
{
"r":0,
"g":0,
"b":255
},
{
"r":255,
"g":255,
"b":0
},
{
"r":0,
"g":255,
"b":255
},
{
"r":255,
"g":0,
"b":255
},{
"r":255,
"g":255,
"b":255
}
]
# EBI_ALE, EBI_WE active low
# EBI_AD "active high"
# EBI_ALE = Adresse
# EBI_WE = Data
# after a address data burst wait 2 clock cycles.
clockCycle = 39.68 # 25,2 MHz
print("task write_oam_data();")
print("// Generating OAM")
print("")
wait_clock_cycles(2, clockCycle)
index = 0
print("bank_select = 0;")
for oam_obj in OAM:
oam_obj["spriteref"]= ([str(x) for x in '{:08b}'.format(oam_obj["spriteref"])])
oam_obj["x_pos"] = ([str(x) for x in '{:010b}'.format(oam_obj["x_pos"])])
oam_obj["y_pos"] = ([str(x) for x in '{:010b}'.format(oam_obj["y_pos"])])
oam_obj["x_flip"] = ([str(x) for x in '{:01b}'.format(oam_obj["x_flip"])])
oam_obj["y_flip"] = ([str(x) for x in '{:01b}'.format(oam_obj["y_flip"])])
oam_obj["priority"] = ([str(x) for x in '{:01b}'.format(oam_obj["priority"])])
oam_obj["enable"] = ([str(x) for x in '{:01b}'.format(oam_obj["enable"])])
dataobject1 = "".join(oam_obj["x_pos"][2:11]) + "".join(oam_obj["spriteref"])
dataobject2 = "".join(oam_obj["enable"]) + "".join(oam_obj["priority"]) + "".join(oam_obj["y_flip"]) + "".join(oam_obj["x_flip"]) + "".join(oam_obj["y_pos"]) + "".join(oam_obj["x_pos"][:2])
print("EBI_AD = "+str(index)+";")
print("#1;")
print("EBI_ALE = 0;")
print("#1;")
print("EBI_ALE = 1;")
print("#1;")
print("EBI_AD = 16'b"+dataobject1+";")
print("#1;")
print("EBI_WE = 0;")
print("#1;")
print("EBI_WE = 1;")
print("#1;")
wait_clock_cycles(2, clockCycle)
print("EBI_AD = "+str(index+1)+";")
print("#1;")
print("EBI_ALE = 0;")
print("#1;")
print("EBI_ALE = 1;")
print("#1;")
print("EBI_AD = 16'b"+dataobject2+";")
print("#1;")
print("EBI_WE = 0;")
print("#1;")
print("EBI_WE = 1;")
print("#1;")
print("")
wait_clock_cycles(2, clockCycle)
index += 2
print("endtask")
print("task write_sprite_data();")
print("")
print("// Generating VRAM SPRITE")
print("")
wait_clock_cycles(2, clockCycle)
index = 0
print("bank_select = 1;")
memoryIndex = 0
for sprite_obj in SPRITE:
for offset in range(0,256):
if offset%2 == 0:
sprite_obj[offset]= ([str(x) for x in '{:08b}'.format(sprite_obj[offset+1])]) + ([str(x) for x in '{:08b}'.format(sprite_obj[offset])])
print("EBI_AD = "+str(memoryIndex)+";")
print("#1;")
print("EBI_ALE = 0;")
print("#1;")
print("EBI_ALE = 1;")
print("#1;")
print("EBI_AD = 16'b"+"".join(sprite_obj[offset])+";")
print("#1;")
print("EBI_WE = 0;")
print("#1;")
print("EBI_WE = 1;")
print("#1;")
memoryIndex+=1
wait_clock_cycles(2, clockCycle)
index += 256
print("endtask")
print("task write_pallet_data();")
print("")
print("// Generating Pallet")
print("")
wait_clock_cycles(2, clockCycle)
index = 0
print("bank_select = 3;")
for pallet_obj in PALLET:
pallet_obj["r"]= ([str(x) for x in '{:08b}'.format(pallet_obj["r"])])
pallet_obj["g"]= ([str(x) for x in '{:08b}'.format(pallet_obj["g"])])
pallet_obj["b"]= ([str(x) for x in '{:08b}'.format(pallet_obj["b"])])
print("EBI_AD = "+str(index)+";")
print("#1;")
print("EBI_ALE = 0;")
print("#1;")
print("EBI_ALE = 1;")
print("#1;")
print("EBI_AD = 16'b"+ "".join(pallet_obj["r"]) +"".join(pallet_obj["g"])+";")
print("#1;")
print("EBI_WE = 0;")
print("#1;")
print("EBI_WE = 1;")
print("#1;")
wait_clock_cycles(2, clockCycle)
print("EBI_AD = "+str(index+1)+";")
print("#1;")
print("EBI_ALE = 0;")
print("#1;")
print("EBI_ALE = 1;")
print("#1;")
print("EBI_AD = 16'b"+ "00000000" +"".join(pallet_obj["b"])+";")
print("#1;")
print("EBI_WE = 0;")
print("#1;")
print("EBI_WE = 1;")
print("#1;")
wait_clock_cycles(2, clockCycle)
index += 2
print("endtask")
| [
2,
46,
2390,
25,
362,
1467,
12,
2545,
9405,
198,
2,
220,
220,
220,
807,
1643,
599,
799,
567,
69,
198,
2,
220,
220,
220,
838,
1643,
2124,
1426,
198,
2,
220,
220,
220,
838,
1643,
331,
1426,
198,
2,
220,
220,
220,
352,
1643,
2124... | 1.276831 | 10,566 |
import grpc
from concurrent import futures
import time
import news_databroker_pb2_grpc
import news_databroker_pb2
port = 8061
news = [
'In Hong Kong, where newspapers have alleged Japan has been selling below-cost semiconductors, some electronics manufacturers share that view.',
'The Ministry of International Trade and Industry (MITI) will revise its long-term energy supply/demand outlook by August to meet a forecast downtrend in Japanese energy demand, ministry officials said.',
'Thailands trade deficit widened to 4.5 billion baht in the first quarter of 1987 from 2.1 billion a year ago, the Business Economics Department said.',
'Prices of Malaysian and Sumatran CPO are now around 332 dlrs a tonne CIF for delivery in Rotterdam, traders said.',
'Victoria and Western Australia yesterday lifted their ban on foreign-flag ships carrying containers but NSW ports are still being disrupted by a separate dispute, shipping sources said.',
'He told Reuters in a telephone interview that trading in palm oil, sawn timber, pepper or tobacco was being considered.',
'SRI LANKA GETS USDA APPROVAL FOR WHEAT PRICE',
'Osaka-based Sumitomo, with desposits of around 23.9 trillion yen, merged with Heiwa Sogo, a small, struggling bank with an estimated 1.29 billion dlrs in unrecoverable loans, in October.',
'Asked by Reuters to clarify his statement on Monday in which he said the pact should be allowed to lapse, Subroto said Indonesia was ready to back extension of the ITA.',
'Banks, which bid for a total 12.2 billion marks liquidity, will be credited with the funds allocated today and must buy back securities pledged on May 6.']
# Stand up the gRPC server with a 10-thread worker pool.
# NOTE(review): NewsDatabroker (the servicer implementation) is presumably
# defined elsewhere in this file — not visible here.
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
news_databroker_pb2_grpc.add_NewsDatabrokerServicer_to_server(NewsDatabroker(), server)
print("Starting server. Listening on port : " + str(port))
# [::] binds all interfaces, IPv4 and IPv6; no TLS (insecure port).
server.add_insecure_port("[::]:{}".format(port))
server.start()
try:
    # server.start() does not block, so sleep in day-long chunks until
    # interrupted from the keyboard.
    while True:
        time.sleep(86400)
except KeyboardInterrupt:
    # Grace period 0: stop immediately.
    server.stop(0)
| [
11748,
1036,
14751,
198,
6738,
24580,
1330,
25650,
198,
11748,
640,
198,
11748,
1705,
62,
19608,
397,
305,
6122,
62,
40842,
17,
62,
2164,
14751,
198,
11748,
1705,
62,
19608,
397,
305,
6122,
62,
40842,
17,
198,
198,
634,
796,
807,
3312... | 3.537133 | 579 |
from .mobilenet import mobilenet
from .mobilenetv2 import mobilenetv2
from .resnet import resnet, wideresnet
from .vgg import vgg
| [
6738,
764,
76,
25898,
268,
316,
1330,
17754,
268,
316,
198,
6738,
764,
76,
25898,
268,
316,
85,
17,
1330,
17754,
268,
316,
85,
17,
198,
6738,
764,
411,
3262,
1330,
581,
3262,
11,
3094,
411,
3262,
198,
6738,
764,
85,
1130,
1330,
41... | 2.826087 | 46 |
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.session import Session
from tests import unittest
| [
2,
15069,
33448,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
921,
198,
2,
743,
407,
779,
428,
2393,
... | 3.888199 | 161 |
from behave import given, then
@given('I open google.com')
@then('the title should contain "{title}"') | [
6738,
17438,
1330,
1813,
11,
788,
628,
198,
31,
35569,
10786,
40,
1280,
23645,
13,
785,
11537,
628,
198,
31,
8524,
10786,
1169,
3670,
815,
3994,
45144,
7839,
36786,
11537
] | 3.533333 | 30 |
"""
MESH sub-module of pyhail
Contains the single pol MESH retrieval for gridded radar data.
Required reflectivity and temperature data
Joshua Soderholm - 15 June 2018
"""
import pyart
from pyhail import common
import netCDF4
import numpy as np
def _get_latlon(radgrid, ref_name):
    """Generate latitude and longitude arrays for every grid point.

    Parameters:
    ===========
    radgrid: struct
        Py-ART grid object.
    ref_name: string
        Name of the reflectivity field used to size the output arrays.

    Returns:
    ========
    longitude: dict
        Py-ART field dict holding coordinates for all points.
    latitude: dict
        Py-ART field dict holding coordinates for all points.

    From cpol_processing: https://github.com/vlouf/cpol_processing
    """
    # Start from zero-filled (unmasked) arrays shaped like the reference
    # reflectivity field, then fill one vertical level at a time.
    lon_all = np.zeros_like(radgrid.fields[ref_name]['data'].filled(0))
    lat_all = np.zeros_like(radgrid.fields[ref_name]['data'].filled(0))
    for level in range(radgrid.nz):
        lon_all[level, :, :], lat_all[level, :, :] = radgrid.get_point_longitude_latitude(level)

    # Wrap the raw arrays in standard Py-ART metadata dicts.
    longitude = pyart.config.get_metadata('longitude')
    longitude['data'] = lon_all
    latitude = pyart.config.get_metadata('latitude')
    latitude['data'] = lat_all
    return longitude, latitude
def main(grid, fnames, out_ffn, snd_input, temph_data, ref_name, save_flag):
    """
    Hail grids adapted from Witt et al. 1998 and Cintineo et al. 2012.
    Expanded to grids (adapted from wdss-ii)
    Gridding set to 1x1x1km on a 20,145x145km domain

    Parameters:
    ===========
    grid: struct
        Py-ART grid object containing the reflectivity field.
    fnames: dict
        maps the keys 'hail_ke', 'shi', 'mesh' and 'posh' to the output
        field names used when adding the derived grids.
    out_ffn: string
        output full filename (inc path)
    snd_input: string
        sounding full filename (inc path); if empty, temph_data is used instead
    temph_data: list
        contains 0C and -20C altitude (m) in first and second element position, only used if snd_input is empty
    ref_name: string
        name of reflectivity field in radar object
    save_flag: logical
        if True, then save grid to file

    Returns:
    ========
    grid: struct
        Py-ART grid object with the hail_KE, SHI, MESH and POSH fields added
        (and optionally written to out_ffn).
    """
    # MESH reflectivity bounds (dBZ) between which the hail weighting ramps
    # from 0 to 1 (Witt et al. 1998).
    z_lower_bound = 40
    z_upper_bound = 50

    if len(snd_input) > 0:
        # build sounding data from the netCDF file
        snd_data = netCDF4.Dataset(snd_input)
        snd_temp = snd_data.variables["temp"][:]
        snd_geop = snd_data.variables["height"][:]
        # NOTE(review): snd_rh is read but never used below -- confirm
        # whether relative humidity was meant to feed the retrieval.
        snd_rh = snd_data.variables["rh"][:]
        snd_data.close()
        # interpolate the sounding to find the 0C and -20C altitudes
        snd_t_0C = common.sounding_interp(snd_temp,snd_geop,0) #m
        snd_t_minus20C = common.sounding_interp(snd_temp,snd_geop,-20) #m
    else:
        # no sounding supplied: caller provides the altitudes directly
        snd_t_0C = temph_data[0]
        snd_t_minus20C = temph_data[1]

    # Latitude Longitude field for each point.
    longitude, latitude = _get_latlon(grid, ref_name)
    grid.add_field('longitude', longitude)
    grid.add_field('latitude', latitude)

    # extract grids; alt_grid is the altitude (m) broadcast to the full 3D shape
    refl_grid = grid.fields[ref_name]['data']
    grid_sz = np.shape(refl_grid)
    alt_vec = grid.z['data']
    alt_grid = np.tile(alt_vec,(grid_sz[1], grid_sz[2], 1))
    alt_grid = np.swapaxes(alt_grid, 0, 2) #m

    # calc reflectivity weighting function (0 below 40 dBZ, 1 above 50 dBZ)
    weight_ref = (refl_grid - z_lower_bound)/(z_upper_bound - z_lower_bound)
    weight_ref[refl_grid <= z_lower_bound] = 0
    weight_ref[refl_grid >= z_upper_bound] = 1

    # calc hail kinetic energy flux (Witt et al. 1998, Eq. 5)
    hail_KE = (5 * 10**-6) * 10**(0.084 * refl_grid) * weight_ref

    # calc temperature based weighting function (0 below the 0C level,
    # 1 above the -20C level, linear ramp in between)
    weight_height = (alt_grid - snd_t_0C) / (snd_t_minus20C - snd_t_0C)
    weight_height[alt_grid <= snd_t_0C] = 0
    weight_height[alt_grid >= snd_t_minus20C] = 1

    # calc severe hail index: vertical integral of weighted hail KE,
    # using the (uniform) level spacing as the integration step
    grid_sz_m = alt_vec[1] - alt_vec[0]
    SHI = 0.1 * np.sum(weight_height * hail_KE, axis=0) * grid_sz_m

    # calc maximum estimated severe hail (mm)
    MESH = 2.54 * SHI**0.5

    # calc warning threshold (J/m/s) NOTE: freezing height must be in km
    WT = 57.5 * (snd_t_0C/1000) - 121

    # calc probability of severe hail (POSH) (%)
    # NOTE(review): np.log of a non-positive SHI/WT yields nan/-inf which
    # propagates into POSH before the clamping below -- confirm upstream
    # masking handles those cells.
    POSH = 29 * np.log(SHI/WT) + 50
    POSH = np.real(POSH)
    POSH[POSH<0] = 0
    POSH[POSH>100] = 100

    # add derived grids to the Py-ART grid object under the caller-supplied names
    hail_KE_field = {'data': hail_KE, 'units': 'Jm-2s-1', 'long_name': 'Hail Kinetic Energy',
                     'standard_name': 'hail_KE', 'comments': 'Witt et al. 1998'}
    grid.add_field(fnames['hail_ke'], hail_KE_field, replace_existing=True)

    # 2D products are stored in level 0 of a 3D array to match the grid shape
    SHI_grid = np.zeros_like(hail_KE)
    SHI_grid[0,:,:] = SHI
    SHI_field = {'data': SHI_grid, 'units': 'J-1s-1', 'long_name': 'Severe Hail Index',
                 'standard_name': 'SHI', 'comments': 'Witt et al. 1998, only valid in the first level'}
    grid.add_field(fnames['shi'], SHI_field, replace_existing=True)

    MESH_grid = np.zeros_like(hail_KE)
    MESH_grid[0,:,:] = MESH
    MESH_field = {'data': MESH_grid, 'units': 'mm', 'long_name': 'Maximum Expected Size of Hail',
                  'standard_name': 'MESH', 'comments': 'Witt et al. 1998, only valid in the first level'}
    grid.add_field(fnames['mesh'], MESH_field, replace_existing=True)

    POSH_grid = np.zeros_like(hail_KE)
    POSH_grid[0,:,:] = POSH
    POSH_field = {'data': POSH_grid, 'units': '%', 'long_name': 'Probability of Severe Hail',
                  'standard_name': 'POSH', 'comments': 'Witt et al. 1998, only valid in the first level'}
    grid.add_field(fnames['posh'], POSH_field, replace_existing=True)

    # Saving data to file
    if save_flag:
        grid.write(out_ffn)

    #return dictionary
    #out_dict = {'hail_KE':hail_KE_field, 'SHI':SHI_field, 'MESH':MESH_field, 'POSH':POSH_field}
    return grid
37811,
198,
44,
44011,
850,
12,
21412,
286,
12972,
71,
603,
198,
198,
4264,
1299,
262,
2060,
755,
337,
44011,
45069,
329,
1036,
1638,
276,
13428,
1366,
13,
198,
37374,
4079,
3458,
290,
5951,
1366,
198,
198,
47740,
311,
12342,
22981,
5... | 2.204129 | 2,567 |
import importlib
import os
from contextlib import suppress
import pytest
from django.conf import settings
from django.dispatch import Signal
here = os.path.dirname(__file__)
doc_dir = os.path.join(here, "../../../docs")
base_dir = os.path.join(here, "../../byro")
with open(os.path.join(doc_dir, "developer/plugins/general.rst"), "r") as doc_file:
plugin_docs = doc_file.read()
@pytest.mark.parametrize(
"app", [app for app in settings.INSTALLED_APPS if app.startswith('byro.')]
)
| [
11748,
1330,
8019,
198,
11748,
28686,
198,
6738,
4732,
8019,
1330,
18175,
198,
198,
11748,
12972,
9288,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
6381,
17147,
1330,
26484,
198,
198,
1456,
796,
28686,
13,... | 2.714286 | 182 |
"""
Created on Wed Feb 17 13:27:55 2021
@author: William Callender
This program downloads data about the current progress of COVID-19 vaccinations
in the United States to project the total number of vaccinations going forward.
It does this by getting data about the number of vaccinations per day since
vaccines first became approved. It then runs linear regression over that data
to determine how fast the number of daily vaccinations is increasing. It
integrates this projection and accounts for the current number of people fully
vaccinated and the fact that both vaccines require 2 shots for full
vaccination. It shows this with some graphs and a crude projection for when the
US could be fully vaccinated.
It creates 2 csv files and 2 new optional png files every day.
The data is sourced (by default) from the Our World in Data COVID-19 repository
under the
[Creative Commons BY license](https://creativecommons.org/licenses/by/4.0/).
This code is licensed likewise.
I cannot predict the future, and neither can this program. These projections
were created as a way for me to be less bored while inside, and should not be
used in place of projections provided by experts. They should be used merely
for entertainment purposes or for learning about coding, not for anything where
the accuracy of projections is important. By using this code you acknowledge
that I am not responsible for any harm caused by its misuse.
"""
# change this line to True to have the program always overwrite without asking
defaultReplace = False
# change the filename for the vaccination data
fname = 'vaccinations.csv'
# the url to download the raw data from
url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/vaccinations.csv'
# should the program keep a log of its projections?
shouldLogProjections = True
# filename for the log, should end in .csv
logfile = 'projections.csv'
# should output plots as png?
saveFigs = True

import wget
from os import remove, path
import csv
from scipy.stats import linregress as linreg
# NOTE(review): scipy.integrate.simps is deprecated in modern SciPy in
# favour of scipy.integrate.simpson -- update if SciPy is upgraded.
from scipy.integrate import simps
from datetime import date, timedelta
from matplotlib import pyplot as plt
from numpy import linspace

# Decide whether to (re)download the dataset. If a copy already exists the
# user is asked interactively, unless defaultReplace forces an overwrite.
replace = False
filename = fname
if path.exists(fname) and not defaultReplace:
    c = ''
    # NOTE(review): 'y' is accepted case-insensitively but 'n' is not
    # ('N' re-prompts) -- confirm whether that asymmetry is intended.
    while c.lower() != 'y' and c != 'n':
        print(f'Should the existing {fname} be replaced by updated data? (y/n)')
        c = input()
    replace = c.lower() == 'y'
elif not path.exists(fname):
    print(f'No existing data at {fname}, downloading... ', end='')
    filename = wget.download(url, out=fname)
    print('done!')
if replace:
    try:
        remove(fname)
        print('Removed existing file')
        print('Downloading new data... ', end='')
        filename = wget.download(url, out=fname)
        print('done!')
    except FileNotFoundError:
        if not defaultReplace:
            print(f"Unexpected FileNotFoundError. Will continue with execution assuming file {fname} doesn't exist")
    except:
        # Anything else (network failure, permissions, ...) is reported and
        # re-raised -- the script cannot continue without data.
        print('Unknown error')
        raise

# Parse the CSV, keeping only USA rows. Column indices: 1 = iso_code,
# 2 = date, 5 = people_fully_vaccinated, 7 = daily_vaccinations -- assumed
# from the OWID layout; confirm against the header row if the feed changes.
dates = []
dailyVaccinations = []
peopleFullyVaccinated = []
with open(filename, 'r') as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        if row[1] == 'USA':
            if not row[7] == '':
                dailyVaccinations.append(int(row[7]))
                dates.append(date.fromisoformat(row[2]))
            if not row[5] == '':
                peopleFullyVaccinated.append(int(row[5]))
peopleFullyVaccinatedToday = max(peopleFullyVaccinated)

# Fit a line to daily vaccinations vs. days since the first data point.
# r2 is kept for diagnostics only (not used below).
days = [(d - dates[0]).days for d in dates]
res = linreg(days, dailyVaccinations)
m, b, r2 = (res.slope, res.intercept, res.rvalue**2)
samples = 1000
projectTo = 240
x = linspace(min(days), projectTo, samples)
yhat = [m*xi + b for xi in x]
# divide by 2 because shots require 2 doses :( but then add everyone who's already vaccinated :)
projectedVaccinations = [(simps(yhat[:i], x[:i]) / 2) + peopleFullyVaccinatedToday for i in range(1, len(x))]
crossover = 0
for i, v in enumerate(projectedVaccinations):
    if v > 330e6:
        # NOTE(review): this is only an approximation of the day offset --
        # x[i + 1] is the exact sample position on the day axis and would be
        # more accurate; confirm before relying on the printed date.
        crossover = min(days) + (i/samples)*projectTo
        break
crossoverDate = dates[0] + timedelta(days=crossover)

# Plot the fitted line against the observed daily vaccinations.
plt.scatter(days, dailyVaccinations, label='True daily vaccinations')
plt.plot(x, yhat, color='r', label=f'y = {round(m,3)}x + {round(b,3)}')
plt.legend()
plt.xlabel(f'Number of days since {dates[0]}')
plt.ylabel('Number of daily vaccinations')
plt.title(f'Projection of daily vaccinations from {date.today()}')
if saveFigs:
    plt.savefig('Projected daily vaccinations ' + str(date.today()))
plt.show()
plt.close()

# Plot cumulative projected full vaccinations against the US population.
plt.plot(x[1:], projectedVaccinations, label='Projected Full Vaccinations')
plt.plot(x[1:], [330e6 for _ in range(len(projectedVaccinations))], label='Total US Population (330 million)')
plt.legend()
plt.xlabel(f'Number of days since {dates[0]}')
plt.ylabel('Number of full vaccinations')
plt.title(f'Projection of full vaccinations from {date.today()}')
if saveFigs:
    plt.savefig('Projected total vaccinations ' + str(date.today()))
plt.show()
plt.close()

if crossover == 0:
    print('This projects not all US citizens will be vaccinated within the timeframe')
else:
    print('This projects all US citizens may be vaccinated by', crossoverDate.strftime('%B %d, %Y'))
print("Please note that this doesn't factor in vaccine skepticism, an upper limit to daily vaccinations, and other unforseen challenges. This projection is beyond optimistic, but it's probably reasonable to assume that the general population will be allowed to request vaccines by this date, even if they're difficult to get.")

# Append today's projection to the CSV log, at most one row per calendar day.
if shouldLogProjections:
    todayAlreadyExists = False
    if not path.exists(logfile):
        print(f'Log not found at {logfile}, creating it')
        with open(logfile, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(['Current Date', 'Projected Full Vaccination Date'])
    else:
        # Only the last row is checked, so at most one entry per day is kept.
        with open(logfile, 'r') as f:
            reader = csv.reader(f)
            if list(reader)[-1][0] == str(date.today()):
                todayAlreadyExists = True
                print('Data from today already exists, not adding to log')
    if not todayAlreadyExists:
        with open(logfile, 'a', newline='') as f:
            writer = csv.writer(f)
            writer.writerow([str(date.today()), str(crossoverDate)])
        print(f'Appending data from today to {logfile}')
37811,
198,
41972,
319,
3300,
3158,
1596,
1511,
25,
1983,
25,
2816,
33448,
198,
198,
31,
9800,
25,
3977,
4889,
2194,
198,
198,
1212,
1430,
21333,
1366,
546,
262,
1459,
4371,
286,
7375,
11008,
12,
1129,
46419,
198,
259,
262,
1578,
1829... | 2.944444 | 2,178 |
"""
Picklify is a function that works similar to memoization; it is meant for
functions that return a dictionary. Often, such functions will parse a file to
generate a dictionary that maps certain keys to values. To save on such
overhead costs, we "picklify" them the first time they are called (save the
dictionary in a pickle file), and then simply load the dictionary from the
saved pickle files the next time around.
"""
import pickle
import sys
from pathlib import Path
from .config import PICKLE_PATH
def picklify(dict_generator, *args, **kwargs):
    """
    Return the dictionary produced by ``dict_generator``, caching it on disk.

    On the first call for a given generator function, the generator is
    invoked and its result is written to a pickle file named after the
    function; later calls load and return the cached result instead of
    re-running the generator, which speeds up dictionary creation.

    Danger: two different functions sharing the same ``__name__`` would
    collide on the same cache file -- never picklify two same-named
    functions.

    :param dict_generator: the function which generates a dictionary.
    :param *args: Any args to pass to the dictionary generator.
    :param **kwargs: Any keyword args to pass to the dictionary generator.
    :returns: dictionary returned by dict_generator().
    """
    cache_file = f"{PICKLE_PATH}/{dict_generator.__name__}.pickle"
    # Fast path: a previous call already cached the result.
    try:
        with open(cache_file, "rb") as fh:
            return pickle.load(fh)
    except FileNotFoundError:
        pass
    # Slow path: run the generator, then best-effort persist the result.
    result = dict_generator(*args, **kwargs)
    try:
        Path(cache_file).parent.mkdir(parents=True, exist_ok=True)
        with open(cache_file, "wb") as fh:
            pickle.dump(result, fh, protocol=pickle.HIGHEST_PROTOCOL)
    except PermissionError:
        # Caching is an optimisation only; still return the fresh result.
        print(
            "Caching failed due to permission errors...", file=sys.stderr
        )
    return result
| [
37811,
198,
31686,
75,
1958,
318,
257,
2163,
326,
2499,
2092,
284,
16155,
1634,
26,
340,
318,
4001,
329,
198,
12543,
2733,
326,
1441,
257,
22155,
13,
18023,
11,
884,
5499,
481,
21136,
257,
2393,
284,
198,
8612,
378,
257,
22155,
326,
... | 2.79846 | 779 |
import django_filters
import graphene
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models import Q
from graphene import Field, relay
from graphene_permissions.mixins import AuthFilter
from graphene_permissions.permissions import AllowAny, AllowAuthenticated
from rest_framework.generics import get_object_or_404
from api.graphql.applications.application_types import CityType
from api.graphql.reservation_units.reservation_unit_filtersets import (
ReservationUnitsFilterSet,
)
from api.graphql.reservation_units.reservation_unit_mutations import (
EquipmentCategoryCreateMutation,
EquipmentCategoryDeleteMutation,
EquipmentCategoryUpdateMutation,
EquipmentCreateMutation,
EquipmentDeleteMutation,
EquipmentUpdateMutation,
PurposeCreateMutation,
PurposeUpdateMutation,
ReservationUnitCreateMutation,
ReservationUnitImageCreateMutation,
ReservationUnitImageDeleteMutation,
ReservationUnitImageUpdateMutation,
ReservationUnitUpdateMutation,
)
from api.graphql.reservation_units.reservation_unit_types import (
EquipmentCategoryType,
EquipmentType,
KeywordCategoryType,
KeywordGroupType,
KeywordType,
PurposeType,
ReservationUnitByPkType,
ReservationUnitCancellationRuleType,
ReservationUnitType,
ReservationUnitTypeType,
TaxPercentageType,
)
from api.graphql.reservations.reservation_filtersets import ReservationFilterSet
from api.graphql.reservations.reservation_mutations import (
ReservationApproveMutation,
ReservationCancellationMutation,
ReservationConfirmMutation,
ReservationCreateMutation,
ReservationDenyMutation,
ReservationRequiresHandlingMutation,
ReservationUpdateMutation,
ReservationWorkingMemoMutation,
)
from api.graphql.reservations.reservation_types import (
AgeGroupType,
ReservationCancelReasonType,
ReservationDenyReasonType,
ReservationMetadataSetType,
ReservationPurposeType,
ReservationType,
)
from api.graphql.resources.resource_mutations import (
ResourceCreateMutation,
ResourceDeleteMutation,
ResourceUpdateMutation,
)
from api.graphql.resources.resource_types import ResourceType
from api.graphql.spaces.space_mutations import (
SpaceCreateMutation,
SpaceDeleteMutation,
SpaceUpdateMutation,
)
from api.graphql.spaces.space_types import SpaceType
from api.graphql.terms_of_use.terms_of_use_types import TermsOfUseType
from api.graphql.units.unit_mutations import UnitUpdateMutation
from api.graphql.units.unit_types import UnitByPkType, UnitType
from permissions.api_permissions.graphene_field_decorators import (
check_resolver_permission,
)
from permissions.api_permissions.graphene_permissions import (
AgeGroupPermission,
CityPermission,
EquipmentCategoryPermission,
EquipmentPermission,
KeywordPermission,
PurposePermission,
ReservationMetadataSetPermission,
ReservationPermission,
ReservationPurposePermission,
ReservationUnitCancellationRulePermission,
ReservationUnitPermission,
ResourcePermission,
SpacePermission,
TaxPercentagePermission,
TermsOfUsePermission,
UnitPermission,
)
from permissions.helpers import (
get_service_sectors_where_can_view_reservations,
get_units_where_can_view_reservations,
)
from reservation_units.models import Equipment, EquipmentCategory, ReservationUnit
from reservations.models import Reservation
from resources.models import Resource
from spaces.models import ServiceSector, Space, Unit
schema = graphene.Schema(query=Query, mutation=Mutation)
| [
11748,
42625,
14208,
62,
10379,
1010,
198,
11748,
42463,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13... | 3.172174 | 1,150 |
"""
Component for interacting with a HomeSeer HomeTroller or HS3 software installation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/homeseer/
"""
import asyncio
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_EVENT, CONF_HOST, CONF_ID, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import EventOrigin
from homeassistant.helpers import aiohttp_client, discovery
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['pyhs3==0.9']
DOMAIN = 'homeseer'
CONF_HTTP_PORT = 'http_port'
CONF_ASCII_PORT = 'ascii_port'
CONF_LOCATION_NAMES = 'location_names'
CONF_ALLOW_EVENTS = 'allow_events'
DEFAULT_HTTP_PORT = 80
DEFAULT_PASSWORD = 'default'
DEFAULT_USERNAME = 'default'
DEFAULT_ASCII_PORT = 11000
DEFAULT_LOCATION_NAMES = False
DEFAULT_ALLOW_EVENTS = True
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_HTTP_PORT, default=DEFAULT_HTTP_PORT): cv.port,
vol.Optional(CONF_ASCII_PORT, default=DEFAULT_ASCII_PORT): cv.port,
vol.Optional(CONF_LOCATION_NAMES, default=DEFAULT_LOCATION_NAMES): cv.boolean,
vol.Optional(CONF_ALLOW_EVENTS, default=DEFAULT_ALLOW_EVENTS): cv.boolean
})
}, extra=vol.ALLOW_EXTRA)
HOMESEER_COMPONENTS = ['binary_sensor', 'cover', 'light', 'lock', 'scene', 'sensor', 'switch']
async def async_setup(hass, config):
    """
    Set up the HomeSeer component.

    Connects to the HomeSeer ASCII server, discovers devices and events,
    forwards the supported platforms to Home Assistant and registers a
    shutdown listener. Returns True on success, False when no supported
    devices are found or the ASCII connection cannot be established.
    """
    # Imported lazily so the pyhs3 requirement is only loaded on setup.
    from pyhs3 import STATE_LISTENING
    config = config.get(DOMAIN)
    host = config[CONF_HOST]
    username = config[CONF_USERNAME]
    password = config[CONF_PASSWORD]
    http_port = config[CONF_HTTP_PORT]
    ascii_port = config[CONF_ASCII_PORT]
    location_names = config[CONF_LOCATION_NAMES]
    allow_events = config[CONF_ALLOW_EVENTS]
    homeseer = HSConnection(hass, host, username, password, http_port, ascii_port, location_names)
    await homeseer.api.initialize()
    # Abort when HomeSeer reports nothing this integration can represent.
    if len(homeseer.devices) == 0 and len(homeseer.events) == 0:
        _LOGGER.error('No supported HomeSeer devices found, aborting component setup.')
        return False
    await homeseer.start()
    # Poll for up to ~3 seconds until the ASCII connection is listening.
    i = 0
    while homeseer.api.state != STATE_LISTENING:
        if i < 3:
            i += 1
            await asyncio.sleep(1)
        elif i == 3:
            _LOGGER.error('Failed to connect to HomeSeer ASCII server, aborting component setup.')
            await homeseer.stop()
            return False
    _LOGGER.info('Connected to HomeSeer ASCII server at {}:{}'.format(host, ascii_port))
    homeseer.add_remotes()
    # NOTE: mutates the module-level platform list when events are disabled.
    if not allow_events:
        HOMESEER_COMPONENTS.remove('scene')
    # Hand each supported platform off to Home Assistant's discovery.
    for component in HOMESEER_COMPONENTS:
        hass.async_create_task(discovery.async_load_platform(
            hass, component, DOMAIN, {}, config))
    hass.data[DOMAIN] = homeseer
    # Close the connection cleanly when Home Assistant shuts down.
    hass.bus.async_listen_once('homeassistant_stop', homeseer.stop)
    return True
class HSConnection:
"""Manages a connection between HomeSeer and Home Assistant."""
@property
@property
@property
class HSRemote:
"""Link remote-type devices that should fire events rather than create entities to Home Assistant."""
def update_callback(self):
"""Fire the event."""
data = {CONF_ID: self._device.ref, CONF_EVENT: self._device.value}
self._hass.bus.async_fire(self._event, data, EventOrigin.remote)
| [
37811,
198,
21950,
329,
24986,
351,
257,
5995,
4653,
263,
5995,
51,
10646,
393,
18070,
18,
3788,
9988,
13,
198,
198,
1890,
517,
3307,
546,
428,
7515,
11,
3387,
3522,
284,
262,
10314,
379,
198,
5450,
1378,
11195,
12,
562,
10167,
13,
... | 2.534314 | 1,428 |
import pandas as pd
import os
import subprocess | [
11748,
19798,
292,
355,
279,
67,
198,
198,
11748,
28686,
198,
198,
11748,
850,
14681
] | 3.266667 | 15 |
#%%
import cv2
import tkinter as tk
import os, time
import PIL
from PIL import Image, ImageTk
import threading
import time
import platform as plt
# from lobe import modify_lobe
from tm2_tflite import modify_tm
from cv_dust import find_dust
from custom_io import c_io
### Image / AI related parameters -- almost all of these are used in Stream
camera_idx = 0 # camera index
model_path = 'model_tm' # model folder
label_idx = {0:'clean', 1:'dust', 2:'other'} # class index -> label
# label -> display text shown in the UI (user-facing Traditional Chinese strings)
label_2tk = {'dust':'有粉塵', 'clean':'無粉塵', 'other':'其他'}
cv_dust_idx = 1 # dust tolerance control value
cv_dust_radius_min = 3 # minimum radius of a CV-boxed dust blob
cv_dust_radius_max = 25 # maximum radius of a CV-boxed dust blob
cv_thred = 150 # binarization threshold
cv_max_area = 1500 # blobs larger than this are classified as debris
cv_lambda = 0.5 # OpenCV weighting factor
ai_lambda = 0.5 # AI weighting factor
### IO control
pin_buzzer = 16 # buzzer GPIO pin
pin_relay = 5 # relay GPIO pin
io_clock = 5 # total number of seconds the device runs
t_temp = 0.5 # buzzer on/off interval (seconds)
swith = True # buzzer state flag (sic: "swith" kept as-is for compatibility)
### OS control
cmd_col, cmd_raw = os.get_terminal_size() # terminal (columns, rows); 'raw' is likely a typo for 'row'
# 按鈕事件
# 影像串流加 OpenCV
# AI 的部份
App()
# %%
| [
2,
16626,
201,
198,
11748,
269,
85,
17,
201,
198,
11748,
256,
74,
3849,
355,
256,
74,
201,
198,
11748,
28686,
11,
640,
201,
198,
11748,
350,
4146,
201,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
51,
74,
201,
198,
11748,
4704,
278... | 1.274194 | 992 |
# @Author : guopeiming
# @Contact : guopeiming.gpm@{qq, gmail}.com
import torch
from queue import Queue
import numpy as np
import torch.nn as nn
from utils import chart_helper
from utils import trees
from utils.vocabulary import Vocabulary
from utils.transliterate import TRANSLITERATIONS, BERT_TOKEN_MAPPING
from typing import List, Dict, Union, Tuple
from utils.trees import InternalParseNode, LeafParseNode
from transformers import BertModel, BertTokenizerFast
from config.Constants import NER_LABELS, STOP, START, TAG_UNK, PAD_STATEGY, TRUNCATION_STATEGY, CHARACTER_BASED
from model.transformer import LearnedPositionalEmbedding, Transformer
from model.partition_transformer import PartitionTransformer
| [
2,
2488,
13838,
1058,
915,
3008,
320,
278,
198,
2,
2488,
17829,
1058,
915,
3008,
320,
278,
13,
70,
4426,
31,
90,
38227,
11,
308,
4529,
27422,
785,
198,
11748,
28034,
198,
6738,
16834,
1330,
4670,
518,
198,
11748,
299,
32152,
355,
45... | 3.287037 | 216 |
class BuildHookInterface(object): # no cov
"""
Example usage:
=== ":octicons-file-code-16: plugin.py"
```python
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class SpecialBuildHook(BuildHookInterface):
PLUGIN_NAME = 'special'
...
```
=== ":octicons-file-code-16: hooks.py"
```python
from hatchling.plugin import hookimpl
from .plugin import SpecialBuildHook
@hookimpl
def hatch_register_build_hook():
return SpecialBuildHook
```
"""
PLUGIN_NAME = ''
"""The name used for selection."""
@property
def app(self):
"""
An instance of [Application](utilities.md#hatchling.bridge.app.Application).
"""
if self.__app is None:
from ....bridge.app import Application
self.__app = Application().get_safe_application()
return self.__app
@property
def root(self):
"""
The root of the project tree.
"""
return self.__root
@property
def config(self):
"""
The cumulative hook configuration.
=== ":octicons-file-code-16: pyproject.toml"
```toml
[tool.hatch.build.hooks.<PLUGIN_NAME>]
[tool.hatch.build.targets.<TARGET_NAME>.hooks.<PLUGIN_NAME>]
```
=== ":octicons-file-code-16: hatch.toml"
```toml
[build.hooks.<PLUGIN_NAME>]
[build.targets.<TARGET_NAME>.hooks.<PLUGIN_NAME>]
```
"""
return self.__config
@property
@property
def build_config(self):
"""
An instance of [BuilderConfig](utilities.md#hatchling.builders.config.BuilderConfig).
"""
return self.__build_config
@property
def directory(self):
"""
The build directory.
"""
return self.__directory
@property
def target_name(self):
"""
The plugin name of the build target.
"""
return self.__target_name
def clean(self, versions):
"""
This occurs before the build process if the `-c`/`--clean` flag was passed to
the [`build`](../cli/reference.md#hatch-build) command, or when invoking
the [`clean`](../cli/reference.md#hatch-clean) command.
"""
def initialize(self, version, build_data):
"""
This occurs immediately before each build.
Any modifications to the build data will be seen by the build target.
"""
def finalize(self, version, build_data, artifact_path):
"""
This occurs immediately after each build and will not run if the `--hooks-only` flag
was passed to the [`build`](../cli/reference.md#hatch-build) command.
The build data will reflect any modifications done by the target during the build.
"""
| [
4871,
10934,
39,
566,
39317,
7,
15252,
2599,
220,
1303,
645,
39849,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
17934,
8748,
25,
628,
220,
220,
220,
24844,
366,
25,
78,
11048,
684,
12,
7753,
12,
8189,
12,
1433,
25,
13877,
13,
9... | 2.270706 | 1,304 |
import collections
import datetime
import os
import re
import time
import unicodedata
from django.conf import settings
from django.core.management import base
from django.db import transaction
from django.utils import timezone
from ...models import Album, Photo
from ...storages import get_storage
ignores = [re.compile(i) for i in getattr(settings, 'GALLERY_IGNORES', ())]
patterns = [(cat, re.compile(pat)) for cat, pat in getattr(settings, 'GALLERY_PATTERNS', ())]
def walk_photo_storage(storage, path=''):
    """
    Recursively yield (directory path, file names) pairs for the storage.

    Directories that contain no files of their own are not yielded, but
    their subdirectories are still visited.
    """
    subdirs, filenames = storage.listdir(path)
    if filenames:
        yield path, filenames
    for subdir in subdirs:
        yield from walk_photo_storage(storage, os.path.join(path, subdir))
def iter_photo_storage(command):
    """
    Yield one (relative path, category, regex captures) triple per photo.

    Ignored and unmatched paths are reported through the command's output
    streams instead of being yielded.
    """
    storage = get_storage('photo')
    for dirpath, filenames in walk_photo_storage(storage):
        for name in filenames:
            # HFS+ stores names in NFD which causes issues with some fonts,
            # so normalize every path to NFKC before matching.
            filepath = unicodedata.normalize('NFKC', os.path.join(dirpath, name))
            if is_ignored(filepath):
                command.write_out(f"- {filepath}", verbosity=3)
                continue
            result = is_matched(filepath)
            if result is None:
                command.write_err(f"? {filepath}", verbosity=1)
                continue
            command.write_out(f"> {filepath}", verbosity=3)
            category, captures = result
            yield filepath, category, captures
def scan_photo_storage(command):
    """
    Index every photo on disk into albums keyed by (category, dirpath).

    Each album value is a mapping of filename -> regex captures.
    The result can be passed to ``synchronize_albums`` and
    ``synchronize_photos``.
    """
    albums = collections.defaultdict(dict)
    for filepath, category, captures in iter_photo_storage(command):
        directory, name = os.path.split(filepath)
        albums[(category, directory)][name] = captures
    return albums
def get_album_info(captures, command):
    """
    Return the (date, name) of an album.

    ``captures`` are elements extracted from the file name of a random photo
    in the album: 'a_year'/'a_month'/'a_day' build the date, and all
    'a_name*' keys are joined (in key order) to build the display name.

    :param captures: dict of regex capture groups for one photo path.
    :param command: management command used for reporting; its ``write_err``
        is only called when the captured date parts are invalid.
    :returns: (datetime.date or None, str) tuple.
    """
    date = None
    # Bind kwargs before the try block so the ValueError handler below can
    # reference it even when int() fails while building the dict (previously
    # that raised UnboundLocalError instead of logging the problem).
    kwargs = {}
    try:
        kwargs = {
            k: int(captures['a_' + k])
            for k in ('year', 'month', 'day')
        }
        date = datetime.date(**kwargs)
    except KeyError:
        # Date parts are optional; leave date as None when absent.
        pass
    except ValueError as e:
        # Non-numeric or out-of-range date parts: report and keep date=None.
        command.write_err(f"{e} {kwargs}", verbosity=1)
    # '/' inside captured names renders as a breadcrumb separator.
    name = ' '.join(v for k, v in sorted(captures.items())
                    if k.startswith('a_name') and v is not None)
    name = name.replace('/', ' > ')
    return date, name
def get_photo_info(captures, command):
    """
    Return the datetime of a photo, or None when it cannot be determined.

    ``captures`` are elements extracted from the file name of the photo
    ('p_year' through 'p_second'). When Django's USE_TZ is enabled, the
    naive datetime is converted to the default timezone.

    :param captures: dict of regex capture groups for the photo path.
    :param command: management command used for reporting; its ``write_err``
        is only called when the captured timestamp parts are invalid.
    :returns: datetime.datetime (aware if USE_TZ) or None.
    """
    date = None
    # Bind kwargs before the try block so the ValueError handler below can
    # reference it even when int() fails while building the dict (previously
    # that raised UnboundLocalError instead of logging the problem).
    kwargs = {}
    try:
        kwargs = {
            k: int(captures['p_' + k])
            for k in ('year', 'month', 'day', 'hour', 'minute', 'second')
        }
        date = datetime.datetime(**kwargs)
        if settings.USE_TZ:
            date = timezone.make_aware(date, timezone.get_default_timezone())
    except KeyError:
        # Timestamp parts are optional; leave date as None when absent.
        pass
    except ValueError as e:
        # Non-numeric or out-of-range parts: report and keep date as None.
        command.write_err(f"{e} {kwargs}", verbosity=1)
    return date
def synchronize_albums(albums, command):
    """
    Synchronize albums from the filesystem to the database.

    Creates an Album row for every (category, dirpath) found on disk but
    missing from the database, and deletes rows whose directory no longer
    exists on disk. ``albums`` is the result of ``scan_photo_storage``.
    """
    new_keys = set(albums.keys())
    old_keys = set((a.category, a.dirpath) for a in Album.objects.all())
    for category, dirpath in sorted(new_keys - old_keys):
        # Album date/name are derived from any one photo's captured groups;
        # all photos in a directory are assumed to share the album metadata.
        random_capture = next(iter(albums[category, dirpath].values()))
        date, name = get_album_info(random_capture, command)
        command.write_out(f"Adding album {dirpath} ({category}) as {name}", verbosity=1)
        Album.objects.create(category=category, dirpath=dirpath, date=date, name=name)
    for category, dirpath in sorted(old_keys - new_keys):
        command.write_out(f"Removing album {dirpath} ({category})", verbosity=1)
        Album.objects.get(category=category, dirpath=dirpath).delete()
def synchronize_photos(albums, command):
    """
    Synchronize photos from the filesystem to the database.

    For every album: create Photo rows (with thumbnails) for new files,
    delete rows for files that disappeared, and -- only when the command
    runs with ``full_sync`` -- re-check the dates of existing photos.
    ``albums`` is the result of ``scan_photo_storage``.
    """
    # NOTE(review): the literal "(unknown)" in the messages below looks like
    # a placeholder where the filename was meant to be interpolated --
    # confirm the intended log format.
    for (category, dirpath), filenames in albums.items():
        album = Album.objects.get(category=category, dirpath=dirpath)
        new_keys = set(filenames.keys())
        old_keys = set(p.filename for p in album.photo_set.all())
        for filename in sorted(new_keys - old_keys):
            date = get_photo_info(albums[category, dirpath][filename], command)
            command.write_out(
                f"Adding photo (unknown) to album {dirpath} ({category})",
                verbosity=2)
            photo = Photo.objects.create(album=album, filename=filename, date=date)
            # Pre-generate one thumbnail per configured resize preset.
            for preset in command.resize_presets:
                photo.thumbnail(preset)
        for filename in sorted(old_keys - new_keys):
            command.write_out(
                f"Removing photo (unknown) from album {dirpath} ({category})",
                verbosity=2)
            photo = Photo.objects.get(album=album, filename=filename)
            photo.delete()
        # Date re-validation of unchanged photos is opt-in (slow path).
        if not command.full_sync:
            continue
        for filename in sorted(old_keys & new_keys):
            date = get_photo_info(albums[category, dirpath][filename], command)
            photo = Photo.objects.get(album=album, filename=filename)
            if date != photo.date:
                command.write_out(
                    f"Fixing date of photo (unknown) from album {dirpath} ({category})",
                    verbosity=2)
                photo.date = date
                photo.save()
| [
11748,
17268,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
640,
198,
11748,
28000,
9043,
1045,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
1330,
2779,
198,
... | 2.412736 | 2,544 |
import os
import dash
from texts import APP_TITLE
from style_settings import EXTERNAL_STYLESHEETS
# Create the Dash application; EXTERNAL_STYLESHEETS supplies the CSS bundles.
app = dash.Dash(__name__, external_stylesheets=EXTERNAL_STYLESHEETS)
app.title = APP_TITLE
# Expose the underlying Flask server (e.g. for a WSGI host like gunicorn).
server = app.server
# NOTE(review): the "secret" fallback is insecure for production sessions --
# prefer requiring the secret_key environment variable to be set.
server.secret_key = os.environ.get("secret_key", "secret")
# Allow callbacks that reference components added to the layout dynamically.
app.config.suppress_callback_exceptions = True
| [
11748,
28686,
198,
11748,
14470,
198,
6738,
13399,
1330,
43504,
62,
49560,
2538,
198,
6738,
3918,
62,
33692,
1330,
7788,
31800,
1847,
62,
2257,
56,
28378,
13909,
32716,
198,
198,
1324,
796,
14470,
13,
43041,
7,
834,
3672,
834,
11,
7097,... | 2.953271 | 107 |
"""Tests for the google_domains component."""
| [
37811,
51,
3558,
329,
262,
23645,
62,
3438,
1299,
7515,
526,
15931,
198
] | 3.538462 | 13 |
from math import inf
import os
import subprocess
import pickle
import ipdb
from matplotlib import pyplot as plt
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D
HEAD = 13
NECK = 12
LEFT_SHOULDER = 9
RIGHT_SHOULDER = 8
LEFT_ELBOW = 10
RIGHT_ELBOW = 7
LEFT_WRIST = 11
RIGHT_WRIST = 6
LEFT_HIP = 3
RIGHT_HIP = 2
LEFT_KNEE = 4
RIGHT_KNEE = 1
LEFT_HEEL = 5
RIGHT_HEEL = 0
LEFT_BTOE = 19
RIGHT_BTOE = 20
LEFT_STOE = 21
RIGHT_STOE = 22
LEFT_ANKLE = 23
RIGHT_ANKLE = 24
LEFT_EAR = 17
RIGHT_EAR = 18
NOSE = 14
LEFT_EYE = 15
RIGHT_EYE = 16
joints= {
0: 'Right_heel', 1: 'Right_knee', 2: 'Right_hip', 3: 'Left_hip', 4: 'Left_knee',
5: 'Left_heel', 6: 'Right_wrist',7: 'Right_elbow', 8: 'Right_shoulder', 9: 'Left_shoulder',
10: 'Left_elbow', 11: 'Left_wrist', 12: 'Neck', 13: 'Head_top', 14: 'Nose', 15: 'Left_eye',
16: 'Right_eye', 17: 'Left_ear', 18: 'Right_ear', 19: 'Left_big_toe', 20: 'Right_big_toe',
21: 'Left_small_toe', 22: 'Right_small_toe', 23: 'Left_ankle', 24: 'Right_ankle'
}
def make_video(output_path, img_dir, fps=25):
"""
output_path is the final mp4 name
img_dir is where the images to make into video are saved.
"""
cmd = [
'ffmpeg',
'-y',
'-threads', '16',
'-framerate', str(fps),
'-i', '{img_dir}/frame%04d.png'.format(img_dir=img_dir),
'-profile:v', 'baseline',
'-level', '3.0',
'-c:v', 'libx264',
'-pix_fmt', 'yuv420p',
'-an',
'-vf', 'scale=trunc(iw/2)*2:trunc(ih/2)*2',
output_path,
]
print(' '.join(cmd))
try:
err = subprocess.call(cmd)
if err:
ipdb.set_trace()
except OSError:
ipdb.set_trace()
print('OSError')
if __name__ == '__main__':
#Should have the 3D joint coordinates [x,y,z] for each joint in the kinematic skeleton
main_dir = '/home/nayari/projects/SoccerKicks'
dir = main_dir + '/Rendered/'
action = 'Penalty/' #Penalty Freekick
file_name = '14_penalty' #
input_dir = dir + action + file_name
ouput_dir = main_dir + '/animations/' + file_name + '/'
preds_file = 'hmmr_output.pkl'
input_alpha = dir + action + file_name + '/hmmr_output/'
alphapose_in = input_alpha + preds_file
openpose_in = dir + action + file_name + '/hmmr_output_openpose/'+ preds_file
make_image(openpose_in, ouput_dir, file_name, frame = 16)
| [
6738,
10688,
1330,
1167,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
2298,
293,
198,
11748,
20966,
9945,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
1330,
11034,
198,
6738,
285,... | 2.01668 | 1,259 |
import numpy as np
from math import degrees
import math
a = [0, 0]
b = [2, 4]
c = [6, 6]
print(findAngle(a, b, c))
| [
11748,
299,
32152,
355,
45941,
198,
6738,
10688,
1330,
7370,
198,
11748,
10688,
628,
198,
64,
796,
685,
15,
11,
657,
60,
198,
65,
796,
685,
17,
11,
604,
60,
198,
66,
796,
685,
21,
11,
718,
60,
198,
4798,
7,
19796,
13450,
293,
7,... | 2.269231 | 52 |
from sklearn.cluster import KMeans
import numpy as np
from crankshaft.analysis_data_provider import AnalysisDataProvider
| [
6738,
1341,
35720,
13,
565,
5819,
1330,
509,
5308,
504,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1067,
2283,
3099,
701,
13,
20930,
62,
7890,
62,
15234,
1304,
1330,
14691,
6601,
29495,
628
] | 3.514286 | 35 |
"""
Created by Edward Li at 8/23/20
"""
import numpy as np
| [
37811,
198,
41972,
416,
10443,
7455,
379,
807,
14,
1954,
14,
1238,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
628
] | 2.857143 | 21 |
from copy import deepcopy
import pytest
from django.urls import reverse
from rest_framework import status
from pet.models import Pet
@pytest.mark.django_db
| [
6738,
4866,
1330,
2769,
30073,
198,
198,
11748,
12972,
9288,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
1334,
62,
30604,
1330,
3722,
198,
198,
6738,
4273,
13,
27530,
1330,
4767,
628,
198,
31,
9078,
9288,
13,
4102,
1... | 3.404255 | 47 |
from lxml import html
if __name__ == '__main__':
parse_html() | [
6738,
220,
300,
19875,
1330,
27711,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
21136,
62,
6494,
3419
] | 2.576923 | 26 |
"""Demonstrate docstrings and does nothing really."""
def myfunc():
"""Simple function to demonstrate circleci."""
return 1
print(myfunc())
| [
37811,
35477,
23104,
2205,
37336,
290,
857,
2147,
1107,
526,
15931,
198,
198,
4299,
616,
20786,
33529,
198,
220,
220,
220,
37227,
26437,
2163,
284,
10176,
9197,
979,
526,
15931,
198,
220,
220,
220,
1441,
352,
198,
198,
4798,
7,
1820,
... | 3.409091 | 44 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: convert_snake_case
short_description: test converting data to snake_case
description: test converting data to snake_case
options:
data:
description: Data to modify
type: dict
required: True
reversible:
description:
- Make the snake_case conversion in a way that can be converted back to the original value
- For example, convert IAMUser to i_a_m_user instead of iam_user
default: False
ignore_list:
description: list of top level keys that should not have their contents converted
type: list
default: []
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
22961,
3611,
5094,
13789,
410,
18,
13,
15,
10,
357,
3826,
27975,
45761,
393,
3740,
1378,
2503,
13,
41791,
13,
2398,
... | 3.058282 | 326 |
buger = juice = 2001
for i in range(0, 5) :
if(i < 3) :
new_buger = int(input())
if new_buger < buger :
buger = new_buger
else :
new_juice = int(input())
if new_juice < juice :
juice = new_juice
print(buger + juice - 50) | [
25456,
263,
796,
13135,
796,
5878,
198,
1640,
1312,
287,
2837,
7,
15,
11,
642,
8,
1058,
198,
220,
220,
220,
611,
7,
72,
1279,
513,
8,
1058,
198,
220,
220,
220,
220,
220,
220,
220,
649,
62,
25456,
263,
796,
493,
7,
15414,
28955,
... | 1.912752 | 149 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# ---------------------------------------------------------
from math import floor
import cv2
import numpy as np
def blur(src, radius=5):
"""Wrapper function for cv2.GaussianBlur
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
radius (int, optional) : size of the square kernel, MUST be an odd integer.
Defaults to 5.
Returns:
numpy.ndarray: a copy of the source image after apply the effect
"""
return cv2.GaussianBlur(src, (radius, radius), cv2.BORDER_DEFAULT)
def overlay_weighted(src, background, alpha, beta, gamma=0):
"""overlay two images together, pixels from each image is weighted as follow
dst[i] = alpha*src[i] + beta*background[i] + gamma
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
background (numpy.ndarray) : background image. Must be in same shape are `src`
alpha (float) : transparent factor for the foreground
beta (float) : transparent factor for the background
gamma (int, optional) : luminance constant. Defaults to 0.
Returns:
numpy.ndarray: a copy of the source image after apply the effect
"""
return cv2.addWeighted(src, alpha, background, beta, gamma).astype(np.uint8)
def overlay(src, background):
"""Overlay two images together via bitwise-and:
dst[i] = src[i] & background[i]
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
background (numpy.ndarray) : background image. Must be in same shape are `src`
Returns:
numpy.ndarray: a copy of the source image after apply the effect
"""
return cv2.bitwise_and(src, background).astype(np.uint8)
def translation(src, offset_x, offset_y):
"""Shift the image in x, y direction
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
offset_x (int) : pixels in the x direction.
Positive value shifts right and negative shifts right.
offset_y (int) : pixels in the y direction.
Positive value shifts down and negative shifts up.
Returns:
numpy.ndarray: a copy of the source image after apply the effect
"""
rows, cols = src.shape
trans_matrix = np.float32([[1, 0, offset_x], [0, 1, offset_y]])
# size of the output image should be in the form of (width, height)
dst = cv2.warpAffine(src, trans_matrix, (cols, rows), borderValue=255)
return dst.astype(np.uint8)
def bleed_through(src, background=None, alpha=0.8, gamma=0, offset_x=0, offset_y=5):
"""Apply bleed through effect, background is flipped horizontally.
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
background (numpy.ndarray, optional) : background image. Must be in same
shape as foreground. Defaults to None.
alpha (float, optional) : transparent factor for the foreground. Defaults to 0.8.
gamma (int, optional) : luminance constant. Defaults to 0.
offset_x (int, optional) : background translation offset. Defaults to 0.
Positive value shifts right and negative shifts right.
offset_y (int, optional) : background translation offset. Defaults to 5.
Positive value shifts down and negative shifts up.
Returns:
numpy.ndarray: a copy of the source image after apply the effect. Pixel value ranges [0, 255]
"""
if background is None:
background = src.copy()
background = cv2.flip(background, 1) # flipped horizontally
background = translation(background, offset_x, offset_y)
beta = 1 - alpha
return overlay_weighted(src, background, alpha, beta, gamma)
def pepper(src, amount=0.05):
"""Randomly sprinkle dark pixels on src image.
Wrapper function for skimage.util.noise.random_noise().
See https://scikit-image.org/docs/stable/api/skimage.util.html#random-noise
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
amount (float, optional) : proportion of pixels in range [0, 1] to apply the effect.
Defaults to 0.05.
Returns:
numpy.ndarray: a copy of the source image after apply the effect.
Pixel value ranges [0, 255] as uint8.
"""
dst = src.copy()
# Method returns random floats in uniform distribution [0, 1)
noise = np.random.random(src.shape)
dst[noise < amount] = 0
return dst.astype(np.uint8)
def salt(src, amount=0.3):
"""Randomly sprinkle white pixels on src image.
Wrapper function for skimage.util.noise.random_noise().
See https://scikit-image.org/docs/stable/api/skimage.util.html#random-noise
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
amount (float, optional) : proportion of pixels in range [0, 1] to apply the effect.
Defaults to 0.05.
Returns:
numpy.ndarray: a copy of the source image after apply the effect.
Pixel value ranges [0, 255]
"""
dst = src.copy()
# Method returns random floats in uniform distribution [0, 1)
noise = np.random.random(src.shape)
dst[noise < amount] = 255
return dst.astype(np.uint8)
def salt_then_pepper(src, salt_amount=0.1, pepper_amount=0.05):
"""Randomly add salt then add pepper onto the image.
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
salt_amount (float) : proportion of pixels in range [0, 1] to
apply the salt effect.
Defaults to 0.1.
pepper_amount (float) : proportion of pixels in range [0, 1] to
apply the pepper effect.
Defaults to 0.05.
Returns:
numpy.ndarray: a copy of the source image after apply the effect.
Pixel value ranges [0, 255] as uint8.
"""
salted = salt(src, amount=salt_amount)
return pepper(salted, amount=pepper_amount)
def pepper_then_salt(src, pepper_amount=0.05, salt_amount=0.1):
"""Randomly add pepper then salt onto the image.
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
pepper_amount (float) : proportion of pixels in range [0, 1] to
apply the pepper effect.
Defaults to 0.05.
salt_amount (float) : proportion of pixels in range [0, 1] to
apply the salt effect.
Defaults to 0.1.
Returns:
numpy.ndarray: a copy of the source image after apply the effect.
Pixel value ranges [0, 255] as uint8.
"""
peppered = pepper(src, amount=pepper_amount)
return salt(peppered, amount=salt_amount)
def create_2D_kernel(kernel_shape, kernel_type="ones"):
"""Create 2D kernel for morphological operations.
Arguments:
kernel_shape (tuple) : shape of the kernel (rows, cols)
kernel_type (str, optional) : type of kernel. Defaults to "ones".
::
All supported kernel types are below:
"ones": kernel is filled with all 1s in shape (rows, cols)
[[1,1,1],
[1,1,1],
[1,1,1]]
"upper_triangle": upper triangular matrix filled with ones
[[1,1,1],
[0,1,1],
[0,0,1]]
"lower_triangle": lower triangular matrix filled with ones
[[1,0,0],
[1,1,0],
[1,1,1]]
"x": "X" shape cross
[[1,0,1],
[0,1,0],
[1,0,1]]
"plus": "+" shape cross
[[0,1,0],
[1,1,1],
[0,1,0]]
"ellipse": elliptical kernel
[[0, 0, 1, 0, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[0, 0, 1, 0, 0]]
Raises:
ValueError: if kernel is not a 2-element tuple or
kernel_type is not one of the supported values
Returns:
numpy.ndarray: a 2D array of shape `kernel_shape`.
"""
if len(kernel_shape) != 2:
raise ValueError("Kernel shape must be a tuple of 2 integers")
kernel_rows, kernel_cols = kernel_shape
if kernel_type == "ones":
kernel = np.ones(kernel_shape)
elif kernel_type == "upper_triangle":
kernel = np.triu(np.ones(kernel_shape))
elif kernel_type == "lower_triangle":
kernel = np.tril(np.ones(kernel_shape))
elif kernel_type == "x":
diagonal = np.eye(kernel_rows, kernel_cols)
kernel = np.add(diagonal, np.fliplr(diagonal))
kernel[kernel > 1] = 1
elif kernel_type == "plus":
kernel = np.zeros(kernel_shape)
center_col = floor(kernel.shape[0] / 2)
center_row = floor(kernel.shape[1] / 2)
kernel[:, center_col] = 1
kernel[center_row, :] = 1
elif kernel_type == "ellipse":
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernel_shape)
else:
valid_kernel_types = {
"ones",
"upper_triangle",
"lower_triangle",
"x",
"plus",
"ellipse",
}
raise ValueError(
f"Invalid kernel_type: {kernel_type}. Valid types are {valid_kernel_types}"
)
return kernel.astype(np.uint8)
def morphology(src, operation="open", kernel_shape=(3, 3), kernel_type="ones"):
"""Dynamic calls different morphological operations
("open", "close", "dilate" and "erode") with the given parameters
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
operation (str, optional) : name of a morphological operation:
``("open", "close", "dilate", "erode")``
Defaults to ``"open"``.
kernel_shape (tuple, optional) : shape of the kernel (rows, cols).
Defaults to (3,3).
kernel_type (str, optional) : type of kernel.
``("ones", "upper_triangle", "lower_triangle", "x", "plus", "ellipse")``
Defaults to ``"ones"``.
Returns:
numpy.ndarray: a copy of the source image after apply the effect.
"""
kernel = create_2D_kernel(kernel_shape, kernel_type)
if operation == "open":
return open(src, kernel)
elif operation == "close":
return close(src, kernel)
elif operation == "dilate":
return dilate(src, kernel)
elif operation == "erode":
return erode(src, kernel)
else:
valid_operations = ["open", "close", "dilate", "erode"]
raise ValueError(
f"Invalid morphology operation '{operation}'. Valid morphological operations are {valid_operations}"
)
def open(src, kernel):
""" "open" morphological operation. Like morphological "erosion", it removes
foreground pixels (white pixels), however it is less destructive than erosion.
For more information see:
1. https://docs.opencv.org/master/d9/d61/tutorial_py_morphological_ops.html
2. http://homepages.inf.ed.ac.uk/rbf/HIPR2/open.htm
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
kernel (numpy.ndarray) : a 2D array for structuring the morphological effect
Returns:
numpy.ndarray: a copy of the source image after apply the effect.
"""
return cv2.morphologyEx(src, cv2.MORPH_OPEN, kernel)
def close(src, kernel):
""" "close" morphological operation. Like morphological "dilation", it grows the
boundary of the foreground (white pixels), however, it is less destructive than
dilation of the original boundary shape.
For more information see:
1. https://docs.opencv.org/master/d9/d61/tutorial_py_morphological_ops.html
2. http://homepages.inf.ed.ac.uk/rbf/HIPR2/close.htm
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
kernel (numpy.ndarray) : a 2D array for structuring the morphological effect
Returns:
numpy.ndarray: a copy of the source image after apply the effect.
"""
return cv2.morphologyEx(src, cv2.MORPH_CLOSE, kernel)
def erode(src, kernel):
""" "erode" morphological operation. Erodes foreground pixels (white pixels).
For more information see:
1. https://docs.opencv.org/master/d9/d61/tutorial_py_morphological_ops.html
2. http://homepages.inf.ed.ac.uk/rbf/HIPR2/erode.htm
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
kernel (numpy.ndarray) : a 2D array for structuring the morphological effect
Returns:
numpy.ndarray: a copy of the source image after apply the effect.
"""
return cv2.erode(src, kernel)
def dilate(src, kernel):
""" "dilate" morphological operation. Grows foreground pixels (white pixels).
For more information see:
1. https://docs.opencv.org/master/d9/d61/tutorial_py_morphological_ops.html
2. http://homepages.inf.ed.ac.uk/rbf/HIPR2/dilate.htm
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
kernel (numpy.ndarray) : a 2D array for structuring the morphological effect
Returns:
numpy.ndarray: a copy of the source image after apply the effect.
"""
return cv2.dilate(src, kernel)
| [
2,
20368,
22369,
12,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
198,
2,
20368,
22369,
12,
198,
198,
6738,
10688,
1330,
4314,
198,
198,
11748,
269,
85,
17,
198,
117... | 2.345365 | 5,901 |
# Code was created by M. Heriyanto, 2020/01/13
# https://github.com/ezygeo-ai/machine-learning-and-geophysical-inversion/blob/master/scripts/fwd_sp.py
import numpy as np
import matplotlib.pyplot as plt
import pickle
# SP forward function
# === TEST FORWARD MODELING
x0 = 77.07 # m
alpha = 309.37 * (np.pi/180) # deg2rad
h = 41.81 # m
K = 94686
measure_loc = np.linspace(0, 150, 101) # Location of measurement
print('number of data: ', len(measure_loc))
par_mod = [x0, alpha, h, K] # model parameter of subsurface
get_SPData, get_SPData_noise, noise_from_maxData = SPfunc(measure_loc, par_mod) # forward modeling test
plt.figure()
plt.plot(measure_loc, get_SPData, 'b.')
plt.plot(measure_loc, get_SPData_noise, 'r*')
plt.xlim([0, 150])
plt.ylim([-10, 50])
plt.xlabel('position (m)')
plt.ylabel('SP data (mV)')
plt.legend(['ori', 'noise'])
plt.grid()
plt.figure()
plt.hist(noise_from_maxData, density=True, bins=20)
plt.ylabel('noise distribution')
plt.show()
with open('../data/SP_syn_data.pickle', 'wb') as f:
pickle.dump([measure_loc, get_SPData_noise], f)
| [
2,
6127,
373,
2727,
416,
337,
13,
2332,
7745,
14723,
11,
12131,
14,
486,
14,
1485,
201,
198,
2,
3740,
1378,
12567,
13,
785,
14,
68,
7357,
469,
78,
12,
1872,
14,
30243,
12,
40684,
12,
392,
12,
469,
41789,
12,
259,
9641,
14,
2436,... | 2.247505 | 501 |
"""
This is a script includes two type of experience replay pool
"""
import random
from collections import namedtuple
from sum_tree import SumSegmentTree, MinSegmentTree
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))
class ReplayMemory:
"""Experience replay pool"""
def push(self, *args):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
class PrioritizedReplayMemory(ReplayMemory):
"""Prioritized Experience replay pool"""
| [
37811,
198,
1212,
318,
257,
4226,
3407,
734,
2099,
286,
1998,
24788,
5933,
198,
198,
37811,
198,
11748,
4738,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
6738,
2160,
62,
21048,
1330,
5060,
41030,
434,
27660,
11,
1855,
41030,
434,
2766... | 3.009091 | 220 |
import sys, subprocess, time
"""
This script is made as a wrapper for sc2 bots to set a timeout to the bots (in case they can't find the last enemy structure or the game is ending in a draw)
Ideally this script should be done with a bot that terminates on its own after certain things have been achieved, e.g. testing if the bot can expand at all, and then terminates after it has successfully expanded.
Usage: see .bat files in /bat_files/ folder
cd into python-sc2/ directory
docker build -t test_image -f test/Dockerfile .
docker run test_image -c "python test/travis_test_script.py test/autotest_bot.py"
Or if you want to run from windows:
pipenv run python test/travis_test_script.py test/autotest_bot.py
"""
retries = 3
# My maxout bot (reaching 200 supply in sc2) took 110 - 140 real seconds for 7 minutes in game time
# How long the script should run before it will be killed:
timeout_time = 5 * 60 # 5 minutes real time
if len(sys.argv) > 1:
# Attempt to run process with retries and timeouts
t0 = time.time()
process, result = None, None
output_as_list = []
i = 0
for i in range(retries):
t0 = time.time()
process = subprocess.Popen(["python", sys.argv[1]], stdout=subprocess.PIPE)
try:
# Stop the current bot if the timeout was reached - the bot needs to finish a game within 3 minutes real time
result = process.communicate(timeout=timeout_time)
except subprocess.TimeoutExpired:
continue
out, err = result
result = out.decode("utf-8")
if process.returncode is not None and process.returncode != 0:
# Bot has thrown an error, try again
print(f"Bot has thrown an error with error code {process.returncode}. This was try {i+1} out of {retries}.")
continue
# Break as the bot run was successful
break
if process.returncode is not None:
# Reformat the output into a list
print_output: str = result
linebreaks = [
["\r\n", print_output.count("\r\n")],
["\r", print_output.count("\r")],
["\n", print_output.count("\n")],
]
most_linebreaks_type = max(linebreaks, key=lambda x: x[1])
linebreak_type, linebreak_count = most_linebreaks_type
output_as_list = print_output.split(linebreak_type)
print("Travis test script, bot output:\r\n{}\r\nEnd of bot output".format("\r\n".join(output_as_list)))
time_taken = time.time() - t0
# Bot was not successfully run in time, returncode will be None
if process.returncode is None or process.returncode != 0:
print(
f"Exiting with exit code 5, error: Attempted to launch script {sys.argv[1]} timed out after {time_taken} seconds. Retries completed: {i}"
)
exit(5)
# process.returncode will always return 0 if the game was run successfully or if there was a python error (in this case it returns as defeat)
print("Returncode: {}".format(process.returncode))
print("Game took {} real time seconds".format(round(time.time() - t0, 1)))
if process is not None and process.returncode == 0:
for line in output_as_list:
# This will throw an error even if a bot is called Traceback
if "Traceback " in line:
print("Exiting with exit code 3")
exit(3)
print("Exiting with exit code 0")
exit(0)
# Exit code 1: game crashed I think
print("Exiting with exit code 1")
exit(1)
# Exit code 2: bot was not launched
print("Exiting with exit code 2")
exit(2)
| [
11748,
25064,
11,
850,
14681,
11,
640,
198,
198,
37811,
198,
1212,
4226,
318,
925,
355,
257,
29908,
329,
629,
17,
29641,
284,
900,
257,
26827,
284,
262,
29641,
357,
259,
1339,
484,
460,
470,
1064,
262,
938,
4472,
4645,
393,
262,
983... | 2.663971 | 1,360 |
#
# BSD 2-Clause License
#
# Copyright (c) 2021, Cristel Chandre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as xp
import multiprocess
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib import cm
import copy
import time
from datetime import date
from scipy.io import savemat
from tqdm import tqdm
plt.rcParams.update({
'text.usetex': True,
'font.family': 'serif',
'font.sans-serif': ['Palatino'],
'font.size': 24,
'axes.labelsize': 30,
'figure.figsize': [8, 8],
'image.cmap': 'bwr'})
| [
2,
198,
2,
347,
10305,
362,
12,
2601,
682,
13789,
198,
2,
198,
2,
15069,
357,
66,
8,
33448,
11,
24568,
417,
20631,
260,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
... | 3.147059 | 578 |
# -*- coding: utf-8 -*-
# Zilliqa Python Library
# Copyright (C) 2019 Gully Chen
# MIT License
"""
zillipy
.zilliqa.node
~~~~~~~~~~~~
Zilliqa Node API.
:copyright: (c) 2019 by Gully Chen.
:license: MIT License, see LICENSE for more details.
"""
import socket
from jsonrpcclient.clients.socket_client import SocketClient
class Node:
"""Zilliqa Node API."""
LocalNode = Node("127.0.0.1", 4201)
if "__main__" == __name__:
print(LocalNode.GetCurrentMiniEpoch())
print(LocalNode.GetCurrentDSEpoch())
print(LocalNode.GetNodeType())
print(LocalNode.GetDSCommittee())
print(LocalNode.GetNodeState())
print(LocalNode.IsTxnInMemPool("txn_id"))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
1168,
50173,
20402,
11361,
10074,
198,
2,
15069,
357,
34,
8,
13130,
220,
402,
2132,
12555,
198,
2,
17168,
13789,
198,
37811,
198,
89,
359,
541,
88,
198,
13,
89,
5... | 2.645669 | 254 |
import os
import sys
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim.python.slim.learning import train_step
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as tf_saver
from scipy.ndimage.filters import gaussian_filter1d
from scipy.misc import imread, imresize
from slim.preprocessing import inception_preprocessing
from image_model import inception_v1
from datasets import dataset_utils
from text_model.text_preprocessing import _load_embedding_weights_glove, _paragraph_to_ids
from image_model.im_model import load_batch_with_text, get_init_fn
from datasets.convert_to_dataset import get_split_with_text
import matplotlib.pyplot as plt
_POST_SIZE = 50
_CONFIG = {'mode': 'train',
'dataset_dir': 'data',
'text_dir': 'text_model',
'emb_dir': 'embedding_weights',
'filename': 'glove.6B.50d.txt',
'initial_lr': 1e-3,
'decay_factor': 0.3,
'batch_size': 64,
'im_features_size': 256,
'rnn_size': 1024,
'final_endpoint': 'Mixed_5c',
'fc_size': 512}
def train_deep_sentiment(checkpoints_dir, train_dir, num_steps):
"""Fine tune the inception model, retraining the last layer.
Parameters:
dataset_dir: The directory containing the data.
checkpoints_dir: The directory contained the pre-trained model.
train_dir: The directory to save the trained model.
num_steps: The number of steps training the model.
"""
if tf.gfile.Exists(train_dir):
# Delete old model
tf.gfile.DeleteRecursively(train_dir)
tf.gfile.MakeDirs(train_dir)
with tf.Graph().as_default():
model = DeepSentiment2(_CONFIG)
# Specify the loss function:
one_hot_labels = slim.one_hot_encoding(model.labels, model.nb_emotions)
slim.losses.softmax_cross_entropy(model.logits, one_hot_labels)
total_loss = slim.losses.get_total_loss()
# Create some summaries to visualize the training process
# Use tensorboard --logdir=train_dir, careful with path (add Documents/tumblr-sentiment in front of train_dir)
# Different from the logs, because computed on different mini batch of data
tf.summary.scalar('Loss', total_loss)
# Specify the optimizer and create the train op:
optimizer = tf.train.AdamOptimizer(learning_rate=model.learning_rate)
train_op = slim.learning.create_train_op(total_loss, optimizer)
batch_size = _CONFIG['batch_size']
initial_lr = _CONFIG['initial_lr']
decay_factor = _CONFIG['decay_factor']
nb_batches = model.dataset.num_samples / batch_size
train_step_fn.step = 0
train_step_fn.epoch = 0
# Run the training:
final_loss = slim.learning.train(
train_op,
logdir=train_dir,
init_fn=get_init_fn(checkpoints_dir),
save_interval_secs=600,
save_summaries_secs=600,
train_step_fn=train_step_fn,
number_of_steps=num_steps)
print('Finished training. Last batch loss {0:.3f}'.format(final_loss))
def evaluate_deep_sentiment(checkpoint_dir, log_dir, mode, num_evals):
"""Visualise results with: tensorboard --logdir=logdir. Now has train/validation curves on the same plot
Parameters:
checkpoint_dir: Checkpoint of the saved model during training.
log_dir: Directory to save logs.
mode: train or validation.
num_evals: Number of batches to evaluate (mean of the batches is displayed).
"""
with tf.Graph().as_default():
config = _CONFIG.copy()
config['mode'] = mode
model = DeepSentiment2(config)
# Accuracy metrics
accuracy = slim.metrics.streaming_accuracy(tf.cast(model.labels, tf.int32),
tf.cast(tf.argmax(model.logits, 1), tf.int32))
# Choose the metrics to compute:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'accuracy': accuracy,
})
for metric_name, metric_value in names_to_values.iteritems():
tf.summary.scalar(metric_name, metric_value)
log_dir = os.path.join(log_dir, mode)
# Evaluate every eval_interval_secs secs or if not specified,
# every time the checkpoint_dir changes
# tf.get_variable variables are also restored
slim.evaluation.evaluation_loop(
'',
checkpoint_dir,
log_dir,
num_evals=num_evals,
eval_op=names_to_updates.values())
def correlation_matrix(nb_batches, checkpoint_dir):
"""Computes logits and labels of the input posts and save them as numpy files.
Parameters:
checkpoint_dir: Checkpoint of the saved model during training.
"""
with tf.Graph().as_default():
config = _CONFIG.copy()
config['mode'] = 'validation'
model = DeepSentiment(config)
# Load model
checkpoint_path = tf_saver.latest_checkpoint(checkpoint_dir)
scaffold = monitored_session.Scaffold(
init_op=None, init_feed_dict=None,
init_fn=None, saver=None)
session_creator = monitored_session.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_filename_with_path=checkpoint_path,
master='',
config=None)
posts_logits = []
posts_labels = []
with monitored_session.MonitoredSession( # Generate queue
session_creator=session_creator, hooks=None) as session:
for i in range(nb_batches):
np_logits, np_labels = session.run([model.logits, model.labels])
posts_logits.append(np_logits)
posts_labels.append(np_labels)
posts_logits, posts_labels = np.vstack(posts_logits), np.hstack(posts_labels)
np.save('data/posts_logits.npy', posts_logits)
np.save('data/posts_labels.npy', posts_labels)
return posts_logits, posts_labels
def word_most_relevant(top_words, num_classes, checkpoint_dir):
"""Compute gradient of W_embedding to get the word most relevant to a label.
Parameters:
checkpoint_dir: Checkpoint of the saved model during training.
"""
with tf.Graph().as_default():
config = _CONFIG.copy()
mode = 'validation'
dataset_dir = config['dataset_dir']
text_dir = config['text_dir']
emb_dir = config['emb_dir']
filename = config['filename']
initial_lr = config['initial_lr']
#batch_size = config['batch_size']
im_features_size = config['im_features_size']
rnn_size = config['rnn_size']
final_endpoint = config['final_endpoint']
tf.logging.set_verbosity(tf.logging.INFO)
batch_size = 50
image_size = inception_v1.default_image_size
images = tf.placeholder(tf.float32, [batch_size, image_size, image_size, 3])
texts = tf.placeholder(tf.int32, [batch_size, _POST_SIZE])
seq_lens = tf.placeholder(tf.int32, [batch_size])
#self.learning_rate = tf.Variable(initial_lr, trainable=False)
#self.lr_rate_placeholder = tf.placeholder(tf.float32)
#self.lr_rate_assign = self.learning_rate.assign(self.lr_rate_placeholder)
#self.dataset = get_split_with_text(mode, dataset_dir)
#image_size = inception_v1.default_image_size
#images, _, texts, seq_lens, self.labels = load_batch_with_text(self.dataset, batch_size, height=image_size, width=image_size)
# Create the model, use the default arg scope to configure the batch norm parameters.
is_training = (mode == 'train')
with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
images_features, _ = inception_v1.inception_v1(images, final_endpoint=final_endpoint,
num_classes=im_features_size, is_training=is_training)
# Text model
vocabulary, embedding = _load_embedding_weights_glove(text_dir, emb_dir, filename)
vocab_size, embedding_dim = embedding.shape
word_to_id = dict(zip(vocabulary, range(vocab_size)))
# Unknown words = vector with zeros
embedding = np.concatenate([embedding, np.zeros((1, embedding_dim))])
word_to_id['<ukn>'] = vocab_size
vocab_size = len(word_to_id)
nb_emotions = num_classes
with tf.variable_scope('Text'):
# Word embedding
W_embedding = tf.get_variable('W_embedding', [vocab_size, embedding_dim], trainable=False)
#self.embedding_placeholder = tf.placeholder(tf.float32, [vocab_size, embedding_dim])
#self.embedding_init = W_embedding.assign(self.embedding_placeholder)
input_embed = tf.nn.embedding_lookup(W_embedding, texts)
#input_embed_dropout = tf.nn.dropout(input_embed, self.keep_prob)
# LSTM
cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
rnn_outputs, final_state = tf.nn.dynamic_rnn(cell, input_embed, sequence_length=seq_lens, dtype=tf.float32)
# Need to convert seq_lens to int32 for stack
texts_features = tf.gather_nd(rnn_outputs, tf.stack([tf.range(batch_size), tf.cast(seq_lens, tf.int32) - 1], axis=1))
# Concatenate image and text features
concat_features = tf.concat([images_features, texts_features], axis=1)
W_softmax = tf.get_variable('W_softmax', [im_features_size + rnn_size, nb_emotions])
b_softmax = tf.get_variable('b_softmax', [nb_emotions])
logits = tf.matmul(concat_features, W_softmax) + b_softmax
# Initialise image
#image_init = tf.random_normal([image_size, image_size, 3])
#image_init = inception_preprocessing.preprocess_image(image_init, image_size, image_size, is_training=False)
#image_init = tf.expand_dims(image_init, 0)
# Load model
checkpoint_path = tf_saver.latest_checkpoint(checkpoint_dir)
scaffold = monitored_session.Scaffold(
init_op=None, init_feed_dict=None,
init_fn=None, saver=None)
session_creator = monitored_session.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_filename_with_path=checkpoint_path,
master='',
config=None)
with monitored_session.MonitoredSession(
session_creator=session_creator, hooks=None) as session:
nb_iter = len(top_words) / batch_size
scores = []
for i in range(nb_iter):
np_images = np.zeros((batch_size, image_size, image_size, 3))
np_texts = np.ones((batch_size, _POST_SIZE), dtype=np.int32) * (vocab_size - 1)
np_texts[:, 0] = top_words[i*batch_size : (i+1)*batch_size]
np_seq_lens = np.ones(batch_size, dtype=np.int32)
scores.append(session.run(logits, feed_dict={images: np_images, texts: np_texts, seq_lens: np_seq_lens}))
scores = np.vstack(scores)
np.save('data/top_words_scores.npy', scores)
np.save('data/top_words.npy', top_words)
return scores, vocabulary, word_to_id
def outliers_detection(checkpoint_dir):
"""Find outliers using Euclidean distance in the last dense layer.
Parameters:
checkpoint_dir: Checkpoint of the saved model during training.
"""
with tf.Graph().as_default():
config = _CONFIG.copy()
config['mode'] = 'validation'
model = DeepSentiment(config)
# Load model
checkpoint_path = tf_saver.latest_checkpoint(checkpoint_dir)
scaffold = monitored_session.Scaffold(
init_op=None, init_feed_dict=None,
init_fn=None, saver=None)
session_creator = monitored_session.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_filename_with_path=checkpoint_path,
master='',
config=None)
im_features_size = config['im_features_size']
rnn_size = config['rnn_size']
dense_mean = np.zeros((im_features_size + rnn_size))
with monitored_session.MonitoredSession( # Generate queue
session_creator=session_creator, hooks=None) as session:
batch_size = config['batch_size']
nb_batches = model.dataset.num_samples / batch_size
for i in range(nb_batches):
current_dense = session.run(model.concat_features)
weight = float(i) * batch_size / ((i+1) * batch_size)
dense_mean = weight * dense_mean + (1-weight) * current_dense.mean(axis=0)
# Now look at outliers
max_norms = np.zeros((batch_size))
max_post_ids = np.zeros((batch_size))
max_logits = np.zeros((batch_size, model.dataset.num_classes))
for i in range(nb_batches):
current_dense, np_post_ids, current_logits = session.run([model.concat_features, model.post_ids,
model.logits])
current_diff = np.linalg.norm(current_dense - dense_mean, axis=1)
for k in range(batch_size):
if current_diff[k] > max_norms[k]:
max_norms[k] = current_diff[k]
max_post_ids[k] = np_post_ids[k]
max_logits[k] = current_logits[k]
np.save('data/max_norms.npy', max_norms)
np.save('data/max_post_ids.npy', max_post_ids)
np.save('data/max_logits.npy', max_logits)
return max_norms, max_post_ids, max_logits
def day_of_week_trend(checkpoint_dir):
"""Compute day of week trend.
Parameters:
checkpoint_dir: Checkpoint of the saved model during training.
"""
with tf.Graph().as_default():
config = _CONFIG.copy()
config['mode'] = 'validation'
model = DeepSentiment(config)
# Load model
checkpoint_path = tf_saver.latest_checkpoint(checkpoint_dir)
scaffold = monitored_session.Scaffold(
init_op=None, init_feed_dict=None,
init_fn=None, saver=None)
session_creator = monitored_session.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_filename_with_path=checkpoint_path,
master='',
config=None)
posts_logits = []
posts_labels = []
posts_days = []
posts_ids = []
with monitored_session.MonitoredSession( # Generate queue
session_creator=session_creator, hooks=None) as session:
batch_size = config['batch_size']
nb_batches = model.dataset.num_samples / batch_size
for i in range(nb_batches):
np_logits, np_labels, np_days, np_post_ids = session.run([model.logits, model.labels,
model.days, model.post_ids])
posts_logits.append(np_logits)
posts_labels.append(np_labels)
posts_days.append(np_days)
posts_ids.append(np_post_ids)
posts_logits, posts_labels = np.vstack(posts_logits), np.hstack(posts_labels)
posts_days, posts_ids = np.hstack(posts_days), np.hstack(posts_ids)
np.save('data/posts_logits_week.npy', posts_logits)
np.save('data/posts_labels_week.npy', posts_labels)
np.save('data/posts_days_week.npy', posts_days)
np.save('data/posts_ids_week.npy', posts_ids)
return posts_logits, posts_labels, posts_days, posts_ids
def oasis_evaluation(checkpoint_dir):
"""Compute gradient of W_embedding to get the word most relevant to a label.
Parameters:
checkpoint_dir: Checkpoint of the saved model during training.
"""
with tf.Graph().as_default():
config = _CONFIG.copy()
mode = 'validation'
dataset_dir = config['dataset_dir']
text_dir = config['text_dir']
emb_dir = config['emb_dir']
filename = config['filename']
initial_lr = config['initial_lr']
#batch_size = config['batch_size']
im_features_size = config['im_features_size']
rnn_size = config['rnn_size']
final_endpoint = config['final_endpoint']
tf.logging.set_verbosity(tf.logging.INFO)
batch_size = 1
image_size = inception_v1.default_image_size
images = tf.placeholder(tf.float32, [image_size, image_size, 3])
images_prep = inception_preprocessing.preprocess_image(images, image_size, image_size, is_training=False)
images_prep_final = tf.expand_dims(images_prep, 0)
texts = tf.placeholder(tf.int32, [batch_size, _POST_SIZE])
seq_lens = tf.placeholder(tf.int32, [batch_size])
#self.learning_rate = tf.Variable(initial_lr, trainable=False)
#self.lr_rate_placeholder = tf.placeholder(tf.float32)
#self.lr_rate_assign = self.learning_rate.assign(self.lr_rate_placeholder)
#self.dataset = get_split_with_text(mode, dataset_dir)
#image_size = inception_v1.default_image_size
#images, _, texts, seq_lens, self.labels = load_batch_with_text(self.dataset, batch_size, height=image_size, width=image_size)
# Create the model, use the default arg scope to configure the batch norm parameters.
is_training = (mode == 'train')
with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
images_features, _ = inception_v1.inception_v1(images_prep_final, final_endpoint=final_endpoint,
num_classes=im_features_size, is_training=is_training)
# Text model
vocabulary, embedding = _load_embedding_weights_glove(text_dir, emb_dir, filename)
vocab_size, embedding_dim = embedding.shape
word_to_id = dict(zip(vocabulary, range(vocab_size)))
# Unknown words = vector with zeros
embedding = np.concatenate([embedding, np.zeros((1, embedding_dim))])
word_to_id['<ukn>'] = vocab_size
vocab_size = len(word_to_id)
nb_emotions = 8
with tf.variable_scope('Text'):
# Word embedding
W_embedding = tf.get_variable('W_embedding', [vocab_size, embedding_dim], trainable=False)
#self.embedding_placeholder = tf.placeholder(tf.float32, [vocab_size, embedding_dim])
#self.embedding_init = W_embedding.assign(self.embedding_placeholder)
input_embed = tf.nn.embedding_lookup(W_embedding, texts)
#input_embed_dropout = tf.nn.dropout(input_embed, self.keep_prob)
# LSTM
cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
rnn_outputs, final_state = tf.nn.dynamic_rnn(cell, input_embed, sequence_length=seq_lens, dtype=tf.float32)
# Need to convert seq_lens to int32 for stack
texts_features = tf.gather_nd(rnn_outputs, tf.stack([tf.range(batch_size), tf.cast(seq_lens, tf.int32) - 1], axis=1))
# Concatenate image and text features
concat_features = tf.concat([images_features, texts_features], axis=1)
W_softmax = tf.get_variable('W_softmax', [im_features_size + rnn_size, nb_emotions])
b_softmax = tf.get_variable('b_softmax', [nb_emotions])
logits = tf.matmul(concat_features, W_softmax) + b_softmax
# Initialise image
#image_init = tf.random_normal([image_size, image_size, 3])
#image_init = inception_preprocessing.preprocess_image(image_init, image_size, image_size, is_training=False)
#image_init = tf.expand_dims(image_init, 0)
# Load model
checkpoint_path = tf_saver.latest_checkpoint(checkpoint_dir)
scaffold = monitored_session.Scaffold(
init_op=None, init_feed_dict=None,
init_fn=None, saver=None)
session_creator = monitored_session.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_filename_with_path=checkpoint_path,
master='',
config=None)
# Load oasis dataset
df_oasis = pd.read_csv('data/oasis/OASIS.csv', encoding='utf-8')
df_oasis['image'] = df_oasis['Theme'].map(lambda x: load_image(x))
df_oasis['Theme'] = df_oasis['Theme'].map(lambda x: ''.join([i for i in x if not i.isdigit()]).strip())
vocabulary, embedding = _load_embedding_weights_glove(text_dir, emb_dir, filename)
word_to_id = dict(zip(vocabulary, range(len(vocabulary))))
df_oasis['text_list'], df_oasis['text_len'] = zip(*df_oasis['Theme'].map(lambda x:
_paragraph_to_ids(x, word_to_id,
_POST_SIZE, emotions='')))
with monitored_session.MonitoredSession(
session_creator=session_creator, hooks=None) as session:
nb_iter = 2#df_oasis.shape[0] / batch_size
scores = []
for i in range(nb_iter):
np_images = df_oasis['image'][(i * batch_size):((i+1) * batch_size)]
np_texts = np.vstack(df_oasis['text_list'][(i * batch_size):((i+1) * batch_size)])
np_seq_lens = df_oasis['text_len'][(i * batch_size):((i+1) * batch_size)].values
print(np_images.shape)
session.run(images, feed_dict={images: np_images})
print(np_texts.shape)
session.run(texts, feed_dict={texts: np_texts})
print(np_seq_lens.shape)
session.run(seq_lens, feed_dict={seq_lens: np_seq_lens})
#scores.append(session.run(logits, feed_dict={images: np_images, texts: np_texts, seq_lens: np_seq_lens}))
scores = np.vstack(scores)
np.save('data/oasis_logits.npy', scores)
return scores
| [
11748,
28686,
198,
11748,
25064,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
11192,
273,
11125,
13,
3642,
822,
1330,
18862,
198,
6738,
11192,
... | 2.199198 | 9,970 |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.node import PipingSystemUndergroundPipeCircuit
log = logging.getLogger(__name__) | [
11748,
28686,
198,
11748,
20218,
7753,
198,
11748,
555,
715,
395,
198,
11748,
18931,
198,
6738,
12972,
312,
69,
1330,
3254,
24765,
4971,
198,
11748,
12972,
312,
69,
198,
6738,
12972,
312,
69,
13,
312,
69,
1330,
33389,
198,
6738,
12972,
... | 3.217391 | 69 |
from extractor.models import *
| [
6738,
7925,
273,
13,
27530,
1330,
1635,
628
] | 4 | 8 |
"""
Author: Lukas Mandrake, Shawn Anderson
Date : 12/4/19
Brief :
Notes :
Copyright 2019 California Institute of Technology. ALL RIGHTS RESERVED.
U.S. Government Sponsorship acknowledged.
"""
import importlib
import os
def repo_path():
"""
little function to help resolve location of doctest_files back in repository
:return: the absolute path to the root of the repository.
"""
return os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def repo_relpath(start=None):
"""
:param start: the current working directory relative to repo root path
:return: Returns the relative path to the root of the repository.
>>> test_path = os.path.join(repo_abspath(), 'genetic_algorithm', 'mutate')
>>> repo_relpath(start=test_path)
'../..'
>>> test_path = os.path.join(repo_abspath(), 'genetic_algorithm')
>>> repo_relpath(start=test_path)
'..'
"""
if start is None:
start = ''
return os.path.relpath(repo_abspath(), start)
def repo_abspath():
"""
:return: the absolute path to the directory containing the mlib module.
"""
toolbox_specs = importlib.util.find_spec('toga')
return os.path.realpath(os.path.dirname(toolbox_specs.submodule_search_locations[0]))
def doctest_input_path():
"""
:return: the path to the doctest input files
"""
# return os.path.join(repo_path(), 'tests', 'doctest_input_files')
return os.path.join(repo_path(), 'tests', 'doctest_files')
def doctest_output_path():
"""
:return: the path to the doctest output files.
"""
return os.path.join(repo_path(), 'tests', 'doctest_working')
if __name__ == '__main__':
import doctest
doctest.testmod()
print("repo_path: ", repo_path())
print("repo_relpath: ", repo_relpath())
print("repo_abspath: ", repo_abspath())
print("module_path: ", module_path())
print("doctest_input_path: ", doctest_input_path())
print("doctest_output_path: ", doctest_output_path())
| [
37811,
198,
13838,
25,
28102,
292,
13314,
33788,
11,
25225,
9918,
198,
10430,
220,
1058,
1105,
14,
19,
14,
1129,
198,
33,
3796,
1058,
198,
16130,
1058,
198,
15269,
13130,
3442,
5136,
286,
8987,
13,
220,
11096,
371,
34874,
15731,
1137,
... | 2.687166 | 748 |
#!python3
# the difficulty is [0, 1, 0, ..., 0], [0, 8, 0, ..., 0], [0, 64, 0, ..., 0]
import matplotlib.pyplot as plt
topo_2_allblocks = {
'line': [69, 491, 1452],
'reg2': [99, 505, 920],
'reg3': [123, 579, 679],
'reg4': [122, 492, 624]
}
topo_2_validblocks = {
'line': [65, 361, 383],
'reg2': [86, 183, 179],
'reg3': [99, 206, 197],
'reg4': [108, 197, 175]
}
diff_2_allblocks = {
'1': [69, 99, 123, 122],
'8': [491, 505, 579, 492],
'64': [1452, 920, 679, 624]
}
diff_2_validblocks = {
'1': [65, 86, 99, 108],
'8': [361, 183, 206, 197],
'64': [383, 179, 197, 175]
}
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def autolabel(rects, ax):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{:.2f}'.format(float(height)), xy=(rect.get_x() + rect.get_width() / 2, height), xytext=(0, 3), # 3 points vertical offset
textcoords="offset points", ha='center', va='bottom')
plot_diff()
plot_topo()
| [
2,
0,
29412,
18,
198,
2,
262,
8722,
318,
685,
15,
11,
352,
11,
657,
11,
2644,
11,
657,
4357,
685,
15,
11,
807,
11,
657,
11,
2644,
11,
657,
4357,
685,
15,
11,
5598,
11,
657,
11,
2644,
11,
657,
60,
198,
11748,
2603,
29487,
801... | 2.064982 | 554 |
import logging
import time
from datetime import datetime
from functools import wraps
| [
11748,
18931,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
1257,
310,
10141,
1330,
27521,
628
] | 4.3 | 20 |
# Imports from here
import argparse
import functions_train
import os
parser = argparse.ArgumentParser()
parser.add_argument('--arch', type = str, default = 'vgg19', help = 'which CNN Model should be used for pretraining, choose between vgg13, vgg16, vgg19, densenet121, densenet161, alexnet | (default = vgg19)')
parser.add_argument('--save_directory', type = str, default = 'SavedModel/', help = 'directory to save trained model | (default = SavedModel/)')
parser.add_argument('--learningrate', type = float, default = 0.001, help = 'give learningrate as a float | (default = 0.001)')
parser.add_argument('--hidden_units', type = int, default = 508, help = 'give number of hidden units as an integer | (default = 508)')
parser.add_argument('--epochs', type = int, default = 1, help = 'give number of epochs as an integer | (default = 1)')
parser.add_argument('--gpu', type = str, default = 'cuda', help = 'cuda or cpu | (default = cuda)')
args = parser.parse_args()
# Run functions from functions_train.py
functions_train.information(args.arch, args.learningrate, args.hidden_units, args.epochs, args.save_directory, args.gpu)
trainloader, validloader, train_data = functions_train.training_input()
model, save_architecture, input_layer = functions_train.pretrained_model(args.arch)
model, criterion, optimizer = functions_train.classifier(args.hidden_units, args.learningrate, model, input_layer)
model = functions_train.training_network(args.epochs, args.gpu, model, trainloader, validloader, criterion, optimizer)
store = functions_train.saving_model(args.save_directory, train_data, args.learningrate, args.epochs, model, optimizer)
# Save architecture model and filepath
os.path.join(args.save_directory, 'save_progress.txt')
with open("save_progress.txt", "w") as output:
output.write(str(store) + "\n" + str(args.arch))
| [
2,
1846,
3742,
422,
994,
198,
11748,
1822,
29572,
198,
11748,
5499,
62,
27432,
220,
198,
11748,
28686,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
198,
48610,
13,
2860,
62,
49140,
10786,
438,
998,
3256,
2099,
... | 3.189329 | 581 |
from os.path import abspath, dirname, join, split
from glob import glob
from functools import partial
from subprocess import Popen, PIPE
import gzip
from click import echo
from psycopg2 import (connect, OperationalError)
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from natsort import natsorted
from amgut.lib.config_manager import AMGUT_CONFIG
from amgut.lib.data_access.sql_connection import SQLConnectionHandler
get_db_file = partial(join, join(dirname(dirname(abspath(__file__))), '..',
'db'))
LAYOUT_FP = get_db_file('ag_unpatched.sql')
INITIALIZE_FP = get_db_file('initialize.sql')
POPULATE_FP = get_db_file('ag_test_patch22.sql.gz')
PATCHES_DIR = get_db_file('patches')
def _check_db_exists(db, cursor):
r"""Check if the database db exists on the postgres server
Parameters
----------
db : str
The database name
cursor : psycopg2.cursor
The cursor connected to the server
"""
cursor.execute('SELECT datname FROM pg_database')
# It's a list of tuple, so just create the tuple to check if exists
return (db,) in cursor.fetchall()
def initialize(verbose=False):
"""Initialize the database with permissions and, optionally, a new user
Parameters
----------
verbose : bool, optional
Show messages while working, default False
"""
conn = connect(user=AMGUT_CONFIG.user, password=AMGUT_CONFIG.password,
host=AMGUT_CONFIG.host, port=AMGUT_CONFIG.port,
database=AMGUT_CONFIG.database)
cur = conn.cursor()
if verbose:
echo('Granting privileges')
cur.execute("""GRANT USAGE ON schema public, ag, barcodes
TO %s""" % AMGUT_CONFIG.user)
cur.execute('GRANT CONNECT ON DATABASE %s TO %s' %
(AMGUT_CONFIG.database, AMGUT_CONFIG.user))
cur.execute('GRANT INSERT, UPDATE, DELETE, SELECT ON ALL TABLES IN SCHEMA'
' public, ag, barcodes TO %s;' % AMGUT_CONFIG.user)
cur.execute('GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public, ag, barcodes'
' TO %s;' % AMGUT_CONFIG.user)
conn.commit()
def patch_db(patches_dir=PATCHES_DIR, verbose=False):
"""Patches the database schema based on the settings table
Pulls the current patch from the settings table and applies all subsequent
patches found in the patches directory.
"""
conn = SQLConnectionHandler()
current_patch = conn.execute_fetchone(
"SELECT current_patch FROM settings")[0]
current_patch_fp = join(patches_dir, current_patch)
sql_glob = join(patches_dir, '*.sql')
patch_files = natsorted(glob(sql_glob))
if current_patch == 'unpatched':
next_patch_index = 0
elif current_patch_fp not in patch_files:
raise RuntimeError("Cannot find patch file %s" % current_patch)
else:
next_patch_index = patch_files.index(current_patch_fp) + 1
patch_update_sql = "UPDATE settings SET current_patch = %s"
for patch_fp in patch_files[next_patch_index:]:
patch_filename = split(patch_fp)[-1]
with conn.get_postgres_cursor() as cur:
cur.execute('SET SEARCH_PATH TO ag, barcodes, public')
with open(patch_fp, 'U') as patch_file:
if verbose:
echo('\tApplying patch %s...' % patch_filename)
cur.execute(patch_file.read())
cur.execute(patch_update_sql, [patch_filename])
conn._connection.commit()
# Idempotent patches implemented in Python can be run here
| [
6738,
28686,
13,
6978,
1330,
2352,
6978,
11,
26672,
3672,
11,
4654,
11,
6626,
198,
6738,
15095,
1330,
15095,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
850,
14681,
1330,
8099,
268,
11,
350,
4061,
36,
198,
11748,
308,
13344,
... | 2.482687 | 1,444 |
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from neo4j.graph import (
Node,
Path,
Relationship,
)
from neo4j.spatial import (
CartesianPoint,
WGS84Point,
)
from neo4j.time import (
Date,
DateTime,
Duration,
Time,
)
| [
2,
15069,
357,
66,
8,
366,
8199,
78,
19,
73,
1,
198,
2,
21227,
19,
73,
10710,
9564,
685,
5450,
1378,
710,
78,
19,
73,
13,
785,
60,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
21227,
19,
73,
13,
198,
2,
198,
2,
49962,
739,
26... | 3.045775 | 284 |
import os
import lldb
from lldb.plugins.scripted_process import ScriptedProcess | [
11748,
28686,
198,
198,
11748,
32660,
9945,
198,
6738,
32660,
9945,
13,
37390,
13,
12048,
276,
62,
14681,
1330,
12327,
276,
18709
] | 3.636364 | 22 |
"""
****************************************************************************************************
:copyright (c) 2019-2021 URBANopt, Alliance for Sustainable Energy, LLC, and other contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the
distribution.
Neither the name of the copyright holder nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
****************************************************************************************************
"""
import os
from geojson_modelica_translator.jinja_filters import ALL_CUSTOM_FILTERS
from jinja2 import Environment, FileSystemLoader
class PackageParser(object):
"""
Class to read and modify the package.mo and the package.order file
"""
def __init__(self, path=None):
"""
Create an instance to manage the package.mo/order file. If no path is provided then the user
must add in their own package and order data. Or the user can load from the new_from_template
class method.
:param path: string, path to where the package.mo and package.order reside.
"""
self.path = path
self.order_data = None # This is stored as a string for now.
self.package_data = None
self.load()
self.template_env = Environment(
loader=FileSystemLoader(
searchpath=os.path.join(
os.path.dirname(os.path.abspath(__file__)), "templates"
)
)
)
self.template_env.filters.update(ALL_CUSTOM_FILTERS)
@classmethod
def new_from_template(cls, path, name, order, within=None):
"""
Create new package data based on the package.mo template. If within is not specified, then it is
assumed that this is a top level package and will load from the package_base template.
:param path: string, the path where the resulting files will be saved to.
:param name: string, the name of the model
:param order: list, ordered list of which models will be loaded (saved to package.order)
:param within: string, (optional), name where this package is within.
"""
klass = PackageParser(path)
if within:
template = klass.template_env.get_template("package.mot")
else:
template = klass.template_env.get_template("package_base.mot")
klass.package_data = template.render(within=within, name=name, order=order)
klass.order_data = "\n".join(order)
return klass
def load(self):
"""
Load the package.mo and package.mo data from the member variable path
"""
filename = os.path.join(self.path, "package.mo")
if os.path.exists(filename):
with open(filename, "r") as f:
self.package_data = f.read()
filename = os.path.join(self.path, "package.order")
if os.path.exists(filename):
with open(filename, "r") as f:
self.order_data = f.read()
def save(self):
"""
Save the updated files to the same location
"""
with open(os.path.join(os.path.join(self.path, "package.mo")), "w") as f:
f.write(self.package_data)
with open(os.path.join(os.path.join(self.path, "package.order")), "w") as f:
f.write(self.order_data)
f.write("\n")
@property
def order(self):
"""
Return the order of the packages from the package.order file
:return: list, list of the loaded models in the package.order file
"""
data = self.order_data.split("\n")
if "" in data:
data.remove("")
return data
def rename_model(self, old_model, new_model):
"""
Rename the model name in the package.order file
:param old_model: string, existing name
:param new_model: string, new name
"""
self.order_data = self.order_data.replace(old_model, new_model)
def add_model(self, new_model_name, insert_at=-1):
"""Insert a new model into the package> Note that the order_data is stored as a string right now,
so there is a bit of a hack to get this to work correctly.
:param new_model_name: string, name of the new model to add to the package order.
:param insert_at: int, location to insert package, if 0 at beginning, -1 at end
"""
data = self.order_data.split("\n")
if insert_at == -1:
data.append(new_model_name)
else:
data.insert(insert_at, new_model_name)
self.order_data = "\n".join(data)
# remove any empty lines
self.order_data = self.order_data.replace('\n\n', '\n')
class InputParser(object):
"""
Class to read in Modelica files (.mo) and provide basic operations.
"""
def save(self):
"""
Save the resulting file to the same file from which it was initialized
:return:
"""
self.save_as(self.modelica_filename)
def save_as(self, new_filename):
"""
Save the resulting file with a new filename
:param new_filename:
:return:
"""
with open(new_filename, "w") as f:
f.write(self.serialize())
def remove_object(self, obj_name):
"""
Remove an object by a name. Can be any part of the object name.
:param obj_name: string, object name to match
:return:
"""
index, obj = self.find_model_object(obj_name)
if index is not None:
del self.model["objects"][index]
def replace_within_string(self, new_string):
"""
Replacement of the path portion of the within string
:param new_string: string, what to replace the existing within string with.
"""
self.within = new_string
def find_model_object(self, obj_name):
"""
Find a model object in the list of parsed objects
:param obj_name: string, name (including the instance)
:return: list, index and string of object
"""
for index, o in enumerate(self.model["objects"]):
if obj_name in o:
return index, self.model["objects"][index]
return None, None
def reload(self):
"""
Reparse the data. This will remove any unsaved changes.
"""
self.init_vars()
self.parse_mo()
def replace_model_string(self, model_name, model_instance, old_string, new_string):
"""
Go through the models and find the model_name with a model_instance and change the value in the field to
the new_value. This will replace the entire value of the model field.
This will not work with arrays or lists (e.g., {...}, [...])
:param model_name: string, name of the model
:param model_instance: string, instance of the model
:param old_string: string, name of the old string to replace
:param new_string: string, the new string
"""
index, _model = self.find_model_object(f"{model_name} {model_instance}")
if index is not None:
self.model["objects"][index] = self.model["objects"][index].replace(
old_string, new_string
)
def add_model_object(self, model_name, model_instance, data):
"""
Add a new model object to the model
:param model_name: string
:param model_instance: string
:param data: list of strings
"""
str = f" {model_name} {model_instance}\n"
for d in data:
str += f" {d}\n"
self.model["objects"].append(str)
def add_parameter(self, var_type, var_name, value, description):
"""Add a new parameter. Will be prepended to the top of the models list
:param var_type: string, type of Modelica variable, Real, Integer, String, Modelica.SIunits.Area, etc.
:param var_name: string, name of the variable. Note that this does not check for conflicts.
:param value: variant, value to set the variable name to.
:param description: string, description of the parameter
"""
# is the value is a string, then wrap in quotes
if isinstance(value, str):
value = f'"{value}"'
# parameter Real fraLat= 0.8 "Fraction latent of sensible persons load = 0.8 for home, 1.25 for office.";
new_str = f" parameter {var_type} {var_name}={value} \"{description}\";\n"
self.model["objects"].insert(0, new_str)
def add_connect(self, a, b, annotation):
"""
Add a new connection of port a to port b. The annotation will be appended on a new line.
:param a: string, port a
:param b: string, port b
:param annotation: string, description
"""
self.connections.append(f" connect({a}, {b})\n {annotation};\n")
def find_connect(self, port_a, port_b):
"""
Find an existing connection that has port_a and/or port_b. If there are more than one, then it will only
return the first.
:param port_a:
:param port_b:
:return:
"""
for index, c in enumerate(self.connections):
if not port_a:
raise Exception("Unable to replace string in connect if unknown port A")
if not port_b:
if f"({port_a}, " in c:
return index, c
if port_a and port_b:
if f"({port_a}, {port_b})" in c:
return index, c
return None, None
def replace_connect_string(self, a, b, new_a, new_b, replace_all=False):
"""
Replace content of the connect string with new_a and/or new_b
:param a: string, existing port a
:param b: string, existing port b
:param new_a: string, new port (or none)
:param new_b: string, new port b (or none
:param replace_all: boolean, allow replacemnt of all strings
"""
# find the connection that matches a, b
index, c = self.find_connect(a, b)
while index:
if index:
if new_a:
self.connections[index] = self.connections[index].replace(a, new_a)
if new_b:
self.connections[index] = self.connections[index].replace(b, new_b)
if not replace_all:
break
else:
index, c = self.find_connect(a, b)
def remove_connect_string(self, a, b):
"""
Remove a connection string that matches the a, b.
:param a: string, existing port a
:param b: string, existing port b
"""
# find the connection that matches a, b
index, c = self.find_connect(a, b)
if index:
del self.connections[index]
def serialize(self):
"""
Serialize the modelica object to a string with line feeds
:return: string
"""
str = f"within {self.within};\n"
str += f"model {self.model['name']}\n"
str += f"{self.model['comment']}\n\n"
for o in self.model["objects"]:
for lx in o:
str += lx
str += "equation\n"
for c in self.connections:
str += c
for e in self.equations:
str += e
str += f"end {self.model['name']};\n"
return str
| [
37811,
198,
17174,
17174,
17174,
2466,
198,
25,
22163,
4766,
357,
66,
8,
13130,
12,
1238,
2481,
471,
27912,
1565,
8738,
11,
10302,
329,
45276,
6682,
11,
11419,
11,
290,
584,
20420,
13,
198,
198,
3237,
2489,
10395,
13,
198,
198,
7738,
... | 2.460453 | 5,171 |
test_cases = int(input())
for test in range(test_cases):
john_packs, jack_packs = map(int, list(input().split()))
john_pack_list = list(input().split())
jack_pack_list = list(input().split())
john_pack_list = [int(x) for x in john_pack_list]
jack_pack_list = [int(x) for x in jack_pack_list]
john_pack_list.sort()
jack_pack_list.sort()
# print(jack_pack_list)
john_total_votes = jack_total_votes =0
for i in john_pack_list:
john_total_votes += int(i)
for i in jack_pack_list:
jack_total_votes += int(i)
# print(str(john_pack_list) + " : " + str(jack_pack_list) + " : " + str(john_total_votes) + " : " + str(jack_total_votes))
num_of_iteration = min (john_packs, jack_packs)
i = 0
while i < num_of_iteration and john_total_votes <= jack_total_votes:
john_total_votes += int(jack_pack_list[jack_packs-i-1]) - int(john_pack_list[i])
jack_total_votes += int(john_pack_list[i]) - int(jack_pack_list[jack_packs-i-1])
i+=1
# print("john_total_votes: " + str(john_total_votes) + ", jack_total_votes: " + str(jack_total_votes))
if (john_total_votes > jack_total_votes):
print(i)
else:
print("-1") | [
201,
198,
9288,
62,
33964,
796,
493,
7,
15414,
28955,
201,
198,
1640,
1332,
287,
2837,
7,
9288,
62,
33964,
2599,
201,
198,
220,
220,
220,
45610,
62,
32377,
11,
14509,
62,
32377,
796,
3975,
7,
600,
11,
1351,
7,
15414,
22446,
35312,
... | 2.207381 | 569 |
"""
Abstract interface for Embedder.
Authors:
Christian Dallago
"""
import abc
import logging
import tempfile
from typing import List, Generator, Optional, Iterable, ClassVar, Any, Dict, Union
import torch
from numpy import ndarray
from bio_embeddings.utilities import (
get_model_file,
get_model_directories_from_zip,
get_device,
)
logger = logging.getLogger(__name__)
class EmbedderWithFallback(EmbedderInterface, abc.ABC):
""" Batching embedder that will fallback to the CPU if the embedding on the GPU failed """
_model: Any
@abc.abstractmethod
@abc.abstractmethod
def _get_fallback_model(self):
""" Returns a (cached) cpu model """
...
def embed_batch(self, batch: List[str]) -> Generator[ndarray, None, None]:
"""Tries to get the embeddings in this order:
* Full batch GPU
* Single Sequence GPU
* Single Sequence CPU
Single sequence processing is done in case of runtime error due to
a) very long sequence or b) too large batch size
If this fails, you might want to consider lowering batch_size and/or
cutting very long sequences into smaller chunks
Returns unprocessed embeddings
"""
# No point in having a fallback model when the normal model is CPU already
if self._device.type == "cpu":
yield from self._embed_batch_impl(batch, self._model)
return
try:
yield from self._embed_batch_impl(batch, self._model)
except RuntimeError as e:
if len(batch) == 1:
logger.error(
f"RuntimeError for sequence with {len(batch[0])} residues: {e}. "
f"This most likely means that you don't have enough GPU RAM to embed a protein this long. "
f"Embedding on the CPU instead, which is very slow"
)
yield from self._embed_batch_impl(batch, self._get_fallback_model())
else:
logger.error(
f"Error processing batch of {len(batch)} sequences: {e}. "
f"You might want to consider adjusting the `batch_size` parameter. "
f"Will try to embed each sequence in the set individually on the GPU."
)
for sequence in batch:
try:
yield from self._embed_batch_impl([sequence], self._model)
except RuntimeError as e:
logger.error(
f"RuntimeError for sequence with {len(sequence)} residues: {e}. "
f"This most likely means that you don't have enough GPU RAM to embed a protein this long."
)
yield from self._embed_batch_impl(
[sequence], self._get_fallback_model()
)
| [
37811,
198,
23839,
7071,
329,
13302,
276,
1082,
13,
198,
198,
30515,
669,
25,
198,
220,
4302,
360,
439,
3839,
198,
37811,
198,
198,
11748,
450,
66,
198,
11748,
18931,
198,
11748,
20218,
7753,
198,
6738,
19720,
1330,
7343,
11,
35986,
1... | 2.259004 | 1,305 |
import matplotlib
matplotlib.use('Agg')
import argparse
import datetime
import models.ICW_FMRI_GAN
import nibabel
import numpy as np
import os
import shutil
import timeit
import torch
from brainpedia.brainpedia import Brainpedia
from brainpedia.fmri_processing import invert_preprocessor_scaling
from torch.autograd import Variable
from utils.sampling import noise
from utils.plot import Plot
parser = argparse.ArgumentParser(description="Train ICW_FMRI_GAN.")
parser.add_argument('train_data_dir', help='the directory containing real fMRI data to train on')
parser.add_argument('train_data_dir_cache', help='the directory to use as a cache for the train_data_dir preprocessing')
parser.add_argument('output_dir', help='the directory to save training results')
args = parser.parse_args()
# ========== OUTPUT DIRECTORIES ==========
shutil.rmtree(args.output_dir, ignore_errors=True)
os.makedirs(args.output_dir)
# ========== Hyperparameters ==========
DOWNSAMPLE_SCALE = 0.25
MULTI_TAG_LABEL_ENCODING = True
TRAINING_STEPS = 200000
BATCH_SIZE = 50
MODEL_DIMENSIONALITY = 64
CONDITONING_DIMENSIONALITY = 5
CRITIC_UPDATES_PER_GENERATOR_UPDATE = 1
LAMBDA = 10
NOISE_SAMPLE_LENGTH = 128
# ========== HOUSEKEEPING ==========
CUDA = torch.cuda.is_available()
np.random.seed(1)
torch.manual_seed(1)
if CUDA:
torch.cuda.manual_seed(1)
# ========== Data ==========
brainpedia = Brainpedia(data_dirs=[args.train_data_dir],
cache_dir=args.train_data_dir_cache,
scale=DOWNSAMPLE_SCALE,
multi_tag_label_encoding=MULTI_TAG_LABEL_ENCODING)
all_brain_data, all_brain_data_tags = brainpedia.all_data()
brainpedia_generator = Brainpedia.batch_generator(all_brain_data, all_brain_data_tags, BATCH_SIZE, CUDA)
brain_data_shape, brain_data_tag_shape = brainpedia.sample_shapes()
# ========== Models ==========
generator = models.ICW_FMRI_GAN.Generator(input_size=NOISE_SAMPLE_LENGTH,
output_shape=brain_data_shape,
dimensionality=MODEL_DIMENSIONALITY,
num_classes=brain_data_tag_shape[0],
conditioning_dimensionality=CONDITONING_DIMENSIONALITY,
cudaEnabled=CUDA)
critic = models.ICW_FMRI_GAN.Critic(dimensionality=MODEL_DIMENSIONALITY,
num_classes=brain_data_tag_shape[0],
conditioning_dimensionality=CONDITONING_DIMENSIONALITY,
cudaEnabled=CUDA)
# ========= Training =========
for training_step in range(1, TRAINING_STEPS + 1):
# Train critic
for critic_step in range(CRITIC_UPDATES_PER_GENERATOR_UPDATE):
real_brain_img_data_batch, labels_batch = next(brainpedia_generator)
real_brain_img_data_batch = Variable(real_brain_img_data_batch)
labels_batch = Variable(labels_batch)
noise_sample_c = Variable(noise(size=(labels_batch.shape[0], NOISE_SAMPLE_LENGTH), cuda=CUDA))
synthetic_brain_img_data_batch = generator(noise_sample_c, labels_batch)
_ = critic.train(real_brain_img_data_batch, synthetic_brain_img_data_batch, labels_batch, LAMBDA)
# Train generator
noise_sample_g = Variable(noise(size=(labels_batch.shape[0], NOISE_SAMPLE_LENGTH), cuda=CUDA))
synthetic_brain_img_data_batch = generator(noise_sample_g, labels_batch)
critic_output = critic(synthetic_brain_img_data_batch, labels_batch)
_ = generator.train(critic_output)
if training_step % 10000 == 0:
# Save model at checkpoint
torch.save(generator.state_dict(), "{0}generator".format(args.output_dir))
torch.save(critic.state_dict(), "{0}critic".format(args.output_dir))
# Save model at checkpoint
torch.save(generator.state_dict(), "{0}generator".format(args.output_dir))
torch.save(critic.state_dict(), "{0}critic".format(args.output_dir))
| [
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
46384,
11537,
198,
198,
11748,
1822,
29572,
198,
11748,
4818,
8079,
198,
11748,
4981,
13,
2149,
54,
62,
23264,
7112,
62,
45028,
198,
11748,
33272,
9608,
198,
11748,
299,
... | 2.337806 | 1,714 |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='BubbleBox',
version='0.1.4',
author="Audun Skau Hansen",
author_email="a.s.hansen@kjemi.uio.no",
description="A molecular dynamics educational tool for Jupyter Notebooks",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.uio.no/audunsh/bubblebox",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
628,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
... | 2.526316 | 266 |
"""Test TotalConnectDevice."""
from device import TotalConnectDevice
from const import DEVICE_INFO_BASIC_1
def tests_init():
"""Test __init__()."""
test_device = TotalConnectDevice(DEVICE_INFO_BASIC_1)
assert test_device.id == DEVICE_INFO_BASIC_1["DeviceID"]
# test with missing flags
del DEVICE_INFO_BASIC_1["DeviceFlags"]
test_device = TotalConnectDevice(DEVICE_INFO_BASIC_1)
assert test_device.flags == {} | [
37811,
14402,
7472,
13313,
24728,
526,
15931,
198,
198,
6738,
3335,
1330,
7472,
13313,
24728,
198,
198,
6738,
1500,
1330,
5550,
27389,
62,
10778,
62,
33,
1921,
2149,
62,
16,
198,
198,
4299,
5254,
62,
15003,
33529,
198,
220,
220,
220,
... | 2.75 | 160 |
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.33
#
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _fileunpacker
import new
new_instancemethod = new.instancemethod
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
_newclass = 0
del types
FileUnpacker_swigregister = _fileunpacker.FileUnpacker_swigregister
FileUnpacker_swigregister(FileUnpacker)
PackageFileLoc_swigregister = _fileunpacker.PackageFileLoc_swigregister
PackageFileLoc_swigregister(PackageFileLoc)
PackageIndexEntry_swigregister = _fileunpacker.PackageIndexEntry_swigregister
PackageIndexEntry_swigregister(PackageIndexEntry)
Package_swigregister = _fileunpacker.Package_swigregister
Package_swigregister(Package)
cvar = _fileunpacker.cvar
Package.kCrcSize = _fileunpacker.cvar.Package_kCrcSize
Package.kCrcOffset = _fileunpacker.cvar.Package_kCrcOffset
Package.kVersionSize = _fileunpacker.cvar.Package_kVersionSize
Package.kVersionOffset = _fileunpacker.cvar.Package_kVersionOffset
Package.kIndexOffsetSize = _fileunpacker.cvar.Package_kIndexOffsetSize
Package.kIndexOffsetOffset = _fileunpacker.cvar.Package_kIndexOffsetOffset
Package_CalculateCrc = _fileunpacker.Package_CalculateCrc
Package_ReadCrc = _fileunpacker.Package_ReadCrc
Package_ReadVersion = _fileunpacker.Package_ReadVersion
Package_FileSize = _fileunpacker.Package_FileSize
kDbRootPacket = _fileunpacker.kDbRootPacket
kDbRoot2Packet = _fileunpacker.kDbRoot2Packet
kQtpPacket = _fileunpacker.kQtpPacket
kQtp2Packet = _fileunpacker.kQtp2Packet
kImagePacket = _fileunpacker.kImagePacket
kTerrainPacket = _fileunpacker.kTerrainPacket
kVectorPacket = _fileunpacker.kVectorPacket
IndexItem_swigregister = _fileunpacker.IndexItem_swigregister
IndexItem_swigregister(IndexItem)
PacketBundle_swigregister = _fileunpacker.PacketBundle_swigregister
PacketBundle_swigregister(PacketBundle)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
13,
22,
198,
2,
198,
2,
15069,
2177,
3012,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
77... | 3.060296 | 879 |
from collections import namedtuple
from games import (Game)
# self.validSpaces = ()
class FlagrantCopy(Game):
"""A flagrant copy of TicTacToe, from game.py
It's simplified, so that moves and utility are calculated as needed
Play TicTacToe on an h x v board, with Max (first player) playing 'X'.
A state has the player to move and a board, in the form of
a dict of {(x, y): Player} entries, where Player is 'X' or 'O'."""
# defines the order of play
def utility(self, state, player):
"Return the value to player; 1 for win, -1 for loss, 0 otherwise."
try:
return state.utility if player == 'X' else -state.utility
except:
pass
board = state.board
util = self.check_win(board, 'X')
if util == 0:
util = -self.check_win(board, 'O')
state.utility = util
return util if player == 'X' else -util
# # Did I win?
# def check_win(self, board, player):
# # check rows
# for y in range(1, self.v + 1):
# if self.k_in_row(board, (1,y), player, (1,0)):
# return 1
# # check columns
# for x in range(1, self.h + 1):
# if self.k_in_row(board, (x,1), player, (0,1)):
# return 1
# # check \ diagonal
# if self.k_in_row(board, (1,1), player, (1,1)):
# return 1
# # check / diagonal
# if self.k_in_row(board, (3,1), player, (-1,1)):
# return 1
# return 0
# does player have K in a row? return 1 if so, 0 if not
# def k_in_row(self, board, start, player, direction):
# "Return true if there is a line through start on board for player."
# (delta_x, delta_y) = direction
# x, y = start
# n = 0 # n is number of moves in row
# while board.get((x, y)) == player:
# n += 1
# x, y = x + delta_x, y + delta_y
# x, y = start
# while board.get((x, y)) == player:
# n += 1
# x, y = x - delta_x, y - delta_y
# n -= 1 # Because we counted start itself twice
# return n >= self.k
def terminal_test(self, state):
"A state is terminal if it is won or there are no empty squares."
return self.utility(state, 'X') != 0 or len(self.actions(state)) == 0
myGame = FlagrantCopy()
won = GameState(
to_move = 'O',
board = {(5,1): 'O', (5,2): 'O', (5,3): 'O',
(2,1): 'X', (2,2): 'X',
},
label = 'won'
)
winin1 = GameState(
to_move = 'X',
board = {(1,1): 'X', (1,2): 'X', (1,3): 'X',
(2,1): 'O', (2,2): 'O', (2,3): 'O'
},
label = 'winin1'
)
losein1 = GameState(
to_move = 'O',
board = {(2,1): 'X', (2,2): 'X',
(4,1): 'O', (4,2): 'O', (4,3): 'O',
(3,1): 'X', (3,1): 'X'
},
label = 'losein1'
)
winin2 = GameState(
to_move = 'X',
board = {(5,1): 'O', (5,3): 'O',
(6,1): 'X', (6,3): 'X'},
label = 'winin2'
)
lost = GameState(
to_move = 'X',
board = {(0,2): 'O', (0,3): 'X',
(1,1): 'X', (1,2): 'O', (1,3): 'O',
(2, 1): 'X', (2, 2): 'O', (2, 3): 'X',
(3, 1): 'X', (3, 2): 'O', (3, 3): 'O',
(4, 1): 'O', (4, 2): 'X', (4, 3): 'X',
(5, 1): 'O', (5, 2): 'O', (5, 3): 'O',
(6, 1): 'X', (6, 2): 'X', (6, 3): 'X',
},
label = 'lost'
)
myGames = {
myGame: [
won,
winin1, losein1, winin2,
lost,
]
} | [
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
6738,
1830,
1330,
357,
8777,
8,
198,
220,
220,
220,
220,
1303,
220,
220,
2116,
13,
12102,
4561,
2114,
796,
7499,
198,
198,
4871,
19762,
5250,
29881,
7,
8777,
2599,
198,
220,
220,
220,
37... | 1.909434 | 1,855 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
import numpy as np
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def kaiser_parameters(ripple, width):
'''
ripple - Both passband and stopband ripple strength in dB.
width - Difference between fs (stopband frequency) i fp (passband frequency). Normalized so that 1 corresponds to pi radians / sample. That is, the frequency is expressed as a fraction of the Nyquist frequency.
'''
a = abs(ripple)
beta = kaiser_beta(a)
numtaps = (a - 7.95) / 2.285 / (np.pi * width) + 1
return int(np.ceil(numtaps)), beta
def lowpass_kaiser_fir_filter(rate=16000, cutoff_freq=4000, width=400, attenuation=65):
'''
rate - Signal sampling rate.
cuttof_freq - Filter cutoff frequency in Hz.
width - Difference between fs (stopband frequency) i fp (passband frequency) in Hz.
attenuation - Signal attenuation in the stopband, given in dB.
Returns: h(n) - impulse response of lowpass sinc filter with applied Kaiser window.
'''
nyq = rate / 2
cutoff_freq = cutoff_freq / nyq
numtaps, beta = kaiser_parameters(attenuation, float(width) / nyq)
if numtaps % 2 == 0:
numtaps += 1
pass_zero = True # zato sto je lowpass
pass_nyq = False # zato sto je lowpass
cutoff = np.hstack(([0.0]*pass_zero, cutoff_freq, [1.0]*pass_nyq))
bands = cutoff.reshape(-1,2)
alpha = 0.5 * (numtaps-1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * np.sinc(right * m)
h -= left * np.sinc(left * m)
window = kaiser_window(numtaps, beta)
h = h * window
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
| [
11748,
299,
32152,
355,
45941,
198,
198,
62,
72,
15,
32,
796,
685,
198,
220,
220,
220,
532,
19,
13,
35038,
2682,
23237,
2414,
3720,
29626,
29088,
1120,
36,
12,
1507,
11,
198,
220,
220,
220,
513,
13,
26073,
3720,
2231,
20356,
1828,
... | 2.033986 | 1,736 |
import sys
import pygame
# ENGINE
TIMER = pygame.USEREVENT + 1
DELAY = 17
# GRAPHICS
CAPTION = 'CHIP-EIGHT EMULATOR'
WIDTH = 64
HEIGHT = 32
SCALE = 10
COLOR_OFF = pygame.Color(0, 0, 0, 255)
COLOR_ON = pygame.Color(255, 255, 255, 255)
| [
11748,
25064,
198,
11748,
12972,
6057,
198,
198,
2,
36924,
8881,
198,
51,
3955,
1137,
796,
12972,
6057,
13,
2937,
9338,
53,
3525,
1343,
352,
198,
35,
3698,
4792,
796,
1596,
198,
198,
2,
10863,
31300,
19505,
198,
33177,
24131,
796,
705... | 2.27619 | 105 |
import torch
| [
11748,
28034,
628
] | 4.666667 | 3 |
import pytest
from sovtokenfees.constants import FEES
from sovtokenfees.test.constants import NYM_FEES_ALIAS
from plenum.common.constants import STATE_PROOF
from plenum.test.stasher import delay_rules
from plenum.test.delayers import req_delay
from sovtokenfees.test.helper import send_and_check_auth_rule
| [
11748,
12972,
9288,
198,
6738,
523,
85,
30001,
69,
2841,
13,
9979,
1187,
1330,
18630,
1546,
198,
6738,
523,
85,
30001,
69,
2841,
13,
9288,
13,
9979,
1187,
1330,
6645,
44,
62,
15112,
1546,
62,
1847,
43429,
198,
198,
6738,
458,
44709,
... | 2.980769 | 104 |
import logging
from .Base import Base
import time
LOG = logging.getLogger(__name__)
| [
11748,
18931,
198,
6738,
764,
14881,
1330,
7308,
198,
11748,
640,
198,
198,
25294,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628
] | 3.307692 | 26 |
from PIL import Image
import numpy as np
import os
import os.path as osp
from tqdm import tqdm
orig_path = '.' # insert here the source path where original COCO annotations are
dest_path = '.' # the destination folder, which should be this one'
for split in ["train2017", "val2017"]:
annotations = f"{orig_path}/annotations/{split}"
nov_ann = f"{dest_path}/annotations_my/{split}"
# clear folder if exists
if osp.exists(nov_ann):
print("Removing existing")
os.rmdir(nov_ann)
os.makedirs(nov_ann)
# remap labels in the novel interval (+1 for Stuff, +1 and stuff on 0 for objects)
mapping = np.zeros((256,), dtype=np.int8)
for i, cl in enumerate(range(91)):
mapping[cl] = i + 1
mapping[255] = 255
target_transform = lambda x: Image.fromarray(mapping[x])
for f in tqdm(os.listdir(annotations)):
lbl = Image.open(osp.join(annotations, f))
lbl = target_transform(lbl)
lbl.save(osp.join(nov_ann, f))
| [
6738,
350,
4146,
1330,
7412,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
11612,
62,
6978,
796,
705,
2637,
220,
1303,
75... | 2.488722 | 399 |
from AppKit import NSURL, NSDocumentController
from vanilla import *
from defconAppKit.windows.baseWindow import BaseWindowController
from lib.doodleDocument import DoodleDocument
from mojo.UI import OpenGlyphWindow, OpenSpaceCenter, OpenFontInfoSheet
# notifications
fonts = OpenFont(showUI=False)
if not isinstance(fonts, list):
fonts = [fonts]
for font in fonts:
## small bug if the font has no units per em set (already fixed in the dev version)
if font.info.unitsPerEm is None:
font.info.unitsPerEm = 1000
SimpleFontWindow(font) | [
6738,
2034,
20827,
1330,
10896,
21886,
11,
399,
10305,
7990,
22130,
198,
6738,
16858,
1330,
1635,
198,
6738,
825,
1102,
4677,
20827,
13,
28457,
13,
8692,
27703,
1330,
7308,
27703,
22130,
198,
198,
6738,
9195,
13,
67,
27106,
24941,
1330,
... | 3.020833 | 192 |
# coding:utf-8
from handlers.base import BaseHandler
from models.Approve import Approve
from models.ApproveRequest import ApproveRequest
from models.ApproveRelation import ApproveRelation
from dao.base import session
import logging
logger = logging.getLogger('boilerplate.' + __name__)
import requests
import json
from tornado.web import authenticated
import time
from sqlalchemy import * | [
2,
19617,
25,
40477,
12,
23,
198,
198,
6738,
32847,
13,
8692,
1330,
7308,
25060,
198,
6738,
4981,
13,
4677,
305,
303,
1330,
20010,
303,
198,
6738,
4981,
13,
4677,
305,
303,
18453,
1330,
20010,
303,
18453,
198,
6738,
4981,
13,
4677,
... | 3.688679 | 106 |
from django.db import models
from django.contrib.auth.models import AbstractUser, BaseUserManager, PermissionsMixin
from rest_framework_simplejwt.tokens import RefreshToken
from phone_field import PhoneField
# Create your models here.
# Custom User model for verify user emails and sending verification links
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
27741,
12982,
11,
7308,
12982,
13511,
11,
2448,
8481,
35608,
259,
198,
6738,
1334,
62,
30604,
62,
36439,
73,
46569,
13,
83,
... | 3.843373 | 83 |
#Description
#This Python3 script combines Afterglow PS3 USB button-press data with 2 motor activation.
#testbed information
#This was developed as a deliberately low powered and lightweight combination. It is also affordable at near $200 USD current pricing for all the parts. The Pi3 is used for it's common use, versatility, and teachability. (An Arduino can't easily be substituted because of the other existing hardware. The GPIO pins use different wires. Power is wired differenly, etc. This is really meant for the Python and Pi crowd.)
#The advantage of using a Pi powered setup like this is the available computing power. Every compatible mod, sensor, and binary package can still be used. There is just enough code and parts here to make something drivable. You, the tinkerer, can make it smarter, faster, and stronger with what you can do. There is plenty of Pi battery power left to support extra sensors, actuators, and servos.
#Hardware used is:
#$35 RPI3
#$ 7 L298N motor controller ($10 for 2, but you only need 1 to drive the treads with.)
#$10 5v 2.5a 3800mAH phone charger battery
#$25 7.2v 1200mAH Traxxas NimH #2925X.
#$25 Afterglow PS3 gamepad clone. Wireless USB equipped. Not bluetooth capable. (ONLY BUTTONS WORK AT THIS TIME 02/2021)
#$10 Pi wire kit
# Only 18 wires were used on this chassis.
# QTY 7 from the pi to the motor controller.
# QTY 8 from the motor controller to the Traxxas battery.
# QTY 2 on the motor battery.
# QTY 1 from the phone charger to the Pi3B+.
# The other advantage of this simpler initial setup is the room for growth. You can still add
#SAFETY warning.
#No warranty of parts, labor, or service is guaranteed or implied with use of this information. Safety is up to you first. Don't drink and drive. Don't use higher current unless necessary, either. These are purposely sourced as friendly parts for hobbyist and experimental use.
#BATTERY INFO VERY IMPORTANT
#The Pi should run for 4+ hours on a 3800mAH battery. The motors for about 30-45 minutes. The drive battery is admittedly small even on a basic chassis with low weight. It is about as fast as a slow-normal walking speed. 100% motor usage is like a brisk walk. This setup uses a 7.2v. You could easily use an 11.4v RC battery, but be sure to dial the motors back.
#If a larger battery is used, turn the power down!
#7.2v is a slow walk and makes for easy turning.
#p.start(60)
#p2.start(65)
#turn down the commanded power like this.
#p.start(30) for 11.2v should keep the speed down.
#p2.start(35) for 11.2v should keep the speed down.
#the current settings of (60-65) for the 2 drive motors ensure easier turning. Just enough power is requested to avoid stalling the motors during steering or pivoting. Low RPM and power usage makes for #easier transition of motor direction. That is how the tank chassis steers.
#Usage on carpet isn't advised due to static moreso than hair and dirt. (Don't chase dogs, either. Chewing and drool isn't good for electronics.)
# 2 motor instructions to work with in the gamepad scripting. Currently analog sticks do not function.
#(select) stop, (start) exit, forward, back, 3 speeds lo-med-hi
#import evdev
from evdev import InputDevice, categorize, ecodes
import RPi.GPIO as GPIO
from time import sleep
import pygame
#creates object 'gamepad' to store the data
#you can call it whatever you like
gamepad = InputDevice('/dev/input/event0')
#button code variables (change to suit your device)
aBtn = 305
bBtn = 307
xBtn = 304
yBtn = 308
up = 46
down = 32
left = 317
right = 318
#'select' is for stop function.
# 'start' is for gpio cleanup.
select = 314
start = 315
lTrig = 310
rTrig = 311
#analog stick press. Might not work yet.
left = 317
right = 318
#middle buttons
start = 315
select = 314
#upper triggers
lTrig = 310
rTrig = 311
#These are GPIO output addresses for each motor.
in1 = 24 # R Motor GPIO address
in2 = 23 # R Motor GPIO address
in3 = 17 # L Motor GPIO address
in4 = 27 # L Motor GPIO address
en = 25
en2 = 22
temp1=1
GPIO.setmode(GPIO.BCM)
GPIO.setup(in1,GPIO.OUT)
GPIO.setup(in2,GPIO.OUT)
GPIO.setup(en,GPIO.OUT)
GPIO.setup(in3,GPIO.OUT)
GPIO.setup(in4,GPIO.OUT)
GPIO.setup(en2,GPIO.OUT)
GPIO.output(in1,GPIO.LOW)
GPIO.output(in2,GPIO.LOW)
GPIO.output(in3,GPIO.LOW)
GPIO.output(in4,GPIO.LOW)
p=GPIO.PWM(en,1000)
p2=GPIO.PWM(en2,1000)
#Motor power setup here. Just one speed.
p.start(60)
p2.start(65) #l motor is a little weaker on my setup.
#Compensate with slightly more juice going to the weaker motor to help it drive straighter.
print("\n")
print("The default speed & direction of motor is Medium & Forward.....")
print("r-run s-stop f-forward y-forward motor-2 b-backward l-low m-medium h-high e-exit")
print("\n")
#prints out device info at start
print(gamepad)
#loop and filter by event code and print the mapped label
for event in gamepad.read_loop():
if event.type == ecodes.EV_KEY:
if event.value == 1:
if event.code == yBtn:
print("Y = Forward")
#if(temp1==1):
GPIO.output(in1,GPIO.LOW)
GPIO.output(in2,GPIO.HIGH)
GPIO.output(in3,GPIO.LOW)
GPIO.output(in4,GPIO.HIGH)
x='z' #very important! Motor "runs away" without this code in every instruction!
elif event.code == bBtn:
print("B = Pivot Left")
GPIO.output(in3,GPIO.LOW)
GPIO.output(in4,GPIO.HIGH)
#Below is from Backwards function to spin inside motor backwards, bit like an e-brake."
GPIO.output(in1,GPIO.HIGH)
GPIO.output(in2,GPIO.LOW)
x='z'
elif event.code == aBtn:
print("A = Pivot Right")
GPIO.output(in1,GPIO.LOW)
GPIO.output(in2,GPIO.HIGH)
#Below is from Backwards function to spin inside motor backwards, bit like an e-brake."
GPIO.output(in3,GPIO.HIGH)
GPIO.output(in4,GPIO.LOW)
x='z'
elif event.code == xBtn:
print("X = Backwards")
GPIO.output(in1,GPIO.HIGH)
GPIO.output(in2,GPIO.LOW)
GPIO.output(in3,GPIO.HIGH)
GPIO.output(in4,GPIO.LOW)
x='z'
elif event.code == up:
print("up")
elif event.code == down:
print("down")
elif event.code == left:
print("left")
elif event.code == right:
print("right")
elif event.code == start:
x='z'
#elif x=='e':
GPIO.cleanup()
print("Start Button = GPIO Clean up")
break
elif event.code == select:
print("Select Button = Stop")
GPIO.output(in1,GPIO.LOW)
GPIO.output(in2,GPIO.LOW)
GPIO.output(in3,GPIO.LOW)
GPIO.output(in4,GPIO.LOW)
x='z'
elif event.code == lTrig:
print("Left Bumper = Left Motor Stop")
GPIO.output(in3,GPIO.LOW)
GPIO.output(in4,GPIO.LOW)
x='z'
elif event.code == rTrig:
print("Right Bumper = Right Motor Stop")
GPIO.output(in1,GPIO.LOW)
GPIO.output(in2,GPIO.LOW)
x='z'
| [
2,
11828,
198,
198,
2,
1212,
11361,
18,
4226,
21001,
2293,
4743,
322,
6599,
18,
8450,
4936,
12,
8439,
1366,
351,
362,
5584,
14916,
13,
220,
628,
198,
198,
2,
9288,
3077,
1321,
198,
198,
2,
1212,
373,
4166,
355,
257,
14593,
1877,
1... | 2.412388 | 3,116 |
from Blockchain import Blockchain
from Wallet import Wallet
from Transaction import Transaction
from DataConverter import BlockDataIO
import pytest
# Blockchain initialization
# Mining Difficulty and reward
# NOTE(review): argument meaning (difficulty=2, reward=10) inferred from the
# comment above -- confirm against Blockchain.__init__.
blockchain = Blockchain(2, 10)
# Creating two new wallets:
# Each wallet has a user name. createNewWallet function
# generates cryptographic key pair for the wallet user.
# This unique key pair will be used for transactions.
# Users will be defined by their public key and sign
# their transactions with their private key.
wallet1 = Wallet("person1")
wallet1.createNewWallet()
wallet2 = Wallet("person2")
wallet2.createNewWallet()
# Transactions
# Force transaction to person1: seeds wallet1 with 10000 coins.
blockchain.forceTransaction(wallet1.publicKey, 10000)
# Mine the pending transaction ("null" as the miner address -- presumably
# discards the reward; verify in Blockchain.handleTransaction):
blockchain.handleTransaction("null")
# wallet1 sends 285 coins to wallet2 here (sum of i*i, i=1..9); with the
# extra 10-coin transfer below the total sent is 295.
for i in range(1, 10):
    blockchain.addTransaction(Transaction(
        wallet1.publicKey, wallet2.publicKey, i * i, wallet1.privateKey))
# Miner gains the block mining rewards.
blockchain.handleTransaction(wallet2.publicKey)
# Transaction between two wallets:
blockchain.addTransaction(Transaction(
    wallet1.publicKey, wallet2.publicKey, 10, wallet1.privateKey))
blockchain.handleTransaction(wallet2.publicKey)
# Updating balances of users:
wallet1.updateTransactions(blockchain)
wallet2.updateTransactions(blockchain)
person1Balance = wallet1.coins  # 10000 - 295 = 9705
person2Balance = wallet2.coins  # 295 + 10*2 = 315
print(f"Balance of person1: {person1Balance}")
print(f"Balance of person2: {person2Balance}")
| [
6738,
29724,
1330,
29724,
198,
6738,
37249,
1330,
37249,
198,
6738,
45389,
1330,
45389,
198,
6738,
6060,
3103,
332,
353,
1330,
9726,
6601,
9399,
198,
11748,
12972,
9288,
198,
198,
2,
29724,
37588,
198,
198,
2,
29269,
27419,
290,
6721,
1... | 3.609091 | 440 |
#!/usr/bin/env python
# coding: utf-8
"""
Make latex file for supplementary figures
"""
import os
import argparse
import pandas
import numpy
from narps import Narps
# LaTeX document header written verbatim at the top of the generated
# supplementary-tables file (runtime string -- content must stay as-is).
preamble = '''\\documentclass[10pt]{article}
\\usepackage[margin=0.5in]{geometry}
\\geometry{letterpaper}
\\usepackage{graphicx}
\\usepackage{amssymb}
\\usepackage{epstopdf}
\\usepackage{booktabs}
\\usepackage{caption}
\\usepackage{array}
\\title{Supplementary Tables}
\\author{Botvinick-Nezer et al.}
\\begin{document}
'''
# Closing LaTeX fragment appended after the generated tables.
finale = '\\end{document}\n'
if __name__ == "__main__":
    # parse arguments
    parser = argparse.ArgumentParser(
        description='Make latex file for supplementary tables')
    parser.add_argument('-b', '--basedir',
                        help='base directory')
    parser.add_argument('-t', '--test',
                        action='store_true',
                        help='use testing mode (no processing)')
    args = parser.parse_args()
    # set up base directory
    # Precedence: -b flag > NARPS_BASEDIR environment variable > /data default.
    if args.basedir is not None:
        basedir = args.basedir
    elif 'NARPS_BASEDIR' in os.environ:
        basedir = os.environ['NARPS_BASEDIR']
        print("using basedir specified in NARPS_BASEDIR")
    else:
        basedir = '/data'
        print("using default basedir:", basedir)
    # Narps wraps the dataset layout rooted at basedir (imported above).
    narps = Narps(basedir)
    if not args.test:
        # make_supp_table_file is defined elsewhere in this module; test mode
        # skips all processing.
        make_supp_table_file(narps)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
37811,
198,
12050,
47038,
2393,
329,
43871,
5538,
198,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
19798,
292,
198,
11... | 2.40177 | 565 |
from app import app, db
| [
6738,
598,
1330,
598,
11,
20613,
198
] | 3.428571 | 7 |
# Modifications Copyright 2016-2017 Reddit, Inc.
#
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from unittest import mock
from uuid import uuid4
from cqlmapper import columns, LWTException
from cqlmapper.batch import Batch
from cqlmapper.management import sync_table, drop_table
from cqlmapper.models import Model
from cqlmapper.query import IfNotExistsWithCounterColumn
from cqlmapper.query_set import ModelQuerySet
from tests.integration.base import BaseCassEngTestCase
from tests.integration import PROTOCOL_VERSION
| [
2,
3401,
6637,
15069,
1584,
12,
5539,
10750,
11,
3457,
13,
198,
2,
198,
2,
15069,
2211,
12,
5304,
6060,
1273,
897,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
... | 3.549383 | 324 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# fluctmatch --- https://github.com/tclick/python-fluctmatch
# Copyright (c) 2013-2017 The fluctmatch Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the New BSD license.
#
# Please cite your use of fluctmatch in published work:
#
# Timothy H. Click, Nixon Raj, and Jhih-Wei Chu.
# Calculation of Enzyme Fluctuograms from All-Atom Molecular Dynamics
# Simulation. Meth Enzymology. 578 (2016), 327-342,
# doi:10.1016/bs.mie.2016.05.024.
#
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import logging
import logging.config
import os
from os import path
import click
from MDAnalysis.lib.util import which
from fluctmatch.analysis import thermodynamics
@click.command("thermo", short_help="Calculate thermodynamic properties.")
@click.option(
"-s",
"topology",
metavar="FILE",
default="fluctmatch.xplor.psf",
show_default=True,
type=click.Path(
exists=False,
file_okay=True,
resolve_path=False,
),
help="Topology file (e.g., tpr gro g96 pdb brk ent psf)",
)
@click.option(
"-f",
"trajectory",
metavar="FILE",
default="cg.dcd",
show_default=True,
type=click.Path(
exists=False,
file_okay=True,
resolve_path=False,
),
help="Trajectory file (e.g. xtc trr dcd)",
)
@click.option(
"-d",
"datadir",
metavar="DIR",
default=path.join(os.getcwd(), "data"),
show_default=True,
type=click.Path(exists=True, file_okay=False, resolve_path=True),
help="Directory",
)
@click.option(
"-l",
"--logfile",
metavar="LOG",
show_default=True,
default=path.join(os.getcwd(), "thermo.log"),
type=click.Path(exists=False, file_okay=True, resolve_path=True),
help="Log file",
)
@click.option(
"-o",
"outdir",
metavar="DIR",
default=os.getcwd(),
show_default=True,
type=click.Path(exists=False, file_okay=False, resolve_path=True),
help="Directory",
)
@click.option(
"-e",
"--exec",
"nma_exec",
metavar="FILE",
envvar="CHARMMEXEC",
default=which("charmm"),
show_default=True,
type=click.Path(exists=False, file_okay=True, resolve_path=True),
help="CHARMM executable file",
)
@click.option(
"-t",
"--temperature",
metavar="TEMP",
type=click.FLOAT,
default=300.0,
show_default=True,
help="Temperature of simulation",
)
@click.option(
"-c",
"--charmm",
"charmm_version",
metavar="VERSION",
default=41,
show_default=True,
type=click.IntRange(27, None, clamp=True),
help="CHARMM version",
)
| [
2,
532,
9,
12,
10363,
25,
21015,
26,
7400,
12,
10394,
25,
604,
26,
33793,
12,
8658,
82,
12,
14171,
25,
45991,
26,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
43907,
25,
7400,
11338,
28,
19,
4292,
8658,
6482,
10394,
28,
19... | 2.393836 | 1,168 |
# USAGE
# python simple_request.py
# import the necessary packages
import requests
# initialize the Keras REST API endpoint URL along with the input
# data path
# NOTE(review): DATA_PATH is a text file, not an image -- confirm what the
# /predict endpoint actually expects.
KERAS_REST_API_URL = "http://47.100.63.158/predict"
DATA_PATH = "test.txt"
# load the input data and construct the payload for the request
data = open(DATA_PATH, "r").read()
payload = {"data": data}
# submit the request as a multipart POST (files=) and decode the JSON reply
r = requests.post(KERAS_REST_API_URL, files=payload).json()
print(r)
# ensure the request was successful (server sets a boolean "success" field)
# 'TNe' is a field of the model's prediction output -- not documented here.
if r["success"]:
	print(r['TNe'])
# otherwise, the request failed
else:
	print("Request failed")
| [
2,
1294,
11879,
198,
2,
21015,
2829,
62,
25927,
13,
9078,
198,
198,
2,
1330,
262,
3306,
10392,
198,
11748,
7007,
198,
198,
2,
41216,
262,
17337,
292,
30617,
7824,
36123,
10289,
1863,
351,
262,
5128,
198,
2,
2939,
3108,
198,
42839,
1... | 3.02551 | 196 |
""" CEUB - Bacharelado em Ciência da Computação (BCC) - Prof. Barbosa
Teclas de atalho: ctlr <d>, duplica linha. ctrl <y>, apaga linha. ctrl </>, comenta linha.
Monitoria (plantão de dúvidas): https://monitoriaceub.github.io/Monitoria/index.html
- Com base nos conceitos de POO, implemente a entidade pessoa com estas características:
nome, peso, altura e data de nascimento. Resolva os itens:
1- Crie a classe Pessoa
2- Crie o método construtor: ele recebe quatro parâmetros que serão armazenados nos atributos.
No construtor, crie os três atributos da classe (nome, peso, altura e data de nascimento)
3- No método main, crie o objeto pessoa1 e passe os argumentos.
4- Mostre o objeto criado, o objeto pessoa1, teste (rode) a classe
5- Crie os métodos get (consultar) e set (alterar) para os atributos nome e dta_nascimento.
6- No main, teste os métodos gets dos atributos da classe Pessoa (consulte e mostre)
Mostre o atributo nome, altere o nome do objeto pessoa1 e mostre o atributo dta_nascimento
7- Use o método set para alterar o valor do atributo dta_nascimento para 2005-12-13. Teste
8- Crie o método IMC (Índice de Massa Corporal), ele recebe o objeto, calcula e retorna o valor
do imc. O IMC é calculado dividindo o peso (em kg) pela altura ao quadrado (em metros).
9- No programa (main), crie o objeto pessoa2 e passe os argumentos
10- Teste o item anterior, ou seja, mostre o valor dos atributos do objeto pessoa2
11- Altere o construtor para ele instanciar um objeto sem a dta_nascimento,
valor default 2000-01-21. Ele recebe somente o nome, o peso e a altura. Teste
12- Crei o método set_nome com crítica, evitar dados inconsistentes. Teste
13- Sobrescreva o método especial __str__ . Ele recebe o objeto e retorna os dados de uma
pessoa (nome, peso e data de nascimento). Teste.
14- Crie o método calcula_idade, ele recebe o objeto e retorna a idade da pessoa. Teste abaixo:
No final do main, altere a data da pessoa1 para: (2000, 11, 13). Qual a idade da pessoa1?
15- Crie o método mais_velho, ele compara a dta_nascimento de duas pessoas quaisquer e
mostra a dta_nascimento do mais velho ou a mensagem "As datas são iguais.". Teste.
"""
import datetime
# def __init__(self, nome, peso, altura, dta_nascimento): # Método construtor
# def set_nome(self, novo_nome): # Sem crítica
# self.nome = novo_nome
# def set_nome(self, novo_nome): # Com crítica, solução 2
# if isinstance(novo_nome, str):
# if len(novo_nome) >= 3: # RN: nome precisa ter pelo menos 3 letras
# self.nome = novo_nome
# else:
# print('ERRO: Nome inválido')
# else:
# print('ERRO: Tipo inválido')
# pessoa1.mais_velho(pessoa2) # Chamada do método no main
# def mais_velho(self, obj): # Solução 2, usando o método calcula_idade()
# if self.calcula_idade() < obj.calcula_idade():
# print("dta_nascimento mais velho: ", obj.dta_nascimento)
# print("dta_nascimento mais novo: ", self.dta_nascimento)
# elif obj.calcula_idade() < self.calcula_idade():
# print("dta_nascimento mais velho: ", self.dta_nascimento)
# print("dta_nascimento mais novo: ", obj.dta_nascimento)
# else:
# print("As datas sao iguais.....")
if __name__ == '__main__':  # Shortcut: mai <tab>
    dta_nascimento_1 = datetime.date(1993, 12, 13)  # datetime.date(year, month, day)
    pessoa1 = Pessoa("Carlos", 71, 1.80, dta_nascimento_1)  # Calls the constructor (__init__)
    # pessoa1 = Pessoa("Carlos", 71, 1.80, datetime.date(1993, 12, 13))  # Equivalent to the 2 lines above
    print(pessoa1)  # print(pessoa1.__str__())  # Calls the __str__ method
    ''' <__main__.Pessoa object at 0x0000014E7C0F9FD0> '''
    nome = pessoa1.get_nome()  # Using an intermediate variable
    print("- Pessoa 1:\nNome:", nome)
    # print("- Pessoa 1:\nNome:", pessoa1.get_nome())  # Equivalent to the two previous lines
    print("dta_nascimento:", pessoa1.get_dta_nascimento())  # Directly inside print
    pessoa1.set_nome("Ailton")  # Could come from an input()
    print('Nome:', pessoa1.get_nome())  # Test
    dta_nascimento_2 = datetime.date(2005, 12, 23)  # Solution 1
    pessoa1.set_dta_nascimento(dta_nascimento_2)
    # ano = int(input('Ano: '))  # Solution 2
    # mes = int(input('Mês: '))
    # dia = int(input('Dia: '))
    # dta_nascimento_2 = datetime.date(ano, mes, dia)
    # pessoa1.set_dta_nascimento(dta_nascimento_2)
    pessoa1.set_dta_nascimento(datetime.date(2005, 12, 23))  # Solution 3
    print("dta_nascimento:", pessoa1.get_dta_nascimento())  # Test
    print('IMC:', pessoa1.imc())  # IMC: 21.91358024691358
    print(f'IMC: {pessoa1.imc()}')  # IMC: 21.91358024691358
    print(f'IMC: {pessoa1.imc():.2f}')  # IMC: 21.91, f-string
    dta_nascimento_3 = datetime.date(2010, 11, 23)
    pessoa2 = Pessoa("Maria", 63, 1.65, dta_nascimento_3)  # Calls __init__ (constructor)
    print("- Pessoa 2:\nNome: ", pessoa2.get_nome())  # Test
    print("dta_nascimento: ", pessoa2.get_dta_nascimento())
    pessoa3 = Pessoa("Ana", 61, 1.69)  # Passing only three arguments (birth date takes the default)
    print('- Pessoa 3:\nData Nascimento:', pessoa3.get_dta_nascimento())
    pessoa1.set_nome(2.3)  # Wrong argument type (rejected by the validating setter)
    print('Nome:', pessoa1.get_nome())  # Test
    pessoa1.set_nome("Rogério")  # Correct argument type
    print('Nome:', pessoa1.get_nome())  # Test
    print(pessoa3.__str__())  # Equivalent lines, calling __str__() explicitly is optional
    print(pessoa3)
    print(pessoa1)
    pessoa1.set_dta_nascimento(datetime.date(2000, 11, 13))
    # pessoa1.set_dta_nascimento(datetime.date(2000, 9, 24))
    print("Idade 1:", pessoa1.calcula_idade())
    print("Idade 2:", pessoa2.calcula_idade())
    print("Idade 3:", pessoa3.calcula_idade())
    pessoa1.mais_velho(pessoa2)  # object1.method_name(object2)
    pessoa1.mais_velho(pessoa3)
    pessoa2.mais_velho(pessoa3)
| [
37811,
220,
220,
18671,
10526,
220,
220,
532,
220,
220,
25332,
20318,
4533,
795,
37685,
25792,
10782,
544,
12379,
22476,
64,
16175,
28749,
357,
2749,
34,
8,
220,
220,
532,
220,
220,
4415,
13,
10593,
8546,
198,
6767,
565,
292,
390,
379... | 1.926471 | 3,400 |
import re
import json
import httpretty
from aliyunpy.client import AliyunClient
from .config import auto_set_fixture
from .base import BaseAliyunTestCase
| [
11748,
302,
201,
198,
11748,
33918,
201,
198,
11748,
2638,
16100,
201,
198,
6738,
435,
7745,
403,
9078,
13,
16366,
1330,
978,
7745,
403,
11792,
201,
198,
6738,
764,
11250,
1330,
8295,
62,
2617,
62,
69,
9602,
201,
198,
6738,
764,
8692,... | 3.056604 | 53 |
import pandas as pd
import numpy as np
from datetime import datetime
from math import radians, cos, sin, asin, sqrt
def weekday(x):
    """Return the day of the week as 1..7 (1 = Monday ... 7 = Sunday)."""
    zero_based_day = x.weekday()
    return zero_based_day + 1
def is_weekend(x):
    """Return 1 if the timestamp falls on a weekend (Sat/Sun), else 0."""
    # weekday() is 0-based: 5 = Saturday, 6 = Sunday.
    return 1 if x.weekday() >= 5 else 0
def hourly_info(x):
    """Return the hour component (0-23) of a timestamp."""
    return x.hour
def minute_info(x):
    """Return the minute component of a timestamp as a fraction of an hour."""
    # 0..59 minutes mapped onto [0, 1).
    return x.minute / 60
def haversine(x):
    """
    Great-circle distance in miles between the pickup and dropoff
    coordinates of a record ``x`` (mapping with 'pickup_longitude',
    'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude' in
    decimal degrees), via the haversine formula.
    """
    # Convert each coordinate from decimal degrees to radians.
    lon_a = radians(x['pickup_longitude'])
    lat_a = radians(x['pickup_latitude'])
    lon_b = radians(x['dropoff_longitude'])
    lat_b = radians(x['dropoff_latitude'])
    # Haversine of the central angle.
    half_dlat = (lat_b - lat_a) / 2
    half_dlon = (lon_b - lon_a) / 2
    chord = sin(half_dlat)**2 + cos(lat_a) * cos(lat_b) * sin(half_dlon)**2
    earth_radius_miles = 3956  # use 6371 instead for kilometers
    return 2 * earth_radius_miles * asin(sqrt(chord))
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
10688,
1330,
2511,
1547,
11,
8615,
11,
7813,
11,
355,
259,
11,
19862,
17034,
628,
198,
4299,
28269,
7,
87,
2599,
... | 2.382406 | 557 |
import utils.module as module_utils
from evaluation.evaluators.evaluator import VoidEvaluator
from train.trainer import Trainer
class DSETrainer(Trainer):
    """Trainer specialization for BERT distillation training.

    The body is empty, so all behavior comes from ``Trainer``; the subclass
    exists as a named hook point for distillation-specific training runs.
    """
| [
11748,
3384,
4487,
13,
21412,
355,
8265,
62,
26791,
198,
6738,
12660,
13,
18206,
84,
2024,
13,
18206,
84,
1352,
1330,
18331,
36,
2100,
84,
1352,
198,
6738,
4512,
13,
2213,
10613,
1330,
31924,
628,
198,
4871,
17400,
2767,
3201,
263,
7,... | 3.191176 | 68 |
# Generated by Django 2.0 on 2017-12-20 18:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
319,
2177,
12,
1065,
12,
1238,
1248,
25,
1065,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
... | 3.1 | 50 |
#task in automate boring stuff
# Read an integer from stdin and pass it to cyclic() -- defined elsewhere
# in this file; presumably prints the cyclic sequence (TODO confirm).
print("Enter a number:")
try:
    n = int(input())
    cyclic(n)
except ValueError:
    # int() raises ValueError for non-numeric input; report it rather than
    # crash.  (Fixes the misspelled "Unvalied value" message.)
    print("Invalid value")
| [
2,
35943,
287,
43511,
14262,
3404,
628,
198,
4798,
7203,
17469,
257,
1271,
25,
4943,
198,
28311,
25,
198,
220,
220,
220,
299,
796,
493,
7,
15414,
28955,
198,
220,
220,
220,
11700,
291,
7,
77,
8,
198,
16341,
11052,
12331,
25,
198,
... | 2.685185 | 54 |
import requests
from urllib.request import urlopen
from PIL import Image
'''Some app that get requests from api.github.com/users'''
if __name__ == '__main__':
    usuario = input('nome de usuario do github: ')
    # get_usuario()/subs() are defined elsewhere in this module; per the
    # module docstring they query api.github.com/users -- confirm signatures.
    geral = get_usuario(usuario)
    info = subs(usuario)
    sz_info_subs = list(range(len(info)))
    # Print the main profile fields (labels are intentionally in pt-BR).
    print('Nome: {}\nLinkedin: {}\nLocalização: {}\nTwitter: https://twitter.com/{}'.format(geral['name'],geral['blog'], geral['location'], geral['twitter_username']))
    for n in sz_info_subs:
        # One dict per entry: its language and name (presumably repositories).
        tabela = {'Subs':[info[n]['language'], info[n]['name']]}
        print(tabela)
    # Fetch the avatar image over HTTP and display it with PIL.
    Image.open(urlopen(get_usuario(usuario)['avatar_url'])).show()
| [
11748,
7007,
198,
6738,
2956,
297,
571,
13,
25927,
1330,
19016,
9654,
198,
6738,
350,
4146,
1330,
7412,
198,
198,
7061,
6,
4366,
598,
326,
651,
7007,
422,
40391,
13,
12567,
13,
785,
14,
18417,
7061,
6,
628,
198,
361,
11593,
3672,
83... | 2.41853 | 313 |
# -*- coding: utf-8 -*-
#
# Spyder documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 10 16:32:25 2009.
#
# This file is execfile()d with the current directory set to its parent dir.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# pylint: disable = invalid-name, wrong-import-order
"""Sphinx configuration file for Spyder's documentation."""
from docutils import nodes
from docutils.parsers.rst import Directive, directives
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# Standard library imports
import datetime
import os
import subprocess
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Constants
CI = os.environ.get("CI")
TRAVIS_BRANCH = os.environ.get("TRAVIS_BRANCH")
UTC_DATE = datetime.datetime.now(datetime.timezone.utc)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.githubpages",
"sphinx_panels",
"sphinx_multiversion",
]
panels_add_bootstrap_css = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
source_encoding = "utf-8"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Spyder"
copyright = ( # pylint: disable = redefined-builtin
f" 2009-{UTC_DATE.year} Spyder Doc Contributors "
"<span class='pipe-red'>|</span> "
"<a href="
"'https://github.com/spyder-ide/spyder-docs/blob/master/LICENSE.txt' "
"target='_blank' rel='noopener noreferrer'>MIT License</a>"
)
author = "The Spyder Doc Contributors"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "5"
# The full version, including alpha/beta/rc tags.
release = "5"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all docs.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# CI = True
# TRAVIS_BRANCH = 'master'
html_theme = "pandas_sphinx_theme"
html_logo = '_static/images/spyder_logo.png'
html_theme_options = {
"external_links": [
{
"url": "https://www.spyder-ide.org/blog",
"name": "Blog",
},
{
"url": "/",
"name": "Docs",
},
],
"use_edit_page_button": True,
"show_powered_by": True,
"gitter_room": "spyder-ide/public",
"open_collective": "spyder",
"footer_links": [
{
"url": "https://github.com/spyder-ide/spyder",
"name": "GitHub",
},
{
"url": "https://twitter.com/Spyder_IDE",
"name": "Twitter",
},
{
"url": "https://www.facebook.com/SpyderIDE/",
"name": "Facebook",
},
{
"url": "https://www.youtube.com/channel/UCAOyvaOj7dMnavvGUkz9Djg",
"name": "YouTube",
},
{
"url": "https://instagram.com/spyderide",
"name": "Instagram",
},
{
"url": "https://groups.google.com/group/spyderlib",
"name": "Google Groups",
},
],
"page_toc_limit": 1,
"logo_link": "https://www.spyder-ide.org/",
}
html_context = {
"github_user": "spyder-ide",
"github_repo": "spyder-docs",
"github_version": "master",
"doc_path": "doc",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = 'spyder_bbg.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom CSS for the site
html_css_files = [
"css/driver.min.css",
"css/custom_styles.css",
]
# Custom Javascript for the site
html_js_files = [
"js/driver.min.js",
"js/custom_scripts.js",
]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {
"**": [
"versioning.html",
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# -- Options for shpinx-multiversion -----------------------------------------
# Whitelist pattern for tags (set to None to ignore all tags)
smv_tag_whitelist = r'^current$'
# Whitelist pattern for branches (set to None to ignore all branches)
smv_branch_whitelist = r'^\d+\.\w|(master)$'
# Whitelist pattern for remotes (set to None to use local branches only)
smv_remote_whitelist = r'^(origin|upstream)$'
# Pattern for released versions
smv_released_pattern = r'^heads/\d+\.\w+$'
# Format for versioned output directories inside the build directory
smv_outputdir_format = '{config.release}'
# Determine whether remote or local git branches/tags are preferred
# if their output dirs conflict
smv_prefer_remote_refs = False
# Use git ref naming if on a feature/PR branch
try:
    # `git describe` names the checkout after the nearest tag; when that is
    # exactly the "current" tag, version the output dir by ref name instead.
    current_tag = subprocess.run(
        ["git", "describe"], check=True, timeout=5,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
    if current_tag.stdout.strip() == "current":
        smv_outputdir_format = '{ref.name}'
except (subprocess.SubprocessError, FileNotFoundError):
    # SubprocessError covers CalledProcessError (not a git repo / describe
    # failed) and TimeoutExpired; FileNotFoundError covers git not being
    # installed at all.  In every case keep the default output dir format.
    pass
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "Spyderdoc"
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
("index", "Spyder.tex", "Spyder Documentation", author, "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"Spyder",
"Spyder Documentation",
author,
"Spyder",
"The Scientific Python Development Environment.",
"Miscellaneous",
),
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Additional Directives ---------------------------------------------------
# ReST directive for embedding Youtube and Vimeo videos.
# There are two directives added: ``youtube`` and ``vimeo``. The only
# argument is the video id of the video to include.
# Both directives have three optional arguments: ``height``, ``width``
# and ``align``. Default height is 281 and default width is 500.
# Example::
# .. youtube:: anwy2MPT5RE
# :height: 315
# :width: 560
# :align: left
# :copyright: (c) 2012 by Danilo Bargen.
# :license: BSD 3-clause
def align(argument):
    """Validate the ``align`` directive option against the allowed values."""
    allowed_values = ('left', 'center', 'right')
    return directives.choice(argument, allowed_values)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
23688,
1082,
10314,
1382,
8398,
2393,
11,
2727,
416,
198,
2,
599,
20079,
87,
12,
24209,
9688,
319,
19480,
5979,
838,
1467,
25,
2624,
25,
1495,
3717,
13,
1... | 2.972119 | 4,053 |
'''
Authors: Dr. Lloyd Windrim and Dr. Mitch Bryson
Required packages: numpy, scipy, scikit-learn
The primary purpose of this module is to remove and load the ground. The function removeGround() uses the TreePointCloud()
class to remove the ground from the input pointcloud, and save the ground as a mesh (plyfile). The function
load_ground_surface() can then be used to load the created ground mesh into python as points.
'''
import numpy as np
import os
from scipy.spatial import Delaunay
from scipy.spatial import KDTree
from plyfile import PlyData, PlyElement
# TreePointCloud - class to store pointcloud and associated surfaces
# create_ground_points - estimate ground points based on grid minimas and median filtering
# save_ground_surface - output estimated ground surface as a triangular mesh
# initialise_grid - initialise parameters and offset of a grid to store pointcloud for
# further spatial processing
# create_ground_grid - Grids ground points using nearest neighbours
# load_ground_surface - Load a ply file containing an estimated ground surface | [
7061,
6,
198,
220,
220,
220,
46665,
25,
1583,
13,
22361,
3086,
3036,
290,
1583,
13,
20472,
9092,
1559,
198,
220,
220,
220,
20906,
10392,
25,
299,
32152,
11,
629,
541,
88,
11,
629,
1134,
270,
12,
35720,
628,
220,
220,
220,
383,
416... | 3.805461 | 293 |
from django.test import TestCase
from opportunity.models import Opportunity
from common.models import Address
from accounts.models import Account
from contacts.models import Contact
from common.models import User
from django.utils import timezone
# Create your tests here.
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
3663,
13,
27530,
1330,
32675,
198,
6738,
2219,
13,
27530,
1330,
17917,
198,
6738,
5504,
13,
27530,
1330,
10781,
198,
6738,
13961,
13,
27530,
1330,
14039,
198,
6738,
2219,
13,
2... | 4.428571 | 63 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
## modified to use mwclient
##
##
# Edit a Wikipedia article with your favourite editor.
#
# (C) Gerrit Holl 2004
# Distributed under the terms of the MIT license.
# Version 0.4.
#
# TODO: - non existing pages
# - edit conflicts
# - minor edits
# - watch/unwatch
# - ...
__metaclass__ = type
__version__ = "$Id: editarticle.py 5846 2008-08-24 20:53:27Z siebrand $"
import sys
import os
import string
import optparse
import tempfile
from sys import path
# for mwclient
import config_manager as config
import client as mwclient
config.verify(["editor"])
msg = {
'ar': u'تعديل يدوي: %s',
'de': u'Manuelle Bearbeitung: %s',
'en': u'Manual edit: %s',
'he': u'עריכה ידנית: %s',
'ja': u'手動編集: %s',
'pt': u'Editando manualmente com bot: %s',
'sv': u'Manuell redigering: %s',
'is': u'Handvirk breyting: %s',
'zh': u'手動編輯: %s',
}
#################################################################
# Fake user interface used instead of pywikipedia's interactive one.
# NOTE(review): uiFake is not defined in this chunk -- confirm it is
# provided earlier in the file.
ui = uiFake()
import threading
# Serialises message emission: output() holds this while writing.
output_lock = threading.Lock()
# Held while the user is being prompted; output() buffers messages
# instead of printing when this lock is taken (see output()).
input_lock = threading.Lock()
# Messages buffered while input_lock was held.
output_cache = []
def output(text, decoder = None, newline = True, toStdout = False):
    """Output a message to the user via the userinterface.

    Works like print, but uses the encoding used by the user's console
    (console_encoding in the configuration file) instead of ASCII.
    If decoder is None, text should be a unicode string. Otherwise it
    should be encoded in the given encoding.

    If newline is True, a linebreak will be added after printing the text.

    If toStdout is True, the text will be sent to standard output,
    so that it can be piped to another process. All other text will
    be sent to stderr. See: http://en.wikipedia.org/wiki/Pipeline_%28Unix%29

    text can contain special sequences to create colored output. These
    consist of the escape character \03 and the color name in curly braces,
    e. g. \03{lightpurple}. \03{default} resets the color.

    NOTE(review): Python 2 code. Relies on module globals that are not
    defined in this chunk (verbose, traceback, log, cache_output, ui) --
    confirm they exist elsewhere in the file.
    """
    # Only one thread may emit output at a time.
    output_lock.acquire()
    try:
        if decoder:
            text = unicode(text, decoder)
        elif type(text) is not unicode:
            # Caller passed a byte string without telling us its encoding.
            if verbose:
                print "DBG> BUG: Non-unicode (%s) passed to wikipedia.output without decoder!" % type(text)
                print traceback.print_stack()
                print "DBG> Attempting to recover, but please report this problem"
            # Best-effort recovery: try UTF-8, fall back to Latin-1
            # (iso8859-1 never raises, so this always succeeds).
            try:
                text = unicode(text, 'utf-8')
            except UnicodeDecodeError:
                text = unicode(text, 'iso8859-1')
        if newline:
            text += u'\n'
        log(text)
        if input_lock.locked():
            # A prompt is active: buffer the message instead of printing
            # over the user's input.
            cache_output(text, toStdout = toStdout)
        else:
            # NOTE(review): this print looks like leftover debugging output.
            print "Calling output with " + str(type(text)) + " " + str(type(toStdout))
            ui.uioutput(text, toStdout = toStdout)
    finally:
        output_lock.release()
import difflib
def showDiff(oldtext, newtext):
    """
    Prints a string showing the differences between oldtext and newtext.
    The differences are highlighted (only on Unix systems) to show which
    changes were made.

    Builds two parallel sequences: `diff` (the text of every +/- line)
    and `colors` (one color name, or None for default, per character of
    `diff`), then renders them with \03{color} escape sequences via
    output().
    """
    # For information on difflib, see http://pydoc.org/2.3/difflib.html
    color = {
        '+': 'lightgreen',
        '-': 'lightred',
    }
    diff = u''
    colors = []
    # This will store the last line beginning with + or -.
    lastline = None
    # For testing purposes only: show original, uncolored diff
    #    for line in difflib.ndiff(oldtext.splitlines(), newtext.splitlines()):
    #        print line
    # ndiff emits '? ' hint lines *after* the +/- line they annotate, so
    # each +/- line is held in `lastline` until we know whether a hint
    # follows it.
    for line in difflib.ndiff(oldtext.splitlines(), newtext.splitlines()):
        if line.startswith('?'):
            # A hint line: colorize the held +/- line character by character.
            # initialize color vector with None, which means default color
            lastcolors = [None for c in lastline]
            # colorize the + or - sign
            lastcolors[0] = color[lastline[0]]
            # colorize changed parts in red or green
            for i in range(min(len(line), len(lastline))):
                if line[i] != ' ':
                    lastcolors[i] = color[lastline[0]]
            diff += lastline + '\n'
            # append one None (default color) for the newline character
            colors += lastcolors + [None]
        elif lastline:
            # Previous +/- line had no hint: flush it with only its sign colored.
            diff += lastline + '\n'
            # colorize the + or - sign only
            lastcolors = [None for c in lastline]
            lastcolors[0] = color[lastline[0]]
            colors += lastcolors + [None]
        lastline = None
        if line[0] in ('+', '-'):
            lastline = line
    # there might be one + or - line left that wasn't followed by a ? line.
    if lastline:
        diff += lastline + '\n'
        # colorize the + or - sign only
        lastcolors = [None for c in lastline]
        lastcolors[0] = color[lastline[0]]
        colors += lastcolors + [None]
    # Render: emit \03{color} when the color changes, \03{default} to reset.
    result = u''
    lastcolor = None
    for i in range(len(diff)):
        if colors[i] != lastcolor:
            if lastcolor is None:
                result += '\03{%s}' % colors[i]
            else:
                result += '\03{default}'
            lastcolor = colors[i]
        result += diff[i]
    output(result)
def inputunicode(question, password = False):
"""
Works like raw_input(), but returns a unicode string instead of ASCII.
Unlike raw_input, this function automatically adds a space after the
question.
"""
# TODO: make sure this is logged as well
print question + ' '
if password:
import getpass
text = getpass.getpass('')
else:
text = raw_input()
text = unicode(text, config.get("console_encoding"))
return text
###################################################################
# Script entry point.
# NOTE(review): main() is not defined anywhere in this visible chunk --
# confirm it exists elsewhere in the file before running as a script.
if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2235,
198,
2235,
9518,
284,
779,
285,
86,
16366,
198,
2235,
220,
198,
2235,
220,
198,
198,
2,
5312,
257,
15312,
2708,
351,
... | 2.386722 | 2,410 |
from functools import partial
import tkinter as tk
from tkinter import IntVar, StringVar, Variable, ttk
from typing import Callable, List, Tuple
from collections import namedtuple
from ror.ror_solver import TIE_RESOLVERS
from ror.WeightedResultAggregator import WeightedResultAggregator
from utils.AlphaValueWithWeight import AlphaValueWithWeight
from utils.tk.AlphaValueCountSliderFrame import DEFAULT_NUMBER_OF_ALPHA_VALUES, AlphaValueCountSliderFrame
from utils.tk.CustomDialog import CustomDialog
from utils.ScrollableFrame import ScrollableFrame
from utils.tk.TieResolverPicker import TieResolverPicker
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from enum import Enum
import copy
# Lightweight 2-D coordinate pair (x, y).
Coordinates = namedtuple('Coordinates', ['x', 'y'])
# Result of the weighted-aggregator options dialog: the alpha values with
# their weights, plus a string (presumably the chosen tie-resolver name --
# confirm against the dialog that produces this value).
WeightedAggregatorOptionsDialogResult = Tuple[List[AlphaValueWithWeight], str]
| [
6738,
1257,
310,
10141,
1330,
13027,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
256,
74,
3849,
1330,
2558,
19852,
11,
10903,
19852,
11,
35748,
11,
256,
30488,
198,
6738,
19720,
1330,
4889,
540,
11,
7343,
11,
309,
29291,
198,
... | 3.349265 | 272 |
from calendar import timegm
from datetime import datetime, timedelta
from json import loads
from fluiddb.data.system import createSystemData
from fluiddb.model.comment import CommentAPI
from fluiddb.model.object import ObjectAPI
from fluiddb.model.user import UserAPI, getUser
from fluiddb.model.value import TagValueAPI
from fluiddb.schema.scripts.trending_hashtags import extractTrendingHashtags
from fluiddb.testing.basic import FluidinfoTestCase
from fluiddb.testing.resources import ConfigResource, DatabaseResource
| [
6738,
11845,
1330,
640,
39870,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
33918,
1330,
15989,
198,
198,
6738,
6562,
1638,
65,
13,
7890,
13,
10057,
1330,
2251,
11964,
6601,
198,
6738,
6562,
1638,
65,
13,
19849,... | 3.735714 | 140 |
'''
Convolutional autoencoder on MNIST dataset using Keras functional API
'''
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Activation, Input, BatchNormalization
from keras.layers import Conv2D, Conv2DTranspose
from keras.callbacks import TensorBoard
from keras.optimizers import Adam
from keras.utils import to_categorical
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# Parameters
batch_size = 128  # samples per gradient update
epochs = 3  # passes over the training set
# TensorBoard callback: write training logs under ./autoencoder_graph
Tboard = TensorBoard(log_dir='./autoencoder_graph')
# Load the MNIST data
# Autoencoder
# NOTE(review): main() is not defined in this chunk -- presumably the
# data-loading and model-building code hinted at by the comments above
# was meant to live in it; confirm against the complete file.
if __name__ == '__main__':
    main()
| [
7061,
6,
198,
198,
3103,
85,
2122,
282,
1960,
6571,
66,
12342,
319,
29060,
8808,
27039,
1262,
17337,
292,
10345,
7824,
198,
198,
7061,
6,
628,
198,
6738,
41927,
292,
13,
19608,
292,
1039,
1330,
285,
77,
396,
198,
6738,
41927,
292,
1... | 2.985714 | 210 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 tianyou pan <sherry0429 at SOAPython>
"""
import logging
import tornado.web
import json
class MirrorBaseHttpHandler(tornado.web.RequestHandler):
    """Low-level HTTP abstraction shared by all services.

    Maps the four HTTP verbs (POST/PUT/GET/DELETE) onto the four methods
    of an attached ``service`` object, so business logic never has to
    deal with tornado directly.  The routing layer is expected to attach
    a ``service`` attribute; ``transform_data`` must be provided as well
    (presumably by a subclass -- it is not defined here, confirm).
    """

    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    def get_current_user(self):
        """Uniform user-authentication hook for every service.

        :return: the secure ``user_keygen`` cookie value.
        """
        return self.get_secure_cookie('user_keygen', 'none')

    def _delegate(self, handler_name):
        """Forward the current request to ``self.service.<handler_name>``.

        Shared dispatch for the four verb handlers below (they were
        previously four copy-pasted bodies): when a service is attached,
        call the named service method with the request arguments and this
        handler, and write the transformed result if one was returned.

        :param handler_name: name of the service method to invoke.
        """
        if hasattr(self, 'service'):
            data = getattr(self.service, handler_name)(self.request.arguments, self)
            if data is not None:
                self.write(self.transform_data(data))

    def post(self, *args, **kwargs):
        """Submit: pass parameters to the service to start a task."""
        self._delegate('start_service')

    def put(self, *args, **kwargs):
        """Update: pass parameters to change the behaviour of a running task."""
        self._delegate('update_service_arguments')

    def get(self, *args, **kwargs):
        """Fetch: read attribute data out of the service (usually few
        parameters; may also be used to fetch files)."""
        self._delegate('get_service_data')

    def delete(self, *args, **kwargs):
        """Delete: have the service clear data, reclaim resources, reset
        state, and so on."""
        self._delegate('delete_service_resource')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
15269,
357,
34,
8,
2177,
256,
666,
5832,
3425,
1279,
82,
13372,
3023,
1959,
379,
12809,
2969,
7535,
29,
198,
37811,
198,
11748,
18931,
198,
11748,
33718... | 1.555296 | 1,284 |
#!/usr/bin/env python
"""Read semicolon-delimited CSV from stdin and emit a JSON array on stdout.

Every CSV row becomes one JSON object followed by a comma; a trailing
"_meta" object keeps the array syntactically valid and documents the
data's provenance.
"""
import csv
import sys
import json

CSV_DELIMITER = ';'
CSV_QUOTECHAR = '"'

print('[')
with sys.stdin as source:
    for record in csv.DictReader(source, delimiter=CSV_DELIMITER, quotechar=CSV_QUOTECHAR):
        sys.stdout.write(json.dumps(record))
        sys.stdout.write(',\n')
print('{ "_meta": { "comment": "export of national CSIRTs from contacts.cert.at. Please report errors to kaplan@cert.at" } } ]')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
269,
21370,
198,
11748,
25064,
198,
11748,
33918,
198,
198,
4798,
10786,
58,
11537,
198,
198,
12381,
320,
11639,
26,
6,
198,
22708,
10641,
11639,
30543,
198,
198,
4480,
25064... | 2.460606 | 165 |
# Tell Django which AppConfig class to load for the "tomato" app.
# NOTE(review): default_app_config is deprecated since Django 3.2 (a
# single AppConfig is auto-discovered) -- confirm the project's Django
# version before removing or relying on this.
default_app_config = 'tomato.apps.TomatoConfig'
| [
12286,
62,
1324,
62,
11250,
796,
705,
39532,
5549,
13,
18211,
13,
13787,
5549,
16934,
6,
198
] | 2.823529 | 17 |