content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import aiohttp
import asyncio
import os
import time
from datetime import datetime
from telethon import events
from telethon.tl.types import DocumentAttributeVideo
import json
import subprocess
import math
from pySmartDL import SmartDL
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from userbot import LOGS, CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
from userbot.utils import admin_cmd, humanbytes, progress, time_formatter
from userbot.uniborgConfig import Config
thumb_image_path = Config.TMP_DOWNLOAD_DIRECTORY + "/thumb_image.jpg"
import io
@borg.on(admin_cmd(pattern="uploadir (.*)", outgoing=True))
async def uploadir(udir_event):
"""
#For .uploadir command, allows you to upload everything from a folder in the server
"""
input_str = udir_event.pattern_match.group(1)
if os.path.exists(input_str):
await udir_event.edit("Processing ...")
lst_of_files = []
for r, d, f in os.walk(input_str):
for file in f:
lst_of_files.append(os.path.join(r, file))
for file in d:
lst_of_files.append(os.path.join(r, file))
LOGS.info(lst_of_files)
uploaded = 0
await udir_event.edit(
"Found {} files. Uploading will start soon. Please wait!".format(
len(lst_of_files)))
for single_file in lst_of_files:
if os.path.exists(single_file):
# https://stackoverflow.com/a/678242/4723940
caption_rts = os.path.basename(single_file)
c_time = time.time()
if not caption_rts.lower().endswith(".mp4"):
await udir_event.client.send_file(
udir_event.chat_id,
single_file,
caption=caption_rts,
force_document=False,
allow_cache=False,
reply_to=udir_event.message.id,
progress_callback=lambda d, t: asyncio.get_event_loop(
).create_task(
progress(d, t, udir_event, c_time, "Uploading...",
single_file)))
else:
thumb_image = os.path.join(input_str, "thumb.jpg")
c_time = time.time()
metadata = extractMetadata(createParser(single_file))
duration = 0
width = 0
height = 0
if metadata.has("duration"):
duration = metadata.get("duration").seconds
if metadata.has("width"):
width = metadata.get("width")
if metadata.has("height"):
height = metadata.get("height")
await udir_event.client.send_file(
udir_event.chat_id,
single_file,
caption=caption_rts,
thumb=thumb_image,
force_document=False,
allow_cache=False,
reply_to=udir_event.message.id,
attributes=[
DocumentAttributeVideo(
duration=duration,
w=width,
h=height,
round_message=False,
supports_streaming=True,
)
],
progress_callback=lambda d, t: asyncio.get_event_loop(
).create_task(
progress(d, t, udir_event, c_time, "Uploading...",
single_file)))
os.remove(single_file)
uploaded = uploaded + 1
await udir_event.edit(
"Uploaded {} files successfully !!".format(uploaded))
else:
await udir_event.edit("404: Directory Not Found")
@borg.on(admin_cmd(pattern="upload (.*)", outgoing=True))
def get_video_thumb(file, output=None, width=90):
""" Get video thumbnail """
metadata = extractMetadata(createParser(file))
popen = subprocess.Popen(
[
"ffmpeg",
"-i",
file,
"-ss",
str(
int((0, metadata.get("duration").seconds
)[metadata.has("duration")] / 2)),
"-filter:v",
"scale={}:-1".format(width),
"-vframes",
"1",
output,
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
)
if not popen.returncode and os.path.lexists(file):
return output
return None
def extract_w_h(file):
""" Get width and height of media """
command_to_run = [
"ffprobe",
"-v",
"quiet",
"-print_format",
"json",
"-show_format",
"-show_streams",
file,
]
# https://stackoverflow.com/a/11236144/4723940
try:
t_response = subprocess.check_output(command_to_run,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
LOGS.warning(exc)
else:
x_reponse = t_response.decode("UTF-8")
response_json = json.loads(x_reponse)
width = int(response_json["streams"][0]["width"])
height = int(response_json["streams"][0]["height"])
return width, height
@borg.on(admin_cmd(pattern="uploadas(stream|vn|all) (.*)", outgoing=True))
async def uploadas(uas_event):
"""
#For .uploadas command, allows you to specify some arguments for upload.
"""
await uas_event.edit("Processing ...")
type_of_upload = uas_event.pattern_match.group(1)
supports_streaming = False
round_message = False
spam_big_messages = False
if type_of_upload == "stream":
supports_streaming = True
if type_of_upload == "vn":
round_message = True
if type_of_upload == "all":
spam_big_messages = True
input_str = uas_event.pattern_match.group(2)
thumb = None
file_name = None
if "|" in input_str:
file_name, thumb = input_str.split("|")
file_name = file_name.strip()
thumb = thumb.strip()
else:
file_name = input_str
thumb_path = "a_random_f_file_name" + ".jpg"
thumb = get_video_thumb(file_name, output=thumb_path)
if os.path.exists(file_name):
metadata = extractMetadata(createParser(file_name))
duration = 0
width = 0
height = 0
if metadata.has("duration"):
duration = metadata.get("duration").seconds
if metadata.has("width"):
width = metadata.get("width")
if metadata.has("height"):
height = metadata.get("height")
try:
if supports_streaming:
c_time = time.time()
await uas_event.client.send_file(
uas_event.chat_id,
file_name,
thumb=thumb,
caption=input_str,
force_document=False,
allow_cache=False,
reply_to=uas_event.message.id,
attributes=[
DocumentAttributeVideo(
duration=duration,
w=width,
h=height,
round_message=False,
supports_streaming=True,
)
],
progress_callback=lambda d, t: asyncio.get_event_loop(
).create_task(
progress(d, t, uas_event, c_time, "Uploading...",
file_name)))
elif round_message:
c_time = time.time()
await uas_event.client.send_file(
uas_event.chat_id,
file_name,
thumb=thumb,
allow_cache=False,
reply_to=uas_event.message.id,
video_note=True,
attributes=[
DocumentAttributeVideo(
duration=0,
w=1,
h=1,
round_message=True,
supports_streaming=True,
)
],
progress_callback=lambda d, t: asyncio.get_event_loop(
).create_task(
progress(d, t, uas_event, c_time, "Uploading...",
file_name)))
elif spam_big_messages:
await uas_event.edit("TBD: Not (yet) Implemented")
return
os.remove(thumb)
await uas_event.edit("Uploaded successfully !!")
except FileNotFoundError as err:
await uas_event.edit(str(err))
else:
await uas_event.edit("404: File Not Found")
CMD_HELP.update({
"upload":
".upload <path in server>\
\nUsage: Uploads a locally stored file to the chat."
})
| [
11748,
257,
952,
4023,
198,
11748,
30351,
952,
198,
11748,
28686,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
5735,
400,
261,
1330,
2995,
198,
6738,
5735,
400,
261,
13,
28781,
13,
19199,
1330,
16854,
33682,
1079... | 1.800691 | 5,208 |
import cherrypy
import sys
import mysql.connector
from collections import OrderedDict
#Define database variables
DATABASE_USER = 'root'
DATABASE_HOST = '127.0.0.1'
DATABASE_NAME = 'feedND'
#Create connection to MySQL
cnx = mysql.connector.connect(user=DATABASE_USER, host=DATABASE_HOST, database=DATABASE_NAME)
cursor = cnx.cursor()
application = cherrypy.Application(ExampleApp(), None)
| [
11748,
23612,
9078,
198,
11748,
25064,
198,
11748,
48761,
13,
8443,
273,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
2,
7469,
500,
6831,
9633,
198,
35,
1404,
6242,
11159,
62,
29904,
796,
705,
15763,
6,
198,
35,
1404,
6242,... | 2.805755 | 139 |
import math
@stars
if __name__ == '__main__':
explain('prints out all math functions')
print(dir(math))
| [
11748,
10688,
198,
198,
31,
30783,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
4727,
10786,
17190,
503,
477,
10688,
5499,
11537,
198,
220,
220,
220,
3601,
7,
15908,
7,
11018,
4008,
628
] | 2.804878 | 41 |
from django.conf.urls import url
from orders import views
urlpatterns = [
url(r'^orders/settlement/$', views.OrderSettlementView.as_view()),
url(r'^orders/$', views.OrdersView.as_view()),
url(r'^orders/(?P<order_id>\d+)/uncommentgoods/$',views.OrdersUnCommentView.as_view()),
url(r'^orders/(?P<order_id>\d+)/comments/$',views.OrdersCommentView.as_view()),
url(r'^skus/(?P<pk>\d+)/comments/$',views.OrdersCommentSkuView.as_view()),
] | [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
6738,
6266,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
6361,
14,
17744,
1732,
32624,
3256,
5009,
13,
18743,
50,
27331... | 2.364583 | 192 |
"""
thermodynamics
"""
def x(yf, y, yg):
"""Returns the quality of the two-phase mixture"""
if y < yf:
return 0
elif y > yg:
return 1
else:
return (y - yf) / (yg - yf)
if __name__ == '__main__':
from clay.tests import testif
from clay.utils import qualify
single_phase_tests = [
((2, 1, 3), 0),
((2, 4, 3), 1)
]
for test in single_phase_tests:
testif('returns correct quality for single-phase mixture (x: {})'.format(test[1]),
x(*test[0]),
test[1],
name=qualify(x))
two_phase_tests = [
((1, 5, 8), 0.57143),
((0.00079275, 0.01505, 0.04925), 0.29422)
]
for test in two_phase_tests:
testif('returns correct quality for two-phase mixture (x: {})'.format(test[1]),
round(x(*test[0]), 5),
test[1],
name=qualify(x))
| [
198,
37811,
198,
490,
76,
44124,
198,
198,
37811,
198,
198,
4299,
2124,
7,
88,
69,
11,
331,
11,
331,
70,
2599,
198,
220,
220,
220,
37227,
35561,
262,
3081,
286,
262,
734,
12,
40715,
11710,
37811,
198,
220,
220,
220,
611,
331,
1279... | 1.993435 | 457 |
# -*- coding: utf-8 -*-
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Development Team: Stanislav WEB
"""
from src.lib import ArgumentsError
from src.lib import BrowserError
from src.lib import PackageError
from src.lib import ReporterError
from src.lib import TplError
from src.lib import args
from src.lib import browser
from src.lib import events
from src.lib import package
from src.lib import reporter
from src.lib import tpl
from . import execution_time
from .exceptions import SrcError
class Controller(object):
"""Controller class"""
def __init__(self):
"""
Init constructor
:raise SrcError
"""
events.terminate()
try:
interpreter = package.check_interpreter()
if interpreter is not True:
raise SrcError(tpl.error(key='unsupported', actual=interpreter.get('actual'),
expected=interpreter.get('expected')))
self.ioargs = args().get_arguments()
except ArgumentsError as e:
raise SrcError(tpl.error(e.message))
@execution_time(log=tpl)
def run(self):
"""
Bootstrap action
:raise SrcError
:return: None
"""
try:
tpl.message(package.banner())
if 'host' in self.ioargs:
getattr(self, 'scan_action')(self.ioargs)
else:
for action in self.ioargs.keys():
if hasattr(self, '{0}_action'.format(action)) and callable(
getattr(self, '{0}_action'.format(action))):
getattr(self, '{func}_action'.format(func=action))()
break
except (SrcError, PackageError, BrowserError, AttributeError) as e:
raise SrcError(tpl.error(e.message))
@staticmethod
def examples_action():
"""
Show examples action
:return: None
"""
tpl.message(package.examples())
@staticmethod
def update_action():
"""
App update action
:raise SrcError
:return: None
"""
try:
tpl.message(package.update())
except (AttributeError, PackageError) as e:
raise SrcError(e)
@staticmethod
def version_action():
"""
Show app version action
:raise SrcError
:return: None
"""
try:
tpl.message(package.version())
except (AttributeError, PackageError) as e:
raise SrcError(e)
@staticmethod
def local_version():
"""
Show app local version
:raise SrcError
:return: None
"""
try:
tpl.message(package.local_version())
except (AttributeError, PackageError) as e:
raise SrcError(e)
@classmethod
def scan_action(cls, params):
"""
URL scan action
:param dict params: console input args
:raise SrcError
:return: None
"""
try:
brows = browser(params)
if True is reporter.is_reported(params.get('host')):
try:
tpl.prompt(key='logged')
except KeyboardInterrupt:
tpl.cancel(key='abort')
if reporter.default is params.get('reports'):
tpl.info(key='use_reports')
brows.ping()
brows.scan()
brows.done()
except (AttributeError, BrowserError, ReporterError, TplError) as e:
raise SrcError(e.message)
except (KeyboardInterrupt, SystemExit):
tpl.cancel(key='abort')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
220,
220,
220,
770,
1430,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
220,
220,
220,
340,
739,
262,
2846,
286,
262,
2... | 2.206382 | 1,943 |
#
# Copyright 2017-2018 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import attr
from xarray import Variable
from xarray.core import indexing
from xarray.core.utils import FrozenOrderedDict
from xarray.backends.api import open_dataset as _open_dataset
from xarray.backends.common import AbstractDataStore, BackendArray
import cfgrib
FLAVOURS = {
'eccodes': {
'dataset': {
'encode_time': False,
'encode_vertical': False,
'encode_geography': False,
},
},
'ecmwf': {
'variable_map': {
'forecast_reference_time': 'time',
'forecast_period': 'step',
'time': 'valid_time',
'air_pressure': 'level',
'topLevel': 'level',
},
'type_of_level_map': {
'hybrid': 'L{GRIB_hybrid_level_count}',
},
},
'cds': {
'variable_map': {
'number': 'realization',
'forecast_period': 'leadtime',
'air_pressure': 'plev',
'latitude': 'lat',
'longitude': 'lon',
'topLevel': 'level',
},
'type_of_level_map': {
'hybrid': 'L{GRIB_hybrid_level_count}',
},
},
}
@attr.attrs()
| [
2,
198,
2,
15069,
2177,
12,
7908,
3427,
9072,
329,
13398,
12,
17257,
15615,
4558,
40924,
357,
2943,
14326,
37,
737,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
... | 2.380435 | 828 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-15 16:13
from __future__ import unicode_literals
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
20,
319,
2177,
12,
1157,
12,
1314,
1467,
25,
1485,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.883117 | 77 |
# -*- encoding=utf-8 -*-
import utils
from modules.downloader import Downloader
class Scheduler:
""" 调度模块
"""
if __name__ == '__main__':
Scheduler().process()
| [
2,
532,
9,
12,
21004,
28,
40477,
12,
23,
532,
9,
12,
198,
11748,
3384,
4487,
198,
6738,
13103,
13,
15002,
263,
1330,
10472,
263,
628,
198,
4871,
27774,
18173,
25,
198,
220,
220,
220,
37227,
5525,
108,
225,
41753,
99,
162,
101,
94,... | 2.346667 | 75 |
# Write a short Python function that takes a positive integer n and returns
# the sum of the squares of all the odd positive integers smaller than n.
print(odd_squares(9))
print(odd_squares(4002))
print(odd_squares(833))
print(odd_squares(6))
print(odd_squares(112))
| [
2,
19430,
257,
1790,
11361,
2163,
326,
2753,
257,
3967,
18253,
299,
290,
5860,
198,
2,
262,
2160,
286,
262,
24438,
286,
477,
262,
5629,
3967,
37014,
4833,
621,
299,
13,
628,
198,
4798,
7,
5088,
62,
16485,
3565,
7,
24,
4008,
198,
4... | 3.214286 | 84 |
#! /usr/bin/env python3
"""Downloads all graphs used in the experiments to the correct locations.
Query results are sparse matrices. Any matrix which isn't square is removed
since it does not define a graph.
Usage: python3 setyp.py
"""
import ssgetpy
import os
# Maximum results when querying website.
QUERY_LIMIT = 1000
# Max non-zero values in matrix allowed. This roughly corresponds to edges.
NON_ZERO_LIMIT = 200000
# Base location for ssgetpy to place graphs when downloading.
SAVE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'real.mtx')
groups = [
"DIMACS10",
"Hamm",
"AG-Monien",
"Nasa",
]
graph_entries = []
for g in groups:
graph_entries += search(g, NON_ZERO_LIMIT)
print(f'Graphs found: {len(graph_entries)}')
for e in graph_entries:
e.download(format="MM", destpath=SAVE_PATH, extract=True)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
10002,
82,
477,
28770,
973,
287,
262,
10256,
284,
262,
3376,
7064,
13,
198,
198,
20746,
2482,
389,
29877,
2603,
45977,
13,
4377,
17593,
543,
2125,
470,
6616,
318,
4... | 2.785714 | 308 |
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp
from kfp import dsl, components
from typing import NamedTuple
@components.create_component_from_func
@components.create_component_from_func
def generate_resource_request() -> NamedTuple('output', [('memory', str), ('cpu', str)]):
'''Returns the memory and cpu request'''
from collections import namedtuple
resource_output = namedtuple('output', ['memory', 'cpu'])
return resource_output('500Mi', '200m')
@dsl.pipeline(
name='Runtime resource request pipeline',
description='An example on how to make resource requests at runtime.'
)
if __name__ == '__main__':
kfp.compiler.Compiler().compile(resource_request_pipeline, __file__ + '.yaml')
| [
2,
15069,
33448,
383,
24921,
891,
9319,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 3.385638 | 376 |
"""
DeepLabv3 for image segmentation, implemented in Gluon.
Original paper: 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587.
"""
__all__ = ['DeepLabv3', 'deeplabv3_resnetd50b_voc', 'deeplabv3_resnetd101b_voc', 'deeplabv3_resnetd152b_voc',
'deeplabv3_resnetd50b_coco', 'deeplabv3_resnetd101b_coco', 'deeplabv3_resnetd152b_coco',
'deeplabv3_resnetd50b_ade20k', 'deeplabv3_resnetd101b_ade20k', 'deeplabv3_resnetd50b_cityscapes',
'deeplabv3_resnetd101b_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import conv1x1, conv1x1_block, conv3x3_block
from .resnetd import resnetd50b, resnetd101b, resnetd152b
class DeepLabv3FinalBlock(HybridBlock):
"""
DeepLabv3 final block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bottleneck_factor : int, default 4
Bottleneck factor.
"""
class ASPPAvgBranch(HybridBlock):
"""
ASPP branch with average pooling.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
upscale_out_size : tuple of 2 int or None
Spatial size of output image for the bilinear upsampling operation.
"""
class AtrousSpatialPyramidPooling(HybridBlock):
"""
Atrous Spatial Pyramid Pooling (ASPP) module.
Parameters:
----------
in_channels : int
Number of input channels.
upscale_out_size : tuple of 2 int
Spatial size of the input tensor for the bilinear upsampling operation.
"""
class DeepLabv3(HybridBlock):
"""
DeepLabv3 model from 'Rethinking Atrous Convolution for Semantic Image Segmentation,'
https://arxiv.org/abs/1706.05587.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
backbone_out_channels : int, default 2048
Number of output channels form feature extractor.
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (480, 480)
Spatial size of the expected input image.
classes : int, default 21
Number of segmentation classes.
"""
def get_deeplabv3(backbone,
classes,
aux=False,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DeepLabv3 model with specific parameters.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
classes : int
Number of segmentation classes.
aux : bool, default False
Whether to output an auxiliary result.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
net = DeepLabv3(
backbone=backbone,
classes=classes,
aux=aux,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx,
ignore_extra=True)
return net
def deeplabv3_resnetd50b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
"""
DeepLabv3 model on the base of ResNet(D)-50b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image
Segmentation,' https://arxiv.org/abs/1706.05587.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_voc", **kwargs)
def deeplabv3_resnetd101b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
"""
DeepLabv3 model on the base of ResNet(D)-101b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image
Segmentation,' https://arxiv.org/abs/1706.05587.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_voc", **kwargs)
def deeplabv3_resnetd152b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
"""
DeepLabv3 model on the base of ResNet(D)-152b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image
Segmentation,' https://arxiv.org/abs/1706.05587.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd152b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd152b_voc", **kwargs)
def deeplabv3_resnetd50b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
"""
DeepLabv3 model on the base of ResNet(D)-50b for COCO from 'Rethinking Atrous Convolution for Semantic Image
Segmentation,' https://arxiv.org/abs/1706.05587.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_coco", **kwargs)
def deeplabv3_resnetd101b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
"""
DeepLabv3 model on the base of ResNet(D)-101b for COCO from 'Rethinking Atrous Convolution for Semantic Image
Segmentation,' https://arxiv.org/abs/1706.05587.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_coco", **kwargs)
def deeplabv3_resnetd152b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
"""
DeepLabv3 model on the base of ResNet(D)-152b for COCO from 'Rethinking Atrous Convolution for Semantic Image
Segmentation,' https://arxiv.org/abs/1706.05587.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd152b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd152b_coco", **kwargs)
def deeplabv3_resnetd50b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
"""
DeepLabv3 model on the base of ResNet(D)-50b for ADE20K from 'Rethinking Atrous Convolution for Semantic Image
Segmentation,' https://arxiv.org/abs/1706.05587.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 150
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_ade20k",
**kwargs)
def deeplabv3_resnetd101b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
"""
DeepLabv3 model on the base of ResNet(D)-101b for ADE20K from 'Rethinking Atrous Convolution for Semantic Image
Segmentation,' https://arxiv.org/abs/1706.05587.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 150
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_ade20k",
**kwargs)
def deeplabv3_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
"""
DeepLabv3 model on the base of ResNet(D)-50b for Cityscapes from 'Rethinking Atrous Convolution for Semantic Image
Segmentation,' https://arxiv.org/abs/1706.05587.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 19
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_cityscapes",
**kwargs)
def deeplabv3_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
"""
DeepLabv3 model on the base of ResNet(D)-101b for Cityscapes from 'Rethinking Atrous Convolution for Semantic Image
Segmentation,' https://arxiv.org/abs/1706.05587.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 19
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_cityscapes",
**kwargs)
if __name__ == "__main__":
_test()
| [
37811,
198,
220,
220,
220,
10766,
17822,
85,
18,
329,
2939,
10618,
341,
11,
9177,
287,
402,
2290,
261,
13,
198,
220,
220,
220,
13745,
3348,
25,
705,
49,
2788,
8040,
1629,
7596,
34872,
2122,
329,
12449,
5109,
7412,
1001,
5154,
341,
4... | 2.650494 | 5,668 |
import os
from dodo_commands import CommandError, Dodo
from dodo_commands.framework.util import to_arg_list
# Entry point for the `docker build` Dodo command.
if Dodo.is_main(__name__):
    args = _args()
    # Copying extra directories into the build context mutates the tree, so
    # the command is only "safe" (no confirmation needed) when there are none.
    Dodo.safe = len(args.extra_dirs) == 0
    _copy_extra_dirs(args.build_dir, args.extra_dirs)
    try:
        # Run `docker build -t <image> -f <dockerfile> [build args] .`
        # from inside the build directory.
        Dodo.run(
            [
                "docker",
                "build",
                "-t",
                args.docker_image,
                "-f",
                args.docker_file,
                *to_arg_list(args.build_args),
                ".",
            ],
            cwd=args.build_dir,
        )  # noqa
    finally:
        # Always clean up the copied directories, even if the build fails.
        _remove_extra_dirs(args.build_dir, args.extra_dirs)
| [
11748,
28686,
198,
198,
6738,
20764,
78,
62,
9503,
1746,
1330,
9455,
12331,
11,
15990,
78,
198,
6738,
20764,
78,
62,
9503,
1746,
13,
30604,
13,
22602,
1330,
284,
62,
853,
62,
4868,
628,
628,
198,
198,
361,
15990,
78,
13,
271,
62,
... | 1.737662 | 385 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import random
import sys
import time
import traceback
from six.moves import configparser
from datahub import DataHub
from datahub.exceptions import ResourceExistException, ResourceNotFoundException, InvalidParameterException
# Resolve paths relative to this test file and load DataHub credentials
# from ../datahub.ini.
current_path = os.path.split(os.path.realpath(__file__))[0]
root_path = os.path.join(current_path, '../..')  # repository root (currently unused here)
configer = configparser.ConfigParser()
configer.read(os.path.join(current_path, '../datahub.ini'))
access_id = configer.get('datahub', 'access_id')
access_key = configer.get('datahub', 'access_key')
endpoint = configer.get('datahub', 'endpoint')
# NOTE(review): this echoes the access key (a credential) to stdout/logs --
# consider masking it; confirm this is intentional for local testing only.
print("=======================================")
print("access_id: %s" % access_id)
print("access_key: %s" % access_key)
print("endpoint: %s" % endpoint)
print("=======================================\n\n")
# All three values are mandatory; bail out with a non-zero exit code otherwise.
if not access_id or not access_key or not endpoint:
    print("[access_id, access_key, endpoint] must be set in datahub.ini!")
    sys.exit(-1)
# Shared DataHub client used by the test cases in this module.
dh = DataHub(access_id, access_key, endpoint)
# run directly
if __name__ == '__main__':
    # TestProject is defined elsewhere in this file (not visible here);
    # execute its project-related test cases in sequence.
    test = TestProject()
    test.test_list_project()
    test.test_create_and_delete_project()
    test.test_create_invalid_project()
    test.test_get_unexist_project()
    test.test_get_exist_project()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
... | 3.24025 | 641 |
import sqlite3

# Connection object that represents the database. ':memory:' gives a fresh,
# throw-away database on every run, so there is nothing to delete between runs.
conn = sqlite3.connect(':memory:')

# Cursor used to run SQL commands.
c = conn.cursor()

# BUG FIX: the CREATE TABLE statement was commented out, but an in-memory
# database always starts empty, so the INSERT below raised
# sqlite3.OperationalError ("no such table: employees"). The table must be
# created unconditionally before any data is added.
c.execute("""CREATE TABLE employees (
            first text,
            last text,
            pay integer
            )""")

# Add a sample row to the database.
c.execute("INSERT INTO employees VALUES ('Corey', 'real', 500)")
conn.commit()

# Query it back; use a parameterized query ('?' placeholder) instead of
# string interpolation to avoid SQL-injection pitfalls.
c.execute("SELECT * FROM employees WHERE last=?", ('real',))
rows = c.fetchall()
print(rows)

conn.commit()
conn.close()
| [
11748,
44161,
578,
18,
198,
2,
38659,
26181,
326,
2380,
674,
6831,
351,
287,
4088,
6831,
284,
2251,
257,
2393,
1444,
6538,
6831,
2393,
198,
37043,
796,
44161,
578,
18,
13,
8443,
7,
10354,
31673,
25,
11537,
1303,
568,
340,
460,
923,
... | 2.906122 | 245 |
import pytest
from check_wheel_contents.errors import WheelValidationError
from check_wheel_contents.util import (
comma_split,
find_wheel_dirs,
is_data_dir,
is_dist_info_dir,
is_stubs_dir,
pymodule_basename,
)
@pytest.mark.parametrize(
"filename,expected",
[
("foo.py", "foo"),
("FOO.PY", None),
("foo.pyc", None),
("foo.pyo", None),
(".py", None),
("py", None),
("not-an-identifier.py", "not-an-identifier"),
("def.py", "def"),
("extra.ext.py", "extra.ext"),
("foo.cpython-38-x86_64-linux-gnu.so", "foo"),
("graph.cpython-37m-darwin.so", "graph"),
("foo.cp38-win_amd64.pyd", "foo"),
("foo.cp38-win32.pyd", "foo"),
("foo.so", "foo"),
("foo.pyd", "foo"),
("_ffi.abi3.so", "_ffi"),
],
)
@pytest.mark.parametrize(
"sin,lout",
[
("", []),
(" ", []),
(",", []),
(" , ", []),
(" , , ", []),
("foo", ["foo"]),
("foo,bar", ["foo", "bar"]),
("foo, bar", ["foo", "bar"]),
("foo ,bar", ["foo", "bar"]),
(" foo , bar ", ["foo", "bar"]),
(" foo , , bar ", ["foo", "bar"]),
("foo,,bar", ["foo", "bar"]),
("foo bar", ["foo bar"]),
(",foo", ["foo"]),
("foo,", ["foo"]),
],
)
@pytest.mark.parametrize(
"name,expected",
[
("somepackage-1.0.0.dist-info", True),
("somepackage.dist-info", False),
("somepackage-1.0.0-1.dist-info", False),
("somepackage-1.0.0.data", False),
("SOME_._PaCkAgE-0.dist-info", True),
("foo-1!2+local.dist-info", True),
("foo-1_2_local.dist-info", True),
(".dist-info", False),
],
)
@pytest.mark.parametrize(
"name,expected",
[
("somepackage-1.0.0.data", True),
("somepackage.data", False),
("somepackage-1.0.0-1.data", False),
("somepackage-1.0.0.dist-info", False),
("SOME_._PaCkAgE-0.data", True),
("foo-1!2+local.data", True),
("foo-1_2_local.data", True),
(".data", False),
],
)
@pytest.mark.parametrize(
"name,expected",
[
("foo-stubs", True),
("foo-stub", False),
("foo-STUBS", False),
("-stubs", False),
("def-stubs", False),
("has-hyphen-stubs", False),
("has.period-stubs", False),
],
)
@pytest.mark.parametrize(
"namelist,project,version,expected",
[
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"foo-1.0.dist-info/RECORD",
],
"foo",
"1.0",
("foo-1.0.dist-info", None),
),
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"foo-1.0.dist-info/RECORD",
"foo-1.0.data/scripts/bar",
],
"foo",
"1.0",
("foo-1.0.dist-info", "foo-1.0.data"),
),
(
[
"foo.py",
"FOO-1.0.0.dist-info/WHEEL",
"FOO-1.0.0.dist-info/RECORD",
"foo-1.data/scripts/bar",
],
"foo",
"1.0",
("FOO-1.0.0.dist-info", "foo-1.data"),
),
(
[
"foo.py",
"FOO-1.0_1.dist-info/WHEEL",
"FOO-1.0_1.dist-info/RECORD",
],
"foo",
"1.0.post1",
("FOO-1.0_1.dist-info", None),
),
],
)
@pytest.mark.parametrize(
"namelist,project,version,msg",
[
(
[
"foo.py",
"foo-1.0.dist/WHEEL",
],
"foo",
"1.0",
"No .dist-info directory in wheel",
),
(
[
"foo.py",
"bar-1.0.dist-info/WHEEL",
],
"foo",
"1.0",
"Project & version of wheel's .dist-info directory do not match wheel"
" name: 'bar-1.0.dist-info'",
),
(
[
"foo.py",
"foo-2.0.dist-info/WHEEL",
],
"foo",
"1.0",
"Project & version of wheel's .dist-info directory do not match wheel"
" name: 'foo-2.0.dist-info'",
),
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"bar-2.0.dist-info/RECORD",
],
"foo",
"1.0",
"Wheel contains multiple .dist-info directories",
),
(
[
"foo.py",
"FOO-1.0.0.dist-info/WHEEL",
"foo-1.dist-info/RECORD",
],
"foo",
"1.0",
"Wheel contains multiple .dist-info directories",
),
(
["foo.py", ".dist-info/WHEEL"],
"foo",
"1.0",
"No .dist-info directory in wheel",
),
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"foo-1.0.data/scripts/bar",
"FOO-1.data/headers/foo.h",
],
"foo",
"1.0",
"Wheel contains multiple .data directories",
),
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"bar-1.0.data/scripts/bar",
],
"foo",
"1.0",
"Project & version of wheel's .data directory do not match"
" wheel name: 'bar-1.0.data'",
),
(
[
"foo.py",
"foo-1.0.dist-info/WHEEL",
"foo-2.0.data/scripts/bar",
],
"foo",
"1.0",
"Project & version of wheel's .data directory do not match"
" wheel name: 'foo-2.0.data'",
),
],
)
| [
11748,
12972,
9288,
198,
6738,
2198,
62,
22001,
62,
3642,
658,
13,
48277,
1330,
15810,
7762,
24765,
12331,
198,
6738,
2198,
62,
22001,
62,
3642,
658,
13,
22602,
1330,
357,
198,
220,
220,
220,
39650,
62,
35312,
11,
198,
220,
220,
220,
... | 1.599103 | 3,789 |
from setuptools import setup

# Packaging metadata for pycdhit: a small tool set for analysing CD-HIT
# clustering results. Install with `pip install .` or `python setup.py install`.
setup(name='pycdhit',
      version='0.0.1',
      description='CDHIT results analysis tool set',
      #url='http://github.com/storborg/funniest',
      author='Blazej Marciniak',
      author_email='blazejmarciniak@gmail.com',
      license='Apache 2.0',
      packages=['pycdhit'],
      # Scientific stack required at runtime.
      install_requires=[
          'scipy', 'numpy', 'pandas', 'matplotlib'
      ],
      scripts=[],
      # zip_safe=False keeps the package unpacked on disk after install.
      zip_safe=False)
| [
6738,
900,
37623,
10141,
1330,
9058,
201,
198,
201,
198,
40406,
7,
3672,
11639,
9078,
10210,
17945,
3256,
201,
198,
220,
220,
220,
220,
220,
2196,
11639,
15,
13,
15,
13,
16,
3256,
201,
198,
220,
220,
220,
220,
220,
6764,
11639,
8610... | 2.106977 | 215 |
"""A filter that can parse sphinx docs
"""
__author__ = 'jay'
__version__ = (0, 0, 1, 'alpha', 0)
catalog = ['read']
form = {
"lang" : {
"verbose": "Language",
"require": False,
"type": "select",
"choices": [["python2", "Python 2"], ["python3", "Python 3"]],
"value": "python2",
},
"trigger_pattern": {
"verbose": "Trigger Pattern",
"require": False,
"helper_text": "Sphinx will regenerate docs once trigger is matched."
" Empty means regenerate docs every commit.",
},
"docs_root": {
"verbose": "Documentation Root",
"helper_text": "Where the generated docs locate."
},
"working_dir": {
"verbose": "Working Directory",
"require": False,
"helper_text": "Path to working tree that build command run in.",
},
"build_command": {
"verbose": "Build Command",
"require": False,
"type": "textarea",
"helper_text": "Command that builds docs. Empty means docs are"
" already there",
},
"ignore_errors": {
"verbose": "Ignore Errors",
"type": "checkbox",
"value": False,
},
}
from main import Filter
__all__ = ['Filter']
| [
37811,
32,
8106,
326,
460,
21136,
599,
20079,
87,
34165,
198,
37811,
198,
198,
834,
9800,
834,
796,
705,
33708,
6,
198,
834,
9641,
834,
796,
357,
15,
11,
657,
11,
352,
11,
705,
26591,
3256,
657,
8,
198,
198,
9246,
11794,
796,
3725... | 2.253546 | 564 |
# Generated by Django 2.2.4 on 2019-08-20 20:44
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.contrib.table_block.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.embeds.blocks
import wagtail.images.blocks
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
19,
319,
13130,
12,
2919,
12,
1238,
1160,
25,
2598,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.990196 | 102 |
"""
Copyright (c) 2018 Doyub Kim
I am making my contributions/submissions to this project solely in my personal
capacity and am not conveying any rights to any intellectual property of any
third parties.
"""
import pyjet
import unittest
# Entry point: `main` is expected to be defined elsewhere in this file
# (not visible here) -- TODO confirm.
if __name__ == '__main__':
    main()
| [
37811,
198,
15269,
357,
66,
8,
2864,
360,
726,
549,
6502,
198,
198,
40,
716,
1642,
616,
9284,
14,
7266,
8481,
284,
428,
1628,
9944,
287,
616,
2614,
198,
42404,
290,
716,
407,
24748,
1112,
597,
2489,
284,
597,
9028,
3119,
286,
597,
... | 3.5375 | 80 |
"""
Процентная ставка по вкладу составляет P процентов годовых, которые
прибавляются к сумме вклада. Вклад составляет X рублей Y копеек.
Определите размер вклада через год. При решении этой задачи нельзя
пользоваться условными инструкциями и циклами.
Формат ввода
Программа получает на вход целые числа P, X, Y.
Формат вывода
Программа должна вывести два числа: величину вклада через год
в рублях и копейках. Дробная часть копеек отбрасывается.
"""
# P, X and Y are integers per the problem statement, so read them as ints.
# BUG FIX: the original used float() throughout; binary floating point cannot
# represent all decimal amounts exactly, so truncating the fractional kopecks
# could yield an off-by-one result for some inputs. Pure integer arithmetic
# is exact.
p, x, y = int(input()), int(input()), int(input())
# Deposit after one year, in kopecks. Floor division drops the fractional
# part of a kopeck, as the problem requires ("truncate fractional kopecks").
total = (100 * x + y) * (100 + p) // 100
# Output: rubles and remaining kopecks.
print(total // 100, total % 100)
| [
37811,
198,
140,
253,
21169,
15166,
141,
228,
16843,
22177,
20375,
22177,
16142,
40623,
220,
21727,
20375,
16142,
38857,
31583,
16142,
12466,
123,
15166,
12466,
110,
31583,
30143,
16142,
43666,
35072,
220,
21727,
15166,
21727,
20375,
16142,
3... | 1.091078 | 538 |
import os
import hashlib
import asyncio
from aiohttp import web
from multidict import MultiDict
from services.utils import (METADATA_SERVICE_HEADER, METADATA_SERVICE_VERSION,
SERVICE_BUILD_TIMESTAMP, SERVICE_COMMIT_HASH,
web_response)
from .utils import get_json_from_env
UI_SERVICE_VERSION = "{metadata_v}-{timestamp}-{commit}".format(
metadata_v=METADATA_SERVICE_VERSION,
timestamp=SERVICE_BUILD_TIMESTAMP or "",
commit=SERVICE_COMMIT_HASH or ""
)
class AdminApi(object):
    """
    Provides administrative routes for the UI Service,
    such as health checks, version info and custom navigation links.
    """

    async def version(self, request):
        """
        ---
        description: Returns the version of the metadata service
        tags:
        - Admin
        produces:
        - 'text/plain'
        responses:
            "200":
                description: successful operation. Return the version number
            "405":
                description: invalid HTTP Method
        """
        return web.Response(text=str(UI_SERVICE_VERSION))

    async def ping(self, request):
        """
        ---
        description: This end-point allow to test that service is up.
        tags:
        - Admin
        produces:
        - 'text/plain'
        responses:
            "202":
                description: successful operation. Return "pong" text
            "405":
                description: invalid HTTP Method
        """
        return web.Response(text="pong", headers=MultiDict(
            {METADATA_SERVICE_HEADER: METADATA_SERVICE_VERSION}))

    async def links(self, request):
        """
        ---
        description: Provides custom navigation links for UI.
        tags:
        - Admin
        produces:
        - 'application/json'
        responses:
            "200":
                description: Returns the custom navigation links for UI
                schema:
                    $ref: '#/definitions/ResponsesLinkList'
            "405":
                description: invalid HTTP Method
        """
        return web_response(status=200, body=self.navigation_links)

    async def get_notifications(self, request):
        """
        ---
        description: Provides System Notifications for the UI
        tags:
        - Admin
        produces:
        - 'application/json'
        responses:
            "200":
                description: Returns list of active system notification
                schema:
                    $ref: '#/definitions/ResponsesNotificationList'
            "405":
                description: invalid HTTP Method
        """
        processed_notifications = []
        for notification in self.notifications:
            try:
                if "message" not in notification:
                    continue
                # Created at is required and "start" is used by default if not value provided
                # Notification will be ignored if both "created" and "start" are missing
                created = notification.get("created", notification.get("start", None))
                if not created:
                    continue
                processed_notifications.append({
                    # Fall back to a content hash so every notification gets a stable id.
                    "id": notification.get("id", hashlib.sha1(
                        str(notification).encode('utf-8')).hexdigest()),
                    "type": notification.get("type", "info"),
                    "contentType": notification.get("contentType", "text"),
                    "message": notification.get("message", ""),
                    "url": notification.get("url", None),
                    "urlText": notification.get("urlText", None),
                    "created": created,
                    "start": notification.get("start", None),
                    "end": notification.get("end", None)
                })
            except Exception:
                # Best-effort: skip malformed notification entries.
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt; narrowed to Exception.
                pass
        # Filter notifications based on query parameters
        # Supports eq,ne.lt,le,gt,ge operators for all the fields
        return web_response(status=200, body=list(
            filter(filter_notifications, processed_notifications)))

    async def status(self, request):
        """
        ---
        description: Display system status information, such as cache
        tags:
        - Admin
        produces:
        - 'application/json'
        responses:
            "200":
                description: Return system status information, such as cache
            "405":
                description: invalid HTTP Method
        """
        cache_status = {}
        for store in [self.cache_store.artifact_cache, self.cache_store.dag_cache, self.cache_store.log_cache]:
            try:
                # Use client ping to verify communication, True = ok
                await store.cache.ping()
                ping = True
            except Exception as ex:
                ping = str(ex)
            try:
                # Use Check -action to verify Cache communication, True = ok
                await store.cache.request_and_return([store.cache.check()], None)
                check = True
            except Exception as ex:
                check = str(ex)
            # Extract list of worker subprocesses
            worker_list = []
            cache_server_pid = store.cache._proc.pid if store.cache._proc else None
            if cache_server_pid:
                try:
                    # Children of the cache server process are its workers.
                    proc = await asyncio.create_subprocess_shell(
                        "pgrep -P {}".format(cache_server_pid),
                        stdout=asyncio.subprocess.PIPE)
                    stdout, _ = await proc.communicate()
                    if stdout:
                        pids = stdout.decode().splitlines()
                        proc = await asyncio.create_subprocess_shell(
                            "ps -p {} -o pid,%cpu,%mem,stime,time,command".format(",".join(pids)),
                            stdout=asyncio.subprocess.PIPE)
                        stdout, _ = await proc.communicate()
                        worker_list = stdout.decode().splitlines()
                except Exception as ex:
                    worker_list = str(ex)
            else:
                worker_list = "Unable to get cache server pid"
            # Extract current cache data usage in bytes
            current_size = 0
            try:
                cache_data_path = os.path.abspath(store.cache._root)
                proc = await asyncio.create_subprocess_shell(
                    "du -s {} | cut -f1".format(cache_data_path),
                    stdout=asyncio.subprocess.PIPE)
                stdout, _ = await proc.communicate()
                if stdout:
                    current_size = int(stdout.decode())
            except Exception as ex:
                current_size = str(ex)
            cache_status[store.__class__.__name__] = {
                "restart_requested": store.cache._restart_requested,
                "is_alive": store.cache._is_alive,
                "pending_requests": list(store.cache.pending_requests),
                "root": store.cache._root,
                "prev_is_alive": store.cache._prev_is_alive,
                "action_classes": list(map(lambda cls: cls.__name__, store.cache._action_classes)),
                "max_actions": store.cache._max_actions,
                "max_size": store.cache._max_size,
                "current_size": current_size,
                "ping": ping,
                "check_action": check,
                "proc": {
                    "pid": store.cache._proc.pid,
                    "returncode": store.cache._proc.returncode,
                } if store.cache._proc else None,
                "workers": worker_list
            }
        return web_response(status=200, body={
            "cache": cache_status
        })
| [
11748,
28686,
198,
11748,
12234,
8019,
198,
11748,
30351,
952,
198,
198,
6738,
257,
952,
4023,
1330,
3992,
198,
6738,
1963,
312,
713,
1330,
15237,
35,
713,
198,
6738,
2594,
13,
26791,
1330,
357,
47123,
2885,
13563,
62,
35009,
27389,
62,... | 2.005609 | 3,922 |
from unittest import TestCase
from django_tally.user_def.lang import parse, KW, serialize
from django_tally.user_def.lang.parser import parse_tokens
source = """
(do
(defn fib (n)
(if (in '(0 1) n)
1
(+ (fib (- n 1)) (fib (- n 2)))))
(fib 10)
[1 2 3]
{1 2 3}
#{1 2 3}
#[1 2 3]
^foo)
"""
body = [
[
KW('do'),
[
KW('defn'), KW('fib'), [KW('n')],
[
KW('if'), [KW('in'), [KW('quote'), [0, 1]], KW('n')],
1,
[
KW('+'),
[KW('fib'), [KW('-'), KW('n'), 1]],
[KW('fib'), [KW('-'), KW('n'), 2]],
],
],
],
[KW('fib'), 10],
[KW('list'), 1, 2, 3],
[KW('tuple'), 1, 2, 3],
[KW('dict'), 1, 2, 3],
[KW('set'), 1, 2, 3],
[KW('quote'), [KW('unquote'), KW('foo')]],
]
]
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
42625,
14208,
62,
83,
453,
13,
7220,
62,
4299,
13,
17204,
1330,
21136,
11,
509,
54,
11,
11389,
1096,
198,
6738,
42625,
14208,
62,
83,
453,
13,
7220,
62,
4299,
13,
17204,
13,
... | 1.570458 | 589 |
from conftest import run_setup_sql
from pgbedrock import memberships as memb
from pgbedrock import attributes
# Role and group names used as fixtures by the membership tests below.
ROLE1 = 'charlie'
ROLE2 = 'barney'
ROLE3 = 'wacko'
DESIRED_GROUP1 = 'desired_group1'
DESIRED_GROUP2 = 'desired_group2'
CURRENT_GROUP1 = 'current_group1'
CURRENT_GROUP2 = 'current_group2'
# Query template: does role {0} have membership in role {1}?
Q_HAS_ROLE = "SELECT pg_has_role('{}', '{}', 'member')"
# Placeholder value for arguments whose content is irrelevant to a test.
DUMMY = 'foo'
@run_setup_sql([
    attributes.Q_CREATE_ROLE.format(ROLE1),
    attributes.Q_CREATE_ROLE.format(ROLE2),
    attributes.Q_CREATE_ROLE.format(ROLE3),
    attributes.Q_CREATE_ROLE.format(CURRENT_GROUP1),
    attributes.Q_CREATE_ROLE.format(DESIRED_GROUP1),
    attributes.Q_CREATE_ROLE.format(DESIRED_GROUP2),
    attributes.Q_ALTER_ROLE.format(ROLE1, 'SUPERUSER'),
    memb.Q_GRANT_MEMBERSHIP.format(CURRENT_GROUP1, ROLE3),
])
def test_analyze_memberships(cursor):
    """
    Exercise membership analysis over three roles:
      * ROLE1 is a superuser, so its memberships must be skipped entirely
      * ROLE2 needs two new grants
      * ROLE3 needs one grant and one revoke (it currently belongs to
        CURRENT_GROUP1, which is not desired)
    """
    desired_spec = {
        ROLE1: {'member_of': [DESIRED_GROUP1]},
        ROLE2: {'member_of': [DESIRED_GROUP1, DESIRED_GROUP2]},
        ROLE3: {'member_of': [DESIRED_GROUP1]},
    }
    expected_statements = {
        memb.SKIP_SUPERUSER_MEMBERSHIPS_MSG.format(ROLE1),
        memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP1, ROLE2),
        memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP2, ROLE2),
        memb.Q_GRANT_MEMBERSHIP.format(DESIRED_GROUP1, ROLE3),
        memb.Q_REVOKE_MEMBERSHIP.format(CURRENT_GROUP1, ROLE3),
    }
    produced = memb.analyze_memberships(desired_spec, cursor, verbose=False)
    assert set(produced) == expected_statements
| [
6738,
369,
701,
395,
1330,
1057,
62,
40406,
62,
25410,
198,
6738,
23241,
3077,
10823,
1330,
1866,
5748,
355,
1066,
65,
198,
6738,
23241,
3077,
10823,
1330,
12608,
628,
198,
13252,
2538,
16,
796,
705,
354,
7063,
494,
6,
198,
13252,
253... | 2.213225 | 741 |
"""
Apps.py:
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class AccountsConfig(AppConfig):
    """
    Django application configuration for the ``accounts`` app.
    """
    # Dotted label Django uses to identify this application.
    name = 'accounts'
| [
37811,
198,
48433,
13,
9078,
25,
198,
37811,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
... | 2.712766 | 94 |
from flask import render_template
| [
6738,
42903,
1330,
8543,
62,
28243,
198
] | 4.857143 | 7 |
"""
Django settings for {{ project_name }} project on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import dj_database_url
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY FIX: prefer an environment-provided key so the real production key
# never lives in source control; the old literal remains only as a
# development fallback, keeping existing setups working unchanged.
SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "key")

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',

    # Disable Django's own staticfiles handling in favour of WhiteNoise, for
    # greater consistency between gunicorn and `./manage.py runserver`. See:
    # http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
    'taggit',
    'taggit_templatetags2',
    'tinymce',
    'blog',
    'whitenoise.runserver_nostatic',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            'debug': DEBUG,
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# Local default is SQLite; overridden below from $DATABASE_URL on Heroku.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# TinyMCE rich-text editor configuration used by the blog app.
TINYMCE_INCLUDE_JQUERY = True
BROWSER_SPELLCHECKER = True
TINYMCE_DEFAULT_CONFIG = {
    'height': 360,
    'width': 1120,
    'cleanup_on_startup': True,
    'custom_undo_redo_levels': 20,
    'selector': 'textarea',
    'theme': 'modern',
    'plugins': '''
            paste textcolor save link image media preview codesample contextmenu
            table code lists fullscreen insertdatetime nonbreaking
            contextmenu directionality searchreplace wordcount visualblocks
            visualchars code fullscreen autolink lists charmap print hr
            anchor pagebreak
            ''',
    'toolbar1': '''
            fullscreen preview bold italic underline | fontselect,
            fontsizeselect | forecolor backcolor | alignleft alignright |
            aligncenter alignjustify | indent outdent | bullist numlist table |
            | link image media | codesample |
            ''',
    'toolbar2': '''
            visualblocks visualchars |
            charmap hr pagebreak nonbreaking anchor | code |
            ''',
    'contextmenu': 'formats | link image',
    'menubar': True,
    'statusbar': True,
    'codesample_dialog_height':500,
    'codesample_dialog_width':300,
    }

# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Change 'default' database configuration with $DATABASE_URL.
DATABASES['default'].update(dj_database_url.config(conn_max_age=500, ssl_require=True))

# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# Allow all host headers
# NOTE(review): '*' is acceptable behind Heroku's router, but consider
# restricting this to the app's real hostnames in production.
ALLOWED_HOSTS = ['*']

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles' )

# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'blog', 'static'),
]

# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

# Activate Django-Heroku (configures database, static files, logging, etc.).
django_heroku.settings(locals())
| [
37811,
198,
35,
73,
14208,
6460,
329,
22935,
1628,
62,
3672,
34949,
1628,
319,
2332,
11601,
13,
1114,
517,
7508,
11,
766,
25,
198,
5450,
1378,
12567,
13,
785,
14,
11718,
23063,
14,
11718,
23063,
12,
28241,
14208,
12,
28243,
198,
198,
... | 2.438517 | 2,212 |
import sys
from src.model.migrations import migrate_schema
if __name__ == "__main__":
if len(sys.argv) == 1:
print(
"""Specify argument.
Possible commands:
migrate
convert users
convert blogs
For launching flask server see README.md"""
)
elif sys.argv[1] == "migrate":
from src import create_app
from src.model import db
create_app()
migrate_schema(db.get_database())
elif sys.argv[1] == "convert":
if len(sys.argv) == 3:
from converters import convert
convert(sys.argv[2])
else:
print("Use convert <type>")
else:
print("Unrecognized command")
| [
11748,
25064,
198,
6738,
12351,
13,
19849,
13,
76,
3692,
602,
1330,
32492,
62,
15952,
2611,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
611,
18896,
7,
17597,
13,
853,
85,
8,
6624,
352,
25,
... | 2.206452 | 310 |
import random
from curses import *
from random import randint
# Depth constant used by the curses-based logic below; presumably a board or
# search depth -- TODO confirm against the rest of the file.
depth = 5
| [
11748,
4738,
198,
6738,
43878,
1330,
1635,
198,
6738,
4738,
1330,
43720,
600,
198,
198,
18053,
796,
642,
628,
198
] | 3.75 | 20 |
"""
Module for resolving circular imports via "interfaces"
"""
import abc
| [
37811,
198,
26796,
329,
31038,
18620,
17944,
2884,
366,
3849,
32186,
1,
198,
37811,
198,
198,
11748,
450,
66,
628,
628,
198
] | 3.590909 | 22 |
#To complete
# Main
# Driver: reads an array and a target from stdin and prints the first index
# of the target. `firstIndex` is defined elsewhere in this file (not visible
# here) and is presumably recursive, hence the raised recursion limit.
from sys import setrecursionlimit
setrecursionlimit(11000)
n=int(input())  # number of elements (read but the split line determines length)
arr=list(int(i) for i in input().strip().split(' '))
x=int(input())  # value to search for
idx=0           # starting index for the recursive search
print(firstIndex(arr, x, idx))
| [
2,
2514,
1844,
198,
198,
2,
8774,
198,
6738,
25064,
1330,
900,
8344,
24197,
32374,
198,
2617,
8344,
24197,
32374,
7,
1157,
830,
8,
198,
77,
28,
600,
7,
15414,
28955,
198,
3258,
28,
4868,
7,
600,
7,
72,
8,
329,
1312,
287,
5128,
2... | 2.597403 | 77 |
#!/usr/bin/env python
#coding: utf-8
import watchtipstest
import unittest
from index import app
if __name__ == '__main__':
    # Build a test suite from AppController (defined elsewhere in this file;
    # not visible here) and hand it to watchtipstest's runner.
    suite = unittest.TestLoader().loadTestsFromTestCase(AppController)
    watchtipstest.main(suite)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
66,
7656,
25,
3384,
69,
12,
23,
198,
198,
11748,
2342,
22504,
301,
395,
198,
11748,
555,
715,
395,
198,
6738,
6376,
1330,
598,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
8... | 2.682353 | 85 |
#!/usr/bin/env python
import sys
# Entry point: `main` is expected to be defined elsewhere in this file
# (not visible here) -- TODO confirm.
if __name__ == '__main__':
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
1388,
3419,
628
] | 2.517241 | 29 |
from random import *
import copy
| [
6738,
4738,
1330,
1635,
198,
11748,
4866,
198
] | 4.125 | 8 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import datetime
import decimal
import inspect
import time
import warnings
from operator import itemgetter
from itertools import islice
try:
from itertools import izip_longest
except ImportError: # Python 3
from itertools import zip_longest as izip_longest
from .compat import basestring, unicode, long
class Field(object):
"""Base mapping field class."""
_MappingProxy = MetaMapping('_MappingProxy', (object,), {}) # Python 3 workaround
class Record(Mapping):
"""ASTM record mapping class."""
class Component(Mapping):
"""ASTM component mapping class."""
class TextField(Field):
"""Mapping field for string values."""
class ConstantField(Field):
"""Mapping field for constant values.
>>> class Record(Mapping):
... type = ConstantField(default='S')
>>> rec = Record()
>>> rec.type
'S'
>>> rec.type = 'W'
Traceback (most recent call last):
...
ValueError: Field changing not allowed
"""
class IntegerField(Field):
"""Mapping field for integer values."""
class DecimalField(Field):
"""Mapping field for decimal values."""
class DateField(Field):
"""Mapping field for storing date/time values."""
format = '%Y%m%d'
class TimeField(Field):
"""Mapping field for storing times."""
format = '%H%M%S'
class DateTimeField(Field):
"""Mapping field for storing date/time values."""
format = '%Y%m%d%H%M%S'
class SetField(Field):
"""Mapping field for predefined set of values."""
class ComponentField(Field):
"""Mapping field for storing record component."""
class RepeatedComponentField(Field):
"""Mapping field for storing list of record components."""
# update docstrings from list
for name, obj in inspect.getmembers(Proxy):
if getattr(list, name, None) is None\
or name in ['__module__', '__doc__']:
continue
if not inspect.isfunction(obj):
continue
obj.__doc__ = getattr(list, name).__doc__
del name, obj
class NotUsedField(Field):
"""Mapping field for value that should be used. Acts as placeholder.
On attempt to assign something to it raises :exc:`UserWarning` and rejects
assigned value."""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
34,
8,
2211,
10009,
911,
273,
259,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
3788,
318,
11971,
355,
3417,
287,
262,
2393,
27975,
4... | 2.967233 | 824 |
# DETERMINING THE ACTUAL GEAR (TRANSMISSION) RATIO:                           #
# FOLLOW THESE STEPS:                                                         #
# After the script starts, the monitor will keep printing 0.00 values.
# When the motor rotor is turned (in either direction), these values increase.
# Turn the motor rotor by hand until the reducer output shaft completes 1 full revolution.
# The monitor will then show a value equal to the reducer's gear ratio. It may not be an integer!
# For better accuracy, turn the rotor until the output shaft completes 10 revolutions, then divide the obtained value by 10.
#
from pyiArduinoI2Cmotor import *                                              # Import the library for working with the I2C-flash motor module.
mot = pyiArduinoI2Cmotor(0x09)                                                # Declare the `mot` object for the pyiArduinoI2Cmotor library functions/methods, specifying the module's I2C bus address.
                                                                              # If the object is declared without an address (mot = pyiArduinoI2Cmotor ), the address is detected automatically.
mot.begin()                                                                   # Initialise communication with the motor.
mot.delSum()                                                                  # Reset the accumulated count of shaft revolutions.
mot.setReducer(1.0)                                                           # Set the reducer gear ratio to 1:1 (so raw rotor revolutions are reported).
#mot.setMagnet(7)                                                             # Set the real number of magnetic poles of one polarity on the ring magnet mounted on the rotor.
                                                                              # Use the library's FindMagnet example to determine this value.
#
while True:                                                                   #
    print( mot.getSum(MOT_REV) )                                              # Print the number of full shaft revolutions.
    sleep(.2)                                                                 #
| [
2,
12466,
252,
140,
253,
140,
254,
140,
243,
140,
242,
140,
243,
140,
249,
140,
243,
140,
251,
140,
246,
140,
243,
12466,
254,
140,
243,
140,
238,
140,
249,
140,
105,
140,
251,
140,
252,
140,
241,
140,
252,
12466,
253,
140,
243,... | 1.009188 | 1,959 |
"""
Utils
This file provides an implementation of helping classes and functions.
"""
import abc
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
try:
import cPickle as pickle
except ModuleNotFoundError:
import pickle
class Picklable:
    """
    Simple class for saving (and loading) functionality using pickle.
    """

    def save(self, name: str = None, concat: bool = False):
        """
        Save object using pickle.

        Parameters
        ----------
        name: str
            The filename for the saved object. Defaults to the class name.
        concat: bool
            Whether to add the class name as a prefix to the file name.
        """
        if name is None:
            name = self.__class__.__name__
        if concat:
            name = self.__class__.__name__ + "_" + name
        # Protocol -1 selects the highest pickle protocol available.
        with open(name, "wb") as outfile:
            pickle.dump(self, outfile, -1)

    def load(self, name: str = None, concat: bool = False):
        """
        Load object using pickle.

        Parameters
        ----------
        name: str
            The filename for the loaded object. Defaults to the class name.
        concat: bool
            Whether to add the class name as a prefix to the file name.

        Returns
        -------
        The object unpickled from the file.
        """
        if name is None:
            name = self.__class__.__name__
        if concat:
            name = self.__class__.__name__ + "_" + name
        # Renamed the file handle: the original used "input", shadowing the
        # builtin of the same name.
        with open(name, "rb") as infile:
            return pickle.load(infile)
class Visualizable(abc.ABC):
    """Abstract base class for objects that can render themselves.

    Concrete subclasses are required to supply a :meth:`visualize`
    implementation.
    """

    @abc.abstractmethod
    def visualize(self):
        """Render the outputs or internal state of the object."""
        raise NotImplementedError
def visualize_labels(
    labels, title="Visualization of labels", mode: str = "lines"
) -> None:
    """
    Plot labels.

    Parameters
    ----------
    labels : pandas.DataFrame or 1-D array-like
        Label series to plot. A 1-D input is wrapped into a single-column
        DataFrame named "labels"; a 2-D input must provide ``.index``,
        ``.iloc`` and ``.columns`` (i.e. be DataFrame-like).
    title: str
        Title of the plot.
    mode: str
        Determines the drawing mode for this scatter trace. If
        the provided `mode` includes "text" then the `text`
        elements appear at the coordinates. Otherwise, the
        `text` elements appear on hover. If there are less than
        20 points and the trace is not stacked then the default
        is "lines+markers". Otherwise, "lines".
    """
    if len(labels.shape) == 1:
        labels = pd.DataFrame(labels, columns=["labels"])
    fig = go.Figure()
    fig.update_layout(title=title)
    fig.update_yaxes(title_text="labels")
    # One scatter trace per label column, all sharing the index as x-axis.
    for i in range(labels.shape[1]):
        fig.add_trace(
            go.Scatter(
                x=labels.index,
                y=labels.iloc[:, i],
                name=labels.columns[i],
                mode=mode,
            )
        )
    fig.show()
def visualize_data(
    X, y, downprojector=None, title: str = "Visualization of data"
) -> None:
    """
    Plot data in 2D.

    Parameters
    ----------
    X : iterable
        Training data. Must be DataFrame-like: ``.iloc`` and ``.index``
        are used when no downprojector is supplied.
    y : iterable
        Training targets, used to colour the points.
    downprojector : callable, default=None
        Data downprojection method for visualization. Must implement
        ``fit_transform``; when None the first two raw columns of X are used.
    title: str
        Title of the plot.
    """
    if downprojector is not None:
        embedding = downprojector.fit_transform(X)
    else:
        # Fall back to the first two feature columns as plot coordinates.
        embedding = X.iloc[:, :2].values
    data = pd.DataFrame(embedding, columns=["X Value", "Y Value"], index=X.index)
    data["Category"] = y
    fig = px.scatter(
        data,
        x=data.columns[0],
        y=data.columns[1],
        color=data["Category"],
        hover_name=data.index,
    )
    fig.update_layout(title=title)
    fig.show()
| [
37811,
198,
18274,
4487,
198,
198,
1212,
2393,
3769,
281,
7822,
286,
5742,
6097,
290,
5499,
13,
198,
37811,
198,
198,
11748,
450,
66,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
7110,
306,
13,
42712,
355,
279,
87,
198,
11748,
... | 2.303069 | 1,564 |
import os
import unittest
from test import fixture
from .config import yaml_config
| [
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
6738,
1332,
1330,
29220,
198,
198,
6738,
220,
764,
11250,
1330,
331,
43695,
62,
11250,
628,
628,
628,
628
] | 3.321429 | 28 |
import argparse
from shutil import copyfile
import os
from Bio import SeqIO
if __name__ == '__main__':
    # Register a decoy FASTA sequence as an extra "plasmid" genome: copy the
    # file into the assembly tree and append its metadata to the index files.
    parser = argparse.ArgumentParser(description='Add fasta to decoy db')
    parser.add_argument('--decoy_fasta', required=True)
    NANO_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    parser.add_argument('--assembly_dir', default=f'{NANO_DIR}/genomes')
    parser.add_argument('--config_folder', help='Config file folder', default=f'{NANO_DIR}/config')
    FLAGS = parser.parse_args()

    decoy_name = os.path.basename(os.path.splitext(FLAGS.decoy_fasta)[0])
    path = f'{FLAGS.assembly_dir}/refseq/plasmid/{decoy_name}.fa'
    copyfile(FLAGS.decoy_fasta, path)

    with open(f'{FLAGS.config_folder}/plasmid.genome_set', 'a') as config_plasmid:
        config_plasmid.write(f'{decoy_name}\n')

    # Open every metadata file with a context manager so the handles are
    # flushed and closed even on error (the original never closed them).
    with open(f'{FLAGS.assembly_dir}/assembly_length', 'a') as assemblyLengthWriter, \
         open(f'{FLAGS.assembly_dir}/assembly_path', 'a') as assemblyPathWriter, \
         open(f'{FLAGS.assembly_dir}/assembly_tax_id', 'a') as assemblyTaxidWriter, \
         open(f'{FLAGS.assembly_dir}/sequence_summary', 'a') as sequenceSummaryWriter, \
         open(path, 'rt') as fi:
        totalLength = 0
        for record in SeqIO.parse(fi, 'fasta'):
            totalLength += len(record)
            sequenceSummaryWriter.write(f"{record.id}\t{len(record)}\t{decoy_name}\n")
        assemblyLengthWriter.write(f"{decoy_name}\t{totalLength}\n")
        assemblyPathWriter.write(f"{decoy_name}\t{path}\n")
        # Fixed placeholder taxonomy ids for decoy entries.
        arbitrary_taxid = '35'
        assemblyTaxidWriter.write(f"{decoy_name}\t1000000099\t1000000099\t1000000001\t{arbitrary_taxid}\n")
| [
11748,
1822,
29572,
201,
198,
6738,
4423,
346,
1330,
4866,
7753,
201,
198,
11748,
28686,
201,
198,
6738,
16024,
1330,
1001,
80,
9399,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
22... | 2.308844 | 735 |
from .__version__ import __version__
from .interface import WriteError, RemoteStorage, StorageLevel
from .exceptions import *
from .local import *
from .ssh import SSHLocation
| [
6738,
764,
834,
9641,
834,
1330,
11593,
9641,
834,
198,
6738,
764,
39994,
1330,
19430,
12331,
11,
21520,
31425,
11,
20514,
4971,
198,
6738,
764,
1069,
11755,
1330,
1635,
198,
6738,
764,
12001,
1330,
1635,
198,
6738,
764,
45824,
1330,
67... | 4 | 44 |
from django.conf.urls import url, include
urlpatterns = [
    # NOTE(review): string view references ('megashop.views.home') were
    # deprecated in Django 1.8 and removed in Django 1.10 — confirm the
    # project's Django version; newer versions require the view callable.
    url(r'^$', 'megashop.views.home', name='home'),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
2291,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
3,
3256,
705,
28917,
1077,
404,
13,
33571,
13,
11195,
3256,
1438,
11639,
11195,
33809,... | 2.456522 | 46 |
class Config(object):
    """Configuration container for quick tests."""

    # Debugging is disabled for the quick-test run.
    DEBUG = False

    # Equity ticker universe used by the quick test.
    TICKER_LIST = [
        'AMZN', 'DIS', 'FOX', 'GE', 'GILD', 'GOOGL', 'HPE', 'HLT',
        'HFC', 'INFO', 'IBM', 'JPM', 'LKQ', 'MSFT', 'MSCI', 'NDAQ',
        'NFLX', 'NVDA', 'PLTR', 'RH', 'SPCE', 'YUM', 'ZTS',
    ]
| [
4871,
17056,
7,
15252,
2599,
198,
220,
220,
220,
37227,
16934,
1398,
329,
2068,
1332,
37811,
198,
220,
220,
220,
16959,
796,
10352,
198,
220,
220,
220,
309,
11860,
1137,
62,
45849,
796,
37250,
2390,
57,
45,
3256,
705,
26288,
3256,
705... | 2.169355 | 124 |
#!/usr/bin/env python3
# imports go here
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
#
# Free Coding session for 2015-06-09
# Written by Matt Warren
#
# Segments the classic "Lena" test image into 15 regions using Ward
# agglomerative clustering constrained to the pixel grid, then overlays the
# region contours on the grayscale image.
# NOTE(review): scipy.misc.lena() was removed in SciPy 1.0 and plt.cm.spectral
# was removed from matplotlib (renamed 'nipy_spectral') — this script only
# runs against the old library versions it was written for; confirm pins.
lena = sp.misc.lena()
X = np.reshape(lena, (-1, 1))  # one intensity feature per pixel
connectivity = grid_to_graph(*lena.shape)  # pixel-grid adjacency constraint
st = time.time()  # start time (never reported anywhere)
n_clusters = 15  # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
# Draw one contour per cluster, coloured along the spectral colormap.
# NOTE(review): 'contours' is not a documented contour() keyword — the intent
# was presumably the positional level count; verify against the matplotlib
# version in use.
for l in range(n_clusters):
    plt.contour(label == l, contours=1,
                colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
17944,
467,
994,
198,
198,
11748,
640,
355,
640,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
355,
599,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
4... | 2.504425 | 339 |
import datetime
import uuid
import pytest
from sqlalchemy.exc import IntegrityError, SQLAlchemyError
from app import db
from app.dao.organisation_dao import (
dao_add_service_to_organisation,
dao_add_user_to_organisation,
dao_get_organisation_by_email_address,
dao_get_organisation_by_id,
dao_get_organisation_by_service_id,
dao_get_organisation_services,
dao_get_organisations,
dao_get_users_for_organisation,
dao_update_organisation,
)
from app.models import Organisation, Service
from tests.app.db import (
create_domain,
create_email_branding,
create_letter_branding,
create_organisation,
create_service,
create_user,
)
@pytest.mark.parametrize('domain_list, expected_domains', (
(['abc', 'def'], {'abc', 'def'}),
(['ABC', 'DEF'], {'abc', 'def'}),
([], set()),
(None, {'123', '456'}),
pytest.param(
['abc', 'ABC'], {'abc'},
marks=pytest.mark.xfail(raises=IntegrityError)
),
))
@pytest.mark.parametrize('domain, expected_org', (
('unknown.gov.uk', False),
('example.gov.uk', True),
))
| [
11748,
4818,
8079,
198,
11748,
334,
27112,
198,
198,
11748,
12972,
9288,
198,
6738,
44161,
282,
26599,
13,
41194,
1330,
39348,
12331,
11,
16363,
2348,
26599,
12331,
198,
198,
6738,
598,
1330,
20613,
198,
6738,
598,
13,
67,
5488,
13,
997... | 2.377919 | 471 |
# Interpreter pattern
class AbstractExpression:
    '''Abstract interpreter (base class of all expressions)'''
class TerminalExpression(AbstractExpression):
    '''Inherits the abstract interpreter; concrete terminal interpreter'''
if __name__ == '__main__':
    # NOTE(review): Context and NotTerminalExpression are not defined in this
    # module excerpt — presumably defined elsewhere; verify before running.
    context = Context()
    context.name = 'alex'
    arrs = [NotTerminalExpression(),TerminalExpression(),TerminalExpression()]
    # Each expression interprets the shared context in turn.
    for entry in arrs:
        entry.interpreter(context)
| [
2,
5525,
100,
96,
34932,
232,
161,
247,
101,
162,
101,
94,
28156,
237,
198,
198,
4871,
27741,
16870,
2234,
25,
198,
220,
220,
220,
705,
7061,
162,
232,
121,
164,
109,
94,
164,
100,
96,
34932,
232,
161,
247,
101,
7061,
6,
628,
19... | 1.858696 | 184 |
# -*- coding: utf-8 -*-
__author__ = "Paul Schifferer <dm@sweetrpg.com>"
"""Common exceptions.
"""
class ObjectNotFound(Exception):
    """Raised when a requested object cannot be found."""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
834,
9800,
834,
796,
366,
12041,
41665,
11882,
1279,
36020,
31,
34751,
81,
6024,
13,
785,
24618,
198,
37811,
17227,
13269,
13,
198,
37811,
628,
198,
4871,
9515,
3673,
21... | 2.878788 | 66 |
from typing import List, Tuple
# NOTE(review): read_input and moving_average are not defined in this module
# excerpt — presumably provided elsewhere; verify before running.
arr, window_size = read_input()
print(" ".join(map(str, moving_average(arr, window_size))))
| [
6738,
19720,
1330,
7343,
11,
309,
29291,
198,
198,
3258,
11,
4324,
62,
7857,
796,
1100,
62,
15414,
3419,
198,
4798,
7203,
27071,
22179,
7,
8899,
7,
2536,
11,
3867,
62,
23913,
7,
3258,
11,
4324,
62,
7857,
35514,
198
] | 3.1 | 40 |
# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
import numpy as np
import struct
import subprocess
from simpa.utils import Tags, Settings
from simpa.core.simulation_modules.optical_simulation_module import OpticalForwardModuleBase
from simpa.core.device_digital_twins import PhotoacousticDevice
from simpa.core.device_digital_twins.illumination_geometries.illumination_geometry_base import IlluminationGeometryBase
import json
import os
import gc
from typing import List, Dict, Tuple, Union
class MCXAdapter(OpticalForwardModuleBase):
    """
    This class implements a bridge to the mcx framework to integrate mcx into SIMPA. This adapter only allows for
    computation of fluence, for computations of diffuse reflectance, take a look at `simpa.ReflectanceMcxAdapter`

    .. note::
        MCX is a GPU-enabled Monte-Carlo model simulation of photon transport in tissue:
        Fang, Qianqian, and David A. Boas. "Monte Carlo simulation of photon migration in 3D
        turbid media accelerated by graphics processing units."
        Optics express 17.22 (2009): 20178-20190.

    """

    def __init__(self, global_settings: Settings):
        """
        initializes MCX-specific configuration and clean-up instances

        :param global_settings: global settings used during simulations
        """
        super(MCXAdapter, self).__init__(global_settings=global_settings)
        self.mcx_json_config_file = None
        self.mcx_volumetric_data_file = None
        self.frames = None
        self.mcx_output_suffixes = {'mcx_volumetric_data_file': '.mc2'}

    def define_settings_and_execute(self,
                                    _illumination_geometry,
                                    _assumed_anisotropy):
        """
        Defines the settings and runs mcx for a single illumination geometry.

        :param _illumination_geometry: illumination geometry to simulate
        :param _assumed_anisotropy: anisotropy value assumed for the scattering correction
        :return: fluence volume read back from the MCX output
        """
        settings_dict = self.get_mcx_settings(illumination_geometry=_illumination_geometry,
                                              assumed_anisotropy=_assumed_anisotropy)

        self.generate_mcx_json_input(settings_dict=settings_dict)
        # run the simulation
        cmd = self.get_command()
        self.run_mcx(cmd)

        # Read output
        return self.read_mcx_output()[Tags.DATA_FIELD_FLUENCE]

    def forward_model(self,
                      absorption_cm: np.ndarray,
                      scattering_cm: np.ndarray,
                      anisotropy: np.ndarray,
                      illumination_geometry: Union[IlluminationGeometryBase, PhotoacousticDevice]) -> Dict:
        """
        runs the MCX simulations. Binary file containing scattering and absorption volumes is temporarily created as
        input for MCX. A JSON serializable file containing the configuration required by MCx is also generated.
        The set of flags parsed to MCX is built based on the Tags declared in `self.component_settings`, the results
        from MCX are used to populate an instance of Dict and returned.

        :param absorption_cm: array containing the absorption of the tissue in `cm` units
        :param scattering_cm: array containing the scattering of the tissue in `cm` units
        :param anisotropy: array containing the anisotropy of the volume defined by `absorption_cm` and `scattering_cm`
        :param illumination_geometry: and instance of `IlluminationGeometryBase` defining the illumination geometry
        :return: `Dict` containing the results of optical simulations, the keys in this dictionary-like object
            depend on the Tags defined in `self.component_settings`
        """
        if Tags.MCX_ASSUMED_ANISOTROPY in self.component_settings:
            _assumed_anisotropy = self.component_settings[Tags.MCX_ASSUMED_ANISOTROPY]
        else:
            _assumed_anisotropy = 0.9

        self.generate_mcx_bin_input(absorption_cm=absorption_cm,
                                    scattering_cm=scattering_cm,
                                    anisotropy=anisotropy,
                                    assumed_anisotropy=_assumed_anisotropy)

        fluence = None
        if isinstance(illumination_geometry, list):
            # per convention this list has at least two elements
            fluence = self.define_settings_and_execute(illumination_geometry[0], _assumed_anisotropy)
            for idx in range(1, len(illumination_geometry)):
                # we already looked at the 0th element, so go from 1 to n-1
                fluence += self.define_settings_and_execute(illumination_geometry[idx], _assumed_anisotropy)
            # average the fluence over all illumination geometries
            fluence = fluence / len(illumination_geometry)
        else:
            fluence = self.define_settings_and_execute(illumination_geometry, _assumed_anisotropy)
        struct._clearcache()
        # clean temporary files
        self.remove_mcx_output()
        return {Tags.DATA_FIELD_FLUENCE: fluence}

    def generate_mcx_json_input(self, settings_dict: Dict) -> None:
        """
        generates JSON serializable file with settings needed by MCX to run simulations.

        :param settings_dict: dictionary to be saved as .json
        :return: None
        """
        tmp_json_filename = self.global_settings[Tags.SIMULATION_PATH] + "/" + \
            self.global_settings[Tags.VOLUME_NAME] + ".json"
        self.mcx_json_config_file = tmp_json_filename
        self.temporary_output_files.append(tmp_json_filename)
        with open(tmp_json_filename, "w") as json_file:
            json.dump(settings_dict, json_file, indent="\t")

    def get_mcx_settings(self,
                         illumination_geometry: IlluminationGeometryBase,
                         assumed_anisotropy: np.ndarray,
                         **kwargs) -> Dict:
        """
        generates MCX-specific settings for simulations based on Tags in `self.global_settings` and
        `self.component_settings` . Among others, it defines the volume type, dimensions and path to binary file.

        :param illumination_geometry: and instance of `IlluminationGeometryBase` defining the illumination geometry
        :param assumed_anisotropy: scattering anisotropy assumed for the simulation medium
        :param kwargs: dummy, used for class inheritance
        :return: dictionary with settings to be used by MCX
        """
        mcx_volumetric_data_file = self.global_settings[Tags.SIMULATION_PATH] + "/" + \
            self.global_settings[Tags.VOLUME_NAME] + "_output"
        for name, suffix in self.mcx_output_suffixes.items():
            self.__setattr__(name, mcx_volumetric_data_file + suffix)
            self.temporary_output_files.append(mcx_volumetric_data_file + suffix)
        # BUGFIX: the original condition was `Tags.TIME_STEP and Tags.TOTAL_TIME in ...`,
        # which truth-tests the tag object itself instead of checking membership, so a
        # settings dict containing TOTAL_TIME but not TIME_STEP raised a KeyError below.
        if Tags.TIME_STEP in self.component_settings and Tags.TOTAL_TIME in self.component_settings:
            dt = self.component_settings[Tags.TIME_STEP]
            time = self.component_settings[Tags.TOTAL_TIME]
        else:
            # default to a single 5 ns frame
            time = 5e-09
            dt = 5e-09
        self.frames = int(time / dt)

        source = illumination_geometry.get_mcx_illuminator_definition(self.global_settings)
        settings_dict = {
            "Session": {
                "ID": mcx_volumetric_data_file,
                "DoAutoThread": 1,
                "Photons": self.component_settings[Tags.OPTICAL_MODEL_NUMBER_PHOTONS],
                "DoMismatch": 0
            },
            "Forward": {
                "T0": 0,
                "T1": time,
                "Dt": dt
            },
            "Optode": {
                "Source": source
            },
            "Domain": {
                "OriginType": 0,
                "LengthUnit": self.global_settings[Tags.SPACING_MM],
                "Media": [
                    {
                        "mua": 0,
                        "mus": 0,
                        "g": 1,
                        "n": 1
                    },
                    {
                        "mua": 1,
                        "mus": 1,
                        "g": assumed_anisotropy,
                        "n": 1
                    }
                ],
                "MediaFormat": "muamus_float",
                "Dim": [self.nx, self.ny, self.nz],
                "VolumeFile": self.global_settings[Tags.SIMULATION_PATH] + "/" +
                self.global_settings[Tags.VOLUME_NAME] + ".bin"
            }}
        if Tags.MCX_SEED not in self.component_settings:
            if Tags.RANDOM_SEED in self.global_settings:
                settings_dict["Session"]["RNGSeed"] = self.global_settings[Tags.RANDOM_SEED]
        else:
            settings_dict["Session"]["RNGSeed"] = self.component_settings[Tags.MCX_SEED]
        return settings_dict

    def get_command(self) -> List:
        """
        generates list of commands to be parsed to MCX in a subprocess

        :return: list of MCX commands
        """
        cmd = list()
        cmd.append(self.component_settings[Tags.OPTICAL_MODEL_BINARY_PATH])
        cmd.append("-f")
        cmd.append(self.mcx_json_config_file)
        cmd.append("-O")
        cmd.append("F")
        return cmd

    @staticmethod
    def run_mcx(cmd: List) -> None:
        """
        runs subprocess calling MCX with the flags built with `self.get_command`. Raises a `RuntimeError` if the
        subprocess fails to launch or exits with a non-zero code.

        :param cmd: list defining command to parse to `subprocess.run`
        :return: None
        """
        # BUGFIX: the original used a bare `except:` and never inspected the exit
        # code, although its docstring promised a non-zero-exit check. Catch only
        # launch failures (OSError) and verify the return code explicitly.
        try:
            results = subprocess.run(cmd)
        except OSError as e:
            raise RuntimeError(f"MCX failed to run: {cmd}") from e
        if results.returncode != 0:
            raise RuntimeError(f"MCX failed to run: {cmd}, results: {results}")

    def generate_mcx_bin_input(self,
                               absorption_cm: np.ndarray,
                               scattering_cm: np.ndarray,
                               anisotropy: np.ndarray,
                               assumed_anisotropy: np.ndarray) -> None:
        """
        generates binary file containing volume scattering and absorption as input for MCX

        :param absorption_cm: Absorption in units of per centimeter
        :param scattering_cm: Scattering in units of per centimeter
        :param anisotropy: Dimensionless scattering anisotropy
        :param assumed_anisotropy: anisotropy assumed for the scattering correction
        :return: None
        """
        absorption_mm, scattering_mm = self.pre_process_volumes(**{'absorption_cm': absorption_cm,
                                                                   'scattering_cm': scattering_cm,
                                                                   'anisotropy': anisotropy,
                                                                   'assumed_anisotropy': assumed_anisotropy})
        # stack volumes as expected by the "muamus_float" MCX media format
        op_array = np.asarray([absorption_mm, scattering_mm])
        [_, self.nx, self.ny, self.nz] = np.shape(op_array)
        # create a binary of the volume (Fortran order, as MCX expects)
        optical_properties_list = list(np.reshape(op_array, op_array.size, "F"))
        # free the large intermediates before packing to keep peak memory down
        del absorption_cm, absorption_mm, scattering_cm, scattering_mm, op_array
        gc.collect()
        mcx_input = struct.pack("f" * len(optical_properties_list), *optical_properties_list)
        del optical_properties_list
        gc.collect()
        tmp_input_path = self.global_settings[Tags.SIMULATION_PATH] + "/" + \
            self.global_settings[Tags.VOLUME_NAME] + ".bin"
        self.temporary_output_files.append(tmp_input_path)
        with open(tmp_input_path, "wb") as input_file:
            input_file.write(mcx_input)
        del mcx_input, input_file
        struct._clearcache()
        gc.collect()

    def read_mcx_output(self, **kwargs) -> Dict:
        """
        reads the temporary output generated with MCX

        :param kwargs: dummy, used for class inheritance compatibility
        :return: `Dict` instance containing the MCX output
        """
        with open(self.mcx_volumetric_data_file, 'rb') as f:
            data = f.read()
        data = struct.unpack('%df' % (len(data) / 4), data)
        fluence = np.asarray(data).reshape([self.nx, self.ny, self.nz, self.frames], order='F')
        fluence *= 100  # Convert from J/mm^2 to J/cm^2
        if np.shape(fluence)[3] == 1:
            fluence = np.squeeze(fluence, 3)
        results = dict()
        results[Tags.DATA_FIELD_FLUENCE] = fluence
        return results

    def remove_mcx_output(self) -> None:
        """
        deletes temporary MCX output files from the file system

        :return: None
        """
        for f in self.temporary_output_files:
            if os.path.isfile(f):
                os.remove(f)

    def pre_process_volumes(self, **kwargs) -> Tuple:
        """
        pre-process volumes before running simulations with MCX. The volumes are transformed to `mm` units

        :param kwargs: dictionary containing at least the keys `scattering_cm, absorption_cm, anisotropy` and
            `assumed_anisotropy`
        :return: `Tuple` of volumes after transformation
        """
        return self.volumes_to_mm(**kwargs)

    @staticmethod
    def volumes_to_mm(**kwargs) -> Tuple:
        """
        transforms volumes into `mm` units

        :param kwargs: dictionary containing at least the keys `scattering_cm, absorption_cm, anisotropy` and
            `assumed_anisotropy`
        :return: `Tuple` of volumes after transformation
        """
        scattering_cm = kwargs.get('scattering_cm')
        absorption_cm = kwargs.get('absorption_cm')
        absorption_mm = absorption_cm / 10
        scattering_mm = scattering_cm / 10

        # FIXME Currently, mcx only accepts a single value for the anisotropy.
        #   In order to use the correct reduced scattering coefficient throughout the simulation,
        #   we adjust the scattering parameter to be more accurate in the diffuse regime.
        #   This will lead to errors, especially in the quasi-ballistic regime.

        given_reduced_scattering = (scattering_mm * (1 - kwargs.get('anisotropy')))

        # If the anisotropy is 1, all scattering is forward scattering which is equal to no scattering at all
        if kwargs.get("assumed_anisotropy") == 1:
            scattering_mm = given_reduced_scattering * 0
        else:
            scattering_mm = given_reduced_scattering / (1 - kwargs.get('assumed_anisotropy'))
        # floor the scattering to keep MCX numerically stable
        scattering_mm[scattering_mm < 1e-10] = 1e-10
        return absorption_mm, scattering_mm

    @staticmethod
    def post_process_volumes(**kwargs) -> Tuple:
        """
        post-processes volumes after MCX simulations. Dummy function implemented for compatibility with inherited
        classes

        :param kwargs: dictionary containing at least the key `arrays` to be transformed
        :return: `Tuple` of the input arrays, unchanged
        """
        arrays = kwargs.get('arrays')
        return tuple(a for a in arrays)
| [
2,
30628,
55,
12,
8979,
15269,
8206,
25,
33448,
7458,
286,
49452,
8366,
11998,
11,
32975,
37,
57,
198,
2,
30628,
55,
12,
8979,
15269,
8206,
25,
33448,
2365,
988,
10299,
17231,
75,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
... | 2.194358 | 6,735 |
# Abbreviate every word longer than 10 characters as
# <first letter><inner length><last letter>; shorter words pass through.
for _ in range(int(input())):
    word = input()
    if len(word) > 10:
        print(f"{word[0]}{len(word) - 2}{word[-1]}")
    else:
        print(word)
| [
1640,
1312,
287,
2837,
7,
600,
7,
15414,
28955,
2599,
198,
220,
220,
220,
299,
796,
5128,
3419,
198,
220,
220,
220,
611,
18896,
7,
77,
8,
29,
940,
25,
198,
220,
220,
220,
220,
220,
220,
220,
3601,
7,
77,
58,
15,
4357,
18896,
7... | 1.766234 | 77 |
import sys
from pathlib import Path
import os
import setuptools
# Long description shown on PyPI, taken from the repository README.
long_description = (Path(__file__).parent / "README.md").read_text()
if sys.version_info < (3, 6):
    sys.exit("Python>=3.6 is required by Forte.")
# Extract VERSION from forte/version.py without importing the package
# (importing could fail before its dependencies are installed).
VERSION_VAR = "VERSION"
version = {}
with open(
    os.path.join(os.path.dirname(os.path.abspath(__file__)), "forte/version.py")
) as fp:
    exec(fp.read(), version)
if VERSION_VAR not in version or not version[VERSION_VAR]:
    raise ValueError(
        f"Cannot find {VERSION_VAR} in forte/version.py. Please make sure that "
        f"{VERSION_VAR} is correctly defined and formatted in forte/version.py."
    )
setuptools.setup(
    name="forte",
    version=version[VERSION_VAR],
    url="https://github.com/asyml/forte",
    description="Forte is extensible framework for building composable and "
    "modularized NLP workflows.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="Apache License Version 2.0",
    packages=setuptools.find_namespace_packages(
        include=["ft.*", "ftx.*", "forte"]
    ),
    include_package_data=True,
    platforms="any",
    install_requires=[
        "sortedcontainers>=2.1.0",
        "numpy>=1.16.6",
        "jsonpickle>=1.4",
        "pyyaml>=5.4",
        "smart-open>=1.8.4",
        "typed_astunparse>=2.1.4",
        "funcsigs>=1.0.2",
        "typed_ast>=1.5.0",
        "jsonschema>=3.0.2",
        'typing>=3.7.4;python_version<"3.5"',
        "typing-inspect>=0.6.0",
        'dataclasses~=0.7;python_version<"3.7"',
        'importlib-resources>=5.1.4;python_version<"3.7"',
        "asyml-utilities",
    ],
    # Optional dependency groups, installable as e.g. forte[data_aug].
    extras_require={
        "data_aug": [
            "transformers>=4.15.0",
            "nltk",
            "texar-pytorch>=0.1.4",
            "requests",
        ],
        "ir": ["texar-pytorch>=0.1.4", "tensorflow>=1.15.0"],
        "remote": ["fastapi>=0.65.2", "uvicorn>=0.14.0", "requests"],
        "audio_ext": ["soundfile>=0.10.3"],
        "stave": ["stave>=0.0.1.dev12"],
        "models": [
            "torch>=1.1.0",
            "torchtext==0.4.0",
            "tqdm>=4.36.1",
            "texar-pytorch>=0.1.4",
            "tensorflow>=1.15.0",
        ],
        "test": [
            "ddt",
            "testfixtures",
            "testbook",
            "termcolor",
            "transformers>=4.15.0",
            "nltk",
        ],
        "wikipedia": ["rdflib==4.2.2"],
        # transformers 4.10.0 will break the translation model we used here
        "nlp": ["texar-pytorch>=0.1.4"],
        "extractor": ["texar-pytorch>=0.1.4"],
    },
    entry_points={
        "console_scripts": [
            "generate_ontology = forte.command_line.generate_ontology.__main__:main"
        ]
    },
    classifiers=[
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
)
| [
11748,
25064,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
28686,
198,
198,
11748,
900,
37623,
10141,
198,
198,
6511,
62,
11213,
796,
357,
15235,
7,
834,
7753,
834,
737,
8000,
1220,
366,
15675,
11682,
13,
9132,
11074,
961,
62,
5239,
... | 1.980242 | 1,569 |
import discord
from discord.ext import commands
from googleapiclient.discovery import build
youtube=build('youtube','v3',developerKey='')
client=commands.Bot(command_prefix = ">")
@client.event
@client.event
#await message.channel.send(message.content)
@client.command()
@client.command()
@client.command()
@client.command(aliases=["mb","maari boys"])
@client.command()
@client.command()
client.run("")
| [
11748,
36446,
201,
198,
6738,
36446,
13,
2302,
1330,
9729,
201,
198,
6738,
23645,
499,
291,
75,
1153,
13,
67,
40821,
1330,
1382,
201,
198,
201,
198,
11604,
28,
11249,
10786,
11604,
41707,
85,
18,
3256,
16244,
263,
9218,
28,
7061,
8,
... | 2.551136 | 176 |
import xlrd
import numpy as np
import sys
import math
a = 3
b = 7
mean = round( a / b,2 )
print(mean)
| [
11748,
2124,
75,
4372,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
11748,
10688,
198,
64,
796,
513,
198,
65,
796,
767,
198,
32604,
796,
2835,
7,
257,
1220,
275,
11,
17,
1267,
198,
4798,
7,
32604,
8,
198
] | 2.487805 | 41 |
from datetime import date
import pytest
from tvseries.core.forms import TVSerieForm
from tvseries.config import TestConfig
@pytest.mark.usefixtures('client_class')
| [
6738,
4818,
8079,
1330,
3128,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
31557,
25076,
13,
7295,
13,
23914,
1330,
3195,
7089,
494,
8479,
198,
6738,
31557,
25076,
13,
11250,
1330,
6208,
16934,
628,
198,
31,
9078,
9288,
13,
4102,
13,
... | 3.36 | 50 |
import numpy as np
import numba
import scipy
def estimate_nfactor_act(X, C=1):
    """
    estimate number of factors given data matrix X (n*p)

    threshold on eigenvalues of correlation matrix (bias corrected)
    https://arxiv.org/abs/1909.10710
    K = # eigenvalues of sample corr that > 1 + sqrt(p / (n-1))
    """
    n_obs, n_feat = X.shape
    # Eigenvalues of the sample correlation matrix, sorted large to small.
    eigvals = np.linalg.eigvalsh(np.corrcoef(X.T))[::-1]
    # Bias-corrected eigenvalues.
    adjusted = np.zeros(n_feat - 1)
    for idx in range(n_feat - 1):
        m_val = (
            np.sum(1.0 / (eigvals[(idx + 1):] - eigvals[idx]))
            + 4.0 / (eigvals[idx + 1] - eigvals[idx])
        ) / (n_feat - idx)
        ratio = (n_feat - idx) / (n_obs - 1)
        adjusted[idx] = -1.0 / (-(1 - ratio) / eigvals[idx] + ratio * m_val)
    # Number of factors = largest index whose corrected eigenvalue clears
    # the threshold, plus one.
    cutoff = 1.0 + np.sqrt(n_feat / (n_obs - 1)) * C
    return np.where(adjusted > cutoff)[0][-1] + 1
def sign(x):
    """Return 1 for positive x, -1 for negative x, and x itself when falsy (0).

    Replaces the original lambda assignment (PEP 8 E731) while preserving the
    exact short-circuit semantics of ``x and (1 if x > 0 else -1)``.
    """
    return (1 if x > 0 else -1) if x else x
def POET(Y, K=-np.inf, C=-np.inf, thres='soft', matrix='cor'):
    """
    Estimates large covariance matrices in approximate factor models by thresholding principal orthogonal complements.

    Y:p by n matrix of raw data, where p is the dimensionality, n is the sample size. It is recommended that Y is de-meaned, i.e., each row has zero mean

    K: number of factors

    C: float, the positive constant for thresholding.C=0.5 performs quite well for soft thresholding

    thres: str, choice of thresholding ('soft', 'hard' or 'scad'). K=0 corresponds to threshoding the sample covariance directly

    matrix: the option of thresholding either correlation or covairance matrix.'cor': threshold the error correlation matrix then transform back to covariance matrix. 'vad': threshold the error covariance matrix directly.

    Return:

    SigmaY: estimated p by p covariance matrix of y_t

    SigmaU: estimated p by p covariance matrix of u_t

    factors: estimated unobservable factors in a K by T matrix form

    loadings: estimated factor loadings in a p by K matrix form
    """
    if K == -np.inf:
        try:
            K = estimate_nfactor_act(Y)
        except IndexError:
            print("ill-formed matrix Y, provide K with suggestion (K>0 and K<8)")
            return

    # Y: p feature * n obs
    p, n = Y.shape
    Y = Y - Y.mean(axis=1)[:, np.newaxis]

    if K > 0:
        # PCA on the de-meaned data: factors from the top-K eigenvectors.
        Dd, V = np.linalg.eigh(Y.T @ Y)
        Dd = Dd[::-1]
        V = np.flip(V, axis=1)
        F = np.sqrt(n) * V[:, :K]  # F is n by K
        LamPCA = Y @ F / n
        uhat = Y - LamPCA @ F.T  # p by n residual (idiosyncratic part)
        Lowrank = LamPCA @ LamPCA.T
        rate = 1 / np.sqrt(p) + np.sqrt((np.log(p)) / n)
    else:
        uhat = Y  # Sigma_y itself is sparse
        rate = np.sqrt((np.log(p)) / n)
        Lowrank = np.zeros([p, p])

    SuPCA = uhat @ uhat.T / n
    SuDiag = np.diag(np.diag(SuPCA))
    if matrix == 'cor':
        R = np.linalg.inv(SuDiag**(1/2)) @ SuPCA @ np.linalg.inv(SuDiag**(1/2))
    if matrix == 'vad':
        R = SuPCA

    if C == -np.inf:
        C = POETCmin(Y, K, thres, matrix) + 0.1

    # Entry-wise adaptive thresholds lambda_[i,j] from the residual variability.
    uu = np.zeros([p, p, n])
    roottheta = np.zeros([p, p])
    lambda_ = np.zeros([p, p])
    for i in range(p):
        for j in range(i+1):  # symmetric matrix
            uu[i, j, :] = uhat[i, ] * uhat[j, ]
            roottheta[i, j] = np.std(uu[i, j, :], ddof=1)
            lambda_[i, j] = roottheta[i, j] * rate * C
            lambda_[j, i] = lambda_[i, j]

    Rthresh = np.zeros([p, p])
    if thres == 'soft':
        for i in range(p):
            for j in range(i+1):
                if np.abs(R[i, j]) < lambda_[i, j] and j < i:
                    Rthresh[i, j] = 0
                elif j == i:
                    Rthresh[i, j] = R[i, j]
                else:
                    Rthresh[i, j] = sign(R[i, j]) * (abs(R[i, j]) - lambda_[i, j])
                Rthresh[j, i] = Rthresh[i, j]
    elif thres == 'hard':
        for i in range(p):
            for j in range(i+1):
                if np.abs(R[i, j]) < lambda_[i, j] and j < i:
                    Rthresh[i, j] = 0
                else:
                    Rthresh[i, j] = R[i, j]
                Rthresh[j, i] = Rthresh[i, j]
    elif thres == 'scad':
        for i in range(p):
            for j in range(i+1):
                if j == i:
                    Rthresh[i, j] = R[i, j]
                # BUGFIX: the original wrote `abs(R[i,j] < lambda_[i,j])`, taking the
                # absolute value of a boolean, so the branch compared 0/1 instead of
                # |R[i,j]| against the threshold.
                elif abs(R[i, j]) < lambda_[i, j]:
                    Rthresh[i, j] = 0
                elif abs(R[i, j]) < 2 * lambda_[i, j]:
                    Rthresh[i, j] = sign(R[i, j]) * (abs(R[i, j]) - lambda_[i, j])
                elif abs(R[i, j]) < 3.7 * lambda_[i, j]:
                    Rthresh[i, j] = ((3.7 - 1) * R[i, j] - sign(R[i, j]) * 3.7 * lambda_[i, j]) / (3.7 - 2)
                else:
                    Rthresh[i, j] = R[i, j]
                Rthresh[j, i] = Rthresh[i, j]

    SigmaU = np.zeros([p, p])
    if matrix == 'cor':
        # BUGFIX: the original computed `SuDiag**(1/2) @ Rthresh * SuDiag**(1/2)`;
        # the trailing elementwise `*` by a diagonal matrix zeroed every
        # off-diagonal entry. The correlation-to-covariance back-transform is the
        # matrix product D^{1/2} R D^{1/2} (as in the reference POET code).
        SigmaU = SuDiag**(1/2) @ Rthresh @ SuDiag**(1/2)
    if matrix == 'vad':
        SigmaU = Rthresh

    SigmaY = SigmaU + Lowrank

    result = DotDict({'SigmaU': SigmaU,
                      'SigmaY': SigmaY,
                      'factors': F.T,
                      'loadings': LamPCA})
    return result
def POETCmin(Y,K,thres,matrix):
    """
    This function is for determining the minimum constant in the threshold that guarantees the positive
    definiteness of the POET estimator.
    """
    # NOTE(review): `f` is never defined in this module excerpt. In the
    # reference POET implementation it is the minimum eigenvalue of the
    # thresholded estimator as a function of the constant C — confirm the
    # helper exists before calling this function, otherwise this raises a
    # NameError.
    p, n = Y.shape
    # Bisection bracket check: a sign change of f on [-50, 50] means a root exists.
    if f(50)*f(-50)<0:
        roots = scipy.optimize.fsolve(f,[-50,50])
        result = max(0,roots)
    else:
        result = 0
    return result
def POETKhat(Y):
    """
    This function is for calculating the optimal number of factors in an approximate factor model.

    Y is the p-by-n data matrix (p variables, n observations).  Two families
    of estimators are computed:
      * Hallin & Liska style (K1HL, K2HL): information criteria evaluated on
        nested subsets of the data over a grid of penalty constants c.
      * Bai & Ng style (K1BN, K2BN): information criteria on the full sample.
    Returns a DotDict with keys K1HL, K2HL, K1BN, K2BN and the final IC array.
    """
    p, n = Y.shape
    # Center each variable (row) before extracting principal components.
    Y = Y- Y.mean(axis=1)[:, np.newaxis]
    #Hallin and Liska method
    # Grid of candidate penalty constants c in (0, 5].
    c=np.arange(0.05, 5.05,0.05)
    re=20
    rmax=10
    IC=np.zeros([2,re,rmax,100])
    gT1HL, gT2HL, pi, ni=np.ones(20),np.ones(20),np.ones(20),np.ones(20)
    for i in range(re): #generate the subsets, "re" of them
        # pi[i] x ni[i] is the size of the i-th nested subsample; the last
        # subset is forced to be the full sample.
        pi[i]=min(i*np.floor(p/re)+min(p,5),p)
        ni[i]=min(i*np.floor(n/re)+min(n,5),n)
        if i==re-1:
            pi[i]=p
            ni[i]=n
        Yi=Y[:int(pi[i]),:int(ni[i])]
        frob=np.zeros(rmax)
        # NOTE(review): `penal` is assigned but never used in this loop.
        penal=np.zeros(rmax)
        for k in range(min(int(pi[i]),int(ni[i]),rmax)):
            # PCA via the eigendecomposition of Yi'Yi; eigh returns ascending
            # eigenvalues, so flip to descending order.
            Dd, V = np.linalg.eigh(Yi.T @ Yi)
            Dd = Dd[::-1]
            V = np.flip(V,axis=1)
            F = V[:,:k+1]
            LamPCA = Yi @ F / ni[i]
            uhat = Yi - LamPCA @ (F.T) # pi by ni
            # Mean squared residual (Frobenius norm of uhat scaled by pi*ni).
            frob[k]=sum(np.diag(uhat @ (uhat.T)))/(pi[i]*ni[i])
            gT1HL[i]=np.log((pi[i]*ni[i])/(pi[i]+ni[i]))*(pi[i]+ni[i])/(pi[i]*ni[i])
            gT2HL[i]=np.log(min(pi[i],ni[i]))*(pi[i]+ni[i])/(pi[i]*ni[i])
            for l in range(100): # only fills in the ICs up to k, which may be <rmax
                IC[0,i,k,l]=np.log(frob[k])+c[l]*(k+1)*gT1HL[i]
                IC[1,i,k,l]=np.log(frob[k])+c[l]*(k+1)*gT2HL[i]
    # For each subset and penalty constant, pick the factor count minimizing IC.
    rhat=np.zeros([2,re,100])
    for i in range(re):
        for l in range(100):
            m=min(pi[i],ni[i],rmax)
            temp1=np.argmin(IC[0,i,:int(m),l])
            rhat[0,i,l]=temp1
            temp2=np.argmin(IC[1,i,:int(m),l])
            rhat[1,i,l]=temp2
    # Convert 0-based argmin indices to factor counts.
    rhat+=1
    # Choose the first c at which the selected K is stable (zero std) across
    # all subsets.
    sc1, sc2 = np.zeros(100), np.zeros(100)
    for l in range(100):
        sc1[l] = np.std(rhat[0,:,l],ddof=1)
        sc2[l] = np.std(rhat[1,:,l],ddof=1)
    c1vec=np.where(sc1==0)
    ctemp1=c1vec[0][0]
    c1=c[ctemp1]
    K1HL=rhat[0,0,ctemp1]
    c2vec=np.where(sc2==0)
    ctemp2=c2vec[0][0]
    c2=c[ctemp2]
    K2HL=rhat[1,0,ctemp2]
    # Bai & Ng style criteria on the full sample (penalty constant fixed at 1).
    c=1
    rmax=10
    IC=np.zeros([2,rmax])
    frob, penal = np.zeros(rmax), np.zeros(rmax)
    for k in range(rmax):
        # NOTE(review): this eigendecomposition does not depend on k and could
        # be hoisted out of the loop (left unchanged here).
        Dd, V = np.linalg.eigh(Y.T @ Y)
        Dd = Dd[::-1]
        V = np.flip(V,axis=1)
        F = V[:,:k+1]
        LamPCA = Y @ F / n
        uhat = Y - LamPCA @ (F.T) # p by n
        frob[k]=sum(np.diag(uhat @ uhat.T))/(p*n)
        # NOTE(review): the HL penalty above uses log((p*n)/(p+n)); this line
        # computes log(log(p*n)/(p+n)) -- the inner np.log looks like it may
        # be an extra factor relative to the Bai & Ng (2002) g1 penalty.
        # Confirm against the reference implementation before changing.
        gT1BN=np.log(np.log((p*n))/(p+n))*(p+n)/(p*n)
        gT2BN=np.log(min(p,n))*(p+n)/(p*n)
        IC[0,k]=np.log(frob[k]) +(k+1)*gT1BN
        IC[1,k]=np.log(frob[k]) +(k+1)*gT2BN
    K1BN = np.argmin(IC[0,:])
    K2BN = np.argmin(IC[1,:])
    result = DotDict({"K1HL":K1HL,"K2HL":K2HL,"K1BN":K1BN,"K2BN":K2BN,"IC":IC})
    return result
if __name__ == "__main__":
    # Smoke test: run POET with soft thresholding on a small symmetric
    # 4 x 4 matrix.
    rows = [
        [0.8841665, -0.2017119, 0.7010793, -0.8378639],
        [-0.2017119, 2.2415674, -0.9365252, 1.8725689],
        [0.7010793, -0.9365252, 1.7681529, -0.6699727],
        [-0.8378639, 1.8725689, -0.6699727, 2.5185530],
    ]
    mat = np.array(rows)
    a = POET(mat, K=3, C=0.5, thres='soft', matrix='vad')
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
997,
7012,
201,
198,
11748,
629,
541,
88,
201,
198,
201,
198,
201,
198,
4299,
8636,
62,
77,
31412,
62,
529,
7,
55,
11,
327,
28,
16,
2599,
201,
198,
220,
220,
220,
37227,
201,
198,
... | 1.714644 | 5,190 |
metainfo = """<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<metainfo>
<schemaVersion>2.0</schemaVersion>
<application>
<name>LLAP</name>
<comment>LLAP is a daemon service that works with a cache and works on SQL constructs.</comment>
<version>%(version)s</version>
<exportedConfigs>None</exportedConfigs>
<exportGroups>
<exportGroup>
<name>Servers</name>
<exports>
<export>
<name>instances</name>
<value>${LLAP_HOST}:${site.global.listen_port}</value>
</export>
</exports>
</exportGroup>
</exportGroups>
<components>
<component>
<name>LLAP</name>
<category>MASTER</category>
<compExports>Servers-instances</compExports>
<commandScript>
<script>scripts/llap.py</script>
<scriptType>PYTHON</scriptType>
</commandScript>
</component>
</components>
<osSpecifics>
<osSpecific>
<osType>any</osType>
<packages>
<package>
<type>tarball</type>
<name>files/llap-%(version)s.tar.gz</name>
</package>
</packages>
</osSpecific>
</osSpecifics>
</application>
</metainfo>
"""
appConfig = """
{
"schema": "http://example.org/specification/v2.0.0",
"metadata": {
},
"global": {
"application.def": ".slider/package/LLAP/llap-%(version)s.zip",
"java_home": "%(java_home)s",
"site.global.app_user": "yarn",
"site.global.app_root": "${AGENT_WORK_ROOT}/app/install/",
"site.global.app_tmp_dir": "${AGENT_WORK_ROOT}/tmp/",
"site.global.app_logger": "%(daemon_logger)s",
"site.global.app_log_level": "%(daemon_loglevel)s",
"site.global.additional_cp": "%(hadoop_home)s",
"site.global.daemon_args": "%(daemon_args)s",
"site.global.library_path": "%(hadoop_home)s/lib/native",
"site.global.memory_val": "%(heap)d",
"site.global.pid_file": "${AGENT_WORK_ROOT}/app/run/llap-daemon.pid",
"internal.chaos.monkey.probability.amlaunchfailure": "0",
"internal.chaos.monkey.probability.containerfailure": "%(monkey_percentage)d",
"internal.chaos.monkey.interval.seconds": "%(monkey_interval)d",
"internal.chaos.monkey.enabled": "%(monkey_enabled)s"%(slider_appconfig_global_append)s
},
"components": {
"slider-appmaster": {
"jvm.heapsize": "%(slider_am_jvm_heapsize)dM",
"slider.hdfs.keytab.dir": "%(slider_keytab_dir)s",
"slider.am.login.keytab.name": "%(slider_keytab)s",
"slider.keytab.principal.name": "%(slider_principal)s"
}
}
}
"""
resources = """
{
"schema" : "http://example.org/specification/v2.0.0",
"metadata" : {
},
"global" : {
"yarn.log.include.patterns": ".*\\\\.done"
},
"components": {
"slider-appmaster": {
"yarn.memory": "%(slider.am.container.mb)d",
"yarn.component.instances": "1"
},
"LLAP": {
"yarn.role.priority": "1",
"yarn.component.instances": "%(instances)d",
"yarn.resource.normalization.enabled": "false",
"yarn.memory": "%(container.mb)d",
"yarn.component.placement.policy" : "%(placement)d"
}
}
}
"""
# placement policy "4" is a bit-mask
# only bit set is Slider PlacementPolicy.ANTI_AFFINITY_REQUIRED(4)
runner = """
#!/bin/bash -e
BASEDIR=$(dirname $0)
slider stop %(name)s --wait 10 || slider stop %(name)s --force --wait 30
slider destroy %(name)s --force || slider destroy %(name)s
slider install-package --name LLAP --package $BASEDIR/llap-%(version)s.zip --replacepkg
slider create %(name)s --resources $BASEDIR/resources.json --template $BASEDIR/appConfig.json %(queue.string)s
"""
| [
4164,
391,
6513,
796,
37227,
47934,
19875,
2196,
2625,
16,
13,
15,
13984,
29,
198,
27,
28112,
198,
220,
220,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
393,
517,
198,
220,
220,
18920,
5964,
11704,
13,
220,
4091... | 2.391703 | 1,856 |
import pytest
from validoot.operations import And, Or
from validoot.exceptions import ValidationError
| [
11748,
12972,
9288,
198,
198,
6738,
4938,
1025,
13,
3575,
602,
1330,
843,
11,
1471,
198,
6738,
4938,
1025,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198
] | 3.814815 | 27 |
#!/usr/bin/env python3
import csv
import numpy as np
from sklearn import decomposition
import matplotlib
import matplotlib.cm
matplotlib.use("Agg") # don't try to use $DISPLAY
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.gridspec as gridspec
from circledist import circledist
import matplotlib_style
if __name__ == "__main__":
    # Scatter-plot successive PC pairs of a covariance matrix, colouring
    # points by great-circle distance from a reference location, then append
    # a scree plot; everything goes into one multi-page PDF.
    # NOTE(review): parse_args/parse_locations/parse_list are not defined in
    # the visible imports -- presumably defined earlier in this file or in a
    # truncated import; confirm.
    args = parse_args()
    loc = parse_locations(args.csv_file)
    indlist = parse_list(args.ind_file)
    # Indices of individuals with no known location (plotted as black "x").
    rmidx = [i for i, ind in enumerate(indlist) if ind not in loc]
    for i in rmidx:
        print(f"{indlist[i]} has no location")
    lats = [loc[ind][0] for ind in indlist if ind in loc]
    lons = [loc[ind][1] for ind in indlist if ind in loc]
    # Reference point; circledist is called as (lon, lat, lon, lat).
    ref_lat, ref_lon = 30, 120
    dists = [
        circledist(ref_lon, ref_lat, loc[ind][1], loc[ind][0])
        for ind in indlist
        if ind in loc
    ]
    n_pcs = 6
    C = np.loadtxt(args.cov_file)
    pca = decomposition.PCA(n_components=n_pcs)
    pc = pca.fit_transform(C)
    pdf = PdfPages(args.out_file)
    # 16:9 figure proportions.
    fig_w, fig_h = plt.figaspect(9.0 / 16.0)
    cmap = matplotlib.cm.get_cmap("plasma")
    # Map distances onto [0, 1] for the colormap.
    distnorm = matplotlib.colors.Normalize(vmin=np.min(dists), vmax=np.max(dists))
    for pc_i in range(n_pcs - 1):
        fig1 = plt.figure(figsize=(fig_w, fig_h))
        gs1 = gridspec.GridSpec(1, 1)
        ax1 = fig1.add_subplot(gs1[0])
        # Drop location-less individuals from the coloured scatter; they are
        # re-added below as black crosses.
        x = np.delete(pc[:, pc_i], rmidx)
        y = np.delete(pc[:, pc_i + 1], rmidx)
        ax1.scatter(
            x,
            y,
            s=50,
            marker="o",
            alpha=1,
            lw=1,
            # edgecolor=cmap(latnorm(lats)),
            # edgecolor=cmap(lonnorm(lons)),
            facecolor=cmap(distnorm(dists)),
            # facecolor="none",
        )
        for i in rmidx:
            ax1.scatter(pc[i, pc_i], pc[i, pc_i + 1], s=50, marker="x", c="black")
        ax1.set_xlabel(f"PC{pc_i+1}")
        ax1.set_ylabel(f"PC{pc_i+2}")
        cb = fig1.colorbar(matplotlib.cm.ScalarMappable(norm=distnorm, cmap=cmap))
        cb.ax.get_yaxis().labelpad = 15
        cb.ax.set_ylabel("Distance from 120$^\circ$E, 30$^\circ$N", rotation=270)
        fig1.tight_layout()
        pdf.savefig(figure=fig1)
    # Scree plot on the final PDF page.
    fig1 = plt.figure(figsize=(fig_w, fig_h))
    gs1 = gridspec.GridSpec(1, 1)
    ax1 = fig1.add_subplot(gs1[0])
    # NOTE(review): this plots explained_variance_ (absolute variances) but
    # labels it "Percentage variance explained" -- explained_variance_ratio_
    # may have been intended; confirm.
    ax1.bar(list(range(1, n_pcs + 1)), pca.explained_variance_)
    ax1.set_xlabel("Principal component")
    ax1.set_ylabel("Percentage variance explained")
    ax1.set_title(
        # NOTE(review): "\%" is not a valid Python escape (it stays as
        # backslash-percent, presumably for TeX-style rendering) and emits a
        # DeprecationWarning on newer Pythons -- confirm intent.
        "Scree plot (total variance explained: {:.2f}\%)".format(
            np.sum(pca.explained_variance_)
        )
    )
    fig1.tight_layout()
    pdf.savefig(figure=fig1)
    pdf.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
269,
21370,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
1330,
26969,
9150,
198,
198,
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
112... | 2.002911 | 1,374 |
# -*- coding: utf-8 -*-
# Copyright SweetCase Project, Re_Coma(Ha Jeong Hyun). All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import os.path
import json
# Config Name Labels
"""
서버 설정 파일로 필요시
추가 가능
"""
class ConfigNameLabels(Enum):
    """
    Keys of Config.json used to configure the server.

    HOST: address of the Redis server
    PORT: port used to connect to the Redis server
    PSWD: password used to connect to the Redis server
    MAX_QUEUE_SIZE: maximum size of each system's queue
    MAX_NOTE_SIZE: maximum number of notes the AI system can compose
        (TODO: needs further research, per the original note)
    USE_GPU_VALUE: GPU-usage setting -- not described in the original
        docstring; confirm its semantics against the config consumer
    SERIAL: authentication code used to verify the connection
    MODELS: locations where the AI models are stored (varies by genre)
    TMP_DIR: location of the temporary file where a finished AI composition
        is stored before being sent to the client
    """
    HOST = "host"
    PORT = "port"
    PSWD = "pswd"
    MAX_QUEUE_SIZE = "max_queue_size"
    MAX_NOTE_SIZE = "max_note_size"
    USE_GPU_VALUE = "use_gpu_value"
    SERIAL = "serial"
    MODELS = "models"
    TMP_DIR = "tmp_dir"
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
15335,
20448,
4935,
11,
797,
62,
5377,
64,
7,
23303,
3852,
506,
6707,
403,
737,
1439,
6923,
33876,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,... | 1.566411 | 911 |
import enum
| [
11748,
33829,
628,
198
] | 3.5 | 4 |
# -*-coding:utf-8-*-
import time
import datetime
import os
import random
import shelve
import threading
from queue import Queue
import win32api, win32gui, win32con, win32com.client
from ctypes import *
from PIL import ImageGrab, Image as PLI_Image, ImageTk
from tkinter import *
from tkinter import ttk
import tkinter.messagebox as messagebox
from tkinter.scrolledtext import ScrolledText
# NOTE(review): Application is not defined in the visible imports --
# presumably defined earlier in this file; confirm.
app = Application()
# Hide the console window (unless launched with the 'test' argument).
# NOTE(review): `sys` is used below but not imported in the visible import
# block -- presumably imported earlier in the file; confirm.
try:
    test = sys.argv[1]
except IndexError:
    test = False
if test == 'test':
    pass
else:
    # windll comes from `from ctypes import *` above; hide and release the
    # attached console so only the Tk window remains visible.
    whnd = windll.kernel32.GetConsoleWindow()
    if whnd:
        windll.user32.ShowWindow(whnd, 0)
        windll.kernel32.CloseHandle(whnd)
# Set window title, position the window, and enter the Tk main loop.
app.master.title('就你破势多')
app.init_window_place(app.master, 1.1, 4)
app.mainloop()
| [
2,
532,
9,
12,
66,
7656,
25,
40477,
12,
23,
12,
9,
12,
198,
198,
11748,
640,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
7497,
303,
198,
11748,
4704,
278,
198,
6738,
16834,
1330,
4670,
518,
198,
11748,
... | 2.51505 | 299 |
import pytest
from app.models import User
| [
11748,
12972,
9288,
198,
6738,
598,
13,
27530,
1330,
11787,
628
] | 3.909091 | 11 |
# -*- coding: utf-8 -*-
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements the TCP replication protocol used by synapse to
communicate between the master process and its workers (when they're enabled).
Further details can be found in docs/tcp_replication.rst
Structure of the module:
* client.py - the client classes used for workers to connect to master
* command.py - the definitions of all the valid commands
 * protocol.py - contains both the client and server protocol implementations,
     these should not be used directly
 * resource.py - the server classes that accept and handle client connections
 * streams.py - the definitions of all the valid streams
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
2177,
20650,
5844,
602,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
... | 3.846154 | 325 |
# Exercise: extract the filename component from a path -- here, this
# script's own path -- and print it.
import os

script_name = os.path.basename(__file__)
print(script_name)
2,
16594,
257,
11361,
1430,
284,
7925,
262,
29472,
422,
257,
1813,
3108,
13,
198,
11748,
28686,
198,
4798,
7,
418,
13,
6978,
13,
12093,
12453,
7,
834,
7753,
834,
4008
] | 3.548387 | 31 |
import os
import matplotlib
matplotlib.use("Agg")
from plateo import AssemblyPlan
from plateo.parsers import plate_from_content_spreadsheet
from plateo.containers.plates import Plate4ti0960
from plateo.exporters import (picklist_to_labcyte_echo_picklist_file,
PlateTextPlotter,
AssemblyPicklistGenerator,
picklist_to_assembly_mix_report)
from plateo.tools import human_volume
import flametree
from pandas import pandas
from collections import OrderedDict
import matplotlib.pyplot as plt
from Bio import SeqIO
| [
11748,
28686,
198,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
7203,
46384,
4943,
198,
198,
6738,
7480,
78,
1330,
10006,
20854,
198,
6738,
7480,
78,
13,
79,
945,
364,
1330,
7480,
62,
6738,
62,
11299,
62,
43639,
21... | 2.457143 | 245 |
from sympy import symbols, diff, solve, Eq
from task1 import get_Euler_descr

# Symbolic variables shared by the exercises: presumably x1..x3 are spatial
# coordinates, t is time and v1..v3 are velocity components -- confirm
# against task1's usage.
x1, x2, x3, t, v1, v2, v3 = symbols('x1 x2 x3 t v1 v2 v3')

#from testdata import eq1, eq2, eq3
#print(get_velocity_Euler(eq1, eq2, eq3))
#print(get_acceleration_Euler(eq1, eq2, eq3))
| [
6738,
10558,
88,
1330,
14354,
11,
814,
11,
8494,
11,
412,
80,
198,
6738,
4876,
16,
1330,
651,
62,
36,
18173,
62,
20147,
81,
198,
198,
87,
16,
11,
2124,
17,
11,
2124,
18,
11,
256,
11,
410,
16,
11,
410,
17,
11,
410,
18,
796,
1... | 2.165289 | 121 |
"""
Class for the hex game logic. Unaltered skeleton code.
EDIT: Changed board representation from dict to numpy.ndarray.
EDIT: Changed numbers for the player-color indicators.
:version: FINAL
:date:
:author: Aske Plaat
:edited by: Joery de Vries
"""
import numpy as np
| [
37811,
198,
9487,
329,
262,
17910,
983,
9156,
13,
791,
282,
4400,
18328,
2438,
13,
198,
198,
24706,
25,
32068,
3096,
10552,
422,
8633,
284,
299,
32152,
13,
358,
18747,
13,
198,
24706,
25,
32068,
3146,
329,
262,
2137,
12,
8043,
21337,
... | 3.37037 | 81 |
import re
import sys
from io import StringIO
from corehq.util.teeout import tee_output
from testil import assert_raises, eq
| [
11748,
302,
198,
11748,
25064,
198,
6738,
33245,
1330,
10903,
9399,
198,
198,
6738,
4755,
71,
80,
13,
22602,
13,
660,
68,
448,
1330,
30479,
62,
22915,
198,
6738,
1332,
346,
1330,
6818,
62,
430,
2696,
11,
37430,
628,
628,
628,
198
] | 3.119048 | 42 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetOutpostsResult',
'AwaitableGetOutpostsResult',
'get_outposts',
]
@pulumi.output_type
class GetOutpostsResult:
    """
    A collection of values returned by getOutposts.
    """
    # NOTE: the checked-in version of this generated class had its __init__
    # and four property bodies missing (bare stacked decorators -- a
    # SyntaxError); they are restored here following the standard Pulumi
    # code-generator pattern.
    def __init__(__self__, arns=None, availability_zone=None, availability_zone_id=None, id=None, ids=None, owner_id=None, site_id=None):
        if arns and not isinstance(arns, list):
            raise TypeError("Expected argument 'arns' to be a list")
        pulumi.set(__self__, "arns", arns)
        if availability_zone and not isinstance(availability_zone, str):
            raise TypeError("Expected argument 'availability_zone' to be a str")
        pulumi.set(__self__, "availability_zone", availability_zone)
        if availability_zone_id and not isinstance(availability_zone_id, str):
            raise TypeError("Expected argument 'availability_zone_id' to be a str")
        pulumi.set(__self__, "availability_zone_id", availability_zone_id)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if ids and not isinstance(ids, list):
            raise TypeError("Expected argument 'ids' to be a list")
        pulumi.set(__self__, "ids", ids)
        if owner_id and not isinstance(owner_id, str):
            raise TypeError("Expected argument 'owner_id' to be a str")
        pulumi.set(__self__, "owner_id", owner_id)
        if site_id and not isinstance(site_id, str):
            raise TypeError("Expected argument 'site_id' to be a str")
        pulumi.set(__self__, "site_id", site_id)

    @property
    @pulumi.getter
    def arns(self) -> Sequence[str]:
        """
        Set of Amazon Resource Names (ARNs).
        """
        return pulumi.get(self, "arns")

    @property
    @pulumi.getter(name="availabilityZone")
    def availability_zone(self) -> str:
        return pulumi.get(self, "availability_zone")

    @property
    @pulumi.getter(name="availabilityZoneId")
    def availability_zone_id(self) -> str:
        return pulumi.get(self, "availability_zone_id")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def ids(self) -> Sequence[str]:
        """
        Set of identifiers.
        """
        return pulumi.get(self, "ids")

    @property
    @pulumi.getter(name="ownerId")
    def owner_id(self) -> str:
        return pulumi.get(self, "owner_id")

    @property
    @pulumi.getter(name="siteId")
    def site_id(self) -> str:
        return pulumi.get(self, "site_id")


class AwaitableGetOutpostsResult(GetOutpostsResult):
    """Awaitable wrapper so `get_outposts` results work with `await`."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetOutpostsResult(
            arns=self.arns,
            availability_zone=self.availability_zone,
            availability_zone_id=self.availability_zone_id,
            id=self.id,
            ids=self.ids,
            owner_id=self.owner_id,
            site_id=self.site_id)
def get_outposts(availability_zone: Optional[str] = None,
                 availability_zone_id: Optional[str] = None,
                 owner_id: Optional[str] = None,
                 site_id: Optional[str] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOutpostsResult:
    """
    Provides details about multiple Outposts.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_aws as aws

    example = aws.outposts.get_outposts(site_id=data["aws_outposts_site"]["id"])
    ```

    :param str availability_zone: Availability Zone name.
    :param str availability_zone_id: Availability Zone identifier.
    :param str owner_id: AWS Account identifier of the Outpost owner.
    :param str site_id: Site identifier.
    """
    # Invoke arguments use the provider's camelCase key names.
    __args__ = {
        'availabilityZone': availability_zone,
        'availabilityZoneId': availability_zone_id,
        'ownerId': owner_id,
        'siteId': site_id,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    outputs = pulumi.runtime.invoke('aws:outposts/getOutposts:getOutposts', __args__, opts=opts, typ=GetOutpostsResult).value
    return AwaitableGetOutpostsResult(
        arns=outputs.arns,
        availability_zone=outputs.availability_zone,
        availability_zone_id=outputs.availability_zone_id,
        id=outputs.id,
        ids=outputs.ids,
        owner_id=outputs.owner_id,
        site_id=outputs.site_id)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 2.454313 | 1,171 |
#!/usr/bin/python
import argparse;
import os;
import sys;
import matplotlib as mpl;
mpl.use('Agg');
import matplotlib.pyplot as plt;
import json;
from matplotlib import ticker;
import numpy as np
# Output path for the rendered figure.
filename="cost-analysis-result.pdf";

# NOTE(review): main() is not defined in the visible portion of this file --
# presumably defined above in the truncated part; confirm.
main();

# Col 0 are the x points
# Col 1 is the series 50/100 marker
# Col 2 is the series cat data
# Col 3 is the series no cat data
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
1822,
29572,
26,
198,
11748,
28686,
26,
198,
11748,
25064,
26,
198,
11748,
2603,
29487,
8019,
355,
285,
489,
26,
198,
76,
489,
13,
1904,
10786,
46384,
24036,
198,
11748,
2603,
294... | 2.983871 | 124 |
import pathlib
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent

# The text of the README file
README = (HERE / "README.md").read_text()

# This call to setup() does all the work
# Packaging metadata for the django-inbound-rules distribution; the README
# is reused verbatim as the PyPI long description.
setup(
    name="django-inbound-rules",
    version="1.1.0",
    description="Django Inbound Rules is an app to allow or restrict group of users on specified url(s) based on CIDR blocks(now IPv4 only) excluding user with superuser permissions.",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/nilesh-kr-dubey/django-inbound-rules",
    author="Nilesh Kumar Dubey",
    author_email="nileshdubeyindia@gmail.com",
    license="MIT",
    classifiers=[
        "Environment :: Web Environment",
        "Framework :: Django",
        "Framework :: Django :: 2.0",
        "Framework :: Django :: 3.0",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
    ],
    # Only the 'inbound' package is shipped.
    packages=['inbound'],
    install_requires=[
        "Django >= 2.0",
    ],
    include_package_data=True,
    project_urls={
        'Documentation': 'https://github.com/nilesh-kr-dubey/django-inbound-rules/tree/master/docs',
    },
)
| [
198,
11748,
3108,
8019,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
2,
383,
8619,
7268,
428,
2393,
198,
39,
9338,
796,
3108,
8019,
13,
15235,
7,
834,
7753,
834,
737,
8000,
198,
198,
2,
383,
2420,
286,
262,
20832,
11682,
2393... | 2.610915 | 568 |
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
13,
83,
15799,
1330,
5436,
32572,
420,
1352,
628,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198,
22... | 1.666667 | 96 |
from selenium import webdriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver import ActionChains
#REPLACE WITH YOUR DRIVER PATH. EXAMPLES FOR CHROME AND PHANTOMJS
# NOTE(review): PhantomJS support was removed from recent Selenium releases
# -- this script presumably targets an older Selenium; confirm before
# upgrading dependencies.
driver = webdriver.PhantomJS(executable_path='../phantomjs-2.1.1-macosx/bin/phantomjs')
#driver = webdriver.Chrome(executable_path='../chromedriver/chromedriver')
# Poll up to 5 seconds when locating elements before failing.
driver.implicitly_wait(5)
driver.get('http://www.pythonscraping.com/')
# Save a screenshot of the loaded page.
# NOTE(review): assumes a 'tmp/' directory already exists relative to the
# working directory -- confirm, or the call will fail.
driver.get_screenshot_as_file('tmp/pythonscraping.png')
driver.close()
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
47960,
13,
732,
1350,
1732,
1330,
5313,
20180,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
1330,
7561,
1925,
1299,
628,
198,
2,
2200,
6489,... | 2.837989 | 179 |
"""oilandrope URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.i18n import i18n_patterns
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views.i18n import JavaScriptCatalog
# Routes declared here (outside i18n_patterns) are never language-prefixed.
urlpatterns = [
    # JavaScript translations
    path('jsi18n/', JavaScriptCatalog.as_view(), name='javascript-catalog'),
]

# All remaining routes go through i18n_patterns; with
# prefix_default_language=False the default language gets no URL prefix.
urlpatterns += i18n_patterns(
    # Main site
    path('', include('core.urls')),
    # Admin site
    path('admin/', admin.site.urls),
    # API
    path('api/', include('api.urls')),
    # Common
    path('common/', include('common.urls')),
    # Auth system
    path('accounts/', include('registration.urls')),
    # Bot
    path('bot/', include('bot.urls')),
    # Dynamic Menu
    path('dynamic_menu/', include('dynamic_menu.urls')),
    # React FrontEnd
    path('frontend/', include('frontend.urls')),
    # Roleplay
    path('roleplay/', include('roleplay.urls')),
    prefix_default_language=False,
)

# Serve uploaded media directly only during development.
if settings.DEBUG:  # pragma: no cover
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
37811,
9437,
28092,
431,
10289,
28373,
198,
198,
464,
4600,
6371,
33279,
82,
63,
1351,
11926,
32336,
284,
5009,
13,
1114,
517,
1321,
3387,
766,
25,
198,
220,
220,
220,
3740,
1378,
31628,
13,
28241,
648,
404,
305,
752,
13,
785,
14,
2... | 2.773163 | 626 |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import re
# Access and edit Google Sheets by gspread
import gspread
# Module to transform gsheets to data frame
import gspread_dataframe as gs_to_df
from oauth2client.service_account import ServiceAccountCredentials
import datetime as dt
from pathlib import *
import sys
path = PurePath('__file__')
sys.path.insert(0, str(Path(path.parent).resolve().parent))
from cpm import functions as f
# Presumably Google Sheets spreadsheet/tab names consumed via gspread --
# confirm against main()/functions in cpm.
TEMPLATE = "Feedback_Template"
MATRICULA = "3. Planilha Matrículas 2019 - 1o sem"
MATR_ABA = "João XXIII"
MATR_CLEANED = "J23_Matrícula_2019-1S"

main()

# NOTE(review): bare expression -- in the jupytext notebook this displays the
# value; as a plain script it raises NameError unless main() created
# `planilhas` in the global namespace. Confirm intent.
planilhas
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
11420,
198,
2,
474,
929,
88,
353,
25,
198,
2,
220,
220,
474,
929,
88,
5239,
25,
198,
2,
220,
220,
220,
220,
2420,
62,
15603,
341,
25,
198,
2,
220,
220,
220,
... | 2.423077 | 364 |
a = 1
b = 2
c = a + b
sum = 2
while c <= 4000000:
if c%2 == 0:
sum =sum + c
a = b
b = c
c = a + b
print sum | [
64,
796,
352,
198,
65,
796,
362,
198,
66,
796,
257,
1343,
275,
198,
16345,
796,
362,
198,
4514,
269,
19841,
604,
10535,
25,
198,
197,
361,
269,
4,
17,
6624,
657,
25,
198,
197,
197,
16345,
796,
16345,
1343,
269,
198,
197,
64,
796... | 1.83871 | 62 |
from sciencebeam_utils.utils.xml import (
set_or_remove_attrib
)
from sciencebeam_gym.utils.bounding_box import (
BoundingBox
)
from sciencebeam_gym.structured_document import (
AbstractStructuredDocument,
get_scoped_attrib_name,
get_attrib_by_scope
)
TAG_ATTRIB_NAME = 'tag'
| [
6738,
3783,
40045,
62,
26791,
13,
26791,
13,
19875,
1330,
357,
198,
220,
220,
220,
900,
62,
273,
62,
28956,
62,
1078,
822,
198,
8,
198,
198,
6738,
3783,
40045,
62,
1360,
76,
13,
26791,
13,
7784,
278,
62,
3524,
1330,
357,
198,
220,... | 2.559322 | 118 |
import numpy as np

# Build a 3 x 4 matrix containing 0..11.
a = np.arange(12).reshape(3, 4)

print('Original array is:')
print(a)
print()

print('Modified array is:')
# np.nditer visits every element in C (row-major) order.
for value in np.nditer(a):
    print(value)
11748,
299,
32152,
355,
45941,
198,
64,
796,
45941,
13,
283,
858,
7,
1065,
8,
198,
220,
198,
2,
5485,
7177,
351,
513,
15274,
290,
220,
198,
2,
604,
15180,
198,
64,
796,
257,
13,
3447,
1758,
7,
18,
11,
19,
8,
198,
220,
198,
479... | 2.393939 | 99 |
import logging
import os
import re
from pathlib import Path
from typing import Any
from mdscript.files_dependencies_manager import FilesDependenciesManager
from mdscript.watcher import Watcher
| [
11748,
18931,
198,
11748,
28686,
198,
11748,
302,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
4377,
198,
198,
6738,
45243,
12048,
13,
16624,
62,
45841,
3976,
62,
37153,
1330,
13283,
35,
2690,
3976,
13511,
198,
6738,
45243,... | 4 | 49 |
# Lazily-populated lookup tables (presumably ASCII emoticon -> UTF-8 emoji,
# judging by the names); both start as None and are filled elsewhere --
# confirm against the code that initialises them.
emojisAsciiToUtf8Strict = None
emojisAsciiToUtf8 = None
368,
13210,
271,
1722,
979,
72,
2514,
18274,
69,
23,
1273,
2012,
796,
6045,
198,
368,
13210,
271,
1722,
979,
72,
2514,
18274,
69,
23,
796,
6045
] | 2.037037 | 27 |
from django.conf import settings
from django.test import TestCase
from mail_templated import EmailMessage
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
6920,
62,
11498,
489,
515,
1330,
9570,
12837,
198
] | 3.925926 | 27 |
from flask import *
app = Flask(__name__)
template = '<!DOCTYPE html><html><body>\
<h1>Online Calculator</h1>\
<form action="/" method="post">\
expression:<br>\
<input type="text" name="expression" value="">\
<input type="submit" value="Submit">\
</form><h2>%s </h2></body></html>'
@app.route('/', methods=['GET', 'POST'])
def calculator():
    """Serve the calculator form; on POST, evaluate the submitted expression.

    The checked-in file registered two routes whose view functions were
    missing (bare decorators -- a SyntaxError); this restores a single
    handler covering both verbs.
    """
    result = ''
    if request.method == 'POST':
        expression = request.form.get('expression', '')
        try:
            # SECURITY: eval of user input is dangerous even with builtins
            # stripped -- acceptable only for a local toy app; replace with a
            # real expression parser before exposing this publicly.
            result = eval(expression, {"__builtins__": {}}, {})
        except Exception as exc:
            result = 'error: {}'.format(exc)
    return template % result


if __name__ == "__main__":
    app.run("0.0.0.0", port=5005, debug=False)
| [
6738,
42903,
1330,
1635,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
198,
28243,
796,
705,
27,
0,
18227,
4177,
56,
11401,
27711,
6927,
6494,
6927,
2618,
29,
59,
198,
220,
220,
220,
1279,
71,
16,
29,
14439,
43597,
3556,
71,
1... | 2.282828 | 198 |
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module for Rank psychological embedding model.
Classes:
Rank: Class that uses ordinal observations that are anchored by a
designated query stimulus.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from psiz.keras.models.psych_embedding import PsychologicalEmbedding
import psiz.keras.layers
@tf.keras.utils.register_keras_serializable(
    package='psiz.keras.models', name='Rank'
)
class Rank(PsychologicalEmbedding):
    """Psychological embedding inferred from ranked similarity judgments.
    Attributes:
        See PsychologicalEmbedding.
    """
    def __init__(self, behavior=None, **kwargs):
        """Initialize.
        Arguments:
            behavior (optional): A behavior layer; defaults to
                `psiz.keras.layers.RankBehavior()`.
            kwargs: See PsychologicalEmbedding.
        Raises:
            ValueError: If arguments are invalid.
        """
        # Initialize behavioral component.
        if behavior is None:
            behavior = psiz.keras.layers.RankBehavior()
        kwargs.update({'behavior': behavior})
        super().__init__(**kwargs)
    def call(self, inputs):
        """Call.
        Arguments:
            inputs: A dictionary of inputs:
                stimulus_set: dtype=tf.int32, consisting of the
                    integers on the interval [0, n_stimuli[
                    shape=(batch_size, n_max_reference + 1, n_outcome)
                is_select: dtype=tf.bool, the shape implies the
                    maximum number of selected stimuli in the data
                    shape=(batch_size, n_max_select, n_outcome)
                groups: dtype=tf.int32, Integers indicating the
                    group membership of a trial.
                    shape=(batch_size, k)
        Returns:
            probs: Outcome probabilities produced by the behavior layer.
        """
        # Grab inputs.
        stimulus_set = inputs['stimulus_set']
        # Drop the first (query) column of is_select; only reference
        # selections matter downstream.
        is_select = inputs['is_select'][:, 1:, :]
        groups = inputs['groups']
        # Define some useful variables before manipulating inputs.
        max_n_reference = tf.shape(stimulus_set)[-2] - 1
        # Repeat `stimulus_set` `n_sample` times in a newly inserted
        # axis (axis=1).
        # TensorShape([batch_size, n_sample, n_ref + 1, n_outcome])
        # NOTE(review): this uses psiz.utils while only psiz.keras.layers is
        # imported at the top of the file -- whether psiz.utils is loaded
        # depends on the psiz package __init__; confirm.
        stimulus_set = psiz.utils.expand_dim_repeat(
            stimulus_set, self.n_sample, axis=1
        )
        # Embed stimuli indices in n-dimensional space:
        # TensorShape([batch_size, n_sample, n_ref + 1, n_outcome, n_dim])
        if self._use_group['stimuli']:
            z = self.stimuli([stimulus_set, groups])
        else:
            z = self.stimuli(stimulus_set)
        # Split query and reference embeddings:
        # z_q: TensorShape([batch_size, sample_size, 1, n_outcome, n_dim]
        # z_r: TensorShape([batch_size, sample_size, n_ref, n_outcome, n_dim]
        z_q, z_r = tf.split(z, [1, max_n_reference], -3)
        # The tf.split op does not infer split dimension shape. We know that
        # z_q will always have shape=1, but we don't know `max_n_reference`
        # ahead of time.
        z_q.set_shape([None, None, 1, None, None])
        # Pass through similarity kernel.
        # TensorShape([batch_size, sample_size, n_ref, n_outcome])
        if self._use_group['kernel']:
            sim_qr = self.kernel([z_q, z_r, groups])
        else:
            sim_qr = self.kernel([z_q, z_r])
        # Zero out similarities involving placeholder IDs by creating
        # a mask based on reference indices. We drop the query indices
        # because they have effectively been "consumed" by the similarity
        # operation.
        is_present = tf.cast(
            tf.math.not_equal(stimulus_set[:, :, 1:], 0), K.floatx()
        )
        sim_qr = sim_qr * is_present
        # Prepare for efficient probability computation by adding
        # singleton dimension for `n_sample`.
        is_select = tf.expand_dims(
            tf.cast(is_select, K.floatx()), axis=1
        )
        # Determine if outcome is legitimate by checking if at least one
        # reference is present. This is important because not all trials have
        # the same number of possible outcomes and we need to infer the
        # "zero-padding" of the outcome axis.
        is_outcome = is_present[:, :, 0, :]
        # Compute probability of different behavioral outcomes.
        if self._use_group['behavior']:
            probs = self.behavior([sim_qr, is_select, is_outcome, groups])
        else:
            probs = self.behavior([sim_qr, is_select, is_outcome])
        return probs
def _ranked_sequence_probability(sim_qr, n_select):
"""Return probability of a ranked selection sequence.
Arguments:
sim_qr: A 3D tensor containing pairwise similarity values.
Each row (dimension 0) contains the similarity between
a trial's query stimulus and reference stimuli. The
tensor is arranged such that the first column
corresponds to the first selection in a sequence, and
the last column corresponds to the last selection
(dimension 1). The third dimension indicates
different samples.
shape = (n_trial, n_reference, n_sample)
n_select: Scalar indicating the number of selections made
by an agent.
Returns:
A 2D tensor of probabilities.
shape = (n_trial, n_sample)
Notes:
For example, given query Q and references A, B, and C, the
probability of selecting reference A then B (in that order)
would be:
P(A)P(B|A) = s_QA/(s_QA + s_QB + s_QC) * s_QB/(s_QB + s_QC)
where s_QA denotes the similarity between the query and
reference A.
The probability is computed by starting with the last
selection for efficiency and numerical stability. In the
provided example, this corresponds to first computing the
probability of selecting B second, given that A was
selected first.
"""
n_trial = sim_qr.shape[0]
n_sample = sim_qr.shape[2]
# Initialize.
seq_prob = np.ones((n_trial, n_sample), dtype=np.float64)
selected_idx = n_select - 1
denom = np.sum(sim_qr[:, selected_idx:, :], axis=1)
for i_selected in range(selected_idx, -1, -1):
# Compute selection probability.
prob = np.divide(sim_qr[:, i_selected], denom)
# Update sequence probability.
# seq_prob = np.multiply(seq_prob, prob)
seq_prob *= prob
# Update denominator in preparation for computing the probability
# of the previous selection in the sequence.
if i_selected > 0:
# denom = denom + sim_qr[:, i_selected-1, :]
denom += sim_qr[:, i_selected - 1, :]
return seq_prob
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
12131,
383,
350,
13396,
57,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
... | 2.461205 | 3,003 |
from rest_framework import viewsets
from rest_framework.response import Response
from dalme_api.serializers import CommentSerializer
from dalme_app.models import Comment
from dalme_api.access_policies import CommentAccessPolicy
from dalme_app.models import *
class Comments(viewsets.ModelViewSet):
    """CRUD endpoint exposing Comment records over the REST API.

    Access control is delegated to CommentAccessPolicy.
    """

    queryset = Comment.objects.all()
    serializer_class = CommentSerializer
    permission_classes = (CommentAccessPolicy,)
| [
6738,
1334,
62,
30604,
1330,
5009,
1039,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
288,
282,
1326,
62,
15042,
13,
46911,
11341,
1330,
18957,
32634,
7509,
198,
6738,
288,
282,
1326,
62,
1324,
13,
27530,
1330,
18957,
... | 3.583333 | 132 |
import numpy as np
from matplotlib import pyplot as plt
from mvn.utils.misc import find_min, drop_na
| [
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
198,
6738,
285,
85,
77,
13,
26791,
13,
44374,
1330,
1064,
62,
1084,
11,
4268,
62,
2616,
628,
628,
628,
628,
628
] | 2.846154 | 39 |
# Parser-test fixture: expected parsed structure for multicast route
# statistics, keyed VRF -> address family -> group -> source. '(*,G)'
# is the shared-tree entry; per-source fields are packet/byte counters
# and rates. NOTE(review): field meanings inferred from key names —
# confirm against the parser under test.
expected_output = {
    'vrf': {
        'vxlan-1009': {
            'address_family': {
                'ipv4': {
                    'count_multicast_starg': 11,
                    'count_multicast_sg': 18,
                    'count_multicast_total': 29,
                    'count_multicast_starg_prefix': 0,
                    'group_count': 18,
                    'avg_source_per_group': 1.0,
                    'groups': {
                        '225.1.1.17/32': {
                            'source_count': 1,
                            'source': {
                                '(*,G)': {
                                    'packets': 6,
                                    'bytes': 636,
                                    'aps': 106,
                                    'pps': 0,
                                    'bitrate': 0.000,
                                    'bitrate_unit': 'bps',
                                    'oifs': 1,
                                },
                                '1.1.91.67': {
                                    'packets': 145,
                                    'bytes': 7505,
                                    'aps': 51,
                                    'pps': 0,
                                    'bitrate': 27.200,
                                    'bitrate_unit': 'bps',
                                    'oifs': 1,
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
| [
40319,
62,
22915,
796,
1391,
198,
220,
220,
220,
705,
37020,
69,
10354,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
705,
85,
87,
9620,
12,
3064,
24,
10354,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
705,
... | 1.248601 | 1,251 |
# Generic imports
import os, sys, glob, shutil
import numpy as np
# Find the number of envs
# Post-processing: merge the outputs of parallel 'env*' run directories
# into a single 'sorted_envs' folder and compute a running-average reward.
# Find the number of envs
main_dir = '.'
envs = [f.path for f in os.scandir(main_dir) if f.is_dir()]
# Process names: os.scandir yields './name', so strip the leading './'
# and keep only directories whose name starts with 'env'
tmp = []
for env in envs:
    env = env[2:]
    if (env[0:3] == 'env'):
        tmp.append(env)
# Printing
envs = tmp
print('I found ',str(len(envs)),' environments')
# Create final dirs if necessary
path = 'sorted_envs'
png_path = path+'/png'
csv_path = path+'/csv'
sol_path = path+'/sol'
best_path = path+'/best'
if (not os.path.isdir(path)):
    os.mkdir(path)
if (not os.path.isdir(png_path)):
    os.mkdir(png_path)
if (not os.path.isdir(csv_path)):
    os.mkdir(csv_path)
if (not os.path.isdir(sol_path)):
    os.mkdir(sol_path)
if (not os.path.isdir(best_path)):
    os.mkdir(best_path)
# Read env contents
n_outputs = 10  # number of top-reward shapes copied into best/
looping = True
glb_index = 1  # per-env shape index (1-based)
loc_index = 0  # global output index across all envs
ring_size = 250  # window length of the moving-average reward
ring_buffer = np.zeros([ring_size])
ring_index = 0
avg_rew = 0.0
avg_reward = []
reward = []
# Loop until no more shapes can be found
while looping:
    # Copy loc index to check if loop must be stopped
    loc_index_cp = loc_index
    # Loop over envs
    for env in envs:
        img = env+'/save/png/shape_'+str(glb_index)+'.png'
        csv = env+'/save/csv/shape_'+str(glb_index)+'.csv'
        sol = env+'/save/sol/'+str(glb_index)+'.png'
        sol_u = env+'/save/sol/'+str(glb_index)+'_u.png'
        sol_v = env+'/save/sol/'+str(glb_index)+'_v.png'
        sol_p = env+'/save/sol/'+str(glb_index)+'_p.png'
        # If files exists, copy
        if os.path.isfile(img):
            shutil.copy(img, png_path+'/'+str(loc_index)+'.png')
        if os.path.isfile(csv):
            shutil.copy(csv, csv_path+'/'+str(loc_index)+'.csv')
        if os.path.isfile(sol_u):
            shutil.copy(sol_u, sol_path+'/'+str(loc_index)+'_u.png')
        if os.path.isfile(sol_v):
            shutil.copy(sol_v, sol_path+'/'+str(loc_index)+'_v.png')
        if os.path.isfile(sol_p):
            shutil.copy(sol_p, sol_path+'/'+str(loc_index)+'_p.png')
        if os.path.isfile(sol):
            shutil.copy(sol, sol_path+'/'+str(loc_index)+'.png')
        # All the following is done only if computation ended well
        # Store reward and check max reward
        # NOTE(review): re-reads the whole file every pass and takes line
        # glb_index-1; assumes one '<index> <reward>' line per shape and
        # raises IndexError if the file is shorter — confirm upstream.
        filename = env+'/save/reward_penalization'
        line = None
        with open(filename) as f:
            line = f.read().split('\n')[glb_index-1]
        line = line.split(' ')
        # Handle reward
        if (len(line)>1):
            # Retrieve and store reward
            rew = float(line[1])
            ring_buffer[ring_index] = rew
            # Compute new average over the ring buffer window
            avg_rew = np.sum(ring_buffer)/ring_size
            avg_reward.append(avg_rew)
            reward.append(rew)
            # Update ring buffer index (wrap around at ring_size)
            ring_index += 1
            if (ring_index == ring_size): ring_index = 0
            # Update index
            loc_index += 1
    # Stop looping if index has not changed
    if (loc_index == loc_index_cp):
        looping = False
    # Update global index
    glb_index += 1
# Sort reward: argsort of the negated rewards gives indices in
# descending reward order
sort_rew = np.argsort(-1.0*np.asarray(reward))
# Write reward to file: one '<index> <reward> <moving average>' per line
filename = path+'/reward'
with open(filename, 'w') as f:
    for i in range(len(reward)):
        f.write(str(i)+' ')
        f.write(str(reward[i])+' ')
        f.write(str(avg_reward[i]))
        f.write('\n')
# Copy best solutions
for i in range(n_outputs):
    img = png_path+'/'+str(sort_rew[i])+'.png'
    if os.path.isfile(img):
        shutil.copy(img, best_path+'/.')
# Printing
print('I found '+str(loc_index)+' shapes in total')
print('Best rewards are:')
for i in range(n_outputs):
    print(' '+str(reward[sort_rew[i]])+' for shape '+str(sort_rew[i]))
| [
2,
42044,
17944,
198,
11748,
28686,
11,
25064,
11,
15095,
11,
4423,
346,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
9938,
262,
1271,
286,
551,
14259,
198,
12417,
62,
15908,
796,
705,
2637,
198,
268,
14259,
796,
685,
69,
13,
69... | 1.981744 | 1,972 |
import datetime
import os
import numpy as np
from quantities import *
# Standard gravity (9.80665 m/s^2) divided by 1000: converts gram-force
# to newtons (1 gf = 9.80665e-3 N).
gf2N = 9.80665 / 1000
# Absolute directory containing this source file.
dirpath = os.path.dirname(os.path.realpath(__file__))
| [
11748,
4818,
8079,
198,
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
17794,
1330,
1635,
198,
198,
70,
69,
17,
45,
796,
860,
13,
1795,
36879,
1220,
8576,
198,
198,
15908,
6978,
796,
28686,
13,
6978,
13,
15908,... | 2.678571 | 56 |
#!/usr/bin/env python3
import math
def main():
    """Entry point: write a friendly greeting to stdout."""
    print("Hello There!")


if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
10688,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3601,
7203,
15496,
1318,
2474,
8,
198
] | 2.540541 | 37 |
from django.urls import path
from . import views
from django.conf.urls import url
# URL namespace for reversing these routes (e.g. 'pool:store').
app_name = 'pool'
# Route table: the pooling landing page plus its cab/food/misc
# sub-pages, and the store and find views.
urlpatterns = [
    path('pool/', views.Pool.as_view(), name='pool'),
    path('pool/cab/', views.PoolCab.as_view(), name='cab'),
    path('pool/food/', views.PoolFood.as_view(), name='food'),
    path('pool/others/', views.PoolMisc.as_view(), name='others'),
    path('store/', views.Store.as_view(), name='store'),
    path('find/', views.Find.as_view(), name='find'),
    # path('resource', views.PoolResource.as_view(), name='edit_self'),
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
1324,
62,
3672,
796,
705,
7742,
6,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220... | 2.645631 | 206 |
import os
import sys
# Make the repository root (assumed to be the current working directory)
# importable so the `util`, `config`, and `pnc` packages below resolve.
cwd = os.getcwd()
sys.path.append(cwd)
import time, math
from collections import OrderedDict
import pybullet as p
import numpy as np
from util import pybullet_util
from config.manipulator_config import ManipulatorConfig
from pnc.manipulator_pnc.manipulator_interface import ManipulatorInterface
if __name__ == "__main__":
    # Environment Setup
    p.connect(p.GUI)
    # Camera: 4 m back, pitched down 45 degrees, aimed at x=1.5 on the ground.
    p.resetDebugVisualizerCamera(cameraDistance=4.0,
                                 cameraYaw=0,
                                 cameraPitch=-45,
                                 cameraTargetPosition=[1.5, 0., 0.])
    p.setGravity(0, 0, -9.8)
    # Step size and substep count come from the shared config so the
    # controller and the physics engine stay in sync.
    p.setPhysicsEngineParameter(fixedTimeStep=ManipulatorConfig.DT,
                                numSubSteps=ManipulatorConfig.N_SUBSTEP)
    if ManipulatorConfig.VIDEO_RECORD:
        # Record the GUI to mp4, starting from an empty ./video directory.
        if not os.path.exists('video'):
            os.makedirs('video')
        for f in os.listdir('video'):
            os.remove('video/' + f)
        p.startStateLogging(p.STATE_LOGGING_VIDEO_MP4, "video/atlas.mp4")
    # Create Robot, Ground
    # Rendering is disabled while the URDF loads, then re-enabled.
    p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
    robot = p.loadURDF(cwd +
                       "/robot_model/manipulator/three_link_manipulator.urdf",
                       useFixedBase=True)
    p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
    nq, nv, na, joint_id, link_id, pos_basejoint_to_basecom, rot_basejoint_to_basecom = pybullet_util.get_robot_config(
        robot)
    # Set Initial Config: joint angles in radians, zero initial velocity.
    p.resetJointState(robot, 0, -np.pi / 6., 0.)
    p.resetJointState(robot, 1, np.pi / 6., 0.)
    p.resetJointState(robot, 2, np.pi / 3., 0.)
    # Joint Friction
    pybullet_util.set_joint_friction(robot, joint_id, 0.1)
    # Construct Interface
    interface = ManipulatorInterface()
    # Run Sim
    t = 0
    dt = ManipulatorConfig.DT
    count = 0
    # Main loop: read sensors -> compute command -> apply torques -> step.
    # NOTE(review): runs forever; stop with Ctrl-C or by closing the GUI.
    while (1):
        # Get SensorData
        sensor_data = pybullet_util.get_sensor_data(robot, joint_id, link_id,
                                                    pos_basejoint_to_basecom,
                                                    rot_basejoint_to_basecom)
        # Compute Command
        command = interface.get_command(sensor_data)
        # Apply Trq
        pybullet_util.set_motor_trq(robot, joint_id, command)
        p.stepSimulation()
        # Sleep one control period so the GUI runs near real time.
        time.sleep(dt)
        t += dt
        count += 1
| [
11748,
28686,
198,
11748,
25064,
198,
66,
16993,
796,
28686,
13,
1136,
66,
16993,
3419,
198,
17597,
13,
6978,
13,
33295,
7,
66,
16993,
8,
198,
11748,
640,
11,
10688,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
11748,
12972... | 2.008432 | 1,186 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
from PyQt4 import QtGui
from PyQt4 import QtCore
import Database
from DioView import DioView
from DioDetails import DioDetails
from DioScanDialog import DioScanDialog
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
4738,
198,
6738,
9485,
48,
83,
19,
1330,
33734,
8205,
72,
198,
6738,
9485,
48,
83,
19,
1330,
33734,
14055... | 2.857143 | 84 |
# -*- coding: utf-8 -*-
import mutagen
import os
import json
current_dir = os.getcwd()
fileNameList_all = os.listdir(current_dir)
fileNameList_shouldUse = []
failelist = []
# Running count of successful extractions — presumably incremented inside
# AudioFileAssetsExport (defined elsewhere in this file); confirm.
success_amount = 0  # credit: https://www.jianshu.com/p/53cf61220828
# Extract the cover image and other metadata from every mp3 in the
# current directory. First delete .json/.jpg artifacts from a prior run.
if len(fileNameList_all) > 0:
    for filename in fileNameList_all:
        # BUG FIX: the original condition was
        #   filename.endswith('.json' or filename.endswith('.jpg'))
        # where `'.json' or ...` always evaluates to '.json', so .jpg
        # files were never removed. str.endswith accepts a tuple.
        if filename.endswith(('.json', '.jpg')):
            os.remove(filename)
# Collect the mp3 files to process.
for temp_name in fileNameList_all:
    if str(temp_name).endswith(".mp3"):
        fileNameList_shouldUse.append(temp_name)
# print('should process music file: ' + fileNameList_shouldUse)
if len(fileNameList_shouldUse) > 0:
    for temp_name in fileNameList_shouldUse:
        AudioFileAssetsExport(temp_name)
print('------------------------------------------------------------------------------------------------------------------------------')
print('---------extract img success: ' + str(success_amount) + 'extract img fail: ' + str(len(fileNameList_shouldUse) - success_amount) + '----------------')
print(failelist)
os.system('pause')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
4517,
11286,
198,
11748,
28686,
198,
11748,
33918,
198,
220,
198,
14421,
62,
15908,
796,
28686,
13,
1136,
66,
16993,
3419,
198,
7753,
5376,
8053,
62,
439,
79... | 2.5 | 452 |
"""
Output formats.
"""
from .rst import RST
from .console import Console
from .json import JSON
from .svg import SVG
from .png import PNG
| [
37811,
198,
26410,
17519,
13,
198,
37811,
198,
198,
6738,
764,
81,
301,
1330,
371,
2257,
198,
6738,
764,
41947,
1330,
24371,
198,
6738,
764,
17752,
1330,
19449,
198,
6738,
764,
21370,
70,
1330,
45809,
198,
6738,
764,
11134,
1330,
36182,... | 3.333333 | 42 |
import logging
# Module logger: errors only, with a no-op handler attached so library
# use without logging configuration does not print the "no handlers
# could be found" warning.
log = logging.getLogger('WebPage')
log.setLevel(logging.ERROR)
# BUG FIX: NullHandler lives in the logging module; the bare name
# `NullHandler()` was a NameError (only `import logging` precedes this).
log.addHandler(logging.NullHandler())
import os
import urllib
import web
from viz import Viz, \
VizBanner
# Directory holding the page templates, relative to the working directory.
TEMPLATE_PATH = os.path.join('templates')
# Theme name — presumably consumed by the viz/web layer; confirm usage.
LOOK_AND_FEEL = 'dust'
| [
11748,
18931,
201,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
10786,
13908,
9876,
11537,
201,
198,
6404,
13,
2617,
4971,
7,
6404,
2667,
13,
24908,
8,
201,
198,
6404,
13,
2860,
25060,
7,
35067,
25060,
28955,
201,
198,
201,
198,
1174... | 2.333333 | 117 |
#
# Created by Denis Doci
#
# Copyright Mars Inc.
#
# For internal use only
#
import requests
import json
import pandas as pd
import numpy as np
import datetime
import itertools
import math
from datetime import date, timedelta
########################################
########################################
############################################################
############################################################
############################################################
############################################################
############################################################
############################################################
############################################################
############################################################
############################################################
############################################################
############################################################
############################################################
| [
2,
198,
2,
15622,
416,
33089,
360,
1733,
198,
2,
198,
2,
15069,
8706,
3457,
13,
198,
2,
198,
2,
1114,
5387,
779,
691,
198,
2,
198,
11748,
7007,
198,
11748,
33918,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
... | 7.401408 | 142 |
import unittest
import datetime
from forthic.interpreter import Interpreter
from forthic.module import Module, ModuleWord
from tests.tests_py.sample_date_module import SampleDateModule
if __name__ == '__main__':
    # Discover and run all TestCase classes defined in this module.
    unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
4818,
8079,
198,
6738,
6071,
291,
13,
3849,
3866,
353,
1330,
4225,
3866,
353,
198,
6738,
6071,
291,
13,
21412,
1330,
19937,
11,
19937,
26449,
198,
6738,
5254,
13,
41989,
62,
9078,
13,
39873,
62,
4475,... | 3.243243 | 74 |
import tensorflow as tf
from learntools.core import *
# Bind the learntools exercise classes (Q1..Q3, defined elsewhere) into
# module-level variables named q_1, q_2, q_3 and export them.
qvars = bind_exercises(globals(), [
    Q1, Q2, Q3,
    ],
    var_format='q_{n}',
)
__all__ = list(qvars)
| [
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
26338,
10141,
13,
7295,
1330,
1635,
628,
628,
198,
198,
44179,
945,
796,
11007,
62,
1069,
2798,
2696,
7,
4743,
672,
874,
22784,
685,
198,
220,
220,
220,
220,
220,
220,
220,
1195,
... | 2.111111 | 81 |
# Print the even numbers in [0, 100).
# FIX: the original seeded `i = 0` before the loop and ran `i += i` at the
# end of every iteration; both statements were dead code with respect to
# the printed output because the `for` statement rebinds `i` on each pass.
# Stepping the range by 2 prints the same sequence without the parity test.
for i in range(0, 100, 2):
    print(i)
| [
72,
796,
657,
198,
1640,
1312,
287,
2837,
7,
15,
11,
1802,
2599,
198,
220,
220,
220,
611,
1312,
4064,
362,
6624,
657,
25,
198,
220,
220,
220,
220,
220,
220,
220,
3601,
7,
72,
8,
198,
220,
220,
220,
1312,
47932,
1312,
198
] | 1.727273 | 44 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Hay diversas maneras de recibir información del usuario en la terminal
Para pedir información al usuario DURANTE la ejecución de un script
podemos usar la función "raw_input" (python2) o "input" (python3) y guardar la respuesta en una variable
como se puede ver en el script: "helloWorldUTF8.py"
A veces queremos recibir información del usuario desde que EJECUTAMOS el
script, es decir, desde un principio.
Ejemplos de ejecución:
>> python argumentosSimple.py Santiago Chávez
>> python argumentosSimple.py "Santiago Chávez"
>> python argumentosSimple.py "Santiago Chávez" utf8 > test.txt
"""
# Importamos una librería para poder usar sus funcionalidades
# La librería "sys" no permite acceder a información del sistema
import sys
# The "sys" module gives us access to the "arguments" the script was invoked with
nombreScript = sys.argv[0] # Index "0" always holds the name of the current script: "argumentosSimple.py"
argumentos = [] # Define the variable "argumentos" as an empty list
# Walk the arguments from 1 up to the total number of arguments
for i in range(1,len(sys.argv)):
	argumentos.append(sys.argv[i]) # Index "i" holds the current argument (if it exists)
# Look for the string "utf8" among the received arguments;
# if it is present, remember that in a "utf8" flag variable
utf8 = False
if "utf8" in argumentos:
	utf8 = True
	argumentos.remove("utf8") # Drop the "utf8" marker from the argument list
# Finally, print the arguments the user passed in
print(u"Argumentos recibidos:")
for i in range(len(argumentos)):
	if utf8:
		# If "utf8" was requested, encode the output
		print("\t",i+1,".",argumentos[i].encode('utf-8'))
	else:
		# Otherwise print the argument as-is
print("\t",i+1,".",argumentos[i]) | [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
197,
31306,
15070,
292,
582,
263,
292,
390,
664,
571,
343,
4175,
32009,
18840,
1619,
514,
84,
4982,
551... | 2.65942 | 690 |