content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from qgis.core import *
import networkx as nx
import network as net
import time
from test.utilities import get_qgis_app
QGIS_APP = get_qgis_app()
inSmallNetworkShp = r'C:\JL\Testing\pyGNAT\NetworkFeatures\In\NHD_SmallNetwork.shp'
inMedNetworkShp = r'C:\JL\Testing\pyGNAT\NetworkFeatures\In\NHD_MedNetwork.shp'
inBraidsShp = r'C:\JL\Testing\pyGNAT\NetworkFeatures\In\NHD_Braids.shp'
inDisconnectedNetworkShp = r'C:\JL\Testing\pyGNAT\NetworkFeatures\In\NHD_Disconnected.shp'
inFlowDirectionShp = r'C:\JL\Testing\pyGNAT\NetworkFeatures\In\NHD_Flow_Direction_small2.shp'
inDuplicatesShp = r'C:\JL\Testing\pyGNAT\NetworkFeatures\In\NHD_Duplicates.shp'
outShp = r'C:\JL\Testing\pyGNAT\NetworkFeatures\Out\test.shp'
outDir = r'C:\JL\Testing\pyGNAT\NetworkFeatures\Out'
# timing
t0 = time.time()
#network_layer = QgsVectorLayer(inBraidsShp, 'inNetwork', 'ogr')
#network_layer = QgsVectorLayer(inMedNetworkShp, 'inNetwork', 'ogr')
#network_layer = QgsVectorLayer(inDisconnectedNetworkShp, 'inNetwork', 'ogr')
#network_layer = QgsVectorLayer(inFlowDirectionShp, 'inNetwork', 'ogr')
#network_layer = QgsVectorLayer(inDuplicatesShp, 'inNetwork', 'ogr')
network_layer = QgsVectorLayer(inSmallNetworkShp, 'inNetwork', 'ogr')
theNetwork = net.Network(network_layer)
list_SG = theNetwork.get_subgraphs()
id_G = theNetwork.calc_network_id(list_SG)
subnet_G = theNetwork.select_by_attribute(id_G, "NetworkID", "net001")
# dupes_G = theNetwork.error_dup(subnet_G)
# source_node = theNetwork.find_node_with_ID(subnet_G, "_FID_", 2)
# flow_error_G = theNetwork.error_flow(subnet_G, source_node)
theNetwork.add_attribute(subnet_G, "edge_type", "connector")
outflow_G = theNetwork.get_outflow_edges(subnet_G, "edge_type", "outflow")
headwater_G = theNetwork.get_headwater_edges(subnet_G, "edge_type", "headwater")
braid_complex_G = theNetwork.get_complex_braids(subnet_G, "edge_type", "braid")
braid_simple_G = theNetwork.get_simple_braids(subnet_G, "edge_type", "braid")
gnat_G = theNetwork.merge_subgraphs(subnet_G,
outflow_G,
headwater_G,
braid_complex_G,
braid_simple_G)
# set node types
theNetwork.set_node_types()
# calculate river kilometers
outflow_G = theNetwork.select_by_attribute(gnat_G, "edge_type", "outflow")
outflow_node = next(v for u, v, key, data in outflow_G.edges_iter(keys=True, data=True))
theNetwork.add_attribute(gnat_G, 'river_km', -9999)
for u,v,key,data in gnat_G.edges_iter(keys=True, data=True):
path_len = nx.shortest_path_length(gnat_G,
source=u,
target=outflow_node,
weight='_calc_len_')
river_km = path_len/1000
data['river_km'] = river_km
theNetwork.streamorder()
theNetwork._nx_to_shp(gnat_G, outDir)
# # stream order
# processed_G = nx.MultiDiGraph()
# theNetwork.add_attribute(gnat_G, 'stream_order', '-9999')
# i = 1
# headwater_G = theNetwork.get_headwater_edges(gnat_G, "edge_type", "headwater")
# for u,v,k,d in gnat_G.edges_iter(data=True, keys=True):
# if headwater_G.has_edge(u, v, key=k):
# gnat_G.add_edge(u,v,key=k, stream_order=i)
# processed_G.add_edge(u,v,key=k)
#
# del u, v, k, d
#
# prev_sel_G = theNetwork.select_by_attribute(theNetwork.gnat_G, "stream_order", i)
# for u,v,k,d in gnat_G.edges_iter(data=True, keys=True):
# if prev_sel_G.has_edge(u, v, key=k):
# out_edges = gnat_G.out_edges(v, data=True, keys=True)
# for e in out_edges:
# if gnat_G.node[e[1]]['node_type'] == 'TC' or gnat_G.node[e[1]]['node_type'] == 'CB':
# gnat_G.edge[e[0]][e[1]][e[2]]['stream_order'] = i+1
# else:
# gnat_G.edge[e[0]][e[1]][e[2]]['stream_order'] = i
# next_sel_G = theNetwork.select_by_attribute(theNetwork.gnat_G, "stream_order", i + 1)
# i += 1
print time.time() - t0, "seconds (wall time)" #print time elapsed in "seconds wall time" | [
6738,
10662,
70,
271,
13,
7295,
1330,
1635,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
3127,
355,
2010,
198,
11748,
640,
198,
198,
6738,
1332,
13,
315,
2410,
1330,
651,
62,
80,
70,
271,
62,
1324,
198,
198,
48,
38,
1797,
62,
... | 2.127435 | 1,899 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from extendes.rom import *
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
9117,
274,
13,
398,
1330,
1635,
628,
628,
628,
198
] | 2.25 | 36 |
import pytest
import py_cui
test_string_A = "Hello world, etc 123 @ testing @ ++-- Test"
test_string_B = " Test string number two Space"
test_string_C = "Hi"
dummy_grid = py_cui.grid.Grid(3,3,30,30)
dummy_widget = py_cui.widgets.Widget('1', 'Test', dummy_grid, 1,1,1,1,1,0)
dummy_renderer = py_cui.renderer.Renderer(None, None)
| [
11748,
12972,
9288,
198,
11748,
12972,
62,
66,
9019,
628,
198,
9288,
62,
8841,
62,
32,
796,
366,
15496,
995,
11,
3503,
17031,
2488,
4856,
2488,
19969,
438,
6208,
1,
198,
9288,
62,
8841,
62,
33,
796,
366,
220,
220,
220,
220,
6208,
... | 2.342657 | 143 |
# Condições aninhadas comparação a repetição aninhada | [
2,
9724,
72,
16175,
127,
113,
274,
281,
259,
18108,
292,
552,
3301,
16175,
28749,
257,
46152,
72,
16175,
28749,
281,
259,
71,
4763
] | 2.208333 | 24 |
# Copyright (c) 2019 ISciences, LLC.
# All rights reserved.
#
# WSIM is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable, List, Optional
from wsim_workflow.config_base import ConfigBase
from wsim_workflow import dates
import wsim_workflow.paths as paths
from forcing.cfsv2 import CFSForecast
from forcing.cpc_global_daily import CPCGlobalDaily
from static.default_static import DefaultStatic
config = CPCConfig
| [
2,
15069,
357,
66,
8,
13130,
3180,
979,
3007,
11,
11419,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
25290,
3955,
318,
11971,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
198,
2,
921,
743,
... | 3.732 | 250 |
import battle_simulation.constants as const
from battle_simulation.Pokemon import Pokemon
from battle_simulation.battle_common import unittest_failure_msg
import unittest
import os
from pathlib import Path
import pandas as pd
import json
import copy
| [
11748,
3344,
62,
14323,
1741,
13,
9979,
1187,
355,
1500,
198,
6738,
3344,
62,
14323,
1741,
13,
48034,
1330,
14878,
198,
6738,
3344,
62,
14323,
1741,
13,
38471,
62,
11321,
1330,
555,
715,
395,
62,
32165,
495,
62,
19662,
198,
11748,
555... | 3.746269 | 67 |
## Program: VMTK
## Language: Python
## Date: January 10, 2018
## Version: 1.4
## Copyright (c) Richard Izzo, Luca Antiga, All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this code was contributed by
## Richard Izzo (Github @rlizzo)
## University at Buffalo
import pytest
import vmtk.vmtkimagevesselenhancement as vesselenhancement
#TODO: VEDM Method is currently diabled in vtkVmtk (vtkvmtkVesselEnhancingDiffusion3DImageFilter). Find a fix or remove.
@pytest.mark.parametrize("enhance_method,paramid", [
('frangi', '0'),
('sato', '1'),
('ved', '2'),
])
@pytest.mark.parametrize("scaled,alpha,beta,gamma,paramid", [
(1, 0.5, 0.5, 5.0, '0'),
(1, 1.5, 0.5, 5.0, '1'),
(1, 0.5, 1.5, 5.0, '2'),
(1, 0.5, 0.5, 8.0, '3'),
(0, 1.5, 0.5, 5.0, '4'),
(0, 0.5, 1.5, 5.0, '5'),
(0, 0.5, 0.5, 8.0, '6'),
])
@pytest.mark.parametrize("alpha1,alpha2,paramid", [
(0.5, 4.5, '0'),
(1.5, 2.0, '1'),
(1.5, 4.5, '2'),
])
@pytest.mark.skip(reason='failing on linux for unknown reason')
@pytest.mark.parametrize("alpha,beta,gamma,c,timestep,epsilon,wstrength,\
sensitivity,numiterations,numdiffusioniterations,paramid", [
(1.5, 0.5, 5.0, 1E-6, 1E-2, 1E-2, 25.0, 5.0, 1, 0, '0'),
(0.5, 1.5, 5.0, 1E-6, 1E-2, 1E-2, 25.0, 5.0, 1, 0, '1'),
(0.5, 0.5, 8.0, 1E-6, 1E-2, 1E-2, 25.0, 5.0, 1, 0, '2'),
(0.5, 0.5, 5.0, 2E-6, 1E-2, 1E-2, 25.0, 5.0, 1, 0, '3'),
(0.5, 0.5, 5.0, 1E-6, 2E-2, 1E-2, 25.0, 5.0, 1, 0, '4'),
(0.5, 0.5, 5.0, 1E-6, 1E-2, 2E-2, 25.0, 5.0, 1, 0, '5'),
(0.5, 0.5, 5.0, 1E-6, 1E-2, 1E-2, 30.0, 5.0, 1, 0, '6'),
(0.5, 0.5, 5.0, 1E-6, 1E-2, 1E-2, 25.0, 8.0, 1, 0, '7'),
(0.5, 0.5, 5.0, 1E-6, 1E-2, 1E-2, 25.0, 5.0, 3, 0, '8'),
(0.5, 0.5, 5.0, 1E-6, 1E-2, 1E-2, 25.0, 5.0, 1, 1, '9'),
]) | [
2235,
6118,
25,
569,
13752,
42,
198,
2235,
15417,
25,
220,
11361,
198,
2235,
7536,
25,
220,
220,
220,
220,
220,
3269,
838,
11,
2864,
198,
2235,
10628,
25,
220,
220,
352,
13,
19,
198,
198,
2235,
220,
220,
15069,
357,
66,
8,
6219,
... | 1.846637 | 1,115 |
import json
from . import exceptions
class CommandsConfigDefaults():
"""Default configuration values"""
json_cfg_file = "config/commands_settings.json"
if __name__ == '__main__':
cfg = CommandsConfig()
print(cfg.get_account_source_infos("al"))
print(cfg.roles.keys())
| [
11748,
33918,
201,
198,
201,
198,
6738,
764,
1330,
13269,
201,
198,
201,
198,
201,
198,
201,
198,
4871,
49505,
16934,
7469,
13185,
33529,
201,
198,
220,
220,
220,
37227,
19463,
8398,
3815,
37811,
201,
198,
220,
220,
220,
33918,
62,
37... | 2.618644 | 118 |
from PIL import Image, ImageDraw
from PIL import ImageFont
from os_tools import tools
def load_img(img_path):
"""Will load image to a variable.
Parameters:
:param img_path: the path to the image file
:return image file to work on
"""
img = Image.open(img_path)
return img.convert('RGBA')
def create_new_image(width,
height,
fixed_background_color=None,
gradient_background_color_start=None,
gradient_background_color_end=None):
"""Will create a new image
Parameters:
:param width: the width of the new image
:param height: the height of the new image
:param fixed_background_color: (optional) a static background color (none for transparent)
:param gradient_background_color_start: (optional) for a gradient background color, this will be the starting color
:param gradient_background_color_end: (optional) for a gradient background color, this will be the ending color
"""
if fixed_background_color is None:
fixed_background_color = (255, 0, 0, 0)
image = Image.new('RGBA', (width, height), fixed_background_color)
else:
image = Image.new('RGBA', (width, height), tools.hex_to_rgb(fixed_background_color))
if gradient_background_color_start is not None and gradient_background_color_end is not None:
image = set_gradient(width, height, gradient_background_color_start, gradient_background_color_end)
return image
def tilt_image(image, degrees):
"""Will tilt an image by degrees
Parameters:
:param image: the image you loaded (from load_img)
:param degrees: the degrees to tilt
:return the tilted image
"""
return image.rotate(degrees, expand=1)
def paste_image(background_img, img_to_paste, x, y):
"""Will paste image on a given background
Parameters:
:param background_img: the img which will be served as the background
(load it from load_img)
:param img_to_paste: the image to paste on the background (load it from load_img)
:param x: the x position in which to paste the image
:param y: the y position in which to paste the image
"""
background_img.paste(img_to_paste, (int(x), int(y)), img_to_paste)
def resize_img_by_height(img, desired_height):
"""Will resize an image by height
Parameters:
:param img: the img which will be resized (load it from load_img)
:param desired_height: the image desired height
:return resized image by height (the width will be resized by ratio)
"""
percent_multiplier = (desired_height / float(img.size[1]))
desired_width = int((float(img.size[0]) * float(percent_multiplier)))
return img.resize((desired_width, desired_height), Image.ANTIALIAS)
def resize_img_by_width(img, desired_width):
"""Will resize an image by width
Parameters:
:param img: the img which will be resized (load it from load_img)
:param desired_width: the image desired width
:return resized image by width (the height will be resized by ratio)
"""
percent_multiplier = (desired_width / float(img.size[0]))
desired_height = int((float(img.size[1]) * float(percent_multiplier)))
return img.resize((desired_width, desired_height), Image.ANTIALIAS)
def resize_img_by_width_and_height(img, desired_width, desired_height):
"""Will resize an image by width and height
Parameters:
:param img: the img which will be resized (load it from load_img)
:param desired_width: the image desired width
:param desired_height: the image desired height
:return resized image by width and height
"""
return img.resize((desired_width, desired_height), Image.ANTIALIAS)
def save_img(img, dest):
"""Will save the image to a given destination
Parameters:
:param img: the image to save
:param dest the path to save the file
"""
img.save(dest, 'PNG')
| [
6738,
350,
4146,
1330,
7412,
11,
7412,
25302,
198,
6738,
350,
4146,
1330,
7412,
23252,
198,
6738,
28686,
62,
31391,
1330,
4899,
628,
198,
4299,
3440,
62,
9600,
7,
9600,
62,
6978,
2599,
198,
220,
220,
220,
37227,
8743,
3440,
2939,
284,... | 2.912177 | 1,355 |
"""
Flask-Arango
-------------
Flask extension that provides integration with the Arango graph database using
the pyArango library. Under initial development.
"""
from setuptools import setup
setup(
name='Flask-Arango',
version='0.1.1',
url='https://github.com/grucin/flask-arango',
license='Apache License, 2.0',
author='Grzegorz Rucinski',
author_email='grucin@gmail.com',
description='Flask extension providing integration with Arango.',
long_description=__doc__,
py_modules=['flask_arango'],
zip_safe=False,
include_package_data=True,
install_requires=[
'Flask >= 0.10',
'pyArango >= 1.0.3'
],
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| [
37811,
198,
7414,
2093,
12,
3163,
14208,
198,
32501,
198,
198,
7414,
2093,
7552,
326,
3769,
11812,
351,
262,
943,
14208,
4823,
6831,
1262,
198,
1169,
12972,
3163,
14208,
5888,
13,
4698,
4238,
2478,
13,
198,
198,
37811,
198,
6738,
900,
... | 2.73031 | 419 |
#!/usr/bin/env python3
from warnings import simplefilter
import numpy
import plotly
from plotly.offline import plot
import plotly.graph_objs as go
import h5py
import click
import colorsys
from md_davis.common.stats import *
class TimeSeries:
"""Create a time series object from data like RMSD and radius of gyration """
def from_xvg(filename):
"""Load data from .xvg file """
pass
def from_hdf5(filename):
"""Load data from HDF5 file """
pass
def continuous_errorbar(x, y, err, name, hover_text=None,
line_color=None, fill_color=None, dash=None, showlegend=True):
""" return continuous errorbar plotly trace """
if not line_color:
line_color = 'rgb(31,119,180)'
if not fill_color:
fill_color = 'rgba(31,119,180,0.3)'
upper_bound = go.Scatter(name=name, x=x, y=y + err, mode='lines',
line=dict(width=0), legendgroup=name, showlegend=False, hoverinfo='none',
fillcolor=fill_color, fill='tonexty')
trace = go.Scatter(name=name, x=x, y=y, mode='lines',
line=dict(color=line_color, dash=dash), showlegend=showlegend,
legendgroup=name, text=hover_text,
hoverinfo='text+y', fillcolor=fill_color, fill='tonexty')
lower_bound = go.Scatter(name=name, x=x, y=y - err, mode='lines',
line=dict(width=0), legendgroup=name, showlegend=False, hoverinfo='none')
# Trace order can be important
# with continuous error bars
return [lower_bound, trace, upper_bound]
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(name='timeseries', context_settings=CONTEXT_SETTINGS)
@click.option('-o', '--output', default='output.html', help='Output file name')
@click.option('-t', '--title', default='', help='Title')
@click.option('-w', '--window', default=100, help='Window size for averaging')
@click.argument('files')
if __name__ == "__main__":
timeseries_plot()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
14601,
1330,
2829,
24455,
198,
11748,
299,
32152,
198,
11748,
7110,
306,
198,
6738,
7110,
306,
13,
2364,
1370,
1330,
7110,
198,
11748,
7110,
306,
13,
34960,
62,
672,
845... | 2.602151 | 744 |
#!/usr/bin/env python
from sulley import *
from ap_requests import *
from optparse import OptionParser
import re
import socket
import struct
import time
# Assume that wireless card is in monitor mode on appropriate channel
# Saves from lot of dependencies (lorcon, pylorcon...)
###############
if __name__ == '__main__':
usage = 'usage: %prog [options]'
parser = OptionParser(usage)
parser.add_option('--sta_mac', dest='sta_mac', help='STA MAC address (fuzzer)')
parser.add_option('--iface', dest='iface', help='injection interface')
parser.add_option('--skip', dest='skip', help='skip tests (int)', type='int', default=0)
parser.add_option('--ssid', dest='ssid', help='AP ssid (fuzzed)')
parser.add_option('--ap_mac', dest='ap_mac', help='AP MAC address (fuzzed)')
parser.add_option('--channel', dest='channel', help='AP channel (fuzzed)', type=int)
parser.add_option('--ap_config', dest='ap_config', help='AP config: Open, WPA-PSK, WPA-EAP, RSN-PSK, RSN-EAP')
parser.add_option('--save', dest='save', help='save results', action='store_true', default=False)
parser.add_option('--truncate', dest='truncate', help='truncate frames option', action='store_true', default=False)
parser.add_option('--crash_retries', dest='crash_retries', type=int, default=10)
parser.add_option('--delay', dest='delay', type=int, default=1)
parser.add_option('--delay_reboot', dest='delay_reboot', type=int, default=10)
parser.add_option('--state_wait_time', dest='state_wait_time', type=int, default=2)
parser.add_option('--log_level', dest='log_level', type=int, default=3)
parser.add_option('--crash_threshold', dest='crash_threshold', type=int, default=3)
parser.add_option('--fname', dest='fname', help='defining saved results file (conjointly with --save)', default=None)
(options, args) = parser.parse_args()
if not options.sta_mac:
parser.error('STA MAC address must be set')
if not re.search(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', options.sta_mac, re.I).group():
parser.error('STA MAC address invalid format')
if not options.iface:
parser.error('injection interface must be set')
if not options.ssid:
parser.error('AP ssid must be set')
if len(options.ssid) > 32:
parser.error('AP ssid must be <= 32 characters')
if not options.ap_mac:
parser.error('AP MAC address must be set')
if not re.search(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', options.ap_mac, re.I).group():
parser.error('AP MAC address invalid format')
if not options.channel:
parser.error('AP channel must be set')
if not options.ap_config:
parser.error('AP config must be set')
if options.ap_config not in [ 'Open', 'WPA-PSK', 'WPA-EAP', 'RSN-PSK', 'RSN-EAP' ]:
parser.error('AP incorrect configuration')
if options.save:
if options.fname:
FNAME = fname
else:
FNAME = 'audits/ap-%s-%s.session' % (options.ap_mac, options.ap_config)
STA_MAC = options.sta_mac
IFACE = options.iface
SAVE_RESULTS = options.save
SKIP = options.skip
SSID = options.ssid
AP_MAC = options.ap_mac
CHANNEL = options.channel
AP_CONFIG = options.ap_config
CRASH_RETRIES = options.crash_retries
DELAY = options.delay
STATE_WAIT_TIME = options.state_wait_time
DELAY_REBOOT = options.delay_reboot
LOG_LEVEL = options.log_level
CRASH_THRESHOLD = options.crash_threshold
TRUNCATE = options.truncate
fuzz_ap()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
33154,
1636,
1330,
1635,
198,
6738,
2471,
62,
8897,
3558,
1330,
1635,
198,
6738,
2172,
29572,
1330,
16018,
46677,
198,
198,
11748,
302,
198,
11748,
17802,
198,
11748,
2878,
19... | 2.520227 | 1,409 |
"""test page for Flask and Heroku"""
from flask import Flask, render_template
app = Flask(__name__)
# make the application
#APP = Flask(__name__)
# app = Flask(__name__)
# make the route
@app.route("/")
# # make second route
# @app.route("/about")
# # func for about
# def preds():
# return render_template('about.html') | [
37811,
9288,
2443,
329,
46947,
290,
2332,
11601,
37811,
198,
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
628,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
2,
787,
262,
3586,
198,
2,
24805,
796,
46947,
7,
834,
3672,
834,
8,
... | 2.857143 | 119 |
#!/usr/bin/env python
#棄却法でオラクル分布から重みを生成する
import numpy as np
import numpy.matlib
import random
#活性化関数
with open('./temp/setting.py') as f:
exec(f.read())
#ファイル読み込み
dist = np.loadtxt('./output/numerical-ridgelet.txt', usecols = range(J))
Max = np.max(np.abs(dist))
Min = np.min(np.abs(dist))
j = 0
tries = 0
a = np.zeros(number_of_hidden_nodes) #a_j
b = np.zeros(number_of_hidden_nodes) #b_j
c = np.zeros(number_of_hidden_nodes) #c_j
#棄却法で(a_j, b_j)を生成する
print('oracle sampling...')
while(True):
tries += 1
ra = random.randint(0, I - 1)
rb = random.randint(0, J - 1)
r = random.uniform(Min, Max)
if(r <= abs(dist[ra][rb])):
a[j] = -30 + ra * Delta_a
b[j] = -30 + rb * Delta_b
j += 1
if(j >= number_of_hidden_nodes):
print("done. ({0} times tried)".format(tries))
break
#最小二乗法でc_jを求める
x = np.linspace(-1, 1, N, dtype = np.float32)
xs3d = np.kron(np.ones([number_of_hidden_nodes, number_of_hidden_nodes, 1]), x) #(x_s)_ijs
ai3d = np.kron(np.ones([1, number_of_hidden_nodes, N]), a.reshape(number_of_hidden_nodes, 1, 1)) #(a_i)_ijs
bi3d = np.kron(np.ones([1, number_of_hidden_nodes, N]), b.reshape(number_of_hidden_nodes, 1, 1)) #(b_i)_ijs
aj3d = np.kron(np.ones([number_of_hidden_nodes, 1, N]), a.reshape(1, number_of_hidden_nodes, 1)) #(a_j)_ijs
bj3d = np.kron(np.ones([number_of_hidden_nodes, 1, N]), b.reshape(1, number_of_hidden_nodes, 1)) #(b_j)_ijs
A = np.sum(eta(ai3d * xs3d - bi3d) * eta(aj3d * xs3d - bj3d), axis = 2) #s軸で総和を取る
xs2d = np.matlib.repmat(x, number_of_hidden_nodes, 1) #(x_s)_is
ai2d = np.matlib.repmat(a, N, 1).transpose() #(a_i)_is
bi2d = np.matlib.repmat(b, N, 1).transpose() #(b_i)_is
vec_b = np.sum(f(xs2d) * eta(ai2d * xs2d - bi2d), axis = 1) #s軸で総和を取る
c = np.linalg.inv(A).dot(vec_b) #連立一次方程式を解く
print('writing to ./output/oracle-sampled-weight.txt')
with open("./output/oracle-sampled-weight.txt", mode = 'w') as f:
for j in range(number_of_hidden_nodes):
f.write("{0:.6f} {1:.6f} {2:.6f}\n".format(a[j], b[j], c[j]))
print('done.')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
162,
96,
226,
39355,
112,
37345,
243,
30640,
20513,
9263,
14099,
9202,
26344,
228,
30585,
225,
27370,
36853,
34932,
235,
2515,
123,
31758,
37955,
22755,
238,
33623,
25748,
198,
1... | 1.83871 | 1,085 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main experiment file for the StreetLearn agent, based on an implementation of
Importance Weighted Actor-Learner Architectures.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
See https://arxiv.org/abs/1802.01561 for the full paper.
Note that this derives from code previously published by Lasse Espeholt
under an Apache license at:
https://github.com/deepmind/scalable_agent
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import functools
import os
import sys
import time
import numpy as np
from six.moves import range
import sonnet as snt
import tensorflow as tf
from streetlearn.python.agents import goal_nav_agent
from streetlearn.python.agents import city_nav_agent
from streetlearn.python.scalable_agent import py_process
from streetlearn.python.scalable_agent import vtrace
from streetlearn.python.environment import default_config
from streetlearn.python.environment import streetlearn
nest = tf.contrib.framework.nest
flags = tf.app.flags
FLAGS = tf.app.flags.FLAGS
flags.DEFINE_string('logdir', '/tmp/agent', 'TensorFlow log directory.')
flags.DEFINE_enum('mode', 'train', ['train', 'test'], 'Training or test mode.')
# Flags used for testing.
flags.DEFINE_integer('test_num_episodes', 10, 'Number of episodes per level.')
# Flags used for distributed training.
flags.DEFINE_integer('task', -1, 'Task id. Use -1 for local training.')
flags.DEFINE_enum('job_name', 'learner', ['learner', 'actor'],
'Job name. Ignored when task is set to -1.')
flags.DEFINE_string('master', '', 'Session master.')
# Training.
flags.DEFINE_integer('total_environment_frames', int(1e9),
'Total environment frames to train for.')
flags.DEFINE_integer('num_actors', 1, 'Number of actors.')
flags.DEFINE_integer('batch_size', 1, 'Batch size for training.')
flags.DEFINE_integer('unroll_length', 50, 'Unroll length in agent steps.')
flags.DEFINE_integer('seed', 1, 'Random seed.')
# Loss settings.
flags.DEFINE_float('entropy_cost', 0.00025, 'Entropy cost/multiplier.')
flags.DEFINE_float('baseline_cost', .5, 'Baseline cost/multiplier.')
flags.DEFINE_float('discounting', .99, 'Discounting factor.')
flags.DEFINE_enum('reward_clipping', 'abs_one', ['abs_one', 'soft_asymmetric'],
'Reward clipping.')
flags.DEFINE_float('heading_prediction_cost', 1.0,
'Auxiliary cost/multiplier for heading prediction.')
flags.DEFINE_float('xy_prediction_cost', 1.0,
'Auxiliary cost/multiplier for XY position prediction.')
flags.DEFINE_float('target_xy_prediction_cost', 1.0,
'Auxiliary cost/multiplier for XY target prediction.')
# Environment settings.
flags.DEFINE_string('game_name', 'curriculum_courier_game',
'Game name for the StreetLearn agent.')
flags.DEFINE_string('level_names', 'manhattan_lowres',
'Lavel name for the StreetLearn agent.')
flags.DEFINE_string('dataset_paths', None, 'Path were the levels are stored.')
flags.DEFINE_integer('width', 84, 'Width of observation.')
flags.DEFINE_integer('height', 84, 'Height of observation.')
flags.DEFINE_integer('graph_width', 84, 'Width of graph visualisation.')
flags.DEFINE_integer('graph_height', 84, 'Height of graph visualisation.')
flags.DEFINE_integer('graph_zoom', 1, 'Zoom in graph visualisation.')
flags.DEFINE_string('start_pano', '',
'Pano at root of partial graph (default: full graph).')
flags.DEFINE_integer('graph_depth', 200, 'Depth of the pano graph.')
flags.DEFINE_integer('frame_cap', 1000, 'Number of frames / episode.')
flags.DEFINE_string('action_set', 'streetlearn_fast_rotate',
'Set of actions used by the agent.')
flags.DEFINE_float('rotation_speed', 22.5,
'Rotation speed of the actor.')
flags.DEFINE_string('observations',
'view_image;graph_image;latlng;target_latlng;yaw;yaw_label;'
'latlng_label;target_latlng_label',
'Observations used by the agent.')
flags.DEFINE_float('timestamp_start_curriculum', time.time(),
'Timestamp at the start of the curriculum.')
flags.DEFINE_float('hours_curriculum_part_1', 0.0,
'Number of hours for 1st part of curriculum.')
flags.DEFINE_float('hours_curriculum_part_2', 24.0,
'Number of hours for 2nd part of curriculum.')
flags.DEFINE_float('min_goal_distance_curriculum', 500.0,
'Maximum distance to goal at beginning of curriculum.')
flags.DEFINE_float('max_goal_distance_curriculum', 3500.0,
'Maximum distance to goal at end of curriculum.')
flags.DEFINE_float('bbox_lat_min', 0, 'Minimum latitude.')
flags.DEFINE_float('bbox_lat_max', 100, 'Maximum latitude.')
flags.DEFINE_float('bbox_lng_min', 0, 'Minimum longitude.')
flags.DEFINE_float('bbox_lng_max', 100, 'Maximum longitude.')
flags.DEFINE_float('min_radius_meters', 100.0, 'Radius of goal area.')
flags.DEFINE_float('max_radius_meters', 200.0, 'Radius of early rewards.')
flags.DEFINE_float('proportion_of_panos_with_coins', 0, 'Proportion of coins.')
# Agent settings.
flags.DEFINE_string('agent', 'city_nav_agent', 'Agent name.')
# Optimizer settings.
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate.')
flags.DEFINE_float('decay', .99, 'RMSProp optimizer decay.')
flags.DEFINE_float('momentum', 0., 'RMSProp momentum.')
flags.DEFINE_float('epsilon', .1, 'RMSProp epsilon.')
# Structure to be sent from actors to learner.
ActorOutput = collections.namedtuple(
'ActorOutput', 'level_name agent_state env_outputs agent_outputs')
AgentOutput = collections.namedtuple('AgentOutput',
'action policy_logits baseline heading')
StepOutputInfo = collections.namedtuple('StepOutputInfo',
'episode_return episode_step')
StepOutput = collections.namedtuple('StepOutput',
'reward info done observation')
class FlowEnvironment(object):
"""An environment that returns a new state for every modifying method.
The environment returns a new environment state for every modifying action and
forces previous actions to be completed first. Similar to `flow` for
`TensorArray`.
Note that this is a copy of the code previously published by Lasse Espeholt
under an Apache license at:
https://github.com/deepmind/scalable_agent
"""
def __init__(self, env):
"""Initializes the environment.
Args:
env: An environment with `initial()` and `step(action)` methods where
`initial` returns the initial observations and `step` takes an action
and returns a tuple of (reward, done, observation). `observation`
should be the observation after the step is taken. If `done` is
True, the observation should be the first observation in the next
episode.
"""
self._env = env
def initial(self):
"""Returns the initial output and initial state.
Returns:
A tuple of (`StepOutput`, environment state). The environment state should
be passed in to the next invocation of `step` and should not be used in
any other way. The reward and transition type in the `StepOutput` is the
reward/transition type that lead to the observation in `StepOutput`.
"""
with tf.name_scope('flow_environment_initial'):
initial_reward = tf.constant(0.)
initial_info = StepOutputInfo(tf.constant(0.), tf.constant(0))
initial_done = tf.constant(True)
initial_observation = self._env.initial()
initial_output = StepOutput(initial_reward, initial_info, initial_done,
initial_observation)
# Control dependency to make sure the next step can't be taken before the
# initial output has been read from the environment.
with tf.control_dependencies(nest.flatten(initial_output)):
initial_flow = tf.constant(0, dtype=tf.int64)
initial_state = (initial_flow, initial_info)
return initial_output, initial_state
def step(self, action, state):
"""Takes a step in the environment.
Args:
action: An action tensor suitable for the underlying environment.
state: The environment state from the last step or initial state.
Returns:
A tuple of (`StepOutput`, environment state). The environment state should
be passed in to the next invocation of `step` and should not be used in
any other way. On episode end (i.e. `done` is True), the returned reward
should be included in the sum of rewards for the ending episode and not
part of the next episode.
"""
with tf.name_scope('flow_environment_step'):
flow, info = nest.map_structure(tf.convert_to_tensor, state)
# Make sure the previous step has been executed before running the next
# step.
with tf.control_dependencies([flow]):
reward, done, observation = self._env.step(action)
with tf.control_dependencies(nest.flatten(observation)):
new_flow = tf.add(flow, 1)
# When done, include the reward in the output info but not in the
# state for the next step.
new_info = StepOutputInfo(info.episode_return + reward,
info.episode_step + 1)
new_state = new_flow, nest.map_structure(
lambda a, b: tf.where(done, a, b),
StepOutputInfo(tf.constant(0.), tf.constant(0)), new_info)
output = StepOutput(reward, new_info, done, observation)
return output, new_state
def build_actor(agent, env, level_name, action_set):
  """Builds the actor loop.

  Unrolls the agent/environment interaction for `FLAGS.unroll_length` steps
  with `tf.scan`, persisting the final env/agent states and outputs in
  variables so the next unroll continues where this one stopped.

  Args:
    agent: Recurrent agent, called as `agent((action, env_output), state)`.
    env: Environment object exposing `initial()` and `step(action, state)`.
    level_name: Name of the level; passed through into the `ActorOutput`.
    action_set: Sequence of native environment actions; the agent's action
      output indexes into it.

  Returns:
    An `ActorOutput` named tuple with `tf.stop_gradient` applied to every
    field; the env/agent output fields include the initial (carried-over)
    step followed by the unroll.
  """
  # Initial values.
  initial_env_output, initial_env_state = env.initial()
  initial_agent_state = agent.initial_state(1)
  initial_action = tf.zeros([1], dtype=tf.int32)
  dummy_agent_output, _ = agent(
      (initial_action,
       nest.map_structure(lambda t: tf.expand_dims(t, 0), initial_env_output)),
      initial_agent_state)
  initial_agent_output = nest.map_structure(
      lambda t: tf.zeros(t.shape, t.dtype), dummy_agent_output)
  # All state that needs to persist across training iterations. This includes
  # the last environment output, agent state and last agent output. These
  # variables should never go on the parameter servers.
  persistent_state = nest.map_structure(
      create_state, (initial_env_state, initial_env_output, initial_agent_state,
                     initial_agent_output))
  def step(input_, unused_i):
    """Steps through the agent and the environment."""
    env_state, env_output, agent_state, agent_output = input_
    # Run agent.
    action = agent_output[0]
    batched_env_output = nest.map_structure(lambda t: tf.expand_dims(t, 0),
                                            env_output)
    agent_output, agent_state = agent((action, batched_env_output), agent_state)
    # Convert action index to the native action.
    action = agent_output[0][0]
    raw_action = tf.gather(action_set, action)
    env_output, env_state = env.step(raw_action, env_state)
    return env_state, env_output, agent_state, agent_output
  # Run the unroll. `read_value()` is needed to make sure later usage will
  # return the first values and not a new snapshot of the variables.
  first_values = nest.map_structure(lambda v: v.read_value(), persistent_state)
  _, first_env_output, first_agent_state, first_agent_output = first_values
  # Use scan to apply `step` multiple times, therefore unrolling the agent
  # and environment interaction for `FLAGS.unroll_length`. `tf.scan` forwards
  # the output of each call of `step` as input of the subsequent call of `step`.
  # The unroll sequence is initialized with the agent and environment states
  # and outputs as stored at the end of the previous unroll.
  # `output` stores lists of all states and outputs stacked along the entire
  # unroll. Note that the initial states and outputs (fed through `initializer`)
  # are not in `output` and will need to be added manually later.
  output = tf.scan(step, tf.range(FLAGS.unroll_length), first_values)
  _, env_outputs, _, agent_outputs = output
  # Update persistent state with the last output from the loop.
  assign_ops = nest.map_structure(lambda v, t: v.assign(t[-1]),
                                  persistent_state, output)
  # The control dependency ensures that the final agent and environment states
  # and outputs are stored in `persistent_state` (to initialize next unroll).
  with tf.control_dependencies(nest.flatten(assign_ops)):
    # Remove the batch dimension from the agent state/output.
    first_agent_state = nest.map_structure(lambda t: t[0], first_agent_state)
    first_agent_output = nest.map_structure(lambda t: t[0], first_agent_output)
    agent_outputs = nest.map_structure(lambda t: t[:, 0], agent_outputs)
    # Concatenate first output and the unroll along the time dimension.
    full_agent_outputs, full_env_outputs = nest.map_structure(
        lambda first, rest: tf.concat([[first], rest], 0),
        (first_agent_output, first_env_output), (agent_outputs, env_outputs))
    output = ActorOutput(
        level_name=level_name, agent_state=first_agent_state,
        env_outputs=full_env_outputs, agent_outputs=full_agent_outputs)
    # No backpropagation should be done here.
    return nest.map_structure(tf.stop_gradient, output)
def plot_logits_2d(logits, num_x, num_y):
  """Reshape flat logits into image-shaped tensors for TensorBoard.

  The logits are laid out as a [batch, num_y, num_x] grid, flipped along the
  vertical axis, and given singleton dimensions at axis 1 and the channel
  axis so they can be fed to `tf.summary.image`.
  """
  grid = tf.reshape(logits, shape=[-1, num_y, num_x])
  flipped = grid[:, ::-1, :]
  return tf.expand_dims(tf.expand_dims(flipped, 1), -1)
def build_learner(agent, agent_state, env_outputs, agent_outputs):
  """Builds the learner loop.

  Computes V-trace returns, the RL losses (policy gradient, baseline,
  entropy) plus optional auxiliary prediction losses, creates the RMSProp
  update op, and registers TensorBoard summaries.

  Args:
    agent: A snt.RNNCore module outputting `AgentOutput` named tuples, with an
      `unroll` call for computing the outputs for a whole trajectory.
    agent_state: The initial agent state for each sequence in the batch.
    env_outputs: A `StepOutput` namedtuple where each field is of shape
      [T+1, ...].
    agent_outputs: An `AgentOutput` namedtuple where each field is of shape
      [T+1, ...].

  Returns:
    A tuple of (done, infos, and environment frames) where
    the environment frames tensor causes an update.

  Raises:
    ValueError: If `FLAGS.reward_clipping` is not a known scheme.
  """
  learner_outputs, _ = agent.unroll(agent_outputs.action, env_outputs,
                                    agent_state)
  # Use last baseline value (from the value function) to bootstrap.
  bootstrap_value = learner_outputs.baseline[-1]
  # At this point, the environment outputs at time step `t` are the inputs that
  # lead to the learner_outputs at time step `t`. After the following shifting,
  # the actions in agent_outputs and learner_outputs at time step `t` is what
  # leads to the environment outputs at time step `t`.
  agent_outputs = nest.map_structure(lambda t: t[1:], agent_outputs)
  rewards, infos, done, observations = nest.map_structure(
      lambda t: t[1:], env_outputs)
  learner_outputs = nest.map_structure(lambda t: t[:-1], learner_outputs)
  observation_names = FLAGS.observations.split(';')
  if FLAGS.reward_clipping == 'abs_one':
    clipped_rewards = tf.clip_by_value(rewards, -1, 1)
  elif FLAGS.reward_clipping == 'soft_asymmetric':
    squeezed = tf.tanh(rewards / 5.0)
    # Negative rewards are given less weight than positive rewards.
    clipped_rewards = tf.where(rewards < 0, .3 * squeezed, squeezed) * 5.
  else:
    # Fail fast on an unknown scheme. Previously this fell through and
    # crashed below with a NameError on `clipped_rewards`.
    raise ValueError('Unknown reward_clipping scheme: %s' %
                     FLAGS.reward_clipping)
  discounts = tf.to_float(~done) * FLAGS.discounting
  # Compute V-trace returns and weights.
  # Note, this is put on the CPU because it's faster than on GPU. It can be
  # improved further with XLA-compilation or with a custom TensorFlow operation.
  with tf.device('/cpu'):
    vtrace_returns = vtrace.from_logits(
        behaviour_policy_logits=agent_outputs.policy_logits,
        target_policy_logits=learner_outputs.policy_logits,
        actions=agent_outputs.action,
        discounts=discounts,
        rewards=clipped_rewards,
        values=learner_outputs.baseline,
        bootstrap_value=bootstrap_value)
  # Compute loss as a weighted sum of the baseline loss, the policy gradient
  # loss and an entropy regularization term.
  rl_loss_policy_gradient = compute_policy_gradient_loss(
      learner_outputs.policy_logits, agent_outputs.action,
      vtrace_returns.pg_advantages)
  rl_loss_baseline = FLAGS.baseline_cost * compute_baseline_loss(
      vtrace_returns.vs - learner_outputs.baseline)
  rl_loss_entropy = FLAGS.entropy_cost * compute_entropy_loss(
      learner_outputs.policy_logits)
  total_loss = rl_loss_policy_gradient + rl_loss_baseline + rl_loss_entropy
  # Add auxiliary loss for heading prediction.
  if 'yaw_label' in observation_names:
    idx_yaw_label = observation_names.index('yaw_label')
    yaw_logits = learner_outputs.heading
    yaw_labels = tf.cast(observations[idx_yaw_label], dtype=tf.int32)
    heading_loss = FLAGS.heading_prediction_cost * compute_classification_loss(
        yaw_logits, yaw_labels)
    total_loss += heading_loss
  # Add auxiliary loss for XY position and XY target position prediction.
  if 'latlng_label' in observation_names:
    idx_latlng_label = observation_names.index('latlng_label')
    xy_logits = learner_outputs.xy
    xy_labels = tf.cast(observations[idx_latlng_label], dtype=tf.int32)
    xy_loss = FLAGS.xy_prediction_cost * compute_classification_loss(
        xy_logits, xy_labels)
    total_loss += xy_loss
  if 'target_latlng_label' in observation_names:
    idx_target_latlng_label = observation_names.index('target_latlng_label')
    target_xy_logits = learner_outputs.target_xy
    target_xy_labels = tf.cast(observations[idx_target_latlng_label],
                               dtype=tf.int32)
    target_xy_loss = (
        FLAGS.target_xy_prediction_cost * compute_classification_loss(
            target_xy_logits, target_xy_labels))
    total_loss += target_xy_loss
  # Optimization: learning rate is annealed linearly to 0 over the whole run.
  num_env_frames = tf.train.get_global_step()
  learning_rate = tf.train.polynomial_decay(FLAGS.learning_rate, num_env_frames,
                                            FLAGS.total_environment_frames, 0)
  optimizer = tf.train.RMSPropOptimizer(learning_rate, FLAGS.decay,
                                        FLAGS.momentum, FLAGS.epsilon)
  train_op = optimizer.minimize(total_loss)
  # Merge updating the network and environment frames into a single tensor.
  with tf.control_dependencies([train_op]):
    num_env_frames_and_train = num_env_frames.assign_add(
        FLAGS.batch_size * FLAGS.unroll_length)
  # Adding a few summaries: RL losses and actions.
  tf.summary.scalar('learning_rate', learning_rate)
  tf.summary.scalar('rl_loss_policy_gradient',
                    rl_loss_policy_gradient)
  tf.summary.scalar('rl_loss_baseline', rl_loss_baseline)
  tf.summary.scalar('rl_loss_entropy', rl_loss_entropy)
  if 'yaw_label' in observation_names:
    tf.summary.scalar('heading_loss', heading_loss)
  if 'latlng_label' in observation_names:
    tf.summary.scalar('xy_loss', xy_loss)
  if 'target_latlng_label' in observation_names:
    tf.summary.scalar('target_xy_loss', target_xy_loss)
  tf.summary.scalar('total_loss', total_loss)
  tf.summary.histogram('action', agent_outputs.action)
  # Adding a few summaries: agent's view and graph.
  idx_frame = observation_names.index('view_image')
  frame = observations[idx_frame]
  tf.summary.image('frame', frame[:3, 0, :, :, :])
  idx_graph = observation_names.index('graph_image')
  street_graph = observations[idx_graph]
  tf.summary.image('street_graph', street_graph[:3, 0, :, :, :])
  # Adding a few summaries: current and target lat/lng.
  idx_latlng = observation_names.index('latlng')
  latlng = observations[idx_latlng]
  tf.summary.histogram('current_lat', latlng[:, 0, 0])
  tf.summary.histogram('current_lng', latlng[:, 0, 1])
  idx_target_latlng = observation_names.index('target_latlng')
  target_latlng = observations[idx_target_latlng]
  # NOTE(review): leftover debug op — tf.Print dumps target_latlng to stderr
  # every step; confirm it is still wanted.
  target_latlng = tf.Print(target_latlng, [target_latlng])
  tf.summary.histogram('target_lat', target_latlng[:, 0, 0])
  tf.summary.histogram('target_lng', target_latlng[:, 0, 1])
  # Adding a few summaries: yaw.
  if 'yaw' in observation_names:
    idx_yaw = observation_names.index('yaw')
    yaw = observations[idx_yaw]
    tf.summary.histogram('yaw', yaw[:, 0])
  # Adding a few summaries: heading prediction.
  if 'yaw_label' in observation_names:
    img_yaw_labels = tf.expand_dims(
        tf.expand_dims(tf.one_hot(tf.cast(yaw_labels, tf.int32), 16), 1), -1)
    img_yaw_logits = tf.expand_dims(
        tf.expand_dims(tf.nn.softmax(tf.cast(yaw_logits, tf.float32)), 1), -1)
    tf.summary.image("yaw_labels", img_yaw_labels[:, :, 0, :, :])
    tf.summary.image("yaw_logits", img_yaw_logits[:, :, 0, :, :])
  # Adding a few summaries: XY position prediction.
  if 'latlng_label' in observation_names:
    img_xy_labels = plot_logits_2d(
        tf.one_hot(tf.cast(xy_labels[:, 0], tf.int32), 32*32), 32, 32)
    img_xy_logits = plot_logits_2d(
        tf.nn.softmax(tf.cast(xy_logits[:, 0, :], tf.float32)), 32, 32)
    tf.summary.image("xy_labels", img_xy_labels[:, 0, :, :, :])
    tf.summary.image("xy_logits", img_xy_logits[:, 0, :, :, :])
  # Adding a few summaries: target XY position prediction.
  if 'target_latlng_label' in observation_names:
    img_target_xy_labels = plot_logits_2d(
        tf.one_hot(tf.cast(target_xy_labels[:, 0], tf.int32), 32*32), 32, 32)
    img_target_xy_logits = plot_logits_2d(
        tf.nn.softmax(tf.cast(target_xy_logits, tf.float32)), 32, 32)
    tf.summary.image("target_xy_labels", img_target_xy_labels[:, 0, :, :, :])
    tf.summary.image("target_xy_logits", img_target_xy_logits[:, 0, :, :, :])
  return done, infos, num_env_frames_and_train
def create_environment(level_name, seed, is_test=False):
  """Creates an environment wrapped in a `FlowEnvironment`.

  Args:
    level_name: Name of the level; appended to FLAGS.dataset_paths to locate
      the dataset on disk.
    seed: Random seed. NOTE(review): currently unused in this function —
      confirm the underlying environment is meant to receive it.
    is_test: Whether this is a test environment. NOTE(review): also unused
      here at the moment.

  Returns:
    A `FlowEnvironment` wrapping a `StreetLearnImpalaAdapter` that runs in
    its own process.
  """
  requested_observations = FLAGS.observations.split(';')
  tf.logging.info('Observations requested:')
  tf.logging.info(requested_observations)
  # Assemble the game configuration from the command-line flags.
  config = {
      'status_height': 0,
      'width': FLAGS.width,
      'height': FLAGS.height,
      'graph_width': FLAGS.graph_width,
      'graph_height': FLAGS.graph_height,
      'graph_zoom': FLAGS.graph_zoom,
      'game_name': FLAGS.game_name,
      'goal_timeout': FLAGS.frame_cap,
      'frame_cap': FLAGS.frame_cap,
      'full_graph': (FLAGS.start_pano == ''),
      'start_pano': FLAGS.start_pano,
      'min_graph_depth': FLAGS.graph_depth,
      'max_graph_depth': FLAGS.graph_depth,
      'proportion_of_panos_with_coins':
          FLAGS.proportion_of_panos_with_coins,
      'timestamp_start_curriculum': FLAGS.timestamp_start_curriculum,
      'hours_curriculum_part_1': FLAGS.hours_curriculum_part_1,
      'hours_curriculum_part_2': FLAGS.hours_curriculum_part_2,
      'min_goal_distance_curriculum': FLAGS.min_goal_distance_curriculum,
      'max_goal_distance_curriculum': FLAGS.max_goal_distance_curriculum,
      'observations': requested_observations,
      'bbox_lat_min': FLAGS.bbox_lat_min,
      'bbox_lat_max': FLAGS.bbox_lat_max,
      'bbox_lng_min': FLAGS.bbox_lng_min,
      'bbox_lng_max': FLAGS.bbox_lng_max,
      'min_radius_meters': FLAGS.min_radius_meters,
      'max_radius_meters': FLAGS.max_radius_meters,
  }
  config = default_config.ApplyDefaults(config)
  tf.logging.info(config)
  game = default_config.CreateGame(config['game_name'], config)
  dataset_path = FLAGS.dataset_paths + '/' + level_name
  tf.logging.info(dataset_path)
  # Run the StreetLearn adapter in a separate process; talk to it via proxy.
  worker = py_process.PyProcess(
      StreetLearnImpalaAdapter, dataset_path, config, game)
  return FlowEnvironment(worker.proxy)
@contextlib.contextmanager
def pin_global_variables(device):
  """Pins global variables to the specified device.

  Opens a variable scope whose custom getter places every variable belonging
  to the GLOBAL_VARIABLES collection on `device`, leaving other (e.g. local)
  variables where they would normally go.

  Args:
    device: TensorFlow device string, e.g. '/job:learner/task:0/cpu'.

  Yields:
    The variable scope with the pinning getter installed.
  """
  # Bug fix: `getter` was referenced but never defined, so entering this
  # context manager raised a NameError at graph-construction time.
  def getter(getter, *args, **kwargs):
    var_collections = kwargs.get('collections', None)
    if var_collections is None:
      # tf.get_variable defaults to the GLOBAL_VARIABLES collection.
      var_collections = [tf.GraphKeys.GLOBAL_VARIABLES]
    if tf.GraphKeys.GLOBAL_VARIABLES in var_collections:
      with tf.device(device):
        return getter(*args, **kwargs)
    else:
      return getter(*args, **kwargs)
  with tf.variable_scope('', custom_getter=getter) as vs:
    yield vs
def create_agent(num_actions):
  """Instantiate the agent selected by FLAGS.agent.

  Args:
    num_actions: Size of the discrete action space.

  Returns:
    A `CityNavAgent` when FLAGS.agent is 'city_nav_agent', otherwise a
    `GoalNavAgent`.
  """
  assert FLAGS.agent in ['goal_nav_agent', 'city_nav_agent']
  if FLAGS.agent == 'city_nav_agent':
    agent_cls = city_nav_agent.CityNavAgent
  else:
    agent_cls = goal_nav_agent.GoalNavAgent
  return agent_cls(num_actions, observation_names=FLAGS.observations)
def train(action_set, level_names):
  """Train.

  Builds the distributed actor/learner graph and then runs either the
  learner loop (dequeue batches, update weights, log episode returns) or an
  actor loop (step the environment and enqueue trajectories), depending on
  this task's role.

  Args:
    action_set: Sequence of native environment actions; agents emit indices
      into this set.
    level_names: Names of the levels; actor i runs level
      `level_names[i % len(level_names)]`.
  """
  if is_single_machine():
    local_job_device = ''
    shared_job_device = ''
    is_actor_fn = lambda i: True
    is_learner = True
    global_variable_device = '/gpu'
    server = tf.train.Server.create_local_server()
    server_target = FLAGS.master
    filters = []
  else:
    local_job_device = '/job:%s/task:%d' % (FLAGS.job_name, FLAGS.task)
    shared_job_device = '/job:learner/task:0'
    is_actor_fn = lambda i: FLAGS.job_name == 'actor' and i == FLAGS.task
    is_learner = FLAGS.job_name == 'learner'
    # Placing the variable on CPU, makes it cheaper to send it to all the
    # actors. Continual copying the variables from the GPU is slow.
    global_variable_device = shared_job_device + '/cpu'
    # One local port per actor starting at 8001; the learner is on 8000.
    cluster = tf.train.ClusterSpec({
        'actor': ['localhost:%d' % (8001 + i) for i in range(FLAGS.num_actors)],
        'learner': ['localhost:8000']
    })
    server = tf.train.Server(cluster, job_name=FLAGS.job_name,
                             task_index=FLAGS.task)
    server_target = server.target
    filters = [shared_job_device, local_job_device]
  # Only used to find the actor output structure.
  with tf.Graph().as_default():
    agent = create_agent(len(action_set))
    env = create_environment(level_names[0], seed=1)
    structure = build_actor(agent, env, level_names[0], action_set)
    flattened_structure = nest.flatten(structure)
    dtypes = [t.dtype for t in flattened_structure]
    shapes = [t.shape.as_list() for t in flattened_structure]
  with tf.Graph().as_default(), \
       tf.device(local_job_device + '/cpu'), \
       pin_global_variables(global_variable_device):
    tf.set_random_seed(FLAGS.seed)  # Makes initialization deterministic.
    # Create Queue and Agent on the learner.
    with tf.device(shared_job_device):
      queue = tf.FIFOQueue(1, dtypes, shapes, shared_name='buffer')
      agent = create_agent(len(action_set))
    # Build actors and ops to enqueue their output.
    enqueue_ops = []
    for i in range(FLAGS.num_actors):
      if is_actor_fn(i):
        level_name = level_names[i % len(level_names)]
        tf.logging.info('Creating actor %d with level %s', i, level_name)
        env = create_environment(level_name, seed=i + 1)
        actor_output = build_actor(agent, env, level_name, action_set)
        with tf.device(shared_job_device):
          enqueue_ops.append(queue.enqueue(nest.flatten(actor_output)))
    # If running in a single machine setup, run actors with QueueRunners
    # (separate threads).
    if is_learner and enqueue_ops:
      tf.train.add_queue_runner(tf.train.QueueRunner(queue, enqueue_ops))
    # Build learner.
    if is_learner:
      # Create global step, which is the number of environment frames processed.
      tf.get_variable(
          'num_environment_frames',
          initializer=tf.zeros_initializer(),
          shape=[],
          dtype=tf.int64,
          trainable=False,
          collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
      # Create batch (time major) and recreate structure.
      dequeued = queue.dequeue_many(FLAGS.batch_size)
      dequeued = nest.pack_sequence_as(structure, dequeued)
      dequeued = dequeued._replace(
          env_outputs=make_time_major(dequeued.env_outputs),
          agent_outputs=make_time_major(dequeued.agent_outputs))
      with tf.device('/gpu'):
        # Using StagingArea allows us to prepare the next batch and send it to
        # the GPU while we're performing a training step. This adds up to 1 step
        # policy lag.
        flattened_output = nest.flatten(dequeued)
        area = tf.contrib.staging.StagingArea(
            [t.dtype for t in flattened_output],
            [t.shape for t in flattened_output])
        stage_op = area.put(flattened_output)
        data_from_actors = nest.pack_sequence_as(structure, area.get())
        # Unroll agent on sequence, create losses and update ops.
        output = build_learner(agent, data_from_actors.agent_state,
                               data_from_actors.env_outputs,
                               data_from_actors.agent_outputs)
    # Create MonitoredSession (to run the graph, checkpoint and log).
    tf.logging.info('Creating MonitoredSession, is_chief %s', is_learner)
    # config = tf.ConfigProto(allow_soft_placement=True)
    config = tf.ConfigProto(allow_soft_placement=True, device_filters=filters)
    with tf.train.MonitoredTrainingSession(
        server_target,
        is_chief=is_learner,
        checkpoint_dir=FLAGS.logdir,
        save_checkpoint_secs=600,
        save_summaries_secs=30,
        log_step_count_steps=50000,
        config=config,
        hooks=[py_process.PyProcessHook()]) as session:
      if is_learner:
        tf.logging.info('is_learner')
        # Logging.
        # NOTE(review): `level_returns` is populated nowhere below — confirm
        # whether per-level return tracking was meant to be used here.
        level_returns = {level_name: [] for level_name in level_names}
        summary_writer = tf.summary.FileWriterCache.get(FLAGS.logdir)
        # Prepare data for first run.
        session.run_step_fn(
            lambda step_context: step_context.session.run(stage_op))
        # Execute learning and track performance.
        num_env_frames_v = 0
        while num_env_frames_v < FLAGS.total_environment_frames:
          tf.logging.info(num_env_frames_v)
          level_names_v, done_v, infos_v, num_env_frames_v, _ = session.run(
              (data_from_actors.level_name,) + output + (stage_op,))
          level_names_v = np.repeat([level_names_v], done_v.shape[0], 0)
          # Log a summary for every episode that finished in this batch.
          for level_name, episode_return, episode_step in zip(
              level_names_v[done_v],
              infos_v.episode_return[done_v],
              infos_v.episode_step[done_v]):
            episode_frames = episode_step
            tf.logging.info('Level: %s Episode return: %f',
                            level_name, episode_return)
            summary = tf.summary.Summary()
            summary.value.add(tag=level_name + '/episode_return',
                              simple_value=episode_return)
            summary.value.add(tag=level_name + '/episode_frames',
                              simple_value=episode_frames)
            summary_writer.add_summary(summary, num_env_frames_v)
      else:
        tf.logging.info('actor')
        # Execute actors (they just need to enqueue their output).
        while True:
          session.run(enqueue_ops)
def test(action_set, level_names):
  """Evaluate a checkpointed agent on every level in `level_names`.

  For each level an actor is built and stepped until
  `FLAGS.test_num_episodes` episode returns have been collected; the mean
  return is then logged.
  """
  level_returns = {name: [] for name in level_names}
  with tf.Graph().as_default():
    agent = create_agent(len(action_set))
    outputs = {}
    for name in level_names:
      env = create_environment(name, seed=1, is_test=True)
      outputs[name] = build_actor(agent, env, name, action_set)
    with tf.train.SingularMonitoredSession(
        checkpoint_dir=FLAGS.logdir,
        hooks=[py_process.PyProcessHook()]) as session:
      for name in level_names:
        tf.logging.info('Testing level: %s', name)
        while True:
          done_v, infos_v = session.run((
              outputs[name].env_outputs.done,
              outputs[name].env_outputs.info
          ))
          # Index 0 carries over the previous unroll's final step, so only
          # entries [1:] are counted.
          collected = level_returns[name]
          collected.extend(infos_v.episode_return[1:][done_v[1:]])
          if len(collected) >= FLAGS.test_num_episodes:
            tf.logging.info('Mean episode return: %f', np.mean(collected))
            break
if __name__ == '__main__':
  tf.app.run()  # Parse app flags and dispatch to main() when run directly.
| [
2,
15069,
2864,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330... | 2.560247 | 12,648 |
from peco.template import TextNode
from .read_whitespace import read_whitespace
from .syntax_error import SyntaxError
| [
198,
6738,
613,
1073,
13,
28243,
1330,
8255,
19667,
198,
6738,
764,
961,
62,
1929,
2737,
10223,
1330,
1100,
62,
1929,
2737,
10223,
198,
6738,
764,
1837,
41641,
62,
18224,
1330,
26375,
897,
12331,
628
] | 3.428571 | 35 |
import gym
import unittest
import numpy as np
from connect_four.evaluation.board import Board
from connect_four.evaluation.victor.evaluator import evaluator
from connect_four.envs.connect_four_env import ConnectFourEnv
if __name__ == '__main__':
    unittest.main()  # Discover and run this module's tests when executed directly.
| [
11748,
11550,
198,
11748,
555,
715,
395,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
2018,
62,
14337,
13,
18206,
2288,
13,
3526,
1330,
5926,
198,
6738,
2018,
62,
14337,
13,
18206,
2288,
13,
32433,
273,
13,
18206,
84,
1352... | 3.044944 | 89 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-03 11:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
18,
319,
2177,
12,
2919,
12,
3070,
1367,
25,
3510,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.883117 | 77 |
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib import style
from os import path
import numpy as np
# Plot natural-gas hub spot prices, one hub per subplot row.
style.use('ggplot')
rcParams.update({'font.size': 20})
fig, axes = plt.subplots(nrows=8, ncols=1, sharex=True, figsize=(30, 30))

# (csv file, subplot row, panel title). The list is kept in the original
# plotting order so the figure (and which axes is current when the legend
# is added below) is unchanged; it is NOT top-to-bottom row order.
panels = [
    ('Algonquin_Citygate.csv', 7, 'Algonquin Citygates'),
    ('TETCO_M3.csv', 6, 'TETCO-M3'),
    ('Chicago_Citygates.csv', 5, 'Chicago Citygates'),
    ('Henry.csv', 0, 'Henry'),
    ('Malin.csv', 1, 'Malin'),
    ('PG&E.csv', 2, 'PG&E-Citygate'),
    ('Socal_Citygate.csv', 4, 'Socal-Citygate'),
    ('Socal_Ehrenberg.csv', 3, 'Socal-Ehrenberg'),
]
# One loop replaces eight copy-pasted read/plot/linewidth/title stanzas.
for csv_name, row, title in panels:
    prices = pd.read_csv(csv_name)
    prices.plot(x='Trade date', y='Price', kind="line", color='b',
                subplots=False, ax=axes[row], legend=False, title=title)
    for line in axes[row].lines:
        line.set_linewidth(1)
    axes[row].title.set_size(20)

plt.tight_layout(pad=0, w_pad=0, h_pad=0)
plt.legend(bbox_to_anchor=(1.068, 5.2), loc='best')
plt.tick_params(axis='x', labelsize=22)
plt.xlabel("Date", color='black', fontsize='large')
plt.ylabel("U.S. Dollars per Million British Thermal Unit ($/MMBtu)", color='black', verticalalignment='center', horizontalalignment='center', position=(1, 5.5))
# Name the output after this script; crop tightly around the figure.
plt.savefig(path.basename(__file__)+".png", bbox_inches='tight')
# plt.show()
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
1330,
48321,
10044,
4105,
198,
6738,
2603,
29487,
8019,
1330,
3918,
198,
6738,
28686,
1330,
3108,
198,
11748,
2... | 2.32049 | 1,142 |
from cts.taps.corridor import get_menu
# NOTE(review): the import below rebinds `get_menu`, silently shadowing the
# corridor import above — confirm whether distinct aliases were intended.
from cts.taps.dryhop import get_menu
6738,
269,
912,
13,
83,
1686,
13,
10215,
44425,
1330,
651,
62,
26272,
198,
6738,
269,
912,
13,
83,
1686,
13,
39140,
8548,
1330,
651,
62,
26272
] | 2.777778 | 27 |
# Initial cleaning the comment data
# Load the Pandas libraries with alias 'pd'
import pandas as pd
from os import listdir
import re
import html.parser
# Get names in dir:
lFiles = listdir(r'Data\Archive')
# Drop the last directory entry (as the original script did — confirm which
# entry this is meant to exclude) and keep files from index 40 onwards.
lFiles.pop()
lFiles = lFiles[40:]
print(str(lFiles[0]) + str(lFiles[-1]))
# Concatenate all monthly archives into one frame. pd.concat over a list is
# linear, unlike the previous repeated DataFrame.append which recopied the
# whole frame on every iteration.
df = pd.concat([pd.read_csv('Data/Archive/' + f) for f in lFiles])
df.to_csv('Data/' + '17_11to19_09_raw.csv')

# Read data from file 'filename.csv'
# NOTE(review): this reads a different, pre-existing raw file rather than the
# one written above — confirm this manual two-stage workflow is intended.
df = pd.read_csv(r'Data\09to14_raw.csv', index_col=0)
# Importing Bot user names
bots = pd.read_csv(r'Data\Bots.csv', index_col=0, sep=';')
# Removing bots from the data
df = df[~df.author.isin(bots.bot_names)]
# Removing any NA's. Bug fix: dropna() returns a new frame; the original code
# discarded the result, so the NAs were never actually removed.
df = df.dropna()

# Cleaning the text data. The triple-quoted scratch section at the bottom
# tries each regex individually (from before they were combined); delete it
# if you do not need it.
keeplist = "?.!,'_-"
# Compile the combined pattern once instead of recompiling it per comment.
clean_pattern = re.compile(
    r'[\S]+\.(net|com|org|info|edu|gov|uk|de|ca|jp|fr|au|us|ru|ch|it|nel|se|no|es|mil)'
    r'[\S]*\s?|(/u/|u/)\S+|(/r/|r/)\S+|[\x00-\x1f\x7f-\xff]|[0-9]+|(&g|&l)\S+'
    r'|[^\s\w' + keeplist + ']')
Adj_comment = pd.DataFrame([clean_pattern.sub("", elem) for elem in df['body']],
                           columns=['body'])
df['body'] = Adj_comment['body']

# Scratch: each regex tried separately.
'''
text = "Hey, www.reddit.com og http//www.reddit.com og https//www.reddit.com og 134http//www.reddit.com "
text2 = "/u/mitnavn er, u/hallo virker det?,"
text3 = "12/r/12 /r/buttcoin r/suckaniggadickorsummin"
text4 = "I luv my <3 iphone & > > < <"
text5 = "\b \1 \f"
text = df.iloc[13]
text_ult = "Hey, www.reddit.com og http//www.reddit.com og https//www.reddit.com og 134http//www.reddit.com /u/mitnavn er, u/hallo virker det?,12/r/12 /r/buttcoin r/suckaniggadickorsummin I luv my <3 iphone & > > < < \b \1 \f"
#result = re.sub(r"http\S+", "", text2) # Starter med http.
# Filter out domains, Also filters out any text not attached to this not separated by a space
re.sub(r'[\S]+\.(net|com|org|info|edu|gov|uk|de|ca|jp|fr|au|us|ru|ch|it|nel|se|no|es|mil)[\S]*\s?','',text)
# Filter out usernames
re.sub(r"(/u/|u/)\S+", "", text2)
# Filter out subreddit
re.sub(r'(/r/|r/)\S+', "", text3)
# Edit HTML, Redigerer html i stedet for at slette det som den gamle gjorde
html.unescape(text4)
# Filter out odd character combinations
# R metoden fjerner egentlig bare > som er html for >, så ikke så umiddelbart brugbar?, jeg fjerner hvertfald både
# > og <, så vi kommer af med: <3
re.sub(r'(&g|&l)\S+', "", text4)
# Filter out control characters, lidt i tvivl om præcis hvad den gør, men noget med ting markeret med \
re.sub(r'[\x00-\x1f\x7f-\xff]', '', text5)
# Filter out numbers
re.sub(r'[0-9]+', '', text3)
'''
| [
2,
20768,
12724,
262,
2912,
1366,
198,
198,
2,
8778,
262,
16492,
292,
12782,
351,
16144,
705,
30094,
6,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
28686,
1330,
1351,
15908,
198,
11748,
302,
198,
11748,
27711,
13,
48610,
198,
198... | 2.140337 | 1,304 |
# Read an integer and report its double, its triple and its square root.
number = int(input('Digite um número: '))
double = number * 2
triple = number * 3
square_root = number ** (1 / 2)
print(f"""Você digitou: {number}! \nO dobro deste número é: {double} \nO triplo deste número é: {triple}
A raiz deste número é: {square_root:.2f}""")
| [
77,
16,
796,
493,
7,
15414,
10786,
19511,
578,
23781,
299,
21356,
647,
78,
25,
705,
4008,
198,
198,
67,
796,
299,
16,
1635,
362,
198,
83,
796,
299,
16,
1635,
513,
198,
81,
796,
299,
16,
12429,
357,
16,
1220,
362,
8,
198,
198,
... | 1.791304 | 115 |
#
# This contains utilities to convert the starttime endtime (and
# result_interval) into a series of result dates. These result dates
# are put into an array, and then processing is done for each expected
# result date. This may need to be generalized to calculated expected result datetimes that
# are not one-day-models, but are monthly, or hourly. This may involved introducing a interval
# variable into the calling parameters.
#
# Returns:
# the return format is an array of strings, in order, with the strings in this format: YYYY-MM-DD
#
#
import datetime
def ndays(start_datetime, end_datetime):
    """
    Enumerate the days from start_datetime (inclusive) up to end_datetime
    (exclusive), one step of 24 hours at a time, as [day, month, year]
    triples. Used to create test date directories.
    """
    one_day = datetime.timedelta(days=1)
    days = []
    current = start_datetime
    while current < end_datetime:
        days.append([current.day, current.month, current.year])
        current += one_day
    return days
def expected_result_datetimes(start_datetime, end_datetime):
    """
    input: start/end datetimes bounding the expected result dates
    output: array of test dates in directory string format ('YYYY-MM-DD'),
        in order
    """
    # `ndays` yields [day, month, year] triples, one per day in the range.
    return ["%4d-%02d-%02d" % (triple[2], triple[1], triple[0])
            for triple in ndays(start_datetime, end_datetime)]
2,
198,
2,
770,
4909,
20081,
284,
10385,
262,
923,
2435,
886,
2435,
357,
392,
198,
2,
1255,
62,
3849,
2100,
8,
656,
257,
2168,
286,
1255,
9667,
13,
2312,
1255,
9667,
198,
2,
389,
1234,
656,
281,
7177,
11,
290,
788,
7587,
318,
17... | 2.889524 | 525 |
import json
from pandajedi.jedimsgprocessor.base_msg_processor import BaseMsgProcPlugin
from pandajedi.jedimsgprocessor.tape_carousel_msg_processor import TapeCarouselMsgProcPlugin
from pandajedi.jedimsgprocessor.hpo_msg_processor import HPOMsgProcPlugin
from pandajedi.jedimsgprocessor.processing_msg_processor import ProcessingMsgProcPlugin
from pandacommon.pandalogger import logger_utils
# logger
# Module-level logger named after the final component of this module's
# dotted import path.
base_logger = logger_utils.setup_logger(__name__.split('.')[-1])
# Atlas iDDS message processing plugin, a bridge connect to other idds related message processing plugins
| [
11748,
33918,
198,
198,
6738,
19798,
1228,
13740,
13,
73,
276,
12078,
70,
41341,
13,
8692,
62,
19662,
62,
41341,
1330,
7308,
50108,
2964,
66,
37233,
198,
6738,
19798,
1228,
13740,
13,
73,
276,
12078,
70,
41341,
13,
83,
1758,
62,
7718,... | 3.34104 | 173 |
from storyhub.sdk.service.Configuration import Configuration
from storyhub.sdk.service.Service import Service
from storyhub.sdk.service.ServiceObject import ServiceObject
class ServiceData(ServiceObject):
    """
    This represents an entire service stored within the Storyscript Hub.
    """
    @classmethod
    def name(self):
        """
        This acts as a helper for easily accessing the name of the service.

        For example the value stored within {"service":{"name":"helloworld"}}

        :return: service name
        """
        # NOTE(review): declared as @classmethod but the first parameter is
        # named `self` and `_name` reads like per-instance state — confirm
        # whether this was meant to be a @property / instance method.
        return self._name
| [
6738,
1621,
40140,
13,
21282,
74,
13,
15271,
13,
38149,
1330,
28373,
198,
6738,
1621,
40140,
13,
21282,
74,
13,
15271,
13,
16177,
1330,
4809,
198,
6738,
1621,
40140,
13,
21282,
74,
13,
15271,
13,
16177,
10267,
1330,
4809,
10267,
628,
... | 3.149171 | 181 |
from abc import ABCMeta, abstractmethod
# Shared message for abstract methods that concrete subclasses must override.
NOT_IMPLEMENTED = "You should implement this."
| [
6738,
450,
66,
1330,
9738,
48526,
11,
12531,
24396,
201,
198,
201,
198,
11929,
62,
3955,
16437,
10979,
1961,
796,
366,
1639,
815,
3494,
428,
526,
201,
198,
201,
198,
201,
198
] | 2.96875 | 32 |
from django.urls import path
from .views import (FreeArticleDetailView, FreeArticleListView,
PremiumArticleDetailView, PremiumArticleListView,
SearchResultsListView)
urlpatterns = [
    # List views for the free and premium article tiers.
    path('free/', FreeArticleListView.as_view(), name='free_article_list'),
    path('premium/', PremiumArticleListView.as_view(), name='premium_article_list'),
    # Detail views, addressed by slug.
    path('free/<slug:slug>', FreeArticleDetailView.as_view(), name='free_article_detail'),
    path('premium/<slug:slug>', PremiumArticleDetailView.as_view(), name='premium_article_detail'),
    # Search results page.
    path('search/', SearchResultsListView.as_view(), name='search_results'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
33571,
1330,
357,
11146,
14906,
11242,
603,
7680,
11,
3232,
14906,
8053,
7680,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 2.733333 | 240 |
import time
from timedlist import TimedList
# Create a TimedList object that will remove items older than 10 seconds within
# 3% of maxtime
tl1 = TimedList(maxtime=10, filled_percent=3.0)
tl2 = TimedList(maxtime=10, filled_percent=3.0)
for i in range(20):
tl1.append(time.time(), i)
time.sleep(0.75)
print('tl1: len:{} is_filled:{} elapsed:{}'.format(len(tl1), tl1.is_filled, tl1.elapsed))
time.sleep(0.1)
tl2.append(time.time(), i*10)
for i in range(20):
tl2.append(time.time(), i)
time.sleep(0.1)
print('tl2: len:{} is_filled:{} elapsed:{}'.format(len(tl2), tl2.is_filled, tl2.elapsed))
# Delete like a normal list
del(tl1[0])
# Access with helper methods or index
print('first item example 1: {}'.format(tl1[0]))
print('first item example 2: {} {}'.format(tl1.get_time(0), tl1.get_item(0)))
# loop over all items
for i in tl1:
print('loop: {}'.format(i))
# Prune back to a smaller list manually
print('len-before: {}'.format(len(tl1)))
tl1.prune(maxtime=5)
print('len-after: {}'.format(len(tl1)))
# Combine two TimedList
# This can only be done if the right hand's
# end time is > the left hand's end time
combined_tl = tl1 + tl2
print('len-combined: {}'.format(len(combined_tl)))
# reverse in place
combined_tl.reverse()
for i in combined_tl:
print('combined_tl: loop-after-reverse: {}'.format(i))
# same as a list's clear()
tl1.clear()
print('len-after-clear: {}'.format(len(tl1)))
# This sorts only the items and they are reassociated with different times
tl1.sort()
# __str__/__repr__ example
print(tl2)
| [
11748,
640,
198,
6738,
28805,
4868,
1330,
5045,
276,
8053,
198,
198,
2,
13610,
257,
5045,
276,
8053,
2134,
326,
481,
4781,
3709,
4697,
621,
838,
4201,
1626,
198,
2,
513,
4,
286,
17266,
742,
524,
198,
28781,
16,
796,
5045,
276,
8053,... | 2.54886 | 614 |
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import keras
import argparse
import numpy as np
import tensorflow as tf
from keras import backend as K
import matplotlib.pyplot as plt
import segmentation_models as sm
from utils import visualize, freeze_session
from data_loader import Dataset, Dataloader
from keras.backend.tensorflow_backend import set_session
from augmentation import *
# Tensorflow session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
config.log_device_placement = False # to log device placement (on which device the operation ran)
sess = tf.Session(config=config)
set_session(sess) # set this TensorFlow session as the default session for Keras
#ArgParse
parser = argparse.ArgumentParser(description='UNET for Multiclass Semantic Segmentation')
parser.add_argument('--train_dir', type=str, required = True, help='Train dir - with subdirs images ans masks')
parser.add_argument('--val_dir', type=str, required = True, help='Val dir - with subdirs images ans masks')
parser.add_argument('--result_dir', type=str,default="results", help='Result dir - where the model will be saved')
parser.add_argument('--image_size', type=int,default=320, help='Image size - for cropping the images to nxn images')
parser.add_argument('--image_channels', type=int,default=3, help='Image channels - number of channels of the input image')
parser.add_argument('--padding_size', type=int,default=800, help='Padding size for Val images - must be a multiple of image size')
parser.add_argument('--n_classes', type=int,default=2, help='# of classes - number of classes')
parser.add_argument('--batch_size', type=int,default=2, help='Batch size')
parser.add_argument('--epochs', type=int,default=100, help='# of Epochs')
args = parser.parse_args()
# Image Directories
train_dir = args['train_dir']
test_dir = args['val_dir']
result_dir = args['result_dir']
if not os.path.exists(result_dir):
os.mkdir(result_dir)
x_train_dir = os.path.join(train_dir, 'images')
y_train_dir = os.path.join(train_dir, 'masks')
x_valid_dir = os.path.join(test_dir, 'images')
y_valid_dir = os.path.join(test_dir, 'masks')
#Model parameters
BACKBONE = 'efficientnetb3'
BATCH_SIZE = args['batch_size']
LR = 0.0001
EPOCHS = args['epochs']
# define network parameters
activation ='softmax'
n_classes = args['n_classes']
image_size = args['image_size']
image_channels = args['image_channels']
padding_size = args['padding_size']
preprocess_input = sm.get_preprocessing(BACKBONE)
# Dataset for train images
train_dataset = Dataset(
x_train_dir,
y_train_dir,
classes=n_classes,
augmentation=get_training_augmentation(imgsize = image_size),
preprocessing=get_preprocessing(preprocess_input),
)
# Dataset for validation images
valid_dataset = Dataset(
x_valid_dir,
y_valid_dir,
classes=n_classes,
augmentation=get_validation_augmentation(imgsize = padding_size),
preprocessing=get_preprocessing(preprocess_input),
)
#create model
model = sm.Unet(BACKBONE,classes=n_classes, activation=activation)
# define optomizer
optim = keras.optimizers.Adam(LR)
# Segmentation models losses can be combined together by '+' and scaled by integer or float factor
dice_loss = sm.losses.DiceLoss(class_weights=np.ones(n_classes))
focal_loss = sm.losses.CategoricalFocalLoss()
total_loss = dice_loss + (1 * focal_loss)
metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]
# compile keras model with defined optimozer, loss and metrics
model.compile(optim, total_loss, metrics)
#Dataloaders
train_dataloader = Dataloader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
valid_dataloader = Dataloader(valid_dataset, batch_size=1, shuffle=False)
# check shapes for errors
assert train_dataloader[0][0].shape == (BATCH_SIZE, image_size, image_size, image_channels)
assert train_dataloader[0][1].shape == (BATCH_SIZE, image_size, image_size, n_classes)
# define callbacks for learning rate scheduling and best checkpoints saving
callbacks = [
keras.callbacks.ModelCheckpoint(os.path.join(result_dir,'best_model.h5'), save_weights_only=True, save_best_only=True, mode='min'),
keras.callbacks.ReduceLROnPlateau(),
]
# train model
history = model.fit_generator(
train_dataloader,
steps_per_epoch=len(train_dataloader),
epochs=EPOCHS,
callbacks=callbacks,
validation_data=valid_dataloader,
validation_steps=len(valid_dataloader),
)
# Plot training & validation iou_score values
plt.figure(figsize=(30, 5))
plt.subplot(121)
plt.plot(history.history['iou_score'])
plt.plot(history.history['val_iou_score'])
plt.title('Model iou_score')
plt.ylabel('iou_score')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
# Plot training & validation loss values
plt.subplot(122)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig(os.path.join(result_dir,'UNET-Training.png'))
plt.show()
#Save model as Tensorflow frozen graph
frozen_graph = freeze_session(K.get_session(),output_names=[out.op.name for out in model.outputs])
tf.train.write_graph(frozen_graph, result_dir , "best_model.pb", as_text=False) | [
11748,
28686,
198,
418,
13,
268,
2268,
17816,
43633,
5631,
62,
29817,
34563,
62,
39345,
34444,
20520,
796,
705,
15,
6,
198,
418,
13,
268,
2268,
17816,
10234,
62,
8697,
47,
62,
23678,
62,
25294,
62,
2538,
18697,
20520,
796,
705,
17,
... | 2.826822 | 1,894 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'A PDS Image library based on the GDAL implementation.',
'author': 'Austin Godber',
'url': 'https://github.com/godber/gdal_pds',
'download_url': 'https://github.com/godber/gdal_pds',
'author_email': 'godber@uberhip.com',
'version': '0.1',
'install_requires': [
'GDAL>=1.10.0,<=1.10.1',
'numpy>=1.8.1'
],
'tests_require': ['pytest>=2.5.0'],
'packages': ['gdal_pds'],
'scripts': [],
'name': 'gdal_pds'
}
setup(**config)
| [
28311,
25,
198,
220,
220,
220,
422,
900,
37623,
10141,
1330,
9058,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
422,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
11250,
796,
1391,
198,
220,
220,
220,
705,
11213,
10354,
705,
32,
... | 2.190647 | 278 |
from __future__ import unicode_literals
default_app_config = 'mayan.apps.django_gpg.apps.DjangoGPGApp'
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
12286,
62,
1324,
62,
11250,
796,
705,
11261,
272,
13,
18211,
13,
28241,
14208,
62,
70,
6024,
13,
18211,
13,
35,
73,
14208,
38,
6968,
4677,
6,
198
] | 2.6 | 40 |
from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
from torchvision.datasets.utils import download_url, check_integrity
class FRDEEPN(data.Dataset):
"""`FRDEEP-N <https://github.com/HongmingTang060313/FR-DEEP/>`_Dataset
Inspired by `HTRU1 <https://as595.github.io/HTRU1/>`_ Dataset.
Args:
root (string): Root directory of dataset where directory
``htru1-batches-py`` exists or will be saved to if download is set to True.
train (bool, optional): If True, creates dataset from training set, otherwise
creates from test set.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
base_folder = 'NVSS_PNG_dataset'
url = "http://www.jb.man.ac.uk/research/ascaife/NVSS_PNG_dataset.tar.gz"
filename = "NVSS_PNG_dataset.tar.gz"
tgz_md5 = '2584ed1e174ea71f581d0e0d6f32ef38'
train_list = [
['data_batch_1', '3a2a15d88756ba61c796378fc8574540'],
['data_batch_2', '6a04e3985397e1f67f0ad42153dca64e'],
['data_batch_3', 'd852c8200f3bbb63beacf31f3e954f9a'],
['data_batch_4', 'a5739996ca44a1a1841f2d0e6b844dd6'],
['data_batch_5', '8e2fdb3f60bf7541ca135fc8e2407f7a'],
['data_batch_6', '9e5a82500bd9742f8fefe412ada95336'],
['data_batch_7', 'f66af7795265fbe24376f669200412c4'],
['data_batch_8', '75982afc09bf480ecc521acdb39cbe46'],
['data_batch_9', '72aee306fef9acee21a0e5537bb681e4'],
['data_batch_10', '7a039ce8062a533b23b401a612c5f9b7'],
['data_batch_11', 'c0013314098c96ca4c7c20c0f17abcd3'],
]
test_list = [
['test_batch', '39fd167b9a7df12cee1ef9a804f9fa86'],
]
meta = {
'filename': 'batches.meta',
'key': 'label_names',
'md5': '655493bdee948954f3939727b3f9e735',
}
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.targets[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = np.reshape(img,(150,150))
img = Image.fromarray(img,mode='L')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
class FRDEEPF(data.Dataset):
"""`FRDEEP-F <https://github.com/HongmingTang060313/FR-DEEP/>`_Dataset
Inspired by `HTRU1 <https://as595.github.io/HTRU1/>`_ Dataset.
Args:
root (string): Root directory of dataset where directory
``htru1-batches-py`` exists or will be saved to if download is set to True.
train (bool, optional): If True, creates dataset from training set, otherwise
creates from test set.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
base_folder = 'FIRST_PNG_dataset'
url = "http://www.jb.man.ac.uk/research/ascaife/FIRST_PNG_dataset.tar.gz"
filename = "FIRST_PNG_dataset.tar.gz"
tgz_md5 = '2f39461e6c62fb45289559915106013a'
train_list = [
['data_batch_1', 'f34da44757c7fa3f6e6cd3d0839a4634'],
['data_batch_2', 'f56cda0d9a99305fee2bad7de0560f95'],
['data_batch_3', '93265dd849331af4e1b092f74b06450b'],
['data_batch_4', '0de8f4c18b775251f4e553e2990cd446'],
['data_batch_5', 'c6aa87400a1be6007da7cfcefd2c3e5c'],
['data_batch_6', 'cebd3fdea93abbc048a3a4d5e58528e0'],
['data_batch_7', '49497445e9380f157e78cf8d74fca1eb'],
['data_batch_8', '88e298eed2d87bbdddad83fef1482723'],
['data_batch_9', '8c40117dbf4d456e63a8a665b245aa63'],
['data_batch_10', 'f24d110cc5811ba4651630b9ee9b2989'],
['data_batch_11', 'b843dc3b7f48606235029f135d41c85e'],
]
test_list = [
['test_batch', '4e06889b1e7713deb46e62887eb37727'],
]
meta = {
'filename': 'batches.meta',
'key': 'label_names',
'md5': '655493bdee948954f3939727b3f9e735',
}
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.targets[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = np.reshape(img,(150,150))
img = Image.fromarray(img,mode='L')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
361,
25064,
13,
9641,
62,
10951,
58,
15,
60,
... | 2.009465 | 3,064 |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# yardstick comment: this is a modified copy of
# rally/rally/benchmark/runners/constant.py
"""A runner that every run arithmetically steps specified input value(s) to
the scenario. This just means step value(s) is added to the previous value(s).
It is possible to combine several named input values and run with those either
as nested for loops or combine each i:th index of each "input value list"
until the end of the shortest list is reached (optimally all lists should be
defined with the same number of values when using such iter_type).
"""
from __future__ import absolute_import
import itertools
import logging
import multiprocessing
import os
import time
import traceback
import six
from six.moves import range
from yardstick.benchmark.runners import base
LOG = logging.getLogger(__name__)
class ArithmeticRunner(base.Runner):
"""Run a scenario arithmetically stepping input value(s)
Parameters
interval - time to wait between each scenario invocation
type: int
unit: seconds
default: 1 sec
iter_type: - Iteration type of input parameter(s): nested_for_loops
or tuple_loops
type: string
unit: na
default: nested_for_loops
-
name - name of scenario option that will be increased for each invocation
type: string
unit: na
default: na
start - value to use in first invocation of scenario
type: int
unit: na
default: none
stop - value indicating end of invocation. Can be set to same
value as start for one single value.
type: int
unit: na
default: none
step - value added to start value in next invocation of scenario.
Must not be set to zero. Can be set negative if start > stop
type: int
unit: na
default: none
-
name - and so on......
"""
__execution_type__ = 'Arithmetic'
| [
2,
15069,
1946,
25,
7381,
20836,
3457,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
... | 2.871179 | 916 |
from dj_rest_auth.registration.views import VerifyEmailView
from django.conf import settings
from django.urls import path, include
from rest_framework.routers import DefaultRouter, SimpleRouter
from sonsuz_website.news.api.views import NewsViewSet, NewsImageViewSet, upload_file
if settings.DEBUG:
router = DefaultRouter()
else:
router = SimpleRouter()
router.register("", NewsViewSet)
# router.register("upload-image", NewsImageViewSet)
urlpatterns = router.urls
| [
6738,
42625,
62,
2118,
62,
18439,
13,
2301,
33397,
13,
33571,
1330,
49899,
15333,
7680,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
6738,
1334,
62,
30604,
13,
... | 3.305556 | 144 |
import unittest
class Node:
""" A Node of an UnorderedList. Holds data provided by the user. """
class UnorderedList:
"""
This is a stripped down version of the UnorderedList seen in the lab
"""
def to_python(self):
""" Returns this UnorderedList as a regular Python list. This method
is very handy for testing.
"""
python_list = []
current = self._head
while (current != None):
python_list.append(current.get_data())
current = current.get_next()
return python_list
def add(self,item):
""" Adds item at the beginning of the list """
new_head = Node(item)
new_head.set_next(self._head)
self._head = new_head
def remove(self, item):
""" Removes first occurrence of item from the list
If item is not found, raises an Exception.
"""
current = self._head
prev = None
while (current != None):
if (current.get_data() == item):
if prev == None: # we need to remove the head
self._head = current.get_next()
else:
prev.set_next(current.get_next())
current = current.get_next()
return # Found, exits the function
else:
prev = current
current = current.get_next()
raise Exception("Tried to remove a non existing item! Item was: " + str(item))
def occurrences(self, item):
"""
Returns the number of occurrences of item in the list.
"""
raise Exception("TODO IMPLEMENT ME!")
def shrink(self):
"""
Removes from this UnorderedList all nodes at odd indeces (1, 3, 5, ...),
supposing that the first node has index zero, the second node
has index one, and so on.
So if the UnorderedList is
'a','b','c','d','e'
a call to shrink() will transform the UnorderedList into
'a','c','e'
Must execute in O(n) where 'n' is the length of the list.
Does *not* return anything.
"""
raise Exception("TODO IMPLEMENT ME!")
class UnorderedListTest(unittest.TestCase):
""" Test cases for UnorderedList
"""
def test_add(self):
""" Remember 'add' adds stuff at the beginning of the list ! """
ul = UnorderedList()
self.assertEquals(ul.to_python(), [])
ul.add('b')
self.assertEquals(ul.to_python(), ['b'])
ul.add('a')
self.assertEquals(ul.to_python(), ['a', 'b'])
#unittest.main() | [
11748,
555,
715,
395,
198,
198,
4871,
19081,
25,
198,
220,
220,
220,
37227,
317,
19081,
286,
281,
791,
24071,
8053,
13,
9340,
82,
1366,
2810,
416,
262,
2836,
13,
37227,
628,
198,
4871,
791,
24071,
8053,
25,
198,
220,
220,
220,
37227... | 1.987944 | 1,493 |
from talon import Module
from dataclasses import dataclass
mod = Module()
mod.list("cursorless_head_tail", desc="Cursorless modifier for head or tail of line")
@dataclass
head_tail_list = [
HeadTail("head", "extendThroughStartOf", "head"),
HeadTail("tail", "extendThroughEndOf", "tail"),
]
head_tail_map = {i.cursorlessIdentifier: i.type for i in head_tail_list}
head_tail = {i.defaultSpokenForm: i.cursorlessIdentifier for i in head_tail_list}
@mod.capture(rule="{user.cursorless_head_tail}")
| [
6738,
3305,
261,
1330,
19937,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
4666,
796,
19937,
3419,
198,
4666,
13,
4868,
7203,
66,
21471,
1203,
62,
2256,
62,
13199,
1600,
1715,
2625,
34,
21471,
1203,
23157,
329,
1182,
... | 2.781421 | 183 |
"""arturo-stac-api."""
import os
from imp import load_source
from setuptools import find_namespace_packages, setup
with open("README.md") as f:
desc = f.read()
# Get version from stac-fastapi-api
__version__ = load_source(
"stac_fastapi.sqlalchemy.version",
os.path.join(os.path.dirname(__file__), "stac_fastapi/sqlalchemy/version.py"),
).__version__ # type:ignore
install_requires = [
"fastapi",
"attrs",
"pydantic[dotenv]",
"stac_pydantic==1.3.8",
"stac-fastapi.types",
"stac-fastapi.api",
"stac-fastapi.extensions",
"sqlakeyset",
"geoalchemy2<0.8.0",
"sqlalchemy==1.3.23",
"shapely",
"psycopg2-binary",
"alembic",
"fastapi-utils",
]
extra_reqs = {
"dev": ["pytest", "pytest-cov", "pytest-asyncio", "pre-commit", "requests"],
"docs": ["mkdocs", "mkdocs-material", "pdocs"],
"server": ["uvicorn[standard]>=0.12.0,<0.14.0"],
}
setup(
name="stac-fastapi.sqlalchemy",
description="An implementation of STAC API based on the FastAPI framework.",
long_description=desc,
long_description_content_type="text/markdown",
version=__version__,
python_requires=">=3.8",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
],
keywords="STAC FastAPI COG",
author=u"Arturo Engineering",
author_email="engineering@arturo.ai",
url="https://github.com/stac-utils/stac-fastapi",
license="MIT",
packages=find_namespace_packages(exclude=["alembic", "tests", "scripts"]),
zip_safe=False,
install_requires=install_requires,
tests_require=extra_reqs["dev"],
extras_require=extra_reqs,
)
| [
37811,
433,
1434,
12,
301,
330,
12,
15042,
526,
15931,
198,
11748,
28686,
198,
6738,
848,
1330,
3440,
62,
10459,
198,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
14933,
10223,
62,
43789,
11,
9058,
198,
198,
4480,
1280,
7203,
15675,
... | 2.416887 | 758 |
"""
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -m{{cookiecutter.package_name}}` python will execute
``__main__.py`` as a script. That means there won't be any
``{{cookiecutter.package_name}}.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``{{cookiecutter.package_name}}.__main__`` in ``sys.modules``.
"""
import argparse
import logging
import os
import sys
from pathlib import Path
from typing import Dict
import trafaret_config.commandline as trafaret_config_cmd
from servicelib.utils import search_osparc_repo_dir
from .__version__ import __version__
from .application import run_service
from .application_config import CLI_DEFAULT_CONFIGFILE, app_schema
from .resources import get_distribution_info, resources
here = Path(sys.argv[0] if __name__ =="__main__" else __file__).resolve().parent
logger = logging.getLogger(__name__)
def setup_parser(args_parser):
"""
Adds settings group to cli with options:
-c CONFIG, --config CONFIG
Configuration file (default: 'config.yaml')
--print-config Print config as it is read after parsing and exit
--print-config-vars Print variables used in configuration file
-C, --check-config Check configuration and exit
"""
trafaret_config_cmd.standard_argparse_options(
args_parser.add_argument_group('settings'),
default_config=CLI_DEFAULT_CONFIGFILE)
# Add here more options ....
return args_parser
def create_environ(*, skip_host_environ: bool = False) -> Dict[str, str]:
""" Build environment with substitutable variables
:param skip_host_environ: excludes os.environ , defaults to False
:param skip_host_environ: bool, optional
:return: a dictionary of variables to replace in config file
:rtype: Dict[str, str]
"""
# system's environment variables
environ = dict() if skip_host_environ else dict(os.environ)
# project-related environment variables
rootdir = search_osparc_repo_dir(here)
if rootdir is not None:
environ.update({
'OSPARC_SIMCORE_REPO_ROOTDIR': str(rootdir),
})
return environ
def run_parser(args_parser, args) -> Dict:
""" Parse options and returns a configuration object """
if args is None:
args = sys.argv[1:]
# ignore unknown options
options, _ = args_parser.parse_known_args(args)
config = config_from_options(options, vars=create_environ())
return config
| [
37811,
198,
26796,
326,
4909,
262,
3141,
1627,
598,
13,
198,
198,
5195,
857,
428,
2393,
2152,
11,
290,
1521,
407,
1234,
428,
287,
11593,
12417,
834,
30,
628,
220,
921,
1244,
307,
26194,
284,
1330,
1243,
422,
11593,
12417,
834,
1568,
... | 2.898429 | 955 |
import pywhatkit
from datetime import datetime as dt, timedelta
from functions import e164, close_tab, wait_seconds
import openpyxl as xl
import myexceptions as ex
import os
wellcome_message = """
********************************************************************************
********************* Whatsapp sender v0.0 *************************************
********************************************************************************
Hola! (Developed by Hernán Garrido)
Este programa envía mensajes del archivo mess.xlsx usando Whatsapp Web y Chrome.
La primera columna debe contener números de teléfono, y la segunda el mensaje.
Si necesitás saltos de línea en el mensaje, usá Alt+Enter.
Asegurate de que todos los teléfonos tengan el 15 o el 9 (válido para Argentina).
Preferentemente que tengan el formato E164 (por ejemplo, para Mendoza: +5492615555222).
Todas las celdas tienen que tener formato 'texto'.
Vamos a mandar los mensajes de a poco... Nadie quiere ser baneado :-)
Ojo que no vamos a dejar de mandar hasta encontrar una celda en blanco!
Ahora, dejá la PC libre. Si tengo algún problema, te avisaré.
"""
print(wellcome_message)
try:
path = os.getcwd()
wb = xl.load_workbook(os.path.join(os.getcwd(), "mess.xlsx"))
except FileNotFoundError:
print(ex.E2)
exit(0)
list_with_issues = []
sheet_names = wb.sheetnames
sheet = wb[sheet_names[0]]
row = 1
number = str(sheet[f'a{row}'].value)
message = str(sheet[f'b{row}'].value)
first = True
while number:
number_ok = e164(number)
if not number_ok == -1:
now_ = dt.now()
print([number, number_ok, message])
if first:
send_at = now_ + timedelta(minutes=2)
first = False
else:
send_at = now_ + timedelta(minutes=1)
pywhatkit.sendwhatmsg(number_ok, message, send_at.hour, send_at.minute)
wait_seconds(2)
close_tab()
else:
list_with_issues += number
row += 1
number = sheet[f'a{row}'].value
message = str(sheet[f'b{row}'].value)
print("Terminé bien.")
if list_with_issues:
print(f"salvo por problemas con los siguientes números: {list_with_issues}")
input("Presioná Enter para cerrar.")
| [
11748,
12972,
10919,
15813,
201,
198,
6738,
4818,
8079,
1330,
4818,
8079,
355,
288,
83,
11,
28805,
12514,
201,
198,
6738,
5499,
1330,
304,
23237,
11,
1969,
62,
8658,
11,
4043,
62,
43012,
201,
198,
11748,
1280,
9078,
87,
75,
355,
2124,... | 2.470652 | 920 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2020, Yutong Xie, UIUC.
Using iteration two merge two sorted linked list
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
220,
220,
220,
15069,
12131,
11,
575,
315,
506,
48989,
11,
12454,
9598,
13,
198,
220,
220,
220,
8554,
... | 2.436508 | 126 |
from . import schema
from . import RemcallError
from .naming import CSharpNameConverter
| [
6738,
764,
1330,
32815,
198,
6738,
764,
1330,
3982,
13345,
12331,
198,
6738,
764,
77,
3723,
1330,
327,
44336,
5376,
3103,
332,
353,
628
] | 3.708333 | 24 |
'''
reads in disease threshold annotated EH results
splits annotation into different columns
fills outlier column with sample name if GT is in disease threshold
adds mean, std, median GT sizes from 1000Genomes EH runs (static file)
writes o/p as excel file
'''
import sys, os, re
from collections import namedtuple
from xlsxwriter.workbook import Workbook
annot_tsv = sys.argv[1] #output from add_gene+threshold_to_EH_column_headings2.py
xlsx = sys.argv[2] #output xlsx filename
EH = namedtuple('EH', 'pos, motif, gene, size, gt, mean, std, median')
G1K = {}
with open(os.path.expanduser("~/crg/1000G_EH_v1.0.tsv")) as f:
for i in f:
if not i.startswith("#"):
annot, mean, std, median = i.strip().split("\t")
if not annot in G1K:
G1K[annot] = [mean, std, median]
eh_gt = {}
with open(annot_tsv) as f:
annot = {e:l for e,l in enumerate(f.readline().strip().split("\t")[1:]) }
for i in f:
i = i.strip().split("\t")
sample = i[0]
if not sample in eh_gt:
eh_gt[sample] = []
for e,l in enumerate(i[1:]):
pos, motif, gene, size = annot[e].rsplit(":",3)
gt = l
if annot[e] in G1K:
mean, std, median = G1K[annot[e]]
else:
mean, std, median = "NA", "NA", "NA"
eh_gt[sample].append(EH(pos,motif,gene,size,gt,mean,std,median))
trf = {}
for i in eh_gt:
for items in eh_gt[i]:
if not items.pos in trf:
trf[items.pos] = {}
trf[items.pos].update({i:items})
samples = list(eh_gt.keys())
xlsx = xlsx if ".xlsx" in xlsx else xlsx + ".xlsx"
workbook = Workbook(xlsx)
worksheet = workbook.add_worksheet("Allsamples")
header = ["#location", "repeat motif", "gene", "disease threshold"] + ["GT."+i for i in samples] + ["1000G_mean", "1000G_std", "1000G_median", "outlier"]
worksheet.write_row(0, 0, header)
row = 1
for i in trf:
info = trf[i][samples[0]]
content = [info.pos, info.motif, info.gene, info.size]
content += [ trf[i][s].gt for s in samples ]
content += [info.mean, info.std, info.median]
gt = { s:trf[i][s].gt for s in samples }
outlier = outlier_gt(info.size, gt)
content += [ outlier ]
worksheet.write_row(row, 0, content)
row += 1
workbook.close()
| [
7061,
6,
198,
40779,
287,
4369,
11387,
24708,
515,
412,
39,
2482,
198,
22018,
896,
23025,
656,
1180,
15180,
198,
69,
2171,
503,
2505,
5721,
351,
6291,
1438,
611,
7963,
318,
287,
4369,
11387,
198,
2860,
82,
1612,
11,
14367,
11,
14288,
... | 2.111406 | 1,131 |
from .grid_search import GridSearch
from .differential_evolution import DifferentialEvolution
from .random_search import RandomSearch | [
6738,
764,
25928,
62,
12947,
1330,
24846,
18243,
198,
6738,
764,
39799,
498,
62,
1990,
2122,
1330,
20615,
498,
15200,
2122,
198,
6738,
764,
25120,
62,
12947,
1330,
14534,
18243
] | 4.433333 | 30 |
import pandas as pd
import os.path as path
import pickle
import collections as c
import numpy as np
import math
webGraph = None
if path.exists("web.pkl"):
with open("web.pkl", "rb") as f:
webGraph = pickle.load(f)
else:
df = pd.read_csv('links.srt.gz', compression='gzip', header=None, sep='\t')
webGraph = Web(df)
with open("web.pkl", "wb") as f:
pickle.dump(webGraph, f, pickle.HIGHEST_PROTOCOL)
webGraph.countInlinks()
webGraph.writeInLinkCount()
webGraph.computePageRank()
webGraph.writePageRank()
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
13,
6978,
355,
3108,
198,
11748,
2298,
293,
198,
11748,
17268,
355,
269,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
628,
198,
12384,
37065,
796,
6045,
198,
198,
361,
3108,
... | 2.456621 | 219 |
"""
This file contains view classes that can be used in other applications.
"""
from geonition_utils.http import HttpResponseNotImplemented
from django.views.generic import View
class RequestHandler(View):
"""
This class should be inherited by the view
classes to be used for implementing REST
"""
def get(self, request, *args, **kwargs):
"""
This method should be overridden
"""
return HttpResponseNotImplemented("not implemented, yet!")
def post(self, request, *args, **kwargs):
"""
This method should be overridden
"""
return HttpResponseNotImplemented("not implemented, yet!")
def put(self, request, *args, **kwargs):
"""
This method should be overridden
"""
return HttpResponseNotImplemented("not implemented, yet!")
def delete(self, request, *args, **kwargs):
"""
This method should be overridden
"""
return HttpResponseNotImplemented("not implemented, yet!")
def options(self, request, *args, **kwargs):
"""
This method should be overridden
"""
return HttpResponseNotImplemented("not implemented, yet!")
def get_user_name(self,
request,
username = None):
"""
This function will return the username / user-id of
the user that sent the request. If the name cannot be resolved
this function will return None.
e.g.
@me --> username of authenticated user
None --> username of authenticated user
"""
if username == '@me' or username == None:
username = request.user.username
return username
| [
37811,
198,
1212,
2393,
4909,
1570,
6097,
326,
460,
307,
973,
287,
584,
5479,
13,
198,
37811,
198,
6738,
4903,
261,
653,
62,
26791,
13,
4023,
1330,
367,
29281,
31077,
3673,
3546,
1154,
12061,
198,
6738,
42625,
14208,
13,
33571,
13,
41... | 2.397878 | 754 |
from __future__ import absolute_import, division, print_function
import procrunner
import pytest
import xia2.Test.regression
expected_data_files = [
"AUTOMATIC_DEFAULT_NATIVE_SWEEP1_INTEGRATE.mtz",
"AUTOMATIC_DEFAULT_free.mtz",
"AUTOMATIC_DEFAULT_scaled.sca",
"AUTOMATIC_DEFAULT_scaled_unmerged.mtz",
"AUTOMATIC_DEFAULT_scaled_unmerged.sca",
]
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
13834,
16737,
198,
11748,
12972,
9288,
198,
11748,
2124,
544,
17,
13,
14402,
13,
2301,
2234,
198,
198,
40319,
62,
7890,
62,
16624,
796,
685... | 2.352564 | 156 |
from collections import deque
tree = "1,2,3,None,None,4,5"
print(serialize(deserialize(tree))) | [
6738,
17268,
1330,
390,
4188,
198,
198,
21048,
796,
366,
16,
11,
17,
11,
18,
11,
14202,
11,
14202,
11,
19,
11,
20,
1,
198,
4798,
7,
46911,
1096,
7,
8906,
48499,
1096,
7,
21048,
22305
] | 2.638889 | 36 |
# -*- coding: utf-8 -*-
import re
from datetime import timedelta
from airflow.models import Variable
from airflow.operators.bash_operator import BashOperator
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
302,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
198,
6738,
45771,
13,
27530,
1330,
35748,
198,
6738,
45771,
13,
3575,
2024,
13,
41757,
62,
46616,
1330,
15743,
... | 3.313725 | 51 |
# Generated by Django 2.0.2 on 2020-06-01 12:28
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
17,
319,
12131,
12,
3312,
12,
486,
1105,
25,
2078,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
import os
import pytest
import sys
from cekit.cache.cli import CacheCli
| [
11748,
28686,
198,
11748,
12972,
9288,
198,
11748,
25064,
198,
198,
6738,
269,
988,
270,
13,
23870,
13,
44506,
1330,
34088,
2601,
72,
628,
628,
628,
628,
198
] | 2.892857 | 28 |
#python3.8.3
import itertools
import os
import pathlib
import shutil
#IO
from pptx import Presentation
from PIL import Image
from pdf2image import convert_from_path
import json
from io import BytesIO
import subprocess
from glob import glob
#GUI
import tkinter as tk
import tkinter.filedialog as dialog
from tkinter.messagebox import showinfo,askretrycancel
if __name__ == "__main__":
root = tk.Tk()
root.title("image2pptx2.0")
root.geometry("200x150")
app = Application(master=root)
app.mainloop() | [
2,
29412,
18,
13,
23,
13,
18,
198,
11748,
340,
861,
10141,
198,
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
4423,
346,
198,
198,
2,
9399,
198,
6738,
279,
457,
87,
1330,
21662,
341,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
... | 2.796791 | 187 |
from oaff.app.requests.common.request_type import RequestType
| [
6738,
267,
2001,
13,
1324,
13,
8897,
3558,
13,
11321,
13,
25927,
62,
4906,
1330,
19390,
6030,
628
] | 3.5 | 18 |
import comet_ml
import numpy as np
import json
import torch
import copy
import time
import math
from lib import create_agent
from lib.distributions import kl_divergence
from util.env_util import create_env, SynchronousEnv
from util.train_util import collect_episode
from util.plot_util import load_checkpoint
from local_vars import PROJECT_NAME, WORKSPACE, LOADING_API_KEY, LOGGING_API_KEY
ROLLOUT_BATCH_SIZE = 100
CKPT_SUBSAMPLE = 5
def estimate_monte_carlo_return(env, agent, env_state, state, action, n_batches):
"""
Estimates the discounted Monte Carlo return (including KL) for a policy from
a state-action pair.
Args:
env (gym.Env): the environment
agent (Agent): the agent
env_state (tuple): the environment state from MuJoCo (qpos, qvel)
state
action (np.array): the action of size [1, n_action_dims]
n_batches (int): the number of batches of Monte Carlo roll-outs
Returns numpy array of returns of size [n_batches * ROLLOUT_BATCH_SIZE].
"""
total_samples = n_batches * ROLLOUT_BATCH_SIZE
returns = np.zeros(total_samples)
initial_action = action.repeat(ROLLOUT_BATCH_SIZE, 1).numpy()
# create a synchronous environment to perform ROLLOUT_BATCH_SIZE roll-outs
env = SynchronousEnv(env, ROLLOUT_BATCH_SIZE)
for return_batch_num in range(n_batches):
if return_batch_num % 1 == 0:
print(' Batch ' + str(return_batch_num+1) + ' of ' + str(n_batches) + '.')
agent.reset(batch_size=ROLLOUT_BATCH_SIZE); agent.eval()
# set the environment
env.reset()
qpos, qvel = env_state
env.set_state(qpos=qpos, qvel=qvel)
state, reward, done, _ = env.step(initial_action)
# rollout the environment, get return
rewards = [reward.view(-1).numpy()]
kls = [np.zeros(ROLLOUT_BATCH_SIZE)]
n_steps = 1
while not done.prod():
if n_steps > 1000:
break
action = agent.act(state, reward, done)
state, reward, done, _ = env.step(action)
rewards.append(((1 - done) * reward).view(-1).numpy())
kl = kl_divergence(agent.approx_post, agent.prior, n_samples=agent.n_action_samples).sum(dim=1, keepdim=True)
kls.append(((1 - done) * kl.detach().cpu()).view(-1).numpy())
n_steps += 1
rewards = np.stack(rewards)
kls = np.stack(kls)
discounts = np.cumprod(agent.reward_discount * np.ones(kls.shape), axis=0)
discounts = np.concatenate([np.ones((1, ROLLOUT_BATCH_SIZE)), discounts])[:-1].reshape(-1, ROLLOUT_BATCH_SIZE)
rewards = discounts * (rewards - agent.alphas['pi'].cpu().numpy() * kls)
sample_returns = np.sum(rewards, axis=0)
sample_ind = return_batch_num * ROLLOUT_BATCH_SIZE
returns[sample_ind:sample_ind + ROLLOUT_BATCH_SIZE] = sample_returns
return returns
def get_agent_value_estimate(agent, state, action):
"""
Obtains the agent's value estimate for a particular state and action.
Args:
state (torch.Tensor): state of size [batch_size, n_state_dims]
action (torch.Tensor): action of size [batch_size, n_action_dims]
Returns a dictionary of action-value estimates:
direct: the estimate using the Q-network, size [batch_size]
estimate: the full estimate (using the model), size [batch_size]
"""
agent.reset(); agent.eval()
state = state.to(agent.device); action = action.to(agent.device)
direct_estimate = agent.q_value_estimator(agent, state, action, direct=True).detach().view(-1).cpu().numpy()
estimate = agent.q_value_estimator(agent, state, action).detach().view(-1).cpu().numpy()
return {'direct': direct_estimate, 'estimate': estimate}
def evaluate_estimator(exp_key, n_state_action, n_mc_samples, device_id=None):
"""
Evaluates the value estimator of a cached experiment throughout learning.
Args:
exp_key (str): the string of the comet experiment key
n_state_action (int): number of state action pairs to evaluate
n_mc_samples (int): number of Monte Carlo samples to estimate
environment returns
Returns dictionary containing:
ckpt_timesteps [n_ckpts]
value_estimates [n_ckpts, n_state_action, 1],
direct_value_estimates [n_ckpts, n_state_action, 1]
mc_estimates [n_ckpts, n_state_action, n_mc_samples]
"""
# load the experiment
comet_api = comet_ml.API(api_key=LOADING_API_KEY)
experiment = comet_api.get_experiment(project_name=PROJECT_NAME,
workspace=WORKSPACE,
experiment=exp_key)
# create the corresponding environment
param_summary = experiment.get_parameters_summary()
env_name = [a for a in param_summary if a['name'] == 'env'][0]['valueCurrent']
env = create_env(env_name)
# collect state-action samples using random policy
print('Collecting ' + str(n_state_action) + ' state-action pairs...')
sa_pairs = {'states': [], 'env_states': [], 'actions': []}
state = env.reset()
env_state = (copy.deepcopy(env.sim.data.qpos), copy.deepcopy(env.sim.data.qvel))
for _ in range(n_state_action):
action = env.action_space.sample()
next_state, reward, done, _ = env.step(action)
sa_pairs['states'].append(state)
sa_pairs['env_states'].append(env_state)
sa_pairs['actions'].append(torch.from_numpy(action).view(1, -1))
state = env.reset() if done else next_state
env_state = (copy.deepcopy(env.sim.data.qpos), copy.deepcopy(env.sim.data.qvel))
print('Done.')
# enumerate state-action pairs, estimating returns at each stage of learning
asset_list = experiment.get_asset_list()
agent_config_asset_list = [a for a in asset_list if 'agent_args' in a['fileName']]
agent_args = None
if len(agent_config_asset_list) > 0:
# if we've saved the agent config dict, load it
agent_args = experiment.get_asset(agent_config_asset_list[0]['assetId'])
agent_args = json.loads(agent_args)
agent_args = agent_args if 'opt_type' in agent_args['inference_optimizer_args'] else None
agent = create_agent(env, agent_args=agent_args, device_id=device_id)[0]
# get the list of checkpoint timesteps
ckpt_asset_list = [a for a in asset_list if 'ckpt' in a['fileName']]
ckpt_asset_names = [a['fileName'] for a in ckpt_asset_list]
ckpt_timesteps = [int(s.split('ckpt_step_')[1].split('.ckpt')[0]) for s in ckpt_asset_names]
# convert n_mc_samples to a round number of batches
n_batches = math.ceil(n_mc_samples / ROLLOUT_BATCH_SIZE)
n_mc_samples = ROLLOUT_BATCH_SIZE * n_batches
# TODO: the first dimension should be divided by CKPT_SUBSAMPLE
value_estimates = np.zeros((len(ckpt_timesteps), n_state_action, 1))
direct_value_estimates = np.zeros((len(ckpt_timesteps), n_state_action, 1))
mc_estimates = np.zeros((len(ckpt_timesteps), n_state_action, n_mc_samples))
# iterate over sub-sampled checkpoint timesteps, evaluating
ckpt_timesteps = list(np.sort(ckpt_timesteps)[::CKPT_SUBSAMPLE])
for ckpt_ind, ckpt_timestep in enumerate(ckpt_timesteps):
# load the checkpoint
print('Evaluating checkpoint ' + str(ckpt_ind + 1) + ' of ' + str(len(ckpt_timesteps)))
load_checkpoint(agent, exp_key, ckpt_timestep)
# get value estimate and estimate returns for the state-action pairs
for sa_ind, (env_state, state, act) in enumerate(zip(sa_pairs['env_states'], sa_pairs['states'], sa_pairs['actions'])):
t_start = time.time()
action_value_estimate = get_agent_value_estimate(agent, state, act)
value_estimates[ckpt_ind, sa_ind, :] = action_value_estimate['estimate']
direct_value_estimates[ckpt_ind, sa_ind, :] = action_value_estimate['direct']
returns = estimate_monte_carlo_return(env, agent, env_state, state, act, n_batches)
mc_estimates[ckpt_ind, sa_ind, :] = returns
if sa_ind % 1 == 0:
print(' Evaluated ' + str(sa_ind + 1) + ' of ' + str(len(sa_pairs['states'])) + ' state-action pairs.')
print(' Duration: ' + '{:.2f}'.format(time.time() - t_start) + ' s / state-action pair.')
# TODO: log the value estimates to comet (need to json-ify the numpy arrays)
# prev_exp = comet_ml.ExistingExperiment(api_key=LOGGING_API_KEY,
# previous_experiment=exp_key)
# prev_exp.log_asset_data(value_estimates, name='value_estimates')
# prev_exp.log_asset_data(direct_value_estimates, name='direct_value_estimates')
# prev_exp.log_asset_data(mc_estimates, name='mc_estimates')
return {'ckpt_timesteps': ckpt_timesteps,
'value_estimates': value_estimates,
'direct_value_estimates': direct_value_estimates,
'mc_estimates': mc_estimates}
| [
11748,
31733,
62,
4029,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33918,
198,
11748,
28034,
198,
11748,
4866,
198,
11748,
640,
198,
11748,
10688,
198,
6738,
9195,
1330,
2251,
62,
25781,
198,
6738,
9195,
13,
17080,
2455,
507,
1330,
... | 2.358446 | 3,836 |
from django.contrib.auth import forms as admin_forms
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
5107,
355,
13169,
62,
23914,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
62,
75,
12582... | 3.613636 | 44 |
names = {'anonymous','tazri','focasa','troy','farha'};
# can use for loop for access names sets
for name in names :
print("Hello, "+name.title()+"!");
# can check value is exist in sets ?
print("\n'tazri' in names : ",'tazri' in names);
print("'solus' not in names : ",'solus' not in names);
print("'xenon' in names : ",'xenon' in names); | [
14933,
796,
1391,
6,
272,
6704,
41707,
83,
1031,
380,
41707,
69,
420,
15462,
41707,
83,
3287,
41707,
16370,
3099,
6,
19629,
198,
198,
2,
460,
779,
329,
9052,
329,
1895,
3891,
5621,
198,
1640,
1438,
287,
3891,
1058,
198,
220,
220,
22... | 2.6875 | 128 |
# $ python3 -m pip install pyserial
# /dev/cu.usbserial-<XYZ> for mac,
# ...you can find your XYZ using $ python3 -m serial.tools.list_ports -v
# ...it might also be something like /dev/cu.usbmodem<XYZ>, depending on the USB Serial adapter
# My XYZ is "FTVHYZXQ", which matches my USB Serial adapter, model no. TTL232RG-VIP
# ...another option for finding XYZ is to use $ ls /dev/cu.usb*
# Another useful pyserial utility is: $ python3 -m serial.tools.miniterm
# Here's a simple brute force example:
# ser = serial.Serial("/dev/cu.usbserial-FTVHYZXQ",9600)
# To run: $ python3 test_ee101_1wire.py
import sys
import serial
from serial.tools.list_ports import comports
def ask_for_port():
"""\
Show a list of ports and ask the user for an index choice.
"""
sys.stderr.write('\nAvailable ports: <index:> <name> <desc> <hwid>\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('{:2}: {:40} {!r} {!r}\n'.format(n, port, desc, hwid))
ports.append(port)
while True:
port = raw_input('Enter index ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in python3 it's "raw"
unichr = chr
try:
user_selected_port_name = ask_for_port()
print("You selected " + user_selected_port_name)
ser = serial.Serial(user_selected_port_name)
print("Press CTL+C to exit program")
i = 0
while True:
EE101Text(0,"Hello")
EE101Text(1,"Tim")
EE101Text(2,"this")
EE101Text(3,"is")
EE101Text(4,"your")
EE101Text(5,"ee101")
EE101Text(6,"ported to")
EE101Text(7,"Python on macOS")
i += 1
EE101Value(0, i)
EE101Value(1, i)
EE101Value(2, i)
EE101Value(3, i)
EE101Value(4, i)
EE101Value(5, i)
EE101Value(6, i)
EE101Value(7, i)
except KeyboardInterrupt:
print("Exiting Program")
except:
print("Error Occurs, Exiting Program")
finally:
ser.close()
pass
| [
2,
720,
21015,
18,
532,
76,
7347,
2721,
279,
893,
48499,
198,
2,
1220,
7959,
14,
27399,
13,
385,
1443,
48499,
12,
27,
34278,
57,
29,
329,
8352,
11,
198,
2,
220,
220,
220,
220,
220,
220,
2644,
5832,
460,
1064,
534,
41420,
57,
126... | 2.198148 | 1,080 |
#!/usr/bin/env python
# -*- coding: utf-8 -*- | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12
] | 2.045455 | 22 |
from . import config, models, utils
| [
6738,
764,
1330,
4566,
11,
4981,
11,
3384,
4487,
198
] | 3.6 | 10 |
# encoding: utf-8
##################################################
# This script interacts with data files to extract information and modify values given to a function.
# It is part of an exercise in which data about districts and neighbourhoods are processed
# Here we have the functions for manipulating information about neighbourhoods
#
##################################################
#
##################################################
# Author: Diego Pajarito
# Copyright: Copyright 2019, IAAC
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: Diego Pajarito
# Email: diego.pajarito@iaac.net
# Status: development
##################################################
# need libraries to read external files
import csv
neighbourhoods = dict()
tags = []
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
29113,
14468,
2235,
198,
2,
770,
4226,
44020,
351,
1366,
3696,
284,
7925,
1321,
290,
13096,
3815,
1813,
284,
257,
2163,
13,
198,
2,
632,
318,
636,
286,
281,
5517,
287,
543,
1366,
546,
1281... | 4.538462 | 195 |
# Generated by Django 3.0.7 on 2020-06-22 20:13
from django.db import migrations, models
import django.utils.timezone
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
22,
319,
12131,
12,
3312,
12,
1828,
1160,
25,
1485,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
26791,
13,
2435,
11340,
628
] | 2.926829 | 41 |
from rest_framework.test import APITestCase, APIRequestFactory, force_authenticate
from api.v2.views import UserViewSet
from api.tests.factories import UserFactory, AnonymousUserFactory
from django.core.urlresolvers import reverse
from .base import APISanityTestCase
| [
6738,
1334,
62,
30604,
13,
9288,
1330,
3486,
2043,
395,
20448,
11,
7824,
18453,
22810,
11,
2700,
62,
41299,
5344,
198,
6738,
40391,
13,
85,
17,
13,
33571,
1330,
11787,
7680,
7248,
198,
6738,
40391,
13,
41989,
13,
22584,
1749,
1330,
11... | 3.736111 | 72 |
#!/usr/bin/env python
__name__ = 'nitimeconv'
__author__ = 'Teruaki Enoto'
__version__ = '1.02'
__date__ = '2018 April 11'
from optparse import OptionParser
from astropy.time import Time
NICER_MJDREFI = 56658.0
NICER_MJDREFF = 0.000777592592592593
NICER_TIMEZERO = -1.0
#LEAP_INIT = 2.0
NICER_MET_ORIGIN = Time('2014-01-01T00:00:00.000',format='isot',scale='utc')
FERMI_MET_ORIGIN = Time('2001-01-01T00:00:00.000',format='isot',scale='utc')
NUSTAR_MET_ORIGIN = Time('2010-01-01T00:00:00.000',format='isot',scale='utc')
RXTE_MET_ORIGIN = Time('1994-01-01T00:00:00.000',format='isot',scale='utc')
SUZAKU_MET_ORIGIN = Time('2000-01-01T00:00:00.000',format='isot',scale='utc')
SWIFT_MET_ORIGIN = Time('2001-01-01T00:00:00.000',format='isot',scale='utc')
XMM_MET_ORIGIN = Time('1998-01-01T00:00:00.000',format='isot',scale='tt')
CHANDRA_MET_ORIGIN = XMM_MET_ORIGIN
"""
Fermi seconds since 2001.0 UTC (decimal) 410227203.000
Fermi mission week (integer) 291
LIGO/GPS seconds since 1980-01-06 UTC (decimal) 1072569616.000
NuSTAR seconds since 2010.0 UTC (decimal) 126230401.000
RXTE seconds since 1994.0 UTC (decimal) 631152007.000
RXTE seconds since 1994.0 UTC (hexadecimal) 0x259e9d87
RXTE mission day number (dddd:hh:mm:ss) 7305:00:00:07.000
RXTE decimal mission day (dddd.ddd...) 7305.00008102
Suzaku seconds since 2000.0 UTC (decimal) 441849603.000
Swift seconds since 2001.0 UTC (decimal) 410227211.463
XMM/Chandra seconds since 1998.0 TT (decimal) 504921667.184
"""
usage = """
NAME
nitimeconv - Convert NICER related time into different time systems.
USAGE
%prog intime -f format -s scale
DESCRIPTION
'%prog' takes an input time with specified format and scale, and then
converts it into another time systems. This gives similar outputs of
the HEASARC "xTime - A Date/Time Conversion Utility"
https://heasarc.gsfc.nasa.gov/cgi-bin/Tools/xTime/xTime.pl
The NICER Mission Elapse Time (MET), the "TIME" column is defined as
elapsed TT seconds since the epoch 2014-01-01T00:00:00 UTC. Conversion
of this NICER timestamps to absolute time in TT_MJD can be defined as
MJD(TT) = (MJDREFI+MJDREFF) + (TIMEZERO+TIME)/86400
MJD(UTC) = (MJDREFI) + (TIMEZERO+TIME+LEAPINIT=2)/86400
This script converts the MET to MJD(TT) using the above definition, and
then calculates MJD(UTC) via the python library 'astropy' witout using
the above second equation. It is recommended to avoid representing UTC
in MJD unit. The NICER MET is compared with other METs of different
X-ray missions (Chandra/XMM, RXTE, Fermi, and Suzaku). The MET sometime
requires further time corrections, and please double check the xTime.
Important note: The default setup (flag_time_correction=False) gives
the same output of xTime. On the other hand, the header keyword "TIMEZERO"
of NICER fits files is set to be -1.0, i.e.,the time standard is that TIMEZERO
is always meant to be added to the TIME column. All proper software applies
TIMEZERO to TIME (and GTIs) columns. So, if you use this script to convert
the TIME or GTI columns in the NICER event fits files, please add
--flag_time_correction flag to add the NICER_TIMEZERO (default -1.0)
to input values.
REFERENCES
https://heasarc.gsfc.nasa.gov/docs/xte/abc/time_tutorial.html
http://docs.astropy.org/en/stable/api/astropy.time.Time.html#astropy.time.Time
EXAMPLES
1. Get the NICER TIME origion.
$ %prog 2014-01-01T00:00:00 -f isot -s utc
2. Convert Calender format to the NICER time (126266402.000).
$ %prog 2018-01-01T10:00:00 -f isot -s utc
3. Convert NICER TIME to other time format (oposite of 2).
$ %prog 126266402 -f met -s met
4. Convert MJD_UTC 58000 to other formats.
$ %prog 58000 -f mjd -s utc
5. Convert Fermi MET to other formats.
$ %prog 13000 -f met -s met_fermi
6. Convert elapsed day format (UTC) to other formats.
nitimeconv.py 2017:051:03:20:00.000 -f yday -s utc
"""
parser = OptionParser(usage=usage)
parser.add_option("-f","--format",dest="format",default="met",
action="store",help="Time format, any of (met, jd, mjd, isot, and yday)",type="string")
parser.add_option("-s","--scale",dest="scale",default="met",
action="store",help="Time scale, any of (utc, tt, met, met_nicer, met_fermi, met_nustar, met_suzaku, met_xmm, and met_chandra)",type="string")
parser.add_option("-c", "--flag_time_correction", dest="flag_time_correction",
action="store_true", default=False,
help="Flag to run the TIMEZERO correction to the input (default:False)")
parser.add_option("-z","--timezero_for_correction",dest="timezero_for_correction",default=NICER_TIMEZERO,
action="store",help="TIMEZERO value for correction (default:-1.0 s).",type="float")
(options, args) = parser.parse_args()
if len(args) != 1:
print("try: %s.py --help" % __name__)
print("usage: %s.py intime -f fomrat -s scale" % __name__)
quit()
input_value = args[0]
dump = "----- Input Time Value and Formats -----\n"
dump += "intime: %s\n" % str(input_value)
dump += "format: %s\n" % options.format
dump += "scale : %s\n" % options.scale
dump += "time zero correction: %s\n" % options.flag_time_correction
if options.flag_time_correction:
timezero = options.timezero_for_correction
dump += "TIMEZERO: %.3f (s)\n" % timezero
else:
timezero = 0.0
dump += "\n"
if options.format == "met":
if options.scale == "met" or options.scale == "met_nicer":
mission = float(input_value)
mission += timezero
mjd_tt = NICER_MJDREFI+NICER_MJDREFF+(mission)/86400.0
time_tt = Time(mjd_tt,format='mjd',scale='tt')
time_utc = time_tt.utc
elif options.scale == "met_fermi":
time_utc = Time(float(input_value)+FERMI_MET_ORIGIN.gps,format='gps',scale='utc')
time_tt = time_utc.tt
mission = time_tt.gps + NICER_MET_ORIGIN.gps
mission -= timezero
elif options.scale == "met_nustar":
time_utc = Time(float(input_value)+NUSTAR_MET_ORIGIN.gps,format='gps',scale='utc')
time_tt = time_utc.tt
mission = time_tt.gps + NICER_MET_ORIGIN.gps
mission -= timezero
elif options.scale == "met_suzaku":
time_utc = Time(float(input_value)+SUZAKU_MET_ORIGIN.gps,format='gps',scale='utc')
time_tt = time_utc.tt
mission = time_tt.gps + NICER_MET_ORIGIN.gps
mission -= timezero
elif options.scale == "met_xmm" or options.scale == "met_chandra":
time_utc = Time(float(input_value)+XMM_MET_ORIGIN.gps,format='gps',scale='utc')
time_tt = time_utc.tt
mission = time_tt.gps + NICER_MET_ORIGIN.gps
mission -= timezero
else:
if input_value.isdigit():
time = Time(float(input_value),format=options.format,scale=options.scale)
else:
time = Time(str(input_value),format=options.format,scale=options.scale)
time_tt = time.tt
time_tt.format = 'mjd'
mjd_tt = time_tt
mission = (float(mjd_tt.mjd) - NICER_MJDREFI - NICER_MJDREFF) * 86400.0
mission -= timezero
time_utc = time.utc
time_utc.format = 'mjd'
fermi_time = time_tt.gps - FERMI_MET_ORIGIN.gps
nustar_time = time_tt.gps - NUSTAR_MET_ORIGIN.gps
#rxte_time = time_tt.gps - RXTE_MET_ORIGIN.gps
suzaku_time = time_tt.gps - SUZAKU_MET_ORIGIN.gps
#swift_time = time_tt.gps - SWIFT_MET_ORIGIN.gps
xmm_time = time_tt.gps - XMM_MET_ORIGIN.gps
chandra_time = time_tt.cxcsec
dump += "----- Calendar Time Formats -----\n"
dump += "ISO8601_TT : %s (TT)\n" % time_tt.isot
dump += " JD_TT : %.8f (TT) \n" % time_tt.jd
dump += "MJD_TT : %.8f (TT)\n" % time_tt.mjd
dump += "DOY_TT : %s (TT)\n" % time_tt.yday
dump += "\n"
dump += "ISO8601_UTC: %s (UTC)\n" % time_utc.isot
dump += " JD_UTC : %.8f (UTC) \n" % time_utc.jd
dump += "MJD_UTC : %.8f (UTC) \n" % time_utc.mjd
dump += "DOY_UTC : %s (UTC)\n" % time_utc.yday
dump += "\n"
dump += "----- Mission-Specific Time Formats (Misson Elapsed Time, NET) -----\n"
dump += "Fermi seconds sicne 2001.0 UTC (decimal) : %.6f\n" % fermi_time
dump += "NuSTAR seconds since 2010.0 UTC (decimal) : %.6f\n" % nustar_time
#dump += "RXTE seconds since 1994.0 UTC (decimal) : %.8f\n" % rxte_time
dump += "Suzaku seconds since 2000.0 UTC (decimal) : %.6f\n" % suzaku_time
#dump += "Swift seconds since 2001.0 UTC (decimal): %.8f\n" % swift_time
dump += "XMM seconds since 1998.0 TT (decimal) : %.6f\n" % xmm_time
dump += "Chandra seconds since 1998.0 TT (decimal) : %.6f\n" % chandra_time
dump += "NICER seconds since 2014.0 UTC (decimal) : %.6f\n" % mission
if options.flag_time_correction:
dump += "Caution : TIMEZERO correction is included.\n"
print(dump)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
834,
3672,
834,
220,
220,
220,
796,
705,
77,
22552,
42946,
6,
198,
834,
9800,
834,
220,
796,
705,
15156,
84,
8182,
412,
1662,
78,
6,
198,
834,
9641,
834,
796,
705,
16,
13,
2... | 2.406109 | 3,536 |
#Fill in the blanks in the program below so that it prints “nit” (the reverse of the original character string “tin”).
original = "tin"
result = ____
for char in original:
result = ____
print(result)
| [
198,
2,
33762,
287,
262,
698,
2283,
287,
262,
1430,
2174,
523,
326,
340,
20842,
564,
250,
48825,
447,
251,
357,
1169,
9575,
286,
262,
2656,
2095,
4731,
564,
250,
43701,
447,
251,
737,
198,
198,
14986,
796,
366,
43701,
1,
198,
20274,... | 3.121212 | 66 |
import re
import typing
OUTPUT_VAR = "_output_"
INDENT = 1
UNINDENT = -1
INDENT_SPACES = 2
INDEX_VAR = "index"
class CodeBuilder:
"""Manage code generating context."""
class Token:
"""Token in template code"""
def extract_last_filter(text: str) -> (str, str):
"""
Extract last filter from expression like 'var | filter'.
return (var, None) when no more filters found.
"""
m = re.search(r'(\|\s*[A-Za-z0-9_]+\s*)$', text)
if m:
suffix = m.group(1)
filter_ = suffix[1:].strip()
var_name = text[:-len(suffix)].strip()
return var_name, filter_
return text, None
def parse_expr(text: str) -> (str, typing.List[str]):
"""
Parse expression to variable name and filters.
for example, "name | upper | strip" will be converted to 'name', [ 'upper', 'strip']
"""
var_name, filters = text, []
while True:
var_name, filter_ = extract_last_filter(var_name)
if filter_:
filters.insert(0, filter_)
else:
break
return var_name, filters
def create_control_token(text: str) -> Token:
"""Create control token() from source code fragment."""
text = text.strip()
m = re.match(r'^(\w+)', text)
if not m:
raise SyntaxError(f'Unknown control token: {text}')
keyword = m.group(1)
token_types = {
'for': For,
'endfor': EndFor,
}
if keyword not in token_types:
raise SyntaxError(f'Unknown control token: {text}')
return token_types[keyword]()
def create_token(text: str) -> Token:
"""Create token from source code fragment."""
if text.startswith("{{") and text.endswith("}}"):
token, content = Expr(), text[2:-2].strip()
elif text.startswith("{%") and text.endswith("%}"):
content = text[2:-2].strip()
token = create_control_token(content)
elif text.startswith("{#") and text.endswith("#}"):
token, content = Comment(), text[2:-2].strip()
else:
token, content = Text(), text
token.parse(content)
return token
def tokenize(text: str) -> typing.List[Token]:
"""Parse template text to tokens."""
segments = re.split(r'({{.*?}}|{#.*?#}|{%.*?%})', text)
segments = [x for x in segments if x]
return [create_token(x) for x in segments]
class Template:
"""Render template in flask-like syntax."""
def _generate_code(self):
"""Generate to compiled code if not done yet."""
if not self._code:
tokens = tokenize(self._text)
builder = CodeBuilder()
for token in tokens:
token.generate_code(builder)
builder.check_code()
self._code = compile(builder.source(), '', 'exec')
def render(self, ctx: dict) -> str:
"""bind context and generate result text"""
self._generate_code()
output = []
exec_ctx = (ctx or {}).copy()
exec_ctx.update({
OUTPUT_VAR: output,
'LoopVar': LoopVar,
})
exec(self._code, self._global_vars, exec_ctx)
return "".join(output)
class TemplateEngine:
"""Factory class to create Template object."""
| [
11748,
302,
198,
11748,
19720,
628,
198,
2606,
7250,
3843,
62,
53,
1503,
796,
45434,
22915,
62,
1,
198,
12115,
3525,
796,
352,
198,
4944,
12115,
3525,
796,
532,
16,
198,
12115,
3525,
62,
4303,
2246,
1546,
796,
362,
198,
12115,
6369,
... | 2.364915 | 1,351 |
# Django Imports
from django.conf.urls import patterns, include, url
from django.views.generic import RedirectView
from django.contrib import admin
from django.views.generic.base import TemplateView
from django.contrib.auth import views as auth_views
from registration.backends.default.views import ActivationView
from registration.backends.default.views import RegistrationView
# WaW Imports
from wawmembers.forms import RegistrationUniqueEmailCounters
'''
Dispatches URL requests to functions.
'''
urlpatterns = patterns('',
url(r'^$', RedirectView.as_view(url='/index/', permanent=True)),
url(r'^admin/', include(admin.site.urls)), # admin urls
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'', include('wawmembers.urls')),
url(r'^activate/complete/$',
TemplateView.as_view(template_name='registration/activation_complete.html'),
name='registration_activation_complete'),
# Activation keys get matched by \w+ instead of the more specific
# [a-fA-F0-9]{40} because a bad activation key should still get to the view;
# that way it can return a sensible "invalid key" message instead of a
# confusing 404.
url(r'^activate/(?P<activation_key>\w+)/$',
ActivationView.as_view(),
name='registration_activate'),
url(r'^register/$',
RegistrationView.as_view(form_class=RegistrationUniqueEmailCounters),
name='registration_register'),
url(r'^register/complete/$',
TemplateView.as_view(template_name='registration/registration_complete.html'),
name='registration_complete'),
url(r'^register/complete/$',
TemplateView.as_view(template_name='registration/registration_complete.html'),
name='registration_complete'),
url(r'^login/$',
auth_views.login,
{'template_name': 'registration/login.html'},
name='auth_login'),
url(r'^logout/$',
auth_views.logout,
{'template_name': 'registration/logout.html'},
name='auth_logout'),
url(r'^password/change/$',
auth_views.password_change,
name='auth_password_change'),
url(r'^password/change/done/$',
auth_views.password_change_done,
name='password_change_done'),
url(r'^password/reset/$',
auth_views.password_reset,
name='auth_password_reset'),
url(r'^password/reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
auth_views.password_reset_confirm,
name='auth_password_reset_confirm'),
url(r'^password/reset/complete/$',
auth_views.password_reset_complete,
name='password_reset_complete'),
url(r'^password/reset/done/$',
auth_views.password_reset_done,
name='password_reset_done'),
)
| [
2,
37770,
1846,
3742,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
2291,
11,
19016,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
2297,
1060,
7680,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
... | 2.550551 | 1,088 |
from unittest import TestCase
from .problem_13_3_anonymous_letter import solution
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
764,
45573,
62,
1485,
62,
18,
62,
272,
6704,
62,
9291,
1330,
4610,
628
] | 3.608696 | 23 |
import os.path
import threading
import Queue
import cPickle as pickle
import datetime
from matplotlib.finance import quotes_historical_yahoo
from matplotlib.dates import num2date
from stock.globalvar import *
us_dir = HIST_DIR['us']  # HIST_DIR is provided by the `from stock.globalvar import *` above
# Ensure the output directory for US historical data exists before workers start.
if not os.path.isdir(us_dir):
    os.makedirs(us_dir)
#for ticker in tickers:
#    for quote in quotes:
#        # open-high-low-close
#        day = num2date(quote[0])
#        open = quote[1]
#        close = quote[2]
#        high = quote[3]
#        low = quote[4]
#        volume = quote[5]
#        print "%s,%f,%f,%f,%f,%d" % (
#            day.strftime("%Y-%m-%d"),
#            open, high, low, close, volume)
if __name__ == "__main__":
    # Python 2 script (Queue/cPickle imports). Fan out work to ten daemon
    # threads that consume ticker symbols from a shared queue.
    queue = Queue.Queue()
    for i in range(10):
        # NOTE(review): Downloader is not defined in this chunk — presumably a
        # threading.Thread subclass defined elsewhere in the module; confirm.
        t = Downloader(queue)
        t.setDaemon(True)
        t.start()
    # Ticker symbols are stored one per line in SYMDIR/us_ticker
    # (SYMDIR also comes from stock.globalvar).
    filepath = os.path.join(SYMDIR, "us_ticker")
    f = open(filepath)
    content = f.read()
    tickers = content.split("\n")
    f.close()
    for ticker in tickers:
        queue.put(ticker)
    # Blocks until all queue entries are marked done — assumes Downloader
    # calls queue.task_done() per item; TODO confirm.
    queue.join()
| [
11748,
28686,
13,
6978,
198,
11748,
4704,
278,
198,
11748,
4670,
518,
198,
11748,
269,
31686,
293,
355,
2298,
293,
198,
11748,
4818,
8079,
198,
6738,
2603,
29487,
8019,
13,
69,
14149,
1330,
13386,
62,
10034,
12409,
62,
40774,
198,
6738,... | 2.003759 | 532 |
import sys
import os
import platform
from PyQt5.QtWidgets import (
QApplication,
QVBoxLayout,
QWidget,
QMainWindow,
QScrollArea,
QPushButton,
QTabBar,
QTabWidget
)
from PyQt5.QtCore import Qt, QFile, QIODevice, QTextStream
from FrontEnd.Tabs import ContactTab, ShareTab, RequestTab, act
from FrontEnd.Dialogs import LoginDialog, RegisterDialog, RecoveredDialog
from FrontEnd.CustomTab import TabBar, TabWidget
#(self, secret: bytes, message: str, secret_name: str, scratch_info=None, parent=None)
if __name__ == "__main__":
    app = QApplication(sys.argv)
    # Style from: https://github.com/sommerc/pyqt-stylesheets/blob/master/pyqtcss/src/dark_orange/style.qss
    # Gate the application: existing users must pass the login dialog, first-run
    # users the registration dialog; cancelling either aborts the program.
    if act.user_exists():
        login = LoginDialog()
        if not login.exec_():
            sys.exit(-1)
    else:
        register = RegisterDialog()
        if not register.exec_():
            sys.exit(-1)
    setup_logging() # connect to backend loggers
    # Load the application-wide stylesheet from the bundled .qss file.
    qss = "FrontEnd/styles/style3.qss"
    stream = QFile(qss)
    stream.open(QIODevice.ReadOnly)
    app.setStyleSheet(QTextStream(stream).readAll())
    stream.close()
    # NOTE(review): setup_logging and MainWindow are not defined in this chunk —
    # presumably defined elsewhere in this module; confirm.
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
| [
11748,
25064,
198,
11748,
28686,
198,
11748,
3859,
628,
198,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
357,
198,
220,
220,
220,
1195,
23416,
11,
198,
220,
220,
220,
1195,
53,
14253,
32517,
11,
198,
220,
220,
2... | 2.437751 | 498 |
#Constantly Changing, just a plugin I use to debug whatever is broken atm
import sys
import threading
from spock.mcmap import mapdata
from spock.mcp import mcdata
from spock.utils import pl_announce
| [
2,
34184,
3875,
33680,
11,
655,
257,
13877,
314,
779,
284,
14257,
4232,
318,
5445,
379,
76,
198,
11748,
25064,
198,
11748,
4704,
278,
198,
6738,
599,
735,
13,
23209,
8899,
1330,
3975,
7890,
198,
6738,
599,
735,
13,
76,
13155,
1330,
... | 3.553571 | 56 |
from string import Template
# Change parameters
appname = "InfoVision"
compname = "Conscienciology"
descript = "Application for training remote viewing"
vmajor = 1
vminor = 1
vbuild = 2
helpurl = ""
updateurl = ""
abouturl = ""
installsize = 16692  # estimated install size — units per installer convention; TODO confirm
iconpath = "src/images/tapa_olho.ico"
builder = "py"
# ---------------------------------------------------------------#
# Installer file name, e.g. "InfoVision-installer-112-py.exe".
outfile = f'{appname.replace(" ", "")}-installer-{vmajor}{vminor}{vbuild}-{builder}.exe'
# Convert the icon path to a Windows-style backslash path with a leading backslash.
iconpathrev = iconpath.replace("/", '\\')
iconpathrev = f"\\{iconpathrev}"
# Substitution values consumed by the installer-script template below.
d = {
    'appname': appname,
    'compname': compname,
    "descript": descript,
    "vmajor": vmajor,
    "vminor": vminor,
    "vbuild": vbuild,
    "helpurl": helpurl,
    "updateurl": updateurl,
    "abouturl": abouturl,
    "installsize": installsize,
    "iconpath": iconpath,
    "outfile": outfile,
    "iconpathrev": iconpathrev,
}
# NOTE(review): MyTemplate is not defined in this chunk (only string.Template
# is imported) — presumably a Template subclass defined elsewhere; confirm.
with open('nsis_template.txt', 'r') as f:
    src = MyTemplate(f.read())
result = src.substitute(d)
# NOTE(review): the rendered script is written under a .txt name while
# `outfile` carries the .exe name — verify the mismatch is intended.
with open(f'{appname}-installer-{vmajor}{vminor}{vbuild}.txt', 'w') as g:
    g.write(result)
6738,
4731,
1330,
37350,
628,
198,
2,
9794,
10007,
198,
1324,
3672,
796,
366,
12360,
44206,
1,
198,
5589,
3672,
796,
366,
9444,
979,
268,
979,
1435,
1,
198,
20147,
1968,
796,
366,
23416,
329,
3047,
6569,
11681,
1,
198,
14761,
1518,
... | 2.596154 | 416 |
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Python implementation of persistent list.
$Id$"""
import sys
import persistent
from persistent._compat import UserList
from persistent._compat import PYTHON2
# The slice object you get when you write list[:]
_SLICE_ALL = slice(None, None, None)
class PersistentList(UserList, persistent.Persistent):
    """A persistent wrapper for list objects.
    Mutating instances of this class will cause them to be marked
    as changed and automatically persisted.
    .. versionchanged:: 4.5.2
    Using the `clear` method, or deleting a slice (e.g., ``del inst[:]`` or ``del inst[x:x]``)
    now only results in marking the instance as changed if it actually removed
    items.
    .. versionchanged:: 4.5.2
    The `copy` method is available on Python 2.
    """
    # Name-mangled aliases of the plain UserList implementations, so the
    # overriding methods (stripped from this chunk) can delegate to them
    # before setting _p_changed.
    __super_getitem = UserList.__getitem__
    __super_setitem = UserList.__setitem__
    __super_delitem = UserList.__delitem__
    __super_iadd = UserList.__iadd__
    __super_imul = UserList.__imul__
    __super_append = UserList.append
    __super_insert = UserList.insert
    __super_pop = UserList.pop
    __super_remove = UserList.remove
    __super_reverse = UserList.reverse
    __super_sort = UserList.sort
    __super_extend = UserList.extend
    # UserList.clear only exists on Python 3.3+; fall back to deleting the
    # full slice, which is behaviorally equivalent.
    __super_clear = (
        UserList.clear
        if hasattr(UserList, 'clear')
        else lambda inst: inst.__delitem__(_SLICE_ALL)
    )
    # NOTE(review): the bodies of the version-conditional method definitions
    # below were stripped from this chunk; as shown the `if` statements have
    # comment-only bodies and are not valid Python on their own.
    if not PYTHON2 and sys.version_info[:3] < (3, 7, 4):
        # Prior to 3.7.4, Python 3 (but not Python 2) failed to properly
        # return an instance of the same class.
        # See https://bugs.python.org/issue27639
        # and https://github.com/zopefoundation/persistent/issues/112.
        # We only define the special method on the necessary versions to avoid
        # any speed penalty.
        if sys.version_info[:3] < (3, 7, 4):
            # Likewise for __copy__, except even Python 2 needs it.
            # See https://github.com/python/cpython/commit/3645d29a1dc2102fdb0f5f0c0129ff2295bcd768
            if PYTHON2:  # pragma: no cover
                __super_setslice = UserList.__setslice__
                __super_delslice = UserList.__delslice__
    def clear(self):
        """
        Remove all items from the list.
        .. versionchanged:: 4.5.2
        Now marks the list as changed, and is available
        on both Python 2 and Python 3.
        """
        # Capture emptiness before clearing: an already-empty list must not
        # be marked changed (avoids a needless persistence write).
        needs_changed = bool(self)
        self.__super_clear()
        if needs_changed:
            self._p_changed = 1
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
198,
2,
15069,
357,
66,
8,
5878,
11,
6244,
1168,
3008,
5693,
290,
25767,
669,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
770,
3788,
318,
2426,
284,
262,
8617,
286,
262,
1168,
3008,
... | 2.775693 | 1,119 |
import os
import random
import shlex
import shutil
import sys
import threading
import uuid
from collections import Counter
from contextlib import contextmanager
from io import StringIO
import bottle
import requests
import six
import time
from mock import Mock
from six.moves.urllib.parse import urlsplit, urlunsplit
from webtest.app import TestApp
from conans import __version__ as CLIENT_VERSION, tools
from conans.client.client_cache import ClientCache
from conans.client.command import Command
from conans.client.conan_api import migrate_and_get_client_cache, Conan, get_request_timeout
from conans.client.conan_command_output import CommandOutputer
from conans.client.conf import MIN_SERVER_COMPATIBLE_VERSION
from conans.client.output import ConanOutput
from conans.client.remote_registry import RemoteRegistry
from conans.client.rest.conan_requester import ConanRequester
from conans.client.rest.uploader_downloader import IterableToFileAdapter
from conans.client.tools.scm import Git
from conans.client.userio import UserIO
from conans.model.version import Version
from conans.test.server.utils.server_launcher import (TESTING_REMOTE_PRIVATE_USER,
TESTING_REMOTE_PRIVATE_PASS,
TestServerLauncher)
from conans.test.utils.runner import TestRunner
from conans.test.utils.test_files import temp_folder
from conans.tools import set_global_instances
from conans.util.env_reader import get_env
from conans.util.files import save_files, save, mkdir
from conans.util.log import logger
from conans.model.ref import ConanFileReference, PackageReference
from conans.model.manifest import FileTreeManifest
class TestingResponse(object):
    """Wraps a response from TestApp external tool
    to guarantee the presence of response.ok, response.content
    and response.status_code, as it was a requests library object.
    Is instanced by TestRequester on each request"""
    # NOTE(review): the bodies of the decorated accessors were stripped from
    # this chunk; the stacked bare decorators below (each @property /
    # @charset.setter needs a function to wrap) are not valid Python as shown.
    @property
    @property
    @property
    @property
    @charset.setter
    @property
    @property
class TestRequester(object):
    """Fake requests module calling server applications
    with TestApp"""
    # NOTE(review): the decorated static helper functions were stripped from
    # this chunk; the bare @staticmethod decorators below need their wrapped
    # functions restored to be valid Python.
    @staticmethod
    @staticmethod
class TestBufferConanOutput(ConanOutput):
    """ wraps the normal output of the application, captures it into an stream
    and gives it operators similar to string, so it can be compared in tests
    """
    # NOTE(review): method bodies (stream capture, string-like operators)
    # appear stripped from this chunk — confirm against the full module.
class MockedUserIO(UserIO):
    """
    Testing double for UserIO. Instead of prompting interactively, answers
    for get_username / get_password are served sequentially from a preloaded
    mapping of credentials; a missing value raises.
    """

    def __init__(self, logins, ins=sys.stdin, out=None):
        """
        logins is a dict of {remote: list(user, password)}
        will return sequentially
        """
        assert isinstance(logins, dict)
        self.logins = logins
        # Per-remote count of credential pairs already handed out.
        self.login_index = Counter()
        UserIO.__init__(self, ins, out)

    def get_password(self, remote_name):
        """Overridable for testing purpose"""
        # Environment-provided passwords win over the scripted answers.
        env_password = self._get_env_password(remote_name)
        if env_password:
            return env_password
        self._raise_if_non_interactive()
        credentials = self.logins[remote_name]
        used = self.login_index[remote_name]
        password = credentials[used][1]
        # Advance the cursor so the next call yields the following pair.
        self.login_index.update([remote_name])
        return password
class TestClient(object):
    """ Test wrap of the conans application to launch tests in the same way as
    in command line
    """
    def __init__(self, base_folder=None, current_folder=None,
                 servers=None, users=None, client_version=CLIENT_VERSION,
                 min_server_compatible_version=MIN_SERVER_COMPATIBLE_VERSION,
                 requester_class=None, runner=None, path_with_spaces=True):
        """
        storage_folder: Local storage path
        current_folder: Current execution folder
        servers: dict of {remote_name: TestServer}
        logins is a list of (user, password) for auto input in order
        if required==> [("lasote", "mypass"), ("other", "otherpass")]
        """
        self.all_output = ""  # For debugging purpose, append all the run outputs
        self.users = users or {"default":
                                   [(TESTING_REMOTE_PRIVATE_USER, TESTING_REMOTE_PRIVATE_PASS)]}
        self.client_version = Version(str(client_version))
        self.min_server_compatible_version = Version(str(min_server_compatible_version))
        # Each client gets its own isolated home folder (optionally containing
        # spaces, to exercise path handling).
        self.base_folder = base_folder or temp_folder(path_with_spaces)
        # Define storage_folder, if not, it will be read from conf file & pointed to real user home
        self.storage_folder = os.path.join(self.base_folder, ".conan", "data")
        self.client_cache = ClientCache(self.base_folder, self.storage_folder, TestBufferConanOutput())
        self.requester_class = requester_class
        self.conan_runner = runner
        self.update_servers(servers)
        self.init_dynamic_vars()
        logger.debug("Client storage = %s" % self.storage_folder)
        self.current_folder = current_folder or temp_folder(path_with_spaces)
    # NOTE(review): the bodies of the decorated properties / context manager
    # were stripped from this chunk; the stacked bare decorators below are not
    # valid Python as shown.
    @property
    @property
    @property
    @property
    @contextmanager
    def run(self, command_line, user_io=None, ignore_error=False):
        """ run a single command as in the command line.
        If user or password is filled, user_io will be mocked to return this
        tuple if required
        """
        self.init_dynamic_vars(user_io)
        with tools.environment_append(self.client_cache.conan_config.env_vars):
            # Settings preprocessor
            interactive = not get_env("CONAN_NON_INTERACTIVE", False)
            conan = Conan(self.client_cache, self.user_io, self.runner, self.remote_manager,
                          interactive=interactive)
            outputer = CommandOutputer(self.user_io, self.client_cache)
            command = Command(conan, self.client_cache, self.user_io, outputer)
            args = shlex.split(command_line)
            # Run the command from the client's working folder, with the
            # client's python extension folder importable.
            current_dir = os.getcwd()
            os.chdir(self.current_folder)
            old_path = sys.path[:]
            sys.path.append(os.path.join(self.client_cache.conan_folder, "python"))
            old_modules = list(sys.modules.keys())
            try:
                error = command.run(args)
            finally:
                # Always restore cwd and sys.path, even when the command raised.
                sys.path = old_path
                os.chdir(current_dir)
                # Reset sys.modules to its prev state. A .copy() DOES NOT WORK
                added_modules = set(sys.modules).difference(old_modules)
                for added in added_modules:
                    sys.modules.pop(added, None)
            if not ignore_error and error:
                logger.error(self.user_io.out)
                print(self.user_io.out)
                raise Exception("Command failed:\n%s" % command_line)
            self.all_output += str(self.user_io.out)
            return error
    def save(self, files, path=None, clean_first=False):
        """ helper method, will store files in the current folder
        param files: dict{filename: filecontents}
        """
        path = path or self.current_folder
        if clean_first:
            # Wipe the working folder before writing the new fixture files.
            shutil.rmtree(self.current_folder, ignore_errors=True)
        save_files(path, files)
        if not files:
            # An empty dict still guarantees the folder itself exists.
            mkdir(self.current_folder)
class StoppableThreadBottle(threading.Thread):
    """
    Real server to test download endpoints
    """
    # Class-level defaults, filled in at start-up.
    # NOTE(review): the thread body / stop logic appears stripped from this
    # chunk — confirm against the full module.
    server = None  # presumably the bottle application instance — confirm
    port = None    # presumably the TCP port the server listens on — confirm
| [
11748,
28686,
198,
11748,
4738,
198,
11748,
427,
2588,
198,
11748,
4423,
346,
198,
11748,
25064,
198,
11748,
4704,
278,
198,
11748,
334,
27112,
198,
6738,
17268,
1330,
15034,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
6738,
33245,
133... | 2.569252 | 2,888 |
from typing import List
from fastapi import APIRouter, Depends, Query
from boucanpy.core import only
from boucanpy.core.security import ScopedTo, TokenPayload
from boucanpy.db.models.zone import Zone
from boucanpy.core import SortQS, PaginationQS, BaseResponse
from boucanpy.core.zone import (
ZoneRepo,
ZonesResponse,
ZoneResponse,
ZoneData,
ZoneCreateForm,
)
# Router for the zone endpoints; routes below it are registered on this object.
router = APIRouter()
# NOTE(review): `options` looks like it is consumed by the including app to
# mount these routes under a per-DNS-server prefix — confirm against caller.
options = {"prefix": "/dns-server/{dns_server}"}
@router.get("/zone", name="dns_server.zone.index", response_model=ZonesResponse)
| [
6738,
19720,
1330,
7343,
198,
6738,
3049,
15042,
1330,
3486,
4663,
39605,
11,
2129,
2412,
11,
43301,
198,
6738,
35833,
5171,
9078,
13,
7295,
1330,
691,
198,
6738,
35833,
5171,
9078,
13,
7295,
13,
12961,
1330,
1446,
19458,
2514,
11,
2913... | 2.891892 | 185 |
# Human-readable name this plugin exposes to the menu framework.
menu_name = "MOCP control"
from subprocess import call
from ui import Menu, Printer
#Some globals for LCS
main_menu = None
callback = None
#Some globals for us (input/output device handles, presumably set by an
# init() elsewhere in this module — confirm)
i = None
o = None
#MOCP commands
# NOTE(review): the mocp_* handler functions referenced below are not defined
# in this chunk — presumably defined elsewhere in the module; confirm.
main_menu_contents = [
    ["Toggle play/pause", mocp_toggle_play],
    ["Next song", mocp_next],
    ["Previous song", mocp_prev],
    ["Exit", 'exit']
]
| [
26272,
62,
3672,
796,
366,
44,
4503,
47,
1630,
1,
198,
198,
6738,
850,
14681,
1330,
869,
198,
6738,
334,
72,
1330,
21860,
11,
1736,
3849,
198,
198,
2,
4366,
15095,
874,
329,
38217,
198,
12417,
62,
26272,
796,
6045,
198,
47423,
796,
... | 2.704 | 125 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from keystone import exception
# Abstract driver interface: six.add_metaclass keeps the ABCMeta declaration
# compatible with both Python 2 and 3.
@six.add_metaclass(abc.ABCMeta)
class EndpointPolicyDriverBase(object):
    """Interface description for an Endpoint Policy driver."""
    # Every method raises keystone's NotImplemented exception so a backend
    # that forgets to override one fails loudly at call time.
    @abc.abstractmethod
    def create_policy_association(self, policy_id, endpoint_id=None,
                                  service_id=None, region_id=None):
        """Create a policy association.
        :param policy_id: identity of policy that is being associated
        :type policy_id: string
        :param endpoint_id: identity of endpoint to associate
        :type endpoint_id: string
        :param service_id: identity of the service to associate
        :type service_id: string
        :param region_id: identity of the region to associate
        :type region_id: string
        :returns: None
        There are three types of association permitted:
        - Endpoint (in which case service and region must be None)
        - Service and region (in which endpoint must be None)
        - Service (in which case endpoint and region must be None)
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def check_policy_association(self, policy_id, endpoint_id=None,
                                 service_id=None, region_id=None):
        """Check existence a policy association.
        :param policy_id: identity of policy that is being associated
        :type policy_id: string
        :param endpoint_id: identity of endpoint to associate
        :type endpoint_id: string
        :param service_id: identity of the service to associate
        :type service_id: string
        :param region_id: identity of the region to associate
        :type region_id: string
        :raises keystone.exception.PolicyAssociationNotFound: If there is no
            match for the specified association.
        :returns: None
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def delete_policy_association(self, policy_id, endpoint_id=None,
                                  service_id=None, region_id=None):
        """Delete a policy association.
        :param policy_id: identity of policy that is being associated
        :type policy_id: string
        :param endpoint_id: identity of endpoint to associate
        :type endpoint_id: string
        :param service_id: identity of the service to associate
        :type service_id: string
        :param region_id: identity of the region to associate
        :type region_id: string
        :returns: None
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def get_policy_association(self, endpoint_id=None,
                               service_id=None, region_id=None):
        """Get the policy for an explicit association.
        This method is not exposed as a public API, but is used by
        get_policy_for_endpoint().
        :param endpoint_id: identity of endpoint
        :type endpoint_id: string
        :param service_id: identity of the service
        :type service_id: string
        :param region_id: identity of the region
        :type region_id: string
        :raises keystone.exception.PolicyAssociationNotFound: If there is no
            match for the specified association.
        :returns: dict containing policy_id
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def list_associations_for_policy(self, policy_id):
        """List the associations for a policy.
        This method is not exposed as a public API, but is used by
        list_endpoints_for_policy().
        :param policy_id: identity of policy
        :type policy_id: string
        :returns: List of association dicts
        """
        raise exception.NotImplemented()  # pragma: no cover
    # The delete_association_by_* helpers below are bulk cleanups used when
    # the referenced endpoint/service/region/policy itself is removed.
    @abc.abstractmethod
    def delete_association_by_endpoint(self, endpoint_id):
        """Remove all the policy associations with the specific endpoint.
        :param endpoint_id: identity of endpoint to check
        :type endpoint_id: string
        :returns: None
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def delete_association_by_service(self, service_id):
        """Remove all the policy associations with the specific service.
        :param service_id: identity of endpoint to check
        :type service_id: string
        :returns: None
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def delete_association_by_region(self, region_id):
        """Remove all the policy associations with the specific region.
        :param region_id: identity of endpoint to check
        :type region_id: string
        :returns: None
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def delete_association_by_policy(self, policy_id):
        """Remove all the policy associations with the specific policy.
        :param policy_id: identity of endpoint to check
        :type policy_id: string
        :returns: None
        """
        raise exception.NotImplemented()  # pragma: no cover
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 2.678092 | 2,159 |
import torch.nn as nn
import torch.nn.functional as F
from .binary_functions import IRNetSign, RANetActSign, RANetWSign
import torch
import math
| [
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
201,
198,
6738,
764,
39491,
62,
12543,
2733,
1330,
14826,
7934,
11712,
11,
371,
1565,
316,
6398,
11712,
11,
371,
1565,
316,
54,
11712,
201,... | 2.65 | 60 |
import math
import numpy as np
__author__ = "Valentin Rakovic"
__copyright__ = "Copyright (c) 2017, Faculty of Electrical Engineering and Information Technologies, UKIM, Skopje, Macedonia"
__version__ = "0.1.0"
__email__ = "{valentin}@feit.ukim.edu.mk"
'''
Localization Module
Used for processing of the localization process in the REM backend
'''
def ML_grid(xs, ys, zs, rss, ulx=0, uly=15, drx=32, dry=0, nx=50, ny=50, nz=50):
    '''
    Localization process based on ML grid search.
    Args:
        xs, ys, zs: vectors of coordinates for the x, y, z axis
        rss: vector of measured values on coordinates xs, ys, zs
        ulx, uly, drx, dry: upper left and lower right corner coordinates of
            the area of interest for the loc process
        nx, ny, nz: resolution for x, y, z axis
    Returns:
        list [xE, yE, zE, pE]: estimated x, y, z coordinates and the
        respective estimated tx power, pE = 10*log10(|P0|)
    '''
    # Force float dtype up front: the anti-collision nudge below adds 1e-5
    # in place, which raises a numpy casting error on integer input arrays.
    X = np.asarray(xs, dtype=float)
    Y = np.asarray(ys, dtype=float)
    Z = np.asarray(zs, dtype=float)
    P = np.asarray(rss, dtype=float)
    noMeasP = len(xs)

    xmin = ulx
    ymin = dry
    zmin = np.amin(zs)
    zmax = np.amax(zs)
    xres = abs((drx - ulx) / nx)
    yres = abs((dry - uly) / ny)
    zres = abs((zmax - zmin) / nz)

    # Candidate grid of (nx+1)*(ny+1)*(nz+1) points covering the area.
    # Plain ndarray instead of the deprecated np.asmatrix.
    n_points = (nx + 1) * (ny + 1) * (nz + 1)
    points = np.zeros((n_points, 3))
    ii = 0
    for i in range(0, nx + 1):
        for j in range(0, ny + 1):
            for k in range(0, nz + 1):
                points[ii, 0] = i * xres + xmin
                points[ii, 1] = j * yres + ymin
                points[ii, 2] = k * zres + zmin
                # Nudge any measurement that coincides exactly with a grid
                # point, so the log-distance below never sees distance zero.
                for ik in range(0, noMeasP):
                    if ((X[ik] == points[ii, 0]) and (Y[ik] == points[ii, 1])
                            and (Z[ik] == points[ii, 2])):
                        X[ik] += 0.00001
                        Y[ik] += 0.00001
                        Z[ik] += 0.00001
                ii += 1

    # Exhaustive likelihood maximisation over the candidate grid.
    L = -math.inf
    xE = -1
    yE = -1
    zE = -1
    pE = -1
    D = np.zeros(noMeasP)    # 10*log10 of distance to each measurement
    D2 = np.zeros(noMeasP)
    DP2 = np.zeros(noMeasP)
    Pm = np.zeros(noMeasP)   # model residuals for the current candidate
    for i in range(0, n_points):
        sumD = 0
        sumD2 = 0
        sumP = 0
        sumDP2 = 0
        for j in range(0, noMeasP):
            D[j] = 10 * math.log10(math.sqrt(math.pow(X[j] - points[i, 0], 2)
                                             + math.pow(Y[j] - points[i, 1], 2)
                                             + math.pow(Z[j] - points[i, 2], 2)))
            D2[j] = math.pow(D[j], 2)
            DP2[j] = D[j] * P[j]
            sumD += D[j]
            sumD2 += D2[j]
            sumP += P[j]
            sumDP2 += DP2[j]
        Da = sumD / noMeasP
        Da2 = math.pow(Da, 2)
        D2a = sumD2 / noMeasP
        Pa = sumP / noMeasP
        DPa = sumDP2 / noMeasP
        # Closed-form estimates for this candidate location: P0 (tx power
        # term) and npp (slope of P against log-distance).
        P0 = (D2a * Pa - Da * DPa) / (D2a - Da2)
        npp = (Da * Pa - DPa) / (D2a - Da2)
        Lnp = 0
        for j in range(0, noMeasP):
            Pm[j] = P[j] - P0 + npp * D[j]
            Lnp += math.pow(Pm[j], 2)
        Ln = -Lnp / 2
        # Keep the candidate with the highest (least negative) likelihood.
        if Ln > L:
            L = Ln
            xE = points[i, 0]
            yE = points[i, 1]
            zE = points[i, 2]
            pE = 10 * math.log10(abs(P0))

    return [xE, yE, zE, pE]
| [
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
628,
198,
834,
9800,
834,
796,
366,
7762,
31371,
34299,
17215,
1,
198,
834,
22163,
4766,
834,
796,
366,
15269,
357,
66,
8,
2177,
11,
35262,
286,
40224,
14044,
290,
6188,
21852,
11,
348... | 1.922973 | 1,480 |
# coding: utf-8
# 2019/12/24 @ tongshiwei
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
13130,
14,
1065,
14,
1731,
2488,
10479,
44019,
42990,
198
] | 2.210526 | 19 |
from PyQt5 import uic, QtWidgets, QtCore, QtGui
from PyQt5.QtCore import Qt
from PyQt5.Qt import QKeySequence
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import QWidget, QMessageBox
| [
6738,
9485,
48,
83,
20,
1330,
334,
291,
11,
33734,
54,
312,
11407,
11,
33734,
14055,
11,
33734,
8205,
72,
201,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
33734,
201,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
1330,
1195... | 2.079208 | 101 |
# Generated by Django 4.0.1 on 2022-03-24 10:15
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
604,
13,
15,
13,
16,
319,
33160,
12,
3070,
12,
1731,
838,
25,
1314,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
#! python
# ===============LICENSE_START=======================================================
# metadata-flatten-extractor Apache-2.0
# ===================================================================================
# Copyright (C) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ===================================================================================
# This software file is distributed by AT&T
# under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============LICENSE_END=========================================================
# -*- coding: utf-8 -*-
from os import path
import json
from pandas import DataFrame
from contentai_metadata_flatten.parsers import Flatten
| [
2,
0,
21015,
198,
2,
796,
25609,
855,
43,
2149,
24290,
62,
2257,
7227,
10052,
4770,
1421,
18604,
198,
2,
20150,
12,
2704,
41769,
12,
2302,
40450,
24843,
12,
17,
13,
15,
198,
2,
38093,
4770,
855,
198,
2,
15069,
357,
34,
8,
2177,
... | 4.318898 | 254 |
"""
argparse_helpers.py
This module holds helper functions for use with argparse.
Some of these functions can be used for the "type" property for argparse::add_arguments
calls. Simply pass the function name as such: type=<function name>.
"""
__author__ = 'sedwards'
import ast
from typing import Any, Dict, List, Optional, Tuple, Union
from _pytest.config import Config
from syslinkats.framework.validators.validate_args import validate_args_for_value
from syslinkats.tests.common.utils.testing_helper import TestingHelper
def bool_from_str(arg: Any) -> bool:
    """Take in an argument and try to return its bool equivalent.
    Args:
        arg (Any): This can be any type. However, in general use it should be either a bool or
            str.
    Returns:
        bool: The bool equivalent of the passed in argument. If the argument is not a str or
            bool, then False is returned.
    """
    if isinstance(arg, bool):
        return arg
    if isinstance(arg, str):
        # Compare the whole trimmed, case-folded token. The previous
        # substring test (`'true' in arg.lower()`) wrongly treated values
        # such as "untrue" or "not true" as True.
        return arg.strip().lower() == 'true'
    # If any other type, just return False.
    return False
def int_from_str(arg: Any) -> int:
    """Take in an argument and try to return its int equivalent.
    Args:
        arg (Any): This can be any type. However, in general use it should be either an int or
            str.
    Returns:
        int: The int equivalent of the passed in argument. If the argument is not a str or
            int, then 0 is returned.
    """
    if isinstance(arg, int):
        return arg
    if not isinstance(arg, str):
        # Anything that is neither int nor str has no int equivalent here.
        return 0
    try:
        return int(arg)
    except ValueError:
        return 0
def literal_from_str(arg: Any) -> Any:
    """Take in a literal string and eval it."""
    # Falsy input (None, empty string, 0, ...) yields None.
    if not arg:
        return None
    try:
        return ast.literal_eval(arg)
    except (ValueError, SyntaxError):
        # Plain strings and malformed literals come back unchanged.
        return arg
def get_val_from_arg_list(args_list: List[str], key: str, default: Any):
    """Parse an argparse args list for keys and return their associated value.
    Args:
        args_list (List[str]): argv-style token list, e.g. ["--host", "a", "--port", "8080"].
        key (str): the flag to look up.
        default (Any): value returned when the key is absent or has no value after it.
    Returns:
        Any: the token following *key*, or *default*.
    """
    try:
        key_index = args_list.index(key)
    except ValueError:
        # Key not present at all.
        return default
    # Bounds guard: the original `len(args_list) > key_index` was always true,
    # so a key in the final position raised an uncaught IndexError.
    if key_index + 1 < len(args_list):
        return args_list[key_index + 1]
    return default
def get_arg_dict(arg_list: Union[Optional[List[str]], Optional[str]] = None,
                 key_list: List[Tuple[str, Any]] = None,
                 config: Config = None) -> Dict[str, Any]:
    """Get a dict with key: value pairs from either a list of args, or the config object."""
    validate_args_for_value(key_list=key_list, config=config)
    collected: Dict[str, Any] = {}
    if not arg_list:
        # No explicit args given: read every option straight from the config,
        # falling back to each key's declared default.
        for key in key_list:
            collected[key[0]] = config.getoption(key[0], default=key[1])
        return collected
    # A raw string is first split into argv-style tokens.
    if isinstance(arg_list, str):
        arg_list = TestingHelper.pass_through_args_converter(arg_list)
    for key in key_list:
        if key[0] not in arg_list:
            value = config.getoption(key[0])
        else:
            value = get_val_from_arg_list(args_list=arg_list, key=key[0], default=key[1])
            if value is None:
                # Arg was present but carried no usable value.
                value = config.getoption(key[0])
        collected[key[0]] = value
    return collected
| [
37811,
198,
853,
29572,
62,
16794,
364,
13,
9078,
198,
198,
1212,
8265,
6622,
31904,
5499,
329,
779,
351,
1822,
29572,
13,
198,
198,
4366,
286,
777,
5499,
460,
307,
973,
329,
262,
366,
4906,
1,
3119,
329,
1822,
29572,
3712,
2860,
62... | 2.41701 | 1,458 |
import shelve
if __name__ == '__main__':
    # Write a few sample records; the with-block closes the shelf so the
    # data is flushed to disk.
    with shelve.open("22901.db") as s:
        s["name"] = "www.itdiffer.com"
        s["lang"] = "python"
        s["pages"] = 1000
        s["contents"] = {"first":"base knowledge","second":"day day up"}
    # Reopen and read back. The original leaked this second handle (it was
    # never closed); Shelf supports the context-manager protocol since 3.4.
    with shelve.open("22901.db") as s:
        name = s["name"]
        print (name)
        contents = s["contents"]
        print (contents)
        print ('======================')
        print (s)
| [
11748,
7497,
303,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
264,
796,
7497,
303,
13,
9654,
7203,
23539,
486,
13,
9945,
4943,
198,
220,
220,
220,
264,
14692,
3672,
8973,
796,
366,
2503,
13,
27... | 2.377907 | 172 |
if __name__ == '__main__':
    # Candidate input files; only the first one is actually parsed.
    files = ['files/Matching.txt','files/Matching2.txt']
    # Each line of the file is a whitespace-separated row of the 0/1 matrix.
    with open(files[0]) as source:
        matrix = [[int(token) for token in row.split()] for row in source]
    vertices = list(range(len(matrix)))
    # next_x[j] collects the row indices i with matrix[i][j] == 1.
    next_x = [[] for _ in range(len(matrix))]
    for row_idx, row in enumerate(matrix):
        for col_idx in range(len(matrix[0])):
            if row[col_idx] == 1:
                next_x[col_idx].append(row_idx)
    H = Hungarian()
    H.matching(vertices, next_x)
| [
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
3696,
796,
37250,
16624,
14,
44,
19775,
13,
14116,
41707,
16624,
14,
44,
19775,
17,
13,
14116,
20520,
201,
198,
220,
220,
220,
17593,
796,
... | 2.003817 | 262 |
import argparse
import time
from os.path import join
import os
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision.transforms import ToTensor, ToPILImage
import torchvision.utils as utils
from data_utils import display_transform
from model import Generator
# Command-line options for single-image super-resolution inference.
parser = argparse.ArgumentParser(description='Test Single Image')
parser.add_argument('--upscale_factor', default=4, type=int, help='super resolution upscale factor')
parser.add_argument('--test_mode', default='GPU', type=str, choices=['GPU', 'CPU'], help='using GPU or CPU')
parser.add_argument('--image_path', default=r'D:\UAVLandmark\Dataset\keypoint\BBox_LR', type=str, help='test low resolution image name')
parser.add_argument('--name', default='onechannelx4', type=str, help='where to save the model')
parser.add_argument('--epoch', default=500, type=int, help='generator model epoch name')
opt = parser.parse_args()

UPSCALE_FACTOR = opt.upscale_factor
TEST_MODE = opt.test_mode == 'GPU'
IMAGE_PATH = opt.image_path
OUT_PATH = join('results', opt.name, 'images_%s' % opt.epoch)
print(OUT_PATH)
os.makedirs(OUT_PATH, exist_ok=True)
MODEL_NAME = join('results', opt.name, 'netG_epoch_%d_%d.pth' % (opt.upscale_factor, opt.epoch))

# Build the generator in eval mode and load the trained weights.
model = Generator(UPSCALE_FACTOR).eval()
if TEST_MODE:
    model.cuda()
    model.load_state_dict(torch.load(MODEL_NAME))
else:
    model.load_state_dict(torch.load(MODEL_NAME, map_location=lambda storage, loc: storage))

for name in os.listdir(IMAGE_PATH):
    image = Image.open(os.path.join(IMAGE_PATH, name))
    # FIX: `Variable(..., volatile=True)` is a removed PyTorch 0.3 idiom;
    # gradients are disabled for inference with `torch.no_grad()` below.
    image = ToTensor()(image).unsqueeze(0)
    if TEST_MODE:
        image = image.cuda()

    # FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended timer for measuring short durations.
    start = time.perf_counter()
    with torch.no_grad():
        out = model(image)
    elapsed = time.perf_counter() - start
    print('cost' + str(elapsed) + 's')

    out_img = ToPILImage()(out[0].data.cpu())
    out_img.save(os.path.join(OUT_PATH, name))
    utils.save_image(out, os.path.join(OUT_PATH, 'tensor_%s' % (name)))
| [
11748,
1822,
29572,
198,
11748,
640,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
11748,
28686,
198,
198,
11748,
28034,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
6738,
28034,
10178,
13,
7... | 2.632488 | 868 |
import os
import json

# Count pairs of gold entities whose spans overlap although their category
# prefixes (first 3 chars) differ, across all documents in the POLEVAL NER
# gold file. Also count the total number of parsed entities.
counts = 0       # overlapping cross-category entity pairs
all_counts = 0   # total entities successfully parsed

with open("POLEVAL-NER_GOLD.json") as f:
    main_data = json.load(f)["questions"]
    for doc in main_data:
        answers = doc["answers"][0]["data"]["brat"].split("\n")
        ents = []
        for answer in answers:
            if answer == "":
                continue
            val = answer.split("\t")
            if len(val) > 1:
                entity, text = val[0], val[1]
            else:
                print("Error {}".format(val))
                continue
            try:
                category, start, end = text.split(" ")
            except ValueError:
                # Discontinuous/odd annotations don't split into exactly
                # three fields; skip them, as the original broad except did.
                continue
            for x in ents:
                if x["category"][:3] != category[:3]:
                    # FIX: offsets are strings; compare them as integers.
                    # The original compared lexicographically ("9" > "10"),
                    # mis-detecting overlaps.
                    if (start.isnumeric() and end.isnumeric()
                            and x["start"].isnumeric() and x["end"].isnumeric()
                            and not (int(end) < int(x["start"]) or int(x["end"]) < int(start))):
                        counts += 1
                        break
            all_counts += 1
            ents.append({"start": start, "end": end, "category": category})
print(counts)
print(all_counts) | [
11748,
28686,
198,
11748,
33918,
198,
15908,
796,
366,
9288,
1,
198,
9127,
82,
796,
657,
198,
439,
62,
9127,
82,
796,
15,
198,
4480,
1280,
7203,
16402,
2538,
23428,
12,
21479,
62,
38,
15173,
13,
17752,
4943,
355,
277,
25,
198,
220,
... | 1.678571 | 672 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import codecs
import json
from scrapy.pipelines.images import ImagesPipeline
from scrapy.exporters import JsonItemExporter
from twisted.enterprise import adbapi
from models.es_types import ArticleType
from w3lib.html import remove_tags
import pymysql
pymysql.install_as_MySQLdb()
import MySQLdb
import MySQLdb.cursors
import re
# Write to MySQL using a synchronous mechanism
# Custom export of JSON files
# Write to MySQL using a synchronous mechanism
# Use scrapy's built-in JSON exporter to export a JSON file
# Write the data into Elasticsearch
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
2896,
500,
534,
2378,
31108,
994,
198,
2,
198,
2,
2094,
470,
6044,
284,
751,
534,
11523,
284,
262,
7283,
3620,
62,
47,
4061,
3698,
1268,
1546,
4634,
198,
2,
... | 2.126667 | 300 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" DocString: Load configuration from .yaml file."""
from pathlib import Path
from os import path
import confuse
# get path of the projet !
# Two dirname() calls: assumes this module lives one directory below the
# project root — TODO confirm against the actual package layout.
BASE_DIR = path.dirname(path.dirname(path.abspath(__file__)))
# BASE_DIR = '/home/alassane/Code/JimBot/chatbotapp'
# Load config.yaml into a confuse.Configuration keyed by the app name;
# values below are read eagerly at import time.
config = confuse.Configuration('chatbotapp', __name__)
config.set_file(path.join(BASE_DIR, 'settings/config.yaml'))
# Create Variable for book json path
JSON_BOOK_FR_DIR = path.join(BASE_DIR, config['books']['json']['french'].get())
JSON_BOOK_EN_DIR = path.join(BASE_DIR, config['books']['json']['english'].get())
INDEXED_CHAP_PATHS = path.join(BASE_DIR, config['books']['paths']['french'].get())
# Create Variable for book epub path
EPUB_BOOK_FR_DIR = path.join(BASE_DIR, config['books']['epub']['french'].get())
EPUB_BOOK_EN_DIR = path.join(BASE_DIR, config['books']['epub']['english'].get())
THEMES_DIR = path.join(BASE_DIR, config['books']['themes']['french'].get())
NLU_DATA_PATH = path.join(BASE_DIR, config['nlu_data_path'].get())
# Webhook credentials; as_str_expanded() expands env-var style templates.
VERIFY = config['VERIFY'].as_str_expanded()
SECRET = config['SECRET'].as_str_expanded()
PAGE_ACCESS_TOKEN = config['PAGE_ACCESS_TOKEN'].as_str_expanded()
# Dir path for images of books coverage
COVER_IMG_PATH = path.join(BASE_DIR, config['books']['cover_img_path'].get())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
14432,
10100,
25,
8778,
8398,
422,
764,
88,
43695,
2393,
526,
15931,
628,
198,
6738,
3108,
8019,
1330,
1064... | 2.535849 | 530 |
from armulator.armv6.opcodes.abstract_opcodes.it import It
from armulator.armv6.opcodes.opcode import Opcode
| [
6738,
3211,
8927,
13,
1670,
85,
21,
13,
404,
40148,
13,
397,
8709,
62,
404,
40148,
13,
270,
1330,
632,
198,
6738,
3211,
8927,
13,
1670,
85,
21,
13,
404,
40148,
13,
404,
8189,
1330,
8670,
8189,
628
] | 2.894737 | 38 |
import math
import time
import click
from valohai_cli.utils import force_text
class Divider(LayoutElement):
    """A horizontal rule that spans the full layout width."""

    def __init__(self, ch='#', style=None):
        """
        :param ch: The character (or characters) to fill the line with
        :type ch: str
        :param style: Click style dictionary
        """
        self.ch = force_text(ch)
        self.style = {} if not style else style
class Flex(LayoutElement):
    """Lays content out in horizontal columns with flexible widths."""

    # Maps alignment name -> callable padding `content` to `width` chars.
    aligners = {
        'left': lambda content, width: content.ljust(width),
        'right': lambda content, width: content.rjust(width),
        'center': lambda content, width: content.center(width),
    }

    def add(self, content, flex=1, style=None, align='left'):
        """
        Add a content column to the flex.

        :param content: String content
        :type content: str
        :param flex: Flex value; if 0, the column will always take as much space as its content needs.
        :type flex: int
        :param style: Click style dictionary
        :type style: dict
        :param align: Alignment for the content (left/right/center).
        :type align: str
        :return: The Flex, for chaining
        :rtype: Flex
        """
        cell = {
            'content': force_text(content),
            'flex': flex,
            'style': style or {},
            'align': align,
        }
        self.cells.append(cell)
        return self
class Layout:
    """
    Row-oriented layout.
    """

    def add(self, element):
        """
        Add a LayoutElement to the Layout.

        :param element: The layout element to add
        :type element: LayoutElement
        :return: The Layout, for chaining
        :rtype: Layout
        """
        assert isinstance(element, LayoutElement)
        element.layout = self
        self.rows.append(element)
        return self

    def draw(self):
        """
        Draw the Layout onto screen.
        """
        # FIX: click.get_terminal_size() was deprecated in Click 8.0 and
        # removed in 8.1. shutil.get_terminal_size() is the stdlib
        # replacement and unpacks the same way: (columns, lines).
        import shutil
        self.width, self.height = shutil.get_terminal_size()
        for element in self.rows:
            element.draw()
| [
11748,
10688,
198,
11748,
640,
198,
198,
11748,
3904,
198,
198,
6738,
1188,
1219,
1872,
62,
44506,
13,
26791,
1330,
2700,
62,
5239,
628,
198,
198,
4871,
4777,
1304,
7,
32517,
20180,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,... | 2.331111 | 900 |
"""Common definitions"""
from collections import namedtuple
import functools
IDN = namedtuple("IDN", ["manufacturer", "model", "serial_number", "firmware_version"])
class CommandError(Exception):
    """Raised when the Command Error (CME) bit of the instrument's
    Standard Event Status Register (SESR) is set."""
def validate(func):
    """Read the Command Error bit (CME) of the Standard Event Status Register (SESR).

    Decorator for instrument-command methods.

    FIX: the original source was truncated — the decorated ``wrapper``
    definition was missing, which is a syntax error. This reconstruction
    forwards the call unchanged.

    NOTE(review): presumably the wrapper should query the SESR after the
    wrapped call and raise CommandError when the CME bit is set — confirm
    against the instrument driver before relying on validation.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper
class TekBase:
    """tekinstr base class

    Presumably the common base for Tektronix instrument wrappers; no
    methods are visible in this excerpt — confirm against the full module.

    Attributes:
        visa (pyvisa.resources.Resource): pyvisa resource
    """
| [
37811,
17227,
17336,
37811,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
11748,
1257,
310,
10141,
198,
198,
2389,
45,
796,
3706,
83,
29291,
7203,
2389,
45,
1600,
14631,
48119,
15051,
1600,
366,
19849,
1600,
366,
46911,
62,
17618,
1600,
... | 2.977528 | 178 |
"""Documentation utilities."""
def inherit_docs(cls):
    """Class decorator that makes members inherit missing docstrings.

    For each attribute of ``cls`` that lacks a docstring, walk the MRO from
    the nearest ancestor outwards and copy the first docstring found.

    :param cls: class to update in place
    :return: the same class (usable as a decorator)
    """
    for name in dir(cls):
        member = getattr(cls, name)
        if member.__doc__ is not None:
            continue
        for parent in cls.mro()[1:]:
            if hasattr(parent, name) and getattr(parent, name).__doc__:
                try:
                    member.__doc__ = getattr(parent, name).__doc__
                except AttributeError:
                    # Some members (builtins / slot wrappers) have a
                    # read-only __doc__; leave those untouched.
                    pass
                else:
                    # FIX: stop at the nearest ancestor. Without this break
                    # the LAST parent in the MRO with a docstring won,
                    # rather than the closest one.
                    break
    return cls
| [
37811,
24941,
341,
20081,
526,
15931,
628,
198,
4299,
16955,
62,
31628,
7,
565,
82,
2599,
198,
220,
220,
220,
37227,
9487,
11705,
1352,
326,
1838,
340,
16955,
2163,
2205,
13042,
526,
15931,
198,
220,
220,
220,
329,
1438,
287,
26672,
7... | 2.095618 | 251 |
# -*- coding: utf-8 -*-
from configparser import ConfigParser # [WARNING] Don't use any other `cfg` driver.
from typing import Optional, Dict, Any
from argparse import Namespace
from recc.argparse.parser.dict_parse import get_namespace_by_dict
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
4566,
48610,
1330,
17056,
46677,
220,
1303,
685,
31502,
60,
2094,
470,
779,
597,
584,
4600,
37581,
63,
4639,
13,
198,
6738,
19720,
1330,
32233,
11,
360,
713,
... | 3.205128 | 78 |
import pytest
from coderunner import create_app
@pytest.fixture()
def app():
    """The application instance under test.

    FIX: the original source was truncated — the first ``@pytest.fixture()``
    had no function under it, leaving ``client`` double-decorated.
    NOTE(review): presumably create_app() should receive a test
    configuration — confirm against the application factory.
    """
    return create_app()


@pytest.fixture()
def client(app):
    """A test client for the app"""
    return app.test_client()
| [
11748,
12972,
9288,
198,
198,
6738,
269,
12342,
403,
1008,
1330,
2251,
62,
1324,
628,
198,
31,
9078,
9288,
13,
69,
9602,
3419,
628,
198,
31,
9078,
9288,
13,
69,
9602,
3419,
198,
4299,
5456,
7,
1324,
2599,
198,
220,
220,
220,
37227,
... | 2.746032 | 63 |
from team.models import DataTablesFilter
| [
6738,
1074,
13,
27530,
1330,
6060,
51,
2977,
22417,
628,
198
] | 3.909091 | 11 |
# -*- coding: utf-8 -*-
# @File : scheduler.py
# @Author : Kaicheng Yang
# @Time : 2022/01/26 11:04:07
import numpy as np | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
8979,
1058,
6038,
18173,
13,
9078,
198,
2,
2488,
13838,
1058,
11611,
14234,
782,
10998,
198,
2,
2488,
7575,
1058,
33160,
14,
486,
14,
2075,
1367,
25,
3023,
25... | 2.326923 | 52 |
import setuptools

# Read the long description for PyPI.
# FIX: specify UTF-8 explicitly so the build does not depend on the
# platform's locale encoding (e.g. cp1252 on Windows).
with open("README.md", "r", encoding="utf-8") as readme_file:
    readme = readme_file.read()

setuptools.setup(
    name="stripes-and-squares",
    version="0.0.1-rc1",
    author="Petr Machek",
    description="Barcode generator with no dependencies",
    long_description=readme,
    long_description_content_type="text/markdown",
    url="https://github.com/pmachek/barcode",
    packages=setuptools.find_packages("src"),
    package_dir={"": "src"},
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.3"
)
| [
11748,
900,
37623,
10141,
628,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
1100,
1326,
62,
7753,
25,
198,
220,
220,
220,
1100,
1326,
796,
1100,
1326,
62,
7753,
13,
961,
3419,
198,
198,
2617,
37623,
10141,
... | 2.53876 | 258 |
"""
lib/script_utils.py
lib for utility functions for cl script execution
"""
from sys import exit
def script_exit(reason: str) -> None:
    """Terminate the script, reporting an error.

    :param reason: human-readable description of the failure
    """
    print(f'exit with error: {reason}')
    # FIX: exit with a non-zero status so shells/callers can detect the
    # failure; the bare exit() reported success (status 0) on an error path.
    exit(1)
def check_validity_token(token: str) -> list:
    """Parse a ``key=value`` auth token and validate it.

    :param token: raw token string of the form ``name=value``
    :return: the ``'='``-split parts; exits the script when invalid
    """
    # FIX: validate explicitly rather than steering control flow through a
    # broad `except Exception`, which also hid unrelated bugs.
    parts: list = token.split('=')
    if len(parts) < 2 or not parts[0] or not parts[1]:
        script_exit('auth token could not be parsed')
    return parts
def check_validity_episodes(episodes: str) -> list:
    """Parse a ``start-end`` episode range; exit the script when invalid."""
    try:
        bounds: list = [int(part) for part in episodes.split('-')]
        if len(bounds) != 2:
            raise Exception
        first, last = bounds
        if first > last:
            # script_exit raises SystemExit (a BaseException), so it is not
            # swallowed by the `except Exception` below.
            script_exit('start larger than end episode')
        return bounds
    except Exception:
        script_exit('episodes input could not be processed')
| [
37811,
198,
220,
220,
220,
9195,
14,
12048,
62,
26791,
13,
9078,
628,
220,
220,
220,
9195,
329,
10361,
5499,
329,
537,
4226,
9706,
198,
37811,
198,
198,
6738,
25064,
1330,
8420,
628,
198,
4299,
4226,
62,
37023,
7,
41181,
25,
965,
8,... | 2.544811 | 424 |
#!/usr/bin/env python3
#
# Copyright 2017-2020 GridGain Systems.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..singleton import singleton
from ..tidenexception import TidenException
from ..tidenfabric import TidenFabric
from itertools import chain
@singleton
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
15069,
2177,
12,
42334,
24846,
38,
391,
11998,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
... | 3.736585 | 205 |
# NOTE: Python 2 source (print statements, cStringIO module).
import cStringIO
# Read the SDL OpenGL header; the relative path presumably assumes this
# script runs from its own directory in the repo tree — TODO confirm.
lines = open("../../../SDL/include/SDL_opengl.h").readlines()
# OpenGL extension entry points to generate loader boilerplate for.
funcs = ["glUseProgram",
         "glVertexAttribPointer",
         "glActiveTexture",
         "glEnableVertexAttribArray",
         "glDisableVertexAttribArray",
         "glDeleteProgram",
         "glGetShaderiv",
         "glGetShaderInfoLog",
         "glCreateShader",
         "glCreateProgram",
         "glAttachShader",
         "glShaderSource",
         "glCompileShader",
         "glBindAttribLocation",
         "glLinkProgram",
         "glUniform1i",
         "glUniform4fv",
         "glUniform1f",
         "glUniformMatrix4fv",
         "glBindFramebuffer",
         "glGenFramebuffer",
         "glGenFramebuffers",
         "glCheckFramebufferStatus",
         "glDeleteFramebuffers",
         "glGetUniformLocation",
         "glFramebufferTexture2D",
         "glCompressedTexImage2D", ]
# Shapes of the header lines being matched, kept for reference:
#GLAPI void APIENTRY glDeleteProgram (GLuint program);
#PFNGLDELETEPROGRAMPROC
# Buffers accumulating the generated declaration ("base") and
# initialization ("init") code.
base = cStringIO.StringIO()
init = cStringIO.StringIO()
for f in funcs:
    # `get` is defined elsewhere in this file; presumably it scans `lines`
    # for each function and writes into base/init — TODO confirm.
    get(f)
print base.getvalue()
print init.getvalue() | [
11748,
269,
10100,
9399,
198,
6615,
796,
1280,
7203,
40720,
40720,
40720,
10305,
43,
14,
17256,
14,
10305,
43,
62,
404,
1516,
75,
13,
71,
11074,
961,
6615,
3419,
198,
12543,
6359,
796,
14631,
4743,
11041,
15167,
1600,
220,
198,
220,
2... | 1.994709 | 567 |