content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from rest_framework import status
from rest_framework.test import APITestCase
from django.urls import reverse
from ..models import User
| [
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
1334,
62,
30604,
13,
9288,
1330,
3486,
2043,
395,
20448,
198,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
198,
6738,
11485,
27530,
1330,
11787,
628
] | 3.861111 | 36 |
import qgate
from qgate.script import *

# Create a CPU-backed simulator instance.
sim = qgate.simulator.cpu()

# Create quantum registers that represent qubits.
qregs = new_qregs(3)
qreg0, qreg1, qreg2 = qregs

# Apply one gate per qubit, dumping the simulator state after each step.
print('1 qubit')
sim.run(H(qreg0))
qgate.dump(sim.qubits)

print('2 qubits')
sim.run(X(qreg1))
qgate.dump(sim.qubits)

print('3 qubits')
sim.run(H(qreg2))
qgate.dump(sim.qubits)

# Measure each qubit into a reference, then release the qubit.
print('measure and release qreg0')
refs = new_references(3)
sim.run([measure(refs[0], qreg0),
         release_qreg(qreg0)])
qgate.dump(sim.qubits)

print('measure and release qreg1')
sim.run([measure(refs[1], qreg1),
         release_qreg(qreg1)])
qgate.dump(sim.qubits)

print('measure and release qreg2')
sim.run([measure(refs[2], qreg2),
         release_qreg(qreg2)])
qgate.dump(sim.qubits)

# Report the observed measurement results.
# FIX: corrected the typo 'observaion' in the output message.
obs = sim.obs(refs)
print('observation: {}'.format(obs))
| [
11748,
10662,
10494,
198,
6738,
10662,
10494,
13,
12048,
1330,
1635,
198,
198,
2,
4441,
35375,
4554,
198,
14323,
796,
10662,
10494,
13,
14323,
8927,
13,
36166,
3419,
198,
198,
2,
4441,
14821,
28441,
326,
2380,
627,
9895,
13,
198,
80,
... | 2.241935 | 372 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import copy
import six
from six.moves import queue
import unittest
from orquesta import conducting
from orquesta import events
from orquesta.expressions import base as expressions
from orquesta.specs import loader as specs_loader
from orquesta import states
from orquesta.tests.fixtures import loader as fixture_loader
from orquesta.utils import context as ctx
from orquesta.utils import plugin
from orquesta.utils import specs
@six.add_metaclass(abc.ABCMeta)
@six.add_metaclass(abc.ABCMeta)
@six.add_metaclass(abc.ABCMeta)
@six.add_metaclass(abc.ABCMeta)
@six.add_metaclass(abc.ABCMeta)
# The conductor.get_next_tasks make copies of the task specs and render expressions
# in the task action and task input. So comparing the task specs will not match. In
# order to match in unit tests. This method is used to serialize the task specs and
# compare the lists.
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 3.470024 | 417 |
# Assignment 12 : Basic Data Analysis
# Basic demonstration manipulating publicly available datasets.
# Run with your latitude and longitude to get the last day_count days of
# weather at the trail_count nearest Mountain Bike trails to your location,
# within `distance` miles of your location.
import requests, time

trail_count = 3   # number of closest trails to report on
day_count = 5     # number of days of weather history to fetch (today + previous days)
distance = 10     # maximum search radius in miles
# For real-world use the location would come from a geolocation service.
location = {'lat': 38.950, 'lon': -92.395}

# This API call returns a list of all mountain-bike trails within `distance`
# miles, sorted by distance.  NOTE: the API key is hard-coded; move it to
# configuration for anything beyond a demo.
api_call = ('https://www.mtbproject.com/data/get-trails?lat=' + str(location['lat'])
            + '&lon=' + str(location['lon'])
            + '&maxDistance=' + str(distance)
            + '&sort=distance&key=200398187-1a12a39a32f76fde65d9afdceb2fa5a2')
data = requests.get(api_call).json()

trails_data = {}
for trail in range(trail_count):  # iterate over the trail_count closest trails
    trail_name = data['trails'][trail]['name']
    trails_data[trail_name] = {}
    for day in range(day_count):  # today plus the previous day_count-1 days
        # BUG FIX: the forecast request previously used data['trails'][0]'s
        # coordinates for every trail; it must use the current trail's.
        api_call = 'https://api.darksky.net/forecast/2fee19688528076274d1d30e68adf525/' \
            + str(data['trails'][trail]['latitude']) + ',' \
            + str(data['trails'][trail]['longitude']) + ',' + str(int(time.time()) - (86400 * day)) \
            + '?exclude=currently,minutely,hourly,alerts'
        trail_weather_data = requests.get(api_call).json()
        trails_data[trail_name]['day' + str(day)] = trail_weather_data
    print('Last ' + str(day_count) + ' days of weather at ' + trail_name + ':')
    for day in trails_data[trail_name].keys():
        if day == 'day0':
            print("\tToday: ", end='')
        elif day == 'day1':
            print("\tYesterday: ", end='')
        else:
            # Keys are 'dayN'; slice off the 'day' prefix to get N.
            print('\t' + day[3:] + ' days ago: ', end='')
        print('High:' + str(trails_data[trail_name][day]['daily']['data'][0]['temperatureHigh']) +
              ' Low:' + str(trails_data[trail_name][day]['daily']['data'][0]['temperatureLow']) +
              ' Precipitation:' + str(trails_data[trail_name][day]['daily']['data'][0]['precipIntensity']))
| [
2,
8021,
16747,
1105,
1058,
14392,
6060,
14691,
198,
198,
2,
14392,
13646,
29349,
7271,
1695,
40522,
198,
198,
2,
17973,
1057,
351,
534,
32477,
290,
890,
3984,
284,
651,
262,
938,
1110,
62,
9127,
220,
198,
2,
1528,
286,
6193,
379,
2... | 2.462671 | 951 |
import json
import struct
import base64
import binascii
import Crypto.Cipher.AES
| [
171,
119,
123,
11748,
33918,
201,
198,
11748,
2878,
201,
198,
11748,
2779,
2414,
201,
198,
11748,
9874,
292,
979,
72,
201,
198,
201,
198,
11748,
36579,
13,
34,
10803,
13,
32,
1546,
201,
198
] | 2.542857 | 35 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sys import argv
import bottle
from bottle import default_app, request, route, response, get
from pymongo import MongoClient
import json
import api
bottle.debug(True)
@get('/')
@get('/geek')
@route
@route('/login', method=["OPTIONS", "POST"])
@enable_cors
@route('/signup', method=["OPTIONS", "POST"])
@enable_cors
# --------------------------------------------- #
# speakrs api:
# ready:
# GET /speakrs
# todo:
# POST /me/set-speech-title
# GET /speakr?id=id
# --------------------------------------------- #
@get('/speakrs')
@enable_cors
@get('/speakr')
@enable_cors
# --------------------------------------------- #
# talks api:
# ready:
# GET /talks
# GET /get-talk?talkId=talkId
# todo:
# POST /rate?talkId=talkId
# --------------------------------------------- #
@get('/talks')
@enable_cors
@get('/talk')
@enable_cors
bottle.run(host='0.0.0.0', port=argv[1])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
25064,
1330,
1822,
85,
198,
198,
11748,
9294,
198,
6738,
9294,
1330,
4277,
62,
1324,
11,
2581,
11,
6339,
11,
28... | 2.580902 | 377 |
""" min oriented priority queue implemented with binary heap array based implementation """
from python_priority_queue import PriorityQueueBase
if __name__ == "__main__":
heapq = HeapPriorityQueue()
heapq.add(3,4)
heapq.add(8,21)
heapq.add(2,3)
heapq.add(4,5)
print(heapq.min())
| [
37811,
949,
25921,
8475,
16834,
9177,
351,
13934,
24575,
7177,
1912,
7822,
37227,
198,
6738,
21015,
62,
49336,
62,
36560,
1330,
34416,
34991,
14881,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
... | 2.666667 | 114 |
# Copyright 2017 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Creates a spanning tree (or possibly more than one)
This component uses the discovery component to build a view of the network
topology, constructs a spanning tree, and then disables flooding on switch
ports that aren't on the tree by setting their NO_FLOOD bit. The result
is that topologies with loops no longer turn your network into useless
hot packet soup.
Note that this does not have much of a relationship to Spanning Tree
Protocol. They have similar purposes, but this is a rather different way
of going about it.
This component is intended to replace the spanning_tree component, but
it currently has no support for dynamic topologies (that is, where
something that used to be connected to one thing now connects to
another thing) and has fairly different behavior in general, so we
still have the spanning_tree module too (for now).
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpid_to_str
from pox.lib.recoco import Timer
import time
log = core.getLogger()
| [
2,
15069,
2177,
3700,
5108,
559,
1636,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,... | 3.878282 | 419 |
''' # Copyright © 2019 AlgoExpert, LLC. All rights reserved.
# O(n) time | O(1) space
def findThreeLargestNumbers(array):
threeLargest = [None, None, None]
for num in array:
print(num)
updateLargest(threeLargest, num)
print(threeLargest)
print("_______________")
return threeLargest
def updateLargest(threeLargest, num):
if threeLargest[2] is None or num > threeLargest[2]:
shiftAndUpdate(threeLargest, num, 2)
elif threeLargest[1] is None or num > threeLargest[1]:
shiftAndUpdate(threeLargest, num, 1)
elif threeLargest[0] is None or num > threeLargest[0]:
shiftAndUpdate(threeLargest, num, 0)
def shiftAndUpdate(array, num, idx):
for i in range(idx + 1):
if i == idx:
array[i] = num
else:
array[i] = array[i + 1] '''
# Input: Array of Integers i.e [10,5,9,10,12]
# Output Sorted array of 3 largest integers, including duplicates i.e [10,10,12]
| [
7061,
6,
1303,
15069,
10673,
13130,
978,
2188,
3109,
11766,
11,
11419,
13,
1439,
2489,
10395,
13,
198,
2,
440,
7,
77,
8,
640,
930,
440,
7,
16,
8,
2272,
198,
198,
4299,
1064,
12510,
43,
853,
395,
49601,
7,
18747,
2599,
198,
220,
... | 2.286052 | 423 |
# Hyper-parameter configuration for an LSTM encoder/decoder model trained on
# the Yelp dataset (see the data paths below).  Field meanings are inferred
# from the names -- confirm against the consuming training script.
params={
    'enc_type': 'lstm',  # encoder architecture
    'dec_type': 'lstm',  # decoder architecture
    'nz': 32,  # presumably the latent-code size -- confirm
    'ni': 512,  # presumably the input embedding size -- confirm
    'enc_nh': 1024,  # presumably encoder hidden size -- confirm
    'dec_nh': 1024,  # presumably decoder hidden size -- confirm
    'dec_dropout_in': 0.5,  # dropout applied to decoder inputs
    'dec_dropout_out': 0.5,  # dropout applied to decoder outputs
    'batch_size': 32,
    'epochs': 100,
    'test_nepoch': 5,  # presumably evaluate every N epochs -- confirm
    'train_data': 'datasets/yelp_data/yelp.train.txt',
    'val_data': 'datasets/yelp_data/yelp.valid.txt',
    'test_data': 'datasets/yelp_data/yelp.test.txt',
    'label':True,  # presumably the dataset carries labels -- confirm
    'icnn_num_layers': 2,
    'icnn_nh': 1024
}
| [
198,
37266,
34758,
198,
220,
220,
220,
705,
12685,
62,
4906,
10354,
705,
75,
301,
76,
3256,
198,
220,
220,
220,
705,
12501,
62,
4906,
10354,
705,
75,
301,
76,
3256,
198,
220,
220,
220,
705,
27305,
10354,
3933,
11,
198,
220,
220,
2... | 1.794677 | 263 |
#!/usr/bin/env python3
# https://www.hackerrank.com/challenges/piling-up
# #python
import collections
import io
import sys
import unittest
if __name__ == '__main__': # pragma: no cover
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
3740,
1378,
2503,
13,
31153,
8056,
962,
13,
785,
14,
36747,
34120,
14,
79,
4386,
12,
929,
198,
2,
1303,
29412,
198,
198,
11748,
17268,
198,
11748,
33245,
198,
11748,
25064,
19... | 2.743243 | 74 |
# -*- coding: utf-8 -*-
# (c) Copyright 2020 Sensirion AG, Switzerland
from __future__ import absolute_import, division, print_function
from sensirion_i2c_sht.sht3x import Sht3xTemperature, Sht3xHumidity, \
Sht3xStatusRegister
import pytest
@pytest.mark.parametrize("value", [
    dict({'ticks': 0, 'degrees_celsius': -45., 'degrees_fahrenheit': -49.}),
    dict(
        {'ticks': 65535, 'degrees_celsius': 130., 'degrees_fahrenheit': 266.}),
])
def test_temperature(value):
    """
    Verify that Sht3xTemperature converts raw ticks to the expected
    Celsius and Fahrenheit readings for the boundary tick values.
    """
    temperature = Sht3xTemperature(value.get('ticks'))
    assert type(temperature) is Sht3xTemperature
    # The raw tick count must round-trip unchanged and stay integral.
    assert type(temperature.ticks) is int
    assert temperature.ticks == value.get('ticks')
    # Both converted readings must be floats matching the reference values.
    for unit in ('degrees_celsius', 'degrees_fahrenheit'):
        converted = getattr(temperature, unit)
        assert type(converted) is float
        assert converted == value.get(unit)
@pytest.mark.parametrize("value", [
    dict({'ticks': 0, 'percent_rh': 0.}),
    dict({'ticks': 65535, 'percent_rh': 100.}),
])
def test_humidity(value):
    """
    Verify that Sht3xHumidity converts raw ticks to the expected
    relative-humidity percentage for the boundary tick values.
    """
    humidity = Sht3xHumidity(value.get('ticks'))
    assert type(humidity) is Sht3xHumidity
    # The raw tick count must round-trip unchanged and stay integral.
    assert type(humidity.ticks) is int
    assert humidity.ticks == value.get('ticks')
    # The converted reading must be a float matching the reference value.
    converted = humidity.percent_rh
    assert type(converted) is float
    assert converted == value.get('percent_rh')
@pytest.mark.parametrize("value", [
    dict({'input': 0x0000,
          'write_data_checksum_status': False,
          'command_status': False,
          'system_reset_detected': False,
          'temperature_tracking_alert': False,
          'humidity_tracking_alert': False,
          'heater_status': False,
          'alert_pending_status': False}),
    dict({'input': 0x0001,
          'write_data_checksum_status': True,
          'command_status': False,
          'system_reset_detected': False,
          'temperature_tracking_alert': False,
          'humidity_tracking_alert': False,
          'heater_status': False,
          'alert_pending_status': False}),
    dict({'input': 0x0002,
          'write_data_checksum_status': False,
          'command_status': True,
          'system_reset_detected': False,
          'temperature_tracking_alert': False,
          'humidity_tracking_alert': False,
          'heater_status': False,
          'alert_pending_status': False}),
    dict({'input': 0x0010,
          'write_data_checksum_status': False,
          'command_status': False,
          'system_reset_detected': True,
          'temperature_tracking_alert': False,
          'humidity_tracking_alert': False,
          'heater_status': False,
          'alert_pending_status': False}),
    dict({'input': 0x0400,
          'write_data_checksum_status': False,
          'command_status': False,
          'system_reset_detected': False,
          'temperature_tracking_alert': True,
          'humidity_tracking_alert': False,
          'heater_status': False,
          'alert_pending_status': False}),
    dict({'input': 0x0800,
          'write_data_checksum_status': False,
          'command_status': False,
          'system_reset_detected': False,
          'temperature_tracking_alert': False,
          'humidity_tracking_alert': True,
          'heater_status': False,
          'alert_pending_status': False}),
    dict({'input': 0x2000,
          'write_data_checksum_status': False,
          'command_status': False,
          'system_reset_detected': False,
          'temperature_tracking_alert': False,
          'humidity_tracking_alert': False,
          'heater_status': True,
          'alert_pending_status': False}),
    dict({'input': 0x8000,
          'write_data_checksum_status': False,
          'command_status': False,
          'system_reset_detected': False,
          'temperature_tracking_alert': False,
          'humidity_tracking_alert': False,
          'heater_status': False,
          'alert_pending_status': True}),
    dict({'input': 0xFFFF,
          'write_data_checksum_status': True,
          'command_status': True,
          'system_reset_detected': True,
          'temperature_tracking_alert': True,
          'humidity_tracking_alert': True,
          'heater_status': True,
          'alert_pending_status': True}),
])
def test_status_register(value):
    """
    Test if the StatusRegister() type works as expected for different values.

    Every key of the parametrized dict except 'input' names a boolean flag
    attribute of Sht3xStatusRegister; its value is the expected state for
    the raw register word given as 'input'.
    """
    result = Sht3xStatusRegister(value.get('input'))
    assert type(result) is Sht3xStatusRegister
    for flag in value:
        if flag != 'input':
            # IMPROVED: use getattr() instead of eval() on a formatted
            # string -- same attribute lookup, no dynamic code execution.
            actual = getattr(result, flag)
            assert type(actual) is bool
            assert actual == value.get(flag)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
357,
66,
8,
15069,
12131,
14173,
343,
295,
13077,
11,
14679,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
6738,
3054... | 2.280436 | 2,111 |
#!/usr/bin/env python
__author__ = 'meatz'
import os
import sys
import gzip
import json
import hashlib
import resource
import datetime
#from guppy import hpy
from CacheBuckets import CacheBuckets
from StorageSystem import StorageSystem
# Column indices of the fields in one parsed trace record (order assumed
# from the names -- confirm against the trace parser that produces them).
START_TIME = 0
USER_ID = 1
HOST_ID = 2
PROCESS_ID = 3
REQUEST = 4
PARAMS = 5
FILE_SIZE = 6
EXECUTION_TIME = 7
ADDITIONAL_INFO = 8
# Byte-size unit multipliers (powers of 1024).
KB = 1024
MB = 1024 * KB
GB = 1024 * MB
TB = 1024 * GB
# return """
# {
# "buckets": {
# "big": [
# 16777216,
# 536870912,
# 1099511627776,
# "LRUCache"
# ],
# "enormous": [
# 536870912,
# 9223372036854775807,
# 2199023255552,
# "LRUCache"
# ],
# "small": [
# 262144,
# 16777216,
# 53687091200,
# "LRUCache"
# ],
# "tiny": [
# 0,
# 262144,
# 10737418240,
# "LRUCache"
# ]
# },
# "cache_config": "A_LRUCache",
# "name": "foo-A_LRUCache",
# "trace": "/home/meatz/ecmwf_traces/foo.gz"
# }
# """
if __name__ == "__main__":
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
834,
9800,
834,
796,
705,
1326,
27906,
6,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
308,
13344,
198,
11748,
33918,
198,
11748,
12234,
8019,
198,
11748,
8271,
198,
11748... | 1.85786 | 598 |
""" cRIO server requests
'acquire_raw_data'
"""
import socket
import scipy
from scipy.fftpack import fft
import math as m
import numpy as np
## Function to calculate heading based on data from 3 hydrophones
man = CRIOManager()
man.callback()
| [
37811,
269,
7112,
46,
4382,
7007,
198,
220,
220,
220,
705,
330,
29782,
62,
1831,
62,
7890,
6,
198,
198,
37811,
198,
11748,
17802,
198,
11748,
629,
541,
88,
198,
6738,
629,
541,
88,
13,
487,
83,
8002,
1330,
277,
701,
198,
11748,
10... | 2.905882 | 85 |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Tests the iPOPO @Provides decorator.
:author: Thomas Calmant
"""
# Tests
from tests.interfaces import IEchoService
from tests.ipopo import install_bundle, install_ipopo
# Pelix
from pelix.framework import FrameworkFactory, BundleContext
# Standard library
try:
import unittest2 as unittest
except ImportError:
import unittest
# ------------------------------------------------------------------------------
__version__ = "1.0.0"
NAME_A = "componentA"
# ------------------------------------------------------------------------------
class ProvidesTest(unittest.TestCase):
    """
    Tests the component "provides" behavior.
    """
    def setUp(self):
        """
        Called before each test. Initiates a framework.
        """
        self.framework = FrameworkFactory.get_framework()
        self.framework.start()
        self.ipopo = install_ipopo(self.framework)

    def tearDown(self):
        """
        Called after each test. Stops and deletes the framework.
        """
        self.framework.stop()
        FrameworkFactory.delete_framework(self.framework)

    def testProvides(self):
        """
        Tests if the provides decorator works.
        """
        module = install_bundle(self.framework)
        context = self.framework.get_bundle_context()
        assert isinstance(context, BundleContext)

        # Assert that the service is not yet available
        self.assertIsNone(context.get_service_reference(IEchoService),
                          "Service is already registered")

        # Instantiate the component
        compoA = self.ipopo.instantiate(module.FACTORY_A, NAME_A)

        try:
            # Service should be there
            ref = context.get_service_reference(IEchoService)
            self.assertIsNotNone(ref, "Service hasn't been registered")

            # Second service should be there.
            # BUG FIX: this assertion previously re-checked `ref` instead of
            # `ref2`, so a missing second service went undetected.
            ref2 = context.get_service_reference("TestService")
            self.assertIsNotNone(ref2, "Second service hasn't been registered")

            # References must be different
            self.assertNotEqual(ref, ref2,
                                "Service references must be different")

            # Both references must resolve to the same component instance.
            svc = context.get_service(ref)
            self.assertIs(svc, compoA,
                          "Different instances for service and component")

            svc2 = context.get_service(ref2)
            self.assertEqual(svc, svc2, "Got different service instances")

            # Clean up
            context.unget_service(ref)
            context.unget_service(ref2)
            svc = None
            svc2 = None

            # Invalidate the component
            self.ipopo.invalidate(NAME_A)

            # Service should not be there anymore
            self.assertIsNone(context.get_service_reference(IEchoService),
                              "Service is still registered")
        finally:
            try:
                self.ipopo.kill(NAME_A)
            except Exception:
                # Best-effort cleanup: the component may already be gone.
                pass

    def testController(self):
        """
        Tests the service controller.
        """
        module = install_bundle(self.framework)
        context = self.framework.get_bundle_context()
        assert isinstance(context, BundleContext)

        # Assert that the services are not yet available
        self.assertIsNone(context.get_service_reference(IEchoService),
                          "Service is already registered")
        self.assertIsNone(context.get_service_reference("TestService"),
                          "TestService is already registered")

        # Instantiate the component
        self.ipopo.instantiate(module.FACTORY_A, NAME_A)

        try:
            # Service should be there (controller default value is True)
            self.assertIsNotNone(context.get_service_reference(IEchoService),
                                 "EchoService hasn't been registered")

            ref = context.get_service_reference("TestService")
            self.assertIsNotNone(ref, "TestService hasn't been registered")

            # Get the service instance
            svc = context.get_service(ref)

            # Setting the controller to False must unregister only the
            # controlled service, not the echo service.
            svc.change_controller(False)
            self.assertIsNone(context.get_service_reference("TestService"),
                              "TestService hasn't been unregistered")
            self.assertIsNotNone(context.get_service_reference(IEchoService),
                                 "EchoService has been unregistered")

            # Re-enabling the controller must re-register the service
            svc.change_controller(True)
            self.assertIsNotNone(context.get_service_reference("TestService"),
                                 "TestService hasn't been re-registered")
            self.assertIsNotNone(context.get_service_reference(IEchoService),
                                 "EchoService has been unregistered")

            # Invalidate the component
            self.ipopo.invalidate(NAME_A)

            # Changing the controller once invalidated must have no effect
            svc.change_controller(True)

            # Services should not be there anymore
            self.assertIsNone(context.get_service_reference("TestService"),
                              "TestService is still registered")
            self.assertIsNone(context.get_service_reference(IEchoService),
                              "EchoService is still registered")

            # Clean up
            context.unget_service(ref)
            svc = None
            ref = None
        finally:
            try:
                self.ipopo.kill(NAME_A)
            except Exception:
                # Best-effort cleanup: the component may already be gone.
                pass
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# Set logging level
import logging
logging.basicConfig(level=logging.DEBUG)
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
1377,
14041,
12,
27195,
7656,
25,
41002,
12,
23,
1377,
198,
37811,
198,
51,
3558,
262,
9736,
3185,
46,
2488,
15946,
1460,
11705,
1352,
13,
198,
198,
25,
9800,
25,
5658,
38280,
415... | 2.289504 | 2,563 |
from django.contrib import admin
from forms import ArticleForm
from models import Article
from models import Tag
admin.site.register(Article, ArticleAdmin)
admin.site.register(Tag)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
5107,
1330,
10172,
8479,
198,
6738,
4981,
1330,
10172,
198,
6738,
4981,
1330,
17467,
628,
198,
28482,
13,
15654,
13,
30238,
7,
14906,
11,
10172,
46787,
8,
198,
28482,
13,
15654,... | 3.893617 | 47 |
#importing all the necessary libraries
import matplotlib.pyplot as plt
from sklearn import linear_model
import pandas as pd
import pylab as pl
import numpy as np
import tkinter as tk
import time
af = pd.read_csv('fish.csv') #taking the dataset
print(af.head())
# # af = df[['Rank','Rating','Votes']]
# # print(af.head())
# # # plt.scatter(af['Blast Furnace Slag'], af.Strength, color='blue')
# # # plt.xlabel("Engine size")
# # # plt.ylabel("Emission")
# # # plt.show()
msk = np.random.rand(len(af)) < 0.8 #dividing data set into train/test 8:2
train = af[msk]
test = af[~msk]
# # plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')
# # plt.xlabel("Engine size")
# # plt.ylabel("Emission")
# # plt.show()
regr = linear_model.LinearRegression()
#choosing the train array
x = np.asanyarray(train[['Length1','Length2','Length3','Height','Width']])
y = np.asanyarray(train[['Weight']])
regr.fit(x, y)
# The coefficients
# print('Coefficients: ', regr.coef_)
#predicting the values
y_hat = regr.predict(test[['Length1','Length2','Length3','Height','Width']])
# y_hat=regr.predict(np.asanyarray([[23.2, 25.4, 30.0, 11.52, 4.02]]))
# print(y_hat)
x = np.asanyarray(test[['Length1','Length2','Length3','Height','Width']])
y = np.asanyarray(test[['Weight']])
#finding the error
print("Residual sum of squares: %.2f"
% np.mean((y_hat - y) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(x, y))
#fields for tkinter
fields = 'Vertical Length', 'Diagonal Length', 'Cross Length', 'Height', 'Width'
arr = []
#fetching data from the user
#creating the GUI in tkinter
#main loop
if __name__ == '__main__':
root = tk.Tk()
ents = makeform(root, fields)
root.bind('<Return>', (lambda event, e=ents: fetch(e)))
b1 = tk.Button(root, text='Show',
command=(lambda e=ents: fetch(e)))
b1.pack(side=tk.LEFT, padx=5, pady=5)
b2 = tk.Button(root, text='Quit', command=root.quit)
b2.pack(side=tk.LEFT, padx=5, pady=5)
# w = tk.Label(root, text=arr[2])
# w.pack()
root.mainloop() | [
2,
11748,
278,
477,
262,
3306,
12782,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
6738,
1341,
35720,
1330,
14174,
62,
19849,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
279,
2645,
397,
... | 2.287815 | 952 |
import unittest
from surgery_of_1c_storage.__main__ import SurgeryOf1CStorage
import tempfile
import os
import sys
from contextlib import contextmanager
from io import StringIO
class TestSurgeryOf1CStorage(unittest.TestCase):
"""Тест проходятся только при наличии развернутых тестовых баз на Postgres и MS SQL
Параметры подключения должны быть указаны в файлах .test_base_conf_psql.ini и .test_base_conf_mssql.ini"""
@contextmanager
if __name__ == "__main__":
unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
8185,
62,
1659,
62,
16,
66,
62,
35350,
13,
834,
12417,
834,
1330,
39037,
5189,
16,
34,
31425,
198,
11748,
20218,
7753,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
4732,
8019,
1330,
4732,
37153,
19... | 1.889734 | 263 |
# Nested-list indexing demo ("PARA" structure exercise 1): each inner list
# holds a name and an age.
lista = [
    ['João', 18],
    ['Carla', 25],
    ['Maria', 30],
]

# Print the whole list, one inner list, then one field of an inner list.
print(f'Lista completa: {lista}')
print(f'Lista com estrutura: {lista[0]}')
print(f'Lista com critérios: {lista[1][0]}')
4868,
64,
796,
16410,
6,
9908,
28749,
3256,
1248,
4357,
37250,
9914,
5031,
3256,
1679,
4357,
37250,
46827,
3256,
1542,
11907,
198,
2,
350,
24401,
412,
2536,
315,
5330,
352,
13,
198,
4798,
7,
69,
6,
8053,
64,
1224,
8326,
25,
1391,
48... | 2.193182 | 88 |
from django.contrib import admin
from django.urls import path, re_path
from django.conf.urls import include
from HomeCloud import views
import django.views.i18n

# URL routing table for the HomeCloud project.  Django resolves these in
# order, so the entries are grouped: dashboard, auth, admin, file manager.
urlpatterns = [
    # DaSH dashboard pages and resource-usage endpoints
    path('',views.index,name="DaSH-Index"),
    path('i18n/', include('django.conf.urls.i18n')),
    path('cpuusage',views.getCPUpercent),
    path('swapusage',views.getSwapmemory),
    path('virtualusage',views.getVirtualmemory),
    # DaSH authentication (login form, auth handler, logout)
    path('accounts/login/',views.getlogin),
    path('auth/',views.login),
    path('logout/',views.logout),
    # DaSH administration: user management (list/create/save/delete/passwords)
    path('admin/',views.admin),
    path('admin/Users/',views.Users, name="DaSH-USER-LIST"),
    path('admin/Users/create',views.CreateUser),
    path('admin/Users/save',views.Save),
    path('admin/Users/del<str:user>',views.DeleteUser),
    path('admin/Users/chngepass',views.CHPass, name="DaSH-USER-CHANGEPASS"),
    path('admin/Users/setpass/<str:user>',views.SetPass, name="DaSH-USER-SETPASSWORD"),
    # File-manager application URLs (delegated to the File app)
    re_path(r'^file/',include('File.urls'))
]
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
302,
62,
6978,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
198,
6738,
5995,
18839,
1330,
5009,
198,
11748,
42625,
... | 2.524752 | 404 |
#!/usr/bin/env python

from api.txtly.txtly_request import TxtlyRequest

__author__ = "Likhit Jain and Yashita P Jain"
__copyright__ = "Copyright 2019, Kaleyra"
__license__ = "MIT"
__version__ = "1.0"
__email__ = "support@kaleyra.com"
__status__ = "Production"

# Delete a previously created Txtly short link.  Once deleted, the Txtly
# URL can no longer be used and will not redirect anywhere.
# `id` (the Txtly ID) is a mandatory parameter.
delete_request = TxtlyRequest(id='')
delete_response = delete_request.delete()

# Dump the full response, then its individual parts.
print(delete_response.to_json())
print(delete_response.get_status())
print(delete_response.get_message())
print(delete_response.get_data())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
40391,
13,
14116,
306,
13,
14116,
306,
62,
25927,
1330,
309,
742,
306,
18453,
198,
198,
834,
9800,
834,
796,
366,
43,
13848,
270,
449,
391,
290,
575,
1077,
5350,
350,
449,... | 2.866667 | 225 |
# -*- coding: utf8 -*-
import matplotlib.pyplot as plt
if __name__ == '__main__':
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.232558 | 43 |
# Generated by Django 3.2 on 2021-11-16 15:45
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
319,
33448,
12,
1157,
12,
1433,
1315,
25,
2231,
201,
198,
201,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
201,
198,
201,
198
] | 2.657143 | 35 |
from tkinter import *
import paho.mqtt.client as mqtt
__author__ = 'Niels'
root = Tk()
app = App(root)
| [
6738,
256,
74,
3849,
1330,
1635,
198,
11748,
279,
17108,
13,
76,
80,
926,
13,
16366,
355,
285,
80,
926,
198,
198,
834,
9800,
834,
796,
705,
34153,
1424,
6,
628,
198,
15763,
796,
309,
74,
3419,
198,
198,
1324,
796,
2034,
7,
15763,
... | 2.326087 | 46 |
# -*- coding: utf-8 -*-
"""
--------------------------------------
@File : proxy.py
@Author : maixiaochai
@Email : maixiaochai@outlook.com
@CreatedOn : 2020/8/15 19:06
--------------------------------------
"""
from re import compile
import requests
from .web_handler import WebHandler
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
3880,
23031,
198,
31,
8979,
220,
220,
220,
220,
220,
220,
1058,
15741,
13,
9078,
198,
31,
13838,
220,
220,
220,
220,
1058,
17266,
844,
544,
5374,
1872... | 2.990196 | 102 |
"""
가로 길이가 2이고 세로의 길이가 1인 직사각형모양의 타일이 있습니다. 이 직사각형 타일을 이용하여 세로의 길이가 2이고 가로의 길이가 n인 바닥을 가득 채우려고 합니다. 타일을 채울 때는 다음과 같이 2가지 방법이 있습니다.
타일을 가로로 배치 하는 경우
타일을 세로로 배치 하는 경우
직사각형의 가로의 길이 n이 매개변수로 주어질 때, 이 직사각형을 채우는 방법의 수를 return 하는 solution 함수를 완성해주세요.
제한사항
가로의 길이 n은 60,000이하의 자연수 입니다.
경우의 수가 많아 질 수 있으므로, 경우의 수를 1,000,000,007으로 나눈 나머지를 return해주세요.
"""
| [
37811,
198,
166,
108,
222,
167,
94,
250,
220,
166,
116,
116,
35975,
112,
166,
108,
222,
362,
35975,
112,
166,
111,
254,
23821,
226,
116,
167,
94,
250,
35975,
246,
220,
166,
116,
116,
35975,
112,
166,
108,
222,
352,
35975,
116,
238... | 0.563107 | 618 |
from logging import getLogger
from kaggle_adcal_2021.utils.template import GokartTask
logger = getLogger(__name__)
| [
6738,
18931,
1330,
651,
11187,
1362,
198,
198,
6738,
479,
9460,
293,
62,
324,
9948,
62,
1238,
2481,
13,
26791,
13,
28243,
1330,
402,
482,
433,
25714,
198,
198,
6404,
1362,
796,
651,
11187,
1362,
7,
834,
3672,
834,
8,
628
] | 2.878049 | 41 |
# Find the divisors!
# Create a function named divisors/Divisors that takes an integer n > 1 and returns an array with all of the integer's
# divisors(except for 1 and the number itself), from smallest to largest. If the number is prime return the string
# '(integer) is prime' (null in C#) (use Either String a in Haskell and Result<Vec<u32>, String> in Rust).
# Example:
# divisors(12); #should return [2,3,4,6]
# divisors(25); #should return [5]
# divisors(13); #should return "13 is prime"
divisors(15)
# [3, 5]
divisors(12)
# [2, 3, 4, 6]
divisors(13)
# "13 is prime"
| [
2,
9938,
262,
2659,
271,
669,
0,
198,
198,
2,
13610,
257,
2163,
3706,
2659,
271,
669,
14,
24095,
271,
669,
326,
2753,
281,
18253,
299,
1875,
352,
290,
5860,
281,
7177,
351,
477,
286,
262,
18253,
338,
198,
2,
2659,
271,
669,
7,
1... | 2.764151 | 212 |
# pylint: disable=wildcard-import
"""Statistical tests and diagnostics for ArviZ."""
from .density_utils import *
from .diagnostics import *
from .stats import *
from .stats import _calculate_ics
from .stats_refitting import *
from .stats_utils import *

# Public API of this subpackage, re-exported from the wildcard imports above.
__all__ = [
    "apply_test_function",
    "bfmi",
    "compare",
    "hdi",
    "kde",
    "loo",
    "loo_pit",
    "psislw",
    "r2_samples",
    "r2_score",
    "summary",
    "waic",
    "ELPDData",
    "ess",
    "rhat",
    "mcse",
    "autocorr",
    "autocov",
    "make_ufunc",
    "wrap_xarray_ufunc",
    "reloo",
    "_calculate_ics",
    "psens"
]
| [
2,
279,
2645,
600,
25,
15560,
28,
21992,
9517,
12,
11748,
201,
198,
37811,
17126,
19929,
5254,
290,
6689,
34558,
329,
943,
8903,
57,
526,
15931,
201,
198,
6738,
764,
43337,
62,
26791,
1330,
1635,
201,
198,
6738,
764,
47356,
34558,
133... | 2.024845 | 322 |
# (c) Crown copyright Met Office. All rights reserved.
# For further details please refer to the file COPYRIGHT
# which you should have received as part of this distribution
"""
Fortran language handling classes.
"""
import logging
from fparser.common.readfortran import FortranFileReader # type: ignore
from fparser.two.Fortran2003 import ( # type: ignore
Use_Stmt, Module_Stmt, Program_Stmt, Subroutine_Stmt, Function_Stmt, Language_Binding_Spec,
Char_Literal_Constant, Interface_Block, Name, Comment, Module)
from fparser.two.parser import ParserFactory # type: ignore
from fparser.two.utils import FortranSyntaxError # type: ignore
from fab.dep_tree import AnalysedFile, EmptySourceFile
from fab.tasks import TaskException
from fab.util import log_or_dot, HashedFile
logger = logging.getLogger(__name__)
# todo: a nicer recursion pattern?
def iter_content(obj):
"""
Return a generator which yields every node in the tree.
"""
yield obj
if hasattr(obj, "content"):
for child in _iter_content(obj.content):
yield child
class FortranAnalyser(object):
"""
A build step which analyses a fortran file using fparser2, creating an :class:`~fab.dep_tree.AnalysedFile`.
"""
_intrinsic_modules = ['iso_fortran_env']
def _parse_file(self, fpath):
"""Get a node tree from a fortran file."""
reader = FortranFileReader(str(fpath), ignore_comments=False)
reader.exit_on_error = False # don't call sys.exit, it messes up the multi-processing
try:
tree = self.f2008_parser(reader)
return tree
except FortranSyntaxError as err:
# we can't return the FortranSyntaxError, it breaks multiprocessing!
logger.error(f"\nsyntax error in {fpath}\n{err}")
raise Exception(f"syntax error in {fpath}\n{err}")
except Exception as err:
logger.error(f"\nunhandled error '{type(err)}' in {fpath}\n{err}")
raise Exception(f"unhandled error '{type(err)}' in {fpath}\n{err}")
| [
2,
357,
66,
8,
12223,
6634,
3395,
4452,
13,
1439,
2489,
10395,
13,
198,
2,
1114,
2252,
3307,
3387,
3522,
284,
262,
2393,
27975,
38162,
9947,
198,
2,
543,
345,
815,
423,
2722,
355,
636,
286,
428,
6082,
198,
37811,
198,
21926,
2596,
... | 2.697644 | 764 |
# create an empty list
coding_languages = []
coding_languages.append('Python')
print(coding_languages)
coding_languages.append('JavaScript')
print(coding_languages)
| [
2,
2251,
281,
6565,
1351,
198,
66,
7656,
62,
75,
33213,
796,
17635,
198,
198,
66,
7656,
62,
75,
33213,
13,
33295,
10786,
37906,
11537,
198,
4798,
7,
66,
7656,
62,
75,
33213,
8,
198,
198,
66,
7656,
62,
75,
33213,
13,
33295,
10786,
... | 2.929825 | 57 |
# package org.apache.helix.participant
#from org.apache.helix.participant import *
#from java.lang.management import ManagementFactory
#from org.apache.log4j import Logger
from org.apache.helix.ControllerChangeListener import ControllerChangeListener
from org.apache.helix.HelixDataAccessor import HelixDataAccessor
from org.apache.helix.HelixManager import HelixManager
from org.apache.helix.HelixManagerFactory import HelixManagerFactory
from org.apache.helix.InstanceType import InstanceType
from org.apache.helix.NotificationContext import NotificationContext
from org.apache.helix.PropertyKey import Builder
from org.apache.helix.PropertyType import PropertyType
from org.apache.helix.controller.GenericHelixController import GenericHelixController
from org.apache.helix.controller.HelixControllerMain import HelixControllerMain
from org.apache.helix.controller.restlet.ZKPropertyTransferServer import ZKPropertyTransferServer
from org.apache.helix.model.LeaderHistory import LeaderHistory
from org.apache.helix.model.LiveInstance import LiveInstance
class DistClusterControllerElection(ControllerChangeListener):
"""
Java modifiers:
private static
Type:
Logger
"""
LOG = Logger.getLogger(DistClusterControllerElection.class)
"""
Parameters:
String zkAddr
"""
def onControllerChange(self, changeContext):
"""
Returns void
Parameters:
changeContext: NotificationContext
@Override
Java modifiers:
synchronized
"""
# HelixManager
manager = changeContext.getManager()
if manager == None:
LOG.error("missing attributes in changeContext. requires HelixManager")
return
# InstanceType
type = manager.getInstanceType()
if type != InstanceType.CONTROLLER && type != InstanceType.CONTROLLER_PARTICIPANT:
LOG.error("fail to become controller because incorrect instanceType (was " + str(type.toString())+ ", requires CONTROLLER | CONTROLLER_PARTICIPANT)")
return
try:
if (changeContext.getType() == NotificationContext.Type.INIT) or (changeContext.getType() == NotificationContext.Type.CALLBACK):
# HelixDataAccessor
accessor = manager.getHelixDataAccessor()
# Builder
keyBuilder = accessor.keyBuilder()
while (accessor.getProperty(keyBuilder.controllerLeader()) == None:
# boolean
success = tryUpdateController(manager)
if success:
updateHistory(manager)
if type == InstanceType.CONTROLLER:
HelixControllerMain.addListenersToController(manager, _controller)
manager.startTimerTasks()
else:
if type == InstanceType.CONTROLLER_PARTICIPANT:
# String
clusterName = manager.getClusterName()
# String
controllerName = manager.getInstanceName()
_leader = HelixManagerFactory.getZKHelixManager(clusterName, controllerName, InstanceType.CONTROLLER, _zkAddr)
_leader.connect()
_leader.startTimerTasks()
HelixControllerMain.addListenersToController(_leader, _controller)
else:
if (changeContext.getType() == NotificationContext.Type.FINALIZE):
if _leader != None:
_leader.disconnect()
except Exception, e:
LOG.error("Exception when trying to become leader"+ str(e))
def tryUpdateController(self, manager):
"""
Returns boolean
Parameters:
manager: HelixManager
Java modifiers:
private
"""
# HelixDataAccessor
accessor = manager.getHelixDataAccessor()
# Builder
keyBuilder = accessor.keyBuilder()
# LiveInstance
leader = LiveInstance(manager.getInstanceName())
try:
leader.setLiveInstance(ManagementFactory.getRuntimeMXBean().getName())
leader.setSessionId(manager.getSessionId())
leader.setHelixVersion(manager.getVersion())
if ZKPropertyTransferServer.getInstance() != None:
# String
zkPropertyTransferServiceUrl = ZKPropertyTransferServer.getInstance().getWebserviceUrl()
if zkPropertyTransferServiceUrl != None:
leader.setWebserviceUrl(zkPropertyTransferServiceUrl)
else:
LOG.warn("ZKPropertyTransferServer instnace is null")
# boolean
success = accessor.createProperty(keyBuilder.controllerLeader(), leader)
if success:
return True
else:
LOG.info("Unable to become leader probably because some other controller becames the leader")
except Exception, e:
LOG.error("Exception when trying to updating leader record in cluster:" + str(manager.getClusterName())+ ". Need to check again whether leader node has been created or not"+ str(e))
leader = accessor.getProperty(keyBuilder.controllerLeader())
if leader != None:
# String
leaderName = leader.getInstanceName()
LOG.info("Leader exists for cluster:" + manager.getClusterName() + ", currentLeader:" + leaderName)
if leaderName != None && (leaderName == manager.getInstanceName()):
return True
return False
def updateHistory(self, manager):
"""
Returns void
Parameters:
manager: HelixManager
Java modifiers:
private
"""
# HelixDataAccessor
accessor = manager.getHelixDataAccessor()
# Builder
keyBuilder = accessor.keyBuilder()
# LeaderHistory
history = accessor.getProperty(keyBuilder.controllerLeaderHistory())
if history == None:
history = LeaderHistory(PropertyType.HISTORY.toString())
history.updateHistory(manager.getClusterName(), manager.getInstanceName())
accessor.setProperty(keyBuilder.controllerLeaderHistory(), history)
| [
2,
5301,
8745,
13,
43073,
13,
2978,
844,
13,
48013,
415,
198,
2,
6738,
8745,
13,
43073,
13,
2978,
844,
13,
48013,
415,
1330,
1635,
198,
2,
6738,
20129,
13,
17204,
13,
27604,
1330,
8549,
22810,
198,
2,
6738,
8745,
13,
43073,
13,
64... | 2.343514 | 2,783 |
import pytest
import importlib
module = importlib.import_module("12_subterranean_sustainability")
parse = module.parse
state_value = module.state_value
@pytest.mark.parametrize(
"number_of_generations, total_sum, output_state", [
(1, 91, "#...#....#.....#..#..#..#"),
(2, 132, "##..##...##....#..#..#..##"),
(3, 102, "#.#...#..#.#....#..#..#...#"),
(4, 154, "#.#..#...#.#...#..#..##..##"),
(5, 115, "#...##...#.#..#..#...#...#"),
(6, 174, "##.#.#....#...#..##..##..##"),
(7, 126, "#..###.#...##..#...#...#...#"),
(8, 213, "#....##.#.#.#..##..##..##..##"),
(9, 138, "##..#..#####....#...#...#...#"),
(10, 213, "#.#..#...#.##....##..##..##..##"),
(11, 136, "#...##...#.#...#.#...#...#...#"),
(12, 218, "##.#.#....#.#...#.#..##..##..##"),
(13, 133, "#..###.#....#.#...#....#...#...#"),
(14, 235, "#....##.#....#.#..##...##..##..##"),
(15, 149, "##..#..#.#....#....#..#.#...#...#"),
(16, 226, "#.#..#...#.#...##...#...#.#..##..##"),
(17, 170, "#...##...#.#.#.#...##...#....#...#"),
(18, 280, "##.#.#....#####.#.#.#...##...##..##"),
(19, 287, "#..###.#..#.#.#######.#.#.#..#.#...#"),
(20, 325, "#....##....#####...#######....#.#..##"),
])
@pytest.fixture
@pytest.fixture
| [
11748,
12972,
9288,
198,
11748,
1330,
8019,
198,
198,
21412,
796,
1330,
8019,
13,
11748,
62,
21412,
7203,
1065,
62,
7266,
353,
16474,
62,
82,
19542,
1799,
4943,
198,
29572,
796,
8265,
13,
29572,
198,
5219,
62,
8367,
796,
8265,
13,
521... | 1.956268 | 686 |
import sys
from os import environ, stat, chmod
from os.path import dirname, realpath
from os import remove
from shutil import copyfile
from pathlib import Path
import subprocess
def app_dir() -> str:
"""Get app dir."""
return environ.get('APP_DIR', dirname(realpath(__file__)))
def windows():
"""Prepare windows install."""
dot_binary = Path(app_dir()) / 'dot.bat'
dot_binary = dot_binary.as_posix()
if Path(dot_binary).exists():
remove(dot_binary)
file_commands = [
f'@echo off',
f'set APP_DIR={app_dir()}',
r'set VENV=%APP_DIR%venv',
r'call %VENV%\Scripts\activate & python -u "%APP_DIR%dot.py" %*',
]
with open(dot_binary, 'w') as dot_file:
for line in file_commands:
dot_file.write(f'{line}\n')
dot_file.close()
def make_executable(path):
"""CHMOD -x for unix like."""
mode = stat(path).st_mode
mode |= (mode & 0o444) >> 2
chmod(path, mode)
def linux():
"""Prepare linux install."""
dot_binary = Path.home() / '.local' / 'bin' / 'dot'
dot_binary = dot_binary.as_posix()
if Path(dot_binary).exists():
remove(dot_binary)
file_commands = [
'#!/bin/bash',
'args=("$@")',
f'APP_DIR="{app_dir()}"',
'ENV_DIR=$APP_DIR/.venv',
'CMD_FILE=$APP_DIR/dot.py',
'function command {',
'\t export APP_DIR=$APP_DIR && \\',
'\t export DOT_CONF=$DOT_CONF && \\',
'\t source $ENV_DIR/bin/activate && \\',
'\t python -u $CMD_FILE ${args[0]} ${args[1]} ${args[2]};',
'}',
'function dot() {',
'\tcommand',
'}',
'dot',
]
with open(dot_binary, 'w') as dot_file:
for line in file_commands:
dot_file.write(f'{line}\n')
dot_file.close()
make_executable(dot_binary)
def install() -> None:
"""Prepare system."""
if 'win' in str(sys.platform):
windows()
else:
linux()
conf_path = Path(app_dir()) / 'config.yml'
if not Path(conf_path).exists():
copyfile(Path(app_dir()) / 'config.example.yml', conf_path)
install()
| [
11748,
25064,
198,
6738,
28686,
1330,
551,
2268,
11,
1185,
11,
442,
4666,
198,
6738,
28686,
13,
6978,
1330,
26672,
3672,
11,
1103,
6978,
198,
6738,
28686,
1330,
4781,
198,
6738,
4423,
346,
1330,
4866,
7753,
198,
6738,
3108,
8019,
1330,
... | 2.133531 | 1,011 |
from faker import Faker
fake = Faker("ar_SA")
print(fake.name())
print(fake.email())
print(fake.first_name())
print(fake.last_name())
print(fake.phone_number())
print(fake.address())
print(fake.text())
print(fake.name_male())
print(fake.name_female())
print(fake.job())
print(fake.word())
print(fake.words(7))
print(fake.currency())
print(fake.currency_name())
print(fake.currency_code())
| [
6738,
277,
3110,
1330,
376,
3110,
201,
198,
201,
198,
30706,
796,
376,
3110,
7203,
283,
62,
4090,
4943,
201,
198,
201,
198,
4798,
7,
30706,
13,
3672,
28955,
201,
198,
4798,
7,
30706,
13,
12888,
28955,
201,
198,
4798,
7,
30706,
13,
... | 2.521212 | 165 |
# pylint: disable=invalid-name,no-self-use
import argparse
import os
from allennlp.common.testing import AllenNlpTestCase
from allennlp.commands.fine_tune import FineTune, fine_tune_model_from_file_paths, fine_tune_model_from_args
| [
2,
279,
2645,
600,
25,
15560,
28,
259,
12102,
12,
3672,
11,
3919,
12,
944,
12,
1904,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
198,
6738,
477,
1697,
34431,
13,
11321,
13,
33407,
1330,
9659,
45,
34431,
14402,
20448,
198,
6738,
... | 2.807229 | 83 |
import re
import random
import requests
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
if __name__ == "__main__":
text=get_naver_news()
print(text) | [
11748,
302,
198,
11748,
4738,
198,
11748,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
197,
52... | 3.107143 | 56 |
import os
import unittest
from smart_getenv import getenv
class GetenvTests(unittest.TestCase):
"""
Tests for getenv.
"""
test_var_name = '__ENV_UTILS_TEST_VAR'
def delete_test_var(self):
"""
Delete test environment variable.
"""
if self.test_var_name in os.environ.keys():
del os.environ[self.test_var_name]
def test_getenv_default(self):
"""
If environment variable does not exist:
ensure getenv returns None if default value is not specified,
ensure getenv returns default value if it is specified,
ensure getenv does not cast default value to desired type.
"""
self.assertEqual(getenv(self.test_var_name), None)
self.assertEqual(getenv(self.test_var_name, default='default string value'), 'default string value')
self.assertEqual(getenv(self.test_var_name, type=int, default='default string value'), 'default string value')
def test_getenv_type_str(self):
"""
Ensure getenv returns string if environment variable exists and desired type is string.
"""
os.environ[self.test_var_name] = 'abc'
self.assertEqual(getenv(self.test_var_name, type=str), 'abc')
def test_getenv_type_int(self):
"""
If environment variable exists and desired type is int:
ensure getenv returns int,
ensure getenv excepts if value can not be casted to int.
"""
os.environ[self.test_var_name] = '123'
self.assertEqual(getenv(self.test_var_name, type=int), 123)
os.environ[self.test_var_name] = 'absolutely not an int'
try:
getenv(self.test_var_name, type=int)
self.fail('Calling getenv_int on a environment variable with'
' non-castable to int value should fail with exception!')
except ValueError:
pass
def test_getenv_type_float(self):
"""
If environment variable exists and desired type is float:
ensure getenv returns float,
ensure getenv excepts if value can not be casted to float.
"""
os.environ[self.test_var_name] = '123.4'
self.assertEqual(getenv(self.test_var_name, type=float), 123.4)
os.environ[self.test_var_name] = 'absolutely not a float'
try:
getenv(self.test_var_name, type=float)
self.fail('Calling getenv_int on a environment variable with'
' non-castable to float value should fail with exception!')
except ValueError:
pass
def test_getenv_type_bool(self):
"""
If environment variable exists and desired type is bool, ensure getenv returns bool.
"""
os.environ[self.test_var_name] = 'true'
self.assertEqual(getenv(self.test_var_name, type=bool), True)
os.environ[self.test_var_name] = 'True'
self.assertEqual(getenv(self.test_var_name, type=bool), True)
os.environ[self.test_var_name] = '1'
self.assertEqual(getenv(self.test_var_name, type=bool), True)
os.environ[self.test_var_name] = 'absolutely not a boolean'
self.assertEqual(getenv(self.test_var_name, type=bool), True)
os.environ[self.test_var_name] = ' '
self.assertEqual(getenv(self.test_var_name, type=bool), True)
os.environ[self.test_var_name] = 'false'
self.assertEqual(getenv(self.test_var_name, type=bool), False)
os.environ[self.test_var_name] = 'False'
self.assertEqual(getenv(self.test_var_name, type=bool), False)
os.environ[self.test_var_name] = '0'
self.assertEqual(getenv(self.test_var_name, type=bool), False)
os.environ[self.test_var_name] = ''
self.assertEqual(getenv(self.test_var_name, type=bool), False)
def test_getenv_type_list(self):
"""
If environment variable exists and desired type is list:
ensure getenv returns list,
ensure getenv default separator is ',',
ensure getenv supports custom separator.
"""
os.environ[self.test_var_name] = 'abc'
self.assertEqual(getenv(self.test_var_name, type=list), ['abc'])
os.environ[self.test_var_name] = 'a,b,c'
self.assertEqual(getenv(self.test_var_name, type=list), ['a', 'b', 'c'])
os.environ[self.test_var_name] = ',a,b,c,'
self.assertEqual(getenv(self.test_var_name, type=list, separator=','), ['', 'a', 'b', 'c', ''])
os.environ[self.test_var_name] = 'a:b:c'
self.assertEqual(getenv(self.test_var_name, type=list, separator=':'), ['a', 'b', 'c'])
def test_getenv_type_tuple(self):
"""
If environment variable exists and desired type is tuple:
ensure getenv returns tuple,
ensure getenv default separator is ',',
ensure getenv supports custom separator.
"""
os.environ[self.test_var_name] = 'abc'
self.assertEqual(getenv(self.test_var_name, type=tuple), ('abc',))
os.environ[self.test_var_name] = 'a,b,c'
self.assertEqual(getenv(self.test_var_name, type=tuple), ('a', 'b', 'c'))
os.environ[self.test_var_name] = ',a,b,c,'
self.assertEqual(getenv(self.test_var_name, type=tuple, separator=','), ('', 'a', 'b', 'c', ''))
os.environ[self.test_var_name] = 'a:b:c'
self.assertEqual(getenv(self.test_var_name, type=tuple, separator=':'), ('a', 'b', 'c'))
def test_getenv_type_dict(self):
"""
If environment variable exists and desired type is dict:
ensure getenv returns dict,
ensure getenv supports custom separator.
"""
os.environ[self.test_var_name] = '{"key": "value"}'
self.assertEqual(getenv(self.test_var_name, type=dict), {'key': 'value'})
os.environ[self.test_var_name] = '{ "key": "value" }'
self.assertEqual(getenv(self.test_var_name, type=dict), {'key': 'value'})
os.environ[self.test_var_name] = '{ "key": "value" }'
self.assertEqual(getenv(self.test_var_name, type=dict), {'key': 'value'})
os.environ[self.test_var_name] = 'absolutely not a dict'
try:
getenv(self.test_var_name, type=dict)
self.fail('Calling getenv with desired type of dict on a environment variable with'
' non-castable to dict value should fail with exception!')
except TypeError:
pass
except SyntaxError:
pass
if __name__ == '__main__':
unittest.main()
| [
11748,
28686,
198,
11748,
555,
715,
395,
198,
6738,
4451,
62,
1136,
24330,
1330,
651,
24330,
628,
198,
4871,
3497,
24330,
51,
3558,
7,
403,
715,
395,
13,
14402,
20448,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
30307,
329,
... | 2.227852 | 2,980 |
from django import forms
from django.core.mail import send_mail
from django.conf import settings
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
7295,
13,
4529,
1330,
3758,
62,
4529,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
628
] | 3.769231 | 26 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import retworkx
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 3.802632 | 152 |
import copy
import numpy as np
from flatland.envs.agent_utils import RailAgentStatus
from collections import defaultdict
from itertools import groupby
| [
11748,
4866,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
6228,
1044,
13,
268,
14259,
13,
25781,
62,
26791,
1330,
12950,
36772,
19580,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
340,
861,
10141,
1330,
1448,
1525,
198
] | 3.973684 | 38 |
# program version
PROGRAM_VERSION = '1.0'
| [
2,
1430,
2196,
198,
4805,
7730,
24115,
62,
43717,
796,
705,
16,
13,
15,
6,
198
] | 2.625 | 16 |
from enum import Enum, unique
from typing import Union
from aquaui.utils import quotify, run_applescript
from .types.buttons import Buttons
from .types.result import Result
@unique
class Alert:
"""
Returns an object of type Result, which as a button_returned property.
"""
def with_buttons(self, buttons: Union[Buttons, None] = None):
"""
If a default button is not specified, the last button is the list will become the default.
If buttons is None, default buttons are displayed
"""
if buttons is not None:
self.applescript += f"{buttons.applescript_fragment}"
return self
def of_type(self, alert_type: AlertType = AlertType.INFORMATIONAL):
"""Different alert types use different icons"""
self.applescript += f"as {alert_type.value} "
return self
| [
6738,
33829,
1330,
2039,
388,
11,
3748,
198,
6738,
19720,
1330,
4479,
198,
6738,
14839,
559,
72,
13,
26791,
1330,
23611,
1958,
11,
1057,
62,
1324,
829,
6519,
198,
6738,
764,
19199,
13,
4360,
27288,
1330,
887,
27288,
198,
6738,
764,
19... | 2.835526 | 304 |
from pynwb import NWBHDF5IO
io = NWBHDF5IO(r'C:\Users\knasi\Consulting\NWB project\Ed\Jack\Jack.nwb', 'r')
nwbfile = io.read()
# Accessing raw data (if they are saved on the NWB file)
print(nwbfile.acquisition)
photonSeries = nwbfile.acquisition['TwoPhotonSeries']
# Accessing processed data
print(nwbfile.processing)
data_interfaces = nwbfile.processing['ophys0']
fluorescence = data_interfaces['Fluorescence']
roi_response_series = fluorescence.roi_response_series
roi_response_series_slice0 = roi_response_series['RoiResponseSeries_slice0']
data_ref = roi_response_series_slice0.data # This is just a reference to the data - it is not loaded yet
data = data_ref[:] # Now it is an ndarray
| [
6738,
279,
2047,
39346,
1330,
21966,
33,
39,
8068,
20,
9399,
201,
198,
201,
198,
952,
796,
21966,
33,
39,
8068,
20,
9399,
7,
81,
6,
34,
7479,
14490,
59,
15418,
17053,
59,
9444,
586,
278,
59,
27605,
33,
1628,
59,
7407,
59,
14295,
... | 2.700375 | 267 |
# ==============================================================================
# Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""nGraph TensorFlow bridge floor operation test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import tensorflow as tf
from common import NgraphTest
| [
2,
38093,
25609,
28,
198,
2,
220,
15069,
13130,
8180,
10501,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
3... | 4.303419 | 234 |
import argparse
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy
import torch
if __name__ == '__main__':
main()
| [
11748,
1822,
29572,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
13,
83,
15799,
1330,
5436,
32572,
420,
1352,
198,
11748,
299,
32152,
198,
11748,
28034,
198,
198,
361,
11593,
3672,
834... | 2.888889 | 54 |
from django.urls import path, include
from . import api_views
urlpatterns = [
path('', api_views.WeatherIndexView.as_view(), name='weather_index'),
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
198,
6738,
764,
1330,
40391,
62,
33571,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
40391,
62,
33571,
13,
41865,
15732,
7680,
13,
292,
62,
... | 2.980769 | 52 |
# -*- coding: utf-8 -*-
from rest_framework.serializers import ModelSerializer, PrimaryKeyRelatedField
from ..models import *
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
1334,
62,
30604,
13,
46911,
11341,
1330,
9104,
32634,
7509,
11,
21087,
9218,
9819,
15878,
198,
6738,
11485,
27530,
1330,
1635,
628,
628,
628,
628
] | 3.325 | 40 |
from tornado import web, escape
from .cache import cache
#: initialize options
from .util import set_default_option
__all__ = ["JulyHandler", "ApiHandler", "init_options", "run_server"]
class JulyHandler(web.RequestHandler):
"""July Handler
Subclass JulyHandler to make an app, it provides a way to organize a July
App, and will support more features in the future.
"""
def flash_message(self, msg=None, category=None):
"""flash_message provide an easy way to communicate with users.
create message in your handler::
class HomeHandler(JulyHandler):
def get(self):
self.flash_message('thanks')
self.render('home.html')
and get messages in ``home.html``::
<ul>
{% for category, message in flash_message() $}
<li>{{category}}: {{message}}</li>
{% end %}
</ul>
"""
#: use xsrf token or not ?
key = '%s_flash_message' % self.xsrf_token
if msg is None:
messages = cache.get(key)
if messages is None:
return []
if category is not None:
return get_category_message(messages, category)
#: clear flash message
cache.delete(key)
return messages
message = (category, msg)
messages = cache.get(key)
if isinstance(messages, list):
messages.append(message)
else:
messages = [message]
cache.set(key, messages, 600)
return message
set_default_option('address', default='127.0.0.1', type=str,
help='run server at this address')
set_default_option('port', default=8000, type=int,
help='run server on this port')
set_default_option('settings', default='', type=str,
help='setting file path')
#: application settings
set_default_option('locale_path', type=str,
help='absolute path of locale directory')
set_default_option('default_locale', default='en_US', type=str)
| [
6738,
33718,
1330,
3992,
11,
6654,
198,
6738,
764,
23870,
1330,
12940,
198,
198,
2,
25,
41216,
3689,
198,
6738,
764,
22602,
1330,
900,
62,
12286,
62,
18076,
198,
198,
834,
439,
834,
796,
14631,
16157,
25060,
1600,
366,
32,
14415,
2506... | 2.294372 | 924 |
#!/usr/bin/env python
"""
a simple script can run and test your html rendering classes.
Uncomment the steps as you add to your rendering.
"""
from io import StringIO
# importing the html_rendering code with a short name for easy typing.
import html_render as hr
# reloading in case you are running this in iPython
# -- we want to make sure the latest version is used
import importlib
importlib.reload(hr)
# writing the file out:
def render_page(page, filename):
"""
render the tree of elements
This uses StringIO to render to memory, then dump to console and
write to file -- very handy!
"""
f = StringIO()
page.render(f, " ")
f.seek(0)
print(f.read())
f.seek(0)
open(filename, 'w').write(f.read())
# Step 1
#########
page = hr.Element()
page.append("Here is a paragraph of text -- there could be more of them, but this is enough to show that we can do some text")
page.append("And here is another piece of text -- you should be able to add any number")
render_page(page, "test_html_output1.html")
# ## Step 2
# ##########
# page = hr.Html()
# body = hr.Body()
# body.append(hr.P("Here is a paragraph of text -- there could be more of them, but this is enough to show that we can do some text"))
# body.append(hr.P("And here is another piece of text -- you should be able to add any number"))
# page.append(body)
# render_page(page, "test_html_output2.html")
# # Step 3
# ##########
# page = hr.Html()
# head = hr.Head()
# head.append(hr.Title("PythonClass = Revision 1087:"))
# page.append(head)
# body = hr.Body()
# body.append(hr.P("Here is a paragraph of text -- there could be more of them, but this is enough to show that we can do some text"))
# body.append(hr.P("And here is another piece of text -- you should be able to add any number"))
# page.append(body)
# render_page(page, "test_html_output3.html")
# # Step 4
# ##########
# page = hr.Html()
# head = hr.Head()
# head.append(hr.Title("PythonClass = Revision 1087:"))
# page.append(head)
# body = hr.Body()
# body.append(hr.P("Here is a paragraph of text -- there could be more of them, but this is enough to show that we can do some text",
# style="text-align: center; font-style: oblique;"))
# page.append(body)
# render_page(page, "test_html_output4.html")
# # Step 5
# #########
# page = hr.Html()
# head = hr.Head()
# head.append(hr.Title("PythonClass = Revision 1087:"))
# page.append(head)
# body = hr.Body()
# body.append(hr.P("Here is a paragraph of text -- there could be more of them, but this is enough to show that we can do some text",
# style="text-align: center; font-style: oblique;"))
# body.append(hr.Hr())
# page.append(body)
# render_page(page, "test_html_output5.html")
# # Step 6
# #########
# page = hr.Html()
# head = hr.Head()
# head.append(hr.Title("PythonClass = Revision 1087:"))
# page.append(head)
# body = hr.Body()
# body.append(hr.P("Here is a paragraph of text -- there could be more of them, but this is enough to show that we can do some text",
# style="text-align: center; font-style: oblique;"))
# body.append(hr.Hr())
# body.append("And this is a ")
# body.append( hr.A("http://google.com", "link") )
# body.append("to google")
# page.append(body)
# render_page(page, "test_html_output6.html")
# # Step 7
# #########
# page = hr.Html()
# head = hr.Head()
# head.append(hr.Title("PythonClass = Revision 1087:"))
# page.append(head)
# body = hr.Body()
# body.append( hr.H(2, "PythonClass - Class 6 example") )
# body.append(hr.P("Here is a paragraph of text -- there could be more of them, but this is enough to show that we can do some text",
# style="text-align: center; font-style: oblique;"))
# body.append(hr.Hr())
# list = hr.Ul(id="TheList", style="line-height:200%")
# list.append( hr.Li("The first item in a list") )
# list.append( hr.Li("This is the second item", style="color: red") )
# item = hr.Li()
# item.append("And this is a ")
# item.append( hr.A("http://google.com", "link") )
# item.append("to google")
# list.append(item)
# body.append(list)
# page.append(body)
# render_page(page, "test_html_output7.html")
# # Step 8
# ########
# page = hr.Html()
# head = hr.Head()
# head.append( hr.Meta(charset="UTF-8") )
# head.append(hr.Title("PythonClass = Revision 1087:"))
# page.append(head)
# body = hr.Body()
# body.append( hr.H(2, "PythonClass - Class 6 example") )
# body.append(hr.P("Here is a paragraph of text -- there could be more of them, but this is enough to show that we can do some text",
# style="text-align: center; font-style: oblique;"))
# body.append(hr.Hr())
# list = hr.Ul(id="TheList", style="line-height:200%")
# list.append( hr.Li("The first item in a list") )
# list.append( hr.Li("This is the second item", style="color: red") )
# item = hr.Li()
# item.append("And this is a ")
# item.append( hr.A("http://google.com", "link") )
# item.append("to google")
# list.append(item)
# body.append(list)
# page.append(body)
# render_page(page, "test_html_output8.html")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
64,
2829,
4226,
460,
1057,
290,
1332,
534,
27711,
14837,
6097,
13,
198,
198,
3118,
23893,
262,
4831,
355,
345,
751,
284,
534,
14837,
13,
198,
198,
37811,
198,
198,
6... | 2.740642 | 1,870 |
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import logging
from django.core.urlresolvers import reverse # noqa
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.system_config.forms \
import CreateSDNController
from openstack_dashboard.dashboards.admin.system_config.forms \
import EditPipeline
from openstack_dashboard.dashboards.admin.system_config.forms \
import UpdatecDNS
from openstack_dashboard.dashboards.admin.system_config.forms \
import UpdatecEXT_OAM
from openstack_dashboard.dashboards.admin.system_config.forms \
import UpdatecNTP
from openstack_dashboard.dashboards.admin.system_config.forms \
import UpdateiStorage
from openstack_dashboard.dashboards.admin.system_config.forms \
import UpdateiStoragePools
from openstack_dashboard.dashboards.admin.system_config.forms \
import UpdateSDNController
from openstack_dashboard.dashboards.admin.system_config.forms \
import UpdateSystem
from openstack_dashboard.dashboards.admin.system_config.tables \
import SDNControllerTable
from openstack_dashboard.dashboards.admin.system_config.tabs \
import ConfigTabs
LOG = logging.getLogger(__name__)
######################################################
# SDN Controller Modal Views #
######################################################
######################################################
# Pipeline/PM Views #
######################################################
| [
2,
198,
2,
15069,
357,
66,
8,
2211,
12,
7908,
3086,
5866,
11998,
11,
3457,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
198,
198,
2,
43907,
25,
7400,
11338,
28,
19,
6482,
10394,... | 3.255319 | 564 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2021 Kaede Hoshikawa
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable
import abc
import asyncio
import functools
import os
TestHelper = AsyncioTestHelper
try:
import curio
except ImportError:
pass
else:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
220,
220,
15069,
33448,
509,
8432,
68,
367,
3768,
40398,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
... | 3.192308 | 260 |
import yaml
test_marks.unittest = ['.marks']
if __name__ == '__main__':
import test_appliance
test_appliance.run(globals())
| [
198,
11748,
331,
43695,
198,
198,
9288,
62,
14306,
13,
403,
715,
395,
796,
685,
4458,
14306,
20520,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1330,
1332,
62,
1324,
75,
3610,
198,
220,
22... | 2.385965 | 57 |
from app.tempo.models.following import *
from app.tempo.models.like import *
from app.tempo.models.notification import *
from app.tempo.models.post import *
from app.tempo.models.session import *
from app.tempo.models.song_post import *
from app.tempo.models.song import *
from app.tempo.models.spotify_cred import *
from app.tempo.models.user import *
| [
6738,
598,
13,
11498,
7501,
13,
27530,
13,
27780,
278,
1330,
1635,
198,
6738,
598,
13,
11498,
7501,
13,
27530,
13,
2339,
1330,
1635,
198,
6738,
598,
13,
11498,
7501,
13,
27530,
13,
1662,
2649,
1330,
1635,
198,
6738,
598,
13,
11498,
... | 3.043103 | 116 |
import sys
a = int(sys.argv[1])
b = 10
print("Result: ", a*b)
| [
11748,
25064,
198,
198,
64,
796,
493,
7,
17597,
13,
853,
85,
58,
16,
12962,
198,
65,
796,
838,
198,
198,
4798,
7203,
23004,
25,
33172,
257,
9,
65,
8,
628,
628
] | 2.09375 | 32 |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
from nose import with_setup
from nose.tools import raises
from Kqlmagic.constants import Constants
from Kqlmagic.kql_magic import Kqlmagic as Magic
from textwrap import dedent
import os.path
import re
import tempfile
ip = get_ipython()
TEST_URI_SCHEMA_NAME = "kusto"
query1 = "$TEST_CONNECTION_STR let T = view () { datatable(n:long, name:string)[1,'foo',2,'bar'] }; T"
@with_setup(_setup, _teardown)
@with_setup(_setup, _teardown)
@with_setup(_setup, _teardown)
@with_setup(_setup, _teardown)
query2 = """
$TEST_CONNECTION_STR
let T = view () { datatable(first_name:string, last_name:string, year_of_death:long)
['William', 'Shakespeare', 1616, 'Bertold', 'Brecht', 1956] };
T
"""
@with_setup(_setup, _teardown)
query3 = """
$TEST_CONNECTION_STR
x <<
let T = view () { datatable(first_name:string, last_name:string, year_of_death:long)
['William', 'Shakespeare', 1616, 'Bertold', 'Brecht', 1956] };
T
"""
@with_setup(_setup, _teardown)
@with_setup(_setup, _teardown)
query4 = """
$TEST_CONNECTION_STR
let T = view () { datatable(first_name:string, last_name:string, year_of_death:long)
['William', 'Shakespeare', 1616, 'Bertold', 'Brecht', 1956] };
T | project last_name, last_nameX = last_name
"""
@with_setup(_setup, _teardown)
@with_setup(_setup, _teardown)
query5 = """
$TEST_CONNECTION_STR
let T = view () { datatable(Result:string)
['apple', 'banana', 'cherry'] };
T
| sort by Result asc
"""
query6 = "$TEST_CONNECTION_STR let T = view () { datatable(first_name:string, last_name:string, year_of_death:long)['William', 'Shakespeare', 1616, 'Bertold', 'Brecht', 1956] }; T"
@with_setup(_setup, _teardown)
@with_setup(_setup, _teardown)
@with_setup(_setup, _teardown)
@with_setup(_setup, _teardown)
@with_setup(_setup, _teardown)
@with_setup(_setup, _teardown)
@with_setup(_setup, _teardown)
| [
2,
10097,
45537,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321,
13,
198,
2,
10097,
35937,
198,
1... | 2.53787 | 911 |
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from typing import List, Optional
from .. import models
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
9973,
19182,
30617,
7824,
628,
220,
220,
220,
1400,
6764,
2810,
357,
27568,
416,
2451,
7928,
6127,
5235,
3740,
1378,
12567,
13,
785,
14,
2032,
7928,
12,
15042,
14,... | 3.17037 | 135 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import namedtuple
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.http import Http404
from django_filters.rest_framework import DjangoFilterBackend
from django.utils.translation import ugettext_lazy as _
from rest_framework import generics
from rest_framework import renderers
from rest_framework import status
from rest_framework import viewsets
from rest_framework.decorators import list_route
from rest_framework.fields import empty
from rest_framework.filters import SearchFilter
from rest_framework.pagination import PageNumberPagination
from rest_framework.parsers import MultiPartParser, FormParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
import reversion
from nfdrenderers import pdf as pdfrenderers
from nfdrenderers import csv as csvrenderers
from nfdrenderers import xlsx as xlsrenderers
from nfdrenderers import shp as shprenderers
from . import constants
from . import filters
from . import itis
from . import models
from . import serializers
from .permissions import (
CanCreateAnimals,
CanUpdateFeatureType,
CanCreateFungus,
CanCreateNaturalAreas,
CanCreatePlants,
CanCreateSlimeMold,
CanWriteOrUpdateAny,
get_permissions,
)
logger = logging.getLogger(__name__)
FilterField = namedtuple("FilterField", [
"name",
"value",
"lookup"
])
FeatureTypeFormCategory = namedtuple("FeatureTypeFormCategory", [
"subtype",
"is_writer",
"is_publisher",
])
def get_units_part(*taxonomic_units):
"""Return a portion of the SQL used in the aggregation query"""
taxonomic_units = list(
taxonomic_units) if taxonomic_units else ["species"]
select_fragments = [] # for use in the SELECT clause
group_fragments = [] # for use in the GROUP BY clause
for unit in taxonomic_units:
part = "t.upper_ranks #>> '{%(unit)s, name}'" % {
"unit": unit
}
select_fragments.append(part + " AS %(unit)s" % {"unit": unit})
group_fragments.append(part)
select_part = ", ".join(select_fragments)
group_part = ", ".join(group_fragments)
ordering_part = ", ".join(group_fragments[::-1])
return select_part, group_part, ordering_part
def get_aggregation_query(taxonomic_units=None):
"""Return the SQL query that will be executed"""
taxonomic_units = list(taxonomic_units) if taxonomic_units else []
select_part, group_part, ordering_part = get_units_part(*taxonomic_units)
query = """
SELECT
COUNT(o.id) AS occurrences,
{select_part}
FROM nfdcore_occurrencetaxon AS o
LEFT JOIN nfdcore_taxon AS t ON (t.tsn = o.taxon_id)
LEFT JOIN nfdcore_occurrencecategory AS c ON (c.id = o.occurrence_cat_id)
WHERE c.main_cat = %(category)s
GROUP BY
{group_part}
ORDER BY
{ordering_part}
""".format(
select_part=select_part,
group_part=group_part,
ordering_part=ordering_part
)
return query
class FeatureTypeFormViewSet(viewsets.ViewSet):
"""ViewSet for featuretype forms"""
randerer_classes = (
renderers.JSONRenderer,
renderers.BrowsableAPIRenderer,
)
serializer_class = serializers.FormDefinitionsSerializer
@list_route()
@list_route()
@list_route()
@list_route()
@list_route()
@list_route()
@list_route()
@list_route()
@list_route()
@list_route()
@list_route()
class OccurrenceAggregatorViewSet(viewsets.ViewSet):
"""ViewSet for occurrence stats"""
renderer_classes = (
renderers.JSONRenderer,
renderers.BrowsableAPIRenderer,
pdfrenderers.PdfOccurrenceStatsRenderer,
)
serializer_class = serializers.OccurrenceAggregatorSerializer
taxonomic_units = [
"species",
"family",
"phylum",
]
@list_route(methods=["get",])
@list_route(methods=["get",])
@list_route(methods=["get",])
@list_route(methods=["get",])
# permission_classes = [IsAuthenticated, CanCreateAnimals]
class LayerDetail(APIView):
"""
Retrieve, update or delete an occurrence instance.
"""
permission_classes = [IsAuthenticated, CanUpdateFeatureType]
renderer_classes = (
renderers.JSONRenderer,
renderers.BrowsableAPIRenderer,
pdfrenderers.PdfLayerDetailRenderer,
csvrenderers.CsvRenderer,
xlsrenderers.XlsxRenderer,
shprenderers.ShpRenderer,
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
... | 2.607616 | 1,812 |
# Owner(s): ["module: multiprocessing"]
import os
import random
import signal
import sys
import time
import unittest
from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN)
import torch.multiprocessing as mp
@unittest.skipIf(
NO_MULTIPROCESSING_SPAWN,
"Disabled for environments that don't support the spawn start method")
@unittest.skipIf(
IS_WINDOWS,
"Fork is only available on Unix",
)
if __name__ == '__main__':
run_tests()
| [
2,
23853,
7,
82,
2599,
14631,
21412,
25,
18540,
305,
919,
278,
8973,
198,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
6737,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
555,
715,
395,
198,
198,
6738,
28034,
13,
33407,
13557,
3... | 2.740741 | 189 |
# encoding=utf-8
## SOLVED 2015/01/12
## 1097343
# The smallest number expressible as the sum of a prime square, prime cube, and
# prime fourth power is 28. In fact, there are exactly four numbers below fifty
# that can be expressed in such a way:
# 28 = 22 + 23 + 24
# 33 = 32 + 23 + 24
# 49 = 52 + 23 + 24
# 47 = 22 + 33 + 24
# How many numbers below fifty million can be expressed as the sum of a prime
# square, prime cube, and prime fourth power?
import helpers.prime as prime
import math
MAX = 50000000
| [
2,
21004,
28,
40477,
12,
23,
198,
2235,
36817,
53,
1961,
1853,
14,
486,
14,
1065,
198,
2235,
838,
5607,
32118,
198,
198,
2,
383,
18197,
1271,
4911,
856,
355,
262,
2160,
286,
257,
6994,
6616,
11,
6994,
23441,
11,
290,
198,
2,
6994,... | 3.316129 | 155 |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#####
# BSD 2-clause "Simplified" License
# original repository https://github.com/hido/frequent-direction
# rearranged to class by AtsushiHashimoto (based on the clone accessed on 26th Jun. 2017)
#####
import numpy as np
import numpy.linalg as ln
import math
import sys
""" This is a simple and deterministic method for matrix sketch.
The original method has been introduced in [Liberty2013]_ .
[Liberty2013] Edo Liberty, "Simple and Deterministic Matrix Sketching", ACM SIGKDD, 2013.
"""
def calculateError(mat_a, mat_b):
"""Compute the degree of error by sketching
:param mat_a: original matrix
:param mat_b: sketch matrix
:returns: reconstruction error
"""
dot_mat_a = np.dot(mat_a.T, mat_a)
dot_mat_b = np.dot(mat_b.T, mat_b)
return ln.norm(dot_mat_a - dot_mat_b, ord = 2)
def squaredFrobeniusNorm(mat_a):
"""Compute the squared Frobenius norm of a matrix
:param mat_a: original matrix
:returns: squared Frobenius norm
"""
return ln.norm(mat_a, ord = 'fro') ** 2
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
4242,
2,
198,
2,
347,
10305,
362,
12,
565,
682,
366,
8890,
489,
1431,
1,
13789,
198,
2,
2656,
16099,
3740,
137... | 2.737913 | 393 |
from math import ceil
import performance
import dislib as ds
from dislib.recommendation import ALS
if __name__ == '__main__':
main()
| [
6738,
10688,
1330,
2906,
346,
198,
198,
11748,
2854,
198,
198,
11748,
595,
8019,
355,
288,
82,
198,
6738,
595,
8019,
13,
47335,
437,
341,
1330,
27249,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
... | 3.021277 | 47 |
from functools import reduce
from operator import mul
from typing import NamedTuple, Optional
if __name__ == "__main__":
file_input = load_input("input.txt")
converted_file_input = hex_message_to_binary(file_input)
input_instruction = parse_instruction_from_binary(converted_file_input)
print(add_version_numbers(input_instruction))
print(process_instruction(input_instruction))
| [
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
10088,
1330,
35971,
198,
6738,
19720,
1330,
34441,
51,
29291,
11,
32233,
628,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
2393,
6... | 3.044776 | 134 |
from tkinter import *
from os import path
from tkinter import filedialog
from tkinter import Menu
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import messagebox
import os
from sintactico import ejecutar_analisis
from reportes.RealizarReportes import RealizarReportes
##############################################EVENTO REDIMENSIONAR LA VENTANA####################################
##############################################EVENTOS DE LOS BOTONES DEL MENU####################################
##############################################EVENTOS DE LOS BOTONES DEL FRAME####################################
##############################################CREA PESTAÑAS EN EL TAB####################################
#def OnMouseWheel(self,event):
# print("scrool mouse")
if __name__ == '__main__':
main() | [
6738,
256,
74,
3849,
1330,
1635,
198,
198,
6738,
28686,
1330,
3108,
198,
6738,
256,
74,
3849,
1330,
5717,
498,
519,
198,
198,
6738,
256,
74,
3849,
1330,
21860,
198,
6738,
256,
74,
3849,
1330,
256,
30488,
198,
6738,
256,
74,
3849,
13... | 3.366667 | 270 |
from itertools import product
import numpy as np
import driving_gridworld.road as dg_road
from driving_gridworld.obstacles import Bump
from driving_gridworld.obstacles import Pedestrian
from driving_gridworld.car import Car
from driving_gridworld.actions import ACTIONS, RIGHT, LEFT, UP, DOWN, NO_OP
import driving_gridworld.rewards as dg_rewards
import pytest
@pytest.mark.parametrize("action", ACTIONS)
@pytest.mark.parametrize("obst", [Bump(0, 0), Pedestrian(0, 0)])
@pytest.mark.parametrize("obst", [Bump(0, 0), Pedestrian(0, 0)])
@pytest.mark.parametrize("action", ACTIONS)
@pytest.mark.parametrize(
"obst",
[Bump(row=-1, col=-1), Pedestrian(row=0, col=-1)])
@pytest.mark.parametrize("action", ACTIONS)
@pytest.mark.parametrize("obst", [Bump(-1, -1), Pedestrian(0, -1)])
@pytest.mark.parametrize("action", ACTIONS)
@pytest.mark.parametrize("speed", [1, 2, 3])
@pytest.mark.parametrize('col', range(4))
@pytest.mark.parametrize('headlight_range', range(1, 11))
@pytest.mark.parametrize('headlight_range', range(1, 11))
@pytest.mark.parametrize('headlight_range', range(1, 11))
@pytest.mark.parametrize('speed', range(4))
@pytest.mark.parametrize('col', [0, 1])
@pytest.mark.parametrize('col', [0, 1])
@pytest.mark.parametrize('col', [-1, 4])
@pytest.mark.parametrize("speed", range(7))
@pytest.mark.parametrize('p', [0.1 * i for i in range(1, 11)])
@pytest.mark.parametrize('p', [0.1 * i for i in range(1, 11)])
@pytest.mark.parametrize('p', [0.1 * i for i in range(1, 11)])
| [
6738,
340,
861,
10141,
1330,
1720,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
5059,
62,
25928,
6894,
13,
6344,
355,
288,
70,
62,
6344,
198,
6738,
5059,
62,
25928,
6894,
13,
672,
301,
9928,
1330,
347,
931,
198,
6738,
5059,
62,
2... | 2.426101 | 636 |
import numpy as np
# THIS IS ULTRA SPECIFIC TO THE PROBLEM, Dont dare to use it!!!!
TRUE_B = 2.3101
# b=2.31
# a = 0.01584
# epsilon_t = a*(b+t)**(-0.55)
# epsilon_t = np.max(min_epsilon,epsilon_t)
| [
11748,
299,
32152,
355,
45941,
628,
198,
2,
220,
220,
12680,
3180,
471,
27734,
3861,
28196,
30643,
5390,
3336,
220,
4810,
9864,
2538,
44,
11,
360,
756,
16498,
284,
779,
340,
13896,
198,
5446,
8924,
62,
33,
796,
362,
13,
3132,
486,
6... | 1.784615 | 130 |
print sum4([1, 2])
print sum4([3, 4])
print sum4([4, 5, 6])
print sum4([4, 9, 4, 9, 4, 9])
print sum4([]) | [
198,
4798,
2160,
19,
26933,
16,
11,
362,
12962,
198,
4798,
2160,
19,
26933,
18,
11,
604,
12962,
198,
4798,
2160,
19,
26933,
19,
11,
642,
11,
718,
12962,
198,
4798,
2160,
19,
26933,
19,
11,
860,
11,
604,
11,
860,
11,
604,
11,
860... | 2.038462 | 52 |
from enum import Enum
| [
6738,
33829,
1330,
2039,
388,
628
] | 3.833333 | 6 |
#!/usr/bin/python3
"""
Path: project3/sysAdminTask.py
Author: Ricardo Franzen - rfranzen@gmail.com
Date: 2022-03-09
Description: Creates a file that contains:
- current users logged in
- hostname
- date/time (HH:MM:SS DD/MM/YYYY)
- number of processes running
- top 10 process names/command, id and memory usage sorted from most memory % to least memory %
This script will:
1. Get a list of current users logged in
2. Get the hostname
3. Get the current date/time in the proper format
4. Get the number of processes running
5. Get the top 10 process names/command, id and memory usage sorted from most memory % to least memory %
6. Write the information to a file in /tmp/report.txt
"""
from datetime import date
from sys import platform
import subprocess
OUTPUT_FILE = "/tmp/report.txt"
# Function to get current logged in users, tty and login_time
# returns a list of tuples: [(user, tty, login_time), ...]
# Function to get hostname
# Function to get current date/time
# Function to get number of processes running
# Function to get top 10 process names/command, id and memory usage sorted from most memory % to least memory %
# Function to write the information to a file
# main
if __name__ == '__main__':
# Check if the OS is a Linux OS
if platform == "linux" or platform == "linux2":
# Get current logged in users
users = get_logged_users()
# Get hostname
hostname = get_hostname()
# Get current date/time
date_time = get_date_time()
# Get number of processes running
process_count = get_process_count()
# Get the top 10 process
top_processes = get_top_processes()
# Write the information to a file
write_to_file(OUTPUT_FILE, users, hostname, date_time, process_count, top_processes)
else:
print("Sorry, this script is only for Linux OS")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
37811,
198,
15235,
25,
1628,
18,
14,
17597,
46787,
25714,
13,
9078,
198,
13838,
25,
38847,
12323,
4801,
532,
374,
69,
2596,
4801,
31,
14816,
13,
785,
198,
10430,
25,
33160,
12,
307... | 3.239016 | 569 |
import requests,sys
from config_helper import get_proxies
| [
11748,
7007,
11,
17597,
198,
6738,
4566,
62,
2978,
525,
1330,
651,
62,
1676,
87,
444,
198
] | 3.411765 | 17 |
import sys
import unittest
sys.path.insert(1, '../')
from flint.tokenizer import Tokenizer
if __name__ == '__main__':
unittest.main()
| [
11748,
25064,
198,
11748,
555,
715,
395,
198,
198,
17597,
13,
6978,
13,
28463,
7,
16,
11,
705,
40720,
11537,
198,
6738,
781,
600,
13,
30001,
7509,
1330,
29130,
7509,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
... | 2.660377 | 53 |
'''
cows and bulls
Create a program that will play the “cows and bulls” game with the user. The game works like this:
Randomly generate a 4-digit number. Ask the user to guess a 4-digit number. For every digit that the user guessed correctly in the correct place, they have a “cow”. For every digit the user guessed correctly in the wrong place is a “bull.” Every time the user makes a guess, tell them how many “cows” and “bulls” they have. Once the user guesses the correct number, the game is over. Keep track of the number of guesses the user makes throughout teh game and tell the user at the end.
'''
| [
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
198,
198,
7061,
6,
198,
66,
1666,
29... | 3.367647 | 204 |
from flask import Flask, render_template, Response
from mask_detect_video_app import VideoCamera
from flask import send_file
app = Flask(__name__)
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
@app.route('/project')
def project():
""" Project Page"""
return render_template('project.html')
@app.route('/about_me')
def about_me():
""" About Me page"""
return render_template('about_me.html')
@app.route('/download')
def gen(camera):
"""Video streaming generator function."""
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@app.route('/video_feed')
def video_feed():
"""Video streaming route. Put this in the src attribute of an img tag."""
return Response(gen(VideoCamera()),mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == "__main__":
app.run(debug=True)
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
18261,
198,
6738,
9335,
62,
15255,
478,
62,
15588,
62,
1324,
1330,
7623,
35632,
198,
6738,
42903,
1330,
3758,
62,
7753,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
628,
198,
3... | 2.551637 | 397 |
import argparse
from typing import Dict
from dataset import Dataset
from model import Model
from utils import setup_logger, get_logger
if __name__ == '__main__':
main()
| [
11748,
1822,
29572,
198,
6738,
19720,
1330,
360,
713,
198,
198,
6738,
27039,
1330,
16092,
292,
316,
198,
6738,
2746,
1330,
9104,
198,
6738,
3384,
4487,
1330,
9058,
62,
6404,
1362,
11,
651,
62,
6404,
1362,
628,
628,
198,
361,
11593,
36... | 3.178571 | 56 |
import libs.requests as requests
import libs.telebot as telebot
import time
import datetime
import re
TOKEN_TG = '' #TOKEN BOT TELEGRAM
TOKEN_VK = '' #ACCESS KEY VK
tg_channel = '@name' #chat in telegram
vk_group = '' #example 'vkapi' (vk.com/vkapi)
sticker_id = ['CAADAgADEgADuYK-DAwZdY5q1ZiJAg', 'CAADAgADEwADuYK-DC9fGK4AAfkdLQI'] #?? Stickers ?? (lol)
tg_bot = telebot.TeleBot(TOKEN_TG)
check_group_wall(vk_group)
| [
11748,
9195,
82,
13,
8897,
3558,
355,
7007,
198,
11748,
9195,
82,
13,
46813,
13645,
355,
5735,
13645,
198,
11748,
640,
198,
11748,
4818,
8079,
198,
11748,
302,
628,
198,
10468,
43959,
62,
35990,
796,
10148,
1303,
10468,
43959,
347,
2394... | 2.248677 | 189 |
#Socket client for python
import socket # socket library
import sys # for exit handling
import threading # for starting transport layer thread
from client import TransportLayer
import Queue
main = ['-Main Menu-', '1. Login', '2. Make New User', '3. Hall of Fame', '4. Exit'];
game = ['-Game Menu-', '1. Start New Game', '2. Get List of Games', '3. Hall of Fame', '4. Exit'];
level = ['Choose Difficulty:', '1. Easy', '2. Medium', '3. Hard'];
data_q = Queue.Queue()
reply_q = Queue.Queue()
# -------------------------------------------------------------------- #
init();
line();
print 'Welcome to the bastardized version of hangman!'
uname = ""
while 1:
line()
if not uname: reply = display_menu(main)
else: reply = display_menu(game)
if reply == 1:
if not uname: uname = login()
else: start_game()
elif reply == 2:
if not uname: make_user()
else: get_games()
elif reply == 3:
hall_of_fame()
elif reply == 4:
safe_exit()
else:
print "Invalid command entered."
| [
2,
39105,
5456,
329,
21015,
198,
198,
11748,
17802,
1303,
17802,
5888,
198,
11748,
25064,
1303,
329,
8420,
9041,
198,
11748,
4704,
278,
1303,
329,
3599,
4839,
7679,
4704,
198,
6738,
5456,
1330,
19940,
49925,
198,
11748,
4670,
518,
198,
... | 2.98494 | 332 |
from collections import Counter
| [
6738,
17268,
1330,
15034,
198,
220,
220,
220,
220
] | 4 | 9 |
import os
import json
sharepath = os.path.dirname(__file__)
default_config = {
'profile': 'profile.json',
'server': 'gunicorn',
'workers': 10,
'log': None,
'loglevel': 'info',
'accesslog': None,
'host': 'localhost',
'port': 8080,
'root': '',
'debug': False,
'colormaps': 'colormaps',
'cache': {
'driver': 'filesystem',
'src': 'cache/{layer}/{zoom}/{x},{z};{sha1(colormap)}.{format}',
},
'storage': [
{
'requires': ['layer', 'zoom', 'x', 'z'],
'driver': 'filesystem',
'src': 'layers/{layer}/{zoom}/{x},{z}.png',
},
{
'requires': ['layer', 'format'],
'driver': 'filesystem',
'src': 'layers/{layer}.{format}',
},
],
}
| [
11748,
28686,
198,
11748,
33918,
628,
198,
20077,
6978,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
8,
198,
198,
12286,
62,
11250,
796,
1391,
198,
220,
220,
220,
705,
13317,
10354,
705,
13317,
13,
17752,
3256,
198,
220,
... | 1.916067 | 417 |
ultimo=10
fila=list(range(1,ultimo+1))#Função Sequenciadora de 1 para ultimo +1
fila2=list(range(1,ultimo+1))
while True:# Sempre dará verdadeiro em loop e só sairá no break
print(f"Existem{len(fila)} clientes na fila\n")
print(f"Existem{len(fila2)} clientes na fila\n")
print(f"Fila atual: {fila}\n")
print(f"Fila atual: {fila2}\n")
print("Digite a Fila que deseja atualizar 1 ou a 2: ")
choice=input("Escolha (1) ou (2): )")
if choice=="1":
print("Digite F para adicionar no final da fila,")
print(f"A para realizar o atendimento ou S para sair")
operacao= input("Operação (F,A ou S): ")
if operacao == "A" or operacao=="a":
if len (fila)>0:
atendido = fila.pop(0)
print(f"Cliente {atendido} atendido")
else:
print("Fila vazia!!!")
elif operacao == "F" or operacao=="f":
ultimo+=1
fila.append(ultimo)
elif operacao =="S" or operacao =="s":
break
else:
print("Operação inválida !")
elif choice=="2":
print("Digite F para adicionar no final da fila,")
print(f"A para realizar o atendimento ou S para sair")
operacao= input("Operação (F,A ou S): ")
if operacao == "A" or operacao=="a":
if len (fila2)>0:
atendido = fila2.pop(0)
print(f"Cliente {atendido} atendido")
else:
print("Fila vazia!!!")
elif operacao == "F" or operacao=="f":
ultimo+=1
fila.append(ultimo)
elif operacao =="S" or operacao =="s":
break
else:
print("Operação inválida !")
| [
586,
25147,
28,
940,
201,
198,
69,
10102,
28,
4868,
7,
9521,
7,
16,
11,
586,
25147,
10,
16,
4008,
2,
24629,
16175,
28749,
24604,
268,
979,
324,
5799,
390,
352,
31215,
3789,
25147,
1343,
16,
201,
198,
69,
10102,
17,
28,
4868,
7,
... | 1.762795 | 1,016 |
"""
demo.py
is an app demo use the library that validate transactions
Created on : June 9, 2016
Author : Mohammad Alrefai
"""
import json
import validate_transaction
main()
| [
37811,
198,
9536,
78,
13,
9078,
198,
220,
220,
220,
318,
281,
598,
13605,
779,
262,
5888,
326,
26571,
8945,
198,
220,
220,
220,
220,
220,
220,
220,
15622,
319,
220,
1058,
220,
220,
2795,
860,
11,
1584,
198,
220,
220,
220,
220,
220... | 2.666667 | 78 |
from .data_profile import Profiler, MyEncoder, Hyperparams
__all__ = ['Profiler', 'Hyperparams']
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__) # type: ignore | [
6738,
764,
7890,
62,
13317,
1330,
4415,
5329,
11,
2011,
27195,
12342,
11,
15079,
37266,
198,
198,
834,
439,
834,
796,
37250,
15404,
5329,
3256,
705,
38197,
37266,
20520,
198,
198,
6738,
279,
10025,
22602,
1330,
9117,
62,
6978,
198,
834,... | 3.098361 | 61 |
# -*- coding: utf-8 -*-
"""
Config for API Access
"""
API_USER = "<your-username-here>"
API_PASSWORD = "<your-password-here>"
API_ADDRESS = "http://<your-address-here>:9981/"
API_IS_SELFSIGNED = <True or False> # True for selfsigned SSL certificate
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
16934,
329,
7824,
8798,
198,
37811,
198,
198,
17614,
62,
29904,
796,
33490,
14108,
12,
29460,
12,
1456,
24618,
198,
17614,
62,
47924,
54,
12532,
796,
33... | 2.670213 | 94 |
from typing import Tuple
import numpy as np
import torch
import torch.utils.data as data
from auxiliary.utils import hwc_chw, gamma_correct, brg_to_rgb
from classes.data.DataAugmenter import DataAugmenter
| [
6738,
19720,
1330,
309,
29291,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
26791,
13,
7890,
355,
1366,
198,
198,
6738,
37419,
13,
26791,
1330,
289,
86,
66,
62,
354,
86,
11,
34236,
62,
30283,
11,
... | 3.2 | 65 |
# Twitter API Keys
consumer_key = "Enter your key"
consumer_secret = "Enter your key"
access_token = "Enter your key"
access_token_secret = "Enter your key"
| [
2,
3009,
7824,
26363,
201,
198,
49827,
62,
2539,
796,
366,
17469,
534,
1994,
1,
201,
198,
49827,
62,
21078,
796,
366,
17469,
534,
1994,
1,
201,
198,
15526,
62,
30001,
796,
366,
17469,
534,
1994,
1,
201,
198,
15526,
62,
30001,
62,
... | 3.115385 | 52 |
# This file is adapted from https://github.com/tensorflow/benchmarks
# /blob/master/scripts/tf_cnn_benchmarks/allreduce.py
#
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for allreduce."""
from __future__ import print_function
import collections as pycoll
import logging
import numpy as np
import re
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib import nccl
from tensorflow.contrib.all_reduce.python import all_reduce
logger = logging.getLogger(__name__)
AllReduceSpecTuple = pycoll.namedtuple('AllReduceSpecTuple',
'alg shards limit')
def parse_general_int(s):
"""Parse integer with power-of-2 suffix eg. 32k."""
mo = re.match(r'(\d+)([KkMGT]?)$', s)
if mo:
i, suffix = mo.group(1, 2)
v = int(i)
if suffix:
if suffix == 'K' or suffix == 'k':
v *= 1024
elif suffix == 'M':
v *= (1024 * 1024)
elif suffix == 'G':
v *= (1024 * 1024 * 1024)
elif suffix == 'T':
v *= (1024 * 1024 * 1024 * 1024)
else:
raise ValueError('invalid integer string %s' % s)
return v
else:
v = int(s)
return v
def parse_all_reduce_spec(all_reduce_spec):
"""Parse all_reduce_spec.
Args:
all_reduce_spec: a string specifying a combination of all-reduce
algorithms to apply for gradient reduction.
Returns:
a list of AllReduceSpecTuple.
Raises:
ValueError: all_reduce_spec is not well-formed.
An all_reduce_spec has BNF form:
int ::= positive whole number
g_int ::= int[KkMGT]?
alg_spec ::= alg | alg#int
range_spec ::= alg_spec | alg_spec/alg_spec
spec ::= range_spec | range_spec:g_int:range_spec
Not all syntactically correct specifications are supported.
Examples of supported all_reduce_spec strings, with semantics explained:
'xring' == apply ring all-reduce to all tensors
'xring#2' == apply ring all-reduce to all tensors, using two simultaneous
transfer rings, each operating on 1/2 of each tensor.
'nccl' == apply NCCL all-reduce to all tensors (only works within
a single worker process where all devices are GPUs)
'nccl/xring' == apply NCCL all-reduce to all tensors within each worker
to produce at least one full-reduced (locally) value,
then apply ring all-reduce to one such value from each
worker, then apply NCCL broadcast to propagate those globally
reduced values back to every device within each worker.
'pscpu' == Shuffle reduce using worker CPUs as the gather devices: each
distributed tensor is reduced by copying all instances to
one of the worker CPUs, computing the reduction there, then
copying back to each participating device. Tensor reductions
are assigned to specific CPUs round-robin.
'psgpu#4' == Arrange all GPUs across all workers into groups of 4.
Each distributed tensor is shuffle reduced against one
such group of 4 GPUs, selected round-robin. That is, each
tensor is split across 4 shards for the reduction.
'pscpu:2k:pscpu#2:64k:xring' == Apply single-shard pscpu to
tensors of size <= 2048 elements, apply 2-shard pscpu to
tensors up to size 64k elements, apply xring to larger tensors.
'pscpu/pscpu#2' == Use shuffle gather to locally reduce each tensor on
the worker's CPU, then use 2-shard shuffle to reduce those
locally reduced tensors across workers (on the worker CPUs), then
scatter the globally reduced values locally from each worker CPU.
"""
range_parts = all_reduce_spec.split(':') + ['-1']
if len(range_parts) % 2:
raise ValueError(
'all_reduce_spec not well formed: %s' % all_reduce_spec)
limit = 0
spec = []
alg = None
shards = 1
for i, range_part in enumerate(range_parts):
if i % 2 == 1:
try:
limit = parse_general_int(range_part)
spec.append(
AllReduceSpecTuple(alg=alg, shards=shards, limit=limit))
except ValueError:
raise ValueError(
'all_reduce_spec (%s) contains non-integer range %s' %
(all_reduce_spec, range_part))
else:
alg = range_part
alg_parts = range_part.split('#')
alg = alg_parts[0]
if len(alg_parts) > 1:
try:
shards = int(alg_parts[1])
except ValueError:
raise ValueError(
'all_reduce_spec (%s) contains non-integer '
'shards %s' % all_reduce_spec, alg_parts[1])
else:
shards = 1
if alg not in [
'nccl', 'nccl/xring', 'nccl/rechd', 'nccl/pscpu', 'xring',
'pscpu', 'psgpu', 'pscpu/pscpu'
]:
raise ValueError('all_reduce_spec (%s) contains invalid alg %s'
% (all_reduce_spec, alg))
return spec
def build_all_reduce_device_prefixes(job_name, num_tasks):
"""Build list of device prefix names for all_reduce.
Args:
job_name: 'worker', 'ps' or 'localhost'.
num_tasks: number of jobs across which device names should be generated.
Returns:
A list of device name prefix strings. Each element spells out the full
host name without adding the device.
e.g. '/job:worker/task:0'
"""
if job_name != 'localhost':
return ['/job:%s/task:%d' % (job_name, d) for d in range(0, num_tasks)]
else:
assert num_tasks == 1
return ['/job:%s' % job_name]
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: list of strings naming devices.
group_size: int >= 1
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size = 0 then each device will appear
exactly once.
Raises:
ValueError: group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if
(num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(0, num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= theshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
def aggregate_single_gradient(grad_and_vars, use_mean, check_inf_nan):
"""Calculate the average gradient for a shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single tower, and the number of pairs
equals the number of towers.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. The has_nan_or_inf indicates the grads has nan or inf.
"""
grads = [g for g, _ in grad_and_vars]
grad = tf.add_n(grads)
if use_mean and len(grads) > 1:
grad = tf.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
def aggregate_gradients_using_copy_with_device_selection(
tower_grads, avail_devices, use_mean=True, check_inf_nan=False):
"""Aggregate gradients, controlling device for the aggregation.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. The has_nan_or_inf indicates the grads has nan or inf.
"""
agg_grads = []
has_nan_or_inf_list = []
for i, single_grads in enumerate(zip(*tower_grads)):
with tf.device(avail_devices[i % len(avail_devices)]):
grad_and_var, has_nan_or_inf = aggregate_single_gradient(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
return agg_grads
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with tf.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl.all_sum(scaled_grads)
elif alg == 'simple':
summed_grads = build_reduce_sum(scaled_grads)
elif alg == 'trivial':
summed_grads = build_trivial_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, tf.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(
scaled_grads, num_shards, tf.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, tf.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, tf.add, tf.add_n)
elif alg == 'pscpu/pscpu':
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads,
aux_devices,
# TODO(tucker): devise a way of better specifying the device
# for the second level.
[aux_devices[0]],
tf.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, tf.add_n)
else:
raise ValueError('unsupported all_reduce alg: ', alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def contains_any(haystack, needles):
"""Tests if any needle is a substring of haystack.
Args:
haystack: a string
needles: list of strings
Returns:
True if any element of needles is a substring of haystack,
False otherwise.
"""
for n in needles:
if n in haystack:
return True
return False
def sum_gradients_all_reduce(dev_prefixes,
tower_grads,
num_workers,
alg,
num_shards,
gpu_indices,
agg_small_grads_max_bytes=0):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
tower_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
agg_small_grads_max_bytes: largest tensor eligible for aggregation,
in number of bytes.
Returns:
list of reduced tensors, packing values
"""
alg_contains_shuffle = contains_any(alg, ['pscpu', 'psgpu'])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
if agg_small_grads_max_bytes > 0:
tower_grads, packing = pack_small_tensors(
tower_grads, max_bytes=agg_small_grads_max_bytes)
else:
packing = None
new_tower_grads = []
if alg == 'better':
raw_devices = ['/gpu:%i' % (i) for i in gpu_indices]
agg_grads = aggregate_gradients_using_copy_with_device_selection(
tower_grads, raw_devices)
for arr in tower_grads:
new_tower_grads.append(
[(g, v) for (_, v), (g, _) in zip(arr, agg_grads)])
else:
reduced_gv_list = []
for grad_and_vars in zip(*tower_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index],
num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_tower_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_tower_grads, packing
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists it will be returned as multiple
ranges.
Returns:
ranges, singles where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one tower.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with tf.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with tf.device(g.device):
members.append(tf.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with tf.device(members[0].device):
return tf.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with tf.device(gv[0][0].device):
with tf.name_scope('unpack'):
splits = tf.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(tower_grads, max_bytes=0):
"""Concatenate gradients together more intelligently.
Does binpacking
Args:
tower_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
"""
assert max_bytes >= 0
orig_grads = [g for g, _ in tower_grads[0]]
# Check to make sure sizes are accurate; not entirely important
assert all(g.dtype == tf.float32 for g in orig_grads)
sizes = [4 * g.shape.num_elements() for g in orig_grads]
print_stats(sizes)
small_ranges = []
large_indices = []
new_sizes = []
cur_range = []
cur_size = 0
for i, s in reversed(list(enumerate(sizes))):
if cur_size > max_bytes:
end_interval(cur_range, small_ranges, large_indices)
new_sizes.insert(0, cur_size)
cur_range = []
cur_size = 0
cur_range.insert(0, i)
cur_size += s
end_interval(cur_range, small_ranges, large_indices)
new_sizes.insert(0, cur_size)
print_stats(new_sizes)
num_gv = len(orig_grads)
packing = {}
if len(small_ranges):
new_tower_grads = []
for dev_idx, gv_list in enumerate(tower_grads):
assert len(gv_list) == num_gv, (
"Possible cause: "
"Networks constructed on different workers "
"don't have the same number of variables. "
"If you use tf.GraphKeys or tf.global_variables() "
"with multiple graphs per worker during network "
"construction, you need to use "
"appropriate scopes, see "
"https://github.com/ray-project/ray/issues/3136")
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_tower_grads.append(new_gv_list)
return new_tower_grads, packing
else:
return tower_grads, None
def unpack_small_tensors(tower_grads, packing):
"""Undo the structure alterations to tower_grads done by pack_small_tensors.
Args:
tower_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to tower_grads.
Returns:
new_tower_grads: identical to tower_grads except that concatentations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return tower_grads
new_tower_grads = []
num_devices = len(tower_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(tower_grads):
new_gv_list = gv_list[num_packed:]
for i in xrange(0, num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_tower_grads.append(new_gv_list)
return new_tower_grads
| [
2,
770,
2393,
318,
16573,
422,
3740,
1378,
12567,
13,
785,
14,
83,
22854,
11125,
14,
26968,
14306,
198,
2,
1220,
2436,
672,
14,
9866,
14,
46521,
14,
27110,
62,
66,
20471,
62,
26968,
14306,
14,
439,
445,
7234,
13,
9078,
198,
2,
198... | 2.256599 | 9,926 |
_base_ = ['./default_runtime.py']
checkpoint_config = dict(interval=10)
# yapf:disable
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
])
work_dir = './work_dirs/'
| [
62,
8692,
62,
796,
685,
4458,
14,
12286,
62,
43282,
13,
9078,
20520,
198,
9122,
4122,
62,
11250,
796,
8633,
7,
3849,
2100,
28,
940,
8,
198,
2,
331,
499,
69,
25,
40223,
198,
6404,
62,
11250,
796,
8633,
7,
198,
220,
220,
220,
1665... | 2.252747 | 91 |
# coding: utf-8
# # TV Script Generation
# In this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern).
# ## Get the Data
# The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
# In[72]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
# ## Explore the Data
# Play around with `view_sentence_range` to view different parts of the data.
# In[73]:
# Range of sentences to preview at the end of this cell.
view_sentence_range = (0, 10)

"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np

# Print corpus-level statistics: vocabulary size, scene/sentence/word counts.
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
# Scenes are separated by blank lines in the script text.
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
# ## Implement Preprocessing Functions
# The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
# - Lookup Table
# - Tokenize Punctuation
#
# ### Lookup Table
# To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
# - Dictionary to go from the words to an id, we'll call `vocab_to_int`
# - Dictionary to go from the id to word, we'll call `int_to_vocab`
#
# Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
# In[74]:
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary.

    :param text: The text of tv scripts split into words (a list of word strings)
    :return: A tuple of dicts (vocab_to_int, int_to_vocab), exact inverses of
             each other, covering every distinct word in `text`
    """
    # Build the vocabulary set once and fill both mappings in a single pass.
    # The original enumerated two independently constructed sets and relied on
    # them iterating in the same order for the dicts to stay consistent.
    vocab_to_int = {}
    int_to_vocab = {}
    for idx, word in enumerate(set(text)):
        vocab_to_int[word] = idx
        int_to_vocab[idx] = word
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
# ### Tokenize Punctuation
# We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".
#
# Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
# - Period ( . )
# - Comma ( , )
# - Quotation Mark ( " )
# - Semicolon ( ; )
# - Exclamation mark ( ! )
# - Question mark ( ? )
# - Left Parentheses ( ( )
# - Right Parentheses ( ) )
# - Dash ( -- )
# - Return ( \n )
#
# This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
# In[75]:
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.

    :return: Tokenize dictionary where the key is the punctuation and the
             value is the token
    """
    # Each token is wrapped in '||' so it cannot be mistaken for a real word
    # when the script text is split on whitespace.
    return {
        '.': '||Period||',
        ',': '||Comma||',
        '"': '||Quotation_Mark||',
        ';': '||Semicolon||',
        '!': '||Exclamation_Mark||',
        '?': '||Question_Mark||',
        '(': '||Left_Parentheses||',
        ')': '||Right_Parentheses||',
        '--': '||Double_Dash||',
        '\n': '||Return||',
    }
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
# ## Preprocess all the data and save it
# Running the code cell below will preprocess all the data and save it to file.
# In[76]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
# # Check Point
# This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
# In[77]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
# ## Build the Neural Network
# You'll build the components necessary to build a RNN by implementing the following functions below:
# - get_inputs
# - get_init_cell
# - get_embed
# - build_rnn
# - build_nn
# - get_batches
#
# ### Check the Version of TensorFlow and Access to GPU
# In[78]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
# ### Input
# Implement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:
# - Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.
# - Targets placeholder
# - Learning Rate placeholder
#
# Return the placeholders in the following the tuple `(Input, Targets, LearingRate)`
# In[79]:
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.

    :return: Tuple (input, targets, learning rate)
    """
    # Named placeholders so they can be retrieved from the loaded graph later.
    # Renamed the local from `input` to avoid shadowing the builtin.
    inputs = tf.placeholder(tf.int32, (None, None), "input")
    targets = tf.placeholder(tf.int32, (None, None), "targets")
    learning_rate = tf.placeholder(tf.float32, name="learning_rate")
    return inputs, targets, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
# ### Build RNN Cell and Initialize
# Stack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).
# - The Rnn size should be set using `rnn_size`
# - Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell#zero_state) function
# - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)
#
# Return the cell and initial state in the following tuple `(Cell, InitialState)`
# In[80]:
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.

    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    # A single LSTM layer wrapped in a MultiRNNCell; the zero state is named
    # "initial_state" so it can be fetched from the graph by name later.
    lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size, forget_bias=1.0)
    multi_cell = tf.contrib.rnn.MultiRNNCell([lstm])
    initial_state = tf.identity(
        multi_cell.zero_state(batch_size, tf.float32), "initial_state")
    return multi_cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
# ### Word Embedding
# Apply embedding to `input_data` using TensorFlow. Return the embedded sequence.
# In[81]:
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.

    :param input_data: TF placeholder for text input (int word ids).
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Embedding matrix initialized uniformly in [-1, 1). The original passed a
    # stray positional `tf.int32` to tf.Variable, which landed in its
    # `trainable` parameter by accident; the variable should simply use the
    # default (trainable, float) behavior.
    embeddings = tf.Variable(tf.random_uniform([vocab_size, embed_dim], -1.0, 1.0))
    embedded_input = tf.nn.embedding_lookup(embeddings, input_data)
    return embedded_input
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
# ### Build RNN
# You created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.
# - Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn)
# - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)
#
# Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
# In[82]:
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell.

    :param cell: RNN Cell
    :param inputs: Input text data, batch-major: [batch_size, seq_length, embed_dim]
    :return: Tuple (Outputs, Final State)
    """
    # Inputs come from get_embed() and are batch-major, so time_major must be
    # False (the default). The original passed time_major=True, which makes
    # dynamic_rnn interpret the batch axis as the time axis.
    outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the final state so it can be fetched from the loaded graph later.
    final_state = tf.identity(final_state, "final_state")
    return outputs, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
# ### Build the Neural Network
# Apply the functions you implemented above to:
# - Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.
# - Build RNN using `cell` and your `build_rnn(cell, inputs)` function.
# - Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.
#
# Return the logits and final state in the following tuple (Logits, FinalState)
# In[83]:
def build_nn(cell, rnn_size, input_data, vocab_size):
    """
    Build part of the neural network: embedding -> RNN -> linear output layer.

    :param cell: RNN cell
    :param rnn_size: Size of rnns (also used as the embedding dimension here)
    :param input_data: Input data (int word ids, [batch_size, seq_length])
    :param vocab_size: Vocabulary size
    :return: Tuple (Logits, FinalState)
    """
    embed = get_embed(input_data, vocab_size, rnn_size)
    # `outputs` is the per-timestep RNN output (the original confusingly
    # reused the name `cell` for it).
    outputs, final_state = build_rnn(cell, embed)
    # The spec calls for a *linear* activation on the output layer;
    # fully_connected defaults to ReLU, so activation_fn=None is required.
    logits = tf.contrib.layers.fully_connected(
        outputs, vocab_size, activation_fn=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
# ### Batches
# Implement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:
# - The first element is a single batch of **input** with the shape `[batch size, sequence length]`
# - The second element is a single batch of **targets** with the shape `[batch size, sequence length]`
#
# If you can't fill the last batch with enough data, drop the last batch.
#
# For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3)` would return a Numpy array of the following:
# ```
# [
# # First Batch
# [
# # Batch of Input
# [[ 1 2 3], [ 7 8 9]],
# # Batch of targets
# [[ 2 3 4], [ 8 9 10]]
# ],
#
# # Second Batch
# [
# # Batch of Input
# [[ 4 5 6], [10 11 12]],
# # Batch of targets
# [[ 5 6 7], [11 12 13]]
# ]
# ]
# ```
# In[84]:
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target.

    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array with shape
             (number of batches, 2, batch_size, seq_length); any words that
             don't fill a whole batch are dropped
    """
    words_per_batch = batch_size * seq_length
    num_batches = len(int_text) // words_per_batch
    total = num_batches * words_per_batch  # drop the tail that doesn't fill a batch
    xdata = np.array(int_text[:total])
    # Targets are the inputs shifted left by one word.
    ydata = np.array(int_text[1:total + 1])
    if len(ydata) < total:
        # len(int_text) divides evenly, so there is no word after the last
        # input; wrap the final target around to the first word. (The original
        # produced a short ydata here, crashing in reshape().)
        ydata = np.append(ydata, int_text[0])
    # Reshape to (batch_size, num_batches*seq_length), then slice along the
    # time axis so consecutive batches continue each row's sequence.
    x_batches = np.split(xdata.reshape(batch_size, -1), num_batches, 1)
    y_batches = np.split(ydata.reshape(batch_size, -1), num_batches, 1)
    return np.asarray(list(zip(x_batches, y_batches)))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
# ## Neural Network Training
# ### Hyperparameters
# Tune the following parameters:
#
# - Set `num_epochs` to the number of epochs.
# - Set `batch_size` to the batch size.
# - Set `rnn_size` to the size of the RNNs.
# - Set `seq_length` to the length of sequence.
# - Set `learning_rate` to the learning rate.
# - Set `show_every_n_batches` to the number of batches the neural network should print progress.
# In[85]:
# Number of Epochs
num_epochs = 10
# Batch Size
batch_size = 100
# RNN Size
# NOTE(review): 10 units is very small for word-level language modelling;
# expect a high training loss -- consider tuning upward.
rnn_size = 10
# Sequence Length
seq_length = 10
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 10

"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# Directory where the trained model checkpoint will be written.
save_dir = './save'
# ### Build the Graph
# Build the graph using the neural network you implemented.
# In[86]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
# ## Train
# Train the neural network on the preprocessed data.  If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
# In[87]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
# ## Save Parameters
# Save `seq_length` and `save_dir` for generating a new TV script.
# In[88]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
# # Checkpoint
# In[89]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
# ## Implement Generate Functions
# ### Get Tensors
# Get tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graph#get_tensor_by_name). Get the tensors using the following names:
# - "input:0"
# - "initial_state:0"
# - "final_state:0"
# - "probs:0"
#
# Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
# In[102]:
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # Look each tensor up by the name it was given when the graph was built.
    # The local is named input_tensor (not `input`) to avoid shadowing the
    # Python builtin of the same name.
    input_tensor = loaded_graph.get_tensor_by_name('input:0')
    initial_state = loaded_graph.get_tensor_by_name('initial_state:0')
    final_state = loaded_graph.get_tensor_by_name('final_state:0')
    probs = loaded_graph.get_tensor_by_name('probs:0')
    return input_tensor, initial_state, final_state, probs
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
# ### Choose Word
# Implement the `pick_word()` function to select the next word using `probabilities`.
# In[127]:
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Sample a word id from the predicted distribution rather than always
    # taking the argmax, so the generated text stays varied.
    word_ids = np.arange(len(int_to_vocab))
    sampled = np.random.choice(word_ids, 1, p=probabilities)
    return int_to_vocab[sampled[0]]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
# ## Generate TV Script
# This will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
# In[128]:
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
# # The TV Script is Nonsensical
# It's ok if the TV script doesn't make any sense.  We trained on less than a megabyte of text.  In order to get good results, you'll have to use a smaller vocabulary or get more data.  Luckily there's more data!  As we mentioned at the beginning of this project, this is a subset of [another dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data).  We didn't have you train on all the data, because that would take too long.  However, you are free to train your neural network on all the data.  After you complete the project, of course.
# # Submitting This Project
# When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_tv_script_generation.ipynb" and save it as a HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission.
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
3195,
12327,
16588,
198,
2,
554,
428,
1628,
11,
345,
1183,
7716,
534,
898,
685,
8890,
31410,
16151,
5450,
1378,
268,
13,
31266,
13,
2398,
14,
15466,
14,
464,
62,
8890,
31410,... | 2.781608 | 7,688 |
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np
| [
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
6738,
21121,
17,
13,
29412,
1330,
4755,
201,
198,
6738,
14078,
1330,
1813,
201,
198,
11748,
21121,
17,
13,
29412,
13,
36362,
313,
8497,
62,
9288,
62,
22602,
355,
289,
84,
201,
198,
... | 2.75 | 52 |
"""
This file contains the grammar and code for parsing a SQL query
string into a tree of operators defined in ops.py
The key thing this DOES NOT do is convert the FROM clause
into a tree of Join operators. This is performed in
optimizer.py because we need additional logic to figure out
whether or not tables have join conditions
"""
import re
import math
import numpy as np
from .ops import *
from .udfs import *
from .parseops import *
from dateutil.parser import parse as parsedate
from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor
grammar = Grammar(
r"""
query = ws select_cores orderby? limit? ws
select_cores = select_core (compound_op select_core)*
select_core = SELECT distinct_clause? wsp select_results from_clause? where_clause? gb_clause? having_clause?
select_results = select_result (ws "," ws select_result)*
select_result = sel_res_all_star / sel_res_tab_star / sel_res_val / sel_res_col
sel_res_tab_star = name ".*"
sel_res_all_star = "*"
sel_res_val = exprand (AS wsp name)?
sel_res_col = col_ref (AS wsp name)
distinct_clause = wsp ("DISTINCT" / "distinct")
from_clause = FROM join_source
join_source = ws single_source (ws "," ws single_source)*
single_source = source_func / source_table / source_subq
source_table = table_name (AS wsp name)?
source_subq = "(" ws query ws ")" (AS wsp name)?
source_func = function (AS wsp name)?
where_clause = WHERE wsp exprand
gb_clause = GROUP BY group_clause
group_clause = grouping_term (ws "," grouping_term)*
grouping_term = ws expr
having_clause = HAVING wsp exprand
orderby = ORDER BY ordering_term (ws "," ordering_term)*
ordering_term = ws expr (ASC/DESC)?
limit = LIMIT wsp expr (OFFSET wsp expr)?
col_ref = (table_name ".")? column_name
exprand = expr (AND wsp expr)*
expr = btwnexpr / inexpr / biexpr / unexpr / value
inexpr = value IN wsp value
btwnexpr = value BETWEEN wsp value AND wsp value
biexpr = value ws binaryop_no_andor ws expr
unexpr = unaryop expr
value = parenq /
parenval /
listval /
number /
boolean /
date /
function /
col_ref /
string /
attr
parenval = "(" ws expr ws ")"
parenq = "(" query ")"
listval = "(" ws expr (ws "," ws expr)* ws ")"
function = fname "(" ws arg_list? ws ")"
arg_list = expr (ws "," ws expr)*
number = ~"\d*\.?\d+"i
string = ~"([\"\'])(\\\\?.)*?\\1"i
attr = ~"\w[\w\d]*"i
fname = ~"\w[\w\d]*"i
boolean = "true" / "false"
date = "date(" dateregex ")"
compound_op = "UNION" / "union"
binaryop = "+" / "-" / "*" / "/" / "==" / "=" / "<>" / "!=" /
"<=" / ">=" / "<" / ">" / "and" /
"AND" / "or" / "OR" / "like" / "LIKE"
binaryop_no_andor = "+" / "-" / "*" / "/" / "==" / "=" / "<>" / "!=" /
"<=" / ">=" / "<" / ">" / "or " / "OR " / "like" / "LIKE"
unaryop = "+" / "-" / "not" / "NOT"
ws = ~"\s*"i
wsp = ~"\s+"i
dateregex = ~"'[0-9]{4}[\-][0-9]{2}[\-][0-9]{2}'"
name = ~"[a-zA-Z\_][\w\_]*"i / ~"`[a-zA-Z][\w\.\-\_\:\*]*`"i / ~"\[[a-zA-Z][\w\.\-\_\:\*]*\]"i
table_name = name
column_name = name
ADD = wsp ("ADD" / "and")
ALL = wsp ("ALL" / "all")
ALTER = wsp ("ALTER" / "alter")
AND = wsp ("AND" / "and")
AS = wsp ("AS" / "as")
ASC = wsp ("ASC" / "asc")
BETWEEN = wsp ("BETWEEN" / "between")
BY = wsp ("BY" / "by")
CAST = wsp ("CAST" / "cast")
COLUMN = wsp ("COLUMN" / "column")
DESC = wsp ("DESC" / "desc")
DISTINCT = wsp ("DISTINCT" / "distinct")
E = "E"
ESCAPE = wsp ("ESCAPE" / "escape")
EXCEPT = wsp ("EXCEPT" / "except")
EXISTS = wsp ("EXISTS" / "exists")
EXPLAIN = ws ("EXPLAIN" / "explain")
EVENT = ws ("EVENT" / "event")
FORALL = wsp ("FORALL" / "forall")
FROM = wsp ("FROM" / "from")
GLOB = wsp ("GLOB" / "glob")
GROUP = wsp ("GROUP" / "group")
HAVING = wsp ("HAVING" / "having")
IN = wsp ("IN" / "in")
INNER = wsp ("INNER" / "inner")
INSERT = ws ("INSERT" / "insert")
INTERSECT = wsp ("INTERSECT" / "intersect")
INTO = wsp ("INTO" / "into")
IS = wsp ("IS" / "is")
ISNULL = wsp ("ISNULL" / "isnull")
JOIN = wsp ("JOIN" / "join")
KEY = wsp ("KEY" / "key")
LEFT = wsp ("LEFT" / "left")
LIKE = wsp ("LIKE" / "like")
LIMIT = wsp ("LIMIT" / "limit")
MATCH = wsp ("MATCH" / "match")
NO = wsp ("NO" / "no")
NOT = wsp ("NOT" / "not")
NOTNULL = wsp ("NOTNULL" / "notnull")
NULL = wsp ("NULL" / "null")
OF = wsp ("OF" / "of")
OFFSET = wsp ("OFFSET" / "offset")
ON = wsp ("ON" / "on")
OR = wsp ("OR" / "or")
ORDER = wsp ("ORDER" / "order")
OUTER = wsp ("OUTER" / "outer")
PRIMARY = wsp ("PRIMARY" / "primary")
QUERY = wsp ("QUERY" / "query")
RAISE = wsp ("RAISE" / "raise")
REFERENCES = wsp ("REFERENCES" / "references")
REGEXP = wsp ("REGEXP" / "regexp")
RENAME = wsp ("RENAME" / "rename")
REPLACE = ws ("REPLACE" / "replace")
RETURN = wsp ("RETURN" / "return")
ROW = wsp ("ROW" / "row")
SAVEPOINT = wsp ("SAVEPOINT" / "savepoint")
SELECT = ws ("SELECT" / "select")
SET = wsp ("SET" / "set")
TABLE = wsp ("TABLE" / "table")
TEMP = wsp ("TEMP" / "temp")
TEMPORARY = wsp ("TEMPORARY" / "temporary")
THEN = wsp ("THEN" / "then")
TO = wsp ("TO" / "to")
UNION = wsp ("UNION" / "union")
USING = wsp ("USING" / "using")
VALUES = wsp ("VALUES" / "values")
VIRTUAL = wsp ("VIRTUAL" / "virtual")
WITH = wsp ("WITH" / "with")
WHERE = wsp ("WHERE" / "where")
"""
)
def flatten(children, sidx, lidx):
    """
    Helper function used in Visitor to flatten and filter
    lists of lists
    """
    # Start with the element at sidx, then append every truthy element
    # found at lidx (wrapping a lone non-list value in a list first).
    tail = children[lidx]
    if not isinstance(tail, list):
        tail = [tail]
    result = [children[sidx]]
    result.extend(item for item in tail if item)
    return result
class Visitor(NodeVisitor):
    """
    Walks parse trees produced by the grammar defined above.

    Each expression in the grammar above of the form
        XXX   = ....
    can be handled with a custom function by writing
        def visit_XXX(self, node, children):
    You can assume the elements in children are the handled
    versions of the corresponding child nodes
    """
    # Binding the module-level grammar lets parsimonious drive parsing
    # directly from this visitor class.
    grammar = grammar
    #
    # SELECT CLAUSE
    #
    #
    # FROM CLAUSE
    #
    #
    # Other clauses
    #
    ############################################
    #
    # Expressions
    #
    ############################################
| [
37811,
198,
1212,
2393,
4909,
262,
23491,
290,
2438,
329,
32096,
257,
16363,
12405,
198,
8841,
656,
257,
5509,
286,
12879,
5447,
287,
39628,
13,
9078,
198,
198,
464,
1994,
1517,
428,
38359,
5626,
466,
318,
10385,
262,
16034,
13444,
198,... | 2.174471 | 3,118 |
import os
import unittest
from ..ddup import dedup_lines_bloom
| [
11748,
28686,
198,
11748,
555,
715,
395,
198,
6738,
11485,
67,
646,
79,
1330,
4648,
929,
62,
6615,
62,
2436,
4207,
628
] | 2.909091 | 22 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 SkyWater PDK Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
"""
run_all_drc.py --- A script that will run run_standard_drc for all .gds files
under the cells/ folder.
Must be run from repository root.
Usage: python3 run_all_drc.py --help
Results:
Prints a report to standard output.
"""
import os
import re
import subprocess
import traceback
from concurrent import futures
from typing import List, Tuple
import click
acceptable_errors = []
SCRIPT_DIR = os.path.realpath(os.path.dirname(__file__))
STANDARD_DRC_SCRIPT = os.path.join(SCRIPT_DIR, "run_standard_drc.py")
PDK_SUBSET = os.getenv("PDK_ROOT") or os.path.join(SCRIPT_DIR, "sky130A")
DRCError = Tuple[str, List[str]]
PARSE_DRC_REPORT_EXAMPLE = """
This first set of lines is the 'header':
DRC errors for a cell that doesn't exist
It's skipped over by this function.
--------------------------------------------
This is an acceptable error.
These lines are details for the acceptable error.
There are usually a couple of lines.
This is an unacceptable error.
These lines are details for the unacceptable error.
There are usually a couple of lines.
This is another unacceptable error.
It has less lines of detail.
"""
def parse_drc_report(
        report: str, acceptable_errors: List[str]) -> List[DRCError]:
    """
    Parse a magic DRC report into a list of structured errors.

    Takes a magic report in the format as seen in PARSE_DRC_REPORT_EXAMPLE
    above, and returns all errors as a list of tuples, where the first element
    of the tuple is the name of the error and the other lines are the details.
    Errors listed in acceptable_errors are dropped.

    >>> from pprint import pprint as p
    >>> p(parse_drc_report(
    ...     PARSE_DRC_REPORT_EXAMPLE.strip(),
    ...     ["This is an acceptable error."]))
    [('This is an unacceptable error.',
      ['These lines are details for the unacceptable error.',
       'There are usually a couple of lines.']),
     ('This is another unacceptable error.', ['It has less lines of detail.'])]
    """
    # Blank lines separate the header and each error section; within a
    # section, the first line names the error and the rest are details.
    sections = [block.split("\n") for block in report.split("\n\n")]
    # sections[0] is the header, which is skipped.
    return [
        (lines[0], lines[1:])
        for lines in sections[1:]
        if lines[0] not in acceptable_errors
    ]
def drc_gds(path: str) -> Tuple[str, List[DRCError]]:
    """
    Run the standard DRC script on a single GDS file.

    Takes a GDS path. Returns the name of the cell and returns a list of
    DRC errors.

    :param path: Path to a .gds file; the cell name is the basename with
        the ".gds" extension stripped.
    :return: Tuple (cell name, list of DRC errors). If magic produced no
        report file at all, a single synthetic error carrying the DRC
        script's stdout/stderr is returned instead.
    """
    cell_name = os.path.basename(path)[:-4]
    # Point the DRC run at the PDK subset bundled with this repository.
    env = os.environ.copy()
    env["PDKPATH"] = PDK_SUBSET
    res = subprocess.run([
        "python3",
        STANDARD_DRC_SCRIPT,
        path
    ], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    report_path = path[:-4] + "_drc.txt"
    try:
        # Context manager closes the report handle promptly (the original
        # open(...).read() leaked the file object). Keep the try body
        # minimal: only the open/read can raise FileNotFoundError.
        with open(report_path) as report_file:
            report = report_file.read()
    except FileNotFoundError:
        # Magic crashed or never ran: surface its output as one error.
        return cell_name, [
            (
                "Magic did not produce a report.",
                [res.stdout.decode("utf8"), res.stderr.decode("utf8")]
            )
        ]
    if os.getenv("ACTIONS_STEP_DEBUG"):
        # Emit the full report as a collapsible group in GitHub Actions logs.
        print("::group::%s" % report_path)
        print(report)
        print("::endgroup::")
    return cell_name, parse_drc_report(report, acceptable_errors)
@click.command()
@click.option(
"-t",
"--top",
default=".",
help="Directory to run the process inside."
" Default: Current working directory"
)
@click.option(
"-a",
"--acceptable-errors-file",
default="/dev/null",
help="A file containing a list of newline-delimited acceptable DRC errors."
" Default: No file will be read and all errors deemed unacceptable."
)
@click.option(
"-m",
"--match-cell-directories",
default="^.*$",
help="A regex that that will match cell names to be checked (which will"
" match subdirectories under cells/)."
" Default: ^.*$ (matches everything)"
)
@click.option(
"-b",
"--known-bad",
default="",
help="A comma,delimited list of cells that are known bad and"
" thus do not cause a non-zero exit upon failure."
" Default: empty string (None of them.)"
)
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
12131,
5274,
19184,
14340,
42,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,... | 2.69022 | 1,769 |
from twisted.python import util
util.moduleMovedForSplit('twisted.protocols.irc', 'twisted.words.protocols.irc',
'IRC protocol support', 'Words',
'http://twistedmatrix.com/trac/wiki/TwistedWords',
globals())
| [
6738,
19074,
13,
29412,
1330,
7736,
198,
198,
22602,
13,
21412,
44,
2668,
1890,
41205,
10786,
4246,
6347,
13,
11235,
4668,
82,
13,
1980,
3256,
705,
4246,
6347,
13,
10879,
13,
11235,
4668,
82,
13,
1980,
3256,
198,
220,
220,
220,
220,
... | 1.993007 | 143 |
# -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli.nice_command import NiceCommand
import tccli.error_msg as ErrorMsg
import tccli.help_template as HelpTemplate
from tccli import __version__
from tccli.utils import Utils
from tccli.configure import Configure
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.ssl.v20191205 import ssl_client as ssl_client_v20191205
from tencentcloud.ssl.v20191205 import models as models_v20191205
from tccli.services.ssl import v20191205
from tccli.services.ssl.v20191205 import help as v20191205_help
CLIENT_MAP = {
"v20191205": ssl_client_v20191205,
}
MODELS_MAP = {
"v20191205": models_v20191205,
}
ACTION_MAP = {
"DescribeCertificates": doDescribeCertificates,
"ModifyCertificateProject": doModifyCertificateProject,
"UploadCertificate": doUploadCertificate,
"CancelCertificateOrder": doCancelCertificateOrder,
"CommitCertificateInformation": doCommitCertificateInformation,
"DeleteCertificate": doDeleteCertificate,
"ModifyCertificateAlias": doModifyCertificateAlias,
"DownloadCertificate": doDownloadCertificate,
"ReplaceCertificate": doReplaceCertificate,
"ApplyCertificate": doApplyCertificate,
"DescribeCertificateOperateLogs": doDescribeCertificateOperateLogs,
"SubmitCertificateInformation": doSubmitCertificateInformation,
"DescribeCertificateDetail": doDescribeCertificateDetail,
"DescribeCertificate": doDescribeCertificate,
}
AVAILABLE_VERSION_LIST = [
v20191205.version,
]
AVAILABLE_VERSIONS = {
'v' + v20191205.version.replace('-', ''): {"help": v20191205_help.INFO,"desc": v20191205_help.DESC},
}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
256,
535,
4528,
13,
25811,
62,
13086,
355,
18634,
7469,
500,
198,
11748,
256,
535,
4528,
13,
18982,
62,
22915,
355,
18980,
2... | 2.954688 | 640 |
from selenium import webdriver
from time import sleep
import requests
from aigpy import download
if __name__ == '__main__':
Main()
| [
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
640,
1330,
3993,
198,
11748,
7007,
198,
6738,
257,
328,
9078,
1330,
4321,
628,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
877... | 3.25 | 44 |
import os
import sys
import numpy as np
import cv2
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from skimage.measure import find_contours
class MaskRCNNTracker():
"""Implements tracker based on segmentation outputs.
Params:
-
Inputs:
-
Output:
A dictionay that maps the current frame's instance indexes to
the unique instance IDs that identify individual objects
"""
    def __init__(self, class_names):
        """
        Configure tracker parameters and initialize dynamic state.

        class_names: list of class names of the dataset.
                     used to map detected instances to classes
        """
        self.class_names = class_names
        # How many frames of per-instance history to keep.
        self.instance_memory_length = 2
        self.image_size = None # the image size (x, y) of the current frame
        self.occlusion_factor_thresh = 0.4 # parameter: overlap ratio treated as occlusion
        # the inner area consists of the inner grids not touching any sides
        self.N_divide_width = 8 # the number of grids along x
        self.N_divide_height = 4 # the number of grids along y
        self.left_top_right_bottom = None # A rectangle for inner frame
        # If this flag set True, do not do inner area check:
        # without inner area check ===> more false positives
        # with inner area check ===> reduction in false positives, more false negatives
        self.disable_inner_area_check = False
        # once the number of consecutive inactivity frames exceeds the period,
        # reset the tracker
        self.max_inactivity_period = 50 # in frames
        self.frame_stale_timer = 20 # do not keep info about a frame that is too old
        # the maximum Euclidean histogram distance for two similar histograms
        self.hist_dissimilarity_thresh = 0.2
        # reset() initializes all mutable buffers and counters.
        self.reset()
    def fill_polygons_in_bounding_map(self, poly_vertices):
        """
        Given one or multiple polygons each consisting of a sequence of vertices,
        determine a box or map that encloses them. Then fill the polygon(s) within
        the map and calculate its area.
        Input:
        - poly_vertices: A list of polygons. Each item is an N-by-2 integer
          array of points.
        Return:
        - Tuple (left, top, right, bottom, filled 2-D map, filled pixel
          count, current frame number).
        # NOTE(review): callers pass skimage find_contours output, whose
        # points are (row, col); so column 0 here may actually be the
        # vertical axis despite the left/right naming — confirm against
        # receive_first_segmentation_output's use of the tuple.
        """
        left = 10000 # sufficiently large coordinate in x
        right = 0 # the minimum possible coordinate in x
        top = 10000 # sufficiently large coordinate in y
        bottom = 0 # the minimum possible coordinate in y
        # polyVertices: a list of N-by-2 arrays; grow the bounds to
        # enclose every polygon.
        for poly in poly_vertices:
            left = min(left, np.amin(poly[:,0]))
            right = max(right, np.amax(poly[:,0]))
            top = min(top, np.amin(poly[:,1]))
            bottom = max(bottom, np.amax(poly[:,1]))
        # Shift all polygons so the bounding box origin becomes (0, 0).
        pts = []
        for poly in poly_vertices:
            pts.append(poly-np.array([left,top]))
        # This map is a 2-D array covering the bounding box.
        map = np.zeros((bottom-top+1, right-left+1),dtype=np.uint8)
        # mask the area (rasterize the polygons into the map)
        cv2.fillPoly(map, pts, color=(255))
        # Area = number of filled pixels.
        polyArea = np.count_nonzero(map)
        return (left, top, right, bottom, map, polyArea, self.frame_number)
def compute_intersection_polygons(self, tuplePolygonA, tuplePolygonB):
"""
Calculate intersection between two regions each outlined by one
or multiple polygons.
Inputs:
- tuplePolygonA, tuplePolygonB: A tuple to represent a region outlined
by one or multiple polygons. See the output of method
"fill_polygons_in_bounding_map".
Return: Intersection over Union (IoU) in the range from 0 to 1.0
"""
# tuplePolygonA and tuplePolygonB
# (xmin, ymin, xmax, ymax, filledPolygon2Dmap, frame_number)
A_left = tuplePolygonA[0]
A_right = tuplePolygonA[2]
A_top = tuplePolygonA[1]
A_bottom = tuplePolygonA[3]
B_left = tuplePolygonB[0]
B_right = tuplePolygonB[2]
B_top = tuplePolygonB[1]
B_bottom = tuplePolygonB[3]
# check if the two maps intersect
if B_left >= A_right or B_top >= A_bottom:
return 0
if A_left >= B_right or A_top >= B_bottom:
return 0
# calculate the overlapping part of the two bounding maps
Overlap_left = max(A_left, B_left)
Overlap_right = min(A_right, B_right)
Overlap_top = max(A_top, B_top)
Overlap_bottom = min(A_bottom, B_bottom)
# get the overlapping part within the two maps respectively
Overlap_A_map = tuplePolygonA[4][(Overlap_top-A_top):(min(A_bottom,Overlap_bottom)-A_top+1),
(Overlap_left-A_left):(min(A_right,Overlap_right)-A_left+1)]
Overlap_B_map = tuplePolygonB[4][(Overlap_top-B_top):(min(B_bottom,Overlap_bottom)-B_top+1),
(Overlap_left-B_left):(min(B_right,Overlap_right)-B_left+1)]
# calculate the intersection between the two silhouettes within the overlapping part
Overlap_map_boolean = np.logical_and(Overlap_A_map, Overlap_B_map)
# calculate the area of silhouette intersection
Overlap_count = np.count_nonzero(Overlap_map_boolean)
Union_count = tuplePolygonA[5] + tuplePolygonB[5] - Overlap_count
return Overlap_count/Union_count
def reset(self):
"""
Reset the tracker: flush all buffers and reset all internal dynamic state variables
"""
self.inactivity_counter = 0 # the number of consectutive frames where no instances detected
self.instance_id_manager = 0
self.dict_instance_history = {}
self.dict_trajectories = {}
self.frame_number = 0 # the current frame number
self.dict_location_prediction = {}
self.dict_appearance_prediction = {}
# store each instance's states.
# For example, "occlusion"
self.dict_instance_states = {}
self.dict_hue_histogram = {} # keys: ID assigned to instance under track
# If an instance is deemed to be out of track, its relevant information like color
# histogram will be stored in the dictionary. It may be re-claimed later based on
# color matching.
self.dict_instances_out_of_track = {} # keys: instance unique ID
self.list_recycled_instance_id = []
def assign_unique_instance_id(self):
"""
Allocate a unique ID to an instance.
Return the ID number
"""
if len(self.list_recycled_instance_id) > 0:
uid = self.list_recycled_instance_id[0]
self.list_recycled_instance_id.pop(0)
else:
self.instance_id_manager += 1
uid = self.instance_id_manager
return uid
    def receive_first_segmentation_output(self, results, image):
        """
        This method is called when the segmentation results for the very first frame received.
        It seeds the tracker: every detected person gets a fresh unique ID.
        Input:
        - results: segmentation results as output of Mask R-CNN
          (dict with 'rois', 'masks', 'class_ids', 'scores')
        - image: the current image or video frame
        Output:
        - None if nothing was detected, otherwise a Tuple:
          item 0: the current instance ID to assigned unique ID (dict)
          item 1: Contours for current instances (dict)
          item 2: center (cx, cy) of each instance's bounding box (dict)
        """
        boxes = results['rois']
        masks = results['masks']
        class_ids = results['class_ids']
        scores = results['scores']
        # Number of instances
        N = boxes.shape[0]
        if not N:
            self.inactivity_counter += 1
            return None
        else:
            assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
        # This is the first frame, so the frame counter starts at 1.
        self.frame_number = 1
        # Find the instances of interest, e.g., persons with a
        # sufficiently confident score.
        instances_of_interest = []
        for i in range(N):
            class_id = class_ids[i]
            if class_id == self.class_names.index('person') and scores[i] >= 0.75:
                instances_of_interest.append(i)
        if len(instances_of_interest) == 0:
            self.inactivity_counter += 1
        else:
            self.inactivity_counter = 0
        # calculate the histograms of color (hue) for each segmented instance
        # (calculate_hue_histograms is defined elsewhere in this class)
        dict_histograms_hue = self.calculate_hue_histograms(instances_of_interest, masks, image)
        # Find the contours that cover detected instances
        dict_contours = {}
        for i in instances_of_interest:
            # Boolean mask of instance i.
            mask = masks[:, :, i]
            # Mask Polygon
            # Pad to ensure proper polygons for masks that touch image edges.
            padded_mask = np.zeros(
                (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
            padded_mask[1:-1, 1:-1] = mask
            dict_contours[i] = find_contours(padded_mask, 0.5)
        # Rasterize each instance's contours and compute its filled area.
        dict_polygons_in_bounding_map = {}
        for i in dict_contours:
            pts2d = [] # each element is an array of the shape (-1,2)
            for c in dict_contours[i]: # the value is a list
                pts2d.append(c.astype(np.int32))
            dict_polygons_in_bounding_map[i] = self.fill_polygons_in_bounding_map(pts2d)
        # Initialize the buffers: every instance in the first frame gets
        # a new unique ID (the ID counter must still be untouched here).
        dict_inst_index_to_uid = {} # mapping current frame's instance index to unique ID
        assert self.instance_id_manager == 0
        for i in dict_polygons_in_bounding_map:
            uid = self.assign_unique_instance_id()
            dict_inst_index_to_uid[i] = uid
            self.dict_instance_history[uid] = [dict_polygons_in_bounding_map[i]]
            y1, x1, y2, x2 = boxes[i]
            self.dict_trajectories[uid] = [[self.frame_number, (x1 + x2)//2, (y1 + y2)//2]]
            self.dict_hue_histogram[uid] = [dict_histograms_hue[i]]
        # calculate the center of the box that encloses an instance's contour
        # (tuple indices 0/2 feed cy and 1/3 feed cx here)
        dict_box_center = {}
        for i in dict_polygons_in_bounding_map:
            cy = (dict_polygons_in_bounding_map[i][0] + dict_polygons_in_bounding_map[i][2])//2
            cx = (dict_polygons_in_bounding_map[i][1] + dict_polygons_in_bounding_map[i][3])//2
            dict_box_center[i] = (cx, cy)
        # predict the locations of identified instances in the next frame
        # (predict_location / shift_instance_appearance are defined
        # elsewhere in this class)
        self.dict_location_prediction = {}
        for uid in self.dict_trajectories:
            self.dict_location_prediction[uid] = self.predict_location(uid)
            dx, dy = self.dict_location_prediction[uid][2:4]
            self.dict_appearance_prediction[uid] = self.shift_instance_appearance(uid, dx, dy)
        return (dict_inst_index_to_uid, dict_contours, dict_box_center)
def receive_subsequent_segmentation_output(self, results, image):
    """
    Update tracker states upon new detection results (every frame after the first).
    Matches the detected instances against tracked instances via contour IoU and
    hue-histogram similarity, reclaims lost instances, assigns unique IDs to new
    ones and updates trajectories/predictions.
    Input:
        - results: segmentation results as output of Mask R-CNN
        - image: the current image or video frame
    Output:
        - None when the frame contains no detections, otherwise a Tuple:
            item 0: the current instance index to assigned unique ID (dict)
            item 1: Contours for current instances (dict)
            item 2: centers (cx, cy) of the instances' bounding maps (dict)
    """
    boxes = results['rois']
    masks = results['masks']
    class_ids = results['class_ids']
    scores = results['scores']
    # Number of instances
    N = boxes.shape[0]
    if not N:
        self.inactivity_counter += 1
        return None
    else:
        assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
    # increment the frame counter
    self.frame_number += 1
    # pop up the old data if necessary
    self.update_buffers()
    # Find the instances of interest, e.g., persons
    instances_of_interest = []
    for i in range(N):
        class_id = class_ids[i]
        if class_id == self.class_names.index('person') and scores[i] >= 0.75:
            instances_of_interest.append(i)
    if len(instances_of_interest) == 0:
        # nothing of interest this frame; reset the tracker after a long gap
        self.inactivity_counter += 1
        if self.inactivity_counter >= self.max_inactivity_period:
            self.reset()
    else:
        self.inactivity_counter = 0
    # calculate the histograms of color (hue) for each segmented instances
    dict_histograms_hue = self.calculate_hue_histograms(instances_of_interest, masks, image)
    # Find the contours that cover detected instances
    dict_contours = {}
    for i in instances_of_interest:
        # Mask
        mask = masks[:, :, i]
        # Mask Polygon
        # Pad to ensure proper polygons for masks that touch image edges.
        padded_mask = np.zeros(
            (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
        padded_mask[1:-1, 1:-1] = mask
        dict_contours[i] = find_contours(padded_mask, 0.5)
    # Analyze the contours and calculate the areas
    dict_polygons_in_bounding_map = {}
    for i in dict_contours:
        pts2d = []  # each element is an array of the shape (-1,2)
        for c in dict_contours[i]:  # the value is a list
            pts2d.append(c.astype(np.int32))
        dict_polygons_in_bounding_map[i] = self.fill_polygons_in_bounding_map(pts2d)
    # Correspondence between existing instances and the instances in the current frame
    dict_inst_index_to_uid = {}  # mapping current frame's instance index to unique ID
    list_matching_scores = []
    dict_inst_occlusion = {}
    for i in dict_polygons_in_bounding_map:
        uid_matching = 0  # invalid ID
        max_iou = 0.0  # how much does it match the existing detected instances
        # here "uid" is a unique ID assigned to each detected instance
        for uid in self.dict_instance_history:
            contour_map = self.get_instance_appearance(uid)
            iou = self.compute_intersection_polygons(dict_polygons_in_bounding_map[i], contour_map)
            if iou > max_iou:
                max_iou = iou
                uid_matching = uid
        if max_iou > 0:
            list_matching_scores.append((i, uid_matching, max_iou))
    # greedy assignment: best IoU matches claim their unique IDs first
    list_matching_scores.sort(key=lambda item: item[2], reverse=True)  # in descending order
    uid_set = set(self.dict_instance_history.keys())
    # key = instance ID in the current frame
    # Values of IoU scores used for debugging purpose
    dict_instance_score_color_mismatch = {}
    for e in list_matching_scores:  # e is a tuple
        i = e[0]  # the instance ID in the current frame
        uid = e[1]  # unique existing instance ID
        iou_score = e[2]
        if iou_score > 0.05 and uid in uid_set:
            if not self.is_occluded_next_frame(uid):
                hue_dissimilarity = self.calculate_distance_between_histograms(dict_histograms_hue[i],
                                                                               self.dict_hue_histogram[uid][-1])
                if hue_dissimilarity < self.hist_dissimilarity_thresh:
                    uid_set.remove(uid)  # this unique ID is claimed and won't be taken by other instances
                    dict_inst_index_to_uid[i] = uid
                    self.dict_instance_history[uid].append(dict_polygons_in_bounding_map[i])  # store the current frame
                else:
                    dict_instance_score_color_mismatch[i] = iou_score  # mismatch probably due to color contamination
            else:
                dict_inst_occlusion[i] = True
    # What if the instances do not match any of the existing identified instances ?
    # The instances that appear suddenly within the inner area of frame may be false positives
    id_list = list(dict_polygons_in_bounding_map.keys())
    for i in id_list:
        if i not in dict_inst_index_to_uid:
            y1, x1, y2, x2 = boxes[i]
            # possibly a false positive or in occlusion
            is_within_inner_area = self.is_within_inner_area((x1 + x2)//2, (y1 + y2)//2)
            is_occluded = i in dict_inst_occlusion
            is_color_mismatch = i in dict_instance_score_color_mismatch  # this may not be a new instance
            if is_within_inner_area or is_occluded or is_color_mismatch:
                dict_polygons_in_bounding_map.pop(i)
                dict_contours.pop(i)
                instances_of_interest.remove(i)
    # Reclaim the instances out of track if possible
    for i in dict_polygons_in_bounding_map:
        if i not in dict_inst_index_to_uid:  # make sure it's not associated with any instance on track
            min_hue_dissimilarity = 1.0
            uid_min_hue_dissimilarity = 0
            for uid in self.dict_instances_out_of_track:
                histogram_color = self.dict_instances_out_of_track[uid]['hist_hue']
                hue_dissimilarity = self.calculate_distance_between_histograms(histogram_color, dict_histograms_hue[i])
                if hue_dissimilarity < min_hue_dissimilarity:
                    min_hue_dissimilarity = hue_dissimilarity
                    uid_min_hue_dissimilarity = uid
            if min_hue_dissimilarity < self.hist_dissimilarity_thresh:
                self.dict_instance_history[uid_min_hue_dissimilarity] = [dict_polygons_in_bounding_map[i]]
                dict_inst_index_to_uid[i] = uid_min_hue_dissimilarity
                self.dict_instances_out_of_track.pop(uid_min_hue_dissimilarity)
    # Now assign unique IDs to new instances
    for i in dict_polygons_in_bounding_map:
        if i not in dict_inst_index_to_uid:  # this would be a new instance
            uid = self.assign_unique_instance_id()
            self.dict_instance_history[uid] = [dict_polygons_in_bounding_map[i]]
            dict_inst_index_to_uid[i] = uid
    # calculate the center of the box that encloses a instance's contour
    dict_box_center = {}
    for i in dict_polygons_in_bounding_map:
        cy = (dict_polygons_in_bounding_map[i][0] + dict_polygons_in_bounding_map[i][2])//2
        cx = (dict_polygons_in_bounding_map[i][1] + dict_polygons_in_bounding_map[i][3])//2
        dict_box_center[i] = (cx, cy)
    # extend each matched instance's trajectory with the current box center
    for i in dict_inst_index_to_uid:
        y1, x1, y2, x2 = boxes[i]
        uid = dict_inst_index_to_uid[i]
        if uid not in self.dict_trajectories:
            self.dict_trajectories[uid] = [[self.frame_number, (x1 + x2)//2, (y1 + y2)//2]]
        else:
            self.dict_trajectories[uid].append([self.frame_number, (x1 + x2)//2, (y1 + y2)//2])
    # predict the locations of the identified instances in the next frame
    for uid in self.dict_trajectories:
        self.dict_location_prediction[uid] = self.predict_location(uid)
        dx, dy = self.dict_location_prediction[uid][2:4]
        self.dict_appearance_prediction[uid] = self.shift_instance_appearance(uid, dx, dy)
    # flag the instances predicted to be occluded in the next frame
    list_occlusion = self.predict_occlusion(self.occlusion_factor_thresh)
    self.dict_instance_states = {}
    for uid in list_occlusion:
        self.dict_instance_states[uid] = dict(occlusion=True)
    # append the current hue histograms for the matched instances
    for i in dict_inst_index_to_uid:
        uid = dict_inst_index_to_uid[i]
        if uid in self.dict_hue_histogram:
            self.dict_hue_histogram[uid].append(dict_histograms_hue[i])
        else:
            self.dict_hue_histogram[uid] = [dict_histograms_hue[i]]
    return (dict_inst_index_to_uid, dict_contours, dict_box_center)
def receive_segmentation_output(self, results, image):
    """
    Dispatch a new batch of segmentation results to the tracker.

    The very first batch initializes the tracker state; every later batch
    updates it.
    Input:
        - results: segmentation results as output of Mask R-CNN
        - image: the current image or video frame
    Output:
        - Tuple:
            item 0: the current instance index to assigned unique ID (dict)
            item 1: Contours for current instances (dict)
            item 2: centers of the instances' bounding maps (dict)
    """
    height, width = image.shape[0], image.shape[1]
    self.image_size = (width, height)
    self.update_inner_frame_area()
    is_first_batch = self.instance_id_manager == 0
    handler = (self.receive_first_segmentation_output
               if is_first_batch
               else self.receive_subsequent_segmentation_output)
    return handler(results, image)
def save_trajectory_to_textfile(self, uid, fname):
    """
    Dump a specified instance's location trajectory to a text file.
    Each line holds "frame_number<TAB>x<TAB>y". Does nothing for an
    unknown uid.
    Input:
        - uid: Unique instance ID
        - fname: output filename prefix ("<fname>_%04d.txt" is written)
    """
    if uid not in self.dict_trajectories:
        return
    out_path = str(fname) + "_%04d"%(uid) + ".txt"
    # 'with' guarantees the handle is closed even if a write raises
    # (the original open()/close() pair leaked the file on error)
    with open(out_path, "w") as outfile:
        for u in self.dict_trajectories[uid]:
            outfile.write(str(u[0])+"\t"+str(u[1])+"\t"+str(u[2])+"\n")
def estimate_velocity(self, uid):
    """
    Estimate the instance's mean velocity in pixels per frame.

    The trajectory is split in half; the velocity is the displacement of
    the mean position between the two halves divided by half the covered
    time span. Returns None for an unknown uid, (0, 0) when fewer than
    two points are recorded.
    """
    if uid not in self.dict_trajectories:
        return None
    points = np.array(self.dict_trajectories[uid])
    count = points.shape[0]  # the number of remembered positions
    if count < 2:
        # a single observation carries no motion information
        return (0, 0)
    half = count // 2
    early, late = points[0:half], points[half:]
    x_early, y_early = early[:, 1].mean(), early[:, 2].mean()
    x_late, y_late = late[:, 1].mean(), late[:, 2].mean()
    timespan = np.amax([1.0, (points[-1, 0] - points[0, 0])/2])
    return (round((x_late - x_early)/timespan), round((y_late - y_early)/timespan))
def predict_location(self, uid):
    """
    Predict the location (x, y) of the specified instance in the next frame.

    Returns (x_next, y_next, dx, dy) where dx/dy accumulate the displacement
    since the last confirmed trajectory update, or None for an unknown uid.
    """
    if uid not in self.dict_trajectories:
        return None
    assert uid in self.dict_instance_history
    # displacement accumulated relative to the last true location
    dx = dy = 0
    if self.dict_instance_history[uid][-1][6] == self.frame_number:
        # appearance was refreshed this frame: start from the true location
        _, x, y = self.dict_trajectories[uid][-1]
    else:
        # no fresh observation: chain from the prediction for the last frame
        assert uid in self.dict_location_prediction
        x, y, dx, dy = self.dict_location_prediction[uid]
    vx, vy = self.estimate_velocity(uid)
    # clamp the extrapolated position to the frame
    x_next = min([max([0, x + vx]), self.image_size[0]])
    y_next = min([max([0, y + vy]), self.image_size[1]])
    dx += vx
    dy += vy
    return (x_next, y_next, dx, dy)
def shift_instance_appearance(self, uid, dx, dy):
    """
    Generate the instance appearance at the predicted location for the next
    frame by translating the last stored appearance.
    Inputs:
        - uid: Unique ID of instance
        - dx, dy: location displacement relative to the last trajectory update
    Returns the shifted profile tuple, or None for an unknown uid.
    """
    if uid not in self.dict_instance_history:
        return None
    profile = self.dict_instance_history[uid][-1]
    # the stored polygon maps are rotated by 90 degrees, hence the
    # deliberate swap of the x and y displacements here
    dy, dx = int(dx), int(dy)
    row_limit = self.image_size[1]
    col_limit = self.image_size[0]

    def _clamp(value, upper):
        return min([max([0, value]), upper])

    left = _clamp(profile[0] + dx, row_limit)
    right = _clamp(profile[2] + dx, row_limit)
    top = _clamp(profile[1] + dy, col_limit)
    bottom = _clamp(profile[3] + dy, col_limit)
    return (left, top, right, bottom, profile[4], profile[5], self.frame_number)
def get_instance_appearance(self, uid):
    """
    Return the appearance estimate for instance *uid*.

    The predicted appearance for the upcoming frame takes precedence; when
    no prediction exists, fall back to the last recorded appearance.
    Returns None for an unknown uid.
    """
    if uid in self.dict_appearance_prediction:
        return self.dict_appearance_prediction[uid]
    if uid in self.dict_instance_history:
        return self.dict_instance_history[uid][-1]
    return None
def compute_occlusion_factor(self, tuplePolygonA, tuplePolygonB):
    """
    Calculate the occlusion factor of a region which may be partially
    or totally occluded by another region.
    Inputs:
        - tuplePolygonA, tuplePolygonB: tuples describing a region, laid out
          as (xmin, ymin, xmax, ymax, filled 2-D silhouette map, silhouette
          pixel count). See "fill_polygons_in_bounding_map".
    Return: how much region A is occluded by region B, in [0, 1].
    """
    a_left, a_top, a_right, a_bottom = (tuplePolygonA[0], tuplePolygonA[1],
                                        tuplePolygonA[2], tuplePolygonA[3])
    b_left, b_top, b_right, b_bottom = (tuplePolygonB[0], tuplePolygonB[1],
                                        tuplePolygonB[2], tuplePolygonB[3])
    # bounding boxes that do not intersect cannot occlude each other
    if b_left >= a_right or b_top >= a_bottom:
        return 0
    if a_left >= b_right or a_top >= b_bottom:
        return 0
    # the overlapping rectangle of the two bounding maps
    ov_left = max(a_left, b_left)
    ov_right = min(a_right, b_right)
    ov_top = max(a_top, b_top)
    ov_bottom = min(a_bottom, b_bottom)
    # cut the overlapping window out of each silhouette map
    window_a = tuplePolygonA[4][(ov_top-a_top):(min(a_bottom, ov_bottom)-a_top+1),
                                (ov_left-a_left):(min(a_right, ov_right)-a_left+1)]
    window_b = tuplePolygonB[4][(ov_top-b_top):(min(b_bottom, ov_bottom)-b_top+1),
                                (ov_left-b_left):(min(b_right, ov_right)-b_left+1)]
    # pixels covered by both silhouettes inside the overlapping window
    covered = np.logical_and(window_a, window_b)
    covered_count = np.count_nonzero(covered)
    assert tuplePolygonA[5] > 0
    return covered_count/tuplePolygonA[5]
def predict_occlusion(self, occlusion_factor_thresh):
    """
    Based on the predicted instance appearances for the next frame, find the
    existing instances that will be partially or totally occluded in the next
    frame.
    Input:
        - occlusion_factor_thresh: minimum occlusion factor for an instance
          to be reported as occluded
    Output: A list of unique instance IDs predicted to be occluded
    """
    occluded_uids = []
    uid_list = list(self.dict_appearance_prediction.keys())
    for uid1 in uid_list:
        max_factor = 0
        for uid2 in uid_list:
            if uid1 == uid2:
                continue
            factor = self.compute_occlusion_factor(
                self.dict_appearance_prediction[uid1],
                self.dict_appearance_prediction[uid2])
            if factor > max_factor:
                max_factor = factor
            if max_factor > occlusion_factor_thresh:
                # already above the threshold; no need to test the rest
                break
        if max_factor > occlusion_factor_thresh:
            occluded_uids.append(uid1)
    return occluded_uids
def is_occluded_next_frame(self, uid):
    """
    Report whether instance *uid* has been flagged as occluded for the
    upcoming frame (see predict_occlusion).
    """
    if uid not in self.dict_instance_states:
        return False
    return bool(self.dict_instance_states[uid]['occlusion'])
def update_inner_frame_area(self):
    """
    Recompute the inner rectangle of the frame from the current frame size
    (``self.image_size``), cutting one 1/N strip off each border as given by
    ``self.N_divide_width`` and ``self.N_divide_height``.
    Stores the result as ``self.left_top_right_bottom``.
    """
    width, height = self.image_size
    nx, ny = self.N_divide_width, self.N_divide_height
    self.left_top_right_bottom = (
        width // nx,                 # left
        height // ny,                # top
        (nx - 1) * width // nx,      # right
        (ny - 1) * height // ny,     # bottom
    )
def is_within_inner_area(self, x, y, leftside=True, rightside=True, topside=False, bottomside=True):
    """
    Check whether a point lies inside the inner area of the frame.
    Inputs:
        - x, y: coordinates of the location
        - leftside, rightside, topside, bottomside: when False, the
          corresponding border is not checked (objects never enter or leave
          the frame across that side)
    Note: always returns False while the check is disabled via
    ``self.disable_inner_area_check``.
    """
    if self.disable_inner_area_check:
        return False
    left, top, right, bottom = self.left_top_right_bottom
    border_checks = (
        (leftside, x < left),
        (rightside, x > right),
        (topside, y < top),
        (bottomside, y > bottom),
    )
    for enabled, outside in border_checks:
        if enabled and outside:
            return False
    return True
def calculate_hue_histograms(self, instance_ids, masks, image):
    """
    Compute a normalized hue histogram for every requested instance.
    Inputs:
        - instance_ids: a list of instance indexes of interest
        - masks: 3D-array of per-instance binary masks
        - image: video frame (BGR channel order, as delivered by OpenCV)
    Output: dict mapping instance index -> hue histogram whose bins sum to 1
    """
    num_bins = 36
    hue_range = [0, 180]  # OpenCV represents hue in [0, 180)
    bin_width = (hue_range[1] - hue_range[0]) / num_bins
    histograms = {}
    for idx in instance_ids:
        inside = np.where(masks[:, :, idx] == 1)
        # gather the BGR values of the pixels covered by the mask
        blue = image[:, :, 0][inside].reshape(-1, 1)
        green = image[:, :, 1][inside].reshape(-1, 1)
        red = image[:, :, 2][inside].reshape(-1, 1)
        pixels = np.stack((blue, green, red), axis=-1)
        hsv = cv2.cvtColor(pixels, cv2.COLOR_BGR2HSV)
        # density=True normalizes to unit integral; multiplying by the bin
        # width turns that into bin values that sum to 1
        hist, bins = np.histogram(hsv[:, :, 0].ravel(), bins=num_bins, range=hue_range, density=True)
        histograms[idx] = hist * bin_width
    return histograms
def calculate_distance_between_histograms(self, hist1, hist2):
    """
    Euclidean (L2) distance between two normalized histograms.
    """
    assert hist1.shape == hist2.shape
    difference = hist1 - hist2
    return np.sqrt(np.sum(difference ** 2))
def get_average_histogram_hue(self, uid):
    """
    Calculate and return an instance's average histogram of hue, normalized
    so that the bins sum to 1. Returns None for an unknown uid.
    """
    if uid not in self.dict_hue_histogram:
        return None
    total = np.zeros(self.dict_hue_histogram[uid][0].shape)
    for hist in self.dict_hue_histogram[uid]:
        total += hist
    return total / np.sum(total)
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
198,
2,
20410,
8619,
286,
262,
1628,
198,
13252,
2394,
62,
34720,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7203,
40720,
4943,
198... | 2.493342 | 11,039 |
"""
``DataLoaderFactory`` can be used to create data loader objects from the ``data_loader`` module.
The following code will create a data loader object for loading enhanced pbp from stats.nba.com.
.. code-block:: python
from pbpstats.data_loader.factory import DataLoaderFactory
data_loader = DataLoaderFactory()
stats_enhanced_pbp_data_loader = data_loader.get_data_loader("stats_nba", "EnhancedPbp")
print(stats_enhanced_pbp_data_loader[0])
# prints "<class 'pbpstats.data_loader.stats_nba.enhanced_pbp_loader.StatsNbaEnhancedPbpLoader'>"
"""
from collections import defaultdict
import pbpstats.data_loader as data_loader
class DataLoaderFactory(object):
    """
    Class for factory of data loader classes. On initialization will load in all data loader classes in ``data_loader`` module
    """

    def __init__(self):
        # resource name -> data provider name -> list of loader descriptors.
        # Nested defaultdicts let _load_data_loaders append without having to
        # pre-register every resource/provider pair. Without this constructor
        # the docstring's promise is broken and _load_data_loaders raises
        # AttributeError on self.loaders (note defaultdict was imported above
        # but otherwise unused).
        self.loaders = defaultdict(lambda: defaultdict(list))
        self._load_data_loaders()

    def _load_data_loaders(self):
        """
        Loads data loaders from the ``data_loader`` package and indexes them
        by resource and data provider in ``self.loaders``.
        """
        loaders = dict(
            [
                (name, cls)
                for name, cls in data_loader.__dict__.items()
                if isinstance(cls, type)
            ]
        )
        for name, loader_cls in loaders.items():
            # only classes exposing a ``resource`` attribute are data loaders;
            # each one is paired with its file- and web-source counterparts
            if hasattr(loader_cls, "resource"):
                file_source = loaders[name.replace("Loader", "FileLoader")]
                web_source = loaders[name.replace("Loader", "WebLoader")]
                loader = {
                    "loader": loader_cls,
                    "file_source": file_source,
                    "web_source": web_source,
                }
                self.loaders[loader_cls.resource][loader_cls.data_provider].append(
                    loader
                )

    def get_data_loader(self, data_provider, resource):
        """
        Gets data loader classes for given data provider and resource.

        :param str data_provider: Which data provider should data be loaded from. Options are 'stats_nba' or 'data_nba' or 'live'
        :param str resource: Name of class from resources directory
        :return: list of data loader classes
        :rtype: list
        """
        return self.loaders[resource][data_provider]
| [
37811,
198,
15506,
6601,
17401,
22810,
15506,
460,
307,
973,
284,
2251,
1366,
40213,
5563,
422,
262,
7559,
7890,
62,
29356,
15506,
8265,
13,
198,
198,
464,
1708,
2438,
481,
2251,
257,
1366,
40213,
2134,
329,
11046,
13105,
279,
46583,
42... | 2.364333 | 914 |
import copy
import pickle
from this import d
from pathlib import Path
import numpy as np
from skimage import io
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils, calibration_kitti, common_utils, object3d_kitti
from ..dataset import DatasetTemplate
import open3d
import numpy as np
from scipy.spatial.transform import Rotation as R
# don't run this for creation
if __name__ == '__main__':
    import sys
    # usage: python kitti_dataset.py create_kitti_infos <dataset_cfg.yaml>
    if len(sys.argv) > 1 and sys.argv[1] == 'create_kitti_infos':
        import yaml
        from pathlib import Path
        from easydict import EasyDict

        # yaml.safe_load avoids executing arbitrary tags from the config file
        # and keeps working on PyYAML >= 6, where yaml.load() without a
        # Loader raises TypeError; the with-block closes the file handle that
        # the original bare open() leaked.
        with open(sys.argv[2]) as cfg_file:
            dataset_cfg = EasyDict(yaml.safe_load(cfg_file))
        ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
        create_kitti_infos(
            dataset_cfg=dataset_cfg,
            class_names=['Car', 'Pedestrian', 'Cyclist'],
            data_path=ROOT_DIR / 'data' / 'kitti',
            save_path=ROOT_DIR / 'data' / 'kitti'
        )
| [
11748,
4866,
198,
11748,
2298,
293,
198,
6738,
428,
1330,
288,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
9060,
1330,
33245,
198,
198,
6738,
2644,
2840,
13,
305,
544,
1574,
62,
7742,
18,
6... | 2.335714 | 420 |
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from FoxhoundApp.TrafficApp.serializers import CategorySerializer
from FoxhoundApp.TrafficApp.utils import HeatMapDataGetter
| [
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
1334,
62,
30604,
13,
33571,
1330,
3486,
3824,
769,
198,
6738,
1334,
62,
30604,
13,
525,
8481,
1330,
1148,
47649,
3474,
198,
6738,
5426,
39047,
4677,
13,
15721,
2108,
4677,
13,
... | 4.092308 | 65 |
from .java import Postagger
from pkg_resources import get_distribution, DistributionNotFound
def _resolve_version():
    """Return the installed package version, or 'Undefined' when the
    distribution metadata is unavailable (e.g. a raw source checkout)."""
    try:
        return get_distribution('stanford_postagger').version
    except DistributionNotFound:
        return 'Undefined'

__version__ = _resolve_version()

__all__ = ['__version__', 'Postagger']
| [
6738,
764,
12355,
1330,
2947,
7928,
198,
6738,
279,
10025,
62,
37540,
1330,
651,
62,
17080,
3890,
11,
27484,
3673,
21077,
198,
198,
28311,
25,
198,
220,
220,
220,
11593,
9641,
834,
796,
651,
62,
17080,
3890,
10786,
14192,
3841,
62,
73... | 3.329114 | 79 |
from __future__ import annotations
__all__ = ("HTTP",)
import asyncio
import json
import aiohttp
import logging
from urllib.parse import quote as _quote
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Literal
from .connection import Connection
from .dataclasses import Embed, File
from .models import ClientUser, Member, Message
from .utils import UNDEFINED, get_event_loop
from .app import Button
from . import exceptions
if TYPE_CHECKING:
from .client import Client
# Monkey-patch: define a WEBSOCKET header-name constant on aiohttp.hdrs.
# NOTE(review): aiohttp.hdrs does not declare this attribute itself (hence the
# "type: ignore") — presumably other modules in this package read it; confirm
# before removing.
aiohttp.hdrs.WEBSOCKET = "websocket"  # type: ignore

# Shared module-level logger for the package
_logger = logging.getLogger("discode")
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
834,
439,
834,
796,
5855,
40717,
1600,
8,
198,
198,
11748,
30351,
952,
198,
11748,
33918,
198,
11748,
257,
952,
4023,
198,
11748,
18931,
198,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
... | 3.303867 | 181 |