content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import pickle
from firebase import firebase
import pyautogui
import time
firebase = firebase.FirebaseApplication('https://sleep-tight-8a6df.firebaseio.com/', None)
id2 = pickle.load(open("chrome","rb"))
X = firebase.get('/sleep-tight-8a6df/Chrome/'+ str(id2) , 'CX' )
Y = firebase.get('/sleep-tight-8a6df/Chrome/'+ str(id2) , 'CY' )
pyautogui.click(X, Y)
time.sleep(5)
pyautogui.write('https://cuchd.blackboard.com/ultra/course')
pyautogui.keyDown('enter')
time.sleep(10)
id2 = pickle.load(open("sign","rb"))
X = firebase.get('/sleep-tight-8a6df/signin/'+ str(id2) , 'SX' )
Y = firebase.get('/sleep-tight-8a6df/signin/'+ str(id2) , 'SY' )
pyautogui.click(X, Y)
time.sleep(15)
st = "ELT"
i = pickle.load(open(st,"rb"))
saq = int(i)
space(saq)
slass(st)
pyautogui.alert('After clicking ok move your mouse on join session and wait for another prompt.')
time.sleep(5)
currentMouseX, currentMouseY = pyautogui.position()
pyautogui.alert('Done!!!')
time.sleep(2)
pyautogui.click(currentMouseX, currentMouseY)
data = {
'X': currentMouseX,
'Y': currentMouseY
}
result = firebase.post('/sleep-tight-8a6df/jssion/',data)
final = ''.join(key + str(val) for key, val in result.items())
data = str(final)
proxy = data[4:24]
pickle.dump(proxy, open("jesi","wb"))
pyautogui.alert('After clicking ok move your mouse on course room and wait for another prompt.')
time.sleep(4)
currentMouseX, currentMouseY = pyautogui.position()
pyautogui.alert('Done!!!')
time.sleep(2)
data = {
'X': currentMouseX,
'Y': currentMouseY
}
result = firebase.post('/sleep-tight-8a6df/jssion1/',data)
final = ''.join(key + str(val) for key, val in result.items())
data = str(final)
proxy = data[4:24]
pickle.dump(proxy, open("jesin","wb"))
pyautogui.alert('Now Run tropy.py using the command given in github README.md file.') | [
11748,
2298,
293,
198,
6738,
2046,
8692,
1330,
2046,
8692,
198,
11748,
12972,
2306,
519,
9019,
198,
11748,
640,
198,
198,
6495,
8692,
796,
2046,
8692,
13,
13543,
8692,
23416,
10786,
5450,
1378,
42832,
12,
33464,
12,
23,
64,
21,
7568,
... | 2.461022 | 744 |
import pytest
_EXPECTED = {
"data": {
"dog": {
"name": "Dog",
"nickname": "Doggo",
"barkVolume": 2,
"doesKnowCommand": True,
"isHousetrained": False,
"owner": {"name": "Hooman"},
}
}
}
@pytest.mark.asyncio
@pytest.mark.ttftt_engine(
resolvers={
"Query.dog": resolve_query_dog,
"Dog.doesKnowCommand": resolve_dog_does_know_command,
"Dog.isHousetrained": resolve_dog_is_housetrained,
"Dog.owner": resolve_dog_owner,
"Dog.friends": resolve_dog_friends,
"Query.cat": resolve_query_cat,
"Cat.doesKnowCommand": resolve_cat_does_know_command,
"Query.human": resolve_query_human,
"Query.catOrDog": resolve_query_cat_or_dog,
}
)
@pytest.mark.parametrize(
"operation_name,query,variables,expected",
[
(
None,
"""
query {
dog {
name
nickname
barkVolume
doesKnowCommand(dogCommand: DOWN)
isHousetrained(atOtherHomes: true)
owner {
name
}
}
}
""",
None,
_EXPECTED,
),
(
"Dog",
"""
fragment HumanFields on Human {
... on Human {
name
}
}
fragment LightCatOrDogFields on CatOrDog {
... on Cat {
name
nickname
}
... on Dog {
name
nickname
}
}
fragment LightDogFields on Dog {
name
barkVolume
}
fragment DogFields on Dog {
name
doesKnowCommand(dogCommand: DOWN)
isHousetrained(atOtherHomes: true)
owner {
... on Human {
...HumanFields
}
}
friends {
...LightCatOrDogFields
}
}
fragment CatFields on Cat {
name
}
fragment QueryDogFields on Query {
... on Query {
... {
dog {
... on Dog {
...DogFields
}
}
dog {
name
nickname
barkVolume
}
dog {
...LightDogFields
}
}
}
}
query Dog {
... on Query {
...QueryDogFields
}
}
query Cat {
cat {
...CatFields
}
}
""",
None,
{
"data": {
"dog": {
"name": "Dog",
"doesKnowCommand": True,
"isHousetrained": False,
"owner": {"name": "Hooman"},
"friends": [
{"name": "Dog", "nickname": "Doggo"},
{"name": "Cat", "nickname": "Catto"},
],
"nickname": "Doggo",
"barkVolume": 2,
}
}
},
),
(
None,
"""
query CatOrDog {
catOrDog(id: 1) {
... on Dog {
name
}
... on Dog {
nickname
}
... on Cat {
name
}
}
}
""",
None,
{"data": {"catOrDog": {"name": "Dog", "nickname": "Doggo"}}},
),
],
)
| [
11748,
12972,
9288,
628,
628,
628,
628,
628,
198,
198,
62,
49864,
9782,
1961,
796,
1391,
198,
220,
220,
220,
366,
7890,
1298,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
366,
9703,
1298,
1391,
198,
220,
220,
220,
220,
220,
220,
... | 1.488431 | 2,766 |
# Copyright (c) Microsoft Corporation. All Rights Reserved.
# Licensed under the MIT license. See LICENSE file on the project webpage for details.
"""Setup for azure_media_services XBlock."""
import os
from setuptools import setup
def package_data(pkg, roots):
"""Generic function to find package_data.
All of the files under each of the `roots` will be declared as package
data for package `pkg`.
"""
data = []
for root in roots:
for dirname, __, files in os.walk(os.path.join(pkg, root)):
for fname in files:
data.append(os.path.relpath(os.path.join(dirname, fname), pkg))
return {pkg: data}
setup(
name='azure_media_services-xblock',
version='0.0.1',
description='This XBlock implements a video player that utilizes the Azure Media Services.',
packages=[
'azure_media_services',
],
include_package_data=True,
dependency_links=[
# At the moment of writing PyPI hosts outdated version of xblock-utils, hence git
# Replace dependency links with numbered versions when it's released on PyPI
'git+https://github.com/edx/xblock-utils.git@v1.0.5#egg=xblock-utils==1.0.5',
],
install_requires=[
'PyJWT',
'bleach',
'mako',
'requests>=2.9.1,<3.0.0',
'XBlock>=0.4.10,<2.0.0',
'xblock-utils>=1.0.2,<=1.0.5',
],
entry_points={
'xblock.v1': [
'azure_media_services = azure_media_services:AMSXBlock',
]
},
package_data=package_data("azure_media_services", ["static", "templates", "public", "translations"]),
)
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
6923,
33876,
13,
198,
2,
49962,
739,
262,
17168,
5964,
13,
4091,
38559,
24290,
2393,
319,
262,
1628,
35699,
329,
3307,
13,
198,
198,
37811,
40786,
329,
35560,
495,
62,
11431,
62,
30416,
1... | 2.432191 | 671 |
from biostuff import BlastLine, BlastFile
some_attrs = ('qstart', 'qstop', 'sstart', 'sstop', 'pctid', 'score', 'query',
'subject')
| [
6738,
3182,
455,
1648,
1330,
20641,
13949,
11,
20641,
8979,
198,
198,
11246,
62,
1078,
3808,
796,
19203,
80,
9688,
3256,
705,
80,
11338,
3256,
705,
82,
9688,
3256,
705,
82,
11338,
3256,
705,
79,
310,
312,
3256,
705,
26675,
3256,
705,
... | 2.409836 | 61 |
import pygame
from pygame.sprite import Sprite
# a class to represent a single enemy in the fleet
# init the enemy and it's starting position
# function to check if enemy is at edge of screen
# enemy update function
| [
11748,
12972,
6057,
198,
6738,
12972,
6057,
13,
34975,
578,
1330,
33132,
198,
198,
2,
257,
1398,
284,
2380,
257,
2060,
4472,
287,
262,
11026,
628,
220,
220,
220,
1303,
2315,
262,
4472,
290,
340,
338,
3599,
2292,
628,
220,
220,
220,
... | 3.714286 | 63 |
import copy
import logging
import re
import uuid
from django.conf import settings
from raven import Client
logger = logging.getLogger(__name__)
REPORTS_BASE_URL = 'http://qa.orange.biolab.si/errors/{}'
PYTHON_FOLDERS = [
"site-packages",
"dist-packages",
"Python34.lib",
"anaconda3.lib",
"lib.python3.4",
"orange3",
]
ORANGE_ADDONS = [
'orangecontrib',
'lekbf',
'_textable',
'orangebiodepot',
]
FRAMES_RE = re.compile('File "([^"]+)", line (\d+), in ([^ ]+) (.*)')
DEVICE_RE = re.compile('Python ([\d\.]+) on ([^ ]+) ([^ ]+) (.+) ([^ ]+)$')
# Modules that should not be grouped by
GENERAL_MODULES = [
"Orange.data.domain:232", # domain.index(attr_name)
"sklearn.utils.validation:424", # check_array
"Orange.util:141", # attrgetter(attr)(obj)
"Orange.statistics.util:52", # bincount
]
ORANGE3_DATASETS = ('Orange3-Datasets', "https://2cb16c369f474e799ae384045dbf489e:b35f4e39d8b1417190aeb475e8c3df0a@sentry.io/167538")
ORANGE_SPECTROSCOPY = "https://1cb3697dbfc04f748bae548865f1b1a8:eb0b726e492b44358a277c97c8c631f2@sentry.io/176038"
DSN_3RDPARTY = "https://d077c44bbab1407595c9838ace02aea5:f3f434118ea44e0a9e61c580ca156505@sentry.io/176069"
DSN_TEXTABLE = "https://489e53f2068441f48d0d7bb3f5f066d5:299379ad47a140dfaee2042a6bb4204f@sentry.io/207453"
SINGLE_CELL = "https://3acf738fd9a3458ab76cabcfaa072dcf:6b24664b8a67412382986cd388de965b@sentry.io/209789"
DSN_ORANGE = "https://6f0311046ad2438598ae121cdabd878f:df101b5249ea4c89a82fc1f5da73886d@sentry.io/124497"
# For addons with separate DSNs mapping from namespace to addon name
# must be provided for reporting addon version as release.
NAMESPACE_TO_ADDON = {
'associate': ('Orange3-Associate', "https://cde61b47c74c4f98931264c1112b1bc2:10cfb3b76a16466fb6583a7952c660a8@sentry.io/167541"),
'bioinformatics': ('Orange3-Bioinformatics', "https://2e100fa55b83432e83aa04dc54962e5f@sentry.io/1311211"),
'conformal': ('Orange3-Conformal-Prediction', "https://3cf0bca1e5ed4b6a811c9980f27ed8ee:94015ed538b04bdcb4da2c35f0d792f8@sentry.io/167539"),
'datafusion': ('Orange3-DataFusion', "https://894bd2e1f47a4271834b8fbc019fc90b:e9d52ebb81354ca0b84fa64624f3882a@sentry.io/167542"),
'wbd': ORANGE3_DATASETS,
'datasets': ORANGE3_DATASETS,
'educational': ('Orange3-Educational', "https://93323bc17a094974a830b25abbae01b5:4fd5e7c529e34afd97ceca08ed4f059d@sentry.io/167545"),
'geo': ('Orange3-Geo', "https://f3b7d23593d14247808b70ff964b3956:ff25c1d23d3a4eca849429c731c874d9@sentry.io/167528"),
'imageanalytics': ('Orange3-ImageAnalytics', "https://cc2ef6171aad4b6ba344e2851169db7d:cd21ed3e80ae4f4385b31a24e0d036cf@sentry.io/161064"),
'network': ('Orange3-Network', "https://14706c0ff3e047d999cff64e6100eb25:1dd7b84d0afc449abba1757e3520b0c2@sentry.io/167534"),
'prototypes': ('Orange3-Prototypes', "https://d7440097e7f64e4cbff90dd31fc8876e:dde09f7ba917431884b7eb04c814b824@sentry.io/167530"),
'recommendation': ('Orange3-Recommendation', "https://e447ddb4e80149289bca679121359c03:e4b9a0f1a1414f7d906e56b8e28be9cc@sentry.io/167543"),
'text': ('Orange3-Text', "https://38ffabded40c46b9952b2acebc726866:147d6a5becfa40499b6d79e858fb6ef1@sentry.io/128443"),
'timeseries': ('Orange3-Timeseries', "https://e8f30f9dbaf74635bb10e37abe0b5354:2478a41e2f95463db8ceebfeb060cc99@sentry.io/161065"),
'testing': ('', "https://261797e8fa4544ffb931bc495157d2e3:44e30b93f9f1463a975725f82ca18039@sentry.io/128442"),
'lekbf': ('lekbf', "https://7da121cc693045c688d5ffd2d320e65b:1e2b3e613c85437ba8f005035572b3b7@sentry.io/174357"),
'infrared': ('Orange-Infrared', ORANGE_SPECTROSCOPY),
'spectroscopy': ('Orange-Spectroscopy', ORANGE_SPECTROSCOPY),
'monroe_anal': ('monroe-anal', "https://26940ac80e9f4cf095dd6c90e7e7e674:37d903fdd6364d52be6e50614d5cfccf@sentry.io/242335"),
'spark': ('Orange3-spark', DSN_3RDPARTY),
'tomwer': ('tomwer', DSN_3RDPARTY),
'textable_prototypes': ('Orange3-Textable-Prototypes', DSN_TEXTABLE),
'orangebiodepot': ('orangebiodepot', DSN_3RDPARTY),
'_textable': ('Orange3-Textable', DSN_TEXTABLE),
'variants': ('Orange3-Variants', SINGLE_CELL),
'single_cell': ('Orange3-SingleCell', SINGLE_CELL),
'chem': ('Orange3-Chemoinformatics', "https://a2cfd734538c4892ad3c02679891fa44:1fdd2cbd5bef4c7897d1a10077e9de97@sentry.io/275477"),
}
| [
11748,
4866,
198,
11748,
18931,
198,
11748,
302,
198,
11748,
334,
27112,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
37735,
1330,
20985,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
... | 1.953399 | 2,339 |
# Copyright 2006-2007 Virtutech AB
import sim_commands
sim_commands.new_pci_header_command('AM79C973', None)
sim_commands.new_info_command('AM79C973', get_info)
sim_commands.new_status_command('AM79C973', get_status)
| [
2,
15069,
4793,
12,
12726,
11285,
1133,
354,
9564,
198,
198,
11748,
985,
62,
9503,
1746,
198,
198,
14323,
62,
9503,
1746,
13,
3605,
62,
79,
979,
62,
25677,
62,
21812,
10786,
2390,
3720,
34,
24,
4790,
3256,
6045,
8,
198,
14323,
62,
... | 2.576471 | 85 |
#!usr/bin/env python3
# -*- coding: utf-8 -*-
"""
pytest setup_module() and teardown_module() demo.
Assumption: creating a user is a very resource-consuming process
=> Thus, we don't want to do user creation every time we run a test.
"""
__author__ = 'Ziang Lu'
import pytest
from pytest_for_python.src.codes import User, is_member, is_prime_member
user = None
| [
2,
0,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
9078,
9288,
9058,
62,
21412,
3419,
290,
573,
446,
593,
62,
21412,
3419,
13605,
13,
198,
198,
8021,
... | 2.92126 | 127 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import socket
import errno
from getpass import getpass
from optparse import OptionParser
from sassh.connectionlib import Library, Connection
from sassh.sshclient import SSHClient
from paramiko import SSHException
try:
import pygtk
pygtk.require('2.0')
import gtk
GTK_AVAILABLE = True
except ImportError:
GTK_AVAILABLE = False
EXTRA_HELP = """\
While connected the following key binds are available:
'CTRL-X' followed by 'p' to send the connection password (e.g. for sudo)
; 'CTRL-X' followed by 'n' to generate new password (e.g. when password expired)
"""
class Main():
""" Main class for the application """
def parse_args(self):
""" Parse command line arguments """
parser = OptionParser(epilog=EXTRA_HELP)
parser.add_option("-a", "--add-connection",
action="store", type="string", dest="add_connection",
help="Add connection to the configuration database")
parser.add_option("-d", "--del-connection",
action="store_true", dest="del_connection",
help="Delete host from the configuration database")
parser.add_option("-g", "--get",
action="store", type="string", dest="get_file",
help="Get file from server")
parser.add_option("--put",
action="store", type="string", dest="put_file",
help="Put file from server")
parser.add_option("-k", "--use-key",
action="store_true", dest="set_use_key",
help="Set connection to use key based authentication")
parser.add_option("-l", "--list",
action="store_true", dest="list",
help="List configured connections names")
parser.add_option("-L", "--long-list",
action="store_true", dest="long_list",
help="List configured connections (with details)")
parser.add_option("-p", "--set-password",
action="store", type="string", dest="set_password",
help="Set connection password")
parser.add_option("-r", "--run",
action="store", type="string", dest="run_command",
help="Run command and exit")
parser.add_option("-R", "--run-su",
action="store", type="string", dest="run_su_script",
help="Run script with super user privileges")
parser.add_option("--reset",
action="store_true", dest="reset",
help="Change password for connection")
parser.add_option("-s", "--set-connection",
action="store", type="string", dest="set_connection",
help="Set login information for connection")
parser.add_option("-S", "--set-step-stone",
action="store", type="string", dest="set_step_stone",
help="Set stepping stone connection")
parser.add_option("-t", "--change-tags",
action="store", type="string", dest="change_tags",
help="Change connection tags")
parser.add_option("--super",
action="store_true", dest="super",
help="Perform 'sudo su -' after logging in")
parser.add_option("-w", "--show-connection",
action="store_true", dest="show_connection",
help="Show connection information")
self.options, self.args = parser.parse_args()
def _get_sassh_gpg_pub_key(self):
""" Check that the environment variable SASSH_GPG_PUB_KEY is defined """
sassh_gpg_pub_key = os.getenv('SASSH_GPG_PUB_KEY')
if not sassh_gpg_pub_key:
print """
sassh uses a GPG encrypted file to store connection passwords.
You must generate a GPG keypair with "gpg --gen-key" .
YOU SHOULD PROTECT THE KEY WITH A PASSPHRASE .
Then set your shell's SASSH_GPG_PUB_KEY variable to to the public id as
displayed from "gpg --list-keys", e.g: pub 4096R/7FD63AB0
export SASSH_GPG_PUB_KEY="7FD63AB0"
"""
sys.exit(1)
self.sassh_gpg_pub_key = sassh_gpg_pub_key
def _list_connections(self, pattern, long_list):
""" List all the configured connections """
library = self.host_library
for connection_name in library.connections:
connection = None
if pattern and pattern[0] == '+':
connection = library.getbyname(connection_name)
if not connection.tags or pattern not in connection.tags:
continue
else:
if not connection_name.lower().startswith(pattern.lower()):
continue
if long_list:
connection = connection or library.getbyname(connection_name)
show_fields = connection.name+" "
show_fields += "-a "+connection.url+" "
if connection.use_key:
show_fields += "-k "
if connection.step_stone:
show_fields += "-S "+connection.step_stone+" "
if connection.tags and len(connection.tags) > 1:
show_fields += "-t "+connection.tags
print show_fields
else:
print connection_name
sys.exit(0)
def _process_args(self):
""" Return connection definition after processing cmd arguments """
options, args = self.options, self.args
# Check connection availability and management options
if len(args) < 1 and not (options.list or options.long_list):
print "Usage:"
print " %s connection_name [options]" % sys.argv[0]
print " %s --list" % sys.argv[0]
sys.exit(2)
library = self.host_library
if (options.list or options.long_list):
pattern = args[0] if len(args) > 0 else ''
self._list_connections(pattern, options.long_list)
connection_name = args[0].lower()
if options.set_step_stone:
try:
library.getbyname(options.set_step_stone)
except IOError:
print 'No connection with name %s !' % options.set_step_stone
sys.exit(4)
try:
connection = library.getbyname(connection_name)
except IOError:
if not options.add_connection:
print 'No connection with name %s !' % connection_name
print 'If you want to add it use "--add-connection"'
sys.exit(3)
else:
connection = Connection(connection_name)
else:
if options.add_connection:
print "Connection with name %s is already stored!" % \
connection_name
sys.exit(4)
if options.del_connection:
library.remove(connection)
sys.exit(0)
if options.show_connection:
print "URL", connection.url
if GTK_AVAILABLE:
show_password = '(Copied to th clipboard)'
clipboard = gtk.clipboard_get()
clipboard.set_text(connection.password)
clipboard.store()
else:
show_password = connection.password
print "PASSWORD", show_password
if connection.use_key:
print "USING KEY"
print connection.tags or '+'
sys.exit(0)
if options.reset:
options.set_connection = connection.url
options.password = None
if options.change_tags:
if options.change_tags[0] != '+':
print "Tags format is: +tag1+tag2...+tagN"
sys.exit(4)
connection.change_tags(options.change_tags)
if options.set_step_stone:
connection.step_stone = options.set_step_stone
if options.set_password:
if options.set_use_key:
sys.stderr.write('You are already setting to key authentication!\n')
sys.exit(5)
else:
connection.use_key = False
connection.password = options.set_password
if options.set_use_key:
connection.use_key = True
# Ask for login password if setting a connection url
new_connection_url = options.add_connection or options.set_connection
if new_connection_url:
connection.url = new_connection_url
if not connection.password and not connection.use_key:
options.set_password = True
while True:
print "Type the password for connection %s [%s]: " \
% (connection_name, connection.url)
password1 = getpass()
if len(password1) < 1:
print "Password must be at least 1 chars long!"
print
continue
print "Re-type the password for connection %s [%s]: " \
% (connection_name, connection.url)
password2 = getpass()
if password1 != password2:
print "Passwords do not match!"
print
else:
break
connection.password = password1
only_save = new_connection_url \
or options.set_step_stone \
or options.change_tags \
or options.set_password \
or options.set_use_key
if only_save:
library.save(connection)
return None
else:
return connection
def run(self):
""" parse arguments and call the corresponding execution logic """
stderr = sys.stderr
self.parse_args()
connection = self._process_args()
options = self.options
if not connection: # Connection was changed
return
sshclient = SSHClient(connection, self.host_library)
if options.run_command or options.get_file or options.put_file or options.run_su_script:
sshclient.verbose = False
try:
sshclient.connect()
except SSHException, err:
stderr.write( "SSH error connecting to %s - %s\n"
% (connection.name, err.args[0]))
sys.exit(4)
except socket.timeout:
stderr.write("Connection timeout - unable to connect to %s !\n"
% connection.name)
sys.exit(2)
except socket.error, err:
errorcode = err[0]
if errorcode == errno.ECONNREFUSED:
stderr.write("Connection refused - unable to connect to %s !\n"
% connection.name)
sys.exit(3)
else:
raise
if options.super:
sshclient.perform_sudo()
if options.run_su_script:
sshclient.run_su_script(options.run_su_script)
elif options.run_command:
sshclient.run_command(options.run_command)
elif options.get_file:
sshclient.get_file(options.get_file)
elif options.put_file:
sshclient.put_file(options.put_file)
else:
sshclient.interactive_shell()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
17802,
198,
11748,
11454,
3919,
198,
6738,
651,
6603,
1330,
651,
6603,
... | 2.024307 | 5,842 |
import asyncio
import logging
import os
import json
import etl.io_config.server_protocol as protocol
alert_dns = '127.0.0.1'
predictor_dns = '0.0.0.0'
SRV_LOG_FMT = '%(asctime)s|%(name)s|%(process)s-%(thread)s|%(levelname)s|%(message)s'
logging.basicConfig(level=logging.INFO, format=SRV_LOG_FMT)
loop = asyncio.get_event_loop()
coro = asyncio.start_server(notification_loop, predictor_dns, 8182, loop=loop)
server = loop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
logging.info('Serving on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
| [
11748,
30351,
952,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
2123,
75,
13,
952,
62,
11250,
13,
15388,
62,
11235,
4668,
355,
8435,
198,
198,
44598,
62,
67,
5907,
796,
705,
16799,
13,
15,
13,
15,
13,
16,
6,
... | 2.573477 | 279 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standard set of plugins."""
import base64
import datetime
import os
import sys
import netaddr
from oslo_config import cfg
from oslo_utils import units
import six
from ironic_inspector.common.i18n import _, _LC, _LE, _LI, _LW
from ironic_inspector import conf
from ironic_inspector.plugins import base
from ironic_inspector import utils
CONF = cfg.CONF
LOG = utils.getProcessingLogger('ironic_inspector.plugins.standard')
class RootDiskSelectionHook(base.ProcessingHook):
"""Smarter root disk selection using Ironic root device hints.
This hook must always go before SchedulerHook, otherwise root_disk field
might not be updated.
"""
def before_update(self, introspection_data, node_info, **kwargs):
"""Detect root disk from root device hints and IPA inventory."""
hints = node_info.node().properties.get('root_device')
if not hints:
LOG.debug('Root device hints are not provided',
node_info=node_info, data=introspection_data)
return
inventory = introspection_data.get('inventory')
if not inventory:
raise utils.Error(
_('Root device selection requires ironic-python-agent '
'as an inspection ramdisk'),
node_info=node_info, data=introspection_data)
disks = inventory.get('disks', [])
if not disks:
raise utils.Error(_('No disks found'),
node_info=node_info, data=introspection_data)
for disk in disks:
properties = disk.copy()
# Root device hints are in GiB, data from IPA is in bytes
properties['size'] //= units.Gi
for name, value in hints.items():
actual = properties.get(name)
if actual != value:
LOG.debug('Disk %(disk)s does not satisfy hint '
'%(name)s=%(value)s, actual value is %(actual)s',
{'disk': disk.get('name'), 'name': name,
'value': value, 'actual': actual},
node_info=node_info, data=introspection_data)
break
else:
LOG.debug('Disk %(disk)s of size %(size)s satisfies '
'root device hints',
{'disk': disk.get('name'), 'size': disk['size']},
node_info=node_info, data=introspection_data)
introspection_data['root_disk'] = disk
return
raise utils.Error(_('No disks satisfied root device hints'),
node_info=node_info, data=introspection_data)
class SchedulerHook(base.ProcessingHook):
"""Nova scheduler required properties."""
KEYS = ('cpus', 'cpu_arch', 'memory_mb', 'local_gb')
def before_update(self, introspection_data, node_info, **kwargs):
"""Update node with scheduler properties."""
inventory = introspection_data.get('inventory')
errors = []
root_disk = introspection_data.get('root_disk')
if root_disk:
introspection_data['local_gb'] = root_disk['size'] // units.Gi
if CONF.processing.disk_partitioning_spacing:
introspection_data['local_gb'] -= 1
elif inventory:
errors.append(_('root disk is not supplied by the ramdisk and '
'root_disk_selection hook is not enabled'))
if inventory:
try:
introspection_data['cpus'] = int(inventory['cpu']['count'])
introspection_data['cpu_arch'] = six.text_type(
inventory['cpu']['architecture'])
except (KeyError, ValueError, TypeError):
errors.append(_('malformed or missing CPU information: %s') %
inventory.get('cpu'))
try:
introspection_data['memory_mb'] = int(
inventory['memory']['physical_mb'])
except (KeyError, ValueError, TypeError):
errors.append(_('malformed or missing memory information: %s; '
'introspection requires physical memory size '
'from dmidecode') %
inventory.get('memory'))
else:
LOG.warning(_LW('No inventory provided: using old bash ramdisk '
'is deprecated, please switch to '
'ironic-python-agent'),
node_info=node_info, data=introspection_data)
missing = [key for key in self.KEYS
if not introspection_data.get(key)]
if missing:
raise utils.Error(
_('The following required parameters are missing: %s') %
missing,
node_info=node_info, data=introspection_data)
if errors:
raise utils.Error(_('The following problems encountered: %s') %
'; '.join(errors),
node_info=node_info, data=introspection_data)
LOG.info(_LI('Discovered data: CPUs: %(cpus)s %(cpu_arch)s, '
'memory %(memory_mb)s MiB, disk %(local_gb)s GiB'),
{key: introspection_data.get(key) for key in self.KEYS},
node_info=node_info, data=introspection_data)
overwrite = CONF.processing.overwrite_existing
properties = {key: str(introspection_data[key])
for key in self.KEYS if overwrite or
not node_info.node().properties.get(key)}
node_info.update_properties(**properties)
class ValidateInterfacesHook(base.ProcessingHook):
"""Hook to validate network interfaces."""
def _get_interfaces(self, data=None):
"""Convert inventory to a dict with interfaces.
:return: dict interface name -> dict with keys 'mac' and 'ip'
"""
result = {}
inventory = data.get('inventory', {})
if inventory:
for iface in inventory.get('interfaces', ()):
name = iface.get('name')
mac = iface.get('mac_address')
ip = iface.get('ipv4_address')
if not name:
LOG.error(_LE('Malformed interface record: %s'),
iface, data=data)
continue
LOG.debug('Found interface %(name)s with MAC "%(mac)s" and '
'IP address "%(ip)s"',
{'name': name, 'mac': mac, 'ip': ip}, data=data)
result[name] = {'ip': ip, 'mac': mac}
else:
LOG.warning(_LW('No inventory provided: using old bash ramdisk '
'is deprecated, please switch to '
'ironic-python-agent'), data=data)
result = data.get('interfaces')
return result
def _validate_interfaces(self, interfaces, data=None):
"""Validate interfaces on correctness and suitability.
:return: dict interface name -> dict with keys 'mac' and 'ip'
"""
if not interfaces:
raise utils.Error(_('No interfaces supplied by the ramdisk'),
data=data)
pxe_mac = utils.get_pxe_mac(data)
if not pxe_mac and CONF.processing.add_ports == 'pxe':
LOG.warning(_LW('No boot interface provided in the introspection '
'data, will add all ports with IP addresses'))
result = {}
for name, iface in interfaces.items():
mac = iface.get('mac')
ip = iface.get('ip')
if not mac:
LOG.debug('Skipping interface %s without link information',
name, data=data)
continue
if not utils.is_valid_mac(mac):
LOG.warning(_LW('MAC %(mac)s for interface %(name)s is not '
'valid, skipping'),
{'mac': mac, 'name': name},
data=data)
continue
mac = mac.lower()
if name == 'lo' or (ip and netaddr.IPAddress(ip).is_loopback()):
LOG.debug('Skipping local interface %s', name, data=data)
continue
if (CONF.processing.add_ports == 'pxe' and pxe_mac
and mac != pxe_mac):
LOG.debug('Skipping interface %s as it was not PXE booting',
name, data=data)
continue
elif CONF.processing.add_ports != 'all' and not ip:
LOG.debug('Skipping interface %s as it did not have '
'an IP address assigned during the ramdisk run',
name, data=data)
continue
result[name] = {'ip': ip, 'mac': mac.lower()}
if not result:
raise utils.Error(_('No suitable interfaces found in %s') %
interfaces, data=data)
return result
def before_processing(self, introspection_data, **kwargs):
"""Validate information about network interfaces."""
bmc_address = utils.get_ipmi_address_from_data(introspection_data)
if bmc_address:
introspection_data['ipmi_address'] = bmc_address
else:
LOG.debug('No BMC address provided in introspection data, '
'assuming virtual environment', data=introspection_data)
all_interfaces = self._get_interfaces(introspection_data)
interfaces = self._validate_interfaces(all_interfaces,
introspection_data)
LOG.info(_LI('Using network interface(s): %s'),
', '.join('%s %s' % (name, items)
for (name, items) in interfaces.items()),
data=introspection_data)
introspection_data['all_interfaces'] = all_interfaces
introspection_data['interfaces'] = interfaces
valid_macs = [iface['mac'] for iface in interfaces.values()]
introspection_data['macs'] = valid_macs
def before_update(self, introspection_data, node_info, **kwargs):
"""Drop ports that are not present in the data."""
if CONF.processing.keep_ports == 'present':
expected_macs = {
iface['mac']
for iface in introspection_data['all_interfaces'].values()
}
elif CONF.processing.keep_ports == 'added':
expected_macs = set(introspection_data['macs'])
else:
return
# list is required as we modify underlying dict
for port in list(node_info.ports().values()):
if port.address not in expected_macs:
LOG.info(_LI("Deleting port %(port)s as its MAC %(mac)s is "
"not in expected MAC list %(expected)s"),
{'port': port.uuid,
'mac': port.address,
'expected': list(sorted(expected_macs))},
node_info=node_info, data=introspection_data)
node_info.delete_port(port)
class RamdiskErrorHook(base.ProcessingHook):
"""Hook to process error send from the ramdisk."""
DATETIME_FORMAT = '%Y.%m.%d_%H.%M.%S_%f'
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 2.078603 | 5,814 |
from .cgevolver import CGEvolver
from .eulerevolver import EulerEvolver
from .rungekuttaevolver import RungeKuttaEvolver
from .spintevolver import SpinTEvolver
from .spinxferevolver import SpinXferEvolver
from .uhh_thetaevolver import UHH_ThetaEvolver
from .xf_thermheunevolver import Xf_ThermHeunEvolver
from .xf_thermspinxferevolver import Xf_ThermSpinXferEvolver
| [
6738,
764,
66,
469,
10396,
332,
1330,
327,
8264,
10396,
332,
198,
6738,
764,
68,
2261,
260,
10396,
332,
1330,
412,
18173,
36,
10396,
332,
198,
6738,
764,
5143,
469,
74,
315,
8326,
1990,
14375,
1330,
5660,
469,
42,
315,
8326,
36,
103... | 2.652174 | 138 |
from django.conf import settings
from django.http import HttpResponseForbidden
# HTTP methods configured in settings.METHOD_ORIGIN (a mapping whose keys
# are method names; exact semantics defined by the consuming middleware).
target_methods = settings.METHOD_ORIGIN.keys()
# The standard HTTP request methods (note: PATCH and TRACE are not listed
# — presumably intentional; confirm against the consumer of this list).
http_methods = ['CONNECT', 'DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST', 'PUT']
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
1890,
37978,
198,
198,
16793,
62,
24396,
82,
796,
6460,
13,
49273,
62,
1581,
3528,
1268,
13,
13083,
3419,
198,
198,
4023,
62,
24396,
... | 2.971014 | 69 |
"""Read seven integer values and keep them in a single nested list that
separates even and odd numbers; finally print both groups in ascending
order. (Exercise statement originally given in Portuguese.)"""
# values[0] collects the even entries, values[1] the odd ones.
values = [[], []]
for i in range(1, 8):
    entry = int(input(f'Digite o {i}ª valor: '))
    # entry % 2 is 0 for even, 1 for odd — exactly the target sub-list.
    values[entry % 2].append(entry)
print('~'*30)
values[0].sort()
values[1].sort()
print(f'Os valores pares digitados foram: {values[0]}')
print(f'Os valores ímpares digitados foram: {values[1]}')
7061,
6,
327,
5034,
23781,
1430,
64,
319,
2934,
267,
514,
84,
6557,
27250,
1184,
64,
3100,
7940,
900,
68,
1188,
2850,
997,
2634,
1173,
418,
304,
20603,
459,
260,
12,
418,
198,
368,
334,
2611,
1351,
64,
6184,
118,
77,
3970,
8358,
2... | 2.076063 | 447 |
import time
from slackclient import SlackClient
import common
import config
if __name__ == '__main__':
    # NOTE(review): SlackConn is not imported above (only SlackClient is) —
    # presumably it is defined in the `common` module; verify.
    conn = SlackConn(config.slack_token)
    # Upload a single hard-coded image to the #allgemein channel; the path
    # looks like a sentry snapshot — confirm before reusing.
    conn.upload_img('/Users/omibot/data/omibot/sentry/Dienstag, 31. Oktober 2017 um 14:15:51/Image2.jpeg', '#allgemein')
| [
11748,
640,
198,
198,
6738,
30740,
16366,
1330,
36256,
11792,
198,
198,
11748,
2219,
198,
11748,
4566,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
48260,
796,
36256,
37321,
7,
11250,
13,
... | 2.596154 | 104 |
import os
from datetime import datetime
| [
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
628
] | 4.1 | 10 |
"""
File: draw_line.py
Name: Kevin Fang
-------------------------
TODO:
"""
from campy.graphics.gobjects import GOval, GLine
from campy.graphics.gwindow import GWindow
from campy.gui.events.mouse import onmouseclicked
# Assign window as constant to create canvas
window = GWindow()
SIZE = 10  # presumably the click-marker circle size in pixels — TODO confirm
# a, b ,c ,d are global variables, so define them as 0 value
a = b = c = d = 0


def main():
    """
    This program creates lines on an instance of GWindow class.
    There is a circle indicating the user’s first click. A line appears
    at the condition where the circle disappears as the user clicks
    on the canvas for the second time.
    """
    # NOTE(review): set_point is not defined in this file as shown — the
    # click handler still has to be implemented (see the TODO header).
    onmouseclicked(set_point)


if __name__ == "__main__":
    main()
| [
37811,
201,
198,
8979,
25,
3197,
62,
1370,
13,
9078,
201,
198,
5376,
25,
7939,
24468,
201,
198,
22369,
12,
201,
198,
51,
3727,
46,
25,
201,
198,
37811,
201,
198,
6738,
1413,
88,
13,
70,
11549,
13,
70,
48205,
1330,
10351,
2100,
11,... | 2.858238 | 261 |
from flask import get_flashed_messages
from flask_login import current_user
from app.auth.services.registration import (
AutoActivateUserPostProcessor,
AutologinPostProcessor,
SendActivationPostProcessor,
)
from app.core.auth.activation import AccountActivator
from app.utils.settings import app_config
| [
6738,
42903,
1330,
651,
62,
2704,
5263,
62,
37348,
1095,
198,
6738,
42903,
62,
38235,
1330,
1459,
62,
7220,
198,
198,
6738,
598,
13,
18439,
13,
30416,
13,
2301,
33397,
1330,
357,
198,
220,
220,
220,
11160,
25526,
378,
12982,
6307,
187... | 3.393617 | 94 |
""" This function should return a string like
"There are NUM planets in the solar system"
where NUM is provided as an argument."""
# Should equal "There are 8 planets in the solar system"
ss1 = solar_system(8)
# Should equal "There are 9 planets in the solar system"
ss2 = solar_system(9)
""" This function should return a string of the format
"On the DAYth day of MONTH in the year YEAR"
where DAY, MONTH, and YEAR are provided.
"""
# Should equal "On the 8th day of July in the year 2019"
date1 = fancy_date("July", 8, 2019)
# Should equal "On the 24th day of June in the year 1984"
date2 = fancy_date("June", 24, 1984)
""" This function should return a string
which starts with the provided place, then
has an @ sign, then the comma-separated lat and lng"""
# Should equal "Tilden Farm @ 37.91, -122.29"
loc1 = location("Tilden Farm", 37.91, -122.29)
# Should equal "Salton Sea @ 33.309, -115.979"
loc2 = location("Salton Sea", 33.309,-115.979)
""" This function should return a string
which starts with the provided item,
then a colon, then a $ sign and the provided cost."""
# Should equal "Avocado toast: $9.99"
menu1 = menu("Avocado toast", 9.99)
# Should equal "Cronut: $3.99"
menu2 = menu("Cronut", 3.99) | [
37811,
770,
2163,
815,
1441,
257,
4731,
588,
198,
1,
1858,
389,
36871,
14705,
287,
262,
6591,
1080,
1,
198,
3003,
36871,
318,
2810,
355,
281,
4578,
526,
15931,
198,
198,
2,
10358,
4961,
366,
1858,
389,
807,
14705,
287,
262,
6591,
10... | 3.176623 | 385 |
import logging
import vlc
import xml.etree.ElementTree as ET
import os
import sys
import re
from threading import Timer
from time import sleep
@vlc.CallbackDecorators.LogCb
| [
11748,
18931,
198,
11748,
410,
44601,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
302,
198,
6738,
4704,
278,
1330,
5045,
263,
198,
6738,
640,
1330,
3993,
628,
198,
31,... | 3.320755 | 53 |
from django.contrib import admin
from user import models
# Register your models here.
# Register your models here.
# Every model gets the default ModelAdmin; loop instead of nine
# near-identical register() lines.
for _model in (
    models.User,
    models.Staff,
    models.CourseAdviser,
    models.Student,
    models.Biodata,
    models.AcademicData,
    models.AcademicHistory,
    models.HealthData,
    models.FamilyData,
):
    admin.site.register(_model)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
2836,
1330,
4981,
198,
198,
2,
17296,
534,
4981,
994,
13,
628,
198,
28482,
13,
15654,
13,
30238,
7,
27530,
13,
12982,
8,
198,
28482,
13,
15654,
13,
30238,
7,
27530,
13,
3144... | 3.255639 | 133 |
# Event stream ends on the sentinel line "Lumpawaroo".
line = input()
sides = {}

while line != "Lumpawaroo":
    if " | " in line:
        # "<side> | <user>": force-assign, but only users not yet on any side.
        parts = line.split(" | ")
        side, user = parts[0], parts[1]
        if side not in sides:
            sides[side] = []
        enlisted = {member for roster in sides.values() for member in roster}
        if user not in enlisted:
            sides[side].append(user)
    else:
        # "<user> -> <side>": the user joins (or defects to) the given side.
        parts = line.split(" -> ")
        user, side = parts[0], parts[1]
        # A user belongs to at most one side; drop them from their old one.
        for roster in sides.values():
            if user in roster:
                roster.remove(user)
                break
        if side not in sides:
            sides[side] = []
        sides[side].append(user)
        print(f"{user} joins the {side} side!")
    line = input()

# Largest side first; ties broken alphabetically by side name.
sides = dict(sorted(sides.items(), key=lambda kv: (-len(kv[1]), kv[0])))
for side, members in sides.items():
    if not members:
        continue
    print(f"Side: {side}, Members: {len(members)}")
    for member in sorted(members):
        print(f"! {member}")
| [
1370,
796,
5128,
3419,
198,
82,
1460,
796,
23884,
198,
198,
4514,
1627,
14512,
366,
43,
931,
707,
38049,
1298,
198,
220,
220,
220,
611,
366,
930,
366,
287,
1627,
25,
198,
220,
220,
220,
220,
220,
220,
220,
26498,
796,
1627,
13,
35... | 2.065982 | 682 |
import requests
import os

# SECURITY(review): admin credentials are hard-coded inside the URL below
# (user:password@host). Move them to environment variables or a secrets
# store before this script is shared or committed anywhere public.
url = 'http://admin:SuperAdminPasssword6742344234!!@localhost:8080/'#'http://admin:SuperAdminPasssword6742344234!!@a18e-2601-182-ce00-c860-3c42-c8b2-be91-176.ngrok.io/'
#resp = requests.post(url, data={'newUser': True, 'username': 'new_user', 'password': 'test_pass'})

# Server operations understood by the endpoint (per the notes below):
## makefile : filename
## writefile : filename, data : str
## deletefile : filename
## readfile : filename (gotten from GET request)
## makefolder : foldername
## deletefolder : foldername
## listfolder : foldername
## changedir : foldername
## renamefile : filename, newname : str
## renamefolder : foldername, newname : str
##
"""requests.put(url, data={'deletefile': "4.png"})
img = Image.open('shitpost.png')
requests.post(url, data={'makefile': "4.png"})
resp = requests.put(url, data={"writefile": "4.png", "authToken": "new_user_user_1", "username": "new_user", "data": img.tobytes()})
resp = requests.get(url + "4.png")
image = Image.frombytes('RGBA', img.size, resp.content)
img.save('4.png', format='PNG')"""
#req = requests.post(url, data={"makefile": "test2.txt"})
#print(req.content)
#req = requests.put(url, data={"writefile": "test2.txt", "authToken": "admin_super_0", "username": "admin_super_0", "data": "test helfgsdfgsdfglo world"})
#print(req.content)
# Active code path: fetch test2.txt from the server and dump the raw bytes.
req = requests.get(url + "test2.txt")
print(req.content)
11748,
7007,
201,
198,
11748,
28686,
201,
198,
201,
198,
6371,
796,
705,
4023,
1378,
28482,
25,
12442,
46787,
14478,
30553,
3134,
43356,
2598,
24409,
3228,
31,
36750,
25,
1795,
1795,
14,
6,
2,
6,
4023,
1378,
28482,
25,
12442,
46787,
1... | 2.630769 | 520 |
##############################################
# Switching Linear Dynamical System
# Code for both SLDS generative model as well
# as variational inference code
##############################################
import torch
import torch.nn as nn
import numpy as np
import math
from torch.autograd import Variable
import itertools
import torch.nn.functional as F
import utils
from masked_cross_entropy import masked_cross_entropy
from EncDec import Encoder, Decoder, gather_last, sequence_mask
from data_utils import EOS_TOK, SOS_TOK, PAD_TOK, transform
| [
29113,
7804,
4242,
2235,
198,
2,
220,
220,
220,
14645,
278,
44800,
14970,
605,
4482,
220,
198,
2,
220,
220,
220,
6127,
329,
1111,
12419,
5258,
1152,
876,
2746,
355,
880,
198,
2,
220,
220,
220,
355,
5553,
864,
32278,
2438,
198,
29113... | 3.69281 | 153 |
import torch
from torch import nn
from houttuynia.nn import init
# Public API of this module: only these convolution wrappers are exported
# via `from <module> import *`.
__all__ = [
    'Conv1d', 'Conv2d', 'Conv3d', 'GramConv1',
]
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
198,
6738,
289,
448,
28047,
2047,
544,
13,
20471,
1330,
2315,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
705,
3103,
85,
16,
67,
3256,
705,
3103,
85,
17,
67,
3256,
705... | 2.163934 | 61 |
from enum import Enum
from typing import Dict, List, Optional
import click
from pygments.lexers.shell import BashLexer
from prompt_toolkit import prompt, print_formatted_text, PromptSession
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.shortcuts import ProgressBar
from prompt_toolkit.application import run_in_terminal
from . import color
# Maps a one-letter user answer to the corresponding SwitchState member.
# NOTE(review): SwitchState is not defined in this chunk — presumably an
# enum declared elsewhere in this package; verify.
confirmStrToSwitch: Dict[str, SwitchState] = {
    "y": SwitchState.Yes,
    "n": SwitchState.No,
    "o": SwitchState.OK,
    "c": SwitchState.Cancel
}

# Inverse mapping: SwitchState member -> its one-letter answer string.
switchToConfirmStr: Dict[SwitchState, str] = {
    v: k for k, v in confirmStrToSwitch.items()}

# Default interactive prompt: bash syntax highlighting plus suggestions
# drawn from the session history.
defaultInputCommandSession = PromptSession(
    message="> ", lexer=PygmentsLexer(BashLexer), auto_suggest=AutoSuggestFromHistory())
| [
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
32233,
198,
11748,
3904,
198,
6738,
12972,
11726,
13,
2588,
364,
13,
29149,
1330,
15743,
45117,
263,
198,
6738,
6152,
62,
25981,
15813,
1330,
6152,
11,
3601,
... | 3.151163 | 258 |
# --------------
##File path for the file
# NOTE(review): the bare `file_path*` expression-statements in this file
# only work because the hosting platform pre-defines those variables (as
# well as read_file/fuse_msg/substitute_msg/compare_msg/extract_msg/
# write_file and user_data_dir); standalone they raise NameError.
file_path

#Code starts here
# Read the first message and coerce it to str.
sample_message= str(read_file(file_path))
print(sample_message)

# --------------
#Code starts here
file_path_1
file_path_2
message_1=read_file(file_path_1)
message_2=read_file(file_path_2)
print("message1", message_1)
print("message2",message_2)
#print(int(message_2)//int(message_1))
# First secret fragment, fused from messages 1 and 2.
secret_msg_1 = fuse_msg(message_1,message_2)
print(secret_msg_1)

# --------------
#Code starts here
file_path_3
message_3 = read_file(file_path_3)
print("message 3:", message_3)
# Second fragment, derived from message 3 via substitute_msg.
secret_msg_2=substitute_msg(message_3)
print("secret msg2 :",secret_msg_2)

# --------------
# File path for message 4 and message 5
file_path_4
file_path_5

#Code starts here
message_4 = str(read_file(file_path_4))
message_5 = str(read_file(file_path_5))
print("message 4:",message_4)
print("message 5:",message_5)
# Third fragment, produced by comparing messages 4 and 5.
secret_msg_3 = str(compare_msg(message_4, message_5))
print("secret msg3 :", secret_msg_3)

# --------------
#Code starts here
file_path_6
message_6= str(read_file(file_path_6))
print("message 6 :",message_6)
# Fourth fragment, extracted from message 6.
secret_msg_4 = extract_msg(message_6)
print("secret msg 4:",secret_msg_4)

# --------------
#Secret message parts in the correct order
message_parts=[secret_msg_3, secret_msg_1, secret_msg_4, secret_msg_2]

final_path= user_data_dir + '/secret_message.txt'

#Code starts here
# Join the fragments with spaces and persist the full secret message.
secret_msg = " ".join(message_parts)
secret_message = write_file(secret_msg,final_path)
print("secret_msg :")
| [
2,
220,
26171,
198,
2235,
8979,
3108,
329,
262,
2393,
220,
201,
198,
7753,
62,
6978,
220,
201,
198,
201,
198,
2,
10669,
4940,
994,
201,
198,
201,
198,
201,
198,
39873,
62,
20500,
28,
965,
7,
961,
62,
7753,
7,
7753,
62,
6978,
400... | 2.407066 | 651 |
"""Test string
"""
import ARgorithmToolkit
algo = ARgorithmToolkit.StateSet()
st = ARgorithmToolkit.String('st', algo, "Hello world! 1234")
def test_body():
    """The declared string exposes its contents and emits a declare state."""
    assert st.body == "Hello world! 1234"
    state = algo.states[-1]
    assert state.content["state_type"] == 'string_declare'
    assert state.content["state_def"]["body"] == "Hello world! 1234"
def test_append():
    """Both .append() and += extend the string and emit append states."""
    global st
    st.append(" Hahaha")
    assert st.body == "Hello world! 1234 Hahaha"
    state = algo.states[-1]
    assert state.content["state_type"] == 'string_append'
    assert state.content["state_def"]["element"] == " Hahaha"
    st += 'xyz'
    assert st.body == "Hello world! 1234 Hahahaxyz"
    newest = algo.states[-1]
    previous = algo.states[-2]
    assert newest.content["state_type"] == 'string_append'
    assert newest.content["state_def"]["element"] == "xyz"
    # += builds a fresh tracked String, so a declare state precedes the append.
    assert previous.content["state_type"] == 'string_declare'
    assert previous.content["state_def"]["body"] == "Hello world! 1234 Hahaha"
    assert previous.content["state_def"]["variable_name"] == "st_super"
def test_indexing():
    """Indexing emits iter states; slicing declares a sub-String."""
    ch = st[1]
    assert ch == st.body[1]
    state = algo.states[-1]
    assert state.content["state_type"] == 'string_iter'
    assert state.content["state_def"]["index"] == 1
    piece = st[1:3]
    assert isinstance(piece, ARgorithmToolkit.String)
    state = algo.states[-1]
    assert state.content["state_type"] == 'string_declare'
    assert state.content["state_def"]["variable_name"] == 'st_super_sub'
    assert state.content["state_def"]["body"] == st.body[1:3]
def test_iteration():
    """Every character visited while iterating emits a string_iter state."""
    for idx, (got, expected) in enumerate(zip(st, st.body)):
        assert got == expected
        state = algo.states[-1]
        assert state.content["state_type"] == 'string_iter'
        assert state.content["state_def"]["index"] == idx
| [
37811,
14402,
4731,
198,
37811,
198,
11748,
5923,
42289,
25391,
15813,
198,
198,
282,
2188,
796,
5923,
42289,
25391,
15813,
13,
9012,
7248,
3419,
198,
301,
796,
5923,
42289,
25391,
15813,
13,
10100,
10786,
301,
3256,
435,
2188,
11,
366,
... | 2.549383 | 810 |
from logzero import logger
import logzero
import logging
import glob
import pandas as pd
# Scrapy packages
import scrapy
import requests
from scrapy.selector import Selector
from TA_scrapy.items import ReviewRestoItem, RestoItem, UserItem
from TA_scrapy.spiders import get_info
# Chromedriver package and options
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
chrome_options = webdriver.ChromeOptions()
# Flags needed to run Chrome headless inside a container/CI sandbox.
for flag in ('--headless', '--no-sandbox', '--disable-dev-shm-usage'):
    chrome_options.add_argument(flag)
| [
198,
6738,
2604,
22570,
1330,
49706,
198,
11748,
2604,
22570,
198,
11748,
18931,
198,
11748,
15095,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
2,
1446,
2416,
88,
10392,
198,
11748,
15881,
88,
198,
11748,
7007,
198,
6738,
15881,
88,... | 3.444444 | 171 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sqlalchemy as sql
from sqlalchemy import orm
from keystone import config
CONF = config.CONF  # module-level handle to the global keystone configuration
| [
2,
15069,
2211,
4946,
25896,
5693,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
26... | 3.411215 | 214 |
"""测试场景组件的使用"""
from manimlib.imports import *
class Graph2DExample(GraphScene):
    """Example of a 2D coordinate graph scene."""
    # Axis ranges, on-screen sizes and labels for the plotted graph.
    CONFIG = {
        "x_min": -1,
        "x_max": 6,
        "x_axis_width": 10,
        "x_axis_label": "time",
        #"x_label_color": RED,
        "y_min": -1,
        "y_max": 20,
        "y_axis_height": 8,
        "y_axis_label": "amp",
        #"y_label_color": YELLOW,
        "y_tick_frequency": 1,
    }
class ThreeDExample(ThreeDScene):
    """Example of a 3D scene."""
class MovingCameraExample(MovingCameraScene):
    """Example of a moving-camera scene."""
class SampleSpaceExample(SampleSpaceScene):
    """Example of a probability sample-space scene."""
class ZoomedExample(ZoomedScene):
    """Example of a zoomed (magnifying) camera scene."""
class VectorExample(LinearTransformationScene):
    """Example of a vector-field / linear-transformation scene."""
class ConfigSceneExample(Scene):
    """Example of overriding CONFIG parameters for a scene."""
    CONFIG = {
        "camera_config": {
            # Render at 30 fps instead of the library default.
            "frame_rate": 30,
        },
    }
class UpdateExample(Scene):
    """Example of setting up updaters."""
class CoorExample(Scene):
    """Example routine for 3D coordinate axes."""
37811,
38184,
233,
46237,
243,
28839,
118,
162,
247,
107,
163,
119,
226,
20015,
114,
21410,
45635,
18796,
101,
37811,
198,
198,
6738,
582,
320,
8019,
13,
320,
3742,
1330,
1635,
198,
198,
4871,
29681,
17,
35,
16281,
7,
37065,
36542,
25... | 1.631579 | 589 |
# Copyright (c) 2020-2021 impersonator.org authors (Wen Liu and Zhixin Piao). All rights reserved.
import cv2
import torch
import numpy as np
import math
from operator import itemgetter
from .dataset import normalize, pad_width
def infer_fast_post_process(net_outputs, PoseClass):
    """
    Convert raw network outputs (heatmaps + part-affinity fields) into poses
    expressed in original-image pixel coordinates.

    Args:
        net_outputs (dict): the output of the networks, and it contains,
            --heatmaps: per-keypoint confidence maps;
            --pafs: part-affinity fields linking keypoints;
            --pad / --scale / --stride / --upsample_ratio / --orig_shape:
              preprocessing metadata used to map coordinates back onto the
              original image.
        PoseClass (type of tools.human_pose2d_estimators.utils.pose_utils.OpenPoseBody25):

    Returns:
        outputs (dict): the output results, and it contains the followings keys,
            --pose_entries: per-person keypoint-index assignments;
            --all_keypoints: array of all candidate keypoints;
            --current_poses: list of PoseClass instances.
    """
    heatmaps = net_outputs["heatmaps"]
    pafs = net_outputs["pafs"]
    pad = net_outputs["pad"]
    scale = net_outputs["scale"]
    stride = net_outputs["stride"]
    upsample_ratio = net_outputs["upsample_ratio"]
    height, width = net_outputs["orig_shape"]

    num_keypoints = PoseClass.num_kpts
    total_keypoints_num = 0
    all_keypoints_by_type = []
    # NOTE(review): extract_keypoints/group_keypoints are not imported in
    # this chunk — presumably provided by a sibling module; verify.
    for kpt_idx in range(num_keypoints):  # 19th for bg
        total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)

    # Assemble individual keypoints into per-person skeletons via the PAFs.
    pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, PoseClass, demo=True)
    # Undo the network stride/upsampling, padding and resize so keypoints
    # land in original-image pixel coordinates.
    for kpt_id in range(all_keypoints.shape[0]):
        all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
        all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale

    # Clamp to the image bounds.
    if len(all_keypoints):
        all_keypoints[:, 0] = np.clip(all_keypoints[:, 0], 0, width)
        all_keypoints[:, 1] = np.clip(all_keypoints[:, 1], 0, height)

    current_poses = []
    for n in range(len(pose_entries)):
        if len(pose_entries[n]) == 0:
            continue
        pose_keypoints = np.zeros((num_keypoints, 3), dtype=all_keypoints.dtype)
        for kpt_id in range(num_keypoints):
            kpt_num_id = int(pose_entries[n][kpt_id])
            if kpt_num_id != -1:  # keypoint was found
                pose_keypoints[kpt_id] = all_keypoints[kpt_num_id, 0:3]
            else:
                # Missing keypoints are marked with (-1, -1) coordinates.
                pose_keypoints[kpt_id, 0:2] = -1.0
        # print(n, pose_keypoints)
        pose = PoseClass(pose_keypoints, pose_entries[n][-2])
        current_poses.append(pose)

    outputs = {
        "pose_entries": pose_entries,
        "all_keypoints": all_keypoints,
        "current_poses": current_poses
    }
    return outputs
| [
2,
15069,
357,
66,
8,
12131,
12,
1238,
2481,
28671,
1352,
13,
2398,
7035,
357,
54,
268,
18258,
290,
10511,
844,
259,
350,
13481,
737,
1439,
2489,
10395,
13,
198,
198,
11748,
269,
85,
17,
198,
11748,
28034,
198,
11748,
299,
32152,
35... | 2.222511 | 1,155 |
import numpy as np
import pymc as pm
from matplotlib import pyplot as plt


def logistic(x, beta, alpha=0):
    """Logistic (sigmoid) curve: 1 / (1 + exp(beta * x + alpha)).

    Was referenced below but never defined; restored so prob_31 works.
    """
    return 1.0 / (1.0 + np.exp(np.dot(beta, x) + alpha))


challenger_data = np.genfromtxt("challenger_data.csv", skip_header=1,
                                usecols=[1, 2], missing_values="NA",
                                delimiter=",")
# drop the NA values
challenger_data = challenger_data[~np.isnan(challenger_data[:, 1])]
temperature = challenger_data[:, 0]
D = challenger_data[:, 1]  # defect or not?

# notice the `value` here. We explain why below.
beta = pm.Normal("beta", 0, 0.001, value=0)
alpha = pm.Normal("alpha", 0, 0.001, value=0)


# BUG FIX: the decorator below dangled with no function under it (a
# SyntaxError) and `p` was undefined. Restored the deterministic
# probability-of-defect node as a function of temperature.
@pm.deterministic
def p(t=temperature, alpha=alpha, beta=beta):
    return 1.0 / (1.0 + np.exp(beta * t + alpha))


# connect the probabilities in `p` with our observations through a
# Bernoulli random variable.
observed = pm.Bernoulli("bernoulli_obs", p, value=D, observed=True)

model = pm.Model([observed, beta, alpha])

# Mysterious code to be explained in Chapter 3
map_ = pm.MAP(model)
map_.fit()
mcmc = pm.MCMC(model)
mcmc.sample(120000, 100000, 2)

alpha_samples = mcmc.trace('alpha')[:, None]  # best to make them 1d
beta_samples = mcmc.trace('beta')[:, None]

# histogram of the samples:
plt.subplot(211)
plt.title(r"Posterior distributions of the variables $\alpha, \beta$")
plt.hist(beta_samples, histtype='stepfilled', bins=35, alpha=0.85,
         label=r"posterior of $\beta$", color="#7A68A6", normed=True)
plt.legend()
plt.subplot(212)
plt.hist(alpha_samples, histtype='stepfilled', bins=35, alpha=0.85,
         label=r"posterior of $\alpha$", color="#A60628", normed=True)
plt.legend()
plt.show()

# Posterior probability of a defect at the launch temperature of 31 deg F.
prob_31 = logistic(31, beta_samples, alpha_samples)

plt.xlim(0.995, 1)
plt.hist(prob_31, bins=1000, normed=True, histtype='stepfilled')
plt.title("Posterior distribution of probability of defect, given $t = 31$")
plt.xlabel("probability of defect occurring in O-ring")
plt.show()
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
279,
4948,
66,
355,
9114,
201,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
201,
198,
201,
198,
36747,
6540,
62,
7890,
796,
45941,
13,
5235,
6738,
14116,
7203,
36747,
... | 2.380208 | 768 |
from tkinter import *

# Main application window: 600x500 pixels, offset 30/20 from the origin.
window = Tk()
window.geometry("600x500+30+20")
window.title("Welcome to Python Programming")

btn = Button(window, text="Click to add name", fg="blue")
btn.place(x=80, y=100)

lbl = Label(window, text="Student Personal Information", fg="Blue", bg="orange")
lbl.place(relx=.5, y=50, anchor="center")

lbl2 = Label(window, text="Gender", fg="red")
lbl2.place(x=80, y=150)

txtfld = Entry(window, bd=3, font=("verdana", 16))
txtfld.place(x=150, y=100)

# BUG FIX: the two radio buttons previously used two separate variables
# (v1/v2) and no `value=`, so they did not form a mutually exclusive
# group. A radio group must share one variable with distinct values.
v1 = StringVar(value="Male")
r1 = Radiobutton(window, text="Male", variable=v1, value="Male")
r1.place(x=80, y=200)
r2 = Radiobutton(window, text="Female", variable=v1, value="Female")
r2.place(x=200, y=200)

# Independent check boxes: one IntVar each (0 = unchecked, 1 = checked).
v3 = IntVar()
v4 = IntVar()
v5 = IntVar()
chkbox = Checkbutton(window, text="basketball", variable=v3)
chkbox2 = Checkbutton(window, text="volleyball", variable=v4)
chkbox3 = Checkbutton(window, text="swimming", variable=v5)
chkbox.place(x=80, y=300)
chkbox2.place(x=250, y=300)
chkbox3.place(x=350, y=300)

lbl3 = Label(window, text="Sports")
lbl3.place(x=80, y=250)
lbl4 = Label(window, text="Subjects")
lbl4.place(x=80, y=350)

# Multi-select subject list.
data1 = "arithmetric"
data2 = "writing"
data3 = "math"
lstbox = Listbox(window, height=5, selectmode="multiple")
lstbox.insert(END, data1, data2, data3)
lstbox.place(x=80, y=400)

window.mainloop()
| [
6738,
256,
74,
3849,
1330,
1635,
201,
198,
17497,
796,
309,
74,
3419,
201,
198,
201,
198,
17497,
13,
469,
15748,
7203,
8054,
87,
4059,
10,
1270,
10,
1238,
4943,
201,
198,
17497,
13,
7839,
7203,
14618,
284,
11361,
30297,
4943,
201,
1... | 2.207516 | 612 |
from collections import defaultdict

# NOTE(review): build_mapping_from_input, can_contain and count_bags are
# not defined in this chunk — presumably defined elsewhere in the file.
forward, reverse = build_mapping_from_input()

# Part 1
print(len(can_contain('shiny gold')))

# Part 2
print(count_bags('shiny gold'))
| [
6738,
17268,
1330,
4277,
11600,
628,
198,
198,
11813,
11,
9575,
796,
1382,
62,
76,
5912,
62,
6738,
62,
15414,
3419,
628,
198,
2,
2142,
352,
628,
198,
4798,
7,
11925,
7,
5171,
62,
3642,
391,
10786,
1477,
3541,
3869,
6,
22305,
628,
... | 2.967213 | 61 |
import itertools
import numpy as np
from draw import Draw
from replay import Replay
from config import WHITELIST
class Comparer:
    """
    A class for managing a set of replay comparisons.

    Attributes:
        List replays1: A list of Replay instances to compare against replays2.
        List replays2: A list of Replay instances to be compared against. Optional, defaulting to None. No attempt to error check
                       this is made - if a compare() call is made, the program will throw an AttributeError. Be sure to only call
                       methods that involve the first set of replays if this argument is not passed.
        Integer threshold: If a comparison scores below this value, the result is printed.

    See Also:
        Investigator
    """

    def __init__(self, threshold, replays1, replays2=None):
        """
        Initializes a Comparer instance.

        Note that the order of the two replay lists has no effect; they are only numbered for consistency.
        Comparing 1 to 2 is the same as comparing 2 to 1.

        Args:
            List replays1: A list of Replay instances to compare against replays2.
            List replays2: A list of Replay instances to be compared against. Optional, defaulting to None.
            Integer threshold: If a comparison scores below this value, the result is printed.
        """
        self.replays1 = replays1
        self.replays2 = replays2
        self.threshold = threshold

    def compare(self, mode):
        """
        If mode is "double", compares all replays in replays1 against all replays in replays2.
        If mode is "single", compares all replays in replays1 against all other replays in replays1
        (len(replays1) choose 2 comparisons). In both cases, prints each result via _print_result.

        Args:
            String mode: One of either "double" or "single", determining how to choose which replays to compare.
        """
        iterator = itertools.product(self.replays1, self.replays2) if mode == "double" else itertools.combinations(self.replays1, 2)
        for replay1, replay2 in iterator:
            # Skip whitelisted pairs and a player compared with themselves.
            if(self.check_names(replay1.player_name, replay2.player_name)):
                continue
            result = Comparer._compare_two_replays(replay1, replay2)
            self._print_result(result, replay1, replay2)

    def check_names(self, player1, player2):
        """
        Returns True if both players are in the whitelist or are the same name, False otherwise.

        Args:
            String player1: The name of the first player.
            String player2: The name of the second player.
        """
        return ((player1 in WHITELIST and player2 in WHITELIST) or (player1 == player2))

    def _print_result(self, result, replay1, replay2):
        """
        Prints a human readable version of the result if the average distance
        is below the threshold set from the command line.

        Args:
            Tuple result: A tuple containing (average distance, standard deviation) of a comparison.
            Replay replay1: The replay to print the name of and to draw against replay2
            Replay replay2: The replay to print the name of and to draw against replay1
        """
        mean = result[0]
        sigma = result[1]
        if(mean > self.threshold):
            return

        print("{:.1f} similarity, {:.1f} std deviation ({} vs {})".format(mean, sigma, replay1.player_name, replay2.player_name))
        answer = input("Would you like to see a visualization of both replays? ")
        # BUG FIX: `answer[0]` raised IndexError when the user just pressed
        # enter; strip + startswith handles empty input safely.
        if answer.strip().lower().startswith("y"):
            # Keep a local reference — presumably so the animation object
            # is not garbage collected while displayed; confirm.
            animation = Draw.draw_replays(replay1, replay2)

    @staticmethod
    def _compare_two_replays(replay1, replay2):
        """
        Compares two Replays and return their average distance
        and standard deviation of distances.
        """
        # get all coordinates in numpy arrays so that they're arranged like:
        # [ x_1 x_2 ... x_n
        #   y_1 y_2 ... y_n ]
        # indexed by columns first.
        data1 = replay1.as_list_with_timestamps()
        data2 = replay2.as_list_with_timestamps()

        # interpolate both replays onto a common timeline
        (data1, data2) = Replay.interpolate(data1, data2)

        # remove time from each tuple, keeping only (x, y)
        data1 = [d[1:] for d in data1]
        data2 = [d[1:] for d in data2]

        (mu, sigma) = Comparer._compute_data_similarity(data1, data2)
        return (mu, sigma)

    @staticmethod
    def _compute_data_similarity(data1, data2):
        """
        Finds the similarity and standard deviation between two datasets.

        Args:
            List data1: A list of tuples containing the (x, y) coordinate of points
            List data2: A list of tuples containing the (x, y) coordinate of points

        Returns:
            A tuple containing (similarity value, standard deviation) between the two datasets
        """
        data1 = np.array(data1)
        data2 = np.array(data2)

        # switch if the second is longer, so that data1 is always the longest.
        if len(data2) > len(data1):
            (data1, data2) = (data2, data1)

        shortest = len(data2)

        distance = data1[:shortest] - data2
        # square all numbers and sum over the second axis (add row 2 to row 1),
        # finally take the square root of each number to get all distances.
        # [ x_1 x_2 ... x_n   =>   [ x_1 ** 2 ... x_n ** 2
        #   y_1 y_2 ... y_n ] =>     y_1 ** 2 ... y_n ** 2 ]
        # => [ x_1 ** 2 + y_1 ** 2 ... x_n ** 2 + y_n ** 2 ]
        # => [ d_1 ... d_2 ]
        distance = (distance ** 2).sum(axis=1) ** 0.5

        mu, sigma = distance.mean(), distance.std()

        return (mu, sigma)
| [
11748,
340,
861,
10141,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
3197,
1330,
15315,
198,
6738,
24788,
1330,
23635,
198,
6738,
4566,
1330,
7655,
2043,
3698,
8808,
198,
198,
4871,
3082,
11258,
25,
198,
220,
220,
220,
37227... | 2.50693 | 2,381 |
from setuptools import setup

# Packaging metadata for the proximal library.
setup(
    name='proximal',
    version='0.1.7',
    packages=['proximal',
              'proximal.prox_fns',
              'proximal.lin_ops',
              'proximal.algorithms',
              'proximal.utils',
              'proximal.halide',
              'proximal.tests',
              'proximal.tests.data'],
    package_dir={'proximal': 'proximal'},
    # Non-Python files shipped inside the wheel/sdist (test fixture image
    # plus the Halide sources and meson build files).
    package_data={'proximal.tests.data': ['angela.jpg'],
                  'proximal.halide': ['src/*.cpp', 'src/core/*', 'src/external/*', 'src/fft/*',
                                      'subprojects/halide.wrap',
                                      'subprojects/pybind11.wrap',
                                      'subprojects/packagefiles/halide/meson.build',
                                      'meson.build']},
    url='http://github.com/comp-imaging/ProxImaL/',
    install_requires=["numpy >= 1.9",
                      "scipy >= 0.15",
                      "numexpr",
                      "Pillow",
                      "meson >= 0.54"],
    # BUG FIX: removed `use_2to3=True` — 2to3 support was dropped in
    # setuptools 58, so this flag makes installation fail with any modern
    # setuptools, and the flag is unnecessary for Python-3 code.
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
1676,
87,
4402,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
16,
13,
22,
3256,
198,
220,
220,
220,
10392,
28,
17816,
1676,
87,
4402,
3256,
... | 1.690236 | 594 |
import math
import numpy as np
import subprocess
import re
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import astropy.units as units
from astropy.cosmology import Planck15 as cosmo,z_at_value
from matplotlib.backends.backend_pdf import PdfPages

# NOTE(review): this file uses Python 2 `print` statements and will not
# run under Python 3 without conversion.
flare_dir="../flare"

# Grid of 16 mass values log-spaced one third of a decade apart,
# starting at 1.5e4 (units per SNRstudy's contract — confirm).
Ms=1.5e4*10**(np.arange(16)/3.0)
#Ms=2.0e5*10**(np.arange(13)/3.0)
print "Ms=",Ms
# NOTE(review): SNRstudy is neither defined nor imported in this chunk —
# presumably defined elsewhere in the file; verify.
SNRstudy(Ms,[1,2,4,10],[10,100,1000],300)

#logz = np.arange(10)/2.5
#print "logz=",logz
#print [10**x for x in logz]
#logD = [cosmo.luminosity_distance(1+10**lz)/units.Mpc for lz in logz]
#print logD
#plt.clf()
#plot=plt.plot(logz,logD)
#plt.show()
| [
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
850,
14681,
198,
11748,
302,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
13,
8071,
2052,
1330,
48599,
9248,
198,
11748,
6468,
... | 2.244681 | 282 |
"""
Schema for event objects.
"""
from .common import sources, extras
media_schema = {
"description": ("This \"special\" schema is used in two places in the Event"
" schema, on the top level and inside the agenda item. This is an"
" optional component that may be omited entirely from a document."),
"items": {
"properties": {
"name": {
"type": "string",
"description": ('name of the media link, such as "Recording of'
' the meeting" or "Discussion of construction'
' near the watershed"'),
},
"type": {
"type": "string",
"description": ('type of the set of recordings, such as'
' "recording" or "testimony".'),
},
"date": {
"pattern": "^[0-9]{4}(-[0-9]{2}){0,2}$",
"type": ["string", "null"],
"description": "date of the recording.",
},
"offset": {
"type": ["number", "null"],
"description": ("Offset where the related part starts. This is"
" optional and may be ommited entirely."),
},
"links": {
"description": ("List of links to the same media item, each"
" with a different MIME type."),
"items": {
"properties": {
"mimetype": {
"description": ("Mimetype of the media, such"
" as video/mp4 or audio/webm"),
"type": ["string", "null"]
},
"url": {
"type": "string",
"description": "URL where this media may be accessed",
},
},
"type": "object"
},
"type": "array"
},
},
"type": "object"
},
"type": "array"
}
schema = {
"description": "event data",
"_order": (
('Basics', ('_type', 'name', 'description', 'when', 'end', 'status', 'location')),
('Linked Entities', ('media', 'links', 'participants', 'agenda', 'documents',)),
('Common Fields', ['updated_at', 'created_at', 'sources']),
),
"properties": {
"_type": {
"enum": ["event"],
"type": "string",
"description": ("All events must have a _type field set to one of"
" the entries in the enum below."),
},
"name": {
"type": "string",
"description": ('A simple name of the event, such as "Fiscal'
' subcommittee hearing on pudding cups"')
},
"all_day": {
"type": ["boolean"],
"description": ("Indicates if the event is an all-day event"),
},
"type": {
"type": ["string"],
"description": ("type of event"),
},
# TODO: turn into enum
"updated_at": {
"type": ["string", "datetime"],
"required": False,
"description": "the time that this object was last updated.",
},
"created_at": {
"type": ["string", "datetime"],
"required": False,
"description": "the time that this object was first created.",
},
"description": {
"type": ["string", "null"],
"description": ('A longer description describing the event. As an'
' example, "Topics for discussion include this that'
' and the other thing. In addition, lunch will be'
' served".'),
},
"when": {
"type": ["datetime"],
"description": ("Starting date / time of the event. This should be"
" fully timezone qualified."),
},
"end": {
"type": ["datetime", "null"],
"description": ("Ending date / time of the event. This should"
" be fully timezone qualified."),
},
"status": {
"type": ["string", "null"],
"enum": ["cancelled", "tentative", "confirmed", "passed"],
"description": ("String that denotes the status of the meeting."
" This is useful for showing the meeting is cancelled"
" in a machine-readable way."),
},
"location": {
"description": "Where the event will take place.",
"type": "object",
"properties": {
"name": {
"type": "string",
"description": ('name of the location, such as "City Hall,'
' Boston, MA, USA", or "Room E201, Dolan'
' Science Center, 20700 North Park Blvd'
' University Heights Ohio, 44118"'),
},
"note": {
"type": ["string", "null"],
"description": ('human readable notes regarding the location,'
' something like "The meeting will take place'
' at the Minority Whip\'s desk on the floor"')
},
"url": {
"required": False,
"type": "string",
"description": "URL of the location, if applicable.",
},
"coordinates": {
"description": ('coordinates where this event will take'
' place. If the location hasn\'t (or isn\'t)'
' geolocated or geocodable, than this should'
' be set to null.'),
"type": ["object", "null"],
"properties": {
"latitude": {
"type": "string",
"description": "latitude of the location, if any",
},
"longitude": {
"type": "string",
"description": "longitude of the location, if any",
}
}
},
},
},
"media": media_schema,
"documents": {
"description": ("Links to related documents for the event. Usually,"
" this includes things like pre-written testimony,"
" spreadsheets or a slide deck that a presenter will"
" use."),
"items": {
"properties": {
"name": {
"type": "string",
"description": ('name of the document. Something like'
' "Fiscal Report" or "John Smith\'s'
' Slides".'),
},
"url": {
"type": "string",
"description": "URL where the content may be found.",
},
"mimetype": {
"type": "string",
"description": "Mimetype of the document.",
},
},
"type": "object"
},
"type": "array"
},
"links": {
"description": ("Links related to the event that are not documents"
" or items in the Agenda. This is filled with helpful"
" links for the event, such as a committee's homepage,"
" reference material or links to learn more about subjects"
" related to the event."),
"items": {
"properties": {
"note": {
"description": ('Human-readable name of the link. Something'
' like "Historical precedent for popsicle procurement"'),
"type": "string",
"blank": True,
},
"url": {
"description": "A URL for a link about the event",
"format": "uri",
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"participants": {
"description": ("List of participants in the event. This includes"
" committees invited, legislators chairing the event"
" or people who are attending."),
"items": {
"properties": {
"chamber": {
"type": ["string", "null"],
"description": ("Optional field storing the chamber of"
" the related participant."),
},
"name": {
"type": "string",
"description": "Human readable name of the entitity.",
},
"id": {
"type": ["string", "null"],
"description": "ID of the participant",
},
"type": {
"enum": ["organization", "person"],
"type": "string",
"description": ("What type of entity is this? `person`"
" may be used if the person is not a Legislator,"
" butattending the event, such as an"
" invited speaker or one who is offering"
" testimony."),
},
"note": {
"type": "string",
"description": ("Note regarding the relationship, such"
" as `chair` for the chair of a meeting."),
},
},
"type": "object"
},
"type": "array"
},
"agenda": {
"description": ("Agenda of the event, if any. This contains information"
" about the meeting's agenda, such as bills to"
" discuss or people to present."),
"items": {
"properties": {
"description": {
"type": "string",
"description": ("Human-readable string that represents this"
" agenda item. A good example would be something like"
" The Committee will consider SB 2339, HB 100"),
},
"order": {
"type": ["string", "null"],
"description": ("order of this item, useful for re-creating"
" meeting minutes. This may be ommited entirely."
" It may also optionally contains \"dots\""
" to denote nested agenda items, such as \"1.1.2.1\""
" or \"2\", which may go on as needed."),
},
"subjects": {
"description": ("List of related topics of this agenda"
" item relates to."),
"items": {"type": "string"},
"type": "array"
},
"media": media_schema,
"notes": {
"description": ("List of notes taken during this agenda"
" item, may be used to construct meeting minutes."),
"items": {
"properties": {
"description": {
"type": "string",
"description": ("simple string containing the"
" content of the note."),
},
},
"type": "object"
},
"type": "array"
},
"related_entities": {
"description": ("Entities that relate to this agenda"
" item, such as presenters, legislative"
" instruments, or committees."),
"items": {
"properties": {
"type": {
"type": "string",
"description": ("type of the related object, like"
" `bill` or `organization`."),
},
"id": {
"type": ["string", "null"],
"description": "ID of the related entity",
},
"name": {
"type": "string",
"description": ("human readable string"
" representing the entity,"
" such as `John Q. Smith`."),
},
"note": {
"type": ["string", "null"],
"description": ("human readable string (if any) noting"
" the relationship between the entity and"
" the agenda item, such as \"Jeff"
" will be presenting on the effects"
" of too much cookie dough\""),
},
},
"type": "object",
},
"minItems": 0,
"type": "array",
},
},
"type": "object"
},
"minItems": 0,
"type": "array"
},
"sources": sources,
"extras": extras,
},
"type": "object"
}
| [
37811,
198,
220,
220,
220,
10011,
2611,
329,
1785,
5563,
13,
198,
37811,
198,
198,
6738,
764,
11321,
1330,
4237,
11,
33849,
198,
198,
11431,
62,
15952,
2611,
796,
1391,
198,
220,
220,
220,
366,
11213,
1298,
5855,
1212,
19990,
20887,
7... | 1.660739 | 9,223 |
from django.contrib.auth import views as auth_views
from django.views.generic.base import TemplateView
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import RedirectView
from fileupload.views import *
app_name='fileupload'
urlpatterns = [
#~ url(r'^list/$', 'fileupload.views.list_files', name='list'),
url(r'^create_report/', 'fileupload.views.create_report', name='create_report'),
url(r'^(?P<report_id>[0-9]+)/', 'fileupload.views.view_report', name='view_report'),
url(r'^browse/$', 'fileupload.views.browse', name='browse'),
url(r'^user_reports/(?P<id>[0-9]+)/$', 'fileupload.views.user_reports', name='user_reports'),
url(r'^inbox/$', 'fileupload.views.inbox', name='inbox'),
url(r'^create_message/$', 'fileupload.views.create_message', name='create_message'),
url(r'^trash/$', 'fileupload.views.trash', name='trash'),
url(r'^delete_report/(?P<report_id>[0-9]+)/$', 'fileupload.views.delete_report', name='delete_report'),
url(r'^edit_report/(?P<report_id>[0-9]+)/$', 'fileupload.views.edit_report', name='edit_report'),
url(r'^view_message/(?P<message_id>[0-9]+)/', 'fileupload.views.view_message', name='view_message'),
url(r'^reply_message/(?P<message_id>[0-9]+)/', 'fileupload.views.reply_message', name='reply_message'),
url(r'^create_folder/$', 'fileupload.views.create_folder', name='create_folder'),
url(r'^edit_folder/(?P<folder_id>[0-9]+)/', 'fileupload.views.edit_folder', name='edit_folder'),
url(r'^delete_folder/(?P<folder_id>[0-9]+)/', 'fileupload.views.delete_folder', name='delete_folder'),
]
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
5009,
355,
6284,
62,
33571,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
13,
8692,
1330,
37350,
7680,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
2291,
11,
19... | 2.670295 | 643 |
#!/usr/bin/env python3
# encoding: utf-8
"""Choices are enumeration values you can choose, by selecting index number.
It is a special TabStop, its content are taken literally, thus said, they will not be parsed recursively.
"""
from UltiSnips import vim_helper
from UltiSnips.position import Position
from UltiSnips.text_objects.tabstop import TabStop
from UltiSnips.snippet.parsing.lexer import ChoicesToken
class Choices(TabStop):
"""See module docstring."""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
37811,
22164,
1063,
389,
27056,
341,
3815,
345,
460,
3853,
11,
416,
17246,
6376,
1271,
13,
198,
1026,
318,
257,
2041,
16904,
19485,
11,
... | 3.27972 | 143 |
import pytest
from mltk.utils.test_helper import run_model_operation, generate_run_model_params
@pytest.mark.parametrize(*generate_run_model_params())
@pytest.mark.parametrize(*generate_run_model_params()) | [
198,
11748,
12972,
9288,
198,
6738,
285,
2528,
74,
13,
26791,
13,
9288,
62,
2978,
525,
1330,
1057,
62,
19849,
62,
27184,
11,
7716,
62,
5143,
62,
19849,
62,
37266,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
46491,... | 2.824324 | 74 |
name = str(input('Type your name: ')).strip()
print('Uppercase name: {}'.format(name.upper()))
print('Lowercase name: {}'.format(name.lower()))
print('Total letters: {}'.format(len(name) - name.count(' ')))
#print('First name has {} letters. '.format(name.find(' ')))
s = name.split()
print('First name has {} letters.'.format(len(s[0]))) | [
3672,
796,
965,
7,
15414,
10786,
6030,
534,
1438,
25,
705,
29720,
36311,
3419,
198,
4798,
10786,
52,
39921,
589,
1438,
25,
23884,
4458,
18982,
7,
3672,
13,
45828,
3419,
4008,
198,
4798,
10786,
31426,
7442,
1438,
25,
23884,
4458,
18982,
... | 2.99115 | 113 |
#====================================================
# MODULES
#====================================================
import pandas as pd
import ROOT
import matplotlib.pyplot as plt
import numpy as np
#====================================================
# DATA PREPARATION
#====================================================
model_outputs = pd.read_csv('model_outputs.csv')
model_outputs['Label'] = pd.read_csv('dataset_higgs_challenge.csv')['Label']
model_outputs['KaggleWeight'] = pd.read_csv('dataset_higgs_challenge.csv')['KaggleWeight']
model_outputs['KaggleSet'] = pd.read_csv('dataset_higgs_challenge.csv')['KaggleSet']
predictions_train = model_outputs['Predictions'][model_outputs['KaggleSet'] == 't']
predictions_test = model_outputs['Predictions'][model_outputs['KaggleSet'] == 'v']
weights_train = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 't']
weights_test = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 'v']
labels_train = model_outputs['Label'][model_outputs['KaggleSet'] == 't']
labels_test = model_outputs['Label'][model_outputs['KaggleSet'] == 'v']
predictions_train = (predictions_train - min(predictions_train)) / (max(predictions_train) - min(predictions_train))
predictions_test = (predictions_test - min(predictions_test)) / (max(predictions_test) - min(predictions_test))
train_signal = predictions_train[model_outputs['KaggleSet'] == 't'][model_outputs['Label']=='s']
train_bkg = predictions_train[model_outputs['KaggleSet'] == 't'][model_outputs['Label']=='b']
test_signal = predictions_test[model_outputs['KaggleSet'] == 'v'][model_outputs['Label']=='s']
test_bkg = predictions_test[model_outputs['KaggleSet'] == 'v'][model_outputs['Label']=='b']
weights_train_signal = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 't'][model_outputs['Label']=='s']
weights_train_bkg = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 't'][model_outputs['Label']=='b']
weights_test_signal = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 'v'][model_outputs['Label']=='s']
weights_test_bkg = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 'v'][model_outputs['Label']=='b']
#====================================================
# STYLE SETTINGS
#====================================================
ROOT.gROOT.SetStyle("ATLAS")
c = ROOT.TCanvas("c", "", 750, 700)
bins = 20
hist_train_s = ROOT.TH1D("hist_train_s", "train signal", bins, 0, 1)
hist_test_s = ROOT.TH1D("hist_test_s", "test signal", bins, 0, 1)
hist_train_b = ROOT.TH1D("hist_train_b", "train bkg", bins, 0, 1)
hist_test_b = ROOT.TH1D("hist_test_b", "test bkg", bins, 0, 1)
#====================================================
# FIRST UNWEIGHTED AND NORMALIZED TO UNITY
#====================================================
for i in range(len(train_signal)):
hist_train_s.Fill(train_signal.values[i])
for i in range(len(test_signal)):
hist_test_s.Fill(test_signal.values[i])
for i in range(len(train_bkg)):
hist_train_b.Fill(train_bkg.values[i])
for i in range(len(test_bkg)):
hist_test_b.Fill(test_bkg.values[i])
for hist in [hist_test_s, hist_test_b]:
for i in range(1, hist.GetNbinsX()+1):
hist.SetBinError(i, np.sqrt(hist.GetBinContent(i)))
for hist in [hist_train_s, hist_test_s, hist_train_b, hist_test_b]:
hist.Scale(1/hist.Integral(), 'nosw2')
#Plot settings:
hist_train_b.SetAxisRange(3e-3, 5, 'Y')
hist_train_b.GetYaxis().SetLabelSize(0.04)
hist_train_b.GetYaxis().SetTitleSize(0.04)
hist_train_b.GetYaxis().SetTitle('Event Fraction')
hist_train_b.GetXaxis().SetLabelSize(0.04)
hist_train_b.GetXaxis().SetTitleSize(0.04)
hist_train_b.GetXaxis().SetTitle('Model Output')
hist_train_b.SetLineColor(ROOT.kRed)
hist_train_b.SetLineWidth(3)
hist_train_b.Draw('HIST')
hist_test_b.SetMarkerSize(1.3)
hist_test_b.SetMarkerStyle(3)
hist_test_b.Draw('same')
hist_train_s.SetLineColor(ROOT.kBlue)
hist_train_s.SetLineWidth(3)
hist_train_s.Draw('hist same')
hist_test_s.SetMarkerSize(1.3)
hist_test_s.SetMarkerStyle(8)
hist_test_s.Draw('same')
c.SetLogy()
#Add legend:
legend = ROOT.TLegend(0.52, 0.75, 0.92, 0.9)
legend.SetTextFont(42)
legend.SetFillStyle(0)
legend.SetBorderSize(0)
legend.SetTextSize(0.04)
legend.SetTextAlign(12)
legend.AddEntry(hist_train_s, "Signal (Training)", "lf")
legend.AddEntry(hist_test_s, "Signal (Test)", "pe")
legend.AddEntry(hist_train_b, "Background (Training)" ,"l")
legend.AddEntry(hist_test_b, "Background (Test)", "ep")
legend.Draw("SAME")
text = ROOT.TLatex()
text.SetNDC()
text.SetTextFont(42)
text.SetTextSize(0.04)
text.DrawLatex(0.23, 0.87, "Simulation")
text.DrawLatex(0.23, 0.83, "H #rightarrow #tau^{+}#tau^{-}")
text.DrawLatex(0.23, 0.79, "#sqrt{s} = 8 TeV")
c.Draw()
#Set marker:
marker_types = ROOT.TCanvas('marker_types', '', 0,0,500,200)
marker = ROOT.TMarker()
marker.DisplayMarkerTypes()
marker_types.Draw()
#====================================================
# NOW THE WEIGHTED DISTRIBUTION
#====================================================
c2 = ROOT.TCanvas("c2", "", 750, 700)
bins = 10
hist_train_sw = ROOT.TH1D("hist_train_sw", "train signal", bins, 0, 1)
hist_train_bw = ROOT.TH1D("hist_train_bw", "train bkg", bins, 0, 1)
hist_test_w = ROOT.TH1D("hist_test_w", "test bkg", bins, 0, 1)
for i in range(len(train_signal)):
hist_train_sw.Fill(train_signal.values[i], weights_train_signal.values[i])
for i in range(len(train_bkg)):
hist_train_bw.Fill(train_bkg.values[i], weights_train_bkg.values[i])
for i in range(len(predictions_test)):
hist_test_w.Fill(predictions_test.values[i], weights_test.values[i])
for hist in [hist_train_sw, hist_train_bw, hist_test_w]:
for i in range(1, hist.GetNbinsX()+1):
hist.SetBinError(i, np.sqrt(hist.GetBinContent(i)))
hist_train_sw.SetFillColorAlpha(ROOT.kAzure-1,.6)
hist_train_bw.SetFillColorAlpha(ROOT.kRed-4, .9)
hist_train_sw.SetLineWidth(1)
hist_train_bw.SetLineWidth(1)
#Axes
hist_train_bw.GetYaxis().SetLabelSize(0.04)
hist_train_bw.GetYaxis().SetTitleSize(0.04)
hist_train_bw.GetYaxis().SetTitle('Events')
hist_train_bw.GetXaxis().SetLabelSize(0.04)
hist_train_bw.GetXaxis().SetTitleSize(0.04)
hist_train_bw.GetXaxis().SetTitle('Model Output')
hist_train_bw.Draw()
#Stack
hs = ROOT.THStack("hs", "Weighted Distributions")
hs.Add(hist_train_sw)
hs.Add(hist_train_bw)
hs.SetMinimum(20)
hs.SetMaximum(1e7)
hs.Draw('hist')
hs.SetHistogram(hist_train_bw)
hist_test_w.Draw('same')
#Legend
legend = ROOT.TLegend(0.5, 0.75, 0.8, 0.9)
legend.SetTextFont(42)
legend.SetFillStyle(0)
legend.SetBorderSize(0)
legend.SetTextSize(0.04)
legend.SetTextAlign(12)
legend.AddEntry(hist_train_sw, "Signal (Training)", "f")
legend.AddEntry(hist_train_bw, "Background (Training)", "f")
legend.AddEntry(hist_test_w, "Test", "pe")
legend.Draw("SAME")
#Text
text = ROOT.TLatex()
text.SetNDC()
text.SetTextFont(42)
text.SetTextSize(0.04)
text.DrawLatex(0.23, 0.87, "Simulation")
text.DrawLatex(0.23, 0.83, "H #rightarrow #tau^{+}#tau^{-}")
text.DrawLatex(0.23, 0.79, "#sqrt{s} = 8 TeV")
c2.SetLogy()
c2.Draw()
#====================================================
# SAVE CANVAS
#====================================================
c2.SaveAs('weighted.png')
c2.SaveAs('weighted.pdf')
w = ROOT.TColorWheel()
cw = ROOT.TCanvas("cw","cw",0,0,800,800)
w.SetCanvas(cw)
w.Draw()
cw.Draw()
#====================================================
# RATIO PLOT
#====================================================
bins = 10
hist_train_sw = ROOT.TH1D("hist_train_sw", "train signal", bins, 0, 1)
hist_train_bw = ROOT.TH1D("hist_train_bw", "train bkg", bins, 0, 1)
hist_test_w = ROOT.TH1D("hist_test_w", "test bkg", bins, 0, 1)
for i in range(len(train_signal)):
hist_train_sw.Fill(train_signal.values[i], weights_train_signal.values[i])
for i in range(len(train_bkg)):
hist_train_bw.Fill(train_bkg.values[i], weights_train_bkg.values[i])
for i in range(len(predictions_test)):
hist_test_w.Fill(predictions_test.values[i], weights_test.values[i])
for hist in [hist_train_sw, hist_train_bw, hist_test_w]:
for i in range(1, hist.GetNbinsX()+1):
hist.SetBinError(i, np.sqrt(hist.GetBinContent(i)))
c3 = ROOT.TCanvas("c3", "Ratio Plot", 700, 750)
upper_pad = ROOT.TPad("upper_pad", "", 0, 0.25, 1, 1)
lower_pad = ROOT.TPad("lower_pad", "", 0, 0, 1, 0.25)
for pad in [upper_pad, lower_pad]:
pad.SetLeftMargin(0.14)
pad.SetRightMargin(0.05)
pad.SetTickx(True)
pad.SetTicky(True)
upper_pad.SetBottomMargin(0)
lower_pad.SetTopMargin(0)
lower_pad.SetBottomMargin(0.3)
upper_pad.Draw()
lower_pad.Draw()
c3.Draw() | [
2,
10052,
4770,
1421,
198,
2,
220,
220,
220,
220,
19164,
6239,
1546,
198,
2,
10052,
4770,
1421,
198,
11748,
19798,
292,
355,
279,
67,
220,
198,
11748,
15107,
2394,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
1... | 2.422098 | 3,575 |
import unittest
from katas.kyu_7.filter_list import filter_list
| [
11748,
555,
715,
395,
198,
198,
6738,
479,
265,
292,
13,
2584,
84,
62,
22,
13,
24455,
62,
4868,
1330,
8106,
62,
4868,
628
] | 2.75 | 24 |
#coding:utf-8
PURPLE = '\033[35m'
RED = '\033[31m'
CYAN = '\033[36m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
import csv
import sys
import codecs
from urllib.parse import urlparse #URL --> Domain
from time import sleep
dict_web_id = {}
dict_url = {}
dict_topic = {}
dict_suggest = {}
dict_sub = {}
dict_bin = {}
domains =set()
urls =set()
###################サブトピックリストの読み込み###################
a = open('sub_list.csv', 'r')
set_subtopic_keys = set()
for line in a:
LINE = line.rstrip().split(',')
web_id = LINE[0]
url = LINE[1]
topic = LINE[2]
sub_id = LINE[3]
Domain = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(url))
domains.add(Domain)
dict_web_id.setdefault(Domain, set()).add(web_id)
dict_sub.setdefault(Domain, set()).add(sub_id)
dict_topic.setdefault(Domain, set()).add(topic)
set_topic=dict_topic[Domain]
set_sub=dict_sub[Domain]
set_subtopic_keys=dict_sub.keys() #dict_subのkeyの集合
a.close()
#################ビンリストの読み込み###########################
A = open('bin_list.csv','r')
set_bin_keys = set()
for line in A:
LINE = line.rstrip().split(',')
web_id = LINE[0]
url = LINE[1]
topic = LINE[2]
bin_id = LINE[3]
Domain = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(url))
domains.add(Domain)
dict_web_id.setdefault(Domain, set()).add(web_id)
dict_topic.setdefault(Domain, set()).add(topic)
dict_bin.setdefault(Domain, set()).add(bin_id)
set_topic = dict_topic[Domain]
set_bin = dict_bin[Domain]
set_bin_keys = dict_bin.keys()
A.close()
###################ノウハウサイトの読み込み######################
b = open('know-how.csv','r')
count = 0
set_know_how = set()
dict_title = {}
dict_predict={}
dict_confidence={}
dict_truth={}
for line in b:
count = count + 1
print(line)
LINE = line.rstrip().split(',')
Domain = LINE[2]
Domain = Domain + '/'
Title = LINE[3]
predict= LINE[4]
confidence=LINE[5]
truth=LINE[1]
set_know_how.add(Domain)
dict_title[Domain] = Title
dict_predict[Domain]=predict
dict_confidence[Domain]=confidence
dict_truth[Domain]=truth
b.close()
####################ドメインごとにHTMLを作成#####################
p = open('result.csv','w')
p.write('domain_id\ttitle\tpredict\tconfidence\tsum_page\tsum_topic\ttopics\ttruth\n')
make_domain_dict()
#suggest_id()
p.close()
print (len(set_know_how))
print (RED + 'Prgram ended' + ENDC)
| [
2,
66,
7656,
25,
40477,
12,
23,
198,
47,
4261,
16437,
220,
796,
705,
59,
44427,
58,
2327,
76,
6,
198,
22083,
220,
220,
220,
220,
796,
705,
59,
44427,
58,
3132,
76,
6,
198,
34,
56,
1565,
220,
220,
220,
796,
705,
59,
44427,
58,
... | 2.242647 | 1,088 |
#personaldetails
print("NAME:Wealth Okete \nE-MAIL: wealth.okete@gmail.com \nSLACK USERNAME: @Wealth \nBIOSTACK: Genomics \nTwitter Handle: @Wealty")
print(hamming_distance('@Wealth','@Wealty'))
| [
2,
6259,
1940,
316,
1768,
201,
198,
4798,
7203,
20608,
25,
1135,
1094,
6762,
14471,
3467,
77,
36,
12,
5673,
4146,
25,
5129,
13,
482,
14471,
31,
14816,
13,
785,
3467,
77,
8634,
8120,
1294,
1137,
20608,
25,
2488,
1135,
1094,
3467,
77,... | 2.5 | 80 |
conditional statement in python:
this performs different computations or actions depending on whatever a specific boolean expression evaluaates to true or false.
they are handled by if statements in python.
from maths:
equals: a==b
not equals: a != b
less than: a<b
greater than: a>b
greater than or equals to: a>=b
example of if statement:
ade_height= 6.25
oyin_height= 5.75
if ade_height > oyin_height:
print("ade is taller tham oyin")
The elif keyword:
the elif keyword is python way of saying "if the previous condition were not true, then try this condition"
example-
boys score=24.77
girls score=25.01
if boys score>girls score:
print("boys win, girls lose")
elif girls score>boys score:
print("girls win, boys lose")
the else keyword:
if
the else keyword catches anything which isnt caught by the preceding conditions.
example-
#program to calc the longer journey
#between lagos-ibadan and lagos london
lb_max_time=2.5
ll_max_time=6
if lb_max_time>ll_max_time:
print("lagos to ibadan takes more time")
elif lb_max_time<ll_max_time:
print("lagos to london takes more time")
else:
print("both take equal time")
using logical operators:
you can use operators 'and,or and not' in python conditional statements.
for example:
x=200
y=33
z=500
if x> y and z>x:
print("both condition are true")
the pass keyword
if statements cannot be empty, but if you for some reason have an if statement with no content, put in the pass statement to avoid getting an error.
example
boys=17
if boys==17:
pass
| [
17561,
1859,
2643,
287,
21015,
25,
201,
198,
5661,
17706,
1180,
2653,
602,
393,
4028,
6906,
319,
4232,
257,
2176,
25131,
5408,
5418,
6413,
689,
284,
2081,
393,
3991,
13,
201,
198,
9930,
389,
12118,
416,
611,
6299,
287,
21015,
13,
201,... | 2.957328 | 539 |
"""
sentry.event_manager
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import math
import six
from datetime import datetime, timedelta
from collections import OrderedDict
from django.conf import settings
from django.db import connection, IntegrityError, router, transaction
from django.db.models import Q
from django.utils import timezone
from django.utils.encoding import force_bytes
from hashlib import md5
from uuid import uuid4
from sentry import eventtypes
from sentry.app import buffer, tsdb
from sentry.constants import (
CLIENT_RESERVED_ATTRS, LOG_LEVELS, DEFAULT_LOGGER_NAME, MAX_CULPRIT_LENGTH
)
from sentry.interfaces.base import get_interface, iter_interfaces
from sentry.models import (
Activity, Event, EventMapping, EventUser, Group, GroupHash, GroupResolution,
GroupStatus, Project, Release, TagKey, UserReport
)
from sentry.plugins import plugins
from sentry.signals import first_event_received, regression_signal
from sentry.utils.logging import suppress_exceptions
from sentry.tasks.merge import merge_group
from sentry.tasks.post_process import post_process_group
from sentry.utils.cache import default_cache
from sentry.utils.db import get_db_engine
from sentry.utils.safe import safe_execute, trim, trim_dict
from sentry.utils.strings import truncatechars
from sentry.utils.validators import validate_ip
if not settings.SENTRY_SAMPLE_DATA:
else:
| [
37811,
198,
82,
13000,
13,
15596,
62,
37153,
198,
27156,
8728,
198,
198,
25,
22163,
4766,
25,
357,
66,
8,
3050,
12,
4967,
416,
262,
11352,
563,
4816,
11,
766,
37195,
20673,
329,
517,
3307,
13,
198,
25,
43085,
25,
347,
10305,
11,
7... | 3.366953 | 466 |
__author__ = "RADICAL-SAGA Development Team"
__copyright__ = "Copyright 2013, RADICAL"
__license__ = "MIT"
import os
import radical.utils as ru
# ------------------------------------------------------------------------------
#
import utils
# ------------------------------------------------------------------------------
#
from .constants import *
from .task import Task, Container
from .attributes import Attributes, Callback
from .session import Session, DefaultSession
from .context import Context
from .url import Url
from .exceptions import SagaException
from .exceptions import NotImplemented
from .exceptions import IncorrectURL
from .exceptions import BadParameter
from .exceptions import AlreadyExists
from .exceptions import DoesNotExist
from .exceptions import IncorrectState
from .exceptions import PermissionDenied
from .exceptions import AuthorizationFailed
from .exceptions import AuthenticationFailed
from .exceptions import Timeout
from .exceptions import NoSuccess
from . import job
from . import filesystem
from . import replica
from . import advert
from . import resource
# import radical.saga.messages
# ------------------------------------------------------------------------------
#
pwd = os.path.dirname (__file__)
version_short, version_detail, version_base, version_branch, \
sdist_name, sdist_path = ru.get_version ([pwd])
version = version_short
# FIXME: the logger init will require a 'classical' ini based config, which is
# different from the json based config we use now. May need updating once the
# radical configuration system has changed to json
_logger = ru.Logger('radical.saga')
_logger.info ('radical.saga version: %s' % version_detail)
# ------------------------------------------------------------------------------
| [
198,
834,
9800,
834,
220,
220,
220,
796,
366,
49,
2885,
20151,
12,
4090,
9273,
7712,
4816,
1,
198,
834,
22163,
4766,
834,
796,
366,
15269,
2211,
11,
33540,
20151,
1,
198,
834,
43085,
834,
220,
220,
796,
366,
36393,
1,
628,
198,
11... | 3.413121 | 564 |
import json
from .utils import Utils
utils = Utils()
| [
11748,
33918,
198,
6738,
764,
26791,
1330,
7273,
4487,
198,
198,
26791,
796,
7273,
4487,
3419,
198
] | 3.176471 | 17 |
# -*- coding: utf-8 -*-
if __name__ == '__main__':
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2 | 32 |
# -*- coding: utf-8 -*-
from django.test.client import Client
from networkapi.test.test_case import NetworkApiTestCase
from networkapi.util.geral import mount_url
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
13,
9288,
13,
16366,
1330,
20985,
198,
198,
6738,
3127,
15042,
13,
9288,
13,
9288,
62,
7442,
1330,
7311,
32,
14415,
14402,
20448,
198,
6738,
3127,
15... | 3.018182 | 55 |
## -*- coding: utf-8 -*-
## (C) 2018 Muthiah Annamalai, <ezhillang@gmail.com>
import codecs
import os
from .resources import get_data_dir
| [
2235,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2235,
357,
34,
8,
2864,
337,
1071,
9520,
5506,
321,
282,
1872,
11,
1279,
8471,
12639,
648,
31,
14816,
13,
785,
29,
198,
198,
11748,
40481,
82,
198,
11748,
28686,
19... | 2.545455 | 55 |
import os, sys
from math import sqrt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.laplace import Laplace
from torch.distributions.normal import Normal
from torch.optim import Adam
from einops import rearrange, reduce, repeat
from advbench import perturbations
from advbench.lib.manifool.functions.algorithms.manifool import manifool
from advbench.datasets import FFCV_AVAILABLE
torch.backends.cudnn.benchmark = True
| [
11748,
28686,
11,
25064,
198,
6738,
10688,
1330,
19862,
17034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
13,
1... | 3.245033 | 151 |
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import os
import sys
import argparse
import redmapper
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compute the zred background for all galaxies')
parser.add_argument('-c', '--configfile', action='store', type=str, required=True,
help='YAML config file')
args = parser.parse_args()
config = redmapper.Configuration(args.configfile)
zb = redmapper.ZredBackgroundGenerator(config)
zb.run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
11593,
37443,
834,
1330,
7297,
11,
4112,
62,
11748,
11,
3601,
62,
8818,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
11748,
2266,
76,
11463,
198,
... | 2.823232 | 198 |
import socket
import uuid
from struct import pack
| [
11748,
17802,
198,
11748,
334,
27112,
198,
6738,
2878,
1330,
2353,
198
] | 4.166667 | 12 |
from setuptools import setup
setup(
name='pythonthegathering',
packages=['pythonthegathering'],
version='1.2.1',
description='Replaces everything good and practical about Python with MTG!',
author='Theo Hamilton/linky00',
author_email='linky00@plotholestudios.com',
url='https://github.com/linky00/pythonthegathering',
download_url='https://github.com/linky00/pythonthegathering/archive/v1.2.1.tar.gz',
keywords='decorators mtg',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
license='MIT'
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
79,
5272,
756,
258,
70,
25545,
3256,
198,
220,
220,
220,
10392,
28,
17816,
79,
5272,
756,
258,
70,
25545,
6,
4357,
198,
220,
220,
220,
2196,
... | 2.568493 | 292 |
from . import common
| [
6738,
764,
1330,
2219,
628
] | 4.4 | 5 |
# -*- coding: utf-8 -*-
def default_params():
'''
Description:
It defines the default parameters of the program.
Args:
None
Return:
defaults_dict
'''
defaults_dict = {}
defaults_dict['program_name'] = 'MATSDP'
defaults_dict['version'] = '0.2.4'
defaults_dict['logfile'] = 'matsdp.log'
defaults_dict['output_dir_name'] = 'outputs'
defaults_dict['projects_dir_name'] = 'projects'
defaults_dict['projects_summary_dir_name'] = 'projects_summary'
defaults_dict['task_summary_dir_name'] = 'task_summary'
defaults_dict['test_dir_name'] = 'test'
defaults_dict['greek_capital_letter_list'] = ['Alpha', 'Beta', 'Gamma', 'Delta', 'Epsilon', 'Zeta', 'Eta', 'Theta', 'Iota', 'Kappa', 'Lambda', 'Mu', 'Nu', 'Xi', 'Omicron', 'Pi', 'Rho', 'Sigma', 'Tau', 'Upsilon', 'Phi', 'Chi', 'Psi', 'Omega']
defaults_dict['greek_small_letter_list'] = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta', 'eta', 'theta', 'iota', 'kappa', 'lambda', 'mu', 'nu', 'xi', 'omicron', 'pi', 'rho', 'sigma', 'tau', 'upsilon', 'phi', 'chi', 'psi', 'omega']
return defaults_dict
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
4299,
4277,
62,
37266,
33529,
201,
198,
220,
220,
220,
705,
7061,
201,
198,
220,
220,
220,
12489,
25,
201,
198,
220,
220,
220,
220,
220,
220,
220,
632,
15738,
2... | 2.319277 | 498 |
import scrapy | [
11748,
15881,
88
] | 4.333333 | 3 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ====================================================================
# @author: Joe Del Rocco
# @since: 11/02/2017
# @summary: A module with angle and coordinate transformations.
# @note: Parts of this file came from angle_utilities.py written by Dan Knowlton of PCG at Cornell.
# Redistributed with permission.
# ====================================================================
# Provides functionality to convert between UV coordinates and angles as well
# as other useful angle utilities.
#
# Copyright 2014-2015 Program of Computer Graphics, Cornell University
# 580 Rhodes Hall
# Cornell University
# Ithaca NY 14853
# Web: http://www.graphics.cornell.edu/
#
# Not for commercial use. Do not redistribute without permission.
# ====================================================================
import math
import numpy as np
import common
'''
Convert a sky coordinate (azimuth, altitude) to fisheye UV coordinate (0-1, 0-1).
Note that images in this application were taken with North facing downward, so we must account for this in UV.
Note sampling pattern coordinates in this application were measured in altitude, but calculation below requires zenith.
Note altering of zenith to account for warp of lens used:
http://paulbourke.net/dome/fisheyecorrect/
'''
'''
Convert a fisheye UV coordinate (0-1, 0-1) to a sky coordinate (azimuth, altitude).
'''
'''
Convert an image pixel coordinate to a fisheye UV coordinate (0-1, 0-1).
'''
'''
Take in a pair of (azimuth, altitude) sky coordintes and return the corresponding central angle between them.
https://en.wikipedia.org/wiki/Great-circle_distance#Formulas
'''
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
38093,
18604,
198,
2,
2488,
9800,
25,
5689,
4216,
13545,
1073,
198,
2,
2488,
20777,
25,
1367,
14,
2999,
14,
5539,
198,
... | 3.747228 | 451 |
from .base import *
import os
MINIMUM_SAMPLE_SIZE = 3
TRANSCRIPT_PHRASE_POSITIVE_CONFIDENCE_LIMIT = .51
TRANSCRIPT_PHRASE_NEGATIVE_CONFIDENCE_LIMIT = -.51
TRANSCRIPT_PHRASE_CORRECTION_LOWER_LIMIT = .51
TRANSCRIPT_PHRASE_CORRECTION_UPPER_LIMIT = .66
INSTALLED_APPS += ('storages',)
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = False
ADMINS = [(os.environ['ADMIN_NAME'], os.environ['ADMIN_EMAIL'])]
ALLOWED_HOSTS = ['fixit.americanarchive.org', 'fixit.wgbh-mla.org']
LOG_DIRECTORY = '/home/wgbh/logs'
GA_CODE = os.environ['GA_CODE']
AWS_HEADERS = {
'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',
'Cache-Control': 'max-age=94608000',
}
AWS_STORAGE_BUCKET_NAME = os.environ['S3_BUCKET_NAME']
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_S3_CUSTOM_DOMAIN = 's3.amazonaws.com/{}'.format(
AWS_STORAGE_BUCKET_NAME
)
STATIC_URL = 'https://s3.amazonaws.com/{}/'.format(AWS_S3_CUSTOM_DOMAIN)
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
REST_FRAMEWORK['DEFAULT_RENDERER_CLASSES'] = (
'rest_framework.renderers.JSONRenderer',
)
NEWRELIC_CONFIG_PATH = os.environ['NEWRELIC_CONFIG_PATH']
NEWRELIC_ENV = os.environ['NEWRELIC_ENV']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ['PG_HOST'],
'NAME': 'mla',
'USER': 'mla',
'PASSWORD': os.environ['PG_PASS'],
},
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': '127.0.0.1:11211',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '{}/django.log'.format(LOG_DIRECTORY),
},
'metadata_errors': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '{}/metadata_error.log'.format(LOG_DIRECTORY),
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'metadata_errors': {
'handlers': ['metadata_errors'],
'level': 'DEBUG',
'propagate': True,
},
},
}
| [
6738,
764,
8692,
1330,
1635,
198,
11748,
28686,
198,
198,
23678,
3955,
5883,
62,
49302,
16437,
62,
33489,
796,
513,
198,
198,
5446,
1565,
6173,
46023,
62,
11909,
49,
11159,
62,
37997,
2043,
9306,
62,
10943,
37,
2389,
18310,
62,
43,
39... | 1.954128 | 1,199 |
import urllib.request
import os
import pandas as pd
import numpy as np
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
df = pd.read_csv('../data/raw/movie_data.csv',encoding='utf-8')
#print(df.head(3))
# init Objects
tokenizer=RegexpTokenizer(r'\w+')
en_stopwords=set(stopwords.words('english'))
ps=PorterStemmer()
df['review'].apply(getStemmedReview)
#df.to_csv(r'../data/processed/movie_data[clean].csv')
# X_train = df.loc[:35000, 'review'].values
# y_train = df.loc[:35000, 'sentiment'].values
# X_test = df.loc[35000:, 'review'].values
# y_test = df.loc[35000:, 'sentiment'].values
#
# from sklearn.feature_extraction.text import TfidfVectorizer
# vectorizer = TfidfVectorizer(sublinear_tf=True, encoding='utf-8',decode_error='ignore')
# vectorizer.fit(X_train)
# X_train=vectorizer.transform(X_train)
# X_test=vectorizer.transform(X_test)
#
# from sklearn.linear_model import LogisticRegression
# model=LogisticRegression(solver='liblinear')
# model.fit(X_train,y_train)
# print("Score on training data is: "+str(model.score(X_train,y_train)))
# print("Score on testing data is: "+str(model.score(X_test,y_test)))
#
# import sklearn.externals
# import joblib
# joblib.dump(en_stopwords,'stopwords.pkl')
# joblib.dump(model,'model.pkl')
# joblib.dump(vectorizer,'vectorizer.pkl')
| [
11748,
2956,
297,
571,
13,
25927,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
2528,
74,
13,
30001,
1096,
1330,
797,
25636,
79,
30642,
7509,
198,
6738,
299,
2528,
74,
13,... | 2.633721 | 516 |
import numpy as np
########################################
# Jeong approximate functions
########################################
| [
11748,
299,
32152,
355,
45941,
198,
198,
29113,
7804,
198,
2,
220,
220,
220,
220,
3852,
506,
27665,
5499,
198,
29113,
7804,
628,
628
] | 5.791667 | 24 |
from rest_framework.decorators import api_view
from fastrunner.utils import loader,newloader
from rest_framework.response import Response
from fastrunner.utils.parser import Format
from fastrunner import models
from django.conf import settings
import os,time,sys
from httprunner.utils import create_scaffold
from fastrunner.utils import runner
import traceback
from fastrunner.utils.newrunner import RunSingleApi,RunTree,RunSingleApiInStep,RunSingleApiInCase
"""运行方式
"""
import logging
logger = logging.getLogger('httprunner')
@api_view(['GET'])
def run_api_pk(request, **kwargs):
"""run api by pk
"""
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
debugApi = RunSingleApi(projectPath=projectPath, config=request.query_params['config'],
apiId=kwargs['pk'], type="singleapi")
debugApi.serializeTestCase()
debugApi.serializeTestSuite()
debugApi.serializeDebugtalk()
debugApi.generateMapping()
debugApi.run()
return Response(debugApi.summary)
@api_view(["POST"])
def run_testsuite(request):
"""debug testsuite
{
name: str,
body: dict
}
"""
body = request.data["body"]
project = request.data["project"]
name = request.data["name"]
testcase_list = []
config = None
for test in body:
test = loader.load_test(test, project=project)
if "base_url" in test["request"].keys():
config = test
continue
testcase_list.append(test)
summary = loader.debug_api(testcase_list, project, name=name, config=config)
return Response(summary)
@api_view(["POST"])
def run_test(request):
"""debug single test
{
body: dict
}
"""
body = request.data["body"]
summary = loader.debug_api(loader.load_test(body), request.data["project"])
return Response(summary)
@api_view(["GET"])
def run_testsuite_pk(request, **kwargs):
"""run testsuite by pk
{
project: int,
name: str
}
"""
pk = kwargs["pk"]
test_list = models.CaseStep.objects. \
filter(case__id=pk).order_by("step").values("body")
project = request.query_params["project"]
name = request.query_params["name"]
testcase_list = []
config = None
for content in test_list:
body = eval(content["body"])
if "base_url" in body["request"].keys():
config = eval(models.Config.objects.get(name=body["name"], project__id=project).body)
continue
testcase_list.append(body)
summary = loader.debug_api(testcase_list, project, name=name, config=config)
return Response(summary)
@api_view(['POST'])
@api_view(['POST'])
@api_view(['POST'])
@api_view(['POST'])
@api_view(['POST'])
def run_api(request):
""" run api by body
"""
api = Format(request.data)
api.parse()
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
create_scaffold(projectPath)
debugApi = RunSingleApi(project=api.project,projectPath=projectPath,config=request.data['config'],
apiBody=api.testcase,type="debugapi")
debugApi.serializeTestCase()
debugApi.serializeTestSuite()
debugApi.serializeDebugtalk()
debugApi.generateMapping()
debugApi.run()
return Response(debugApi.summary)
@api_view(['POST'])
@api_view(['POST'])
def run_casesinglestep(request):
"""run testsuite by tree
{
project: int
relation: list
name: str
async: bool
}
"""
# order by id default
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
singleStep = ''
if('apiId' in request.data.keys()):
singleStep = RunSingleApiInCase(config=request.data['config'], project=request.data['project'],
apiId=request.data['apiId'],
index=request.data['index'], projectPath=projectPath,relation = request.data['relation'][0])
elif('suiteId' in request.data.keys()):
#TODO:这里的实现只是个临时方案,还要重写的
singleStep = RunSingleApiInCase(config=request.data['config'], project=request.data['project'],
suiteId=request.data['suiteId'],
index=request.data['index'], projectPath=projectPath,
relation=request.data['relation'][0])
singleStep.serializeApi()
singleStep.serializeDebugtalk()
singleStep.generateMapping()
singleStep.serializeTestCase()
singleStep.serializeTestSuite()
singleStep.run()
return Response(singleStep.summary)
@api_view(['POST'])
def run_DebugSuiteStep(request):
""" run suitestep by body
"""
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
debugApi = RunSingleApiInStep(config=request.data['config'],project=request.data['project'],apiId=request.data['apiId'],
apiBody=request.data, projectPath=projectPath)
debugApi.serializeApi()
debugApi.serializeDebugtalk()
debugApi.generateMapping()
debugApi.serializeTestCase()
debugApi.serializeTestSuite()
debugApi.run()
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
return Response(debugApi.summary)
@api_view(['POST'])
def run_DebugCaseStep(request):
""" run casestep by body
"""
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
debugApi = RunSingleApiInStep(config=request.data['config'],project=request.data['project'],apiId=request.data['apiId'],
apiBody=request.data, projectPath=projectPath)
debugApi.serializeApi()
debugApi.serializeDebugtalk()
debugApi.generateMapping()
debugApi.serializeTestCase()
debugApi.run()
return Response(debugApi.summary)
| [
6738,
1334,
62,
30604,
13,
12501,
273,
2024,
1330,
40391,
62,
1177,
198,
6738,
3049,
16737,
13,
26791,
1330,
40213,
11,
3605,
29356,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
3049,
16737,
13,
26791,
13,
48610,
1330,... | 2.32344 | 2,965 |
from math import floor
print(count_by([6.1,4.2,6.3], floor))
print(count_by(['one', 'two', 'three'], len))
| [
198,
6738,
10688,
1330,
4314,
198,
198,
4798,
7,
9127,
62,
1525,
26933,
21,
13,
16,
11,
19,
13,
17,
11,
21,
13,
18,
4357,
4314,
4008,
198,
4798,
7,
9127,
62,
1525,
7,
17816,
505,
3256,
705,
11545,
3256,
705,
15542,
6,
4357,
1889... | 2.340426 | 47 |
from lifelines.datasets import load_rossi
from lifelines import CoxPHFitter
rossi_dataset = load_rossi()
cph = CoxPHFitter()
cph.fit(rossi_dataset, duration_col='week', event_col='arrest', show_progress=True)
cph.print_summary() # access the results using cph.summary | [
6738,
3868,
20655,
13,
19608,
292,
1039,
1330,
3440,
62,
1214,
72,
198,
6738,
3868,
20655,
1330,
18014,
11909,
37,
1967,
198,
198,
1214,
72,
62,
19608,
292,
316,
796,
3440,
62,
1214,
72,
3419,
198,
66,
746,
796,
18014,
11909,
37,
19... | 2.842105 | 95 |
# ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
from lia.common.LiaTestCase import LiaTestCase
from lucene import \
WhitespaceAnalyzer, StandardAnalyzer, Term, QueryParser, Locale, \
BooleanQuery, FuzzyQuery, IndexSearcher, TermRangeQuery, TermQuery, \
BooleanClause, Version
| [
2,
38093,
18604,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
220,
220,
921,... | 3.861789 | 246 |
import h5py
import numpy as np
# @staticmethod
# def _preprocess_group_value(group):
# data = group['data'][:]
# labels = group['labels'][:]
# result = [(data[i][np.newaxis], labels[i, 0]) for i in range(data.shape[0])]
# return result
# @staticmethod
# def _preprocess_set_value(value):
# data = np.vstack([v[0] for v in value])
# labels = np.vstack([v[1] for v in value])
# return data, labels
| [
11748,
289,
20,
9078,
198,
11748,
299,
32152,
355,
45941,
628,
628,
220,
220,
220,
1303,
2488,
12708,
24396,
198,
220,
220,
220,
1303,
825,
4808,
3866,
14681,
62,
8094,
62,
8367,
7,
8094,
2599,
198,
220,
220,
220,
1303,
220,
220,
22... | 2.269231 | 208 |
import itertools
import unittest
from typing import List, Optional, Union, Tuple
from . import children_entity_parser
from .predicate import Predicate
from ..most_general_unifier import MostGeneralUnifier
class Clause(object):
"""
Class for keeping predicates together and some several multi-predicate supported functionality
"""
def has_tautology(self) -> bool:
"""
Tautology checking procedure in the list of predicates
:return: Boolean flag representing whether the list has tautology or not. In case of having tautology True will
be returned, otherwise False.
"""
# Group each predicate by their name
for key, group in itertools.groupby(self.predicates, lambda predicate: predicate.get_name()):
# Separate them by their negation and test all the unification results of permutations of paired predicates
non_negated_predicates, negated_predicates = Clause._predicate_separator_by_sign(group)
for non_negated_predicate in non_negated_predicates:
for negated_predicate in negated_predicates:
unification, _ = MostGeneralUnifier.unify(non_negated_predicate.get_child(),
negated_predicate.get_child())
# If any of them can be unified, it means we got tautology
if unification:
return True
# If not achieved any tautology, it means we have no tautology
return False
def does_subsume(self, other: 'Clause') -> bool:
"""
Subsumption controlling function where the function tries to find
whether the other clause is more specific than the current clause
:param other: Other clause to check subsumption
:return: Boolean flag representing that the current clause subsumes the other clause
"""
# If no meet naming and negation match as a subset then immediately return False since subsumption cannot occur
fast_check_result = Clause._fast_check_by_negation_and_name(self, other)
if fast_check_result:
# Group by both name and negation
first_group = {key: list(group) for key, group in
itertools.groupby(self.predicates, lambda p: (p.get_name(), p.is_negated))}
second_group = {key: list(group) for key, group in
itertools.groupby(other.predicates, lambda p: (p.get_name(), p.is_negated))}
# Take common keys of each dict so that we can check if there exists any substitution which unifies them
common_keys = first_group.keys() & second_group.keys()
# And filter common predicates
filtered_first_group = [first_group[key] for key in common_keys]
filtered_second_group = [second_group[key] for key in common_keys]
# Then take multiplication of them
for multiplication in itertools.product(itertools.product(*filtered_first_group),
itertools.product(*filtered_second_group)):
# Each of the predicates must be the same or be less specific than the other's predicates
result = all(child == other_child or child.is_less_specific(other_child)
for child, other_child in zip(multiplication[0], multiplication[1]))
if result:
return True
# If none of them holds the condition, then return False
return False
else:
# If fast check fails
return False
def resolve_with(self, other: 'Clause') -> Tuple[Union['Clause', None], Union['Clause', None]]:
"""
Function to resolve two clauses
:param other: Other clause
:return: Resolvent clause in case of resolution otherwise None
"""
for predicate1, predicate2 in itertools.product(self.predicates, other.predicates):
# Try to unify them if they represent the same predicate but they have different negation states
if predicate1.get_name() == predicate2.get_name() and predicate1.is_negated != predicate2.is_negated:
result, substitutions = MostGeneralUnifier.unify(predicate1.get_child(), predicate2.get_child())
# Compose new predicate with combined predicates of both clauses except for resolvent predicates
new_clause_children = [Predicate.build(str(predicate)) for predicate in self.predicates]
new_clause_children.extend([Predicate.build(str(predicate)) for predicate in other.predicates])
new_clause_children.remove(predicate1)
new_clause_children.remove(predicate2)
# Return composed clause
return Clause(MostGeneralUnifier.apply_substitution(new_clause_children, substitutions)), substitutions
# If none of them can be resolved, return none
return None, None
@staticmethod
def _predicate_separator_by_sign(predicates):
"""
Grouping functionality of predicates
"""
non_negated, negated = [], []
for predicate in predicates:
(non_negated, negated)[predicate.is_negated].append(predicate)
return non_negated, negated
@staticmethod
def _fast_check_by_negation_and_name(clause1: 'Clause', clause2: 'Clause') -> bool:
"""
Fast subsumption check procedure which try to check there is any different predicate exists in other clause
so that the first clause cannot subsume
:param clause1: Clause to check subsume onto other clause
:param clause2: Clause which assumed to be subsumed by the first clause
:return: Boolean flag representing all predicates in the first clause are subset of that for second clause
"""
clause1 = set(map(lambda predicate: (predicate.is_negated, predicate.get_name()), clause1.predicates))
clause2 = set(map(lambda predicate: (predicate.is_negated, predicate.get_name()), clause2.predicates))
return clause1.issubset(clause2)
| [
11748,
340,
861,
10141,
198,
11748,
555,
715,
395,
198,
198,
6738,
19720,
1330,
7343,
11,
32233,
11,
4479,
11,
309,
29291,
198,
198,
6738,
764,
1330,
1751,
62,
26858,
62,
48610,
198,
6738,
764,
28764,
5344,
1330,
14322,
5344,
198,
673... | 2.540541 | 2,442 |
import PySimpleGUI as sg
import app_state as app
import dashboard
import backend
import os
from loguru import logger
import traceback
import arus
| [
11748,
9485,
26437,
40156,
355,
264,
70,
198,
11748,
598,
62,
5219,
355,
598,
198,
11748,
30415,
198,
11748,
30203,
198,
11748,
28686,
198,
6738,
2604,
14717,
1330,
49706,
198,
11748,
12854,
1891,
198,
11748,
610,
385,
628,
198
] | 3.794872 | 39 |
import time
import timeit
from handlerPdf import getPDFname, getLocalTime
from pathlib import Path, PurePath
| [
11748,
640,
198,
11748,
640,
270,
198,
6738,
21360,
47,
7568,
1330,
651,
20456,
3672,
11,
651,
14565,
7575,
198,
6738,
3108,
8019,
1330,
10644,
11,
17129,
15235,
628,
628
] | 3.733333 | 30 |
import unittest
import random
import axelrod
C, D = axelrod.Actions.C, axelrod.Actions.D
| [
11748,
555,
715,
395,
198,
11748,
4738,
198,
11748,
7877,
417,
14892,
198,
198,
34,
11,
360,
796,
7877,
417,
14892,
13,
32,
2733,
13,
34,
11,
7877,
417,
14892,
13,
32,
2733,
13,
35,
198
] | 2.5 | 36 |
import numpy as np
import pytest
import torch
from supermariopy.ptutils import nn
| [
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
11748,
28034,
198,
6738,
2208,
76,
2743,
11081,
13,
457,
26791,
1330,
299,
77,
628,
628,
628,
628,
628,
628,
628,
628
] | 3.03125 | 32 |
from dimagi.ext.couchdbkit import DecimalProperty, Document, StringProperty
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
TYPE_DOMAIN = 'domain'
TYPE_PRODUCT = 'product'
TYPE_SUPPLY_POINT_TYPE = 'supply-point-type'
TYPE_SUPPLY_POINT = 'supply-point'
class DefaultConsumption(CachedCouchDocumentMixin, Document):
"""
Model for setting the default consumption value of an entity
"""
type = StringProperty() # 'domain', 'product', 'supply-point-type', 'supply-point'
domain = StringProperty()
product_id = StringProperty()
supply_point_type = StringProperty()
supply_point_id = StringProperty()
default_consumption = DecimalProperty()
@classmethod
@classmethod
@classmethod
@classmethod
| [
6738,
5391,
18013,
13,
2302,
13,
66,
7673,
9945,
15813,
1330,
4280,
4402,
21746,
11,
16854,
11,
10903,
21746,
198,
198,
6738,
4755,
71,
80,
13,
18211,
13,
23870,
71,
80,
13,
19816,
1040,
1330,
327,
2317,
34,
7673,
24941,
35608,
259,
... | 2.968872 | 257 |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:bandit_38]
# language: python
# name: conda-env-bandit_38-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# %%
# %%
headwater = '0259'
hw_suffix = ''
workdir = f'/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHW_sample/HW{headwater}{hw_suffix}/RESULTS'
ofs_file = f'{workdir}/objfun_{headwater}'
df = pd.read_csv(ofs_file, sep='\s+', skipinitialspace=True)
x_vars = df.columns.tolist()[3:]
ncols = 3
numrows = int(round(len(x_vars) / float(ncols) + 0.5))
cstep = 4
# of_var = 'of_som'
# Layout info at: https://matplotlib.org/stable/tutorials/intermediate/constrainedlayout_guide.html
fig, axes = plt.subplots(nrows=numrows, ncols=ncols, figsize=(10, 10), constrained_layout=True)
fig.set_constrained_layout_pads(w_pad=4 / 72, h_pad=4 / 72, hspace=0.1, wspace=0.2)
ax = axes.flatten()
for ii,of in enumerate(x_vars):
ax[ii].set_title(f'of_prms vs {of}')
step_df = df[df.step == cstep]
step_df.plot(ax=ax[ii], kind='scatter', x=of, y='of_prms', color='red', alpha=0.2)
df_final = step_df.iloc[[-1]]
df_final.plot(ax=ax[ii], kind='scatter', x=of, y='of_prms', color='black')
# precal_ns_ref_df.plot(ax=ax[0], x='OF', y=precal_ns_ref_df.columns[1], ylim=(0.0, 1.0), color=calib_color,
# label='PRECAL-ref')
# ax = plt.gca()
# step_df = df[df.step == cstep]
# df_final = step_df.iloc[[-1]]
# step_df.plot(kind='scatter', x=of_var, y='of_prms', ax=ax, color='red', alpha=0.2)
# df_final.plot(kind='scatter', x=of_var, y='of_prms', ax=ax, color='black')
# step_two = df[df.step == 2]
# step_two.plot(kind='scatter', x=of_var, y='of_prms', ax=ax, color='green', alpha=0.2)
# step_three = df[df.step == 3]
# step_three.plot(kind='scatter', x=of_var, y='of_prms', ax=ax, color='blue', alpha=0.2)
# step_four = df[df.step == 4]
# step_four.plot(kind='scatter', x=of_var, y='of_prms', ax=ax, color='yellow', alpha=0.2)
# df_final = step_one.iloc[[-1]]
# df_final.plot(kind='scatter', x='ofRUN', y='prmsOF', ax=ax, color='black')
# df_final.plot(kind='scatter', x='ofAET', y='prmsOF', ax=ax, color='black')
# df_final.plot(kind='scatter', x='ofSCA', y='prmsOF', ax=ax, color='black')
# df_final.plot(kind='scatter', x='ofRCH', y='prmsOF', ax=ax, color='black')
# df_final.plot(kind='scatter', x='ofSOM', y='prmsOF', ax=ax, color='black')
# %%
len(df.columns.tolist()[2:])
# %%
colors = ['red', 'green', 'blue', 'yellow']
ncols = 3
numrows = int(round(len(x_vars) / float(ncols) + 0.5))
rnd = 3
# of_var = 'of_som'
df = df[df.loc[:, 'round'] == rnd]
# Layout info at: https://matplotlib.org/stable/tutorials/intermediate/constrainedlayout_guide.html
fig, axes = plt.subplots(nrows=numrows, ncols=ncols, figsize=(15, 15), constrained_layout=True)
fig.set_constrained_layout_pads(w_pad=4 / 72, h_pad=4 / 72, hspace=0.1, wspace=0.2)
ax = axes.flatten()
for ii,of in enumerate(x_vars):
ax[ii].set_title(f'of_prms vs {of}')
for xx in range(1, 5):
p_df = df[df.step == xx]
p_df.plot(ax=ax[ii], kind='scatter', x=of, y='of_prms', color=colors[xx-1], alpha=0.2)
df_final = p_df.iloc[[-1]]
df_final.plot(ax=ax[ii], kind='scatter', x=of, y='of_prms', color='black')
# %%
df[df.loc[:, 'round'] == 1]
# %%
df.head()
# %%
df.info()
# %%
# %%
# %%
# %%
# %%
x_vars
# %% [markdown]
# ### Plot OFS from the original byHRU calibration
# %%
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU3505_run1/RESULTS'
ofs_file = f'{workdir}/OFS_HRU3505'
df = pd.read_csv(ofs_file, sep='\s+', skipinitialspace=True, header=0)
# df.plot(kind='scatter',x='num_children',y='num_pets',color='red')
ax = plt.gca()
df.plot(kind='scatter', x='ofRUN', y='prmsOF', ax=ax, color='red', alpha=0.2)
df.plot(kind='scatter', x='ofAET', y='prmsOF', ax=ax, color='green', alpha=0.2)
df.plot(kind='scatter', x='ofSCA', y='prmsOF', ax=ax, color='blue', alpha=0.2)
df.plot(kind='scatter', x='ofRCH', y='prmsOF', ax=ax, color='yellow', alpha=0.2)
df.plot(kind='scatter', x='ofSOM', y='prmsOF', ax=ax, color='purple', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x='ofRUN', y='prmsOF', ax=ax, color='black')
df_final.plot(kind='scatter', x='ofAET', y='prmsOF', ax=ax, color='black')
df_final.plot(kind='scatter', x='ofSCA', y='prmsOF', ax=ax, color='black')
df_final.plot(kind='scatter', x='ofRCH', y='prmsOF', ax=ax, color='black')
df_final.plot(kind='scatter', x='ofSOM', y='prmsOF', ax=ax, color='black')
# %%
# %% [markdown]
# ### Plot params
# %%
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU3505_run2/RESULTS'
ofs_file = f'{workdir}/PARAMS_HRU3505'
df = pd.read_csv(ofs_file, sep='\s+', skipinitialspace=True, header=0)
ax = plt.gca()
df.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='red', alpha=0.2)
df.plot(kind='scatter', x='fastcoef_lin', y='RUN', ax=ax, color='green', alpha=0.2)
df.plot(kind='scatter', x='freeh2o_cap', y='RUN', ax=ax, color='blue', alpha=0.2)
df.plot(kind='scatter', x='gwflow_coef', y='RUN', ax=ax, color='yellow', alpha=0.2)
df.plot(kind='scatter', x='jh_coef', y='RUN', ax=ax, color='purple', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='fastcoef_lin', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='freeh2o_cap', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='gwflow_coef', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='jh_coef', y='RUN', ax=ax, color='black')
# %% [markdown]
# ### Plot params from original calibration
# %%
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU3505_run1/RESULTS'
ofs_file = f'{workdir}/PARAMS_HRU3505'
df = pd.read_csv(ofs_file, sep='\s+', skipinitialspace=True, header=0)
ax = plt.gca()
df.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='red', alpha=0.2)
df.plot(kind='scatter', x='fastcoef_lin', y='RUN', ax=ax, color='green', alpha=0.2)
df.plot(kind='scatter', x='freeh2o_cap', y='RUN', ax=ax, color='blue', alpha=0.2)
df.plot(kind='scatter', x='gwflow_coef', y='RUN', ax=ax, color='yellow', alpha=0.2)
df.plot(kind='scatter', x='jh_coef', y='RUN', ax=ax, color='purple', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='fastcoef_lin', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='freeh2o_cap', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='gwflow_coef', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='jh_coef', y='RUN', ax=ax, color='black')
# %%
ax = plt.gca()
df.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='red', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='black')
# %%
df.columns
# %%
# %%
# %%
var = 'tmin_cbh_adj'
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU3505_run2/RESULTS'
ofs_file = f'{workdir}/PARAMS_HRU3505'
df = pd.read_csv(ofs_file, sep='\s+', skipinitialspace=True, header=0)
ax = plt.gca()
df.plot(kind='scatter', x=f'{var}', y='RUN', ax=ax, color='red', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.1', y='RUN', ax=ax, color='green', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.2', y='RUN', ax=ax, color='blue', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.3', y='RUN', ax=ax, color='purple', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x=f'{var}', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.1', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.2', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.3', y='RUN', ax=ax, color='black')
# %%
# %%
var = 'tmin_cbh_adj'
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU3505_run1/RESULTS'
ofs_file = f'{workdir}/PARAMS_HRU3505'
df = pd.read_csv(ofs_file, sep='\s+', skipinitialspace=True, header=0)
ax = plt.gca()
df.plot(kind='scatter', x=f'{var}', y='RUN', ax=ax, color='red', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.1', y='RUN', ax=ax, color='green', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.2', y='RUN', ax=ax, color='blue', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.3', y='RUN', ax=ax, color='purple', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x=f'{var}', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.1', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.2', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.3', y='RUN', ax=ax, color='black')
# %%
| [
2,
11420,
198,
2,
474,
929,
88,
353,
25,
198,
2,
220,
220,
474,
929,
88,
5239,
25,
198,
2,
220,
220,
220,
220,
17519,
25,
20966,
2047,
65,
11,
9078,
25,
25067,
198,
2,
220,
220,
220,
220,
2420,
62,
15603,
341,
25,
198,
2,
22... | 2.157093 | 4,265 |
import argparse
import json
import os
import re
import sys
from configparser import ConfigParser
from configparser import NoOptionError
from configparser import NoSectionError
from configparser import ParsingError
from typing import Union, Dict
from oelint_parser.cls_stash import Stash
from oelint_parser.constants import CONSTANTS
from oelint_adv.cls_rule import load_rules
from oelint_adv.color import set_colorize
from oelint_adv.rule_file import set_messageformat
from oelint_adv.rule_file import set_noinfo
from oelint_adv.rule_file import set_nowarn
from oelint_adv.rule_file import set_relpaths
from oelint_adv.rule_file import set_rulefile
from oelint_adv.rule_file import set_suppressions
sys.path.append(os.path.abspath(os.path.join(__file__, '..')))
def deserialize_boolean_options(options: Dict) -> Dict[str, Union[str, bool]]:
"""Converts strings in `options` that are either 'True' or 'False' to their boolean
representations.
"""
for k, v in options.items():
if isinstance(v, str):
if v.strip() == 'False':
options[k] = False
elif v.strip() == 'True':
options[k] = True
return options
if __name__ == '__main__':
main() # pragma: no cover
| [
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
198,
6738,
4566,
48610,
1330,
17056,
46677,
198,
6738,
4566,
48610,
1330,
1400,
19722,
12331,
198,
6738,
4566,
48610,
1330,
1400,
16375,
12331,
19... | 2.715517 | 464 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
String manipulate.
"""
# left strip
assert " Hello ".lstrip() == "Hello "
# right strip
assert " Hello ".rstrip() == " Hello"
# strip
assert " Hello ".strip() == "Hello"
# upper case
assert "Hello".upper() == "HELLO"
# lower case
assert "Hello".lower() == "hello"
# swap case
assert "Hello".swapcase() == "hELLO"
# titlize
assert "this is so good".title() == "This Is So Good"
# center
assert "Hello".center(9, "-") == "--Hello--"
# index
assert "this is so good".index("is") == 2
# replace
assert "this is so good".replace("is", "are") == "thare are so good"
# find
assert "this is so good".find("is") == 2
# count
assert "this is so good".count("o") == 3
# split
assert "This is so good".split(" ") == ["This", "is", "so", "good"]
# join
assert ", ".join(["a", "b", "c"]) == "a, b, c"
# ascii code to string
assert chr(88) == "X"
# string to ascii code
assert ord("X") == 88
# partition
assert "this is so good".partition("is") == ("th", "is", " is so good")
# make translate table and translate
table = str.maketrans("abc", "xyz")
assert "abc".translate(table) == "xyz"
# concatenate
assert "hello" + " " + "world" == "hello world"
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
10100,
18510,
13,
198,
37811,
198,
198,
2,
1364,
10283,
198,
30493,
366,
220,
18435,
220,
27071,
75,
... | 2.677704 | 453 |
"""
This file contains unittests for the api app.
Use test_settings when running this:
./manage.py test --settings=dyanote.test_settings api
This will use sqlite and other settings to make test execution faster.
Command used to create test database.
./manage.py dumpdata --indent=4
--natural
-e admin
-e sessions
-e contenttypes
-e auth.Permission
-e south.migrationhistory > api/fixtures/test-db.json
To see test coverage use:
coverage run ./manage.py test --settings=dyanote.test_settings api
coverage report -m --include=api/*
coverage html
"""
import unittest
import re
from urllib.parse import quote
from json import loads as load_json
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.contrib.auth.models import User
from django.core import mail
from django.core.exceptions import ValidationError
from rest_framework.test import APITestCase, APIClient
from rest_framework import status
from django.core.urlresolvers import get_script_prefix, resolve
from api.models import Page, ActivationKey
from api import utils
# Costant values found in the test database fixture
USERNAME = 'test@dyanote.com'
PASSWORD = 'pwd'
CLIENT_ID = 'bb05c6ab017f50116084'
CLIENT_SECRET = '4063c2648cdd7f2e4dae563da80a516f2eb6ebb6'
ACCESS_TOKEN = '1b24279ad7d5986301583538804e5240c3e588af'
ADMIN_USERNAME = 'admin'
ADMIN_PASSWORD = 'admin'
# Model test
# Utils tests
# User testing
| [
37811,
198,
1212,
2393,
4909,
555,
715,
3558,
329,
262,
40391,
598,
13,
198,
198,
11041,
1332,
62,
33692,
618,
2491,
428,
25,
198,
19571,
805,
496,
13,
9078,
1332,
1377,
33692,
28,
67,
4121,
1258,
13,
9288,
62,
33692,
40391,
198,
12... | 2.965092 | 487 |
import pytest
import shutil
import tempfile
import os
import pipes
import ujson as json
CORPUS = {
"docs": [
{
"url": "http://www.douglasadams.com/",
"content": """ <title>xxxxuniquecontent</title> """
},
{
"url": "http://www.example.com/page1",
"content": """ <title>xxxxuniquecontent2</title> """
}
],
"block": "1"
}
@pytest.mark.elasticsearch
| [
11748,
12972,
9288,
198,
11748,
4423,
346,
198,
11748,
20218,
7753,
198,
11748,
28686,
198,
11748,
19860,
198,
11748,
334,
17752,
355,
33918,
198,
198,
44879,
47,
2937,
796,
1391,
198,
220,
220,
220,
366,
31628,
1298,
685,
198,
220,
220... | 2.046296 | 216 |
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
if __name__ == "__main__":
pyunit_utils.standalone_test(glm_mean_residual_deviance)
else:
glm_mean_residual_deviance()
| [
11748,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
16,
553,
40720,
40720,
40720,
4943,
198,
11748,
289,
17,
78,
198,
6738,
5254,
1330,
12972,
20850,
62,
26791,
198,
6738,
289,
17,
78,
13,
395,
320,
2024,
13,
4743,
76,
1330,
367,
17,
... | 2.542056 | 107 |
from ScopeFoundry import HardwareComponent
from ScopeFoundryHW.ni_daq.devices.NI_Daq import NI_DacTask | [
6738,
41063,
21077,
563,
1330,
28715,
21950,
198,
6738,
41063,
21077,
563,
39,
54,
13,
8461,
62,
48539,
13,
42034,
13,
22125,
62,
26531,
80,
1330,
24947,
62,
35,
330,
25714
] | 3.290323 | 31 |
"""cli.alerter -- control alerter
With ``alerter`` command you can control the :py:mod:`~elastico.alerter`
module.
For more help on a command, run::
elastico alerter <command> -h
"""
from .cli import command, opt, arg
from ..alerter import Alerter
from ..connection import elasticsearch
from ..util import write_output
from ..server import Server
import pyaml, logging, time, yaml, sys
logger = logging.getLogger('elastico.cli.alerter')
alerter_command = command.add_subcommands('alerter', description=__doc__)
@alerter_command("expand-rules",
arg("--list", '-l', choices=['names', 'keys', 'types', 'alerts'], default=None),
arg("--format", '-f', default=None),
)
def alerter_expand_rules(config):
"""Expand rules, that you can check, if they are correct
This command expands the rules like in a regular alerter run and prints
them to stdout in YAML format. This way you can check, if all variables
and defaults are expanded as expected.
"""
expanded_rules = Alerter.expand_rules(config)
if config['alerter.expand-rules.list']:
expand = config['alerter.expand-rules.list']
if expand in ('names', 'keys', 'types'):
for name in set([ rule[expand[:-1]] for rule in expanded_rules ]):
print(name)
if expand == 'alerts':
for name in set([ "%s-%s" % (rule['type'], rule['key']) for rule in expanded_rules ]):
print(name)
elif config['alerter.expand-rules.format']:
for rule in expanded_rules:
print(config['alerter.expand-rules.format'].format(**rule))
else:
pyaml.p(expanded_rules)
@alerter_command('check',
arg('--status', "-s", choices=['ok', 'alert', 'error'], default='ok'),
arg('alert', nargs="*", default=[]),
)
# need a command, where I simulate the data input for the checks, such that
# you can check, if messages are created correctly
# need a command to display dependency tree of alert rules and alerts
@alerter_command('deps')
@alerter_command('status', opt('--all')) #, arg("rule"))
@alerter_command('show',
arg('item', choices=('rules', 'alerts'), help="choose what to display"),
opt('--details', '--all', '-a', help="display rule details")
)
@alerter_command("run")
def alerter_run(config):
"""run alerter"""
alerter = Alerter(elasticsearch(config), config)
alerter.check_alerts()
@alerter_command("serve",
arg('--sleep-seconds', '-s', type=float, default=60, config="serve.sleep_seconds"),
arg('--count', '-c', type=int, default=0, config="serve.count"),
)
def alerter_serve(config):
"""run alerter"""
server = Server(config, run=_run)
server.run()
@alerter_command("query")
def alerter_run(config):
"""run alerter"""
pass
| [
37811,
44506,
13,
36213,
353,
1377,
1630,
435,
263,
353,
198,
198,
3152,
7559,
36213,
353,
15506,
3141,
345,
460,
1630,
262,
1058,
9078,
25,
4666,
25,
63,
93,
417,
3477,
78,
13,
36213,
353,
63,
198,
21412,
13,
198,
198,
1890,
517,
... | 2.677885 | 1,040 |
import time
import uuid
from ...config import AppConfig
from ...middleware.interface import MiddlewareBase
from ...session.interfaces import ISessionProvider
from ...util import md5, b64
| [
11748,
640,
201,
198,
11748,
334,
27112,
201,
198,
201,
198,
6738,
2644,
11250,
1330,
2034,
16934,
201,
198,
6738,
2644,
27171,
1574,
13,
39994,
1330,
6046,
1574,
14881,
201,
198,
6738,
2644,
29891,
13,
3849,
32186,
1330,
3180,
2521,
29... | 3.517857 | 56 |
LATESTMFILE = 'last_id.txt'
LOGFILE = "twitterbot_log.txt"
verbose = False
twitterName = "ui_cer_bot"
# Liste de terme qui servent pour répondre
answers = ['ahah :)' , 'YO' , 'O_O', 'stoi' , 'TG' , 'MER IL ET FOU']
# Liste des terme qui servent a repondre "stoi xxxx"
bad_words = {'boloss' : 'le boloss', 'boulette' : 'la boulette', 'accident' :"l'accident" , 'youtube':"le tube" , 'facebook':"le bouc" , 'dément': "qui ment"}
# Liste des terme relou ou le bot repond TG avec un mention paritculiere pour @infredwetrust :)
boring_words = {'#old' , 'oscours', '#oscours', "twitpic", "selfie" }
# Liste des termes qui enclenche une reponse
tg_list = ['tg','ta gueule', 'tg.', 'tg!', 'ta gueule.', 'ta gueule!']
#Liste des phrase que le bot tweete de lui-emme
talk = {"Sinon SAVA ?", "c'est l'amour à la plage, aoum tcha tcha tcha", "Je vous trouve très beau, surtout moi" , "y a quoi de beau à la télé ce soir ?", "sim est mort. #rip"}
| [
171,
119,
123,
43,
1404,
6465,
44,
25664,
796,
705,
12957,
62,
312,
13,
14116,
6,
198,
25294,
25664,
796,
366,
6956,
13645,
62,
6404,
13,
14116,
1,
198,
19011,
577,
796,
10352,
198,
6956,
5376,
796,
366,
9019,
62,
2189,
62,
13645,
... | 2.379747 | 395 |
from dsa.data_structures import LinkedList, ListNode
l = [1, 2, 3]
ll = LinkedList(l, doubly=False)
mid_n = ll.head.next_node
delete_middle_node(mid_n)
str(ll)
| [
6738,
288,
11400,
13,
7890,
62,
7249,
942,
1330,
7502,
276,
8053,
11,
7343,
19667,
628,
198,
198,
75,
796,
685,
16,
11,
362,
11,
513,
60,
198,
297,
796,
7502,
276,
8053,
7,
75,
11,
3385,
306,
28,
25101,
8,
198,
13602,
62,
77,
... | 2.328571 | 70 |
from django.db import models
from django.db.models.deletion import CASCADE
# Create your models here. | [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
1330,
35106,
34,
19266,
198,
198,
2,
13610,
534,
4981,
994,
13
] | 3.290323 | 31 |
#!/usr/bin/env python3
from collections import defaultdict
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('csv', nargs='?', default=None,
help='the CSV format callgraph to transform')
args = parser.parse_args()
import sys
with (open(args.csv) if args.csv else sys.stdin) as infile:
callgraph = read_callgraph(infile)
print_callgraph(callgraph)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
17268,
1330,
4277,
11600,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1330,
1822,
29572,
198,
220,
220,
220,
30751,... | 2.563536 | 181 |
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
import logging
from charms.operator_libs_linux.v0 import passwd
from helpers import lines_in_file
logger = logging.getLogger(__name__)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
33448,
19507,
605,
12052,
13,
198,
2,
4091,
38559,
24290,
2393,
329,
15665,
3307,
13,
628,
198,
11748,
18931,
198,
198,
6738,
41700,
13,
46616,
62,
8019,
82,
62,
23289,
... | 3.184211 | 76 |
# !/usr/bin/python3
import string
import time
import random
import json
import yaml
import ssl
import base64
import logging
from logging.config import fileConfig
import importlib
import argparse
import os
import re
from rich.logging import RichHandler
from datetime import datetime
import paho.mqtt.client as mqtt
from MessageConverters.MessageConverter import MessageConverter
LOGGING_CONFIG = 'logging.conf'
CONVERTERS_DIR = 'MessageConverters'
# list to store all mqtt connection infos
brokers = []
'''
def translate_to_tb_format(payload):
tb_payload = {}
measurements = []
measurement = {}
measurement['ts'] = payload.get('ts')
measurement['values'] = payload.get('fields')
deviceid = payload.get('tags').get('deviceid')
measurements.append(measurement)
tb_payload[deviceid] = measurements
return tb_payload
'''
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-v",
"--verbose",
help="increase output verbosity",
action="store_true")
parser.add_argument(
"--conf_file",
help="configuration file",
type=str,
default="config.yaml")
args = parser.parse_args()
path_log_config_file = os.path.join(os.path.dirname(
os.path.realpath(__file__)), 'conf', LOGGING_CONFIG)
print(f'logging config file: {path_log_config_file}')
fileConfig(path_log_config_file)
logger = logging.getLogger(__name__)
logger.info("using logging conf from {}".format(path_log_config_file))
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
logger.info("verbosity turned on")
# load config
path_config_file = os.path.join(os.path.dirname(
os.path.realpath(__file__)), 'conf', args.conf_file)
with open(path_config_file) as yaml_conf_file:
configuration = yaml.full_load(yaml_conf_file)
logger.info("loaded config: {}".format(configuration))
# start all mqtt connections
logger.info('starting mqtt connections...')
# list to stor all active vlients
active_clients = {}
# dictionary to store all dynamically loaded converters
converters = {}
for name, conf in configuration.get("brokers").items():
logger.info(
f'starting client for broker {name}, connecting to host {conf.get("host")}')
client = connect_mqtt(name, conf)
if client:
# Bind function to callback
client.on_publish = on_publish
client.on_log = on_log
client.on_message = on_message
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.loop_start()
client.enable_logger(logger)
# create converter and routing info
converter_and_routing_info = {}
converter_and_routing_info['name'] = name
subscribe_converter = conf.get('subscribe-converter')
converter_and_routing_info['subscribe-converter'] = subscribe_converter
if subscribe_converter:
_load_converter(subscribe_converter)
publish_converter = conf.get('publish-converter')
converter_and_routing_info['publish-converter'] = publish_converter
if publish_converter:
_load_converter(publish_converter)
converter_and_routing_info['routes'] = []
for route in configuration.get("routing"):
if route["subscribe-broker"] == name:
converter_and_routing_info['routes'].append(route)
payload_converter = route.get('payload-converter')
if payload_converter:
_load_converter(
payload_converter)
logger.debug(f"added route {route['name']}")
client.user_data_set(converter_and_routing_info)
active_clients[name] = client
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
logger.info('interrupted!')
for name, client in active_clients.items():
disconnect_mqtt(client)
| [
2,
5145,
14,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
4731,
198,
11748,
640,
198,
11748,
4738,
198,
11748,
33918,
198,
11748,
331,
43695,
198,
11748,
264,
6649,
198,
11748,
2779,
2414,
198,
11748,
18931,
198,
6738,
18931,
13,
... | 2.295628 | 1,830 |
# Author-Patrick Rainsberry
# Description-Simplified Slicer for Fusion 360
# Importing sample Fusion Command
# Could import multiple Command definitions here
from .FusionSlicerLTCommand import FusionSlicerLTCommand, FusionSlicerLTCommand2
commands = []
command_definitions = []
# Define parameters for 1st command
cmd = {
'cmd_name': 'Fusion Slicer LT',
'cmd_description': 'Simplified Fusion Slicing App',
'cmd_id': 'cmdID_slicer_lt',
'cmd_resources': './resources',
'workspace': 'FusionSolidEnvironment',
'toolbar_panel_id': 'SolidScriptsAddinsPanel',
'class': FusionSlicerLTCommand
}
command_definitions.append(cmd)
# Define parameters for 1st command
cmd = {
'cmd_name': 'Fusion Slicer LT 2',
'cmd_description': 'Simplified Fusion Slicing App',
'cmd_id': 'cmdID_slicer_lt2',
'cmd_resources': './resources',
'workspace': 'FusionSolidEnvironment',
'toolbar_panel_id': 'SolidScriptsAddinsPanel',
'command_visible': False,
'class': FusionSlicerLTCommand2
}
command_definitions.append(cmd)
# Set to True to display various useful messages when debugging your app
debug = False
# Don't change anything below here:
for cmd_def in command_definitions:
command = cmd_def['class'](cmd_def, debug)
commands.append(command)
| [
2,
6434,
12,
32718,
371,
1299,
8396,
198,
2,
12489,
12,
8890,
489,
1431,
311,
677,
263,
329,
21278,
11470,
198,
198,
2,
17267,
278,
6291,
21278,
9455,
198,
2,
10347,
1330,
3294,
9455,
17336,
994,
198,
6738,
764,
37,
4241,
50,
677,
... | 2.864745 | 451 |
import subprocess
import tempfile
from pathlib import Path
import requests
from mutagen.easyid3 import EasyID3
from mutagen.id3 import APIC, ID3
from mutagen.mp3 import MP3
from pathvalidate import sanitize_filename
from PyQt5.QtCore import QThread
from vk_api.audio import VkAudio
from entities.album import VkAlbum
from entities.session import VkSession
from entities.song import VkSong
from utils import get_tracklist_iter
| [
11748,
850,
14681,
198,
11748,
20218,
7753,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
7007,
198,
6738,
4517,
11286,
13,
38171,
312,
18,
1330,
16789,
2389,
18,
198,
6738,
4517,
11286,
13,
312,
18,
1330,
3486,
2149,
11,
4522,
... | 3.325581 | 129 |
import h5py
import math
import time
import numpy
import sys
from functools import reduce
from keras.models import Sequential
from keras.layers import GRU, LSTM, Dropout, Dense
from keras.layers.wrappers import TimeDistributed
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
with open('./data/fb_news_comments.txt', 'r', encoding='utf-8') as file:
comments = file.read()
chars = list(sorted(set(comments)))
# print(''.join(chars))
# print([ord(x) for x in chars])
# exit()
start = 0
seq_length = 100
items = 200000
char_to_int = dict((c, i) for i, c in enumerate(chars))
int_to_char = dict((i, c) for i, c in enumerate(chars))
n_vocab = len(chars)
n_patterns = items
model = Sequential()
model.add(GRU(512, input_shape=(seq_length, 1), return_sequences=True))
model.add(Dropout(0.2))
model.add(GRU(256))
model.add(Dropout(0.2))
model.add(Dense(n_vocab, activation='softmax'))
model.load_weights("./results/test_6/weights-improvement-60-1.7856.hdf5")
model.compile(loss='categorical_crossentropy', optimizer='adam')
filepath="./results/test_6/weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, mode='min')
callbacks_list = [checkpoint]
for i in range(33, 100):
dataX = []
dataY = []
generate()
exit()
print()
for j in range(start + items * i, start + items * (i + 1)):
seq_in = comments[j:j + seq_length]
seq_out = comments[j + seq_length]
dataX.append([char_to_int[char] for char in seq_in])
dataY.append(char_to_int[seq_out])
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
X = X / float(n_vocab)
y = np_utils.to_categorical(dataY)
model.fit(X, y, epochs=i * 2 + 2, initial_epoch=i * 2, batch_size=128, callbacks=callbacks_list)
| [
11748,
289,
20,
9078,
198,
11748,
10688,
198,
11748,
640,
198,
11748,
299,
32152,
198,
11748,
25064,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
41927,
292,
13,
75,
6962,
1330... | 2.476839 | 734 |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RClue(RPackage):
"""Cluster Ensembles."""
cran = "clue"
version('0.3-61', sha256='71311b16ce380fd9a8834be95b55b3d1b47e4ee2b8acb35b8d481138c314dc31')
version('0.3-60', sha256='6d21ddfd0d621ed3bac861890c600884b6ed5ff7d2a36c9778b892636dbbef2a')
version('0.3-58', sha256='2ab6662eaa1103a7b633477e8ebd266b262ed54fac6f9326b160067a2ded9ce7')
version('0.3-57', sha256='6e369d07b464a9624209a06b5078bf988f01f7963076e946649d76aea0622d17')
depends_on('r@3.2.0:', type=('build', 'run'))
depends_on('r-cluster', type=('build', 'run'))
| [
2,
15069,
2211,
12,
1238,
1828,
13914,
45036,
3549,
2351,
4765,
11,
11419,
290,
584,
198,
2,
1338,
441,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
... | 2.146739 | 368 |
print(min(a,b,c))
| [
4798,
7,
1084,
7,
64,
11,
65,
11,
66,
4008,
198
] | 1.636364 | 11 |
import nltk
from nltk import tokenize
from nltk.util import ngrams
import os
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from readability import Readability
import collections
from nltk.stem.porter import *
from nltk import word_tokenize
import string
import pickle
### This File contains functions for each type of feature. Use Compute_All_Features.py to run.
DIRNAME = os.path.dirname(__file__)
| [
11748,
299,
2528,
74,
201,
198,
6738,
299,
2528,
74,
1330,
11241,
1096,
201,
198,
6738,
299,
2528,
74,
13,
22602,
1330,
299,
4546,
82,
201,
198,
11748,
28686,
201,
198,
6738,
410,
5067,
31837,
3681,
13,
85,
5067,
31837,
3681,
1330,
... | 2.807229 | 166 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017 Romain Boman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# example of PyQt (QMainWindow) + vtk (QVTKRenderWindowInteractor)
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
print("Qt %s loaded!" % QT_VERSION_STR)
import vtk
from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
import sys
if __name__ == "__main__":
app = QApplication(sys.argv)
window = SimpleView()
window.show()
window.widget.Initialize() # This is the line we need
app.exec_()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
220,
220,
15069,
2177,
3570,
391,
347,
5185,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
1... | 2.964674 | 368 |
import h5py
import os
| [
11748,
289,
20,
9078,
198,
11748,
28686,
220,
198
] | 2.555556 | 9 |
import tensorflow as tf
import numpy as np
from sklearn.feature_extraction import text
from sklearn import feature_selection
from tensorflow.python.util import nest
from autokeras import const
from autokeras.hypermodel import hyper_block as hb_module
class HyperPreprocessor(hb_module.HyperBlock):
"""Hyper preprocessing block base class."""
def build(self, hp, inputs=None):
"""Build into part of a Keras Model.
Since they are for preprocess data before feeding into the Keras Model,
they are not part of the Keras Model. They only pass the inputs
directly to outputs.
"""
return inputs
def set_hp(self, hp):
"""Set Hyperparameters for the Preprocessor.
Since the `update` and `transform` function are all for single training
instances instead of the entire dataset, the Hyperparameters needs to be
set in advance of call them.
Args:
hp: Hyperparameters. The hyperparameters for tuning the preprocessor.
"""
self._hp = hp
def update(self, x):
"""Incrementally fit the preprocessor with a single training instance.
Args:
x: EagerTensor. A single instance in the training dataset.
"""
raise NotImplementedError
def transform(self, x):
"""Incrementally fit the preprocessor with a single training instance.
Args:
x: EagerTensor. A single instance in the training dataset.
Returns:
A transformed instanced which can be converted to a tf.Tensor.
"""
raise NotImplementedError
def output_types(self):
"""The output types of the transformed data, e.g. tf.int64.
The output types are required by tf.py_function, which is used for transform
the dataset into a new one with a map function.
Returns:
A tuple of data types.
"""
raise NotImplementedError
def output_shape(self):
"""The output shape of the transformed data.
The output shape is needed to build the Keras Model from the AutoModel.
The output shape of the preprocessor is the input shape of the Keras Model.
Returns:
A tuple of ints or a TensorShape.
"""
raise NotImplementedError
def finalize(self):
"""Training process of the preprocessor after update with all instances."""
pass
class OneHotEncoder(object):
"""A class that can format data.
This class provides ways to transform data's classification label into
vector.
Attributes:
data: The input data
num_classes: The number of classes in the classification problem.
labels: The number of labels.
label_to_vec: Mapping from label to vector.
int_to_label: Mapping from int to label.
"""
def __init__(self):
"""Initialize a OneHotEncoder"""
self.data = None
self.num_classes = 0
self.labels = None
self.label_to_vec = {}
self.int_to_label = {}
def fit(self, data):
"""Create mapping from label to vector, and vector to label."""
data = np.array(data).flatten()
self.labels = set(data)
self.num_classes = len(self.labels)
for index, label in enumerate(self.labels):
vec = np.array([0] * self.num_classes)
vec[index] = 1
self.label_to_vec[label] = vec
self.int_to_label[index] = label
def transform(self, data):
"""Get vector for every element in the data array."""
data = np.array(data)
if len(data.shape) > 1:
data = data.flatten()
return np.array(list(map(lambda x: self.label_to_vec[x], data)))
def inverse_transform(self, data):
"""Get label for every element in data."""
return np.array(list(map(lambda x: self.int_to_label[x],
np.argmax(np.array(data), axis=1))))
class Normalize(HyperPreprocessor):
""" Perform basic image transformation and augmentation.
# Attributes
mean: Tensor. The mean value. Shape: (data last dimension length,)
std: Tensor. The standard deviation. Shape is the same as mean.
"""
def transform(self, x):
""" Transform the test data, perform normalization.
# Arguments
data: Tensorflow Dataset. The data to be transformed.
# Returns
A DataLoader instance.
"""
x = nest.flatten(x)[0]
return (x - self.mean) / self.std
class TextToIntSequence(HyperPreprocessor):
"""Convert raw texts to sequences of word indices."""
class TextToNgramVector(HyperPreprocessor):
"""Convert raw texts to n-gram vectors."""
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
1330,
2420,
198,
6738,
1341,
35720,
1330,
3895,
62,
49283,
198,
6738,
11192,
273,
11125,
13,
29412,
13,
22602,
... | 2.585405 | 1,850 |